autographed
Comparing version 0.1.1 to 0.1.2
@@ -9,3 +9,2 @@ import 'jest';
 import {
-  createRepo,
   createGraphProtocolTemplate,
@@ -114,15 +113,4 @@ Environment,
-it("createRepo", () => {
-  const dir = tempPath('autodave::jest::createRepo');
-  const {
-    packageJson,
-    tsConfigJson,
-  } = createRepo({dir, purgeIfExists: true});
-  expect(fs.readFileSync(packageJson, 'utf-8')).toMatchSnapshot();
-  expect(fs.readFileSync(tsConfigJson, 'utf-8')).toMatchSnapshot();
-});
 const testHardhatProject = tempPath('autodave::jest::compileHardhatProject');
-const cacheEnabled = true;
+const cacheEnabled = false;
@@ -129,0 +117,0 @@ it("compileHardhatProject", () => {
@@ -78,10 +78,4 @@ "use strict";
 });
-it("createRepo", () => {
-    const dir = (0, src_1.tempPath)('autodave::jest::createRepo');
-    const { packageJson, tsConfigJson, } = (0, src_1.createRepo)({ dir, purgeIfExists: true });
-    expect(fs_extra_1.default.readFileSync(packageJson, 'utf-8')).toMatchSnapshot();
-    expect(fs_extra_1.default.readFileSync(tsConfigJson, 'utf-8')).toMatchSnapshot();
-});
 const testHardhatProject = (0, src_1.tempPath)('autodave::jest::compileHardhatProject');
-const cacheEnabled = true;
+const cacheEnabled = false;
 it("compileHardhatProject", () => {
@@ -88,0 +82,0 @@ if (fs_extra_1.default.existsSync(testHardhatProject) && cacheEnabled)
 {
   "name": "autographed",
-  "version": "0.1.1",
+  "version": "0.1.2",
   "description": "The self-building, hot-reloading subgraph. The quickest way to start indexing your shit.",
@@ -5,0 +5,0 @@ "main": "dist",
 /// <reference types="node" />
+/// <reference types="node" />
 import * as child_process from "child_process";
@@ -11,9 +12,2 @@ import { Environment, Source } from "../@types";
 }) => void;
-export declare const createRepo: ({ dir, purgeIfExists, }: {
-    readonly dir: string;
-    readonly purgeIfExists?: boolean | undefined;
-}) => {
-    packageJson: string;
-    tsConfigJson: string;
-};
 export declare const createGraphProtocolTemplate: ({ dir, purgeIfExists, }: {
@@ -54,3 +48,3 @@ readonly dir: string;
 export declare const ipfs: () => Promise<unknown>;
-declare const postgres: ({ postgresDb, postgresPassword, postgresPort, postgresUser, }: {
+declare const postgres: ({ postgresDb, postgresPassword, postgresPort, postgresUser, dockerContainerName, }: {
     readonly postgresPort: number;
@@ -60,2 +54,3 @@ readonly postgresDb: string;
     readonly postgresPassword: string;
+    readonly dockerContainerName?: string | undefined;
 }) => Promise<unknown>;
@@ -67,2 +62,3 @@ export declare const graphNode: ({ graphNodeInstallationDir, postgresPassword, postgresPort, postgresUser, postgresDb, ipfsPort, ethereumNetwork, ethereumPort, }: {
     readonly postgresPassword: string;
+    readonly dockerContainerName?: string | undefined;
 } & {
@@ -81,3 +77,3 @@ readonly ipfsPort: number;
     readonly subgraphName: string;
-}) => Promise<void>;
+}) => Promise<Buffer>;
 export declare const hardhatLocalNode: ({ hardhatProjectDir }: {
@@ -87,3 +83,3 @@ readonly hardhatProjectDir: string;
 export declare const toDeployParams: ({ POSTGRES_PORT: postgresPort, POSTGRES_DB: postgresDb, POSTGRES_USER: postgresUser, POSTGRES_PASSWORD: postgresPassword, IPFS_PORT: ipfsPort, ETHEREUM_PORT: ethereumPort, ETHEREUM_NETWORK: ethereumNetwork, GRAPH_NODE_GRAPHQL_PORT: graphNodeGraphQLPort, GRAPH_NODE_STATUS_PORT: graphNodeStatusPort, }: Environment) => Omit<Parameters<typeof deploy>[0], 'graphNodeInstallationDir' | 'subgraphName' | 'subgraphTemplateDir' | 'hardhatProjectDir'>;
-export declare const deploy: ({ ethereumPort, postgresPassword, postgresDb, postgresPort, postgresUser, graphNodeInstallationDir, ipfsPort, ethereumNetwork, subgraphTemplateDir, graphNodeStatusPort, graphNodeGraphQLPort, subgraphName, versionLabel, hardhatProjectDir, }: Parameters<typeof graphNode>[0] & Parameters<typeof subgraph>[0] & Parameters<typeof hardhatLocalNode>[0]) => Promise<[child_process.ChildProcess, [unknown, unknown, unknown, void]]>;
+export declare const deploy: ({ ethereumPort, postgresPassword, postgresDb, postgresPort, postgresUser, graphNodeInstallationDir, ipfsPort, ethereumNetwork, subgraphTemplateDir, graphNodeStatusPort, graphNodeGraphQLPort, subgraphName, versionLabel, hardhatProjectDir, }: Parameters<typeof graphNode>[0] & Parameters<typeof subgraph>[0] & Parameters<typeof hardhatLocalNode>[0]) => Promise<[child_process.ChildProcess, [unknown, unknown, unknown, Buffer]]>;
 export {};
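
For orientation, here is a hedged sketch of how a caller might line up with the 0.1.2 declarations above: `postgres` now accepts an optional `dockerContainerName`, and the tuple that `deploy` resolves with carries a `Buffer` in its last slot where 0.1.1 had `void`. Every concrete value below (ports, paths, names) is an illustrative assumption, not something taken from the package.

```ts
// Illustrative only: the parameter names and the Promise shape follow the
// declaration diff above; all values are assumptions.
import type * as child_process from "child_process";
import { deploy } from "autographed"; // "main": "dist" per the package.json diff

// In 0.1.1 the last tuple element was typed `void`; in 0.1.2 it is `Buffer`.
const pending: Promise<[child_process.ChildProcess, [unknown, unknown, unknown, Buffer]]> = deploy({
  postgresPort: 5432,
  postgresUser: "postgres",                      // assumed
  postgresPassword: "password",                  // assumed
  postgresDb: "graph-node",                      // assumed
  ipfsPort: 5001,                                // assumed
  ethereumPort: 8545,                            // assumed
  ethereumNetwork: "hardhat",                    // assumed
  graphNodeGraphQLPort: 8000,                    // assumed
  graphNodeStatusPort: 8020,                     // assumed
  graphNodeInstallationDir: "/tmp/graph-node",   // assumed path
  subgraphTemplateDir: "/tmp/subgraph-template", // assumed path
  subgraphName: "my-subgraph",                   // assumed
  versionLabel: "v0.0.1",                        // assumed
  hardhatProjectDir: "./hardhat-project",        // assumed path
});

void pending; // the helpers start long-running services, so settle time is not guaranteed
```
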
@@ -29,3 +29,3 @@ "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.deploy = exports.toDeployParams = exports.hardhatLocalNode = exports.subgraph = exports.graphNode = exports.ipfs = exports.waitForGraph = exports.waitForIpfs = exports.waitForEthereum = exports.ensureGraphNodeInstallation = exports.buildSubgraph = exports.createSubgraphTemplate = exports.createGraphProtocolTemplate = exports.createRepo = exports.throwOrPurgeOnDirExists = exports.keccak = exports.randomTempPath = exports.tempPath = void 0;
+exports.deploy = exports.toDeployParams = exports.hardhatLocalNode = exports.subgraph = exports.graphNode = exports.ipfs = exports.waitForGraph = exports.waitForIpfs = exports.waitForEthereum = exports.ensureGraphNodeInstallation = exports.buildSubgraph = exports.createSubgraphTemplate = exports.createGraphProtocolTemplate = exports.throwOrPurgeOnDirExists = exports.keccak = exports.randomTempPath = exports.tempPath = void 0;
 const axios_1 = __importDefault(require("axios"));
@@ -38,2 +38,3 @@ const child_process = __importStar(require("child_process"));
 const yaml_1 = require("yaml");
+const nanoid_1 = require("nanoid");
 const tempPath = (name) => path_1.default.resolve(os_1.default.tmpdir(), name);
@@ -53,24 +54,2 @@ exports.tempPath = tempPath;
 exports.throwOrPurgeOnDirExists = throwOrPurgeOnDirExists;
-const createRepo = ({ dir, purgeIfExists, }) => {
-    (0, exports.throwOrPurgeOnDirExists)({ dir, purgeIfExists });
-    fs_extra_1.default.mkdirSync(dir);
-    const packageJson = path_1.default.resolve(dir, 'package.json');
-    const tsConfigJson = path_1.default.resolve(dir, 'tsconfig.json');
-    fs_extra_1.default.writeFileSync(packageJson, JSON.stringify({}));
-    fs_extra_1.default.writeFileSync(tsConfigJson, JSON.stringify({
-        compilerOptions: {
-            target: 'es2020',
-            module: 'commonjs',
-            esModuleInterop: true,
-            forceConsistentCasingInFileNames: true,
-            declaration: true,
-            strict: true,
-            skipLibCheck: true,
-            outDir: 'dist',
-        },
-    }));
-    child_process.execSync('npm i --save-dev ts-node typescript', { stdio: 'inherit', cwd: dir });
-    return { packageJson, tsConfigJson };
-};
-exports.createRepo = createRepo;
 const createGraphProtocolTemplate = ({ dir, purgeIfExists, }) => {
@@ -172,6 +151,4 @@ (0, exports.throwOrPurgeOnDirExists)({ dir, purgeIfExists });
 exports.ipfs = ipfs;
-const postgres = ({ postgresDb, postgresPassword, postgresPort, postgresUser, }) => {
-    child_process.execSync('docker rm autographed;');
-    return new Promise(() => child_process.exec(`
-docker run --name autographed \
+const postgres = ({ postgresDb, postgresPassword, postgresPort, postgresUser, dockerContainerName = (0, nanoid_1.nanoid)(), }) => new Promise(() => child_process.exec(`
+docker run --name ${dockerContainerName} \
 -p "${postgresPort}:${postgresPort}" \
@@ -182,4 +159,3 @@ -e "POSTGRES_DB=${postgresDb}" \
 postgres:14-alpine
-`.trim())) /* forever */;
-};
+`.trim())) /* forever */;
 const graphNode = async ({ graphNodeInstallationDir, postgresPassword, postgresPort, postgresUser, postgresDb, ipfsPort, ethereumNetwork, ethereumPort, }) => {
@@ -205,3 +181,3 @@ // TODO: Ideally, we need to be able to wait for postgres too. Ipfs just happens to take
     });
-    child_process.execSync(`graph create --node http://localhost:${graphNodeStatusPort} ${subgraphName} && graph deploy --node http://localhost:${graphNodeStatusPort} --ipfs http://localhost:${ipfsPort} ${subgraphName} --version-label ${versionLabel}`, { cwd, stdio: 'inherit' });
+    return child_process.execSync(`graph create --node http://localhost:${graphNodeStatusPort} ${subgraphName} && graph deploy --node http://localhost:${graphNodeStatusPort} --ipfs http://localhost:${ipfsPort} ${subgraphName} --version-label ${versionLabel}`, { cwd, stdio: 'inherit' });
 };
@@ -208,0 +184,0 @@ exports.subgraph = subgraph;
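
The compiled hunks above carry the substantive change behind the new declaration: `postgres` no longer shells out to `docker rm autographed` and no longer hard-codes the container name, but instead takes an optional `dockerContainerName` defaulting to `nanoid()`, so concurrent runs get distinct containers. A minimal sketch of that pattern follows; the helper name `startPostgres` and the `POSTGRES_USER`/`POSTGRES_PASSWORD` flags are assumptions, since only the `-p` and `POSTGRES_DB` lines are visible in the diff context.

```ts
// Minimal sketch of the per-run container-name pattern; not the package's code.
import * as child_process from "child_process";
import { nanoid } from "nanoid";

const startPostgres = ({
  postgresPort,
  postgresUser,
  postgresPassword,
  postgresDb,
  dockerContainerName = nanoid(), // unique per invocation instead of a fixed "autographed"
}: {
  readonly postgresPort: number;
  readonly postgresUser: string;
  readonly postgresPassword: string;
  readonly postgresDb: string;
  readonly dockerContainerName?: string;
}) =>
  // The promise intentionally never resolves; the container runs until the process is killed.
  new Promise<never>(() =>
    child_process.exec(
      `
docker run --name ${dockerContainerName} \
-p "${postgresPort}:${postgresPort}" \
-e "POSTGRES_USER=${postgresUser}" \
-e "POSTGRES_PASSWORD=${postgresPassword}" \
-e "POSTGRES_DB=${postgresDb}" \
postgres:14-alpine
`.trim(),
    ),
  );

// Example invocation (all values illustrative):
void startPostgres({ postgresPort: 5432, postgresUser: "postgres", postgresPassword: "password", postgresDb: "graph-node" });
```

Dropping the fixed name also removes the old `docker rm autographed;` cleanup step, which could collide when two environments shared one host.
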
@@ -8,2 +8,3 @@ import axios, {AxiosError} from "axios";
 import {parse, stringify} from "yaml";
+import {nanoid} from "nanoid";
@@ -39,40 +40,2 @@ import {Environment, Source} from "../@types";
-export const createRepo = ({
-  dir,
-  purgeIfExists,
-}: {
-  readonly dir: string;
-  readonly purgeIfExists?: boolean;
-}) => {
-  throwOrPurgeOnDirExists({dir, purgeIfExists});
-  fs.mkdirSync(dir);
-  const packageJson = path.resolve(dir, 'package.json');
-  const tsConfigJson = path.resolve(dir, 'tsconfig.json');
-  fs.writeFileSync(packageJson, JSON.stringify({}));
-  fs.writeFileSync(
-    tsConfigJson,
-    JSON.stringify({
-      compilerOptions: {
-        target: 'es2020',
-        module: 'commonjs',
-        esModuleInterop: true,
-        forceConsistentCasingInFileNames: true,
-        declaration: true,
-        strict: true,
-        skipLibCheck: true,
-        outDir: 'dist',
-      },
-    }),
-  );
-  child_process.execSync(
-    'npm i --save-dev ts-node typescript',
-    {stdio: 'inherit', cwd: dir},
-  );
-  return {packageJson, tsConfigJson};
-};
 export const createGraphProtocolTemplate = ({
@@ -301,2 +264,3 @@ dir,
   postgresUser,
+  dockerContainerName = nanoid(),
 }: {
@@ -307,10 +271,7 @@ readonly postgresPort: number;
   readonly postgresPassword: string;
-}) => {
-  child_process.execSync(
-    'docker rm autographed;',
-  );
-  return new Promise(
-    () => child_process.exec(
-      `
-docker run --name autographed \
+  readonly dockerContainerName?: string;
+}) => new Promise(
+  () => child_process.exec(
+    `
+docker run --name ${dockerContainerName} \
 -p "${postgresPort}:${postgresPort}" \
@@ -321,6 +282,5 @@ -e "POSTGRES_DB=${postgresDb}" \
 postgres:14-alpine
-`.trim(),
-    ),
-  ) /* forever */;
-};
+`.trim(),
+  ),
+) /* forever */;
@@ -390,3 +350,3 @@ export const graphNode = async ({
   });
-  child_process.execSync(
+  return child_process.execSync(
     `graph create --node http://localhost:${
@@ -393,0 +353,0 @@ graphNodeStatusPort
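
The final hunk shows `subgraph` returning the result of `child_process.execSync` rather than discarding it, which is what puts `Buffer` into the declaration file above. One caveat worth flagging: because the call passes `stdio: 'inherit'`, Node streams the command's output to the parent process, and `execSync` can yield `null` at runtime despite the `Buffer` typing, so callers should guard. The sketch below illustrates that generic pattern with a stand-in command; it is not the package's `graph create && graph deploy` invocation.

```ts
// Generic illustration of returning execSync's value while inheriting stdio.
import * as child_process from "child_process";

const runAndReturn = (cmd: string, cwd: string): Buffer | null => {
  // @types/node types this as string | Buffer, but with inherited stdio the
  // runtime value can be null, so normalize defensively.
  const result: unknown = child_process.execSync(cmd, { cwd, stdio: "inherit" });
  return Buffer.isBuffer(result) ? result : null;
};

const out = runAndReturn("node --version", process.cwd()); // stand-in command
if (out) {
  process.stdout.write(out); // only reached if a Buffer actually came back
}
```
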
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package