├── img └── fast_start.gif ├── examples ├── ethers │ ├── package.json │ ├── index.js │ └── yarn.lock ├── start_stop_client.js ├── puppeth.js ├── create_account.js ├── docker_raw.js └── zokrates │ └── zokrates.js ├── .circleci └── config.yml ├── src ├── bin │ ├── commands │ │ ├── version.ts │ │ ├── list.ts │ │ ├── download.ts │ │ ├── start.ts │ │ └── exec.ts │ ├── ethbinary.ts │ └── lib │ │ └── index.ts ├── index.ts ├── ClientManager.spec.ts ├── Logger.ts ├── client_plugins │ ├── index.ts │ └── geth.ts ├── Client │ ├── BaseClient.ts │ ├── DockerizedClient.ts │ └── BinaryClient.ts ├── events.ts ├── DockerUtils.ts ├── ProcessManager.ts ├── types.ts ├── utils.ts ├── DockerManager.ts └── ClientManager.ts ├── .mocharc.js ├── docs ├── index.html └── README.md ├── package.json ├── .gitignore ├── tsconfig.json └── README.md /img/fast_start.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/ethereum-binaries/HEAD/img/fast_start.gif -------------------------------------------------------------------------------- /examples/ethers/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ethers-repl", 3 | "version": "1.0.0", 4 | "main": "index.js", 5 | "license": "MIT", 6 | "dependencies": { 7 | "ethers": "^4.0.47" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /examples/start_stop_client.js: -------------------------------------------------------------------------------- 1 | const { getClient } = require('../dist/index.js') 2 | 3 | const run = async () => { 4 | const geth = await getClient('geth') 5 | await geth.start('--goerli') 6 | await geth.stop() 7 | } 8 | run() -------------------------------------------------------------------------------- /examples/puppeth.js: -------------------------------------------------------------------------------- 1 | const { getClient } = require('../dist/index.js') 2 | 3 | const run = async () => { 4 | const puppeth = await getClient('puppeth') 5 | await puppeth.start() 6 | console.log('binary started') 7 | await puppeth.whenState(log => log.includes('Please specify a network name ')) 8 | await puppeth.input('foobar') 9 | await puppeth.stop() 10 | } 11 | run() -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | orbs: 3 | node: circleci/node@1.1.6 4 | jobs: 5 | build-and-test: 6 | executor: 7 | name: node/default 8 | steps: 9 | - checkout 10 | - node/with-cache: 11 | steps: 12 | - run: npm install 13 | - run: npm test 14 | workflows: 15 | build-and-test: 16 | jobs: 17 | - build-and-test -------------------------------------------------------------------------------- /src/bin/commands/version.ts: -------------------------------------------------------------------------------- 1 | import {Command, command, metadata} from 'clime' 2 | import chalk from 'chalk' 3 | 4 | @command({ 5 | description: 'Prints the ethbinary version', 6 | }) 7 | export default class extends Command { 8 | @metadata 9 | public execute(){ 10 | console.log(chalk.bold('ethbinary version: ', require('../../../package.json').version)) 11 | console.log('\n') 12 | } 13 | } -------------------------------------------------------------------------------- /.mocharc.js: -------------------------------------------------------------------------------- 1 | let pattern = '*' 2 | 
let dirPattern = '**' 3 | if (process.argv.length === 3) { 4 | const arg = process.argv.pop() 5 | const DIRECTORIES = [] 6 | if(DIRECTORIES.includes(arg)) { 7 | dirPattern = arg 8 | } else { 9 | pattern = arg 10 | } 11 | } 12 | module.exports = { 13 | "extension": ["ts"], 14 | "spec": `./src/${dirPattern}/${pattern}.spec.ts`, 15 | "require": ["ts-node/register", "source-map-support/register"], 16 | "full-trace": true 17 | } -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import { MultiClientManager, SingleClientManager, getClient, SHARED_DATA } from './ClientManager' 2 | export { SingleClientManager, MultiClientManager, getClient } 3 | 4 | export * from './types' 5 | export { CLIENT_STATE } from './Client/BaseClient' 6 | 7 | export { PROCESS_EVENTS } from './events' 8 | 9 | export * as ethpkg from 'ethpkg' 10 | 11 | export { SHARED_DATA } 12 | 13 | const instance = MultiClientManager.getInstance() 14 | export default instance 15 | -------------------------------------------------------------------------------- /src/bin/commands/list.ts: -------------------------------------------------------------------------------- 1 | import {Command, command, metadata} from 'clime' 2 | import { MultiClientManager as ClientManager } from '../../ClientManager' 3 | 4 | @command({ 5 | description: 'Lists the supported clients', 6 | }) 7 | export default class extends Command { 8 | @metadata 9 | public async execute(){ 10 | const availableClients = await ClientManager.getInstance().getAvailableClients() 11 | console.log('Supported clients: ', availableClients) 12 | console.log('\n') 13 | } 14 | } -------------------------------------------------------------------------------- /examples/create_account.js: -------------------------------------------------------------------------------- 1 | const { SingleClientManager } = require('../dist/index.js') 2 | 3 | const run = async () => { 4 | const cm = new SingleClientManager() 5 | const client = await cm.getClient('geth') 6 | const result = await cm.execute('account new', { 7 | timeout: 20 * 1000, // user has 20 seonds to enter password 8 | stdio: 'inherit' // NOTE: without 'inherit' "account new" will expect stdin input for password and will always time out 9 | }) 10 | console.log('result', result) 11 | } 12 | run() -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | ethbinary 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /src/ClientManager.spec.ts: -------------------------------------------------------------------------------- 1 | import { assert } from 'chai' 2 | import { MultiClientManager as ClientManager } from './index' 3 | 4 | describe('ClientManager', function() { 5 | this.timeout(60 * 1000) 6 | 7 | describe('stopClient', () => { 8 | it('stops running binaries', async () => { 9 | const cm = ClientManager.getInstance() 10 | const client = await cm.getClient('geth') 11 | await cm.startClient(client) 12 | await cm.stopClient(client) 13 | }) 14 | it.skip('stops running docker clients', async () => { 15 | const cm = ClientManager.getInstance() 16 | const client = await cm.getClient('prysm') 17 | await cm.startClient(client) 18 | await cm.stopClient(client) 19 | }) 20 | }) 21 | 22 | }) -------------------------------------------------------------------------------- /src/bin/commands/download.ts: -------------------------------------------------------------------------------- 1 | import { Command, command, metadata, param, option, Options } from 'clime' 2 | import { downloadClient } from '../lib' 3 | 4 | export class ClientOptions extends Options { 5 | @option({ 6 | name: 'clientFlags', 7 | description: 'list of flags', 8 | default: '', 9 | }) 10 | flags?: string; 11 | @option({ 12 | name: 'clientVersion', 13 | description: 'sets the client version', 14 | }) 15 | version?: string; 16 | } 17 | 18 | @command({ 19 | description: 'Downloads a client', 20 | }) 21 | export default class extends Command { 22 | @metadata 23 | public async execute( 24 | @param({ 25 | name: 'client', 26 | description: 'client name', 27 | required: true, 28 | }) 29 | clientName: string, 30 | options: ClientOptions 31 | ) { 32 | console.log('download client', clientName, 'version', options.version) 33 | await downloadClient(clientName, options.version) 34 | } 35 | } -------------------------------------------------------------------------------- /examples/docker_raw.js: -------------------------------------------------------------------------------- 1 | const { default: ClientManager } = require('../dist/index.js') 2 | 3 | const run = async () => { 4 | 5 | ClientManager.addClientConfig({ 6 | name: 'nodejs', 7 | displayName: 'Node.js', 8 | dockerimage: 'node:10', 9 | entryPoint: 'auto', 10 | service: false 11 | }) 12 | 13 | const client = await ClientManager.getClient('nodejs', { 14 | listener: (newState, args) => console.log('new state', newState, args ? 
args.progress : 0) 15 | }) 16 | 17 | await ClientManager.startClient(client) 18 | 19 | let result 20 | result = await ClientManager.execute(client, 'ls -la', { 21 | stdio: 'pipe', 22 | useBash: true, 23 | useEntrypoint: false 24 | }) 25 | console.log('If "pipe" is used for stdio - logs will be returned as array:') 26 | console.log(result) 27 | 28 | result = await ClientManager.execute(client, 'node -e "console.log(2+2)"', { 29 | stdio: 'inherit', 30 | useBash: true, 31 | useEntrypoint: false 32 | }) 33 | 34 | } 35 | run() -------------------------------------------------------------------------------- /examples/ethers/index.js: -------------------------------------------------------------------------------- 1 | const { getClient, CLIENT_STATE } = require('../../dist/index.js') 2 | const ethers = require('ethers') 3 | 4 | const runIpc = async () => { 5 | const geth = await getClient('geth') 6 | await geth.start(['--goerli']) 7 | await geth.whenState(CLIENT_STATE.IPC_READY) 8 | const provider = new ethers.providers.IpcProvider(geth.ipc) 9 | const network = await provider.getNetwork() // network { name: 'goerli', chainId: 5, ensAddress: '0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e' } 10 | // send tx, interact with or deploy contracts here... 11 | await geth.stop() 12 | } 13 | 14 | const runRpc = async () => { 15 | const geth = await getClient('geth') 16 | // note that --http is new syntax for deprecated --rpc 17 | await geth.start(['--dev', '--http']) 18 | await geth.whenState(CLIENT_STATE.HTTP_RPC_READY) 19 | const provider = new ethers.providers.JsonRpcProvider(geth.rpcUrl) 20 | const network = await provider.getNetwork() // network { chainId: 1337, name: 'unknown' } 21 | await geth.stop() 22 | } 23 | 24 | runRpc() 25 | -------------------------------------------------------------------------------- /src/Logger.ts: -------------------------------------------------------------------------------- 1 | 2 | export const LOGLEVEL = { 3 | WARN: -1, 4 | INFO: 0, 5 | VERBOSE: 2 6 | } 7 | 8 | export class Logger { 9 | private _loglevel = LOGLEVEL.WARN 10 | 11 | private static _instance : Logger 12 | 13 | private constructor() {} 14 | 15 | public static getInstance() { 16 | if (!Logger._instance) { 17 | Logger._instance = new Logger() 18 | } 19 | return Logger._instance 20 | } 21 | 22 | _log(loglevel = LOGLEVEL.INFO, message: string, ...optionalParams: any[]) { 23 | if (this._loglevel >= loglevel) { 24 | console.log(message, ...optionalParams) 25 | } 26 | } 27 | log(message: string, ...optionalParams: any[]) { 28 | this._log(LOGLEVEL.INFO, message, ...optionalParams) 29 | } 30 | verbose(message: string, ...optionalParams: any[]) { 31 | this._log(LOGLEVEL.VERBOSE, message, ...optionalParams) 32 | } 33 | warn(message: string, ...optionalParams: any[]) { 34 | this._log(LOGLEVEL.WARN, message, ...optionalParams) 35 | } 36 | } 37 | 38 | const logger = Logger.getInstance() 39 | 40 | export default logger -------------------------------------------------------------------------------- /examples/zokrates/zokrates.js: -------------------------------------------------------------------------------- 1 | const { getClient, SHARED_DATA } = require('../../dist/index.js') 2 | const fs = require('fs') 3 | const path = require('path') 4 | 5 | const log = async prom => { 6 | const logs = await prom 7 | logs.forEach(log => { 8 | console.log(log) 9 | }); 10 | } 11 | 12 | // see https://zokrates.github.io/gettingstarted.html#hello-zokrates 13 | const run = async () => { 14 | const zokrates = await getClient('zokrates') 15 
| fs.writeFileSync(path.join(__dirname, 'test.zok'), ` 16 | def main(private field a, field b) -> (field): 17 | field result = if a * a == b then 1 else 0 fi 18 | return result 19 | `) 20 | await log(zokrates.execute(`compile -i ${SHARED_DATA}/test.zok`)) 21 | await log(zokrates.execute(`setup`)) 22 | await log(zokrates.execute('compute-witness -a 337 113569')) 23 | await log(zokrates.execute('generate-proof')) 24 | await log(zokrates.execute(`export-verifier`)) 25 | await log(zokrates.execute(`cp ./verifier.sol ${SHARED_DATA}`, { useBash: true, useEntrypoint: false })) 26 | 27 | await zokrates.stop() 28 | } 29 | run() -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ethbinary", 3 | "version": "0.9.0", 4 | "description": "", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "test": "nyc mocha", 8 | "start": "node ./dist/index.js", 9 | "build": "tsc -p ./tsconfig.json && yarn copy && chmod +x ./dist/bin/ethbinary.js", 10 | "copy": "node ./scripts/copy_assets", 11 | "docs": "docsify serve ./docs", 12 | "watch": "tsc -p ./tsconfig.json -w" 13 | }, 14 | "author": "", 15 | "license": "ISC", 16 | "bin": { 17 | "ethbinary": "./dist/bin/ethbinary.js" 18 | }, 19 | "dependencies": { 20 | "boxen": "^4.2.0", 21 | "chalk": "^4.0.0", 22 | "cli-progress": "^3.8.2", 23 | "clime": "^0.5.10", 24 | "dockerode": "^3.2.0", 25 | "ethpkg": "^0.10.0", 26 | "openpgp": "^4.10.4" 27 | }, 28 | "devDependencies": { 29 | "@types/chai": "^4.2.11", 30 | "@types/cli-progress": "^3.7.0", 31 | "@types/dockerode": "^2.5.28", 32 | "@types/mocha": "^7.0.2", 33 | "@types/node": "^14.0.1", 34 | "@types/openpgp": "^4.4.11", 35 | "chai": "^4.2.0", 36 | "mocha": "^7.1.2", 37 | "nyc": "^15.0.1", 38 | "ts-node": "^8.10.1", 39 | "typescript": "^3.9.2" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/client_plugins/index.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs' 2 | import path from 'path' 3 | import { ClientConfig } from '../types' 4 | const clientFiles = fs.readdirSync(__dirname) 5 | 6 | const clients : ClientConfig[] = [ 7 | { 8 | name: 'prysm.validator', 9 | displayName: 'Prysm Validator', 10 | repository: 'https://github.com/prysmaticlabs/prysm', 11 | isPackaged: false, 12 | filter: ({ fileName } : any) => fileName.includes('validator') 13 | }, 14 | { 15 | name: 'prysm.validator', 16 | displayName: 'Prysm Validator', 17 | dockerimage: 'gcr.io/prysmaticlabs/prysm/validator', 18 | ports: ['4000', '13000', '12000/udp'], 19 | }, 20 | { 21 | name: 'puppeth', 22 | displayName: 'Puppeth', 23 | repository: 'azure:gethstore', 24 | filter: { 25 | name: { 26 | excludes: ['unstable', 'swarm', 'mips', 'arm'] 27 | } 28 | }, 29 | prefix: `geth-alltools`, 30 | binaryName: 'puppeth', 31 | }, 32 | { 33 | name: 'zokrates', 34 | displayName: 'ZoKrates', 35 | dockerimage: 'zokrates/zokrates', 36 | entryPoint: './zokrates' 37 | } 38 | ] 39 | 40 | for (const client of clientFiles) { 41 | if (!client.startsWith('index')) { 42 | try { 43 | let config = require('./'+client).default 44 | clients.push(config) 45 | } catch (error) { 46 | 47 | } 48 | } 49 | } 50 | export { clients } -------------------------------------------------------------------------------- /src/bin/commands/start.ts: -------------------------------------------------------------------------------- 1 | import {Command, 
command, metadata, param, Options, option} from 'clime' 2 | import { startClient } from '../lib'; 3 | import chalk from 'chalk' 4 | import { PROCESS_EVENTS } from '../../events'; 5 | 6 | export class ClientOptions extends Options { 7 | @option({ 8 | name: 'clientFlags', 9 | description: 'list of flags', 10 | default: '', 11 | }) 12 | flags?: string; 13 | @option({ 14 | name: 'clientVersion', 15 | description: 'sets the client version', 16 | default: 'latest', 17 | }) 18 | version?: string; 19 | } 20 | 21 | @command({ 22 | description: 'Starts a client', 23 | }) 24 | export default class extends Command { 25 | @metadata 26 | public async execute( 27 | @param({ 28 | name: 'client', 29 | description: 'client name', 30 | required: true, 31 | }) 32 | clientName: string, 33 | options: ClientOptions 34 | ){ 35 | const { version: clientVersion } = options 36 | const flags = options.flags?.slice('/'.length).split(' ').filter(f => f !== '') 37 | console.log(chalk.bold(`Starting client: "${clientName}" version: "${clientVersion}" with flags:`, JSON.stringify(flags))) 38 | try { 39 | await startClient(clientName, clientVersion, flags, { 40 | stdio: 'inherit', 41 | }) 42 | } catch (error) { 43 | console.log(chalk.red.bold('Client error - '+error.message), error) 44 | } 45 | } 46 | } -------------------------------------------------------------------------------- /src/bin/commands/exec.ts: -------------------------------------------------------------------------------- 1 | import {Command, command, metadata, param, Options, option} from 'clime' 2 | import chalk from 'chalk' 3 | import { execClient } from '../lib'; 4 | 5 | export class ClientOptions extends Options { 6 | @option({ 7 | name: 'clientFlags', 8 | description: 'list of flags', 9 | default: '', 10 | }) 11 | flags?: string; 12 | @option({ 13 | name: 'clientVersion', 14 | description: 'sets the client version', 15 | default: 'latest', 16 | }) 17 | version?: string; 18 | } 19 | 20 | @command({ 21 | description: 'Executes a command on a client', 22 | }) 23 | export default class extends Command { 24 | @metadata 25 | public async execute( 26 | @param({ 27 | name: 'client', 28 | description: 'client name', 29 | required: true, 30 | }) 31 | clientName: string, 32 | @param({ 33 | name: 'command', 34 | description: 'command to execute', 35 | default: 'latest', 36 | required: false, 37 | }) 38 | command: string, 39 | options: ClientOptions 40 | ){ 41 | const { version: clientVersion } = options 42 | console.log(chalk.bold(`Executing client command - client: "${clientName}" version: "${clientVersion}" command: ${command}\n`)) 43 | try { 44 | await execClient(clientName, clientVersion, command) 45 | } catch (error) { 46 | console.log(chalk.red.bold('Client error - '+error.message)) 47 | } 48 | } 49 | } -------------------------------------------------------------------------------- /src/Client/BaseClient.ts: -------------------------------------------------------------------------------- 1 | import { IClient, ClientInfo, ClientStartOptions, CommandOptions } from "../types" 2 | import { uuid } from "../utils" 3 | import { EventEmitter } from "events" 4 | 5 | export enum CLIENT_STATE { 6 | INIT = 'INIT', 7 | STARTED = 'STARTED', 8 | STOPPED = 'STOPPED', 9 | IPC_READY = 'IPC_READY', 10 | HTTP_RPC_READY = 'HTTP_RPC_READY', 11 | ERROR = 'ERROR', 12 | } 13 | 14 | export abstract class BaseClient extends EventEmitter implements IClient { 15 | protected _uuid = uuid() 16 | protected _started: number = 0 17 | protected _stopped: number = 0 18 | protected _state: 
CLIENT_STATE = CLIENT_STATE.INIT 19 | protected _ipc?: string // store ipc path if it can be detected 20 | protected _rpcUrl?: string // store rpc server url if it can be detected 21 | protected _logs: string[] = [] 22 | 23 | get id() { 24 | return this._uuid 25 | } 26 | set ipc(ipcPath: string) { 27 | this._ipc = ipcPath 28 | this._state = CLIENT_STATE.IPC_READY 29 | this.emit('state', CLIENT_STATE.IPC_READY) 30 | } 31 | set rpc(rpcUrl: string) { 32 | this._rpcUrl = rpcUrl 33 | this._state = CLIENT_STATE.HTTP_RPC_READY 34 | this.emit('state', CLIENT_STATE.HTTP_RPC_READY) 35 | } 36 | info(): ClientInfo { 37 | throw new Error("Method not implemented.") 38 | } 39 | async start(flags: string[], options: ClientStartOptions): Promise { 40 | this._stopped = 0 41 | this._state = CLIENT_STATE.STARTED 42 | this._started = Date.now() 43 | this.emit('state', CLIENT_STATE.STARTED) 44 | } 45 | async stop(): Promise { 46 | this._state = CLIENT_STATE.STOPPED 47 | this._stopped = Date.now() 48 | this.emit('state', CLIENT_STATE.STOPPED) 49 | } 50 | addLog(log: string) { 51 | this._logs.push(log) 52 | this.emit('log', log) 53 | } 54 | abstract execute(command: string, options?: CommandOptions): Promise> 55 | } -------------------------------------------------------------------------------- /src/events.ts: -------------------------------------------------------------------------------- 1 | import { PROCESS_STATES } from 'ethpkg' 2 | 3 | export const PROCESS_EVENTS = { 4 | 5 | ...PROCESS_STATES, 6 | 7 | CLIENT_INIT_STARTED: 'CLIENT_INIT_STARTED', 8 | 9 | CLIENT_START_STARTED: 'CLIENT_START_STARTED', 10 | CLIENT_START_FINISHED: 'CLIENT_START_FINISHED', 11 | 12 | // the client manager is loading the release / version list from a remote repo 13 | RESOLVE_RELEASE_STARTED: 'RESOLVE_RELEASE_STARTED', 14 | RESOLVE_RELEASE_FINISHED: 'RESOLVE_RELEASE_FINISHED', 15 | 16 | RESOLVE_BINARY_STARTED: 'RESOLVE_BINARY_STARTED', 17 | RESOLVE_BINARY_FINISHED: 'RESOLVE_BINARY_FINISHED', 18 | 19 | // 20 | VERIFICATION_ERROR: 'VERIFICATION_ERROR', 21 | VERIFICATION_FAILED: 'VERIFICATION_FAILED', 22 | PACKAGE_WRITTEN: 'PACKAGE_WRITTEN', 23 | BINARY_EXTRACTION_STARTED: 'BINARY_EXTRACTION_STARTED', 24 | BINARY_EXTRACTION_PROGRESS: 'BINARY_EXTRACTION_PROGRESS', 25 | BINARY_EXTRACTION_FINISHED: 'BINARY_EXTRACTION_FINISHED', 26 | 27 | RESOLVE_DEPENDENCIES_STARTED: 'RESOLVE_DEPENDENCIES_STARTED', 28 | RESOLVE_DEPENDENCIES_FINISHED: 'RESOLVE_DEPENDENCIES_FINISHED', 29 | 30 | DOCKERFILE_FOUND: 'DOCKERFILE_FOUND', 31 | 32 | CREATE_DOCKER_IMAGE_FROM_FILE_STARTED: 'CREATE_DOCKER_IMAGE_FROM_FILE_STARTED', 33 | CREATE_DOCKER_IMAGE_FROM_FILE_FINISHED: 'CREATE_DOCKER_IMAGE_FROM_FILE_FINISHED', 34 | 35 | FIND_EXISTING_DOCKER_CONTAINER_STARTED: 'FIND_EXISTING_DOCKER_CONTAINER_STARTED', 36 | FIND_EXISTING_DOCKER_CONTAINER_FINISHED: 'FIND_EXISTING_DOCKER_CONTAINER_FINISHED', 37 | 38 | CREATE_DOCKER_CONTAINER_STARTED: 'CREATE_DOCKER_CONTAINER_STARTED', 39 | CREATE_DOCKER_CONTAINER_FINISHED: 'CREATE_DOCKER_CONTAINER_FINISHED', 40 | 41 | PULL_DOCKER_IMAGE_STARTED: 'PULL_DOCKER_IMAGE_STARTED', 42 | PULL_DOCKER_IMAGE_PROGRESS: 'PULL_DOCKER_IMAGE_PROGRESS', 43 | PULL_DOCKER_IMAGE_FINISHED: 'PULL_DOCKER_IMAGE_FINISHED', 44 | 45 | 46 | DOCKER_CLIENT_READY: 'DOCKER_CLIENT_READY', 47 | 48 | // raw docker events during setup of images & containers 49 | DOCKER_EVENT: 'DOCKER_EVENT', 50 | 51 | } -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | node_modules 2 | .DS_Store 3 | 4 | # Logs 5 | logs 6 | *.log 7 | npm-debug.log* 8 | yarn-debug.log* 9 | yarn-error.log* 10 | lerna-debug.log* 11 | 12 | # Diagnostic reports (https://nodejs.org/api/report.html) 13 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 14 | 15 | # Runtime data 16 | pids 17 | *.pid 18 | *.seed 19 | *.pid.lock 20 | 21 | # Directory for instrumented libs generated by jscoverage/JSCover 22 | lib-cov 23 | 24 | # Coverage directory used by tools like istanbul 25 | coverage 26 | *.lcov 27 | 28 | # nyc test coverage 29 | .nyc_output 30 | 31 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 32 | .grunt 33 | 34 | # Bower dependency directory (https://bower.io/) 35 | bower_components 36 | 37 | # node-waf configuration 38 | .lock-wscript 39 | 40 | # Compiled binary addons (https://nodejs.org/api/addons.html) 41 | build/Release 42 | 43 | # Dependency directories 44 | node_modules/ 45 | jspm_packages/ 46 | 47 | # TypeScript v1 declaration files 48 | typings/ 49 | 50 | # TypeScript cache 51 | *.tsbuildinfo 52 | 53 | # Optional npm cache directory 54 | .npm 55 | 56 | # Optional eslint cache 57 | .eslintcache 58 | 59 | # Microbundle cache 60 | .rpt2_cache/ 61 | .rts2_cache_cjs/ 62 | .rts2_cache_es/ 63 | .rts2_cache_umd/ 64 | 65 | # Optional REPL history 66 | .node_repl_history 67 | 68 | # Output of 'npm pack' 69 | *.tgz 70 | 71 | # Yarn Integrity file 72 | .yarn-integrity 73 | 74 | # dotenv environment variables file 75 | .env 76 | .env.test 77 | 78 | # parcel-bundler cache (https://parceljs.org/) 79 | .cache 80 | 81 | # Next.js build output 82 | .next 83 | 84 | # Nuxt.js build / generate output 85 | .nuxt 86 | dist 87 | 88 | # Gatsby files 89 | .cache/ 90 | # Comment in the public line in if your project uses Gatsby and *not* Next.js 91 | # https://nextjs.org/blog/next-9-1#public-directory-support 92 | # public 93 | 94 | # vuepress build output 95 | .vuepress/dist 96 | 97 | # Serverless directories 98 | .serverless/ 99 | 100 | # FuseBox cache 101 | .fusebox/ 102 | 103 | # DynamoDB Local files 104 | .dynamodb/ 105 | 106 | # TernJS port file 107 | .tern-port 108 | 109 | cache 110 | _README.md -------------------------------------------------------------------------------- /src/DockerUtils.ts: -------------------------------------------------------------------------------- 1 | import tty from "tty" 2 | import stream, { Stream } from "stream" 3 | 4 | export const collectLogs = (dockerStream: any) => { 5 | let currentChunk = Buffer.from(''); 6 | let stdOutAndStdErr: Buffer = Buffer.from(''); 7 | const attachStream = new stream.Writable({ 8 | write: function (chunk: Buffer, encoding, next) { 9 | //header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} 10 | currentChunk = Buffer.concat([currentChunk, chunk]); 11 | //const isStdOut = currentChunk.readInt8() === 0x01; 12 | //const isStdErr = currentChunk.readInt8() === 0x02; 13 | const payloadSize: number = currentChunk.readUInt32BE(4); 14 | 15 | while (currentChunk.byteLength >= 8 + payloadSize) { 16 | stdOutAndStdErr = Buffer.concat([stdOutAndStdErr, currentChunk.slice(8, 8 + payloadSize)]); 17 | currentChunk = currentChunk.slice(8 + payloadSize); 18 | } 19 | next(); 20 | }, 21 | }); 22 | 23 | return new Promise((resolve, reject) => { 24 | dockerStream.on('end', () => { 25 | resolve(currentChunk) 26 | }) 27 | }) 28 | } 29 | 30 | export class WritableMemoryStream extends stream.Writable { 31 | buffer: Buffer; 
32 | data : any[] = [] 33 | constructor(){ 34 | super() 35 | this.buffer = Buffer.from('') 36 | this.data = [] 37 | this.once('finish', () => { 38 | this.buffer = this.data.length === 1 ? this.data.pop() : Buffer.concat(this.data) 39 | }) 40 | } 41 | _write (chunk : any, enc : string, cb : Function) { 42 | this.data.push(chunk) 43 | cb() 44 | } 45 | } 46 | 47 | export const attachStdOut = (stdout: any, dockerStream: any, modem: any, onResize: any) => { 48 | if (stdout instanceof Array) { 49 | dockerStream.on('end', function () { 50 | try { 51 | stdout[0].end(); 52 | } catch (e) { } 53 | try { 54 | stdout[1].end(); 55 | } catch (e) { } 56 | }); 57 | modem.demuxStream(dockerStream, stdout[0], stdout[1]); 58 | } else { 59 | dockerStream.setEncoding('utf8'); 60 | dockerStream.pipe(stdout, { 61 | end: true 62 | }); 63 | } 64 | 65 | stdout.on('resize', onResize); 66 | 67 | } 68 | 69 | export const detachStdout = (stdout: any, onResize: any) => { 70 | stdout.removeListener('resize', onResize); 71 | } 72 | 73 | export const attachStdin = (stdin: tty.ReadStream, dockerStream: any) => { 74 | stdin.setEncoding('utf8'); 75 | stdin.setRawMode(true); 76 | stdin.pipe(dockerStream); // stdin -> flow mode 77 | } 78 | 79 | export const detachStdin = (stdin: tty.ReadStream, wasRaw: boolean) => { 80 | stdin.removeAllListeners(); 81 | stdin.setRawMode(wasRaw); 82 | // The stdin stream is paused by default 83 | stdin.pause(); 84 | // @ts-ignore 85 | // https://stackoverflow.com/questions/26004519/why-doesnt-my-node-js-process-terminate-once-all-listeners-have-been-removed/26004758 86 | // console.log('handles', process._getActiveHandles()) 87 | stdin.unref() 88 | } -------------------------------------------------------------------------------- /src/ProcessManager.ts: -------------------------------------------------------------------------------- 1 | import { ChildProcess, spawn } from "child_process"; 2 | import { ManagedProcess } from "./types"; 3 | 4 | export interface SpawnOptions { 5 | stdio?: 'pipe' | 'inherit', 6 | timeout?: number 7 | } 8 | 9 | export class ProcessManager { 10 | private _processes: Array 11 | constructor() { 12 | this._processes = [] 13 | } 14 | add(process: ChildProcess, clientId: string) { 15 | this._processes.push({ 16 | processId: `${process.pid}`, 17 | process, 18 | clientId 19 | }) 20 | } 21 | /** 22 | * wait for process to finish or kill after timeout 23 | * @param proc 24 | * @param timeout 25 | */ 26 | onExit(proc: ChildProcess, timeout? : number) { 27 | return new Promise((resolve, reject) => { 28 | const cleanup = () => { 29 | proc.off('exit', onExit) 30 | proc.off('close', onExit) 31 | if (timeoutHandler) { 32 | clearTimeout(timeoutHandler) 33 | } 34 | } 35 | const onExit = (code: number) => { 36 | cleanup() 37 | resolve(code) 38 | } 39 | 40 | let timeoutHandler : any = undefined 41 | proc.on('exit', onExit) 42 | proc.on('close', onExit) 43 | if (timeout !== undefined) { 44 | timeoutHandler = setTimeout(() => { 45 | console.log('timeout reached') 46 | cleanup() 47 | this.kill(proc.pid) 48 | reject(new Error('Command timeout reached: '+timeout)) 49 | }, timeout) 50 | } 51 | }) 52 | } 53 | spawn(clientId: string, command: string, args: ReadonlyArray = [], { 54 | stdio = 'pipe' 55 | } : SpawnOptions = {}) { 56 | const _process = spawn(command, args, { 57 | // we "simulate" inherit to be able to intercept stdout 58 | // https://github.com/sindresorhus/execa/issues/121 59 | // https://github.com/nodejs/node/issues/8033 60 | stdio: stdio === 'inherit' ? 
['inherit', 'pipe', 'pipe'] : [stdio, stdio, stdio], 61 | detached: false, 62 | shell: false, 63 | }) 64 | if (stdio === 'inherit') { 65 | const { stdout, stderr} = _process 66 | // please node that this is not a full replacement for 'inherit' 67 | // the child process can and will detect if stdout is a pty and change output based on it 68 | // the terminal context is lost & ansi information (coloring) etc will be lost 69 | // https://stackoverflow.com/questions/1401002/how-to-trick-an-application-into-thinking-its-stdout-is-a-terminal-not-a-pipe 70 | if (stdout && stderr) { 71 | stdout.pipe(process.stdout) 72 | stderr.pipe(process.stderr) 73 | } 74 | } 75 | this.add(_process, clientId) 76 | return _process 77 | } 78 | async exec(clientId: string, command: string, args: ReadonlyArray = [], options? : SpawnOptions) : Promise { 79 | return this.spawn(clientId, command, args, options) 80 | } 81 | kill(processId: number | string) { 82 | if (typeof processId !== 'string') { 83 | processId = `${processId}` 84 | } 85 | const managedProcess = this._processes.find(p => p.processId === processId); 86 | if (!managedProcess) { 87 | throw new Error(`Process could not be killed - not found: ${processId}`); 88 | } 89 | const killResult = managedProcess.process.kill("SIGKILL") 90 | // TODO log kill result 91 | this._processes = this._processes.filter(p => p.processId !== processId); 92 | } 93 | } -------------------------------------------------------------------------------- /src/client_plugins/geth.ts: -------------------------------------------------------------------------------- 1 | const pubKeyBuildServer = `-----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | xsFNBFggyxoBEAC299KoAS43p0FyJetAc7E0m1B/wnpyQesFycop/1csNQCjSGMy 4 | EvERt8Mv5VvbyZ696gTnzyLP/YHvx5+j/lKZhixw+7VkOng6JgPF3YgN3WrykIjK 5 | Yoigrpyzf+lnrD7BBrV6BM9pD9YJpLwYJQ1/Kz5d6hTYoetTGU5n4YPo/O/okVyZ 6 | WrCRTHD3iPpVxJK2qSepTPBTDjPxZSK0D/NvCPaM6B+A7oq1/5BmyUspfoMOUJhZ 7 | lK/TxHWjyHGl5kTrMSHIoGOFcaDK8gAWd3AhlU7xZwm8SSKnqII78h03nXLkQoqh 8 | tW45+w8VrXRAdkmzMJ3HKOjSFAfBso+nq23b3RzSIzbTYNoHb2uH1rRaCz4hcxZm 9 | IoM9owTB4XMpeICtL9iu4/C+hQifNI2pSLtMfZ2Z6hb2sKjKGBtoqW030CjWZuuw 10 | cna7Q2tckVqG1G6NyAPOsZN8YUym6sp8G3WCT5XOqFjRfSe/ExZ4HgL4f3Tmk3il 11 | L7jFrzXATcWF4IW/qqB0of/+O3Sn2oMAsSxlK8Tl05f3rJewgJWk/i3VTnk58fX5 12 | X34r3GskMmRdooXITKfebOH4xfoFYWnrYb1ojfcqFnDoViglZQjRITBrJaMVEOye 13 | TbtFlDkbuTCLwbXLh7Fl/ZTrIDz1MX9d8WhydMFXaxqdfW+t8VgmO3YS2wARAQAB 14 | zTBHbyBFdGhlcmV1bSBtYWNPUyBCdWlsZGVyIDxnZXRoLWNpQGV0aGVyZXVtLm9y 15 | Zz7CwXgEEwECACIFAlggyxoCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJ 16 | EFWJFeF7niSBaJYP/2JEv+PUtDQoEC8YslqF60SQWvZayxsbR1JhKznjJuzlQJS8 17 | MecBMCOMNseNaIVTwyVNkgE2BF7yGbcoVKs6BpeQ4T3dEWPCiE8nsoeihM2n1WD8 18 | mW4rh4YbwhuM32iUbB15E1heINqpOXcECJKjDljAYLCj51szp/8B6vRaHpnKNHGd 19 | TeBPi7I0YUc2+XF/iCazgygEfJuSkaqF9b0a7co7zVcPdxp2ekIc40m3oZHLqqbR 20 | Pp05ip674X+y7hreRv6AAvSS78hiFHjHg7JcDNnSX2fWiAoO/svdvKOvUvNdSJE1 21 | UUyXbnjokZMmUATHIFqyZKXMynUmzh4RS9BpnXEC2eq63rzPpx3gcxpgcDB1/d++ 22 | 16+9/KvhbrzTK3LfYBmrnePxNuJ6oUIM4kALadKWFGlBGhnCseaT30ORjCHl6lHN 23 | LF3sr3iRWnGUjwuxTLShzxqOeP1LT8ai5FYn9Z/EXCFn9t1y+eRcM4X8fplPdvHe 24 | sesCx8ZcAjuSgEaK+QK03vdB3B2r8NPF72NhcPEcLLu47Cx4YhtlbUKlZuZRyzAf 25 | gHmPHBDqrBCeY6ZxB3z9Mx+WVQQUdxxcX7Qj6Q53JZaz6V4xb1PSNBBNOq7nYzLi 26 | 8sCMO0gSyNykrkKvpA/LTS6HxNvFCOdma8WNYTtetC14iCY0afi/eUorziPOwsFi 27 | BBMBCgAMBQJYZTzSBYMHhh+AAAoJECY6f9F94bH96fQP/R0iU0ID4wBaQ9iYdBu8 28 | buf25mc7+1cgf2aXWQ1teem82XMuB3MBYQhoyIMEfyZRChCSxrnwYpVj8m1bTRjt 29 | WfDC2ntk9xGbZnTnmrBEob/MBXLPiEAlmOF0nIoPbulq/rBaBLdSrg8W7PlMJonA 30 | 
z+jWnkA15mxvP7Sfqljqy72r6TxeRa7It4O39FYdaXfGt9DOlb5kJK0/ADFAgWtu 31 | A+VG0zdFdpyry/4QhOcR9NmoGQkB3yFNEefKkS0IjrWo9maKp8IleCVc9n4h6kCE 32 | 4zviA/e0IhAH8O9n7kB7hGINMzRtEC0PGknmAeYOavvML18KY0MzdHRZXRxXf8cG 33 | 96U6E7z63VhS3gscpswNBW7+tC9Gr29o0H3lnOkfNxfa4tY1x3Nu5guOE265Cemb 34 | 3F95QSH/s/7Eyw4xpY7SghRB7c2lUMoYuWgHcvdxgC6PEYYmj+5XihRzYr0C+Bui 35 | RQW2Xd8eM8Mx3qD9nPJ4raC67jxWKuLfSVbp/uiXYbu+zIro54wMowvxfrJS995i 36 | wEWYBS/0BxaBl+bDmeHpdn+xM8kXXDPorwAeFHD4vi0xvtEP1mvsp4IConjVPtYQ 37 | Fg+aEvewjHa64EdJwepWYCJwhFVdLt6Z3AyuJMW4HMrO9hsoa5VYBP7TlxQBfLNx 38 | QKn6RdNtNPQN6VoTOezPMf/twsDcBBABCgAGBQJaIWwZAAoJEOEDogis3/oR6AYL 39 | /iAHue/26hkdBVtZ5SjCBSpEm7X82xbaiwXmxp9dzPWfbQHZd2GkZp5zX0kv5LLt 40 | +XTGXBemZbUv/l8YWB7a5mIwOd8Nvx8nGzP7ZQE0CEMwqcLPWgTdCIghVO3p1HUQ 41 | HTsAb3V1UTphpRpcaLa47K/gL1RhiZ+SIARDTX0ftPIm0Rd0t4UhuMII5uN4gfXb 42 | mgPHOgOuHYDmpERRWa03u3Z/b3RyarmZ40aqsjzRcyOnj4r1xbXuwLwUuaH3HyKX 43 | KsTpbEoj4iZsmXpSzA7XXhEsb7ATDezOzIrcbh2ZnpfAhGr1OZq80a6vbee06Hy6 44 | iWmCkVWpHG6dph/zYwfCTUAgTyfpz5Zy3uF5vhK3YM8DrbSAL7/5tVjReJuiyOyH 45 | JkG7i2Htsy6wTuy8rgsbSCRNvkLl3yPpoAZzwcdNiIvW9m+TsvSVzO2X62c8hQg9 46 | Hd6N3IWVZMRfUoOddVV5nG/rAJdUlLGrnaG8s6k+OMUNpOAQeJap5EFKH+1wL5Ak 47 | Kw== 48 | =noc8 49 | -----END PGP PUBLIC KEY BLOCK-----` 50 | 51 | 52 | let platform = process.platform === 'win32' ? 'windows' : process.platform 53 | 54 | const geth_config = { 55 | name: 'geth', 56 | displayName: 'Geth', 57 | repository: 'azure:gethstore', 58 | filter: { 59 | name: { 60 | excludes: ['unstable', 'alltools', 'swarm', 'mips', 'arm'] 61 | } 62 | }, 63 | binaryName: 'geth', 64 | prefix: `geth-${platform}`, 65 | publicKey: pubKeyBuildServer 66 | } 67 | 68 | export default geth_config -------------------------------------------------------------------------------- /src/bin/ethbinary.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import * as path from 'path' 4 | import { CLI, Shim } from 'clime' 5 | import { clientSpecifierToCommand } from './lib' 6 | import { MultiClientManager as ClientManager } from '../ClientManager' 7 | 8 | const version = require('../../package.json').version 9 | console.log(` 10 | 11 | ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ 12 | ⧫ ⧫ 13 | ⧫ Ethbinary ${version} ⧫ 14 | ⧫ ⧫ 15 | ⧫ Manage & Interact with Ethereum Binaries ⧫ 16 | ⧫ ⧫ 17 | ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ ⧫ 18 | 19 | `) 20 | 21 | 22 | class MyCLI extends CLI { 23 | async execute(argv: string[], 24 | contextExtension: object | string | undefined, 25 | cwd?: string | undefined, ) { 26 | 27 | const supportedClients = await ClientManager.getInstance().getAvailableClients() 28 | let idx = argv.findIndex(arg => supportedClients.some(sc => arg.startsWith(sc))) 29 | 30 | if (idx === -1) { 31 | // @ts-ignore 32 | return super.execute(argv, contextExtension, cwd) 33 | } 34 | 35 | // transform client specifier syntax 36 | let clientName = argv[idx] 37 | let clientArgs = [clientName] 38 | if (clientName.includes('@')) { 39 | let [_clientName, clientVersion] = clientName.split('@') 40 | clientArgs = [_clientName, '--clientVersion', clientVersion] 41 | } 42 | 43 | // the problem is that we want to pass through all client flags such as --goerli 44 | // without explicitly defining ass possible flags here 45 | // therefore we need to detect the begin of client flags and escape them 46 | let clientCommands = argv.slice(idx + 1) // +1 do not include client name 47 | // console.log('client options', clientCommands) 48 | 49 | // 
flags handled by this cli should be skipped 50 | const whitelistFlags = ['--clientVersion'] 51 | const firstFlagIdx = clientCommands.findIndex(c => !whitelistFlags.includes(c) && c.startsWith('-')) 52 | let clientFlags : string[] = [] 53 | if (firstFlagIdx !== -1) { // capture the client flags before truncating the command list 54 | clientFlags = clientCommands.slice(firstFlagIdx) 55 | clientCommands = clientCommands.slice(0, firstFlagIdx) 56 | } 57 | 58 | // turn all client specific flags into one space separated string 59 | // use escape sequence to avoid syntax error --flags --... (cannot be another flag) 60 | const ESCAPE = '/' 61 | const escapedFlags = ['--clientFlags', ESCAPE + clientFlags.join(' ')] 62 | 63 | // take everything before client name, append escaped client flags 64 | // [ 'start', 'geth', '--goerli' ] => [ 'start', 'geth', '--clientFlags', '/--goerli' ] 65 | // [ 'start', 'geth@latest', '--goerli' ] => [ 'start', 'geth', '--clientVersion', 'latest', '--clientFlags', '/--goerli' ] 66 | const transformedCommand = [...argv.slice(0, idx), ...clientArgs, ...clientCommands, ...escapedFlags] 67 | 68 | // NOTE: use for debugging 69 | // console.log('transformed command', transformedCommand) 70 | 71 | // @ts-ignore 72 | return super.execute(transformedCommand, contextExtension, cwd) 73 | 74 | } 75 | } 76 | 77 | // The second parameter is the path to folder that contains command modules. 78 | let cli = new MyCLI(`ethbinary`, path.join(__dirname, 'commands')) 79 | 80 | // Clime in its core provides an object-based command-line infrastructure. 81 | // To have it work as a common CLI, a shim needs to be applied: 82 | let shim = new Shim(cli) 83 | shim.execute(process.argv) 84 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | import { IRelease } from "ethpkg"; 2 | import { ChildProcess } from "child_process"; 3 | import { CLIENT_STATE } from "./Client/BaseClient"; 4 | import { EventEmitter } from "events"; 5 | 6 | export { IRelease } 7 | 8 | export interface FilterConfig { 9 | name: { 10 | includes?: string | Array<string>; 11 | excludes?: string | Array<string>; 12 | } 13 | } 14 | 15 | export declare type FilterFunction = (release: IRelease) => boolean; 16 | 17 | export interface ReleaseFilterOptions { 18 | version?: string; 19 | platform?: string; 20 | packagesOnly?: boolean; 21 | } 22 | 23 | export interface DownloadOptions extends ReleaseFilterOptions { 24 | listener?: StateListener; 25 | cachePath?: string; 26 | useDocker?: boolean; 27 | isPackaged?: boolean 28 | } 29 | 30 | export type GetClientOptions = DownloadOptions 31 | 32 | export interface ProcessOptions { 33 | listener?: StateListener 34 | stdio?: 'inherit' | 'pipe' 35 | tty?: boolean 36 | timeout?: number 37 | } 38 | 39 | export interface JobDetails { 40 | jobId?: string 41 | } 42 | 43 | export type ClientStartOptions = DownloadOptions & ProcessOptions & JobDetails 44 | 45 | export interface CommandOptions extends ProcessOptions { 46 | useBash?: boolean // is the command a bash command 47 | useEntrypoint?: boolean // is the command input for entrypoint 48 | volume?: string // docker -v 49 | } 50 | 51 | export interface IClient extends EventEmitter { 52 | readonly id: string; 53 | info(): ClientInfo 54 | start(flags: string[], options: ClientStartOptions): Promise<void>; 55 | stop(): Promise<void>; 56 | execute(command: string, options?: CommandOptions): Promise<Array<string>> 57 | } 58 | 59 | export declare type LogFilter = (log: string) => boolean; 60 | 61 | export interface
ClientDependencies { 62 | runtime?: any[] 63 | client?: any[] 64 | } 65 | 66 | export interface ClientBaseConfig { 67 | name: string; 68 | displayName: string; 69 | flags?: string[]; // default flags to start the client with 70 | ports? : string[] | { [index: string] : string }; // ports a client uses 71 | dependencies?: ClientDependencies 72 | } 73 | 74 | export interface DockerConfig extends ClientBaseConfig { 75 | dockerimage: string; 76 | entryPoint?: string | 'auto'; // let ethbinary detect the entrypoint 77 | service?: boolean; 78 | } 79 | 80 | export interface PackageConfig extends ClientBaseConfig { 81 | repository: string; 82 | prefix?: string; 83 | filter?: FilterFunction | FilterConfig; 84 | binaryName?: string; // the name of binary in package - e.g. 'geth'; auto-expanded to geth.exe if necessary 85 | publicKey?: string; 86 | isPackaged?: boolean; 87 | extract?: boolean; // if true all package contents are extracted 88 | } 89 | 90 | export function instanceofDockerConfig(object: any): object is DockerConfig { 91 | return typeof object === 'object' && ('dockerimage' in object) 92 | } 93 | 94 | export function instanceofPackageConfig(object: any): object is PackageConfig { 95 | return typeof object === 'object' && ('repository' in object) 96 | } 97 | 98 | export type ClientConfig = PackageConfig | DockerConfig 99 | 100 | export function instanceofClientConfig(object: any): object is ClientConfig { 101 | return typeof object === 'object' && ('name' in object && ('repository' in object || 'dockerimage' in object)) 102 | } 103 | 104 | export declare type StateListener = (newState: string, args?: any) => void; 105 | 106 | 107 | export interface ClientInfo { 108 | id: string // generated internal uuid 109 | type: 'docker' | 'binary' 110 | state: CLIENT_STATE 111 | started: number // timestamp 112 | stopped: number // timestamp 113 | ipc?: string // ipc path (named pipe / socket) if it can be detected 114 | rpcUrl?: string // url to rpc server 115 | binaryPath?: string // can be undefined for docker clients 116 | processId: string // container id for docker clients 117 | logs: string[] 118 | } 119 | 120 | export function instanceofClientInfo(object: any): object is ClientInfo { 121 | return typeof object === 'object' && ('processId' in object && 'id' in object) 122 | } 123 | 124 | 125 | export interface ManagedProcess { 126 | processId: string; 127 | process: ChildProcess; 128 | clientId: string; 129 | } 130 | 131 | -------------------------------------------------------------------------------- /examples/ethers/yarn.lock: -------------------------------------------------------------------------------- 1 | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
2 | # yarn lockfile v1 3 | 4 | 5 | aes-js@3.0.0: 6 | version "3.0.0" 7 | resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.0.0.tgz#e21df10ad6c2053295bcbb8dab40b09dbea87e4d" 8 | integrity sha1-4h3xCtbCBTKVvLuNq0Cwnb6ofk0= 9 | 10 | bn.js@^4.4.0: 11 | version "4.11.8" 12 | resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" 13 | integrity sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA== 14 | 15 | brorand@^1.0.1: 16 | version "1.1.0" 17 | resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" 18 | integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8= 19 | 20 | elliptic@6.5.2: 21 | version "6.5.2" 22 | resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.2.tgz#05c5678d7173c049d8ca433552224a495d0e3762" 23 | integrity sha512-f4x70okzZbIQl/NSRLkI/+tteV/9WqL98zx+SQ69KbXxmVrmjwsNUPn/gYJJ0sHvEak24cZgHIPegRePAtA/xw== 24 | dependencies: 25 | bn.js "^4.4.0" 26 | brorand "^1.0.1" 27 | hash.js "^1.0.0" 28 | hmac-drbg "^1.0.0" 29 | inherits "^2.0.1" 30 | minimalistic-assert "^1.0.0" 31 | minimalistic-crypto-utils "^1.0.0" 32 | 33 | ethers@^4.0.47: 34 | version "4.0.47" 35 | resolved "https://registry.yarnpkg.com/ethers/-/ethers-4.0.47.tgz#91b9cd80473b1136dd547095ff9171bd1fc68c85" 36 | integrity sha512-hssRYhngV4hiDNeZmVU/k5/E8xmLG8UpcNUzg6mb7lqhgpFPH/t7nuv20RjRrEf0gblzvi2XwR5Te+V3ZFc9pQ== 37 | dependencies: 38 | aes-js "3.0.0" 39 | bn.js "^4.4.0" 40 | elliptic "6.5.2" 41 | hash.js "1.1.3" 42 | js-sha3 "0.5.7" 43 | scrypt-js "2.0.4" 44 | setimmediate "1.0.4" 45 | uuid "2.0.1" 46 | xmlhttprequest "1.8.0" 47 | 48 | hash.js@1.1.3: 49 | version "1.1.3" 50 | resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.3.tgz#340dedbe6290187151c1ea1d777a3448935df846" 51 | integrity sha512-/UETyP0W22QILqS+6HowevwhEFJ3MBJnwTf75Qob9Wz9t0DPuisL8kW8YZMK62dHAKE1c1p+gY1TtOLY+USEHA== 52 | dependencies: 53 | inherits "^2.0.3" 54 | minimalistic-assert "^1.0.0" 55 | 56 | hash.js@^1.0.0, hash.js@^1.0.3: 57 | version "1.1.7" 58 | resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42" 59 | integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA== 60 | dependencies: 61 | inherits "^2.0.3" 62 | minimalistic-assert "^1.0.1" 63 | 64 | hmac-drbg@^1.0.0: 65 | version "1.0.1" 66 | resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" 67 | integrity sha1-0nRXAQJabHdabFRXk+1QL8DGSaE= 68 | dependencies: 69 | hash.js "^1.0.3" 70 | minimalistic-assert "^1.0.0" 71 | minimalistic-crypto-utils "^1.0.1" 72 | 73 | inherits@^2.0.1, inherits@^2.0.3: 74 | version "2.0.4" 75 | resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" 76 | integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== 77 | 78 | js-sha3@0.5.7: 79 | version "0.5.7" 80 | resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.7.tgz#0d4ffd8002d5333aabaf4a23eed2f6374c9f28e7" 81 | integrity sha1-DU/9gALVMzqrr0oj7tL2N0yfKOc= 82 | 83 | minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: 84 | version "1.0.1" 85 | resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" 86 | integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== 87 
| 88 | minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: 89 | version "1.0.1" 90 | resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" 91 | integrity sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo= 92 | 93 | scrypt-js@2.0.4: 94 | version "2.0.4" 95 | resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-2.0.4.tgz#32f8c5149f0797672e551c07e230f834b6af5f16" 96 | integrity sha512-4KsaGcPnuhtCZQCxFxN3GVYIhKFPTdLd8PLC552XwbMndtD0cjRFAhDuuydXQ0h08ZfPgzqe6EKHozpuH74iDw== 97 | 98 | setimmediate@1.0.4: 99 | version "1.0.4" 100 | resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.4.tgz#20e81de622d4a02588ce0c8da8973cbcf1d3138f" 101 | integrity sha1-IOgd5iLUoCWIzgyNqJc8vPHTE48= 102 | 103 | uuid@2.0.1: 104 | version "2.0.1" 105 | resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.1.tgz#c2a30dedb3e535d72ccf82e343941a50ba8533ac" 106 | integrity sha1-wqMN7bPlNdcsz4LjQ5QaULqFM6w= 107 | 108 | xmlhttprequest@1.8.0: 109 | version "1.8.0" 110 | resolved "https://registry.yarnpkg.com/xmlhttprequest/-/xmlhttprequest-1.8.0.tgz#67fe075c5c24fef39f9d65f5f7b7fe75171968fc" 111 | integrity sha1-Z/4HXFwk/vOfnWX197f+dRcZaPw= 112 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | /* Basic Options */ 4 | "target": "es6", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017','ES2018' or 'ESNEXT'. */ 5 | "module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', or 'ESNext'. */ 6 | // "lib": [], /* Specify library files to be included in the compilation. */ 7 | // "allowJs": true, /* Allow javascript files to be compiled. */ 8 | // "checkJs": true, /* Report errors in .js files. */ 9 | // "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */ 10 | // "declaration": true, /* Generates corresponding '.d.ts' file. */ 11 | // "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */ 12 | // "sourceMap": true, /* Generates corresponding '.map' file. */ 13 | // "outFile": "./", /* Concatenate and emit output to single file. */ 14 | "outDir": "./dist", /* Redirect output structure to the directory. */ 15 | "rootDir": "./src", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */ 16 | // "composite": true, /* Enable project compilation */ 17 | // "removeComments": true, /* Do not emit comments to output. */ 18 | // "noEmit": true, /* Do not emit outputs. */ 19 | // "importHelpers": true, /* Import emit helpers from 'tslib'. */ 20 | // "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */ 21 | // "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */ 22 | 23 | /* Strict Type-Checking Options */ 24 | "strict": true, /* Enable all strict type-checking options. */ 25 | // "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */ 26 | // "strictNullChecks": true, /* Enable strict null checks. */ 27 | // "strictFunctionTypes": true, /* Enable strict checking of function types. 
*/ 28 | // "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */ 29 | // "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */ 30 | // "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */ 31 | // "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */ 32 | 33 | /* Additional Checks */ 34 | // "noUnusedLocals": true, /* Report errors on unused locals. */ 35 | // "noUnusedParameters": true, /* Report errors on unused parameters. */ 36 | // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ 37 | // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ 38 | 39 | /* Module Resolution Options */ 40 | // "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */ 41 | // "baseUrl": "./", /* Base directory to resolve non-absolute module names. */ 42 | // "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */ 43 | // "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */ 44 | // "typeRoots": [], /* List of folders to include type definitions from. */ 45 | // "types": [], /* Type declaration files to be included in compilation. */ 46 | // "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */ 47 | "esModuleInterop": true, /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */ 48 | // "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */ 49 | 50 | /* Source Map Options */ 51 | // "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */ 52 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ 53 | // "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */ 54 | // "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */ 55 | 56 | /* Experimental Options */ 57 | "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */ 58 | "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. 
*/ 59 | } 60 | } -------------------------------------------------------------------------------- /src/bin/lib/index.ts: -------------------------------------------------------------------------------- 1 | import { SingleClientManager } from "../../ClientManager"; 2 | import cliProgress from 'cli-progress' 3 | import boxen from 'boxen' 4 | import chalk from 'chalk' 5 | import { PROCESS_EVENTS } from "../../events"; 6 | const { Select } = require('enquirer') 7 | 8 | const printFormattedRelease = (release?: any) => { 9 | if(!release) { 10 | return console.log('No release info provided!') 11 | } 12 | if ('original' in release) { 13 | release = { ...release } 14 | release.original = '' 15 | } 16 | console.log(boxen(chalk.grey(JSON.stringify(release, undefined, 2)))) 17 | } 18 | 19 | export const clientSpecifierToCommand = (clientSpecifier?: string) => { 20 | if (!clientSpecifier) return [] 21 | if (clientSpecifier.includes('@')) { 22 | const [client, version] = clientSpecifier.split('@') 23 | return ['client', 'start', client, version] 24 | } 25 | return [clientSpecifier] 26 | } 27 | 28 | const createProgressListener = () => { 29 | let downloadProgressBar : any 30 | let extractProgressBar : any 31 | return (newState: string, args: any) => { 32 | // console.log('new state', newState) 33 | if (newState === PROCESS_EVENTS.RESOLVE_PACKAGE_STARTED) { 34 | console.log(chalk.green('Looking up latest release')) 35 | } 36 | if (newState === PROCESS_EVENTS.RESOLVE_PACKAGE_FINISHED) { 37 | console.log(chalk.green('Release resolved:')) 38 | printFormattedRelease(args.release) 39 | } 40 | if (newState === PROCESS_EVENTS.DOWNLOAD_STARTED) { 41 | console.log(chalk.green.bold('Download client')) 42 | downloadProgressBar = new cliProgress.SingleBar({}, cliProgress.Presets.shades_classic) 43 | downloadProgressBar.start(100, 0); 44 | } 45 | else if (newState === PROCESS_EVENTS.DOWNLOAD_PROGRESS) { 46 | if (!downloadProgressBar) return 47 | const { progress } = args 48 | downloadProgressBar.update(progress) 49 | } 50 | else if(newState === PROCESS_EVENTS.DOWNLOAD_FINISHED) { 51 | downloadProgressBar.stop() 52 | console.log('\n') 53 | } 54 | else if (newState === PROCESS_EVENTS.EXTRACT_PACKAGE_STARTED) { 55 | console.log(chalk.green.bold('Extract package contents')) 56 | extractProgressBar = new cliProgress.SingleBar({}, cliProgress.Presets.shades_classic) 57 | extractProgressBar.start(100, 0); 58 | } 59 | else if (newState === PROCESS_EVENTS.EXTRACT_PACKAGE_PROGRESS) { 60 | const { progress, file, destPath } = args 61 | extractProgressBar.update(progress) 62 | } 63 | else if (newState === PROCESS_EVENTS.EXTRACT_PACKAGE_FINISHED) { 64 | extractProgressBar.stop() 65 | console.log('\n') 66 | } 67 | else if (newState === PROCESS_EVENTS.RESOLVE_BINARY_FINISHED) { 68 | const { pkg } = args 69 | const { metadata } = pkg 70 | if (metadata.remote === false) { 71 | console.log(chalk.bold('Using cached binary from package at', pkg.filePath)) 72 | } 73 | } 74 | else if(newState === PROCESS_EVENTS.CLIENT_START_STARTED) { 75 | console.log(chalk.bold('Starting client now...\n')) 76 | } 77 | else { 78 | // console.log('new state', newState) 79 | } 80 | } 81 | } 82 | 83 | export const downloadClient = async (clientName = 'geth', clientVersion?: string) => { 84 | const cm = new SingleClientManager() 85 | if (!clientVersion) { 86 | let versions = await cm.getClientVersions(clientName) 87 | if (!versions || versions.length === 0) { 88 | throw new Error(`No releases found for client "${clientName}"`) 89 | } 90 | const prompt = new 
Select({ 91 | name: 'selectedVersion', 92 | message: 'Which version?', 93 | choices: versions.map((r, idx) => ({ 94 | name: r.version, 95 | message: `${r.version} (${r.updated_at})` 96 | })) 97 | }); 98 | const selectedVersion = await prompt.run() 99 | clientVersion = selectedVersion 100 | } 101 | 102 | const client = await cm.getClient(clientName, { 103 | version: clientVersion, 104 | listener: createProgressListener() 105 | }) 106 | 107 | // console.log('client binary path', client) 108 | } 109 | 110 | const combineListeners = (...listeners: Function[]) => { 111 | return (newState: string, args: any) => { 112 | listeners.forEach(listener => listener && listener(newState, args)) 113 | } 114 | } 115 | 116 | export const startClient = async (clientName = 'geth', version='latest', flags: string[] = [], options: any = {}) => { 117 | const cm = new SingleClientManager() 118 | const listener = combineListeners(createProgressListener(), options.listener) 119 | const client = await cm.getClient(clientName, { 120 | version, 121 | listener 122 | }) 123 | await client.start(flags, { 124 | listener, 125 | ...options 126 | }) 127 | } 128 | 129 | export const execClient = async (clientName = 'geth', clientVersion='latest', command? : string) => { 130 | if (!command) { 131 | throw new Error('Invalid command') 132 | } 133 | const listener = createProgressListener() 134 | const cm = new SingleClientManager() 135 | const client = await cm.getClient(clientName, { 136 | version: clientVersion, 137 | listener 138 | }) 139 | // result can be ignore because 'inherit' will log everything to stdout 140 | const result = await client.execute(command, { 141 | listener, 142 | stdio: 'inherit' 143 | }); 144 | return result 145 | } -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs' 2 | import path from 'path' 3 | import crypto from 'crypto' 4 | import stream from 'stream' 5 | import { FilterFunction, FilterConfig } from './types' 6 | const openpgp = require('openpgp') 7 | 8 | export const uuid = (filePath?: string) => { 9 | // create stable ids without leaking path 10 | if (typeof filePath === 'string') { 11 | return crypto.createHash('md5').update(filePath).digest("hex") 12 | } 13 | // replace special chars to make url friendly 14 | return crypto.randomBytes(3 * 4).toString('base64').replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/g, ''); 15 | } 16 | 17 | export const normalizePlatform = (platform: string) => { 18 | if (['mac'].includes(platform.toLowerCase())) { 19 | platform = 'darwin' 20 | } 21 | else if (['win32'].includes(platform.toLowerCase())) { 22 | platform = 'windows' 23 | } 24 | return platform 25 | } 26 | 27 | export const createFilterFunction = (filterConfig?: FilterConfig): FilterFunction => { 28 | if (!filterConfig) { 29 | return () => true 30 | } 31 | if (typeof filterConfig === 'function') { 32 | return filterConfig 33 | } 34 | if (!filterConfig || !('name' in filterConfig)) { 35 | return (() => true) 36 | } 37 | const { name } = filterConfig 38 | if (typeof name.includes === 'string') { 39 | name.includes = [ name.includes ] 40 | } 41 | if (typeof name.excludes === 'string') { 42 | name.excludes = [ name.excludes ] 43 | } 44 | const includes : Array = name.includes || [] 45 | const excludes: Array = name.excludes || [] 46 | return ({ fileName, version }: any) => { 47 | if (!fileName) { 48 | return false 49 | } 50 | fileName = 
fileName.toLowerCase() 51 | const shouldFilter = ( 52 | (!includes || includes.every(val => fileName.indexOf(val) >= 0)) && 53 | (!excludes || excludes.every(val => fileName.indexOf(val) === -1)) 54 | ) 55 | // console.log(fileName, shouldFilter, excludes) 56 | return shouldFilter 57 | } 58 | } 59 | 60 | export const validateConfig = (clientConfig: any) => { 61 | if (!clientConfig) { return false } 62 | if (typeof clientConfig.name !== 'string') { return false } 63 | if (!clientConfig.repository && !clientConfig.dockerimage) { 64 | return false 65 | } 66 | return true 67 | } 68 | 69 | export const bufferToStream = (buf: Buffer) => { 70 | const readable = new stream.Readable() 71 | readable._read = () => { } // _read is required but you can noop it 72 | readable.push(buf) 73 | readable.push(null) 74 | return readable 75 | } 76 | 77 | export const verifyPGP = async (filePath: string, publicKeyArmored: string, detachedSignature: string) => { 78 | const readableStream = fs.createReadStream(filePath) 79 | const options = { 80 | message: openpgp.message.fromBinary(readableStream), // CleartextMessage or Message object 81 | signature: await openpgp.signature.readArmored(detachedSignature), // parse detached signature 82 | publicKeys: (await openpgp.key.readArmored(publicKeyArmored)).keys // for verification 83 | } 84 | const result = await openpgp.verify(options) 85 | await openpgp.stream.readToEnd(result.data); 86 | const validity = await result.signatures[0].verified; 87 | if (validity) { 88 | return { 89 | isValid: true, 90 | signedBy: result.signatures[0].keyid.toHex() 91 | } 92 | } else { 93 | throw new Error('signature could not be verified'); 94 | } 95 | } 96 | 97 | export const verifyBinary = async (binaryPath: string, armoredPublicKeys: string, detachedSig: string) => { 98 | const result = await verifyPGP(binaryPath, armoredPublicKeys, detachedSig) 99 | return result 100 | } 101 | 102 | const getJavaVersion = (javaBinPath: string) => { 103 | 104 | } 105 | 106 | export const resolveRuntimeDependency = (runtimeDependency : any = {}) => { 107 | const { name, version, type } = runtimeDependency 108 | if (name === 'Java') { 109 | if ('JAVA_HOME' in process.env) { 110 | const JAVA_HOME = process.env['JAVA_HOME'] 111 | if (!JAVA_HOME) { 112 | return undefined 113 | } 114 | const JAVA_BIN = path.join( 115 | JAVA_HOME, 116 | 'bin', 117 | process.platform === 'win32' ? 'java.exe' : 'java' 118 | ) 119 | return fs.existsSync(JAVA_BIN) ? JAVA_BIN : undefined 120 | } else { 121 | // MAC: 122 | if (process.platform === 'darwin') { 123 | if (fs.existsSync('/Library/Java/JavaVirtualMachines/')) { 124 | const vms = fs.readdirSync('/Library/Java/JavaVirtualMachines/') 125 | // /Contents/Home/bin/java 126 | // console.log('found vms', vms) 127 | } 128 | // alternative tests 129 | // /usr/bin/java 130 | // /usr/libexec/java_home -V 131 | // execute 'which java' 132 | const javaPath = '/usr/bin/java' 133 | return fs.existsSync(javaPath) ? 
javaPath : undefined 134 | } 135 | // console.log(process.env.PATH.includes('java')) 136 | } 137 | return undefined 138 | } 139 | return undefined 140 | } 141 | 142 | export const extractPlatformFromString = (str : string) : 'windows' | 'darwin' | 'linux' | undefined => { 143 | str = str.toLowerCase() 144 | if (str.includes('win32') || str.includes('windows')) { 145 | return 'windows' 146 | } 147 | if (str.includes('darwin') || str.includes('mac') || str.includes('macos')) { 148 | return 'darwin' 149 | } 150 | if (str.includes('linux')) { 151 | return 'linux' 152 | } 153 | return undefined 154 | } 155 | 156 | // should match .sha256 => match length 7 157 | export const getFileExtension = (str: string) : string | undefined => { 158 | return str.match(/\.[0-9a-z]{1,7}$/i)?.shift() 159 | } 160 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Ethereum Binaries 2 | 3 | Fast, easy and secure Ethereum binary management. 4 | 5 | - [X] 🎁 **Package Extraction** 6 | - [x] 🔐 **Binary Verification** 7 | - [x] ♨️ **Runtime Detection** 🐍 8 | - [X] 🐳 **Docker Support** 9 | - [X] ⏰ **Lifecycle Events [ IPC_READY | SYNCED | STOPPED ... ]** 10 | - [x] ☁️ **Auto Update** 11 | - [x] ⚡ **Caching** 12 | - [x] 🐙 **Version Management** 13 | - [x] 🌈 **Multi Client Support** 14 | 15 | # Supported Clients & Binaries 16 | 17 | # Intro 18 | 19 | Binaries are an integral part of the Ethereum ecosystem. There are many amazing tools (Clef, ZoKrates, Puppeth, ...) that go even beyond the different client implementations (Geth, Prysm, Besu, Nethermind, ...). 20 | However, managing them can be a very complex task. There are no standards for how binaries are distributed and you might find Docker images, binaries hosted on (GitHub, Azure, Bintray, AWS), or even have to build them from source yourself by installing the respective toolchains first and learning about language specific details. 21 | Moreover, important steps such as binary verification with e.g. GPG are often skipped because it is too complex or inconvenient. 22 | Interacting with these binaries, e.g. from a script file when they are running inside a container creates a whole new set of challenges. 23 | The goal of this library is to create a unified interface to download, configure and interact with Ethereum binaries so that it's more about the `what` and less about `how`. 24 | 25 | # Installation 26 | ```shell 27 | npm install -g ethbinary 28 | ``` 29 | 30 | # Quickstart 🚀 31 | 32 | #### Example: Start Geth 33 | ```shell 34 | ethbinary geth@latest --goerli 35 | ``` 36 | 37 | Will download the latest version of geth and start geth with a connection to the goerli testnet: 38 | 39 | ![Fast Start Gif](r./../img/fast_start.gif?raw=true "Title") 40 | 41 | 42 | # Use in CLI 43 | 44 | ### Overview 45 | 46 | ```shell 47 | USAGE 48 | 49 | ethbinary 50 | 51 | SUBCOMMANDS 52 | 53 | download - Downloads a client 54 | exec - Executes a command on a client 55 | list - Lists the supported clients 56 | start - Starts a client 57 | version - Prints the ethbinary version 58 | ``` 59 | 60 | Commands will auto-detect the operating system and download binaries for the correct platform. 61 | All client commands support a shorthand `@`. 62 | ethbinary supports two special version string: `latest` & `cache`. 
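A short illustration of the two special version strings (hedged sketch, behaviour as described by the package-resolution options: `latest` consults the remote release list and downloads a newer build if one exists, while `cache` reuses the newest binary already on disk and skips the remote lookup):

```shell
ethbinary start geth@latest --goerli   # resolve the newest release online, download only if not cached
ethbinary start geth@cache --goerli    # start the newest locally cached geth, no network lookup
```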
63 | 64 | 65 | ### Examples 66 | 67 | ```shell 68 | ethbinary list //example: returns [ 'besu', 'ewasm', 'geth', 'prysm' ] 69 | 70 | ethbinary download geth // will display version selector 71 | ethbinary download geth@1.9.10 // short-hand specifier 72 | ethbinary download geth --clientVersion 1.9.10 // equivalent to above syntax 73 | 74 | ethbinary exec geth@latest "version" // the command MUST be one string for the parser to work 75 | ethbinary exec geth@latest "account new" // is auto-attached to terminal so that stdin for password works 76 | ethbinary exec geth --clientVersion latest "account new" 77 | 78 | ethbinary start geth // will start latest geth version with mainnet connection (geth default) 79 | ethbinary start geth --goerli 80 | ethbinary start geth@1.9.10 --goerli 81 | ``` 82 | 83 | # Use as Module 84 | 85 | ### Example Start Client Service 86 | ```javascript 87 | const { default: cm } = require('ethbinary') // get the client manager instance 88 | const client = await cm.getClient('geth') 89 | await cm.startClient(client, ['--goerli']) 90 | await cm.stopClient(client) 91 | ``` 92 | 93 | ### More Examples 94 | 95 | #### [Create Account](examples/create_account.js) 96 | 97 | ### API 98 | 99 | #### ClientInfo 100 | 101 | ```typescript 102 | interface ClientInfo { 103 | id: string // generated internal uuid 104 | type: 'docker' | 'binary' 105 | state: 'INIT' | 'STARTED' | 'STOPPED' | 'ERROR' 106 | started: number // timestamp 107 | stopped: number // timestamp 108 | binaryPath: string // path to extracted binary, runtime or docker container name 109 | processId: string // process id for started binaries or container id for docker clients 110 | } 111 | ``` 112 | 113 | #### ClientManager 114 | 115 | ##### `public async getClientVersions(clientName: string) : Promise` 116 | 117 | Returns the release list for a client. 118 | 119 | ##### `public async getClient(clientSpec: string | ClientConfig, options?: DownloadOptions) : Promise` 120 | 121 | Uses a cached client, or downloads a new / updated one / pulls docker image and returns `ClientInfo`. 122 | If version is `latest` and a newer version than the one on the system exists, it will download the newer version automatically. 123 | If binary can be extracted from a package it will be extracted and written to `options.cachePath`. 124 | If the client uses a runtime such as Java it will extract all package contents and `binaryPath` will point to the Java runtime. 125 | If the client is distributed as a docker image `binaryPath` will be set to the name of the existing or generated Docker container. 126 | 127 | ##### `public async startClient(clientId: string | ClientInfo, version: string, flags?: string[], options?: DownloadOptions) : Promise` 128 | 129 | Uses `getClient` internally but also starts a new child process or container for the client binary. 130 | 131 | ##### `public async stopClient(clientId: string | ClientInfo,) : Promise` 132 | 133 | Stops the process(es) and container(s) associated with a client. 134 | Throws if no process is found. 135 | 136 | # Use with Docker 137 | 138 | Docker is great for isolated environments: each container instance has their own file system, networking, and isolated process tree separate from the host. 139 | This library comes with multiple tools to make interacting with docker easier. 140 | 141 | 142 | ### Clients 143 | 144 | `ethbinary` supports the execution of dockerized clients i.e. binaries that are distributed as Docker images. 
145 | If a dockerized client is started, the `processId` of the `ClientInfo` object returned will be the respective container id. 146 | 147 | ### Wrapping Binaries 148 | 149 | # Binary Verification 150 | 151 | # Extension 152 | 153 | Client support is handled through a `ClientConfiguration`. 154 | 155 | There are two types of clients: Dockerized and PackagedBinary and each has their own configuration. 156 | 157 | ### Base Config 158 | 159 | ```typescript 160 | export interface ClientBaseConfig { 161 | name: string; 162 | displayName: string; 163 | flags?: string[] 164 | } 165 | ``` 166 | 167 | ### Packaged Binaries 168 | 169 | A `ClientConfiguration` for a `PackagedBinary` client has the form: 170 | 171 | ```typescript 172 | export interface PackageConfig extends ClientBaseConfig { 173 | repository: string; 174 | prefix?: undefined; 175 | filter?: FilterFunction; 176 | binaryName?: string; 177 | publicKey?: string; 178 | } 179 | ``` 180 | 181 | #### repository : ` | url` 182 | 183 | `` specifies the binary hoster with `ethpkg` syntax like `azure:gethstore`, `bintray:hyperledger-org/besu-repo/besu`, `github:ethereum/client` 184 | 185 | `url` a fully qualified url to the project / repository 186 | 187 | #### prefix : `string` 188 | 189 | The `prefix` is a server-side executed filter. Usually implemented as string matching on the file's key or path. 190 | AWS S3: https://docs.aws.amazon.com/AmazonS3/latest/dev/ListingKeysHierarchy.html 191 | Azure Blob storage: https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs#uri-parameters 192 | 193 | #### filter : `predicate` 194 | 195 | Contrary to `prefix`, `filter` specifies a predicate function that is executed client-side. Note that data, which is filtered out client-side, is unnecessarily transferred. 196 | If this can be avoided by using a `prefix` it should be implemented. 197 | 198 | #### binaryName : `string` 199 | 200 | The name or relative path of the binary within the package - e.g. 'geth'. 201 | The name is auto-expanded to geth.exe if necessary. 202 | 203 | ### Dockerized Clients 204 | 205 | Are binaries that are distributed as Docker images. 206 | 207 | The configuration for a Dockerized client has the following properties: 208 | 209 | ```typescript 210 | export interface DockerConfig extends ClientBaseConfig { 211 | dockerimage: string; 212 | entryPoint?: string; 213 | service: boolean; 214 | } 215 | ``` 216 | 217 | #### dockerimage : `path | url` 218 | `path` a new image will be created based on the locally available `Dockerfile` 219 | 220 | `url` the image is pulled from the registry 221 | 222 | #### entryPoint : `path | 'auto'` 223 | 224 | `path` similar to `binaryName`, the `entryPoint` helps to locate the binary inside of the docker container. 225 | 226 | `'auto'` ethbinary will try to automatically detect the container's entryPoint based on container metadata. 227 | 228 | #### service : `true | false` 229 | 230 | `true` the binary specified by `entryPoint` is executed to start the service. 
231 | 232 | `false` the container is started and waits for the implementing client to issue comands via `execute` on the `entryPoint` 233 | 234 | -------------------------------------------------------------------------------- /src/Client/DockerizedClient.ts: -------------------------------------------------------------------------------- 1 | import path from 'path' 2 | import { BaseClient, CLIENT_STATE } from "./BaseClient" 3 | import { Container } from "dockerode" 4 | import DockerManager from "../DockerManager" 5 | import { DockerConfig, ClientInfo, ClientStartOptions, CommandOptions, GetClientOptions } from "../types" 6 | import stream, { Stream } from "stream" 7 | import { PROCESS_EVENTS } from "../events" 8 | import logger from '../Logger' 9 | 10 | const collectLogs = (stream: Stream, { 11 | clean = true 12 | } = {}): Promise => new Promise((resolve, reject) => { 13 | const _data: string[] = [] 14 | stream.on('data', (data: any) => { 15 | // remove all tty non-ascii control chars etc 16 | if (clean) { 17 | // console.log('>>', JSON.stringify(data.toString())) 18 | let printable = data.toString().replace(/[^ -~\r|\n]+/g, "") 19 | _data.push(printable) 20 | } 21 | if (data.toString().includes('runtime exec failed')) { 22 | reject(new Error('Command failed:\n' + data.toString())) 23 | } 24 | // console.log('data', data.toString()) 25 | }) 26 | let isResolved = false 27 | const _resolve = () => { 28 | let logs = _data.join().split(/\r|\n/).filter(l => !!l) 29 | if (!isResolved) { 30 | isResolved = true 31 | resolve(logs) 32 | } 33 | } 34 | stream.on('end', _resolve) 35 | // if a service is started we wait either till timeout or condition: ipc established 36 | /* 37 | if (!useBash) { 38 | setTimeout(_resolve, 3000) 39 | } 40 | */ 41 | }) 42 | 43 | export class DockerizedClient extends BaseClient { 44 | constructor( 45 | private _container: any, 46 | private _dockerManager: DockerManager, 47 | private _config: DockerConfig, 48 | private _imageName: string 49 | ) { super() } 50 | 51 | static async create(dockerManager: DockerManager, config: DockerConfig, { 52 | version, 53 | listener 54 | } : GetClientOptions) { 55 | // lazy init to avoid crashes for non docker functionality 56 | if (!dockerManager.isConnected()) { 57 | dockerManager.connect() 58 | } 59 | const imageName = await dockerManager.getOrCreateImage(config.name, config.dockerimage, { 60 | listener 61 | }) 62 | if (!imageName) { 63 | throw new Error('Docker image could not be found or created') 64 | } 65 | logger.log('image created', imageName) 66 | // only [a-zA-Z0-9][a-zA-Z0-9_.-] are allowed for container names 67 | // but image name can be urls like gcr.io/prysmaticlabs/prysm/validator 68 | // Date.now() allows to have multiple instances of one image 69 | // const containerName = `ethbinary_${config.name}_container_${Date.now()}` 70 | const containerName = `ethbinary_${config.name}_container` 71 | const container = await dockerManager.createContainer(imageName, containerName, { 72 | overwrite: true, // overwrite existing container 73 | dispose: false, // if containers are disposed they cannot be analyzed afterwards, therefore it is better to clean them on start 74 | overwriteEntrypoint: true, // use shell instead of potentially configured binary 75 | autoPort: false, // don't map to any port 76 | // FIXME don't use default ports 77 | ports: ['8545', '30303', '30303/udp'], 78 | volume: `${path.resolve(process.cwd())}:/shared_data` 79 | }) 80 | if (!container) { 81 | throw new Error('Docker container could not be created') 82 | 
} 83 | return new DockerizedClient(container, dockerManager, config, imageName) 84 | } 85 | 86 | info(): ClientInfo { 87 | return { 88 | id: this.id, 89 | type: 'docker', 90 | state: this._state, 91 | started: this._started, 92 | stopped: this._stopped, 93 | ipc: this._ipc, 94 | rpcUrl: this._rpcUrl, 95 | processId: this._container.id, 96 | binaryPath: this._config.entryPoint, 97 | logs: this._logs 98 | } 99 | } 100 | 101 | private async _getEntryPoint() : Promise{ 102 | const { _config: config, _container: container } = this 103 | if ('originalEntrypoint' in container) { 104 | // @ts-ignore 105 | return container.originalEntrypoint 106 | } 107 | let entryPoint = config.entryPoint 108 | if (entryPoint === 'auto') { 109 | entryPoint = await this._dockerManager.detectEntryPoint(container) 110 | console.log('entrypoint detected: ', entryPoint, 'based on container config') 111 | } 112 | return entryPoint 113 | } 114 | 115 | private async _isRunning() { 116 | const { _config: config, _container: container } = this 117 | const info = await container.inspect() 118 | return info.State.Running 119 | } 120 | 121 | async start(flags: string[] = [], options: ClientStartOptions = {}): Promise { 122 | await super.start(flags, options) 123 | const { _config: config, _container: container } = this 124 | 125 | const { listener = () => { } } = options 126 | listener(PROCESS_EVENTS.CLIENT_START_STARTED, { name: this._config.name, flags }) 127 | const result = await this._container.start() 128 | 129 | // if the client is a service, we should also start the binary inside the container 130 | if (config.service) { 131 | // add flags 132 | flags = config.flags || [] 133 | const entryPoint = await this._getEntryPoint() 134 | const cmd = [entryPoint, ...flags] 135 | const exec = await container.exec({ 136 | cmd, 137 | AttachStdin: true, 138 | AttachStdout: true, 139 | AttachStderr: true, 140 | Tty: true 141 | }) 142 | 143 | // send exec object to container and collect response from stream 144 | // stream can be multiplexed i.e. 
stderr and stdout are mixed over one transport (http) 145 | const stream = await exec.start() 146 | // TODO handle stdin 147 | if (options.stdio === 'inherit') { 148 | container.modem.demuxStream(stream, process.stdout, process.stderr); 149 | } 150 | } 151 | listener(PROCESS_EVENTS.CLIENT_START_FINISHED, { name: this._config.name, flags }) 152 | } 153 | 154 | async stop(): Promise { 155 | await super.stop() 156 | await this._container.stop() 157 | } 158 | 159 | async run(command: string, { 160 | stdio = 'pipe', 161 | timeout = undefined, 162 | tty = stdio === 'inherit', 163 | useBash = false, // default: await client.execute('ls -la') will NOT work 164 | useEntrypoint = true, // default: await client.execute('--version') WILL work 165 | volume = undefined 166 | }: CommandOptions = {}): Promise { 167 | const image = this._imageName 168 | const cmd = command.split(' ') 169 | // console.log('execute command', cmd) 170 | const logs = await this._dockerManager.run(image, cmd, { 171 | stdio, 172 | volume 173 | }) 174 | return logs 175 | } 176 | 177 | // https://github.com/apocas/dockerode/issues/520#issuecomment-520174673 178 | async execute(command: string = '', { 179 | stdio = 'pipe', 180 | timeout = undefined, 181 | tty = stdio === 'inherit', 182 | useBash = false, // default: await client.execute('ls -la') will NOT work 183 | useEntrypoint = true // default: await client.execute('--version') WILL work 184 | }: CommandOptions = {}): Promise { 185 | const { _config: config, _container: container } = this 186 | const entryPoint = await this._getEntryPoint() 187 | 188 | // start container if not running already 189 | const isRunning = await this._isRunning() 190 | if (!isRunning) { 191 | await container.start() 192 | this._state = CLIENT_STATE.STARTED 193 | } 194 | 195 | let cmdArray = typeof command === 'string' ? command.split(' ') : command 196 | cmdArray = cmdArray.filter(arg => !!arg) 197 | 198 | // const info = await container.inspect() 199 | 200 | // bash -c string: If the -c option is present, then commands are read from string. 201 | // If there are arguments after the string, they are assigned to the positional parameters, starting with $0. 202 | let cmd 203 | if (useBash) { 204 | cmd = ['/bin/sh', '-c', useEntrypoint ? `${entryPoint} ${command}` : command] 205 | } else { 206 | cmd = useEntrypoint ? [entryPoint, ...cmdArray] : cmdArray 207 | } 208 | 209 | // create exec payload object 210 | const exec = await container.exec({ 211 | cmd, 212 | // attach[Stream] means we want the container output 213 | AttachStdin: true, 214 | AttachStdout: true, 215 | AttachStderr: true, 216 | Tty: tty // docker sends multiplexed streams only if there no tty attached 217 | }) 218 | 219 | // TODO implement timeout 220 | // send exec object to container and collect response from stream 221 | // stream can be multiplexed i.e. 
stderr and stdout are mixed over one transport (http) 222 | // const stream = await exec.start({ hijack: true, stdin: true }) 223 | const stream = await exec.start() 224 | if (stdio === 'inherit') { 225 | if (tty) { 226 | stream.pipe(process.stdout) 227 | } else { 228 | container.modem.demuxStream(stream, process.stdout, process.stderr); 229 | } 230 | process.stdin.pipe(stream) 231 | } 232 | const logs = await collectLogs(stream) 233 | return logs 234 | } 235 | } -------------------------------------------------------------------------------- /src/Client/BinaryClient.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs' 2 | import path from 'path' 3 | import { ChildProcess, spawn } from "child_process" 4 | import { BaseClient, CLIENT_STATE } from "./BaseClient" 5 | import { PackageConfig, ClientInfo, ClientStartOptions, CommandOptions, GetClientOptions, FilterFunction } from "../types" 6 | import { ProcessManager } from "../ProcessManager" 7 | import { PROCESS_EVENTS } from "../events" 8 | import { PackageManager, IPackage, download } from "ethpkg" 9 | import { verifyBinary, resolveRuntimeDependency, getFileExtension } from '../utils' 10 | import logger from '../Logger' 11 | 12 | 13 | const extractBinary = async (pkg: IPackage, name: string, binaryName?: string, destPath: string = process.cwd()) => { 14 | const packagePath = pkg.filePath // only set if loaded from cache 15 | const entries = await pkg.getEntries() 16 | if (entries.length === 0) { 17 | throw new Error('Invalid or empty package') 18 | } 19 | let binaryEntry = undefined 20 | if (!binaryName) { 21 | logger.warn('No "binaryName" specified: trying to auto-detect executable within package') 22 | binaryName = name 23 | } 24 | 25 | const ext = getFileExtension(binaryName) 26 | if (process.platform === 'win32' && ext === undefined) { 27 | binaryName += '.exe' 28 | } 29 | 30 | // const isExecutable = mode => Boolean((mode & 0o0001) || (mode & 0o0010) || (mode & 0o0100)) 31 | binaryEntry = entries.find((e: any) => e.relativePath.endsWith(binaryName)) 32 | 33 | if (!binaryEntry) { 34 | throw new Error( 35 | 'Binary unpack failed: not found in package - try to specify binaryName in your plugin or check if package contains binary' 36 | ) 37 | } else { 38 | binaryName = binaryEntry.file.name 39 | logger.warn('Auto-detected binary:', binaryName) 40 | } 41 | 42 | const destAbs = path.join(destPath, `${binaryName}_${pkg.metadata?.version}`) 43 | if (fs.existsSync(destAbs)) { 44 | return destAbs 45 | // fs.unlinkSync(destAbs) 46 | } 47 | // IMPORTANT: if the binary already exists the mode cannot be set 48 | fs.writeFileSync( 49 | destAbs, 50 | await binaryEntry.file.readContent(), 51 | { 52 | mode: parseInt('754', 8) // strict mode prohibits octal numbers in some cases 53 | } 54 | ) 55 | return destAbs 56 | } 57 | 58 | const verifyBinaryPackage = async (pkg: IPackage, publicKey?: string) => { 59 | if (!pkg.metadata || !pkg.metadata.signature) { 60 | throw new Error('') 61 | } 62 | const detachedSignature = await download(pkg.metadata.signature) 63 | if (!pkg.filePath) { 64 | throw new Error('Package could not be located for verification') 65 | } 66 | if (!publicKey) { 67 | throw new Error('PackageConfig does not specify public key') 68 | } 69 | const verificationResult = await verifyBinary(pkg.filePath, publicKey, detachedSignature.toString()) 70 | return verificationResult 71 | } 72 | 73 | export class BinaryClient extends BaseClient { 74 | private _process?: ChildProcess 75 | 76 | 
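  // Constructor arguments (resolved by create() below):
  // _binaryPath      - path to the extracted executable, or to a resolved runtime binary (e.g. java)
  // _processManager  - spawns, tracks and kills the child process for this client
  // _config          - the PackageConfig that was used to resolve (and optionally verify) the binary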
constructor( 77 | private _binaryPath: string, 78 | private _processManager: ProcessManager, 79 | private _config: PackageConfig 80 | ) { 81 | super() 82 | } 83 | 84 | public static async create(packageManager: PackageManager, processManager: ProcessManager, config: PackageConfig, { 85 | version, 86 | platform, 87 | cachePath, 88 | isPackaged, // are the binaries packaged? 89 | listener = () => { } 90 | }: GetClientOptions) { 91 | 92 | if (!isPackaged) { 93 | throw new Error('Raw binaries should be handled by client manager') 94 | } 95 | 96 | listener(PROCESS_EVENTS.RESOLVE_BINARY_STARTED, {}) 97 | 98 | const pkg = await packageManager.getPackage(config.repository, { 99 | prefix: config.prefix, // server-side filter based on string prefix 100 | version: version === 'latest' ? undefined : version, // specific version or version range that should be returned 101 | platform, 102 | filter: config.filter, // string filter e.g. filter 'unstable' excludes geth-darwin-amd64-1.9.14-unstable-6f54ae24 103 | cache: cachePath, // avoids download if package is found in cache 104 | cacheOnly: version === 'cache', // get latest cached if version = 'cache 105 | destPath: cachePath, // where to write package + metadata 106 | listener, // listen to progress events 107 | extract: config.extract || false, // extracts all package contents (good for java / python runtime clients without single binary) 108 | verify: false // ethpkg verification 109 | }) 110 | if (!pkg) { 111 | throw new Error('Package not found') 112 | } 113 | listener(PROCESS_EVENTS.RESOLVE_BINARY_FINISHED, { pkg }) 114 | 115 | // verify package 116 | if (pkg.metadata && pkg.metadata.signature) { 117 | // TODO call listener 118 | try { 119 | const verificationResult = await verifyBinaryPackage(pkg, config.publicKey) 120 | console.log('Verification result', verificationResult) 121 | } catch (error) { 122 | console.log('Verification failed') 123 | } 124 | } 125 | 126 | if (config.dependencies) { 127 | if (config.dependencies.runtime) { 128 | const runtime = config.dependencies.runtime[0] 129 | const runtimeBinaryPath = resolveRuntimeDependency(runtime) 130 | if (!runtimeBinaryPath) { 131 | throw new Error('Could not find path for runtime: ' + runtime.name) 132 | } 133 | logger.log('Runtime resolved: ', runtimeBinaryPath) 134 | return new BinaryClient(runtimeBinaryPath, processManager, config) 135 | } 136 | else { 137 | throw new Error('Invalid dependency config') 138 | } 139 | } 140 | 141 | const binaryPath = await extractBinary(pkg, config.name, config.binaryName, cachePath) 142 | const client = new BinaryClient(binaryPath, processManager, config) 143 | return client 144 | } 145 | 146 | info(): ClientInfo { 147 | return { 148 | id: this.id, 149 | type: 'binary', 150 | state: this._state, 151 | started: this._started, 152 | stopped: this._stopped, 153 | ipc: this._ipc, 154 | rpcUrl: this._rpcUrl, 155 | processId: '' + (this._process ? 
this._process.pid : ''), 156 | binaryPath: this._binaryPath, 157 | logs: [...this._logs] 158 | } 159 | } 160 | private _parseLogs = (data: Buffer) => { 161 | let log = '' 162 | try { 163 | log = data.toString() 164 | } catch (error) { 165 | return 166 | } 167 | if (!log) { return } 168 | 169 | // split logs into lines and process + emit them line by line 170 | let lines = log.split(/\r|\n/) 171 | 172 | lines.forEach(line => { 173 | 174 | // ignore empty lines 175 | if (!line) { return } 176 | 177 | // search for IPC path in logs: 178 | if (line.endsWith('.ipc') || line.includes('IPC endpoint opened')) { 179 | // example geth: INFO [05-22|14:50:58.240] IPC endpoint opened url=/Users/user/Library/Ethereum/goerli/geth.ipc 180 | let ipcPath = line.split('=')[1].trim() 181 | // fix double escaping 182 | if (ipcPath.includes('\\\\')) { 183 | ipcPath = ipcPath.replace(/\\\\/g, '\\') 184 | } 185 | this.ipc = ipcPath 186 | } 187 | 188 | if (line.includes('HTTP endpoint opened')) { 189 | // example INFO [05-22|15:52:31.584] HTTP endpoint opened url=http://127.0.0.1:8545/ cors= vhosts=localhost 190 | const urlKeyVal = line.split(' ').find(l => l.startsWith('url')) 191 | if (urlKeyVal) { 192 | const [key, url] = urlKeyVal.split('=') 193 | if (url) { 194 | this.rpc = url.trim() 195 | } 196 | } 197 | } 198 | 199 | // will emit the log 200 | this.addLog(line) 201 | }) 202 | 203 | } 204 | async start(flags: string[] = [], options: ClientStartOptions = {}): Promise { 205 | await super.start(flags, options) 206 | options = { 207 | listener: () => { }, 208 | stdio: 'pipe', 209 | ...options 210 | } 211 | const { listener = () => { } } = options 212 | listener(PROCESS_EVENTS.CLIENT_START_STARTED, { name: this._config.name, flags }) 213 | this._started = Date.now() 214 | this._process = this._processManager.spawn(this._uuid, this._binaryPath, [...flags], { 215 | stdio: options.stdio, 216 | }) 217 | const { stdout, stderr, stdin } = this._process 218 | if (stdout && stderr) { 219 | stdout.on('data', this._parseLogs) 220 | stderr.on('data', this._parseLogs) 221 | } 222 | this._process.on('error', (error) => { 223 | // FIXME handle process errors 224 | }) 225 | listener(PROCESS_EVENTS.CLIENT_START_FINISHED, { name: this._config.name, flags }) 226 | } 227 | async stop(): Promise { 228 | await super.stop() 229 | if (!this._process) { 230 | return 231 | } 232 | const { stdout, stderr, stdin } = this._process 233 | if (stdout && stderr) { 234 | stdout.off('data', this._parseLogs) 235 | stderr.off('data', this._parseLogs) 236 | } 237 | this._processManager.kill('' + this._process.pid) 238 | this._process = undefined 239 | } 240 | async execute(command: string = '', options: CommandOptions = {}): Promise { 241 | if (this._process) { 242 | throw new Error('Binary already running') 243 | } 244 | const flags: string[] = command.split(' ') 245 | const stdio = options.stdio || 'pipe' 246 | 247 | 248 | // when stdio = 'inherit': this does not exist: process.stdout.on('data', onData) 249 | // and cp.stdout won't be available. 
therefore processManager simulates inherit 250 | // so that we can intercept / log the output 251 | const _process = await this._processManager.exec(this._uuid, this._binaryPath, [...flags], { 252 | stdio, 253 | }) 254 | 255 | // collect process logs for 30 seconds 256 | // if process did not exit => throws timeout exception 257 | // else return process output 258 | // this will only work when process spawned with stdio 'pipe' 259 | const timeout = options.timeout 260 | const commandLogs: Array = [] 261 | const { stdout, stderr, stdin } = _process 262 | 263 | // note: this is very similar to parseLogs but does not 264 | // emit or analyze the command output 265 | // we should maybe merge the two in the future 266 | const onData = (data: any) => { 267 | const log = data.toString() 268 | if (log) { 269 | let parts = log.split(/\r|\n/) 270 | parts = parts.filter((p: string) => p !== '') 271 | commandLogs.push(...parts) 272 | } 273 | } 274 | 275 | if (stdout && stderr) { 276 | stdout.on('data', onData) 277 | stderr.on('data', onData) 278 | } 279 | 280 | try { 281 | await this._processManager.onExit(_process, timeout) 282 | // update state to indicate that process successfully exited (no kill during cleanup) 283 | this._state = CLIENT_STATE.STOPPED 284 | } catch (error) { 285 | console.warn('Dumping output of cancelled command:') 286 | console.warn(commandLogs) 287 | throw error 288 | } 289 | return commandLogs 290 | } 291 | async input(_input: string) { 292 | if (!this._process) { 293 | throw new Error('Binary not running') 294 | } 295 | this._process.stdin?.write(`${_input}\n`) 296 | } 297 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | Build Status 3 | Downloads 4 | Version 5 | License 6 |
7 |

8 | 9 | # Ethereum Binaries 10 | 11 | ## 🔥 This project has been deprecated and will no longer be maintained. 🔥 12 | 13 |
14 | 15 | Fast, easy and secure Ethereum binary management. 16 | 17 | - [X] 🎁 **Package Extraction** 18 | - [x] 🔐 **Binary Verification** 19 | - [x] ♨️ **Runtime Detection** 🐍 20 | - [X] 🐳 **Docker Support** 21 | - [X] ⏰ **Lifecycle Events [ IPC_READY | SYNCED | STOPPED ... ]** 22 | - [x] ☁️ **Auto Update** 23 | - [x] ⚡ **Caching** 24 | - [x] 🐙 **Version Management** 25 | - [x] 🌈 **Multi Client Support** 26 | 27 | # Docs 28 | 29 | Documentation is available at [github.io/ethereum-binaries](https://ethereum.github.io/ethereum-binaries/#/) 30 | 31 | # Supported Clients & Binaries 32 | 33 | 34 | 35 | 36 | 41 | 46 | 51 | 52 | 57 | 58 | 59 |
37 | geth logo 38 |
39 | Geth 40 |
42 | prysm logo 43 |
44 | Prysm 45 |
47 | puppeth logo 48 |
49 | Puppeth 50 |
53 | zokrates logo 54 |
55 | ZoKrates 56 |
60 | 61 | **Supported clients can be referenced by their name and used directly. For all other binaries see [`Extension`](#extension)** 62 | 63 | # Intro 64 | 65 | Binaries are an integral part of the Ethereum ecosystem. There are many amazing tools (Clef, ZoKrates, Puppeth, ...) that go even beyond the different client implementations (Geth, Prysm, Besu, Nethermind, ...). 66 | However, managing them can be a very complex task. There are no standards for how binaries are distributed and you might find Docker images, binaries hosted on (GitHub, Azure, Bintray, AWS), or even have to build them from source yourself by installing the respective toolchains first and learning about language specific details. 67 | Moreover, important steps such as binary verification with e.g. GPG are often skipped because it is too complex or inconvenient. 68 | Interacting with these binaries, e.g. from a script file when they are running inside a container creates a whole new set of challenges. 69 | The goal of this library is to create a unified interface to download, configure and interact with Ethereum binaries so that it's more about the `what` and less about `how`. 70 | 71 | # Installation 72 | ```shell 73 | npm install -g ethbinary 74 | ``` 75 | 76 | # Quickstart 🚀 77 | 78 | ```shell 79 | ethbinary geth@latest --goerli 80 | ``` 81 | 82 | Will download the latest version of geth and start geth with a connection to the goerli testnet: 83 | 84 | ![Fast Start Gif](./img/fast_start.gif?raw=true) 85 | 86 | # Examples 87 | 88 | ## CLI 89 | ```shell 90 | ethbinary list //example: returns [ 'besu', 'ewasm', 'geth', 'prysm' ] 91 | 92 | ethbinary download geth // will display version selector 93 | ethbinary download geth@1.9.10 // short-hand specifier 94 | ethbinary download geth --clientVersion 1.9.10 // equivalent to above syntax 95 | 96 | ethbinary exec geth@latest "version" // the command MUST be one string for the parser to work 97 | ethbinary exec geth@latest "account new" // is auto-attached to terminal so that stdin for password works 98 | ethbinary exec geth --clientVersion latest "account new" // verbose syntax 99 | 100 | ethbinary start geth // will start latest geth version with mainnet connection (geth default) 101 | ethbinary start geth --goerli 102 | ethbinary start geth@1.9.10 --goerli 103 | ``` 104 | 105 | ## Module 106 | ### Minimal Start / Stop 107 | 108 | ```javascript 109 | const { getClient } = require('ethbinary') 110 | const geth = await getClient('geth') 111 | await geth.start('--goerli') 112 | await geth.stop() 113 | ``` 114 | 115 | ### ethers + ethbinary = ❤️ 116 | 117 | #### Ipc Provider 118 | 119 | ```javascript 120 | const { getClient, CLIENT_STATE } = require('ethbinary') 121 | const ethers = require('ethers') 122 | 123 | const geth = await getClient('geth') 124 | await geth.start(['--goerli']) 125 | await geth.whenState(CLIENT_STATE.IPC_READY) 126 | const provider = new ethers.providers.IpcProvider(geth.ipc) 127 | const network = await provider.getNetwork() // network { name: 'goerli', chainId: 5, ensAddress: '0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e' } 128 | // send tx, interact with or deploy contracts here... 
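// (illustrative addition, not part of the original example) any ethers call works at this point,
// e.g. read the current block number over the same IPC provider:
const blockNumber = await provider.getBlockNumber()
console.log('current block', blockNumber)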
129 | await geth.stop() 130 | ``` 131 | 132 | #### HTTP RPC Server 133 | 134 | ```javascript 135 | const geth = await getClient('geth') 136 | // note that --http is new syntax for deprecated --rpc 137 | await geth.start(['--dev', '--http']) 138 | await geth.whenState(CLIENT_STATE.HTTP_RPC_READY) 139 | const provider = new ethers.providers.JsonRpcProvider(geth.rpcUrl) 140 | const network = await provider.getNetwork() // network { chainId: 1337, name: 'unknown' } 141 | await geth.stop() 142 | ``` 143 | 144 | ### Multi Client API 145 | 146 | ```javascript 147 | const { default: cm } = require('ethbinary') // get the client manager instance 148 | const clientId = await cm.getClient('geth') 149 | await cm.startClient(clientId, 'latest', ['--goerli']) 150 | await cm.stopClient(clientId) 151 | ``` 152 | 153 | ### More Examples 154 | 155 | check out the other [examples](./examples) 156 | 157 | # Binary Types 158 | 159 | There are different types of binaries / programs that all require different implementation and interaction strategies. 160 | An attempt to classify them based on interactivity might look like this: 161 | 162 | ### Services 163 | 164 | Services or daemons are binaries that are started as background processes. They usually don't require *interaction*. 165 | 166 | `geth` for example can be started as a service. Interaction with the service is happening in this case only via the separate HTTP/IPC RPC API or not at all. 167 | 168 | **The interaction pattern is:** 169 | ```javascript 170 | service.start() 171 | service.whenState(/*rpc ready*/) 172 | // do something with API 173 | service.stop() // optional 174 | ``` 175 | 176 | ### Wizards / Assistants / REPL 177 | 178 | Wizards are programs that prompt the user interactively for input and perform operations in between those prompts or after they've received a full configuration processing all responses. 179 | read–eval–print loop (REPL) programs fall into this category because they constantly require user input and perform actions only after interaction. 180 | 181 | `puppeth` for example is an interactive wizard. 182 | 183 | **The interaction pattern is:** 184 | 185 | #### Full user-interaction 186 | ```javascript 187 | const puppeth = await getClient('puppeth') 188 | await puppeth.start({ 189 | stdio: 'inherit' // pass control to terminal: user interacts via stdin & stdout 190 | }) 191 | ``` 192 | 193 | #### Automation 194 | ```javascript 195 | const puppeth = await getClient('puppeth') 196 | await puppeth.start() 197 | await puppeth.whenState(log => log.includes('Please specify a network name ')) // parse logs to determine custom state 198 | await puppeth.input('my-network-name') // write response to stdin 199 | await puppeth.whenState(/*...*/) // wait again 200 | await puppeth.input(/*...*/) // respond again 201 | ``` 202 | 203 | ### Servers 204 | 205 | Programs that offer functionality via an API to users or other programs are called `servers` for simplicity. 206 | The calling program is called the `client` in the traditional client-server-model. ethbinary takes the `client` role when it is interacting with other programs and performing calls to their API. 207 | 208 | The `ZoKrates` compiler is an example for a program that receives a single command, processes it and returns a result. 
209 | 210 | **The interaction pattern is:** 211 | ```javascript 212 | const zokrates = await getClient('zokrates') 213 | fs.writeFileSync(path.join(__dirname, 'test.zok'), ` 214 | def main(private field a, field b) -> (field): 215 | field result = if a * a == b then 1 else 0 fi 216 | return result 217 | `) 218 | await zokrates.execute(`compile -i ${SHARED_DATA}/test.zok`) 219 | await zokrates.execute(`setup`) 220 | await zokrates.execute('compute-witness -a 337 113569') 221 | await zokrates.execute('generate-proof') 222 | await zokrates.execute(`export-verifier`) 223 | await zokrates.execute(`cp ./verifier.sol ${SHARED_DATA}`, { useBash: true, useEntrypoint: false }) 224 | ``` 225 | Where a sequence of commands ins executed with `.execute` 226 | 227 | ### Hybrid 228 | 229 | Of course, some binaries can implement multiple behaviors and act as a service, execute commands and provide server functionality. 230 | 231 | `geth` is such an example: 232 | 233 | `geth account new` - issues a command which can also be interactive e.g. ask for password 234 | 235 | `geth` will start the service 236 | 237 | 238 | # Extension 239 | 240 | ethbinary was created with extension in mind. 241 | If your client is not (yet) supported, chances are good you can still make use of this module and benefit from all of its helpers: 242 | 243 | Some ad-hoc integrations will just magically work out of the box (more likely, if your project follows best practices). 244 | 245 | Some integrations require a little extra work. 246 | 247 | This is an example how a GitHub hosted binary can be added on the fly in case it's not available: 248 | 249 | ```typescript 250 | const cm = new SingleClientManager() 251 | const clientConfig = { 252 | name: 'prysm.validator', 253 | repository: 'https://github.com/prysmaticlabs/prysm', 254 | isPackaged: false, 255 | filter: ({ fileName }) => fileName.includes('validator') 256 | } 257 | const validator = await cm.getClient(clientConfig, { 258 | version: '1.0.0-alpha.6', 259 | }) 260 | ``` 261 | 262 | This is the same example, written in a rather verbose but detailed style (e.g. during development): 263 | 264 | ```typescript 265 | const cm = new SingleClientManager() 266 | 267 | // let's assume prysm is not supported.. 268 | // we add a new (minimal) config first 269 | // see docs or ./client-plugins for available configurations and properties 270 | cm.addClientConfig({ 271 | name: 'prysm.validator', 272 | repository: 'github:prysmaticlabs/prysm' // or 'https://github.com/prysmaticlabs/prysm' 273 | // dockerimage: 'gcr.io/prysmaticlabs/prysm/validator', // <= if it's a dockerized client 274 | }) 275 | 276 | // now, we can already use methods like getClient, getClientVersions etc.. 277 | // most of the time we are done here. but let's try a manual integration 278 | // prysm binaries are not packaged, but uploaded as raw binaries 279 | // we opt-out of the packaged binary flow with `packagesOnly: false` and take care of release assets ourselves 280 | // we will now get all release assets from the prysm github repository, ordered by latest version 281 | const versions = await cm.getClientVersions({ 282 | packagesOnly: false // prysm binaries are not packaged => return raw assets 283 | }) 284 | 285 | // prysm assets contain .sig, .sha256, .exe files among other things 286 | // if we want the latest binary we can just search e.g. 
for the first file with .exe or no extension 287 | // but let's say there is a bug in the .beta.8 so we search for .beta.6 288 | const latest = versions.find(release => { 289 | const ext = getFileExtension(release.fileName) 290 | const hasBinaryExtension = (ext === undefined || ext === '.exe') 291 | return hasBinaryExtension && release.fileName.includes('validator') && release.version === '1.0.0-alpha.6' 292 | }) 293 | 294 | // here, we could check our cache if the binary already exists... 295 | const clientPath = `path/to/${latest.fileName}` 296 | 297 | // to keep our dependency footprint small we can use the re-exported ethpkg module 298 | // which is the package manager used internally by ethbinary to manage (find, download, extract, verify...) assets 299 | const data = await ethpkg.download(latest.location, onProgress) 300 | fs.writeFileSync(clientPath, data, { 301 | mode: parseInt('754', 8) // make sure binary is executable 302 | }) 303 | 304 | // almost done: we create a client instance based on the binary 305 | const validator = await cm.getBinaryClient(clientPath) 306 | 307 | // that's it - we can now interact with a lifecycle managed binary :tada: 308 | const version = await validator.execute(`--version`) 309 | const result = await validator.execute(`accounts create -keystore-path "${__dirname}" --password="${password}"`) 310 | // ... 311 | 312 | ``` 313 | 314 | # Events 315 | 316 | ethbinary uses a an event listener mechanism to be notified about the different events during binary preparation. 317 | Most of the subroutines have 2-3 event(stage)s: `started`, `progress`, `finished` 318 | 319 | An event log for a binary download might look like this: 320 | ```javascript 321 | resolve_package_started // api request is made + cache is checked 322 | fetching_release_list_started // api response is processed: json / xml parsing 323 | fetching_release_list_finished // remote releases are merged with cached releases 324 | filter_release_list_started // invalid releases are removed, version + platform info is extracted, custom filter functions are applied 325 | sort_releases_started // releases are sorted by semver version & release date 326 | sort_releases_finished 327 | filter_release_list_finished 328 | resolve_package_finished // the latest release info is available 329 | download_started // the asset for the latest release are downloaded 330 | download_progress 331 | download_finished 332 | extraction_started // the binary is detected inside the package and the binary or all contents are extracted 333 | extraction_progreess 334 | extraction_finished 335 | ``` 336 | 337 | -------------------------------------------------------------------------------- /src/DockerManager.ts: -------------------------------------------------------------------------------- 1 | import fs from 'fs' 2 | import path from 'path' 3 | import Docker, { Container } from 'dockerode' 4 | import { bufferToStream } from './utils'; 5 | import { StateListener } from './types'; 6 | import ethpkg, { IPackage } from 'ethpkg' 7 | import { PROCESS_EVENTS } from './events'; 8 | import { Stream } from 'stream'; 9 | import { attachStdOut, attachStdin, detachStdout, detachStdin, WritableMemoryStream } from './DockerUtils'; 10 | 11 | export const isDirPath = (str: string) => !path.extname(str) 12 | 13 | const STATUS = { 14 | DOWNLOAD_COMPLETE: 'Download complete', 15 | VERIFYING_CHECKSUM: 'Verifying Checksum', 16 | DOWNLOADING: 'Downloading', 17 | PULLING_FS_LAYER: 'Pulling fs layer', 18 | PULL_COMPLETE: 'Pull complete', 19 | 
EXTRACTING: 'Extracting', 20 | } 21 | 22 | export interface ContainerConfig { 23 | dispose?: boolean; // destroy container after process finished 24 | ports?: string[]; // port mapping for container 25 | overwrite?: boolean; // if container with name exists -> remove 26 | overwriteEntrypoint?: boolean; // use /bin/sh instead of entrypoint 27 | autoPort?: boolean; // map ports to any available ports 28 | volume?: string; // shared volume 29 | cmd?: string[] 30 | } 31 | 32 | export interface GetImageOptions { 33 | listener?: StateListener 34 | } 35 | 36 | export default class DockerManager { 37 | public _docker: any; 38 | constructor(private prefix = 'ethbinary') { 39 | 40 | } 41 | 42 | public isConnected() { 43 | return this._docker !== undefined 44 | } 45 | 46 | public connect() { 47 | const socket = process.env.DOCKER_SOCKET || '/var/run/docker.sock'; 48 | const stats = fs.statSync(socket); 49 | if (!stats.isSocket()) { 50 | throw new Error('Could not establish Docker connection - is Docker running?'); 51 | } 52 | this._docker = new Docker({ socketPath: socket }); 53 | } 54 | 55 | public isValidRepoTag(imageTag: string) { 56 | // FIXME validate docker url 57 | return true 58 | } 59 | 60 | private pullImage = async (repoTag: string, listener: StateListener) => { 61 | listener(PROCESS_EVENTS.PULL_DOCKER_IMAGE_STARTED) 62 | return new Promise((resolve, reject) => { 63 | this._docker.pull(repoTag, (err: Error, stream: Stream) => { 64 | this._docker.modem.followProgress(stream, onFinished, onProgress); 65 | function onFinished(err: Error, output: Array) { 66 | listener(PROCESS_EVENTS.PULL_DOCKER_IMAGE_FINISHED) 67 | //output is an array with output json parsed objects 68 | resolve(output) 69 | } 70 | function onProgress(event: any) { 71 | // downloads are done in parallel with id referencing a specific download 72 | const { status, progressDetail, id } = event 73 | if (status === STATUS.DOWNLOADING && progressDetail) { 74 | const { current, total } = progressDetail 75 | listener(PROCESS_EVENTS.PULL_DOCKER_IMAGE_PROGRESS, { 76 | id, 77 | status, 78 | progress: (100 * (current / total)), 79 | progressDetail 80 | }) 81 | } 82 | } 83 | }) 84 | }) 85 | } 86 | 87 | /** 88 | * if docker hub => pull image 89 | * if dockerfile => build 90 | * if available => return local image 91 | * @param imageName 92 | * @param param1 93 | */ 94 | async getImage(imageName: string, { 95 | listener = (state: string, args: any) => { } 96 | } = {}) { 97 | throw new Error('not implemented') 98 | } 99 | 100 | public async createImage(imageName: string, dockerFile: IPackage, listener: StateListener = (newState: string, arg: any) => undefined) { 101 | if (!this.isConnected()) { 102 | throw new Error('Not connected to docker daemon - forgot .connect() ?') 103 | } 104 | const buf = await dockerFile.toBuffer() 105 | // always prefix images for detection 106 | imageName = imageName.startsWith(`${this.prefix}_`) ? 
imageName : `${this.prefix}_${imageName}` 107 | const _stream = await this._docker.buildImage(bufferToStream(buf), { 108 | t: imageName 109 | }) 110 | // parse stream and pass events to listener 111 | _stream.on('data', (chunk: any) => { 112 | let eventObjects = [] 113 | try { 114 | let st = chunk.toString('utf8') 115 | // st = st.replace(/\r*\n*\s*\S*/g, '') 116 | st = st.replace(/\r?\n|\r/g, '') 117 | st = st.replace(/}(?!,)/gi, '},') 118 | st = st.replace(',}', '}') 119 | let jsonString = `[${st}]` 120 | jsonString = jsonString.replace(',]', ']') 121 | eventObjects = JSON.parse(jsonString) 122 | } catch (error) { 123 | console.log('parse docker log error: ', error.message) 124 | } 125 | 126 | for (const dockerEvent of eventObjects) { 127 | if (dockerEvent.error) { 128 | // TODO handle error code 129 | throw new Error(dockerEvent.error) 130 | } 131 | const log = dockerEvent.stream ? dockerEvent.stream.trim() : '' 132 | if (log) { 133 | listener(PROCESS_EVENTS.DOCKER_EVENT, { log }) 134 | } 135 | } 136 | }) 137 | const res = await new Promise((resolve, reject) => { 138 | this._docker.modem.followProgress(_stream, (err: Error, res: any) => err ? reject(err) : resolve(res)); 139 | }) 140 | if (res) { 141 | return imageName 142 | } 143 | return undefined 144 | } 145 | 146 | public async createImageFromDockerfile(imageNameUnprefixed: string, dockerFilePath: string, listener?: StateListener) { 147 | const dirPath = path.dirname(dockerFilePath) 148 | const pkg = await ethpkg.createPackage(dirPath, { 149 | type: 'tar', 150 | compressed: true, 151 | listener: (newState, args) => { 152 | // console.log('newState', newState, args) 153 | } 154 | }) 155 | // TODO check pkg contains dockerfile 156 | const imageName = await this.createImage(imageNameUnprefixed, pkg, listener) 157 | return imageName 158 | } 159 | 160 | public async createImageFromDockerfileContent(imageNameUnprefixed: string, dockerFileContents: string, listener?: StateListener) { 161 | const pkg = await ethpkg.createPackage('temp', { 162 | type: 'tar', 163 | compressed: true 164 | }) 165 | await pkg.addEntry('Dockerfile', Buffer.from(dockerFileContents)) 166 | const imageName = await this.createImage(imageNameUnprefixed, pkg, listener) 167 | return imageName 168 | } 169 | 170 | public async getOrCreateImage(imageNameUnprefixed: string, imageSpecifier: string, { 171 | listener = () => { } 172 | } : GetImageOptions = {}) { 173 | // TODO if version = 'cached' do NOT pull image but use existing 174 | // local Dockerfile 175 | if (fs.existsSync(imageSpecifier)) { 176 | return this.createImageFromDockerfile(imageNameUnprefixed, imageSpecifier, listener) 177 | } 178 | // docker url / repo tag 179 | else if (this.isValidRepoTag(imageSpecifier)) { 180 | console.log('image specifier', imageSpecifier) 181 | const repoTag = imageSpecifier 182 | const image = await this.pullImage(repoTag, listener) 183 | if (!image) { 184 | throw new Error('Image could not be pulled') 185 | } 186 | return repoTag // NOTE: unprefixed 187 | } 188 | else { 189 | throw new Error('Invalid Dockerfile / image specifier') 190 | } 191 | } 192 | 193 | public async getContainer(containerName: string, stopRunning = true): Promise { 194 | const containers = await this._docker.listContainers({ all: true }); 195 | const containerInfo: Docker.ContainerInfo | undefined = containers.find((c: any) => c.Names[0] === `/${containerName}`) 196 | if (!containerInfo) { 197 | return undefined 198 | } 199 | const { Id, State, /*Names,*/ Image } = containerInfo 200 | const container = await 
this._docker.getContainer(Id) 201 | // handles "container already started" 202 | if (State !== 'stopped' && stopRunning) { 203 | try { 204 | await container.stop() 205 | } catch (error) { 206 | // ignore if stopped already 207 | // console.log('container could not be stopped:', error.message) 208 | } 209 | } 210 | return container 211 | } 212 | 213 | public async removeContainer(container: Container) { 214 | // force will stop if running 215 | return container.remove({ force: true }) 216 | } 217 | 218 | public async createContainer(imageName: string, containerName: string, { 219 | overwrite = false, 220 | dispose = false, 221 | autoPort = false, 222 | overwriteEntrypoint = false, 223 | ports = [], 224 | volume = undefined, 225 | cmd = undefined 226 | } : ContainerConfig = {}) { 227 | // TODO handle 'OCI runtime create failed: container_linux.go:346: starting container process caused "exec: \\"/bin/bash\\": stat /bin/bash: no such file or directory": unknown' 228 | // TODO handle no such container - No such image: golang:1.13-alpine 229 | const stopIfRunning = true 230 | 231 | let container = await this.getContainer(containerName, stopIfRunning) 232 | if (!container || overwrite) { 233 | if (container) { 234 | await this.removeContainer(container) 235 | } 236 | // https://docs.docker.com/engine/api/v1.40/#operation/ContainerCreate 237 | const containerConfig : any = { 238 | Image: imageName, 239 | name: containerName, 240 | AttachStdin: true, 241 | AttachStdout: true, 242 | AttachStderr: true, 243 | // The -it runs Docker interactively (so you get a pseudo-TTY with STDIN) 244 | Tty: true, // keeps container running 245 | OpenStdin: true, 246 | // StdinOnce: false, 247 | // FIXME a problem that probably occurs is that ports get configured by the user in between init() and start() 248 | ExposedPorts: { }, 249 | HostConfig: { 250 | // Automatically remove the container when the container's process exits (e.g. when stopped). 251 | AutoRemove: dispose, 252 | PortBindings: {}, 253 | Binds: volume ? [ volume ] : undefined 254 | }, 255 | Cmd: cmd 256 | } 257 | 258 | if (overwriteEntrypoint) { 259 | // containerConfig['Entrypoint'] = ['/bin/sh'] 260 | } 261 | 262 | // we auto-bind all ports that are exposed from the container to the host 263 | for (let port of ports) { 264 | // "/" 265 | if (!port.includes('/')) { 266 | port += '/tcp' // expand ports to tcp as default 267 | } 268 | 269 | // An object mapping ports to an empty object in the form: 270 | // {"/": {}} 271 | containerConfig['ExposedPorts'][port] = {} 272 | 273 | // PortMap describes the mapping of container ports to host ports, 274 | // using the container's port-number and protocol as key in the 275 | // format /, for example, 80/udp 276 | // 127.0.0.1 - is more restrictive as the default 0.0.0.0 for security reasons 277 | // "8545/tcp": [{ "HostPort": "8545" }], 278 | containerConfig['HostConfig']['PortBindings'][port] = [ { 'HostIp': '127.0.0.1', 'HostPort': autoPort ? 
undefined : port.split('/')[0] } ] 279 | } 280 | // console.log('config', JSON.stringify(containerConfig, null, 2)) 281 | try { 282 | // console.log('create container with config', containerConfig) 283 | container = await this._docker.createContainer(containerConfig) 284 | } catch (error) { 285 | console.log('create container error', error) 286 | } 287 | } 288 | if (overwriteEntrypoint) { 289 | // store overwritten entrypoint 290 | const image = await this._docker.getImage(imageName) 291 | const info = await image.inspect() 292 | const entryPoint = info.Config.Entrypoint 293 | if (entryPoint) { 294 | // @ts-ignore 295 | container.originalEntrypoint = Array.isArray(entryPoint) ? entryPoint[0] : entryPoint 296 | } 297 | } 298 | return container 299 | } 300 | 301 | /* 302 | public async getOrCreateContainer(imageName: string, containerName: string) { 303 | let container = await this.getContainer(containerName) 304 | if (container) { 305 | return container 306 | } 307 | const listener = () => { } 308 | // const image = await this.getImage(imageName, { listener }) 309 | } 310 | */ 311 | 312 | public async stopContainer(containerId: string) { 313 | const container = await this._docker.getContainer(containerId) 314 | return container.stop() 315 | } 316 | 317 | public async detectEntryPoint(container: Container) { 318 | const data = await container.inspect() 319 | if (data.Config.Entrypoint) { 320 | const entryPoint = data.Config.Entrypoint[0] 321 | return entryPoint 322 | } 323 | return undefined 324 | } 325 | 326 | // https://github.com/apocas/dockerode/blob/master/examples/run_stdin.js 327 | // https://github.com/apocas/dockerode/blob/master/lib/docker.js#L1442 328 | public async run(imageName: string, cmd: string[], { 329 | stdio = 'pipe', 330 | volume = undefined 331 | } : any = {}) { 332 | 333 | const optsc = { 334 | 'Hostname': '', 335 | 'User': '', 336 | OpenStdin: true, 337 | AttachStdin: true, 338 | AttachStdout: true, 339 | AttachStderr: true, 340 | StdinOnce: true, 341 | 'Tty': stdio === 'inherit', 342 | 'Env': null, 343 | 'Cmd': cmd, // this will overwrite the entrypoint 344 | 'Image': imageName, 345 | 'Volumes': {}, // use binds instead 346 | 'VolumesFrom': [], 347 | HostConfig: { 348 | // Automatically remove the container when the container's process exits (e.g. when stopped). 349 | AutoRemove: true, 350 | Binds: volume ? 
[ volume ] : undefined 351 | }, 352 | } 353 | 354 | const container = await this._docker.createContainer(optsc) 355 | 356 | if (!container) { 357 | throw new Error('Could not create container') 358 | } 359 | 360 | const onResize = async () => { 361 | let dimensions = { 362 | h: process.stdout.rows, 363 | w: process.stderr.columns 364 | }; 365 | if (dimensions.h != 0 && dimensions.w != 0) { 366 | await container.resize(dimensions); 367 | } 368 | } 369 | 370 | const dockerStream = await container.attach({ 371 | stream: true, 372 | stdin: true, 373 | stdout: true, 374 | stderr: true, 375 | }) 376 | 377 | let isRaw = process.stdin.isRaw // save for restore 378 | let bufferStream = new WritableMemoryStream() 379 | if (stdio === 'inherit') { 380 | attachStdOut(process.stdout, dockerStream, container.modem, onResize) 381 | // attachStdOut(attachStream, dockerStream, container.modem, onResize) 382 | attachStdin(process.stdin, dockerStream) 383 | } 384 | 385 | // write stream output to buffer: independent of stdio inherit or pipe 386 | dockerStream.pipe(bufferStream) 387 | 388 | const startOptions = undefined 389 | await container.start(startOptions) 390 | 391 | await onResize() 392 | 393 | const data = await container.wait() 394 | // TODO handle exit code 395 | console.log('data', data) 396 | 397 | // allow nodejs to exit 398 | if (stdio === 'inherit') { 399 | detachStdout(process.stdout, onResize) 400 | detachStdin(process.stdin, isRaw) 401 | dockerStream.end(); 402 | } 403 | 404 | // return stream output tokenized: remove ansi, split on newline 405 | // https://github.com/chalk/ansi-regex/blob/master/index.js#L3 406 | return bufferStream.buffer.toString().replace(/[\u001b\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, "").split(/[\r\n]+/g) 407 | } 408 | 409 | async getFile(container: Container, filePath: string) { 410 | if (!filePath) { 411 | throw new Error(`No path provided getFile()`) 412 | } 413 | const data = await container.inspect() 414 | // TODO maybe even relative to entry point? 415 | const cwd = data.Config.WorkingDir 416 | 417 | const stream = await container.getArchive({ 418 | 'path': filePath.startsWith('/') ? 
filePath : path.join(cwd, filePath) 419 | }) 420 | // @ts-ignore 421 | const buf = await streamToBuffer(stream) 422 | const pkg = await ethpkg.getPackage(buf) 423 | if (!pkg) { 424 | return undefined 425 | } 426 | // if dir return all files 427 | if (isDirPath(filePath)) { 428 | return pkg 429 | } 430 | return pkg.getContent(filePath) 431 | } 432 | 433 | } 434 | 435 | -------------------------------------------------------------------------------- /src/ClientManager.ts: -------------------------------------------------------------------------------- 1 | import fs, { stat } from 'fs' 2 | import path from 'path' 3 | import ethpkg, { PackageManager, IRelease, IPackage, download } from 'ethpkg' 4 | import { clients as defaultClients } from './client_plugins' 5 | import { normalizePlatform, uuid, createFilterFunction, validateConfig, extractPlatformFromString, getFileExtension } from './utils' 6 | import { ClientInfo, ClientConfig, DownloadOptions, ClientStartOptions, instanceofPackageConfig, instanceofDockerConfig, instanceofClientInfo, CommandOptions, IClient, instanceofClientConfig, PackageConfig, ReleaseFilterOptions, LogFilter, FilterFunction, GetClientOptions } from './types' 7 | import DockerManager from './DockerManager' 8 | import { Logger } from './Logger' 9 | import { ProcessManager } from './ProcessManager' 10 | import { DockerizedClient } from './Client/DockerizedClient' 11 | import { BinaryClient } from './Client/BinaryClient' 12 | import { CLIENT_STATE } from './Client/BaseClient' 13 | import { PROCESS_EVENTS } from './events' 14 | 15 | const DOCKER_PREFIX = 'ethbinary' 16 | 17 | export const SHARED_DATA = '/shared_data' 18 | 19 | export class MultiClientManager { 20 | 21 | private _packageManager: PackageManager 22 | private _clients: Array<IClient> 23 | private _dockerManager: DockerManager 24 | private _processManager: ProcessManager 25 | private _logger: Logger 26 | private _clientConfigs: ClientConfig[] 27 | 28 | /** 29 | * Because a ClientManager instance handles process events like uncaughtException, exit, etc. 30 | * there should only be one instance 31 | */ 32 | private static instance: MultiClientManager 33 | 34 | private constructor() { 35 | this._logger = Logger.getInstance() 36 | this._packageManager = new PackageManager() 37 | this._dockerManager = new DockerManager(DOCKER_PREFIX) 38 | this._processManager = new ProcessManager() 39 | this._clients = [] 40 | this._clientConfigs = [] 41 | 42 | this.addClientConfig(defaultClients) 43 | 44 | // exitHandler MUST only perform sync operations 45 | const exitHandler = (options: any, exitCode: any) => { 46 | console.log(' ==> exit handler called with code', exitCode) 47 | if (options.exit) process.exit(); 48 | } 49 | 50 | // https://stackoverflow.com/questions/40574218/how-to-perform-an-async-operation-on-exit 51 | // The 'beforeExit' event is emitted when Node.js empties its event loop and has no additional work to schedule. 52 | // Normally, the Node.js process will exit when there is no work scheduled, 53 | // but a listener registered on the 'beforeExit' event can make asynchronous calls, and thereby cause the Node.js process to continue. 54 | process.on('beforeExit', async (code) => { 55 | this._logger.log('ClientManager will exit. Cleaning up...') 56 | await this._cleanup() 57 | exitHandler({ exit: true }, code) 58 | }) 59 | process.on('SIGINT', async (code) => { 60 | console.log('sigint') 61 | this._logger.log('ClientManager got SIGINT. 
Cleaning up...') 62 | await this._cleanup() 63 | exitHandler({ exit: true }, code) 64 | }); 65 | process.on('unhandledRejection', async (reason, p) => { 66 | // console.error('Unhandled Rejection at Promise', p); 67 | console.error('Unhandled Promise Rejection', reason) 68 | await this._cleanup() 69 | exitHandler({ exit: true }, 0) 70 | }) 71 | } 72 | 73 | private async _cleanup() { 74 | const runningClients = this._clients.filter(client => client.info().state === CLIENT_STATE.STARTED) 75 | // TODO stop running docker containers or kill processes 76 | console.log('INFO Program will exit - try to stop running clients: ' + runningClients.length) 77 | for (const client of runningClients) { 78 | try { 79 | const info = client.info() 80 | console.log(`Trying to stop ${info.type} client in state ${info.state} id:`, client.id) 81 | await client.stop() 82 | console.log(`Client ${client.id} stopped.`, info) 83 | } catch (error) { 84 | console.error('Stop error', error.message) 85 | } 86 | } 87 | process.exit() 88 | } 89 | 90 | public static getInstance(): MultiClientManager { 91 | if (!MultiClientManager.instance) { 92 | MultiClientManager.instance = new MultiClientManager() 93 | } 94 | return MultiClientManager.instance 95 | } 96 | 97 | public status(clientId?: string | ClientInfo) { 98 | if (clientId) { 99 | const client = this._findClient(clientId) 100 | return client.info() 101 | } 102 | return { 103 | clients: this._clients.map(c => c.info()) 104 | } 105 | } 106 | 107 | private async _getClientConfig(clientName: string, useDocker : boolean | undefined = undefined): Promise<ClientConfig> { 108 | let config = this._clientConfigs.find(c => c.name === clientName && (useDocker === true ? instanceofDockerConfig(c) : true)) 109 | if (!config) { 110 | console.warn('Supported clients are', await this.getAvailableClients()) 111 | throw new Error('Unsupported client: ' + clientName + ' - docker: ' + useDocker) 112 | } 113 | return config 114 | } 115 | 116 | public addClientConfig(config: ClientConfig | Array<ClientConfig>) { 117 | if (Array.isArray(config)) { 118 | for (const _c of config) { 119 | this.addClientConfig(_c) 120 | } 121 | return 122 | } 123 | else if (instanceofClientConfig(config)) { 124 | let isValid = validateConfig(config) 125 | if (!isValid) { 126 | throw new Error('Invalid client config') 127 | } 128 | const _config = { 129 | // set defaults 130 | // @ts-ignore 131 | displayName: config.name, 132 | entryPoint: 'auto', 133 | service: false, 134 | ...config, 135 | // @ts-ignore 136 | filter: createFilterFunction(config.filter) 137 | } 138 | this._clientConfigs.push(_config) 139 | } 140 | } 141 | 142 | public async getAvailableClients() { 143 | return this._clientConfigs.map(c => `${c.name} - docker: ${instanceofDockerConfig(c)}`) 144 | } 145 | 146 | public async getClientVersions(clientName: string, { 147 | platform = (process.platform), 148 | packagesOnly = true, 149 | version = undefined 150 | }: ReleaseFilterOptions = {}): Promise<Array<IRelease>> { 151 | const config = await this._getClientConfig(clientName) 152 | if (!instanceofPackageConfig(config)) { 153 | // TODO handle docker versions 154 | return [] 155 | } 156 | let releases = await this._packageManager.listPackages(config.repository, { 157 | prefix: config.prefix, 158 | filter: config.filter, 159 | version, // apply version or version range filter 160 | packagesOnly, // dangerous mode: when false, unpackaged assets are returned as well 161 | }) 162 | 163 | if (!packagesOnly) { 164 | // filter binaries only 165 | releases = releases.filter(release => { 166 | const ext = 
getFileExtension(release.fileName) 167 | const hasBinaryExtension = ext === undefined || (process.platform === 'win32' && ext === '.exe') 168 | return hasBinaryExtension 169 | }) 170 | } 171 | 172 | if (platform) { 173 | platform = normalizePlatform(platform) 174 | // filter releases for different platforms 175 | releases = releases.filter(release => { 176 | const releasePlatform = extractPlatformFromString(release.fileName) 177 | return releasePlatform !== undefined && (releasePlatform === platform) 178 | }) 179 | } 180 | 181 | return releases 182 | } 183 | 184 | public async getClient(clientSpec: string | ClientConfig, { 185 | version = 'latest', 186 | platform = process.platform, 187 | listener = () => {}, 188 | cachePath, 189 | useDocker = undefined // only use docker configs 190 | }: GetClientOptions = {}): Promise<ClientInfo> { 191 | 192 | let clientName = typeof clientSpec === 'string' ? clientSpec : clientSpec.name 193 | 194 | cachePath = cachePath || path.join(process.cwd(), 'cache', clientName) 195 | 196 | if (instanceofClientConfig(clientSpec)) { 197 | // this does additional validation and sets defaults: do NOT use the config directly without these checks 198 | this.addClientConfig(clientSpec) 199 | } 200 | 201 | const config = await this._getClientConfig(clientName, useDocker) 202 | 203 | platform = normalizePlatform(platform) 204 | 205 | let client 206 | if (instanceofDockerConfig(config)) { 207 | client = await DockerizedClient.create(this._dockerManager, config, { 208 | version, 209 | listener 210 | // platform and cache not relevant for docker 211 | }) 212 | } 213 | else if (instanceofPackageConfig(config)) { 214 | 215 | const { isPackaged = true } = config 216 | 217 | // make sure cache path exists (docker has no cache) 218 | if (!fs.existsSync(cachePath)) { 219 | fs.mkdirSync(cachePath, { recursive: true }) 220 | } 221 | 222 | if (!isPackaged) { 223 | // this handles version, binary and platform filtering already 224 | let releases = await this.getClientVersions(clientName, { 225 | version, 226 | packagesOnly: false 227 | }) 228 | 229 | releases = releases.filter(release => release.fileName !== undefined) 230 | 231 | const release = releases.shift() 232 | 233 | if (!release) { 234 | throw new Error('Specified binary could not be found') 235 | } 236 | 237 | let binaryPath = path.resolve(cachePath, release.fileName) 238 | // check cache 239 | if (!fs.existsSync(binaryPath)) { // download binary 240 | 241 | const { location } = release // get download url 242 | if (location === undefined) { 243 | throw new Error('Cannot download binary - undefined download url') 244 | } 245 | // wrap progress listener 246 | let progress = 0 247 | const onProgress = (p: number) => { 248 | const progressNew = Math.floor(p * 100); 249 | if (progressNew > progress) { 250 | progress = progressNew; 251 | listener(PROCESS_EVENTS.DOWNLOAD_PROGRESS, { progress, release, size: release.size }) 252 | } 253 | } 254 | listener(PROCESS_EVENTS.DOWNLOAD_STARTED, { location, release }) 255 | const data = await download(location, onProgress) 256 | listener(PROCESS_EVENTS.DOWNLOAD_FINISHED, { location, size: data.length, release }) 257 | 258 | fs.writeFileSync( 259 | binaryPath, 260 | data, 261 | { 262 | mode: parseInt('754', 8) // strict mode prohibits octal numbers in some cases 263 | } 264 | ) 265 | } 266 | 267 | client = new BinaryClient(binaryPath, this._processManager, config) 268 | 269 | } else { 270 | client = await BinaryClient.create(this._packageManager, this._processManager, config, { 271 | version, 272 | platform, 273 | 
cachePath, 274 | isPackaged, 275 | listener 276 | }) 277 | } 278 | } else { 279 | throw new Error(`Client config does not specify how to retrieve client: repository or dockerimage should be set`) 280 | } 281 | 282 | this._clients.push(client) 283 | return client.info() 284 | } 285 | 286 | // creates an "ad-hoc" client for existing binaries 287 | public async getBinaryClient(binaryPath: string): Promise<ClientInfo> { 288 | if (!fs.existsSync(binaryPath)) { 289 | throw new Error('Binary does not exist: ' + binaryPath) 290 | } 291 | const name = path.basename(binaryPath) 292 | const config: PackageConfig = { name, displayName: name, repository: path.dirname(binaryPath) } 293 | const client = new BinaryClient(binaryPath, this._processManager, config) 294 | this._clients.push(client) 295 | return client.info() 296 | } 297 | 298 | private _findClient(clientId: string | ClientInfo) { 299 | if (instanceofClientInfo(clientId)) { 300 | clientId = clientId.id 301 | } 302 | const client = this._clients.find(client => client.id === clientId); 303 | if (!client) { 304 | throw new Error('Client not found') 305 | } 306 | return client 307 | } 308 | 309 | public async startClient(clientId: string | ClientInfo, flags: string | string[] = [], options: ClientStartOptions = {}): Promise<ClientInfo> { 310 | if (typeof flags === 'string') { 311 | flags = flags.split(' ') 312 | } 313 | const client: IClient = this._findClient(clientId) 314 | // add started client to client list 315 | await client.start(flags, options) 316 | return client.info() 317 | } 318 | 319 | public async stopClient(clientId: string | ClientInfo): Promise<ClientInfo> { 320 | const client: IClient = this._findClient(clientId) 321 | await client.stop() 322 | // remove stopped client from client list // TODO make setting? 323 | // this._clients = this._clients.filter(c => c.id !== client.id) 324 | // console.log('Killing process:', path.basename(clientInfo.binaryPath), 'process pid:', _process.pid); 325 | return client.info() 326 | } 327 | 328 | public async execute(clientId: string | ClientInfo, command: string, options?: CommandOptions): Promise<Array<string>> { 329 | this._logger.verbose('execute on client', clientId, command) 330 | const client: IClient = this._findClient(clientId) 331 | options = { 332 | timeout: 30 * 1000, 333 | ...options 334 | } 335 | const result = await client.execute(command, options) 336 | return result 337 | } 338 | 339 | public async run(clientId: string | ClientInfo, command: string, options?: CommandOptions): Promise<Array<string>> { 340 | this._logger.verbose('run on client', clientId, command) 341 | const client: IClient = this._findClient(clientId) 342 | if (client.info().type !== 'docker') { 343 | throw new Error('run is only available for docker clients') 344 | } 345 | options = { 346 | timeout: 30 * 1000, 347 | ...options 348 | } 349 | const result = await (client as DockerizedClient).run(command, options) 350 | return result 351 | } 352 | 353 | // NOTE: whenState with callback might be complicated in client-server environments 354 | // the alternative approach is to poll on status.logs 355 | public async whenState(clientId: string | ClientInfo, state: string | LogFilter): Promise<ClientInfo> { 356 | const client: IClient = this._findClient(clientId) 357 | let status = client.info() 358 | 359 | // check if state was already reached 360 | if (state === CLIENT_STATE.HTTP_RPC_READY && status.rpcUrl) { 361 | return status 362 | } 363 | if (state === CLIENT_STATE.IPC_READY && status.ipc) { 364 | return status 365 | } 366 | // TODO find more generic solution 367 | if (state === CLIENT_STATE.STARTED && 
![CLIENT_STATE.STOPPED, CLIENT_STATE.INIT].includes(status.state)) { 368 | return status 369 | } 370 | if (state === status.state) { 371 | return status 372 | } 373 | // if state not yet reached wait for it 374 | // TODO allow timeout 375 | return new Promise<ClientInfo>((resolve, reject) => { 376 | if (typeof state === 'function') { 377 | const filter = state 378 | const listener = (log: string) => { 379 | if (filter(log)) { 380 | resolve(client.info()) 381 | client.off('log', listener) 382 | } 383 | } 384 | client.on('log', listener) 385 | } else { 386 | // TODO remove listener 387 | client.on('state', (newState) => { 388 | if (newState === state) { 389 | resolve(client.info()) 390 | } 391 | }) 392 | } 393 | }) 394 | } 395 | 396 | public async input(clientId: string | ClientInfo, _input: string): Promise<ClientInfo> { 397 | const client: IClient = this._findClient(clientId) 398 | // TODO implement for docker 399 | await (client as BinaryClient).input(_input) 400 | let status = client.info() 401 | return status 402 | } 403 | 404 | public async rpc() { 405 | 406 | } 407 | 408 | } 409 | 410 | /** 411 | * MultiClientManager is the main implementation 412 | * and it should ONLY RETURN SERIALIZABLE data. 413 | * SingleClientManager is a convenience wrapper that should 414 | * have as little functionality of its own as possible and no state 415 | * so that it can be used e.g. in child processes or webpages that communicate 416 | * with the MultiClientManager server API 417 | */ 418 | export class SingleClientManager { 419 | private _clientManager: MultiClientManager 420 | private _clientInstance?: ClientInfo 421 | private _config?: ClientConfig 422 | 423 | constructor() { 424 | this._clientManager = MultiClientManager.getInstance() 425 | } 426 | 427 | private _getClientInstance(): ClientInfo { 428 | // if client was explicitly set -> use user defined client 429 | if (this._clientInstance) { 430 | return this._clientInstance 431 | } 432 | throw new Error('No client set: call getClient() or getBinaryClient() before using the single-client API') 433 | } 434 | 435 | public addClientConfig(config: ClientConfig /* only allow one config to be added */) { 436 | this._config = config 437 | return this._clientManager.addClientConfig(config) 438 | } 439 | 440 | get ipc() { 441 | let info = this._clientManager.status(this._clientInstance) as ClientInfo 442 | return info.ipc 443 | } 444 | 445 | get rpcUrl() { 446 | let info = this._clientManager.status(this._clientInstance) as ClientInfo 447 | return info.rpcUrl 448 | } 449 | 450 | public async getClientVersions(clientName?: string, options?: any): Promise<Array<IRelease>> { 451 | // overload: public async getClientVersions(options?: any) 452 | if (typeof clientName === 'object') { 453 | options = clientName 454 | } 455 | if (typeof clientName !== 'string') { 456 | if (this._config) { 457 | clientName = this._config.name 458 | } else { 459 | throw new Error('Versions for which client? Client name was not provided') 460 | } 461 | } 462 | return this._clientManager.getClientVersions(clientName, options) 463 | } 464 | 465 | public async getClient(clientSpec: string | ClientConfig, options?: DownloadOptions): Promise<SingleClientManager> { 466 | if (this._clientInstance) { 467 | throw new Error('A client is already set. 
If you want to use different clients, use MultiClientManager instead') 468 | } 469 | const client = await this._clientManager.getClient(clientSpec, { 470 | listener: (newState, args) => { 471 | // console.log('new state', newState) 472 | }, 473 | ...options 474 | }) 475 | this._clientInstance = client 476 | return this 477 | } 478 | 479 | public async getBinaryClient(binaryPath: string): Promise<SingleClientManager> { 480 | if (this._clientInstance) { 481 | throw new Error('A client is already set. If you want to use different clients, use MultiClientManager instead') 482 | } 483 | const client = await this._clientManager.getBinaryClient(binaryPath) 484 | this._clientInstance = client 485 | return this 486 | } 487 | 488 | public async start(flags: string | string[] = [], options?: ClientStartOptions): Promise<ClientInfo> { 489 | return this._clientManager.startClient(this._getClientInstance(), flags, options) 490 | } 491 | 492 | public async stop(): Promise<ClientInfo> { 493 | return this._clientManager.stopClient(this._getClientInstance()) 494 | } 495 | 496 | public async execute(command: string, options?: CommandOptions): Promise<Array<string>> { 497 | return this._clientManager.execute(this._getClientInstance(), command, options) 498 | } 499 | 500 | public async run(command: string, options?: CommandOptions): Promise<Array<string>> { 501 | return this._clientManager.run(this._getClientInstance(), command, options) 502 | } 503 | 504 | public async whenState(state: string | LogFilter): Promise<ClientInfo> { 505 | return this._clientManager.whenState(this._getClientInstance(), state) 506 | } 507 | 508 | public async input(_input: string): Promise<ClientInfo> { 509 | return this._clientManager.input(this._getClientInstance(), _input) 510 | } 511 | 512 | } 513 | 514 | export const getClient = async (clientSpec: string | ClientConfig, options?: DownloadOptions): Promise<SingleClientManager> => { 515 | const cm = new SingleClientManager() 516 | return cm.getClient(clientSpec, options) 517 | } --------------------------------------------------------------------------------
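The ClientManager API above is easiest to follow end to end from a consumer script. The following is a minimal usage sketch (illustration only, not a file in this repository): the client name 'geth' comes from the bundled client_plugins, while the geth flags, the readiness log string passed to whenState and the import path are assumptions made purely for the example.

// usage-sketch.ts (hypothetical script at the repository root, e.g. run with ts-node)
import { MultiClientManager } from './src/ClientManager'

const main = async () => {
  // the manager is a singleton because it registers process-wide exit handlers
  const manager = MultiClientManager.getInstance()

  // resolve and download the client binary (or create the docker container)
  // and receive a serializable ClientInfo handle
  const geth = await manager.getClient('geth', { version: 'latest' })

  // flags can be passed as a single string; they are split on spaces internally
  // ('--dev --http' and the log string below are geth-specific assumptions)
  await manager.startClient(geth, '--dev --http')

  // wait for a log-based condition instead of a named client state
  await manager.whenState(geth, (log: string) => log.includes('HTTP server started'))

  // status() without an id reports all managed clients
  console.log(manager.status())

  await manager.stopClient(geth)
}

main()

When only a single client is needed, the same flow is available through the SingleClientManager wrapper and the exported getClient() convenience function shown above.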