├── bin ├── run │ ├── run.cmd │ └── run ├── index.js └── commands │ ├── stop.js │ ├── start.js │ └── fuse-setup.js ├── .travis.yml ├── scripts └── configure.js ├── lib ├── log.js ├── errors.js ├── drives │ ├── array-index.js │ └── index.js ├── metadata.js ├── common.js ├── fuse │ ├── virtual-files.js │ └── index.js ├── debug │ └── index.js ├── peersockets.js └── peers.js ├── .github └── ISSUE_TEMPLATE │ └── bug_report.md ├── LICENSE ├── .gitignore ├── test ├── util │ └── create.js ├── peersockets.js ├── replication.js └── hyperdrive.js ├── package.json ├── manager.js ├── index.js └── README.md /bin/run/run.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | node "%~dp0\run" %* 4 | -------------------------------------------------------------------------------- /bin/index.js: -------------------------------------------------------------------------------- 1 | module.exports = require('@oclif/command') 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "lts/*" 4 | - "13" 5 | - "12" 6 | - "10" 7 | -------------------------------------------------------------------------------- /bin/run/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | require('@oclif/command').run() 4 | .then(require('@oclif/command/flush')) 5 | .catch(require('@oclif/errors/handle')) 6 | -------------------------------------------------------------------------------- /scripts/configure.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | const hyperfuse = require('hyperdrive-fuse') 3 | 4 | hyperfuse.configure(err => { 5 | if (err) return process.exit(1) 6 | return process.exit(0) 7 | }) 8 | -------------------------------------------------------------------------------- /lib/log.js: -------------------------------------------------------------------------------- 1 | const pino = require('pino') 2 | const argv = require('minimist')(process.argv.slice(2)) 3 | 4 | module.exports = pino({ 5 | name: 'hyperdrive', 6 | level: argv['log-level'] || 'info', 7 | enabled: true 8 | }, pino.destination(2)) 9 | -------------------------------------------------------------------------------- /lib/errors.js: -------------------------------------------------------------------------------- 1 | const grpc = require('@grpc/grpc-js') 2 | 3 | function serverError (err) { 4 | return { 5 | code: grpc.status.UNKNOWN, 6 | message: err.toString() 7 | } 8 | } 9 | 10 | function requestError (message) { 11 | return { 12 | // TODO: better error code for malformed requests? 
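// (grpc.status.INVALID_ARGUMENT is the conventional gRPC code for malformed requests; UNIMPLEMENTED below is what the daemon currently returns.)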
13 | code: grpc.status.UNIMPLEMENTED, 14 | message 15 | } 16 | } 17 | 18 | module.exports = { 19 | serverError, 20 | requestError 21 | } 22 | -------------------------------------------------------------------------------- /lib/drives/array-index.js: -------------------------------------------------------------------------------- 1 | class ArrayIndex { 2 | constructor () { 3 | this._arr = [] 4 | } 5 | 6 | _getFreeIndex () { 7 | var idx = this._arr.indexOf(null) 8 | if (idx === -1) idx = this._arr.length 9 | if (!idx) idx = 1 10 | return idx 11 | } 12 | 13 | get (idx) { 14 | return this._arr[idx] 15 | } 16 | 17 | insert (value) { 18 | const idx = this._getFreeIndex() 19 | this._arr[idx] = value 20 | return idx 21 | } 22 | 23 | delete (idx) { 24 | this._arr[idx] = null 25 | } 26 | } 27 | 28 | module.exports = ArrayIndex 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior. 15 | 16 | **Expected Behavior** 17 | What did you expect to happen? 18 | 19 | ** OS ** 20 | 21 | ** Node version ** 22 | 23 | ** Was the daemon installed from NPM or bundled with Beaker? ** 24 | 25 | Add any other context about the problem here. 26 | 27 | __Important Note__: Daemon errors are likely to be found in `~/.hyperdrive/log.json` or `~/.hyperdrive/output.log` (the latter is for unexpected, non-JSON output). These files might contain sensitive drive keys, so don't upload the whole thing -- just extract any stack traces or odd error messages! 
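For example, something like `grep -E '"level":(50|40)' ~/.hyperdrive/log.json | tail -n 20` (assuming the default pino JSON log format, where 40/50 are warn/error) pulls out only the warning and error entries.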
28 | -------------------------------------------------------------------------------- /lib/metadata.js: -------------------------------------------------------------------------------- 1 | const p = require('path') 2 | const fs = require('fs-extra') 3 | 4 | const hypercoreCrypto = require('hypercore-crypto') 5 | const mkdirp = require('mkdirp') 6 | 7 | const constants = require('hyperdrive-daemon-client/lib/constants') 8 | 9 | async function createMetadata (storage, endpoint) { 10 | var token = constants.env.token 11 | if (!token) { 12 | hypercoreCrypto.randomBytes(64) 13 | token = hypercoreCrypto.randomBytes(64).toString('hex') 14 | } 15 | await new Promise((resolve, reject) => { 16 | mkdirp(storage, err => { 17 | if (err) return reject(err) 18 | return resolve() 19 | }) 20 | }) 21 | 22 | const metadataPath = p.join(storage, 'config.json') 23 | const metadata = { token, endpoint } 24 | await fs.writeFile(metadataPath, JSON.stringify(metadata)) 25 | return metadata 26 | } 27 | 28 | async function deleteMetadata () { 29 | return fs.unlink(constants.metadata) 30 | } 31 | 32 | module.exports = { 33 | createMetadata, 34 | deleteMetadata 35 | } 36 | -------------------------------------------------------------------------------- /lib/common.js: -------------------------------------------------------------------------------- 1 | const collectStream = require('stream-collector') 2 | 3 | function getHandlers (manager) { 4 | const handlers = {} 5 | const rpcMethods = Object.getOwnPropertyNames(manager.__proto__).filter(methodName => methodName.startsWith('_rpc')) 6 | for (let methodName of rpcMethods) { 7 | let rpcMethodName = methodName.slice(4) 8 | rpcMethodName = rpcMethodName[0].toLowerCase() + rpcMethodName.slice(1) 9 | handlers[rpcMethodName] = manager[methodName].bind(manager) 10 | } 11 | return handlers 12 | } 13 | 14 | function dbCollect (index, opts) { 15 | return new Promise((resolve, reject) => { 16 | collectStream(index.createReadStream(opts), (err, list) => { 17 | if (err) return reject(err) 18 | return resolve(list) 19 | }) 20 | }) 21 | } 22 | 23 | async function dbGet (db, idx) { 24 | try { 25 | return await db.get(idx) 26 | } catch (err) { 27 | if (err && !err.notFound) throw err 28 | return null 29 | } 30 | } 31 | 32 | module.exports = { 33 | getHandlers, 34 | dbCollect, 35 | dbGet 36 | } 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Andrew Osheroff 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (https://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # TypeScript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | .yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | 60 | # next.js build output 61 | .next 62 | 63 | # package-lock.json 64 | package-lock.json 65 | -------------------------------------------------------------------------------- /bin/commands/stop.js: -------------------------------------------------------------------------------- 1 | const ora = require('ora') 2 | const { Command, flags } = require('@oclif/command') 3 | 4 | const constants = require('hyperdrive-daemon-client/lib/constants') 5 | const { stop } = require('../../manager') 6 | 7 | class StopCommand extends Command { 8 | static usage = 'stop' 9 | static description = 'Stop the Hyperdrive daemon.' 10 | static flags = { 11 | name: flags.string({ 12 | description: 'The PM2 process name to stop.', 13 | required: false, 14 | default: constants.processName 15 | }), 16 | port: flags.integer({ 17 | description: 'The gRPC port of the running daemon.', 18 | required: false, 19 | default: constants.port 20 | }) 21 | } 22 | 23 | async run () { 24 | const { flags } = this.parse(StopCommand) 25 | const spinner = ora('Stopping the Hyperdrive daemon (might take a while to unannounce)...').start() 26 | try { 27 | await stop(flags.name, flags.port) 28 | spinner.succeed('The Hyperdrive daemon has been stopped.') 29 | } catch (err) { 30 | spinner.fail('Could not stop the Hyperdrive daemon:') 31 | console.error(err) 32 | this.exit(1) 33 | } 34 | this.exit() 35 | } 36 | } 37 | 38 | module.exports = StopCommand 39 | -------------------------------------------------------------------------------- /lib/fuse/virtual-files.js: -------------------------------------------------------------------------------- 1 | const ArrayIndex = require('../drives/array-index') 2 | const hyperfuse = require('hyperdrive-fuse') 3 | 4 | // TODO: Should import from Hyperdrive. 
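// STDIO_CAP keeps virtual file descriptors clear of the low fds (stdio and a few spares); the value 20 below is assumed to mirror the constant Hyperdrive uses internally.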
5 | const STDIO_CAP = 20 6 | 7 | class VirtualFile { 8 | constructor (contents) { 9 | this.contents = Buffer.from(contents) 10 | } 11 | 12 | read (buffer, len, offset, cb) { 13 | const buf = this.contents.slice(offset, offset + len) 14 | return process.nextTick(cb, buf.copy(buffer)) 15 | } 16 | } 17 | 18 | /** 19 | * VirtualFiles use exclusively odd file descriptors, so that they don't clash with those created by Hyperdrive. 20 | */ 21 | class VirtualFiles { 22 | constructor () { 23 | this.descriptors = new ArrayIndex() 24 | } 25 | 26 | get (fd) { 27 | return this.descriptors.get((fd - STDIO_CAP - 1) / 2) 28 | } 29 | 30 | open (contents) { 31 | const idx = this.descriptors.insert(new VirtualFile(contents)) 32 | return 2 * idx + 1 + STDIO_CAP 33 | } 34 | 35 | close (path, fd, cb) { 36 | this.descriptors.delete((fd - STDIO_CAP - 1) / 2) 37 | return process.nextTick(cb, 0) 38 | } 39 | 40 | read (path, fd, buffer, len, offset, cb) { 41 | const virtualFile = this.get(fd) 42 | if (!virtualFile) return cb(hyperfuse.EBADF) 43 | return virtualFile.read(buffer, len, offset, cb) 44 | } 45 | } 46 | 47 | module.exports = { 48 | VirtualFiles 49 | } 50 | -------------------------------------------------------------------------------- /lib/debug/index.js: -------------------------------------------------------------------------------- 1 | const { EventEmitter } = require('events') 2 | const repl = require('repl') 3 | const streamx = require('streamx') 4 | const pumpify = require('pumpify') 5 | 6 | const { rpc } = require('hyperdrive-daemon-client') 7 | const messages = rpc.debug.messages 8 | 9 | const log = require('../log').child({ component: 'repl' }) 10 | 11 | module.exports = class DebugManager extends EventEmitter { 12 | constructor (daemon) { 13 | super() 14 | this.daemon = daemon 15 | } 16 | 17 | // RPC Methods 18 | 19 | async _rpcRepl (call) { 20 | const inputDecoder = new streamx.Transform({ 21 | highWaterMark: 1, 22 | transform: (req, cb) => { 23 | return cb(null, Buffer.from(req.getIo())) 24 | } 25 | }) 26 | const outputEncoder = new streamx.Transform({ 27 | highWaterMark: 1, 28 | transform: (chunk, cb) => { 29 | const responseMessage = new messages.ReplMessage() 30 | responseMessage.setIo(Buffer.from(chunk)) 31 | return cb(null, responseMessage) 32 | } 33 | }) 34 | const r = repl.start({ 35 | input: pumpify(call, inputDecoder), 36 | output: pumpify.obj(outputEncoder, call), 37 | preview: false, 38 | terminal: true, 39 | completer: line => { 40 | const keys = Object.keys(r.context) 41 | return [keys.filter(k => k.startsWith(line)), line] 42 | } 43 | }) 44 | Object.assign(r.context, { 45 | daemon: this.daemon, 46 | corestore: this.daemon.corestore, 47 | networker: this.daemon.networking, 48 | swarm: this.daemon.networking.swarm, 49 | drives: this.daemon.drives._drives, 50 | log 51 | }) 52 | call.on('end', () => r.close()) 53 | call.once('error', () => r.close()) 54 | } 55 | 56 | getHandlers () { 57 | return { 58 | repl: this._rpcRepl.bind(this) 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /bin/commands/start.js: -------------------------------------------------------------------------------- 1 | const ora = require('ora') 2 | const { Command, flags } = require('@oclif/command') 3 | 4 | const constants = require('hyperdrive-daemon-client/lib/constants') 5 | const { start } = require('../../manager') 6 | 7 | class StartCommand extends Command { 8 | static usage = 'start' 9 | static description = 'Start the Hyperdrive daemon.' 
10 | static flags = { 11 | port: flags.integer({ 12 | description: 'The gRPC port that the daemon will bind to.', 13 | default: constants.port 14 | }), 15 | storage: flags.string({ 16 | description: 'The storage directory for hyperdrives and associated metadata.', 17 | default: constants.root 18 | }), 19 | 'log-level': flags.string({ 20 | description: 'The log level', 21 | default: constants.logLevel 22 | }), 23 | bootstrap: flags.string({ 24 | description: 'Comma-separated bootstrap servers to use.', 25 | default: constants.bootstrap, 26 | parse: bootstrapString => { 27 | return bootstrapString.split(',') 28 | } 29 | }), 30 | 'memory-only': flags.boolean({ 31 | description: 'Use in-memory storage only.', 32 | default: false 33 | }), 34 | foreground: flags.boolean({ 35 | description: 'Run the daemon in the foreground without detaching it from the launch process.', 36 | default: false 37 | }), 38 | 'no-announce': flags.boolean({ 39 | description: 'Never announce read-only drives on the swarm by default.', 40 | default: false 41 | }), 42 | 'no-debug': flags.boolean({ 43 | description: 'Disable debugging-related RPC methods.', 44 | default: false 45 | }) 46 | } 47 | 48 | async run () { 49 | const self = this 50 | const { flags } = this.parse(StartCommand) 51 | 52 | const spinner = ora('Starting the Hyperdrive daemon...').start() 53 | try { 54 | const { opts } = await start(flags) 55 | spinner.succeed(`Hyperdrive daemon listening on ${opts.endpoint}`) 56 | } catch (err) { 57 | spinner.fail(err) 58 | if (!flags.foreground) this.exit(1) 59 | } 60 | if (!flags.foreground) this.exit() 61 | } 62 | } 63 | 64 | module.exports = StartCommand 65 | -------------------------------------------------------------------------------- /test/util/create.js: -------------------------------------------------------------------------------- 1 | const tmp = require('tmp-promise') 2 | const dht = require('@hyperswarm/dht') 3 | 4 | const { HyperdriveClient } = require('hyperdrive-daemon-client') 5 | const HyperdriveDaemon = require('../..') 6 | 7 | const BASE_PORT = 4101 8 | const BOOTSTRAP_PORT = 3106 9 | const BOOTSTRAP_URL = `localhost:${BOOTSTRAP_PORT}` 10 | 11 | async function create (numServers, opts) { 12 | const cleanups = [] 13 | const clients = [] 14 | const daemons = [] 15 | const dirs = [] 16 | 17 | const bootstrapper = dht({ 18 | bootstrap: false 19 | }) 20 | bootstrapper.listen(BOOTSTRAP_PORT) 21 | await new Promise(resolve => { 22 | return bootstrapper.once('listening', resolve) 23 | }) 24 | 25 | for (let i = 0; i < numServers; i++) { 26 | const instanceOpts = Array.isArray(opts) ? 
opts[i] || {} : opts 27 | const { client, daemon, cleanup, dir } = await createInstance(i, BASE_PORT + i, [BOOTSTRAP_URL], instanceOpts) 28 | clients.push(client) 29 | daemons.push(daemon) 30 | cleanups.push(cleanup) 31 | dirs.push(dir) 32 | } 33 | 34 | return { clients, daemons, cleanup, dirs } 35 | 36 | async function cleanup (opts) { 37 | for (const cleanupInstance of cleanups) { 38 | await cleanupInstance(opts) 39 | } 40 | await bootstrapper.destroy() 41 | } 42 | } 43 | 44 | async function createOne (opts) { 45 | const { dirs, clients, cleanup, daemons } = await create(1, opts) 46 | return { 47 | dir: dirs[0], 48 | client: clients[0], 49 | daemon: daemons[0], 50 | cleanup 51 | } 52 | } 53 | 54 | async function createInstance (id, port, bootstrap, opts = {}) { 55 | const dir = opts.dir || await tmp.dir({ unsafeCleanup: true }) 56 | const { path, cleanup: dirCleanup } = dir 57 | 58 | const token = `test-token-${id}` 59 | const endpoint = `localhost:${port}` 60 | 61 | const daemon = new HyperdriveDaemon({ 62 | storage: path, 63 | bootstrap, 64 | port, 65 | // Added this so that old DHT messages from a live daemon don't connect to the test daemons. 66 | swarmPort: port * 2, 67 | memoryOnly: !!opts.memoryOnly, 68 | noAnnounce: !!opts.noAnnounce, 69 | metadata: { 70 | token, 71 | endpoint 72 | }, 73 | latency: opts.latency 74 | }) 75 | await daemon.start() 76 | 77 | const client = new HyperdriveClient(endpoint, token) 78 | await client.ready() 79 | 80 | return { 81 | dir, 82 | client, 83 | daemon, 84 | cleanup 85 | } 86 | 87 | async function cleanup (opts = {}) { 88 | await daemon.stop() 89 | if (!opts.persist) await dirCleanup() 90 | } 91 | } 92 | 93 | module.exports = { 94 | create, 95 | createOne 96 | } 97 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hyperdrive-daemon", 3 | "version": "1.14.5", 4 | "description": "A FUSE-mountable distributed filesystem, built on Hyperdrive", 5 | "main": "index.js", 6 | "bin": { 7 | "hyperdrive": "./bin/run/run" 8 | }, 9 | "scripts": { 10 | "test": "NODE_ENV=test tape test/*.js" 11 | }, 12 | "files": [ 13 | "index.js", 14 | "manager.js", 15 | "scripts/", 16 | "lib/", 17 | "bin/" 18 | ], 19 | "repository": { 20 | "type": "git", 21 | "url": "git+https://github.com/andrewosh/hyperdrive-daemon.git" 22 | }, 23 | "keywords": [ 24 | "hyperdrive", 25 | "fuse", 26 | "daemon" 27 | ], 28 | "author": "Andrew Osheorff ", 29 | "license": "MIT", 30 | "bugs": { 31 | "url": "https://github.com/andrewosh/hyperdrive-daemon/issues" 32 | }, 33 | "homepage": "https://github.com/andrewosh/hyperdrive-daemon#readme", 34 | "dependencies": { 35 | "@grpc/grpc-js": "^0.5.1", 36 | "@oclif/command": "^1.5.19", 37 | "@oclif/config": "^1.14.0", 38 | "@oclif/errors": "^1.2.2", 39 | "@oclif/plugin-autocomplete": "^0.1.5", 40 | "@oclif/plugin-help": "^2.2.3", 41 | "buffer-json-encoding": "^1.0.2", 42 | "call-me-maybe": "^1.0.1", 43 | "corestore": "^5.0.0", 44 | "corestore-swarm-networking": "^5.0.0", 45 | "dat-encoding": "^5.0.1", 46 | "end-of-stream": "^1.4.4", 47 | "fs-extra": "^7.0.1", 48 | "globby": "^11.0.0", 49 | "google-protobuf": "^3.8.0", 50 | "hypercore-cache": "^1.0.2", 51 | "hypercore-crypto": "^2.0.2", 52 | "hypercore-default-storage": "^1.0.0", 53 | "hypercore-protocol": "^8.0.0", 54 | "hyperdrive": "^10.8.10", 55 | "hyperdrive-daemon-client": "^1.14.3", 56 | "hyperdrive-schemas": "^1.9.0", 57 | "level": "^6.0.0", 58 | 
"level-mem": "^5.0.1", 59 | "minimist": "^1.2.5", 60 | "mkdirp": "^0.5.1", 61 | "nanoresource-promise": "^1.2.2", 62 | "ora": "^4.0.3", 63 | "peersockets": "^0.3.0", 64 | "pino": "^5.12.6", 65 | "pm2": "^4.2.1", 66 | "process-top": "^1.1.0", 67 | "pump": "^3.0.0", 68 | "pumpify": "^2.0.1", 69 | "random-access-memory": "^3.1.1", 70 | "stream-collector": "^1.0.1", 71 | "streamx": "^2.6.0", 72 | "subleveldown": "^4.0.0", 73 | "varint": "^5.0.0" 74 | }, 75 | "optionalDependencies": { 76 | "fuse-native": "^2.2.1", 77 | "hyperdrive-fuse": "^1.2.12" 78 | }, 79 | "devDependencies": { 80 | "standard": "^12.0.1", 81 | "tape": "^4.10.1", 82 | "tmp-promise": "^2.0.1" 83 | }, 84 | "standard": { 85 | "ignore": [ 86 | "lib/fuse/index.js", 87 | "bin/commands/*.js", 88 | "bin/commands/debug/*.js", 89 | "bin/commands/cleanup/*.js" 90 | ] 91 | }, 92 | "oclif": { 93 | "commands": "./bin/commands", 94 | "bin": "hyperdrive", 95 | "plugins": [ 96 | "@oclif/plugin-help", 97 | "@oclif/plugin-autocomplete", 98 | "hyperdrive-daemon-client" 99 | ] 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /lib/peersockets.js: -------------------------------------------------------------------------------- 1 | const { EventEmitter } = require('events') 2 | 3 | const { rpc } = require('hyperdrive-daemon-client') 4 | const messages = rpc.peersockets.messages 5 | const log = require('./log').child({ component: 'peersockets' }) 6 | 7 | const PeerMessageTypes = messages.PeerMessage.Type 8 | 9 | module.exports = class PeersocketsManager extends EventEmitter { 10 | constructor (networker, peers, peersockets, opts = {}) { 11 | super() 12 | this.networker = networker 13 | this.peers = peers 14 | this.peersockets = peersockets 15 | this.opts = opts 16 | this.handlesByTopic = new Map() 17 | this.handles = [] 18 | } 19 | 20 | // RPC Methods 21 | 22 | async _rpcJoin (call) { 23 | log.debug('opening topic handle') 24 | const topicHandler = new TopicHandler(this, this.peersockets, this.peers, call) 25 | this.handles.push(topicHandler) 26 | } 27 | } 28 | 29 | class TopicHandler { 30 | constructor (manager, peersockets, peers, call) { 31 | this.call = call 32 | this.manager = manager 33 | this.peersockets = peersockets 34 | this.peers = peers 35 | // Set when an open message is received 36 | this._topicName = null 37 | this._topic = null 38 | this.call.on('data', this._onmessage.bind(this)) 39 | this.call.on('error', this.close.bind(this)) 40 | this.call.on('end', this.close.bind(this)) 41 | } 42 | 43 | _onmessage (msg) { 44 | switch (msg.getType()) { 45 | case PeerMessageTypes.OPEN: 46 | return this._onopen(msg.getOpen()) 47 | case PeerMessageTypes.DATA: 48 | return this._ondata(msg.getData()) 49 | default: 50 | log.warn({ type: msg.getType() }, 'received a message with an invalid type') 51 | } 52 | } 53 | 54 | _createPeerMessage (type) { 55 | const peerMessage = new messages.PeerMessage() 56 | peerMessage.setType(type) 57 | return peerMessage 58 | } 59 | 60 | _onopen (openMessage) { 61 | this._topicName = openMessage.getTopic() 62 | 63 | var handles = this.manager.handlesByTopic.get(this._topicName) 64 | if (!handles) { 65 | handles = [] 66 | this.manager.handlesByTopic.set(this._topicName, handles) 67 | } 68 | handles.push(this) 69 | 70 | this._topic = this.peersockets.join(this._topicName, { 71 | onmessage: (remoteKey, msg) => { 72 | const alias = this.peers.getAlias(remoteKey) 73 | const peerMessage = this._createPeerMessage(PeerMessageTypes.DATA) 74 | const dataMessage = new 
messages.DataMessage() 75 | dataMessage.setMsg(msg) 76 | dataMessage.setAlias(alias) 77 | peerMessage.setData(dataMessage) 78 | this.call.write(peerMessage) 79 | } 80 | }) 81 | } 82 | 83 | _ondata (dataMessage) { 84 | const alias = dataMessage.getAlias() 85 | const msg = dataMessage.getMsg() 86 | const remoteKey = this.peers.getKey(alias) 87 | if (!remoteKey) return 88 | this._topic.send(remoteKey, Buffer.from(msg)) 89 | } 90 | 91 | close () { 92 | if (!this._topicName) return 93 | var handles = this.manager.handlesByTopic.get(this._topicName) 94 | if (!handles) return 95 | var idx = handles.indexOf(this) 96 | if (idx !== -1) { 97 | handles.splice(idx, 1) 98 | } 99 | idx = this.manager.handles.indexOf(this) 100 | this.manager.handles.splice(idx, 1) 101 | if (!handles.length) { 102 | this.manager.handlesByTopic.delete(this._topicName) 103 | this.peersockets.leave(this._topicName) 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /lib/peers.js: -------------------------------------------------------------------------------- 1 | const { EventEmitter } = require('events') 2 | const eos = require('end-of-stream') 3 | 4 | const { rpc } = require('hyperdrive-daemon-client') 5 | const messages = rpc.peers.messages 6 | const WatchPeersTypes = messages.WatchPeersResponse.Type 7 | 8 | const log = require('./log').child({ component: 'peers' }) 9 | 10 | const ALIAS = Symbol('hyperdrive-peer-alias') 11 | 12 | module.exports = class PeersManager extends EventEmitter { 13 | constructor (networker, peersockets, opts = {}) { 14 | super() 15 | this.networker = networker 16 | this.peersockets = peersockets 17 | this.opts = opts 18 | 19 | this._aliasCount = 1 20 | this._aliasesByKey = new Map() 21 | this._keysByAlias = new Map() 22 | } 23 | 24 | // RPC Methods 25 | 26 | async _rpcWatchPeers (call) { 27 | const discoveryKey = Buffer.from(call.request.getDiscoverykey()) 28 | log.debug({ discoveryKey: discoveryKey && discoveryKey.toString('hex') }, 'opening peer watching stream') 29 | const close = this.peersockets.watchPeers(discoveryKey, { 30 | onjoin: (remoteKey) => { 31 | const rsp = new messages.WatchPeersResponse() 32 | rsp.setType(WatchPeersTypes.JOINED) 33 | const aliases = [this.getAlias(remoteKey)] 34 | rsp.setPeersList(aliases) 35 | call.write(rsp) 36 | }, 37 | onleave: (remoteKey) => { 38 | const rsp = new messages.WatchPeersResponse() 39 | rsp.setType(WatchPeersTypes.LEFT) 40 | const aliases = [this.getAlias(remoteKey)] 41 | rsp.setPeersList(aliases) 42 | call.write(rsp) 43 | } 44 | }) 45 | eos(call, close) 46 | } 47 | 48 | async _rpcListPeers (call) { 49 | var discoveryKey = call.request.getDiscoverykey() 50 | if (discoveryKey) discoveryKey = Buffer.from(discoveryKey) 51 | log.debug({ discoveryKey: discoveryKey && discoveryKey.toString('hex') }, 'listing peers') 52 | const peerInfos = [] 53 | for (const peer of this.peersockets.listPeers(discoveryKey)) { 54 | const peerInfo = new messages.PeerInfo() 55 | peerInfo.setNoisekey(peer.key) 56 | peerInfo.setAddress(peer.address) 57 | peerInfo.setType(peer.type) 58 | peerInfos.push(peerInfo) 59 | } 60 | const rsp = new messages.ListPeersResponse() 61 | rsp.setPeersList(peerInfos) 62 | return rsp 63 | } 64 | 65 | async _rpcGetKey (call) { 66 | const rsp = new messages.GetKeyResponse() 67 | const key = this._keysByAlias.get(call.request.getAlias()) 68 | rsp.setKey(key) 69 | return rsp 70 | } 71 | 72 | async _rpcGetAlias (call) { 73 | const rsp = new messages.GetAliasResponse() 74 | const alias = 
this.getAlias(Buffer.from(call.request.getKey())) 75 | rsp.setAlias(alias) 76 | return rsp 77 | } 78 | 79 | // Public Methods 80 | 81 | getKey (alias) { 82 | return this._keysByAlias.get(alias) 83 | } 84 | 85 | getAlias (remoteKey) { 86 | if (!Buffer.isBuffer(remoteKey)) throw new Error('getAlias must be called with a Buffer.') 87 | // The alias is stored on the Buffer as a Symbol to enable fast lookups. 88 | if (remoteKey[ALIAS]) return remoteKey[ALIAS] 89 | 90 | const keyString = remoteKey.toString('hex') 91 | const existingAlias = this._aliasesByKey.get(keyString) 92 | if (existingAlias) { 93 | remoteKey[ALIAS] = existingAlias 94 | return existingAlias 95 | } 96 | 97 | const alias = this._aliasCount++ 98 | remoteKey[ALIAS] = alias 99 | this._aliasesByKey.set(keyString, alias) 100 | this._keysByAlias.set(alias, remoteKey) 101 | return alias 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /bin/commands/fuse-setup.js: -------------------------------------------------------------------------------- 1 | const p = require('path') 2 | const CONFIGURE_FUSE = [process.execPath, p.join(__dirname, '../../scripts/configure.js')] 3 | 4 | const fs = require('fs').promises 5 | const { spawn } = require('child_process') 6 | const { Command, flags } = require('@oclif/command') 7 | 8 | const { HyperdriveClient } = require('hyperdrive-daemon-client') 9 | const constants = require('hyperdrive-daemon-client/lib/constants') 10 | 11 | const isPosix = process.platform !== 'win32' 12 | 13 | class SetupCommand extends Command { 14 | static usage = 'fuse-setup' 15 | static description = 'Perform a one-time configuration step for FUSE.' 16 | static flags = { 17 | user: flags.integer({ 18 | description: `User that should own the ${constants.mountpoint} directory`, 19 | char: 'U', 20 | default: isPosix && process.geteuid() 21 | }), 22 | group: flags.integer({ 23 | description: `Group that should own the ${constants.mountpoint} directory`, 24 | char: 'G', 25 | default: isPosix && process.getegid() 26 | }), 27 | force: flags.boolean({ 28 | description: 'Force the setup to execute, even if it\'s already been performed once.', 29 | char: 'f', 30 | default: 'false' 31 | }) 32 | } 33 | async run () { 34 | try { 35 | var hyperfuse = require('hyperdrive-fuse') 36 | } catch (err) {} 37 | 38 | if (!hyperfuse) { 39 | console.warn('FUSE installation failed. You will be unable to mount your hyperdrives.') 40 | return 41 | } 42 | const { flags } = this.parse(SetupCommand) 43 | 44 | console.log('Configuring FUSE...') 45 | await makeRootDrive() 46 | try { 47 | await configureFuse() 48 | console.log('FUSE successfully configured:') 49 | console.log(' * Your root drive will be mounted at ~/Hyperdrive when the daemon is next started.') 50 | console.log(' * If your mountpoint ever becomes unresponsive, try running `hyperdrive force-unmount`.') 51 | } catch (err) { 52 | console.error('Could not configure the FUSE module:') 53 | console.error(err) 54 | } 55 | 56 | // If the daemon is running, refresh FUSE. 57 | try { 58 | const client = new HyperdriveClient() 59 | await client.ready() 60 | await client.refreshFuse() 61 | } catch (err) { 62 | // Emitting errors here would just be confusing, so suppress. 
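// (A failure here usually just means the daemon isn't currently running.)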
63 | } 64 | 65 | this.exit(0) 66 | 67 | async function configureFuse (cb) { 68 | const configured = await new Promise((resolve, reject) => { 69 | hyperfuse.isConfigured((err, fuseConfigured) => { 70 | if (err) return reject(err) 71 | return resolve(fuseConfigured) 72 | }) 73 | }) 74 | if (configured && !flags.force) { 75 | console.log('Note: FUSE is already configured.') 76 | } else { 77 | return new Promise((resolve, reject) => { 78 | const child = spawn('sudo', CONFIGURE_FUSE, { 79 | stdio: 'inherit' 80 | }) 81 | child.on('error', reject) 82 | child.on('exit', code => { 83 | if (code) return reject(new Error(code)) 84 | return resolve() 85 | }) 86 | }) 87 | } 88 | } 89 | 90 | async function makeRootDrive () { 91 | try { 92 | try { 93 | var mountpointStat = await fs.stat(constants.mountpoint) 94 | } catch (err) { 95 | if (err && err.code !== 'ENOENT') throw err 96 | } 97 | if (!mountpointStat) { 98 | await fs.mkdir(constants.mountpoint) 99 | // TODO: Uncomment when fuse-native path goes in. 100 | // await fs.writeFile(p.join(constants.mountpoint, 'HYPERDRIVE_IS_NOT_RUNNING'), '') 101 | await fs.chown(constants.mountpoint, flags.user, flags.group) 102 | } else { 103 | // If this is a symlink (legacy) delete it. 104 | try { 105 | await fs.unlink(constants.mountpoint) 106 | } catch (err) { 107 | // If Hyperdrive is a directory, this will error, but it doesn't matter. 108 | } 109 | } 110 | } catch (err) { 111 | console.error('Could not create the FUSE mountpoint:') 112 | console.error(err) 113 | } 114 | } 115 | } 116 | } 117 | 118 | module.exports = SetupCommand 119 | -------------------------------------------------------------------------------- /manager.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs').promises 2 | const p = require('path') 3 | 4 | const mkdirp = require('mkdirp') 5 | const pm2 = require('pm2') 6 | 7 | const { HyperdriveClient } = require('hyperdrive-daemon-client') 8 | const constants = require('hyperdrive-daemon-client/lib/constants') 9 | 10 | const HyperdriveDaemon = require('.') 11 | 12 | async function start (opts = {}) { 13 | const initialOpts = opts 14 | opts = { ...constants, ...opts } 15 | opts.endpoint = `localhost:${opts.port}` 16 | 17 | if (opts.env && !opts.env.PATH) { 18 | opts.env = { ...opts.env, PATH: process.env.PATH } 19 | } 20 | 21 | const client = new HyperdriveClient({ endpoint: opts.endpoint, storage: initialOpts.storage || opts.root }) 22 | const running = await new Promise((resolve, reject) => { 23 | client.ready(err => { 24 | if (!err) return resolve(true) 25 | if (err.versionMismatch) return reject(new Error(`Daemon is already running with incompatible version: ${err.version}`)) 26 | return resolve(false) 27 | }) 28 | }) 29 | if (running) return { opts } 30 | 31 | await new Promise((resolve, reject) => { 32 | const storagePath = p.join(opts.storage, 'storage') 33 | mkdirp(storagePath, err => { 34 | if (err) return reject(new Error(`Could not create storage directory: ${storagePath}`)) 35 | return resolve() 36 | }) 37 | }) 38 | 39 | opts.memoryOnly = opts['memory-only'] 40 | opts.noAnnounce = opts['no-announce'] 41 | opts.noDebug = opts['no-debug'] 42 | opts.logLevel = opts['log-level'] 43 | 44 | /** 45 | * HACK 46 | * If 'pm2' detects a space in the 'script' path, it assumes the call is something like "python foo.py". 47 | * When that's the case, it transforms the call into `/bin/bash -c python foo.py`. 
48 | * This creates a problem for some hyperdrive apps because they may have spaces in their install paths. 49 | * The resulting call ends up being `${interpreter} /bin/bash -c ${script}`, which is wrong. 50 | * (To add a little more complexity, it does *not* do this on Windows.) 51 | * 52 | * To solve that, we craft the pm2 call to use '/bin/bash -c' correctly. 53 | * -prf 54 | */ 55 | const IS_WINDOWS = (process.platform === 'win32' || process.platform === 'win64' || /^(msys|cygwin)$/.test(process.env.OSTYPE)) 56 | var script = p.join(__dirname, 'index.js') 57 | 58 | var args = [] 59 | if (opts.port) args.push('--port', opts.port) 60 | if (opts.storage) args.push('--storage', opts.storage) 61 | if (opts.logLevel) args.push('--log-level', opts.logLevel) 62 | if (opts.memoryOnly) args.push('--memory-only') 63 | if (opts.noAnnounce) args.push('--no-announce') 64 | if (opts.noDebug) args.push('--no-debug') 65 | 66 | if (opts.bootstrap === false) args.push('--bootstrap', false) 67 | else if (Array.isArray(opts.bootstrap) && opts.bootstrap.length) args.push('--bootstrap', opts.bootstrap.join(',')) 68 | 69 | var interpreter = opts.interpreter || process.execPath 70 | var interpreterArgs = [`--max-old-space-size=${opts.heapSize}`] 71 | if (!IS_WINDOWS) { 72 | const execArg = [interpreter, interpreterArgs, script].concat(args).map(escapeStringArg).join(' ') 73 | args = ['-c', execArg] 74 | script = 'bash' 75 | interpreter = undefined 76 | interpreterArgs = undefined 77 | } 78 | 79 | const description = { 80 | script, 81 | args, 82 | interpreter, 83 | interpreterArgs, 84 | name: opts.processName || 'hyperdrive', 85 | env: opts.env || process.env, 86 | output: opts.unstructuredLog, 87 | error: opts.structuredLog, 88 | killTimeout: 10000, 89 | autorestart: false 90 | } 91 | 92 | try { 93 | if (opts.structuredLog === constants.structuredLog) { 94 | await fs.rename(constants.structuredLog, constants.structuredLog.replace('.json', '.old.json')) 95 | } 96 | if (opts.unstructuredLog === constants.unstructuredLog) { 97 | await fs.rename(constants.unstructuredLog, constants.unstructuredLog.replace('.log', '.old.log')) 98 | } 99 | } catch (err) { 100 | // If the log file couldn't be rotated, it's OK. 
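// (Typically an ENOENT on first run, before any previous log file exists to rename.)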
101 | } 102 | 103 | if (opts.foreground) { 104 | return startForeground(description, opts) 105 | } else { 106 | return startDaemon(description, opts) 107 | } 108 | 109 | function startForeground (description, opts) { 110 | const daemon = new HyperdriveDaemon({ ...opts, metadata: null, main: true }) 111 | process.title = 'hyperdrive' 112 | process.removeAllListeners('SIGINT') 113 | process.removeAllListeners('SIGTERM') 114 | daemon.start() 115 | return { opts, description } 116 | } 117 | 118 | function startDaemon (description, opts) { 119 | return new Promise((resolve, reject) => { 120 | pm2.connect(!!opts.noPM2DaemonMode, err => { 121 | if (err) return reject(new Error('Could not connect to the process manager to start the daemon.')) 122 | pm2.start(description, err => { 123 | pm2.disconnect() 124 | if (err) return reject(err) 125 | return resolve({ opts, description }) 126 | }) 127 | }) 128 | }) 129 | } 130 | } 131 | 132 | async function stop (name, port) { 133 | name = name || constants.processName 134 | port = port || constants.port 135 | 136 | return new Promise((resolve, reject) => { 137 | pm2.connect(err => { 138 | if (err) return reject(new Error('Could not connect to the process manager to stop the daemon.')) 139 | pm2.delete(name, err => { 140 | pm2.disconnect() 141 | if (err) return reject(err) 142 | return resolve() 143 | }) 144 | }) 145 | }) 146 | } 147 | 148 | module.exports = { 149 | start, 150 | stop 151 | } 152 | 153 | function escapeStringArg (v) { 154 | return (typeof v === 'string' && v.includes(' ')) ? `"${v}"` : v 155 | } 156 | -------------------------------------------------------------------------------- /test/peersockets.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | const { create } = require('./util/create') 3 | 4 | test('peersockets, unidirectional send one', async t => { 5 | const { clients, daemons, cleanup } = await create(2) 6 | const firstClient = clients[0] 7 | const secondClient = clients[1] 8 | 9 | const firstKey = daemons[0].noiseKeyPair.publicKey 10 | const secondKey = daemons[1].noiseKeyPair.publicKey 11 | let received = false 12 | 13 | try { 14 | const drive1 = await firstClient.drive.get() 15 | await drive1.configureNetwork({ lookup: true, announce: true }) 16 | await secondClient.drive.get({ key: drive1.key }) 17 | 18 | // 100 ms delay for swarming. 19 | await delay(100) 20 | 21 | // The two peers should be swarming now. 22 | const firstTopic = firstClient.peersockets.join('my-topic', { 23 | onmessage: async (peerId, msg) => { 24 | const remoteKey = await firstClient.peers.getKey(peerId) 25 | t.true(remoteKey.equals(secondKey)) 26 | t.same(msg, Buffer.from('hello peersockets!')) 27 | received = true 28 | } 29 | }) 30 | const secondTopic = secondClient.peersockets.join('my-topic') 31 | const peerId = await secondClient.peers.getAlias(firstKey) 32 | secondTopic.send(peerId, 'hello peersockets!') 33 | 34 | // 100 ms delay for the message to be sent. 
35 | await delay(100) 36 | 37 | firstTopic.close() 38 | secondTopic.close() 39 | } catch (err) { 40 | t.fail(err) 41 | } 42 | 43 | t.true(received) 44 | await cleanup() 45 | t.end() 46 | }) 47 | 48 | test('peersockets, unidirectional send many', async t => { 49 | const { clients, daemons, cleanup } = await create(2) 50 | const firstClient = clients[0] 51 | const secondClient = clients[1] 52 | 53 | const firstKey = daemons[0].noiseKeyPair.publicKey 54 | const secondKey = daemons[1].noiseKeyPair.publicKey 55 | let received = 0 56 | const msgs = ['first', 'second', 'third', 'fourth', 'fifth'].map(s => Buffer.from(s)) 57 | 58 | try { 59 | const drive1 = await firstClient.drive.get() 60 | await drive1.configureNetwork({ lookup: true, announce: true }) 61 | await secondClient.drive.get({ key: drive1.key }) 62 | 63 | // 100 ms delay for replication. 64 | await delay(100) 65 | 66 | // The two peers should be swarming now. 67 | const firstTopic = firstClient.peersockets.join('my-topic', { 68 | onmessage: async (peerId, msg) => { 69 | const remoteKey = await firstClient.peers.getKey(peerId) 70 | t.true(remoteKey.equals(secondKey)) 71 | t.true(msg.equals(msgs[received++])) 72 | } 73 | }) 74 | const secondTopic = secondClient.peersockets.join('my-topic') 75 | const firstAlias = await secondClient.peers.getAlias(firstKey) 76 | for (const msg of msgs) { 77 | secondTopic.send(firstAlias, msg) 78 | } 79 | 80 | // 100 ms delay for the message to be send. 81 | await delay(100) 82 | 83 | firstTopic.close() 84 | secondTopic.close() 85 | } catch (err) { 86 | t.fail(err) 87 | } 88 | 89 | t.same(received, msgs.length) 90 | await cleanup() 91 | t.end() 92 | }) 93 | 94 | test('peersockets, bidirectional send one', async t => { 95 | const { clients, daemons, cleanup } = await create(2) 96 | const firstClient = clients[0] 97 | const secondClient = clients[1] 98 | 99 | const firstKey = daemons[0].noiseKeyPair.publicKey 100 | const secondKey = daemons[1].noiseKeyPair.publicKey 101 | let receivedFirst = false 102 | let receivedSecond = false 103 | 104 | try { 105 | const drive1 = await firstClient.drive.get() 106 | await drive1.configureNetwork({ lookup: true, announce: true }) 107 | await secondClient.drive.get({ key: drive1.key }) 108 | 109 | // 100 ms delay for replication. 110 | await delay(100) 111 | 112 | const msg1 = Buffer.from('hello peersockets!') 113 | const msg2 = Buffer.from('hello right back to ya') 114 | 115 | // The two peers should be swarming now. 116 | const firstTopic = firstClient.peersockets.join('my-topic', { 117 | onmessage: async (peerId, msg) => { 118 | const remoteKey = await firstClient.peers.getKey(peerId) 119 | t.true(remoteKey.equals(secondKey)) 120 | t.true(msg.equals(msg1)) 121 | firstTopic.send(peerId, msg2) 122 | receivedFirst = true 123 | } 124 | }) 125 | const secondTopic = secondClient.peersockets.join('my-topic', { 126 | onmessage: async (peerId, msg) => { 127 | const remoteKey = await secondClient.peers.getKey(peerId) 128 | t.true(remoteKey.equals(firstKey)) 129 | t.true(msg.equals(msg2)) 130 | receivedSecond = true 131 | } 132 | }) 133 | 134 | const firstAlias = await secondClient.peers.getAlias(firstKey) 135 | secondTopic.send(firstAlias, msg1) 136 | 137 | // 100 ms delay for the message to be send. 
138 | await delay(100) 139 | 140 | firstTopic.close() 141 | secondTopic.close() 142 | } catch (err) { 143 | t.fail(err) 144 | } 145 | 146 | t.true(receivedFirst) 147 | t.true(receivedSecond) 148 | await cleanup() 149 | t.end() 150 | }) 151 | 152 | test('peersockets, bidirectional send many', async t => { 153 | const { clients, daemons, cleanup } = await create(2) 154 | const firstClient = clients[0] 155 | const secondClient = clients[1] 156 | 157 | const firstKey = daemons[0].noiseKeyPair.publicKey 158 | const secondKey = daemons[1].noiseKeyPair.publicKey 159 | 160 | let firstReceived = 0 161 | let secondReceived = 0 162 | const firstMsgs = ['first', 'second', 'third', 'fourth', 'fifth'].map(s => Buffer.from(s)) 163 | const secondMsgs = ['first-reply', 'second-reply', 'third-reply', 'fourth-reply', 'fifth-reply'].map(s => Buffer.from(s)) 164 | 165 | try { 166 | const drive1 = await firstClient.drive.get() 167 | await drive1.configureNetwork({ lookup: true, announce: true }) 168 | await secondClient.drive.get({ key: drive1.key }) 169 | 170 | // 100 ms delay for replication. 171 | await delay(100) 172 | 173 | // The two peers should be swarming now. 174 | const firstTopic = firstClient.peersockets.join('my-topic', { 175 | onmessage: async (peerId, msg) => { 176 | const remoteKey = await firstClient.peers.getKey(peerId) 177 | t.true(remoteKey.equals(secondKey)) 178 | t.true(msg.equals(firstMsgs[firstReceived])) 179 | firstTopic.send(peerId, secondMsgs[firstReceived++]) 180 | } 181 | }) 182 | const secondTopic = secondClient.peersockets.join('my-topic', { 183 | onmessage: async (peerId, msg) => { 184 | const remoteKey = await secondClient.peers.getKey(peerId) 185 | t.true(remoteKey.equals(firstKey)) 186 | t.true(msg.equals(secondMsgs[secondReceived++])) 187 | } 188 | }) 189 | 190 | const firstAlias = await secondClient.peers.getAlias(firstKey) 191 | for (const msg of firstMsgs) { 192 | secondTopic.send(firstAlias, msg) 193 | } 194 | 195 | // 100 ms delay for the message to be send. 196 | await delay(100) 197 | 198 | firstTopic.close() 199 | secondTopic.close() 200 | } catch (err) { 201 | t.fail(err) 202 | } 203 | 204 | t.same(firstReceived, firstMsgs.length) 205 | t.same(secondReceived, secondMsgs.length) 206 | await cleanup() 207 | t.end() 208 | }) 209 | 210 | test('peersockets, send to all peers swarming a drive, static peers', async t => { 211 | const NUM_PEERS = 10 212 | 213 | const { clients, daemons, cleanup } = await create(NUM_PEERS) 214 | const firstClient = clients[0] 215 | const firstRemoteKey = daemons[0].noiseKeyPair.publicKey 216 | 217 | const received = (new Array(NUM_PEERS - 1)).fill(0) 218 | const msgs = ['hello', 'world'].map(s => Buffer.from(s)) 219 | 220 | try { 221 | const drive1 = await firstClient.drive.get() 222 | await drive1.configureNetwork({ lookup: true, announce: true }) 223 | const receivers = [] 224 | const receiverTopics = [] 225 | 226 | // The first peer joins the topic immediately 227 | const firstTopic = firstClient.peersockets.join('my-topic') 228 | 229 | // Start observing all peers that swarm the drive's discovery key. 230 | const unwatch = firstClient.peers.watchPeers(drive1.discoveryKey, { 231 | onjoin: (peerId) => { 232 | receivers.push(peerId) 233 | }, 234 | onleave: (peerId) => { 235 | receivers.splice(receivers.indexOf(peerId), 1) 236 | } 237 | }) 238 | 239 | // Each receiver peers swarms the drive and joins the topic. 
240 | for (let i = 1; i < NUM_PEERS; i++) { 241 | await clients[i].drive.get({ key: drive1.key }) 242 | receiverTopics.push(clients[i].peersockets.join('my-topic', { 243 | onmessage: async (peerId, msg) => { 244 | const remoteKey = await clients[i].peers.getKey(peerId) 245 | t.true(remoteKey.equals(firstRemoteKey)) 246 | t.true(msg.equals(msgs[received[i - 1]++])) 247 | } 248 | })) 249 | } 250 | 251 | // All the clients should be swarming now 252 | await delay(100) 253 | 254 | for (const msg of msgs) { 255 | for (const peerId of receivers) { 256 | firstTopic.send(peerId, msg) 257 | } 258 | } 259 | 260 | // 1000 ms delay for all messages to be sent. 261 | await delay(1000) 262 | 263 | unwatch() 264 | firstTopic.close() 265 | for (const topic of receiverTopics) { 266 | topic.close() 267 | } 268 | } catch (err) { 269 | t.fail(err) 270 | } 271 | 272 | for (const count of received) { 273 | t.same(count, msgs.length) 274 | } 275 | await cleanup() 276 | t.end() 277 | }) 278 | 279 | // TODO: There's a nondeterministic failure here on slow machines. Investigate. 280 | test('peersockets, send to all peers swarming a drive, dynamically-added peers', async t => { 281 | const NUM_PEERS = 10 282 | 283 | const { clients, daemons, cleanup } = await create(NUM_PEERS) 284 | const firstClient = clients[0] 285 | const firstRemoteKey = daemons[0].noiseKeyPair.publicKey 286 | 287 | const received = (new Array(NUM_PEERS - 1)).fill(0) 288 | const firstMessage = Buffer.from('hello world') 289 | 290 | try { 291 | const drive1 = await firstClient.drive.get() 292 | await drive1.configureNetwork({ lookup: true, announce: true }) 293 | const receivers = [] 294 | const receiverTopics = [] 295 | 296 | // The first peer joins the topic immediately 297 | const firstTopic = firstClient.peersockets.join('my-topic') 298 | 299 | // Start observing all peers that swarm the drive's discovery key. 300 | const unwatch = firstClient.peers.watchPeers(drive1.discoveryKey, { 301 | onjoin: (peerId) => { 302 | firstTopic.send(peerId, firstMessage) 303 | receivers.push(peerId) 304 | }, 305 | onleave: (peerId) => { 306 | receivers.splice(receivers.indexOf(peerId), 1) 307 | } 308 | }) 309 | 310 | // Each receiver peers swarms the drive and joins the topic. 311 | // Wait between each peer creation to test dynamic joins. 
312 | for (let i = 1; i < NUM_PEERS; i++) { 313 | await clients[i].drive.get({ key: drive1.key }) 314 | receiverTopics.push(clients[i].peersockets.join('my-topic', { 315 | onmessage: async (peerId, msg) => { 316 | const remoteKey = await clients[i].peers.getKey(peerId) 317 | t.true(remoteKey.equals(firstRemoteKey)) 318 | t.true(msg.equals(firstMessage)) 319 | received[i - 1]++ 320 | } 321 | })) 322 | await delay(50) 323 | } 324 | 325 | unwatch() 326 | firstTopic.close() 327 | for (const topic of receiverTopics) { 328 | topic.close() 329 | } 330 | } catch (err) { 331 | t.fail(err) 332 | } 333 | 334 | for (const count of received) { 335 | t.same(count, 1) 336 | } 337 | await cleanup() 338 | t.end() 339 | }) 340 | 341 | test('closing the last topic handle closes the topic', async t => { 342 | const { clients, daemons, cleanup } = await create(2) 343 | const firstClient = clients[0] 344 | const secondClient = clients[1] 345 | 346 | const firstPeersockets = daemons[0].peersockets.peersockets 347 | const firstKey = daemons[0].noiseKeyPair.publicKey 348 | const secondKey = daemons[1].noiseKeyPair.publicKey 349 | let received = false 350 | 351 | try { 352 | const drive1 = await firstClient.drive.get() 353 | await drive1.configureNetwork({ lookup: true, announce: true }) 354 | await secondClient.drive.get({ key: drive1.key }) 355 | 356 | // 100 ms delay for swarming. 357 | await delay(100) 358 | 359 | // The two peers should be swarming now. 360 | const firstTopic = firstClient.peersockets.join('my-topic', { 361 | onmessage: async (peerId, msg) => { 362 | const remoteKey = await firstClient.peers.getKey(peerId) 363 | t.true(remoteKey.equals(secondKey)) 364 | t.same(msg, Buffer.from('hello peersockets!')) 365 | received = true 366 | } 367 | }) 368 | const secondTopic = secondClient.peersockets.join('my-topic') 369 | const peerId = await secondClient.peers.getAlias(firstKey) 370 | secondTopic.send(peerId, 'hello peersockets!') 371 | 372 | // 100 ms delay for the message to be sent. 373 | await delay(100) 374 | 375 | // The topic should still be registered on the connection. 
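// (topicsByName is assumed to be peersockets' internal map of open topics, keyed by topic name.)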
376 | t.same(firstPeersockets.topicsByName.size, 1) 377 | 378 | firstTopic.close() 379 | secondTopic.close() 380 | } catch (err) { 381 | t.fail(err) 382 | } 383 | 384 | // Delay for topics to be closed 385 | await delay(100) 386 | 387 | t.true(received) 388 | t.same(firstPeersockets.topicsByName.size, 0) 389 | 390 | await cleanup() 391 | t.end() 392 | }) 393 | 394 | function delay (ms) { return new Promise(resolve => setTimeout(resolve, ms)) } 395 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | const p = require('path') 2 | const { EventEmitter } = require('events') 3 | 4 | const mkdirp = require('mkdirp') 5 | const sub = require('subleveldown') 6 | const grpc = require('@grpc/grpc-js') 7 | const bjson = require('buffer-json-encoding') 8 | const processTop = require('process-top') 9 | const varint = require('varint') 10 | const Corestore = require('corestore') 11 | const HypercoreCache = require('hypercore-cache') 12 | const SwarmNetworker = require('corestore-swarm-networking') 13 | const HypercoreProtocol = require('hypercore-protocol') 14 | const Peersockets = require('peersockets') 15 | 16 | const { rpc, apiVersion } = require('hyperdrive-daemon-client') 17 | const { createMetadata } = require('./lib/metadata') 18 | const constants = require('hyperdrive-daemon-client/lib/constants') 19 | 20 | const DriveManager = require('./lib/drives') 21 | const PeersocketManager = require('./lib/peersockets') 22 | const PeersManager = require('./lib/peers') 23 | const DebugManager = require('./lib/debug') 24 | const FuseManager = require('./lib/fuse') 25 | const { serverError } = require('./lib/errors') 26 | const { getHandlers } = require('./lib/common') 27 | 28 | const log = require('./lib/log').child({ component: 'server' }) 29 | 30 | const NAMESPACE = 'hyperdrive-daemon' 31 | const STOP_EVENTS = ['SIGINT', 'SIGTERM', 'unhandledRejection', 'uncaughtException'] 32 | const WATCH_LIMIT = 300 33 | const MAX_PEERS = 128 34 | const SWARM_PORT = 49737 35 | 36 | const TOTAL_CACHE_SIZE = 1024 * 1024 * 512 37 | const CACHE_RATIO = 0.5 38 | const TREE_CACHE_SIZE = TOTAL_CACHE_SIZE * CACHE_RATIO 39 | const DATA_CACHE_SIZE = TOTAL_CACHE_SIZE * (1 - CACHE_RATIO) 40 | 41 | // This is set dynamically in refreshFuse. 42 | try { 43 | var hyperfuse = require('hyperdrive-fuse') 44 | } catch (err) {} 45 | 46 | class HyperdriveDaemon extends EventEmitter { 47 | constructor (opts = {}) { 48 | super() 49 | 50 | this.opts = opts 51 | this.root = opts.storage || constants.root 52 | this.storage = p.join(this.root, 'storage') 53 | 54 | this.port = opts.port || constants.port 55 | this.memoryOnly = !!opts.memoryOnly 56 | this.noAnnounce = !!opts.noAnnounce 57 | this.noDebug = !!opts.noDebug 58 | 59 | log.info('memory only?', this.memoryOnly, 'no announce?', this.noAnnounce) 60 | this._storageProvider = this.memoryOnly ? require('random-access-memory') : require('hypercore-default-storage') 61 | this._dbProvider = this.memoryOnly ? require('level-mem') : require('level') 62 | 63 | const corestoreOpts = { 64 | storage: path => this._storageProvider(`${this.storage}/cores/${path}`), 65 | sparse: true, 66 | // Collect networking statistics. 
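// (hypercore's stats option enables per-peer upload/download counters on each core.)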
67 | stats: true, 68 | cache: { 69 | data: new HypercoreCache({ 70 | maxByteSize: DATA_CACHE_SIZE, 71 | estimateSize: val => val.length 72 | }), 73 | tree: new HypercoreCache({ 74 | maxByteSize: TREE_CACHE_SIZE, 75 | estimateSize: val => 40 76 | }) 77 | }, 78 | ifAvailable: true 79 | } 80 | this.corestore = new Corestore(corestoreOpts.storage, corestoreOpts) 81 | 82 | this._networkOpts = { 83 | announceLocalAddress: true, 84 | preferredPort: opts.swarmPort || SWARM_PORT, 85 | maxPeers: opts.maxPeers || MAX_PEERS 86 | } 87 | const bootstrapOpts = opts.bootstrap || constants.bootstrap 88 | if (bootstrapOpts && bootstrapOpts.length && bootstrapOpts[0] !== '') { 89 | if (bootstrapOpts === false || bootstrapOpts[0] === 'false') { 90 | this._networkOpts.bootstrap = false 91 | } else { 92 | this._networkOpts.bootstrap = bootstrapOpts 93 | } 94 | } 95 | if (opts.latency !== undefined) this._networkOpts.latency = +opts.latency 96 | 97 | // Set in ready. 98 | this.networking = null 99 | this.db = null 100 | this.drives = null 101 | this.fuse = null 102 | this.peersockets = null 103 | this.debug = null 104 | this.metadata = null 105 | this._startTime = null 106 | 107 | // Set in start. 108 | this.server = null 109 | this._topTimer = null 110 | this._dbs = null 111 | this._isMain = !!opts.main 112 | this._cleanup = null 113 | 114 | this._isClosed = false 115 | this._readyPromise = false 116 | 117 | this._versions = null 118 | 119 | this.ready = () => { 120 | if (this._isClosed) return Promise.resolve() 121 | if (this._readyPromise) return this._readyPromise 122 | this._readyPromise = this._ready() 123 | return this._readyPromise.catch(err => { 124 | log.error({ error: err, stack: err.stack }, 'error in daemon ready function -- cleaning up') 125 | return this.stop(err) 126 | }) 127 | } 128 | } 129 | 130 | async _ready () { 131 | // Always rotate the auth token when the daemon's restarted to prevent session mismatches. 132 | this.metadata = this.opts.metadata || await createMetadata(this.root, `localhost:${this.port}`) 133 | await this._ensureStorage() 134 | 135 | this._cleanup = this.stop.bind(this) 136 | for (const event of STOP_EVENTS) { 137 | process.on(event, this._cleanup) 138 | } 139 | 140 | this.db = this._dbProvider(`${this.storage}/db`, { valueEncoding: 'json' }) 141 | const dbs = { 142 | fuse: sub(this.db, 'fuse', { valueEncoding: bjson }), 143 | drives: sub(this.db, 'drives', { valueEncoding: bjson }), 144 | network: sub(this.db, 'network', { valueEncoding: 'json'}) 145 | } 146 | this._dbs = dbs 147 | 148 | await this.corestore.ready() 149 | 150 | // Note: This API is not exposed anymore -- this is a temporary fix. 
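// Deriving the seed and swarm id from the corestore's master key keeps the daemon's noise keypair and swarm identity stable across restarts of the same storage directory.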
151 | const seed = this.corestore.inner._deriveSecret(NAMESPACE, 'replication-keypair') 152 | const swarmId = this.corestore.inner._deriveSecret(NAMESPACE, 'swarm-id') 153 | 154 | this._networkOpts.keyPair = HypercoreProtocol.keyPair(seed) 155 | this._networkOpts.id = swarmId 156 | 157 | this.networking = new SwarmNetworker(this.corestore, this._networkOpts) 158 | this.networking.on('replication-error', err => { 159 | log.trace({ error: err.message, stack: err.stack }, 'replication error') 160 | if (err.message && err.message.indexOf('Remote signature could not be verified') !== -1) { 161 | log.warn('Remote signature verification is failing -- one of your hypercores appears to be forked or corrupted.') 162 | } 163 | }) 164 | this.networking.on('stream-opened', stream => { 165 | log.trace({ remoteType: stream.remoteType, remoteAddress: stream.remoteAddress }, 'replication stream opened') 166 | }) 167 | this.networking.on('stream-closed', stream => { 168 | log.trace({ remoteType: stream.remoteType, remoteAddress: stream.remoteAddress }, 'replication stream closed') 169 | }) 170 | await this.networking.listen() 171 | 172 | // Register the Hyperswarm timeout heuristics on all cores generated by our corestore. 173 | this._registerCoreTimeouts() 174 | 175 | const peersockets = new Peersockets(this.networking) 176 | this.peers = new PeersManager(this.networking, peersockets) 177 | this.peersockets = new PeersocketManager(this.networking, this.peers, peersockets) 178 | if (!this.noDebug) this.debug = new DebugManager(this) 179 | 180 | this.drives = new DriveManager(this.corestore, this.networking, dbs.drives, { 181 | ...this.opts, 182 | memoryOnly: this.memoryOnly, 183 | watchLimit: this.opts.watchLimit || WATCH_LIMIT 184 | }) 185 | this.drives.on('error', err => this.emit('error', err)) 186 | await this.drives.ready() 187 | 188 | this.fuse = new FuseManager(this.drives, this._dbs.fuse, this.opts) 189 | this.fuse.on('error', err => this.emit('error', err)) 190 | await this.fuse.ready() 191 | 192 | this._isReady = true 193 | this._startTime = Date.now() 194 | this._versions = { 195 | daemon: require('./package.json').version, 196 | client: require('hyperdrive-daemon-client/package.json').version, 197 | schema: require('hyperdrive-schemas/package.json').version, 198 | hyperdrive: require('hyperdrive/package.json').version 199 | } 200 | if (this.fuse && this.fuse.fuseConfigured) { 201 | this._versions.fuseNative = require('fuse-native/package.json').version 202 | this._versions.hyperdriveFuse = require('hyperdrive-fuse/package.json').version 203 | } 204 | } 205 | 206 | _ensureStorage () { 207 | return new Promise((resolve, reject) => { 208 | mkdirp(this.storage, err => { 209 | if (err) return reject(err) 210 | return resolve() 211 | }) 212 | }) 213 | } 214 | 215 | /** 216 | * This is where we define our main heuristic for allowing hypercore gets/updates to proceed. 
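 *
 * In short: the `get` callback is deferred until this core's discovery key has been flushed
 * on the swarm (or the whole swarm has flushed), and the `update` callback is additionally
 * deferred until at least one peer has been added -- unless flushing completes without any
 * peers, in which case it fires right away so the request can proceed (and likely fail fast)
 * instead of waiting indefinitely.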
217 | */ 218 | _registerCoreTimeouts () { 219 | const flushSets = new Map() 220 | 221 | this.networking.on('flushed', dkey => { 222 | const keyString = dkey.toString('hex') 223 | if (!flushSets.has(keyString)) return 224 | const { flushSet, peerAddSet } = flushSets.get(keyString) 225 | callAllInSet(flushSet) 226 | callAllInSet(peerAddSet) 227 | }) 228 | 229 | this.corestore.on('feed', core => { 230 | const discoveryKey = core.discoveryKey 231 | const peerAddSet = new Set() 232 | const flushSet = new Set() 233 | var globalFlushed = false 234 | 235 | this.networking.swarm.flush(() => { 236 | if (this.networking.joined(discoveryKey)) return 237 | globalFlushed = true 238 | callAllInSet(flushSet) 239 | callAllInSet(peerAddSet) 240 | }) 241 | 242 | flushSets.set(discoveryKey.toString('hex'), { flushSet, peerAddSet }) 243 | core.once('peer-add', () => callAllInSet(peerAddSet)) 244 | 245 | const timeouts = { 246 | get: (cb) => { 247 | if (this.networking.joined(discoveryKey)) { 248 | if (this.networking.flushed(discoveryKey)) return cb() 249 | return flushSet.add(cb) 250 | } 251 | if (globalFlushed) return cb() 252 | return flushSet.add(cb) 253 | }, 254 | update: (cb) => { 255 | if (core.peers.length) return cb() 256 | if (this.networking.joined(discoveryKey)) { 257 | if (this.networking.flushed(discoveryKey) && !core.peers.length) return cb() 258 | return peerAddSet.add(cb) 259 | } 260 | if (globalFlushed) return cb() 261 | return peerAddSet.add(cb) 262 | } 263 | } 264 | core.timeouts = timeouts 265 | }) 266 | } 267 | 268 | // RPC Methods 269 | 270 | async _rpcStatus (call) { 271 | const rsp = new rpc.main.messages.StatusResponse() 272 | rsp.setApiversion(apiVersion) 273 | rsp.setUptime(Date.now() - this._startTime) 274 | if (this._versions) { 275 | rsp.setDaemonversion(this._versions.daemon) 276 | rsp.setClientversion(this._versions.client) 277 | rsp.setSchemaversion(this._versions.schema) 278 | rsp.setHyperdriveversion(this._versions.hyperdrive) 279 | rsp.setNoisekey(this.noiseKeyPair.publicKey) 280 | 281 | const swarm = this.networking && this.networking.swarm 282 | if (swarm) { 283 | const remoteAddress = swarm.remoteAddress() 284 | rsp.setHolepunchable(swarm.holepunchable()) 285 | rsp.setRemoteaddress(remoteAddress ? 
remoteAddress.host + ':' + remoteAddress.port : '') 286 | } 287 | 288 | if (this._versions.fuseNative) rsp.setFusenativeversion(this._versions.fuseNative) 289 | if (this._versions.hyperdriveFuse) rsp.setHyperdrivefuseversion(this._versions.hyperdriveFuse) 290 | 291 | if (hyperfuse) { 292 | rsp.setFuseavailable(true) 293 | rsp.setFuseconfigured(this.fuse.fuseConfigured) 294 | } else { 295 | rsp.setFuseavailable(false) 296 | rsp.setFuseconfigured(false) 297 | } 298 | } 299 | return rsp 300 | } 301 | 302 | async _rpcRefreshFuse (call) { 303 | await this.fuse.ready() 304 | if (this.fuse && this.fuse.fuseConfigured) { 305 | hyperfuse = require('hyperdrive-fuse') 306 | this._versions.fuseNative = require('fuse-native/package.json').version 307 | this._versions.hyperdriveFuse = require('hyperdrive-fuse/package.json').version 308 | } 309 | return new rpc.main.messages.FuseRefreshResponse() 310 | } 311 | 312 | // Public Methods 313 | 314 | get uptime () { 315 | if (!this._startTime) return 0 316 | return Date.now() - this._startTime 317 | } 318 | 319 | get noiseKeyPair () { 320 | if (!this.networking) return null 321 | return this.networking.keyPair 322 | } 323 | 324 | async stop (err) { 325 | // Couldn't tell you why these propagate as uncaughtExceptions (gRPC is a PITA), but we should ignore them. 326 | if (err && ((err.code === 1) || (err.code === 'ERR_HTTP2_INVALID_STREAM'))) return 327 | if (err) log.error({ error: true, err, message: err.message, stack: err.stack, errno: err.errno }, 'stopping daemon due to error') 328 | if (this._isClosed) { 329 | log.info('force killing the process because stop has been called twice') 330 | if (this._isMain) return process.exit(0) 331 | return null 332 | } 333 | this._isClosed = true 334 | 335 | try { 336 | if (this._topTimer) { 337 | clearInterval(this._topTimer) 338 | this._topTimer = null 339 | } 340 | if (this.server) this.server.forceShutdown() 341 | log.info('waiting for fuse to unmount') 342 | if (this.fuse && this.fuse.fuseConfigured) await this.fuse.unmount() 343 | log.info('waiting for networking to close') 344 | if (this.networking) await this.networking.close() 345 | log.info('waiting for corestore to close') 346 | if (this.corestore) { 347 | await new Promise((resolve, reject) => { 348 | this.corestore.close(err => { 349 | if (err) return reject(err) 350 | return resolve() 351 | }) 352 | }) 353 | } 354 | log.info('waiting for db to close') 355 | if (this.db) await this.db.close() 356 | if (this._isMain) return process.exit(0) 357 | } catch (err) { 358 | log.error({ error: err.message, stack: err.stack }, 'error in cleanup') 359 | if (this._isMain) return process.exit(1) 360 | throw err 361 | } 362 | log.info('finished cleanup -- shutting down') 363 | 364 | for (const event of STOP_EVENTS) { 365 | process.removeListener(event, this._cleanup) 366 | } 367 | } 368 | 369 | async start () { 370 | await this.ready() 371 | this._topTimer = setInterval(() => { 372 | log.info(processTop().toJSON(), 'process stats') 373 | }, 1000 * 60) 374 | 375 | this.server = new grpc.Server() 376 | 377 | this.server.addService(rpc.fuse.services.FuseService, { 378 | ...wrap(this.metadata, getHandlers(this.fuse), { authenticate: true }) 379 | }) 380 | this.server.addService(rpc.drive.services.DriveService, { 381 | ...wrap(this.metadata, getHandlers(this.drives), { authenticate: true }) 382 | }) 383 | this.server.addService(rpc.peersockets.services.PeersocketsService, { 384 | ...wrap(this.metadata, getHandlers(this.peersockets), { authenticate: true }) 385 | }) 386 | 
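// Note: each handler map below is passed through wrap() (defined near the bottom of this file),
// which checks the request's 'token' metadata against the daemon's auth token before invoking
// the handler, and converts rejected promises into gRPC error responses via serverError().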
this.server.addService(rpc.peers.services.PeersService, { 387 | ...wrap(this.metadata, getHandlers(this.peers), { authenticate: true }) 388 | }) 389 | if (this.debug) { 390 | this.server.addService(rpc.debug.services.DebugService, { 391 | ...wrap(this.metadata, getHandlers(this.debug), { authenticate: true }) 392 | }) 393 | } 394 | this.server.addService(rpc.main.services.HyperdriveService, { 395 | ...wrap(this.metadata, getHandlers(this), { authenticate: true }) 396 | }) 397 | 398 | await new Promise((resolve, reject) => { 399 | this.server.bindAsync(`0.0.0.0:${this.port}`, grpc.ServerCredentials.createInsecure(), (err, port) => { 400 | if (err) return reject(err) 401 | log.info({ port: port }, 'server listening') 402 | this.server.start() 403 | return resolve() 404 | }) 405 | }) 406 | } 407 | } 408 | 409 | function extractArguments () { 410 | const argv = require('minimist')(process.argv.slice(2), { 411 | string: ['storage', 'log-level', 'bootstrap'], 412 | boolean: ['announce', 'memory-only', 'debug'], 413 | default: { 414 | bootstrap: '', 415 | 'memory-only': false, 416 | announce: true, 417 | debug: true 418 | } 419 | }) 420 | if (argv.bootstrap === 'false') argv.bootstrap = false 421 | else if (argv.bootstrap) argv.bootstrap = argv.bootstrap.split(',') 422 | return argv 423 | } 424 | 425 | function wrap (metadata, methods, opts) { 426 | const wrapped = {} 427 | const authenticate = opts && opts.authenticate 428 | for (const methodName of Object.keys(methods)) { 429 | const method = methods[methodName] 430 | wrapped[methodName] = function (call, ...args) { 431 | const tag = { method: methodName, received: Date.now() } 432 | const cb = args.length ? args[args.length - 1] : null 433 | if (authenticate) { 434 | let token = call.metadata && call.metadata.get('token') 435 | if (token) token = token[0] 436 | log.trace({ ...tag, token }, 'received token') 437 | if (!token || token !== metadata.token) { 438 | log.warn(tag, 'request authentication failed') 439 | const err = { 440 | code: grpc.status.UNAUTHENTICATED, 441 | message: 'Invalid auth token.' 442 | } 443 | if (cb) return cb(err) 444 | return call.destroy(err) 445 | } 446 | log.trace(tag, 'request authentication succeeded') 447 | } 448 | method(call) 449 | .then(rsp => { 450 | log.trace(tag, 'request was successful') 451 | if (cb) process.nextTick(cb, null, rsp) 452 | }) 453 | .catch(err => { 454 | log.trace({ ...tag, error: err.toString() }, 'request failed') 455 | if (cb) return cb(serverError(err)) 456 | return call.destroy(err) 457 | }) 458 | } 459 | } 460 | return wrapped 461 | } 462 | 463 | function callAllInSet (set) { 464 | for (const cb of set) { 465 | cb() 466 | } 467 | set.clear() 468 | } 469 | 470 | if (require.main === module) { 471 | const opts = extractArguments() 472 | const daemon = new HyperdriveDaemon({ ...opts, main: true }) 473 | process.title = 'hyperdrive' 474 | daemon.start() 475 | } else { 476 | module.exports = HyperdriveDaemon 477 | } 478 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hyperdrive-daemon 2 | [![Build Status](https://travis-ci.com/hypercore-protocol/hyperdrive-daemon.svg?branch=master)](https://travis-ci.com/github/hypercore-protocol/hyperdrive-daemon) 3 | 4 | ## ⚠️ Deprecation Notice ⚠️ 5 | With the recent release of [Hyperspace](https://github.com/hypercore-protocol/hyperspace), this module won't be supported moving forward. 
6 | 7 | All the existing functionality is still available in the [Hyperdrive service](https://github.com/hyperspace-org/hyperdrive-service), though -- our [Hyperspace blog post](https://blog.hypercore-protocol.org/posts/hyperspace/) and the Hyperdrive service's README explain how to transition. 8 | 9 | --- 10 | 11 | The Hyperdrive daemon helps you create, share, and manage Hyperdrives through a persistent process running on your computer, without having to deal with storage management or networking configuration. 12 | 13 | It provides both a gRPC API (see [`hyperdrive-daemon-client`](https://github.com/andrewosh/hyperdrive-daemon-client)) for interacting with remote drives, and an optional FUSE interface for mounting drives as directories in your local filesystem. 14 | 15 | #### Features 16 | * __Hyperswarm Networking__: Hyperdrives are announced and discovered using the [Hyperswarm DHT](https://github.com/hyperswarm/hyperswarm). 17 | * __Easy Storage__: All your Hyperdrives are stored in a single spot, the `~/.hyperdrive/storage` directory. 18 | * __gRPC API__: The daemon exposes an API for managing remote Hyperdrives over gRPC. We currently have a [NodeJS client](https://github.com/andrewosh/hyperdrive-daemon-client). 19 | * __FUSE support__: If you're using Linux or Mac, you can mount Hyperdrives as directories and work with them using standard filesystem syscalls. 20 | * __CLI Tools__: The `hyperdrive` CLI supports a handful of commands for managing the daemon, creating/sharing drives, getting statistics, and augmenting the FUSE interface to support Hyperdrive-specific functions (like mounts). 21 | * __Persistence__: Networking configuration info is stored in a [Level](https://github.com/level/level) instance, so your drives will reconnect to the network automatically when the daemon's restarted. 22 | * __PM2 Process Management__: We use [PM2](https://github.com/Unitech/pm2) to manage the daemon process. Separately installing the PM2 CLI gives you access to extra monitoring, and support for installing the Hyperdrive daemon as a system daemon 23 | 24 | ## Installation 25 | *Note: The daemon CLI currently requires Node 12 or greater* 26 | 27 | __Temporary Note: We're working out a [segfault issue](https://github.com/hypercore-protocol/hyperdrive-daemon/issues/47) that's causing the daemon to fail with Node 14. If you're on 14, check that issue for updates, but for now try using 12 or 13__. 28 | 29 | ``` 30 | npm i hyperdrive-daemon -g 31 | ``` 32 | 33 | ### Starting the daemon 34 | 35 | After installing/configuring, you'll need to start the daemon before running any other commands. To do this, first pick a storage directory for your mounted Hyperdrives. By default, the daemon will use `~/.hyperdrive/storage`. 36 | 37 | ``` 38 | ❯ hyperdrive start 39 | Daemon started at http://localhost:3101 40 | ``` 41 | 42 | If you want to stop the daemon, you can run: 43 | ``` 44 | ❯ hyperdrive stop 45 | The Hyperdrive daemon has been stopped. 
46 | ``` 47 | 48 | ### Checking the status 49 | 50 | After it's been started, you can check if the daemon's running (and get lots of useful information) with the `status` command: 51 | ``` 52 | ❯ hyperdrive status 53 | The Hyperdrive daemon is running: 54 | 55 | API Version: 0 56 | Daemon Version: 1.7.15 57 | Client Version: 1.7.6 58 | Schema Version: 1.6.5 59 | Hyperdrive Version: 10.8.15 60 | Fuse Native Version: 2.2.1 61 | Hyperdrive Fuse Version: 1.2.14 62 | 63 | Holepunchable: true 64 | Remote Address: 194.62.216.174:35883 65 | 66 | Uptime: 0 Days 1 Hours 6 Minutes 2 Seconds 67 | ``` 68 | 69 | ## API 70 | The daemon exposes a gRPC API for interacting with remote Hyperdrives. [`hyperdrive-daemon-client`](https://github.com/andrewosh/hyperdrive-daemon-client) is a Node client that you can use to interact with the API. If you'd like to write a client in another language, check out the schema definitions in [`hyperdrive-schemas`](https://github.com/andrewosh/hyperdrive-schemas). 71 | 72 | ## CLI 73 | 74 | The daemon provides a gRPC interface for mounting, unmounting, and providing status information about all current mounts. There's also a bundled CLI tool which wraps the gRPC API and provides the following commands: 75 | 76 | ### Basic Commands 77 | #### `hyperdrive fuse-setup` 78 | Performs a one-time configuration step that installs FUSE. This command will prompt you for `sudo`. 79 | 80 | #### `hyperdrive start` 81 | Start the Hyperdrive daemon. 82 | 83 | Options include: 84 | ``` 85 | --bootstrap ['host:port', 'host:port', ...] // Optional, alternative bootstrap servers 86 | --storage /my/storage/dir // The storage directory. Defaults to ~/.hyperdrive/storage 87 | --log-level info // Logging level 88 | --port 3101 // The port gRPC will bind to 89 | --memory-only // Run in in-memory mode 90 | --foreground // Do not launch a separate, PM2-managed process 91 | ``` 92 | 93 | #### `hyperdrive status` 94 | Gives the current status of the daemon, as well as version/networking info and FUSE availability info. 95 | 96 | #### `hyperdrive stop` 97 | Stop the daemon. 98 | 99 | ### Importing/Exporting 100 | If you're on a system that doesn't support FUSE, or you just don't want to bother with it, the CLI provides the `import` and `export` commands for moving files in and out of Hyperdrives. 101 | 102 | #### Importing 103 | To import a directory into a new Hyperdrive, you can run `import` without specifying a key: 104 | ``` 105 | ❯ hyperdrive import ./path/to/directory 106 | Importing path/to/directory into aae4f36bd0b1a7a8bf68aa0bdd0b93997fd8ff053f4a3e816cb629210aa17737 (Ctrl+c to exit)... 107 | 108 | Importing | ======================================== | 100% | 3/3 Files 109 | ``` 110 | 111 | The command will remain running, watching the directory for any new changes, but you can always stop it with `Ctrl+c`. 112 | 113 | `import` will save a special file called `.hyperdrive-import-key` inside the directory you uploaded. This makes it easier to resume a previous import later, without any additional arguments. 114 | 115 | Using the command above as an example, running `hyperdrive import path/to/directory` again will always import into drive `aae4f36bd0b1a7a8bf68aa0bdd0b93997fd8ff053f4a3e816cb629210aa17737`.
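If you'd rather drive imports from a script than from the CLI, the same operations are available over the gRPC API. Below is a rough sketch using the Node client -- it assumes you already have a connected `client` instance from `hyperdrive-daemon-client` (see that repo's README for how to construct one); the drive calls themselves are the same ones used by this repo's tests:
```
// Create a fresh drive, announce it on the DHT, and write some files into it.
async function importFiles (client, files) {
  const drive = await client.drive.get() // no key given, so a new drive is created
  await drive.configureNetwork({ lookup: true, announce: true }) // make it discoverable
  for (const [name, contents] of Object.entries(files)) {
    await drive.writeFile(name, contents)
  }
  console.log('imported into drive', drive.key.toString('hex'))
  return drive
}
```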
116 | 117 | #### Exporting 118 | `hyperdrive export` is just the inverse of `import`: given a key, it will export the drive's contents into a directory: 119 | ``` 120 | ❯ hyperdrive export aae4f36bd0b1a7a8bf68aa0bdd0b93997fd8ff053f4a3e816cb629210aa17737 121 | Exporting aae4f36bd0b1a7a8bf68aa0bdd0b93997fd8ff053f4a3e816cb629210aa17737 into (my working directory)/aae4f36bd0b1a7a8bf68aa0bdd0b93997fd8ff053f4a3e816cb629210aa17737 (Ctrl+c to exit)... 122 | 123 | Exporting | ======================================== | 100% | 5/5 Metadata Blocks | 0 Peers 124 | ``` 125 | Unless an output directory is specified, `export` will store files in a subdirectory with the drive's key as its name. 126 | 127 | As with `import`, `export` will store a special file which lets you resume exports easily (just `cd` into your previous output directory and run `hyperdrive export`), and it will remain running, watching the remote drive for changes. 128 | 129 | ### Debugging Commands 130 | If you're testing bug fixes or features, some of these commands might be useful for you. 131 | 132 | #### `hyperdrive cleanup:remove-readonly-drives` 133 | Delete all read-only drives from disk. This clears up storage and makes it easier to test networking issues during development (as running this command will force you to re-sync test drives when the daemon is restarted). 134 | 135 | This command *must not* be run while the daemon is running. Since it deletes data, it's intentionally verbose! 136 | 137 | ## FUSE 138 | Using FUSE, the Hyperdrive daemon lets you mount Hyperdrives as normal filesystem directories on both OSX and Linux. To use FUSE, you need to run the `fuse-setup` command before you start the daemon for the first time. 139 | 140 | ### Setup 141 | The setup command installs native, prebuilt FUSE bindings. We currently only provide bindings for OSX and Linux. The setup step is the only part of installation that requires `sudo` access: 142 | ``` 143 | ❯ hyperdrive fuse-setup 144 | Configuring FUSE... 145 | [sudo] password for andrewosh: 146 | Successfully configured FUSE! 147 | ``` 148 | 149 | You should only need to perform this step once (it will persist across restarts). To make sure that the setup step completed successfully, run the `status` command. It should contain the following two FUSE-related lines: 150 | ``` 151 | ❯ hyperdrive status 152 | ... 153 | Fuse Available: true 154 | Fuse Configured: true 155 | ``` 156 | 157 | If FUSE is both available and configured, then you're ready to continue with mounting your top-level, private drive! 158 | 159 | ### Usage 160 | The daemon requires all users to have a private "root" drive, mounted at `~/Hyperdrive`, into which additional subdrives can be mounted and shared with others. 161 | 162 | Think of this root drive as the `home` directory on your computer, where you might have Documents, Photos, or Videos directories. You'll likely never want to share your complete Documents folder with someone, but you can create a shareable mounted drive `Documents/coding-project-feb-2020` to share with collaborators on that project. 163 | 164 | #### Basic Mounting 165 | After starting the daemon with FUSE configured, you'll find a fresh root drive automatically mounted for you at `~/Hyperdrive`. This root drive will persist across daemon restarts, so it should always be available (just like your usual Home directory!).
166 | 167 | As with a home directory, you might want to create directories like `~/Hyperdrive/Documents`, `~/Hyperdrive/Videos`, and `~/Hyperdrive/Projects`. Be careful though -- any directories you create with `mkdir` or through the OSX Finder will not be drive mounts, so they will not be shareable with others. 168 | 169 | There are two ways to create a shareable drive inside your root drive: 170 | 1. `hyperdrive create [path]` - This will create a new shareable drive at `path` (where `path` must be a subdirectory of `~/Hyperdrive`). This drive will look like a normal directory, but if you run `hyperdrive info [path]` it will tell you that it's shareable. 171 | 2. `hyperdrive mount [path] [key]` - This will mount an existing drive at `path`. It's useful if someone is sharing one of their drives with you, and you want to save it into your root drive. 172 | 173 | Here are a few examples of what this flow might look like: 174 | 175 | To mount a new drive, you can either provide a complete path to the desired mountpoint, or you can use a relative path if your current working directory is within `~/Hyperdrive`. As an example, here's how you would create a shareable drive called `Videos`, mounted inside your root drive: 176 | ``` 177 | ❯ hyperdrive create ~/Hyperdrive/videos 178 | Mounted a drive with the following info: 179 | 180 | Path : /home/foo/Hyperdrive/videos 181 | Key: b432f90b2f817164c32fe5056a06f50c60dc8db946e81331f92e3192f6d4b847 182 | Seeding: true 183 | ``` 184 | 185 | *__Note:__ Unless you use the `--no-seed` flag, all new drives will be automatically "seeded," meaning they'll be announced on the Hyperswarm DHT. In the above example, seeding could be disabled with `hyperdrive create ~/Hyperdrive/videos --no-seed`. To announce the drive later, you can run `hyperdrive seed ~/Hyperdrive/videos`.* 186 | 187 | Equivalently: 188 | ``` 189 | ❯ cd ~/Hyperdrive 190 | ❯ hyperdrive create Videos 191 | ``` 192 | 193 | For most purposes, you can just treat this mounted drive like you would any other directory. The `hyperdrive` CLI gives you a few mount-specific commands for sharing drive keys and getting statistics for mounted drives. 194 | 195 | Mounted subdrives are seeded (announced on the DHT) by default, but if you've chosen not to seed (via the `--no-seed` flag), you can make them available with the `seed` command: 196 | ``` 197 | ❯ hyperdrive seed ~/Hyperdrive/Videos 198 | Seeding the drive mounted at ~/Hyperdrive/Videos 199 | ``` 200 | 201 | Seeding will start announcing the drive's discovery key on the Hyperswarm DHT, and this setting is persistent -- the drive will be reannounced when the daemon is restarted. 202 | 203 | After seeding, another user can either: 204 | 1. Mount the same subdrive by key within their own root drive 205 | 2. Inspect the drive inside the `~/Hyperdrive/Network` directory (can be a symlink target outside the FUSE mount!): 206 | ``` 207 | ❯ hyperdrive info ~/Hyperdrive/Videos 208 | Drive Info: 209 | 210 | Key: b432f90b2f817164c32fe5056a06f50c60dc8db946e81331f92e3192f6d4b847 211 | Is Mount: true 212 | Writable: true 213 | 214 | ❯ ls ~/Hyperdrive/Network/b432f90b2f817164c32fe5056a06f50c60dc8db946e81331f92e3192f6d4b847 215 | vid.mkv 216 | ``` 217 | Or: 218 | ``` 219 | ❯ hyperdrive mount ~/Hyperdrive/a_friends_videos b432f90b2f817164c32fe5056a06f50c60dc8db946e81331f92e3192f6d4b847 220 | ... 221 | ❯ ls ~/Hyperdrive/home/a_friends_videos 222 | vid.mkv 223 | ``` 224 | 225 | If you ever want to remove a drive, you can use the `hyperdrive unmount [path]` command.
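The create/mount/seed flow shown above can also be driven programmatically over the gRPC API. Here's a small sketch, again assuming an already-connected `client` per daemon from `hyperdrive-daemon-client` (these are the same calls exercised in `test/replication.js`):
```
// One daemon shares a drive; another mounts it into one of its own drives and reads it back.
async function shareAndMount (ownerClient, readerClient) {
  const shared = await ownerClient.drive.get()
  await shared.configureNetwork({ lookup: true, announce: true }) // roughly `hyperdrive seed`
  await shared.writeFile('vid.mkv', 'not really a video')

  const myDrive = await readerClient.drive.get()
  await myDrive.mount('a_friends_videos', { key: shared.key }) // roughly `hyperdrive mount`
  return myDrive.readFile('a_friends_videos/vid.mkv')
}
```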
226 | 227 | ### The `Network` "Magic Folder" 228 | 229 | Within your root drive, you'll see a special directory called `~/Hyperdrive/Network`. This is a virtual directory (it does not actually exist inside the drive), but it provides read-only access to useful information, such as storage/networking stats for any drive in the daemon. Here's what you can do with the `Network` directory: 230 | 231 | #### Global Drive Paths 232 | For any drive that's being announced on the DHT, `~/Hyperdrive/Network/<key>` will contain that drive's contents. This is super useful because these paths will be consistent across all daemon users! If you have an interesting file you want to share over IRC, you can just copy+paste `cat ~/Hyperdrive/Network/<key>/my-interesting-file.txt` into IRC and that command will work for everyone. 233 | 234 | #### Storage/Networking Statistics 235 | Inside `~/Hyperdrive/Network/Stats/<key>/` you'll find two files, `storage.json` and `networking.json`, containing an assortment of statistics relating to that drive, such as per-file storage usage, current peers, and uploaded/downloaded bytes of the drive's metadata and content feeds. 236 | 237 | *__Note__: `storage.json` is dynamically computed every time the file is read -- if you have a drive containing millions of files, this can be an expensive operation, so be careful.* 238 | 239 | Since looking at `networking.json` is a common operation, we provide a shorthand command `hyperdrive stats` that prints this file for you. It uses your current working directory to determine the key of the mounted drive you're in. 240 | 241 | #### Active Drives 242 | The `~/Hyperdrive/Network/Active` directory contains symlinks to the `networking.json` stats files for every drive that your daemon is currently announcing. `ls`ing this directory gives you a quick overview of exactly what you're announcing. 243 | 244 | ### FUSE Commands 245 | *Note: Always be sure to run `hyperdrive fuse-setup` and check the FUSE status before doing any additional FUSE-related commands!* 246 | 247 | #### `hyperdrive create [path]` 248 | Create a new drive mounted at `path`. 249 | 250 | Newly-created drives are seeded by default. This behavior can be disabled with the `--no-seed` flag, or toggled later through `hyperdrive seed [path]` or `hyperdrive unseed [path]`. 251 | 252 | Options include: 253 | ``` 254 | --no-seed // Do not announce the drive on the DHT. 255 | ``` 256 | 257 | #### `hyperdrive mount [path] [key]` 258 | Mount an existing Hyperdrive into your root drive at `path`. 259 | 260 | If you don't specify a `key`, the `mount` command will behave identically to `hyperdrive create`. 261 | 262 | - `path` must be a subdirectory of `~/Hyperdrive/home`. 263 | - `key` is an optional drive key. 264 | 265 | CLI options include: 266 | ``` 267 | --checkout (version) // Mount a static version of a drive. 268 | --no-seed // Do not announce the drive on the DHT. 269 | ``` 270 | 271 | #### `hyperdrive info [path]` 272 | Display information about the drive mounted at `path`. The information will include the drive's key, and whether `path` is the top-level directory in a mountpoint (meaning it's directly shareable). 273 | 274 | - `path` must be a subdirectory of `~/Hyperdrive/`. If `path` is not specified, the command will use the enclosing mount of your current working directory. 275 | 276 | By default, this command will refuse to display the key of your root drive (to dissuade accidentally sharing it). To forcibly display your root drive key, run this command with `--root`.
277 | 278 | CLI options include: 279 | ``` 280 | --root // Forcibly display your root drive key. 281 | ``` 282 | 283 | #### `hyperdrive seed [path]` 284 | Start announcing a drive on the DHT so that it can be shared with other peers. 285 | 286 | - `path` must be a subdirectory of `~/Hyperdrive/`. If `path` is not specified, the command will use the enclosing mount of your current working directory. 287 | 288 | By default, this command will refuse to publish your root drive (to dissuade accidentally sharing it). To forcibly publish your root drive, run this command with `--root`. 289 | 290 | CLI options include: 291 | ``` 292 | --lookup (true|false) // Look up the drive key on the DHT. Defaults to true 293 | --announce (true|false) // Announce the drive key on the DHT. Defaults to true 294 | --remember (true|false) // Persist these network settings in the database. 295 | --root // Forcibly seed your root drive. 296 | ``` 297 | 298 | #### `hyperdrive unseed [path]` 299 | Stop advertising a previously-published subdrive on the network. 300 | 301 | - `path` must be a subdirectory of `~/Hyperdrive/`. If `path` is not specified, the command will use the enclosing mount of your current working directory. 302 | 303 | *Note: This command will currently not delete the Hyperdrive from disk. Support for this will be added soon.* 304 | 305 | #### `hyperdrive stats [path]` 306 | Display networking statistics for a drive. This is a shorthand for getting a drive's key with `hyperdrive info` and `cat`ing `~/Hyperdrive/Network/Stats/<key>/networking.json`. 307 | 308 | - `path` must be a subdirectory of `~/Hyperdrive/` and must have been previously mounted with the mount subcommand described above. If `path` is not specified, the command will use the enclosing mount of your current working directory. 309 | 310 | #### `hyperdrive force-unmount` 311 | If the daemon fails or is not stopped cleanly, then the `~/Hyperdrive` mountpoint might be left in an unusable state. Running this command before restarting the daemon will forcibly disconnect the mountpoint. 312 | 313 | This command should never be necessary! If your FUSE mountpoint isn't cleaned up on shutdown and you're unable to restart your daemon (due to "Mountpoint in use" errors), please file an issue. 314 | 315 | ## License 316 | MIT 317 | -------------------------------------------------------------------------------- /test/replication.js: -------------------------------------------------------------------------------- 1 | const p = require('path') 2 | const test = require('tape') 3 | const { create } = require('./util/create') 4 | 5 | test('can replicate a single drive between daemons', async t => { 6 | const { clients, cleanup } = await create(2) 7 | const firstClient = clients[0] 8 | const secondClient = clients[1] 9 | 10 | try { 11 | const drive1 = await firstClient.drive.get() 12 | await drive1.configureNetwork({ lookup: true, announce: true }) 13 | 14 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 15 | 16 | await drive1.writeFile('hello', 'world') 17 | 18 | // 100 ms delay for replication.
19 | await delay(100) 20 | 21 | const replicatedContent = await drive2.readFile('hello') 22 | t.same(replicatedContent, Buffer.from('world')) 23 | } catch (err) { 24 | t.fail(err) 25 | } 26 | 27 | await cleanup() 28 | t.end() 29 | }) 30 | 31 | test('can get drive stats containing only networking info', async t => { 32 | const { clients, cleanup } = await create(2) 33 | const firstClient = clients[0] 34 | const secondClient = clients[1] 35 | 36 | try { 37 | const drive1 = await firstClient.drive.get() 38 | await drive1.configureNetwork({ lookup: true, announce: true }) 39 | 40 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 41 | 42 | await drive1.writeFile('hello', 'world') 43 | 44 | // 100 ms delay for replication. 45 | await delay(100) 46 | await drive2.readFile('hello') 47 | 48 | const { stats: stats1 } = await drive1.stats({ networkingOnly: true }) 49 | const { stats: stats2 } = await drive2.stats({ networkingOnly: true }) 50 | 51 | const firstStats = stats1[0] 52 | const secondStats = stats2[0] 53 | t.same(firstStats.metadata.peers, 1) 54 | t.same(secondStats.metadata.peers, 1) 55 | t.same(firstStats.metadata.downloadedBlocks, 0) 56 | t.same(secondStats.metadata.downloadedBlocks, 0) 57 | } catch (err) { 58 | t.fail(err) 59 | } 60 | 61 | await cleanup() 62 | t.end() 63 | }) 64 | 65 | test('can download a directory between daemons', async t => { 66 | const { clients, cleanup } = await create(2) 67 | const firstClient = clients[0] 68 | const secondClient = clients[1] 69 | 70 | try { 71 | const drive1 = await firstClient.drive.get() 72 | await drive1.configureNetwork({ lookup: true, announce: true }) 73 | 74 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 75 | 76 | await drive1.writeFile('/a/1', 'hello') 77 | await drive1.writeFile('/a/2', 'world') 78 | await drive1.writeFile('/a/3', 'three') 79 | await drive1.writeFile('/a/4', 'four') 80 | await drive1.writeFile('/a/5', 'five') 81 | 82 | var { stats } = await drive1.stats() 83 | t.same(stats[0].content.totalBlocks, 5) 84 | t.same(stats[0].content.downloadedBlocks, 5) 85 | 86 | // 100 ms delay for replication. 87 | await delay(100) 88 | 89 | const d2Stats1 = await drive2.stats() 90 | stats = d2Stats1.stats 91 | 92 | // Since there has not been a content read yet, the stats will not report the latest content length. 93 | t.same(stats[0].content.totalBlocks, 0) 94 | 95 | // TODO: Uncomment after hypercore bug fix. 96 | // t.same(stats[0].content.downloadedBlocks, 0) 97 | 98 | var fileStats = await drive2.fileStats('/a/1') 99 | 100 | // TODO: Uncomment after hypercore bug fix. 101 | // t.same(fileStats.get('/a/1').downloadedBlocks, 0) 102 | 103 | await drive2.download('a') 104 | 105 | // 200 ms delay for download to complete. 
106 | await delay(200) 107 | 108 | const d2Stats2 = await drive2.stats() 109 | stats = d2Stats2.stats 110 | 111 | fileStats = await drive2.fileStats('a') 112 | t.same(stats[0].content.totalBlocks, 5) 113 | t.same(stats[0].content.downloadedBlocks, 5) 114 | t.same(fileStats.get('/a/1').downloadedBlocks, 1) 115 | t.same(fileStats.get('/a/2').downloadedBlocks, 1) 116 | t.same(fileStats.get('/a/3').downloadedBlocks, 1) 117 | t.same(fileStats.get('/a/4').downloadedBlocks, 1) 118 | t.same(fileStats.get('/a/5').downloadedBlocks, 1) 119 | } catch (err) { 120 | t.fail(err) 121 | } 122 | 123 | await cleanup() 124 | t.end() 125 | }) 126 | 127 | test('can cancel an active download', async t => { 128 | const { clients, cleanup } = await create(2) 129 | const firstClient = clients[0] 130 | const secondClient = clients[1] 131 | 132 | try { 133 | const drive1 = await firstClient.drive.get() 134 | await drive1.configureNetwork({ lookup: true, announce: true }) 135 | 136 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 137 | 138 | await writeFile(drive1, '/a/1', 50) 139 | await writeFile(drive1, '/a/2', 50) 140 | 141 | var fileStats = await drive2.fileStats('/a/1') 142 | // TODO: Uncomment after hypercore bug fix 143 | // t.same(fileStats.downloadedBlocks, 0) 144 | 145 | const handle = await drive2.download('a') 146 | await delay(100) 147 | await handle.destroy() 148 | 149 | // Wait to make sure that the download is not continuing. 150 | await delay(100) 151 | 152 | const { stats: totals } = await drive2.stats() 153 | fileStats = await drive2.fileStats('a') 154 | const contentTotals = totals[0].content 155 | t.true(contentTotals.downloadedBlocks < 100 && contentTotals.downloadedBlocks > 0) 156 | t.true(fileStats.get('/a/1').downloadedBlocks < 50 && fileStats.get('/a/1').downloadedBlocks > 0) 157 | t.true(fileStats.get('/a/2').downloadedBlocks < 50 && fileStats.get('/a/2').downloadedBlocks > 0) 158 | } catch (err) { 159 | t.fail(err) 160 | } 161 | 162 | await cleanup() 163 | t.end() 164 | 165 | async function writeFile (drive, name, numBlocks) { 166 | const writeStream = drive.createWriteStream(name) 167 | return new Promise((resolve, reject) => { 168 | writeStream.on('finish', resolve) 169 | writeStream.on('error', reject) 170 | for (let i = 0; i < numBlocks; i++) { 171 | writeStream.write(Buffer.alloc(1024 * 1024).fill('abcdefg')) 172 | } 173 | writeStream.end() 174 | }) 175 | } 176 | }) 177 | 178 | test('can mirror a single drive', async t => { 179 | const { clients, cleanup } = await create(2) 180 | const firstClient = clients[0] 181 | const secondClient = clients[1] 182 | 183 | try { 184 | const drive1 = await firstClient.drive.get() 185 | await drive1.configureNetwork({ lookup: true, announce: true }) 186 | 187 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 188 | 189 | await drive1.writeFile('/a/1', 'hello') 190 | await drive1.writeFile('/a/2', 'world') 191 | await drive1.writeFile('/a/3', 'three') 192 | await drive1.writeFile('/a/4', 'four') 193 | await drive1.writeFile('/a/5', 'five') 194 | 195 | // 100 ms delay for replication. 196 | await delay(100) 197 | 198 | const d2Stats1 = await drive2.stats() 199 | var stats = d2Stats1.stats 200 | 201 | // Since there has not been a content read yet, the stats will not report the latest content length. 202 | t.same(stats[0].content.totalBlocks, 0) 203 | 204 | await drive2.mirror() 205 | 206 | // 200 ms delay for download to complete. 
207 | await delay(200) 208 | 209 | const d2Stats2 = await drive2.stats() 210 | stats = d2Stats2.stats 211 | 212 | const fileStats = await drive2.fileStats('a') 213 | t.same(stats[0].content.totalBlocks, 5) 214 | t.same(stats[0].content.downloadedBlocks, 5) 215 | t.same(fileStats.get('/a/1').downloadedBlocks, 1) 216 | t.same(fileStats.get('/a/2').downloadedBlocks, 1) 217 | t.same(fileStats.get('/a/3').downloadedBlocks, 1) 218 | t.same(fileStats.get('/a/4').downloadedBlocks, 1) 219 | t.same(fileStats.get('/a/5').downloadedBlocks, 1) 220 | } catch (err) { 221 | t.fail(err) 222 | } 223 | 224 | await cleanup() 225 | t.end() 226 | }) 227 | 228 | test('can mirror a drive with mounts', async t => { 229 | const { clients, cleanup } = await create(2) 230 | const firstClient = clients[0] 231 | const secondClient = clients[1] 232 | 233 | try { 234 | const drive1 = await firstClient.drive.get() 235 | const mount = await firstClient.drive.get() 236 | await drive1.configureNetwork({ lookup: true, announce: true }) 237 | 238 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 239 | 240 | await drive1.mount('/a', { key: mount.key }) 241 | await mount.writeFile('2', 'world') 242 | await mount.writeFile('3', 'three') 243 | await mount.writeFile('4', 'four') 244 | await mount.writeFile('5', 'five') 245 | 246 | // 100 ms delay for replication. 247 | await delay(100) 248 | 249 | const d2Stats1 = await drive2.stats() 250 | var stats = d2Stats1.stats 251 | 252 | await drive2.mirror() 253 | 254 | // 200 ms delay for download to complete. 255 | await delay(200) 256 | 257 | const d2Stats2 = await drive2.stats() 258 | stats = d2Stats2.stats 259 | 260 | const fileStats = await drive2.fileStats('a') 261 | t.same(stats[1].content.totalBlocks, 4) 262 | t.same(stats[1].content.downloadedBlocks, 4) 263 | t.same(fileStats.get('/a/2').downloadedBlocks, 1) 264 | t.same(fileStats.get('/a/3').downloadedBlocks, 1) 265 | t.same(fileStats.get('/a/4').downloadedBlocks, 1) 266 | t.same(fileStats.get('/a/5').downloadedBlocks, 1) 267 | } catch (err) { 268 | t.fail(err) 269 | } 270 | 271 | await cleanup() 272 | t.end() 273 | }) 274 | 275 | test('can cancel an active mirror', async t => { 276 | const { clients, cleanup } = await create(2) 277 | const firstClient = clients[0] 278 | const secondClient = clients[1] 279 | 280 | try { 281 | const drive1 = await firstClient.drive.get() 282 | await drive1.configureNetwork({ lookup: true, announce: true }) 283 | 284 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 285 | 286 | await writeFile(drive1, '/a/1', 50) 287 | await writeFile(drive1, '/a/2', 50) 288 | 289 | const unmirror = await drive2.mirror() 290 | await delay(100) 291 | await unmirror() 292 | 293 | // Wait to make sure that the download is not continuing. 
294 | await delay(100) 295 | 296 | const { stats: totals } = await drive2.stats() 297 | const fileStats = await drive2.fileStats('a') 298 | const contentTotals = totals[0].content 299 | t.true(contentTotals.downloadedBlocks < 100 && contentTotals.downloadedBlocks > 0) 300 | t.true(fileStats.get('/a/1').downloadedBlocks < 50 && fileStats.get('/a/1').downloadedBlocks > 0) 301 | t.true(fileStats.get('/a/2').downloadedBlocks < 50 && fileStats.get('/a/2').downloadedBlocks > 0) 302 | } catch (err) { 303 | t.fail(err) 304 | } 305 | 306 | await cleanup() 307 | t.end() 308 | 309 | async function writeFile (drive, name, numBlocks) { 310 | const writeStream = drive.createWriteStream(name) 311 | return new Promise((resolve, reject) => { 312 | writeStream.on('finish', resolve) 313 | writeStream.on('error', reject) 314 | for (let i = 0; i < numBlocks; i++) { 315 | writeStream.write(Buffer.alloc(1024 * 1024).fill('abcdefg')) 316 | } 317 | writeStream.end() 318 | }) 319 | } 320 | }) 321 | 322 | test('can replicate many mounted drives between daemons', async t => { 323 | const { clients, cleanup } = await create(2) 324 | console.time('many-mounts') 325 | const firstClient = clients[0] 326 | const secondClient = clients[1] 327 | 328 | const NUM_MOUNTS = 15 329 | 330 | try { 331 | const mounts = await createFirst() 332 | const second = await createSecond(mounts) 333 | await validate(mounts, second) 334 | } catch (err) { 335 | t.fail(err) 336 | } 337 | 338 | await cleanup() 339 | console.timeEnd('many-mounts') 340 | t.end() 341 | 342 | async function createFirst () { 343 | const rootDrive = await firstClient.drive.get() 344 | const mounts = [] 345 | for (let i = 0; i < NUM_MOUNTS; i++) { 346 | const key = '' + i 347 | const mountDrive = await firstClient.drive.get() 348 | await mountDrive.configureNetwork({ lookup: true, announce: true }) 349 | await rootDrive.mount(key, { key: mountDrive.key }) 350 | await mountDrive.writeFile(key, key) 351 | mounts.push({ key: mountDrive.key, path: key + '/' + key, content: key, drive: mountDrive }) 352 | } 353 | return mounts 354 | } 355 | 356 | async function createSecond (mounts) { 357 | const rootDrive = await secondClient.drive.get() 358 | for (const { key, content } of mounts) { 359 | await rootDrive.mount(content, { key }) 360 | } 361 | return rootDrive 362 | } 363 | 364 | async function validate (mounts, secondRoot) { 365 | const contents = await Promise.all(mounts.map(async ({ path, content }) => { 366 | const contents = await secondRoot.readFile(path) 367 | return contents 368 | })) 369 | for (let i = 0; i < mounts.length; i++) { 370 | t.same(contents[i], Buffer.from(mounts[i].content)) 371 | } 372 | } 373 | }) 374 | 375 | test('can replicate nested mounts between daemons', async t => { 376 | const { clients, cleanup } = await create(2) 377 | const firstClient = clients[0] 378 | const secondClient = clients[1] 379 | 380 | try { 381 | const firstRoot = await firstClient.drive.get() 382 | const firstMount1 = await firstClient.drive.get() 383 | const firstMount2 = await firstClient.drive.get() 384 | await firstMount2.configureNetwork({ lookup: true, announce: true }) 385 | 386 | await firstRoot.mount('a', { key: firstMount1.key }) 387 | await firstMount1.mount('b', { key: firstMount2.key }) 388 | 389 | await firstMount2.writeFile('hello', 'world') 390 | 391 | const secondRoot = await secondClient.drive.get() 392 | await secondClient.drive.get({ key: firstMount2.key }) 393 | 394 | await secondRoot.mount('c', { key: firstMount2.key }) 395 | 396 | // 100 ms delay for 
replication. 397 | await delay(100) 398 | 399 | const replicatedContent = await secondRoot.readFile('c/hello') 400 | t.same(replicatedContent, Buffer.from('world')) 401 | } catch (err) { 402 | t.fail(err) 403 | } 404 | 405 | await cleanup() 406 | t.end() 407 | }) 408 | 409 | test('can get networking stats for multiple mounts', async t => { 410 | const { clients, cleanup } = await create(2) 411 | const firstClient = clients[0] 412 | const secondClient = clients[1] 413 | 414 | try { 415 | const firstRoot = await firstClient.drive.get() 416 | const firstMount1 = await firstClient.drive.get() 417 | const firstMount2 = await firstClient.drive.get() 418 | await firstMount2.configureNetwork({ lookup: true, announce: true }) 419 | 420 | await firstRoot.mount('a', { key: firstMount1.key }) 421 | await firstRoot.mount('b', { key: firstMount2.key }) 422 | await delay(100) 423 | 424 | await firstMount2.writeFile('hello', 'world') 425 | 426 | const firstStats = await firstClient.drive.allStats() 427 | t.same(firstStats.length, 3) 428 | const rootStats = firstStats[0] 429 | t.same(rootStats.length, 3) 430 | t.same(rootStats[0].metadata.uploadedBytes, 0) 431 | 432 | const secondRoot = await secondClient.drive.get() 433 | await secondClient.drive.get({ key: firstMount2.key }) 434 | 435 | await secondRoot.mount('c', { key: firstMount2.key }) 436 | 437 | // 100 ms delay for replication. 438 | await delay(100) 439 | 440 | const replicatedContent = await secondRoot.readFile('c/hello') 441 | t.same(replicatedContent, Buffer.from('world')) 442 | 443 | const secondStats = await firstClient.drive.allStats() 444 | t.same(secondStats.length, 3) 445 | 446 | var uploadedBytes = null 447 | for (const mountStats of secondStats) { 448 | if (mountStats[0].metadata.key.equals(firstMount2.key)) { 449 | uploadedBytes = mountStats[0].content.uploadedBytes 450 | t.notEqual(uploadedBytes, 0) 451 | } 452 | } 453 | t.true(uploadedBytes) 454 | 455 | const { stats: thirdStats } = await firstMount2.stats() 456 | t.same(thirdStats[0].content.uploadedBytes, uploadedBytes) 457 | } catch (err) { 458 | t.fail(err) 459 | } 460 | 461 | await cleanup() 462 | t.end() 463 | }) 464 | 465 | test('no-announce mode prevents discovery for read-only hyperdrives', async t => { 466 | const { clients, daemons, cleanup } = await create(3, [null, { noAnnounce: true }, { noAnnounce: true }]) 467 | const firstClient = clients[0] 468 | const secondClient = clients[1] 469 | const thirdClient = clients[2] 470 | 471 | try { 472 | const drive1 = await firstClient.drive.get() 473 | await drive1.configureNetwork({ lookup: true, announce: true }) 474 | 475 | const drive2 = await secondClient.drive.get({ key: drive1.key }) 476 | 477 | await drive1.writeFile('hello', 'world') 478 | 479 | // 100 ms delay for replication. 
480 | await delay(100) 481 | 482 | const replicatedContent = await drive2.readFile('hello') 483 | t.same(replicatedContent, Buffer.from('world')) 484 | 485 | await daemons[0].stop() 486 | 487 | const drive3 = await thirdClient.drive.get({ key: drive1.key }) 488 | await delay(100) 489 | 490 | var error = null 491 | try { 492 | const shouldNotHave = await drive3.readFile('hello') 493 | t.false(shouldNotHave) 494 | } catch (err) { 495 | // This should error because the thirdClient cannot discover the secondClient 496 | error = err 497 | } 498 | t.true(error) 499 | } catch (err) { 500 | t.fail(err) 501 | } 502 | 503 | await cleanup() 504 | t.end() 505 | }) 506 | 507 | test('published drives are swarmed by both reader and writer', async t => { 508 | const { clients, daemons, cleanup } = await create(3) 509 | const serviceOwner = clients[0] 510 | const groupOwner = clients[1] 511 | const groupReader = clients[2] 512 | 513 | try { 514 | const service = await serviceOwner.drive.get() 515 | await service.writeFile('a/1', 'a/1') 516 | await service.writeFile('a/2', 'a/2') 517 | await service.writeFile('a/3', 'a/3') 518 | 519 | // The service owner announces the service. 520 | await service.configureNetwork({ lookup: true, announce: true }) 521 | 522 | const profile = await groupOwner.drive.get() 523 | const group = await groupOwner.drive.get() 524 | 525 | // The group owner announces the group. 526 | await group.configureNetwork({ announce: true, lookup: true }) 527 | await delay(100) 528 | 529 | await group.mount('profile', { key: profile.key }) 530 | await profile.mount('service', { key: service.key }) 531 | 532 | const reader = await groupReader.drive.get({ key: group.key }) 533 | 534 | // The profile should be discoverable through the group without a separate announce. 535 | const profileRootDir = await reader.readdir('profile') 536 | t.same(profileRootDir, ['service']) 537 | 538 | // The update heuristic should do any early abort here: 539 | // - the reader is connected to the profile peer, which has mount metadata for service but no files (reader <-> profile only) 540 | // - an update on service will proceed immediately because it has 1 peer (early abort), but that peer has no files 541 | try { 542 | await reader.stat('profile/service/a') 543 | } catch (err) { 544 | t.true(err) 545 | } 546 | 547 | // After a small delay, reader <-> service directly. 
548 | await delay(100) 549 | 550 | let serviceDir = await reader.readdir('profile/service') 551 | t.same(serviceDir.length, 1) 552 | // This time it works because reader <-> service directly 553 | const stat = await reader.stat('profile/service/a') 554 | t.true(stat) 555 | 556 | // Killing the second daemon should still let us get service stats through the serviceOwner 557 | await daemons[1].stop() 558 | serviceDir = await reader.readdir('profile/service/a') 559 | t.same(serviceDir, ['3', '1', '2']) 560 | } catch (err) { 561 | t.fail(err) 562 | } 563 | 564 | await cleanup() 565 | t.end() 566 | }) 567 | 568 | test('deep mounts with added latency', async t => { 569 | const { clients, cleanup } = await create(2, { latency: 20 }) 570 | const firstClient = clients[0] 571 | const secondClient = clients[1] 572 | 573 | const DEPTH = 10 574 | 575 | try { 576 | const firstRoot = await createFirst(firstClient) 577 | const secondRoot = await secondClient.drive.get({ key: firstRoot.key }) 578 | 579 | let path = '' 580 | for (let i = 0; i < DEPTH; i++) { 581 | const component = '' + i 582 | console.time('readdir') 583 | const dirContents = await secondRoot.readdir(path) 584 | console.timeEnd('readdir') 585 | t.same(dirContents.length, 2) 586 | path = p.join(path, component) 587 | } 588 | } catch (err) { 589 | t.fail(err) 590 | } 591 | 592 | await cleanup() 593 | t.end() 594 | 595 | async function createFirst (client) { 596 | const rootDrive = await client.drive.get() 597 | await rootDrive.configureNetwork({ lookup: true, announce: true }) 598 | let currentDrive = rootDrive 599 | for (let i = 0; i < DEPTH; i++) { 600 | currentDrive.writeFile('content', '' + i) 601 | const nextDrive = await client.drive.get() 602 | currentDrive.mount('' + i, { key: nextDrive.key }) 603 | currentDrive = nextDrive 604 | } 605 | return rootDrive 606 | } 607 | }) 608 | 609 | test('can get peer counts for a drive', async t => { 610 | const { clients, cleanup } = await create(3) 611 | const firstClient = clients[0] 612 | const secondClient = clients[1] 613 | const thirdClient = clients[2] 614 | 615 | try { 616 | const drive1 = await firstClient.drive.get() 617 | const drive2 = await firstClient.drive.get() 618 | await drive1.writeFile('hello', 'world') 619 | await drive1.configureNetwork({ lookup: true, announce: true }) 620 | await drive2.configureNetwork({ lookup: true, announce: true }) 621 | 622 | await secondClient.drive.get({ key: drive1.key }) 623 | await thirdClient.drive.get({ key: drive1.key }) 624 | await thirdClient.drive.get({ key: drive2.key }) 625 | 626 | // 100 ms delay for replication. 
627 | await delay(100) 628 | 629 | const peerCounts = await firstClient.drive.peerCounts([drive1.key, drive2.key]) 630 | t.same(peerCounts.length, 2) 631 | t.same(peerCounts[0], 2) 632 | t.same(peerCounts[1], 1) 633 | } catch (err) { 634 | t.fail(err) 635 | } 636 | 637 | await cleanup() 638 | t.end() 639 | }) 640 | 641 | test('can get peer info globally', async t => { 642 | const { clients, cleanup } = await create(3) 643 | const firstClient = clients[0] 644 | const secondClient = clients[1] 645 | const thirdClient = clients[2] 646 | 647 | try { 648 | const drive1 = await firstClient.drive.get() 649 | const drive2 = await firstClient.drive.get() 650 | await drive1.configureNetwork({ lookup: true, announce: true }) 651 | await drive2.configureNetwork({ lookup: true, announce: true }) 652 | 653 | await secondClient.drive.get({ key: drive1.key }) 654 | await thirdClient.drive.get({ key: drive2.key }) 655 | 656 | // 100 ms delay for replication. 657 | await delay(100) 658 | 659 | const peers = await firstClient.peers.listPeers() 660 | t.same(peers.length, 2) 661 | t.true(peers[0].address) 662 | t.true(peers[0].noiseKey) 663 | } catch (err) { 664 | t.fail(err) 665 | } 666 | 667 | await cleanup() 668 | t.end() 669 | }) 670 | 671 | test('can get peer info for one discovery key', async t => { 672 | const { clients, daemons, cleanup } = await create(3) 673 | const firstClient = clients[0] 674 | const secondClient = clients[1] 675 | const thirdClient = clients[2] 676 | 677 | try { 678 | const drive1 = await firstClient.drive.get() 679 | const drive2 = await firstClient.drive.get() 680 | await drive1.writeFile('hello', 'world') 681 | await drive1.configureNetwork({ lookup: true, announce: true }) 682 | await drive2.configureNetwork({ lookup: true, announce: true }) 683 | 684 | await secondClient.drive.get({ key: drive1.key }) 685 | await thirdClient.drive.get({ key: drive2.key }) 686 | 687 | // 100 ms delay for replication. 688 | await delay(100) 689 | 690 | const peers = await firstClient.peers.listPeers(drive2.discoveryKey) 691 | t.same(peers.length, 1) 692 | t.true(peers[0].noiseKey.equals(daemons[2].noiseKeyPair.publicKey)) 693 | } catch (err) { 694 | t.fail(err) 695 | } 696 | 697 | await cleanup() 698 | t.end() 699 | }) 700 | 701 | // This will hang until we add timeouts to the hyperdrive reads. 
702 | test('can continue getting drive info after remote content is cleared (no longer available)', async t => { 703 | const { clients, cleanup, daemons } = await create(2) 704 | const firstClient = clients[0] 705 | const secondClient = clients[1] 706 | 707 | const localStore = daemons[0].corestore 708 | 709 | try { 710 | const drive = await firstClient.drive.get() 711 | await drive.configureNetwork({ announce: true, lookup: true }) 712 | await drive.writeFile('hello', 'world') 713 | const clone = await secondClient.drive.get({ key: drive.key }) 714 | 715 | await delay(500) 716 | 717 | t.same(await clone.readFile('hello'), Buffer.from('world')) 718 | await drive.writeFile('hello', 'brave new world') 719 | 720 | await clearContent([drive.key], localStore) 721 | 722 | // const cloneStats = await clone.stats() 723 | } catch (err) { 724 | t.fail(err) 725 | } 726 | 727 | await cleanup() 728 | t.end() 729 | 730 | async function clearContent (metadataKeys, store) { 731 | const metadataKeySet = new Set(metadataKeys.map(k => k.toString('hex'))) 732 | for (const [, core] of store.list()) { 733 | if (metadataKeySet.has(core.key.toString('hex'))) continue 734 | await new Promise((resolve, reject) => { 735 | core.clear(0, core.length, err => { 736 | if (err) return reject(err) 737 | return resolve() 738 | }) 739 | }) 740 | } 741 | } 742 | }) 743 | 744 | function delay (ms) { 745 | return new Promise(resolve => setTimeout(resolve, ms)) 746 | } 747 | -------------------------------------------------------------------------------- /lib/fuse/index.js: -------------------------------------------------------------------------------- 1 | const p = require('path') 2 | const { EventEmitter } = require('events') 3 | const crypto = require('crypto') 4 | 5 | const datEncoding = require('dat-encoding') 6 | 7 | const { Stat } = require('hyperdrive-schemas') 8 | const { rpc } = require('hyperdrive-daemon-client') 9 | const { fromHyperdriveOptions, toHyperdriveOptions, toDriveInfo } = require('hyperdrive-daemon-client/lib/common') 10 | const constants = require('hyperdrive-daemon-client/lib/constants') 11 | 12 | const log = require('../log').child({ component: 'fuse-manager' }) 13 | 14 | try { 15 | var hyperfuse = require('hyperdrive-fuse') 16 | var fuse = require('fuse-native') 17 | var { HyperdriveFuse } = hyperfuse 18 | var { VirtualFiles } = require('./virtual-files') 19 | } catch (err) { 20 | // Fuse is not available on the platform 21 | console.warn('FUSE is not available on your platform.') 22 | } 23 | 24 | const NETWORK_PATH = 'Network' 25 | const SPECIAL_DIRS = new Set(['Stats', 'Active']) 26 | 27 | class FuseManager extends EventEmitter { 28 | constructor (driveManager, db, opts) { 29 | super() 30 | 31 | this.driveManager = driveManager 32 | this.db = db 33 | this.opts = opts 34 | this.networkDirs = new NetworkSet() 35 | 36 | // TODO: Replace with an LRU cache. 37 | this._handlers = new Map() 38 | this._virtualFiles = null 39 | this._fuseLogger = log.child({ component: 'fuse' }) 40 | 41 | // Set in ready. 42 | this.fuseConfigured = false 43 | this._rootDrive = null 44 | this._rootMnt = null 45 | this._rootHandler = null 46 | } 47 | 48 | async ready () { 49 | try { 50 | await ensureFuse() 51 | this.fuseConfigured = true 52 | } catch (err) { 53 | this.fuseConfigured = false 54 | } 55 | // If FUSE is not configured, or the root drive is already mounted, bail here. 
56 | if (!this.fuseConfigured || this._rootDrive) return null 57 | if (!this._virtualFiles) this._virtualFiles = new VirtualFiles() 58 | if (this.fuseConfigured && process.env['NODE_ENV'] !== 'test') return this._refreshMount() 59 | return null 60 | } 61 | 62 | async _refreshMount () { 63 | log.debug('attempting to refresh the root drive if it exists.') 64 | const rootDriveMeta = await this._getRootDriveInfo() 65 | const { opts = {}, mnt = constants.mountpoint } = rootDriveMeta || {} 66 | log.debug('refreshing root mount on restart') 67 | await this.mount(mnt, opts) 68 | return true 69 | } 70 | 71 | async _getRootDriveInfo () { 72 | log.debug('getting root drive metadata') 73 | try { 74 | const rootDriveMeta = await this.db.get('root-drive') 75 | log.debug('got root drive metadata') 76 | return rootDriveMeta 77 | } catch (err) { 78 | if (!err.notFound) throw err 79 | log.debug('no root drive metadata found') 80 | return null 81 | } 82 | } 83 | 84 | _wrapHandlers (handlers) { 85 | const rootInterceptorIndex = new Map() 86 | const networkInterceptorIndex = new Map() 87 | const { networkDirs } = this 88 | const started = Date.now() 89 | 90 | // The RootListHandler/NetworkInfoHandler operate on the top-level Hyperdrive. 91 | // If any requests have paths in Network/, they will be intercepted by the additional handlers below. 92 | 93 | const RootListHandler = { 94 | id: 'root', 95 | test: '^\/$', 96 | search: /^\/$/, 97 | ops: ['readdir'], 98 | handler: (op, match, args, cb) => { 99 | if (!this._rootHandler) return cb(0, []) 100 | return this._rootHandler.readdir.apply(null, [...args, (err, list) => { 101 | if (err) return cb(err) 102 | return cb(0, [...list, NETWORK_PATH]) 103 | }]) 104 | } 105 | } 106 | 107 | const NetworkInfoHandler = { 108 | id: 'networkinfo', 109 | test: '^/Network\/?$', 110 | search: /.*/, 111 | ops: ['getattr', 'readdir', 'getxattr'], 112 | handler: (op, match, args, cb) => { 113 | if (op === 'getxattr') return cb(0, null) 114 | if (op === 'getattr') return cb(0, Stat.directory({ uid: process.getuid(), gid: process.getgid(), mtime: started, ctime: started })) 115 | else if (op === 'readdir') return cb(0, networkDirs.list) 116 | else return handlers[op].apply(null, [...args, cb]) 117 | } 118 | } 119 | 120 | // All subsequent handlers are bounded to operate within Network/, and the paths will be sliced accordingly 121 | // before being processed by the handlers. 122 | 123 | const NetworkHandler = { 124 | id: 'network', 125 | test: '^/Network/.+', 126 | search: /^\/Network\/(?:.*)/, 127 | ops: '*', 128 | handler: (op, match, args, cb) => { 129 | return dispatchNetworkCall(op, match, args, cb) 130 | } 131 | } 132 | 133 | const ByKeyHandler = { 134 | id: 'bykey', 135 | test: '^\/.+', 136 | ops: ['readdir', 'getattr', 'open', 'read', 'close', 'symlink', 'release', 'releasedir', 'opendir', 'getxattr'], 137 | search: /^(\/(?<key>\w+)(\+(?<version>\d+))?(\+(?<hash>\w+))?\/?)?/, 138 | handler: (op, match, args, cb) => { 139 | // If this is a stat on '/Network', return a directory stat.
140 | if (!match.groups.key) { 141 | if (op === 'readdir') return cb(0, []) 142 | if (op === 'releasedir') return cb(0) 143 | if (op === 'getattr') return cb(0, Stat.directory({ uid: process.getuid(), gid: process.getgid(), mtime: started, ctime: started })) 144 | return handlers[op].apply(null, [...args, cb]) 145 | } 146 | let key = match.groups.key 147 | if (SPECIAL_DIRS.has(key)) { 148 | if (op === 'getxattr') return cb(0, null) 149 | return cb(0, Stat.directory({ uid: process.getuid(), gid: process.getgid() })) 150 | } 151 | 152 | // Otherwise this is operating on a subdir of by-key, in which case perform the op on the specified drive. 153 | try { 154 | key = datEncoding.decode(key) 155 | } catch (err) { 156 | return cb(-1) 157 | } 158 | 159 | var version = match.groups.version 160 | if (version && +version) version = +version 161 | 162 | if (op === 'getxattr') return cb(0, null) 163 | if (op === 'symlink') { 164 | // Symlinks into the 'by-key' directory should be treated as mounts in the root drive. 165 | const hash = match.groups.hash 166 | return this.mountDrive(args[0], { version, hash }) 167 | .then(() => cb(0)) 168 | .catch(err => { 169 | log.error({ err }, 'mount error') 170 | cb(-1) 171 | }) 172 | } 173 | 174 | if (version) networkDirs.add(key.toString('hex') + '+' + version) 175 | else networkDirs.add(key.toString('hex')) 176 | 177 | return this.driveManager.get(key, { ...this.opts, version, fuseNetwork: true }) 178 | .then(drive => { 179 | var driveFuse = this._handlers.get(drive) 180 | if (!driveFuse) { 181 | const fuse = new HyperdriveFuse(drive, `/Network/${key}`, { 182 | force: true, 183 | log: this._fuseLogger.trace.bind(this._fuseLogger) 184 | }) 185 | handlers = fuse.getBaseHandlers() 186 | driveFuse = { fuse, handlers } 187 | this._handlers.set(drive, driveFuse) 188 | } 189 | handlers = driveFuse.handlers 190 | args[0] = args[0].slice(match[0].length) || '/' 191 | return new Promise((resolve, reject) => { 192 | let errored = false 193 | try { 194 | handlers[op].apply(null, [...args, (err, result) => { 195 | if (errored) return 196 | if (err && op !== 'read' && op !== 'write') { 197 | log.trace({ err, op, args: [...args] }, 'error in sub-fuse handler') 198 | return reject(err) 199 | } 200 | return resolve([err, result]) 201 | }]) 202 | } catch (err) { 203 | errored = true 204 | return reject(err) 205 | } 206 | }) 207 | }) 208 | .then(args => { 209 | return cb(...args) 210 | }) 211 | .catch(err => { 212 | log.error({ err: err.stack }, 'by-key handler error') 213 | return cb(-1) 214 | }) 215 | } 216 | } 217 | 218 | const StatsHandler = { 219 | id: 'stats', 220 | test: '^\/Stats', 221 | ops: ['readdir', 'getattr', 'open', 'read', 'close', 'symlink', 'release', 'releasedir', 'opendir'], 222 | search: /^\/Stats(\/?((?<key>\w+)(\/(?<filename>.+))?)?)?/, 223 | handler: async (op, match, args, cb) => { 224 | if (op === 'getattr') { 225 | // If this is a stat on '/Stats', return a directory stat. 226 | if (!match.groups.key && match.input.length !== 6) return cb(fuse.ENOENT) 227 | if (!match.groups.key) return cb(0, Stat.directory({ uid: process.getuid(), gid: process.getgid() })) 228 | 229 | // If this is a stat on '/stats/(key)', return a directory stat.
230 | if (match.groups.key && !match.groups.filename) return cb(0, Stat.directory({ uid: process.getuid(), gid: process.getgid() })) 231 | 232 | const filename = match.groups.filename 233 | if (filename !== 'networking.json' && filename !== 'storage.json') return cb(fuse.ENOENT) 234 | 235 | // Otherwise, this is a stat on a specific virtual file, so return a file stat. 236 | return cb(0, Stat.file({ uid: process.getuid(), gid: process.getgid(), size: 4096 * 1024 })) 237 | } 238 | 239 | if (op === 'readdir') { 240 | // If this is a readdir on '/Stats', return a listing of all drives. 241 | if (!match.groups.key) { 242 | try { 243 | const driveKeys = (await this.driveManager.listDrives()).map(d => d.key) 244 | return cb(0, driveKeys) 245 | } catch (err) { 246 | return cb(fuse.EIO) 247 | } 248 | } 249 | // If this is a readdir on '/Stats/(key)', return the two JSON filenames. 250 | if (match.groups.key && !match.groups.filename) return cb(0, ['networking.json', 'storage.json']) 251 | // Otherwise return an empty list 252 | return cb(0, []) 253 | } 254 | 255 | if (op === 'open') { 256 | if (!match.groups.key || !match.groups.filename) return cb(fuse.ENOENT) 257 | const filename = match.groups.filename 258 | if (filename !== 'networking.json' && filename !== 'storage.json') return cb(fuse.ENOENT) 259 | 260 | try { 261 | const key = datEncoding.decode(match.groups.key) 262 | log.debug({ filename }, 'opening stats file for drive') 263 | const drive = await this.driveManager.get(key) 264 | var stats = null 265 | 266 | if (filename === 'networking.json') { 267 | stats = await this.driveManager.getDriveStats(drive) 268 | stats.forEach(stat => { 269 | if (stat.metadata) { 270 | if (stat.metadata.key) stat.metadata.key = datEncoding.encode(stat.metadata.key) 271 | if (stat.metadata.discoveryKey) stat.metadata.discoveryKey = datEncoding.encode(stat.metadata.discoveryKey) 272 | } 273 | if (stat.content) { 274 | if (stat.content.key) stat.content.key = datEncoding.encode(stat.content.key) 275 | if (stat.content.discoveryKey) stat.content.discoveryKey = datEncoding.encode(stat.content.discoveryKey) 276 | } 277 | }) 278 | } else { 279 | stats = await new Promise((resolve, reject) => { 280 | drive.stats('/', (err, sts) => { 281 | if (err) return reject(err) 282 | const stObj = {} 283 | for (const [dir, st] of sts) { 284 | stObj[dir] = st 285 | } 286 | return resolve(stObj) 287 | }) 288 | }) 289 | } 290 | 291 | const fd = this._virtualFiles.open(JSON.stringify(stats, null, 2)) 292 | return cb(0, fd) 293 | } catch (err) { 294 | return cb(fuse.ENOENT) 295 | } 296 | } 297 | 298 | if (op === 'read') { 299 | if (!match.groups.key || !match.groups.filename) return cb(fuse.ENOENT) 300 | const filename = match.groups.filename 301 | if (filename !== 'networking.json' && filename !== 'storage.json') return cb(fuse.ENOENT) 302 | return this._virtualFiles.read(...[...args, cb]) 303 | } 304 | 305 | if (op === 'release') { 306 | if (!match.groups.key || !match.groups.filename) return cb(fuse.ENOENT) 307 | const filename = match.groups.filename 308 | if (filename !== 'networking.json' && filename !== 'storage.json') return cb(fuse.ENOENT) 309 | log.debug({ filename }, 'closing stats file') 310 | return this._virtualFiles.close(...[...args, cb]) 311 | } 312 | 313 | return handlers[op].apply(null, [...args, cb]) 314 | } 315 | } 316 | 317 | const ActiveHandler = { 318 | id: 'active', 319 | test: '^\/Active', 320 | ops: ['readdir', 'getattr', 'symlink', 'readlink'], 321 | search: /^\/(Active)(\/(?<key>\w+)\.json\/?)?/, 322 |
handler: async (op, match, args, cb) => { 323 | if (op === 'getattr') { 324 | // If this is a stat on '/active', return a directory stat. 325 | if (!match.groups.key && match.input.length !== 7) return cb(fuse.ENOENT) 326 | if (!match.groups.key) return cb(0, Stat.directory({ uid: process.getuid(), gid: process.getgid() })) 327 | // Otherwise, it is a stat on a particular key, so return a symlink to the /stats dir for that key 328 | return cb(0, Stat.symlink({ uid: process.getuid(), gid: process.getgid() })) 329 | } 330 | 331 | if (op === 'readdir') { 332 | // TODO: This is pretty expensive, so it should be cached. 333 | let networkConfigurations = [...await this.driveManager.getAllNetworkConfigurations()] 334 | networkConfigurations = networkConfigurations 335 | .filter(([, value]) => value.opts && value.opts.announce) 336 | .map((([, value]) => value.key + '.json')) 337 | return cb(0, networkConfigurations) 338 | } 339 | 340 | if (op === 'readlink') { 341 | if (!match.groups.key) return cb(fuse.ENOENT) 342 | return cb(0, `${constants.mountpoint}/Network/Stats/${match.groups.key}/networking.json`) 343 | } 344 | 345 | return handlers[op].apply(null, [...args, cb]) 346 | } 347 | } 348 | 349 | const networkDirInterceptors = [ 350 | StatsHandler, 351 | ActiveHandler, 352 | ByKeyHandler 353 | ] 354 | const rootInterceptors = [ 355 | RootListHandler, 356 | NetworkInfoHandler, 357 | NetworkHandler 358 | ] 359 | for (const interceptor of rootInterceptors) { 360 | rootInterceptorIndex.set(interceptor.id, interceptor) 361 | } 362 | for (const interceptor of networkDirInterceptors) { 363 | networkInterceptorIndex.set(interceptor.id, interceptor) 364 | } 365 | 366 | const wrappedRootHandlers = {} 367 | const wrappedNetworkHandlers = {} 368 | 369 | for (const handlerName of Object.getOwnPropertyNames(handlers)) { 370 | const baseHandler = handlers[handlerName] 371 | if (typeof baseHandler !== 'function') { 372 | wrappedRootHandlers[handlerName] = baseHandler 373 | } else { 374 | wrappedRootHandlers[handlerName] = wrapHandler(rootInterceptors, rootInterceptorIndex, 0, handlerName, baseHandler) 375 | wrappedNetworkHandlers[handlerName] = wrapHandler(networkDirInterceptors, networkInterceptorIndex, NETWORK_PATH.length + 1, handlerName, baseHandler) 376 | } 377 | } 378 | 379 | return wrappedRootHandlers 380 | 381 | function wrapHandler (interceptors, index, depth, handlerName, handler) { 382 | log.debug({ handlerName }, 'wrapping handler') 383 | const activeInterceptors = interceptors.filter(({ ops }) => ops === '*' || (ops.indexOf(handlerName) !== -1)) 384 | if (!activeInterceptors.length) return handler 385 | 386 | const matcher = new RegExp(activeInterceptors.map(({ test, id }) => `(?<${id}>${test})`).join('|')) 387 | 388 | return function () { 389 | const args = [...arguments].slice(0, -1) 390 | const matchPosition = handlerName === 'symlink' ? 1 : 0 391 | if (depth) { 392 | args[matchPosition] = args[matchPosition].slice(depth) 393 | } 394 | 395 | const matchArg = args[matchPosition] 396 | const match = matcher.exec(matchArg) 397 | if (!match) return handler(...arguments) 398 | 399 | if (log.isLevelEnabled('trace')) { 400 | log.trace({ id: match[1], path: args[0] }, 'syscall interception') 401 | } 402 | 403 | // TODO: Don't iterate here.
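// Each interceptor's `test` pattern becomes a named alternative in `matcher`, keyed by the interceptor's `id`.
// For a root-level `readdir`, for example, the combined matcher would look roughly like
// /(?<root>^\/$)|(?<networkinfo>^\/Network\/?$)|(?<network>^\/Network\/.+)/ (illustrative only).
// The loop below picks the first non-empty named group to decide which interceptor claimed the path,
// then re-runs that interceptor's own `search` regex to build the match object handed to its handler.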
404 | for (const key in match.groups) { 405 | if (!match.groups[key]) continue 406 | var id = key 407 | break 408 | } 409 | 410 | const { handler: wrappedHandler, search } = index.get(id) 411 | return wrappedHandler(handlerName, search.exec(matchArg), args, arguments[arguments.length - 1]) 412 | } 413 | } 414 | 415 | function dispatchNetworkCall (op, match, args, cb) { 416 | return wrappedNetworkHandlers[op](...args, cb) 417 | } 418 | } 419 | 420 | _getMountPath (path) { 421 | if (!this._rootDrive && path !== constants.mountpoint) { 422 | throw new Error(`You can only mount the root drive at ${constants.mountpoint}`) 423 | } 424 | if (path.startsWith(this._rootMnt) && path !== this._rootMnt) { 425 | const relativePath = path.slice(this._rootMnt.length) 426 | return { path: relativePath, root: false } 427 | } 428 | return { path: constants.mountpoint, root: true } 429 | } 430 | 431 | async _infoForPath (path) { 432 | if (!this._rootDrive) throw new Error('Cannot get mountpoint info when a root drive is not mounted.') 433 | if (!path.startsWith(this._rootMnt)) throw new Error(`The mountpoint must be beneath ${constants.mountpoint}.`) 434 | const self = this 435 | 436 | if (path !== this._rootMnt) { 437 | const relativePath = path.slice(this._rootMnt.length) 438 | const info = await getSubdriveInfo(relativePath) 439 | return { 440 | ...info, 441 | root: false, 442 | relativePath 443 | } 444 | } 445 | 446 | return { 447 | key: this._rootDrive.key, 448 | writable: true, 449 | mountPath: '', 450 | root: true 451 | } 452 | 453 | function getSubdriveInfo (relativePath) { 454 | const noopFilePath = '__does_not_exist' 455 | return new Promise((resolve, reject) => { 456 | self._rootDrive.stat(p.join(relativePath, noopFilePath ), { trie: true }, (err, stat, trie, _, __, mountPath) => { 457 | if (err && err.errno !== 2) return reject(err) 458 | else if (err && !trie) return resolve(null) 459 | return resolve({ 460 | key: trie.key, 461 | mountPath: mountPath.slice(0, mountPath.length - noopFilePath.length), 462 | writable: trie.feed.writable 463 | }) 464 | }) 465 | }) 466 | } 467 | } 468 | 469 | async mount (mnt, mountOpts = {}) { 470 | const self = this 471 | if (!this._rootDrive && mnt !== constants.mountpoint) throw new Error('Must mount a root drive before mounting subdrives.') 472 | mnt = mnt || constants.mountpoint 473 | 474 | await ensureFuse() 475 | log.debug({ mnt }, 'mounting a drive') 476 | 477 | // TODO: Stop using the hash field to pass this flag once hashes are supported. 
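// Note: when mounting at the root mountpoint, any previously saved root drive configuration is reused
// unless the caller passes the sentinel hash value 'force', in which case the options supplied with this
// request are used as-is.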
478 | if (mnt === constants.mountpoint && (!mountOpts.hash || (mountOpts.hash.toString() !== 'force'))) { 479 | const rootDriveInfo = await this._getRootDriveInfo() 480 | if (rootDriveInfo) mountOpts = rootDriveInfo.opts 481 | } 482 | 483 | const drive = await this.driveManager.get(mountOpts.key, { ...mountOpts }) 484 | 485 | if (!this._rootDrive) { 486 | return mountRoot(drive) 487 | } 488 | 489 | const { path: relativePath } = this._getMountPath(mnt) 490 | return mountSubdrive(relativePath, drive) 491 | 492 | async function mountSubdrive (relativePath, drive) { 493 | log.debug({ discoveryKey: drive.discoveryKey.toString('hex') }, 'mounting a sub-hyperdrive') 494 | mountOpts.uid = process.getuid() 495 | mountOpts.gid = process.getgid() 496 | return new Promise((resolve, reject) => { 497 | self._rootDrive.mount(relativePath, drive.key, mountOpts, err => { 498 | if (err) return reject(err) 499 | return resolve({ ...mountOpts, key: drive.key }) 500 | }) 501 | }) 502 | } 503 | 504 | async function mountRoot (drive) { 505 | log.debug({ discoveryKey: drive.discoveryKey.toString('hex') }, 'mounting the root drive') 506 | 507 | const fuse = new HyperdriveFuse(drive, constants.mountpoint, { 508 | force: true, 509 | displayFolder: true, 510 | log: self._fuseLogger.trace.bind(self._fuseLogger) 511 | }) 512 | 513 | const handlers = fuse.getBaseHandlers() 514 | const wrappedHandlers = self._wrapHandlers(handlers) 515 | await fuse.mount(wrappedHandlers) 516 | 517 | log.debug({ mnt }, 'mounted the root drive') 518 | mountOpts.key = drive.key 519 | 520 | await self.db.put('root-drive', { mnt, opts: { ...mountOpts, key: datEncoding.encode(drive.key) } }) 521 | 522 | self._rootDrive = drive 523 | self._rootMnt = mnt 524 | self._rootFuse = fuse 525 | self._rootHandler = handlers 526 | 527 | // Ensure that a session is opened for every mountpoint accessed through the root. 528 | // TODO: Need better resource management here -- this will leak. 529 | self._rootDrive.on('mount', (feed, mountInfo) => { 530 | feed.ready(err => { 531 | if (err) log.error({ error: err }, 'mountpoint error') 532 | self.driveManager.createSession(null, feed.key) 533 | .then(() => { 534 | log.info({ discoveryKey: feed.discoveryKey.toString('hex') }, 'created session for mountpoint') 535 | }) 536 | .catch(err => { 537 | log.error({ error: err }, 'could not create session for mountpoint') 538 | }) 539 | }) 540 | }) 541 | 542 | return mountOpts 543 | } 544 | } 545 | 546 | async unmount (mnt) { 547 | log.debug({ mnt }, 'unmounting drive at mountpoint') 548 | await ensureFuse() 549 | const self = this 550 | 551 | if (!this._rootMnt) return 552 | 553 | // If a mountpoint is not specified, then it is assumed to be the root mount. 
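// Unmounting the root drive tears down the entire FUSE mount and clears the cached root state,
// whereas unmounting a subdrive only removes that mountpoint from within the root drive.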
554 | if (!mnt || mnt === constants.mountpoint) return unmountRoot() 555 | 556 | // Otherwise, unmount the subdrive 557 | const { path, root } = this._getMountPath(mnt) 558 | if (root) return unmountRoot() 559 | return unmountSubdrive(path) 560 | 561 | async function unmountRoot () { 562 | log.debug({ mnt: self._rootMnt }, 'unmounting the root drive') 563 | 564 | await self._rootFuse.unmount() 565 | 566 | self._rootDrive = null 567 | self._rootMnt = null 568 | self._rootFuse = null 569 | self._rootHandler = null 570 | } 571 | 572 | function unmountSubdrive (path) { 573 | return new Promise((resolve, reject) => { 574 | self._rootDrive.unmount(path, err => { 575 | if (err) return reject(err) 576 | return resolve() 577 | }) 578 | }) 579 | } 580 | } 581 | 582 | async mountDrive (path, opts) { 583 | if (!this._rootDrive) throw new Error('The root hyperdrive must first be created before mounting additional drives.') 584 | if (!this._rootMnt || !path.startsWith(this._rootMnt)) throw new Error('Drives can only be mounted within the mountpoint.') 585 | 586 | // The corestore name is not very important here, since the initial drive will be discarded after mount. 587 | const drive = await this._createDrive(null, { ...this.opts, name: crypto.randomBytes(64).toString('hex') }) 588 | 589 | log.debug({ path, discoveryKey: drive.discoveryKey.toString('hex') }, 'mounting a drive at a path') 590 | return new Promise((resolve, reject) => { 591 | const innerPath = path.slice(this._rootMnt.length) 592 | this._rootDrive.mount(innerPath, opts, err => { 593 | if (err) return reject(err) 594 | log.debug({ path, discoveryKey: drive.discoveryKey.toString('hex') }, 'drive mounted') 595 | return resolve() 596 | }) 597 | }) 598 | } 599 | 600 | async info (mnt) { 601 | await ensureFuse() 602 | const { key, mountPath, writable, relativePath } = await this._infoForPath(mnt) 603 | if (!key) throw new Error(`A drive is not mounted at path: ${mnt}`) 604 | return { 605 | key: datEncoding.encode(key), 606 | mountPath: mountPath, 607 | writable: writable, 608 | path: relativePath 609 | } 610 | } 611 | 612 | list () { 613 | return new Map([...this._drives]) 614 | } 615 | 616 | // RPC Methods 617 | 618 | async _rpcMount (call) { 619 | var mountOpts = call.request.getOpts() 620 | const mnt = call.request.getPath() 621 | if (mountOpts) mountOpts = fromHyperdriveOptions(mountOpts) 622 | 623 | if (!mnt) throw new Error('A mount request must specify a mountpoint.') 624 | const mountInfo = await this.mount(mnt, mountOpts) 625 | 626 | const rsp = new rpc.fuse.messages.MountResponse() 627 | rsp.setMountinfo(toHyperdriveOptions(mountInfo)) 628 | rsp.setPath(mnt) 629 | 630 | return rsp 631 | } 632 | 633 | async _rpcUnmount (call) { 634 | const mnt = call.request.getPath() 635 | 636 | await this.unmount(mnt) 637 | 638 | return new rpc.fuse.messages.UnmountResponse() 639 | } 640 | 641 | async _rpcStatus (call) { 642 | const rsp = new rpc.fuse.messages.FuseStatusResponse() 643 | rsp.setAvailable(true) 644 | return new Promise((resolve, reject) => { 645 | hyperfuse.isConfigured((err, configured) => { 646 | if (err) return reject(err) 647 | rsp.setConfigured(configured) 648 | return resolve(rsp) 649 | }) 650 | }) 651 | } 652 | 653 | async _rpcInfo (call) { 654 | const rsp = new rpc.fuse.messages.InfoResponse() 655 | const mnt = call.request.getPath() 656 | 657 | const { key, mountPath, writable, relativePath } = await this.info(mnt) 658 | rsp.setKey(key) 659 | rsp.setPath(relativePath) 660 | rsp.setMountpath(mountPath) 661 | 
rsp.setWritable(writable) 662 | 663 | return rsp 664 | } 665 | 666 | async _rpcDownload (call) { 667 | const rsp = new rpc.fuse.messages.DownloadResponse() 668 | const path = call.request.getPath() 669 | 670 | const { downloadId, sessionId } = await this.download(path) 671 | rsp.setDownloadid(downloadId) 672 | rsp.setSessionid(sessionId) 673 | 674 | return rsp 675 | } 676 | } 677 | 678 | class NetworkSet { 679 | constructor () { 680 | this.list = ['Active', 'Stats'] 681 | } 682 | 683 | has (name) { 684 | return this.list.includes(name) 685 | } 686 | 687 | add (name) { 688 | if (this.list[this.list.length - 1] === name) return 689 | 690 | const idx = this.list.indexOf(name, 2) 691 | if (idx > -1) this.list.splice(idx, 1) 692 | this.list.push(name) 693 | 694 | if (this.list.length >= 18) this.list.shift() 695 | } 696 | } 697 | 698 | function ensureFuse () { 699 | return new Promise((resolve, reject) => { 700 | hyperfuse.isConfigured((err, configured) => { 701 | if (err) return reject(err) 702 | if (!configured) return reject(new Error('FUSE is not configured. Please run `hyperdrive fuse-setup` first.')) 703 | return resolve() 704 | }) 705 | }) 706 | } 707 | 708 | module.exports = FuseManager 709 | -------------------------------------------------------------------------------- /test/hyperdrive.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | 3 | const collectStream = require('stream-collector') 4 | const { createOne } = require('./util/create') 5 | 6 | test('can write/read a file from a remote hyperdrive', async t => { 7 | const { client, cleanup } = await createOne() 8 | 9 | try { 10 | const drive = await client.drive.get() 11 | t.true(drive.key) 12 | t.same(drive.id, 1) 13 | 14 | await drive.writeFile('hello', 'world') 15 | 16 | const contents = await drive.readFile('hello', { encoding: 'utf8' }) 17 | t.same(contents, 'world') 18 | 19 | await drive.close() 20 | } catch (err) { 21 | t.fail(err) 22 | } 23 | 24 | await cleanup() 25 | t.end() 26 | }) 27 | 28 | test('can write/read a large file from a remote hyperdrive', async t => { 29 | const { client, cleanup } = await createOne() 30 | 31 | const content = Buffer.alloc(3.9e7).fill('abcdefghi') 32 | 33 | try { 34 | const drive = await client.drive.get() 35 | t.true(drive.key) 36 | t.same(drive.id, 1) 37 | 38 | await drive.writeFile('hello', content) 39 | 40 | const contents = await drive.readFile('hello') 41 | t.same(contents, content) 42 | 43 | await drive.close() 44 | } catch (err) { 45 | t.fail(err) 46 | } 47 | 48 | await cleanup() 49 | t.end() 50 | }) 51 | 52 | test('can write/read file metadata alongside a file', async t => { 53 | const { client, cleanup } = await createOne() 54 | 55 | try { 56 | const drive = await client.drive.get() 57 | t.true(drive.key) 58 | t.same(drive.id, 1) 59 | 60 | await drive.writeFile('hello', 'world', { 61 | metadata: { 62 | hello: Buffer.from('world') 63 | } 64 | }) 65 | 66 | const stat = await drive.stat('hello') 67 | t.same(stat.metadata.hello, Buffer.from('world')) 68 | 69 | await drive.close() 70 | } catch (err) { 71 | t.fail(err) 72 | } 73 | 74 | await cleanup() 75 | t.end() 76 | }) 77 | 78 | test('can update file metadata', async t => { 79 | const { client, cleanup } = await createOne() 80 | 81 | try { 82 | const drive = await client.drive.get() 83 | t.true(drive.key) 84 | t.same(drive.id, 1) 85 | 86 | await drive.writeFile('hello', 'world', { 87 | metadata: { 88 | hello: Buffer.from('world') 89 | } 90 | }) 91 | 92 | var stat = 
await drive.stat('hello') 93 | t.same(stat.metadata.hello, Buffer.from('world')) 94 | 95 | await drive.updateMetadata('hello', { 96 | hello: Buffer.from('goodbye') 97 | }) 98 | 99 | stat = await drive.stat('hello') 100 | t.same(stat.metadata.hello, Buffer.from('goodbye')) 101 | 102 | await drive.close() 103 | } catch (err) { 104 | t.fail(err) 105 | } 106 | 107 | await cleanup() 108 | t.end() 109 | }) 110 | 111 | test('can delete metadata', async t => { 112 | const { client, cleanup } = await createOne() 113 | 114 | try { 115 | const drive = await client.drive.get() 116 | t.true(drive.key) 117 | t.same(drive.id, 1) 118 | 119 | await drive.writeFile('hello', 'world', { 120 | metadata: { 121 | first: Buffer.from('first'), 122 | second: Buffer.from('second') 123 | } 124 | }) 125 | 126 | var stat = await drive.stat('hello') 127 | t.same(stat.metadata.first, Buffer.from('first')) 128 | t.same(stat.metadata.second, Buffer.from('second')) 129 | 130 | await drive.deleteMetadata('hello', ['first']) 131 | 132 | stat = await drive.stat('hello') 133 | t.false(stat.metadata.first) 134 | t.same(stat.metadata.second, Buffer.from('second')) 135 | 136 | await drive.close() 137 | } catch (err) { 138 | t.fail(err) 139 | } 140 | 141 | await cleanup() 142 | t.end() 143 | }) 144 | 145 | test('can write/read a file from a remote hyperdrive using stream methods', async t => { 146 | const { client, cleanup } = await createOne() 147 | 148 | try { 149 | const drive = await client.drive.get() 150 | t.true(drive.key) 151 | t.same(drive.id, 1) 152 | 153 | const writeStream = drive.createWriteStream('hello', { uid: 999, gid: 999 }) 154 | writeStream.write('hello') 155 | writeStream.write('there') 156 | writeStream.end('friend') 157 | 158 | await new Promise((resolve, reject) => { 159 | writeStream.on('error', reject) 160 | writeStream.on('finish', resolve) 161 | }) 162 | 163 | const readStream = await drive.createReadStream('hello', { start: 5, length: Buffer.from('there').length + 1 }) 164 | const content = await new Promise((resolve, reject) => { 165 | collectStream(readStream, (err, bufs) => { 166 | if (err) return reject(err) 167 | return resolve(Buffer.concat(bufs)) 168 | }) 169 | }) 170 | t.same(content, Buffer.from('theref')) 171 | 172 | const stat = await drive.stat('hello') 173 | t.same(stat.uid, 999) 174 | t.same(stat.gid, 999) 175 | 176 | await drive.close() 177 | } catch (err) { 178 | t.fail(err) 179 | } 180 | 181 | await cleanup() 182 | t.end() 183 | }) 184 | 185 | test('assorted read parameters to createReadStream', async t => { 186 | const { client, cleanup } = await createOne() 187 | 188 | try { 189 | const drive = await client.drive.get() 190 | t.true(drive.key) 191 | t.same(drive.id, 1) 192 | 193 | let blocks = ['hello', 'hello', 'world', 'world'] 194 | let complete = blocks.join('') 195 | let tests = [ 196 | { 197 | params: {}, 198 | value: complete 199 | }, 200 | { 201 | params: { end: 10 }, 202 | value: complete.slice(0, 10 + 1) 203 | }, 204 | { 205 | params: { start: 4, end: 10 }, 206 | value: complete.slice(4, 10 + 1) 207 | }, 208 | { 209 | params: { end: complete.length - 1 }, 210 | value: complete 211 | }, 212 | { 213 | params: { start: 5, length: 5 }, 214 | value: complete.slice(5, 10) 215 | } 216 | ] 217 | 218 | const writeStream = drive.createWriteStream('hello', { uid: 999, gid: 999 }) 219 | for (let block of blocks) { 220 | writeStream.write(block) 221 | } 222 | writeStream.end() 223 | 224 | await new Promise((resolve, reject) => { 225 | writeStream.on('error', reject) 226 | 
writeStream.on('finish', resolve) 227 | }) 228 | 229 | for (let { params, value } of tests) { 230 | const readStream = await drive.createReadStream('hello', params) 231 | const content = await new Promise((resolve, reject) => { 232 | collectStream(readStream, (err, bufs) => { 233 | if (err) return reject(err) 234 | return resolve(Buffer.concat(bufs)) 235 | }) 236 | }) 237 | t.same(content.toString('utf8'), value) 238 | } 239 | 240 | await drive.close() 241 | } catch (err) { 242 | t.fail(err) 243 | } 244 | 245 | await cleanup() 246 | t.end() 247 | }) 248 | 249 | test('reading an invalid file propogates error', async t => { 250 | const { client, cleanup } = await createOne() 251 | 252 | try { 253 | const drive = await client.drive.get() 254 | t.true(drive.key) 255 | t.same(drive.id, 1) 256 | 257 | try { 258 | const readStream = await drive.createReadStream('hello', { start: 5, length: Buffer.from('there').length + 1 }) 259 | await new Promise((resolve, reject) => { 260 | collectStream(readStream, (err, bufs) => { 261 | if (err) return reject(err) 262 | return resolve(Buffer.concat(bufs)) 263 | }) 264 | }) 265 | t.fail('read stream did not throw error') 266 | } catch (err) { 267 | t.pass('read stream threw error') 268 | } 269 | 270 | await drive.close() 271 | } catch (err) { 272 | t.fail(err) 273 | } 274 | 275 | await cleanup() 276 | t.end() 277 | }) 278 | 279 | test('can stat a file from a remote hyperdrive', async t => { 280 | const { client, cleanup } = await createOne() 281 | 282 | try { 283 | const drive = await client.drive.get() 284 | 285 | await drive.writeFile('hello', 'world') 286 | 287 | const stat = await drive.stat('hello') 288 | t.same(stat.size, Buffer.from('world').length) 289 | t.same(stat.uid, 0) 290 | t.same(stat.gid, 0) 291 | 292 | await drive.close() 293 | } catch (err) { 294 | t.fail(err) 295 | } 296 | 297 | await cleanup() 298 | t.end() 299 | }) 300 | 301 | test('can list a directory from a remote hyperdrive', async t => { 302 | const { client, cleanup } = await createOne() 303 | 304 | try { 305 | const drive = await client.drive.get() 306 | 307 | await drive.writeFile('hello', 'world') 308 | await drive.writeFile('goodbye', 'dog') 309 | await drive.writeFile('adios', 'amigo') 310 | 311 | const files = await drive.readdir('') 312 | t.same(files.length, 3) 313 | t.notEqual(files.indexOf('hello'), -1) 314 | t.notEqual(files.indexOf('goodbye'), -1) 315 | t.notEqual(files.indexOf('adios'), -1) 316 | 317 | await drive.close() 318 | } catch (err) { 319 | t.fail(err) 320 | } 321 | 322 | await cleanup() 323 | t.end() 324 | }) 325 | 326 | test('can list a directory from a remote hyperdrive with stats', async t => { 327 | const { client, cleanup } = await createOne() 328 | 329 | try { 330 | const drive = await client.drive.get() 331 | 332 | await drive.writeFile('hello', 'world') 333 | await drive.writeFile('goodbye', 'dog') 334 | await drive.writeFile('adios', 'amigo') 335 | const expected = new Set(['hello', 'goodbye', 'adios']) 336 | 337 | const objs = await drive.readdir('', { includeStats: true }) 338 | t.same(objs.length, 3) 339 | for (const { name, stat, mount, innerPath } of objs) { 340 | t.true(expected.has(name)) 341 | t.same(stat.mode, 33188) 342 | t.true(mount.key.equals(drive.key)) 343 | t.same(innerPath, name) 344 | expected.delete(name) 345 | } 346 | 347 | await drive.close() 348 | } catch (err) { 349 | t.fail(err) 350 | } 351 | 352 | await cleanup() 353 | t.end() 354 | }) 355 | 356 | test('can list a large directory from a remote hyperdrive with stats', async t => 
{ 357 | const { client, cleanup } = await createOne() 358 | const NUM_FILES = 5000 359 | const PARALLEL_WRITE = true 360 | 361 | try { 362 | const drive = await client.drive.get() 363 | 364 | const proms = [] 365 | for (let i = 0; i < NUM_FILES; i++) { 366 | const prom = drive.writeFile(String(i), String(i)) 367 | if (PARALLEL_WRITE) proms.push(prom) 368 | else await prom 369 | } 370 | if (PARALLEL_WRITE) await Promise.all(proms) 371 | 372 | const objs = await drive.readdir('', { includeStats: true }) 373 | t.same(objs.length, NUM_FILES) 374 | let statError = null 375 | let mountError = null 376 | for (const { stat, mount } of objs) { 377 | if (stat.mode !== 33188) statError = 'stat mode is incorrect' 378 | if (!mount.key.equals(drive.key)) mountError = 'mount key is not the drive key' 379 | } 380 | if (statError) t.fail(statError) 381 | if (mountError) t.fail(mountError) 382 | 383 | await drive.close() 384 | } catch (err) { 385 | t.fail(err) 386 | } 387 | 388 | await cleanup() 389 | t.end() 390 | }) 391 | 392 | test('can create a diff stream on a remote hyperdrive', async t => { 393 | const { client, cleanup } = await createOne() 394 | 395 | try { 396 | const drive1 = await client.drive.get() 397 | const drive2 = await client.drive.get() 398 | 399 | await drive1.writeFile('hello', 'world') 400 | const v1 = await drive1.version() 401 | await drive1.writeFile('goodbye', 'dog') 402 | const v2 = await drive1.version() 403 | await drive1.mount('d2', { key: drive2.key }) 404 | const v3 = await drive1.version() 405 | await drive1.unmount('d2') 406 | 407 | const diff1 = await drive1.createDiffStream() 408 | const checkout = await drive1.checkout(v2) 409 | const diff2 = await checkout.createDiffStream(v1) 410 | const diff3 = await drive1.createDiffStream(v3) 411 | const checkout2 = await drive1.checkout(v3) 412 | const diff4 = await checkout2.createDiffStream(v2) 413 | 414 | await validate(diff1, [ 415 | { type: 'put', name: 'goodbye' }, 416 | { type: 'put', name: 'hello' } 417 | ]) 418 | await validate(diff2, [ 419 | { type: 'put', name: 'goodbye' } 420 | ]) 421 | await validate(diff3, [ 422 | // TODO: The first is a false positive. 
423 | { type: 'put', name: 'goodbye' }, 424 | { type: 'unmount', name: 'd2' } 425 | ]) 426 | await validate(diff4, [ 427 | { type: 'mount', name: 'd2', key: drive2.key } 428 | ]) 429 | 430 | await drive1.close() 431 | await drive2.close() 432 | } catch (err) { 433 | t.fail(err) 434 | } 435 | 436 | await cleanup() 437 | t.end() 438 | 439 | async function validate (stream, expected) { 440 | return new Promise((resolve, reject) => { 441 | var seen = 0 442 | stream.on('end', () => { 443 | t.same(seen, expected.length) 444 | return resolve() 445 | }) 446 | stream.on('error', t.fail.bind(t)) 447 | stream.on('data', ({ type, name, value }) => { 448 | t.same(name, expected[seen].name) 449 | t.same(type, expected[seen].type) 450 | if (type === 'mount') t.same(value.mount.key, expected[seen].key) 451 | seen++ 452 | }) 453 | }) 454 | } 455 | }) 456 | 457 | test('can read/write multiple remote hyperdrives on one server', async t => { 458 | const { client, cleanup } = await createOne() 459 | var startingId = 1 460 | 461 | const files = [ 462 | ['hello', 'world'], 463 | ['goodbye', 'dog'], 464 | ['random', 'file'] 465 | ] 466 | 467 | var drives = [] 468 | for (const [file, content] of files) { 469 | drives.push(await createAndWrite(file, content)) 470 | } 471 | 472 | for (let i = 0; i < files.length; i++) { 473 | const [file, content] = files[i] 474 | const drive = drives[i] 475 | const readContent = await drive.readFile(file) 476 | t.same(readContent, Buffer.from(content)) 477 | } 478 | 479 | async function createAndWrite (file, content) { 480 | const drive = await client.drive.get() 481 | t.same(drive.id, startingId++) 482 | await drive.writeFile(file, content) 483 | return drive 484 | } 485 | 486 | await cleanup() 487 | t.end() 488 | }) 489 | 490 | test('can mount a drive within a remote hyperdrive', async t => { 491 | const { client, cleanup } = await createOne() 492 | 493 | try { 494 | const drive1 = await client.drive.get() 495 | 496 | const drive2 = await client.drive.get() 497 | t.notEqual(drive1.key, drive2.key) 498 | 499 | await drive1.mount('a', { key: drive2.key }) 500 | 501 | await drive1.writeFile('a/hello', 'world') 502 | await drive1.writeFile('a/goodbye', 'dog') 503 | await drive1.writeFile('adios', 'amigo') 504 | await drive2.writeFile('hamster', 'wheel') 505 | 506 | t.same(await drive1.readFile('adios'), Buffer.from('amigo')) 507 | t.same(await drive1.readFile('a/hello'), Buffer.from('world')) 508 | t.same(await drive2.readFile('hello'), Buffer.from('world')) 509 | t.same(await drive2.readFile('hamster'), Buffer.from('wheel')) 510 | 511 | await drive1.close() 512 | await drive2.close() 513 | } catch (err) { 514 | t.fail(err) 515 | } 516 | 517 | await cleanup() 518 | t.end() 519 | }) 520 | 521 | test('can mount a drive within a remote hyperdrive multiple times', async t => { 522 | const { client, cleanup } = await createOne() 523 | 524 | try { 525 | const drive1 = await client.drive.get() 526 | const drive2 = await client.drive.get() 527 | await drive2.writeFile('x', 'y') 528 | 529 | await drive1.mount('a', { key: drive2.key }) 530 | await drive1.mount('b', { key: drive2.key }) 531 | 532 | t.same(await drive1.readFile('a/x'), Buffer.from('y')) 533 | t.same(await drive1.readFile('b/x'), Buffer.from('y')) 534 | 535 | await drive1.close() 536 | await drive2.close() 537 | } catch (err) { 538 | t.fail(err) 539 | } 540 | 541 | await cleanup() 542 | t.end() 543 | }) 544 | 545 | test('can mount a versioned drive within a remote hyperdrive', async t => { 546 | const { client, cleanup } = await 
createOne() 547 | 548 | try { 549 | const drive1 = await client.drive.get() 550 | 551 | const drive2 = await client.drive.get() 552 | await drive2.writeFile('hamster', 'wheel') 553 | const version1 = await drive2.version() 554 | await drive2.writeFile('blah', 'blahblah') 555 | 556 | await drive1.mount('a', { key: drive2.key }) 557 | await drive1.mount('aStatic', { key: drive2.key, version: version1 }) 558 | 559 | await drive1.writeFile('a/hello', 'world') 560 | await drive1.writeFile('adios', 'amigo') 561 | 562 | t.same(await drive1.readFile('adios'), Buffer.from('amigo')) 563 | t.same(await drive1.readFile('a/hello'), Buffer.from('world')) 564 | t.same(await drive2.readFile('hello'), Buffer.from('world')) 565 | t.same(await drive2.readFile('hamster'), Buffer.from('wheel')) 566 | t.same(await drive1.readFile('aStatic/hamster'), Buffer.from('wheel')) 567 | try { 568 | await drive1.readFile('aStatic/blah') 569 | t.fail('aStatic should be a versioned mount') 570 | } catch (err) { 571 | t.true(err) 572 | } 573 | 574 | await drive1.close() 575 | await drive2.close() 576 | } catch (err) { 577 | t.fail(err) 578 | } 579 | 580 | await cleanup() 581 | t.end() 582 | }) 583 | 584 | test('can unmount a drive within a remote hyperdrive', async t => { 585 | const { client, cleanup } = await createOne() 586 | 587 | try { 588 | const drive1 = await client.drive.get() 589 | const drive2 = await client.drive.get() 590 | t.notEqual(drive1.key, drive2.key) 591 | 592 | await drive1.mount('a', { key: drive2.key }) 593 | 594 | await drive1.writeFile('a/hello', 'world') 595 | await drive1.writeFile('a/goodbye', 'dog') 596 | await drive1.writeFile('adios', 'amigo') 597 | await drive2.writeFile('hamster', 'wheel') 598 | 599 | t.same(await drive1.readFile('adios'), Buffer.from('amigo')) 600 | t.same(await drive1.readFile('a/hello'), Buffer.from('world')) 601 | t.same(await drive2.readFile('hello'), Buffer.from('world')) 602 | t.same(await drive2.readFile('hamster'), Buffer.from('wheel')) 603 | 604 | await drive1.unmount('a') 605 | try { 606 | await drive1.readFile('a/hello') 607 | } catch (err) { 608 | t.true(err) 609 | t.same(err.code, 2) 610 | } 611 | 612 | await drive1.close() 613 | await drive2.close() 614 | } catch (err) { 615 | t.fail(err) 616 | } 617 | 618 | await cleanup() 619 | t.end() 620 | }) 621 | 622 | test('can watch a remote hyperdrive', async t => { 623 | const { client, cleanup } = await createOne() 624 | 625 | var triggered = 0 626 | 627 | try { 628 | const drive = await client.drive.get() 629 | 630 | const unwatch = drive.watch('', () => { 631 | triggered++ 632 | }) 633 | 634 | await drive.writeFile('hello', 'world') 635 | await delay(20) 636 | await unwatch() 637 | await delay(20) 638 | await drive.writeFile('world', 'hello') 639 | 640 | await drive.close() 641 | } catch (err) { 642 | t.fail(err) 643 | } 644 | 645 | t.same(triggered, 1) 646 | 647 | await cleanup() 648 | t.end() 649 | }) 650 | 651 | test('watch cleans up after unexpected close', async t => { 652 | const { client, cleanup, daemon } = await createOne() 653 | 654 | var triggered = 0 655 | 656 | try { 657 | const drive = await client.drive.get() 658 | 659 | drive.watch('', () => { 660 | triggered++ 661 | }) 662 | 663 | await drive.writeFile('hello', 'world') 664 | await delay(10) 665 | t.same(daemon.drives._watchCount, 1) 666 | await cleanup() 667 | } catch (err) { 668 | t.fail(err) 669 | } 670 | 671 | t.same(triggered, 1) 672 | t.same(daemon.drives._watchers.size, 0) 673 | t.same(daemon.drives._watchCount, 0) 674 | 675 | t.end() 676 | 
}) 677 | 678 | test('can create a symlink to directories', async t => { 679 | const { client, cleanup } = await createOne() 680 | 681 | try { 682 | const drive = await client.drive.get() 683 | await drive.mkdir('hello', { uid: 999 }) 684 | await drive.writeFile('hello/world', 'content') 685 | await drive.symlink('hello', 'other_hello') 686 | await drive.symlink('hello/world', 'other_world') 687 | 688 | const contents = await drive.readFile('other_world') 689 | t.same(contents, Buffer.from('content')) 690 | 691 | const files = await drive.readdir('other_hello') 692 | t.same(files.length, 1) 693 | t.same(files[0], 'world') 694 | 695 | const stat = await drive.lstat('other_world') 696 | t.true(stat.isSymbolicLink()) 697 | 698 | await drive.close() 699 | } catch (err) { 700 | t.fail(err) 701 | } 702 | 703 | await cleanup() 704 | t.end() 705 | }) 706 | 707 | test('drives are closed when all corresponding sessions are closed', async t => { 708 | const { client, cleanup, daemon } = await createOne() 709 | 710 | try { 711 | const drive = await client.drive.get() 712 | await drive.writeFile('a', 'a') 713 | await drive.writeFile('b', 'b') 714 | await drive.writeFile('c', 'c') 715 | const otherDrive = await client.drive.get({ key: drive.key }) 716 | const checkout1 = await client.drive.get({ key: drive.key, version: 1 }) 717 | 718 | await drive.close() 719 | t.same(daemon.drives._drives.size, 2) 720 | await otherDrive.close() 721 | t.same(daemon.drives._drives.size, 2) 722 | await checkout1.close() 723 | t.same(daemon.drives._drives.size, 0) 724 | } catch (err) { 725 | t.fail(err) 726 | } 727 | 728 | await cleanup() 729 | t.end() 730 | }) 731 | 732 | test('reopening a drive after previously closed works', async t => { 733 | const { client, cleanup, daemon } = await createOne() 734 | 735 | try { 736 | var drive = await client.drive.get() 737 | const driveKey = drive.key 738 | await drive.writeFile('a', 'a') 739 | await drive.writeFile('b', 'b') 740 | await drive.writeFile('c', 'c') 741 | const otherDrive = await client.drive.get({ key: driveKey }) 742 | const checkout1 = await client.drive.get({ key: driveKey, version: 1 }) 743 | 744 | await drive.close() 745 | t.same(daemon.drives._drives.size, 2) 746 | await otherDrive.close() 747 | t.same(daemon.drives._drives.size, 2) 748 | await checkout1.close() 749 | t.same(daemon.drives._drives.size, 0) 750 | 751 | drive = await client.drive.get({ key: driveKey }) 752 | await drive.writeFile('d', 'd') 753 | const contents = await drive.readFile('a') 754 | t.same(contents, Buffer.from('a')) 755 | } catch (err) { 756 | t.fail(err) 757 | } 758 | 759 | await cleanup() 760 | t.end() 761 | }) 762 | 763 | test('many quick closes/reopens', async t => { 764 | const NUM_CYCLES = 10 765 | const { client, cleanup, daemon } = await createOne() 766 | var driveKey = null 767 | const expected = new Array(NUM_CYCLES).fill(0).map((_, i) => '' + i) 768 | 769 | try { 770 | for (let i = 0; i < NUM_CYCLES; i++) { 771 | var drive = await client.drive.get({ key: driveKey }) 772 | if (!driveKey) driveKey = drive.key 773 | await drive.writeFile(expected[i], expected[i]) 774 | await drive.close() 775 | if (daemon.drives._drives.size !== 0) t.fail('session close did not trigger drive close') 776 | } 777 | drive = await client.drive.get({ key: driveKey }) 778 | const actual = [] 779 | for (let i = 0; i < NUM_CYCLES; i++) { 780 | const contents = await drive.readFile(expected[i]) 781 | actual[i] = contents.toString('utf8') 782 | } 783 | t.same(expected, actual) 784 | } catch (err) { 785 | 
t.fail(err) 786 | } 787 | 788 | await cleanup() 789 | t.end() 790 | }) 791 | 792 | test('drives are writable after a daemon restart', async t => { 793 | var { dir, client, cleanup } = await createOne() 794 | 795 | try { 796 | var drive = await client.drive.get() 797 | const driveKey = drive.key 798 | await drive.writeFile('a', 'a') 799 | 800 | await cleanup({ persist: true }) 801 | 802 | const newDaemon = await createOne({ dir }) 803 | client = newDaemon.client 804 | cleanup = newDaemon.cleanup 805 | 806 | drive = await client.drive.get({ key: driveKey }) 807 | t.same(await drive.readFile('a'), Buffer.from('a')) 808 | await drive.writeFile('b', 'b') 809 | t.same(await drive.readFile('b'), Buffer.from('b')) 810 | } catch (err) { 811 | t.fail(err) 812 | } 813 | 814 | await cleanup() 815 | t.end() 816 | }) 817 | 818 | test('cores are not closed incorrectly during the initial rejoin', async t => { 819 | var { dir, client, cleanup } = await createOne() 820 | 821 | try { 822 | var drive = await client.drive.get() 823 | const driveKey = drive.key 824 | await drive.writeFile('a', 'a') 825 | await drive.configureNetwork({ announce: true, lookup: true, remember: true }) 826 | 827 | await cleanup({ persist: true }) 828 | 829 | const newDaemon = await createOne({ dir }) 830 | client = newDaemon.client 831 | cleanup = newDaemon.cleanup 832 | drive = await client.drive.get({ key: driveKey }) 833 | 834 | t.same(await drive.readFile('a'), Buffer.from('a')) 835 | await drive.writeFile('b', 'b') 836 | t.same(await drive.readFile('b'), Buffer.from('b')) 837 | } catch (err) { 838 | t.fail(err) 839 | } 840 | 841 | await cleanup() 842 | t.end() 843 | }) 844 | 845 | test('mounts are writable in memory-only mode', async t => { 846 | var { client, cleanup } = await createOne({ memoryOnly: true }) 847 | 848 | try { 849 | var drive = await client.drive.get() 850 | var mount = await client.drive.get() 851 | const mountKey = mount.key 852 | 853 | await drive.writeFile('a', 'a') 854 | await drive.mount('b', { key: mountKey }) 855 | await drive.writeFile('b/c', 'b/c') 856 | await mount.writeFile('d', 'd') 857 | 858 | const aContents = await drive.readFile('a') 859 | const bcContents = await drive.readFile('b/c') 860 | const cContents = await mount.readFile('c') 861 | const dContents = await mount.readFile('d') 862 | 863 | t.same(aContents, Buffer.from('a')) 864 | t.same(bcContents, Buffer.from('b/c')) 865 | t.same(cContents, Buffer.from('b/c')) 866 | t.same(dContents, Buffer.from('d')) 867 | } catch (err) { 868 | t.fail(err) 869 | } 870 | 871 | await cleanup() 872 | t.end() 873 | }) 874 | 875 | test('can get network configuration alongside drive stats', async t => { 876 | var { client, cleanup } = await createOne({ memoryOnly: true }) 877 | 878 | try { 879 | const drive1 = await client.drive.get() 880 | const drive2 = await client.drive.get() 881 | 882 | await drive1.writeFile('a', 'a') 883 | await drive2.writeFile('b', 'bbbbbb') 884 | await drive2.writeFile('c', 'cccccc') 885 | 886 | await drive1.configureNetwork({ announce: true, lookup: true, remember: true }) 887 | await drive2.configureNetwork({ announce: false, lookup: true, remember: false }) 888 | 889 | const { network: network1 } = await drive1.stats() 890 | const { network: network2 } = await drive2.stats() 891 | 892 | t.true(network1.announce) 893 | t.true(network1.lookup) 894 | t.true(network1.remember) 895 | t.false(network2.announce) 896 | t.true(network2.lookup) 897 | t.false(network2.remember) 898 | } catch (err) { 899 | t.fail(err) 900 | } 901 | 902 | 
await cleanup() 903 | t.end() 904 | }) 905 | 906 | test('can get all network configurations', async t => { 907 | var { client, cleanup } = await createOne({ memoryOnly: true }) 908 | 909 | const configs = [ 910 | { announce: true, lookup: true, remember: true }, 911 | { announce: false, lookup: false, remember: true }, 912 | { announce: false, lookup: true, remember: false }, 913 | { announce: true, lookup: false, remember: false } 914 | ] 915 | const driveConfigs = new Map() 916 | 917 | try { 918 | for (const config of configs) { 919 | const drive = await client.drive.get() 920 | await drive.configureNetwork(config) 921 | const expectedConfig = (!config.announce && !config.lookup) ? null : config 922 | driveConfigs.set(drive.key.toString('hex'), expectedConfig) 923 | } 924 | const configMap = await client.drive.allNetworkConfigurations() 925 | for (const [key, config] of configMap) { 926 | const expectedDriveConfig = driveConfigs.get(key) 927 | if (!expectedDriveConfig) { 928 | t.same(config, null) 929 | } else { 930 | t.same(expectedDriveConfig.announce, config.announce) 931 | t.same(expectedDriveConfig.lookup, config.lookup) 932 | } 933 | } 934 | } catch (err) { 935 | t.fail(err) 936 | } 937 | 938 | await cleanup() 939 | t.end() 940 | }) 941 | 942 | // TODO: Figure out why the grpc server is not terminating. 943 | test.onFinish(() => { 944 | setTimeout(() => { 945 | process.exit(0) 946 | }, 100) 947 | }) 948 | 949 | function delay (ms) { 950 | return new Promise(resolve => setTimeout(resolve, ms)) 951 | } 952 | -------------------------------------------------------------------------------- /lib/drives/index.js: -------------------------------------------------------------------------------- 1 | const hyperdrive = require('hyperdrive') 2 | const hypercoreCrypto = require('hypercore-crypto') 3 | const datEncoding = require('dat-encoding') 4 | const pump = require('pump') 5 | const sub = require('subleveldown') 6 | const bjson = require('buffer-json-encoding') 7 | const collectStream = require('stream-collector') 8 | const { NanoresourcePromise: Nanoresource } = require('nanoresource-promise/emitter') 9 | const { Transform } = require('streamx') 10 | 11 | const { 12 | fromHyperdriveOptions, 13 | fromStat, 14 | fromMount, 15 | fromMetadata, 16 | fromDriveConfiguration, 17 | fromNetworkConfiguration, 18 | toNetworkConfiguration, 19 | toHyperdriveOptions, 20 | toStat, 21 | toMount, 22 | toMountInfo, 23 | toDriveStats, 24 | toDiffEntry, 25 | setFileStats, 26 | toChunks 27 | } = require('hyperdrive-daemon-client/lib/common') 28 | const { rpc } = require('hyperdrive-daemon-client') 29 | 30 | const ArrayIndex = require('./array-index') 31 | const { dbCollect, dbGet } = require('../common') 32 | const log = require('../log').child({ component: 'drive-manager' }) 33 | 34 | const TRIE_UPDATER_SYMBOL = Symbol('hyperdrive-daemon-trie-updater') 35 | 36 | class DriveManager extends Nanoresource { 37 | constructor (corestore, networking, db, opts = {}) { 38 | super() 39 | 40 | this.corestore = corestore 41 | this.networking = networking 42 | this.db = db 43 | this.opts = opts 44 | this.watchLimit = opts.watchLimit 45 | this.memoryOnly = !!opts.memoryOnly 46 | 47 | const dbs = DriveManager.generateSubDbs(db) 48 | 49 | this._driveIndex = dbs.drives 50 | this._mirrorIndex = dbs.mirrors 51 | this._seedIndex = dbs.seeding 52 | 53 | this._transientSeedIndex = new Map() 54 | this._drives = new Map() 55 | this._checkouts = new Map() 56 | this._watchers = new Map() 57 | this._sessionsByKey = new Map() 58 | 
this._configuredMounts = new Set() 59 | this._sessions = new ArrayIndex() 60 | this._downloads = new ArrayIndex() 61 | this._mirrors = new Map() 62 | this._watchCount = 0 63 | } 64 | 65 | ready () { 66 | return this.open() 67 | } 68 | 69 | async _open () { 70 | return Promise.all([ 71 | this._rejoin(), 72 | this._remirror() 73 | ]) 74 | } 75 | 76 | async _rejoin () { 77 | if (this.noAnnounce) return 78 | const seedList = await dbCollect(this._seedIndex) 79 | for (const { key: discoveryKey, value: networkOpts } of seedList) { 80 | const opts = networkOpts && networkOpts.opts 81 | if (!opts || !opts.announce) continue 82 | this.networking.join(discoveryKey, { ...networkOpts.opts }) 83 | } 84 | } 85 | 86 | async _remirror () { 87 | const mirrorList = await dbCollect(this._mirrorIndex) 88 | for (const { key } of mirrorList) { 89 | const drive = await this.get(key) 90 | await this._startMirroring(drive) 91 | } 92 | } 93 | 94 | _generateKeyString (key, opts) { 95 | var keyString = (key instanceof Buffer) ? key.toString('hex') : key 96 | if (opts && opts.version) keyString = keyString + '+' + opts.version 97 | if (opts && opts.hash) keyString = keyString + '+' + opts.hash 98 | return keyString 99 | } 100 | 101 | async _startMirroring (drive) { 102 | // A mirrored drive should never be closed. 103 | const { session: mirrorSession } = await this.createSession(drive) 104 | const unmirror = drive.mirror() 105 | const driveKey = drive.key.toString('hex') 106 | this._mirrors.set(driveKey, { 107 | session: mirrorSession, 108 | unmirror 109 | }) 110 | // Only the key is relevant, but gets for valid keys shouldn't return null. 111 | await this._mirrorIndex.put(driveKey, 'mirroring') 112 | log.info({ discoveryKey: drive.discoveryKey.toString('hex') }, 'mirroring drive') 113 | } 114 | 115 | async _stopMirroring (drive) { 116 | const driveKey = drive.key.toString('hex') 117 | const mirrorInfo = this._mirrors.get(driveKey) 118 | if (!mirrorInfo) return null 119 | this._mirrors.delete(driveKey) 120 | mirrorInfo.unmirror() 121 | this.closeSession(mirrorInfo.session) 122 | return this._mirrorIndex.del(driveKey) 123 | } 124 | 125 | driveForSession (sessionId) { 126 | const drive = this._sessions.get(sessionId) 127 | if (!drive) throw new Error('Session does not exist.') 128 | return drive 129 | } 130 | 131 | async createSession (drive, key, opts) { 132 | if (!drive) drive = await this.get(key, opts) 133 | key = drive.key.toString('hex') 134 | 135 | const sessionId = this._sessions.insert(drive) 136 | var driveSessions = this._sessionsByKey.get(key) 137 | 138 | if (!driveSessions) { 139 | driveSessions = [] 140 | this._sessionsByKey.set(key, driveSessions) 141 | } 142 | driveSessions.push(sessionId) 143 | 144 | return { drive, session: sessionId } 145 | } 146 | 147 | closeSession (id) { 148 | const drive = this._sessions.get(id) 149 | if (!drive) return null 150 | 151 | const driveKey = drive.key.toString('hex') 152 | const driveDKey = drive.discoveryKey.toString('hex') 153 | const driveSessions = this._sessionsByKey.get(driveKey) 154 | this._sessions.delete(id) 155 | const idx = driveSessions.indexOf(id) 156 | if (idx !== -1) driveSessions.splice(idx, 1) 157 | 158 | // If there are still active sessions, don't close the drive. 
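// Sessions act as reference counts on an open drive: only when the last session for a key closes do we
// destroy its watchers, drop any cached checkouts, and close the drive itself (unless running in
// memory-only mode, where closing would discard the in-memory storage).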
159 | if (driveSessions.length) return null 160 | 161 | log.debug({ id, discoveryKey: driveDKey }, 'closing drive because all associated sessions have closed') 162 | this._sessionsByKey.delete(driveKey) 163 | 164 | // If a drive is closed in memory-only mode, its storage will be deleted, so don't actually close. 165 | if (this.memoryOnly) { 166 | log.debug({ id, discoveryKey: driveDKey }, 'aborting drive close because we\'re in memory-only mode') 167 | return null 168 | } 169 | 170 | const watchers = this._watchers.get(driveKey) 171 | if (watchers && watchers.length) { 172 | for (const watcher of watchers) { 173 | watcher.destroy() 174 | } 175 | } 176 | this._watchers.delete(driveKey) 177 | 178 | this._drives.delete(driveKey) 179 | const checkouts = this._checkouts.get(driveKey) 180 | if (checkouts && checkouts.length) { 181 | for (const keyString of checkouts) { 182 | this._drives.delete(keyString) 183 | } 184 | } 185 | this._checkouts.delete(driveKey) 186 | 187 | return new Promise((resolve, reject) => { 188 | drive.close(err => { 189 | if (err) return reject(err) 190 | log.debug({ id, discoveryKey: driveDKey }, 'closed drive and cleaned up any remaining watchers') 191 | return resolve() 192 | }) 193 | }) 194 | } 195 | 196 | async configureNetwork (feed, opts = {}) { 197 | const self = this 198 | const encodedKey = datEncoding.encode(feed.discoveryKey) 199 | const networkOpts = { 200 | lookup: !!opts.lookup, 201 | announce: !!opts.announce, 202 | remember: !!opts.remember 203 | } 204 | const seeding = opts.lookup || opts.announce 205 | var networkingPromise 206 | 207 | const sameConfig = sameNetworkConfig(feed.discoveryKey, opts) 208 | // If all the networking options are the same, exit early. 209 | if (sameConfig) return 210 | 211 | const networkConfig = { key: datEncoding.encode(feed.key), opts: networkOpts } 212 | if (opts.remember) { 213 | if (seeding) await this._seedIndex.put(encodedKey, networkConfig) 214 | else await this._seedIndex.del(encodedKey) 215 | } else { 216 | this._transientSeedIndex.set(encodedKey, networkConfig) 217 | } 218 | 219 | // Failsafe 220 | if (networkOpts.announce && this.noAnnounce) networkOpts.announce = false 221 | 222 | try { 223 | if (seeding) { 224 | networkingPromise = this.networking.join(feed.discoveryKey, networkOpts) 225 | } else { 226 | networkingPromise = this.networking.leave(feed.discoveryKey) 227 | } 228 | networkingPromise.then(configurationSuccess) 229 | networkingPromise.catch(configurationError) 230 | } catch (err) { 231 | configurationError(err) 232 | } 233 | 234 | function sameNetworkConfig (discoveryKey, opts = {}) { 235 | const swarmStatus = self.networking.status(discoveryKey) 236 | if (!swarmStatus) return opts.lookup === false && opts.announce === false 237 | return swarmStatus.announce === opts.announce && swarmStatus.lookup === opts.lookup 238 | } 239 | 240 | function configurationError (err) { 241 | log.error({ err, discoveryKey: encodedKey }, 'network configuration error') 242 | } 243 | 244 | function configurationSuccess () { 245 | log.debug({ discoveryKey: encodedKey }, 'network configuration succeeded') 246 | } 247 | } 248 | 249 | async getNetworkConfiguration (discoveryKey) { 250 | const encodedKey = datEncoding.encode(discoveryKey) 251 | const networkOpts = this._transientSeedIndex.get(encodedKey) || await dbGet(this._seedIndex, encodedKey) 252 | if (networkOpts) return networkOpts.opts 253 | return null 254 | } 255 | 256 | async getAllNetworkConfigurations () { 257 | const storedConfigurations = (await 
dbCollect(this._seedIndex)).map(({ key, value }) => [key, value]) 258 | const transientConfigurations = [...this._transientSeedIndex] 259 | return new Map([...storedConfigurations, ...transientConfigurations]) 260 | } 261 | 262 | async getAllStats (opts) { 263 | const allStats = [] 264 | for (const [, drive] of this._drives) { 265 | const driveStats = await this.getDriveStats(drive, opts) 266 | allStats.push(driveStats) 267 | } 268 | return allStats 269 | } 270 | 271 | async getDriveStats (drive, opts = {}) { 272 | const mounts = await new Promise((resolve, reject) => { 273 | drive.getAllMounts({ memory: true, recursive: !!opts.recursive }, (err, mounts) => { 274 | if (err) return reject(err) 275 | return resolve(mounts) 276 | }) 277 | }) 278 | const stats = [] 279 | 280 | for (const [path, { metadata, content }] of mounts) { 281 | stats.push({ 282 | path, 283 | metadata: await getCoreStats(metadata), 284 | content: await getCoreStats(content) 285 | }) 286 | } 287 | 288 | return stats 289 | 290 | async function getCoreStats (core) { 291 | if (!core) return {} 292 | const stats = core.stats 293 | const openedPeers = core.peers.filter(p => p.remoteOpened) 294 | const networkingStats = { 295 | key: core.key, 296 | discoveryKey: core.discoveryKey, 297 | peerCount: core.peers.length, 298 | peers: openedPeers.map(p => { 299 | return { 300 | ...p.stats, 301 | remoteAddress: p.remoteAddress 302 | } 303 | }) 304 | } 305 | if (opts.networkingOnly) return networkingStats 306 | return { 307 | ...networkingStats, 308 | uploadedBytes: stats.totals.uploadedBytes, 309 | uploadedBlocks: stats.totals.uploadedBlocks, 310 | downloadedBytes: stats.totals.downloadedBytes, 311 | downloadedBlocks: core.downloaded(), 312 | totalBlocks: core.length 313 | } 314 | } 315 | } 316 | 317 | listDrives () { 318 | return dbCollect(this._driveIndex) 319 | } 320 | 321 | async get (key, opts = {}) { 322 | key = (key instanceof Buffer) ? datEncoding.decode(key) : key 323 | var keyString = this._generateKeyString(key, opts) 324 | const version = opts.version 325 | 326 | if (key) { 327 | // TODO: cache checkouts 328 | const existing = this._drives.get(keyString) 329 | if (existing) return existing 330 | } 331 | 332 | const driveOpts = { 333 | ...opts, 334 | version: null, 335 | key: null, 336 | sparse: opts.sparse !== false, 337 | sparseMetadata: opts.sparseMetadata !== false 338 | } 339 | var drive = this._drives.get(key) 340 | var checkout = null 341 | var unlisteners = [] 342 | 343 | if (!drive) { 344 | const randomNamespace = hypercoreCrypto.randomBytes(32).toString('hex') 345 | const store = this.corestore.namespace(randomNamespace) 346 | drive = hyperdrive(store, key, { 347 | ...driveOpts 348 | }) 349 | 350 | const errorListener = err => log.error(err) 351 | const metadataFeedListener = feed => { 352 | if (feed[TRIE_UPDATER_SYMBOL]) return 353 | feed[TRIE_UPDATER_SYMBOL] = true 354 | // Periodically update the trie. 355 | // TODO: This is to give the writer a bit of time between update requests, but we should do deferred HAVEs instead. 
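// The loop below waits five seconds between trie update requests: each timeout asks the
// metadata feed for an update and, once that completes, schedules the next request. The
// pending timeout is cleared when the feed closes or when the drive's unlisteners run.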
356 | let updateTimeout = null 357 | const loop = () => { 358 | updateTimeout = setTimeout(() => { 359 | feed.update(loop) 360 | }, 5000) 361 | } 362 | loop() 363 | const closeListener = () => clearTimeout(updateTimeout) 364 | feed.once('close', closeListener) 365 | unlisteners.push(() => { 366 | closeListener() 367 | feed.removeListener('close', closeListener) 368 | }) 369 | } 370 | drive.on('error', errorListener) 371 | drive.on('metadata-feed', metadataFeedListener) 372 | unlisteners.push(() => drive.removeListener('error', errorListener)) 373 | unlisteners.push(() => drive.removeListener('metadata-feed', metadataFeedListener)) 374 | } 375 | 376 | await new Promise((resolve, reject) => { 377 | drive.ready(err => { 378 | if (err) return reject(err) 379 | return resolve() 380 | }) 381 | }) 382 | 383 | if (version || (version === 0)) checkout = drive.checkout(version) 384 | 385 | key = datEncoding.encode(drive.key) 386 | keyString = this._generateKeyString(key, opts) 387 | 388 | var initialConfig 389 | // TODO: Need to fully work through all the default networking behaviors. 390 | if (opts.fuseNetwork) { 391 | // TODO: The Network drive does not announce any settings for now. 392 | initialConfig = { lookup: true, announce: false } 393 | await this.configureNetwork(drive.metadata, initialConfig) 394 | } else if (!drive.writable || opts.seed) { 395 | initialConfig = { lookup: true, announce: false } 396 | await this.configureNetwork(drive.metadata, initialConfig) 397 | } 398 | 399 | // Make sure that any inner mounts are recorded in the drive index. 400 | const mountListener = async (trie) => { 401 | const feed = trie.feed 402 | const mountInfo = { version: trie.version } 403 | const mountKey = feed.key.toString('hex') 404 | 405 | log.info({ discoveryKey: feed.discoveryKey.toString('hex') }, 'registering mountpoint in drive index') 406 | const parentConfig = (await this.getNetworkConfiguration(drive.discoveryKey)) || initialConfig || {} 407 | const existingMountConfig = (await this.getNetworkConfiguration(feed.discoveryKey)) || {} 408 | const mountConfig = { 409 | lookup: (existingMountConfig.lookup !== false) && (parentConfig.lookup !== false), 410 | announce: !!(existingMountConfig.announce || parentConfig.announce) 411 | } 412 | 413 | if (mountConfig) await this.configureNetwork(feed, mountConfig) 414 | this.emit('configured-mount', feed.key) 415 | this._configuredMounts.add(mountKey) 416 | 417 | const existingConfig = await dbGet(this._driveIndex, mountKey) 418 | if (!existingConfig) await this._driveIndex.put(mountKey, mountInfo) 419 | } 420 | drive.on('mount', mountListener) 421 | unlisteners.push(() => drive.removeAllListeners('mount')) 422 | 423 | drive.once('close', () => { 424 | for (const unlisten of unlisteners) { 425 | unlisten() 426 | } 427 | unlisteners = [] 428 | }) 429 | 430 | // TODO: This should all be in one batch. 
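// Until that batching happens, each index write below is issued on its own. If the drive
// index exposes a level-style batch API, a batched variant might look roughly like this
// (an unverified sketch, not part of the current implementation):
//   await this._driveIndex.batch([{ type: 'put', key, value: driveOpts }])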
431 | await Promise.all([ 432 | this._driveIndex.put(key, driveOpts) 433 | ]) 434 | this._drives.set(key, drive) 435 | if (checkout) { 436 | var checkouts = this._checkouts.get(key) 437 | if (!checkouts) { 438 | checkouts = [] 439 | this._checkouts.set(key, checkouts) 440 | } 441 | checkouts.push(keyString) 442 | this._drives.set(keyString, checkout) 443 | } 444 | 445 | return checkout || drive 446 | } 447 | 448 | download (drive, path) { 449 | const dl = drive.download(path) 450 | return this._downloads.insert(dl) 451 | } 452 | 453 | // RPC Methods 454 | async _rpcVersion (call) { 455 | const id = call.request.getId() 456 | 457 | if (!id) throw new Error('A version request must specify a session ID.') 458 | const drive = this.driveForSession(id) 459 | 460 | const rsp = new rpc.drive.messages.DriveVersionResponse() 461 | rsp.setVersion(drive.version) 462 | 463 | return rsp 464 | } 465 | 466 | async _rpcGet (call) { 467 | var driveOpts = fromHyperdriveOptions(call.request.getOpts()) 468 | 469 | const { drive, session } = await this.createSession(null, driveOpts.key, driveOpts) 470 | driveOpts.key = drive.key 471 | driveOpts.discoveryKey = drive.discoveryKey 472 | driveOpts.version = drive.version 473 | driveOpts.writable = drive.writable 474 | 475 | const rsp = new rpc.drive.messages.GetDriveResponse() 476 | rsp.setId(session) 477 | rsp.setOpts(toHyperdriveOptions(driveOpts)) 478 | 479 | return rsp 480 | } 481 | 482 | async _rpcAllStats (call) { 483 | const networkingOnly = call.request.getNetworkingonly() 484 | var stats = await this.getAllStats({ networkingOnly }) 485 | stats = stats.map(driveStats => toDriveStats(driveStats)) 486 | 487 | const rsp = new rpc.drive.messages.StatsResponse() 488 | rsp.setStatsList(stats) 489 | 490 | return rsp 491 | } 492 | 493 | async _rpcAllNetworkConfigurations (call) { 494 | const networkConfigurations = await this.getAllNetworkConfigurations() 495 | 496 | const rsp = new rpc.drive.messages.NetworkConfigurationsResponse() 497 | rsp.setConfigurationsList([...networkConfigurations].map(([, value]) => toNetworkConfiguration({ 498 | ...value.opts, 499 | key: Buffer.from(value.key, 'hex') 500 | }))) 501 | 502 | return rsp 503 | } 504 | 505 | async _rpcPeerCounts (call) { 506 | const rsp = new rpc.drive.messages.PeerCountsResponse() 507 | const keys = call.request.getKeysList() 508 | if (!keys) return rsp 509 | 510 | const counts = [] 511 | for (let key of keys) { 512 | key = Buffer.from(key) 513 | if (this.corestore.isLoaded(key)) { 514 | const core = this.corestore.get(key) 515 | const openPeers = core.peers.filter(p => p.remoteOpened) 516 | counts.push(openPeers.length) 517 | } else { 518 | counts.push(0) 519 | } 520 | } 521 | 522 | rsp.setPeercountsList(counts) 523 | return rsp 524 | } 525 | 526 | async _rpcConfigureNetwork (call) { 527 | const id = call.request.getId() 528 | 529 | if (!id) throw new Error('A network configuration request must specify a session ID.') 530 | const drive = this.driveForSession(id) 531 | const opts = fromNetworkConfiguration(call.request.getNetwork()) 532 | 533 | await this.configureNetwork(drive.metadata, { ...opts }) 534 | 535 | const rsp = new rpc.drive.messages.ConfigureNetworkResponse() 536 | return rsp 537 | } 538 | 539 | async _rpcStats (call) { 540 | const id = call.request.getId() 541 | 542 | if (!id) throw new Error('A stats request must specify a session ID.') 543 | const drive = this.driveForSession(id) 544 | 545 | const recursive = call.request.getRecursive() 546 | const networkingOnly = 
call.request.getNetworkingonly() 547 | const driveStats = await this.getDriveStats(drive, { recursive, networkingOnly }) 548 | const networkConfig = await this.getNetworkConfiguration(drive.discoveryKey) 549 | 550 | const rsp = new rpc.drive.messages.DriveStatsResponse() 551 | rsp.setStats(toDriveStats(driveStats)) 552 | if (networkConfig) rsp.setNetwork(toNetworkConfiguration(networkConfig)) 553 | return rsp 554 | } 555 | 556 | async _rpcMirror (call) { 557 | const id = call.request.getId() 558 | 559 | if (!id) throw new Error('A mirror request must specify a session ID.') 560 | const drive = this.driveForSession(id) 561 | await this._startMirroring(drive) 562 | 563 | const rsp = new rpc.drive.messages.MirrorResponse() 564 | return rsp 565 | } 566 | 567 | async _rpcUnmirror (call) { 568 | const id = call.request.getId() 569 | 570 | if (!id) throw new Error('An unmirror request must specify a session ID.') 571 | const drive = this.driveForSession(id) 572 | await this._stopMirroring(drive) 573 | 574 | const rsp = new rpc.drive.messages.UnmirrorResponse() 575 | return rsp 576 | } 577 | 578 | async _rpcDownload (call) { 579 | const id = call.request.getId() 580 | const path = call.request.getPath() 581 | 582 | if (!id) throw new Error('A download request must specify a session ID.') 583 | const drive = this.driveForSession(id) 584 | const downloadId = this.download(drive, path) 585 | 586 | const rsp = new rpc.drive.messages.DownloadResponse() 587 | rsp.setDownloadid(downloadId) 588 | return rsp 589 | } 590 | 591 | async _rpcUndownload (call) { 592 | const id = call.request.getId() 593 | const downloadId = call.request.getDownloadid() 594 | 595 | if (!id) throw new Error('An undownload request must specify a session ID.') 596 | if (!downloadId) throw new Error('An undownload request must specify a download ID.') 597 | 598 | const dl = this._downloads.get(downloadId) 599 | if (dl) dl.destroy() 600 | this._downloads.delete(downloadId) 601 | 602 | return new rpc.drive.messages.UndownloadResponse() 603 | } 604 | 605 | async _rpcCreateDiffStream (call) { 606 | const id = call.request.getId() 607 | const prefix = call.request.getPrefix() 608 | const otherVersion = call.request.getOther() 609 | 610 | if (!id) throw new Error('A diff stream request must specify a session ID.') 611 | const drive = this.driveForSession(id) 612 | 613 | const stream = drive.createDiffStream(otherVersion, prefix) 614 | 615 | const rspMapper = new Transform({ 616 | transform (chunk, cb) { 617 | const rsp = new rpc.drive.messages.DiffStreamResponse() 618 | if (!chunk) return cb(null, rsp) 619 | 620 | const { name, type, value } = chunk 621 | rsp.setType(type) 622 | rsp.setName(name) 623 | if (type === 'put') { 624 | rsp.setValue(toDiffEntry({ stat: value })) 625 | } else { 626 | rsp.setValue(toDiffEntry({ mount: value })) 627 | } 628 | 629 | return cb(null, rsp) 630 | } 631 | }) 632 | 633 | pump(stream, rspMapper, call, err => { 634 | if (err) { 635 | log.error({ id, err }, 'createDiffStream error') 636 | call.destroy(err) 637 | } 638 | }) 639 | } 640 | 641 | async _rpcCreateReadStream (call) { 642 | const id = call.request.getId() 643 | const path = call.request.getPath() 644 | const start = call.request.getStart() 645 | var end = call.request.getEnd() 646 | const length = call.request.getLength() 647 | 648 | if (!id) throw new Error('A createReadStream request must specify a session ID.') 649 | if (!path) throw new Error('A createReadStream request must specify a path.') 650 | const drive = this.driveForSession(id) 651 | 652 | const
streamOpts = {} 653 | if (end !== 0) streamOpts.end = end 654 | if (length !== 0) streamOpts.length = length 655 | streamOpts.start = start 656 | const stream = drive.createReadStream(path, streamOpts) 657 | 658 | const rspMapper = new Transform({ 659 | transform (chunk, cb) { 660 | const rsp = new rpc.drive.messages.ReadStreamResponse() 661 | rsp.setChunk(chunk) 662 | return cb(null, rsp) 663 | } 664 | }) 665 | 666 | pump(stream, rspMapper, call, err => { 667 | if (err) { 668 | log.error({ id, err }, 'createReadStream error') 669 | call.destroy(err) 670 | } 671 | }) 672 | } 673 | 674 | async _rpcReadFile (call) { 675 | const id = call.request.getId() 676 | const path = call.request.getPath() 677 | 678 | if (!id) throw new Error('A readFile request must specify a session ID.') 679 | if (!path) throw new Error('A readFile request must specify a path.') 680 | const drive = this.driveForSession(id) 681 | 682 | const content = await new Promise((resolve, reject) => { 683 | drive.readFile(path, (err, content) => { 684 | if (err) return reject(err) 685 | return resolve(content) 686 | }) 687 | }) 688 | 689 | const chunks = toChunks(content) 690 | for (const chunk of chunks) { 691 | const rsp = new rpc.drive.messages.ReadFileResponse() 692 | rsp.setChunk(chunk) 693 | call.write(rsp) 694 | } 695 | call.end() 696 | } 697 | 698 | async _rpcCreateWriteStream (call) { 699 | const unpack = new Transform({ 700 | transform (msg, cb) { 701 | const chunk = msg.getChunk() 702 | return cb(null, Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)) 703 | } 704 | }) 705 | 706 | return new Promise((resolve, reject) => { 707 | call.once('data', req => { 708 | const id = req.getId() 709 | const path = req.getPath() 710 | const opts = fromStat(req.getOpts()) 711 | 712 | if (!id) throw new Error('A createWriteStream request must specify a session ID.') 713 | if (!path) throw new Error('A createWriteStream request must specify a path.') 714 | const drive = this.driveForSession(id) 715 | 716 | const stream = drive.createWriteStream(path, { mode: opts.mode, uid: opts.uid, gid: opts.gid, metadata: opts.metadata }) 717 | 718 | return onstream(resolve, reject, stream) 719 | }) 720 | }) 721 | 722 | function onstream (resolve, reject, stream) { 723 | pump(call, unpack, stream, err => { 724 | if (err) return reject(err) 725 | const rsp = new rpc.drive.messages.WriteStreamResponse() 726 | return resolve(rsp) 727 | }) 728 | } 729 | } 730 | 731 | async _rpcWriteFile (call) { 732 | return new Promise((resolve, reject) => { 733 | call.once('data', req => { 734 | const id = req.getId() 735 | const path = req.getPath() 736 | const opts = fromStat(req.getOpts()) 737 | 738 | if (!id) throw new Error('A writeFile request must specify a session ID.') 739 | if (!path) throw new Error('A writeFile request must specify a path.') 740 | const drive = this.driveForSession(id) 741 | 742 | return loadContent(resolve, reject, path, drive, opts) 743 | }) 744 | }) 745 | 746 | function loadContent (resolve, reject, path, drive, opts) { 747 | return collectStream(call, (err, reqs) => { 748 | if (err) return reject(err) 749 | const chunks = reqs.map(req => { 750 | const chunk = req.getChunk() 751 | return Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength) 752 | }) 753 | return drive.writeFile(path, Buffer.concat(chunks), opts, err => { 754 | if (err) return reject(err) 755 | const rsp = new rpc.drive.messages.WriteFileResponse() 756 | return resolve(rsp) 757 | }) 758 | }) 759 | } 760 | } 761 | 762 | async _rpcUpdateMetadata (call) { 763 | const id
= call.request.getId() 764 | const path = call.request.getPath() 765 | const metadata = fromMetadata(call.request.getMetadataMap()) 766 | 767 | if (!id) throw new Error('A metadata update request must specify a session ID.') 768 | if (!path) throw new Error('A metadata update request must specify a path.') 769 | if (!metadata) throw new Error('A metadata update request must specify metadata.') 770 | const drive = this.driveForSession(id) 771 | 772 | return new Promise((resolve, reject) => { 773 | drive._update(path, { metadata }, err => { 774 | if (err) return reject(err) 775 | return resolve(new rpc.drive.messages.UpdateMetadataResponse()) 776 | }) 777 | }) 778 | } 779 | 780 | async _rpcDeleteMetadata (call) { 781 | const id = call.request.getId() 782 | const path = call.request.getPath() 783 | const keys = call.request.getKeysList() 784 | 785 | if (!id) throw new Error('A metadata update request must specify a session ID.') 786 | if (!path) throw new Error('A metadata update request must specify a path.') 787 | if (!keys) throw new Error('A metadata update request must specify metadata keys.') 788 | const drive = this.driveForSession(id) 789 | 790 | const metadata = {} 791 | for (const key of keys) { 792 | metadata[key] = null 793 | } 794 | 795 | return new Promise((resolve, reject) => { 796 | drive._update(path, { metadata }, err => { 797 | if (err) return reject(err) 798 | return resolve(new rpc.drive.messages.DeleteMetadataResponse()) 799 | }) 800 | }) 801 | } 802 | 803 | async _rpcStat (call) { 804 | const id = call.request.getId() 805 | const path = call.request.getPath() 806 | const lstat = call.request.getLstat() 807 | 808 | if (!id) throw new Error('A stat request must specify a session ID.') 809 | if (!path) throw new Error('A stat request must specify a path.') 810 | const drive = this.driveForSession(id) 811 | 812 | const method = lstat ? drive.lstat.bind(drive) : drive.stat.bind(drive) 813 | 814 | return new Promise((resolve, reject) => { 815 | method(path, (err, stat) => { 816 | if (err) return reject(err) 817 | 818 | const rsp = new rpc.drive.messages.StatResponse() 819 | rsp.setStat(toStat(stat)) 820 | 821 | return resolve(rsp) 822 | }) 823 | }) 824 | } 825 | 826 | async _rpcUnlink (call) { 827 | const id = call.request.getId() 828 | const path = call.request.getPath() 829 | 830 | if (!id) throw new Error('An unlink request must specify a session ID.') 831 | if (!path) throw new Error('An unlink request must specify a path. 
') 832 | const drive = this.driveForSession(id) 833 | 834 | return new Promise((resolve, reject) => { 835 | drive.unlink(path, err => { 836 | if (err) return reject(err) 837 | const rsp = new rpc.drive.messages.UnlinkResponse() 838 | return resolve(rsp) 839 | }) 840 | }) 841 | } 842 | 843 | async _rpcReaddir (call) { 844 | const id = call.request.getId() 845 | const path = call.request.getPath() 846 | const recursive = call.request.getRecursive() 847 | const noMounts = call.request.getNomounts() 848 | const includeStats = call.request.getIncludestats() 849 | 850 | if (!id) throw new Error('A readdir request must specify a session ID.') 851 | if (!path) throw new Error('A readdir request must specify a path.') 852 | const drive = this.driveForSession(id) 853 | 854 | return new Promise((resolve, reject) => { 855 | drive.readdir(path, { recursive, noMounts, includeStats }, (err, files) => { 856 | if (err) return reject(err) 857 | 858 | const rsp = new rpc.drive.messages.ReadDirectoryResponse() 859 | if (!includeStats) { 860 | rsp.setFilesList(files) 861 | } else { 862 | const names = [] 863 | const stats = [] 864 | const mounts = [] 865 | const innerPaths = [] 866 | for (const { name, stat, mount, innerPath } of files) { 867 | names.push(name) 868 | stats.push(toStat(stat)) 869 | mounts.push(toMount(mount)) 870 | innerPaths.push(innerPath) 871 | } 872 | rsp.setFilesList(names) 873 | rsp.setStatsList(stats) 874 | rsp.setMountsList(mounts) 875 | rsp.setInnerpathsList(innerPaths) 876 | } 877 | return resolve(rsp) 878 | }) 879 | }) 880 | } 881 | 882 | async _rpcMkdir (call) { 883 | const id = call.request.getId() 884 | const path = call.request.getPath() 885 | const opts = fromStat(call.request.getOpts()) 886 | 887 | if (!id) throw new Error('A mkdir request must specify a session ID.') 888 | if (!path) throw new Error('A mkdir request must specify a directory path.') 889 | const drive = this.driveForSession(id) 890 | 891 | const mkdirOpts = {} 892 | if (opts.uid) mkdirOpts.uid = opts.uid 893 | if (opts.gid) mkdirOpts.gid = opts.gid 894 | if (opts.mode) mkdirOpts.mode = opts.mode 895 | 896 | return new Promise((resolve, reject) => { 897 | drive.mkdir(path, mkdirOpts, err => { 898 | if (err) return reject(err) 899 | 900 | const rsp = new rpc.drive.messages.MkdirResponse() 901 | return resolve(rsp) 902 | }) 903 | }) 904 | } 905 | 906 | async _rpcRmdir (call) { 907 | const id = call.request.getId() 908 | const path = call.request.getPath() 909 | 910 | if (!id) throw new Error('A rmdir request must specify a session ID.') 911 | if (!path) throw new Error('A rmdir request must specify a directory path.') 912 | const drive = this.driveForSession(id) 913 | 914 | return new Promise((resolve, reject) => { 915 | drive.rmdir(path, err => { 916 | if (err) return reject(err) 917 | 918 | const rsp = new rpc.drive.messages.RmdirResponse() 919 | return resolve(rsp) 920 | }) 921 | }) 922 | } 923 | 924 | async _rpcMount (call) { 925 | const id = call.request.getId() 926 | const mountInfo = call.request.getInfo() 927 | 928 | const path = mountInfo.getPath() 929 | const opts = fromMount(mountInfo.getOpts()) 930 | 931 | if (!id) throw new Error('A mount request must specify a session ID.') 932 | if (!path) throw new Error('A mount request must specify a path.') 933 | if (!opts) throw new Error('A mount request must specify mount options.') 934 | const drive = this.driveForSession(id) 935 | 936 | return new Promise((resolve, reject) => { 937 | let error = null 938 | const mountListener = key => { 939 | if (!opts.key 
|| key.equals(opts.key)) { 940 | this.removeListener('configured-mount', mountListener) 941 | if (error) return 942 | const rsp = new rpc.drive.messages.MountDriveResponse() 943 | return resolve(rsp) 944 | } 945 | } 946 | this.on('configured-mount', mountListener) 947 | drive.mount(path, opts.key, opts, err => { 948 | if (err) { 949 | error = err 950 | return reject(err) 951 | } 952 | if (opts.key && this._configuredMounts.has(opts.key.toString('hex'))) { 953 | return mountListener(opts.key) 954 | } 955 | }) 956 | }) 957 | } 958 | 959 | async _rpcUnmount (call) { 960 | const id = call.request.getId() 961 | const path = call.request.getPath() 962 | 963 | if (!id) throw new Error('An unmount request must specify a session ID.') 964 | if (!path) throw new Error('An unmount request must specify a path.') 965 | const drive = this.driveForSession(id) 966 | 967 | return new Promise((resolve, reject) => { 968 | drive.unmount(path, err => { 969 | if (err) return reject(err) 970 | const rsp = new rpc.drive.messages.UnmountDriveResponse() 971 | return resolve(rsp) 972 | }) 973 | }) 974 | } 975 | 976 | async _rpcWatch (call) { 977 | const self = this 978 | var watcher = null 979 | var closed = false 980 | var driveWatchers = null 981 | var keyString = null 982 | 983 | call.once('data', req => { 984 | const id = req.getId() 985 | var path = req.getPath() 986 | 987 | if (!id) throw new Error('A watch request must specify a session ID.') 988 | if (!path) path = '/' 989 | const drive = this.driveForSession(id) 990 | keyString = drive.key.toString('hex') 991 | 992 | driveWatchers = this._watchers.get(keyString) 993 | if (!driveWatchers) { 994 | driveWatchers = [] 995 | this._watchers.set(keyString, driveWatchers) 996 | } 997 | 998 | watcher = drive.watch(path, () => { 999 | const rsp = new rpc.drive.messages.WatchResponse() 1000 | call.write(rsp) 1001 | }) 1002 | 1003 | const close = onclose.bind(null, id, path, driveWatchers) 1004 | 1005 | watcher.once('ready', subWatchers => { 1006 | // Add one in order to include the root watcher. 1007 | this._watchCount += subWatchers.length + 1 1008 | if (this._watchCount > this.watchLimit) { 1009 | return close('Watch limit reached. Please close watch connections then try again.') 1010 | } 1011 | driveWatchers.push(watcher) 1012 | 1013 | // Any subsequent messages are considered cancellations. 
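// From here on, the first request has already registered the watcher: any further message
// on the call, or a close/finish/error/end event, funnels into `close`, which destroys the
// watcher, decrements the global watch count, and ends the RPC stream.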
1014 | call.on('data', close) 1015 | call.on('close', close) 1016 | call.on('finish', close) 1017 | call.on('error', close) 1018 | call.on('end', close) 1019 | }) 1020 | }) 1021 | 1022 | function onclose (id, path, driveWatchers, err) { 1023 | if (closed) return 1024 | closed = true 1025 | log.debug({ id, path }, 'unregistering watcher') 1026 | if (watcher) { 1027 | watcher.destroy() 1028 | if (watcher.watchers) self._watchCount -= (watcher.watchers.length + 1) 1029 | driveWatchers.splice(driveWatchers.indexOf(watcher), 1) 1030 | if (!driveWatchers.length) self._watchers.delete(keyString) 1031 | } 1032 | call.end() 1033 | } 1034 | } 1035 | 1036 | async _rpcSymlink (call) { 1037 | const id = call.request.getId() 1038 | const target = call.request.getTarget() 1039 | const linkname = call.request.getLinkname() 1040 | 1041 | if (!id) throw new Error('A symlink request must specify a session ID.') 1042 | if (!target) throw new Error('A symlink request must specify a target.') 1043 | if (!linkname) throw new Error('A symlink request must specify a linkname.') 1044 | const drive = this.driveForSession(id) 1045 | 1046 | return new Promise((resolve, reject) => { 1047 | drive.symlink(target, linkname, err => { 1048 | if (err) return reject(err) 1049 | 1050 | const rsp = new rpc.drive.messages.SymlinkResponse() 1051 | return resolve(rsp) 1052 | }) 1053 | }) 1054 | } 1055 | 1056 | async _rpcClose (call) { 1057 | const id = call.request.getId() 1058 | 1059 | this.driveForSession(id) 1060 | await this.closeSession(id) 1061 | const rsp = new rpc.drive.messages.CloseSessionResponse() 1062 | 1063 | return rsp 1064 | } 1065 | 1066 | async _rpcFileStats (call) { 1067 | const id = call.request.getId() 1068 | const path = call.request.getPath() 1069 | 1070 | if (!id) throw new Error('A fileStats request must specify a session ID.') 1071 | if (!path) throw new Error('A fileStats request must specify a path.') 1072 | const drive = this.driveForSession(id) 1073 | 1074 | return new Promise((resolve, reject) => { 1075 | drive.stats(path, (err, stats) => { 1076 | if (err) return reject(err) 1077 | 1078 | if (!(stats instanceof Map)) { 1079 | const fileStats = stats 1080 | stats = new Map() 1081 | stats.set(path, fileStats) 1082 | } 1083 | const rsp = new rpc.drive.messages.FileStatsResponse() 1084 | setFileStats(rsp.getStatsMap(), stats) 1085 | 1086 | return resolve(rsp) 1087 | }) 1088 | }) 1089 | } 1090 | 1091 | async _rpcMounts (call) { 1092 | const id = call.request.getId() 1093 | const memory = call.request.getMemory() 1094 | const recursive = call.request.getRecursive() 1095 | 1096 | if (!id) throw new Error('A mounts request must specify a session ID.') 1097 | const drive = this.driveForSession(id) 1098 | 1099 | return new Promise((resolve, reject) => { 1100 | drive.getAllMounts({ memory, recursive }, (err, mounts) => { 1101 | if (err) return reject(err) 1102 | const rsp = new rpc.drive.messages.DriveMountsResponse() 1103 | if (!mounts) return resolve(rsp) 1104 | 1105 | const mountsList = [] 1106 | for (const [path, { metadata }] of mounts) { 1107 | mountsList.push(toMountInfo({ 1108 | path, 1109 | opts: { 1110 | key: metadata.key, 1111 | version: metadata.version 1112 | } 1113 | })) 1114 | } 1115 | rsp.setMountsList(mountsList) 1116 | return resolve(rsp) 1117 | }) 1118 | }) 1119 | } 1120 | } 1121 | 1122 | DriveManager.generateSubDbs = function (db) { 1123 | return { 1124 | drives: sub(db, 'drives', { valueEncoding: 'bjson' }), 1125 | mirrors: sub(db, 'mirrors', { valueEncoding: 'utf8' }), 1126 | seeding: 
sub(db, 'seeding', { valueEncoding: 'json' }) 1127 | } 1128 | } 1129 | 1130 | module.exports = DriveManager 1131 | --------------------------------------------------------------------------------