├── .DS_Store ├── .github ├── FUNDING.yml └── workflows │ └── node.js.yml ├── .gitignore ├── .gitlab-ci.yml ├── LICENSE ├── README.md ├── commitlint.config.js ├── index.js ├── lib ├── autobee.js ├── core.js ├── crypto.js ├── database.js └── swarm.js ├── package.json ├── tests ├── .DS_Store ├── data │ ├── email.eml │ └── test.doc ├── database.test.js ├── drive.test.js ├── helpers │ └── setup.js └── vars.json └── util ├── fixedChunker.js ├── requestChunker.js └── workerKeyPairs.js /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Telios-org/nebula-drive/cc4d18e96392fe383d4f04676d4017fbea5148be/.DS_Store -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [Telios-org] 4 | -------------------------------------------------------------------------------- /.github/workflows/node.js.yml: -------------------------------------------------------------------------------- 1 | name: Build Status 2 | 3 | on: push 4 | 5 | jobs: 6 | build: 7 | strategy: 8 | matrix: 9 | node-version: [14.x, 16.x] 10 | os: [ubuntu-latest, macos-latest, windows-latest] 11 | runs-on: ${{ matrix.os }} 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Use Node.js ${{ matrix.node-version }} 15 | uses: actions/setup-node@v1 16 | with: 17 | node-version: ${{ matrix.node-version }} 18 | - run: npm install 19 | - run: npm test 20 | publish: 21 | name: Publish Package 22 | needs: build 23 | runs-on: ubuntu-latest 24 | if: github.ref == 'refs/heads/master' 25 | steps: 26 | - uses: actions/checkout@v2 27 | - name: Use Node.js 16.x 28 | uses: actions/setup-node@v1 29 | with: 30 | node-version: 16.x 31 | - uses: JS-DevTools/npm-publish@v1 32 | with: 33 | token: ${{ secrets.NPM_TOKEN }} 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | 9 | # Diagnostic reports (https://nodejs.org/api/report.html) 10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 11 | 12 | # Runtime data 13 | pids 14 | *.pid 15 | *.seed 16 | *.pid.lock 17 | 18 | # Directory for instrumented libs generated by jscoverage/JSCover 19 | lib-cov 20 | 21 | # Coverage directory used by tools like istanbul 22 | coverage 23 | *.lcov 24 | 25 | # nyc test coverage 26 | .nyc_output 27 | 28 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 29 | .grunt 30 | 31 | # Bower dependency directory (https://bower.io/) 32 | bower_components 33 | 34 | # node-waf configuration 35 | .lock-wscript 36 | 37 | # Compiled binary addons (https://nodejs.org/api/addons.html) 38 | build/Release 39 | 40 | # Dependency directories 41 | node_modules/ 42 | jspm_packages/ 43 | 44 | # TypeScript v1 declaration files 45 | typings/ 46 | 47 | # TypeScript cache 48 | *.tsbuildinfo 49 | 50 | # Optional npm cache directory 51 | .npm 52 | 53 | # Optional eslint cache 54 | .eslintcache 55 | 56 | # Microbundle cache 57 | .rpt2_cache/ 58 | .rts2_cache_cjs/ 59 | .rts2_cache_es/ 60 | .rts2_cache_umd/ 61 | 62 | # Optional REPL history 63 | .node_repl_history 64 | 65 | # Output of 'npm pack' 66 | *.tgz 67 | *.zip 68 | 69 | # Yarn Integrity file 70 | .yarn-integrity 71 | 72 | # 
dotenv environment variables file 73 | .env 74 | .env.test 75 | 76 | # parcel-bundler cache (https://parceljs.org/) 77 | .cache 78 | 79 | # Next.js build output 80 | .next 81 | 82 | # Nuxt.js build / generate output 83 | .nuxt 84 | dist 85 | 86 | # Gatsby files 87 | .cache/ 88 | # Comment in the public line in if your project uses Gatsby and *not* Next.js 89 | # https://nextjs.org/blog/next-9-1#public-directory-support 90 | # public 91 | 92 | # vuepress build output 93 | .vuepress/dist 94 | 95 | # Serverless directories 96 | .serverless/ 97 | 98 | # FuseBox cache 99 | .fusebox/ 100 | 101 | # DynamoDB Local files 102 | .dynamodb/ 103 | 104 | # TernJS port file 105 | .tern-port 106 | 107 | # Drives 108 | drive/ 109 | core/ 110 | tests/storage 111 | tests/localDrive 112 | tests/drive* 113 | !tests/drive.* 114 | tests/peer-drive 115 | tests/data/meta/* 116 | tests/data/enc_meta.tmp.json 117 | tests/vars.tmp.json 118 | tests/data/encrypted_tmp.email 119 | .tmp 120 | .vscode 121 | package-lock.json -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - build 3 | - test 4 | - publish 5 | - ci_status 6 | 7 | image: node:12.18.4 8 | 9 | workflow: 10 | rules: 11 | - if: $CI_COMMIT_BRANCH 12 | 13 | test: 14 | stage: test 15 | before_script: 16 | - | 17 | { 18 | echo "@${CI_PROJECT_ROOT_NAMESPACE}:registry=${CI_API_V4_URL}/packages/npm/" 19 | } | tee --append .npmrc 20 | - npm ci --cache .npm --prefer-offline 21 | script: 22 | - npm run test 23 | 24 | include: 25 | - project: telios2/telios-devops 26 | ref: master 27 | file: ".gitlab-ci.DiscordWebhook.yml" 28 | - project: telios2/telios-devops 29 | ref: master 30 | file: ".gitlab-ci.NPMPublish.yml" 31 | - template: Secret-Detection.gitlab-ci.yml 32 | - template: SAST.gitlab-ci.yml 33 | - template: License-Scanning.gitlab-ci.yml 34 | - template: Dependency-Scanning.gitlab-ci.yml 35 | 36 | publish:npm: 37 | stage: publish 38 | extends: .publish:npm 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Mathias Buus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Nebula Drive
2 | 
3 | ![Build Status](https://github.com/Telios-org/nebula-drive/actions/workflows/node.js.yml/badge.svg)
4 | 
5 | #### ⚠️ This version has been deprecated and is no longer supported. Please visit the latest version [here](https://github.com/Telios-org/nebula) ⚠️
6 | 
7 | Nebula drives are real-time distributed storage for files and key-value databases, built on top of [Hypercore Protocol](https://hypercore-protocol.org/). This project exists because the [Telios](https://telios.io) email client needed a way to distribute and store encrypted emails on users' local file systems over a peer-to-peer (P2P) network. A lot of inspiration was taken from [Hyperdrive](https://github.com/hypercore-protocol/hyperdrive), but Hyperdrive didn't offer fine-grained access control, multiple writers, or the ability to delete files from disk once they were added to a drive.
8 | 
9 | Nebula drives come with a handful of useful features:
10 | - __Shareable over company firewalls and mobile networks__: The P2P network runs on [Hyperswarm](https://github.com/hyperswarm/hyperswarm), which can hole-punch through most company firewalls and mobile connections.
11 | - __Access Control__: Control access to each file by sharing the file's hash and the drive's discovery key.
12 | - __Multiwriter__: Drives can have multiple peers with write access by sharing and adding each other's diff keys.
13 | - __Collections__: Along with files, drives can create and share simple key-value B-tree databases built on [Hyperbee](https://github.com/hypercore-protocol/hyperbee). Collections can also optionally be encrypted with a secret key.
14 | 
15 | ### TODOs:
16 | - [x] Connect to drives behind corporate firewalls and mobile networks
17 | - [x] Create and share key value databases between peers
18 | - [x] Upgrade multiwriter to Hypercore v10
19 | - [ ] Share files by only their hash, much like [IPFS](https://docs.ipfs.io/concepts/how-ipfs-works/)
20 | - [ ] Upgrade access control to limit sharing by a peer's public key
21 | - [ ] Turn an existing directory into a drive and watch for changes
22 | - [ ] Upgrade collections to be closer to MongoDB with [Hyperbeedeebee](https://github.com/RangerMauve/hyperbeedeebee)
23 | 
24 | ## Installation
25 | 
26 | ```sh
27 | npm i @telios/nebula-drive
28 | ```
29 | 
30 | ## Usage
31 | 
32 | ```js
33 | const Drive = require('@telios/nebula-drive')
34 | 
35 | const encryptionKey = Buffer.alloc(32, 'hello world')
36 | 
37 | const localDrive = new Drive(__dirname + "/drive", null, {
38 |   keyPair,
39 |   encryptionKey,
40 |   swarmOpts: {
41 |     server: true,
42 |     client: true
43 |   }
44 | })
45 | 
46 | await localDrive.ready()
47 | 
48 | // Key to be shared with other devices or services that want to seed this drive
49 | const drivePubKey = localDrive.publicKey
50 | 
51 | // Clone a remote drive
52 | const remoteDrive = new Drive(__dirname + "/drive_remote", drivePubKey, {
53 |   keyPair,
54 |   swarmOpts: {
55 |     server: true,
56 |     client: true
57 |   }
58 | })
59 | 
60 | await remoteDrive.ready()
61 | 
62 | 
63 | localDrive.on('file-sync', file => {
64 |   // Local drive has synced somefile.json from the remote drive
65 | })
66 | 
67 | await remoteDrive.writeFile('/dest/path/on/drive/somefile.json', readableStream)
68 | 
69 | ```
70 | 
71 | ## API / Examples
72 | 
73 | #### `const drive = new Drive(storagePath, [key], [options])`
74 | 
75 | Create a drive to be shared over the network which can be replicated and seeded by other peers.
76 | 
77 | - `storagePath`: The directory where you want the drive to be created.
78 | - `key`: The public key of the remote drive you want to clone.
79 | 
80 | Options include:
81 | 
82 | ```js
83 | {
84 |   storage, // Override Hypercore's default random-access-file storage with a different random-access-storage module
85 |   encryptionKey, // Optionally pass an encryption key to encrypt the drive's database
86 |   keyPair: { // ed25519 keypair
87 |     publicKey,
88 |     secretKey
89 |   },
90 |   joinSwarm: true | false, // Optionally set whether or not to join Hyperswarm when starting the drive. Defaults to true.
91 |   swarmOpts: { // Set server to true to start this drive as a server and announce its public key to the network
92 |     server: true | false,
93 |     client: true | false
94 |   },
95 |   checkNetworkStatus: true | false // Listen for when the drive's network status changes
96 | }
97 | ```
98 | 
99 | ```js
100 | const Drive = require('@telios/nebula-drive')
101 | 
102 | // Create a new local drive.
103 | const localDrive = new Drive(__dirname + "/drive", null, {
104 |   keyPair,
105 |   swarmOpts: {
106 |     server: true,
107 |     client: true
108 |   }
109 | })
110 | 
111 | await localDrive.ready()
112 | 
113 | // Key to be shared with other devices or services that want to seed this drive
114 | const drivePubKey = localDrive.publicKey
115 | 
116 | // Clone a remote drive
117 | const remoteDrive = new Drive(__dirname + "/drive_remote", drivePubKey, {
118 |   keyPair,
119 |   swarmOpts: {
120 |     server: true,
121 |     client: true
122 |   }
123 | })
124 | 
125 | await remoteDrive.ready()
126 | ```
127 | 
128 | #### `await drive.ready()`
129 | 
130 | Initializes the drive and all of the resources it needs.
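
The examples above assume `keyPair` is already in scope and that `readableStream` is any Node.js readable stream (e.g. from `fs.createReadStream()`). The drive expects an ed25519 keypair of the shape `{ publicKey, secretKey }` with `Buffer` values. Below is a minimal sketch of generating one with `sodium-native` (already a dependency of this package); any other ed25519 keypair source will work as well:

```js
const sodium = require('sodium-native')

// Allocate output buffers and generate an ed25519 keypair for the drive to listen on
const publicKey = Buffer.alloc(sodium.crypto_sign_PUBLICKEYBYTES)
const secretKey = Buffer.alloc(sodium.crypto_sign_SECRETKEYBYTES)
sodium.crypto_sign_keypair(publicKey, secretKey)

const keyPair = { publicKey, secretKey }
```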
131 | 
132 | #### `await drive.addPeer(publicKey)`
133 | 
134 | Adds a remote drive as a new writer. After a peer has been added, the drive will automatically try to reconnect to this peer after every restart.
135 | 
136 | Example Usage:
137 | 
138 | ```js
139 | // Local drive on Device A
140 | const drive1 = new Drive(__dirname + "/drive", null, {
141 |   keyPair,
142 |   swarmOpts: {
143 |     server: true,
144 |     client: true
145 |   }
146 | })
147 | 
148 | // Local drive on Device B
149 | const drive2 = new Drive(__dirname + "/drive", null, {
150 |   keyPair,
151 |   swarmOpts: {
152 |     server: true,
153 |     client: true
154 |   }
155 | })
156 | 
157 | 
158 | await drive2.addPeer(drive1.publicKey)
159 | ```
160 | 
161 | #### `await drive.removePeer(publicKey)`
162 | 
163 | Stop replicating with another drive peer.
164 | 
165 | 
166 | #### `const file = await drive.writeFile(path, readableStream, [opts])`
167 | 
168 | Write a file from a readable stream. When choosing to encrypt a file, the encryption key is passed back in the response. Each file is encrypted with a unique key, which should be stored separately.
169 | 
170 | - `path`: Full path where the file resides on the local drive `dir/to/my/file.jpg`
171 | - `readableStream`: Any readable stream, e.g. `fs.createReadStream()`
172 | 
173 | Options include:
174 | ```js
175 | // When encrypted is true, a key and header value will be returned after the file has been written
176 | {
177 |   encrypted: true
178 | }
179 | ```
180 | 
181 | #### `const stream = await drive.readFile(path)`
182 | 
183 | Creates a readable stream of data from the requested file path.
184 | 
185 | - `path`: Full path where the file resides on the local drive `dir/to/my/file.jpg`
186 | 
192 | 
193 | #### `const stream = await drive.fetchFileByDriveHash(discoveryKey, fileHash, [opts])`
194 | 
195 | Drives with many files may not want to announce every file by its hash due to network bandwidth limits. In this case, a drive has the option of sharing its `discoveryKey`, which peers can use to connect to the drive and then request files by their hash.
196 | 
197 | - `discoveryKey`: The remote drive's discovery key (`drive.discoveryKey`), which peers use to request resources from the drive.
198 | - `fileHash`: Hash of the file being requested on the remote drive.
199 | - `opts`: If a key and header are passed in, the returned stream will be the deciphered data
200 |   - `key`: Encryption key used for deciphering the encrypted stream. This key is returned from the `drive.writeFile` method.
201 |   - `header`: Needed for validating the encrypted stream. This gets returned from `drive.writeFile()`.
202 | 
203 | #### `const stream = drive.decryptFileStream(stream, key, header)`
204 | 
205 | If `drive.fetchFileByDriveHash` is returning encrypted data, then `decryptFileStream` will transform that stream and return a new stream of deciphered data.
206 | 
207 | - `stream`: Readable stream of encrypted data
208 | - `key`: Encryption key used for deciphering the encrypted stream. This key is returned from the `drive.writeFile` method.
209 | - `header`: Needed for validating the encrypted stream. This gets returned from `drive.writeFile()`.
210 | 
211 | #### `await drive.fetchFileBatch(files, cb)`
212 | 
213 | Fetching files as a batch automatically chunks parallel requests into a fixed batch size, so a drive can request as many files as it needs without impacting performance.
214 | 
215 | - `files`: Array of file objects with the following structure
216 |   - `discovery_key`: The remote drive's discovery key (`drive.discoveryKey`), which peers use to request resources from the drive.
217 |   - `hash`: Hash of the file being requested on the remote drive.
218 |   - `key`: Encryption key used for deciphering the encrypted stream. This key is returned from the `drive.writeFile` method.
219 |   - `header`: Needed for validating the encrypted stream. This gets returned from `drive.writeFile()`.
220 | - `cb`: Callback method that runs after every file stream has been initialized. Use this for handling what to do with the individual file streams. Note that this should return a promise.
221 | 
222 | Example Usage:
223 | 
224 | ```js
225 | 
226 | await drive.fetchFileBatch(files, (stream, file) => {
227 |   return new Promise((resolve, reject) => {
228 |     const writeStream = fs.createWriteStream(`./${file.path}`)
229 |     pump(stream, writeStream, (err) => {
230 |       err ? reject(err) : resolve()
231 |     })
232 |   })
233 | })
234 | 
235 | ```
236 | 
237 | #### `await drive.close()`
238 | 
239 | Fully close the drive and all of its resources.
240 | 
241 | #### `drive.on('message', (peerPubKey, socket) => {})`
242 | 
243 | Emitted when the drive has received a message from a peer.
244 | 
245 | - `peerPubKey`: Public key of the peer that sent the message
246 | - `socket`: The socket returned on this event can be used as a duplex stream for bi-directional communication with the connecting peer: `socket.write()`, `socket.on('data', data => {})`
247 | 
248 | #### `drive.on('file-add', (file, enc) => {})`
249 | 
250 | Emitted when a new file has been added to a local drive.
251 | 
252 | - `file`: A file object
253 |   - `path`: Drive path the file was saved to
254 |   - `hash`: Hash of the file
255 | - `enc`: Passes back properties needed to decrypt the file
256 |   - `key`: Key needed to decrypt the file
257 |   - `header`: Needed for validating the encrypted stream
258 | 
259 | #### `drive.on('sync', () => {})`
260 | 
261 | Emitted when the drive has synced any remote data.
262 | 
263 | #### `drive.on('file-sync', (file) => {})`
264 | 
265 | Emitted when the drive has synced a remote file.
266 | 
267 | #### `drive.on('file-unlink', (file) => {})`
268 | 
269 | Emitted when a file has been deleted on the drive.
270 | 
271 | #### `drive.on('fetch-error', (err) => {})`
272 | 
273 | Emitted when there has been an error downloading from the remote drive.
274 | 
275 | #### `drive.on('network-updated', (network) => {})`
276 | 
277 | Emitted when either the internet connection or the drive's connection to Hyperswarm has changed. The drive option `checkNetworkStatus` must be set to true in order for these events to be emitted.
278 | 
279 | Returns:
280 | - `network`
281 |   - `internet`: true|false
282 |   - `drive`: true|false
283 | 
284 | #### `const collection = await drive.db.collection(name)`
285 | 
286 | Creates a new key value collection. Collections are encrypted with the drive's `encryptionKey` (`drive.encryptionKey`) when the key is passed in during initialization.
287 | 
288 | #### `await collection.put(key, value)`
289 | 
290 | Inserts a new document into the collection. The value should be a JSON-serializable object.
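
Example Usage (the collection name and document fields here are illustrative):

```js
const collection = await drive.db.collection('mail')

// Values are stored as JSON documents under the given key
await collection.put('email_1234', {
  subject: 'Hello',
  unread: true
})
```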
291 | 
292 | #### `await collection.get(key)`
293 | 
294 | Get a document by its key.
295 | 
296 | #### `await collection.del(key)`
297 | 
298 | Deletes a document by its key.
299 | 
--------------------------------------------------------------------------------
/commitlint.config.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Telios-org/nebula-drive/cc4d18e96392fe383d4f04676d4017fbea5148be/commitlint.config.js
--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs')
2 | const EventEmitter = require('events')
3 | const getDirName = require('path').dirname
4 | const path = require('path')
5 | const Database = require('./lib/database')
6 | const Hyperbee = require('hyperbee')
7 | const Hypercore = require('./lib/core')
8 | const pump = require('pump')
9 | const Crypto = require('./lib/crypto')
10 | const Swarm = require('./lib/swarm')
11 | const stream = require('stream')
12 | const blake = require('blakejs')
13 | const Hyperswarm = require('hyperswarm')
14 | const MemoryStream = require('memorystream')
15 | const { v4: uuidv4 } = require('uuid')
16 | const FixedChunker = require('./util/fixedChunker.js')
17 | const RequestChunker = require('./util/requestChunker.js')
18 | const WorkerKeyPairs = require('./util/workerKeyPairs.js')
19 | const isOnline = require('is-online');
20 | 
21 | const HASH_OUTPUT_LENGTH = 32 // bytes
22 | const MAX_PLAINTEXT_BLOCK_SIZE = 65536
23 | const MAX_ENCRYPTED_BLOCK_SIZE = 65553
24 | const FILE_TIMEOUT = 10000 // How long to wait for the on data event when downloading a file from a remote drive.
25 | const FILE_RETRY_ATTEMPTS = 2 // Maximum retry attempts when fetching a file from a remote drive
26 | const FILE_BATCH_SIZE = 10 // How many parallel requests are made in each file request batch
27 | 
28 | 
29 | class Drive extends EventEmitter {
30 | constructor(drivePath, peerPubKey, { storage, keyPair, writable, swarmOpts, encryptionKey, fileTimeout, fileRetryAttempts, checkNetworkStatus, joinSwarm }) {
31 | super()
32 | 
33 | this.storage = storage
34 | this.encryptionKey = encryptionKey
35 | this.database = null;
36 | this.db = null;
37 | this.drivePath = drivePath
38 | this.swarmOpts = swarmOpts
39 | this.publicKey = null
40 | this.peerPubKey = peerPubKey // Key used to clone and seed drive. Should only be shared with trusted sources
41 | this.keyPair = keyPair // ed25519 keypair to listen on
42 | this.writable = writable
43 | this.fileTimeout = fileTimeout || FILE_TIMEOUT
44 | this.fileRetryAttempts = fileRetryAttempts - 1 || FILE_RETRY_ATTEMPTS - 1
45 | this.requestQueue = new RequestChunker(null, FILE_BATCH_SIZE)
46 | this.checkNetworkStatus = checkNetworkStatus
47 | this.joinSwarm = typeof joinSwarm === 'boolean' ? joinSwarm : true
48 | this.network = {
49 |   internet: false,
50 |   drive: false
51 | }
52 | 
53 | // When using custom storage, transform drive path into beginning of the storage namespace
54 | this.storageName = drivePath.slice(drivePath.lastIndexOf('/') + 1, drivePath.length)
55 | 
56 | 
57 | this._localCore = new Hypercore(this.storage || path.join(drivePath, `./LocalCore`), { storageNamespace: `${this.storageName}:local-core` })
58 | this._swarm = null
59 | this._workerKeyPairs = new WorkerKeyPairs(FILE_BATCH_SIZE)
60 | this._collections = {}
61 | this._filesDir = path.join(drivePath, `./Files`)
62 | this._localHB = null // Local Key value datastore only.
This db does not sync with remote drives.
63 | this._lastSeq = null
64 | this._checkInternetInt = null
65 | this._checkInternetInProgress = false
66 | 
67 | if (!fs.existsSync(drivePath)) {
68 |   fs.mkdirSync(drivePath)
69 | }
70 | 
71 | if (!fs.existsSync(this._filesDir)) {
72 |   fs.mkdirSync(this._filesDir)
73 | }
74 | 
75 | this.requestQueue.on('process-queue', async files => {
76 |   this.requestQueue.reset()
77 | 
78 |   await this.fetchFileBatch(files, (stream, file) => {
79 |     return new Promise((resolve, reject) => {
80 |       fs.mkdirSync(getDirName(this._filesDir + file.path), { recursive: true })
81 | 
82 |       const writeStream = fs.createWriteStream(this._filesDir + file.path)
83 | 
84 |       pump(stream, writeStream, (err) => {
85 |         if (err) return reject(err)
86 | 
87 |         setTimeout(() => {
88 |           this.emit('file-sync', file)
89 |         })
90 | 
91 |         resolve()
92 |       })
93 |     })
94 |   })
95 | })
96 | 
97 | process.on('uncaughtException', err => {
98 |   // gracefully catch uncaught exceptions
99 | })
100 | 
101 | // Periodically check that this drive is connected to the internet.
102 | // When the internet is down, emit a network status updated event.
103 | if(this.checkNetworkStatus) {
104 |   this._checkInternetInt = setInterval(async () => {
105 |     if(!this._checkInternetInProgress) {
106 |       this._checkInternetInProgress = true
107 |       await this._checkInternet();
108 |       this._checkInternetInProgress = false
109 |     }
110 |   }, 1500)
111 | }
112 | }
113 | 
114 | async ready() {
115 | await this._bootstrap()
116 | 
117 | this.publicKey = this.database.localMetaCore.key.toString('hex')
118 | 
119 | if (this.peerPubKey) {
120 |   this.discoveryKey = createTopicHash(this.peerPubKey).toString('hex')
121 | } else {
122 |   this.discoveryKey = createTopicHash(this.publicKey).toString('hex')
123 | }
124 | 
125 | // Data here can only be read by peer drives
126 | // that are sharing the same drive secret
127 | this._collections.files = await this.database.collection('file')
128 | 
129 | if (this.keyPair && this.joinSwarm) {
130 |   await this.connect()
131 | }
132 | 
133 | this._lastSeq = await this._localHB.get('lastSeq')
134 | 
135 | const stream = this.database.metaBase.createReadStream({ live: true })
136 | 
137 | stream.on('data', async data => {
138 |   const node = {
139 |     ...JSON.parse(data.value.toString()),
140 |     seq: data.seq
141 |   }
142 | 
143 |   if (
144 |     (node.key !== '__peers' && !this._lastSeq) ||
145 |     (node.key !== '__peers' && this._lastSeq && data.seq > this._lastSeq.seq)
146 |   ) {
147 |     await this._update(node)
148 |   }
149 | })
150 | 
151 | // This stopped streaming async updates after migrating to autobase
152 | // const hs = this.metadb.createHistoryStream({ live: true, gte: this._lastSeq ?
-1 : 1 })
153 | 
154 | // hs.on('data', async data => {
155 | //   this.emit('sync', data)
156 | //   if (data.key !== '__peers') {
157 | //     data.value = JSON.parse(data.value).value
158 | //     await this._update(data)
159 | //   }
160 | // })
161 | 
162 | // hs.on('error', err => {
163 | //   // catch get out of bounds errors
164 | // })
165 | 
166 | this.opened = true
167 | }
168 | 
169 | // Connect to the Hyperswarm network
170 | async connect() {
171 | if (this._swarm) {
172 |   await this._swarm.close()
173 | }
174 | 
175 | this._swarm = new Swarm({
176 |   keyPair: this.keyPair,
177 |   workerKeyPairs: this._workerKeyPairs.keyPairs,
178 |   topic: this.discoveryKey,
179 |   publicKey: this.peerPubKey || this.publicKey,
180 |   isServer: this.swarmOpts.server,
181 |   isClient: this.swarmOpts.client,
182 |   acl: this.swarmOpts.acl
183 | })
184 | 
185 | if(this.checkNetworkStatus) {
186 |   this._swarm.on('disconnected', () => {
187 |     if(this.network.drive) {
188 |       this.network.drive = false
189 |       this.emit('network-updated', { drive: this.network.drive })
190 |     }
191 |   })
192 | 
193 |   this._swarm.on('connected', () => {
194 |     if(!this.network.drive) {
195 |       this.network.drive = true
196 |       this.emit('network-updated', { drive: this.network.drive })
197 |     }
198 |   })
199 | }
200 | 
201 | 
202 | this._swarm.on('message', (peerPubKey, data) => {
203 |   this.emit('message', peerPubKey, data)
204 | })
205 | 
206 | this._swarm.on('file-requested', socket => {
207 |   socket.once('data', async data => {
208 |     const fileHash = data.toString('utf-8')
209 |     const file = await this.metadb.get(fileHash)
210 | 
211 |     if (!file || file.value.deleted) {
212 |       let err = new Error()
213 |       err.message = 'Requested file was not found on drive'
214 |       socket.destroy(err)
215 |     } else {
216 |       const readStream = fs.createReadStream(path.join(this.drivePath, `./Files${file.value.path}`))
217 |       pump(readStream, socket, (err) => {
218 |         // handle done
219 |       })
220 |     }
221 |   })
222 | 
223 |   socket.on('error', (err) => {
224 |     // handle errors
225 |   })
226 | })
227 | 
228 | await this._swarm.ready()
229 | }
230 | 
231 | async addPeer(peerKey) {
232 | const remotePeers = await this._localHB.get('remotePeers')
233 | 
234 | const peers = remotePeers ? [...remotePeers.value, peerKey] : [peerKey] // Guard against the entry not existing on the first add
235 | 
236 | await this._localHB.put('remotePeers', peers)
237 | 
238 | await this.database.addInput(peerKey)
239 | }
240 | 
241 | // Remove Peer
242 | async removePeer(peerKey) {
243 | await this.database.removeInput(peerKey)
244 | }
245 | 
246 | async writeFile(path, readStream, opts = {}) {
247 | let filePath = path
248 | let dest
249 | const uuid = uuidv4()
250 | 
251 | if (filePath[0] === '/') {
252 |   filePath = filePath.slice(1, filePath.length)
253 | }
254 | 
255 | if (opts.encrypted) {
256 |   dest = `${this._filesDir}/${uuid}`
257 | } else {
258 |   fs.mkdirSync(getDirName(this._filesDir + path), { recursive: true })
259 |   dest = this._filesDir + path
260 | }
261 | 
262 | return new Promise(async (resolve, reject) => {
263 |   const pathSeg = filePath.split('/')
264 |   let fullFile = pathSeg[pathSeg.length - 1]
265 |   let fileName
266 |   let fileExt
267 | 
268 |   if (fullFile.indexOf('.') > -1) {
269 |     fileName = fullFile.split('.')[0]
270 |     fileExt = fullFile.split('.')[1]
271 |   }
272 | 
273 |   const writeStream = fs.createWriteStream(dest)
274 | 
275 |   if (opts.encrypted && !opts.skipEncryption) {
276 |     const fixedChunker = new FixedChunker(readStream, MAX_PLAINTEXT_BLOCK_SIZE)
277 |     const { key, header, file } = await Crypto.encryptStream(fixedChunker, writeStream)
278 | 
279 |     await
this.metadb.put(file.hash, {
280 |       uuid,
281 |       size: file.size,
282 |       hash: file.hash,
283 |       path: `/${uuid}`,
284 |       peer_key: this.keyPair.publicKey.toString('hex'),
285 |       discovery_key: this.discoveryKey
286 |     })
287 | 
288 |     const fileMeta = {
289 |       uuid,
290 |       name: fileName,
291 |       size: file.size,
292 |       mimetype: fileExt,
293 |       encrypted: true,
294 |       key: key.toString('hex'),
295 |       header: header.toString('hex'),
296 |       hash: file.hash,
297 |       path: filePath,
298 |       peer_key: this.keyPair.publicKey.toString('hex'),
299 |       discovery_key: this.discoveryKey
300 |     }
301 | 
302 |     await this._collections.files.put(filePath, fileMeta)
303 | 
304 |     this.emit('file-add', fileMeta)
305 | 
306 |     resolve({
307 |       key: key.toString('hex'),
308 |       header: header.toString('hex'),
309 |       ...fileMeta
310 |     })
311 |   } else {
312 |     let bytes = 0
313 |     const hash = blake.blake2bInit(HASH_OUTPUT_LENGTH, null)
314 |     const calcHash = new stream.Transform({
315 |       transform
316 |     })
317 | 
318 |     function transform(chunk, encoding, callback) {
319 |       bytes += chunk.byteLength
320 | 
321 |       blake.blake2bUpdate(hash, chunk)
322 |       callback(null, chunk)
323 |     }
324 | 
325 |     pump(readStream, calcHash, writeStream, async () => {
326 |       setTimeout(async () => {
327 |         const _hash = Buffer.from(blake.blake2bFinal(hash)).toString('hex')
328 | 
329 |         if (bytes > 0) {
330 |           await this.metadb.put(_hash, {
331 |             uuid,
332 |             size: bytes,
333 |             hash: _hash,
334 |             path,
335 |             peer_key: this.keyPair.publicKey.toString('hex'),
336 |             discovery_key: this.discoveryKey
337 |           })
338 | 
339 |           const fileMeta = {
340 |             uuid,
341 |             name: fileName,
342 |             size: bytes,
343 |             mimetype: fileExt,
344 |             hash: _hash,
345 |             path: filePath,
346 |             peer_key: this.keyPair.publicKey.toString('hex'),
347 |             discovery_key: this.discoveryKey
348 |           }
349 | 
350 |           await this._collections.files.put(filePath, fileMeta)
351 | 
352 |           this.emit('file-add', fileMeta)
353 |           resolve(fileMeta)
354 | 
355 |         } else {
356 |           reject(new Error('No bytes were written.'))
357 |         }
358 |       })
359 |     })
360 |   }
361 | })
362 | }
363 | 
364 | async readFile(path) {
365 | let file
366 | let filePath = path
367 | 
368 | if (filePath[0] === '/') {
369 |   filePath = filePath.slice(1, filePath.length)
370 | }
371 | 
372 | try {
373 |   file = await this._collections.files.get(filePath)
374 | 
375 |   file = file.value
376 | 
377 |   const stream = fs.createReadStream(`${this._filesDir}/${file.uuid}`)
378 | 
379 |   // If key then decipher file
380 |   if (file.encrypted && file.key && file.header) {
381 |     const fixedChunker = new FixedChunker(stream, MAX_ENCRYPTED_BLOCK_SIZE)
382 |     return Crypto.decryptStream(fixedChunker, file.key, file.header)
383 |   } else {
384 |     return stream
385 |   }
386 | } catch (err) {
387 |   throw err
388 | }
389 | }
390 | 
391 | decryptFileStream(stream, key, header) {
392 | const fixedChunker = new FixedChunker(stream, MAX_ENCRYPTED_BLOCK_SIZE)
393 | return Crypto.decryptStream(fixedChunker, key, header)
394 | }
395 | 
396 | // TODO: Implement this
397 | fetchFileByHash(fileHash) {
398 | }
399 | 
400 | async fetchFileByDriveHash(discoveryKey, fileHash, opts = {}) {
401 | const keyPair = opts.keyPair || this.keyPair
402 | const memStream = new MemoryStream()
403 | const topic = blake.blake2bHex(discoveryKey, null, HASH_OUTPUT_LENGTH)
404 | 
405 | 
406 | if (!fileHash || typeof fileHash !== 'string') {
407 |   throw new Error('File hash is required before making a request.')
408 | }
409 | 
410 | if (!discoveryKey || typeof discoveryKey !== 'string') {
411 |   throw new Error('Discovery key cannot be null and must be a string.')
412 | }
413 | 
414 | try {
415 |   await this._initFileSwarm(memStream, topic, fileHash, 0, { keyPair })
416 | } catch(e) {
417 |   setTimeout(() => {
418 |     memStream.destroy(e)
419 |   })
420 |   return memStream
421 | }
422 | 
423 | if (opts.key && opts.header) {
424 |   return this.decryptFileStream(memStream, opts.key, opts.header)
425 | }
426 | 
427 | return memStream
428 | }
429 | 
430 | async fetchFileBatch(files, cb) {
431 | const batches = new RequestChunker(files, FILE_BATCH_SIZE)
432 | 
433 | for (let batch of batches) {
434 |   const requests = []
435 | 
436 |   for (let file of batch) {
437 |     requests.push(new Promise(async (resolve, reject) => {
438 |       if (file.discovery_key) {
439 |         const keyPair = this._workerKeyPairs.getKeyPair()
440 |         const stream = await this.fetchFileByDriveHash(file.discovery_key, file.hash, { key: file.key, header: file.header, keyPair })
441 | 
442 |         await cb(stream, file)
443 | 
444 |         resolve()
445 |       } else {
446 |         resolve() // TODO: Fetch files by hash
447 |       }
448 |     }))
449 |   }
450 | 
451 |   await Promise.all(requests)
452 |   this.requestQueue.queue = []
453 | }
454 | }
455 | 
456 | async _initFileSwarm(stream, topic, fileHash, attempts, { keyPair }) {
457 | return new Promise((resolve, reject) => {
458 |   if (attempts > this.fileRetryAttempts) {
459 |     const err = new Error('Unable to make a connection or receive data within the allotted time.')
460 |     err.fileHash = fileHash
461 |     this._workerKeyPairs.release(keyPair.publicKey.toString('hex'))
462 |     stream.destroy(err)
463 |     return reject(err)
464 |   }
465 | 
466 |   const swarm = new Hyperswarm({ keyPair })
467 | 
468 |   let connected = false
469 |   let receivedData = false
470 |   let streamError = false
471 | 
472 |   swarm.join(Buffer.from(topic, 'hex'), { server: false, client: true })
473 | 
474 |   swarm.on('connection', async (socket, info) => {
475 |     receivedData = false
476 | 
477 |     if (!connected) {
478 |       connected = true
479 | 
480 |       // Tell the host drive which file we want
481 |       socket.write(fileHash)
482 | 
483 |       socket.on('data', (data) => {
484 |         resolve()
485 |         stream.write(data)
486 |         receivedData = true
487 |       })
488 | 
489 |       socket.once('end', () => {
490 |         if (receivedData) {
491 |           this._workerKeyPairs.release(keyPair.publicKey.toString('hex'))
492 |           stream.end()
493 |           swarm.destroy()
494 |         }
495 |       })
496 | 
497 |       socket.once('error', (err) => {
498 |         stream.destroy(err)
499 |         streamError = true
500 |         reject(err)
501 |       })
502 |     }
503 |   })
504 | 
505 |   setTimeout(async () => {
506 |     if (!connected || streamError || !receivedData) {
507 |       attempts += 1
508 |       await swarm.leave(topic)
509 |       await swarm.destroy()
510 | 
511 |       try {
512 |         await this._initFileSwarm(stream, topic, fileHash, attempts, { keyPair })
513 |         resolve()
514 |       } catch(e) {
515 |         reject(e)
516 |       }
517 |     }
518 |   }, this.fileTimeout)
519 | })
520 | }
521 | 
522 | async _checkInternet() {
523 | return new Promise((resolve, reject) => {
524 |   isOnline().then((isOnline) => {
525 |     if(!isOnline && this.network.internet) {
526 |       this.network.internet = false
527 |       this.emit('network-updated', { internet: this.network.internet })
528 |     }
529 | 
530 |     if(isOnline && !this.network.internet) {
531 |       this.network.internet = true
532 |       this.emit('network-updated', { internet: this.network.internet })
533 |     }
534 | 
535 |     resolve()
536 |   })
537 | })
538 | }
539 | 
540 | async unlink(filePath) {
541 | let fp = filePath
542 | 
543 | if (fp[0] === '/') {
544 |   fp = filePath.slice(1, fp.length)
545 | }
546 | 
547 | try {
548 |   let file = await this._collections.files.get(fp)
549 | 
550
| if (!file) return
551 | 
552 |   file = await this.metadb.get(file.value.hash)
553 | 
554 |   if(!file) return
555 | 
556 |   fs.unlinkSync(path.join(this._filesDir, file.value.path))
557 | 
558 |   await this._collections.files.put(fp, {
559 |     uuid: file.value.uuid,
560 |     deleted: true
561 |   })
562 | 
563 |   await this.metadb.put(file.value.hash, {
564 |     uuid: file.value.uuid,
565 |     discovery_key: file.value.discovery_key,
566 |     deleted: true
567 |   })
568 | 
569 |   this.emit('file-unlink', file.value)
570 | } catch (err) {
571 |   throw err
572 | }
573 | }
574 | 
575 | async destroyHyperfile(path) {
576 | const filePath = await this.bee.get(path)
577 | const file = await this.bee.get(filePath.value.hash)
578 | await this._clearStorage(file.value)
579 | }
580 | 
581 | async _bootstrap() {
582 | // Init local core
583 | this._localHB = new Hyperbee(this._localCore, {
584 |   keyEncoding: 'utf-8',
585 |   valueEncoding: 'json'
586 | })
587 | 
588 | this.database = new Database(this.storage || this.drivePath, {
589 |   keyPair: this.keyPair,
590 |   storageName: this.storageName,
591 |   encryptionKey: this.encryptionKey,
592 |   peerPubKey: this.peerPubKey,
593 |   acl: this.swarmOpts.acl,
594 |   joinSwarm: this.joinSwarm
595 | })
596 | 
597 | if(this.checkNetworkStatus) {
598 |   this.database.on('disconnected', () => {
599 |     if(this.network.drive) {
600 |       this.network.drive = false
601 |       this.emit('network-updated', { drive: this.network.drive })
602 |     }
603 |   })
604 | 
605 |   this.database.on('connected', () => {
606 |     if(!this.network.drive) {
607 |       this.network.drive = true
608 |       this.emit('network-updated', { drive: this.network.drive })
609 |     }
610 |   })
611 | }
612 | 
613 | await this.database.ready()
614 | 
615 | this.db = this.database
616 | this.metadb = this.database.metadb
617 | }
618 | 
619 | async _update(data) {
620 | 
621 | let lastSeq
622 | lastSeq = await this._localHB.get(`lastSeq`)
623 | 
624 | if (!lastSeq) lastSeq = { value: { seq: null } }
625 | 
626 | if (
627 |   data.type === 'put' &&
628 |   !data.value.deleted &&
629 |   data.value.peer_key !== this.keyPair.publicKey.toString('hex') &&
630 |   lastSeq.value.seq !== data.seq
631 | ) {
632 |   this.emit('sync')
633 | 
634 |   if (data.value.hash) {
635 |     try {
636 |       await this._localHB.put(`lastSeq`, { seq: data.seq })
637 |       this.requestQueue.addFile(data.value)
638 |     } catch (err) {
639 |       throw err
640 |     }
641 |   }
642 | }
643 | 
644 | if (
645 |   data.type === 'put' &&
646 |   data.value.deleted &&
647 |   data.value.peer_key !== this.keyPair.publicKey.toString('hex')
648 | ) {
649 |   try {
650 |     const filePath = path.join(this._filesDir, `/${data.value.uuid}`)
651 |     if (fs.existsSync(filePath)) {
652 |       fs.unlinkSync(filePath)
653 | 
654 |       setTimeout(() => {
655 |         this.emit('file-unlink', data.value)
656 |       })
657 |     }
658 |   } catch (err) {
659 |     throw err
660 |   }
661 | }
662 | }
663 | 
664 | info() {
665 | const bytes = getTotalSize(this.drivePath)
666 | return {
667 |   size: bytes
668 | }
669 | }
670 | 
671 | /**
672 |  * Close drive and disconnect from all Hyperswarm topics
673 |  */
674 | async close() {
675 | if(this.joinSwarm) {
676 |   await this._swarm.close()
677 | }
678 | await this.database.close()
679 | await this._localCore.close()
680 | 
681 | if(this.checkNetworkStatus) {
682 |   clearInterval(this._checkInternetInt)
683 | 
684 |   this.network = {
685 |     internet: false,
686 |     drive: false
687 |   }
688 | 
689 |   this.emit('network-updated', this.network)
690 | }
691 | 
692 | this.opened = false
693 | }
694 | }
695 | 
696 | function createTopicHash(topic) {
697 | const crypto =
require('crypto')
698 | 
699 | return crypto.createHash('sha256')
700 |   .update(topic)
701 |   .digest()
702 | }
703 | 
704 | async function auditFile(stream, remoteHash) {
705 | return new Promise((resolve, reject) => {
706 |   let hash = blake.blake2bInit(HASH_OUTPUT_LENGTH, null)
707 | 
708 |   stream.on('error', err => reject(err))
709 |   stream.on('data', chunk => {
710 |     blake.blake2bUpdate(hash, chunk)
711 |   })
712 |   stream.on('end', () => {
713 |     const localHash = Buffer.from(blake.blake2bFinal(hash)).toString('hex')
714 | 
715 |     if (localHash === remoteHash)
716 |       return resolve()
717 | 
718 |     reject(new Error('Hashes do not match'))
719 |   })
720 | })
721 | }
722 | 
723 | 
724 | const getAllFiles = function (dirPath, arrayOfFiles) {
725 | const files = fs.readdirSync(dirPath)
726 | 
727 | arrayOfFiles = arrayOfFiles || []
728 | 
729 | files.forEach(function (file) {
730 |   if (fs.statSync(dirPath + "/" + file).isDirectory()) {
731 |     arrayOfFiles = getAllFiles(dirPath + "/" + file, arrayOfFiles)
732 |   } else {
733 |     arrayOfFiles.push(path.join(dirPath, file))
734 |   }
735 | })
736 | 
737 | return arrayOfFiles
738 | }
739 | 
740 | const getTotalSize = function (directoryPath) {
741 | const arrayOfFiles = getAllFiles(directoryPath)
742 | 
743 | let totalSize = 0
744 | 
745 | arrayOfFiles.forEach(function (filePath) {
746 |   totalSize += fs.statSync(filePath).size
747 | })
748 | 
749 | return totalSize
750 | }
751 | 
752 | module.exports = Drive
753 | 
--------------------------------------------------------------------------------
/lib/autobee.js:
--------------------------------------------------------------------------------
1 | 
2 | const Hyperbee = require('hyperbee');
3 | 
4 | class Autobee {
5 | constructor(autobase, opts) {
6 |   this.autobase = autobase;
7 | 
8 |   this.subs = {}
9 | 
10 |   const index = autobase.createRebasedIndex({
11 |     ...opts,
12 |     unwrap: true,
13 |     apply: this._apply.bind(this)
14 |   })
15 | 
16 |   this.bee = new Hyperbee(index, {
17 |     ...opts,
18 |     extension: false
19 |   })
20 | }
21 | 
22 | ready() {
23 |   return this.autobase.ready()
24 | }
25 | 
26 | _encode(value, change, seq) {
27 |   return JSON.stringify({ value, change: change.toString('hex'), seq })
28 | }
29 | 
30 | _decode(raw) {
31 |   return JSON.parse(raw)
32 | }
33 | 
34 | async _apply(batch, clocks, change) {
35 |   const self = this
36 |   const localClock = clocks.local
37 |   let b = this.bee.batch({ update: false })
38 | 
39 |   for (const node of batch) {
40 | 
41 |     const op = JSON.parse(node.value.toString())
42 | 
43 |     if(op.type === 'del') {
44 |       if (op.value.sub) {
45 |         const sub = this.subs[op.value.sub]
46 | 
47 |         b = sub.batch({ update: false })
48 |         await b.del(op.key)
49 |       }
50 |     }
51 | 
52 |     if (op.type === 'put') {
53 |       if (op.value.__sub) {
54 |         const sub = this.subs[op.value.__sub]
55 | 
56 |         delete op.value.__sub;
57 | 
58 |         b = sub.batch({ update: false })
59 |       }
60 | 
61 |       const existing = await b.get(op.key, { update: false })
62 |       await b.put(op.key, this._encode(op.value, change, node.seq))
63 |       if (!existing) continue
64 |       await handleConflict(existing)
65 |     }
66 |   }
67 | 
68 |   return await b.flush()
69 | 
70 |   async function handleConflict(existing) {
71 |     const { change: existingChange, seq: existingSeq } = self._decode(existing.value)
72 |     // If the existing write is not causally contained in the current clock.
73 |     // TODO: Write a helper for this.
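    // A write conflicts when the incoming change's local clock has not causally
    // seen the existing write: either the existing write's change is missing from
    // the clock entirely, or the clock only covers it up to an earlier seq.
    // Conflicting values are parked under a '_conflict/<key>' entry rather than
    // being silently overwritten; causally-contained values clear that entry.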
74 | const conflictKey = ['_conflict', existing.key].join('/') 75 | if (!localClock.has(existingChange) || (localClock.get(existingChange) < existingSeq)) { 76 | await b.put(conflictKey, existing.value) 77 | } else { 78 | await b.del(conflictKey) 79 | } 80 | } 81 | } 82 | 83 | async put(key, value, opts) { 84 | const op = Buffer.from(JSON.stringify({ type: 'put', key, value })) 85 | return await this.autobase.append(op, opts) 86 | } 87 | 88 | async get(key, opts = {}) { 89 | let b = this.bee 90 | 91 | if (opts.sub) { 92 | b = this.subs[opts.sub] 93 | } 94 | 95 | const node = await b.get(key) 96 | 97 | if (!node) return null 98 | node.value = this._decode(node.value).value 99 | return node 100 | } 101 | 102 | async del(key, value) { 103 | const op = Buffer.from(JSON.stringify({ type: 'del', key, value})) 104 | return await this.autobase.append(op) 105 | } 106 | 107 | async sub(key) { 108 | const sub = await this.bee.sub(key) 109 | this.subs[key] = sub 110 | return this 111 | } 112 | 113 | createReadStream(opts) { 114 | return this.autobase.createReadStream(opts) 115 | } 116 | 117 | createHistoryStream(opts) { 118 | return this.bee.createHistoryStream(opts) 119 | } 120 | } 121 | 122 | module.exports = Autobee; -------------------------------------------------------------------------------- /lib/core.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore'); 2 | 3 | class Hypercore { 4 | constructor(storage, key, opts) { 5 | this.storage = storage 6 | this.opts = opts 7 | 8 | if(key && typeof key === 'object') { 9 | this.opts = key 10 | } 11 | 12 | if(typeof storage !== 'string') { 13 | this.storage = (filename, opts) => { 14 | return storage(`${this.opts ? this.opts.storageNamespace + ':' : ''}${filename}`, opts) 15 | } 16 | } 17 | 18 | return new hypercore(this.storage, key, opts) 19 | } 20 | } 21 | 22 | module.exports = Hypercore -------------------------------------------------------------------------------- /lib/crypto.js: -------------------------------------------------------------------------------- 1 | const sodium = require('sodium-native'); 2 | const stream = require('stream'); 3 | const pump = require('pump'); 4 | const blake = require('blakejs'); 5 | 6 | exports.verifySig = (sig, publicKey, msg) => { 7 | let m = Buffer.from(JSON.stringify(msg)); 8 | let signature = Buffer.alloc(sodium.crypto_sign_BYTES); 9 | let pk = Buffer.alloc(sodium.crypto_sign_PUBLICKEYBYTES); 10 | 11 | pk.fill(Buffer.from(publicKey, 'hex')); 12 | signature.fill(Buffer.from(sig, 'hex')); 13 | 14 | return sodium.crypto_sign_verify_detached(signature, m, pk); 15 | }; 16 | 17 | exports.generateSigKeypair = () => { 18 | let pk = Buffer.alloc(sodium.crypto_sign_PUBLICKEYBYTES); 19 | let sk = Buffer.alloc(sodium.crypto_sign_SECRETKEYBYTES); 20 | 21 | sodium.crypto_sign_keypair(pk, sk); 22 | 23 | return { 24 | publicKey: pk.toString('hex'), 25 | privateKey: sk.toString('hex') 26 | } 27 | } 28 | 29 | exports.generateBoxKeypair = () => { 30 | let pk = Buffer.alloc(sodium.crypto_box_PUBLICKEYBYTES); 31 | let sk = Buffer.alloc(sodium.crypto_box_SECRETKEYBYTES); 32 | 33 | sodium.crypto_box_keypair(pk, sk); 34 | 35 | return { 36 | publicKey: pk.toString('hex'), 37 | privateKey: sk.toString('hex') 38 | } 39 | } 40 | 41 | exports.encryptPubSecretBoxMessage = (msg, sbpkey, privKey) => { 42 | const m = Buffer.from(msg, 'utf-8'); 43 | const c = Buffer.alloc(m.length + sodium.crypto_box_MACBYTES); 44 | const n = Buffer.alloc(sodium.crypto_box_NONCEBYTES); 45 
| const pk = Buffer.alloc(sodium.crypto_box_PUBLICKEYBYTES); 46 | const sk = Buffer.alloc(sodium.crypto_box_SECRETKEYBYTES); 47 | 48 | pk.fill(Buffer.from(sbpkey, 'hex')); 49 | sk.fill(Buffer.from(privKey, 'hex')); 50 | 51 | sodium.crypto_box_easy(c, m, n, pk, sk); 52 | 53 | return c.toString('hex'); 54 | } 55 | 56 | exports.decryptPubSecretBoxMessage = (msg, sbpkey, privKey) => { 57 | const c = Buffer.from(msg, 'hex'); 58 | const m = Buffer.alloc(c.length - sodium.crypto_box_MACBYTES); 59 | const n = Buffer.alloc(sodium.crypto_box_NONCEBYTES); 60 | const pk = Buffer.alloc(sodium.crypto_box_PUBLICKEYBYTES); 61 | const sk = Buffer.alloc(sodium.crypto_box_SECRETKEYBYTES); 62 | 63 | pk.fill(Buffer.from(sbpkey, 'hex')); 64 | sk.fill(Buffer.from(privKey, 'hex')); 65 | 66 | const bool = sodium.crypto_box_open_easy(m, c, n, pk, sk); 67 | 68 | if (!bool) throw new Error('Unable to decrypt message.'); 69 | 70 | return m.toString('utf-8'); 71 | } 72 | 73 | exports.signDetached = (msg, privKey) => { 74 | let sig = Buffer.alloc(sodium.crypto_sign_BYTES); 75 | let m = Buffer.from(JSON.stringify(msg)); 76 | let sk = Buffer.alloc(sodium.crypto_sign_SECRETKEYBYTES); 77 | 78 | sk.fill(Buffer.from(privKey, 'hex')); 79 | 80 | sodium.crypto_sign_detached(sig, m, sk); 81 | 82 | const signature = sig.toString('hex'); 83 | 84 | return signature; 85 | }; 86 | 87 | exports.encryptSealedBox = (msg, pubKey) => { 88 | let m = Buffer.from(msg, 'utf-8'); 89 | let c = Buffer.alloc(m.length + sodium.crypto_box_SEALBYTES); 90 | let pk = Buffer.from(pubKey, 'hex'); 91 | 92 | sodium.crypto_box_seal(c, m, pk); 93 | 94 | return c; 95 | } 96 | 97 | exports.decryptSealedBox = (msg, privKey, pubKey) => { 98 | let c = Buffer.from(msg, 'hex'); 99 | let m = Buffer.alloc(c.length - sodium.crypto_box_SEALBYTES); 100 | let sk = Buffer.from(privKey, 'hex'); 101 | let pk = Buffer.from(pubKey, 'hex'); 102 | 103 | var bool = sodium.crypto_box_seal_open(m, c, pk, sk); 104 | 105 | if (!bool) throw new Error('Unable to decrypt message.'); 106 | 107 | return m.toString('utf-8'); 108 | } 109 | 110 | exports.hash = (str, k) => { 111 | let out = Buffer.alloc(sodium.crypto_generichash_BYTES); 112 | let txt = Buffer.from(str); 113 | 114 | if(k) { 115 | k = Buffer.from(k, 'hex'); 116 | sodium.crypto_generichash(out, txt, k); 117 | } else { 118 | sodium.crypto_generichash(out, txt); 119 | } 120 | 121 | return out.toString('hex'); 122 | }; 123 | 124 | 125 | exports.hashPassword = str => { 126 | let out = Buffer.alloc(sodium.crypto_pwhash_STRBYTES); 127 | let passwd = Buffer.from(str, 'utf-8'); 128 | let opslimit = sodium.crypto_pwhash_OPSLIMIT_MODERATE; 129 | let memlimit = sodium.crypto_pwhash_MEMLIMIT_MODERATE; 130 | 131 | sodium.crypto_pwhash_str(out, passwd, opslimit, memlimit); 132 | 133 | return out; 134 | }; 135 | 136 | exports.generateMasterKey = () => { 137 | let key = Buffer.alloc(sodium.crypto_kdf_KEYBYTES); 138 | sodium.crypto_kdf_keygen(key); 139 | return key; 140 | }; 141 | 142 | exports.deriveKeyFromMaster = (masterKey, skId) => { 143 | let subkey = Buffer.alloc(sodium.crypto_kdf_BYTES_MAX); 144 | let subkeyId = skId; 145 | let ctx = Buffer.alloc(sodium.crypto_kdf_CONTEXTBYTES); 146 | let key = Buffer.from(masterKey, 'hex'); 147 | 148 | sodium.crypto_kdf_derive_from_key(subkey, subkeyId, ctx, key); 149 | 150 | return subkey; 151 | }; 152 | 153 | exports.randomBytes = data => { 154 | let buf = Buffer.alloc(sodium.randombytes_SEEDBYTES); 155 | let seed = Buffer.from(data, 'utf-8'); 156 | 157 | sodium.randombytes_buf_deterministic(buf, 
seed);
158 | 
159 | return buf.toString('hex');
160 | };
161 | 
162 | exports.generateAEDKey = () => {
163 | let k = Buffer.alloc(sodium.crypto_aead_xchacha20poly1305_ietf_KEYBYTES);
164 | sodium.crypto_aead_xchacha20poly1305_ietf_keygen(k);
165 | return k.toString('hex');
166 | }
167 | 
168 | exports.encryptAED = (msg, key) => {
169 | let m = Buffer.from(msg, 'utf-8');
170 | let c = Buffer.alloc(m.length + sodium.crypto_aead_xchacha20poly1305_ietf_ABYTES);
171 | let nonce = Buffer.alloc(sodium.crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
172 | let k = Buffer.from(key, 'hex');
173 | 
174 | sodium.randombytes_buf(nonce);
175 | 
176 | sodium.crypto_aead_xchacha20poly1305_ietf_encrypt(c, m, null, null, nonce, k);
177 | 
178 | let encrypted = Buffer.from([]);
179 | encrypted = Buffer.concat([nonce, c], sodium.crypto_aead_xchacha20poly1305_ietf_NPUBBYTES + c.length);
180 | 
181 | return encrypted;
182 | }
183 | 
184 | exports.decryptAED = (c, key) => {
185 | // slice nonce out of the encrypted message
186 | const nonce = c.slice(0, sodium.crypto_aead_xchacha20poly1305_ietf_NPUBBYTES);
187 | let cipher = c.slice(sodium.crypto_aead_xchacha20poly1305_ietf_NPUBBYTES, c.length);
188 | 
189 | let m = Buffer.alloc(cipher.length - sodium.crypto_aead_xchacha20poly1305_ietf_ABYTES);
190 | let k = Buffer.from(key, 'hex');
191 | 
192 | sodium.crypto_aead_xchacha20poly1305_ietf_decrypt(m, null, cipher, null, nonce, k);
193 | 
194 | return m.toString();
195 | }
196 | 
197 | exports.generateStreamKey = () => {
198 | let k = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_KEYBYTES);
199 | sodium.crypto_secretstream_xchacha20poly1305_keygen(k);
200 | return k;
201 | }
202 | 
203 | exports.initStreamPushState = (k) => {
204 | let state = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_STATEBYTES);
205 | let header = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_HEADERBYTES);
206 | sodium.crypto_secretstream_xchacha20poly1305_init_push(state, header, k);
207 | 
208 | return { state: state, header: header };
209 | }
210 | 
211 | exports.secretStreamPush = (chunk, state) => {
212 | let c = Buffer.alloc(chunk.length + sodium.crypto_secretstream_xchacha20poly1305_ABYTES);
213 | let tag = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_TAGBYTES);
214 | 
215 | sodium.crypto_secretstream_xchacha20poly1305_push(state, c, chunk, null, tag);
216 | 
217 | return c;
218 | }
219 | 
220 | exports.initStreamPullState = (header, k) => {
221 | let state = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_STATEBYTES);
222 | sodium.crypto_secretstream_xchacha20poly1305_init_pull(state, header, k);
223 | return state;
224 | }
225 | 
226 | exports.secretStreamPull = (chunk, state) => {
227 | let m = Buffer.alloc(chunk.length - sodium.crypto_secretstream_xchacha20poly1305_ABYTES);
228 | let tag = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_TAGBYTES);
229 | 
230 | sodium.crypto_secretstream_xchacha20poly1305_pull(state, m, tag, chunk, null);
231 | 
232 | return m;
233 | }
234 | 
235 | exports.encryptStream = async (readStream, writeStream) => {
236 | const OUTPUT_LENGTH = 32 // bytes
237 | const hash = blake.blake2bInit(OUTPUT_LENGTH, null);
238 | const key = _generateStreamKey();
239 | 
240 | let bytes = 0;
241 | let { state, header } = _initStreamPushState(key);
242 | 
243 | return new Promise((resolve, reject) => {
244 |   const encrypt = _encrypt(header, state);
245 | 
246 |   pump(readStream, encrypt, writeStream, (err) => {
247 |     if(err) return reject(err);
248 |     const file = {
249 |       hash:
Buffer.from(blake.blake2bFinal(hash)).toString('hex'), 250 | size: bytes 251 | } 252 | resolve({ key, header, file }); 253 | }) 254 | }); 255 | 256 | function _encrypt(header, state) { 257 | let message = Buffer.from([]); 258 | 259 | return new stream.Transform({ 260 | transform 261 | }); 262 | 263 | 264 | function transform(chunk, encoding, callback) { 265 | message = _secretStreamPush(chunk, state); 266 | bytes += message.byteLength; 267 | blake.blake2bUpdate(hash, message); 268 | callback(null, message); 269 | } 270 | } 271 | 272 | function _generateStreamKey() { 273 | let k = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_KEYBYTES); 274 | sodium.crypto_secretstream_xchacha20poly1305_keygen(k); 275 | return k; 276 | } 277 | 278 | function _initStreamPushState(k) { 279 | let state = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_STATEBYTES); 280 | let header = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_HEADERBYTES); 281 | sodium.crypto_secretstream_xchacha20poly1305_init_push(state, header, k); 282 | 283 | return { state: state, header: header }; 284 | } 285 | 286 | function _secretStreamPush(chunk, state) { 287 | let c = Buffer.alloc(chunk.length + sodium.crypto_secretstream_xchacha20poly1305_ABYTES); 288 | let tag = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_TAGBYTES); 289 | 290 | sodium.crypto_secretstream_xchacha20poly1305_push(state, c, chunk, null, tag); 291 | 292 | return c; 293 | } 294 | } 295 | 296 | exports.decryptStream = (readStream, key, header) => { 297 | if(!Buffer.isBuffer(key) && typeof key === 'string') { 298 | key = Buffer.from(key, 'hex'); 299 | } 300 | 301 | if(!Buffer.isBuffer(header) && typeof header === 'string') { 302 | header = Buffer.from(header, 'hex'); 303 | } 304 | 305 | const decrypt = _decrypt(key, header); 306 | 307 | pump(readStream, decrypt, (err) => { 308 | if(err) return err; 309 | }); 310 | 311 | return decrypt; 312 | 313 | 314 | function _decrypt(k, h) { 315 | let message = Buffer.from([]); 316 | let state = _initStreamPullState(h, k); 317 | 318 | return new stream.Transform({ 319 | writableObjectMode: true, 320 | transform 321 | }); 322 | 323 | function transform(chunk, encoding, callback) { 324 | try { 325 | message = _secretStreamPull(chunk, state); 326 | callback(null, message); 327 | } catch(err) { 328 | callback(err, null); 329 | } 330 | } 331 | } 332 | 333 | function _initStreamPullState(header, k) { 334 | let state = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_STATEBYTES); 335 | sodium.crypto_secretstream_xchacha20poly1305_init_pull(state, header, k); 336 | return state; 337 | } 338 | 339 | function _secretStreamPull(chunk, state) { 340 | try { 341 | let m = Buffer.alloc(chunk.length - sodium.crypto_secretstream_xchacha20poly1305_ABYTES); 342 | let tag = Buffer.alloc(sodium.crypto_secretstream_xchacha20poly1305_TAGBYTES); 343 | 344 | sodium.crypto_secretstream_xchacha20poly1305_pull(state, m, tag, chunk, null); 345 | 346 | return m; 347 | } catch(err) { 348 | throw err; 349 | } 350 | } 351 | } 352 | -------------------------------------------------------------------------------- /lib/database.js: -------------------------------------------------------------------------------- 1 | const Autobase = require('autobase') 2 | const Hypercore = require('./core') 3 | const EventEmitter = require('events') 4 | const Autobee = require('./autobee') 5 | const Hyperswarm = require('hyperswarm') 6 | 7 | class Database extends EventEmitter { 8 | constructor(storage, opts = {}) { 9 | super() 
10 | 11 | const key = opts.peerPubKey ? opts.peerPubKey : null; 12 | 13 | this.storageName = opts.storageName 14 | this.keyPair = opts.keyPair 15 | this.acl = opts.acl 16 | this.peerPubKey = key 17 | this.encryptionKey = opts.encryptionKey 18 | this.storage = typeof storage === 'string' ? `${storage}/Database` : storage 19 | this.storageIsString = typeof storage === 'string' ? true : false 20 | this.joinSwarm = typeof opts.joinSwarm === 'boolean' ? opts.joinSwarm : true 21 | 22 | // Init local Autobase 23 | this.localWriter = new Hypercore(this.storageIsString ? `${this.storage}/local-writer` : this.storage, 24 | { 25 | encryptionKey: this.encryptionKey, 26 | storageNamespace: `${this.storageName}:local-writer` 27 | } 28 | ) 29 | 30 | this.autobaseIndex = new Hypercore(this.storageIsString ? `${this.storage}/local-index` : this.storage, 31 | { 32 | encryptionKey: this.encryptionKey, 33 | storageNamespace: `${this.storageName}:local-index` 34 | } 35 | ) 36 | 37 | this.base = new Autobase([this.localWriter], {input: this.localWriter, indexes: this.autobaseIndex }) 38 | this.bee = new Autobee(this.base, { keyEncoding: 'utf-8', valueEncoding: 'json' }) 39 | 40 | // Init Meta Autobase 41 | this.localMetaCore = new Hypercore(this.storageIsString ? `${this.storage}/meta-local` : this.storage, { storageNamespace: `${this.storageName}:meta-local` }) 42 | this.remoteMetaCore = new Hypercore(this.storageIsString ? `${this.storage}/meta-remote` : this.storage, this.peerPubKey, { storageNamespace: `${this.storageName}:meta-remote` }) 43 | this.metaIndex = new Hypercore(this.storageIsString ? `${this.storage}/meta-index` : this.storage, null, { storageNamespace: `${this.storageName}:meta-index` }) 44 | this.metaBase = new Autobase([this.localMetaCore], { input: this.localMetaCore, indexes: this.metaIndex }) 45 | this.metadb = new Autobee(this.metaBase, { keyEncoding: 'utf-8', valueEncoding: 'json' }) 46 | 47 | this.collections = {} 48 | this.cores = [ 49 | this.localWriter, 50 | this.autobaseIndex, 51 | this.localMetaCore, 52 | this.remoteMetaCore, 53 | this.metaIndex 54 | ] 55 | this.connections = [] 56 | } 57 | 58 | async ready() { 59 | await this._joinSwarm(this.localWriter, { server: true, client: true }) 60 | await this._joinSwarm(this.localMetaCore, { server: true, client: true }) 61 | 62 | let remotePeers 63 | 64 | if(this.peerPubKey) { 65 | await this._joinSwarm(this.remoteMetaCore, { server: true, client: true }) 66 | await this.metaBase.addInput(this.remoteMetaCore) 67 | 68 | remotePeers = await this.metadb.get('__peers') 69 | } else { 70 | await this._joinSwarm(this.remoteMetaCore, { server: true, client: true }) 71 | } 72 | 73 | if(!remotePeers) { 74 | await this.metadb.put('__peers', { 75 | [this.keyPair.publicKey.toString('hex')]: { 76 | writer: this.localWriter.key.toString('hex'), 77 | meta: this.localMetaCore.key.toString('hex') 78 | } 79 | }) 80 | } else { 81 | for (const key in remotePeers.value) { 82 | const peer = remotePeers.value[key] 83 | 84 | const peerWriter = new Hypercore( 85 | this.storageIsString ? `${this.storage}/peers/${peer.writer}` : this.storage, 86 | peer.writer, 87 | { 88 | encryptionKey: this.encryptionKey, 89 | storageNamespace: `${this.storageName}:peers:${peer.writer}` 90 | } 91 | ) 92 | 93 | this.cores.push(peerWriter) 94 | 95 | await this._joinSwarm(peerWriter, { server: true, client: true }) 96 | await this.base.addInput(peerWriter) 97 | 98 | if(peer.meta !== this.remoteMetaCore.key.toString('hex')) { 99 | const peerMeta = new Hypercore(this.storageIsString ? 
/lib/swarm.js:
--------------------------------------------------------------------------------
1 | const EventEmitter = require('events');
2 | const Hyperswarm = require('hyperswarm');
3 | const DHT = require('@hyperswarm/dht'); // used by connect(); the tests already depend on this package
4 | const blake = require('blakejs');
5 | 
6 | class Swarm extends EventEmitter {
7 |   constructor({ acl, keyPair, workerKeyPairs, publicKey, topic, ephemeral, isServer, isClient }) {
8 |     super()
9 | 
10 |     this.fileSwarm = new Hyperswarm({ // Networking for streaming file data
11 |       firewall(remotePublicKey) {
12 |         // Hyperswarm firewall callbacks deny the connection when they return true,
13 |         // so only connections from known worker keys are let through here
14 |         return !workerKeyPairs[remotePublicKey.toString('hex')];
15 |       }
16 |     })
17 |     this.keyPair = keyPair
18 |     this.publicKey = publicKey
19 |     this.discoveryTopic = blake.blake2bHex(topic, null, 32)
20 |     this.ephemeral = ephemeral
21 |     this.closing = false
22 |     this.fileDiscovery = null
23 |     this.isServer = isServer
24 |     this.isClient = isClient
25 |     this.node = new DHT() // standalone DHT node used by connect() for direct peer sockets
26 |     this.server = new Hyperswarm({
27 |       keyPair,
28 |       firewall(remotePublicKey) {
29 |         // Deny the connection when an ACL exists and the remote key is not on it
30 |         if (acl && acl.length) {
31 |           return acl.indexOf(remotePublicKey.toString('hex')) === -1;
32 |         }
33 | 
34 |         return false;
35 |       }
36 |     })
37 |   }
38 | 
39 |   async ready() {
40 |     this.fileSwarm.on('connection', async (socket, info) => {
41 |       this.emit('file-requested', socket)
42 |     })
43 | 
44 |     this.server.on('connection', (socket, info) => {
45 |       const peerPubKey = socket.remotePublicKey.toString('hex')
46 | 
47 |       socket.on('data', data => {
48 |         this.emit('message', peerPubKey, data)
49 |       })
50 |     })
51 | 
52 |     try {
53 |       await this.server.listen(this.keyPair)
54 |     } catch(e) {
55 |       return this.emit('disconnected')
56 |     }
57 | 
58 |     try {
59 |       this.fileDiscovery = this.fileSwarm.join(Buffer.from(this.discoveryTopic, 'hex'), { server: true, client: false })
60 |       await this.fileDiscovery.flushed()
61 |     } catch(e) {
62 |       return this.emit('disconnected')
63 |     }
64 | 
65 |     this.emit('connected')
66 |   }
67 | 
68 |   connect(publicKey) {
69 |     const noiseSocket = this.node.connect(Buffer.from(publicKey, 'hex'))
70 | 
71 |     // An arrow function keeps `this` bound to the Swarm instance
72 |     noiseSocket.on('open', () => {
73 |       // noiseSocket fully open with the other peer
74 |       this.emit('onPeerConnected', noiseSocket)
75 |     });
76 |   }
77 | 
78 |   async close() {
79 |     const promises = []
80 | 
81 |     promises.push(this.fileDiscovery.destroy())
82 |     promises.push(this.server.leave(this.keyPair.publicKey))
83 |     promises.push(this.node.destroy())
84 | 
85 |     await Promise.all(promises)
86 |   }
87 | }
88 | 
89 | module.exports = Swarm
--------------------------------------------------------------------------------
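A hedged sketch of wiring up the Swarm class above. The option names mirror its constructor; the topic string and worker-pool size are illustrative only, and WorkerKeyPairs is the helper from util/workerKeyPairs.js shown later in this dump.

const DHT = require('@hyperswarm/dht')
const Swarm = require('./lib/swarm')
const WorkerKeyPairs = require('./util/workerKeyPairs')

async function main() {
  const keyPair = DHT.keyPair()
  const workers = new WorkerKeyPairs(4) // pre-generate four worker key pairs

  const swarm = new Swarm({
    acl: [],                          // empty ACL -> the message server accepts any peer
    keyPair,
    workerKeyPairs: workers.keyPairs, // only these keys pass the fileSwarm firewall
    publicKey: keyPair.publicKey.toString('hex'),
    topic: 'example-drive',           // hashed with blake2b into the discovery topic
    isServer: true,
    isClient: true
  })

  swarm.on('message', (peerPubKey, data) => {
    console.log(`message from ${peerPubKey}:`, data.toString())
  })

  swarm.on('file-requested', socket => {
    // stream the requested file data back over the socket here
  })

  await swarm.ready()
}

main()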
/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "@telios/nebula-drive",
3 |   "version": "5.3.2",
4 |   "description": "Real-time distributed file and data storage.",
5 |   "main": "index.js",
6 |   "scripts": {
7 |     "test": "cross-env NODE_ENV=test_sdk tape tests/*.test.js | tap-spec",
8 |     "semantic-release": "semantic-release",
9 |     "commit": "cz",
10 |     "debug": "cross-env NODE_ENV=test_sdk tape tests/drive.test.js | tap-spec --nolazy --debug-brk=5858"
11 |   },
12 |   "repository": {
13 |     "type": "git",
14 |     "url": "https://github.com/Telios-org/nebula-drive.git"
15 |   },
16 |   "author": "Hexadecibal",
17 |   "license": "MIT",
18 |   "bugs": {
19 |     "url": "https://github.com/Telios-org/nebula-drive/issues"
20 |   },
21 |   "homepage": "https://github.com/Telios-org/nebula-drive",
22 |   "dependencies": {
23 |     "@geut/hypercore-promise": "^2.1.1",
24 |     "async": "^3.2.0",
25 |     "autobase": "1.0.0-alpha.1",
26 |     "blakejs": "^1.1.0",
27 |     "bson": "^4.5.0",
28 |     "chokidar": "^3.5.1",
29 |     "dat-encoding": "^5.0.1",
30 |     "hyperbee": "^1.6.3",
31 |     "hypercore": "10.0.0-alpha.13",
32 |     "hyperswarm": "^3.0.1",
33 |     "is-online": "^9.0.1",
34 |     "memorystream": "^0.3.1",
35 |     "pump": "^3.0.0",
36 |     "random-access-memory": "^3.1.1",
37 |     "rimraf": "^3.0.2",
38 |     "sodium-native": "^3.2.0",
39 |     "uuid": "^8.2.0"
40 |   },
41 |   "devDependencies": {
42 |     "@commitlint/cli": "^12.0.1",
43 |     "@commitlint/config-conventional": "^12.0.1",
44 |     "@semantic-release/git": "^9.0.0",
45 |     "@semantic-release/gitlab": "^6.0.9",
46 |     "@semantic-release/npm": "^7.1.0",
47 |     "cross-env": "^7.0.3",
48 |     "cz-conventional-changelog": "^3.0.1",
49 |     "del": "^6.0.0",
50 |     "husky": "^6.0.0",
51 |     "semantic-release": "^17.4.2",
52 |     "tap-spec": "^2.2.2",
53 |     "tape": "^5.2.2",
54 |     "tape-promise": "^4.0.0"
55 |   },
56 |   "directories": {
57 |     "lib": "lib",
58 |     "test": "tests"
59 |   },
60 |   "keywords": [
61 |     "telios",
62 | "decentralized", 63 | "distributed", 64 | "file", 65 | "sharing", 66 | "drive", 67 | "storage", 68 | "p2p", 69 | "peer-to-peer", 70 | "hypercore", 71 | "hypercore-protocol", 72 | "hyperdrive", 73 | "hyperswarm" 74 | ], 75 | "husky": { 76 | "hooks": { 77 | "prepare-commit-msg": "exec < /dev/tty && git cz --hook || true" 78 | } 79 | }, 80 | "config": { 81 | "commitizen": { 82 | "path": "./node_modules/cz-conventional-changelog" 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /tests/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Telios-org/nebula-drive/cc4d18e96392fe383d4f04676d4017fbea5148be/tests/.DS_Store -------------------------------------------------------------------------------- /tests/data/test.doc: -------------------------------------------------------------------------------- 1 | this is a test document -------------------------------------------------------------------------------- /tests/database.test.js: -------------------------------------------------------------------------------- 1 | const tape = require('tape') 2 | const _test = require('tape-promise').default 3 | const test = _test(tape) 4 | const Database = require('../lib/database') 5 | const ram = require('random-access-memory') 6 | const DHT = require('@hyperswarm/dht') 7 | 8 | test('Database - Create new db', async t => { 9 | t.plan(1) 10 | 11 | const keyPair = DHT.keyPair() 12 | const encryptionKey = Buffer.alloc(32, 'hello world') 13 | 14 | try { 15 | const database = new Database(ram, { 16 | keyPair, 17 | encryptionKey 18 | }) 19 | 20 | await database.ready() 21 | 22 | t.ok(database.localMetaCore.key.toString('hex')) 23 | } catch (err) { 24 | console.log('ERROR: ', err) 25 | t.error(err) 26 | } 27 | }) 28 | 29 | test('Database - Test put/get', async t => { 30 | t.plan(1) 31 | 32 | const keyPair = DHT.keyPair() 33 | const encryptionKey = Buffer.alloc(32, 'hello world') 34 | 35 | try { 36 | const database = new Database(ram, { 37 | keyPair, 38 | encryptionKey 39 | }) 40 | 41 | await database.ready() 42 | 43 | const collection = await database.collection('foobar') 44 | await collection.put('foo', { hello: 'bar' }) 45 | 46 | const item = await collection.get('foo') 47 | 48 | t.equals(item.value.hello, 'bar') 49 | } catch (err) { 50 | t.error(err) 51 | } 52 | }) 53 | 54 | test('Database - delete from hyperbee', async t => { 55 | t.plan(1) 56 | 57 | const keyPair = DHT.keyPair() 58 | const encryptionKey = Buffer.alloc(32, 'hello world') 59 | 60 | try { 61 | const database = new Database(ram, { 62 | keyPair, 63 | encryptionKey 64 | }) 65 | 66 | await database.ready() 67 | 68 | const collection = await database.collection('foobar') 69 | await collection.put('foo', { hello: 'bar' }) 70 | await collection.del('foo') 71 | 72 | const item = await collection.get('foo') 73 | 74 | t.equals(item, null) 75 | } catch (err) { 76 | t.error(err) 77 | } 78 | }) -------------------------------------------------------------------------------- /tests/drive.test.js: -------------------------------------------------------------------------------- 1 | const tape = require('tape') 2 | const _test = require('tape-promise').default 3 | const test = _test(tape) 4 | const fs = require('fs') 5 | const path = require('path') 6 | const del = require('del') 7 | const Drive = require('..') 8 | const EventEmitter = require('events') 9 | const DHT = require('@hyperswarm/dht') 10 | const ram = 
require('random-access-memory') 11 | 12 | // const { signingKeypair } = Account.makeKeys() 13 | 14 | // const keyPair = { 15 | // publicKey: Buffer.from(signingKeypair.publicKey, 'hex'), 16 | // secretKey: Buffer.from(signingKeypair.privateKey, 'hex') 17 | // } 18 | const keyPair = DHT.keyPair() 19 | const keyPair2 = DHT.keyPair() 20 | const keyPair3 = DHT.keyPair() 21 | const keyPair4 = DHT.keyPair() 22 | 23 | let drive 24 | let drive2 25 | let drive3 26 | let drive4 27 | let drive5 28 | let hyperFiles = [] 29 | 30 | const encryptionKey = Buffer.alloc(32, 'hello world') 31 | 32 | test('Drive - Create', async t => { 33 | t.plan(5) 34 | 35 | await cleanup() 36 | 37 | drive = new Drive(__dirname + '/drive', null, { 38 | storage: ram, 39 | keyPair, 40 | encryptionKey, 41 | swarmOpts: { 42 | server: true, 43 | client: true 44 | } 45 | }) 46 | 47 | await drive.ready() 48 | 49 | t.ok(drive.publicKey, `Drive has public key ${drive.publicKey}`) 50 | t.ok(drive.keyPair, `Drive has peer keypair`) 51 | t.ok(drive.db, `Drive has Database`) 52 | t.ok(drive.drivePath, `Drive has path ${drive.drivePath}`) 53 | t.equals(true, drive.opened, `Drive is open`) 54 | }) 55 | 56 | test('Drive - Upload Local Encrypted File', async t => { 57 | t.plan(24) 58 | 59 | try { 60 | const readStream = fs.createReadStream(path.join(__dirname, '/data/email.eml')) 61 | const file = await drive.writeFile('/email/rawEmailEncrypted.eml', readStream, { encrypted: true }) 62 | 63 | hyperFiles.push(file) 64 | 65 | t.ok(file.key, `File was encrypted with key`) 66 | t.ok(file.header, `File was encrypted with header`) 67 | t.ok(file.hash, `Hash of file was returned ${file.hash}`) 68 | t.ok(file.size, `Size of file in bytes was returned ${file.size}`) 69 | 70 | for (let i = 0; i < 20; i++) { 71 | const readStream = fs.createReadStream(path.join(__dirname, '/data/email.eml')) 72 | const file = await drive.writeFile(`/email/rawEmailEncrypted${i}.eml`, readStream, { encrypted: true }) 73 | t.ok(file) 74 | } 75 | } catch (e) { 76 | t.error(e) 77 | } 78 | }) 79 | 80 | test('Drive - Read Local Encrypted File', async t => { 81 | t.plan(2) 82 | 83 | const origFile = fs.readFileSync(path.join(__dirname, '/data/email.eml'), { encoding: 'utf-8' }) 84 | const stream = await drive.readFile('/email/rawEmailEncrypted.eml') 85 | let decrypted = '' 86 | 87 | stream.on('data', chunk => { 88 | decrypted += chunk.toString('utf-8') 89 | }) 90 | 91 | stream.on('end', () => { 92 | t.ok(decrypted.length, 'Returned encrypted data') 93 | t.equals(origFile.length, decrypted.length, 'Decrypted file matches original') 94 | }) 95 | }) 96 | 97 | test('Drive - Create Seed Peer', async t => { 98 | t.plan(22) 99 | 100 | drive2 = new Drive(__dirname + '/drive2', drive.publicKey, { 101 | keyPair: keyPair2, 102 | // encryptionKey: drive.encryptionKey, 103 | swarmOpts: { 104 | server: true, 105 | client: true 106 | } 107 | }) 108 | 109 | await drive2.ready() 110 | // await drive2.addPeer(drive.publicKey) 111 | 112 | drive2.on('file-sync', async (file) => { 113 | t.ok(file.uuid, `File has synced from remote peer`) 114 | }) 115 | 116 | const readStream = fs.createReadStream(path.join(__dirname, '/data/test.doc')) 117 | const file = await drive.writeFile('/email/test.doc', readStream) 118 | 119 | hyperFiles.push(file) 120 | }) 121 | 122 | test('Drive - Fetch Files from Remote Drive', async t => { 123 | t.plan(4) 124 | 125 | drive3 = new Drive(__dirname + '/drive3', null, { 126 | keyPair: keyPair3, 127 | swarmOpts: { 128 | server: true, 129 | client: true 130 | } 131 | }) 132 
| 
133 |   await drive3.ready()
134 | 
135 |   await drive3.fetchFileBatch(hyperFiles, (stream, file) => {
136 |     return new Promise((resolve, reject) => {
137 |       let content = ''
138 | 
139 |       stream.on('data', chunk => {
140 |         content += chunk.toString()
141 |       })
142 | 
143 |       stream.on('error', (err) => {
144 |         t.error(err, err.message)
145 |         resolve()
146 |       })
147 | 
148 |       stream.on('end', () => {
149 |         t.ok(file.hash, `File has hash ${file.hash}`)
150 |         t.ok(content.length, `File downloaded from remote peer`)
151 |         resolve()
152 |       })
153 |     })
154 |   })
155 | })
156 | 
157 | test('Drive - Fail to Fetch Files from Remote Drive', async t => {
158 |   t.plan(2)
159 | 
160 |   drive4 = new Drive(__dirname + '/drive4', null, {
161 |     keyPair: keyPair3,
162 |     swarmOpts: {
163 |       server: true,
164 |       client: true
165 |     }
166 |   })
167 |   drive5 = new Drive(__dirname + '/drive5', null, {
168 |     keyPair: keyPair4,
169 |     swarmOpts: {
170 |       server: true,
171 |       client: true
172 |     }
173 |   })
174 | 
175 |   await drive4.ready()
176 |   await drive5.ready()
177 | 
178 |   const readStream = fs.createReadStream(path.join(__dirname, '/data/email.eml'))
179 |   const file = await drive4.writeFile('/email/rawEmailEncrypted2.eml', readStream, { encrypted: true })
180 | 
181 |   await drive4.unlink(file.path)
182 | 
183 |   await drive5.fetchFileBatch([file], (stream, file) => {
184 |     return new Promise((resolve, reject) => {
185 |       let content = ''
186 | 
187 |       stream.on('data', chunk => {
188 |         content += chunk.toString()
189 |       })
190 | 
191 |       stream.on('error', (err) => {
192 |         t.ok(err.message, `Error has message: ${err.message}`)
193 |         t.equals(file.hash, err.fileHash, `Failed file hash matches the hash in the request`)
194 |         resolve()
195 |       })
196 |     })
197 |   })
198 | })
199 | 
200 | test('Drive - Unlink Local File', async t => {
201 |   t.plan(2)
202 | 
203 |   const drive1Size = drive.info().size
204 |   const drive2Size = drive2.info().size
205 | 
206 |   drive.on('file-unlink', file => {
207 |     t.ok(drive1Size > drive.info().size, `Drive1 size before: ${drive1Size} > size after: ${drive.info().size}`)
208 |   })
209 | 
210 |   drive2.on('file-unlink', file => {
211 |     t.ok(drive2Size > drive2.info().size, `Drive2 size before: ${drive2Size} > size after: ${drive2.info().size}`)
212 |   })
213 | 
214 |   await drive.unlink('/email/rawEmailEncrypted.eml')
215 | })
216 | 
217 | 
218 | test('Drive - Receive messages', async t => {
219 |   t.plan(1)
220 | 
221 |   drive.on('message', (publicKey, data) => {
222 |     const msg = JSON.parse(data.toString())
223 |     t.ok(msg, 'Drive can receive messages.')
224 |   })
225 | 
226 |   const node = new DHT()
227 |   const noiseSocket = node.connect(keyPair.publicKey)
228 | 
229 |   noiseSocket.on('open', function () {
230 |     noiseSocket.end(JSON.stringify({
231 |       type: 'newMail',
232 |       meta: 'meta mail message'
233 |     }))
234 |   })
235 | })
236 | 
237 | test('Drive - Get total size in bytes', t => {
238 |   t.plan(3)
239 | 
240 |   t.ok(drive.info(), `Drive 1 has size ${drive.info().size}`)
241 |   t.ok(drive2.info(), `Drive 2 has size ${drive2.info().size}`)
242 |   t.ok(drive3.info(), `Drive 3 has size ${drive3.info().size}`)
243 | })
244 | 
245 | test.onFinish(async () => {
246 |   await drive.close()
247 |   await drive2.close()
248 |   await drive3.close()
249 |   await drive4.close()
250 |   await drive5.close()
251 | 
252 |   await cleanup()
253 | 
254 |   process.exit(0)
255 | })
256 | 
257 | 
258 | async function cleanup() {
259 |   if (fs.existsSync(path.join(__dirname, '/drive'))) {
260 |     await del([
261 |       path.join(__dirname, '/drive')
262 |     ])
263 |   }
264 | 
265 |   if
(fs.existsSync(path.join(__dirname, '/drive2'))) { 266 | await del([ 267 | path.join(__dirname, '/drive2') 268 | ]) 269 | } 270 | 271 | if (fs.existsSync(path.join(__dirname, '/drive3'))) { 272 | await del([ 273 | path.join(__dirname, '/drive3') 274 | ]) 275 | } 276 | 277 | if (fs.existsSync(path.join(__dirname, '/drive4'))) { 278 | await del([ 279 | path.join(__dirname, '/drive4') 280 | ]) 281 | } 282 | 283 | if (fs.existsSync(path.join(__dirname, '/drive5'))) { 284 | await del([ 285 | path.join(__dirname, '/drive5') 286 | ]) 287 | } 288 | } -------------------------------------------------------------------------------- /tests/helpers/setup.js: -------------------------------------------------------------------------------- 1 | const fs = require("fs"); 2 | const path = require("path"); 3 | const { Drive, Account } = require("../../"); 4 | const del = require("del"); 5 | 6 | module.exports.init = async () => { 7 | await cleanup(); 8 | }; 9 | 10 | async function cleanup() { 11 | if (fs.existsSync(path.join(__dirname, "../localDrive"))) { 12 | await del([path.join(__dirname, "../localDrive")]); 13 | } 14 | 15 | if (fs.existsSync(path.join(__dirname, "../drive1"))) { 16 | await del([path.join(__dirname, "../drive1")]); 17 | } 18 | 19 | if (fs.existsSync(path.join(__dirname, "../drive2"))) { 20 | await del([path.join(__dirname, "../drive2")]); 21 | } 22 | 23 | if (fs.existsSync(path.join(__dirname, "../drive"))) { 24 | await del([path.join(__dirname, "../drive")]); 25 | } 26 | 27 | if (fs.existsSync(path.join(__dirname, "../peer-drive"))) { 28 | await del([path.join(__dirname, "../peer-drive")]); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /tests/vars.json: -------------------------------------------------------------------------------- 1 | { 2 | "ALICE_SB_PUB_KEY": "", 3 | "ALICE_SB_PRIV_KEY": "", 4 | "ALICE_SIG_PUB_KEY": "", 5 | "ALICE_SIG_PRIV_KEY": "", 6 | "ALICE_PEER_PUB_KEY": "", 7 | "ALICE_DRIVE_KEY": "", 8 | "ALICE_DIFF_KEY": "", 9 | "ALICE_PEER_SECRET_KEY": "", 10 | "ALICE_RECOVERY": "alice@mail.com", 11 | "ALICE_MAILBOX": "alice@telios.io", 12 | "ALICE_ACCOUNT_SIG": "", 13 | "ALICE_ACCOUNT_SERVER_SIG": "", 14 | "ALICE_DEVICE_1_ID": "", 15 | "ALICE_DEVICE_1_CORE_NAME": "Alice", 16 | "ALICE_DEVICE_1_KEY": "", 17 | "BOB_SB_PUB_KEY": "", 18 | "BOB_SB_PRIV_KEY": "", 19 | "BOB_SIG_PUB_KEY": "", 20 | "BOB_SIG_PRIV_KEY": "", 21 | "TEST_EMAIL_ENCRYPTED_META": "", 22 | "TEST_EMAIL": { 23 | "to": [{ 24 | "name": "Alice Tester", 25 | "address": "alice@telios.io" 26 | }], 27 | "from": [{ 28 | "name": "Bob Tester", 29 | "address": "bob@telios.io" 30 | }], 31 | "subject": "Hello Alice", 32 | "text_body": "You're my favorite test person ever", 33 | "html_body": "
You're my favorite test person ever
", 34 | "attachments": [ 35 | { 36 | "filename": "test.pdf", 37 | "fileblob": "--base64-data--", 38 | "mimetype": "application/pdf" 39 | }, 40 | { 41 | "filename": "test.txt", 42 | "fileblob": "--base64-data--", 43 | "mimetype": "text/plain" 44 | } 45 | ] 46 | } 47 | } -------------------------------------------------------------------------------- /util/fixedChunker.js: -------------------------------------------------------------------------------- 1 | const MemoryStream = require('memorystream'); 2 | const EventEmitter = require('events'); 3 | 4 | class Chunker extends EventEmitter { 5 | constructor(readStream, maxBlockSize) { 6 | super(); 7 | 8 | this.chunkSize = maxBlockSize; 9 | this.chunkStream = readStream; 10 | this.stream = new MemoryStream(); 11 | this.buffer = Buffer.from([]); 12 | 13 | this.openStream(); 14 | 15 | return this.stream; 16 | } 17 | 18 | resetBuffer() { 19 | this.buffer = Buffer.from([]); 20 | } 21 | 22 | processBuffer() { 23 | if(this.buffer.length === this.chunkSize) { 24 | this.stream.write(this.buffer); 25 | this.resetBuffer(); 26 | } 27 | } 28 | 29 | openStream() { 30 | this.chunkStream.on('error', (err) => { 31 | this.stream.destroy(err); 32 | }); 33 | 34 | this.chunkStream.on('data', chunk => { 35 | this.processData(chunk); 36 | }); 37 | 38 | this.chunkStream.on('end', () => { 39 | this.stream.end(this.buffer); 40 | this.resetBuffer(); 41 | }); 42 | } 43 | 44 | processData(data) { 45 | const chunks = this.reduce(data); 46 | 47 | chunks.forEach((chunk) => { 48 | this.stream.write(chunk); 49 | }); 50 | } 51 | 52 | reduce(data) { 53 | const chunks = []; 54 | 55 | if(this.buffer.length) { 56 | const intoBuffer = data.slice(0, this.chunkSize - this.buffer.length); 57 | this.buffer = Buffer.concat([this.buffer, intoBuffer]); 58 | 59 | data = data.slice(intoBuffer.length); 60 | 61 | this.processBuffer(); 62 | } 63 | 64 | while (data.length > this.chunkSize) { 65 | const chunk = data.slice(0, this.chunkSize); 66 | data = data.slice(this.chunkSize); 67 | 68 | chunks.push(chunk); 69 | } 70 | 71 | if (data.length + this.buffer.length <= this.chunkSize) { 72 | this.buffer = Buffer.concat([this.buffer, data]); 73 | this.processBuffer(); 74 | } 75 | 76 | return chunks; 77 | } 78 | } 79 | 80 | module.exports = Chunker; -------------------------------------------------------------------------------- /util/requestChunker.js: -------------------------------------------------------------------------------- 1 | const EventEmitter = require('events'); 2 | 3 | class RequestChunker extends EventEmitter { 4 | constructor(files, batchSize) { 5 | super(); 6 | 7 | this.BATCH_SIZE = batchSize || 5; 8 | this.files = files; 9 | this.queue = []; 10 | this.resetTimeout = 2000; 11 | this.timer = this.resetTimeout; 12 | this.interval = null; 13 | 14 | if(files) { 15 | return this.chunkFileRequests(files); 16 | } 17 | } 18 | 19 | chunkFileRequests(files) { 20 | const size = this.BATCH_SIZE; 21 | const chunked = []; 22 | 23 | for (let i = 0; i < files.length; i += size) { 24 | chunked.push(files.slice(i, i + size)); 25 | } 26 | 27 | return chunked; 28 | } 29 | 30 | addFile(file) { 31 | let exists = false; 32 | 33 | if(!this.interval) { 34 | this.startTimer(); 35 | } 36 | 37 | for(let i = 0; i < this.queue.length; i += 1) { 38 | if(this.queue[i].hash === file.hash) { 39 | exists = true; 40 | this.queue.splice(i, 1); 41 | } 42 | } 43 | 44 | if(!exists) { 45 | this.queue.push(file); 46 | } 47 | 48 | this.timer = this.resetTimeout; 49 | } 50 | 51 | processQueue() { 52 | this.emit('process-queue', 
this.queue); 53 | } 54 | 55 | startTimer() { 56 | this.interval = setInterval(() => { 57 | if(this.timer > 0) { 58 | this.timer -= 100; 59 | } 60 | 61 | if(this.timer === 0) { 62 | this.clearTimer(); 63 | this.processQueue(); 64 | } 65 | }, 100); 66 | } 67 | 68 | clearTimer() { 69 | clearInterval(this.interval); 70 | this.interval = null; 71 | this.timer = this.resetTimeout; 72 | } 73 | 74 | reset() { 75 | this.queue = []; 76 | } 77 | } 78 | 79 | module.exports = RequestChunker; -------------------------------------------------------------------------------- /util/workerKeyPairs.js: -------------------------------------------------------------------------------- 1 | const DHT = require('@hyperswarm/dht'); 2 | 3 | class WorkerKeyPairs { 4 | constructor(count) { 5 | this.keyPairs = {}; 6 | 7 | for(let i = 0; i < count; i += 1) { 8 | const keyPair = DHT.keyPair(); 9 | this.keyPairs[keyPair.publicKey.toString('hex')] = { active: false, ...keyPair }; 10 | } 11 | } 12 | 13 | getKeyPair() { 14 | for(let key in this.keyPairs) { 15 | 16 | if(!this.keyPairs[key].active) { 17 | this.keyPairs[key].active = true; 18 | return this.keyPairs[key]; 19 | } 20 | } 21 | 22 | return null; 23 | } 24 | 25 | release(key) { 26 | if(this.keyPairs[key]) { 27 | this.keyPairs[key].active = false; 28 | } 29 | } 30 | } 31 | 32 | module.exports = WorkerKeyPairs; --------------------------------------------------------------------------------
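Closing out the dump, a short sketch of the worker key pool above in use (the pool size here is arbitrary):

const WorkerKeyPairs = require('./util/workerKeyPairs')

const pool = new WorkerKeyPairs(2)

// Check out a key pair; it stays flagged active until released
const kp = pool.getKeyPair()
console.log(kp.publicKey.toString('hex'))

// Return it to the pool so another transfer can reuse it
pool.release(kp.publicKey.toString('hex'))

// Once every key pair in the pool is active, getKeyPair() returns null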