├── .nvmrc ├── .dockerignore ├── .env.production.example ├── src ├── logger.js ├── errors.js ├── hasEntriesCache.js ├── healthcheckService.js ├── __tests__ │ ├── mock3id.js │ ├── healthcheckService.test.js │ ├── didSupport.test.js │ ├── testClient.js │ └── pinning.test.js ├── messageBroker.js ├── s3.js ├── analytics.js ├── util.js ├── node.js └── pinning.js ├── Dockerfile ├── .env.development.example ├── .dependabot └── config.yml ├── .gitignore ├── deploy.sh ├── DEPLOYMENT.md ├── LICENSE ├── package.json ├── README.md ├── RELEASE-NOTES.md └── .circleci └── config.yml /.nvmrc: -------------------------------------------------------------------------------- 1 | 10.15 -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | .git 3 | tmp 4 | -------------------------------------------------------------------------------- /.env.production.example: -------------------------------------------------------------------------------- 1 | ADDRESS_SERVER_URL=https://beta.3box.io/address-server 2 | ORBITDB_PATH=/opt/orbitdb 3 | IPFS_PATH=/opt/ipfs 4 | REDIS_PATH=profilecache.h9luwi.0001.usw2.cache.amazonaws.com 5 | NODE_ENV=production 6 | -------------------------------------------------------------------------------- /src/logger.js: -------------------------------------------------------------------------------- 1 | const bunyan = require('bunyan') 2 | 3 | const defaultOptions = { 4 | codeVersion: process.env.CODE_VERSION 5 | } 6 | 7 | module.exports.createLogger = (opts) => bunyan.createLogger(Object.assign({}, defaultOptions, opts)) 8 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:10.18.1 2 | 3 | ARG CODE_VERSION="00000" 4 | 5 | ENV CODE_VERSION=${CODE_VERSION} 6 | 7 | WORKDIR /3box-pinning-server 8 | 9 | COPY package.json package-lock.json ./ 10 | RUN npm install 11 | 12 | COPY src ./src 13 | 14 | EXPOSE 8081 4002 4003 5002 9090 9229 15 | 16 | CMD npm run start:prod 17 | -------------------------------------------------------------------------------- /.env.development.example: -------------------------------------------------------------------------------- 1 | ADDRESS_SERVER_URL=https://beta.3box.io/address-server 2 | ORBITDB_PATH=./dev-data/orbitdb 3 | IPFS_PATH=./dev-data/ipfs 4 | REDIS_PATH=127.0.0.1 5 | AWS_BUCKET_NAME=experiment-3box-ipfs 6 | AWS_ACCESS_KEY_ID=AKIA0000000000000000 7 | AWS_SECRET_ACCESS_KEY=fG00000000000000000000000000000000000000 8 | ORBIT_REDIS_PATH=redispath 9 | -------------------------------------------------------------------------------- /src/errors.js: -------------------------------------------------------------------------------- 1 | const InvalidInputError = (message) => { 2 | const err = new Error(message) 3 | err.statusCode = 400 4 | return err 5 | } 6 | 7 | const ProfileNotFound = (message) => { 8 | const err = new Error(message) 9 | err.statusCode = 404 10 | return err 11 | } 12 | 13 | module.exports = { 14 | InvalidInputError, 15 | ProfileNotFound 16 | } 17 | -------------------------------------------------------------------------------- /.dependabot/config.yml: -------------------------------------------------------------------------------- 1 | version: 1 2 | update_configs: 3 | - package_manager: "javascript" 4 | target_branch: "develop" 5 | directory: "/" 6 | update_schedule: 
"weekly" 7 | default_reviewers: 8 | - "zachferland" 9 | default_assignees: 10 | - "zachferland" 11 | allowed_updates: 12 | - match: 13 | dependency_type: "production" 14 | update_type: "all" 15 | -------------------------------------------------------------------------------- /src/hasEntriesCache.js: -------------------------------------------------------------------------------- 1 | const redis = require('redis') 2 | 3 | const encode = (value) => (typeof value === 'string' ? value : value.toString()) 4 | const decode = (value) => value ? parseInt(value) : null 5 | 6 | class EntriesCache { 7 | constructor (redisOpts = {}) { 8 | this.store = redis.createClient(redisOpts) 9 | } 10 | 11 | get (key) { 12 | return new Promise((resolve, reject) => { 13 | this.store.get(key, (err, reply) => { 14 | if (err) reject(err) 15 | resolve(decode(reply)) 16 | }) 17 | }) 18 | } 19 | 20 | async set (key, value) { 21 | this.store.set(key, encode(value)) 22 | } 23 | } 24 | 25 | module.exports = EntriesCache 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ipfs & orbitdb data 2 | dev-data/ 3 | orbitdb/ 4 | 5 | 6 | # Logs 7 | logs 8 | *.log 9 | npm-debug.log* 10 | yarn-debug.log* 11 | yarn-error.log* 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | 25 | 26 | # Dependency directories 27 | node_modules/ 28 | jspm_packages/ 29 | 30 | 31 | # Optional npm cache directory 32 | .npm 33 | 34 | # Optional eslint cache 35 | .eslintcache 36 | 37 | # dotenv environment variables file 38 | .env 39 | .env.development 40 | .env.production 41 | 42 | # parcel-bundler cache (https://parceljs.org/) 43 | .cache 44 | 45 | 46 | .DS_Store 47 | tmp 48 | 49 | .idea -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | cd ~/3box-pinning-server 3 | if [[ "$CIRCLE_BRANCH" == "develop" ]]; then 4 | git stash 5 | git checkout -f develop 6 | git pull origin develop 7 | git reset --hard origin/develop 8 | PM2_INSTALLED=$(npm list -g | grep pm2 | wc -l) 9 | if [ "$PM2_INSTALLED" -eq 0 ]; then 10 | npm i -g pm2 11 | pm2 update 12 | fi 13 | npm i 14 | if [ ! -f .env.development ]; then 15 | cp .env.development.example .env.development 16 | fi 17 | pm2 restart --max-memory-restart 1000M node 18 | fi 19 | if [ "$CIRCLE_BRANCH" == "master" ]; then 20 | git stash 21 | git checkout -f master 22 | git pull origin master 23 | git reset --hard origin/master 24 | PM2_INSTALLED=$(npm list -g | grep pm2 | wc -l) 25 | if [ "$PM2_INSTALLED" -eq 0 ]; then 26 | npm i -g pm2 27 | pm2 update 28 | fi 29 | npm i 30 | if [ ! -f .env.production ]; then 31 | cp .env.production.example .env.production 32 | fi 33 | pm2 restart --max-memory-restart 1000M node 34 | fi 35 | -------------------------------------------------------------------------------- /DEPLOYMENT.md: -------------------------------------------------------------------------------- 1 | # Pinning server deployment 2 | 3 | The pinning server is a node.js process that is running using a the PM2 process manager. 4 | 5 | ## Deployment 6 | 7 | Server deployment is manual at the moment. Continuous deployment is gonna be added in the future. 
8 | 9 | To deploy it, `ssh` into the machine and `cd` into the `3box-pinning-server` directory 10 | inside the home folder, then execute 11 | 12 | `./deploy.sh` 13 | 14 | That's it. The server has been redeployed. 15 | 16 | To ensure the server is running correctly, run the command 17 | 18 | `pm2 ls` 19 | 20 | You should see output like this: 21 | 22 | | App name | id | version | mode | pid | status | restart | uptime | cpu | mem | user | watching | 23 | |---|---|---|---|---|---|---|---|---|---|---|---| 24 | | server | 0 | 1.0.0 | fork | 11864 | online | 2 | 3m | 0.6% | 90.7 MB | ec2-user | disabled | 25 | 26 | To inspect logs and trace possible bugs/errors, run the command: 27 | 28 | `pm2 logs` -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Joel Thorstensson 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /src/healthcheckService.js: -------------------------------------------------------------------------------- 1 | const express = require('express') 2 | const os = require('os-utils') 3 | 4 | const { createLogger } = require('./logger') 5 | 6 | const HEALTH_CPU_LIMIT_PERCENT = (process.env.HEALTH_CPU_LIMIT || 50) / 100 7 | // Temporarily Low Default, Mem Leak 8 | const HEALTH_MEM_LIMIT_PERCENT = (process.env.HEALTH_MEM_LIMIT || 20) / 100 9 | 10 | class HealthcheckService { 11 | constructor (pinning, port) { 12 | this.pinning = pinning 13 | this.port = port 14 | this.logger = createLogger({ name: 'healthcheckService' }) 15 | this.app = express() 16 | this.app.get('/healthcheck', this.healthcheckHandler.bind(this)) 17 | } 18 | 19 | async healthcheckHandler (req, res, next) { 20 | if (!this.pinning.ipfs.isOnline()) { 21 | return res.status(503).send() 22 | } 23 | 24 | const cpu = 1 - (await new Promise((resolve, reject) => os.cpuFree(resolve))) 25 | const mem = 1 - os.freememPercentage() 26 | 27 | if (cpu > HEALTH_CPU_LIMIT_PERCENT || mem > HEALTH_MEM_LIMIT_PERCENT) { 28 | return res.status(503).send() 29 | } 30 | return res.status(200).send() 31 | } 32 | 33 | start () { 34 | this.app.listen(this.port, () => this.logger.info(`Serving /healthcheck on port ${this.port}`)) 35 | } 36 | } 37 | 38 | module.exports = HealthcheckService 39 | -------------------------------------------------------------------------------- /src/__tests__/mock3id.js: -------------------------------------------------------------------------------- 1 | const didJWT = require('did-jwt') 2 | 3 | test('', () => {}) 4 | 5 | const mock3id = { 6 | DID: 'did:3:asdfasdf', 7 | getKeyringBySpaceName: () => { 8 | return { 9 | getPublicKeys: () => { 10 | return { signingKey: '044f5c08e2150b618264c4794d99a22238bf60f1133a7f563e74fcf55ddb16748159872687a613545c65567d2b7a4d4e3ac03763e1d9a5fcfe512a371faa48a781' } 11 | } 12 | } 13 | }, 14 | signJWT: payload => { 15 | return didJWT.createJWT(payload, { 16 | signer: didJWT.SimpleSigner('95838ece1ac686bde68823b21ce9f564bc536eebb9c3500fa6da81f17086a6be'), 17 | issuer: 'did:3:asdfasdf' 18 | }) 19 | } 20 | } 21 | 22 | // we need to have a fake 3id resolver since we have a fake 3id 23 | const getMock3idResolver = () => ({ 24 | '3': () => ({ 25 | '@context': 'https://w3id.org/did/v1', 26 | id: 'did:3:asdfasdf', 27 | publicKey: [{ 28 | id: 'did:3:asdfasdf#signingKey', 29 | type: 'Secp256k1VerificationKey2018', 30 | publicKeyHex: '044f5c08e2150b618264c4794d99a22238bf60f1133a7f563e74fcf55ddb16748159872687a613545c65567d2b7a4d4e3ac03763e1d9a5fcfe512a371faa48a781' 31 | }], 32 | authentication: [{ 33 | type: 'Secp256k1SignatureAuthentication2018', 34 | publicKey: 'did:3:asdfasdf#signingKey' 35 | }] 36 | }) 37 | }) 38 | 39 | module.exports = { 40 | mock3id, 41 | getMock3idResolver 42 | } 43 | -------------------------------------------------------------------------------- /src/messageBroker.js: -------------------------------------------------------------------------------- 1 | const Pubsub = require('orbit-db-pubsub') 2 | const redis = require('redis') 3 | 4 | const createMessage = (heads, id) => JSON.stringify({ from: id, heads }) 5 | const messageParse = (message) => JSON.parse(message) 6 | 7 | class MessageBroker extends Pubsub { 8 | constructor (ipfs, id, instanceId, redisOpts, onMessageCallback) { 9 | super(ipfs, id) 10 | this._topics = {} 11 | this.instanceId = instanceId 12 | this.onMessageCallback = 
onMessageCallback 13 | this.messageClientSub = redis.createClient(redisOpts) 14 | this.messageClientPub = redis.createClient(redisOpts) 15 | this.messageClientSub.on('message', this.messageHandler.bind(this)) 16 | } 17 | 18 | async subscribe (topic, onMessageCallback, onNewPeerCallback) { 19 | this.messageClientSub.subscribe(topic) 20 | return super.subscribe(topic, onMessageCallback, onNewPeerCallback) 21 | } 22 | 23 | async unsubscribe (topic) { 24 | this.messageClientSub.unsubscribe(topic) 25 | return super.unsubscribe(topic) 26 | } 27 | 28 | async messageHandler (topic, rawMessage) { 29 | const message = messageParse(rawMessage) 30 | if (message.from === this.instanceId) return 31 | this.onMessageCallback(topic, message.heads) 32 | return super.publish(topic, message.heads) 33 | } 34 | 35 | onMessageWrap (address, heads) { 36 | this.messageClientPub.publish(address, createMessage(heads, this.instanceId)) 37 | this.onMessageCallback(address, heads) 38 | } 39 | } 40 | 41 | module.exports = MessageBroker 42 | -------------------------------------------------------------------------------- /src/__tests__/healthcheckService.test.js: -------------------------------------------------------------------------------- 1 | const HealthcheckService = require('../healthcheckService') 2 | const request = require('supertest') 3 | const { cpuFree: cpuFreeMock, freememPercentage: freememPercentageMock } = require('os-utils') 4 | 5 | jest.mock('os-utils', () => { 6 | return { 7 | cpuFree: jest.fn(), 8 | freememPercentage: jest.fn() 9 | } 10 | }) 11 | 12 | describe('HealthcheckService', () => { 13 | const HEALTHCHECK_PORT = 8000 14 | const pinning = { ipfs: { isOnline: () => {} } } 15 | const healthcheckService = new HealthcheckService(pinning, HEALTHCHECK_PORT) 16 | let isOnlineMock 17 | 18 | beforeEach(() => { 19 | isOnlineMock = jest.spyOn(pinning.ipfs, 'isOnline').mockReturnValue(true) 20 | cpuFreeMock.mockImplementation(cb => cb(0.6)) // eslint-disable-line standard/no-callback-literal 21 | freememPercentageMock.mockReturnValue(0.85) 22 | }) 23 | 24 | afterEach(() => { 25 | jest.resetModules() 26 | isOnlineMock.mockRestore() 27 | }) 28 | 29 | it('should return a failure if IPFS isn\'t online', async (done) => { 30 | isOnlineMock.mockReturnValue(false) 31 | 32 | request(healthcheckService.app) 33 | .get('/healthcheck') 34 | .expect(503) 35 | .end(done) 36 | }) 37 | 38 | it('should return a failure on low cpu', async (done) => { 39 | cpuFreeMock.mockImplementation(cb => cb(0.01)) // eslint-disable-line standard/no-callback-literal 40 | 41 | request(healthcheckService.app) 42 | .get('/healthcheck') 43 | .expect(503) 44 | .end(done) 45 | }) 46 | 47 | it('should return a failure on low memory', async (done) => { 48 | freememPercentageMock.mockReturnValue(0.01) 49 | 50 | request(healthcheckService.app) 51 | .get('/healthcheck') 52 | .expect(503) 53 | .end(done) 54 | }) 55 | 56 | it('should return a success if memory, cpu and IPFS status are OK', async (done) => { 57 | request(healthcheckService.app) 58 | .get('/healthcheck') 59 | .expect(200) 60 | .end(done) 61 | }) 62 | }) 63 | -------------------------------------------------------------------------------- /src/s3.js: -------------------------------------------------------------------------------- 1 | const S3Store = require('datastore-s3') 2 | // const S3 = require('aws-sdk/clients/s3') 3 | const AWS = require('aws-sdk') 4 | 5 | const LevelStore = require('datastore-level') 6 | 7 | const S3_CACHE_ENTRY_TTL = process.env.S3_CACHE_ENTRY_TTL || 600000 // 
10 minutes (600000 ms) 8 | 9 | const https = require('https') 10 | 11 | const agent = new https.Agent({ 12 | maxSockets: 300, 13 | keepAlive: true 14 | }) 15 | 16 | AWS.config.update({ 17 | logger: console, 18 | httpOptions: { 19 | timeout: 45000, 20 | connectTimeout: 45000, 21 | agent: agent 22 | }, 23 | maxRetries: 10, 24 | retryDelayOptions: { 25 | base: 500 26 | } 27 | }) 28 | 29 | const S3 = AWS.S3 30 | const IPFSRepo = require('ipfs-repo') 31 | 32 | // Redundant with createRepo in datastore-s3, but needed to configure 33 | // additional S3 client parameters not otherwise exposed 34 | 35 | // A mock lock 36 | const notALock = { 37 | getLockfilePath: () => {}, 38 | lock: (_) => notALock.getCloser(), 39 | getCloser: (_) => ({ 40 | close: () => {} 41 | }), 42 | locked: (_) => false 43 | } 44 | 45 | const ipfsRepo = (config) => { 46 | const { 47 | path, 48 | bucket, 49 | accessKeyId, 50 | secretAccessKey, 51 | endpoint, 52 | s3ForcePathStyle, 53 | signatureVersion, 54 | shardBlockstore 55 | } = config 56 | const createIfMissing = true 57 | 58 | const storeConfig = { 59 | s3: new S3({ 60 | params: { 61 | Bucket: bucket 62 | }, 63 | accessKeyId, 64 | secretAccessKey, 65 | endpoint, 66 | s3ForcePathStyle, 67 | signatureVersion 68 | }), 69 | createIfMissing, 70 | cacheEnabled: true, 71 | cacheTTL: S3_CACHE_ENTRY_TTL 72 | } 73 | 74 | const blockStoreConfig = shardBlockstore ? Object.assign(storeConfig, { sharding: true }) : storeConfig 75 | 76 | return new IPFSRepo(path, { 77 | storageBackends: { 78 | blocks: S3Store, 79 | // datastore: S3Store, 80 | datastore: LevelStore, 81 | root: S3Store, 82 | keys: S3Store 83 | }, 84 | storageBackendOptions: { 85 | blocks: blockStoreConfig, 86 | // datastore: storeConfig, 87 | datastore: { 88 | db: require('level-mem') 89 | }, 90 | root: storeConfig, 91 | keys: storeConfig 92 | }, 93 | // lock: 'memory' 94 | lock: notALock 95 | }) 96 | } 97 | 98 | module.exports = { ipfsRepo } 99 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "3box-pinning-node", 3 | "version": "1.14.26", 4 | "description": "IPFS Node that runs OrbitDB under the hood", 5 | "main": "src/node.js", 6 | "scripts": { 7 | "lint": "./node_modules/.bin/standard --verbose src/**", 8 | "test": "rm -rf ./tmp ; mkdir -p ./tmp/test/one ; jest --forceExit --detectOpenHandles --coverage --runInBand", 9 | "start": "node ./src/node.js", 10 | "start:prod": "node --max-old-space-size=20480 ./src/node.js", 11 | "start:prod:profile": "node --inspect=0.0.0.0:9229 --max-old-space-size=20480 ./src/node.js", 12 | "start:dev": "./node_modules/.bin/nodemon ./src/node.js" 13 | }, 14 | "repository": { 15 | "type": "git", 16 | "url": "git+https://github.com/3box/3box-pinning-node.git" 17 | }, 18 | "keywords": [ 19 | "ipfs", 20 | "orbitdb" 21 | ], 22 | "author": "", 23 | "license": "MIT", 24 | "bugs": { 25 | "url": "https://github.com/3box/3box-pinning-node/issues" 26 | }, 27 | "homepage": "https://github.com/3box/3box-pinning-node#readme", 28 | "dependencies": { 29 | "3box-orbitdb-plugins": "^2.1.2", 30 | "3id-resolver": "^1.0.0", 31 | "analytics-node": "^3.3.0", 32 | "aws-sdk": "^2.702.0", 33 | "axios": "^0.19.0", 34 | "bunyan": "^1.8.12", 35 | "datastore-level": "^1.1.0", 36 | "datastore-s3": "github:3box/js-datastore-s3", 37 | "did-resolver": "^1.1.0", 38 | "dotenv": "^6.2.0", 39 | "exectimer": "^2.2.1", 40 | "express": "^4.16.4", 41 | "ipfs": "0.46.0", 42 | "ipfs-http-client": 
"^44.1.0", 43 | "ipfs-log": "^4.6.0", 44 | "ipfs-repo": "^3.0.2", 45 | "js-sha256": "^0.9.0", 46 | "level-mem": "^5.0.1", 47 | "multihashes": "^0.4.14", 48 | "muport-did-resolver": "^1.0.3", 49 | "orbit-db": "^0.24.2", 50 | "orbit-db-cache-redis": "^0.1.2", 51 | "os-utils": "0.0.14", 52 | "redis": "^2.8.0" 53 | }, 54 | "jest": { 55 | "testEnvironment": "jest-environment-uint8array" 56 | }, 57 | "bin": { 58 | "3box-pinning-node": "./src/node.js" 59 | }, 60 | "devDependencies": { 61 | "jest": "^23.6.0", 62 | "jest-environment-uint8array": "^1.0.0", 63 | "lodash.defaultsdeep": "^4.6.1", 64 | "nodemon": "^1.19.4", 65 | "redis-mock": "^0.47.0", 66 | "standard": "^14.1.0", 67 | "supertest": "^4.0.2", 68 | "tmp-promise": "^2.0.2" 69 | }, 70 | "standard": { 71 | "env": [ 72 | "jest" 73 | ] 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ⚠️ ⚠️ Deprecated in favor of Ceramic ⚠️ ⚠️ 2 | > 3box.js and related tools built by 3Box Labs are deprecated and no loger supported. Developers are encurraged to build with https://ceramic.network which is a more secure and decentralized protocol for sovereign data. 3 | 4 | 5 | # 3Box pinning node 6 | 7 | The pinning node is an ipfs and orbit-db node that persists the data of 3box users. 8 | 9 | ### Requirements 10 | 11 | - node v10 12 | - python v2 13 | 14 | ## Run as command line tool 15 | 16 | ```bash 17 | # Install via npm 18 | (sudo) npm install 3box-pinning-node --global 19 | # Install via yarn 20 | yarn global add 3box-pinning-node 21 | # Run node 22 | (sudo) 3box-pinning-node 23 | ``` 24 | 25 | ## Pubsub messages 26 | 27 | #### request to pin 28 | This message is sent from a 3box-js client when `openBox` is called. 29 | ```js 30 | { 31 | type: 'PIN_DB', 32 | odbAddress: 33 | } 34 | ``` 35 | 36 | #### length response 37 | This message is sent from the pinning node as a response to `PIN_DB`. 38 | ```js 39 | { 40 | type: 'HAS_ENTRIES', 41 | odbAddress: , 42 | numEntries: 43 | } 44 | ``` 45 | 46 | #### replicated response 47 | This message is sent from the pinning node when a db has been replicated. 48 | ```js 49 | { 50 | type: 'REPLICATED', 51 | odbAddress: , 52 | } 53 | ``` 54 | 55 | ## Configuration 56 | 57 | Configurations for both production and development environments can be found in both `.env.production` and `.env.development` respectively. The pinning service also runs a profile caching service. This can be disabled by running (i.e. you only require the pinning node) the following command instead. 58 | 59 | $ (sudo) npm run start -- --runCacheService=false 60 | 61 | The profile caching service also uses a Redis cache to cache requests. This is disabled by default in development. And can generally be disabled by not setting the env variable `REDIS_PATH`. 62 | 63 | To only handle a subset of pinning requests, you can use the DID and space whitelist options. These are configured with the `PIN_WHITELIST_DIDS` and `PIN_WHITELIST_SPACES` environment variables, which are comma-separated lists (no whitespace between items). 
For example: 64 | ``` 65 | PIN_WHITELIST_DIDS=did:3:bafyreie2i5l7fttwgzluctidfgbskyx47gjl2illqmbpp3vh4axacxpkqm,did:3:bafyreigwzej3toirnjur5ur3z3qwefnmrwonhlpok5dapfmgmc2i3sv2je 66 | PIN_WHITELIST_SPACES=abc,def 67 | ``` 68 | 69 | In addition, the PIN_SILENT environment variable can be set to `true` if the pinning node should not send responses to pin and sync requests (on private and 3rd party nodes, for example). 70 | 71 | ## Maintainers 72 | [@zachferland](https://github.com/zachferland) 73 | -------------------------------------------------------------------------------- /src/__tests__/didSupport.test.js: -------------------------------------------------------------------------------- 1 | const IPFS = require('ipfs') 2 | const OrbitDB = require('orbit-db') 3 | const { LegacyIPFS3BoxAccessController } = require('3box-orbitdb-plugins') 4 | const AccessControllers = require('orbit-db-access-controllers') 5 | AccessControllers.addAccessController({ AccessController: LegacyIPFS3BoxAccessController }) 6 | const Util = require('../util') 7 | const { Resolver } = require('did-resolver') 8 | const getMuportResolver = require('muport-did-resolver').getResolver 9 | 10 | const IPFS_PATH = './tmp/ipfs-did-1' 11 | const ODB_PATH = './tmp/orbitdb-did-1' 12 | 13 | const IPFS_CONF = { 14 | EXPERIMENTAL: { 15 | pubsub: true 16 | }, 17 | repo: IPFS_PATH, 18 | config: { 19 | Addresses: { 20 | Swarm: [ 21 | '/ip4/127.0.0.1/tcp/4016', 22 | '/ip4/127.0.0.1/tcp/4017/ws' 23 | ], 24 | API: '/ip4/127.0.0.1/tcp/5014', 25 | Gateway: '/ip4/127.0.0.1/tcp/9192' 26 | } 27 | } 28 | } 29 | 30 | const DID = 'did:muport:QmNQLKvMqGrDCrzmFS2C5p2JaRZ7bk6DqY7RJinhJoVxVT' 31 | const COMPRESSED_KEY = '02d1f48e3d5c52954a01f1aa104bad1a22e2eed6ecbd4961737fbffa8d75457cd4' 32 | const UNCOMPRESSED_KEY = '04d1f48e3d5c52954a01f1aa104bad1a22e2eed6ecbd4961737fbffa8d75457cd4ab9b98ef29b96c6a1bbc54c2b9ded4ea6e803c50201c38c017b7b34c7a2451e8' 33 | const MANIFEST = '{"version":1,"signingKey":"02d1f48e3d5c52954a01f1aa104bad1a22e2eed6ecbd4961737fbffa8d75457cd4","managementKey":"0x3334d0c1fd88529a1285a5f3c9cd71b382684073","asymEncryptionKey":"/MklZEmpCWWbUL/n5qnzLfEo6K0rtrtOrp60qNzrgVU="}' 34 | const ROOT_STORE_ADDR = '/orbitdb/QmYoTE9PGvofB6EDFsJtxbGRjNCKRd8MJj9dyfguunTRfz/12209fc6c6005af752c4297a187a55dfd5bb55e1f07e4b5022915dc803f3a6ae699c.root' 35 | 36 | describe('basic low level functions are working', () => { 37 | test('uncompress produces the correct key', () => { 38 | expect(Util.uncompressSECP256K1Key(COMPRESSED_KEY)).toEqual(UNCOMPRESSED_KEY) 39 | }) 40 | 41 | test('did extract signing key', async () => { 42 | const ipfs = { cat: jest.fn(() => Promise.resolve(MANIFEST)) } 43 | const resolver = new Resolver(getMuportResolver(ipfs)) 44 | const k = await Util.didExtractSigningKey(DID, { resolver }) 45 | expect(k).toEqual('02d1f48e3d5c52954a01f1aa104bad1a22e2eed6ecbd4961737fbffa8d75457cd4') 46 | }) 47 | }) 48 | 49 | describe('test with network', () => { 50 | jest.setTimeout(30000) 51 | let ipfs 52 | let orbitdb 53 | let did 54 | 55 | beforeAll(async () => { 56 | ipfs = await IPFS.create(IPFS_CONF) 57 | const cid = (await (await ipfs.add(Buffer.from(MANIFEST))).next()).value.cid 58 | did = 'did:muport:' + cid.toString() 59 | orbitdb = new OrbitDB(ipfs, ODB_PATH) 60 | }) 61 | 62 | afterAll(async () => { 63 | await orbitdb.close() 64 | return ipfs.close() 65 | }) 66 | 67 | it('can retrieve a root store', async () => { 68 | const addr = await Util.didToRootStoreAddress(did, { orbitdb }) 69 | expect(addr).toEqual(ROOT_STORE_ADDR) 70 | }) 71 | }) 
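As an aside, the pubsub message formats documented in README.md above can be exercised directly with `orbit-db-pubsub`, the same library used by `src/__tests__/testClient.js`. The sketch below is illustrative only and is not part of the repository: it assumes an already-started js-ipfs instance and an existing root store address, and uses the default pinning room name `3box-pinning` from `src/node.js`.

```js
const Pubsub = require('orbit-db-pubsub')

// Announce a root store to the pinning node and log its responses.
// `ipfs` is a started js-ipfs instance and `rootStoreAddress` an existing
// orbitdb root store address (both assumed here).
async function announceRootStore (ipfs, rootStoreAddress, room = '3box-pinning') {
  const { id } = await ipfs.id()
  const pubsub = new Pubsub(ipfs, id)
  await pubsub.subscribe(room, (topic, message) => {
    // Responses described in the README: HAS_ENTRIES and REPLICATED
    if (message.type === 'HAS_ENTRIES') console.log(`node has ${message.numEntries} entries for ${message.odbAddress}`)
    if (message.type === 'REPLICATED') console.log(`node replicated ${message.odbAddress}`)
  }, () => {
    // Request pinning of the root store, as a 3box-js client does on openBox
    pubsub.publish(room, { type: 'PIN_DB', odbAddress: rootStoreAddress })
  })
}
```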
72 | -------------------------------------------------------------------------------- /src/analytics.js: -------------------------------------------------------------------------------- 1 | const SegmentAnalytics = require('analytics-node') 2 | const sha256 = require('js-sha256').sha256 3 | 4 | const hash = str => str === null ? null : Buffer.from(sha256.digest(str)).toString('hex') 5 | 6 | class Analytics { 7 | constructor (client) { 8 | this.client = client 9 | } 10 | 11 | _track (data = {}, id) { 12 | if (this.client) { 13 | data.anonymousId = id || '3box' 14 | data.properties.time = Date.now() 15 | return this.client.track(data) 16 | } else { 17 | return false 18 | } 19 | } 20 | 21 | // trackOpenDB (address, duration) { 22 | // let data = {} 23 | // data.event = 'open_db' 24 | // data.properties = { address: address, duration: duration } 25 | // this._track(data) 26 | // } 27 | 28 | trackPinDB (did, newAccount) { 29 | const data = {} 30 | data.event = 'pin_db' 31 | data.properties = { new_account: newAccount } 32 | this._track(data, hash(did)) 33 | } 34 | 35 | // backwards compatible, pindb for dbs with address links not in rootstore 36 | trackPinDBAddress (address) { 37 | // Temporary 38 | // const data = {} 39 | // data.event = 'pin_db_address' 40 | // data.properties = { address_hash: hash(address) } 41 | // this._track(data, hash(address)) 42 | } 43 | 44 | trackSyncDB (odbAddress) { 45 | // Temporary 46 | // const data = {} 47 | // data.event = 'sync_db' 48 | // data.properties = { address: odbAddress } 49 | // this._track(data) 50 | } 51 | 52 | trackInfraMetrics () { 53 | const data = {} 54 | data.event = 'infra_metrics' 55 | data.properties = { 56 | resident_memory_usage: process.memoryUsage().rss / 1024 / 1024, 57 | heap_total_memory: process.memoryUsage().heapTotal / 1024 / 1024, 58 | heap_used_memory: process.memoryUsage().heapUsed / 1024 / 1024 59 | } 60 | this._track(data) 61 | } 62 | 63 | trackSpaceUpdate (address, spaceName, did) { 64 | const data = {} 65 | data.event = 'space_update' 66 | data.properties = { address, space: spaceName } 67 | this._track(data, hash(did)) 68 | this.trackSpaceUpdateByApp(address, spaceName) // Temporary, to get uniques on spaceNames 69 | } 70 | 71 | trackSpaceUpdateByApp (address, spaceName) { 72 | const data = {} 73 | data.event = 'space_update_app' 74 | data.properties = { address, space: spaceName } 75 | this._track(data, spaceName) 76 | } 77 | 78 | trackPublicUpdate (address, did) { 79 | const data = {} 80 | data.event = 'public_update' 81 | data.properties = { address } 82 | this._track(data, hash(did)) 83 | } 84 | 85 | trackPrivateUpdate (address, did) { 86 | const data = {} 87 | data.event = 'private_update' 88 | data.properties = { address } 89 | this._track(data, hash(did)) 90 | } 91 | 92 | // TODO differentiate types of updates 93 | trackRootUpdate (did) { 94 | const data = {} 95 | data.event = 'root_update' 96 | data.properties = { } 97 | this._track(data, hash(did)) 98 | } 99 | 100 | trackThreadUpdate (address, space, name) { 101 | const data = {} 102 | data.event = 'thread_update' 103 | data.properties = { address, space, name } 104 | this._track(data) 105 | } 106 | } 107 | 108 | module.exports = (writeKey, active = true) => { 109 | const client = writeKey && active ? 
new SegmentAnalytics(writeKey) : null 110 | return new Analytics(client) 111 | } 112 | -------------------------------------------------------------------------------- /src/util.js: -------------------------------------------------------------------------------- 1 | const elliptic = require('elliptic') 2 | const Multihash = require('multihashes') 3 | const sha256 = require('js-sha256').sha256 4 | const { Resolver } = require('did-resolver') 5 | const get3IdResolver = require('3id-resolver').getResolver 6 | const getMuportResolver = require('muport-did-resolver').getResolver 7 | const EC = elliptic.ec 8 | const ec = new EC('secp256k1') 9 | 10 | class Util { 11 | /** 12 | * Compute a multi-hash that is used in the did to root store process (fingerprinting) 13 | */ 14 | static sha256Multihash (str) { 15 | const digest = Buffer.from(sha256.digest(str)) 16 | return Multihash.encode(digest, 'sha2-256').toString('hex') 17 | } 18 | 19 | static uncompressSECP256K1Key (key) { 20 | const ec = new elliptic.ec('secp256k1') // eslint-disable-line new-cap 21 | return ec.keyFromPublic(key, 'hex').getPublic(false, 'hex') 22 | } 23 | 24 | static async didExtractSigningKey (did, { doc, resolver } = {}) { 25 | doc = doc || await resolver.resolve(did) 26 | const signingKey = doc.publicKey.find(key => key.id.includes('#signingKey')).publicKeyHex 27 | return signingKey 28 | } 29 | 30 | static createMuportDocument (signingKey, managementKey, asymEncryptionKey) { 31 | return { 32 | version: 1, 33 | signingKey, 34 | managementKey, 35 | asymEncryptionKey 36 | } 37 | } 38 | 39 | static async threeIDToMuport (did, { ipfs, doc }) { 40 | const resolver = new Resolver({ 41 | ...get3IdResolver(ipfs), 42 | ...getMuportResolver(ipfs) 43 | }) 44 | doc = doc || await resolver.resolve(did) 45 | let signingKey = doc.publicKey.find(key => key.id.includes('#signingKey')).publicKeyHex 46 | signingKey = ec.keyFromPublic(Buffer.from(signingKey, 'hex')).getPublic(true, 'hex') 47 | const managementKey = doc.publicKey.find(key => key.id.includes('#managementKey')).ethereumAddress 48 | const encryptionKey = doc.publicKey.find(key => key.id.includes('#encryptionKey')).publicKeyBase64 49 | const muportdoc = Util.createMuportDocument(signingKey, managementKey, encryptionKey) 50 | const docHash = (await ipfs.add(Buffer.from(JSON.stringify(muportdoc))))[0].hash 51 | return 'did:muport:' + docHash 52 | } 53 | 54 | static async didToRootStoreAddress (did, { orbitdb, ipfs }) { 55 | ipfs = ipfs || orbitdb._ipfs 56 | const is3ID = did.includes(':3:') 57 | const resolver = new Resolver({ 58 | ...get3IdResolver(ipfs), 59 | ...getMuportResolver(ipfs) 60 | }) 61 | const doc = await resolver.resolve(did) 62 | let signingKey = await Util.didExtractSigningKey(did, { doc }) 63 | // 3id signingKey already uncompressed in doc 64 | signingKey = is3ID ? signingKey : Util.uncompressSECP256K1Key(signingKey) 65 | // muport did require for address derivation 66 | did = is3ID ? 
await Util.threeIDToMuport(did, { ipfs, doc }) : did 67 | const fingerprint = Util.sha256Multihash(did) 68 | const rootStore = `${fingerprint}.root` 69 | 70 | const opts = { 71 | format: 'dag-pb', 72 | accessController: { 73 | write: [signingKey], 74 | type: 'legacy-ipfs-3box', 75 | skipManifest: true, 76 | resolver 77 | } 78 | } 79 | const addr = await orbitdb.determineAddress(rootStore, 'feed', opts) 80 | 81 | return addr.toString() 82 | } 83 | 84 | static randInt (max) { 85 | return Math.floor(Math.random() * max) 86 | } 87 | 88 | static isBooleanStringSet (boolString) { 89 | return typeof boolString === 'string' && !['0', 'f', 'false', 'no', 'off'].includes(boolString.toLowerCase()) 90 | } 91 | } 92 | 93 | module.exports = Util 94 | -------------------------------------------------------------------------------- /src/node.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const path = require('path') 4 | const IPFS = require('ipfs') 5 | const ipfsClient = require('ipfs-http-client') 6 | const Pinning = require('./pinning') 7 | const { ipfsRepo } = require('./s3') 8 | const analytics = require('./analytics') 9 | const { randInt, isBooleanStringSet } = require('./util') 10 | const HealthcheckService = require('./healthcheckService') 11 | 12 | const env = process.env.NODE_ENV || 'development' 13 | require('dotenv').config({ path: path.resolve(process.cwd(), `.env.${env}`) }) 14 | 15 | const ORBITDB_PATH = process.env.ORBITDB_PATH 16 | // const IPFS_PATH = process.env.IPFS_PATH 17 | const IPFS_PATH = 'ipfs' 18 | const SEGMENT_WRITE_KEY = process.env.SEGMENT_WRITE_KEY 19 | const ANALYTICS_ACTIVE = process.env.ANALYTICS_ACTIVE === 'true' 20 | const ORBIT_REDIS_PATH = process.env.ORBIT_REDIS_PATH 21 | const ENTRIES_NUM_REDIS_PATH = process.env.ENTRIES_NUM_REDIS_PATH 22 | const PUBSUB_REDIS_PATH = process.env.PUBSUB_REDIS_PATH 23 | const PINNING_ROOM = process.env.PINNING_ROOM || '3box-pinning' 24 | const HEALTHCHECK_PORT = process.env.HEALTHCHECK_PORT || 8081 25 | 26 | const AWS_BUCKET_NAME = process.env.AWS_BUCKET_NAME 27 | const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID 28 | const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY 29 | const AWS_S3_ENDPOINT = process.env.AWS_S3_ENDPOINT 30 | const AWS_S3_ADDRESSING_STYLE = process.env.AWS_S3_ADDRESSING_STYLE 31 | const AWS_S3_SIGNATURE_VERSION = process.env.AWS_S3_SIGNATURE_VERSION 32 | const SHARD_BLOCKSTORE = process.env.SHARD_BLOCKSTORE === 'true' 33 | 34 | const PIN_SILENT = isBooleanStringSet(process.env.PIN_SILENT) 35 | const PIN_WHITELIST_DIDS = process.env.PIN_WHITELIST_DIDS ? process.env.PIN_WHITELIST_DIDS.split(',') : null 36 | const PIN_WHITELIST_SPACES = process.env.PIN_WHITELIST_SPACES ? process.env.PIN_WHITELIST_SPACES.split(',') : null 37 | 38 | const INSTANCE_ID = randInt(10000000000).toString() 39 | 40 | const analyticsClient = analytics(SEGMENT_WRITE_KEY, ANALYTICS_ACTIVE) 41 | const orbitCacheRedisOpts = ORBIT_REDIS_PATH ? { host: ORBIT_REDIS_PATH } : null 42 | const entriesNumRedisOpts = ENTRIES_NUM_REDIS_PATH ? { host: ENTRIES_NUM_REDIS_PATH } : null 43 | const pubSubConfig = PUBSUB_REDIS_PATH && INSTANCE_ID ? 
{ redis: { host: PUBSUB_REDIS_PATH }, instanceId: INSTANCE_ID } : null 44 | 45 | function prepareIPFSConfig () { 46 | let repo 47 | if (AWS_BUCKET_NAME) { 48 | if (!IPFS_PATH) { 49 | throw new Error('Invalid IPFS + s3 configuration') 50 | } 51 | 52 | repo = ipfsRepo({ 53 | path: IPFS_PATH, 54 | bucket: AWS_BUCKET_NAME, 55 | accessKeyId: AWS_ACCESS_KEY_ID, 56 | secretAccessKey: AWS_SECRET_ACCESS_KEY, 57 | endpoint: AWS_S3_ENDPOINT, 58 | s3ForcePathStyle: AWS_S3_ADDRESSING_STYLE === 'path', 59 | signatureVersion: AWS_S3_SIGNATURE_VERSION, 60 | shardBlockstore: SHARD_BLOCKSTORE 61 | }) 62 | } else if (IPFS_PATH) { 63 | repo = IPFS_PATH 64 | } 65 | 66 | let swarmAddresses = [ 67 | '/ip4/0.0.0.0/tcp/4002', 68 | '/ip4/127.0.0.1/tcp/4003/ws' 69 | ] 70 | if (process.env.RENDEZVOUS_ADDRESS) { 71 | swarmAddresses = [...swarmAddresses, process.env.RENDEZVOUS_ADDRESS] 72 | } 73 | 74 | const ipfsOpts = { 75 | repo, 76 | preload: { enabled: false }, 77 | libp2p: { modules: { dht: null } }, 78 | config: { 79 | Bootstrap: [], 80 | Addresses: { 81 | Swarm: swarmAddresses 82 | }, 83 | Swarm: { 84 | ConnMgr: { 85 | LowWater: 700, 86 | HighWater: 1500 87 | } 88 | } 89 | } 90 | } 91 | 92 | return ipfsOpts 93 | } 94 | 95 | async function retryBackoff (fn, maxBackoffTime = 60000) { 96 | async function _retryBackoff (fn, maxBackoffTime, jitter, wait) { 97 | if (wait > maxBackoffTime) return Promise.reject(new Error('Max backoff time exceeded')) 98 | try { 99 | return await fn() 100 | } catch (e) { 101 | console.warn(`call failed, retrying in ${wait} ms`) 102 | await new Promise(resolve => setTimeout(resolve, wait + Math.random() * jitter)) 103 | return _retryBackoff(fn, maxBackoffTime, jitter, wait * 2) 104 | } 105 | } 106 | return _retryBackoff(fn, maxBackoffTime, 100, 1000) 107 | } 108 | 109 | async function start () { 110 | let ipfs 111 | if (process.env.IPFS_API_URL) { 112 | ipfs = ipfsClient(process.env.IPFS_API_URL) 113 | await retryBackoff(ipfs.id) 114 | } else { 115 | const ipfsConfig = prepareIPFSConfig() 116 | ipfs = await IPFS.create(ipfsConfig) 117 | } 118 | 119 | const pinning = new Pinning(ipfs, ORBITDB_PATH, analyticsClient, orbitCacheRedisOpts, pubSubConfig, PINNING_ROOM, entriesNumRedisOpts, PIN_WHITELIST_DIDS, PIN_WHITELIST_SPACES, PIN_SILENT) 120 | await pinning.start() 121 | const healthcheckService = new HealthcheckService(pinning, HEALTHCHECK_PORT) 122 | healthcheckService.start() 123 | } 124 | 125 | start() 126 | -------------------------------------------------------------------------------- /RELEASE-NOTES.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | ### v1.14.26 - 2020-07-17 4 | * chore: healthcheck mem/cpu limit config, default low 5 | * feat: healthcheck enable 6 | 7 | ### v1.14.25 - 2020-07-08 8 | * feat: s3 cache ttl opt, 60 secs 9 | 10 | ### v1.14.24 - 2020-07-07 11 | * feat: cache s3 reqs, up datastore s3 12 | 13 | ### v1.14.12 - 2020-06-25 14 | * chore: node ver, mem watch, healthcheck 15 | 16 | ### v1.14.9 - 2020-06-23 17 | * chore: logs ipfs 18 | * fix: libp2p streams management 19 | 20 | ### v1.14.7 - 2020-06-22 21 | * fix: downgrade aws sdk 22 | 23 | ### v1.14.6 - 2020-06-19 24 | * feat: remove temp has entries cache fix 25 | 26 | ### v1.14.5 - 2020-06-19 27 | * chore: up ipfs 0.46.0 28 | 29 | ### v1.14.4 - 2020-06-18 30 | * chore: up aws, increase mem prod, temp logs 31 | 32 | ## v1.14.3 - 2020-06-17 33 | * build: increase node mem prod docker image 34 | 35 | ## v1.14.2 - 2020-06-03 36 | * core: update to 
`ipfs@0.44` 37 | 38 | ## v1.14.1 - 2020-05-25 39 | * chore: increase max number of swarm connections from 500 to 1500 40 | 41 | ## v1.14.0 - 2020-05-01 42 | 43 | * chore: upgrade did-resolver and did-jwt libraries 44 | * feat: add smoke tests to CI 45 | 46 | ## v1.13.0 - 2020-03-12 47 | * feat: add CODE_VERSION to dockerfile, circle ci image build step, and logger 48 | * feat: add bunyan logger 49 | * refactor: move ipfs init into main module, use ipfs-http-client instead of ipfs if IPFS_API_URL env var is set 50 | 51 | ## v1.12.0 - 2019-01-28 52 | * feat: upgrade `orbit-db` to v0.23.1 53 | * feat: upgrade `ipfs` to v0.40.0 54 | * feat: add parameter for only pinning content for specified root DIDs 55 | * feat: add parameter for only pinning content for specified space names 56 | * feat: add parameter for silently pinning content 57 | 58 | ## v1.11.0 - 2019-01-15 59 | Includes performance improvements that will allow syncing to start sooner on client 60 | and for onSync done to resolve more quickly. 61 | 62 | * feat: cache has entries and send on message 63 | * feat: send heads on connect, with orbit fix, remove wait 64 | 65 | ## v1.10.0 - 2019-01-08 66 | * feat: allow additional S3 client options to be configured for endpoint, addressing style and signature version 67 | * feat: allow pinning room channel to be configured 68 | 69 | ## v1.9.1 - 2019-12-18 70 | * fix: cache key write for db names with slashes, ie threads ac db 71 | 72 | ## v1.9.0 - 2019-12-12 73 | * feat: up orbit-db v0.22.1, cache change, performance up 74 | 75 | ## v1.8.0 - 2019-12-10 76 | * feat: remove profile API functionality 77 | 78 | ## v1.7.2 - 2019-12-02 79 | * fix: pin auth-data objects from rootstore 80 | 81 | ## v1.7.1 - 2019-10-10 82 | * feat: dedicated healthcheck endpoint 83 | 84 | ## v1.7.0 - 2019-09-20 85 | * feat: multi node support with internal message layer 86 | 87 | ## v1.6.4 - 2019-09-18 88 | * fix: wait to consumer/exchange heads until store ready (local orbitdb fix) 89 | 90 | ## v1.6.3 - 2019-09-10 91 | * fix: return author on getThread 92 | 93 | ## v1.6.2 - 2019-08-22 94 | * fix: correctly calculate HAS_ENTRIES response 95 | 96 | ## v1.6.1 - 2019-08-06 97 | * feat: origin api metrics, did hash metrics, unique spaces, metric properties 98 | 99 | ## v1.6.0 - 2019-07-12 100 | * feat: pin address-link entries from root-store 101 | * feat: getConfig now returns address-links 102 | 103 | ## v1.5.1 - 2019-06-19 104 | * feat: getProfile with 3ID 105 | * fix: getThread don't hang, return error when missing or invalid args 106 | 107 | ## v1.5.0 - 2019-06-11 108 | * feat: update orbitdb and ipfs 109 | * feat: add 3ID 110 | * feat: add support for moderator threads, and thread api changes 111 | * feat: add getConfig api endpoint 112 | 113 | ## v1.4.2 - 2019-05-06 114 | * Fix: Use correct timestamp format. 
115 | 116 | ## v1.4.1 - 2019-04-27 117 | * Fix/Performance: Cache liveness and fix, rewrite cache on change instead of invalidate 118 | 119 | ## v1.4.0 - 2019-04-25 120 | * Feature: optional S3 IPFS Repo 121 | * Feature: optional Redis OrbitDB cache 122 | * Feature: optional API only optimization 123 | * Fix: openBox concurrency issues 124 | * Build: dockerfile 125 | * Build: container based CI/CD pipeline 126 | 127 | ## v1.3.0 - 2019-04-12 128 | * Feature: Add the ability to query metadata in the profile and space APIs 129 | * Fix: Automatically pin DIDs that are sent along when opening a DB 130 | * Fix: Don't break when opening empty threads 131 | * Fix: Make getProfile API more resilient to errors 132 | 133 | ## v1.2.0 - 2019-03-28 134 | * Feature: Pinning for threads 135 | * Feature: Query by DID in getProfile and getSpace 136 | 137 | ## v1.1.1 - 2019-03-15 138 | * More granular invalidation of cache 139 | * Fix bug where space names were normalized when they shouldn't be 140 | 141 | 142 | ## v1.1.0 - 2019-03-12 143 | * add support for `space` and `list-space` REST endpoints 144 | * fix bug with malformed `PIN_DB` messages 145 | 146 | ## v1.0.1 - 2019-01-29 147 | * CI/CD on develop/master 148 | * Memory measurement for analytics 149 | * Automatic restart when a memory threshold has been reached 150 | * Fix: openDB responds only per db 151 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | orbs: 4 | aws-ecr: circleci/aws-ecr@6.8.1 5 | aws-cli: circleci/aws-cli@1.0.0 6 | slack: circleci/slack@3.4.2 7 | 8 | jobs: 9 | test: 10 | working_directory: ~/3box-pinning-server 11 | docker: 12 | - image: circleci/node:10 13 | steps: 14 | - checkout 15 | 16 | # Download and cache dependencies 17 | - restore_cache: 18 | keys: 19 | - dependency-cache-{{ checksum "package.json" }} 20 | 21 | - run: 22 | name: install dependencies 23 | command: | 24 | sudo npm i -g codecov node-gyp 25 | npm i 26 | 27 | - run: 28 | name: test 29 | command: npm test && codecov 30 | 31 | - run: 32 | name: lint 33 | command: npm run lint 34 | 35 | - run: 36 | name: code-coverage 37 | command: bash <(curl -s https://codecov.io/bash) 38 | 39 | - save_cache: 40 | key: dependency-cache-{{ checksum "package.json" }} 41 | paths: 42 | - ./node_modules 43 | 44 | deploy-dev: 45 | working_directory: ~/3box-pinning-server 46 | docker: 47 | - image: circleci/python:3.7.1 48 | environment: 49 | AWS_PAGER: '' 50 | steps: 51 | - aws-cli/install 52 | - aws-cli/setup 53 | - run: 54 | name: Deploy Pinning Service 55 | command: aws ecs update-service --force-new-deployment --cluster pinning-node-cluster-dev --service pinning-node-service-dev 56 | - run: 57 | name: Deploy API Service 58 | command: aws ecs update-service --force-new-deployment --cluster pinning-node-cluster-dev --service profile-api-service-dev 59 | 60 | run-smoke-test: 61 | executor: aws-cli/default 62 | environment: 63 | AWS_PAGER: '' 64 | steps: 65 | - aws-cli/setup 66 | - run: 67 | name: Run task and wait for results 68 | command: | 69 | set -eo pipefail 70 | STARTED_BY=ci.${CIRCLE_PR_REPONAME:0:28}.$CIRCLE_BUILD_NUM 71 | TASK_DEFINITION=smoke-tests-dev 72 | CLUSTER=pinning-node-cluster-dev 73 | SUBNET_IDs=subnet-0c52903d646a95ac4 74 | SECURITY_GROUP_IDs=sg-0be84020206f86893,sg-0ce79fc4e14c351f7 75 | TASK_ARN=$(aws ecs run-task \ 76 | --task-definition $TASK_DEFINITION \ 77 | --cluster $CLUSTER \ 78 | 
--launch-type FARGATE \ 79 | --network-configuration awsvpcConfiguration="{subnets=[$SUBNET_IDs],securityGroups=[$SECURITY_GROUP_IDs]}" \ 80 | --started-by $STARTED_BY \ 81 | --query 'tasks[].taskArn' \ 82 | --output text) 83 | echo "Task ARN: $TASK_ARN" 84 | aws ecs wait tasks-stopped --cluster $CLUSTER --tasks $TASK_ARN 85 | EXIT_STATUS=$(aws ecs describe-tasks --cluster $CLUSTER --tasks $TASK_ARN --query 'tasks[0].containers[0].exitCode' --output text) 86 | if [ "$EXIT_STATUS" = "0" ]; then 87 | echo "Smoke tests succeeded" 88 | else 89 | echo "Smoke tests failed, see AWS logs for more info" 90 | exit 1 91 | fi 92 | - slack/status: 93 | failure_message: Smoke tests failed for DEV environment 94 | fail_only: true 95 | webhook: $DISCORD_WEBHOOK_URL 96 | 97 | # Need seperate clusters if we have dev and prod, depends how we want to treat dev 98 | deploy-prod: 99 | working_directory: ~/3box-pinning-server 100 | docker: 101 | - image: circleci/python:3.7.1 102 | environment: 103 | AWS_PAGER: '' 104 | steps: 105 | - aws-cli/install 106 | - aws-cli/setup 107 | - run: 108 | name: Deploy Pinning Service 109 | command: aws ecs update-service --force-new-deployment --cluster pinning-node-cluster-prod --service pinning-node-service-prod 110 | - run: 111 | name: Deploy API Service 112 | command: aws ecs update-service --force-new-deployment --cluster pinning-node-cluster-prod --service profile-api-service-prod 113 | 114 | workflows: 115 | build-deploy: 116 | jobs: 117 | - test 118 | - aws-ecr/build-and-push-image: 119 | name: push-dev-image 120 | repo: 3box-pinning-server 121 | tag: develop 122 | requires: 123 | - test 124 | extra-build-args: --build-arg CODE_VERSION=${CIRCLE_SHA1:0:7} 125 | filters: 126 | branches: 127 | only: develop 128 | - aws-ecr/build-and-push-image: 129 | name: push-prod-image 130 | repo: 3box-pinning-server 131 | tag: latest 132 | requires: 133 | - test 134 | extra-build-args: --build-arg CODE_VERSION=${CIRCLE_SHA1:0:7} 135 | filters: 136 | branches: 137 | only: master 138 | - deploy-dev: 139 | requires: 140 | - push-dev-image 141 | filters: 142 | branches: 143 | only: develop 144 | - run-smoke-test: 145 | requires: 146 | - deploy-dev 147 | filters: 148 | branches: 149 | only: develop 150 | - deploy-prod: 151 | requires: 152 | - push-prod-image 153 | filters: 154 | branches: 155 | only: master 156 | -------------------------------------------------------------------------------- /src/__tests__/testClient.js: -------------------------------------------------------------------------------- 1 | const defaultsDeep = require('lodash.defaultsdeep') 2 | const tmp = require('tmp-promise') 3 | tmp.setGracefulCleanup() 4 | 5 | const IPFS = require('ipfs') 6 | const OrbitDB = require('orbit-db') 7 | const Pubsub = require('orbit-db-pubsub') 8 | const { Resolver } = require('did-resolver') 9 | const get3IdResolver = require('3id-resolver').getResolver 10 | const getMuportResolver = require('muport-did-resolver').getResolver 11 | const { 12 | OdbIdentityProvider, 13 | LegacyIPFS3BoxAccessController, 14 | ThreadAccessController, 15 | ModeratorAccessController 16 | } = require('3box-orbitdb-plugins') 17 | const Identities = require('orbit-db-identity-provider') 18 | Identities.addIdentityProvider(OdbIdentityProvider) 19 | const AccessControllers = require('orbit-db-access-controllers') 20 | AccessControllers.addAccessController({ AccessController: LegacyIPFS3BoxAccessController }) 21 | AccessControllers.addAccessController({ AccessController: ThreadAccessController }) 22 | 
AccessControllers.addAccessController({ AccessController: ModeratorAccessController }) 23 | 24 | const { mock3id } = require('./mock3id') 25 | 26 | class TestClient { 27 | constructor (ipfsOpts, pinningRoom) { 28 | const defaultIpfsOpts = { 29 | config: { 30 | Bootstrap: [], 31 | Addresses: { 32 | Swarm: [ 33 | '/ip4/127.0.0.1/tcp/4006', 34 | '/ip4/127.0.0.1/tcp/4007/ws' 35 | ] 36 | } 37 | } 38 | } 39 | this._ipfsConfig = defaultsDeep({}, ipfsOpts, defaultIpfsOpts) 40 | this._pinningRoom = pinningRoom 41 | } 42 | 43 | async init () { 44 | this._tmpDir = await tmp.dir({ unsafeCleanup: true }) 45 | if (!this._ipfsConfig.repo) { 46 | this._ipfsConfig.repo = this._tmpDir.path + '/ipfs' 47 | } 48 | this.ipfs = await IPFS.create(this._ipfsConfig) 49 | this.identity = await Identities.createIdentity({ 50 | type: '3ID', 51 | threeId: mock3id, 52 | identityKeysPath: this._tmpDir.path + '/odbIdentityKeys' 53 | }) 54 | const ipfsId = await this.ipfs.id() 55 | this.orbitdb = await OrbitDB.createInstance(this.ipfs, { 56 | directory: this._tmpDir.path + '/orbitdb', 57 | identity: this.identity 58 | }) 59 | this.pubsub = new Pubsub(this.ipfs, ipfsId.id) 60 | } 61 | 62 | async stop () { 63 | await this.pubsub.disconnect() 64 | await this.orbitdb.stop() 65 | await this.ipfs.stop() 66 | } 67 | 68 | async cleanup () { 69 | await this.stop() 70 | await this._tmpDir.cleanup() 71 | } 72 | 73 | onMsg () { } 74 | 75 | async createDB (withData) { 76 | const key = mock3id.getKeyringBySpaceName().getPublicKeys(true).signingKey 77 | const threeIdResolver = get3IdResolver(this.ipfs) 78 | const muportResolver = getMuportResolver(this.ipfs) 79 | const resolver = new Resolver({...threeIdResolver, ...muportResolver}) 80 | OdbIdentityProvider.setDidResolver(resolver) 81 | const opts = { 82 | format: 'dag-pb', 83 | accessController: { 84 | write: [key], 85 | type: 'legacy-ipfs-3box', 86 | skipManifest: true, 87 | resolver, 88 | } 89 | } 90 | this.rootStore = await this.orbitdb.feed('rs.root', opts) 91 | this.pubStore = await this.orbitdb.keyvalue('test.public', opts) 92 | this.privStore = await this.orbitdb.keyvalue('test.private', opts) 93 | await this.rootStore.add({ odbAddress: this.pubStore.address.toString() }) 94 | await this.rootStore.add({ odbAddress: this.privStore.address.toString() }) 95 | if (withData) { 96 | for (const key in withData.public) { 97 | await this.pubStore.put(key, withData.public[key]) 98 | } 99 | for (const key in withData.private) { 100 | await this.privStore.put(key, withData.private[key]) 101 | } 102 | } 103 | } 104 | 105 | announceDB () { 106 | const rootStoreAddress = this.rootStore.address.toString() 107 | this.pubsub.subscribe(this._pinningRoom, (...args) => this.onMsg.apply(this, args), () => { 108 | this.pubsub.publish(this._pinningRoom, { type: 'PIN_DB', odbAddress: rootStoreAddress }) 109 | }) 110 | } 111 | 112 | async storeSynced ({ thread = false } = {}) { 113 | const syncStore = async store => { 114 | return new Promise((resolve, reject) => { 115 | store.events.on('replicate.progress', 116 | (odbAddress, entryHash, entry, num, max) => { 117 | if (num === max) { 118 | store.events.on('replicated', () => { 119 | resolve() 120 | }) 121 | } 122 | } 123 | ) 124 | }) 125 | } 126 | if (thread) { 127 | await syncStore(this.thread) 128 | } else { 129 | await Promise.all([ 130 | syncStore(this.pubStore), 131 | syncStore(this.privStore) 132 | ]) 133 | } 134 | } 135 | 136 | async createThread (withData) { 137 | const tName = '3box.thread.myspace.coolthread' 138 | this.thread = await 
this.orbitdb.feed(tName, { 139 | identity: this.identity, 140 | accessController: { 141 | type: 'thread-access', 142 | threadName: tName, 143 | members: false, 144 | firstModerator: mock3id.DID, 145 | identity: this.identity 146 | } 147 | }) 148 | if (withData) { 149 | for (const entry of withData) { 150 | await this.thread.add(entry) 151 | } 152 | } 153 | } 154 | 155 | async announceThread () { 156 | const address = this.thread.address.toString() 157 | await this.pubsub.publish(this._pinningRoom, { type: 'SYNC_DB', odbAddress: address, thread: true }) 158 | } 159 | 160 | async getThreadPosts () { 161 | return this.thread 162 | .iterator({ limit: -1 }) 163 | .collect().map(entry => { 164 | const post = Object.assign({ postId: entry.hash }, entry.payload.value) 165 | return post 166 | }) 167 | } 168 | 169 | async getProfile () { 170 | const profile = this.pubStore.all 171 | const parsedProfile = {} 172 | Object.keys(profile).map(key => { parsedProfile[key] = profile[key].value }) 173 | return parsedProfile 174 | } 175 | 176 | async getPrivate () { 177 | const priv = this.privStore.all 178 | const parsedProfile = {} 179 | Object.keys(priv).map(key => { parsedProfile[key] = priv[key].value }) 180 | return parsedProfile 181 | } 182 | } 183 | 184 | module.exports = TestClient 185 | -------------------------------------------------------------------------------- /src/__tests__/pinning.test.js: -------------------------------------------------------------------------------- 1 | jest.mock('3id-resolver', () => { 2 | const { getMock3idResolver } = require('./mock3id') 3 | return { getResolver: getMock3idResolver } 4 | }) 5 | 6 | const Pinning = require('../pinning') 7 | 8 | const EventEmitter = require('events') 9 | 10 | const IPFS = require('ipfs') 11 | const defaultsDeep = require('lodash.defaultsdeep') 12 | const tmp = require('tmp-promise') 13 | tmp.setGracefulCleanup() 14 | 15 | jest.mock('redis', () => { return require('redis-mock') }) 16 | const TestClient = require('./testClient') 17 | 18 | // Needed for ipfs spinup/teardown 19 | jest.setTimeout(15000) 20 | 21 | const pinningIpfsConfig = { 22 | Bootstrap: [], 23 | Addresses: { 24 | Swarm: [ 25 | '/ip4/127.0.0.1/tcp/4002', 26 | '/ip4/127.0.0.1/tcp/4003/ws' 27 | ] 28 | } 29 | } 30 | 31 | const analyticsMock = { 32 | trackPinDB: jest.fn(), 33 | trackSyncDB: jest.fn(), 34 | trackSpaceUpdate: jest.fn(), 35 | trackPublicUpdate: jest.fn(), 36 | trackRootUpdate: jest.fn(), 37 | trackThreadUpdate: jest.fn(), 38 | trackPrivateUpdate: jest.fn(), 39 | trackPinDBAddress: jest.fn(), 40 | trackSpaceUpdateByApp: jest.fn() 41 | } 42 | 43 | const mockProfileData = { 44 | public: { 45 | name: { timeStamp: 12000, value: 'very name' }, 46 | image: { timeStamp: 13000, value: 'such picture' } 47 | }, 48 | private: { 49 | shh: { timeStamp: 14000, value: 'many secret' }, 50 | quiet: { timeStamp: 15000, value: 'wow!' 
} 51 | } 52 | } 53 | 54 | const mockThreadEntries = [ 55 | { message: 'a great post' }, 56 | { message: 'another great post' } 57 | ] 58 | 59 | function addReplicatedEmitter (pinning) { 60 | pinning.events = new EventEmitter() 61 | const origOpenDB = pinning.openDB 62 | function myOpenDB (address, responseFn, onReplicatedFn, rootStoreAddress, analyticsFn) { 63 | const newReplicatedFn = (odbAddress) => { 64 | const numEntries = pinning.openDBs[odbAddress].db._oplog.values.length 65 | pinning.events.emit('replicated', { odbAddress, numEntries }) 66 | if (onReplicatedFn) { 67 | onReplicatedFn(odbAddress) 68 | } 69 | } 70 | origOpenDB.call(pinning, address, responseFn, newReplicatedFn, rootStoreAddress, analyticsFn) 71 | } 72 | pinning.openDB = myOpenDB 73 | return pinning 74 | } 75 | 76 | describe('Pinning', () => { 77 | let tmpDir 78 | let pinning 79 | let testClient 80 | let clientIpfsOpts 81 | 82 | const pinningRoom = 'test-pinning-room' 83 | 84 | beforeEach(async () => { 85 | tmpDir = await tmp.dir({ unsafeCleanup: true }) 86 | const orbitdbPath = tmpDir.path + '/orbitdb' 87 | const ipfsPath = tmpDir.path + '/ipfs' 88 | const ipfsOpts = { 89 | config: pinningIpfsConfig, 90 | repo: ipfsPath 91 | } 92 | const orbitCacheOpts = null 93 | const pubSubConfig = null 94 | const entriesNumCacheOpts = null 95 | const pinWhitelistDids = null 96 | const pinWhitelistSpaces = null 97 | const pinSilent = null 98 | 99 | const ipfs = await IPFS.create(ipfsOpts) 100 | pinning = new Pinning(ipfs, orbitdbPath, analyticsMock, orbitCacheOpts, pubSubConfig, pinningRoom, entriesNumCacheOpts, pinWhitelistDids, pinWhitelistSpaces, pinSilent) 101 | await pinning.start() 102 | await pinning.entriesCache.store.flushall() 103 | const pinningAddresses = await pinning.ipfs.swarm.localAddrs() 104 | clientIpfsOpts = { config: { Bootstrap: pinningAddresses } } 105 | testClient = new TestClient(clientIpfsOpts, pinningRoom) 106 | await testClient.init() 107 | pinning = addReplicatedEmitter(pinning) 108 | }) 109 | 110 | afterEach(async () => { 111 | await testClient.cleanup() 112 | await pinning.stop() 113 | await tmpDir.cleanup() 114 | }) 115 | 116 | it('should sync db correctly from client', async () => { 117 | await testClient.createDB(mockProfileData) 118 | const pinningReplicatedPromise = new Promise((resolve) => { 119 | const pinningStoreEntries = {} 120 | const checkIfStoresReplicated = (data) => { 121 | const storeType = data.odbAddress.split('.')[1] 122 | if (!pinningStoreEntries[storeType] || data.numEntries > pinningStoreEntries[storeType]) { 123 | pinningStoreEntries[storeType] = data.numEntries 124 | } 125 | if (Object.keys(pinningStoreEntries).length === 3 && 126 | pinningStoreEntries.root === 2 && 127 | pinningStoreEntries.public === Object.keys(mockProfileData.public).length && 128 | pinningStoreEntries.private === Object.keys(mockProfileData.private).length) { 129 | pinning.events.off('replicated', checkIfStoresReplicated) 130 | resolve() 131 | } 132 | } 133 | pinning.events.on('replicated', checkIfStoresReplicated) 134 | }) 135 | const responsesPromise = new Promise((resolve, reject) => { 136 | const hasResponses = {} 137 | testClient.onMsg = (topic, data) => { 138 | if (data.type === 'HAS_ENTRIES') { 139 | const storeType = data.odbAddress.split('.')[1] 140 | if (!hasResponses[storeType] || data.numEntries > hasResponses[storeType]) { 141 | hasResponses[storeType] = data.numEntries 142 | } 143 | } 144 | if (['root', 'public', 'private'].every(storeType => storeType in hasResponses)) { 145 | resolve() 146 | 
} 147 | } 148 | }) 149 | await testClient.announceDB() 150 | await pinningReplicatedPromise 151 | await responsesPromise 152 | }) 153 | 154 | it('should sync db correctly to client', async () => { 155 | // -- Create databases on the pinning node using the test client 156 | await testClient.createDB(mockProfileData) 157 | const pinningReplicatedPromise = new Promise((resolve) => { 158 | const pinningStoreEntries = {} 159 | const checkIfStoresReplicated = (data) => { 160 | const storeType = data.odbAddress.split('.')[1] 161 | if (!pinningStoreEntries[storeType] || data.numEntries > pinningStoreEntries[storeType]) { 162 | pinningStoreEntries[storeType] = data.numEntries 163 | } 164 | if (Object.keys(pinningStoreEntries).length === 3 && 165 | pinningStoreEntries.root === 2 && 166 | pinningStoreEntries.public === Object.keys(mockProfileData.public).length && 167 | pinningStoreEntries.private === Object.keys(mockProfileData.private).length) { 168 | pinning.events.off('replicated', checkIfStoresReplicated) 169 | resolve() 170 | } 171 | } 172 | pinning.events.on('replicated', checkIfStoresReplicated) 173 | }) 174 | await testClient.announceDB() 175 | await pinningReplicatedPromise 176 | 177 | // -- Create new client with no data 178 | const client2IpfsOpts = defaultsDeep({ 179 | config: { 180 | Addresses: { 181 | Swarm: [ 182 | '/ip4/127.0.0.1/tcp/4106', 183 | '/ip4/127.0.0.1/tcp/4107/ws' 184 | ] 185 | } 186 | } 187 | }, clientIpfsOpts) 188 | const testClient2 = new TestClient(client2IpfsOpts, pinningRoom) 189 | await testClient2.init() 190 | 191 | // -- Sync new client to pinning node 192 | await testClient2.createDB() 193 | await testClient2.announceDB() 194 | await testClient2.storeSynced() 195 | 196 | const expectedProfile = Object.keys(mockProfileData.public).reduce((acc, key) => { 197 | acc[key] = mockProfileData.public[key].value 198 | return acc 199 | }, {}) 200 | const expectedPrivate = Object.keys(mockProfileData.private).reduce((acc, key) => { 201 | acc[key] = mockProfileData.private[key].value 202 | return acc 203 | }, {}) 204 | expect(await testClient2.getProfile()).toEqual(expectedProfile) 205 | expect(await testClient2.getPrivate()).toEqual(expectedPrivate) 206 | testClient2.cleanup() 207 | }, 30000) 208 | 209 | it('dbs should close after 30 min, but not before', async () => { 210 | await testClient.createDB(mockProfileData) 211 | const pinningReplicatedPromise = new Promise((resolve) => { 212 | const pinningStoreEntries = {} 213 | const checkIfStoresReplicated = (data) => { 214 | const storeType = data.odbAddress.split('.')[1] 215 | if (!pinningStoreEntries[storeType] || data.numEntries > pinningStoreEntries[storeType]) { 216 | pinningStoreEntries[storeType] = data.numEntries 217 | } 218 | if (Object.keys(pinningStoreEntries).length === 3 && 219 | pinningStoreEntries.root === 2 && 220 | pinningStoreEntries.public === Object.keys(mockProfileData.public).length && 221 | pinningStoreEntries.private === Object.keys(mockProfileData.private).length) { 222 | pinning.events.off('replicated', checkIfStoresReplicated) 223 | resolve() 224 | } 225 | } 226 | pinning.events.on('replicated', checkIfStoresReplicated) 227 | }) 228 | await testClient.announceDB() 229 | await pinningReplicatedPromise 230 | 231 | await pinning.checkAndCloseDBs() 232 | let numOpenDBs = Object.keys(pinning.openDBs).length 233 | expect(numOpenDBs).toEqual(3) 234 | // make 20 min pass 235 | // hacky way to get around Date.now() 236 | Object.keys(pinning.openDBs).map(key => { 237 | pinning.openDBs[key].latestTouch -= 20 * 
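// Twenty minutes is still within the thirty-minute dbOpenInterval used by
// checkAndCloseDBs, so no stores should be closed yet.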
60 * 1000 238 | }) 239 | await pinning.checkAndCloseDBs() 240 | numOpenDBs = Object.keys(pinning.openDBs).length 241 | expect(numOpenDBs).toEqual(3) 242 | // make additional 10 min pass 243 | Object.keys(pinning.openDBs).map(key => { 244 | pinning.openDBs[key].latestTouch -= 10 * 60 * 1000 245 | }) 246 | await pinning.checkAndCloseDBs() 247 | numOpenDBs = Object.keys(pinning.openDBs).length 248 | expect(numOpenDBs).toEqual(0) 249 | }) 250 | 251 | describe('Threads', () => { 252 | beforeEach(async () => { 253 | await testClient.createDB(mockProfileData) 254 | const pinningReplicatedPromise = new Promise((resolve) => { 255 | const pinningStoreEntries = {} 256 | const checkIfStoresReplicated = (data) => { 257 | const storeType = data.odbAddress.split('.')[1] 258 | if (!pinningStoreEntries[storeType] || data.numEntries > pinningStoreEntries[storeType]) { 259 | pinningStoreEntries[storeType] = data.numEntries 260 | } 261 | if (Object.keys(pinningStoreEntries).length === 3 && 262 | pinningStoreEntries.root === 2 && 263 | pinningStoreEntries.public === Object.keys(mockProfileData.public).length && 264 | pinningStoreEntries.private === Object.keys(mockProfileData.private).length) { 265 | pinning.events.off('replicated', checkIfStoresReplicated) 266 | resolve() 267 | } 268 | } 269 | pinning.events.on('replicated', checkIfStoresReplicated) 270 | }) 271 | await testClient.announceDB() 272 | await pinningReplicatedPromise 273 | }) 274 | 275 | // TODO: reproduce root failure of following tests (see https://github.com/3box/3box-pinning-node/issues/288) 276 | it.skip('Test to reproduce error in retrieving the thread access node consecutive times', async () => { 277 | await testClient.createThread(mockThreadEntries) 278 | const CID = require('cids') 279 | const cid = new CID('zdpuAqS4Qc9Ff3uuUyT6juCpsC7waWw6NDVqtdPYYL9EZRnYx') 280 | console.log('STARTING') 281 | for (let i = 0; i < 10; i++) { 282 | console.log('fetching...', i) 283 | console.log('MANIFEST', await pinning.ipfs.dag.get(cid)) 284 | // without this delay, consecutive calls fail 285 | // await new Promise(resolve => setTimeout(resolve, 100)) 286 | } 287 | }) 288 | 289 | // TODO: fix (see https://github.com/3box/3box-pinning-node/issues/288) 290 | it.skip('should pin thread data correctly from client', async () => { 291 | await testClient.createThread(mockThreadEntries) 292 | const pinningThreadCreatedPromise = new Promise((resolve) => { 293 | const pinningStoreEntries = {} 294 | const checkIfThreadCreated = (data) => { 295 | console.log('replicated', data) 296 | const storeType = data.odbAddress.split('.')[1] 297 | if (!pinningStoreEntries[storeType] || data.numEntries > pinningStoreEntries[storeType]) { 298 | pinningStoreEntries[storeType] = data.numEntries 299 | } 300 | if (pinningStoreEntries.thread === 2) { 301 | pinning.events.off('replicated', checkIfThreadCreated) 302 | resolve() 303 | } 304 | } 305 | pinning.events.on('replicated', checkIfThreadCreated) 306 | }) 307 | await testClient.announceThread() 308 | await pinningThreadCreatedPromise 309 | }) 310 | 311 | // TODO: fix (see https://github.com/3box/3box-pinning-node/issues/288) 312 | it.skip('should sync pinned thread to client', async () => { 313 | // -- Create thread on the pinning node using the test client 314 | await testClient.createThread(mockThreadEntries) 315 | const pinningThreadCreatedPromise = new Promise((resolve) => { 316 | const pinningStoreEntries = {} 317 | const checkIfThreadCreated = (data) => { 318 | console.log('replicated', data) 319 | const storeType = 
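// The store type ('root', 'public', 'private', 'space' or 'thread') is the
// second dot-separated segment of the orbitdb address name.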
data.odbAddress.split('.')[1] 320 | if (!pinningStoreEntries[storeType] || data.numEntries > pinningStoreEntries[storeType]) { 321 | pinningStoreEntries[storeType] = data.numEntries 322 | } 323 | if (pinningStoreEntries.thread === 2) { 324 | pinning.events.off('replicated', checkIfThreadCreated) 325 | resolve() 326 | } 327 | } 328 | pinning.events.on('replicated', checkIfThreadCreated) 329 | }) 330 | await testClient.announceThread() 331 | await pinningThreadCreatedPromise 332 | 333 | // -- Create new client with no data 334 | const client2IpfsOpts = defaultsDeep({ 335 | config: { 336 | Addresses: { 337 | Swarm: [ 338 | '/ip4/127.0.0.1/tcp/4106', 339 | '/ip4/127.0.0.1/tcp/4107/ws' 340 | ] 341 | } 342 | } 343 | }, clientIpfsOpts) 344 | const testClient2 = new TestClient(client2IpfsOpts, pinningRoom) 345 | await testClient2.init() 346 | await testClient2.createDB() 347 | await testClient2.announceDB() 348 | await testClient2.createThread() 349 | let posts = await testClient2.getThreadPosts() 350 | expect(posts).toHaveLength(0) 351 | 352 | // -- Sync new client to pinning node 353 | await testClient2.createThread() 354 | await testClient2.announceThread() 355 | await testClient2.storeSynced({ thread: true }) 356 | posts = await testClient2.getThreadPosts() 357 | expect(posts[0].message).toEqual(mockThreadEntries[0].message) 358 | expect(posts[1].message).toEqual(mockThreadEntries[1].message) 359 | testClient2.cleanup() 360 | }) 361 | }) 362 | }) 363 | -------------------------------------------------------------------------------- /src/pinning.js: -------------------------------------------------------------------------------- 1 | const { CID } = require('ipfs') 2 | const OrbitDB = require('orbit-db') 3 | const MessageBroker = require('./messageBroker') 4 | const Pubsub = require('orbit-db-pubsub') 5 | const { Resolver } = require('did-resolver') 6 | const get3IdResolver = require('3id-resolver').getResolver 7 | const getMuportResolver = require('muport-did-resolver').getResolver 8 | const OrbitDBCache = require('orbit-db-cache-redis') 9 | const EntriesCache = require('./hasEntriesCache') 10 | const { 11 | OdbIdentityProvider, 12 | LegacyIPFS3BoxAccessController, 13 | ThreadAccessController, 14 | ModeratorAccessController 15 | } = require('3box-orbitdb-plugins') 16 | const Identities = require('orbit-db-identity-provider') 17 | Identities.addIdentityProvider(OdbIdentityProvider) 18 | const AccessControllers = require('orbit-db-access-controllers') 19 | const IPFSLog = require('ipfs-log') 20 | const { createLogger } = require('./logger') 21 | 22 | AccessControllers.addAccessController({ AccessController: LegacyIPFS3BoxAccessController }) 23 | AccessControllers.addAccessController({ AccessController: ThreadAccessController }) 24 | AccessControllers.addAccessController({ AccessController: ModeratorAccessController }) 25 | 26 | const manifestCacheKey = address => `${address}/_manifest` 27 | 28 | // const IPFS_METRICS_ENABLED = process.env.IPFS_METRICS_ENABLED || true 29 | const IPFS_METRICS_ENABLED = false 30 | const IPFS_METRICS_INTERVAL = process.env.IPFS_METRICS_INTERVAL || 10000 31 | 32 | // A temporary fix for issues described here - https://github.com/orbitdb/orbit-db/pull/688 33 | // Once a permanent fix is merged into orbitdb and we upgrade, we no longer need the 34 | // fix implemented below.
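// In short: _createStore records a per-store 'ready' promise and _onMessage awaits it,
// so heads received over pubsub are not applied to a store that is still loading.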
35 | class OrbitDB3Box extends OrbitDB { 36 | // wrap to return OrbitDB3Box instead of OrbitDB instance 37 | static async createInstance (ipfs, options = {}) { 38 | const orbitdb = await super.createInstance(ipfs, options) 39 | 40 | options = Object.assign({}, options, { 41 | peerId: orbitdb.id, 42 | directory: orbitdb.directory, 43 | keystore: orbitdb.keystore 44 | }) 45 | 46 | return new OrbitDB3Box(orbitdb._ipfs, orbitdb.identity, options) 47 | } 48 | 49 | // register ready listener/state on creation 50 | async _createStore (type, address, options) { 51 | const store = await super._createStore(type, address, options) 52 | this.stores[address.toString()].ready = new Promise(resolve => { store.events.on('ready', resolve) }) 53 | return store 54 | } 55 | 56 | // block message consumption until ready 57 | async _onMessage (address, heads) { 58 | await this.stores[address].ready 59 | super._onMessage(address, heads) 60 | } 61 | } 62 | 63 | const TEN_MINUTES = 10 * 60 * 1000 64 | const THIRTY_MINUTES = 30 * 60 * 1000 65 | const rootEntryTypes = { 66 | SPACE: 'space', 67 | ADDRESS_LINK: 'address-link' 68 | } 69 | 70 | /** 71 | * Pinning - a class for pinning orbitdb stores of 3box users 72 | */ 73 | class Pinning { 74 | constructor (ipfs, orbitdbPath, analytics, orbitCacheOpts, pubSubConfig, pinningRoom, entriesNumCacheOpts, pinWhitelistDids, pinWhitelistSpaces, pinSilent) { 75 | this.ipfs = ipfs 76 | this.orbitdbPath = orbitdbPath 77 | this.openDBs = {} 78 | this.analytics = analytics 79 | this.orbitCacheOpts = orbitCacheOpts 80 | this.pubSubConfig = pubSubConfig 81 | this.pinningRoom = pinningRoom 82 | this.entriesNumCacheOpts = entriesNumCacheOpts 83 | this.dbOpenInterval = THIRTY_MINUTES 84 | this.dbCheckCloseInterval = TEN_MINUTES 85 | this.pinWhitelistDids = pinWhitelistDids 86 | this.pinWhitelistSpaces = pinWhitelistSpaces 87 | this.pinSilent = pinSilent 88 | this.logger = createLogger({ name: 'pinning' }) 89 | } 90 | 91 | async start () { 92 | const ipfsId = await this.ipfs.id() 93 | const threeIdResolver = get3IdResolver(this.ipfs) 94 | const muportResolver = getMuportResolver(this.ipfs) 95 | this._resolver = new Resolver({ ...threeIdResolver, ...muportResolver }) 96 | OdbIdentityProvider.setDidResolver(this._resolver) 97 | 98 | this._pinningResolver = new Resolver({ 99 | ...get3IdResolver(this.ipfs, { pin: false }), 100 | ...getMuportResolver(this.ipfs) 101 | }) 102 | 103 | this.logger.info('ipfsId', ipfsId) 104 | 105 | const orbitOpts = { 106 | directory: this.orbitdbPath 107 | } 108 | if (this.orbitCacheOpts) { 109 | orbitOpts.cache = new OrbitDBCache(this.orbitCacheOpts) 110 | } 111 | 112 | this.entriesCache = new EntriesCache(this.entriesNumCacheOpts) 113 | 114 | // Identity not used, passes ref to 3ID orbit identity provider 115 | orbitOpts.identity = await Identities.createIdentity({ id: 'nullid' }) 116 | 117 | this.orbitdb = await OrbitDB3Box.createInstance(this.ipfs, orbitOpts) 118 | if (this.pubSubConfig) { 119 | const orbitOnMessage = this.orbitdb._onMessage.bind(this.orbitdb) 120 | const messageBroker = new MessageBroker(this.orbitdb._ipfs, this.orbitdb.id, this.pubSubConfig.instanceId, this.pubSubConfig.redis, orbitOnMessage) 121 | this.orbitdb._pubsub = messageBroker 122 | this.orbitdb._onMessage = messageBroker.onMessageWrap.bind(messageBroker) 123 | } 124 | this.pubsub = new Pubsub(this.ipfs, ipfsId.id) 125 | await this.pubsub.subscribe(this.pinningRoom, this._onMessage.bind(this), this._onNewPeer.bind(this)) 126 | this._dbCloseinterval = 
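// Every dbCheckCloseInterval (ten minutes), close any store that has been idle
// for longer than dbOpenInterval (thirty minutes); see checkAndCloseDBs below.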
setInterval(this.checkAndCloseDBs.bind(this), this.dbCheckCloseInterval) 127 | 128 | if (IPFS_METRICS_ENABLED) { 129 | // Log out the bandwidth stats periodically 130 | this._ipfsMetricsInterval = setInterval(async () => { 131 | try { 132 | let stats = this.ipfs.libp2p.metrics.global 133 | this.logger.info(`Bandwidth Stats: ${JSON.stringify(stats)}`) 134 | 135 | stats = await this.ipfs.stats.bitswap() 136 | this.logger.info(`Bitswap Stats: ${JSON.stringify(stats)}`) 137 | 138 | stats = await this.ipfs.stats.repo() 139 | this.logger.info(`Repo Stats: ${JSON.stringify(stats)}`) 140 | } catch (err) { 141 | this.logger.error(`Error occurred trying to check node stats: ${err}`) 142 | } 143 | }, IPFS_METRICS_INTERVAL) 144 | } 145 | } 146 | 147 | async stop () { 148 | clearInterval(this._dbCloseinterval) 149 | 150 | if (IPFS_METRICS_ENABLED) { 151 | clearInterval(this._ipfsMetricsInterval) 152 | } 153 | 154 | await this.pubsub.disconnect() 155 | await this.checkAndCloseDBs() 156 | await this.orbitdb.stop() 157 | await this.ipfs.stop() 158 | } 159 | 160 | async checkAndCloseDBs () { 161 | try { 162 | await Promise.all(Object.keys(this.openDBs).map(async key => { 163 | if (Date.now() > this.openDBs[key].latestTouch + this.dbOpenInterval) { 164 | await this.dbClose(key) 165 | } 166 | })) 167 | } catch (e) { 168 | this.logger.error(`Error occurred trying to close dbs: ${e}`) 169 | } 170 | } 171 | 172 | async dbClose (address) { 173 | const entry = this.openDBs[address] 174 | if (entry) { 175 | if (!entry.loading) { 176 | const db = entry.db 177 | delete this.openDBs[address] 178 | await db.close() 179 | } else { 180 | // we should still close the DB even if we were not able to open it 181 | // otherwise we'll have a memory leak 182 | delete this.openDBs[address] 183 | } 184 | } 185 | } 186 | 187 | async _getDbPromise (address) { 188 | return new Promise((resolve, reject) => { 189 | const cid = new CID(address.split('/')[2]) 190 | const opts = { 191 | syncLocal: true, 192 | sortFn: IPFSLog.Sorting.SortByEntryHash, // this option is required now but will likely not be in the future. 193 | accessController: { 194 | type: 'legacy-ipfs-3box', 195 | skipManifest: true, 196 | resolver: this._resolver 197 | } 198 | } 199 | this.orbitdb.open(address, cid.version === 0 ? opts : {}).then(db => { 200 | db.events.on('ready', () => { resolve(db) }) 201 | db.load() 202 | }) 203 | }) 204 | } 205 | 206 | async openDB (address, responseFn, onReplicatedFn, rootStoreAddress, analyticsFn) { 207 | let root, did 208 | 209 | if (!this.openDBs[address]) { 210 | this.logger.info('Opening db:', address) 211 | 212 | this.openDBs[address] = { dbPromise: this._getDbPromise(address) } 213 | this.openDBs[address].latestTouch = Date.now() 214 | this.openDBs[address].loading = true 215 | 216 | this.openDBs[address].db = await this.openDBs[address].dbPromise 217 | this.openDBs[address].loading = false 218 | responseFn(address) 219 | 220 | root = address.split('.')[1] === 'root' ? address : rootStoreAddress 221 | did = root ? await this.rootStoreToDID(root) : null 222 | if (analyticsFn && did) analyticsFn(did, false) 223 | 224 | this.openDBs[address].db.events.on('replicated', async () => { 225 | if (onReplicatedFn) onReplicatedFn(address) 226 | if (!did) { 227 | did = root ?
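// Retry the DID lookup on replication: the root store's address-link entry may
// not have been available yet when the store was first opened.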
await this.rootStoreToDID(root) : null 228 | if (analyticsFn && did) analyticsFn(did, true) 229 | } 230 | this._cacheNumEntries(address) 231 | this.trackUpdates(address, rootStoreAddress, did) 232 | }) 233 | this.logger.info('Successful db open:', address) 234 | } else { 235 | this.openDBs[address].db = await this.openDBs[address].dbPromise 236 | responseFn(address) 237 | if (analyticsFn) { 238 | root = address.split('.')[1] === 'root' ? address : rootStoreAddress 239 | did = root ? await this.rootStoreToDID(root) : null 240 | analyticsFn(did, false) 241 | } 242 | } 243 | } 244 | 245 | async rootStoreToDID (rootStoreAddress) { 246 | try { 247 | const linkEntry = await this.openDBs[rootStoreAddress].db 248 | .iterator({ limit: -1 }) 249 | .collect() 250 | .find(e => { 251 | const value = e.payload.value 252 | return value.type === rootEntryTypes.ADDRESS_LINK 253 | }) 254 | if (!linkEntry) return null 255 | const linkAddress = linkEntry.payload.value.data 256 | const link = (await this.ipfs.dag.get(linkAddress)).value 257 | const did = /\bdid:.*\b/g.exec(link.message)[0] 258 | return did 259 | } catch (e) { 260 | return null 261 | } 262 | } 263 | 264 | async trackUpdates (odbAddress, rootStoreAddress, did) { 265 | const split = odbAddress.split('.') 266 | if (split[1] === 'space') { 267 | const spaceName = split[2] 268 | this.analytics.trackSpaceUpdate(odbAddress, spaceName, did) 269 | } else if (split[1] === 'public') { 270 | this.analytics.trackPublicUpdate(odbAddress, did) 271 | } else if (split[1] === 'root') { 272 | this.analytics.trackRootUpdate(did) 273 | } else if (split[1] === 'thread') { 274 | const threadName = split[2] 275 | const threadSpace = split[3] 276 | this.analytics.trackThreadUpdate(odbAddress, threadSpace, threadName) 277 | } else if (split[1] === 'private') { 278 | this.analytics.trackPrivateUpdate(odbAddress, did) 279 | } 280 | } 281 | 282 | _shouldHandlePinRequest (pinRequestMessage) { 283 | return !this.pinWhitelistDids || (pinRequestMessage && this.pinWhitelistDids.includes(pinRequestMessage.did)) 284 | } 285 | 286 | _shouldPinSpace (rootEntry) { 287 | const spaceName = rootEntry.odbAddress.split('.')[2] 288 | return !this.pinWhitelistSpaces || (this.pinWhitelistSpaces.includes(spaceName)) 289 | } 290 | 291 | _shouldSyncThread (syncRequestMessage) { 292 | const spaceName = syncRequestMessage.odbAddress.split('.')[3] 293 | return !this.pinWhitelistSpaces || (this.pinWhitelistSpaces.includes(spaceName)) 294 | } 295 | 296 | async _sendHasResponse (address, numEntries) { 297 | if (this.pinSilent) { 298 | return 299 | } 300 | 301 | const cacheEntries = typeof numEntries === 'number' ? 
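// Prefer the entry count supplied by the caller; otherwise fall back to the
// redis-backed EntriesCache.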
numEntries : await this.entriesCache.get(address) 302 | 303 | // line can be removed in future 304 | // if (typeof cacheEntries !== 'number' && await this._dbOpenedBefore(address)) return 305 | await this._publish('HAS_ENTRIES', address, cacheEntries || 0) 306 | } 307 | 308 | async _dbOpenedBefore (address) { 309 | const val = await this.orbitdb.cache.get(manifestCacheKey(address)) 310 | return Boolean(val) 311 | } 312 | 313 | async _cacheNumEntries (address) { 314 | const numEntries = this.openDBs[address].db._oplog.values.length 315 | // 2 lines can be removed in future 316 | // const notCachedBefore = await this.entriesCache.get(address) === null 317 | // if (notCachedBefore) this._sendHasResponse(address, numEntries) 318 | 319 | this.entriesCache.set(address, numEntries) 320 | } 321 | 322 | _openSubStores (address) { 323 | const entries = this.openDBs[address].db.iterator({ limit: -1 }).collect().filter(e => Boolean(e.payload.value.odbAddress)) 324 | const uniqueEntries = entries.filter((e1, i, a) => { 325 | return a.findIndex(e2 => e2.payload.value.odbAddress === e1.payload.value.odbAddress) === i 326 | }) 327 | uniqueEntries.map(entry => { 328 | const data = entry.payload.value 329 | if (data.type === rootEntryTypes.SPACE) { 330 | // don't open db if the space entry is malformed 331 | if (!data.DID || !data.odbAddress) return 332 | if (!this._shouldPinSpace(data)) return 333 | this._pinDID(data.DID) 334 | } 335 | if (data.odbAddress) { 336 | this._sendHasResponse(data.odbAddress) 337 | this.openDB(data.odbAddress, this._cacheNumEntries.bind(this), null, address) 338 | } 339 | }) 340 | 341 | this._pinLinkAddressProofs(address) 342 | } 343 | 344 | _pinLinkAddressProofs (address) { 345 | // assuming address is root store 346 | const entries = this.openDBs[address].db.iterator({ limit: -1 }).collect() 347 | // Filter for address-links, get CID, and get to pin it 348 | const filter = e => e.payload.value.type === 'address-link' || e.payload.value.type === 'auth-data' 349 | entries.filter(filter).forEach(async e => { 350 | const cid = e.payload.value.data 351 | await this.ipfs.dag.get(cid) 352 | // this.ipfs.pin.add(cid) 353 | }) 354 | } 355 | 356 | async _pinDID (did) { 357 | if (!did) return 358 | // We resolve the DID in order to pin the ipfs object 359 | try { 360 | await this._pinningResolver.resolve(did) 361 | // if this throws it's not a DID 362 | } catch (err) { 363 | this.logger.error(`Error occurred trying to pin DID: ${err}`) 364 | } 365 | } 366 | 367 | _openSubStoresAndCacheEntries (address) { 368 | this._cacheNumEntries(address) 369 | this._openSubStores(address) 370 | } 371 | 372 | async _publish (type, odbAddress, data) { 373 | const dataObj = { type, odbAddress } 374 | if (type === 'HAS_ENTRIES') { 375 | dataObj.numEntries = data 376 | } else if (type === 'REPLICATED') { 377 | } 378 | this.pubsub.publish(this.pinningRoom, dataObj) 379 | } 380 | 381 | _onMessage (topic, data) { 382 | const promises = [] 383 | if (OrbitDB.isValidAddress(data.odbAddress)) { 384 | promises.push(this._sendHasResponse(data.odbAddress)) 385 | if (data.type === 'PIN_DB' && this._shouldHandlePinRequest(data)) { 386 | promises.push(this.openDB(data.odbAddress, this._openSubStoresAndCacheEntries.bind(this), this._openSubStores.bind(this), null, this.analytics.trackPinDB.bind(this.analytics))) 387 | this.analytics.trackPinDBAddress(data.odbAddress) 388 | } else if (data.type === 'SYNC_DB' && data.thread && this._shouldSyncThread(data)) { 389 | promises.push(this.openDB(data.odbAddress, 
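// A thread SYNC_DB request only opens the store and caches its entry count;
// unlike PIN_DB it does not open sub-stores or resolve a DID for analytics.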
this._cacheNumEntries.bind(this))) 390 | this.analytics.trackSyncDB(data.odbAddress) 391 | } 392 | if (data.did) { 393 | promises.push(this._pinDID(data.did)) 394 | } 395 | if (data.muportDID) { 396 | promises.push(this._pinDID(data.muportDID)) 397 | } 398 | } 399 | Promise.all(promises).catch((err) => this.logger.error(`Error occurred onMessage: ${err}`)) 400 | } 401 | 402 | _onNewPeer (topic, peer) { 403 | this.logger.info('peer joined room', topic, peer) 404 | } 405 | } 406 | 407 | module.exports = Pinning 408 | --------------------------------------------------------------------------------
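For orientation, the following is a minimal sketch of how the exported Pinning class might be wired up outside the test suite, modelled on the beforeEach block of src/__tests__/pinning.test.js above. The analytics stub, paths and room name are illustrative placeholders rather than the project's real bootstrap code, and a reachable redis instance is assumed because start() creates a redis-backed EntriesCache.

const IPFS = require('ipfs')
const Pinning = require('./pinning')

// No-op stand-in for src/analytics.js; only the methods pinning.js calls are stubbed.
const analytics = {
  trackPinDB: () => {}, trackPinDBAddress: () => {}, trackSyncDB: () => {},
  trackRootUpdate: () => {}, trackPublicUpdate: () => {}, trackPrivateUpdate: () => {},
  trackSpaceUpdate: () => {}, trackThreadUpdate: () => {}
}

async function startPinningNode () {
  const ipfs = await IPFS.create({ repo: './data/ipfs' }) // placeholder repo path
  const pinning = new Pinning(
    ipfs,
    './data/orbitdb',   // orbitdbPath (placeholder)
    analytics,
    null,               // orbitCacheOpts: redis opts for orbit-db-cache-redis, optional
    null,               // pubSubConfig: MessageBroker config, optional
    '3box-pinning',     // pinningRoom: pubsub topic the node listens on (placeholder name)
    null,               // entriesNumCacheOpts: redis opts for the HAS_ENTRIES cache
    null,               // pinWhitelistDids
    null,               // pinWhitelistSpaces
    null                // pinSilent
  )
  await pinning.start()
  return pinning
}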