├── .npmignore ├── packages ├── registry-mirror │ ├── test │ │ ├── node.js │ │ ├── fixtures │ │ │ └── create-replication-master.js │ │ └── mirror.spec.js │ ├── src │ │ ├── index.js │ │ ├── core │ │ │ ├── find-external-port.js │ │ │ ├── routes │ │ │ │ ├── root.js │ │ │ │ ├── packument.js │ │ │ │ └── tarball.js │ │ │ ├── pubsub.js │ │ │ ├── index.js │ │ │ └── config.js │ │ └── cli │ │ │ └── bin.js │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── package.json │ └── CHANGELOG.md ├── replication-master │ ├── src │ │ ├── index.js │ │ ├── cli │ │ │ ├── bin.js │ │ │ ├── worker.js │ │ │ └── master.js │ │ └── core │ │ │ ├── routes │ │ │ ├── worker-online.js │ │ │ ├── worker.js │ │ │ ├── workers.js │ │ │ └── root.js │ │ │ ├── clone │ │ │ ├── cluster-worker.js │ │ │ ├── main-thread-worker.js │ │ │ ├── ingest-module.js │ │ │ └── index.js │ │ │ ├── mdns.js │ │ │ ├── pubsub.js │ │ │ ├── sequence-file.js │ │ │ ├── index.js │ │ │ ├── save-tarballs.js │ │ │ └── config.js │ ├── test │ │ ├── node.js │ │ ├── fixtures │ │ │ └── create-skim-db.js │ │ └── replication.spec.js │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── package.json │ └── CHANGELOG.md └── common │ ├── handlers │ ├── favicon.png │ ├── error-log.js │ ├── cors.js │ ├── index.js │ ├── favicon.js │ ├── abortable-request.js │ └── request-log.js │ ├── index.js │ ├── utils │ ├── error-message.js │ ├── option.js │ ├── fs-repo.js │ ├── sanitise-name.js │ ├── to-boolean.js │ ├── timeout-promise.js │ ├── get-external-url.js │ ├── s3-repo.js │ ├── log.js │ ├── find-base-dir.js │ ├── save-packument.js │ ├── replace-tarball-urls.js │ ├── load-tarball.js │ ├── retry-request.js │ ├── get-an-ipfs.js │ ├── download-tarball.js │ ├── level-lock.js │ ├── cluster-repo.js │ ├── save-tarball.js │ ├── start-ipfs.js │ └── load-packument.js │ ├── test │ ├── error-message.spec.js │ ├── sanitise-name.spec.js │ ├── option.spec.js │ ├── timeout-promise.spec.js │ ├── to-boolean.spec.js │ ├── server.spec.js │ ├── save-packument.spec.js │ ├── replace-tarball-urls.spec.js │ ├── fixtures │ │ └── test-server.js │ ├── get-external-url.spec.js │ ├── find-base-dir.spec.js │ ├── save-tarball.spec.js │ ├── load-tarball.spec.js │ ├── retry-request.spec.js │ └── load-packument.spec.js │ ├── README.md │ ├── package.json │ └── server.js ├── img ├── ip-npm.png ├── ip-npm-small.png └── npm-on-ipfs.svg ├── lerna.json ├── monitoring ├── spiped └── netdata ├── deploy-dev.sh ├── .github └── workflows │ ├── stale.yml │ └── generated-pr.yml ├── deploy.sh ├── .travis.yml ├── upgrade.sh ├── conf └── proxy.conf ├── .gitignore ├── package.json ├── docker-compose.yml └── README.md /.npmignore: -------------------------------------------------------------------------------- 1 | tests/ipfs-repo-tests 2 | -------------------------------------------------------------------------------- /packages/registry-mirror/test/node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | require('./mirror.spec') 4 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | 5 | } 6 | -------------------------------------------------------------------------------- /packages/replication-master/src/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | 5 | } 6 | 
-------------------------------------------------------------------------------- /packages/replication-master/test/node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | require('./replication.spec') 4 | -------------------------------------------------------------------------------- /img/ip-npm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/ipfs-npm-registry-mirror/HEAD/img/ip-npm.png -------------------------------------------------------------------------------- /img/ip-npm-small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/ipfs-npm-registry-mirror/HEAD/img/ip-npm-small.png -------------------------------------------------------------------------------- /packages/common/handlers/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/ipfs-npm-registry-mirror/HEAD/packages/common/handlers/favicon.png -------------------------------------------------------------------------------- /packages/common/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | server: require('./server'), 5 | handlers: require('./handlers') 6 | } 7 | -------------------------------------------------------------------------------- /packages/common/utils/error-message.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | // Wraps an error message in a minimal HTML page. The original markup was lost in 4 | // transit; reconstructed so the output contains the message, which is all the spec and the route handlers rely on. 5 | const lol = (message) => { 6 | return `<html><body><pre>${message}</pre></body></html>` 7 | } 8 | 9 | module.exports = lol 10 | -------------------------------------------------------------------------------- /packages/replication-master/src/cli/bin.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node
2 | 3 | 'use strict' 4 | 5 | const cluster = require('cluster') 6 | 7 | if (cluster.isWorker) { 8 | require('./worker') 9 | } else { 10 | require('./master') 11 | } 12 | -------------------------------------------------------------------------------- /packages/replication-master/src/cli/worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const pkg = require('../../package') 4 | const cluster = require('cluster') 5 | 6 | process.title = `${pkg.name}-worker-${cluster.worker.id}` 7 | 8 | require('../core/clone/cluster-worker') 9 | -------------------------------------------------------------------------------- /lerna.json: -------------------------------------------------------------------------------- 1 | { 2 | "lerna": "2.9.0", 3 | "packages": [ 4 | "packages/*" 5 | ], 6 | "version": "independent", 7 | "command": { 8 | "bootstrap": { 9 | "hoist": true 10 | }, 11 | "run": { 12 | "stream": true 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /monitoring/spiped: -------------------------------------------------------------------------------- 1 | FROM spiped 2 | 3 | ARG SPIPED_KEY 4 | ARG NETDATA_EIP 5 | 6 | EXPOSE 20000 7 | 8 | RUN mkdir /etc/spiped 9 | RUN echo $SPIPED_KEY > /etc/spiped/keyfile 10 | 11 | CMD spiped -F -e -s [0.0.0.0]:20000 -t $NETDATA_EIP:20000 -k /etc/spiped/keyfile 12 | -------------------------------------------------------------------------------- /packages/common/handlers/error-log.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('../utils/log') 4 | 5 | module.exports = function (error, request, response, next) { 6 | log(`💀 ${request.method} ${request.url} ${response.statusCode}`, error) 7 | 8 | next() 9 | } 10 | -------------------------------------------------------------------------------- /packages/common/handlers/cors.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = function (request, response, next) { 4 | response.header('Access-Control-Allow-Origin', '*') 5 | response.header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept') 6 | next() 7 | } 8 | -------------------------------------------------------------------------------- /packages/common/handlers/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | abortableRequest: require('./abortable-request'), 5 | errorLog: require('./error-log'), 6 | requestLog: require('./request-log'), 7 | favicon: require('./favicon'), 8 | cors: require('./cors') 9 | } 10 | -------------------------------------------------------------------------------- /deploy-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # Build a Docker image 4 | docker-compose build replicate registry 5 | 6 | # Shut down the registry containers 7 | docker-compose stop replicate registry 8 | 9 | # Restart using the new image 10 | docker-compose up --no-deps proxy replicate registry 11 | -------------------------------------------------------------------------------- /packages/common/utils/option.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | function option () { 4 | for (let i = 0; i < arguments.length; i++) { 5 | const arg
= arguments[i] 6 | 7 | if (arg !== undefined && arg !== null && arg.toString() !== 'NaN') { 8 | return arg 9 | } 10 | } 11 | } 12 | 13 | module.exports = option 14 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /packages/common/handlers/favicon.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const path = require('path') 5 | 6 | module.exports = (config, ipfs, app) => { 7 | return async (request, response, next) => { 8 | fs.createReadStream(path.join(__dirname, 'favicon.png')) 9 | .on('error', () => {}) 10 | .pipe(response) 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /packages/common/handlers/abortable-request.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const AbortController = require('abort-controller') 4 | 5 | module.exports = function (request, response, next) { 6 | const controller = new AbortController() 7 | response.locals.signal = controller.signal 8 | 9 | request.on('aborted', () => { 10 | controller.abort() 11 | }) 12 | 13 | next() 14 | } 15 | -------------------------------------------------------------------------------- /packages/common/utils/fs-repo.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('./log') 4 | const IPFSRepo = require('ipfs-repo') 5 | 6 | const fsRepo = ({ repo }) => { 7 | if (process.env.NODE_ENV === 'development') { 8 | repo = `${repo}-test` 9 | } 10 | 11 | log(`📁 Using fs repo at ${repo}`) 12 | 13 | return new IPFSRepo(repo) 14 | } 15 | 16 | module.exports = fsRepo 17 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/worker-online.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | online 5 | } = require('./workers') 6 | 7 | module.exports = () => { 8 | return (request, response) => { 9 | online() 10 | 11 | response.statusCode = 204 12 | response.setHeader('Content-type', 'application/json; charset=utf-8') 13 | response.end() 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /packages/common/utils/sanitise-name.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const sanitiseName = (name) => { 4 | name = `${(name || 
'').trim()}`.replace(/^(\/)+/, '/') 5 | 6 | if (name.startsWith('/')) { 7 | name = name.substring(1) 8 | } 9 | 10 | if (name.startsWith('@')) { 11 | name = name.replace(/%2f/g, '/') 12 | } 13 | 14 | return name 15 | } 16 | 17 | module.exports = sanitiseName 18 | -------------------------------------------------------------------------------- /packages/common/utils/to-boolean.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const toBoolean = (value) => { 4 | if (value === undefined) { 5 | return undefined 6 | } 7 | 8 | if (value === 'false' || value === '0' || value === 'no') { 9 | return false 10 | } 11 | 12 | if (value === 'true' || value === '1' || value === 'yes') { 13 | return true 14 | } 15 | 16 | return Boolean(value) 17 | } 18 | 19 | module.exports = toBoolean 20 | -------------------------------------------------------------------------------- /packages/common/test/error-message.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const errorMessage = require('../utils/error-message') 8 | 9 | describe('error-message', () => { 10 | it('should return an error message', () => { 11 | const message = 'hello' 12 | 13 | expect(errorMessage(message)).to.contain(message) 14 | }) 15 | }) 16 | -------------------------------------------------------------------------------- /monitoring/netdata: -------------------------------------------------------------------------------- 1 | FROM netdata/netdata 2 | 3 | ARG NETDATA_API_KEY 4 | 5 | RUN echo $'[global]\n\ 6 | memory mode = none\n\ 7 | hostname = registry.js.ipfs.io\n\ 8 | [health]\n\ 9 | enabled = no\n ' > /etc/netdata/netdata.conf 10 | 11 | RUN echo $'[stream]\n\ 12 | enabled = yes\n\ 13 | destination = spiped:20000\n\ 14 | api key = '$NETDATA_API_KEY$'\n ' > /etc/netdata/stream.conf 15 | 16 | RUN chown root:netdata /etc/netdata/stream.conf 17 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # Remove old images 4 | docker system prune -a -f 5 | docker rm $(docker ps -q -f 'status=exited') || echo 'Failed to remove old containers, maybe there was nothing to do' 6 | docker rmi $(docker images -q -f "dangling=true") || echo 'Failed to remove old images, maybe there was nothing to do' 7 | 8 | # Build a Docker image 9 | docker-compose build --no-cache 10 | 11 | # Restart using the new image 12 | docker-compose up -d --scale registry=5 13 | -------------------------------------------------------------------------------- /packages/common/utils/timeout-promise.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const delay = require('delay') 4 | 5 | const timeout = (promise, ms) => { 6 | return Promise.race([ 7 | promise, 8 | new Promise((resolve, reject) => { 9 | delay(ms) 10 | .then(() => { 11 | const error = new Error('Timed out') 12 | error.code = 'ETIMEOUT' 13 | 14 | reject(error) 15 | }, reject) 16 | }) 17 | ]) 18 | } 19 | 20 | module.exports = timeout 21 | -------------------------------------------------------------------------------- /packages/common/test/sanitise-name.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 
| const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const sanitiseName = require('../utils/sanitise-name') 8 | 9 | describe('sanitise-name', () => { 10 | it('should sanitise a package name', () => { 11 | expect(sanitiseName('hello')).to.equal('hello') 12 | expect(sanitiseName(' /@hello/blah ')).to.equal('@hello/blah') 13 | expect(sanitiseName(' /@hello%2fblah ')).to.equal('@hello/blah') 14 | }) 15 | }) 16 | -------------------------------------------------------------------------------- /packages/common/test/option.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const option = require('../utils/option') 8 | 9 | describe('option', () => { 10 | it('should return the first non-undefined argument', () => { 11 | const result = option(null, 1, 2, 3) 12 | 13 | expect(result).to.equal(1) 14 | }) 15 | 16 | it('should return false arguments', () => { 17 | const result = option(null, false, 2, 3) 18 | 19 | expect(result).to.equal(false) 20 | }) 21 | }) 22 | -------------------------------------------------------------------------------- /packages/common/utils/get-external-url.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | URL 5 | } = require('url') 6 | 7 | module.exports = (config) => { 8 | const url = new URL('http://foo.com') 9 | url.protocol = (config.external && config.external.protocol) || config.http.protocol 10 | url.host = (config.external && config.external.host) || config.http.host 11 | url.port = (config.external && config.external.port) || config.http.port 12 | 13 | const string = url.toString() 14 | 15 | // strip the trailing slash 16 | return string.substring(0, string.length - 1) 17 | } 18 | -------------------------------------------------------------------------------- /packages/common/handlers/request-log.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('../utils/log') 4 | 5 | module.exports = function (request, response, next) { 6 | response.locals.start = Date.now() 7 | 8 | response.on('finish', () => { 9 | const disposition = response.getHeader('Content-Disposition') 10 | let prefix = '📄' 11 | 12 | if (disposition && disposition.endsWith('tgz')) { 13 | prefix = '🎁' 14 | } 15 | 16 | log(`${prefix} ${request.method} ${request.url} ${response.statusCode} ${Date.now() - response.locals.start}ms`) 17 | }) 18 | 19 | next() 20 | } 21 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | connect 5 | } = require('./workers') 6 | 7 | module.exports = () => { 8 | return (request, response) => { 9 | const worker = request.query.worker 10 | 11 | if (!worker) { 12 | return response.status(400).send('Bad Request') 13 | } 14 | 15 | const info = { 16 | index: connect(worker) 17 | } 18 | 19 | response.statusCode = 200 20 | response.setHeader('Content-type', 'application/json; charset=utf-8') 21 | response.send(JSON.stringify(info, null, 2)) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /packages/common/utils/s3-repo.js: 
-------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { createRepo } = require('datastore-s3') 4 | const log = require('./log') 5 | 6 | const s3Repo = ({ region, bucket, path, accessKeyId, secretAccessKey, createIfMissing }) => { 7 | if (process.env.NODE_ENV === 'development') { 8 | path = `${path}-test` 9 | } 10 | 11 | log(`☁️ Using s3 storage ${region}:${bucket}/${path}`) 12 | 13 | return createRepo({ 14 | path, 15 | createIfMissing 16 | }, { 17 | bucket, 18 | region, 19 | accessKeyId, 20 | secretAccessKey 21 | }) 22 | } 23 | 24 | module.exports = s3Repo 25 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | cache: npm 3 | stages: 4 | - check 5 | - test 6 | - cov 7 | 8 | branches: 9 | only: 10 | - master 11 | - /^release\/.*$/ 12 | 13 | node_js: 14 | - 'lts/*' 15 | - 'node' 16 | 17 | os: 18 | - linux 19 | 20 | script: npx nyc -s npm run test -- --bail 21 | after_success: npx nyc report --reporter=text-lcov > coverage.lcov && npx codecov 22 | 23 | jobs: 24 | include: 25 | - stage: check 26 | script: 27 | - npm run lint 28 | 29 | - stage: test 30 | name: node 31 | script: npm run test 32 | 33 | notifications: 34 | email: false 35 | -------------------------------------------------------------------------------- /packages/common/utils/log.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = (message, error) => { 4 | const time = new Date() 5 | const timestamp = time.toLocaleDateString('en-GB', { 6 | year: 'numeric', 7 | month: 'numeric', 8 | day: 'numeric', 9 | hour: '2-digit', 10 | minute: '2-digit', 11 | second: '2-digit', 12 | timeZoneName: 'short', 13 | hour12: false 14 | }) 15 | 16 | if (error) { 17 | console.error(timestamp, message, error) // eslint-disable-line no-console 18 | 19 | return 20 | } 21 | 22 | console.info(timestamp, message) // eslint-disable-line no-console 23 | } 24 | -------------------------------------------------------------------------------- /upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # Remove old images 4 | docker system prune -a -f 5 | docker rm $(docker ps -q -f 'status=exited') || echo 'Failed to remove old containers, maybe there was nothing to do' 6 | docker rmi $(docker images -q -f "dangling=true") || echo 'Failed to remove old images, maybe there was nothing to do' 7 | 8 | # Get the latest 9 | git pull 10 | 11 | # Build a Docker image 12 | docker-compose build --no-cache replicate registry 13 | 14 | # Shut down the registry containers 15 | docker-compose stop replicate registry 16 | 17 | # Restart using the new image 18 | docker-compose up -d --no-deps --scale registry=5 replicate registry 19 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/cluster-worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const getAnIPFS = require('ipfs-registry-mirror-common/utils/get-an-ipfs') 4 | const ingestModule = require('./ingest-module') 5 | 6 | process.on('message', async ({ packument, seq, options }) => { 7 | const ipfs = await getAnIPFS(options) 8 | 9 | try { 10 | process.send(await ingestModule({ packument, seq, ipfs, options })) 11 | } catch (error) { 12 | 
process.send({ 13 | seq, 14 | name: packument.name, 15 | error: { 16 | message: error.message, 17 | stack: error.stack, 18 | code: error.code 19 | } 20 | }) 21 | } 22 | }) 23 | -------------------------------------------------------------------------------- /packages/common/utils/find-base-dir.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('./log') 4 | 5 | const findBaseDir = async (ipfs, config) => { 6 | try { 7 | const stats = await ipfs.files.stat(config.ipfs.prefix) 8 | 9 | log(`🌿 Root dir ${config.ipfs.prefix} is ${stats.cid}`) 10 | 11 | return stats.cid 12 | } catch (error) { 13 | if (!error.message.includes('does not exist')) { 14 | // an unexpected error, rethrow it instead of recursing forever 15 | throw error 16 | } 17 | 18 | log(`🐺 Creating base dir ${config.ipfs.prefix}`) 19 | 20 | await ipfs.files.mkdir(config.ipfs.prefix, { 21 | parents: true 22 | }) 23 | 24 | return findBaseDir(ipfs, config) 25 | } 26 | } 27 | 28 | module.exports = findBaseDir 29 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/workers.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('ipfs-registry-mirror-common/utils/log') 4 | 5 | const workers = [] 6 | let initialised = 0 7 | 8 | module.exports = { 9 | status: () => { 10 | return { 11 | workers, 12 | initialised, 13 | ready: workers.length === 0 ? true : initialised === workers.length 14 | } 15 | }, 16 | 17 | connect: (worker) => { 18 | let index = workers.indexOf(worker) 19 | 20 | if (index === -1) { 21 | index = workers.push(worker) - 1 22 | } 23 | 24 | log(`👷‍♀️ Worker ${worker} assigned index ${index}`) 25 | 26 | return index 27 | }, 28 | 29 | online: () => { 30 | initialised++ 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /packages/common/utils/save-packument.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const savePackument = async (packument, ipfs, config) => { 4 | if (!packument.name) { 5 | throw new Error('No name found in packument') 6 | } 7 | 8 | let lastErr 9 | 10 | for (let i = 0; i < 5; i++) { 11 | try { 12 | const file = `${config.ipfs.prefix}/${packument.name}` 13 | 14 | await ipfs.files.write(file, JSON.stringify(packument, null, 2), { 15 | truncate: true, 16 | parents: true, 17 | create: true, 18 | cidVersion: 1, 19 | rawLeaves: true 20 | }) 21 | 22 | return 23 | } catch (err) { 24 | lastErr = err 25 | } 26 | } 27 | 28 | throw lastErr 29 | } 30 | 31 | module.exports = savePackument 32 | -------------------------------------------------------------------------------- /conf/proxy.conf: -------------------------------------------------------------------------------- 1 | # HTTP 1.1 support 2 | proxy_http_version 1.1; 3 | proxy_buffering off; 4 | proxy_set_header Host $http_host; 5 | proxy_set_header Upgrade $http_upgrade; 6 | proxy_set_header Connection $proxy_connection; 7 | proxy_set_header X-Real-IP $remote_addr; 8 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 9 | proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto; 10 | proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl; 11 | proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port; 12 | 13 | # Mitigate httpoxy attack 14 | proxy_set_header Proxy ""; 15 | 16 | # Increase proxy timeouts 17 | proxy_connect_timeout 75s; 18 | proxy_send_timeout 60s; 19 | proxy_read_timeout 3600s; 20 |
client_max_body_size 1024m; 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tests/t-run* 2 | # Logs 3 | logs 4 | *.log 5 | 6 | 7 | registry 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | 13 | # Directory for instrumented libs generated by jscoverage/JSCover 14 | lib-cov 15 | 16 | # Coverage directory used by tools like istanbul 17 | coverage 18 | .nyc_output 19 | 20 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 21 | .grunt 22 | 23 | # node-waf configuration 24 | .lock-wscript 25 | 26 | # Compiled binary addons (http://nodejs.org/api/addons.html) 27 | build/Release 28 | 29 | # Dependency directory 30 | # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git 31 | node_modules 32 | 33 | docs 34 | .env 35 | seq.txt 36 | *.heapsnapshot 37 | .vscode 38 | -------------------------------------------------------------------------------- /packages/common/test/timeout-promise.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const timeoutPromise = require('../utils/timeout-promise') 8 | 9 | describe('timeout-promise', () => { 10 | it('should time out', async () => { 11 | try { 12 | await timeoutPromise(new Promise((resolve, reject) => {}), 100) 13 | throw new Error('Expected timeoutPromise to throw') 14 | } catch (error) { 15 | expect(error.code).to.equal('ETIMEOUT') 16 | } 17 | }) 18 | 19 | it('should not time out', async () => { 20 | const result = await timeoutPromise(new Promise((resolve, reject) => { 21 | resolve('ok') 22 | }), 1000) 23 | 24 | expect(result).to.equal('ok') 25 | }) 26 | }) 27 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/mdns.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('ipfs-registry-mirror-common/utils/log') 4 | const { 5 | Advertisement 6 | } = require('dnssd') 7 | 8 | const advertise = async (ipfs, config) => { 9 | if (!config.mdns.enabled) { 10 | return () => {} 11 | } 12 | 13 | log(`📣 Starting mDNS advert for ${config.mdns.name} on port ${config.ipfs.port}`) 14 | 15 | const advertisment = new Advertisement(config.mdns.name, config.ipfs.port, { 16 | txt: { 17 | id: (await ipfs.id()).id 18 | } 19 | }) 20 | advertisment.start() 21 | advertisment.on('error', err => { 22 | console.error(`💥 DNSSD Error: ${err}`) // eslint-disable-line no-console 23 | }) 24 | 25 | return () => { 26 | advertisment.stop() 27 | } 28 | } 29 | 30 | module.exports = advertise 31 | -------------------------------------------------------------------------------- /packages/common/utils/replace-tarball-urls.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const getExternalUrl = require('./get-external-url') 4 | 5 | const replaceTarballUrls = (pkg, config) => { 6 | const prefix = getExternalUrl(config) 7 | const packageName = pkg.name 8 | const moduleName = packageName.startsWith('@') ? 
packageName.split('/').pop() : packageName 9 | 10 | // change tarball URLs to point to us 11 | Object.keys(pkg.versions || {}) 12 | .forEach(versionNumber => { 13 | const version = pkg.versions[versionNumber] 14 | 15 | if (version.dist.source) { 16 | return 17 | } 18 | 19 | version.dist.source = version.dist.tarball 20 | version.dist.tarball = `${prefix}/${packageName}/-/${moduleName}-${versionNumber}.tgz` 21 | }) 22 | 23 | return pkg 24 | } 25 | 26 | module.exports = replaceTarballUrls 27 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-npm-registry-mirror", 3 | "version": "1.0.0", 4 | "description": "Install your dependencies from IPFS", 5 | "scripts": { 6 | "postinstall": "lerna bootstrap --ci", 7 | "reset": "lerna run --parallel clean && rm -rf packages/*/node_modules && rm -rf node_modules && npm i --ignore-scripts && lerna bootstrap", 8 | "test": "lerna run --parallel test", 9 | "test:node": "lerna run --parallel test", 10 | "coverage": "lerna run --parallel coverage", 11 | "build": "lerna run --parallel build", 12 | "deploy": "lerna run --parallel deploy", 13 | "start": "NODE_ENV=development lerna run --parallel start", 14 | "clean": "lerna run --parallel clean", 15 | "lint": "lerna run --parallel lint", 16 | "publish": "lerna publish" 17 | }, 18 | "dependencies": { 19 | "lerna": "^3.1.4" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /packages/common/test/to-boolean.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const toBoolean = require('../utils/to-boolean') 8 | 9 | describe('to-boolean', () => { 10 | it('should convert things to boolean', () => { 11 | expect(toBoolean('true')).to.be.true() 12 | expect(toBoolean('1')).to.be.true() 13 | expect(toBoolean('yes')).to.be.true() 14 | expect(toBoolean('ok')).to.be.true() 15 | expect(toBoolean(true)).to.be.true() 16 | expect(toBoolean(1)).to.be.true() 17 | 18 | expect(toBoolean('false')).to.be.false() 19 | expect(toBoolean('0')).to.be.false() 20 | expect(toBoolean('no')).to.be.false() 21 | expect(toBoolean(false)).to.be.false() 22 | expect(toBoolean(0)).to.be.false() 23 | expect(toBoolean(null)).to.be.false() 24 | 25 | expect(toBoolean(undefined)).to.be.undefined() 26 | }) 27 | }) 28 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/find-external-port.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('ipfs-registry-mirror-common/utils/retry-request') 4 | 5 | module.exports = async (options) => { 6 | if (process.env.NODE_ENV === 'development' || process.env.NODE_ENV === 'test') { 7 | return 0 8 | } 9 | 10 | const docker = await request(Object.assign({}, { 11 | uri: 'http://unix:/tmp/docker.sock:/containers/' + process.env.HOSTNAME + '/json', 12 | json: true, 13 | retries: 100, 14 | retryDelay: 5000, 15 | headers: { 16 | host: ' ' 17 | } 18 | })) 19 | 20 | try { 21 | return docker.NetworkSettings.Ports[`${options.ipfs.port}/tcp`][0].HostPort 22 | } catch (err) { 23 | console.error('Could not find options.ipfs.port', options.ipfs.port, 'in') // eslint-disable-line no-console 24 | 
console.info(JSON.stringify(docker.NetworkSettings, null, 2)) // eslint-disable-line no-console 25 | 26 | throw err 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /packages/registry-mirror/test/fixtures/create-replication-master.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const hat = require('hat') 4 | const { 5 | createTestServer 6 | } = require('ipfs-registry-mirror-common/test/fixtures/test-server') 7 | 8 | const createReplicationMaster = async () => { 9 | const topic = `topic-${hat()}` 10 | 11 | const replicationMaster = await createTestServer(async server => { 12 | return { 13 | '/': JSON.stringify({ 14 | ipfs: await server.ipfs.id(), 15 | // empty directory 16 | root: '/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn', 17 | topic 18 | }), 19 | '/-/worker': JSON.stringify({ 20 | index: 0 21 | }) 22 | } 23 | }) 24 | 25 | replicationMaster.config = { 26 | pubsub: { 27 | topic 28 | }, 29 | ipfs: { 30 | prefix: '/reg-mas-root' 31 | } 32 | } 33 | 34 | return replicationMaster 35 | } 36 | 37 | module.exports = createReplicationMaster 38 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/main-thread-worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { default: PQueue } = require('p-queue') 4 | const EventEmitter = require('events').EventEmitter 5 | const ingestModule = require('./ingest-module') 6 | 7 | const queue = new PQueue({ concurrency: 1 }) 8 | let ipfs 9 | 10 | const mainWorker = new EventEmitter() 11 | mainWorker.send = ({ 12 | packument, 13 | seq, 14 | options 15 | }) => { 16 | queue.add(async () => { 17 | try { 18 | mainWorker.emit('message', await ingestModule({ packument, seq, ipfs, options })) 19 | } catch (error) { 20 | mainWorker.emit('message', { 21 | seq, 22 | name: packument.name, 23 | error: { 24 | message: error.message, 25 | stack: error.stack, 26 | code: error.code 27 | } 28 | }) 29 | } 30 | }) 31 | } 32 | 33 | const mainThreadWorker = async (i) => { 34 | ipfs = i 35 | await queue.onIdle() 36 | 37 | return mainWorker 38 | } 39 | 40 | module.exports = mainThreadWorker 41 | -------------------------------------------------------------------------------- /packages/registry-mirror/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | EXPOSE 8080 4 | EXPOSE 10000-10009 5 | 6 | ENV NODE_ENV=production 7 | RUN npm set unsafe-perm true 8 | 9 | COPY ./package.json /app/package.json 10 | COPY ./package-lock.json /app/package-lock.json 11 | COPY ./lerna.json /app/lerna.json 12 | COPY ./packages/common/package.json /app/packages/common/package.json 13 | COPY ./packages/common/package-lock.json /app/packages/common/package-lock.json 14 | COPY ./packages/registry-mirror/package.json /app/packages/registry-mirror/package.json 15 | COPY ./packages/registry-mirror/package-lock.json /app/packages/registry-mirror/package-lock.json 16 | 17 | WORKDIR /app 18 | 19 | RUN npm install --production 20 | 21 | COPY ./packages/common/utils /app/packages/common/utils 22 | COPY ./packages/common/handlers /app/packages/common/handlers 23 | COPY ./packages/common/server.js /app/packages/common/server.js 24 | COPY ./packages/registry-mirror/src /app/packages/registry-mirror/src 25 | 26 | WORKDIR /app/packages/registry-mirror 27 | 28 | CMD node .
29 | -------------------------------------------------------------------------------- /packages/replication-master/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | EXPOSE 8080 4 | EXPOSE 4001 5 | 6 | ENV NODE_ENV=production 7 | RUN npm set unsafe-perm true 8 | 9 | COPY ./package.json /app/package.json 10 | COPY ./package-lock.json /app/package-lock.json 11 | COPY ./lerna.json /app/lerna.json 12 | COPY ./packages/common/package.json /app/packages/common/package.json 13 | COPY ./packages/common/package-lock.json /app/packages/common/package-lock.json 14 | COPY ./packages/replication-master/package.json /app/packages/replication-master/package.json 15 | COPY ./packages/replication-master/package-lock.json /app/packages/replication-master/package-lock.json 16 | 17 | WORKDIR /app 18 | 19 | RUN npm install --production 20 | 21 | COPY ./packages/common/utils /app/packages/common/utils 22 | COPY ./packages/common/handlers /app/packages/common/handlers 23 | COPY ./packages/common/server.js /app/packages/common/server.js 24 | COPY ./packages/replication-master/src /app/packages/replication-master/src 25 | 26 | WORKDIR /app/packages/replication-master 27 | 28 | CMD node --max-old-space-size=4096 . 29 | -------------------------------------------------------------------------------- /packages/registry-mirror/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 David Dias 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /packages/replication-master/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 David Dias 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software.
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /packages/common/README.md: -------------------------------------------------------------------------------- 1 | # ipfs-registry-mirror-common 2 | 3 |  4 | 5 | [](https://protocol.ai) 6 | [](http://ipfs.io/) 7 | [](http://webchat.freenode.net/?channels=%23ipfs) 8 | [](https://ci.ipfs.team/job/IPFS%20Shipyard/job/ipfs-npm-registry-mirror/job/master/) 9 | [](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror) 10 | [](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror) 11 | 12 | > Holds common files for the ipfs-npm-registry-mirror project 13 | 14 | ## Lead Maintainer 15 | 16 | [Alex Potsides](https://github.com/achingbrain) 17 | -------------------------------------------------------------------------------- /packages/registry-mirror/README.md: -------------------------------------------------------------------------------- 1 | # ipfs-npm-registry-mirror 2 | 3 |  4 | 5 | [](https://protocol.ai) 6 | [](http://ipfs.io/) 7 | [](http://webchat.freenode.net/?channels=%23ipfs) 8 | [](https://ci.ipfs.team/job/IPFS%20Shipyard/job/ipfs-npm-registry-mirror/job/master/) 9 | [](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror) 10 | [](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror) 11 | 12 | > Serves requests for npm modules mirrored on the IPFS network 13 | 14 | ## Lead Maintainer 15 | 16 | [Alex Potsides](https://github.com/achingbrain) 17 | -------------------------------------------------------------------------------- /packages/replication-master/README.md: -------------------------------------------------------------------------------- 1 | # ipfs-npm-replication-master 2 | 3 |  4 | 5 | [](https://protocol.ai) 6 | [](http://ipfs.io/) 7 | [](http://webchat.freenode.net/?channels=%23ipfs) 8 | [](https://ci.ipfs.team/job/IPFS%20Shipyard/job/ipfs-npm-registry-mirror/job/master/) 9 | [](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror) 10 | [](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror) 11 | 12 | > Continually syncs the npm registry and publishes new modules to IPFS 13 | 14 | ## Lead Maintainer 15 | 16 | [Alex Potsides](https://github.com/achingbrain) 17 | -------------------------------------------------------------------------------- /packages/common/test/server.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const expect = require('chai') 6 | .use(require('dirty-chai')) 7 | .expect 8 | const sinon = require('sinon') 9 | const request = require('../utils/retry-request') 10 | 11 | describe('server', function () { 12 | this.timeout(10000) 13 | let server 14 | let getAnIpfs 15 | let ipfs 16 | 17 | beforeEach(() => { 18 | ipfs = { 19 | stop: sinon.stub() 20 | } 21 | getAnIpfs = sinon.stub().returns(ipfs) 22 | 23 | mock('../utils/get-an-ipfs', getAnIpfs) 24 | 25 | server = mock.reRequire('../server') 26 | }) 27 | 28 | 
afterEach(() => { 29 | mock.stopAll() 30 | }) 31 | 32 | it('should create a server', async () => { 33 | const config = { 34 | http: { 35 | 36 | }, 37 | ipfs: { 38 | store: 'fs', 39 | fs: { 40 | 41 | } 42 | } 43 | } 44 | const s = await server(config) 45 | 46 | const result = await request({ 47 | uri: `http://localhost:${config.http.port}/favicon.ico` 48 | }) 49 | 50 | expect(result).to.be.ok() 51 | 52 | await s.stop() 53 | 54 | expect(ipfs.stop.called).to.be.true() 55 | }) 56 | }) 57 | -------------------------------------------------------------------------------- /packages/common/test/save-packument.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const sinon = require('sinon') 5 | const expect = require('chai') 6 | .use(require('dirty-chai')) 7 | .expect 8 | const hat = require('hat') 9 | const savePackument = require('../utils/save-packument') 10 | 11 | describe('save-packument', () => { 12 | let ipfs 13 | let config 14 | 15 | beforeEach(() => { 16 | config = { 17 | ipfs: { 18 | prefix: `/registry-prefix-${hat()}`, 19 | flush: true 20 | } 21 | } 22 | 23 | ipfs = { 24 | files: { 25 | write: sinon.stub() 26 | } 27 | } 28 | }) 29 | 30 | it('should save a packument to ipfs', async () => { 31 | const pkg = { 32 | name: `module-${hat()}` 33 | } 34 | 35 | ipfs.files.write.withArgs(`${config.ipfs.prefix}/${pkg.name}`) 36 | .resolves() 37 | 38 | await savePackument(pkg, ipfs, config) 39 | 40 | expect(ipfs.files.write.called).to.be.true() 41 | }) 42 | 43 | it('should require a package name', async () => { 44 | const pkg = { 45 | 46 | } 47 | 48 | try { 49 | await savePackument(pkg, ipfs, config) 50 | throw new Error('Expected savePackument to throw') 51 | } catch (error) { 52 | expect(error.message).to.contain('No name found') 53 | expect(ipfs.files.write.called).to.be.false() 54 | } 55 | }) 56 | }) 57 | -------------------------------------------------------------------------------- /packages/registry-mirror/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-npm-registry-mirror", 3 | "version": "1.0.0", 4 | "description": "Serves tarballs and json manifests", 5 | "main": "src/cli/bin.js", 6 | "bin": { 7 | "ipfs-npm-registry-mirror": "src/cli/bin.js" 8 | }, 9 | "scripts": { 10 | "test": "aegir test -t node", 11 | "coverage": "aegir coverage", 12 | "lint": "aegir lint", 13 | "start": "node ." 
14 | }, 15 | "repository": { 16 | "type": "git", 17 | "url": "git+https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git" 18 | }, 19 | "license": "MIT", 20 | "bugs": { 21 | "url": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/issues" 22 | }, 23 | "homepage": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror#readme", 24 | "dependencies": { 25 | "aws-sdk": "^2.756.0", 26 | "debug": "^4.0.1", 27 | "dnscache": "^1.0.1", 28 | "dotenv": "^8.0.0", 29 | "express-http-proxy": "^1.4.0", 30 | "ipfs-registry-mirror-common": "^3.0.0", 31 | "p-queue": "^6.0.1", 32 | "uint8arrays": "^1.1.0", 33 | "yargs": "^16.0.3" 34 | }, 35 | "devDependencies": { 36 | "aegir": "^26.0.0", 37 | "chai": "^4.1.2", 38 | "dirty-chai": "^2.0.1", 39 | "hat": "~0.0.3", 40 | "ipfs-unixfs": "^2.0.3", 41 | "ipld-dag-pb": "^0.20.0", 42 | "mock-require": "^3.0.2", 43 | "sinon": "^9.0.2" 44 | }, 45 | "optionalDependencies": { 46 | "appmetrics-dash": "^5.3.0" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /packages/common/test/replace-tarball-urls.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const hat = require('hat') 8 | const replaceTarballUrls = require('../utils/replace-tarball-urls') 9 | 10 | describe('replace-tarball-urls', () => { 11 | it('should replace tarball urls', () => { 12 | const config = { 13 | external: { 14 | protocol: 'http', 15 | host: `localhost-${hat()}`, 16 | port: 80 17 | } 18 | } 19 | const pkg = { 20 | name: `module-${hat()}`, 21 | versions: { 22 | '1.0.0': { 23 | dist: { 24 | tarball: 'a-tarball' 25 | } 26 | }, 27 | '2.0.0': { 28 | dist: { 29 | source: 'original-tarball', 30 | tarball: 'replaced-tarball' 31 | } 32 | } 33 | } 34 | } 35 | 36 | const result = replaceTarballUrls(JSON.parse(JSON.stringify(pkg)), config) 37 | 38 | expect(result.versions['1.0.0'].dist.source).to.equal(pkg.versions['1.0.0'].dist.tarball) 39 | expect(result.versions['1.0.0'].dist.tarball).to.equal(`${config.external.protocol}://${config.external.host}/${pkg.name}/-/${pkg.name}-1.0.0.tgz`) 40 | 41 | // should not change anything if source is already present 42 | expect(result.versions['2.0.0'].dist.source).to.equal(pkg.versions['2.0.0'].dist.source) 43 | expect(result.versions['2.0.0'].dist.tarball).to.equal(pkg.versions['2.0.0'].dist.tarball) 44 | }) 45 | }) 46 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/pubsub.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const hat = require('hat') 4 | const findBaseDir = require('ipfs-registry-mirror-common/utils/find-base-dir') 5 | const log = require('ipfs-registry-mirror-common/utils/log') 6 | 7 | const topic = `ipfs-registry-pubsub-${hat()}` 8 | let lastBaseDir 9 | 10 | const publishIpnsName = async (ipfs, cid) => { 11 | if (!lastBaseDir || cid.toString() !== lastBaseDir.toString()) { // lastBaseDir is undefined until the first publish, so guard the comparison 12 | lastBaseDir = cid 13 | 14 | log(`🗞️ Publishing IPNS update, base dir is /ipfs/${cid}`) 15 | 16 | await ipfs.name.publish(`/ipfs/${cid}`) 17 | 18 | log('📰 Published IPNS update') 19 | } 20 | } 21 | 22 | const publishUpdate = async (ipfs, cid) => { 23 | await ipfs.pubsub.publish(topic, Buffer.from(JSON.stringify({ 24 | type: 'update', 25 | cid: cid.toString() 26 | }))) 27 | 28 | log(`📰 Broadcast update of ${cid}`) 29 | } 30 |
const master = async (config, ipfs, emitter) => { 32 | emitter.on('processed', async () => { 33 | const cid = await findBaseDir(ipfs, config) 34 | 35 | if (config.clone.publish) { 36 | try { 37 | await publishIpnsName(ipfs, cid) 38 | } catch (error) { 39 | log('💥 Error publishing IPNS name', error) 40 | } 41 | } 42 | 43 | try { 44 | await publishUpdate(ipfs, cid) 45 | } catch (error) { 46 | log('💥 Error publishing to topic', error) 47 | } 48 | }) 49 | 50 | const root = await findBaseDir(ipfs, config) 51 | 52 | return { 53 | topic, 54 | root 55 | } 56 | } 57 | 58 | module.exports = master 59 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/routes/root.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const pkg = require('../../../package.json') 4 | const findBaseDir = require('ipfs-registry-mirror-common/utils/find-base-dir') 5 | 6 | let info 7 | let lastUpdate 8 | 9 | const findInfo = async (config, ipfs, worker) => { 10 | if (!lastUpdate || lastUpdate < (Date.now() - 30000)) { 11 | const [ 12 | id, 13 | peers, 14 | topicPeers, 15 | baseDir 16 | ] = await Promise.all([ 17 | ipfs.id(), 18 | ipfs.swarm.addrs(), 19 | config.pubsub.topic ? ipfs.pubsub.peers(config.pubsub.topic) : [], 20 | findBaseDir(ipfs, config) 21 | ]) 22 | 23 | id.addresses = [ 24 | `/ip4/${config.external.ip}/tcp/${config.external.ipfsPort}/ipfs/${id.id}`, 25 | `/dns4/${config.external.host}/tcp/${config.external.ipfsPort}/ipfs/${id.id}` 26 | ] 27 | 28 | info = { 29 | name: pkg.name, 30 | index: worker.index, 31 | version: pkg.version, 32 | ipfs: id, 33 | peers: peers.map(peer => peer.id.toString()), 34 | topicPeers, 35 | // until js can resolve IPNS names remotely, just use the raw hash 36 | root: `/ipfs/${baseDir}` 37 | } 38 | 39 | lastUpdate = Date.now() 40 | } 41 | 42 | return info 43 | } 44 | 45 | module.exports = (config, ipfs, app, worker) => { 46 | return async (request, response, next) => { 47 | response.statusCode = 200 48 | response.setHeader('Content-type', 'application/json; charset=utf-8') 49 | response.send(JSON.stringify(await findInfo(config, request.app.locals.ipfs, worker), null, 2)) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /packages/common/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-registry-mirror-common", 3 | "version": "3.0.0", 4 | "description": "Shared libraries & utilities from ipfs-npm", 5 | "main": "./index.js", 6 | "scripts": { 7 | "test": "aegir test -t node", 8 | "coverage": "aegir coverage", 9 | "lint": "aegir lint" 10 | }, 11 | "repository": { 12 | "type": "git", 13 | "url": "git+https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git" 14 | }, 15 | "license": "MIT", 16 | "bugs": { 17 | "url": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/issues" 18 | }, 19 | "homepage": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror#readme", 20 | "dependencies": { 21 | "abstract-leveldown": "^6.3.0", 22 | "cids": "^1.0.0", 23 | "datastore-fs": "^2.0.1", 24 | "datastore-level": "^2.0.0", 25 | "datastore-s3": "^3.0.0", 26 | "debug": "^4.0.1", 27 | "delay": "^4.3.0", 28 | "express": "^4.16.3", 29 | "express-prom-bundle": "^6.0.0", 30 | "ipfs": "^0.50.2", 31 | "ipfs-http-client": "^47.0.1", 32 | "ipfs-repo": "^6.0.3", 33 | "it-to-buffer": "^1.0.2", 34 | "level": "^6.0.1", 35 | "memdown": "^5.1.0", 36 | "mortice": 
"^2.0.0", 37 | "multileveldown": "^3.0.0", 38 | "once": "^1.4.0", 39 | "request": "^2.88.0", 40 | "request-promise": "^4.2.2", 41 | "which-promise": "^1.0.0" 42 | }, 43 | "devDependencies": { 44 | "aegir": "^26.0.0", 45 | "chai": "^4.1.2", 46 | "dirty-chai": "^2.0.1", 47 | "hat": "~0.0.3", 48 | "ipfsd-ctl": "^7.0.1", 49 | "mocha": "^8.1.3", 50 | "mock-require": "^3.0.2", 51 | "sinon": "^9.0.2" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/routes/packument.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:handlers:manifest') 4 | const loadPackument = require('ipfs-registry-mirror-common/utils/load-packument') 5 | const sanitiseName = require('ipfs-registry-mirror-common/utils/sanitise-name') 6 | const lol = require('ipfs-registry-mirror-common/utils/error-message') 7 | const log = require('ipfs-registry-mirror-common/utils/log') 8 | const replaceTarballUrls = require('ipfs-registry-mirror-common/utils/replace-tarball-urls') 9 | 10 | module.exports = (config, ipfs, app) => { 11 | return async (request, response, next) => { 12 | debug(`Requested ${request.path}`) 13 | 14 | const moduleName = sanitiseName(request.path) 15 | 16 | debug(`Loading packument for ${moduleName}`) 17 | 18 | try { 19 | let packument = await loadPackument(moduleName, ipfs, { 20 | signal: response.locals.signal, 21 | ...config 22 | }) 23 | packument = replaceTarballUrls(packument, config) 24 | 25 | response.statusCode = 200 26 | response.setHeader('Content-type', 'application/json; charset=utf-8') 27 | response.send(JSON.stringify(packument, null, 2)) 28 | } catch (error) { 29 | log(`💥 Could not load packument for ${moduleName}`, error) 30 | 31 | if (error.message.includes('Not found')) { 32 | response.statusCode = 404 33 | response.send(lol(`💥 Could not load ${moduleName}, has it been published?`)) 34 | 35 | return 36 | } 37 | 38 | // a 500 will cause the npm client to retry 39 | response.statusCode = 500 40 | response.send(lol(`💥 ${error.message}`)) 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /packages/common/utils/load-tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const saveTarball = require('./save-tarball') 4 | const CID = require('cids') 5 | const loadPackument = require('./load-packument') 6 | 7 | const readOrDownloadTarball = async function * (path, ipfs, config) { 8 | const { 9 | packageName, 10 | packageVersion 11 | } = extractPackageDetails(path) 12 | 13 | let packument = await loadPackument(packageName, ipfs, config) 14 | let version = packument.versions[packageVersion] 15 | 16 | if (!version) { 17 | throw new Error(`Could not find version ${packageName}@${packageVersion} in available versions ${Object.keys(packument.versions)}`) 18 | } 19 | 20 | if (!version.dist.cid) { 21 | await saveTarball(packument.name, packageVersion, ipfs, config) 22 | 23 | packument = await loadPackument(packageName, ipfs, config) 24 | version = packument.versions[packageVersion] 25 | 26 | if (!version.dist.cid) { 27 | throw new Error(`CID for ${packageName}@${packageVersion} missing after download`) 28 | } 29 | } 30 | 31 | yield * ipfs.cat(new CID(version.dist.cid), { 32 | signal: config.signal 33 | }) 34 | } 35 | 36 | const extractPackageDetails = (path) => { 37 | let [ 38 | packageName, fileName 39 | 
] = path.split('/-/') 40 | 41 | if (packageName.startsWith('/')) { 42 | packageName = packageName.substring(1) 43 | } 44 | 45 | let moduleName = packageName 46 | 47 | if (packageName.startsWith('@')) { 48 | moduleName = packageName.split('/').pop() 49 | } 50 | 51 | const packageVersion = fileName.substring(moduleName.length + 1, fileName.length - 4) 52 | 53 | return { 54 | packageName, 55 | packageVersion 56 | } 57 | } 58 | 59 | module.exports = readOrDownloadTarball 60 | -------------------------------------------------------------------------------- /packages/common/test/fixtures/test-server.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const http = require('http') 4 | const IPFSFactory = require('ipfsd-ctl').createFactory({ 5 | type: 'proc', 6 | ipfsHttpModule: require('ipfs-http-client'), 7 | ipfsModule: require('ipfs'), 8 | test: true, 9 | disposable: true 10 | }) 11 | 12 | let testServers = [] 13 | 14 | module.exports = { 15 | createTestServer: async (resources) => { 16 | const server = http.createServer((request, response) => { 17 | let url = request.url 18 | 19 | if (url.includes('?')) { 20 | url = url.split('?')[0] 21 | } 22 | 23 | if (resources[url]) { 24 | if (typeof resources[url] === 'function') { 25 | return resources[url](request, response) 26 | } 27 | 28 | response.statusCode = 200 29 | return response.end(resources[url]) 30 | } 31 | 32 | response.statusCode = 404 33 | response.end('404') 34 | }) 35 | 36 | await new Promise((resolve, reject) => { 37 | server.listen((error) => { 38 | if (error) { 39 | return reject(error) 40 | } 41 | 42 | resolve() 43 | }) 44 | }) 45 | 46 | testServers.push(server) 47 | 48 | const node = await IPFSFactory.spawn() 49 | 50 | server.ipfs = node.api 51 | 52 | if (typeof resources === 'function') { 53 | resources = await resources(server) 54 | } 55 | 56 | return server 57 | }, 58 | 59 | destroyTestServers: () => { 60 | const servers = testServers 61 | testServers = [] 62 | 63 | return Promise.all( 64 | servers.map((server) => { 65 | return new Promise((resolve) => { 66 | server.ipfs.stop() 67 | server.close(resolve) 68 | }) 69 | }) 70 | ) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /packages/common/test/get-external-url.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const getExternalUrl = require('../utils/get-external-url') 8 | 9 | describe('get-external-url', () => { 10 | it('should use external url from config', () => { 11 | const config = { 12 | external: { 13 | protocol: 'http', 14 | host: 'external-host', 15 | port: 8080 16 | } 17 | } 18 | 19 | const result = getExternalUrl(config) 20 | 21 | expect(result).to.equal('http://external-host:8080') 22 | }) 23 | 24 | it('should omit common ports', () => { 25 | const config = { 26 | external: { 27 | protocol: 'http', 28 | host: 'external-host', 29 | port: 80 30 | } 31 | } 32 | 33 | const result = getExternalUrl(config) 34 | 35 | expect(result).to.equal('http://external-host') 36 | }) 37 | 38 | it('should use internal url from config if external is not configured', () => { 39 | const config = { 40 | http: { 41 | protocol: 'http', 42 | host: 'internal-host', 43 | port: 8080 44 | } 45 | } 46 | 47 | const result = getExternalUrl(config) 48 | 49 | expect(result).to.equal('http://internal-host:8080') 50 | }) 51 | 
52 | it('should prefer the external configuration', () => { 53 | const config = { 54 | http: { 55 | protocol: 'http', 56 | host: 'internal-host', 57 | port: 8080 58 | }, 59 | external: { 60 | protocol: 'http', 61 | host: 'external-host', 62 | port: 8080 63 | } 64 | } 65 | 66 | const result = getExternalUrl(config) 67 | 68 | expect(result).to.equal('http://external-host:8080') 69 | }) 70 | }) 71 | -------------------------------------------------------------------------------- /packages/common/utils/retry-request.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const requestPromise = require('request-promise') 4 | const request = require('request') 5 | const { 6 | PassThrough 7 | } = require('stream') 8 | const log = require('./log') 9 | 10 | const makeRequest = (config) => { 11 | if (config.json) { 12 | return requestPromise(config) 13 | } 14 | 15 | // resolve with stream 16 | return new Promise((resolve, reject) => { 17 | const output = new PassThrough() 18 | 19 | const stream = request(config) 20 | stream.on('response', (response) => { 21 | if (response.statusCode < 200 || response.statusCode > 299) { 22 | return reject(new Error(`${config.uri} - ${response.statusCode}`)) 23 | } 24 | }) 25 | stream.on('error', (error) => { 26 | reject(error) 27 | }) 28 | stream.once('data', (data) => { 29 | resolve(output) 30 | }) 31 | stream.pipe(output) 32 | }) 33 | } 34 | 35 | const retryRequest = (config, attempt = 1) => { 36 | const maxAttempts = config.retries || 1 37 | const delay = config.retryDelay || 0 38 | 39 | return makeRequest(config) 40 | .catch(error => { 41 | const method = (config.method || 'GET').toUpperCase() 42 | 43 | log(`🚨 Request to ${method} ${config.uri} failed on attempt ${attempt}:`, error.message) 44 | 45 | if (attempt > maxAttempts) { 46 | return Promise.reject(new Error(`Gave up requesting ${method} ${config.uri} after ${attempt} attempts`)) 47 | } 48 | 49 | attempt += 1 50 | 51 | return new Promise((resolve, reject) => { 52 | setTimeout(() => { 53 | retryRequest(config, attempt) 54 | .then(resolve) 55 | .catch(reject) 56 | }, delay) 57 | }) 58 | }) 59 | } 60 | 61 | module.exports = retryRequest 62 | -------------------------------------------------------------------------------- /packages/common/test/find-base-dir.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const sinon = require('sinon') 5 | const expect = require('chai') 6 | .use(require('dirty-chai')) 7 | .expect 8 | const hat = require('hat') 9 | const findBaseDir = require('../utils/find-base-dir') 10 | 11 | describe('find-base-dir', () => { 12 | let containingDirectory 13 | let dirName 14 | let prefix 15 | let config 16 | let ipfs 17 | 18 | beforeEach(() => { 19 | containingDirectory = `/${hat()}/${hat()}` 20 | dirName = hat() 21 | prefix = `${containingDirectory}/${dirName}` 22 | config = { 23 | ipfs: { 24 | prefix 25 | } 26 | } 27 | ipfs = { 28 | files: { 29 | ls: sinon.stub(), 30 | mkdir: sinon.stub() 31 | } 32 | } 33 | }) 34 | 35 | it('should find an existing base dir', async () => { 36 | const dirHash = 'QmSomethingSomething' 37 | ipfs.files.stat = sinon.stub().withArgs(config.ipfs.prefix) 38 | .resolves({ 39 | name: dirName, 40 | cid: dirHash 41 | }) 42 | 43 | const result = await findBaseDir(ipfs, config) 44 | 45 | expect(result).to.equal(dirHash) 46 | expect(ipfs.files.mkdir.called).to.be.false() 47 | }) 48 | 49 | it('should create the base dir if it
does not exist', async () => { 50 | const dirHash = 'QmSomethingSomething' 51 | ipfs.files.stat = sinon.stub() 52 | .onFirstCall().throws(new Error('basedir does not exist')) 53 | .onSecondCall().returns({ 54 | name: dirName, 55 | cid: dirHash 56 | }) 57 | 58 | const result = await findBaseDir(ipfs, config) 59 | 60 | expect(result).to.equal(dirHash) 61 | expect(ipfs.files.mkdir.called).to.be.true() 62 | expect(ipfs.files.mkdir.getCall(0).args[0]).to.equal(prefix) 63 | }) 64 | }) 65 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/root.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const pkg = require('../../../package.json') 4 | const findBaseDir = require('ipfs-registry-mirror-common/utils/find-base-dir') 5 | 6 | let info 7 | let lastUpdate 8 | 9 | const findInfo = async (config, ipfs, root, topic, seq) => { 10 | if (!lastUpdate || lastUpdate < (Date.now() - 30000)) { 11 | const [ 12 | id, 13 | peers, 14 | topicPeers 15 | ] = await Promise.all([ 16 | ipfs.id(), 17 | ipfs.swarm.addrs(), 18 | ipfs.pubsub.peers(topic) 19 | ]) 20 | 21 | id.addresses = [ 22 | `/ip4/${config.external.ip}/tcp/${config.ipfs.port}/ipfs/${id.id}`, 23 | `/dns4/${config.external.host}/tcp/${config.ipfs.port}/ipfs/${id.id}` 24 | ] 25 | 26 | info = { 27 | name: pkg.name, 28 | version: pkg.version, 29 | seq, 30 | ipfs: id, 31 | peers: peers.map(peer => peer.id.toString()), 32 | topicPeers, 33 | topic, 34 | // until js can resolve IPNS names remotely, just use the raw hash 35 | root: `/ipfs/${await findBaseDir(ipfs, config)}` 36 | } 37 | 38 | lastUpdate = Date.now() 39 | } 40 | 41 | return info 42 | } 43 | 44 | module.exports = (config, ipfs, app, root, topic) => { 45 | let seq 46 | 47 | app.on('seq', (s) => { 48 | seq = s 49 | }) 50 | 51 | return async (request, response, next) => { 52 | try { 53 | const info = await findInfo(config, ipfs, root, topic, seq) 54 | 55 | response.statusCode = 200 56 | response.setHeader('Content-type', 'application/json; charset=utf-8') 57 | response.send(JSON.stringify(info, null, 2)) 58 | } catch (error) { 59 | response.statusCode = 500 60 | response.setHeader('Content-type', 'text/plain; charset=utf-8') 61 | response.send(error.message) 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /packages/replication-master/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-npm-replication-master", 3 | "version": "1.0.0", 4 | "description": "Replicates the npm registry and puts it onto IPFS", 5 | "main": "src/cli/bin.js", 6 | "bin": { 7 | "ipfs-npm-replication-master": "src/cli/bin.js" 8 | }, 9 | "scripts": { 10 | "test": "aegir test -t node", 11 | "coverage": "aegir coverage", 12 | "lint": "aegir lint", 13 | "start": "node .", 14 | "debug": "DEBUG='ipfs:*' NODE_ENV=development PROFILING=true node --inspect . 
--clone-delay 10000 --ipfs-store-type=s3 --ipfs-store-s3-bucket=npm-on-ipfs --ipfs-store-s3-region=us-west-1 --ipfs-store-s3-path=replication-master-test --follow-seq-file=seq-test.txt --follow-concurrency=1 --request-concurrency=1" 15 | }, 16 | "repository": { 17 | "type": "git", 18 | "url": "git+https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git" 19 | }, 20 | "license": "MIT", 21 | "bugs": { 22 | "url": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/issues" 23 | }, 24 | "homepage": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror#readme", 25 | "dependencies": { 26 | "@achingbrain/follow-registry": "^5.0.0", 27 | "abort-controller": "^3.0.0", 28 | "aws-sdk": "^2.756.0", 29 | "cids": "^1.0.0", 30 | "debug": "^4.0.1", 31 | "delay": "^4.3.0", 32 | "dnscache": "^1.0.1", 33 | "dnssd": "^0.4.1", 34 | "dotenv": "^8.0.0", 35 | "fs-extra": "^9.0.0", 36 | "hat": "~0.0.3", 37 | "ipfs-registry-mirror-common": "^3.0.0", 38 | "it-last": "^1.0.2", 39 | "p-queue": "^6.0.1", 40 | "yargs": "^16.0.3" 41 | }, 42 | "devDependencies": { 43 | "aegir": "^26.0.0", 44 | "chai": "^4.1.2", 45 | "dirty-chai": "^2.0.1", 46 | "mock-require": "^3.0.2", 47 | "sinon": "^9.0.2" 48 | }, 49 | "optionalDependencies": { 50 | "appmetrics-dash": "^5.3.0" 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/routes/tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:handlers:tarball') 4 | const path = require('path') 5 | const loadTarball = require('ipfs-registry-mirror-common/utils/load-tarball') 6 | const lol = require('ipfs-registry-mirror-common/utils/error-message') 7 | const log = require('ipfs-registry-mirror-common/utils/log') 8 | 9 | module.exports = (config, ipfs, app) => { 10 | return async (request, response, next) => { 11 | debug(`Requested ${request.path}`) 12 | 13 | const file = request.path 14 | 15 | debug(`Loading ${file}`) 16 | 17 | try { 18 | response.statusCode = 200 19 | response.setHeader('Content-Disposition', `attachment; filename="${path.basename(request.url)}"`) 20 | 21 | for await (const chunk of loadTarball(file, ipfs, { 22 | signal: response.locals.signal, 23 | ...config 24 | })) { 25 | response.write(chunk) 26 | } 27 | 28 | response.end() 29 | } catch (error) { 30 | log(`💥 Could not load tarball for ${file}`, error) 31 | 32 | if (error.code === 'ECONNREFUSED') { 33 | response.statusCode = 504 34 | } else if (error.code === 'ECONNRESET') { 35 | // will trigger a retry from the npm client 36 | response.statusCode = 500 37 | } else if (error.message.includes('Not found')) { 38 | response.statusCode = 404 39 | response.send(lol(`💥 Could not load ${file}, has it been published?`)) 40 | } else if (error.message.includes('in available versions')) { 41 | response.statusCode = 404 42 | response.send(lol(`💥 Could not load ${file}, version unavailable`)) 43 | } else { 44 | // a 500 will cause the npm client to retry 45 | response.statusCode = 500 46 | response.send(lol(`💥 ${error.message}`)) 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /packages/common/server.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const express = require('express') 4 | const once = require('once') 5 | const { 6 | abortableRequest, 7 | errorLog, 8 | favicon, 9 | requestLog, 10 | 
cors 11 | } = require('./handlers') 12 | const prometheus = require('express-prom-bundle') 13 | const promisify = require('util').promisify 14 | const metrics = prometheus({ 15 | includeMethod: true, 16 | autoregister: false 17 | }) 18 | const log = require('./utils/log') 19 | const getAnIPFS = require('./utils/get-an-ipfs') 20 | 21 | module.exports = async (config, handlers = async () => {}) => { 22 | const ipfs = await getAnIPFS(config) 23 | 24 | log('🛫 Starting server') 25 | 26 | const app = express() 27 | 28 | app.use(requestLog) 29 | app.use(metrics) 30 | app.use('/-/metrics', metrics.metricsMiddleware) 31 | app.use(cors) 32 | app.use(abortableRequest) 33 | 34 | app.get('/favicon.ico', favicon(config, ipfs, app)) 35 | app.get('/favicon.png', favicon(config, ipfs, app)) 36 | 37 | await handlers(app, ipfs) 38 | 39 | app.use(errorLog) 40 | 41 | return new Promise((resolve, reject) => { 42 | const callback = once((error) => { 43 | if (error) { 44 | reject(error) 45 | } 46 | 47 | if (!config.http.port) { 48 | config.http.port = server.address().port 49 | } 50 | 51 | log(`🚀 Server running on port ${config.http.port}`) 52 | 53 | resolve({ 54 | server, 55 | app, 56 | ipfs, 57 | stop: () => { 58 | return Promise.all([ 59 | promisify(server.close.bind(server))(), 60 | ipfs.stop() 61 | ]) 62 | .then(() => { 63 | log('✋ Server stopped') 64 | }) 65 | } 66 | }) 67 | }) 68 | 69 | const server = app.listen(config.http.port, callback) 70 | server.once('error', callback) 71 | 72 | app.locals.ipfs = ipfs 73 | }) 74 | } 75 | -------------------------------------------------------------------------------- /packages/replication-master/test/fixtures/create-skim-db.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | createTestServer 5 | } = require('ipfs-registry-mirror-common/test/fixtures/test-server') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | 8 | const createSkimDb = async (registry) => { 9 | const updates = [] 10 | let seq = 0 11 | 12 | const resources = { 13 | '/': JSON.stringify({ 14 | db_name: 'registry', 15 | doc_count: 807004, 16 | doc_del_count: 77670, 17 | update_seq: seq, 18 | purge_seq: 0, 19 | compact_running: false, 20 | disk_size: 6156660994, 21 | other: { 22 | data_size: 19122199289 23 | }, 24 | data_size: 5606706136, 25 | sizes: { 26 | file: 6156660994, 27 | active: 5606706136, 28 | external: 19122199289 29 | }, 30 | instance_start_time: '1538675327980753', 31 | disk_format_version: 6, 32 | committed_update_seq: 6425135, 33 | compacted_seq: 6423134, 34 | uuid: '370e266567ec9d1242acc2612839d6a7' 35 | }), 36 | '/_changes': (request, response, next) => { 37 | try { 38 | while (updates.length) { 39 | const update = updates.shift() 40 | 41 | seq++ 42 | 43 | response.write(JSON.stringify({ 44 | seq, 45 | id: update.name, 46 | changes: [{ 47 | _rev: update.json._rev 48 | }] 49 | }) + '\n') 50 | } 51 | } catch (error) { 52 | log(error) 53 | } 54 | 55 | response.end() 56 | } 57 | } 58 | 59 | const skimDb = await createTestServer(resources) 60 | 61 | skimDb.publish = (update, tarball) => { 62 | registry[`/${update.name}`] = JSON.stringify(update.json) 63 | 64 | if (tarball) { 65 | registry[tarball.path] = tarball.content 66 | } 67 | 68 | updates.push(update) 69 | } 70 | 71 | return skimDb 72 | } 73 | 74 | module.exports = createSkimDb 75 | -------------------------------------------------------------------------------- /packages/common/utils/get-an-ipfs.js: 
-------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IPFS = require('ipfs') 4 | const s3Repo = require('./s3-repo') 5 | const fsRepo = require('./fs-repo') 6 | const clusterRepo = require('./cluster-repo') 7 | const log = require('./log') 8 | const cluster = require('cluster') 9 | 10 | const randomPort = () => { 11 | return Math.floor(Math.random() * 64535) + 1000 12 | } 13 | 14 | const getAnIPFS = async (config) => { 15 | if (config.ipfs.port && config.ipfs.host) { 16 | config.store.port = config.ipfs.port 17 | config.store.host = config.ipfs.host 18 | log(`👺 Connecting to remote IPFS daemon at ${config.ipfs.host}:${config.ipfs.port}`) 19 | } else { 20 | log('😈 Using in-process IPFS daemon') 21 | } 22 | 23 | let repo 24 | 25 | if (config.ipfs.store === 's3') { 26 | repo = s3Repo(config.ipfs.s3) 27 | } 28 | 29 | if (config.ipfs.store === 'fs') { 30 | if (config.clone.concurrency) { 31 | repo = clusterRepo(config.ipfs.fs) 32 | } else { 33 | repo = fsRepo(config.ipfs.fs) 34 | } 35 | } 36 | 37 | log('🏁 Starting an IPFS instance') 38 | 39 | const ipfs = await IPFS.create({ 40 | pass: config.ipfs.pass, 41 | init: { 42 | emptyRepo: true 43 | }, 44 | repo, 45 | EXPERIMENTAL: { 46 | sharding: true 47 | }, 48 | pubsub: { 49 | enabled: true 50 | }, 51 | preload: { 52 | enabled: false 53 | }, 54 | config: { 55 | Addresses: { 56 | Swarm: cluster.isMaster ? [ 57 | `/ip4/0.0.0.0/tcp/${config.ipfs.port || randomPort()}`, 58 | `/ip4/127.0.0.1/tcp/${config.ipfs.wsPort || randomPort()}/ws` 59 | ] : [], 60 | API: `/ip4/127.0.0.1/tcp/${config.ipfs.apiPort || randomPort()}`, 61 | Gateway: `/ip4/127.0.0.1/tcp/${config.ipfs.gatewayPort || randomPort()}` 62 | } 63 | } 64 | }) 65 | 66 | process.on('exit', () => { 67 | ipfs.stop() 68 | }) 69 | 70 | return ipfs 71 | } 72 | 73 | module.exports = getAnIPFS 74 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/sequence-file.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const S3 = require('aws-sdk/clients/s3') 4 | const log = require('ipfs-registry-mirror-common/utils/log') 5 | const fs = require('fs-extra') 6 | 7 | module.exports = ({ ipfs: { store, s3: { bucket, region, accessKeyId, secretAccessKey } }, follow: { seqFile } }) => { 8 | if (store !== 's3') { 9 | log('📁 Using fs sequence file', seqFile) 10 | return { 11 | async read () { // eslint-disable-line require-await 12 | try { 13 | return fs.readFile(seqFile, 'utf8') 14 | } catch (err) { 15 | log(err) 16 | return 0 17 | } 18 | }, 19 | async write (data) { 20 | await fs.writeFile(seqFile, data, 'utf8') 21 | }, 22 | async reset () { 23 | await fs.unlink(seqFile) 24 | } 25 | } 26 | } 27 | 28 | log('☁️ Using s3 sequence file', seqFile) 29 | 30 | const s3 = new S3({ 31 | params: { 32 | Bucket: bucket 33 | }, 34 | region, 35 | accessKeyId, 36 | secretAccessKey 37 | }) 38 | 39 | return { 40 | async read () { 41 | try { 42 | const data = await s3.getObject({ 43 | Key: seqFile 44 | }).promise() 45 | 46 | const seq = data.Body.toString('utf8') 47 | 48 | return parseInt(seq, 10) 49 | } catch (err) { 50 | log(`💥 Could not load seq file from ${seqFile}`, err) 51 | 52 | return 0 53 | } 54 | }, 55 | async write (data) { 56 | try { 57 | await s3.putObject({ 58 | Key: seqFile, 59 | Body: `${data}` 60 | }).promise() 61 | } catch (err) { 62 | log(`💥 Could not write seq file to ${seqFile}`, err) 63 | } 64 | }, 65 | async reset () { 66 | try {
67 | await s3.deleteObject({ 68 | Key: seqFile 69 | }).promise() 70 | } catch (err) { 71 | log(`💥 Could not reset seq file at ${seqFile}`, err) 72 | } 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/pubsub.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('ipfs-registry-mirror-common/utils/retry-request') 4 | const log = require('ipfs-registry-mirror-common/utils/log') 5 | const { default: PQueue } = require('p-queue') 6 | const uint8ArrayToString = require('uint8arrays/to-string') 7 | const queue = new PQueue({ concurrency: 1 }) 8 | 9 | const findMaster = (config) => { 10 | return request(Object.assign({}, config.request, { 11 | uri: config.pubsub.master, 12 | json: true, 13 | retries: 100, 14 | retryDelay: 5000 15 | })) 16 | } 17 | 18 | const handleUpdate = (config, ipfs, event) => { 19 | if (event.type !== 'update' || !event.cid) { 20 | return 21 | } 22 | 23 | queue.clear() 24 | queue.add(async () => { 25 | log('🦄 Incoming update') 26 | 27 | try { 28 | log(`🐴 Removing old ${config.ipfs.prefix}`) 29 | await ipfs.files.rm(config.ipfs.prefix, { 30 | recursive: true 31 | }) 32 | log(`🐎 Copying /ipfs/${event.cid} to ${config.ipfs.prefix}`) 33 | await ipfs.files.cp(`/ipfs/${event.cid}`, config.ipfs.prefix) 34 | } catch (error) { 35 | log(`💥 Could not update ${event.module}`, error) 36 | } 37 | }) 38 | } 39 | 40 | const subscribeToTopic = async (config, ipfs, master) => { 41 | config.pubsub.topic = master.topic 42 | 43 | await ipfs.pubsub.subscribe(master.topic, (event) => { 44 | if (event.from !== master.ipfs.id) { 45 | return 46 | } 47 | 48 | handleUpdate(config, ipfs, JSON.parse(uint8ArrayToString(event.data, 'utf8'))) 49 | }) 50 | } 51 | 52 | const updateRoot = (config, ipfs, master) => { 53 | return ipfs.files.cp(master.root, config.ipfs.prefix) 54 | } 55 | 56 | const worker = async (config, ipfs) => { 57 | let timer = Date.now() 58 | const master = await findMaster(config) 59 | log(`🧚♀️ Found master id ${master.ipfs.id} in ${Date.now() - timer}ms`) 60 | 61 | timer = Date.now() 62 | await subscribeToTopic(config, ipfs, master) 63 | log(`🙋 Worker subscribed to ${master.topic} in ${Date.now() - timer}ms`) 64 | 65 | timer = Date.now() 66 | await updateRoot(config, ipfs, master) 67 | log(`🦓 Got root in ${Date.now() - timer}ms`) 68 | } 69 | 70 | module.exports = worker 71 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const config = require('./config') 4 | const clone = require('./clone') 5 | const replicationMaster = require('./pubsub') 6 | const advertise = require('./mdns') 7 | const server = require('ipfs-registry-mirror-common/server') 8 | const root = require('./routes/root') 9 | const worker = require('./routes/worker') 10 | const workerOnline = require('./routes/worker-online') 11 | const delay = require('delay') 12 | const { 13 | status 14 | } = require('./routes/workers') 15 | const log = require('ipfs-registry-mirror-common/utils/log') 16 | const AbortController = require('abort-controller') 17 | 18 | module.exports = async (options) => { 19 | options = config(options) 20 | 21 | const result = await server(options, async (app, ipfs) => { 22 | const res = await replicationMaster(options, ipfs, app) 23 | 24 | app.get('/', 
root(options, ipfs, app, res.root, res.topic)) 25 | app.get('/-/worker', worker()) 26 | app.post('/-/worker', workerOnline()) 27 | }) 28 | 29 | // give workers a chance to connect 30 | const time = Date.now() 31 | log(`⌚ Waiting for ${options.clone.delay}ms before starting to clone npm`) 32 | 33 | await delay(options.clone.delay || 0) 34 | 35 | const workerStatus = status() 36 | 37 | if (!workerStatus.ready) { 38 | log(`⌚ Waiting for ${workerStatus.workers.length - workerStatus.initialised} of ${workerStatus.workers.length} workers to be ready before starting to clone npm`) 39 | 40 | while (true) { 41 | await delay(options.clone.delay || 0) 42 | 43 | if (status().ready) { 44 | break 45 | } 46 | 47 | log(`⌚ Still waiting for ${workerStatus.workers.length - workerStatus.initialised} of ${workerStatus.workers.length} workers to be ready before starting to clone npm`) 48 | } 49 | } 50 | 51 | log(`⌚ Workers took ${Date.now() - time}ms to initialise`) 52 | 53 | const controller = new AbortController() 54 | 55 | clone(result.app, controller.signal, result.app.locals.ipfs, options) 56 | .then(() => {}, () => {}) 57 | 58 | const stop = result.stop 59 | const stopAdvert = await advertise(result.ipfs, options) 60 | 61 | result.stop = () => { 62 | controller.abort() 63 | stopAdvert() 64 | stop() 65 | } 66 | 67 | return result 68 | } 69 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const proxy = require('express-http-proxy') 4 | const config = require('./config') 5 | const replicationWorker = require('./pubsub') 6 | const getExternalUrl = require('ipfs-registry-mirror-common/utils/get-external-url') 7 | const server = require('ipfs-registry-mirror-common/server') 8 | const tarball = require('./routes/tarball') 9 | const packument = require('./routes/packument') 10 | const root = require('./routes/root') 11 | const request = require('ipfs-registry-mirror-common/utils/retry-request') 12 | const findExternalPort = require('./find-external-port') 13 | const log = require('ipfs-registry-mirror-common/utils/log') 14 | 15 | module.exports = async (options) => { 16 | options = config(options) 17 | 18 | const worker = await request(Object.assign({}, config.request, { 19 | uri: `${options.pubsub.master}/-/worker`, 20 | qs: { 21 | worker: process.env.HOSTNAME 22 | }, 23 | json: true, 24 | retries: 100, 25 | retryDelay: 5000 26 | })) 27 | 28 | options.ipfs.s3.path = `${options.ipfs.s3.path}-${worker.index}` 29 | options.ipfs.fs.repo = `${options.ipfs.fs.repo}-${worker.index}` 30 | options.ipfs.port = 10000 + worker.index 31 | options.external.ipfsPort = await findExternalPort(options) 32 | 33 | const result = await server(options, async (app, ipfs) => { 34 | app.get('/', root(options, ipfs, app, worker)) 35 | 36 | // intercept requests for tarballs and manifests 37 | app.get('/*.tgz', tarball(options, ipfs, app)) 38 | app.get('/*', packument(options, ipfs, app)) 39 | 40 | // everything else should just proxy for the registry 41 | const registry = proxy(options.registries[0], { 42 | limit: options.registryUploadSizeLimit 43 | }) 44 | app.put('/*', registry) 45 | app.post('/*', registry) 46 | app.patch('/*', registry) 47 | app.delete('/*', registry) 48 | app.get('/-/whoami', registry) 49 | 50 | await replicationWorker(options, ipfs, app) 51 | }) 52 | 53 | // finished initialisation 54 | await request(Object.assign({}, 
config.request, { 55 | method: 'post', 56 | uri: `${options.pubsub.master}/-/worker`, 57 | json: true, 58 | retries: 100, 59 | retryDelay: 5000 60 | })) 61 | 62 | const url = getExternalUrl(options) 63 | 64 | log(`🔧 Please either update your npm config with 'npm config set registry ${url}'`) 65 | log(`🔧 or use the '--registry' flag, eg: 'npm install --registry=${url}'`) 66 | 67 | return result 68 | } 69 | -------------------------------------------------------------------------------- /img/npm-on-ipfs.svg: -------------------------------------------------------------------------------- 1 | 2 | 19 | -------------------------------------------------------------------------------- /packages/common/utils/download-tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-common:utils:download-tarball') 4 | const crypto = require('crypto') 5 | const log = require('ipfs-registry-mirror-common/utils/log') 6 | const { urlSource } = require('ipfs') 7 | 8 | const downloadTarball = async (packument, versionNumber, ipfs, options) => { 9 | const version = packument.versions[versionNumber] 10 | 11 | validate(version, versionNumber, packument.name) 12 | 13 | if (version.cid) { 14 | debug(`Skipping version ${versionNumber} of ${packument.name} - already downloaded`) 15 | 16 | return 17 | } 18 | 19 | const start = Date.now() 20 | 21 | const cid = await downloadFile(version.dist.tarball, version.dist.shasum, ipfs, options) 22 | 23 | version.cid = `/ipfs/${cid}` 24 | 25 | log(`🏄♀️ Added ${version.dist.tarball} with CID ${version.cid} in ${Date.now() - start}ms`) 26 | } 27 | 28 | const validate = (version, versionNumber, packageName) => { 29 | if (!version) { 30 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - version not in manifest`) 31 | } 32 | 33 | if (!version.dist) { 34 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no dist section`) 35 | } 36 | 37 | if (!version.dist.tarball) { 38 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no tarball`) 39 | } 40 | 41 | if (!version.dist.shasum) { 42 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no shasum`) 43 | } 44 | } 45 | 46 | const downloadFile = async (url, shasum, ipfs, options) => { 47 | for (let i = 0; i < options.request.retries; i++) { 48 | try { 49 | log(`⬇️ Downloading ${url}`) 50 | const start = Date.now() 51 | 52 | const { 53 | cid 54 | } = await ipfs.add(urlSource(url), { 55 | wrapWithDirectory: false, 56 | pin: options.clone.pin, 57 | version: 1, 58 | rawLeaves: true, 59 | signal: options.signal 60 | }) 61 | 62 | log(`✅ Downloaded ${url} in ${Date.now() - start}ms`) 63 | 64 | await validateShasum(cid, shasum, url, ipfs, options) 65 | 66 | log(`🌍 Added ${url} to IPFS with CID ${cid} in ${Date.now() - start}ms`) 67 | 68 | return cid 69 | } catch (err) { 70 | log(`💥 Download failed`, err) 71 | } 72 | } 73 | 74 | throw new Error(`💥 ${options.request.retries} retries exceeded while downloading ${url}`) 75 | } 76 | 77 | const validateShasum = async (cid, shasum, url, ipfs, options) => { 78 | const hashStart = Date.now() 79 | const hash = crypto.createHash('sha1') 80 | hash.on('error', () => {}) 81 | 82 | for await (const buf of ipfs.cat(cid, { 83 | signal: options.signal 84 | })) { 85 | hash.update(buf) 86 | } 87 | 88 | const result = hash.digest('hex') 89 | 90 | if (result !== shasum) { 91 | throw new Error(`Shasum of 
${url} failed ${result} !== ${shasum}`) 92 | } 93 | 94 | log(`🙆 Checked shasum of ${url} in ${Date.now() - hashStart}ms`) 95 | } 96 | 97 | module.exports = downloadTarball 98 | -------------------------------------------------------------------------------- /packages/common/utils/level-lock.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | AbstractLevelDOWN, 5 | AbstractIterator 6 | } = require('abstract-leveldown') 7 | const mortice = require('mortice') 8 | 9 | const handle = (resolve, reject) => { 10 | return (err, res) => { 11 | if (err) { 12 | return reject(err) 13 | } 14 | 15 | resolve(res) 16 | } 17 | } 18 | 19 | class LevelLock extends AbstractLevelDOWN { 20 | constructor (db, opts) { 21 | super(db, opts) 22 | 23 | this.db = db 24 | this.opts = opts || {} 25 | this.mutex = mortice(this.opts.lock || 'level-lock') 26 | } 27 | 28 | _open (options, callback) { 29 | this.db.open(options, callback) 30 | } 31 | 32 | _close (callback) { 33 | this.db.close(callback) 34 | } 35 | 36 | _put (key, value, options, callback) { 37 | this.mutex.writeLock(() => { 38 | return new Promise((resolve, reject) => { 39 | this.db.put(key, value, options, handle(resolve, reject)) 40 | }) 41 | .then(res => callback(null, res), callback) 42 | }) 43 | } 44 | 45 | _get (key, options, callback) { 46 | this.mutex.readLock(() => { 47 | return new Promise((resolve, reject) => { 48 | this.db.get(key, options, handle(resolve, reject)) 49 | }) 50 | .then(res => callback(null, res), callback) 51 | }) 52 | } 53 | 54 | _del (key, options, callback) { 55 | this.mutex.writeLock(() => { 56 | return new Promise((resolve, reject) => { 57 | this.db.del(key, options, handle(resolve, reject)) 58 | }) 59 | .then(res => callback(null, res), callback) 60 | }) 61 | } 62 | 63 | _batch (operations, options, callback) { 64 | this.mutex.writeLock(() => { 65 | return new Promise((resolve, reject) => { 66 | this.db.batch(operations, options, handle(resolve, reject)) 67 | }) 68 | .then(res => callback(null, res), callback) 69 | }) 70 | } 71 | 72 | _serializeKey (key) { 73 | if (this.db._serializeKey) { 74 | return this.db._serializeKey(key) 75 | } 76 | 77 | return key 78 | } 79 | 80 | _serializeValue (value) { 81 | if (this.db._serializeValue) { 82 | return this.db._serializeValue(value) 83 | } 84 | 85 | return value 86 | } 87 | 88 | _iterator (options) { 89 | return new LevelLockIterator(this, options) 90 | } 91 | } 92 | 93 | class LevelLockIterator extends AbstractIterator { 94 | constructor (db, options) { 95 | super(db, options) 96 | 97 | this.mutex = db.mutex 98 | this.iter = db.db.iterator(options) 99 | } 100 | 101 | _next (callback) { 102 | this.mutex.readLock((cb) => { 103 | this.iter.next((err, value) => { 104 | cb() 105 | callback(err, value) 106 | }) 107 | }) 108 | } 109 | 110 | _seek (target) { 111 | this.mutex.readLock((cb) => { 112 | this.iter.seek(target) 113 | cb() 114 | }) 115 | } 116 | 117 | _end (callback) { 118 | this.iter.end(callback) 119 | } 120 | } 121 | 122 | module.exports = LevelLock 123 | -------------------------------------------------------------------------------- /packages/common/test/save-tarball.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const sinon = require('sinon') 6 | const expect = require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const hat = require('hat') 10 | const { 11 | 
PassThrough 12 | } = require('stream') 13 | const CID = require('cids') 14 | 15 | describe('save-tarball', () => { 16 | let saveTarball 17 | let loadPackument 18 | let savePackument 19 | let request 20 | let ipfs 21 | let config 22 | 23 | beforeEach(() => { 24 | config = { 25 | request: { 26 | 27 | }, 28 | clone: { 29 | 30 | } 31 | } 32 | 33 | request = sinon.stub() 34 | loadPackument = sinon.stub() 35 | savePackument = sinon.stub() 36 | 37 | mock('../utils/retry-request', request) 38 | mock('../utils/save-packument', savePackument) 39 | mock('../utils/load-packument', loadPackument) 40 | 41 | saveTarball = mock.reRequire('../utils/save-tarball') 42 | 43 | ipfs = { 44 | add: sinon.stub() 45 | } 46 | }) 47 | 48 | afterEach(() => { 49 | mock.stopAll() 50 | }) 51 | 52 | it('should not save a tarball we have already downloaded', async () => { 53 | const versionNumber = '1.0.0' 54 | const pkg = { 55 | name: `module-${hat()}`, 56 | versions: { 57 | [versionNumber]: { 58 | dist: { 59 | cid: 'a-cid', 60 | source: 'tarball-url', 61 | shasum: 'tarball-shasum' 62 | } 63 | } 64 | } 65 | } 66 | 67 | loadPackument.withArgs(pkg.name, ipfs, config) 68 | .resolves(pkg) 69 | 70 | await saveTarball(pkg.name, versionNumber, ipfs, config) 71 | 72 | expect(request.called).to.be.false() 73 | }) 74 | 75 | it('should download a missing tarball', async () => { 76 | const versionNumber = '1.0.0' 77 | const pkg = { 78 | name: `module-${hat()}`, 79 | versions: { 80 | [versionNumber]: { 81 | dist: { 82 | tarball: 'tarball-url', 83 | shasum: '3c4fb10163dc33fd83b588fe36af9aa5efba2985' 84 | } 85 | } 86 | } 87 | } 88 | 89 | loadPackument.withArgs(pkg.name, ipfs, config) 90 | .resolves(pkg) 91 | 92 | ipfs.add.callsFake(stream => { 93 | return new Promise((resolve) => { 94 | stream.on('end', () => { 95 | resolve({ 96 | cid: new CID('QmZEYeEin6wEB7WNyiT7stYTmbYFGy7BzM7T3hRDzRxTvY').toV1() 97 | }) 98 | }) 99 | }) 100 | }) 101 | 102 | request.withArgs({ 103 | uri: 'tarball-url' 104 | }) 105 | .callsFake(() => { 106 | const stream = new PassThrough() 107 | 108 | setTimeout(() => { 109 | stream.write('tarball-content') 110 | stream.end() 111 | }, 100) 112 | 113 | return Promise.resolve(stream) 114 | }) 115 | 116 | await saveTarball(pkg.name, versionNumber, ipfs, config) 117 | 118 | expect(request.called).to.be.true() 119 | }) 120 | }) 121 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/ingest-module.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:clone') 4 | const savePackument = require('ipfs-registry-mirror-common/utils/save-packument') 5 | const saveTarballs = require('../save-tarballs') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | 8 | const publishOrUpdateIPNSName = async (packument, ipfs, options) => { 9 | const timer = Date.now() 10 | const file = `${options.ipfs.prefix}/${packument.name}` 11 | let newNameCreated = false 12 | 13 | if (!packument.ipns) { 14 | // we need to create the ipns name (which will be stable), add it to the 15 | // manifest, save it again and then immediately update the ipns name 16 | 17 | try { 18 | await ipfs.key.gen(packument.name, { 19 | type: 'rsa', 20 | size: 2048 21 | }) 22 | } catch (err) { 23 | if (!err.message.includes('already exists')) { 24 | throw err 25 | } 26 | } 27 | 28 | newNameCreated = true 29 | } 30 | 31 | const stats = await ipfs.files.stat(file) 32 | 33 | const 
result = await ipfs.name.publish(`/ipfs/${stats.hash}`, { 34 | key: packument.name 35 | }) 36 | 37 | if (newNameCreated) { 38 | packument.ipns = result.name 39 | packument = await savePackument(packument, ipfs, options) 40 | 41 | const stats = await ipfs.files.stat(file) 42 | await ipfs.name.publish(`/ipfs/${stats.hash}`, { 43 | key: packument.name 44 | }) 45 | } 46 | 47 | log(`💾 Updated ${packument.name} IPNS name ${packument.ipns} in ${Date.now() - timer}ms`) 48 | } 49 | 50 | module.exports = async ({ packument, seq, ipfs, options }) => { 51 | log(`🎉 Updated version of ${packument.name}`) 52 | const mfsPath = `${options.ipfs.prefix}/${packument.name}` 53 | let mfsVersion = { 54 | versions: {} 55 | } 56 | let timer 57 | 58 | try { 59 | log(`📃 Reading ${packument.name} cached packument from ${mfsPath}`) 60 | timer = Date.now() 61 | mfsVersion = await ipfs.files.read(mfsPath) 62 | log(`📃 Read ${packument.name} cached packument from ${mfsPath} in ${Date.now() - timer}ms`) 63 | } catch (error) { 64 | if (error.message.includes('does not exist')) { 65 | debug(`${mfsPath} not in MFS`) 66 | } else { 67 | debug(`Could not read ${mfsPath}`, error) 68 | } 69 | } 70 | 71 | // save our existing versions so we don't re-download tarballs we already have 72 | Object.keys(mfsVersion.versions || {}).forEach(versionNumber => { 73 | packument.versions[versionNumber] = mfsVersion.versions[versionNumber] 74 | }) 75 | 76 | packument.ipns = mfsVersion.ipns 77 | 78 | timer = Date.now() 79 | await saveTarballs(packument, ipfs, options) 80 | log(`🧳 Saved ${packument.name} tarballs in ${Date.now() - timer}ms`) 81 | 82 | timer = Date.now() 83 | await savePackument(packument, ipfs, options) 84 | log(`💾 Saved ${packument.name} packument in ${Date.now() - timer}ms`) 85 | 86 | if (options.clone.publish) { 87 | await publishOrUpdateIPNSName(packument, ipfs, options) 88 | } 89 | 90 | return { 91 | seq, 92 | name: packument.name 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /packages/common/utils/cluster-repo.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('./log') 4 | const IPFSRepo = require('ipfs-repo') 5 | const cluster = require('cluster') 6 | const multileveldown = require('multileveldown') 7 | const LevelDataStore = require('datastore-level') 8 | const FileDataStore = require('datastore-fs') 9 | const level = require('level') 10 | const net = require('net') 11 | const memdown = require('memdown') 12 | const { Errors } = require('interface-datastore') 13 | 14 | let lock = 'fs' 15 | 16 | if (cluster.isWorker) { 17 | lock = { 18 | lock: () => { 19 | return { 20 | close: () => {} 21 | } 22 | } 23 | } 24 | } 25 | 26 | const clusterRepo = ({ repo }) => { 27 | if (process.env.NODE_ENV === 'development') { 28 | repo = `${repo}-test` 29 | } 30 | 31 | log(`📁 Using fs repo at ${repo}`) 32 | 33 | class MultiLeveLDataStore extends LevelDataStore { 34 | constructor (path, opts) { 35 | super(path, { 36 | ...opts, 37 | db: () => memdown() 38 | }) 39 | 40 | this.opts = opts 41 | } 42 | 43 | _initDb (database, path) { 44 | if (cluster.isMaster) { 45 | return level(path, { 46 | valueEncoding: 'binary', 47 | compression: false // same default as go 48 | }) 49 | } 50 | 51 | return multileveldown.client({ 52 | retry: true, 53 | valueEncoding: 'binary', 54 | compression: false // same default as go 55 | }) 56 | } 57 | 58 | async open () { 59 | if (cluster.isMaster) { 60 | try { 61 | await this.db.open() 
62 | 63 | return new Promise((resolve, reject) => { 64 | this._server = net.createServer((sock) => { 65 | sock.on('error', () => { 66 | sock.destroy() 67 | }) 68 | 69 | sock.pipe(multileveldown.server(this.db)).pipe(sock) 70 | }) 71 | 72 | this._server.listen(this.opts.port, (err) => { 73 | if (err) { 74 | return reject(err) 75 | } 76 | 77 | resolve() 78 | }) 79 | }) 80 | } catch (err) { 81 | throw Errors.dbOpenFailedError(err) 82 | } 83 | } 84 | 85 | this._sock = net.connect(this.opts.port) 86 | this._sock.pipe(this.db.connect()).pipe(this._sock) 87 | } 88 | 89 | close () { 90 | if (cluster.isMaster) { 91 | this._server.close() 92 | return this.db.close() 93 | } 94 | 95 | this._sock.close() 96 | } 97 | } 98 | 99 | return new IPFSRepo(repo, { 100 | lock: lock, 101 | storageBackends: { 102 | root: FileDataStore, 103 | blocks: FileDataStore, 104 | keys: FileDataStore, 105 | datastore: MultiLeveLDataStore, 106 | pins: MultiLeveLDataStore 107 | }, 108 | storageBackendOptions: { 109 | datastore: { 110 | port: 39281 111 | }, 112 | pins: { 113 | port: 39282 114 | } 115 | } 116 | }) 117 | } 118 | 119 | module.exports = clusterRepo 120 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/config.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const toBoolean = require('ipfs-registry-mirror-common/utils/to-boolean') 4 | const option = require('ipfs-registry-mirror-common/utils/option') 5 | 6 | module.exports = (overrides = {}) => { 7 | return { 8 | registries: (overrides.registries || []).concat(option(process.env.REGISTRY, overrides.registry)).filter(Boolean), 9 | registryUpdateInterval: option(process.env.REGISTRY_UPDATE_INTERVAL, overrides.registryUpdateInterval), 10 | registryUploadSizeLimit: option(process.env.MIRROR_UPLOAD_SIZE_LIMIT, overrides.registryUploadSizeLimit), 11 | registryReadTimeout: option(Number(process.env.REGISTRY_READ_TIMEOUT), overrides.registryReadTimeout), 12 | 13 | http: { 14 | protocol: option(process.env.HTTP_PROTOCOL, overrides.httpProtocol), 15 | host: option(process.env.HTTP_HOST, overrides.httpHost), 16 | port: option(Number(process.env.HTTP_PORT), overrides.httpPort) 17 | }, 18 | 19 | external: { 20 | ip: option(process.env.EXTERNAL_IP, overrides.externalIp), 21 | protocol: option(process.env.EXTERNAL_PROTOCOL, overrides.externalProtocol), 22 | host: option(process.env.EXTERNAL_HOST, overrides.externalHost), 23 | port: option(process.env.EXTERNAL_PORT, overrides.externalPort) 24 | }, 25 | 26 | ipfs: { 27 | port: option(process.env.IPFS_SWARM_PORT, overrides.ipfsPort), 28 | prefix: option(process.env.IPFS_MFS_PREFIX, overrides.ipfsMfsPrefix), 29 | flush: option(toBoolean(process.env.IPFS_FLUSH), overrides.ipfsFlush), 30 | store: option(process.env.IPFS_STORE_TYPE, overrides.ipfsStoreType), 31 | 32 | s3: { 33 | region: option(process.env.STORE_S3_REGION, overrides.storeS3Region), 34 | bucket: option(process.env.STORE_S3_BUCKET, overrides.storeS3Bucket), 35 | path: option(process.env.STORE_S3_PATH, overrides.storeS3Path), 36 | accessKeyId: option(process.env.STORE_S3_ACCESS_KEY_ID, overrides.storeS3AccessKeyId), 37 | secretAccessKey: option(process.env.STORE_S3_SECRET_ACCESS_KEY, overrides.storeS3SecretAccessKey), 38 | createIfMissing: option(process.env.STORE_S3_CREATE_IF_MISSING, overrides.createIfMissing) 39 | }, 40 | 41 | fs: { 42 | repo: option(process.env.IPFS_REPO, overrides.ipfsRepo), 43 | port: option(process.env.IPFS_REPO_PORT, 
overrides.ipfsRepoPort) 44 | } 45 | }, 46 | 47 | pubsub: { 48 | master: option(process.env.PUBSUB_MASTER, overrides.pubsubMaster) 49 | }, 50 | 51 | clone: { 52 | pin: option(Number(process.env.CLONE_PIN), overrides.clonePin) 53 | }, 54 | 55 | request: { 56 | retries: option(process.env.REQUEST_RETRIES, overrides.requestRetries), 57 | retryDelay: option(process.env.REQUEST_RETRY_DELAY, overrides.requestRetryDelay), 58 | timeout: option(process.env.REQUEST_TIMEOUT, overrides.requestTimeout), 59 | forever: option(toBoolean(process.env.REQUEST_KEEP_ALIVE), overrides.requestKeepAlive), 60 | pool: { 61 | maxSockets: option(Number(process.env.REQUEST_MAX_SOCKETS), overrides.requestMaxSockets) 62 | } 63 | } 64 | } 65 | } 66 | 67 | module.exports.option = option 68 | module.exports.toBoolean = toBoolean 69 | -------------------------------------------------------------------------------- /packages/common/test/load-tarball.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const sinon = require('sinon') 6 | const expect = require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const hat = require('hat') 10 | const CID = require('cids') 11 | const toBuffer = require('it-to-buffer') 12 | 13 | describe('load-tarball', () => { 14 | let loadTarball 15 | let loadPackument 16 | let saveTarball 17 | let ipfs 18 | let config 19 | 20 | beforeEach(() => { 21 | config = { 22 | registryUpdateInterval: 0, 23 | registry: 'http://foo', 24 | ipfs: { 25 | prefix: `/registry-prefix-${hat()}` 26 | }, 27 | request: { 28 | 29 | }, 30 | http: { 31 | host: 'localhost', 32 | port: 8080, 33 | protocol: 'http' 34 | } 35 | } 36 | 37 | loadPackument = sinon.stub() 38 | saveTarball = sinon.stub() 39 | 40 | mock('../utils/load-packument', loadPackument) 41 | mock('../utils/save-tarball', saveTarball) 42 | 43 | loadTarball = mock.reRequire('../utils/load-tarball') 44 | 45 | ipfs = { 46 | cat: sinon.stub() 47 | } 48 | }) 49 | 50 | afterEach(() => { 51 | mock.stopAll() 52 | }) 53 | 54 | it('should load a tarball from ipfs', async () => { 55 | const packageName = `a-module-${hat()}` 56 | const packageVersion = '1.0.0' 57 | const path = `/${packageName}/-/${packageName}-${packageVersion}.tgz` 58 | const pkg = { 59 | name: packageName, 60 | versions: { 61 | [packageVersion]: { 62 | dist: { 63 | cid: 'QmZEYeEin6wEB7WNyiT7stYTmbYFGy7BzM7T3hRDzRxTvY' 64 | } 65 | } 66 | } 67 | } 68 | 69 | loadPackument.withArgs(packageName, ipfs, config) 70 | .returns(pkg) 71 | 72 | ipfs.cat 73 | .withArgs(new CID(pkg.versions[packageVersion].dist.cid)) 74 | .returns(async function * () { // eslint-disable-line require-await 75 | yield Buffer.from('ok') 76 | }()) 77 | 78 | const result = await toBuffer(loadTarball(path, ipfs, config)) 79 | 80 | expect(result.toString()).to.equal('ok') 81 | }) 82 | 83 | it('should download a tarball that has no cid', async () => { 84 | const packageName = `a-module-${hat()}` 85 | const packageVersion = '1.0.0' 86 | const path = `/${packageName}/-/${packageName}-${packageVersion}.tgz` 87 | const pkg = { 88 | name: packageName, 89 | versions: { 90 | [packageVersion]: { 91 | dist: { 92 | 93 | } 94 | } 95 | } 96 | } 97 | 98 | loadPackument.withArgs(packageName, ipfs, config) 99 | .returns(pkg) 100 | 101 | saveTarball.withArgs(pkg.name, packageVersion, ipfs, config) 102 | .callsFake(() => { 103 | pkg.versions[packageVersion].dist.cid = 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn' 104 | 
}) 105 | 106 | ipfs.cat 107 | .withArgs(new CID('QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn')) 108 | .returns(async function * () { // eslint-disable-line require-await 109 | yield Buffer.from('also ok') 110 | }()) 111 | 112 | const result = await toBuffer(loadTarball(path, ipfs, config)) 113 | 114 | expect(result.toString()).to.equal('also ok') 115 | }) 116 | }) 117 | -------------------------------------------------------------------------------- /packages/common/utils/save-tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('./retry-request') 4 | const crypto = require('crypto') 5 | const loadPackument = require('./load-packument') 6 | const savePackument = require('./save-packument') 7 | const log = require('./log') 8 | 9 | const saveTarball = async function (packageName, versionNumber, ipfs, config) { 10 | const packument = await loadPackument(packageName, ipfs, config) 11 | const version = packument.versions[versionNumber] 12 | 13 | validate(version, versionNumber, packageName) 14 | 15 | if (version.dist.cid) { 16 | log(`Skipping version ${versionNumber} of ${packageName} - already downloaded`) 17 | return 18 | } 19 | 20 | const startTime = Date.now() 21 | const cid = await downloadFile(version.dist.tarball, version.dist.shasum, ipfs, config) 22 | 23 | log(`🏄♀️ Added ${version.dist.tarball} with hash ${cid} in ${Date.now() - startTime}ms`) 24 | 25 | await updateCid(packageName, versionNumber, cid, ipfs, config) 26 | } 27 | 28 | const validate = (version, versionNumber, packageName) => { 29 | if (!version) { 30 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - version not in manifest`) 31 | } 32 | 33 | if (!version.dist) { 34 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no dist section`) 35 | } 36 | 37 | if (!version.dist.shasum) { 38 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no shasum`) 39 | } 40 | } 41 | 42 | const updateCid = async (packageName, versionNumber, cid, ipfs, config) => { 43 | const cidString = cid.toString('base32') 44 | 45 | while (true) { 46 | let packument = await loadPackument(packageName, ipfs, config) 47 | packument.versions[versionNumber].dist.cid = cidString 48 | 49 | await savePackument(packument, ipfs, config) 50 | 51 | packument = await loadPackument(packageName, ipfs, config) 52 | 53 | if (packument.versions[versionNumber].dist.cid === cidString) { 54 | return 55 | } 56 | 57 | log(`Manifest version cid ${packument.versions[versionNumber].dist.cid} did not equal ${cidString}`) 58 | } 59 | } 60 | 61 | const downloadFile = async (url, shasum, ipfs, config) => { 62 | log(`Downloading ${url}`) 63 | 64 | const hash = crypto.createHash('sha1') 65 | hash.setEncoding('hex') 66 | hash.on('error', () => {}) 67 | 68 | const stream = await request(Object.assign({}, config.request, { 69 | uri: url 70 | })) 71 | stream.pipe(hash) 72 | 73 | const { cid } = await ipfs.add(stream, { 74 | wrapWithDirectory: false, 75 | pin: config.clone.pin, 76 | cidVersion: 1, 77 | rawLeaves: true 78 | }) 79 | 80 | const result = hash.read() 81 | 82 | if (result !== shasum) { 83 | if (config.clone.pin) { 84 | // if we pinned the corrupt download, unpin it so it will get garbage collected later 85 | await ipfs.pin.rm(cid) 86 | } 87 | 88 | // we've already piped to the client at this point so can't retry the download 89 | // abort saving the CID of the corrupted download to our copy of the 
manifest 90 | // instead so we retry next time it's requested 91 | throw new Error(`File downloaded from ${url} had invalid shasum ${result} - expected ${shasum}`) 92 | } 93 | 94 | log(`File downloaded from ${url} had shasum ${result} - matched ${shasum}`) 95 | 96 | return cid 97 | } 98 | 99 | module.exports = saveTarball 100 | -------------------------------------------------------------------------------- /packages/common/test/retry-request.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const sinon = require('sinon') 6 | const expect = require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const hat = require('hat') 10 | const { 11 | PassThrough 12 | } = require('stream') 13 | 14 | describe('retry-request', () => { 15 | let retryRequest 16 | let request 17 | let requestPromise 18 | 19 | beforeEach(() => { 20 | requestPromise = sinon.stub() 21 | request = sinon.stub() 22 | 23 | mock('request-promise', requestPromise) 24 | mock('request', request) 25 | 26 | retryRequest = mock.reRequire('../utils/retry-request') 27 | }) 28 | 29 | afterEach(() => { 30 | mock.stopAll() 31 | }) 32 | 33 | it('should retry a request', async () => { 34 | const pkg = { 35 | name: `module-${hat()}` 36 | } 37 | 38 | requestPromise 39 | .onFirstCall() 40 | .rejects(new Error('404')) 41 | 42 | requestPromise 43 | .onSecondCall() 44 | .resolves(JSON.parse(JSON.stringify(pkg))) 45 | 46 | const result = await retryRequest({ 47 | uri: 'something', 48 | json: true 49 | }) 50 | 51 | expect(result).to.deep.equal(pkg) 52 | }) 53 | 54 | it('should retry a streaming request', (done) => { 55 | request 56 | .onFirstCall() 57 | .callsFake(() => { 58 | const stream = new PassThrough() 59 | 60 | setTimeout(() => { 61 | stream.emit('error', new Error('404')) 62 | }, 100) 63 | 64 | return stream 65 | }) 66 | 67 | request 68 | .onSecondCall() 69 | .callsFake(() => { 70 | const stream = new PassThrough() 71 | 72 | setTimeout(() => { 73 | stream.emit('data', 'hello') 74 | stream.end() 75 | }, 100) 76 | 77 | return stream 78 | }) 79 | 80 | retryRequest({ 81 | uri: 'something' 82 | }) 83 | .then((stream) => { 84 | let result 85 | 86 | stream.on('data', (data) => { 87 | result = data.toString('utf8') 88 | }) 89 | 90 | stream.on('end', () => { 91 | expect(result).to.equal('hello') 92 | 93 | done() 94 | }) 95 | }) 96 | .catch(error => { 97 | done(error) 98 | }) 99 | }) 100 | 101 | it('should retry a streaming request that fails load', (done) => { 102 | request 103 | .onFirstCall() 104 | .callsFake(() => { 105 | const stream = new PassThrough() 106 | 107 | setTimeout(() => { 108 | stream.emit('response', { 109 | statusCode: 400 110 | }) 111 | }, 100) 112 | 113 | return stream 114 | }) 115 | 116 | request 117 | .onSecondCall() 118 | .callsFake(() => { 119 | const stream = new PassThrough() 120 | 121 | setTimeout(() => { 122 | stream.emit('response', { 123 | statusCode: 200 124 | }) 125 | stream.emit('data', 'hello') 126 | stream.end() 127 | }, 100) 128 | 129 | return stream 130 | }) 131 | 132 | retryRequest({ 133 | uri: 'something' 134 | }) 135 | .then((stream) => { 136 | let result 137 | 138 | stream.on('data', (data) => { 139 | result = data.toString('utf8') 140 | }) 141 | 142 | stream.on('end', () => { 143 | expect(result).to.equal('hello') 144 | 145 | done() 146 | }) 147 | }) 148 | .catch(error => { 149 | done(error) 150 | }) 151 | }) 152 | }) 153 | 
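Aside - a minimal usage sketch for the retry-request helper exercised by the spec above. This is not a file in the repository; it assumes the common package is installed under its published name and the target URLs are illustrative only:

'use strict'

// Sketch only: exercises both modes of utils/retry-request.
// The registry URLs below are assumptions for demonstration purposes.
const retryRequest = require('ipfs-registry-mirror-common/utils/retry-request')

const main = async () => {
  // with json: true the promise resolves with the parsed response body
  const packument = await retryRequest({
    uri: 'https://registry.npmjs.org/express',
    json: true,
    retries: 5,
    retryDelay: 1000
  })
  console.info(`Fetched ${Object.keys(packument.versions).length} versions`) // eslint-disable-line no-console

  // without json the promise resolves with a stream once data starts flowing
  const stream = await retryRequest({
    uri: 'https://registry.npmjs.org/express/-/express-4.17.1.tgz',
    retries: 5,
    retryDelay: 1000
  })

  stream.on('data', () => {})
  stream.on('end', () => console.info('Tarball downloaded')) // eslint-disable-line no-console
}

main().catch(console.error)

Note that because makeRequest only resolves the streaming branch after the first 'data' event, a failure mid-stream cannot be retried transparently - callers such as save-tarball.js handle that by validating the shasum after the fact.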
-------------------------------------------------------------------------------- /packages/common/utils/start-ipfs.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IpfsApi = require('ipfs-http-client') 4 | const ipfsdCtrl = require('ipfsd-ctl') 5 | const which = require('which-promise') 6 | const s3Repo = require('./s3-repo') 7 | const fsRepo = require('./fs-repo') 8 | const IPFS = require('ipfs') 9 | 10 | const cleanUpOps = [] 11 | 12 | const cleanUp = () => { 13 | Promise.all( 14 | cleanUpOps.map(op => op()) 15 | ) 16 | .then(() => { 17 | process.exit(0) 18 | }) 19 | } 20 | 21 | process.on('SIGTERM', cleanUp) 22 | process.on('SIGINT', cleanUp) 23 | 24 | const randomPort = () => { 25 | return Math.floor(Math.random() * 64535) + 1000 26 | } 27 | 28 | const spawn = (createArgs, spawnArgs = { init: true }) => { 29 | return new Promise((resolve, reject) => { 30 | ipfsdCtrl 31 | .create(createArgs) 32 | .spawn(spawnArgs, (error, node) => { 33 | if (error) { 34 | return reject(error) 35 | } 36 | 37 | resolve(node) 38 | }) 39 | }) 40 | } 41 | 42 | const startIpfs = async (config) => { 43 | if (config.ipfs.node === 'proc') { 44 | console.info('😈 Spawning an in-process IPFS node') // eslint-disable-line no-console 45 | 46 | if (config.ipfs.store === 's3') { 47 | config.ipfs.repo = s3Repo(config.ipfs.s3) 48 | } 49 | 50 | if (config.ipfs.store === 'fs') { 51 | config.ipfs.repo = fsRepo(config.ipfs.fs) 52 | } 53 | 54 | const node = await IPFS.create({ 55 | repo: config.ipfs.repo, 56 | EXPERIMENTAL: { 57 | sharding: true 58 | }, 59 | pubsub: { 60 | enabled: true 61 | }, 62 | preload: { 63 | enabled: false 64 | }, 65 | config: { 66 | Addresses: { 67 | Swarm: [ 68 | `/ip4/0.0.0.0/tcp/${config.ipfs.port || randomPort()}`, 69 | `/ip4/127.0.0.1/tcp/${config.ipfs.wsPort || randomPort()}/ws` 70 | ], 71 | API: `/ip4/127.0.0.1/tcp/${config.ipfs.apiPort || randomPort()}`, 72 | Gateway: `/ip4/127.0.0.1/tcp/${config.ipfs.gatewayPort || randomPort()}` 73 | } 74 | } 75 | }) 76 | 77 | process.on('exit', () => { 78 | node.stop() 79 | }) 80 | 81 | return node 82 | } else if (config.ipfs.node === 'disposable') { 83 | console.info('😈 Spawning an in-process disposable IPFS node') // eslint-disable-line no-console 84 | 85 | return spawn({ 86 | type: 'proc', 87 | exec: IPFS 88 | }) 89 | } else if (config.ipfs.node === 'js') { 90 | console.info('😈 Spawning a js-IPFS node') // eslint-disable-line no-console 91 | 92 | return spawn({ 93 | type: 'js', 94 | exec: await which('jsipfs') 95 | }) 96 | } else if (config.ipfs.node === 'go') { 97 | console.info('😈 Spawning a go-IPFS node') // eslint-disable-line no-console 98 | 99 | return spawn({ 100 | type: 'go', 101 | exec: await which('ipfs') 102 | }) 103 | } 104 | 105 | console.info(`😈 Connecting to a remote IPFS node at ${config.ipfs.node}`) // eslint-disable-line no-console 106 | 107 | return { 108 | api: new IpfsApi(config.ipfs.node), 109 | stop: (cb) => cb() 110 | } 111 | } 112 | 113 | const createIpfs = options => { 114 | return async () => { 115 | const ipfs = await startIpfs(options) 116 | 117 | cleanUpOps.push(() => { 118 | return new Promise((resolve) => { 119 | if (options.ipfs.node !== 'proc') { 120 | return resolve() 121 | } 122 | 123 | ipfs.stop(() => { 124 | console.info('😈 IPFS node stopped') // eslint-disable-line no-console 125 | 126 | resolve() 127 | }) 128 | }) 129 | }) 130 | 131 | return ipfs 132 | } 133 | } 134 | 135 | module.exports = createIpfs 136 | 
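Aside - a minimal sketch (not a file in this repository) of driving the createIpfs factory exported by utils/start-ipfs above. The config shape mirrors the branches in startIpfs; the repo path is an illustrative assumption:

'use strict'

// Sketch only: boots an in-process node via utils/start-ipfs.
// The repo path below is an assumption for demonstration purposes.
const createIpfs = require('ipfs-registry-mirror-common/utils/start-ipfs')

const main = async () => {
  const getIpfs = createIpfs({
    ipfs: {
      node: 'proc', // 'proc', 'disposable', 'js', 'go' or a remote API address
      store: 'fs', // or 's3' to back the repo with a bucket
      fs: {
        repo: '/tmp/npm-mirror-repo'
      }
    }
  })

  // createIpfs is lazy - the returned function spawns the node when first called
  const ipfs = await getIpfs()
  const { id } = await ipfs.id()

  console.info(`IPFS node online as ${id}`) // eslint-disable-line no-console
}

main().catch(console.error)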
-------------------------------------------------------------------------------- /packages/replication-master/src/core/save-tarballs.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:replicate:save-tarball') 4 | const crypto = require('crypto') 5 | const { default: PQueue } = require('p-queue') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | const { urlSource } = require('ipfs') 8 | 9 | let queue 10 | 11 | const saveTarball = async (packument, versionNumber, ipfs, options) => { 12 | const version = packument.versions[versionNumber] 13 | 14 | validate(version, versionNumber, packument.name) 15 | 16 | if (version.cid) { 17 | debug(`Skipping version ${versionNumber} of ${packument.name} - already downloaded`) 18 | 19 | return 20 | } 21 | 22 | const start = Date.now() 23 | const cid = await downloadFile(version.dist.tarball, version.dist.shasum, ipfs, options) 24 | version.cid = `/ipfs/${cid}` 25 | 26 | log(`🏄♀️ Added ${version.dist.tarball} with CID ${version.cid} in ${Date.now() - start}ms`) 27 | } 28 | 29 | const validate = (version, versionNumber, packageName) => { 30 | if (!version) { 31 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - version not in manifest`) 32 | } 33 | 34 | if (!version.dist) { 35 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no dist section`) 36 | } 37 | 38 | if (!version.dist.tarball) { 39 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no tarball`) 40 | } 41 | 42 | if (!version.dist.shasum) { 43 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no shasum`) 44 | } 45 | } 46 | 47 | const downloadFile = async (url, shasum, ipfs, options) => { 48 | for (let i = 0; i < options.request.retries; i++) { 49 | try { 50 | log(`⬇️ Downloading ${url}`) 51 | const start = Date.now() 52 | 53 | const { 54 | cid 55 | } = await ipfs.add(urlSource(url), { 56 | wrapWithDirectory: false, 57 | pin: options.clone.pin, 58 | version: 1, 59 | rawLeaves: true 60 | }) 61 | 62 | log(`✅ Downloaded ${url} in ${Date.now() - start}ms`) 63 | 64 | await validateShasum(cid, shasum, url, ipfs, options) 65 | 66 | log(`🌍 Added ${url} to IPFS with CID ${cid} in ${Date.now() - start}ms`) 67 | 68 | return cid 69 | } catch (err) { 70 | log(`💥 Download of ${url} failed`, err) 71 | } 72 | } 73 | 74 | throw new Error(`💥 ${options.request.retries} retries exceeded while downloading ${url}`) 75 | } 76 | 77 | const validateShasum = async (cid, shasum, url, ipfs, options) => { 78 | const hashStart = Date.now() 79 | const hash = crypto.createHash('sha1') 80 | hash.on('error', () => {}) 81 | 82 | for await (const buf of ipfs.cat(cid, { 83 | signal: options.signal 84 | })) { 85 | hash.update(buf) 86 | } 87 | 88 | const result = hash.digest('hex') 89 | 90 | if (result !== shasum) { 91 | throw new Error(`Shasum of ${url} failed ${result} !== ${shasum}`) 92 | } 93 | 94 | log(`🙆 Checked shasum of ${url} in ${Date.now() - hashStart}ms`) 95 | } 96 | 97 | const saveTarballs = (packument, ipfs, options) => { 98 | if (!queue) { 99 | queue = new PQueue({ concurrency: options.request.concurrency }) 100 | } 101 | 102 | return Promise.all( 103 | Object.keys(packument.versions || {}) 104 | .map(versionNumber => { 105 | return queue.add(async () => { 106 | try { 107 | await saveTarball(packument, versionNumber, ipfs, options) 108 | } catch (err) { 109 | log(`💥 Error storing 
tarball ${packument.name} ${versionNumber}`, err) 110 | } 111 | }) 112 | }) 113 | ) 114 | } 115 | 116 | module.exports = saveTarballs 117 | -------------------------------------------------------------------------------- /packages/common/utils/load-packument.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('./retry-request') 4 | const debug = require('debug')('ipfs:registry-mirror:utils:load-packument') 5 | const savePackument = require('./save-packument') 6 | const timeout = require('./timeout-promise') 7 | const log = require('./log') 8 | const toBuffer = require('it-to-buffer') 9 | 10 | const loadFromMfs = async (packageName, ipfs, options) => { 11 | const mfsPath = `${options.ipfs.prefix}/${packageName}` 12 | 13 | try { 14 | const start = Date.now() 15 | 16 | debug(`Reading from mfs ${mfsPath}`) 17 | 18 | const buf = await toBuffer(ipfs.files.read(mfsPath)) 19 | 20 | debug(`Read from mfs ${mfsPath} in ${Date.now() - start}ms`) 21 | 22 | return JSON.parse(buf.toString('utf8')) 23 | } catch (error) { 24 | if (error.code === 'ERR_NOT_FOUND') { 25 | debug(`${mfsPath} not in MFS`) 26 | } 27 | 28 | debug(`Could not read ${mfsPath}`, error) 29 | } 30 | } 31 | 32 | const requestFromRegistry = async (packageName, registry, options) => { 33 | const uri = `${registry}/${packageName}` 34 | 35 | try { 36 | debug(`Fetching ${uri}`) 37 | const start = Date.now() 38 | const json = await request(Object.assign({}, options.request, { 39 | uri, 40 | json: true 41 | })) 42 | 43 | debug(`Fetched ${uri} in ${Date.now() - start}ms`) 44 | 45 | return json 46 | } catch (error) { 47 | debug(`Could not download ${uri}`, error) 48 | } 49 | } 50 | 51 | const loadFromRegistry = async (packageName, ipfs, options) => { 52 | for (const registry of options.registries) { 53 | let result 54 | 55 | try { 56 | result = await timeout(requestFromRegistry(packageName, registry, options), options.registryReadTimeout) 57 | } catch (error) { 58 | if (error.code === 'ETIMEOUT') { 59 | debug(`Fetching ${packageName} timed out after ${options.registryReadTimeout}ms`) 60 | } 61 | } 62 | 63 | if (result) { 64 | return result 65 | } 66 | } 67 | } 68 | 69 | const findNewVersions = (cached, upstream) => { 70 | const cachedVersions = (cached && cached.versions) || {} 71 | const upstreamVersions = (upstream && upstream.versions) || {} 72 | 73 | return Object.keys(upstreamVersions) 74 | .filter(version => !cachedVersions[version]) 75 | } 76 | 77 | const loadPackument = async (packageName, ipfs, options) => { 78 | const mfsVersion = await loadFromMfs(packageName, ipfs, options) 79 | let registryVersion 80 | let willDownload = true 81 | 82 | if (mfsVersion) { 83 | const modified = new Date(mfsVersion.updated || 0) 84 | willDownload = (Date.now() - options.registryUpdateInterval) > modified.getTime() 85 | } 86 | 87 | if (willDownload) { 88 | registryVersion = await loadFromRegistry(packageName, ipfs, options) 89 | } 90 | 91 | if (!mfsVersion && !registryVersion) { 92 | throw new Error(`${packageName} not found, tried upstream registry: ${willDownload}`) 93 | } 94 | 95 | const newVersions = findNewVersions(mfsVersion, registryVersion) 96 | 97 | if (mfsVersion && !newVersions.length) { 98 | // we have a cached version and either fetching from npm failed or 99 | // our cached version matches the npm version 100 | return mfsVersion 101 | } 102 | 103 | if (newVersions.length) { 104 | log(`🆕 New version${newVersions.length > 1 ? 
's' : ''} of ${packageName} detected - ${newVersions.join(', ')}`) 105 | } 106 | 107 | // save our existing versions so we don't re-download tarballs we already have 108 | if (mfsVersion) { 109 | Object.keys(mfsVersion.versions || {}).forEach(versionNumber => { 110 | registryVersion.versions[versionNumber] = mfsVersion.versions[versionNumber] 111 | }) 112 | } 113 | 114 | // store it for next time 115 | await savePackument(registryVersion, ipfs, options) 116 | 117 | return registryVersion 118 | } 119 | 120 | module.exports = loadPackument 121 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/config.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const toBoolean = require('ipfs-registry-mirror-common/utils/to-boolean') 4 | const option = require('ipfs-registry-mirror-common/utils/option') 5 | 6 | module.exports = (overrides = {}) => { 7 | return { 8 | registries: (overrides.registries || []).concat(option(process.env.REGISTRY, overrides.registry)).filter(Boolean), 9 | registryUpdateInterval: option(process.env.REGISTRY_UPDATE_INTERVAL, overrides.registryUpdateInterval), 10 | registryReadTimeout: option(Number(process.env.REGISTRY_READ_TIMEOUT), overrides.registryReadTimeout), 11 | 12 | http: { 13 | protocol: option(process.env.HTTP_PROTOCOL, overrides.httpProtocol), 14 | host: option(process.env.HTTP_HOST, overrides.httpHost), 15 | port: option(Number(process.env.HTTP_PORT), overrides.httpPort) 16 | }, 17 | 18 | external: { 19 | ip: option(process.env.EXTERNAL_IP, overrides.externalIp), 20 | protocol: option(process.env.EXTERNAL_PROTOCOL, overrides.externalProtocol), 21 | host: option(process.env.EXTERNAL_HOST, overrides.externalHost), 22 | port: option(process.env.EXTERNAL_PORT, overrides.externalPort) 23 | }, 24 | 25 | ipfs: { 26 | pass: option(process.env.IPFS_PASS, overrides.ipfsPass), 27 | node: option(process.env.IPFS_NODE, overrides.ipfsNode), 28 | port: option(process.env.IPFS_SWARM_PORT, overrides.ipfsPort), 29 | prefix: option(process.env.IPFS_MFS_PREFIX, overrides.ipfsMfsPrefix), 30 | flush: option(toBoolean(process.env.IPFS_FLUSH), overrides.ipfsFlush), 31 | store: option(process.env.IPFS_STORE_TYPE, overrides.ipfsStoreType), 32 | 33 | s3: { 34 | region: option(process.env.STORE_S3_REGION, overrides.ipfsStoreS3Region), 35 | bucket: option(process.env.STORE_S3_BUCKET, overrides.ipfsStoreS3Bucket), 36 | path: option(process.env.STORE_S3_PATH, overrides.ipfsStoreS3Path), 37 | accessKeyId: option(process.env.STORE_S3_ACCESS_KEY_ID, overrides.ipfsStoreS3AccessKeyId), 38 | secretAccessKey: option(process.env.STORE_S3_SECRET_ACCESS_KEY, overrides.ipfsStoreS3SecretAccessKey), 39 | createIfMissing: option(process.env.STORE_S3_CREATE_IF_MISSING, overrides.ipfsStoreS3CreateIfMissing) 40 | }, 41 | 42 | fs: { 43 | repo: option(process.env.IPFS_REPO, overrides.ipfsRepo), 44 | port: option(process.env.IPFS_REPO_PORT, overrides.ipfsRepoPort) 45 | } 46 | }, 47 | 48 | follow: { 49 | ua: option(process.env.FOLLOW_USER_AGENT, overrides.followUserAgent), 50 | registry: option(process.env.FOLLOW_REGISTRY, overrides.followRegistry), 51 | replicator: option(process.env.FOLLOW_REPLICATOR, overrides.followReplicator), 52 | concurrency: option(Number(process.env.FOLLOW_CONCURRENCY), overrides.followConcurrency), 53 | inactivityTimeout: option(process.env.FOLLOW_INACTIVITY_MS, overrides.followInactivityMs), 54 | seqFile: option(process.env.FOLLOW_SEQ_FILE, 
overrides.followSeqFile) 55 | }, 56 | 57 | clone: { 58 | delay: option(Number(process.env.CLONE_DELAY), overrides.cloneDelay), 59 | pin: option(Number(process.env.CLONE_PIN), overrides.clonePin), 60 | publish: option(process.env.CLONE_PUBLISH, overrides.clonePublish), 61 | concurrency: parseInt(option(process.env.CLONE_CONCURRENCY, overrides.cloneConcurrency), 10) 62 | }, 63 | 64 | request: { 65 | retries: option(process.env.REQUEST_RETRIES, overrides.requestRetries), 66 | retryDelay: option(process.env.REQUEST_RETRY_DELAY, overrides.requestRetryDelay), 67 | timeout: option(process.env.REQUEST_TIMEOUT, overrides.requestTimeout), 68 | forever: option(toBoolean(process.env.REQUEST_KEEP_ALIVE), overrides.requestKeepAlive), 69 | concurrency: parseInt(option(process.env.REQUEST_CONCURRENCY, overrides.requestConcurrency), 10) 70 | }, 71 | 72 | mdns: { 73 | enabled: Boolean(process.env.MDNS_NAME || overrides.mdnsAdvert), 74 | name: option(process.env.MDNS_NAME, overrides.mdnsAdvert) 75 | } 76 | } 77 | } 78 | 79 | module.exports.option = option 80 | module.exports.toBoolean = toBoolean 81 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const follow = require('@achingbrain/follow-registry') 4 | const debug = require('debug')('ipfs:registry-mirror:clone') 5 | const sequenceFile = require('../sequence-file') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | const cluster = require('cluster') 8 | const delay = require('delay') 9 | const mainThreadWorker = require('./main-thread-worker') 10 | 11 | let processed = [] 12 | 13 | const stats = { 14 | update () { 15 | processed.push(Date.now()) 16 | const oneHourAgo = Date.now() - 3600000 17 | 18 | processed = processed.filter(time => { 19 | return time > oneHourAgo 20 | }) 21 | }, 22 | modulesPerSecond () { 23 | return (processed.length / 3600).toFixed(3) 24 | } 25 | } 26 | 27 | const createWorker = () => { 28 | return new Promise((resolve, reject) => { 29 | const worker = cluster.fork() 30 | worker.on('online', () => { 31 | resolve() 32 | }) 33 | worker.on('error', (err) => { 34 | reject(err) 35 | }) 36 | worker.on('disconnect', () => { 37 | // console.info('Worker disconnected') 38 | }) 39 | worker.on('exit', (code, signal) => { 40 | // console.info('Worker exited with code', code, 'and signal', signal) 41 | }) 42 | }) 43 | } 44 | 45 | const fillWorkerPool = async (options) => { 46 | // ensure worker pool is full 47 | if (Object.keys(cluster.workers).length === options.clone.concurrency) { 48 | return 49 | } 50 | 51 | while (Object.keys(cluster.workers).length < options.clone.concurrency) { 52 | await createWorker() 53 | } 54 | } 55 | 56 | const findWorker = async (ipfs, options) => { 57 | if (options.clone.concurrency === 0) { 58 | return mainThreadWorker(ipfs) 59 | } 60 | 61 | await fillWorkerPool(options) 62 | 63 | // wait for a free worker 64 | while (true) { 65 | const worker = Object 66 | .values(cluster.workers) 67 | .find(worker => !worker.processing) 68 | 69 | if (worker) { 70 | return worker 71 | } 72 | 73 | await delay(5000) 74 | } 75 | } 76 | 77 | module.exports = async (emitter, signal, ipfs, options) => { 78 | log(`🦎 Replicating registry with concurrency ${options.follow.concurrency}...`) 79 | 80 | if (options.clone.concurrency) { 81 | log(`👷 Using ${options.clone.concurrency} workers to process updates`) 82 | } else { 83 | log('👷 
Processing package updates on main thread') 84 | } 85 | 86 | await fillWorkerPool(options) 87 | 88 | while (true) { 89 | try { 90 | for await (const { packument, seq, done } of follow({ ...options.follow, seq: sequenceFile(options) })) { 91 | if (signal.aborted) { 92 | return 93 | } 94 | 95 | if (!packument || !packument.name) { 96 | // invalid response from npm 97 | done().then(() => {}, () => {}) 98 | continue 99 | } 100 | 101 | const worker = await findWorker(ipfs, options) 102 | worker.updateStart = Date.now() 103 | worker.processing = true 104 | 105 | worker.once('message', (message) => { 106 | worker.processing = false 107 | 108 | if (message.error) { 109 | const err = new Error(message.error.message) 110 | err.stack = message.error.stack 111 | err.code = message.error.code 112 | 113 | debug(err) 114 | log(`💥 [${message.seq}] error processing ${message.name}`, err) 115 | } else { 116 | stats.update() 117 | 118 | log(`🦕 [${message.seq}] processed ${message.name} in ${Date.now() - worker.updateStart}ms, ${stats.modulesPerSecond()} modules/s`) 119 | 120 | emitter.emit('processed', message.name) 121 | emitter.emit('seq', message.seq) 122 | } 123 | 124 | done().then(() => {}, () => {}) 125 | }) 126 | 127 | worker.send({ 128 | packument, 129 | seq, 130 | options 131 | }) 132 | } 133 | } catch (err) { 134 | log('💥 Feed error', err) 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | proxy: 6 | image: jwilder/nginx-proxy:alpine 7 | mem_limit: 1024m 8 | links: 9 | - replicate 10 | - registry 11 | ports: 12 | - '80:80' 13 | - '443:443' 14 | logging: 15 | driver: "json-file" 16 | options: 17 | max-size: "1m" 18 | max-file: "3" 19 | volumes: 20 | - /var/run/docker.sock:/tmp/docker.sock:ro 21 | - /etc/nginx/vhost.d 22 | - /usr/share/nginx/html 23 | - /etc/nginx/certs 24 | - ./conf/proxy.conf:/etc/nginx/proxy.conf 25 | restart: 'always' 26 | 27 | letsencrypt-nginx-proxy-companion: 28 | image: jrcs/letsencrypt-nginx-proxy-companion 29 | mem_limit: 1024m 30 | volumes: 31 | - /var/run/docker.sock:/var/run/docker.sock:ro 32 | volumes_from: 33 | - proxy 34 | 35 | replicate: 36 | build: 37 | context: . 38 | dockerfile: packages/replication-master/Dockerfile 39 | restart: 'always' 40 | env_file: .env 41 | mem_limit: 4608m 42 | environment: 43 | - VIRTUAL_HOST=replication.registry.js.ipfs.io 44 | - VIRTUAL_PORT=8080 45 | - LETSENCRYPT_HOST=replication.registry.js.ipfs.io 46 | - LETSENCRYPT_EMAIL=alex.potsides@protocol.ai 47 | - NODE_ENV=${NODE_ENV} 48 | - EXTERNAL_PROTOCOL=https 49 | - EXTERNAL_HOST=registry.js.ipfs.io 50 | - EXTERNAL_PORT=443 51 | - EXTERNAL_IP=35.178.192.119 52 | - IPFS_STORE_TYPE=s3 53 | - STORE_S3_REGION=${STORE_S3_REGION} 54 | - STORE_S3_BUCKET=${STORE_S3_BUCKET} 55 | - STORE_S3_ACCESS_KEY_ID=${STORE_S3_ACCESS_KEY_ID} 56 | - STORE_S3_SECRET_ACCESS_KEY=${STORE_S3_SECRET_ACCESS_KEY} 57 | - STORE_S3_PATH=replication-master 58 | - FOLLOW_SEQ_FILE=seq.txt 59 | - CLONE_DELAY=30000 60 | - CLONE_CONCURRENCY=0 61 | - FOLLOW_CONCURRENCY=5 62 | - REQUEST_CONCURRENCY=5 63 | logging: 64 | driver: "json-file" 65 | options: 66 | max-size: "1m" 67 | max-file: "3" 68 | 69 | registry: 70 | build: 71 | context: . 
72 | dockerfile: packages/registry-mirror/Dockerfile 73 | restart: 'always' 74 | env_file: .env 75 | mem_limit: 2048m 76 | volumes: 77 | - /var/run/docker.sock:/tmp/docker.sock:ro 78 | environment: 79 | - VIRTUAL_HOST=registry.js.ipfs.io 80 | - VIRTUAL_PORT=8080 81 | - LETSENCRYPT_HOST=registry.js.ipfs.io 82 | - LETSENCRYPT_EMAIL=alex.potsides@protocol.ai 83 | - NODE_ENV=${NODE_ENV} 84 | - EXTERNAL_PROTOCOL=https 85 | - EXTERNAL_HOST=registry.js.ipfs.io 86 | - EXTERNAL_PORT=443 87 | - EXTERNAL_IP=35.178.192.119 88 | - IPFS_STORE_TYPE=s3 89 | - STORE_S3_REGION=${STORE_S3_REGION} 90 | - STORE_S3_BUCKET=${STORE_S3_BUCKET} 91 | - STORE_S3_ACCESS_KEY_ID=${STORE_S3_ACCESS_KEY_ID} 92 | - STORE_S3_SECRET_ACCESS_KEY=${STORE_S3_SECRET_ACCESS_KEY} 93 | - STORE_S3_PATH=worker 94 | - PUBSUB_MASTER=http://replicate:8080 95 | - REQUEST_MAX_SOCKETS=20 96 | links: 97 | - replicate 98 | ports: 99 | - 10000-10009:10000 100 | - 10010-10019:10001 101 | - 10020-10029:10002 102 | - 10030-10039:10003 103 | - 10040-10049:10004 104 | - 10050-10059:10005 105 | - 10060-10069:10006 106 | - 10070-10079:10007 107 | - 10080-10089:10008 108 | - 10090-10099:10009 109 | logging: 110 | driver: "json-file" 111 | options: 112 | max-size: "1m" 113 | max-file: "3" 114 | 115 | spiped: 116 | restart: always 117 | env_file: .env 118 | build: 119 | context: . 120 | dockerfile: monitoring/spiped 121 | args: 122 | SPIPED_KEY: ${SPIPED_KEY} 123 | NETDATA_EIP: ${NETDATA_EIP} 124 | mem_limit: 1024m 125 | 126 | netdata: 127 | restart: always 128 | mem_limit: 1024m 129 | hostname: registry.js.ipfs.io 130 | env_file: .env 131 | build: 132 | context: . 133 | dockerfile: monitoring/netdata 134 | args: 135 | NETDATA_API_KEY: ${NETDATA_API_KEY} 136 | cap_add: 137 | - SYS_PTRACE 138 | security_opt: 139 | - apparmor:unconfined 140 | volumes: 141 | - /proc:/host/proc:ro 142 | - /sys:/host/sys:ro 143 | - /var/run/docker.sock:/var/run/docker.sock:ro 144 | environment: 145 | # https://docs.netdata.cloud/packaging/docker/#docker-container-names-resolution 146 | - PGID=115 147 | links: 148 | - spiped 149 | depends_on: 150 | - spiped 151 | - replicate 152 | - registry 153 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/cli/bin.js: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env node 2 | 3 | 'use strict' 4 | 5 | const log = require('ipfs-registry-mirror-common/utils/log') 6 | 7 | if (process.env.NODE_ENV !== 'production') { 8 | const url = '/-/dashboard' 9 | 10 | log(`🔍 Enabling profiling at ${url}`) 11 | 12 | try { 13 | require('appmetrics-dash').attach({ 14 | url 15 | }) 16 | } catch (error) { 17 | log('💥 Enabling profiling failed', error) 18 | } 19 | } 20 | 21 | require('dnscache')({ enable: true }) 22 | 23 | const pkg = require('../../package') 24 | const path = require('path') 25 | 26 | require('dotenv').config({ 27 | path: path.join(process.env.HOME, '.ipfs-npm-registry-mirror/registry-mirror.env') 28 | }) 29 | 30 | process.title = pkg.name 31 | 32 | const yargs = require('yargs') 33 | 34 | yargs.command('$0', 'Starts a registry server that uses IPFS to fetch js dependencies', (yargs) => { // eslint-disable-line no-unused-expressions 35 | yargs 36 | .option('registry', { 37 | describe: 'Which registry we are mirroring', 38 | default: 'https://registry.npmjs.com' 39 | }) 40 | .option('registry-update-interval', { 41 | describe: 'Only request the manifest for a given module every so many ms', 42 | default: 60000 43 | }) 44 | .option('registry-upload-size-limit', { 45 | describe: 'How large a file upload to allow when proxying for the registry', 46 | default: '1024MB' 47 | }) 48 | .option('registry-read-timeout', { 49 | describe: 'How long in ms to wait for registry requests', 50 | default: 60000 51 | }) 52 | 53 | .option('http-protocol', { 54 | describe: 'Which protocol to use with the server', 55 | default: 'http' 56 | }) 57 | .option('http-host', { 58 | describe: 'Which host to listen to requests on', 59 | default: 'localhost' 60 | }) 61 | .option('http-port', { 62 | describe: 'Which port to listen to requests on', 63 | default: 8080 64 | }) 65 | 66 | .option('external-protocol', { 67 | describe: 'Which protocol to use when reaching this mirror' 68 | }) 69 | .option('external-host', { 70 | describe: 'Which host to use when reaching this mirror' 71 | }) 72 | .option('external-port', { 73 | describe: 'Which port to use when reaching this mirror' 74 | }) 75 | 76 | .option('ipfs-port', { 77 | describe: 'Which port to accept IPFS connections on', 78 | default: 4001 79 | }) 80 | .option('ipfs-mfs-prefix', { 81 | describe: 'Which mfs prefix to use', 82 | default: '/npm-registry' 83 | }) 84 | .option('ipfs-flush', { 85 | describe: 'Whether to flush the MFS cache', 86 | default: true 87 | }) 88 | .option('ipfs-repo', { 89 | describe: 'The path to the IPFS repo you wish to use', 90 | default: path.join(process.env.HOME, '.jsipfs') 91 | }) 92 | .option('ipfs-repo-port', { 93 | describe: 'The port for level workers to connect to', 94 | default: 9000 95 | }) 96 | .option('ipfs-store-type', { 97 | describe: 'Which type of datastore to use - fs, s3, etc', 98 | default: 'fs' 99 | }) 100 | .option('ipfs-store-s3-region', { 101 | describe: 'The s3 region to use' 102 | }) 103 | .option('ipfs-store-s3-bucket', { 104 | describe: 'The s3 bucket to use' 105 | }) 106 | .option('ipfs-store-s3-path', { 107 | describe: 'The path to use in an s3 bucket' 108 | }) 109 | .option('ipfs-store-s3-access-key-id', { 110 | describe: 'The s3 access key id to use' 111 | }) 112 | .option('ipfs-store-s3-secret-access-key', { 113 | describe: 'The s3 secret access key to use' 114 | }) 115 | .option('ipfs-store-s3-create-if-missing', { 116 | describe: 'Whether to create the bucket if it is missing', 117 | default: false 118 | }) 119 | 120 | .option('pubsub-master', { 121 | describe: 'The URL 
of the pubsub replication master', 122 | default: 'https://replication.registry.js.ipfs.io' 123 | }) 124 | 125 | .option('clone-pin', { 126 | describe: 'Whether to pin cloned modules', 127 | default: false 128 | }) 129 | 130 | .option('request-retries', { 131 | describe: 'How many times to retry when downloading manifests and tarballs from the registry', 132 | default: 5 133 | }) 134 | .option('request-retry-delay', { 135 | describe: 'How long in ms to wait between retries', 136 | default: 1000 137 | }) 138 | .option('request-timeout', { 139 | describe: 'How long in ms we should wait when requesting files', 140 | default: 30000 141 | }) 142 | .option('request-keep-alive', { 143 | describe: 'Whether to re-use connections', 144 | default: true 145 | }) 146 | .option('request-max-sockets', { 147 | describe: 'How many concurrent requests to have in flight', 148 | default: 100 149 | }) 150 | }, require('../core')) 151 | .argv 152 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 |
3 |