├── .npmignore ├── packages ├── registry-mirror │ ├── test │ │ ├── node.js │ │ ├── fixtures │ │ │ └── create-replication-master.js │ │ └── mirror.spec.js │ ├── src │ │ ├── index.js │ │ ├── core │ │ │ ├── find-external-port.js │ │ │ ├── routes │ │ │ │ ├── root.js │ │ │ │ ├── packument.js │ │ │ │ └── tarball.js │ │ │ ├── pubsub.js │ │ │ ├── index.js │ │ │ └── config.js │ │ └── cli │ │ │ └── bin.js │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── package.json │ └── CHANGELOG.md ├── replication-master │ ├── src │ │ ├── index.js │ │ ├── cli │ │ │ ├── bin.js │ │ │ ├── worker.js │ │ │ └── master.js │ │ └── core │ │ │ ├── routes │ │ │ ├── worker-online.js │ │ │ ├── worker.js │ │ │ ├── workers.js │ │ │ └── root.js │ │ │ ├── clone │ │ │ ├── cluster-worker.js │ │ │ ├── main-thread-worker.js │ │ │ ├── ingest-module.js │ │ │ └── index.js │ │ │ ├── mdns.js │ │ │ ├── pubsub.js │ │ │ ├── sequence-file.js │ │ │ ├── index.js │ │ │ ├── save-tarballs.js │ │ │ └── config.js │ ├── test │ │ ├── node.js │ │ ├── fixtures │ │ │ └── create-skim-db.js │ │ └── replication.spec.js │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── package.json │ └── CHANGELOG.md └── common │ ├── handlers │ ├── favicon.png │ ├── error-log.js │ ├── cors.js │ ├── index.js │ ├── favicon.js │ ├── abortable-request.js │ └── request-log.js │ ├── index.js │ ├── utils │ ├── error-message.js │ ├── option.js │ ├── fs-repo.js │ ├── sanitise-name.js │ ├── to-boolean.js │ ├── timeout-promise.js │ ├── get-external-url.js │ ├── s3-repo.js │ ├── log.js │ ├── find-base-dir.js │ ├── save-packument.js │ ├── replace-tarball-urls.js │ ├── load-tarball.js │ ├── retry-request.js │ ├── get-an-ipfs.js │ ├── download-tarball.js │ ├── level-lock.js │ ├── cluster-repo.js │ ├── save-tarball.js │ ├── start-ipfs.js │ └── load-packument.js │ ├── test │ ├── error-message.spec.js │ ├── sanitise-name.spec.js │ ├── option.spec.js │ ├── timeout-promise.spec.js │ ├── to-boolean.spec.js │ ├── server.spec.js │ ├── save-packument.spec.js │ ├── replace-tarball-urls.spec.js │ ├── fixtures │ │ └── test-server.js │ ├── get-external-url.spec.js │ ├── find-base-dir.spec.js │ ├── save-tarball.spec.js │ ├── load-tarball.spec.js │ ├── retry-request.spec.js │ └── load-packument.spec.js │ ├── README.md │ ├── package.json │ └── server.js ├── img ├── ip-npm.png ├── ip-npm-small.png └── npm-on-ipfs.svg ├── lerna.json ├── monitoring ├── spiped └── netdata ├── deploy-dev.sh ├── .github └── workflows │ ├── stale.yml │ └── generated-pr.yml ├── deploy.sh ├── .travis.yml ├── upgrade.sh ├── conf └── proxy.conf ├── .gitignore ├── package.json ├── docker-compose.yml └── README.md /.npmignore: -------------------------------------------------------------------------------- 1 | tests/ipfs-repo-tests 2 | -------------------------------------------------------------------------------- /packages/registry-mirror/test/node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | require('./mirror.spec') 4 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | 5 | } 6 | -------------------------------------------------------------------------------- /packages/replication-master/src/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | 5 | } 6 | 
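Note that both `src/index.js` entry points above export nothing by design: each service is started via its `cli/bin.js` (the `main` declared in `package.json`), and shared code is pulled in with deep requires into `ipfs-registry-mirror-common` rather than through a package main. A minimal sketch of that pattern, using two utilities that appear later in this dump (illustrative only, not code from the repo):

```js
// Deep requires into ipfs-registry-mirror-common, the convention used
// throughout registry-mirror and replication-master
const log = require('ipfs-registry-mirror-common/utils/log')
const timeout = require('ipfs-registry-mirror-common/utils/timeout-promise')

const main = async () => {
  // timeout-promise races a promise against a delay and rejects with
  // error.code === 'ETIMEOUT' if the work loses the race
  const result = await timeout(Promise.resolve('ok'), 5000)

  log(`🚀 got ${result}`)
}

main().catch(err => log('💥 failed', err))
```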
-------------------------------------------------------------------------------- /packages/replication-master/test/node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | require('./replication.spec') 4 | -------------------------------------------------------------------------------- /img/ip-npm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/ipfs-npm-registry-mirror/HEAD/img/ip-npm.png -------------------------------------------------------------------------------- /img/ip-npm-small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/ipfs-npm-registry-mirror/HEAD/img/ip-npm-small.png -------------------------------------------------------------------------------- /packages/common/handlers/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/ipfs-npm-registry-mirror/HEAD/packages/common/handlers/favicon.png -------------------------------------------------------------------------------- /packages/common/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | server: require('./server'), 5 | handlers: require('./handlers') 6 | } 7 | -------------------------------------------------------------------------------- /packages/common/utils/error-message.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const lol = (message) => { 4 | return `${message}` 5 | } 6 | 7 | module.exports = lol 8 | -------------------------------------------------------------------------------- /packages/replication-master/src/cli/bin.js: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env node 2 | 3 | 'use strict' 4 | 5 | const cluster = require('cluster') 6 | 7 | if (cluster.isWorker) { 8 | require('./worker') 9 | } else { 10 | require('./master') 11 | } 12 | -------------------------------------------------------------------------------- /packages/replication-master/src/cli/worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const pkg = require('../../package') 4 | const cluster = require('cluster') 5 | 6 | process.title = `${pkg.name}-worker-${cluster.worker.id}` 7 | 8 | require('../core/clone/cluster-worker') 9 | -------------------------------------------------------------------------------- /lerna.json: -------------------------------------------------------------------------------- 1 | { 2 | "lerna": "2.9.0", 3 | "packages": [ 4 | "packages/*" 5 | ], 6 | "version": "independent", 7 | "command": { 8 | "bootstrap": { 9 | "hoist": true 10 | }, 11 | "run": { 12 | "stream": true 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /monitoring/spiped: -------------------------------------------------------------------------------- 1 | FROM spiped 2 | 3 | ARG SPIPED_KEY 4 | ARG NETDATA_EIP 5 | 6 | EXPOSE 20000 7 | 8 | RUN mkdir /etc/spiped 9 | RUN echo $SPIPED_KEY > /etc/spiped/keyfile 10 | 11 | CMD spiped -F -e -s [0.0.0.0]:20000 -t $NETDATA_EIP:20000 -k /etc/spiped/keyfile 12 | -------------------------------------------------------------------------------- /packages/common/handlers/error-log.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('../utils/log') 4 | 5 | module.exports = function (error, request, response, next) { 6 | log(`💀 ${request.method} ${request.url} ${response.statusCode}`, error) 7 | 8 | next() 9 | } 10 | -------------------------------------------------------------------------------- /packages/common/handlers/cors.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = function (request, response, next) { 4 | response.header("Access-Control-Allow-Origin", "*") 5 | response.header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") 6 | next() 7 | } 8 | -------------------------------------------------------------------------------- /packages/common/handlers/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = { 4 | abortableRequest: require('./abortable-request'), 5 | errorLog: require('./error-log'), 6 | requestLog: require('./request-log'), 7 | favicon: require('./favicon'), 8 | cors: require('./cors') 9 | } 10 | -------------------------------------------------------------------------------- /deploy-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # Build a Docker image 4 | docker-compose build replicate registry 5 | 6 | # Shut down the registry containers 7 | docker-compose stop replicate registry 8 | 9 | # Restart using the new image 10 | docker-compose up --no-deps proxy replicate registry 11 | -------------------------------------------------------------------------------- /packages/common/utils/option.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | function option () { 4 | for (let i = 0; i < arguments.length; i++) { 5 | const arg 
= arguments[i] 6 | 7 | if (arg !== undefined && arg !== null && arg.toString() !== 'NaN') { 8 | return arg 9 | } 10 | } 11 | } 12 | 13 | module.exports = option 14 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /packages/common/handlers/favicon.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const path = require('path') 5 | 6 | module.exports = (config, ipfs, app) => { 7 | return async (request, response, next) => { 8 | fs.createReadStream(path.join(__dirname, 'favicon.png')) 9 | .on('error', () => {}) 10 | .pipe(response) 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /packages/common/handlers/abortable-request.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const AbortController = require('abort-controller') 4 | 5 | module.exports = function (request, response, next) { 6 | const controller = new AbortController() 7 | response.locals.signal = controller.signal 8 | 9 | request.on('aborted', () => { 10 | controller.abort() 11 | }) 12 | 13 | next() 14 | } 15 | -------------------------------------------------------------------------------- /packages/common/utils/fs-repo.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('./log') 4 | const IPFSRepo = require('ipfs-repo') 5 | 6 | const fsRepo = ({ repo }) => { 7 | if (process.env.NODE_ENV === 'development') { 8 | repo = `${repo}-test` 9 | } 10 | 11 | log(`📁 Using fs repo at ${repo}`) 12 | 13 | return new IPFSRepo(repo) 14 | } 15 | 16 | module.exports = fsRepo 17 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/worker-online.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | online 5 | } = require('./workers') 6 | 7 | module.exports = () => { 8 | return (request, response) => { 9 | online() 10 | 11 | response.statusCode = 204 12 | response.setHeader('Content-type', 'application/json; charset=utf-8') 13 | response.end() 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /packages/common/utils/sanitise-name.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const sanitiseName = (name) => { 4 | name = `${(name || 
'').trim()}`.replace(/^(\/)+/, '/') 5 | 6 | if (name.startsWith('/')) { 7 | name = name.substring(1) 8 | } 9 | 10 | if (name.startsWith('@')) { 11 | name = name.replace(/%2f/g, '/') 12 | } 13 | 14 | return name 15 | } 16 | 17 | module.exports = sanitiseName 18 | -------------------------------------------------------------------------------- /packages/common/utils/to-boolean.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const toBoolean = (value) => { 4 | if (value === undefined) { 5 | return undefined 6 | } 7 | 8 | if (value === 'false' || value === '0' || value === 'no') { 9 | return false 10 | } 11 | 12 | if (value === 'true' || value === '1' || value === 'yes') { 13 | return true 14 | } 15 | 16 | return Boolean(value) 17 | } 18 | 19 | module.exports = toBoolean 20 | -------------------------------------------------------------------------------- /packages/common/test/error-message.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const errorMessage = require('../utils/error-message') 8 | 9 | describe('error-message', () => { 10 | it('should return an error message', () => { 11 | const message = 'hello' 12 | 13 | expect(errorMessage(message)).to.contain(message) 14 | }) 15 | }) 16 | -------------------------------------------------------------------------------- /monitoring/netdata: -------------------------------------------------------------------------------- 1 | FROM netdata/netdata 2 | 3 | ARG NETDATA_API_KEY 4 | 5 | RUN echo $'[global]\n\ 6 | memory mode = none\n\ 7 | hostname = registry.js.ipfs.io\n\ 8 | [health]\n\ 9 | enabled = no\n ' > /etc/netdata/netdata.conf 10 | 11 | RUN echo $'[stream]\n\ 12 | enabled = yes\n\ 13 | destination = spiped:20000\n\ 14 | api key = '$NETDATA_API_KEY$'\n ' > /etc/netdata/stream.conf 15 | 16 | RUN chown root:netdata /etc/netdata/stream.conf 17 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # Remove old images 4 | docker system prune -a -f 5 | docker rm $(docker ps -q -f 'status=exited') || echo 'Failed to remove old containers, maybe there was nothing to do' 6 | docker rmi $(docker images -q -f "dangling=true") || echo 'Failed to remove old images, maybe there was nothing to do' 7 | 8 | # Build a Docker image 9 | docker-compose build --no-cache 10 | 11 | # Restart using the new image 12 | docker-compose up -d --scale registry=5 13 | -------------------------------------------------------------------------------- /packages/common/utils/timeout-promise.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const delay = require('delay') 4 | 5 | const timeout = (promise, ms) => { 6 | return Promise.race([ 7 | promise, 8 | new Promise((resolve, reject) => { 9 | delay(ms) 10 | .then(() => { 11 | const error = new Error('Timed out') 12 | error.code = 'ETIMEOUT' 13 | 14 | reject(error) 15 | }, reject) 16 | }) 17 | ]) 18 | } 19 | 20 | module.exports = timeout 21 | -------------------------------------------------------------------------------- /packages/common/test/sanitise-name.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 
| const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const sanitiseName = require('../utils/sanitise-name') 8 | 9 | describe('sanitise-name', () => { 10 | it('should sanitise a package name', () => { 11 | expect(sanitiseName('hello')).to.equal('hello') 12 | expect(sanitiseName(' /@hello/blah ')).to.equal('@hello/blah') 13 | expect(sanitiseName(' /@hello%2fblah ')).to.equal('@hello/blah') 14 | }) 15 | }) 16 | -------------------------------------------------------------------------------- /packages/common/test/option.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const option = require('../utils/option') 8 | 9 | describe('option', () => { 10 | it('should return the first argument that is not undefined, null or NaN', () => { 11 | const result = option(null, 1, 2, 3) 12 | 13 | expect(result).to.equal(1) 14 | }) 15 | 16 | it('should return false values', () => { 17 | const result = option(null, false, 2, 3) 18 | 19 | expect(result).to.equal(false) 20 | }) 21 | }) 22 | -------------------------------------------------------------------------------- /packages/common/utils/get-external-url.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | URL 5 | } = require('url') 6 | 7 | module.exports = (config) => { 8 | const url = new URL('http://foo.com') 9 | url.protocol = (config.external && config.external.protocol) || config.http.protocol 10 | url.host = (config.external && config.external.host) || config.http.host 11 | url.port = (config.external && config.external.port) || config.http.port 12 | 13 | const string = url.toString() 14 | 15 | // strip the trailing slash 16 | return string.substring(0, string.length - 1) 17 | } 18 | -------------------------------------------------------------------------------- /packages/common/handlers/request-log.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('../utils/log') 4 | 5 | module.exports = function (request, response, next) { 6 | response.locals.start = Date.now() 7 | 8 | response.on('finish', () => { 9 | const disposition = response.getHeader('Content-Disposition') 10 | let prefix = '📄' 11 | 12 | if (disposition && disposition.endsWith('tgz')) { 13 | prefix = '🎁' 14 | } 15 | 16 | log(`${prefix} ${request.method} ${request.url} ${response.statusCode} ${Date.now() - response.locals.start}ms`) 17 | }) 18 | 19 | next() 20 | } 21 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | connect 5 | } = require('./workers') 6 | 7 | module.exports = () => { 8 | return (request, response) => { 9 | const worker = request.query.worker 10 | 11 | if (!worker) { 12 | return response.status(400).send('Bad Request') 13 | } 14 | 15 | const info = { 16 | index: connect(worker) 17 | } 18 | 19 | response.statusCode = 200 20 | response.setHeader('Content-type', 'application/json; charset=utf-8') 21 | response.send(JSON.stringify(info, null, 2)) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /packages/common/utils/s3-repo.js: 
-------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { createRepo } = require('datastore-s3') 4 | const log = require('./log') 5 | 6 | const s3Repo = ({ region, bucket, path, accessKeyId, secretAccessKey, createIfMissing }) => { 7 | if (process.env.NODE_ENV === 'development') { 8 | path = `${path}-test` 9 | } 10 | 11 | log(`☁️ Using s3 storage ${region}:${bucket}/${path}`) 12 | 13 | return createRepo({ 14 | path, 15 | createIfMissing 16 | }, { 17 | bucket, 18 | region, 19 | accessKeyId, 20 | secretAccessKey 21 | }) 22 | } 23 | 24 | module.exports = s3Repo 25 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | cache: npm 3 | stages: 4 | - check 5 | - test 6 | - cov 7 | 8 | branches: 9 | only: 10 | - master 11 | - /^release\/.*$/ 12 | 13 | node_js: 14 | - 'lts/*' 15 | - 'node' 16 | 17 | os: 18 | - linux 19 | 20 | script: npx nyc -s npm run test -- --bail 21 | after_success: npx nyc report --reporter=text-lcov > coverage.lcov && npx codecov 22 | 23 | jobs: 24 | include: 25 | - stage: check 26 | script: 27 | - npm run lint 28 | 29 | - stage: test 30 | name: node 31 | script: npm run test 32 | 33 | notifications: 34 | email: false 35 | -------------------------------------------------------------------------------- /packages/common/utils/log.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | module.exports = (message, error) => { 4 | const time = new Date() 5 | const timestamp = time.toLocaleDateString('en-GB', { 6 | year: 'numeric', 7 | month: 'numeric', 8 | day: 'numeric', 9 | hour: '2-digit', 10 | minute: '2-digit', 11 | second: '2-digit', 12 | timeZoneName: 'short', 13 | hour12: false 14 | }) 15 | 16 | if (error) { 17 | console.error(timestamp, message, error) // eslint-disable-line no-console 18 | 19 | return 20 | } 21 | 22 | console.info(timestamp, message) // eslint-disable-line no-console 23 | } 24 | -------------------------------------------------------------------------------- /upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # Remove old images 4 | docker system prune -a -f 5 | docker rm $(docker ps -q -f 'status=exited') || echo 'Failed to remove old containers, maybe there was nothing to do' 6 | docker rmi $(docker images -q -f "dangling=true") || echo 'Failed to remove old images, maybe there was nothing to do' 7 | 8 | # Get the latest 9 | git pull 10 | 11 | # Build a Docker image 12 | docker-compose build --no-cache replicate registry 13 | 14 | # Shut down the registry containers 15 | docker-compose stop replicate registry 16 | 17 | # Restart using the new image 18 | docker-compose up -d --no-deps --scale registry=5 replicate registry 19 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/cluster-worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const getAnIPFS = require('ipfs-registry-mirror-common/utils/get-an-ipfs') 4 | const ingestModule = require('./ingest-module') 5 | 6 | process.on('message', async ({ packument, seq, options }) => { 7 | const ipfs = await getAnIPFS(options) 8 | 9 | try { 10 | process.send(await ingestModule({ packument, seq, ipfs, options })) 11 | } catch (error) { 12 | 
process.send({ 13 | seq, 14 | name: packument.name, 15 | error: { 16 | message: error.message, 17 | stack: error.stack, 18 | code: error.code 19 | } 20 | }) 21 | } 22 | }) 23 | -------------------------------------------------------------------------------- /packages/common/utils/find-base-dir.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('./log') 4 | 5 | const findBaseDir = async (ipfs, config) => { 6 | try { 7 | const stats = await ipfs.files.stat(config.ipfs.prefix) 8 | 9 | log(`🌿 Root dir ${config.ipfs.prefix} is ${stats.cid}`) 10 | 11 | return stats.cid 12 | } catch (error) { 13 | if (error.message.includes('does not exist')) { 14 | log(`🐺 Creating base dir ${config.ipfs.prefix}`) 15 | 16 | await ipfs.files.mkdir(config.ipfs.prefix, { 17 | parents: true 18 | }) 19 | } 20 | 21 | return findBaseDir(ipfs, config) 22 | } 23 | } 24 | 25 | module.exports = findBaseDir 26 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/workers.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('ipfs-registry-mirror-common/utils/log') 4 | 5 | const workers = [] 6 | let initialised = 0 7 | 8 | module.exports = { 9 | status: () => { 10 | return { 11 | workers, 12 | initialised, 13 | ready: workers.length === 0 ? true : initialised === workers.length 14 | } 15 | }, 16 | 17 | connect: (worker) => { 18 | let index = workers.indexOf(worker) 19 | 20 | if (index === -1) { 21 | index = workers.push(worker) - 1 22 | } 23 | 24 | log(`👷‍♀️ Worker ${worker} assigned index ${index}`) 25 | 26 | return index 27 | }, 28 | 29 | online: () => { 30 | initialised++ 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /packages/common/utils/save-packument.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const savePackument = async (packument, ipfs, config) => { 4 | if (!packument.name) { 5 | throw new Error('No name found in packument') 6 | } 7 | 8 | let lastErr 9 | 10 | for (let i = 0; i < 5; i++) { 11 | try { 12 | const file = `${config.ipfs.prefix}/${packument.name}` 13 | 14 | await ipfs.files.write(file, JSON.stringify(packument, null, 2), { 15 | truncate: true, 16 | parents: true, 17 | create: true, 18 | cidVersion: 1, 19 | rawLeaves: true 20 | }) 21 | 22 | return 23 | } catch (err) { 24 | lastErr = err 25 | } 26 | } 27 | 28 | throw lastErr 29 | } 30 | 31 | module.exports = savePackument 32 | -------------------------------------------------------------------------------- /conf/proxy.conf: -------------------------------------------------------------------------------- 1 | # HTTP 1.1 support 2 | proxy_http_version 1.1; 3 | proxy_buffering off; 4 | proxy_set_header Host $http_host; 5 | proxy_set_header Upgrade $http_upgrade; 6 | proxy_set_header Connection $proxy_connection; 7 | proxy_set_header X-Real-IP $remote_addr; 8 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 9 | proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto; 10 | proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl; 11 | proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port; 12 | 13 | # Mitigate httpoxy attack 14 | proxy_set_header Proxy ""; 15 | 16 | # Increase proxy timeouts 17 | proxy_connect_timeout 75s; 18 | proxy_send_timeout 60s; 19 | proxy_read_timeout 3600s; 20 | 21 | 
client_max_body_size 1024m; 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tests/t-run* 2 | # Logs 3 | logs 4 | *.log 5 | 6 | 7 | registry 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | 13 | # Directory for instrumented libs generated by jscoverage/JSCover 14 | lib-cov 15 | 16 | # Coverage directory used by tools like istanbul 17 | coverage 18 | .nyc_output 19 | 20 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 21 | .grunt 22 | 23 | # node-waf configuration 24 | .lock-wscript 25 | 26 | # Compiled binary addons (http://nodejs.org/api/addons.html) 27 | build/Release 28 | 29 | # Dependency directory 30 | # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git 31 | node_modules 32 | 33 | docs 34 | .env 35 | seq.txt 36 | *.heapsnapshot 37 | .vscode 38 | -------------------------------------------------------------------------------- /packages/common/test/timeout-promise.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const timeoutPromise = require('../utils/timeout-promise') 8 | 9 | describe('timeout-promise', () => { 10 | it('should time out', async () => { 11 | try { 12 | await timeoutPromise(new Promise((resolve, reject) => {}), 100) 13 | throw new Error('Expected timeoutPromise to throw') 14 | } catch (error) { 15 | expect(error.code).to.equal('ETIMEOUT') 16 | } 17 | }) 18 | 19 | it('should not time out', async () => { 20 | const result = await timeoutPromise(new Promise((resolve, reject) => { 21 | resolve('ok') 22 | }), 1000) 23 | 24 | expect(result).to.equal('ok') 25 | }) 26 | }) 27 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/mdns.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('ipfs-registry-mirror-common/utils/log') 4 | const { 5 | Advertisement 6 | } = require('dnssd') 7 | 8 | const advertise = async (ipfs, config) => { 9 | if (!config.mdns.enabled) { 10 | return () => {} 11 | } 12 | 13 | log(`📣 Starting mDNS advert for ${config.mdns.name} on port ${config.ipfs.port}`) 14 | 15 | const advertisment = new Advertisement(config.mdns.name, config.ipfs.port, { 16 | txt: { 17 | id: (await ipfs.id()).id 18 | } 19 | }) 20 | advertisment.start() 21 | advertisment.on('error', err => { 22 | console.error(`💥 DNSSD Error: ${err}`) // eslint-disable-line no-console 23 | }) 24 | 25 | return () => { 26 | advertisment.stop() 27 | } 28 | } 29 | 30 | module.exports = advertise 31 | -------------------------------------------------------------------------------- /packages/common/utils/replace-tarball-urls.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const getExternalUrl = require('./get-external-url') 4 | 5 | const replaceTarballUrls = (pkg, config) => { 6 | const prefix = getExternalUrl(config) 7 | const packageName = pkg.name 8 | const moduleName = packageName.startsWith('@') ? 
packageName.split('/').pop() : packageName 9 | 10 | // change tarball URLs to point to us 11 | Object.keys(pkg.versions || {}) 12 | .forEach(versionNumber => { 13 | const version = pkg.versions[versionNumber] 14 | 15 | if (version.dist.source) { 16 | return 17 | } 18 | 19 | version.dist.source = version.dist.tarball 20 | version.dist.tarball = `${prefix}/${packageName}/-/${moduleName}-${versionNumber}.tgz` 21 | }) 22 | 23 | return pkg 24 | } 25 | 26 | module.exports = replaceTarballUrls 27 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-npm-registry-mirror", 3 | "version": "1.0.0", 4 | "description": "Install your dependencies from IPFS", 5 | "scripts": { 6 | "postinstall": "lerna bootstrap --ci", 7 | "reset": "lerna run --parallel clean && rm -rf packages/*/node_modules && rm -rf node_modules && npm i --ignore-scripts && lerna bootstrap", 8 | "test": "lerna run --parallel test", 9 | "test:node": "lerna run --parallel test", 10 | "coverage": "lerna run --parallel coverage", 11 | "build": "lerna run --parallel build", 12 | "deploy": "lerna run --parallel deploy", 13 | "start": "NODE_ENV=development lerna run --parallel start", 14 | "clean": "lerna run --parallel clean", 15 | "lint": "lerna run --parallel lint", 16 | "publish": "lerna publish" 17 | }, 18 | "dependencies": { 19 | "lerna": "^3.1.4" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /packages/common/test/to-boolean.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const toBoolean = require('../utils/to-boolean') 8 | 9 | describe('to-boolean', () => { 10 | it('should convert things to boolean', () => { 11 | expect(toBoolean('true')).to.be.true() 12 | expect(toBoolean('1')).to.be.true() 13 | expect(toBoolean('yes')).to.be.true() 14 | expect(toBoolean('ok')).to.be.true() 15 | expect(toBoolean(true)).to.be.true() 16 | expect(toBoolean(1)).to.be.true() 17 | 18 | expect(toBoolean('false')).to.be.false() 19 | expect(toBoolean('0')).to.be.false() 20 | expect(toBoolean('no')).to.be.false() 21 | expect(toBoolean(false)).to.be.false() 22 | expect(toBoolean(0)).to.be.false() 23 | expect(toBoolean(null)).to.be.false() 24 | 25 | expect(toBoolean(undefined)).to.be.undefined() 26 | }) 27 | }) 28 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/find-external-port.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('ipfs-registry-mirror-common/utils/retry-request') 4 | 5 | module.exports = async (options) => { 6 | if (process.env.NODE_ENV === 'development' || process.env.NODE_ENV === 'test') { 7 | return 0 8 | } 9 | 10 | const docker = await request(Object.assign({}, { 11 | uri: 'http://unix:/tmp/docker.sock:/containers/' + process.env.HOSTNAME + '/json', 12 | json: true, 13 | retries: 100, 14 | retryDelay: 5000, 15 | headers: { 16 | host: ' ' 17 | } 18 | })) 19 | 20 | try { 21 | return docker.NetworkSettings.Ports[`${options.ipfs.port}/tcp`][0].HostPort 22 | } catch (err) { 23 | console.error('Could not find options.ipfs.port', options.ipfs.port, 'in') // eslint-disable-line no-console 24 | 
console.info(JSON.stringify(docker.NetworkSettings, null, 2)) // eslint-disable-line no-console 25 | 26 | throw err 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /packages/registry-mirror/test/fixtures/create-replication-master.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const hat = require('hat') 4 | const { 5 | createTestServer 6 | } = require('ipfs-registry-mirror-common/test/fixtures/test-server') 7 | 8 | const createReplicationMaster = async () => { 9 | const topic = `topic-${hat()}` 10 | 11 | const replicationMaster = await createTestServer(async server => { 12 | return { 13 | '/': JSON.stringify({ 14 | ipfs: await server.ipfs.id(), 15 | // empty directory 16 | root: '/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn', 17 | topic 18 | }), 19 | '/-/worker': JSON.stringify({ 20 | index: 0 21 | }) 22 | } 23 | }) 24 | 25 | replicationMaster.config = { 26 | pubsub: { 27 | topic 28 | }, 29 | ipfs: { 30 | prefix: '/reg-mas-root' 31 | } 32 | } 33 | 34 | return replicationMaster 35 | } 36 | 37 | module.exports = createReplicationMaster 38 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/main-thread-worker.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { default: PQueue } = require('p-queue') 4 | const EventEmitter = require('events').EventEmitter 5 | const ingestModule = require('./ingest-module') 6 | 7 | const queue = new PQueue({ concurrency: 1 }) 8 | let ipfs 9 | 10 | const mainWorker = new EventEmitter() 11 | mainWorker.send = ({ 12 | packument, 13 | seq, 14 | options 15 | }) => { 16 | queue.add(async () => { 17 | try { 18 | mainWorker.emit('message', await ingestModule({ packument, seq, ipfs, options })) 19 | } catch (error) { 20 | mainWorker.emit('message', { 21 | seq, 22 | name: packument.name, 23 | error: { 24 | message: error.message, 25 | stack: error.stack, 26 | code: error.code 27 | } 28 | }) 29 | } 30 | }) 31 | } 32 | 33 | const mainThreadWorker = async (i) => { 34 | ipfs = i 35 | await queue.onIdle() 36 | 37 | return mainWorker 38 | } 39 | 40 | module.exports = mainThreadWorker 41 | -------------------------------------------------------------------------------- /packages/registry-mirror/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | EXPOSE 8080 4 | EXPOSE 10000-10009 5 | 6 | RUN export NODE_ENV=production 7 | RUN npm set unsafe-perm true 8 | 9 | COPY ./package.json /app/package.json 10 | COPY ./package-lock.json /app/package-lock.json 11 | COPY ./lerna.json /app/lerna.json 12 | COPY ./packages/common/package.json /app/packages/common/package.json 13 | COPY ./packages/common/package-lock.json /app/packages/common/package-lock.json 14 | COPY ./packages/registry-mirror/package.json /app/packages/registry-mirror/package.json 15 | COPY ./packages/registry-mirror/package-lock.json /app/packages/registry-mirror/package-lock.json 16 | 17 | WORKDIR /app 18 | 19 | RUN npm install --production 20 | 21 | COPY ./packages/common/utils /app/packages/common/utils 22 | COPY ./packages/common/handlers /app/packages/common/handlers 23 | COPY ./packages/common/server.js /app/packages/common/server.js 24 | COPY ./packages/registry-mirror/src /app/packages/registry-mirror/src 25 | 26 | WORKDIR /app/packages/registry-mirror 27 | 28 | CMD node . 
29 | -------------------------------------------------------------------------------- /packages/replication-master/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 2 | 3 | EXPOSE 8080 4 | EXPOSE 4001 5 | 6 | RUN export NODE_ENV=production 7 | RUN npm set unsafe-perm true 8 | 9 | COPY ./package.json /app/package.json 10 | COPY ./package-lock.json /app/package-lock.json 11 | COPY ./lerna.json /app/lerna.json 12 | COPY ./packages/common/package.json /app/packages/common/package.json 13 | COPY ./packages/common/package-lock.json /app/packages/common/package-lock.json 14 | COPY ./packages/replication-master/package.json /app/packages/replication-master/package.json 15 | COPY ./packages/replication-master/package-lock.json /app/packages/replication-master/package-lock.json 16 | 17 | WORKDIR /app 18 | 19 | RUN npm install --production 20 | 21 | COPY ./packages/common/utils /app/packages/common/utils 22 | COPY ./packages/common/handlers /app/packages/common/handlers 23 | COPY ./packages/common/server.js /app/packages/common/server.js 24 | COPY ./packages/replication-master/src /app/packages/replication-master/src 25 | 26 | WORKDIR /app/packages/replication-master 27 | 28 | CMD node --max-old-space-size=4096 . 29 | -------------------------------------------------------------------------------- /packages/registry-mirror/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 David Dias 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /packages/replication-master/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 David Dias 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /packages/common/README.md: -------------------------------------------------------------------------------- 1 | # ipfs-registry-mirror-common 2 | 3 | ![npm on IPFS](https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/raw/master/img/npm-on-ipfs.svg?sanitize=true) 4 | 5 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) 6 | [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) 7 | [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) 8 | [![Build Status](https://ci.ipfs.team/buildStatus/icon?job=IPFS%20Shipyard/ipfs-npm-registry-mirror/master)](https://ci.ipfs.team/job/IPFS%20Shipyard/job/ipfs-npm-registry-mirror/job/master/) 9 | [![Code Coverage](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror) 10 | [![Dependency Status](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror.svg?style=flat-square)](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror) 11 | 12 | > Holds common files for the ipfs-npm-registry-mirror project 13 | 14 | ## Lead Maintainer 15 | 16 | [Alex Potsides](https://github.com/achingbrain) 17 | -------------------------------------------------------------------------------- /packages/registry-mirror/README.md: -------------------------------------------------------------------------------- 1 | # ipfs-npm-registry-mirror 2 | 3 | ![npm on IPFS](https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/raw/master/img/npm-on-ipfs.svg?sanitize=true) 4 | 5 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) 6 | [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) 7 | [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) 8 | [![Build Status](https://ci.ipfs.team/buildStatus/icon?job=IPFS%20Shipyard/ipfs-npm-registry-mirror/master)](https://ci.ipfs.team/job/IPFS%20Shipyard/job/ipfs-npm-registry-mirror/job/master/) 9 | [![Code Coverage](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror) 10 | [![Dependency Status](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror.svg?style=flat-square)](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror) 11 | 12 | > Serves requests for npm modules mirrored on the IPFS network 13 | 14 | ## Lead Maintainer 15 | 16 | [Alex Potsides](https://github.com/achingbrain) 17 | -------------------------------------------------------------------------------- /packages/replication-master/README.md: -------------------------------------------------------------------------------- 1 | # 
ipfs-npm-replication-master 2 | 3 | ![npm on IPFS](https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/raw/master/img/npm-on-ipfs.svg?sanitize=true) 4 | 5 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) 6 | [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) 7 | [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) 8 | [![Build Status](https://ci.ipfs.team/buildStatus/icon?job=IPFS%20Shipyard/ipfs-npm-registry-mirror/master)](https://ci.ipfs.team/job/IPFS%20Shipyard/job/ipfs-npm-registry-mirror/job/master/) 9 | [![Code Coverage](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror) 10 | [![Dependency Status](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror.svg?style=flat-square)](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror) 11 | 12 | > Continually syncs the npm registry and publishes new modules to IPFS 13 | 14 | ## Lead Maintainer 15 | 16 | [Alex Potsides](https://github.com/achingbrain) 17 | -------------------------------------------------------------------------------- /packages/common/test/server.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const expect = require('chai') 6 | .use(require('dirty-chai')) 7 | .expect 8 | const sinon = require('sinon') 9 | const request = require('../utils/retry-request') 10 | 11 | describe('server', function () { 12 | this.timeout(10000) 13 | let server 14 | let getAnIpfs 15 | let ipfs 16 | 17 | beforeEach(() => { 18 | ipfs = { 19 | stop: sinon.stub() 20 | } 21 | getAnIpfs = sinon.stub().returns(ipfs) 22 | 23 | mock('../utils/get-an-ipfs', getAnIpfs) 24 | 25 | server = mock.reRequire('../server') 26 | }) 27 | 28 | afterEach(() => { 29 | mock.stopAll() 30 | }) 31 | 32 | it('should create a server', async () => { 33 | const config = { 34 | http: { 35 | 36 | }, 37 | ipfs: { 38 | store: 'fs', 39 | fs: { 40 | 41 | } 42 | } 43 | } 44 | const s = await server(config) 45 | 46 | const result = await request({ 47 | uri: `http://localhost:${config.http.port}/favicon.ico` 48 | }) 49 | 50 | expect(result).to.be.ok() 51 | 52 | await s.stop() 53 | 54 | expect(ipfs.stop.called).to.be.true() 55 | }) 56 | }) 57 | -------------------------------------------------------------------------------- /packages/common/test/save-packument.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const sinon = require('sinon') 5 | const expect = require('chai') 6 | .use(require('dirty-chai')) 7 | .expect 8 | const hat = require('hat') 9 | const savePackument = require('../utils/save-packument') 10 | 11 | describe('save-packument', () => { 12 | let ipfs 13 | let config 14 | 15 | beforeEach(() => { 16 | config = { 17 | ipfs: { 18 | prefix: `/registry-prefix-${hat()}`, 19 | flush: true 20 | } 21 | } 22 | 23 | ipfs = { 24 | files: { 25 | write: sinon.stub() 26 | } 27 | } 28 | }) 29 | 30 | it('should save a packument to ipfs', async () => { 31 | const pkg = { 32 | name: `module-${hat()}` 33 | } 34 | 35 | ipfs.files.write.withArgs(`${config.ipfs.prefix}/${pkg.name}`) 36 | .resolves() 37 | 38 | await savePackument(pkg, ipfs, config) 39 | 40 | 
expect(ipfs.files.write.called).to.be.true() 41 | }) 42 | 43 | it('should require a package name', async () => { 44 | const pkg = { 45 | 46 | } 47 | 48 | try { 49 | await savePackument(pkg, ipfs, config) 50 | throw new Error('Expected savePackument to throw') 51 | } catch (error) { 52 | expect(error.message).to.contain('No name found') 53 | expect(ipfs.files.write.called).to.be.false() 54 | } 55 | }) 56 | }) 57 | -------------------------------------------------------------------------------- /packages/registry-mirror/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-npm-registry-mirror", 3 | "version": "1.0.0", 4 | "description": "Serves tarballs and json manifests", 5 | "main": "src/cli/bin.js", 6 | "bin": { 7 | "ipfs-npm-registry-mirror": "src/cli/bin.js" 8 | }, 9 | "scripts": { 10 | "test": "aegir test -t node", 11 | "coverage": "aegir coverage", 12 | "lint": "aegir lint", 13 | "start": "node ." 14 | }, 15 | "repository": { 16 | "type": "git", 17 | "url": "git+https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git" 18 | }, 19 | "license": "MIT", 20 | "bugs": { 21 | "url": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/issues" 22 | }, 23 | "homepage": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror#readme", 24 | "dependencies": { 25 | "aws-sdk": "^2.756.0", 26 | "debug": "^4.0.1", 27 | "dnscache": "^1.0.1", 28 | "dotenv": "^8.0.0", 29 | "express-http-proxy": "^1.4.0", 30 | "ipfs-registry-mirror-common": "^3.0.0", 31 | "p-queue": "^6.0.1", 32 | "uint8arrays": "^1.1.0", 33 | "yargs": "^16.0.3" 34 | }, 35 | "devDependencies": { 36 | "aegir": "^26.0.0", 37 | "chai": "^4.1.2", 38 | "dirty-chai": "^2.0.1", 39 | "hat": "~0.0.3", 40 | "ipfs-unixfs": "^2.0.3", 41 | "ipld-dag-pb": "^0.20.0", 42 | "mock-require": "^3.0.2", 43 | "sinon": "^9.0.2" 44 | }, 45 | "optionalDependencies": { 46 | "appmetrics-dash": "^5.3.0" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /packages/common/test/replace-tarball-urls.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const hat = require('hat') 8 | const replaceTarballUrls = require('../utils/replace-tarball-urls') 9 | 10 | describe('replace-tarball-urls', () => { 11 | it('should replace tarball urls', () => { 12 | const config = { 13 | external: { 14 | protocol: 'http', 15 | host: `localhost-${hat()}`, 16 | port: 80 17 | } 18 | } 19 | const pkg = { 20 | name: `module-${hat()}`, 21 | versions: { 22 | '1.0.0': { 23 | dist: { 24 | tarball: 'a-tarball' 25 | } 26 | }, 27 | '2.0.0': { 28 | dist: { 29 | source: 'original-tarball', 30 | tarball: 'replaced-tarball' 31 | } 32 | } 33 | } 34 | } 35 | 36 | const result = replaceTarballUrls(JSON.parse(JSON.stringify(pkg)), config) 37 | 38 | expect(result.versions['1.0.0'].dist.source).to.equal(pkg.versions['1.0.0'].dist.tarball) 39 | expect(result.versions['1.0.0'].dist.tarball).to.equal(`${config.external.protocol}://${config.external.host}/${pkg.name}/-/${pkg.name}-1.0.0.tgz`) 40 | 41 | // should not change anything if source is already present 42 | expect(result.versions['2.0.0'].dist.source).to.equal(pkg.versions['2.0.0'].dist.source) 43 | expect(result.versions['2.0.0'].dist.tarball).to.equal(pkg.versions['2.0.0'].dist.tarball) 44 | }) 45 | }) 46 | 
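Taken together with `get-external-url.js` earlier in this dump, the spec above pins down the rewrite performed by `replace-tarball-urls.js`: each version's `dist.tarball` is pointed at the mirror while the upstream URL is preserved in `dist.source`. A worked sketch under those assumptions (the host and module names here are hypothetical):

```js
const replaceTarballUrls = require('ipfs-registry-mirror-common/utils/replace-tarball-urls')

const pkg = replaceTarballUrls({
  name: '@scope/module', // hypothetical scoped module
  versions: {
    '1.0.0': {
      dist: { tarball: 'https://registry.npmjs.org/@scope/module/-/module-1.0.0.tgz' }
    }
  }
}, {
  external: { protocol: 'https', host: 'registry.example.com', port: 443 } // hypothetical mirror
})

// get-external-url omits the protocol's default port, so the rewritten URL is
// 'https://registry.example.com/@scope/module/-/module-1.0.0.tgz'
console.info(pkg.versions['1.0.0'].dist.tarball)

// the upstream URL survives in dist.source
console.info(pkg.versions['1.0.0'].dist.source)
```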
-------------------------------------------------------------------------------- /packages/replication-master/src/core/pubsub.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const hat = require('hat') 4 | const findBaseDir = require('ipfs-registry-mirror-common/utils/find-base-dir') 5 | const log = require('ipfs-registry-mirror-common/utils/log') 6 | 7 | const topic = `ipfs-registry-pubsub-${hat()}` 8 | let lastBaseDir 9 | 10 | const publishIpnsName = async (ipfs, cid) => { 11 | if (!lastBaseDir || cid.toString() !== lastBaseDir.toString()) { 12 | lastBaseDir = cid 13 | 14 | log(`🗞️ Publishing IPNS update, base dir is /ipfs/${cid}`) 15 | 16 | await ipfs.name.publish(`/ipfs/${cid}`) 17 | 18 | log('📰 Published IPNS update') 19 | } 20 | } 21 | 22 | const publishUpdate = async (ipfs, cid) => { 23 | await ipfs.pubsub.publish(topic, Buffer.from(JSON.stringify({ 24 | type: 'update', 25 | cid: cid.toString() 26 | }))) 27 | 28 | log(`📰 Broadcast update of ${cid}`) 29 | } 30 | 31 | const master = async (config, ipfs, emitter) => { 32 | emitter.on('processed', async () => { 33 | const cid = await findBaseDir(ipfs, config) 34 | 35 | if (config.clone.publish) { 36 | try { 37 | await publishIpnsName(ipfs, cid) 38 | } catch (error) { 39 | log('💥 Error publishing IPNS name', error) 40 | } 41 | } 42 | 43 | try { 44 | await publishUpdate(ipfs, cid) 45 | } catch (error) { 46 | log('💥 Error publishing to topic', error) 47 | } 48 | }) 49 | 50 | const root = await findBaseDir(ipfs, config) 51 | 52 | return { 53 | topic, 54 | root 55 | } 56 | } 57 | 58 | module.exports = master 59 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/routes/root.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const pkg = require('../../../package.json') 4 | const findBaseDir = require('ipfs-registry-mirror-common/utils/find-base-dir') 5 | 6 | let info 7 | let lastUpdate 8 | 9 | const findInfo = async (config, ipfs, worker) => { 10 | if (!lastUpdate || lastUpdate < (Date.now() - 30000)) { 11 | const [ 12 | id, 13 | peers, 14 | topicPeers, 15 | baseDir 16 | ] = await Promise.all([ 17 | ipfs.id(), 18 | ipfs.swarm.addrs(), 19 | config.pubsub.topic ? 
ipfs.pubsub.peers(config.pubsub.topic) : [], 20 | findBaseDir(ipfs, config) 21 | ]) 22 | 23 | id.addresses = [ 24 | `/ip4/${config.external.ip}/tcp/${config.external.ipfsPort}/ipfs/${id.id}`, 25 | `/dns4/${config.external.host}/tcp/${config.external.ipfsPort}/ipfs/${id.id}` 26 | ] 27 | 28 | info = { 29 | name: pkg.name, 30 | index: worker.index, 31 | version: pkg.version, 32 | ipfs: id, 33 | peers: peers.map(peer => peer.id.toString()), 34 | topicPeers, 35 | // until js can resolve IPNS names remotely, just use the raw hash 36 | root: `/ipfs/${baseDir}` 37 | } 38 | 39 | lastUpdate = Date.now() 40 | } 41 | 42 | return info 43 | } 44 | 45 | module.exports = (config, ipfs, app, worker) => { 46 | return async (request, response, next) => { 47 | response.statusCode = 200 48 | response.setHeader('Content-type', 'application/json; charset=utf-8') 49 | response.send(JSON.stringify(await findInfo(config, request.app.locals.ipfs, worker), null, 2)) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /packages/common/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-registry-mirror-common", 3 | "version": "3.0.0", 4 | "description": "Shared libraries & utilities from ipfs-npm", 5 | "main": "./index.js", 6 | "scripts": { 7 | "test": "aegir test -t node", 8 | "coverage": "aegir coverage", 9 | "lint": "aegir lint" 10 | }, 11 | "repository": { 12 | "type": "git", 13 | "url": "git+https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git" 14 | }, 15 | "license": "MIT", 16 | "bugs": { 17 | "url": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/issues" 18 | }, 19 | "homepage": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror#readme", 20 | "dependencies": { 21 | "abstract-leveldown": "^6.3.0", 22 | "cids": "^1.0.0", 23 | "datastore-fs": "^2.0.1", 24 | "datastore-level": "^2.0.0", 25 | "datastore-s3": "^3.0.0", 26 | "debug": "^4.0.1", 27 | "delay": "^4.3.0", 28 | "express": "^4.16.3", 29 | "express-prom-bundle": "^6.0.0", 30 | "ipfs": "^0.50.2", 31 | "ipfs-http-client": "^47.0.1", 32 | "ipfs-repo": "^6.0.3", 33 | "it-to-buffer": "^1.0.2", 34 | "level": "^6.0.1", 35 | "memdown": "^5.1.0", 36 | "mortice": "^2.0.0", 37 | "multileveldown": "^3.0.0", 38 | "once": "^1.4.0", 39 | "request": "^2.88.0", 40 | "request-promise": "^4.2.2", 41 | "which-promise": "^1.0.0" 42 | }, 43 | "devDependencies": { 44 | "aegir": "^26.0.0", 45 | "chai": "^4.1.2", 46 | "dirty-chai": "^2.0.1", 47 | "hat": "~0.0.3", 48 | "ipfsd-ctl": "^7.0.1", 49 | "mocha": "^8.1.3", 50 | "mock-require": "^3.0.2", 51 | "sinon": "^9.0.2" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/routes/packument.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:handlers:manifest') 4 | const loadPackument = require('ipfs-registry-mirror-common/utils/load-packument') 5 | const sanitiseName = require('ipfs-registry-mirror-common/utils/sanitise-name') 6 | const lol = require('ipfs-registry-mirror-common/utils/error-message') 7 | const log = require('ipfs-registry-mirror-common/utils/log') 8 | const replaceTarballUrls = require('ipfs-registry-mirror-common/utils/replace-tarball-urls') 9 | 10 | module.exports = (config, ipfs, app) => { 11 | return async (request, response, next) => { 12 | debug(`Requested ${request.path}`) 13 | 14 | 
const moduleName = sanitiseName(request.path) 15 | 16 | debug(`Loading packument for ${moduleName}`) 17 | 18 | try { 19 | let packument = await loadPackument(moduleName, ipfs, { 20 | signal: response.locals.signal, 21 | ...config 22 | }) 23 | packument = replaceTarballUrls(packument, config) 24 | 25 | response.statusCode = 200 26 | response.setHeader('Content-type', 'application/json; charset=utf-8') 27 | response.send(JSON.stringify(packument, null, 2)) 28 | } catch (error) { 29 | log(`💥 Could not load packument for ${moduleName}`, error) 30 | 31 | if (error.message.includes('Not found')) { 32 | response.statusCode = 404 33 | response.send(lol(`💥 Could not load ${moduleName}, has it been published?`)) 34 | 35 | return 36 | } 37 | 38 | // a 500 will cause the npm client to retry 39 | response.statusCode = 500 40 | response.send(lol(`💥 ${error.message}`)) 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /packages/common/utils/load-tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const saveTarball = require('./save-tarball') 4 | const CID = require('cids') 5 | const loadPackument = require('./load-packument') 6 | 7 | const readOrDownloadTarball = async function * (path, ipfs, config) { 8 | const { 9 | packageName, 10 | packageVersion 11 | } = extractPackageDetails(path) 12 | 13 | let packument = await loadPackument(packageName, ipfs, config) 14 | let version = packument.versions[packageVersion] 15 | 16 | if (!version) { 17 | throw new Error(`Could not find version ${packageName}@${packageVersion} in available versions ${Object.keys(packument.versions)}`) 18 | } 19 | 20 | if (!version.dist.cid) { 21 | await saveTarball(packument.name, packageVersion, ipfs, config) 22 | 23 | packument = await loadPackument(packageName, ipfs, config) 24 | version = packument.versions[packageVersion] 25 | 26 | if (!version.dist.cid) { 27 | throw new Error(`CID for ${packageName}@${packageVersion} missing after download`) 28 | } 29 | } 30 | 31 | yield * ipfs.cat(new CID(version.dist.cid), { 32 | signal: config.signal 33 | }) 34 | } 35 | 36 | const extractPackageDetails = (path) => { 37 | let [ 38 | packageName, fileName 39 | ] = path.split('/-/') 40 | 41 | if (packageName.startsWith('/')) { 42 | packageName = packageName.substring(1) 43 | } 44 | 45 | let moduleName = packageName 46 | 47 | if (packageName.startsWith('@')) { 48 | moduleName = packageName.split('/').pop() 49 | } 50 | 51 | const packageVersion = fileName.substring(moduleName.length + 1, fileName.length - 4) 52 | 53 | return { 54 | packageName, 55 | packageVersion 56 | } 57 | } 58 | 59 | module.exports = readOrDownloadTarball 60 | -------------------------------------------------------------------------------- /packages/common/test/fixtures/test-server.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const http = require('http') 4 | const IPFSFactory = require('ipfsd-ctl').createFactory({ 5 | type: 'proc', 6 | ipfsHttpModule: require('ipfs-http-client'), 7 | ipfsModule: require('ipfs'), 8 | test: true, 9 | disposable: true 10 | }) 11 | 12 | let testServers = [] 13 | 14 | module.exports = { 15 | createTestServer: async (resources) => { 16 | const server = http.createServer((request, response) => { 17 | let url = request.url 18 | 19 | if (url.includes('?')) { 20 | url = url.split('?')[0] 21 | } 22 | 23 | if (resources[url]) { 24 | if (typeof resources[url] === 
'function') { 25 | return resources[url](request, response) 26 | } 27 | 28 | response.statusCode = 200 29 | return response.end(resources[url]) 30 | } 31 | 32 | response.statusCode = 404 33 | response.end('404') 34 | }) 35 | 36 | await new Promise((resolve, reject) => { 37 | // the listen callback is not error-first - bind failures are 38 | // emitted as 'error' events so reject on those instead 39 | server.once('error', reject) 40 | 41 | server.listen(() => { 42 | resolve() 43 | }) 44 | }) 45 | 46 | testServers.push(server) 47 | 48 | const node = await IPFSFactory.spawn() 49 | 50 | server.ipfs = node.api 51 | 52 | if (typeof resources === 'function') { 53 | resources = await resources(server) 54 | } 55 | 56 | return server 57 | }, 58 | 59 | destroyTestServers: () => { 60 | const servers = testServers 61 | testServers = [] 62 | 63 | return Promise.all( 64 | servers.map((server) => { 65 | return new Promise((resolve) => { 66 | server.ipfs.stop() 67 | server.close(resolve) 68 | }) 69 | }) 70 | ) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /packages/common/test/get-external-url.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const expect = require('chai') 5 | .use(require('dirty-chai')) 6 | .expect 7 | const getExternalUrl = require('../utils/get-external-url') 8 | 9 | describe('get-external-url', () => { 10 | it('should use external url from config', () => { 11 | const config = { 12 | external: { 13 | protocol: 'http', 14 | host: 'external-host', 15 | port: 8080 16 | } 17 | } 18 | 19 | const result = getExternalUrl(config) 20 | 21 | expect(result).to.equal('http://external-host:8080') 22 | }) 23 | 24 | it('should omit common ports', () => { 25 | const config = { 26 | external: { 27 | protocol: 'http', 28 | host: 'external-host', 29 | port: 80 30 | } 31 | } 32 | 33 | const result = getExternalUrl(config) 34 | 35 | expect(result).to.equal('http://external-host') 36 | }) 37 | 38 | it('should use internal url from config if external is not configured', () => { 39 | const config = { 40 | http: { 41 | protocol: 'http', 42 | host: 'internal-host', 43 | port: 8080 44 | } 45 | } 46 | 47 | const result = getExternalUrl(config) 48 | 49 | expect(result).to.equal('http://internal-host:8080') 50 | }) 51 | 52 | it('should prefer external configuration', () => { 53 | const config = { 54 | http: { 55 | protocol: 'http', 56 | host: 'internal-host', 57 | port: 8080 58 | }, 59 | external: { 60 | protocol: 'http', 61 | host: 'external-host', 62 | port: 8080 63 | } 64 | } 65 | 66 | const result = getExternalUrl(config) 67 | 68 | expect(result).to.equal('http://external-host:8080') 69 | }) 70 | }) 71 | -------------------------------------------------------------------------------- /packages/common/utils/retry-request.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const requestPromise = require('request-promise') 4 | const request = require('request') 5 | const { 6 | PassThrough 7 | } = require('stream') 8 | const log = require('./log') 9 | 10 | const makeRequest = (config) => { 11 | if (config.json) { 12 | return requestPromise(config) 13 | } 14 | 15 | // resolve with stream 16 | return new Promise((resolve, reject) => { 17 | const output = new PassThrough() 18 | 19 | const stream = request(config) 20 | stream.on('response', (response) => { 21 | if (response.statusCode < 200 || response.statusCode > 299) { 22 | return reject(new Error(`${config.uri || config.url} - ${response.statusCode}`)) 23 | } 24 | }) 25 | 
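/* surface network errors to the caller - retryRequest below will retry the request */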
stream.on('error', (error) => { 26 | reject(error) 27 | }) 28 | stream.once('data', (data) => { 29 | resolve(output) 30 | }) 31 | stream.pipe(output) 32 | }) 33 | } 34 | 35 | const retryRequest = (config, attempt = 1) => { 36 | const maxAttempts = config.retries || 1 37 | const delay = config.retryDelay || 0 38 | 39 | return makeRequest(config) 40 | .catch(error => { 41 | const method = (config.method || 'GET').toUpperCase() 42 | 43 | log(`🚨 Request to ${method} ${config.uri} failed on attempt ${attempt}:`, error.message) 44 | 45 | if (attempt > maxAttempts) { 46 | return Promise.reject(new Error(`Gave up requesting ${method} ${config.uri} after ${attempt} attempts`)) 47 | } 48 | 49 | attempt += 1 50 | 51 | return new Promise((resolve, reject) => { 52 | setTimeout(() => { 53 | retryRequest(config, attempt) 54 | .then(resolve) 55 | .catch(reject) 56 | }, delay) 57 | }) 58 | }) 59 | } 60 | 61 | module.exports = retryRequest 62 | -------------------------------------------------------------------------------- /packages/common/test/find-base-dir.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const sinon = require('sinon') 5 | const expect = require('chai') 6 | .use(require('dirty-chai')) 7 | .expect 8 | const hat = require('hat') 9 | const findBaseDir = require('../utils/find-base-dir') 10 | 11 | describe('find-base-dir', () => { 12 | let containingDirectory 13 | let dirName 14 | let prefix 15 | let config 16 | let ipfs 17 | 18 | beforeEach(() => { 19 | containingDirectory = `/${hat()}/${hat()}` 20 | dirName = hat() 21 | prefix = `${containingDirectory}/${dirName}` 22 | config = { 23 | ipfs: { 24 | prefix 25 | } 26 | } 27 | ipfs = { 28 | files: { 29 | ls: sinon.stub(), 30 | mkdir: sinon.stub() 31 | } 32 | } 33 | }) 34 | 35 | it('should find an existing base dir', async () => { 36 | const dirHash = 'QmSomethingSomething' 37 | ipfs.files.stat = sinon.stub().withArgs(config.ipfs.prefix) 38 | .resolves({ 39 | name: dirName, 40 | cid: dirHash 41 | }) 42 | 43 | const result = await findBaseDir(ipfs, config) 44 | 45 | expect(result).to.equal(dirHash) 46 | expect(ipfs.files.mkdir.called).to.be.false() 47 | }) 48 | 49 | it('should create the base dir if it does not exist', async () => { 50 | const dirHash = 'QmSomethingSomething' 51 | ipfs.files.stat = sinon.stub() 52 | .onFirstCall().throws(new Error('basedir does not exist')) 53 | .onSecondCall().returns({ 54 | name: dirName, 55 | cid: dirHash 56 | }) 57 | 58 | const result = await findBaseDir(ipfs, config) 59 | 60 | expect(result).to.equal(dirHash) 61 | expect(ipfs.files.mkdir.called).to.be.true() 62 | expect(ipfs.files.mkdir.getCall(0).args[0]).to.equal(prefix) 63 | }) 64 | }) 65 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/routes/root.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const pkg = require('../../../package.json') 4 | const findBaseDir = require('ipfs-registry-mirror-common/utils/find-base-dir') 5 | 6 | let info 7 | let lastUpdate 8 | 9 | const findInfo = async (config, ipfs, root, topic, seq) => { 10 | if (!lastUpdate || lastUpdate < (Date.now() - 30000)) { 11 | const [ 12 | id, 13 | peers, 14 | topicPeers 15 | ] = await Promise.all([ 16 | ipfs.id(), 17 | ipfs.swarm.addrs(), 18 | ipfs.pubsub.peers(topic) 19 | ]) 20 | 21 | id.addresses = [ 22 | 
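/* advertise the externally routable addresses rather than the container-local bind addresses */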
`/ip4/${config.external.ip}/tcp/${config.ipfs.port}/ipfs/${id.id}`, 23 | `/dns4/${config.external.host}/tcp/${config.ipfs.port}/ipfs/${id.id}` 24 | ] 25 | 26 | info = { 27 | name: pkg.name, 28 | version: pkg.version, 29 | seq, 30 | ipfs: id, 31 | peers: peers.map(peer => peer.id.toString()), 32 | topicPeers, 33 | topic, 34 | // until js can resolve IPNS names remotely, just use the raw hash 35 | root: `/ipfs/${await findBaseDir(ipfs, config)}` 36 | } 37 | 38 | lastUpdate = Date.now() 39 | } 40 | 41 | return info 42 | } 43 | 44 | module.exports = (config, ipfs, app, root, topic) => { 45 | let seq 46 | 47 | app.on('seq', (s) => { 48 | seq = s 49 | }) 50 | 51 | return async (request, response, next) => { 52 | try { 53 | const info = await findInfo(config, ipfs, root, topic, seq) 54 | 55 | response.statusCode = 200 56 | response.setHeader('Content-type', 'application/json; charset=utf-8') 57 | response.send(JSON.stringify(info, null, 2)) 58 | } catch (error) { 59 | response.statusCode = 500 60 | response.setHeader('Content-type', 'text/plain; charset=utf-8') 61 | response.send(error.message) 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /packages/replication-master/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ipfs-npm-replication-master", 3 | "version": "1.0.0", 4 | "description": "Replicates the npm registry and puts it onto IPFS", 5 | "main": "src/cli/bin.js", 6 | "bin": { 7 | "ipfs-npm-replication-master": "src/cli/bin.js" 8 | }, 9 | "scripts": { 10 | "test": "aegir test -t node", 11 | "coverage": "aegir coverage", 12 | "lint": "aegir lint", 13 | "start": "node .", 14 | "debug": "DEBUG='ipfs:*' NODE_ENV=development PROFILING=true node --inspect . 
--clone-delay 10000 --ipfs-store-type=s3 --ipfs-store-s3-bucket=npm-on-ipfs --ipfs-store-s3-region=us-west-1 --ipfs-store-s3-path=replication-master-test --follow-seq-file=seq-test.txt --follow-concurrency=1 --request-concurrency=1" 15 | }, 16 | "repository": { 17 | "type": "git", 18 | "url": "git+https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git" 19 | }, 20 | "license": "MIT", 21 | "bugs": { 22 | "url": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror/issues" 23 | }, 24 | "homepage": "https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror#readme", 25 | "dependencies": { 26 | "@achingbrain/follow-registry": "^5.0.0", 27 | "abort-controller": "^3.0.0", 28 | "aws-sdk": "^2.756.0", 29 | "cids": "^1.0.0", 30 | "debug": "^4.0.1", 31 | "delay": "^4.3.0", 32 | "dnscache": "^1.0.1", 33 | "dnssd": "^0.4.1", 34 | "dotenv": "^8.0.0", 35 | "fs-extra": "^9.0.0", 36 | "hat": "~0.0.3", 37 | "ipfs-registry-mirror-common": "^3.0.0", 38 | "it-last": "^1.0.2", 39 | "p-queue": "^6.0.1", 40 | "yargs": "^16.0.3" 41 | }, 42 | "devDependencies": { 43 | "aegir": "^26.0.0", 44 | "chai": "^4.1.2", 45 | "dirty-chai": "^2.0.1", 46 | "mock-require": "^3.0.2", 47 | "sinon": "^9.0.2" 48 | }, 49 | "optionalDependencies": { 50 | "appmetrics-dash": "^5.3.0" 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/routes/tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:handlers:tarball') 4 | const path = require('path') 5 | const loadTarball = require('ipfs-registry-mirror-common/utils/load-tarball') 6 | const lol = require('ipfs-registry-mirror-common/utils/error-message') 7 | const log = require('ipfs-registry-mirror-common/utils/log') 8 | 9 | module.exports = (config, ipfs, app) => { 10 | return async (request, response, next) => { 11 | debug(`Requested ${request.path}`) 12 | 13 | const file = request.path 14 | 15 | debug(`Loading ${file}`) 16 | 17 | try { 18 | response.statusCode = 200 19 | response.setHeader('Content-Disposition', `attachment; filename="${path.basename(request.url)}"`) 20 | 21 | for await (const chunk of loadTarball(file, ipfs, { 22 | signal: response.locals.signal, 23 | ...config 24 | })) { 25 | response.write(chunk) 26 | } 27 | 28 | response.end() 29 | } catch (error) { 30 | log(`💥 Could not load tarball for ${file}`, error) 31 | 32 | if (error.code === 'ECONNREFUSED') { 33 | response.statusCode = 504 34 | } else if (error.code === 'ECONNRESET') { 35 | // will trigger a retry from the npm client 36 | response.statusCode = 500 37 | } else if (error.message.includes('Not found')) { 38 | response.statusCode = 404 39 | response.send(lol(`💥 Could not load ${file}, has it been published?`)) 40 | } else if (error.message.includes('in available versions')) { 41 | response.statusCode = 404 42 | response.send(lol(`💥 Could not load ${file}, version unavailable`)) 43 | } else { 44 | // a 500 will cause the npm client to retry 45 | response.statusCode = 500 46 | response.send(lol(`💥 ${error.message}`)) 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /packages/common/server.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const express = require('express') 4 | const once = require('once') 5 | const { 6 | abortableRequest, 7 | errorLog, 8 | favicon, 9 | requestLog, 10 | 
cors 11 | } = require('./handlers') 12 | const prometheus = require('express-prom-bundle') 13 | const promisify = require('util').promisify 14 | const metrics = prometheus({ 15 | includeMethod: true, 16 | autoregister: false 17 | }) 18 | const log = require('./utils/log') 19 | const getAnIPFS = require('./utils/get-an-ipfs') 20 | 21 | module.exports = async (config, handlers = async () => {}) => { 22 | const ipfs = await getAnIPFS(config) 23 | 24 | log('🛫 Starting server') 25 | 26 | const app = express() 27 | 28 | app.use(requestLog) 29 | app.use(metrics) 30 | app.use('/-/metrics', metrics.metricsMiddleware) 31 | app.use(cors) 32 | app.use(abortableRequest) 33 | 34 | app.get('/favicon.ico', favicon(config, ipfs, app)) 35 | app.get('/favicon.png', favicon(config, ipfs, app)) 36 | 37 | await handlers(app, ipfs) 38 | 39 | app.use(errorLog) 40 | 41 | return new Promise((resolve, reject) => { 42 | const callback = once((error) => { 43 | if (error) { 44 | return reject(error) 45 | } 46 | 47 | if (!config.http.port) { 48 | config.http.port = server.address().port 49 | } 50 | 51 | log(`🚀 Server running on port ${config.http.port}`) 52 | 53 | resolve({ 54 | server, 55 | app, 56 | ipfs, 57 | stop: () => { 58 | return Promise.all([ 59 | promisify(server.close.bind(server))(), 60 | ipfs.stop() 61 | ]) 62 | .then(() => { 63 | log('✋ Server stopped') 64 | }) 65 | } 66 | }) 67 | }) 68 | 69 | const server = app.listen(config.http.port, callback) 70 | server.once('error', callback) 71 | 72 | app.locals.ipfs = ipfs 73 | }) 74 | } 75 | -------------------------------------------------------------------------------- /packages/replication-master/test/fixtures/create-skim-db.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | createTestServer 5 | } = require('ipfs-registry-mirror-common/test/fixtures/test-server') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | 8 | const createSkimDb = async (registry) => { 9 | const updates = [] 10 | let seq = 0 11 | 12 | const resources = { 13 | '/': JSON.stringify({ 14 | db_name: 'registry', 15 | doc_count: 807004, 16 | doc_del_count: 77670, 17 | update_seq: seq, 18 | purge_seq: 0, 19 | compact_running: false, 20 | disk_size: 6156660994, 21 | other: { 22 | data_size: 19122199289 23 | }, 24 | data_size: 5606706136, 25 | sizes: { 26 | file: 6156660994, 27 | active: 5606706136, 28 | external: 19122199289 29 | }, 30 | instance_start_time: '1538675327980753', 31 | disk_format_version: 6, 32 | committed_update_seq: 6425135, 33 | compacted_seq: 6423134, 34 | uuid: '370e266567ec9d1242acc2612839d6a7' 35 | }), 36 | '/_changes': (request, response, next) => { 37 | try { 38 | while (updates.length) { 39 | const update = updates.shift() 40 | 41 | seq++ 42 | 43 | response.write(JSON.stringify({ 44 | seq, 45 | id: update.name, 46 | changes: [{ 47 | _rev: update.json._rev 48 | }] 49 | }) + '\n') 50 | } 51 | } catch (error) { 52 | log(error) 53 | } 54 | 55 | response.end() 56 | } 57 | } 58 | 59 | const skimDb = await createTestServer(resources) 60 | 61 | skimDb.publish = (update, tarball) => { 62 | registry[`/${update.name}`] = JSON.stringify(update.json) 63 | 64 | if (tarball) { 65 | registry[tarball.path] = tarball.content 66 | } 67 | 68 | updates.push(update) 69 | } 70 | 71 | return skimDb 72 | } 73 | 74 | module.exports = createSkimDb 75 | -------------------------------------------------------------------------------- /packages/common/utils/get-an-ipfs.js: 
-------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IPFS = require('ipfs') 4 | const s3Repo = require('./s3-repo') 5 | const fsRepo = require('./fs-repo') 6 | const clusterRepo = require('./cluster-repo') 7 | const log = require('./log') 8 | const cluster = require('cluster') 9 | 10 | const randomPort = () => { 11 | return Math.floor(Math.random() * 64535) + 1000 12 | } 13 | 14 | const getAnIPFS = async (config) => { 15 | if (config.ipfs.port && config.ipfs.host) { 16 | config.store.port = config.ipfs.port 17 | config.store.host = config.ipfs.host 18 | log(`👺 Connecting to remote IPFS daemon at ${config.ipfs.host}:${config.ipfs.port}`) 19 | } else { 20 | log('😈 Using in-process IPFS daemon') 21 | } 22 | 23 | let repo 24 | 25 | if (config.ipfs.store === 's3') { 26 | repo = s3Repo(config.ipfs.s3) 27 | } 28 | 29 | if (config.ipfs.store === 'fs') { 30 | if (config.clone.concurrency) { 31 | repo = clusterRepo(config.ipfs.fs) 32 | } else { 33 | repo = fsRepo(config.ipfs.fs) 34 | } 35 | } 36 | 37 | log('🏁 Starting an IPFS instance') 38 | 39 | const ipfs = await IPFS.create({ 40 | pass: config.ipfs.pass, 41 | init: { 42 | emptyRepo: true 43 | }, 44 | repo, 45 | EXPERIMENTAL: { 46 | sharding: true 47 | }, 48 | pubsub: { 49 | enabled: true 50 | }, 51 | preload: { 52 | enabled: false 53 | }, 54 | config: { 55 | Addresses: { 56 | Swarm: cluster.isMaster ? [ 57 | `/ip4/0.0.0.0/tcp/${config.ipfs.port || randomPort()}`, 58 | `/ip4/127.0.0.1/tcp/${config.ipfs.wsPort || randomPort()}/ws` 59 | ] : [], 60 | API: `/ip4/127.0.0.1/tcp/${config.ipfs.apiPort || randomPort()}`, 61 | Gateway: `/ip4/127.0.0.1/tcp/${config.ipfs.gatewayPort || randomPort()}` 62 | } 63 | } 64 | }) 65 | 66 | process.on('exit', () => { 67 | ipfs.stop() 68 | }) 69 | 70 | return ipfs 71 | } 72 | 73 | module.exports = getAnIPFS 74 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/sequence-file.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const S3 = require('aws-sdk/clients/s3') 4 | const log = require('ipfs-registry-mirror-common/utils/log') 5 | const fs = require('fs-extra') 6 | 7 | module.exports = ({ ipfs: { store, s3: { bucket, region, accessKeyId, secretAccessKey } }, follow: { seqFile } }) => { 8 | if (store !== 's3') { 9 | log('📁 Using fs sequence file', seqFile) 10 | return { 11 | async read () { 12 | try { 13 | return parseInt(await fs.readFile(seqFile, 'utf8'), 10) // await here so a missing file is caught below 14 | } catch (err) { 15 | log(err) 16 | return 0 17 | } 18 | }, 19 | async write (data) { 20 | await fs.writeFile(seqFile, data, 'utf8') 21 | }, 22 | async reset () { 23 | await fs.unlink(seqFile) 24 | } 25 | } 26 | } 27 | 28 | log('☁️ Using s3 sequence file', seqFile) 29 | 30 | const s3 = new S3({ 31 | params: { 32 | Bucket: bucket 33 | }, 34 | region, 35 | accessKeyId, 36 | secretAccessKey 37 | }) 38 | 39 | return { 40 | async read () { 41 | try { 42 | const data = await s3.getObject({ 43 | Key: seqFile 44 | }).promise() 45 | 46 | const seq = data.Body.toString('utf8') 47 | 48 | return parseInt(seq, 10) 49 | } catch (err) { 50 | log(`💥 Could not load seq file from ${seqFile}`, err) 51 | 52 | return 0 53 | } 54 | }, 55 | async write (data) { 56 | try { 57 | await s3.putObject({ 58 | Key: seqFile, 59 | Body: `${data}` 60 | }).promise() 61 | } catch (err) { 62 | log(`💥 Could not write seq file to ${seqFile}`, err) 63 | } 64 | }, 65 | async reset () { 66 | try { 
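/* deleting the stored sequence number makes the follower start replicating from seq 0 again */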
67 | await s3.deleteObject({ 68 | Key: seqFile 69 | }).promise() 70 | } catch (err) { 71 | log(`💥 Could not reset seq file at ${seqFile}`, err) 72 | } 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/pubsub.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('ipfs-registry-mirror-common/utils/retry-request') 4 | const log = require('ipfs-registry-mirror-common/utils/log') 5 | const { default: PQueue } = require('p-queue') 6 | const uint8ArrayToString = require('uint8arrays/to-string') 7 | const queue = new PQueue({ concurrency: 1 }) 8 | 9 | const findMaster = (config) => { 10 | return request(Object.assign({}, config.request, { 11 | uri: config.pubsub.master, 12 | json: true, 13 | retries: 100, 14 | retryDelay: 5000 15 | })) 16 | } 17 | 18 | const handleUpdate = (config, ipfs, event) => { 19 | if (event.type !== 'update' || !event.cid) { 20 | return 21 | } 22 | 23 | queue.clear() 24 | queue.add(async () => { 25 | log('🦄 Incoming update') 26 | 27 | try { 28 | log(`🐴 Removing old ${config.ipfs.prefix}`) 29 | await ipfs.files.rm(config.ipfs.prefix, { 30 | recursive: true 31 | }) 32 | log(`🐎 Copying /ipfs/${event.cid} to ${config.ipfs.prefix}`) 33 | await ipfs.files.cp(`/ipfs/${event.cid}`, config.ipfs.prefix) 34 | } catch (error) { 35 | log(`💥 Could not update ${event.cid}`, error) 36 | } 37 | }) 38 | } 39 | 40 | const subscribeToTopic = async (config, ipfs, master) => { 41 | config.pubsub.topic = master.topic 42 | 43 | await ipfs.pubsub.subscribe(master.topic, (event) => { 44 | if (event.from !== master.ipfs.id) { 45 | return 46 | } 47 | 48 | handleUpdate(config, ipfs, JSON.parse(uint8ArrayToString(event.data, 'utf8'))) 49 | }) 50 | } 51 | 52 | const updateRoot = (config, ipfs, master) => { 53 | return ipfs.files.cp(master.root, config.ipfs.prefix) 54 | } 55 | 56 | const worker = async (config, ipfs) => { 57 | let timer = Date.now() 58 | const master = await findMaster(config) 59 | log(`🧚‍♀️ Found master id ${master.ipfs.id} in ${Date.now() - timer}ms`) 60 | 61 | timer = Date.now() 62 | await subscribeToTopic(config, ipfs, master) 63 | log(`🙋 Worker subscribed to ${master.topic} in ${Date.now() - timer}ms`) 64 | 65 | timer = Date.now() 66 | await updateRoot(config, ipfs, master) 67 | log(`🦓 Got root in ${Date.now() - timer}ms`) 68 | } 69 | 70 | module.exports = worker 71 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const config = require('./config') 4 | const clone = require('./clone') 5 | const replicationMaster = require('./pubsub') 6 | const advertise = require('./mdns') 7 | const server = require('ipfs-registry-mirror-common/server') 8 | const root = require('./routes/root') 9 | const worker = require('./routes/worker') 10 | const workerOnline = require('./routes/worker-online') 11 | const delay = require('delay') 12 | const { 13 | status 14 | } = require('./routes/workers') 15 | const log = require('ipfs-registry-mirror-common/utils/log') 16 | const AbortController = require('abort-controller') 17 | 18 | module.exports = async (options) => { 19 | options = config(options) 20 | 21 | const result = await server(options, async (app, ipfs) => { 22 | const res = await replicationMaster(options, ipfs, app) 23 | 24 | app.get('/', 
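/* the root route reports the master's id, peers, pubsub topic and current root CID */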
root(options, ipfs, app, res.root, res.topic)) 25 | app.get('/-/worker', worker()) 26 | app.post('/-/worker', workerOnline()) 27 | }) 28 | 29 | // give workers a chance to connect 30 | const time = Date.now() 31 | log(`⌚ Waiting for ${options.clone.delay}ms before starting to clone npm`) 32 | 33 | await delay(options.clone.delay || 0) 34 | 35 | let workerStatus = status() 36 | 37 | if (!workerStatus.ready) { 38 | log(`⌚ Waiting for ${workerStatus.workers.length - workerStatus.initialised} of ${workerStatus.workers.length} workers to be ready before starting to clone npm`) 39 | 40 | while (true) { 41 | await delay(options.clone.delay || 0) 42 | 43 | workerStatus = status() 44 | 45 | if (workerStatus.ready) { 46 | break 47 | } 48 | 49 | log(`⌚ Still waiting for ${workerStatus.workers.length - workerStatus.initialised} of ${workerStatus.workers.length} workers to be ready before starting to clone npm`) 50 | } 51 | } 52 | 53 | log(`⌚ Workers took ${Date.now() - time}ms to initialise`) 54 | 55 | const controller = new AbortController() 56 | 57 | clone(result.app, controller.signal, result.app.locals.ipfs, options) 58 | .then(() => {}, () => {}) 59 | 60 | const stop = result.stop 61 | const stopAdvert = await advertise(result.ipfs, options) 62 | 63 | result.stop = () => { 64 | controller.abort() 65 | stopAdvert() 66 | return stop() 67 | } 68 | 69 | return result 70 | } 71 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const proxy = require('express-http-proxy') 4 | const config = require('./config') 5 | const replicationWorker = require('./pubsub') 6 | const getExternalUrl = require('ipfs-registry-mirror-common/utils/get-external-url') 7 | const server = require('ipfs-registry-mirror-common/server') 8 | const tarball = require('./routes/tarball') 9 | const packument = require('./routes/packument') 10 | const root = require('./routes/root') 11 | const request = require('ipfs-registry-mirror-common/utils/retry-request') 12 | const findExternalPort = require('./find-external-port') 13 | const log = require('ipfs-registry-mirror-common/utils/log') 14 | 15 | module.exports = async (options) => { 16 | options = config(options) 17 | 18 | const worker = await request(Object.assign({}, options.request, { 19 | uri: `${options.pubsub.master}/-/worker`, 20 | qs: { 21 | worker: process.env.HOSTNAME 22 | }, 23 | json: true, 24 | retries: 100, 25 | retryDelay: 5000 26 | })) 27 | 28 | options.ipfs.s3.path = `${options.ipfs.s3.path}-${worker.index}` 29 | options.ipfs.fs.repo = `${options.ipfs.fs.repo}-${worker.index}` 30 | options.ipfs.port = 10000 + worker.index 31 | options.external.ipfsPort = await findExternalPort(options) 32 | 33 | const result = await server(options, async (app, ipfs) => { 34 | app.get('/', root(options, ipfs, app, worker)) 35 | 36 | // intercept requests for tarballs and manifests 37 | app.get('/*.tgz', tarball(options, ipfs, app)) 38 | app.get('/*', packument(options, ipfs, app)) 39 | 40 | // everything else should just proxy for the registry 41 | const registry = proxy(options.registries[0], { 42 | limit: options.registryUploadSizeLimit 43 | }) 44 | app.put('/*', registry) 45 | app.post('/*', registry) 46 | app.patch('/*', registry) 47 | app.delete('/*', registry) 48 | app.get('/-/whoami', registry) 49 | 50 | await replicationWorker(options, ipfs, app) 51 | }) 52 | 53 | // finished initialisation 54 | await request(Object.assign({}, 
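/* tell the replication master that this mirror worker has finished initialising */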
config.request, { 55 | method: 'post', 56 | uri: `${options.pubsub.master}/-/worker`, 57 | json: true, 58 | retries: 100, 59 | retryDelay: 5000 60 | })) 61 | 62 | const url = getExternalUrl(options) 63 | 64 | log(`🔧 Please either update your npm config with 'npm config set registry ${url}'`) 65 | log(`🔧 or use the '--registry' flag, eg: 'npm install --registry=${url}'`) 66 | 67 | return result 68 | } 69 | -------------------------------------------------------------------------------- /img/npm-on-ipfs.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | npm-on-ipfs 4 | Original idea by @olizilla. 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /packages/common/utils/download-tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-common:utils:download-tarball') 4 | const crypto = require('crypto') 5 | const log = require('ipfs-registry-mirror-common/utils/log') 6 | const { urlSource } = require('ipfs') 7 | 8 | const downloadTarball = async (packument, versionNumber, ipfs, options) => { 9 | const version = packument.versions[versionNumber] 10 | 11 | validate(version, versionNumber, packument.name) 12 | 13 | if (version.cid) { 14 | debug(`Skipping version ${versionNumber} of ${packument.name} - already downloaded`) 15 | 16 | return 17 | } 18 | 19 | const start = Date.now() 20 | 21 | const cid = await downloadFile(version.dist.tarball, version.dist.shasum, ipfs, options) 22 | 23 | version.cid = `/ipfs/${cid}` 24 | 25 | log(`🏄‍♀️ Added ${version.dist.tarball} with CID ${version.cid} in ${Date.now() - start}ms`) 26 | } 27 | 28 | const validate = (version, versionNumber, packageName) => { 29 | if (!version) { 30 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - version not in manifest`) 31 | } 32 | 33 | if (!version.dist) { 34 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no dist section`) 35 | } 36 | 37 | if (!version.dist.tarball) { 38 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no tarball`) 39 | } 40 | 41 | if (!version.dist.shasum) { 42 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no shasum`) 43 | } 44 | } 45 | 46 | const downloadFile = async (url, shasum, ipfs, options) => { 47 | for (let i = 0; i < options.request.retries; i++) { 48 | try { 49 | log(`⬇️ Downloading ${url}`) 50 | const start = Date.now() 51 | 52 | const { 53 | cid 54 | } = await ipfs.add(urlSource(url), { 55 | wrapWithDirectory: false, 56 | pin: options.clone.pin, 57 | version: 1, 58 | rawLeaves: true, 59 | signal: options.signal 60 | }) 61 | 62 | log(`✅ Downloaded ${url} in ${Date.now() - start}ms`) 63 | 64 | await validateShasum(cid, shasum, url, ipfs, options) 65 | 66 | log(`🌍 Added ${url} to IPFS with CID ${cid} in ${Date.now() - start}ms`) 67 | 68 | return cid 69 | } catch (err) { 70 | log(`💥 Download failed`, err) 71 | } 72 | } 73 | 74 | throw new Error(`💥 ${options.request.retries} retries exceeded while downloading ${url}`) 75 | } 76 | 77 | const validateShasum = async (cid, shasum, url, ipfs, options) => { 78 | const hashStart = Date.now() 79 | const hash = crypto.createHash('sha1') 80 | hash.on('error', () => {}) 81 | 82 | for await (const buf of ipfs.cat(cid, { 83 | signal: options.signal 84 | })) { 85 | hash.update(buf) 86 | 
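/* stream the file back out of IPFS so the digest covers exactly the bytes that were stored */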
} 87 | 88 | const result = hash.digest('hex') 89 | 90 | if (result !== shasum) { 91 | throw new Error(`Shasum of ${url} failed ${result} !== ${shasum}`) 92 | } 93 | 94 | log(`🙆 Checked shasum of ${url} in ${Date.now() - hashStart}ms`) 95 | } 96 | 97 | module.exports = downloadTarball 98 | -------------------------------------------------------------------------------- /packages/common/utils/level-lock.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | AbstractLevelDOWN, 5 | AbstractIterator 6 | } = require('abstract-leveldown') 7 | const mortice = require('mortice') 8 | 9 | const handle = (resolve, reject) => { 10 | return (err, res) => { 11 | if (err) { 12 | return reject(err) 13 | } 14 | 15 | resolve(res) 16 | } 17 | } 18 | 19 | class LevelLock extends AbstractLevelDOWN { 20 | constructor (db, opts) { 21 | super(db, opts) 22 | 23 | this.db = db 24 | this.opts = opts || {} 25 | this.mutex = mortice(this.opts.lock || 'level-lock') 26 | } 27 | 28 | _open (options, callback) { 29 | this.db.open(options, callback) 30 | } 31 | 32 | _close (callback) { 33 | this.db.close(callback) 34 | } 35 | 36 | _put (key, value, options, callback) { 37 | this.mutex.writeLock(() => { 38 | return new Promise((resolve, reject) => { 39 | this.db.put(key, value, options, handle(resolve, reject)) 40 | }) 41 | .then(res => callback(null, res), callback) 42 | }) 43 | } 44 | 45 | _get (key, options, callback) { 46 | this.mutex.readLock(() => { 47 | return new Promise((resolve, reject) => { 48 | this.db.get(key, options, handle(resolve, reject)) 49 | }) 50 | .then(res => callback(null, res), callback) 51 | }) 52 | } 53 | 54 | _del (key, options, callback) { 55 | this.mutex.writeLock(() => { 56 | return new Promise((resolve, reject) => { 57 | this.db.del(key, options, handle(resolve, reject)) 58 | }) 59 | .then(res => callback(null, res), callback) 60 | }) 61 | } 62 | 63 | _batch (operations, options, callback) { 64 | this.mutex.writeLock(() => { 65 | return new Promise((resolve, reject) => { 66 | this.db.batch(operations, options, handle(resolve, reject)) 67 | }) 68 | .then(res => callback(null, res), callback) 69 | }) 70 | } 71 | 72 | _serializeKey (key) { 73 | if (this.db._serializeKey) { 74 | return this.db._serializeKey(key) 75 | } 76 | 77 | return key 78 | } 79 | 80 | _serializeValue (value) { 81 | if (this.db._serializeValue) { 82 | return this.db._serializeValue(value) 83 | } 84 | 85 | return value 86 | } 87 | 88 | _iterator (options) { 89 | return new LevelLockIterator(this, options) 90 | } 91 | } 92 | 93 | class LevelLockIterator extends AbstractIterator { 94 | constructor (db, options) { 95 | super(db, options) 96 | 97 | this.mutex = db.mutex 98 | this.iter = db.db.iterator(options) 99 | } 100 | 101 | _next (callback) { 102 | this.mutex.readLock((cb) => { 103 | this.iter.next((err, value) => { 104 | cb() 105 | callback(err, value) 106 | }) 107 | }) 108 | } 109 | 110 | _seek (target) { 111 | this.mutex.readLock((cb) => { 112 | this.iter.seek(target) 113 | cb() 114 | }) 115 | } 116 | 117 | _end (callback) { 118 | this.iter.end(callback) 119 | } 120 | } 121 | 122 | module.exports = LevelLock 123 | -------------------------------------------------------------------------------- /packages/common/test/save-tarball.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const sinon = require('sinon') 6 | const expect = 
require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const hat = require('hat') 10 | const { 11 | PassThrough 12 | } = require('stream') 13 | const CID = require('cids') 14 | 15 | describe('save-tarball', () => { 16 | let saveTarball 17 | let loadPackument 18 | let savePackument 19 | let request 20 | let ipfs 21 | let config 22 | 23 | beforeEach(() => { 24 | config = { 25 | request: { 26 | 27 | }, 28 | clone: { 29 | 30 | } 31 | } 32 | 33 | request = sinon.stub() 34 | loadPackument = sinon.stub() 35 | savePackument = sinon.stub() 36 | 37 | mock('../utils/retry-request', request) 38 | mock('../utils/save-packument', savePackument) 39 | mock('../utils/load-packument', loadPackument) 40 | 41 | saveTarball = mock.reRequire('../utils/save-tarball') 42 | 43 | ipfs = { 44 | add: sinon.stub() 45 | } 46 | }) 47 | 48 | afterEach(() => { 49 | mock.stopAll() 50 | }) 51 | 52 | it('should not save a tarball we have already downloaded', async () => { 53 | const versionNumber = '1.0.0' 54 | const pkg = { 55 | name: `module-${hat()}`, 56 | versions: { 57 | [versionNumber]: { 58 | dist: { 59 | cid: 'a-cid', 60 | source: 'tarball-url', 61 | shasum: 'tarball-shasum' 62 | } 63 | } 64 | } 65 | } 66 | 67 | loadPackument.withArgs(pkg.name, ipfs, config) 68 | .resolves(pkg) 69 | 70 | await saveTarball(pkg.name, versionNumber, ipfs, config) 71 | 72 | expect(request.called).to.be.false() 73 | }) 74 | 75 | it('should download a missing tarball', async () => { 76 | const versionNumber = '1.0.0' 77 | const pkg = { 78 | name: `module-${hat()}`, 79 | versions: { 80 | [versionNumber]: { 81 | dist: { 82 | tarball: 'tarball-url', 83 | shasum: '3c4fb10163dc33fd83b588fe36af9aa5efba2985' 84 | } 85 | } 86 | } 87 | } 88 | 89 | loadPackument.withArgs(pkg.name, ipfs, config) 90 | .resolves(pkg) 91 | 92 | ipfs.add.callsFake(stream => { 93 | return new Promise((resolve) => { 94 | stream.on('end', () => { 95 | resolve({ 96 | cid: new CID('QmZEYeEin6wEB7WNyiT7stYTmbYFGy7BzM7T3hRDzRxTvY').toV1() 97 | }) 98 | }) 99 | }) 100 | }) 101 | 102 | request.withArgs({ 103 | uri: 'tarball-url' 104 | }) 105 | .callsFake(() => { 106 | const stream = new PassThrough() 107 | 108 | setTimeout(() => { 109 | stream.write('tarball-content') 110 | stream.end() 111 | }, 100) 112 | 113 | return Promise.resolve(stream) 114 | }) 115 | 116 | await saveTarball(pkg.name, versionNumber, ipfs, config) 117 | 118 | expect(request.called).to.be.true() 119 | }) 120 | }) 121 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/ingest-module.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:clone') 4 | const savePackument = require('ipfs-registry-mirror-common/utils/save-packument') 5 | const saveTarballs = require('../save-tarballs') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | 8 | const publishOrUpdateIPNSName = async (packument, ipfs, options) => { 9 | const timer = Date.now() 10 | const file = `${options.ipfs.prefix}/${packument.name}` 11 | let newNameCreated = false 12 | 13 | if (!packument.ipns) { 14 | // we need to create the ipns name (which will be stable), add it to the 15 | // manifest, save it again and then immediately update the ipns name 16 | 17 | try { 18 | await ipfs.key.gen(packument.name, { 19 | type: 'rsa', 20 | size: 2048 21 | }) 22 | } catch (err) { 23 | if (!err.message.includes('already exists')) { 24 | throw err 25 | } 26 | } 27 
| 28 | newNameCreated = true 29 | } 30 | 31 | const stats = await ipfs.files.stat(file) 32 | 33 | const result = await ipfs.name.publish(`/ipfs/${stats.hash}`, { 34 | key: packument.name 35 | }) 36 | 37 | if (newNameCreated) { 38 | packument.ipns = result.name 39 | packument = await savePackument(packument, ipfs, options) 40 | 41 | const stats = await ipfs.files.stat(file) 42 | await ipfs.name.publish(`/ipfs/${stats.hash}`, { 43 | key: packument.name 44 | }) 45 | } 46 | 47 | log(`💾 Updated ${packument.name} IPNS name ${packument.ipns} in ${Date.now() - timer}ms`) 48 | } 49 | 50 | module.exports = async ({ packument, seq, ipfs, options }) => { 51 | log(`🎉 Updated version of ${packument.name}`) 52 | const mfsPath = `${options.ipfs.prefix}/${packument.name}` 53 | let mfsVersion = { 54 | versions: {} 55 | } 56 | let timer 57 | 58 | try { 59 | log(`📃 Reading ${packument.name} cached packument from ${mfsPath}`) 60 | timer = Date.now() 61 | // files.read returns an async iterable of buffers, so collect it up and parse it 62 | let data = Buffer.alloc(0) 63 | for await (const chunk of ipfs.files.read(mfsPath)) { 64 | data = Buffer.concat([data, chunk]) 65 | } 66 | mfsVersion = JSON.parse(data) 67 | log(`📃 Read ${packument.name} cached packument from ${mfsPath} in ${Date.now() - timer}ms`) 68 | } catch (error) { 69 | if (error.message.includes('does not exist')) { 70 | debug(`${mfsPath} not in MFS`) 71 | } else { 72 | debug(`Could not read ${mfsPath}`, error) 73 | } 74 | } 75 | 76 | // save our existing versions so we don't re-download tarballs we already have 77 | Object.keys(mfsVersion.versions || {}).forEach(versionNumber => { 78 | packument.versions[versionNumber] = mfsVersion.versions[versionNumber] 79 | }) 80 | 81 | packument.ipns = mfsVersion.ipns 82 | 83 | timer = Date.now() 84 | await saveTarballs(packument, ipfs, options) 85 | log(`🧳 Saved ${packument.name} tarballs in ${Date.now() - timer}ms`) 86 | 87 | timer = Date.now() 88 | await savePackument(packument, ipfs, options) 89 | log(`💾 Saved ${packument.name} packument in ${Date.now() - timer}ms`) 90 | 91 | if (options.clone.publish) { 92 | await publishOrUpdateIPNSName(packument, ipfs, options) 93 | } 94 | 95 | return { 96 | seq, 97 | name: packument.name 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /packages/common/utils/cluster-repo.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('./log') 4 | const IPFSRepo = require('ipfs-repo') 5 | const cluster = require('cluster') 6 | const multileveldown = require('multileveldown') 7 | const LevelDataStore = require('datastore-level') 8 | const FileDataStore = require('datastore-fs') 9 | const level = require('level') 10 | const net = require('net') 11 | const memdown = require('memdown') 12 | const { Errors } = require('interface-datastore') 13 | 14 | let lock = 'fs' 15 | 16 | if (cluster.isWorker) { 17 | lock = { 18 | lock: () => { 19 | return { 20 | close: () => {} 21 | } 22 | } 23 | } 24 | } 25 | 26 | const clusterRepo = ({ repo }) => { 27 | if (process.env.NODE_ENV === 'development') { 28 | repo = `${repo}-test` 29 | } 30 | 31 | log(`📁 Using fs repo at ${repo}`) 32 | 33 | class MultiLeveLDataStore extends LevelDataStore { 34 | constructor (path, opts) { 35 | super(path, { 36 | ...opts, 37 | db: () => memdown() 38 | }) 39 | 40 | this.opts = opts 41 | } 42 | 43 | _initDb (database, path) { 44 | if (cluster.isMaster) { 45 | return level(path, { 46 | valueEncoding: 'binary', 47 | compression: false // same default as go 48 | }) 49 | } 50 | 51 | return multileveldown.client({ 52 | retry: true, 53 | valueEncoding: 'binary', 54 | compression: false // same default as go 55 | 
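/* workers proxy datastore access to the master's multileveldown server over tcp instead of opening leveldb a second time */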
}) 56 | } 57 | 58 | async open () { 59 | if (cluster.isMaster) { 60 | try { 61 | await this.db.open() 62 | 63 | return new Promise((resolve, reject) => { 64 | this._server = net.createServer((sock) => { 65 | sock.on('error', () => { 66 | sock.destroy() 67 | }) 68 | 69 | sock.pipe(multileveldown.server(this.db)).pipe(sock) 70 | }) 71 | 72 | // listen errors are emitted as 'error' events, not passed to the callback 73 | this._server.once('error', reject) 74 | 75 | this._server.listen(this.opts.port, () => { 76 | resolve() 77 | }) 78 | 79 | }) 80 | } catch (err) { 81 | throw Errors.dbOpenFailedError(err) 82 | } 83 | } 84 | 85 | this._sock = net.connect(this.opts.port) 86 | this._sock.pipe(this.db.connect()).pipe(this._sock) 87 | } 88 | 89 | close () { 90 | if (cluster.isMaster) { 91 | this._server.close() 92 | return this.db.close() 93 | } 94 | 95 | this._sock.destroy() // net.Socket has no close() method 96 | } 97 | } 98 | 99 | return new IPFSRepo(repo, { 100 | lock: lock, 101 | storageBackends: { 102 | root: FileDataStore, 103 | blocks: FileDataStore, 104 | keys: FileDataStore, 105 | datastore: MultiLeveLDataStore, 106 | pins: MultiLeveLDataStore 107 | }, 108 | storageBackendOptions: { 109 | datastore: { 110 | port: 39281 111 | }, 112 | pins: { 113 | port: 39282 114 | } 115 | } 116 | }) 117 | } 118 | 119 | module.exports = clusterRepo 120 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/core/config.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const toBoolean = require('ipfs-registry-mirror-common/utils/to-boolean') 4 | const option = require('ipfs-registry-mirror-common/utils/option') 5 | 6 | module.exports = (overrides = {}) => { 7 | return { 8 | registries: (overrides.registries || []).concat(option(process.env.REGISTRY, overrides.registry)).filter(Boolean), 9 | registryUpdateInterval: option(process.env.REGISTRY_UPDATE_INTERVAL, overrides.registryUpdateInterval), 10 | registryUploadSizeLimit: option(process.env.MIRROR_UPLOAD_SIZE_LIMIT, overrides.registryUploadSizeLimit), 11 | registryReadTimeout: option(Number(process.env.REGISTRY_READ_TIMEOUT), overrides.registryReadTimeout), 12 | 13 | http: { 14 | protocol: option(process.env.HTTP_PROTOCOL, overrides.httpProtocol), 15 | host: option(process.env.HTTP_HOST, overrides.httpHost), 16 | port: option(Number(process.env.HTTP_PORT), overrides.httpPort) 17 | }, 18 | 19 | external: { 20 | ip: option(process.env.EXTERNAL_IP, overrides.externalIp), 21 | protocol: option(process.env.EXTERNAL_PROTOCOL, overrides.externalProtocol), 22 | host: option(process.env.EXTERNAL_HOST, overrides.externalHost), 23 | port: option(process.env.EXTERNAL_PORT, overrides.externalPort) 24 | }, 25 | 26 | ipfs: { 27 | port: option(process.env.IPFS_SWARM_PORT, overrides.ipfsPort), 28 | prefix: option(process.env.IPFS_MFS_PREFIX, overrides.ipfsMfsPrefix), 29 | flush: option(toBoolean(process.env.IPFS_FLUSH), overrides.ipfsFlush), 30 | store: option(process.env.IPFS_STORE_TYPE, overrides.ipfsStoreType), 31 | 32 | s3: { 33 | region: option(process.env.STORE_S3_REGION, overrides.storeS3Region), 34 | bucket: option(process.env.STORE_S3_BUCKET, overrides.storeS3Bucket), 35 | path: option(process.env.STORE_S3_PATH, overrides.storeS3Path), 36 | accessKeyId: option(process.env.STORE_S3_ACCESS_KEY_ID, overrides.storeS3AccessKeyId), 37 | secretAccessKey: option(process.env.STORE_S3_SECRET_ACCESS_KEY, overrides.storeS3SecretAccessKey), 38 | createIfMissing: option(process.env.STORE_S3_CREATE_IF_MISSING, overrides.createIfMissing) 39 | }, 40 | 41 | fs: { 42 | repo: 
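/* option() presumably returns the first defined argument, so the env var wins over the programmatic override */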
option(process.env.IPFS_REPO, overrides.ipfsRepo), 43 | port: option(process.env.IPFS_REPO_PORT, overrides.ipfsRepoPort) 44 | } 45 | }, 46 | 47 | pubsub: { 48 | master: option(process.env.PUBSUB_MASTER, overrides.pubsubMaster) 49 | }, 50 | 51 | clone: { 52 | pin: option(Number(process.env.CLONE_PIN), overrides.clonePin) 53 | }, 54 | 55 | request: { 56 | retries: option(process.env.REQUEST_RETRIES, overrides.requestRetries), 57 | retryDelay: option(process.env.REQUEST_RETRY_DELAY, overrides.requestRetryDelay), 58 | timeout: option(process.env.REQUEST_TIMEOUT, overrides.requestTimeout), 59 | forever: option(toBoolean(process.env.REQUEST_KEEP_ALIVE), overrides.requestKeepAlive), 60 | pool: { 61 | maxSockets: option(Number(process.env.REQUEST_MAX_SOCKETS), overrides.requestMaxSockets) 62 | } 63 | } 64 | } 65 | } 66 | 67 | module.exports.option = option 68 | module.exports.toBoolean = toBoolean 69 | -------------------------------------------------------------------------------- /packages/common/test/load-tarball.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const sinon = require('sinon') 6 | const expect = require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const hat = require('hat') 10 | const CID = require('cids') 11 | const toBuffer = require('it-to-buffer') 12 | 13 | describe('load-tarball', () => { 14 | let loadTarball 15 | let loadPackument 16 | let saveTarball 17 | let ipfs 18 | let config 19 | 20 | beforeEach(() => { 21 | config = { 22 | registryUpdateInterval: 0, 23 | registry: 'http://foo', 24 | ipfs: { 25 | prefix: `/registry-prefix-${hat()}` 26 | }, 27 | request: { 28 | 29 | }, 30 | http: { 31 | host: 'localhost', 32 | port: 8080, 33 | protocol: 'http' 34 | } 35 | } 36 | 37 | loadPackument = sinon.stub() 38 | saveTarball = sinon.stub() 39 | 40 | mock('../utils/load-packument', loadPackument) 41 | mock('../utils/save-tarball', saveTarball) 42 | 43 | loadTarball = mock.reRequire('../utils/load-tarball') 44 | 45 | ipfs = { 46 | cat: sinon.stub() 47 | } 48 | }) 49 | 50 | afterEach(() => { 51 | mock.stopAll() 52 | }) 53 | 54 | it('should load a tarball from ipfs', async () => { 55 | const packageName = `a-module-${hat()}` 56 | const packageVersion = '1.0.0' 57 | const path = `/${packageName}/-/${packageName}-${packageVersion}.tgz` 58 | const pkg = { 59 | name: packageName, 60 | versions: { 61 | [packageVersion]: { 62 | dist: { 63 | cid: 'QmZEYeEin6wEB7WNyiT7stYTmbYFGy7BzM7T3hRDzRxTvY' 64 | } 65 | } 66 | } 67 | } 68 | 69 | loadPackument.withArgs(packageName, ipfs, config) 70 | .returns(pkg) 71 | 72 | ipfs.cat 73 | .withArgs(new CID(pkg.versions[packageVersion].dist.cid)) 74 | .returns(async function * () { // eslint-disable-line require-await 75 | yield Buffer.from('ok') 76 | }()) 77 | 78 | const result = await toBuffer(loadTarball(path, ipfs, config)) 79 | 80 | expect(result.toString()).to.equal('ok') 81 | }) 82 | 83 | it('should download a tarball that has no cid', async () => { 84 | const packageName = `a-module-${hat()}` 85 | const packageVersion = '1.0.0' 86 | const path = `/${packageName}/-/${packageName}-${packageVersion}.tgz` 87 | const pkg = { 88 | name: packageName, 89 | versions: { 90 | [packageVersion]: { 91 | dist: { 92 | 93 | } 94 | } 95 | } 96 | } 97 | 98 | loadPackument.withArgs(packageName, ipfs, config) 99 | .returns(pkg) 100 | 101 | saveTarball.withArgs(pkg.name, packageVersion, ipfs, config) 102 | .callsFake(() => { 103 | 
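/* emulate save-tarball writing the freshly added CID back into the cached packument */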
pkg.versions[packageVersion].dist.cid = 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn' 104 | }) 105 | 106 | ipfs.cat 107 | .withArgs(new CID('QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn')) 108 | .returns(async function * () { // eslint-disable-line require-await 109 | yield Buffer.from('also ok') 110 | }()) 111 | 112 | const result = await toBuffer(loadTarball(path, ipfs, config)) 113 | 114 | expect(result.toString()).to.equal('also ok') 115 | }) 116 | }) 117 | -------------------------------------------------------------------------------- /packages/common/utils/save-tarball.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('./retry-request') 4 | const crypto = require('crypto') 5 | const loadPackument = require('./load-packument') 6 | const savePackument = require('./save-packument') 7 | const log = require('./log') 8 | 9 | const saveTarball = async function (packageName, versionNumber, ipfs, config) { 10 | const packument = await loadPackument(packageName, ipfs, config) 11 | const version = packument.versions[versionNumber] 12 | 13 | validate(version, versionNumber, packageName) 14 | 15 | if (version.dist.cid) { 16 | log(`Skipping version ${versionNumber} of ${packageName} - already downloaded`) 17 | return 18 | } 19 | 20 | const startTime = Date.now() 21 | const cid = await downloadFile(version.dist.tarball, version.dist.shasum, ipfs, config) 22 | 23 | log(`🏄‍♀️ Added ${version.dist.tarball} with hash ${cid} in ${Date.now() - startTime}ms`) 24 | 25 | await updateCid(packageName, versionNumber, cid, ipfs, config) 26 | } 27 | 28 | const validate = (version, versionNumber, packageName) => { 29 | if (!version) { 30 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - version not in manifest`) 31 | } 32 | 33 | if (!version.dist) { 34 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no dist section`) 35 | } 36 | 37 | if (!version.dist.shasum) { 38 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no shasum`) 39 | } 40 | } 41 | 42 | const updateCid = async (packageName, versionNumber, cid, ipfs, config) => { 43 | const cidString = cid.toString('base32') 44 | 45 | while (true) { 46 | let packument = await loadPackument(packageName, ipfs, config) 47 | packument.versions[versionNumber].dist.cid = cidString 48 | 49 | await savePackument(packument, ipfs, config) 50 | 51 | packument = await loadPackument(packageName, ipfs, config) 52 | 53 | if (packument.versions[versionNumber].dist.cid === cidString) { 54 | return 55 | } 56 | 57 | log(`Manifest version cid ${packument.versions[versionNumber].dist.cid} did not equal ${cidString}`) 58 | } 59 | } 60 | 61 | const downloadFile = async (url, shasum, ipfs, config) => { 62 | log(`Downloading ${url}`) 63 | 64 | const hash = crypto.createHash('sha1') 65 | hash.setEncoding('hex') 66 | hash.on('error', () => {}) 67 | 68 | const stream = await request(Object.assign({}, config.request, { 69 | uri: url 70 | })) 71 | stream.pipe(hash) 72 | 73 | const { cid } = await ipfs.add(stream, { 74 | wrapWithDirectory: false, 75 | pin: config.clone.pin, 76 | cidVersion: 1, 77 | rawLeaves: true 78 | }) 79 | 80 | const result = hash.read() 81 | 82 | if (result !== shasum) { 83 | if (config.clone.pin) { 84 | // if we pinned the corrupt download, unpin it so it will get garbage collected later 85 | await ipfs.pin.rm(cid) 86 | } 87 | 88 | // we've already piped to the client at this point so can't 
retry the download 89 | // abort saving the CID of the corrupted download to our copy of the manifest 90 | // instead so we retry next time it's requested 91 | throw new Error(`File downloaded from ${url} had invalid shasum ${result} - expected ${shasum}`) 92 | } 93 | 94 | log(`File downloaded from ${url} had shasum ${result} - matched ${shasum}`) 95 | 96 | return cid 97 | } 98 | 99 | module.exports = saveTarball 100 | -------------------------------------------------------------------------------- /packages/common/test/retry-request.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const sinon = require('sinon') 6 | const expect = require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const hat = require('hat') 10 | const { 11 | PassThrough 12 | } = require('stream') 13 | 14 | describe('retry-request', () => { 15 | let retryRequest 16 | let request 17 | let requestPromise 18 | 19 | beforeEach(() => { 20 | requestPromise = sinon.stub() 21 | request = sinon.stub() 22 | 23 | mock('request-promise', requestPromise) 24 | mock('request', request) 25 | 26 | retryRequest = mock.reRequire('../utils/retry-request') 27 | }) 28 | 29 | afterEach(() => { 30 | mock.stopAll() 31 | }) 32 | 33 | it('should retry a request', async () => { 34 | const pkg = { 35 | name: `module-${hat()}` 36 | } 37 | 38 | requestPromise 39 | .onFirstCall() 40 | .rejects(new Error('404')) 41 | 42 | requestPromise 43 | .onSecondCall() 44 | .resolves(JSON.parse(JSON.stringify(pkg))) 45 | 46 | const result = await retryRequest({ 47 | uri: 'something', 48 | json: true 49 | }) 50 | 51 | expect(result).to.deep.equal(pkg) 52 | }) 53 | 54 | it('should retry a streaming request', (done) => { 55 | request 56 | .onFirstCall() 57 | .callsFake(() => { 58 | const stream = new PassThrough() 59 | 60 | setTimeout(() => { 61 | stream.emit('error', new Error('404')) 62 | }, 100) 63 | 64 | return stream 65 | }) 66 | 67 | request 68 | .onSecondCall() 69 | .callsFake(() => { 70 | const stream = new PassThrough() 71 | 72 | setTimeout(() => { 73 | stream.emit('data', 'hello') 74 | stream.end() 75 | }, 100) 76 | 77 | return stream 78 | }) 79 | 80 | retryRequest({ 81 | uri: 'something' 82 | }) 83 | .then((stream) => { 84 | let result 85 | 86 | stream.on('data', (data) => { 87 | result = data.toString('utf8') 88 | }) 89 | 90 | stream.on('end', () => { 91 | expect(result).to.equal('hello') 92 | 93 | done() 94 | }) 95 | }) 96 | .catch(error => { 97 | done(error) 98 | }) 99 | }) 100 | 101 | it('should retry a streaming request that fails load', (done) => { 102 | request 103 | .onFirstCall() 104 | .callsFake(() => { 105 | const stream = new PassThrough() 106 | 107 | setTimeout(() => { 108 | stream.emit('response', { 109 | statusCode: 400 110 | }) 111 | }, 100) 112 | 113 | return stream 114 | }) 115 | 116 | request 117 | .onSecondCall() 118 | .callsFake(() => { 119 | const stream = new PassThrough() 120 | 121 | setTimeout(() => { 122 | stream.emit('response', { 123 | statusCode: 200 124 | }) 125 | stream.emit('data', 'hello') 126 | stream.end() 127 | }, 100) 128 | 129 | return stream 130 | }) 131 | 132 | retryRequest({ 133 | uri: 'something' 134 | }) 135 | .then((stream) => { 136 | let result 137 | 138 | stream.on('data', (data) => { 139 | result = data.toString('utf8') 140 | }) 141 | 142 | stream.on('end', () => { 143 | expect(result).to.equal('hello') 144 | 145 | done() 146 | }) 147 | }) 148 | .catch(error => { 149 | 
done(error) 150 | }) 151 | }) 152 | }) 153 | -------------------------------------------------------------------------------- /packages/common/utils/start-ipfs.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const IpfsApi = require('ipfs-http-client') 4 | const ipfsdCtrl = require('ipfsd-ctl') 5 | const which = require('which-promise') 6 | const s3Repo = require('./s3-repo') 7 | const fsRepo = require('./fs-repo') 8 | const IPFS = require('ipfs') 9 | 10 | const cleanUpOps = [] 11 | 12 | const cleanUp = () => { 13 | Promise.all( 14 | cleanUpOps.map(op => op()) 15 | ) 16 | .then(() => { 17 | process.exit(0) 18 | }, () => process.exit(1)) 19 | } 20 | 21 | process.on('SIGTERM', cleanUp) 22 | process.on('SIGINT', cleanUp) 23 | 24 | const randomPort = () => { 25 | return Math.floor(Math.random() * 64535) + 1000 26 | } 27 | 28 | const spawn = (createArgs, spawnArgs = { init: true }) => { 29 | return new Promise((resolve, reject) => { 30 | ipfsdCtrl 31 | .create(createArgs) 32 | .spawn(spawnArgs, (error, node) => { 33 | if (error) { 34 | return reject(error) 35 | } 36 | 37 | resolve(node) 38 | }) 39 | }) 40 | } 41 | 42 | const startIpfs = async (config) => { 43 | if (config.ipfs.node === 'proc') { 44 | console.info('😈 Spawning an in-process IPFS node') // eslint-disable-line no-console 45 | 46 | if (config.ipfs.store === 's3') { 47 | config.ipfs.repo = s3Repo(config.ipfs.s3) 48 | } 49 | 50 | if (config.ipfs.store === 'fs') { 51 | config.ipfs.repo = fsRepo(config.ipfs.fs) 52 | } 53 | 54 | const node = await IPFS.create({ 55 | repo: config.ipfs.repo, 56 | EXPERIMENTAL: { 57 | sharding: true 58 | }, 59 | pubsub: { 60 | enabled: true 61 | }, 62 | preload: { 63 | enabled: false 64 | }, 65 | config: { 66 | Addresses: { 67 | Swarm: [ 68 | `/ip4/0.0.0.0/tcp/${config.ipfs.port || randomPort()}`, 69 | `/ip4/127.0.0.1/tcp/${config.ipfs.wsPort || randomPort()}/ws` 70 | ], 71 | API: `/ip4/127.0.0.1/tcp/${config.ipfs.apiPort || randomPort()}`, 72 | Gateway: `/ip4/127.0.0.1/tcp/${config.ipfs.gatewayPort || randomPort()}` 73 | } 74 | } 75 | }) 76 | 77 | process.on('exit', () => { 78 | node.stop() 79 | }) 80 | 81 | return node 82 | } else if (config.ipfs.node === 'disposable') { 83 | console.info('😈 Spawning an in-process disposable IPFS node') // eslint-disable-line no-console 84 | 85 | return spawn({ 86 | type: 'proc', 87 | exec: IPFS 88 | }) 89 | } else if (config.ipfs.node === 'js') { 90 | console.info('😈 Spawning a js-IPFS node') // eslint-disable-line no-console 91 | 92 | return spawn({ 93 | type: 'js', 94 | exec: await which('jsipfs') 95 | }) 96 | } else if (config.ipfs.node === 'go') { 97 | console.info('😈 Spawning a go-IPFS node') // eslint-disable-line no-console 98 | 99 | return spawn({ 100 | type: 'go', 101 | exec: await which('ipfs') 102 | }) 103 | } 104 | 105 | console.info(`😈 Connecting to a remote IPFS node at ${config.ipfs.node}`) // eslint-disable-line no-console 106 | 107 | return { 108 | api: new IpfsApi(config.ipfs.node), 109 | stop: (cb) => cb() 110 | } 111 | } 112 | 113 | const createIpfs = options => { 114 | return async () => { 115 | const ipfs = await startIpfs(options) 116 | 117 | cleanUpOps.push(() => { 118 | return new Promise((resolve) => { 119 | if (options.ipfs.node !== 'proc') { 120 | return resolve() 121 | } 122 | 123 | ipfs.stop(() => { 124 | console.info('😈 IPFS node stopped') // eslint-disable-line no-console 125 | 126 | resolve() 127 | }) 128 | }) 129 | }) 130 | 131 | return ipfs 132 | } 133 | } 134 | 135 | module.exports = createIpfs 136
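For orientation, here is a minimal sketch of how the `createIpfs` factory above might be driven. It is a sketch under stated assumptions rather than code from the repo: the require path mirrors the `ipfs-registry-mirror-common` requires used elsewhere in this document, the option shape mirrors the `ipfs` section of the config modules, and the repo path is made up.

```js
'use strict'

// hypothetical usage of start-ipfs - not part of the repo
const createIpfs = require('ipfs-registry-mirror-common/utils/start-ipfs')

const getAnIpfs = createIpfs({
  ipfs: {
    node: 'proc', // spawn an in-process IPFS node
    store: 'fs', // back it with a filesystem repo
    fs: {
      repo: '/tmp/example-ipfs-repo' // made-up path
    }
  }
})

// createIpfs returns a thunk - calling it starts the node and
// registers it with the SIGINT/SIGTERM cleanup handlers defined above
getAnIpfs()
  .then(ipfs => ipfs.id())
  .then(id => console.info('started node', id.id))
  .catch(err => console.error('failed to start ipfs', err))
```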
| -------------------------------------------------------------------------------- /packages/replication-master/src/core/save-tarballs.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const debug = require('debug')('ipfs:registry-mirror:replicate:save-tarball') 4 | const crypto = require('crypto') 5 | const { default: PQueue } = require('p-queue') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | const { urlSource } = require('ipfs') 8 | 9 | let queue 10 | 11 | const saveTarball = async (packument, versionNumber, ipfs, options) => { 12 | const version = packument.versions[versionNumber] 13 | 14 | validate(version, versionNumber, packument.name) 15 | 16 | if (version.cid) { 17 | debug(`Skipping version ${versionNumber} of ${packument.name} - already downloaded`) 18 | 19 | return 20 | } 21 | 22 | const start = Date.now() 23 | const cid = await downloadFile(version.dist.tarball, version.dist.shasum, ipfs, options) 24 | version.cid = `/ipfs/${cid}` 25 | 26 | log(`🏄‍♀️ Added ${version.dist.tarball} with CID ${version.cid} in ${Date.now() - start}ms`) 27 | } 28 | 29 | const validate = (version, versionNumber, packageName) => { 30 | if (!version) { 31 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - version not in manifest`) 32 | } 33 | 34 | if (!version.dist) { 35 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no dist section`) 36 | } 37 | 38 | if (!version.dist.tarball) { 39 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no tarball`) 40 | } 41 | 42 | if (!version.dist.shasum) { 43 | throw new Error(`Skipping invalid version ${versionNumber} of ${packageName} - no shasum`) 44 | } 45 | } 46 | 47 | const downloadFile = async (url, shasum, ipfs, options) => { 48 | for (let i = 0; i < options.request.retries; i++) { 49 | try { 50 | log(`⬇️ Downloading ${url}`) 51 | const start = Date.now() 52 | 53 | const { 54 | cid 55 | } = await ipfs.add(urlSource(url), { 56 | wrapWithDirectory: false, 57 | pin: options.clone.pin, 58 | version: 1, 59 | rawLeaves: true 60 | }) 61 | 62 | log(`✅ Downloaded ${url} in ${Date.now() - start}ms`) 63 | 64 | await validateShasum(cid, shasum, url, ipfs, options) 65 | 66 | log(`🌍 Added ${url} to IPFS with CID ${cid} in ${Date.now() - start}ms`) 67 | 68 | return cid 69 | } catch (err) { 70 | log(`💥 Downloading ${url} failed`, err) 71 | } 72 | } 73 | 74 | throw new Error(`💥 ${options.request.retries} retries exceeded while downloading ${url}`) 75 | } 76 | 77 | const validateShasum = async (cid, shasum, url, ipfs, options) => { 78 | const hashStart = Date.now() 79 | const hash = crypto.createHash('sha1') 80 | hash.on('error', () => {}) 81 | 82 | for await (const buf of ipfs.cat(cid, { 83 | signal: options.signal 84 | })) { 85 | hash.update(buf) 86 | } 87 | 88 | const result = hash.digest('hex') 89 | 90 | if (result !== shasum) { 91 | throw new Error(`Shasum of ${url} failed: ${result} !== ${shasum}`) 92 | } 93 | 94 | log(`🙆 Checked shasum of ${url} in ${Date.now() - hashStart}ms`) 95 | } 96 | 97 | const saveTarballs = (packument, ipfs, options) => { 98 | if (!queue) { 99 | queue = new PQueue({ concurrency: options.request.concurrency }) 100 | } 101 | 102 | return Promise.all( 103 | Object.keys(packument.versions || {}) 104 | .map(versionNumber => { 105 | return queue.add(async () => { 106 | try { 107 | await saveTarball(packument, versionNumber, ipfs, options) 108 | } catch (err) { 109 | log(`💥 Error
storing tarball ${packument.name} ${versionNumber}`, err) 110 | } 111 | }) 112 | }) 113 | ) 114 | } 115 | 116 | module.exports = saveTarballs 117 | -------------------------------------------------------------------------------- /packages/common/utils/load-packument.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const request = require('./retry-request') 4 | const debug = require('debug')('ipfs:registry-mirror:utils:load-packument') 5 | const savePackument = require('./save-packument') 6 | const timeout = require('./timeout-promise') 7 | const log = require('./log') 8 | const toBuffer = require('it-to-buffer') 9 | 10 | const loadFromMfs = async (packageName, ipfs, options) => { 11 | const mfsPath = `${options.ipfs.prefix}/${packageName}` 12 | 13 | try { 14 | const start = Date.now() 15 | 16 | debug(`Reading from mfs ${mfsPath}`) 17 | 18 | const buf = await toBuffer(ipfs.files.read(mfsPath)) 19 | 20 | debug(`Read from mfs ${mfsPath} in ${Date.now() - start}ms`) 21 | 22 | return JSON.parse(buf.toString('utf8')) 23 | } catch (error) { 24 | if (error.code === 'ERR_NOT_FOUND') { 25 | return debug(`${mfsPath} not in MFS`) 26 | } 27 | 28 | debug(`Could not read ${mfsPath}`, error) 29 | } 30 | } 31 | 32 | const requestFromRegistry = async (packageName, registry, options) => { 33 | const uri = `${registry}/${packageName}` 34 | 35 | try { 36 | debug(`Fetching ${uri}`) 37 | const start = Date.now() 38 | const json = await request(Object.assign({}, options.request, { 39 | uri, 40 | json: true 41 | })) 42 | 43 | debug(`Fetched ${uri} in ${Date.now() - start}ms`) 44 | 45 | return json 46 | } catch (error) { 47 | debug(`Could not download ${uri}`, error) 48 | } 49 | } 50 | 51 | const loadFromRegistry = async (packageName, ipfs, options) => { 52 | for (const registry of options.registries) { 53 | let result 54 | 55 | try { 56 | result = await timeout(requestFromRegistry(packageName, registry, options), options.registryReadTimeout) 57 | } catch (error) { 58 | if (error.code === 'ETIMEOUT') { 59 | debug(`Fetching ${packageName} timed out after ${options.registryReadTimeout}ms`) 60 | } 61 | } 62 | 63 | if (result) { 64 | return result 65 | } 66 | } 67 | } 68 | 69 | const findNewVersions = (cached, upstream) => { 70 | const cachedVersions = (cached && cached.versions) || {} 71 | const upstreamVersions = (upstream && upstream.versions) || {} 72 | 73 | return Object.keys(upstreamVersions) 74 | .filter(version => !cachedVersions[version]) 75 | } 76 | 77 | const loadPackument = async (packageName, ipfs, options) => { 78 | const mfsVersion = await loadFromMfs(packageName, ipfs, options) 79 | let registryVersion 80 | let willDownload = true 81 | 82 | if (mfsVersion) { 83 | const modified = new Date(mfsVersion.updated || 0) 84 | willDownload = (Date.now() - options.registryUpdateInterval) > modified.getTime() 85 | } 86 | 87 | if (willDownload) { 88 | registryVersion = await loadFromRegistry(packageName, ipfs, options) 89 | } 90 | 91 | if (!mfsVersion && !registryVersion) { 92 | throw new Error(`${packageName} not found, tried upstream registry: ${willDownload}`) 93 | } 94 | 95 | const newVersions = findNewVersions(mfsVersion, registryVersion) 96 | 97 | if (mfsVersion && !newVersions.length) { 98 | // we have a cached version and either fetching from npm failed or 99 | // our cached version matches the npm version 100 | return mfsVersion 101 | } 102 | 103 | if (newVersions.length) { 104 | log(`🆕 New version${newVersions.length > 1 ?
's' : ''} of ${packageName} detected - ${newVersions.join(', ')}`) 105 | } 106 | 107 | // save our existing versions so we don't re-download tarballs we already have 108 | if (mfsVersion) { 109 | Object.keys(mfsVersion.versions || {}).forEach(versionNumber => { 110 | registryVersion.versions[versionNumber] = mfsVersion.versions[versionNumber] 111 | }) 112 | } 113 | 114 | // store it for next time 115 | await savePackument(registryVersion, ipfs, options) 116 | 117 | return registryVersion 118 | } 119 | 120 | module.exports = loadPackument 121 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/config.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const toBoolean = require('ipfs-registry-mirror-common/utils/to-boolean') 4 | const option = require('ipfs-registry-mirror-common/utils/option') 5 | 6 | module.exports = (overrides = {}) => { 7 | return { 8 | registries: (overrides.registries || []).concat(option(process.env.REGISTRY, overrides.registry)).filter(Boolean), 9 | registryUpdateInterval: option(process.env.REGISTRY_UPDATE_INTERVAL, overrides.registryUpdateInterval), 10 | registryReadTimeout: option(Number(process.env.REGISTRY_READ_TIMEOUT), overrides.registryReadTimeout), 11 | 12 | http: { 13 | protocol: option(process.env.HTTP_PROTOCOL, overrides.httpProtocol), 14 | host: option(process.env.HTTP_HOST, overrides.httpHost), 15 | port: option(Number(process.env.HTTP_PORT), overrides.httpPort) 16 | }, 17 | 18 | external: { 19 | ip: option(process.env.EXTERNAL_IP, overrides.externalIp), 20 | protocol: option(process.env.EXTERNAL_PROTOCOL, overrides.externalProtocol), 21 | host: option(process.env.EXTERNAL_HOST, overrides.externalHost), 22 | port: option(process.env.EXTERNAL_PORT, overrides.externalPort) 23 | }, 24 | 25 | ipfs: { 26 | pass: option(process.env.IPFS_PASS, overrides.ipfsPass), 27 | node: option(process.env.IPFS_NODE, overrides.ipfsNode), 28 | port: option(process.env.IPFS_SWARM_PORT, overrides.ipfsPort), 29 | prefix: option(process.env.IPFS_MFS_PREFIX, overrides.ipfsMfsPrefix), 30 | flush: option(toBoolean(process.env.IPFS_FLUSH), overrides.ipfsFlush), 31 | store: option(process.env.IPFS_STORE_TYPE, overrides.ipfsStoreType), 32 | 33 | s3: { 34 | region: option(process.env.STORE_S3_REGION, overrides.ipfsStoreS3Region), 35 | bucket: option(process.env.STORE_S3_BUCKET, overrides.ipfsStoreS3Bucket), 36 | path: option(process.env.STORE_S3_PATH, overrides.ipfsStoreS3Path), 37 | accessKeyId: option(process.env.STORE_S3_ACCESS_KEY_ID, overrides.ipfsStoreS3AccessKeyId), 38 | secretAccessKey: option(process.env.STORE_S3_SECRET_ACCESS_KEY, overrides.ipfsStoreS3SecretAccessKey), 39 | createIfMissing: option(process.env.STORE_S3_CREATE_IF_MISSING, overrides.ipfsStoreS3CreateIfMissing) 40 | }, 41 | 42 | fs: { 43 | repo: option(process.env.IPFS_REPO, overrides.ipfsRepo), 44 | port: option(process.env.IPFS_REPO_PORT, overrides.ipfsRepoPort) 45 | } 46 | }, 47 | 48 | follow: { 49 | ua: option(process.env.FOLLOW_USER_AGENT, overrides.followUserAgent), 50 | registry: option(process.env.FOLLOW_REGISTRY, overrides.followRegistry), 51 | replicator: option(process.env.FOLLOW_REPLICATOR, overrides.followReplicator), 52 | concurrency: option(Number(process.env.FOLLOW_CONCURRENCY), overrides.followConcurrency), 53 | inactivityTimeout: option(process.env.FOLLOW_INACTIVITY_MS, overrides.followInactivityMs), 54 | seqFile: option(process.env.FOLLOW_SEQ_FILE,
overrides.followSeqFile) 55 | }, 56 | 57 | clone: { 58 | delay: option(Number(process.env.CLONE_DELAY), overrides.cloneDelay), 59 | pin: option(Number(process.env.CLONE_PIN), overrides.clonePin), 60 | publish: option(process.env.CLONE_PUBLISH, overrides.clonePublish), 61 | concurrency: parseInt(option(process.env.CLONE_CONCURRENCY, overrides.cloneConcurrency), 10) 62 | }, 63 | 64 | request: { 65 | retries: option(process.env.REQUEST_RETRIES, overrides.requestRetries), 66 | retryDelay: option(process.env.REQUEST_RETRY_DELAY, overrides.requestRetryDelay), 67 | timeout: option(process.env.REQUEST_TIMEOUT, overrides.requestTimeout), 68 | forever: option(toBoolean(process.env.REQUEST_KEEP_ALIVE), overrides.requestKeepAlive), 69 | concurrency: parseInt(option(process.env.REQUEST_CONCURRENCY, overrides.requestConcurrency), 10) 70 | }, 71 | 72 | mdns: { 73 | enabled: Boolean(process.env.MDNS_NAME || overrides.mdnsAdvert), 74 | name: option(process.env.MDNS_NAME, overrides.mdnsAdvert) 75 | } 76 | } 77 | } 78 | 79 | module.exports.option = option 80 | module.exports.toBoolean = toBoolean 81 | -------------------------------------------------------------------------------- /packages/replication-master/src/core/clone/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const follow = require('@achingbrain/follow-registry') 4 | const debug = require('debug')('ipfs:registry-mirror:clone') 5 | const sequenceFile = require('../sequence-file') 6 | const log = require('ipfs-registry-mirror-common/utils/log') 7 | const cluster = require('cluster') 8 | const delay = require('delay') 9 | const mainThreadWorker = require('./main-thread-worker') 10 | 11 | let processed = [] 12 | 13 | const stats = { 14 | update () { 15 | processed.push(Date.now()) 16 | const oneHourAgo = Date.now() - 3600000 17 | 18 | processed = processed.filter(time => { 19 | return time > oneHourAgo 20 | }) 21 | }, 22 | modulesPerSecond () { 23 | return (processed.length / 3600).toFixed(3) 24 | } 25 | } 26 | 27 | const createWorker = () => { 28 | return new Promise((resolve, reject) => { 29 | const worker = cluster.fork() 30 | worker.on('online', () => { 31 | resolve() 32 | }) 33 | worker.on('error', (err) => { 34 | reject(err) 35 | }) 36 | worker.on('disconnect', () => { 37 | // console.info('Worker disconnected') 38 | }) 39 | worker.on('exit', (code, signal) => { 40 | // console.info('Worker exited with code', code, 'and signal', signal) 41 | }) 42 | }) 43 | } 44 | 45 | const fillWorkerPool = async (options) => { 46 | // ensure worker pool is full 47 | if (Object.keys(cluster.workers).length === options.clone.concurrency) { 48 | return 49 | } 50 | 51 | while (Object.keys(cluster.workers).length < options.clone.concurrency) { 52 | await createWorker() 53 | } 54 | } 55 | 56 | const findWorker = async (ipfs, options) => { 57 | if (options.clone.concurrency === 0) { 58 | return mainThreadWorker(ipfs) 59 | } 60 | 61 | await fillWorkerPool(options) 62 | 63 | // wait for a free worker 64 | while (true) { 65 | const worker = Object 66 | .values(cluster.workers) 67 | .find(worker => !worker.processing) 68 | 69 | if (worker) { 70 | return worker 71 | } 72 | 73 | await delay(5000) 74 | } 75 | } 76 | 77 | module.exports = async (emitter, signal, ipfs, options) => { 78 | log(`🦎 Replicating registry with concurrency ${options.follow.concurrency}...`) 79 | 80 | if (options.clone.concurrency) { 81 | log(`👷 Using ${options.clone.concurrency} workers to process updates`) 82 | } else { 83 | log('👷
Processing package updates on main thread') 84 | } 85 | 86 | await fillWorkerPool(options) 87 | 88 | while (true) { 89 | try { 90 | for await (const { packument, seq, done } of follow({ ...options.follow, seq: sequenceFile(options) })) { 91 | if (signal.aborted) { 92 | return 93 | } 94 | 95 | if (!packument || !packument.name) { 96 | // invalid response from npm 97 | done().then(() => {}, () => {}) 98 | continue 99 | } 100 | 101 | const worker = await findWorker(ipfs, options) 102 | worker.updateStart = Date.now() 103 | worker.processing = true 104 | 105 | worker.once('message', (message) => { 106 | worker.processing = false 107 | 108 | if (message.error) { 109 | const err = new Error(message.error.message) 110 | err.stack = message.error.stack 111 | err.code = message.error.code 112 | 113 | debug(err) 114 | log(`💥 [${message.seq}] error processing ${message.name}`, err) 115 | } else { 116 | stats.update() 117 | 118 | log(`🦕 [${message.seq}] processed ${message.name} in ${Date.now() - worker.updateStart}ms, ${stats.modulesPerSecond()} modules/s`) 119 | 120 | emitter.emit('processed', message.name) 121 | emitter.emit('seq', message.seq) 122 | } 123 | 124 | done().then(() => {}, () => {}) 125 | }) 126 | 127 | worker.send({ 128 | packument, 129 | seq, 130 | options 131 | }) 132 | } 133 | } catch (err) { 134 | log('💥 Feed error', err) 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | proxy: 6 | image: jwilder/nginx-proxy:alpine 7 | mem_limit: 1024m 8 | links: 9 | - replicate 10 | - registry 11 | ports: 12 | - '80:80' 13 | - '443:443' 14 | logging: 15 | driver: "json-file" 16 | options: 17 | max-size: "1m" 18 | max-file: "3" 19 | volumes: 20 | - /var/run/docker.sock:/tmp/docker.sock:ro 21 | - /etc/nginx/vhost.d 22 | - /usr/share/nginx/html 23 | - /etc/nginx/certs 24 | - ./conf/proxy.conf:/etc/nginx/proxy.conf 25 | restart: 'always' 26 | 27 | letsencrypt-nginx-proxy-companion: 28 | image: jrcs/letsencrypt-nginx-proxy-companion 29 | mem_limit: 1024m 30 | volumes: 31 | - /var/run/docker.sock:/var/run/docker.sock:ro 32 | volumes_from: 33 | - proxy 34 | 35 | replicate: 36 | build: 37 | context: . 38 | dockerfile: packages/replication-master/Dockerfile 39 | restart: 'always' 40 | env_file: .env 41 | mem_limit: 4608m 42 | environment: 43 | - VIRTUAL_HOST=replication.registry.js.ipfs.io 44 | - VIRTUAL_PORT=8080 45 | - LETSENCRYPT_HOST=replication.registry.js.ipfs.io 46 | - LETSENCRYPT_EMAIL=alex.potsides@protocol.ai 47 | - NODE_ENV=${NODE_ENV} 48 | - EXTERNAL_PROTOCOL=https 49 | - EXTERNAL_HOST=registry.js.ipfs.io 50 | - EXTERNAL_PORT=443 51 | - EXTERNAL_IP=35.178.192.119 52 | - IPFS_STORE_TYPE=s3 53 | - STORE_S3_REGION=${STORE_S3_REGION} 54 | - STORE_S3_BUCKET=${STORE_S3_BUCKET} 55 | - STORE_S3_ACCESS_KEY_ID=${STORE_S3_ACCESS_KEY_ID} 56 | - STORE_S3_SECRET_ACCESS_KEY=${STORE_S3_SECRET_ACCESS_KEY} 57 | - STORE_S3_PATH=replication-master 58 | - FOLLOW_SEQ_FILE=seq.txt 59 | - CLONE_DELAY=30000 60 | - CLONE_CONCURRENCY=0 61 | - FOLLOW_CONCURRENCY=5 62 | - REQUEST_CONCURRENCY=5 63 | logging: 64 | driver: "json-file" 65 | options: 66 | max-size: "1m" 67 | max-file: "3" 68 | 69 | registry: 70 | build: 71 | context: .
72 | dockerfile: packages/registry-mirror/Dockerfile 73 | restart: 'always' 74 | env_file: .env 75 | mem_limit: 2048m 76 | volumes: 77 | - /var/run/docker.sock:/tmp/docker.sock:ro 78 | environment: 79 | - VIRTUAL_HOST=registry.js.ipfs.io 80 | - VIRTUAL_PORT=8080 81 | - LETSENCRYPT_HOST=registry.js.ipfs.io 82 | - LETSENCRYPT_EMAIL=alex.potsides@protocol.ai 83 | - NODE_ENV=${NODE_ENV} 84 | - EXTERNAL_PROTOCOL=https 85 | - EXTERNAL_HOST=registry.js.ipfs.io 86 | - EXTERNAL_PORT=443 87 | - EXTERNAL_IP=35.178.192.119 88 | - IPFS_STORE_TYPE=s3 89 | - STORE_S3_REGION=${STORE_S3_REGION} 90 | - STORE_S3_BUCKET=${STORE_S3_BUCKET} 91 | - STORE_S3_ACCESS_KEY_ID=${STORE_S3_ACCESS_KEY_ID} 92 | - STORE_S3_SECRET_ACCESS_KEY=${STORE_S3_SECRET_ACCESS_KEY} 93 | - STORE_S3_PATH=worker 94 | - PUBSUB_MASTER=http://replicate:8080 95 | - REQUEST_MAX_SOCKETS=20 96 | links: 97 | - replicate 98 | ports: 99 | - 10000-10009:10000 100 | - 10010-10019:10001 101 | - 10020-10029:10002 102 | - 10030-10039:10003 103 | - 10040-10049:10004 104 | - 10050-10059:10005 105 | - 10060-10069:10006 106 | - 10070-10079:10007 107 | - 10080-10089:10008 108 | - 10090-10099:10009 109 | logging: 110 | driver: "json-file" 111 | options: 112 | max-size: "1m" 113 | max-file: "3" 114 | 115 | spiped: 116 | restart: always 117 | env_file: .env 118 | build: 119 | context: . 120 | dockerfile: monitoring/spiped 121 | args: 122 | SPIPED_KEY: ${SPIPED_KEY} 123 | NETDATA_EIP: ${NETDATA_EIP} 124 | mem_limit: 1024m 125 | 126 | netdata: 127 | restart: always 128 | mem_limit: 1024m 129 | hostname: registry.js.ipfs.io 130 | env_file: .env 131 | build: 132 | context: . 133 | dockerfile: monitoring/netdata 134 | args: 135 | NETDATA_API_KEY: ${NETDATA_API_KEY} 136 | cap_add: 137 | - SYS_PTRACE 138 | security_opt: 139 | - apparmor:unconfined 140 | volumes: 141 | - /proc:/host/proc:ro 142 | - /sys:/host/sys:ro 143 | - /var/run/docker.sock:/var/run/docker.sock:ro 144 | environment: 145 | # https://docs.netdata.cloud/packaging/docker/#docker-container-names-resolution 146 | - PGID=115 147 | links: 148 | - spiped 149 | depends_on: 150 | - spiped 151 | - replicate 152 | - registry 153 | -------------------------------------------------------------------------------- /packages/registry-mirror/src/cli/bin.js: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env node 2 | 3 | 'use strict' 4 | 5 | const log = require('ipfs-registry-mirror-common/utils/log') 6 | 7 | if (process.env.NODE_ENV !== 'production') { 8 | const url = '/-/dashboard' 9 | 10 | log(`🔍 Enabling profiling at ${url}`) 11 | 12 | try { 13 | require('appmetrics-dash').attach({ 14 | url 15 | }) 16 | } catch (error) { 17 | log('💥 Enabling profiling failed', error) 18 | } 19 | } 20 | 21 | require('dnscache')({ enable: true }) 22 | 23 | const pkg = require('../../package') 24 | const path = require('path') 25 | 26 | require('dotenv').config({ 27 | path: path.join(process.env.HOME, '.ipfs-npm-registry-mirror/registry-mirror.env') 28 | }) 29 | 30 | process.title = pkg.name 31 | 32 | const yargs = require('yargs') 33 | 34 | yargs.command('$0', 'Starts a registry server that uses IPFS to fetch js dependencies', (yargs) => { // eslint-disable-line no-unused-expressions 35 | yargs 36 | .option('registry', { 37 | describe: 'Which registry we are mirroring', 38 | default: 'https://registry.npmjs.com' 39 | }) 40 | .option('registry-update-interval', { 41 | describe: 'Only request the manifest for a given module every so many ms', 42 | default: 60000 43 | }) 44 | .option('registry-upload-size-limit', { 45 | describe: 'How large a file upload to allow when proxying for the registry', 46 | default: '1024MB' 47 | }) 48 | .option('registry-read-timeout', { 49 | describe: 'How long to wait for registry requests', 50 | default: 60000 51 | }) 52 | 53 | .option('http-protocol', { 54 | describe: 'Which protocol to use with the server', 55 | default: 'http' 56 | }) 57 | .option('http-host', { 58 | describe: 'Which host to listen to requests on', 59 | default: 'localhost' 60 | }) 61 | .option('http-port', { 62 | describe: 'Which port to listen to requests on', 63 | default: 8080 64 | }) 65 | 66 | .option('external-protocol', { 67 | describe: 'Which protocol to use when reaching this mirror' 68 | }) 69 | .option('external-host', { 70 | describe: 'Which host to use when reaching this mirror' 71 | }) 72 | .option('external-port', { 73 | describe: 'Which port to use when reaching this mirror' 74 | }) 75 | 76 | .option('ipfs-port', { 77 | describe: 'Which port to accept IPFS connections on', 78 | default: 4001 79 | }) 80 | .option('ipfs-mfs-prefix', { 81 | describe: 'Which mfs prefix to use', 82 | default: '/npm-registry' 83 | }) 84 | .option('ipfs-flush', { 85 | describe: 'Whether to flush the MFS cache', 86 | default: true 87 | }) 88 | .option('ipfs-repo', { 89 | describe: 'The path to the IPFS repo you wish to use', 90 | default: path.join(process.env.HOME, '.jsipfs') 91 | }) 92 | .option('ipfs-repo-port', { 93 | describe: 'The port for level workers to connect to', 94 | default: 9000 95 | }) 96 | .option('ipfs-store-type', { 97 | describe: 'Which type of datastore to use - fs, s3, etc', 98 | default: 'fs' 99 | }) 100 | .option('ipfs-store-s3-region', { 101 | describe: 'The s3 region to use' 102 | }) 103 | .option('ipfs-store-s3-bucket', { 104 | describe: 'The s3 bucket to use' 105 | }) 106 | .option('ipfs-store-s3-path', { 107 | describe: 'The path to use in an s3 bucket' 108 | }) 109 | .option('ipfs-store-s3-access-key-id', { 110 | describe: 'The s3 access key id to use' 111 | }) 112 | .option('ipfs-store-s3-secret-access-key', { 113 | describe: 'The s3 secret access key to use' 114 | }) 115 | .option('ipfs-store-s3-create-if-missing', { 116 | describe: 'Whether to create the bucket if it is missing', 117 | default: false 118 | }) 119 | 120 | .option('pubsub-master', { 121 | describe: 'The url
of the pubsub replication master', 122 | default: 'https://replication.registry.js.ipfs.io' 123 | }) 124 | 125 | .option('clone-pin', { 126 | describe: 'Whether to pin cloned modules', 127 | default: false 128 | }) 129 | 130 | .option('request-retries', { 131 | describe: 'How many times to retry when downloading manifests and tarballs from the registry', 132 | default: 5 133 | }) 134 | .option('request-retry-delay', { 135 | describe: 'How long in ms to wait between retries', 136 | default: 1000 137 | }) 138 | .option('request-timeout', { 139 | describe: 'How long in ms we should wait when requesting files', 140 | default: 30000 141 | }) 142 | .option('request-keep-alive', { 143 | describe: 'Whether to re-use connections', 144 | default: true 145 | }) 146 | .option('request-max-sockets', { 147 | describe: 'How many concurrent requests to have in flight', 148 | default: 100 149 | }) 150 | }, require('../core')) 151 | .argv 152 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | <img src="img/ip-npm.png" alt="npm on IPFS logo"> 3 |

 4 | 5 | # ipfs-npm-registry-mirror 6 | 7 | [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) 8 | [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) 9 | [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) 10 | [![Build Status](https://flat.badgen.net/travis/ipfs-shipyard/ipfs-npm-registry-mirror)](https://travis-ci.com/ipfs-shipyard/ipfs-npm-registry-mirror) 11 | [![Code Coverage](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs-shipyard/ipfs-npm-registry-mirror) 12 | [![Dependency Status](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror.svg?style=flat-square)](https://david-dm.org/ipfs-shipyard/ipfs-npm-registry-mirror) 13 | 14 | > An npm mirror that adds files to IPFS and makes them available over the distributed web! 15 | 16 | ## Usage 17 | 18 | ```console 19 | # with docker installed 20 | $ git clone https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git 21 | $ cd ipfs-npm-registry-mirror 22 | $ ./deploy.sh 23 | ``` 24 | 25 | ## Overview 26 | 27 | There are two docker images: a [replication-master](./packages/replication-master/README.md), which continually syncs the npm registry and all of its packages, and a [registry-mirror](./packages/registry-mirror/README.md), which serves files to clients. 28 | 29 | The replication-master publishes notifications of new packages to the mirrors via [pubsub](https://ipfs.io/blog/25-pubsub/); the mirrors then save the [CID](https://www.npmjs.com/package/cids)s of newly published modules and use them to resolve those modules on the IPFS network. 30 | 31 | You can either use the [public http mirror](https://registry.js.ipfs.io) with npm/yarn (e.g. pass `--registry=https://registry.js.ipfs.io` to npm or yarn) or use the [`ipfs-npm`](https://www.npmjs.com/package/ipfs-npm) client directly. 32 | 33 | ## Lead Maintainer 34 | 35 | [Alex Potsides](https://github.com/achingbrain) 36 | 37 | 38 | ## Deployment 39 | 40 | Requirements: 41 | 42 | * Docker 43 | * docker-compose `v1.24.0-rc1` or later. 44 | 45 | ```console 46 | $ git clone https://github.com/ipfs-shipyard/ipfs-npm-registry-mirror.git 47 | $ cd ipfs-npm-registry-mirror 48 | $ echo NODE_ENV=production > .env 49 | $ ./deploy.sh 50 | ``` 51 | 52 | An example `docker-compose.yml`, adapted for a local deployment: 53 | ```yml 54 | version: '2' 55 | 56 | services: 57 | 58 | proxy: 59 | image: jwilder/nginx-proxy:alpine 60 | mem_limit: 1024m 61 | links: 62 | - replicate 63 | - registry 64 | ports: 65 | - '80:80' 66 | - '443:443' 67 | logging: 68 | driver: "json-file" 69 | options: 70 | max-size: "1m" 71 | max-file: "3" 72 | volumes: 73 | - /var/run/docker.sock:/tmp/docker.sock:ro 74 | - /etc/nginx/vhost.d 75 | - /usr/share/nginx/html 76 | - /etc/nginx/certs 77 | - ./conf/proxy.conf:/etc/nginx/proxy.conf 78 | restart: 'always' 79 | 80 | replicate: 81 | build: 82 | context: .
83 | dockerfile: packages/replication-master/Dockerfile 84 | restart: 'always' 85 | env_file: .env 86 | mem_limit: 4608m 87 | environment: 88 | - VIRTUAL_HOST=replication.rig.home 89 | - VIRTUAL_PORT=8080 90 | - NODE_ENV=${NODE_ENV} 91 | - EXTERNAL_PROTOCOL=http 92 | - EXTERNAL_HOST=rig.home 93 | - EXTERNAL_PORT=80 94 | - EXTERNAL_IP=192.168.1.112 95 | - IPFS_STORE_TYPE=fs 96 | - IPFS_REPO=/usr/local/ipfs-npm-registry-mirror/replication-master 97 | - FOLLOW_SEQ_FILE=/usr/local/ipfs-npm-registry-mirror/seq.txt 98 | - CLONE_DELAY=30000 99 | - FOLLOW_CONCURRENCY=5 100 | - REQUEST_CONCURRENCY=5 101 | volumes: 102 | - /var/run/docker.sock:/tmp/docker.sock:ro 103 | - /usr/local/ipfs-npm-registry-mirror:/usr/local/ipfs-npm-registry-mirror 104 | logging: 105 | driver: "json-file" 106 | options: 107 | max-size: "100m" 108 | max-file: "3" 109 | 110 | registry: 111 | build: 112 | context: . 113 | dockerfile: packages/registry-mirror/Dockerfile 114 | restart: 'always' 115 | env_file: .env 116 | mem_limit: 2048m 117 | volumes: 118 | - /var/run/docker.sock:/tmp/docker.sock:ro 119 | environment: 120 | - VIRTUAL_HOST=rig.home 121 | - VIRTUAL_PORT=8080 122 | - NODE_ENV=${NODE_ENV} 123 | - EXTERNAL_PROTOCOL=http 124 | - EXTERNAL_HOST=rig.home 125 | - EXTERNAL_PORT=80 126 | - EXTERNAL_IP=192.168.1.112 127 | - IPFS_STORE_TYPE=fs 128 | - IPFS_REPO=/usr/local/ipfs-npm-registry-mirror/worker 129 | - PUBSUB_MASTER=http://replicate:8080 130 | - REQUEST_MAX_SOCKETS=20 131 | volumes: 132 | - /var/run/docker.sock:/tmp/docker.sock:ro 133 | - /usr/local/ipfs-npm-registry-mirror:/usr/local/ipfs-npm-registry-mirror 134 | links: 135 | - replicate 136 | ports: 137 | - "10000:10000" 138 | - "10001:10001" 139 | - "10002:10002" 140 | - "10003:10003" 141 | - "10004:10004" 142 | - "10005:10005" 143 | - "10006:10006" 144 | - "10007:10007" 145 | - "10008:10008" 146 | - "10009:10009" 147 | logging: 148 | driver: "json-file" 149 | options: 150 | max-size: "100m" 151 | max-file: "3" 152 | ``` 153 | -------------------------------------------------------------------------------- /packages/replication-master/src/cli/master.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const log = require('ipfs-registry-mirror-common/utils/log') 4 | const os = require('os') 5 | 6 | if (process.env.NODE_ENV !== 'production' || process.env.PROFILING) { 7 | const url = '/-/dashboard' 8 | 9 | log(`🔍 Enabling profiling at ${url}`) 10 | 11 | try { 12 | require('appmetrics-dash').attach({ 13 | url 14 | }) 15 | } catch (error) { 16 | log('💥 Enabling profiling failed', error) 17 | } 18 | } 19 | 20 | require('dnscache')({ enable: true }) 21 | 22 | const pkg = require('../../package') 23 | const path = require('path') 24 | 25 | require('dotenv').config({ 26 | path: path.join(process.env.HOME, '.ipfs-npm-registry-mirror/replication-master.env') 27 | }) 28 | 29 | process.title = pkg.name 30 | 31 | const yargs = require('yargs') 32 | 33 | yargs.command('$0', 'Starts a registry server that uses IPFS to fetch js dependencies', (yargs) => { // eslint-disable-line no-unused-expressions 34 | yargs 35 | .option('registry', { 36 | describe: 'Which registry we are mirroring', 37 | default: 'https://registry.npmjs.com' 38 | }) 39 | .option('registry-update-interval', { 40 | describe: 'Only request the manifest for a given module every so many ms', 41 | default: 60000 42 | }) 43 | .option('registry-read-timeout', { 44 | describe: 'How long to wait for registry requests', 45 | default: 60000 46 | }) 47 | 48 | 
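// the http-* options below control where this server listens for requests;
// the external-* options control the address advertised to clients, e.g.
// when the mirror runs behind the proxy from docker-compose.yml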
.option('http-protocol', { 49 | describe: 'Which protocol to use with the server', 50 | default: 'http' 51 | }) 52 | .option('http-host', { 53 | describe: 'Which host to listen to requests on', 54 | default: 'localhost' 55 | }) 56 | .option('http-port', { 57 | describe: 'Which port to listen to requests on', 58 | default: 8080 59 | }) 60 | 61 | .option('external-ip', { 62 | describe: 'Which IP address to use when reaching this mirror' 63 | }) 64 | .option('external-protocol', { 65 | describe: 'Which protocol to use when reaching this mirror' 66 | }) 67 | .option('external-host', { 68 | describe: 'Which host to use when reaching this mirror' 69 | }) 70 | .option('external-port', { 71 | describe: 'Which port to use when reaching this mirror' 72 | }) 73 | 74 | .option('ipfs-port', { 75 | describe: 'Which port to accept IPFS connections on', 76 | default: 4001 77 | }) 78 | .option('ipfs-mfs-prefix', { 79 | describe: 'Which mfs prefix to use', 80 | default: '/npm-registry' 81 | }) 82 | .option('ipfs-flush', { 83 | describe: 'Whether to flush the MFS cache', 84 | default: true 85 | }) 86 | .option('ipfs-repo', { 87 | describe: 'The path to the IPFS repo you wish to use', 88 | default: path.join(process.env.HOME, '.jsipfs') 89 | }) 90 | .option('ipfs-repo-port', { 91 | describe: 'The port for level workers to connect to', 92 | default: 9000 93 | }) 94 | .option('ipfs-store-type', { 95 | describe: 'Which type of datastore to use - fs, s3, etc', 96 | default: 'fs' 97 | }) 98 | .option('ipfs-store-s3-region', { 99 | describe: 'The s3 region to use' 100 | }) 101 | .option('ipfs-store-s3-bucket', { 102 | describe: 'The s3 bucket to use' 103 | }) 104 | .option('ipfs-store-s3-path', { 105 | describe: 'The path to use in an s3 bucket' 106 | }) 107 | .option('ipfs-store-s3-access-key-id', { 108 | describe: 'The s3 access key id to use' 109 | }) 110 | .option('ipfs-store-s3-secret-access-key', { 111 | describe: 'The s3 secret access key to use' 112 | }) 113 | .option('ipfs-store-s3-create-if-missing', { 114 | describe: 'Whether to create the bucket if it is missing', 115 | default: false 116 | }) 117 | .option('ipfs-pass', { 118 | describe: 'Used to secure operations on the keystore - must be over 20 characters long' 119 | }) 120 | 121 | .option('follow-replicator', { 122 | describe: 'Where to get changes from', 123 | default: 'https://replicate.npmjs.com/registry/_changes' 124 | }) 125 | .option('follow-registry', { 126 | describe: 'Which registry to clone', 127 | default: 'https://registry.npmjs.com' 128 | }) 129 | .option('follow-user-agent', { 130 | describe: 'What user agent to specify when contacting the registry', 131 | default: 'IPFS replication-master' 132 | }) 133 | .option('follow-concurrency', { 134 | describe: 'How many registry updates to process at once', 135 | default: 10 136 | }) 137 | .option('follow-seq-file', { 138 | describe: 'Where to store the seq file of how far through the npm feed we are', 139 | default: 'seq.txt' 140 | }) 141 | .option('follow-inactivity-ms', { 142 | describe: 'If no updates are received in this time, restart the feed', 143 | default: 1800000 144 | }) 145 | 146 | .option('clone-delay', { 147 | describe: 'How long to wait after startup before starting to clone npm', 148 | default: 0 149 | }) 150 | .option('clone-pin', { 151 | describe: 'Whether to pin cloned modules', 152 | default: false 153 | }) 154 | .option('clone-publish', { 155 | describe: 'Whether to publish IPNS names for cloned modules', 156 | default: false 157 | }) 158 | .option('clone-concurrency',
{ 159 | describe: 'How many cluster workers to use to process module updates', 160 | default: os.cpus().length - 1 161 | }) 162 | 163 | .option('request-retries', { 164 | describe: 'How many times to retry when downloading tarballs from the registry', 165 | default: 5 166 | }) 167 | .option('request-retry-delay', { 168 | describe: 'How long in ms to wait between retries', 169 | default: 1000 170 | }) 171 | .option('request-timeout', { 172 | describe: 'How long in ms we should wait when requesting files', 173 | default: 30000 174 | }) 175 | .option('request-keep-alive', { 176 | describe: 'Whether to re-use connections', 177 | default: true 178 | }) 179 | .option('request-concurrency', { 180 | describe: 'How many simultaneous requests to make', 181 | default: 50 182 | }) 183 | 184 | .option('mdns-advert', { 185 | describe: 'A string name to use to advertise this service over mDNS', 186 | default: '_ipfs-npm._tcp' 187 | }) 188 | }, require('../core')) 189 | .argv 190 | -------------------------------------------------------------------------------- /packages/common/test/load-packument.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const sinon = require('sinon') 6 | const expect = require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const hat = require('hat') 10 | 11 | const pkg = (name) => { 12 | return { 13 | name: name, 14 | versions: { 15 | '0.0.1': { 16 | name: name, 17 | dist: { 18 | tarball: `https://foo.registry.com/${name}.tgz` 19 | } 20 | } 21 | } 22 | } 23 | } 24 | 25 | describe('load-packument', () => { 26 | let loadPackument 27 | let savePackument 28 | let request 29 | let ipfs 30 | let config 31 | 32 | const existentPackage = pkg(`i-exist-${hat()}`) 33 | const nonExistentPackage = pkg(`i-do-not-exist-${hat()}`) 34 | const newPackage = pkg(`i-am-new-${hat()}`) 35 | const updatedPackage = pkg(`i-have-new-${hat()}`) 36 | const newVersionOfUpdatedPackage = pkg(updatedPackage.name) 37 | 38 | beforeEach(() => { 39 | config = { 40 | registryUpdateInterval: 0, 41 | registryReadTimeout: 10000, 42 | registries: [ 43 | 'http://foo' 44 | ], 45 | ipfs: { 46 | prefix: `/registry-prefix-${hat()}` 47 | }, 48 | request: {}, 49 | http: { 50 | host: 'localhost', 51 | port: 8080, 52 | protocol: 'http' 53 | } 54 | } 55 | 56 | request = sinon.stub() 57 | savePackument = sinon.stub() 58 | 59 | mock('../utils/retry-request', request) 60 | mock('../utils/save-packument', savePackument) 61 | 62 | loadPackument = mock.reRequire('../utils/load-packument') 63 | 64 | ipfs = { 65 | files: { 66 | read: sinon.stub(), 67 | write: sinon.stub() 68 | } 69 | } 70 | }) 71 | 72 | afterEach(() => { 73 | mock.stopAll() 74 | }) 75 | 76 | it('should load a packument from ipfs', async () => { 77 | ipfs.files.read.withArgs(`${config.ipfs.prefix}/${existentPackage.name}`) 78 | .returns([ 79 | Buffer.from(JSON.stringify(existentPackage)) 80 | ]) 81 | 82 | request 83 | .withArgs({ 84 | uri: `${config.registries[0]}/${existentPackage.name}`, 85 | json: true 86 | }) 87 | .resolves(existentPackage) 88 | 89 | const result = await loadPackument(existentPackage.name, ipfs, config) 90 | 91 | expect(result).to.deep.equal(existentPackage) 92 | expect(savePackument.called).to.be.false() 93 | expect(request.called).to.be.true() 94 | }) 95 | 96 | it('should load a packument from npm when not found in mfs', async () => { 97 | ipfs.files.read.withArgs(`${config.ipfs.prefix}/${newPackage.name}`) 98 |
.returns(async function * () { // eslint-disable-line require-yield,require-await 99 | throw new Error('file does not exist') 100 | }()) 101 | 102 | request 103 | .withArgs({ 104 | uri: `${config.registries[0]}/${newPackage.name}`, 105 | json: true 106 | }) 107 | .resolves(newPackage) 108 | 109 | const result = await loadPackument(newPackage.name, ipfs, config) 110 | 111 | expect(result).to.deep.equal(newPackage) 112 | expect(savePackument.called).to.be.true() 113 | expect(request.called).to.be.true() 114 | }) 115 | 116 | it('should favour an updated packument from npm', async () => { 117 | updatedPackage.versions = { 118 | '0.0.1': { 119 | dist: { 120 | cid: 'a-cid', 121 | tarball: 'a-tarball', 122 | source: 'original-tarball' 123 | } 124 | } 125 | } 126 | 127 | newVersionOfUpdatedPackage.versions = { 128 | '0.0.1': { 129 | dist: { 130 | tarball: 'original-tarball' 131 | } 132 | }, 133 | '0.0.2': { 134 | dist: { 135 | tarball: 'new-tarball' 136 | } 137 | } 138 | } 139 | 140 | ipfs.files.read.withArgs(`${config.ipfs.prefix}/${updatedPackage.name}`) 141 | .returns(async function * () { // eslint-disable-line require-await 142 | yield Buffer.from(JSON.stringify(updatedPackage)) 143 | }()) 144 | 145 | request 146 | .withArgs({ 147 | uri: `${config.registries[0]}/${updatedPackage.name}`, 148 | json: true 149 | }) 150 | .resolves(JSON.parse(JSON.stringify(newVersionOfUpdatedPackage))) 151 | 152 | const result = await loadPackument(updatedPackage.name, ipfs, config) 153 | 154 | expect(result.versions['0.0.1'].dist.cid).to.equal(updatedPackage.versions['0.0.1'].dist.cid) 155 | expect(result.versions['0.0.2'].dist.tarball).to.equal(newVersionOfUpdatedPackage.versions['0.0.2'].dist.tarball) 156 | 157 | expect(savePackument.called).to.be.true() 158 | expect(request.called).to.be.true() 159 | }) 160 | 161 | it('should explode when a module does not exist', async () => { 162 | ipfs.files.read.withArgs(`${config.ipfs.prefix}/${nonExistentPackage.name}`) 163 | .returns(async function * () { // eslint-disable-line require-yield,require-await 164 | throw new Error('file does not exist') 165 | }()) 166 | 167 | request 168 | .withArgs({ 169 | uri: `${config.registries[0]}/${nonExistentPackage.name}`, 170 | json: true 171 | }) 172 | .rejects(new Error('404')) 173 | 174 | try { 175 | await loadPackument(nonExistentPackage.name, ipfs, config) 176 | throw new Error('Expected loadPackument to throw') 177 | } catch (error) { 178 | expect(error.message).to.include('not found') 179 | } 180 | }) 181 | 182 | it('should download from a backup registry', async () => { 183 | const options = { 184 | ...config, 185 | registries: [ 186 | ...config.registries, 187 | 'https://regregreg.npm.com' 188 | ] 189 | } 190 | 191 | ipfs.files.read.withArgs(`${options.ipfs.prefix}/${existentPackage.name}`) 192 | .returns(async function * () { // eslint-disable-line require-yield,require-await 193 | throw new Error('file does not exist') 194 | }()) 195 | 196 | request 197 | .withArgs({ 198 | uri: `${options.registries[0]}/${existentPackage.name}`, 199 | json: true 200 | }) 201 | .rejects(new Error('404')) 202 | 203 | request 204 | .withArgs({ 205 | uri: `${options.registries[1]}/${existentPackage.name}`, 206 | json: true 207 | }) 208 | .resolves(existentPackage) 209 | 210 | const result = await loadPackument(existentPackage.name, ipfs, options) 211 | 212 | expect(result).to.deep.equal(existentPackage) 213 | expect(savePackument.called).to.be.true() 214 | expect(request.called).to.be.true() 215 | }) 216 | }) 217 | 
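For reference, a minimal sketch of how `loadPackument` might be called outside of the tests above. The option names mirror the `config` object built in this spec; the package name, registry URL, timeouts and the `ipfs` instance are all assumptions, not code from the repo.

```js
'use strict'

// hypothetical caller of load-packument - not part of the repo
const loadPackument = require('ipfs-registry-mirror-common/utils/load-packument')

// `ipfs` is assumed to be an already-started node exposing the MFS `files` API
const printVersions = async (ipfs) => {
  const packument = await loadPackument('example-module', ipfs, {
    registries: ['https://registry.npmjs.com'],
    registryUpdateInterval: 60000, // consult upstream at most once a minute
    registryReadTimeout: 10000, // give up on a registry after 10s
    ipfs: { prefix: '/npm-registry', flush: true },
    request: {} // extra options passed straight through to retry-request
  })

  console.info(`${packument.name} versions: ${Object.keys(packument.versions || {}).join(', ')}`)
}

module.exports = printVersions
```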
-------------------------------------------------------------------------------- /packages/registry-mirror/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 2 | # [0.10.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.9.0...v0.10.0) (2018-10-04) 3 | 4 | 5 | ### Features 6 | 7 | * add s3 storage option ([6a522c8](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/6a522c8)) 8 | 9 | 10 | 11 | 12 | # [0.9.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.8.0...v0.9.0) (2018-10-04) 13 | 14 | 15 | ### Bug Fixes 16 | 17 | * friendlier error pages ([747320d](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/747320d)) 18 | 19 | 20 | ### Features 21 | 22 | * use ipfs to fetch files if available ([5e135fe](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/5e135fe)) 23 | 24 | 25 | 26 | 27 | # [0.8.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.4...v0.8.0) (2018-09-21) 28 | 29 | 30 | ### Features 31 | 32 | * adds retry when npm 404s, fixes [#61](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/61) ([d884991](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/d884991)) 33 | 34 | 35 | 36 | 37 | ## [0.7.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.3...v0.7.4) (2018-09-20) 38 | 39 | 40 | 41 | 42 | ## [0.7.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.2...v0.7.3) (2018-09-20) 43 | 44 | 45 | ### Bug Fixes 46 | 47 | * increase max body size ([d478378](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/d478378)) 48 | 49 | 50 | 51 | 52 | ## [0.7.2](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.1...v0.7.2) (2018-09-20) 53 | 54 | 55 | ### Bug Fixes 56 | 57 | * fixes [#59](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/59) and [#60](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/60) with better error detection and messages ([93ad289](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/93ad289)) 58 | 59 | 60 | 61 | 62 | ## [0.7.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.0...v0.7.1) (2018-09-20) 63 | 64 | 65 | 66 | 67 | # [0.7.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.6.4...v0.7.0) (2018-09-20) 68 | 69 | 70 | ### Features 71 | 72 | * forward non-get requests to the registry, fixes [#58](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/58) ([b493e4c](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/b493e4c)) 73 | 74 | 75 | 76 | 77 | ## [0.6.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.5.1...v0.6.4) (2018-09-20) 78 | 79 | 80 | ### Bug Fixes 81 | 82 | * do not use path.join because windows ([8b51156](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/8b51156)) 83 | * handle scoped modules, fixes [#57](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/57) ([bd08ddd](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/bd08ddd)) 84 | * linting errors ([ca7fd83](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/ca7fd83)) 85 | * update arg name ([b707cc0](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/b707cc0)) 86 | * use same blobstore instance as it will create an ipfs node ([afe5c58](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/afe5c58)) 87 | 88 | 89 | ### Features 90 | 91 | * allow configuring external host address separately from internal ([3405f0a](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/3405f0a)) 92 | * mirror npm instead of pulling/publishing registry tree ([c734d44](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/c734d44)) 93 | 94 | 95 | ### Performance Improvements 96 | 97 | * increase 
concurrency when running local IPFS node ([126747b](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/126747b)) 98 | * throttle requests to daemon and use build in ipfs node ([4118ed5](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/4118ed5)) 99 | 100 | 101 | 102 | 103 | ## [0.5.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.5.0...v0.5.1) (2016-03-23) 104 | 105 | 106 | 107 | 108 | # [0.5.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.4.0...v0.5.0) (2016-02-01) 109 | 110 | 111 | 112 | 113 | # [0.4.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.3.3...v0.4.0) (2016-02-01) 114 | 115 | 116 | 117 | 118 | ## [0.3.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.3.1...v0.3.3) (2016-01-02) 119 | 120 | 121 | 122 | 123 | ## [0.3.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.3.0...v0.3.1) (2015-12-18) 124 | 125 | 126 | 127 | 128 | # [0.3.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.12...v0.3.0) (2015-12-18) 129 | 130 | 131 | 132 | 133 | ## [0.2.12](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.11...v0.2.12) (2015-12-17) 134 | 135 | 136 | 137 | 138 | ## [0.2.11](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.10...v0.2.11) (2015-12-17) 139 | 140 | 141 | 142 | 143 | ## [0.2.10](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.9...v0.2.10) (2015-11-30) 144 | 145 | 146 | 147 | 148 | ## [0.2.9](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.8...v0.2.9) (2015-11-26) 149 | 150 | 151 | 152 | 153 | ## [0.2.8](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.7...v0.2.8) (2015-11-25) 154 | 155 | 156 | 157 | 158 | ## [0.2.7](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.6...v0.2.7) (2015-11-25) 159 | 160 | 161 | 162 | 163 | ## [0.2.6](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.5...v0.2.6) (2015-11-25) 164 | 165 | 166 | 167 | 168 | ## [0.2.5](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.4...v0.2.5) (2015-11-24) 169 | 170 | 171 | 172 | 173 | ## [0.2.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.3...v0.2.4) (2015-11-24) 174 | 175 | 176 | 177 | 178 | ## [0.2.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.2...v0.2.3) (2015-11-23) 179 | 180 | 181 | 182 | 183 | ## [0.2.2](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.1...v0.2.2) (2015-11-23) 184 | 185 | 186 | 187 | 188 | ## [0.2.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.0...v0.2.1) (2015-11-23) 189 | 190 | 191 | 192 | 193 | # [0.2.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.6...v0.2.0) (2015-11-23) 194 | 195 | 196 | 197 | 198 | ## [0.1.6](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.5...v0.1.6) (2015-11-23) 199 | 200 | 201 | ### Features 202 | 203 | * Add host and port config. 
([5c3eeea](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/5c3eeea)), closes [#3](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/3) 204 | 205 | 206 | 207 | 208 | ## [0.1.5](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.4...v0.1.5) (2015-11-18) 209 | 210 | 211 | 212 | 213 | ## [0.1.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.3...v0.1.4) (2015-11-16) 214 | 215 | 216 | 217 | 218 | ## [0.1.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.2...v0.1.3) (2015-11-16) 219 | 220 | 221 | 222 | 223 | ## [0.1.2](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.1...v0.1.2) (2015-11-16) 224 | 225 | 226 | 227 | 228 | ## [0.1.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.0...v0.1.1) (2015-11-16) 229 | 230 | 231 | 232 | 233 | # [0.1.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.0.1...v0.1.0) (2015-11-15) 234 | 235 | 236 | 237 | 238 | ## 0.0.1 (2015-11-14) 239 | 240 | 241 | 242 | -------------------------------------------------------------------------------- /packages/replication-master/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 2 | # [0.10.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.9.0...v0.10.0) (2018-10-04) 3 | 4 | 5 | ### Features 6 | 7 | * add s3 storage option ([6a522c8](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/6a522c8)) 8 | 9 | 10 | 11 | 12 | # [0.9.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.8.0...v0.9.0) (2018-10-04) 13 | 14 | 15 | ### Bug Fixes 16 | 17 | * friendlier error pages ([747320d](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/747320d)) 18 | 19 | 20 | ### Features 21 | 22 | * use ipfs to fetch files if available ([5e135fe](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/5e135fe)) 23 | 24 | 25 | 26 | 27 | # [0.8.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.4...v0.8.0) (2018-09-21) 28 | 29 | 30 | ### Features 31 | 32 | * adds retry when npm 404s, fixes [#61](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/61) ([d884991](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/d884991)) 33 | 34 | 35 | 36 | 37 | ## [0.7.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.3...v0.7.4) (2018-09-20) 38 | 39 | 40 | 41 | 42 | ## [0.7.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.2...v0.7.3) (2018-09-20) 43 | 44 | 45 | ### Bug Fixes 46 | 47 | * increase max body size ([d478378](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/d478378)) 48 | 49 | 50 | 51 | 52 | ## [0.7.2](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.1...v0.7.2) (2018-09-20) 53 | 54 | 55 | ### Bug Fixes 56 | 57 | * fixes [#59](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/59) and [#60](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/60) with better error detection and messages ([93ad289](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/93ad289)) 58 | 59 | 60 | 61 | 62 | ## [0.7.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.7.0...v0.7.1) (2018-09-20) 63 | 64 | 65 | 66 | 67 | # [0.7.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.6.4...v0.7.0) (2018-09-20) 68 | 69 | 70 | ### Features 71 | 72 | * forward non-get requests to the registry, fixes [#58](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/58) ([b493e4c](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/b493e4c)) 73 | 74 | 75 | 76 | 77 | ## [0.6.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.5.1...v0.6.4) (2018-09-20) 78 | 79 | 80 | ### Bug Fixes 81 | 82 | * do not use path.join 
because windows ([8b51156](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/8b51156)) 83 | * handle scoped modules, fixes [#57](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/57) ([bd08ddd](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/bd08ddd)) 84 | * linting errors ([ca7fd83](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/ca7fd83)) 85 | * update arg name ([b707cc0](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/b707cc0)) 86 | * use same blobstore instance as it will create an ipfs node ([afe5c58](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/afe5c58)) 87 | 88 | 89 | ### Features 90 | 91 | * allow configuring external host address separately from internal ([3405f0a](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/3405f0a)) 92 | * mirror npm instead of pulling/publishing registry tree ([c734d44](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/c734d44)) 93 | 94 | 95 | ### Performance Improvements 96 | 97 | * increase concurrency when running local IPFS node ([126747b](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/126747b)) 98 | * throttle requests to daemon and use build in ipfs node ([4118ed5](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/4118ed5)) 99 | 100 | 101 | 102 | 103 | ## [0.5.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.5.0...v0.5.1) (2016-03-23) 104 | 105 | 106 | 107 | 108 | # [0.5.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.4.0...v0.5.0) (2016-02-01) 109 | 110 | 111 | 112 | 113 | # [0.4.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.3.3...v0.4.0) (2016-02-01) 114 | 115 | 116 | 117 | 118 | ## [0.3.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.3.1...v0.3.3) (2016-01-02) 119 | 120 | 121 | 122 | 123 | ## [0.3.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.3.0...v0.3.1) (2015-12-18) 124 | 125 | 126 | 127 | 128 | # [0.3.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.12...v0.3.0) (2015-12-18) 129 | 130 | 131 | 132 | 133 | ## [0.2.12](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.11...v0.2.12) (2015-12-17) 134 | 135 | 136 | 137 | 138 | ## [0.2.11](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.10...v0.2.11) (2015-12-17) 139 | 140 | 141 | 142 | 143 | ## [0.2.10](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.9...v0.2.10) (2015-11-30) 144 | 145 | 146 | 147 | 148 | ## [0.2.9](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.8...v0.2.9) (2015-11-26) 149 | 150 | 151 | 152 | 153 | ## [0.2.8](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.7...v0.2.8) (2015-11-25) 154 | 155 | 156 | 157 | 158 | ## [0.2.7](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.6...v0.2.7) (2015-11-25) 159 | 160 | 161 | 162 | 163 | ## [0.2.6](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.5...v0.2.6) (2015-11-25) 164 | 165 | 166 | 167 | 168 | ## [0.2.5](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.4...v0.2.5) (2015-11-24) 169 | 170 | 171 | 172 | 173 | ## [0.2.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.3...v0.2.4) (2015-11-24) 174 | 175 | 176 | 177 | 178 | ## [0.2.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.2...v0.2.3) (2015-11-23) 179 | 180 | 181 | 182 | 183 | ## [0.2.2](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.1...v0.2.2) (2015-11-23) 184 | 185 | 186 | 187 | 188 | ## [0.2.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.2.0...v0.2.1) (2015-11-23) 189 | 190 | 191 | 192 | 193 | # 
[0.2.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.6...v0.2.0) (2015-11-23) 194 | 195 | 196 | 197 | 198 | ## [0.1.6](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.5...v0.1.6) (2015-11-23) 199 | 200 | 201 | ### Features 202 | 203 | * Add host and port config. ([5c3eeea](https://github.com/ipfs-shipyard/npm-on-ipfs/commit/5c3eeea)), closes [#3](https://github.com/ipfs-shipyard/npm-on-ipfs/issues/3) 204 | 205 | 206 | 207 | 208 | ## [0.1.5](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.4...v0.1.5) (2015-11-18) 209 | 210 | 211 | 212 | 213 | ## [0.1.4](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.3...v0.1.4) (2015-11-16) 214 | 215 | 216 | 217 | 218 | ## [0.1.3](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.2...v0.1.3) (2015-11-16) 219 | 220 | 221 | 222 | 223 | ## [0.1.2](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.1...v0.1.2) (2015-11-16) 224 | 225 | 226 | 227 | 228 | ## [0.1.1](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.1.0...v0.1.1) (2015-11-16) 229 | 230 | 231 | 232 | 233 | # [0.1.0](https://github.com/ipfs-shipyard/npm-on-ipfs/compare/v0.0.1...v0.1.0) (2015-11-15) 234 | 235 | 236 | 237 | 238 | ## 0.0.1 (2015-11-14) 239 | 240 | 241 | 242 | -------------------------------------------------------------------------------- /packages/replication-master/test/replication.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const path = require('path') 6 | const os = require('os') 7 | const { 8 | createTestServer, 9 | destroyTestServers 10 | } = require('ipfs-registry-mirror-common/test/fixtures/test-server') 11 | const createSkimDb = require('./fixtures/create-skim-db') 12 | const expect = require('chai') 13 | .use(require('dirty-chai')) 14 | .expect 15 | const hat = require('hat') 16 | const savePackument = require('ipfs-registry-mirror-common/utils/save-packument') 17 | const delay = require('delay') 18 | const request = require('ipfs-registry-mirror-common/utils/retry-request') 19 | 20 | const baseDir = '/commons-registry-clone-test' 21 | 22 | describe('replication', function () { 23 | this.timeout(120000) 24 | 25 | let replicationMaster 26 | const upstreamModules = {} 27 | let replicationMasterUrl 28 | let skim 29 | let config 30 | 31 | const serverConfig = (registry, skim, config = {}) => { 32 | return Object.assign({}, { 33 | httpProtocol: 'http', 34 | httpHost: '127.0.0.1', 35 | registries: [ 36 | `http://127.0.0.1:${registry.address().port}` 37 | ], 38 | requestRetries: 5, 39 | requestRetryDelay: 100, 40 | requestConcurrency: 5, 41 | ipfsMfsPrefix: baseDir, 42 | requestTimeout: 1000, 43 | ipfsStoreType: 'fs', 44 | ipfsRepo: path.join(os.tmpdir(), hat()), 45 | ipfsFlush: true, 46 | registryUpdateInterval: 0, 47 | followReplicator: `http://127.0.0.1:${skim.address().port}/_changes`, 48 | followRegistry: `http://127.0.0.1:${registry.address().port}`, 49 | followConcurrency: 1, 50 | followUserAgent: 'test UA', 51 | followSeqFile: path.join(os.tmpdir(), hat()), 52 | followInactivityMs: 60000, 53 | externalHost: 'replication.registry.ipfs.io', 54 | externalPort: 443, 55 | externalProtocol: 'https', 56 | externalIp: '35.178.192.119', 57 | cloneConcurrency: 0, 58 | cloneDelay: 1 59 | }, config) 60 | } 61 | 62 | before(async () => { 63 | const registry = await createTestServer(upstreamModules) 64 | skim = await createSkimDb(upstreamModules) 65 | 66 | config = serverConfig(registry, 
skim) 67 | 68 | const startReplication = mock.reRequire('../src/core') 69 | 70 | replicationMaster = await startReplication(config) 71 | 72 | config.httpPort = replicationMaster.server.address().port 73 | 74 | replicationMasterUrl = `${config.httpProtocol}://${config.httpHost}:${config.httpPort}` 75 | }) 76 | 77 | after(async () => { 78 | mock.stopAll() 79 | 80 | await destroyTestServers() 81 | 82 | if (replicationMaster && replicationMaster.stop) { 83 | await replicationMaster.stop() 84 | } 85 | }) 86 | 87 | it('should publish some info about this node', async () => { 88 | const info = await request({ 89 | uri: replicationMasterUrl, 90 | json: true 91 | }) 92 | 93 | expect(info.ipfs).to.be.ok() 94 | expect(info.ipfs.id).to.be.ok() 95 | expect(info.ipfs.addresses).to.be.ok() 96 | expect(info.ipfs.addresses.length).to.be.ok() 97 | expect(info.root).to.be.ok() 98 | expect(info.topic).to.be.ok() 99 | 100 | info.ipfs.addresses.forEach(address => { 101 | expect(address).to.not.contain('127.0.0.1') 102 | expect(address).to.not.contain('localhost') 103 | }) 104 | }) 105 | 106 | it('should download a new module', () => { 107 | const module = { 108 | name: `new-module-${hat()}`, 109 | version: '1.0.0' 110 | } 111 | const tarball = { 112 | path: `/${module.name}/-/${module.name}-${module.version}.tgz`, 113 | content: 'I am some binary' 114 | } 115 | 116 | const data = { 117 | name: module.name, 118 | json: { 119 | name: module.name, 120 | _rev: '12345', 121 | versions: { 122 | [module.version]: { 123 | dist: { 124 | tarball: `${config.registries[0]}${tarball.path}`, 125 | shasum: '3f9f726832b39c2cc7ac515c8a6c97b94b608b0e' 126 | } 127 | } 128 | } 129 | } 130 | } 131 | 132 | skim.publish(data, tarball) 133 | 134 | return new Promise((resolve, reject) => { 135 | replicationMaster.app.once('processed', (name) => { 136 | try { 137 | expect(name).to.equal(module.name) 138 | } catch (error) { 139 | return reject(error) 140 | } 141 | 142 | resolve() 143 | }) 144 | }) 145 | }) 146 | 147 | it('should download a module even if the previous one fails', () => { 148 | const module1 = { 149 | name: `new-module-${hat()}`, 150 | version: '1.0.0' 151 | } 152 | const module2 = { 153 | name: `new-module-${hat()}`, 154 | version: '1.0.0' 155 | } 156 | const tarball1 = { 157 | path: `/${module1.name}/-/${module1.name}-${module1.version}.tgz`, 158 | content: 'I am some binary' 159 | } 160 | const tarball2 = { 161 | path: `/${module2.name}/-/${module2.name}-${module2.version}.tgz`, 162 | content: 'I am some binary' 163 | } 164 | 165 | const data1 = { 166 | name: module1.name, 167 | json: { 168 | name: module1.name, 169 | _rev: '12345', 170 | versions: { 171 | [module1.version]: { 172 | dist: { 173 | tarball: `${config.registries[0]}${tarball1.path}`, 174 | shasum: '3f9f726832b39c2cc7ac515c8a6c97b94b608b0e' 175 | } 176 | } 177 | } 178 | } 179 | } 180 | const data2 = { 181 | name: module2.name, 182 | json: { 183 | name: module2.name, 184 | _rev: '12345', 185 | versions: { 186 | [module2.version]: { 187 | dist: { 188 | tarball: `${config.registries[0]}${tarball2.path}`, 189 | shasum: '3f9f726832b39c2cc7ac515c8a6c97b94b608b0e' 190 | } 191 | } 192 | } 193 | } 194 | } 195 | 196 | skim.publish(data1) 197 | skim.publish(data2, tarball2) 198 | 199 | let sawModule1Update = false 200 | 201 | return new Promise((resolve, reject) => { 202 | replicationMaster.app.on('processed', (name) => { 203 | if (name === module1.name) { 204 | sawModule1Update = true 205 | return 206 | } 207 | 208 | try { 209 | expect(sawModule1Update).to.be.true() 
210 | expect(name).to.equal(module2.name) 211 | } catch (error) { 212 | return reject(error) 213 | } 214 | 215 | resolve() 216 | }) 217 | }) 218 | }) 219 | 220 | it('should survive an invalid update', async () => { 221 | const module = { 222 | name: `new-module-${hat()}`, 223 | version: '1.0.0' 224 | } 225 | const tarball = { 226 | path: `/${module.name}/-/${module.name}-${module.version}.tgz`, 227 | content: 'I am some binary' 228 | } 229 | 230 | const data = { 231 | name: module.name, 232 | json: { 233 | name: module.name, 234 | _rev: '12345', 235 | versions: [] 236 | } 237 | } 238 | 239 | skim.publish(data, tarball) 240 | 241 | await delay(1000) 242 | 243 | // no-one died 244 | }) 245 | 246 | it('should survive npm 503ing', async () => { 247 | const module = { 248 | name: `new-module-${hat()}`, 249 | version: '1.0.0' 250 | } 251 | const tarball = { 252 | path: `/${module.name}/-/${module.name}-${module.version}.tgz`, 253 | content: 'I am some binary' 254 | } 255 | 256 | const data = { 257 | name: module.name, 258 | json: '
<html><body><h1>503 Service Unavailable</h1>\nNo server is available to handle this request.\n</body></html>\n\n' 259 | } 260 | 261 | skim.publish(data, tarball) 262 | 263 | await delay(1000) 264 | 265 | // no-one died 266 | }) 267 | 268 | it('should survive npm 504ing', async () => { 269 | const module = { 270 | name: `new-module-${hat()}`, 271 | version: '1.0.0' 272 | } 273 | const tarball = { 274 | path: `/${module.name}/-/${module.name}-${module.version}.tgz`, 275 | content: 'I am some binary' 276 | } 277 | 278 | const data = { 279 | name: module.name, 280 | json: '<html><body><h1>504 Gateway Time-out</h1>\nThe server didn\'t respond in time.\n</body></html>\n\n' 281 | } 282 | 283 | skim.publish(data, tarball) 284 | 285 | await delay(1000) 286 | 287 | // no-one died 288 | }) 289 | 290 | it('should survive npm 404ing', async () => { 291 | const module = { 292 | name: `new-module-${hat()}`, 293 | version: '1.0.0' 294 | } 295 | const tarball = { 296 | path: `/${module.name}/-/${module.name}-${module.version}.tgz`, 297 | content: 'I am some binary' 298 | } 299 | 300 | const data = { 301 | name: module.name, 302 | json: { 303 | error: 'not_found', 304 | reason: 'missing' 305 | } 306 | } 307 | 308 | skim.publish(data, tarball) 309 | 310 | await delay(1000) 311 | 312 | // no-one died 313 | }) 314 | 315 | it('should not download a tarball that already exists', async () => { 316 | const module = { 317 | name: `new-module-${hat()}`, 318 | version: '1.0.0' 319 | } 320 | const tarball = { 321 | path: `/${module.name}/-/${module.name}-${module.version}.tgz`, 322 | content: 'I am some binary' 323 | } 324 | 325 | const data = { 326 | name: module.name, 327 | json: { 328 | name: module.name, 329 | _rev: '12345', 330 | versions: { 331 | [module.version]: { 332 | dist: { 333 | tarball: `${config.registries[0]}${tarball.path}`, 334 | shasum: '3f9f726832b39c2cc7ac515c8a6c97b94b608b0e' 335 | } 336 | } 337 | } 338 | } 339 | } 340 | 341 | const manifest = { 342 | name: module.name, 343 | _rev: '12345', 344 | versions: { 345 | '1.0.0': { 346 | dist: { 347 | tarball: `${config.externalProtocol}://${config.externalHost}:${config.externalPort}${tarball.path}`, 348 | source: `${config.registries[0]}${tarball.path}`, 349 | cid: 'QmZVQm5euZa69LtUFt8HuuBPSpLYMxcxACh6F5M8ZqpbR9', 350 | shasum: '123' 351 | } 352 | } 353 | } 354 | } 355 | 356 | await savePackument(manifest, replicationMaster.app.locals.ipfs, { 357 | ipfs: { 358 | prefix: baseDir, 359 | flush: true 360 | } 361 | }) 362 | 363 | skim.publish(data) 364 | 365 | return new Promise((resolve, reject) => { 366 | replicationMaster.app.once('processed', (name) => { 367 | try { 368 | expect(name).to.equal(module.name) 369 | } catch (error) { 370 | return reject(error) 371 | } 372 | 373 | resolve() 374 | }) 375 | }) 376 | }) 377 | 378 | it('should increment worker ids', async () => { 379 | const worker1 = await request({ 380 | uri: `${replicationMasterUrl}/-/worker`, 381 | qs: { 382 | worker: 'host1' 383 | }, 384 | json: true 385 | }) 386 | 387 | const worker2 = await request({ 388 | uri: `${replicationMasterUrl}/-/worker`, 389 | qs: { 390 | worker: 'host2' 391 | }, 392 | json: true 393 | }) 394 | 395 | expect(worker1.index).to.equal(0) 396 | expect(worker2.index).to.equal(1) 397 | }) 398 | 399 | it('should return the same worker id to the same worker', async () => { 400 | const worker1 = await request({ 401 | uri: `${replicationMasterUrl}/-/worker`, 402 | qs: { 403 | worker: 'host1' 404 | }, 405 | json: true 406 | }) 407 | 408 | const worker2 = await request({ 409 | uri: `${replicationMasterUrl}/-/worker`, 410 | qs: { 411 | worker: 'host1' 412 | }, 413 | json: true 414 | }) 415 | 416 | expect(worker1.index).to.equal(worker2.index) 417 | }) 418 | }) 419 | -------------------------------------------------------------------------------- /packages/registry-mirror/test/mirror.spec.js: -------------------------------------------------------------------------------- 1 | /* eslint-env mocha */ 2 | 'use strict' 3 | 4 | const mock = require('mock-require') 5 | const request = require('request-promise') 6 | const expect = require('chai') 7 | .use(require('dirty-chai')) 8 | .expect 9 | const {
DAGNode } = require('ipld-dag-pb') 10 | const UnixFS = require('ipfs-unixfs') 11 | const { 12 | createTestServer, 13 | destroyTestServers 14 | } = require('ipfs-registry-mirror-common/test/fixtures/test-server') 15 | const createReplicationMaster = require('./fixtures/create-replication-master') 16 | const pkg = require('../package.json') 17 | const path = require('path') 18 | const os = require('os') 19 | const hat = require('hat') 20 | const delay = require('delay') 21 | const toBuffer = require('it-to-buffer') 22 | 23 | describe('mirror', function () { 24 | this.timeout(120000) 25 | 26 | let replicationMaster 27 | let baseDir 28 | let startMirror 29 | let mirror 30 | let mirrorUrl 31 | const upstreamModules = {} 32 | let config 33 | 34 | const serverConfig = (registry, replication, config = {}) => { 35 | return Object.assign({}, { 36 | httpProtocol: 'http', 37 | httpHost: '127.0.0.1', 38 | registries: [ 39 | `http://127.0.0.1:${registry.address().port}` 40 | ], 41 | registryReadTimeout: 5000, 42 | requestRetries: 5, 43 | requestRetryDelay: 100, 44 | ipfsMfsPrefix: baseDir, 45 | requestTimeout: 1000, 46 | ipfsStoreType: 'fs', 47 | ipfsRepo: path.join(os.tmpdir(), hat()), 48 | ipfsFlush: true, 49 | registryUpdateInterval: 0, 50 | pubsubMaster: `http://127.0.0.1:${replication.address().port}` 51 | }, config) 52 | } 53 | 54 | before(async () => { 55 | baseDir = `/commons-registry-test-${hat()}` 56 | 57 | startMirror = mock.reRequire('../src/core') 58 | 59 | const registryServer = await createTestServer(upstreamModules) 60 | replicationMaster = await createReplicationMaster() 61 | config = serverConfig(registryServer, replicationMaster) 62 | 63 | mirror = await startMirror(config) 64 | 65 | // make sure the mirror is connected to the master 66 | const master = await replicationMaster.ipfs.id() 67 | await mirror.app.locals.ipfs.swarm.connect(master.addresses[0]) 68 | 69 | config.httpPort = mirror.server.address().port 70 | 71 | mirrorUrl = `${config.httpProtocol}://${config.httpHost}:${config.httpPort}` 72 | }) 73 | 74 | after(async function () { 75 | mock.stopAll() 76 | 77 | await destroyTestServers() 78 | 79 | if (mirror && mirror.stop) { 80 | await mirror.stop() 81 | } 82 | }) 83 | 84 | it('should serve a packument', async function () { 85 | const moduleName = `module-${hat()}` 86 | const content = JSON.stringify({ 87 | _rev: '12345', 88 | name: moduleName, 89 | versions: {} 90 | }, null, 2) 91 | 92 | await mirror.app.locals.ipfs.files.write(`${baseDir}/${moduleName}`, Buffer.from(content), { 93 | parents: true, 94 | create: true, 95 | truncate: true 96 | }) 97 | 98 | const result = await request({ 99 | uri: `${mirrorUrl}/${moduleName}` 100 | }) 101 | 102 | expect(result).to.equal(content) 103 | }) 104 | 105 | it('should serve a tarball', async () => { 106 | const moduleName = `module-${hat()}` 107 | const tarballContent = 'tarball-content' 108 | const fsNode = new UnixFS({ type: 'file', data: Buffer.from(tarballContent) }) 109 | 110 | const node = new DAGNode(fsNode.marshal()) 111 | 112 | const cid = await mirror.app.locals.ipfs.dag.put(node, { 113 | version: 0, 114 | format: 'dag-pb', 115 | hashAlg: 'sha2-256' 116 | }) 117 | 118 | const manifest = JSON.stringify({ 119 | _rev: '12345', 120 | name: moduleName, 121 | versions: { 122 | '1.0.0': { 123 | dist: { 124 | cid: cid.toBaseEncodedString() 125 | } 126 | } 127 | } 128 | }) 129 | 130 | await mirror.app.locals.ipfs.files.write(`${baseDir}/${moduleName}`, Buffer.from(manifest), { 131 | parents: true, 132 | create: true, 133 | truncate: 
true 134 | }) 135 | 136 | const result = await request({ 137 | uri: `${mirrorUrl}/${moduleName}/-/${moduleName}-1.0.0.tgz` 138 | }) 139 | 140 | expect(result).to.equal(tarballContent) 141 | }) 142 | 143 | it('should serve some basic info', async () => { 144 | const result = JSON.parse(await request({ 145 | uri: `${mirrorUrl}` 146 | })) 147 | 148 | expect(result.name).to.equal(pkg.name) 149 | expect(result.version).to.equal(pkg.version) 150 | }) 151 | 152 | it('should download a missing packument', async () => { 153 | const moduleName = `module-${hat()}` 154 | const data = { 155 | name: moduleName, 156 | versions: { 157 | '0.0.1': { 158 | dist: { 159 | tarball: `https://some.registry.com/${moduleName}-0.0.1.tgz` 160 | } 161 | } 162 | } 163 | } 164 | 165 | upstreamModules[`/${moduleName}`] = (request, response) => { 166 | response.statusCode = 200 167 | response.end(JSON.stringify(data)) 168 | } 169 | 170 | const result = JSON.parse(await request({ 171 | uri: `${mirrorUrl}/${moduleName}` 172 | })) 173 | 174 | expect(result.name).to.equal(moduleName) 175 | expect(Object.keys(result.versions).length).to.equal(Object.keys(data.versions).length) 176 | expect(result.versions['0.0.1'].dist.source).to.equal(data.versions['0.0.1'].dist.tarball) 177 | }) 178 | 179 | it('should download a missing tarball from an existing module', async () => { 180 | const moduleName = `module-${hat()}` 181 | const tarballPath = `${moduleName}/-/${moduleName}-1.0.0.tgz` 182 | const tarballContent = 'tarball content' 183 | const packument = JSON.stringify({ 184 | _rev: '12345', 185 | name: moduleName, 186 | versions: { 187 | '1.0.0': { 188 | dist: { 189 | tarball: `${config.registries[0]}/${tarballPath}`, 190 | shasum: '15d0e36e27c69bc758231f8e9add837f40a40cd0' 191 | } 192 | } 193 | } 194 | }) 195 | 196 | upstreamModules[`/${moduleName}`] = (request, response) => { 197 | response.statusCode = 200 198 | response.end(packument) 199 | } 200 | upstreamModules[`/${tarballPath}`] = (request, response) => { 201 | response.statusCode = 200 202 | response.end(tarballContent) 203 | } 204 | 205 | const result = await request({ 206 | uri: `${mirrorUrl}/${tarballPath}` 207 | }) 208 | 209 | expect(result).to.equal(tarballContent) 210 | }) 211 | 212 | it('should download a manifest from a missing scoped module', async () => { 213 | const moduleName = `@my-scope/module-${hat()}` 214 | const data = { 215 | name: moduleName, 216 | versions: { 217 | '0.0.1': { 218 | dist: { 219 | tarball: `https://some.registry.com/${moduleName}-0.0.1.tgz` 220 | } 221 | } 222 | } 223 | } 224 | 225 | upstreamModules[`/${moduleName}`] = (request, response) => { 226 | response.statusCode = 200 227 | response.end(JSON.stringify(data)) 228 | } 229 | 230 | const result = JSON.parse(await request({ 231 | uri: `${mirrorUrl}/${moduleName.replace('/', '%2f')}` 232 | })) 233 | 234 | expect(result.name).to.equal(moduleName) 235 | expect(Object.keys(result.versions).length).to.equal(Object.keys(data.versions).length) 236 | expect(result.versions['0.0.1'].dist.source).to.equal(data.versions['0.0.1'].dist.tarball) 237 | }) 238 | 239 | it('should check with the upstream registry for updated versions', async () => { 240 | const moduleName = `module-${hat()}` 241 | const tarball1Path = `${moduleName}/-/${moduleName}-1.0.0.tgz` 242 | const tarball2Path = `${moduleName}/-/${moduleName}-2.0.0.tgz` 243 | const tarball1Content = 'tarball 1 content' 244 | const tarball2Content = 'tarball 2 content' 245 | const manifest1 = JSON.stringify({ 246 | _rev: '12345-1', 247 | name: moduleName, 248 | versions: {
249 | '1.0.0': { 250 | dist: { 251 | shasum: '669965318736dfe855479a6dd441d81f101ae5ae', 252 | tarball: `${config.registries[0]}/${tarball1Path}` 253 | } 254 | } 255 | } 256 | }) 257 | const manifest2 = JSON.stringify({ 258 | _rev: '12345-2', 259 | name: moduleName, 260 | versions: { 261 | '1.0.0': { 262 | dist: { 263 | shasum: '669965318736dfe855479a6dd441d81f101ae5ae', 264 | tarball: `${config.registries[0]}/${tarball1Path}` 265 | } 266 | }, 267 | '2.0.0': { 268 | dist: { 269 | shasum: '4e9dab818d5f0a45e4ded14021cf0bc28c456f74', 270 | tarball: `${config.registries[0]}/${tarball2Path}` 271 | } 272 | } 273 | } 274 | }) 275 | let invocations = 0 276 | 277 | upstreamModules[`/${moduleName}`] = (request, response) => { 278 | response.statusCode = 200 279 | invocations++ 280 | 281 | if (invocations === 1) { 282 | response.end(manifest1) 283 | } else { 284 | response.end(manifest2) 285 | } 286 | } 287 | upstreamModules[`/${tarball1Path}`] = (request, response) => { 288 | response.statusCode = 200 289 | response.end(tarball1Content) 290 | } 291 | upstreamModules[`/${tarball2Path}`] = (request, response) => { 292 | response.statusCode = 200 293 | response.end(tarball2Content) 294 | } 295 | 296 | const result1 = await request({ 297 | uri: `${mirrorUrl}/${tarball1Path}` 298 | }) 299 | const result2 = await request({ 300 | uri: `${mirrorUrl}/${tarball2Path}` 301 | }) 302 | 303 | expect(result1).to.equal(tarball1Content) 304 | expect(result2).to.equal(tarball2Content) 305 | }) 306 | 307 | it('should proxy all other requests to the registry', async () => { 308 | const data = 'hello world' 309 | 310 | upstreamModules['/-/user/org.couchdb.user:dave'] = data 311 | 312 | const result = await request({ 313 | uri: `${mirrorUrl}/-/user/org.couchdb.user:dave`, 314 | method: 'put' 315 | }) 316 | 317 | expect(result.trim()).to.equal(data.trim()) 318 | }) 319 | 320 | it('should retry when 404s are encountered', async () => { 321 | const moduleName = `module-404-${hat()}` 322 | const data = JSON.stringify({ 323 | name: moduleName, 324 | _rev: '12345', 325 | versions: {} 326 | }) 327 | let invocations = 0 328 | 329 | upstreamModules[`/${moduleName}`] = (request, response) => { 330 | invocations++ 331 | 332 | if (invocations === 1) { 333 | response.statusCode = 404 334 | return response.end('404') 335 | } 336 | 337 | response.statusCode = 200 338 | return response.end(data) 339 | } 340 | 341 | await request({ 342 | uri: `${mirrorUrl}/${moduleName}` 343 | }) 344 | 345 | expect(invocations).to.equal(2) 346 | }) 347 | 348 | it('should not save tarball CID when shasums do not match', async () => { 349 | const moduleName = `module-${hat()}` 350 | const tarballPath = `${moduleName}/-/${moduleName}-1.0.0.tgz` 351 | const tarballContent = 'tarball content' 352 | const manifest = JSON.stringify({ 353 | _rev: '12345', 354 | name: moduleName, 355 | versions: { 356 | '1.0.0': { 357 | dist: { 358 | tarball: `${config.registries[0]}/${tarballPath}`, 359 | shasum: 'nope!' 
360 | } 361 | } 362 | } 363 | }) 364 | 365 | upstreamModules[`/${moduleName}`] = (request, response) => { 366 | response.statusCode = 200 367 | response.end(manifest) 368 | } 369 | upstreamModules[`/${tarballPath}`] = (request, response) => { 370 | response.statusCode = 200 371 | response.end(tarballContent) 372 | } 373 | 374 | await request({ 375 | uri: `${mirrorUrl}/${tarballPath}`, 376 | simple: false 377 | }) 378 | 379 | // let the download be processed 380 | await delay(1000) 381 | 382 | const updated = JSON.parse(await toBuffer(mirror.app.locals.ipfs.files.read(`${baseDir}/${moduleName}`))) 383 | 384 | expect(updated.versions['1.0.0'].dist.cid).to.not.be.ok() 385 | }) 386 | 387 | it('should process an update received over pubsub', async () => { 388 | const moduleName = `updated-module-name-${hat()}` 389 | const manifest = JSON.stringify({ 390 | _rev: '12345', 391 | name: moduleName, 392 | versions: { 393 | '1.0.0': { 394 | dist: { 395 | tarball: `${mirrorUrl}/${moduleName}/-/${moduleName}-1.0.0.tgz`, 396 | shasum: '123', 397 | cid: '456', 398 | source: `${config.registries[0]}/${moduleName}/-/${moduleName}-1.0.0.tgz` 399 | } 400 | } 401 | } 402 | }, null, 2) 403 | 404 | try { 405 | await request({ 406 | uri: `${mirrorUrl}/${moduleName}` 407 | }) 408 | } catch (err) { 409 | expect(err.message).to.include(`${moduleName} not found`) 410 | } 411 | 412 | const { cid: packumentCid } = await replicationMaster.ipfs.add(manifest) 413 | await replicationMaster.ipfs.files.cp(`/ipfs/${packumentCid}`, `${replicationMaster.config.ipfs.prefix}/${moduleName}`, { 414 | parents: true 415 | }) 416 | const { cid: rootCid } = await replicationMaster.ipfs.files.stat(replicationMaster.config.ipfs.prefix) 417 | 418 | await replicationMaster.ipfs.pubsub.publish(replicationMaster.config.pubsub.topic, Buffer.from( 419 | JSON.stringify({ 420 | type: 'update', 421 | module: moduleName, 422 | cid: rootCid.toV1().toBaseEncodedString('base32') 423 | }) 424 | )) 425 | 426 | await delay(5000) 427 | 428 | const packument = await request({ 429 | uri: `${mirrorUrl}/${moduleName}` 430 | }) 431 | 432 | expect(packument).to.deep.equal(manifest) 433 | }) 434 | }) 435 | --------------------------------------------------------------------------------
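The final pubsub test in mirror.spec.js exercises the whole update path: the replication master adds the new packument to IPFS, copies it into its MFS root, stats the root for its new CID, and announces that CID on the shared pubsub topic as a JSON message of the shape { type: 'update', module, cid }. A minimal consumer-side sketch of that announcement handling follows. It is illustrative only: it assumes an `ipfs` instance (js-ipfs or ipfs-http-client of the vintage used above) plus the topic string the master publishes from its root route, and `followUpdates` is a hypothetical helper, not part of the sources in this repository.

'use strict'

// Hypothetical subscriber for the replication master's update announcements.
// Assumes `ipfs` is a js-ipfs/ipfs-http-client instance and `topic` is the
// pubsub topic string reported by the master's `/` route.
async function followUpdates (ipfs, topic) {
  await ipfs.pubsub.subscribe(topic, (message) => {
    let update

    try {
      // announcements are JSON-encoded buffers, as in the test above
      update = JSON.parse(Buffer.from(message.data).toString('utf8'))
    } catch (err) {
      return // ignore anything that is not valid JSON
    }

    if (update.type === 'update') {
      // `update.module` names the changed packument; `update.cid` is the
      // base32 CID of the master's new MFS root that contains it
      console.info(`module ${update.module} updated, new root ${update.cid}`)
    }
  })
}

The test itself demonstrates the other half of the exchange: once the master has published such a message, the mirror picks it up and serves the updated packument from its own MFS tree.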