├── infrastructure ├── deploy │ ├── data │ │ └── certbot │ │ │ └── .gitkeep │ ├── controller.png │ ├── nginx │ │ ├── Dockerfile │ │ ├── run.sh │ │ └── nginx.conf │ ├── docker-compose.yaml │ ├── docker-compose.prod.yaml │ ├── README.md │ └── init-letsencrypt.sh ├── playbooks │ ├── roles │ │ ├── redeploy │ │ │ ├── handlers │ │ │ │ └── main.yaml │ │ │ ├── tasks │ │ │ │ └── main.yaml │ │ │ └── templates │ │ │ │ └── runner.service.j2 │ │ ├── logging │ │ │ ├── handlers │ │ │ │ └── main.yaml │ │ │ ├── templates │ │ │ │ └── filebeat.yml.j2 │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── common │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ └── security │ │ │ └── tasks │ │ │ └── main.yaml │ ├── controller.yaml │ ├── README.md │ └── benchmarks.yaml ├── prod-infrastructure.png ├── grafana │ ├── duplicate-graph.png │ ├── select-measurement.png │ └── README.md ├── inventory │ ├── inventory.yaml │ └── group_vars │ │ ├── controllers.yaml │ │ └── minions.yaml ├── ci │ └── dockerfile └── README.md ├── tests ├── fixtures │ └── .gitignore ├── browser │ ├── public │ │ ├── favicon.ico │ │ └── index.html │ ├── src │ │ ├── components │ │ │ ├── table.css │ │ │ ├── localState.js │ │ │ ├── getId.js │ │ │ ├── initialize-node.js │ │ │ ├── table.js │ │ │ ├── test-row.js │ │ │ ├── add-local-file.js │ │ │ └── peer-transfer.js │ │ ├── index.js │ │ ├── App.js │ │ ├── index.css │ │ └── App.css │ ├── README.md │ └── package.json ├── util │ ├── build-browser-test.sh │ ├── getCommit.sh │ ├── create-privateKey.js │ ├── getBranch.sh │ ├── get-commit.js │ └── create-files.js ├── constants │ └── index.js ├── lib │ ├── clean.js │ ├── runner.js │ ├── output.js │ ├── node-factory.js │ ├── fixtures.js │ └── create-node.js ├── benchmarks.sh ├── getIpfs.sh ├── upload-ipfs.sh ├── test.template ├── init-node.http.js ├── unit │ ├── output.test.js │ └── fixtures.test.js ├── config │ ├── default-config-browser.json │ ├── default-config.json │ ├── index.js │ ├── default-config-go.json │ ├── private-key.json │ └── go-configs.json ├── init-node.browser.js ├── local-add.go.js ├── init-node.js ├── local-add.js ├── add-multi-kb.browser.js ├── package.json ├── local-extract.js ├── add-multi-kb.js ├── local-add.browser.js ├── peer-transfer.browser.js ├── local-transfer.js ├── extract-js2.go.js ├── pubsub-message.js ├── extract-go2.js ├── multi-peer-transfer.js └── schema │ └── results.js ├── .dockerignore ├── architecture.png ├── runner ├── import-hover.png ├── outfile.js ├── lib │ ├── schema │ │ ├── restart.js │ │ ├── header.js │ │ ├── benchmark.js │ │ ├── get.js │ │ └── add.js │ └── configBenchmarks.js ├── cli.js ├── retrieve.js ├── local.js ├── package.json ├── ipfs.js ├── compress.js ├── test │ ├── parse.js │ ├── compress.js │ ├── configBenchmarks.js │ └── queue.js ├── provision.js ├── remote.js ├── persistence.js ├── README.md ├── queue.js ├── index.js ├── config.js └── runner.js ├── scripts ├── runProdEnv.sh ├── runLocalEnv.sh ├── README.md └── common.sh ├── Dockerfile-runner ├── .gitignore ├── LICENSE ├── CONTRIBUTING.md ├── .circleci └── config.yml ├── docker-compose.yml └── README.md /infrastructure/deploy/data/certbot/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/fixtures/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore -------------------------------------------------------------------------------- /.dockerignore: 
-------------------------------------------------------------------------------- 1 | runner/node_modules 2 | test/node 3 | data -------------------------------------------------------------------------------- /architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jimpick/benchmarks/master/architecture.png -------------------------------------------------------------------------------- /runner/import-hover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jimpick/benchmarks/master/runner/import-hover.png -------------------------------------------------------------------------------- /tests/browser/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jimpick/benchmarks/master/tests/browser/public/favicon.ico -------------------------------------------------------------------------------- /tests/browser/src/components/table.css: -------------------------------------------------------------------------------- 1 | .table { 2 | height: 100%; 3 | width: 100%; 4 | min-width: 3000px; 5 | } 6 | -------------------------------------------------------------------------------- /tests/util/build-browser-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd browser 4 | pwd 5 | npm install --loglevel=error 6 | npm run build -------------------------------------------------------------------------------- /infrastructure/deploy/controller.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jimpick/benchmarks/master/infrastructure/deploy/controller.png -------------------------------------------------------------------------------- /infrastructure/playbooks/roles/redeploy/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | - name: reload systemctl 2 | systemd: 3 | daemon_reload: yes -------------------------------------------------------------------------------- /infrastructure/prod-infrastructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jimpick/benchmarks/master/infrastructure/prod-infrastructure.png -------------------------------------------------------------------------------- /infrastructure/grafana/duplicate-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jimpick/benchmarks/master/infrastructure/grafana/duplicate-graph.png -------------------------------------------------------------------------------- /infrastructure/grafana/select-measurement.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jimpick/benchmarks/master/infrastructure/grafana/select-measurement.png -------------------------------------------------------------------------------- /scripts/runProdEnv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODE=prod 4 | SCRIPTPATH=$(dirname "$0") 5 | source $SCRIPTPATH/common.sh 6 | checkParam $1 7 | 8 | docker-compose $FILES $OP -------------------------------------------------------------------------------- /infrastructure/playbooks/roles/logging/handlers/main.yaml: 
-------------------------------------------------------------------------------- 1 | - name: reload filebeat 2 | systemd: 3 | state: restarted 4 | daemon_reload: yes 5 | name: filebeat
-------------------------------------------------------------------------------- /infrastructure/inventory/inventory.yaml: -------------------------------------------------------------------------------- 1 | all: 2 | children: 3 | controllers: 4 | hosts: 5 | 63.33.104.238 6 | minions: 7 | hosts: 8 | 147.75.33.155
-------------------------------------------------------------------------------- /infrastructure/deploy/nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:alpine 2 | 3 | COPY nginx.conf /etc/nginx/conf.d/default.conf 4 | COPY run.sh /usr/local/bin/run 5 | 6 | ENTRYPOINT ["/usr/local/bin/run"]
-------------------------------------------------------------------------------- /runner/outfile.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const config = require('./config') 4 | 5 | // Parse the JSON emitted by a benchmark command; the raw output string is supplied by the caller. 6 | const retrieveCommand = (strResult) => { 7 | try { 8 | return JSON.parse(strResult) 9 | } catch (e) { 10 | config.log.error(e, 'Unable to parse command output') 11 | throw e 12 | } 13 | } 14 | 15 | module.exports = retrieveCommand
-------------------------------------------------------------------------------- /tests/util/getCommit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IPFS_PATH=$1 4 | 5 | cd ${IPFS_PATH} 6 | if [ -d js-ipfs/.git ] 7 | then 8 | cd js-ipfs 9 | git rev-parse HEAD 10 | else 11 | echo "no commit" 12 | fi 13 | 14 | 15 |
-------------------------------------------------------------------------------- /tests/util/create-privateKey.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const PeerId = require('peer-id') 4 | 5 | PeerId.create({ bits: 1024 }, (err, id) => { 6 | if (err) { throw err } 7 | console.log(JSON.stringify(id.toJSON(), null, 2)) 8 | })
-------------------------------------------------------------------------------- /tests/util/getBranch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IPFS_PATH=$1 4 | 5 | cd ${IPFS_PATH} 6 | if [ -d js-ipfs/.git ] 7 | then 8 | cd js-ipfs 9 | git branch | grep \* | cut -d ' ' -f2 10 | else 11 | echo "no branch" 12 | fi
-------------------------------------------------------------------------------- /infrastructure/inventory/group_vars/controllers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # user account used to ssh the hosts 3 | remote_user: ubuntu 4 | remote_folder: /data/ipfs-benchmarks/ 5 | ansible_ssh_private_key_file: "~/.ssh/id_rsa_ipfs" 6 | nodejs_version: "10.x"
-------------------------------------------------------------------------------- /tests/constants/index.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | export const smallFile = 'smallfile' 4 | export const largeFile = 'largefile' 5 | 6 | // sub-test names 7 | 8 | export const emptyRepo = 'empty-repo' 9 | export const populatedRepo = 'populated-repo'
-------------------------------------------------------------------------------- /scripts/runLocalEnv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODE=local 4 | SCRIPTPATH=$(dirname "$0") 5 | source $SCRIPTPATH/common.sh 6 | checkParam $1 7 | 8 | 9 | mkdir -p $SCRIPTPATH/../data 10 | docker-compose -f $SCRIPTPATH/../infrastructure/deploy/docker-compose.yaml $OP
-------------------------------------------------------------------------------- /tests/browser/src/components/localState.js: -------------------------------------------------------------------------------- 1 | const localState = { 2 | id: null, 3 | version: null, 4 | protocol_version: null, 5 | added_file_hash: null, 6 | added_file_contents: null, 7 | time_s: null, 8 | time_ms: null, 9 | ready: '' 10 | } 11 | 12 | export default localState
-------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | ## Convenience scripts 2 | 3 | These scripts have built-in help. 4 | 5 | 1. `sudo ./runLocalEnv.sh up` runs InfluxDB and Grafana locally 6 | 2. `sudo ./runProdEnv.sh up` runs the above plus the containerized `runner` and an nginx proxy with Let's Encrypt [certbot](https://github.com/certbot/certbot). 7 | 8 |
-------------------------------------------------------------------------------- /infrastructure/deploy/nginx/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # get Host IP so we can route traffic for the runner to it 4 | hostip=$(ip route show | awk '/default/ {print $3}') 5 | echo "Host ip = $hostip" 6 | # add host to /etc/hosts as "runner" so nginx can send traffic to it. 7 | echo "$hostip runner" >> /etc/hosts 8 | cat /etc/hosts 9 | 10 | exec nginx -g 'daemon off;'
-------------------------------------------------------------------------------- /Dockerfile-runner: -------------------------------------------------------------------------------- 1 | FROM nearform/alpine3-s2i-nodejs:10 2 | 3 | # Create app directory 4 | WORKDIR /opt/app-root/src 5 | 6 | COPY runner runner 7 | USER root 8 | RUN cd runner && npm install 9 | USER 1001 10 | COPY infrastructure/inventory/inventory.yaml infrastructure/inventory/inventory.yaml 11 | 12 | EXPOSE 3000 13 | 14 | WORKDIR runner 15 | CMD [ "npm", "run", "start" ]
-------------------------------------------------------------------------------- /runner/lib/schema/restart.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const restartResponse = { 4 | $id: 'restartResponse', 5 | 200: { 6 | description: 'Successful response', 7 | type: 'object', 8 | properties: { 9 | id: { type: 'integer' }, 10 | restart: { type: 'boolean' } 11 | } 12 | } 13 | } 14 | 15 | module.exports = { 16 | restartResponse 17 | }
-------------------------------------------------------------------------------- /infrastructure/inventory/group_vars/minions.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # user account used to ssh the hosts 3 | remote_user: ubuntu 4 | # ansible_ssh_private_key_file: "~/.ssh/id_rsa_ipfs" 5 | # non-sudo user to be created for running process 6 | # action_user: grafana 7 | # action_user_group: docker 8 | nodejs_version: "10" 9 | # docker__users: ["ubuntu", "runner"] 10 | go_ipfs_version: v0.4.10
-------------------------------------------------------------------------------- /tests/lib/clean.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const rimraf = require('rimraf') 4 | const { repoPath } = require('../package.json').config 5 | 6 | const peerRepos = () => { 7 | rimraf(repoPath, function () {
8 | console.log(`Removed ${repoPath}`) 9 | }) 10 | } 11 | 12 | const all = () => { 13 | peerRepos() 14 | } 15 | 16 | module.exports = { 17 | peerRepos, 18 | all 19 | }
-------------------------------------------------------------------------------- /tests/browser/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import ReactDOM from 'react-dom' 3 | import './index.css' 4 | import 'tachyons' 5 | import 'ipfs-css' 6 | import App from './App' 7 | import { HashRouter, Route, Switch } from 'react-router-dom' 8 | 9 | ReactDOM.render(<HashRouter><Switch><Route component={App} /></Switch></HashRouter>, document.getElementById('root'))
-------------------------------------------------------------------------------- /runner/lib/schema/header.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const config = require('../../config') 4 | 5 | const headers = { 6 | $id: 'protect', 7 | type: 'object', 8 | properties: { 9 | 'x-ipfs-benchmarks-api-key': { 10 | type: 'string', 11 | const: config.server.apikey 12 | } 13 | }, 14 | required: ['x-ipfs-benchmarks-api-key'] 15 | } 16 | 17 | module.exports = { 18 | headers 19 | }
-------------------------------------------------------------------------------- /tests/benchmarks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #! Add 100 1 KB files to local 4 | node add-multi-kb 5 | #! Initialize node without pre-generated key 6 | node init-node 7 | #! Add file to local repo using unixFS engine 8 | node local-add 9 | #! Get file to local repo 10 | node local-extract 11 | #! Transfer file between two local nodes 12 | node local-transfer 13 | #! transfer files from 4 nodes 14 | node multi-peer-transfer
-------------------------------------------------------------------------------- /tests/browser/src/App.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from 'react' 2 | import './App.css' 3 | import 'react-table/react-table.css' 4 | import Table from './components/table' 5 | class App extends Component { 6 | render () { 7 | return <div className='App'> 8 | <header className='App-header'> 9 | IPFS Browser Benchmark 10 | </header> 11 | <Table /> 12 | </div> 13 | } 14 | } 15 | export default App
-------------------------------------------------------------------------------- /infrastructure/playbooks/roles/redeploy/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install packages based on package.json. 3 | become: no 4 | npm: 5 | path: "{{ item.path }}" 6 | with_items: 7 | - { path: "{{remote_folder}}/runner" } 8 | - { path: "{{remote_folder}}/tests" } 9 | - name: Ensure Unit file 10 | template: 11 | src: runner.service.j2 12 | dest: /lib/systemd/system/runner.service 13 | mode: 644 14 | notify: 15 | - reload systemctl
-------------------------------------------------------------------------------- /tests/browser/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | padding: 0; 4 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", 5 | "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", 6 | sans-serif; 7 | -webkit-font-smoothing: antialiased; 8 | -moz-osx-font-smoothing: grayscale; 9 | } 10 | 11 | code { 12 | font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", 13 | monospace; 14 | }
-------------------------------------------------------------------------------- /tests/browser/src/components/getId.js: -------------------------------------------------------------------------------- 1 | 2 | var util = require('util') 3 | require('util.promisify').shim() 4 | const getId = async (node, delta, state) => { 5 | const id = util.promisify(node.id) 6 | const res = await id() 7 | const results = { ...state } 8 | results.id = res.id 9 | results.version = res.agentVersion 10 | results.protocol_version = res.protocolVersion 11 | results.time_s = delta[0] 12 | results.time_ms = delta[1] 13 | results.ready = 'ready' 14 | return results 15 | } 16 | 17 | export default getId
-------------------------------------------------------------------------------- /tests/browser/public/index.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html lang="en"> 3 | <head> 4 | <meta charset="utf-8"> 5 | <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> 6 | <link rel="shortcut icon" href="%PUBLIC_URL%/favicon.ico"> 7 | <title> 8 | React App 9 | </title> 10 | </head> 11 | <body> 12 | <!-- This page is a shell; the React app from src/index.js 13 | is mounted into the root element below at runtime. --> 14 | <div id="root"></div> 15 | </body> 16 | </html> 17 |
-------------------------------------------------------------------------------- /runner/cli.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const argv = require('yargs').argv 4 | const get = require('simple-get') 5 | const config = require('./config') 6 | 7 | const opts = { 8 | url: `http://localhost:${config.server.port}/`, 9 | body: { 10 | commit: argv.commit || '', 11 | clinic: argv.clinic || 'true' 12 | }, 13 | json: true, 14 | headers: { 15 | 'x-ipfs-benchmarks-api-key': config.server.apikey 16 | } 17 | } 18 | get.post(opts, function (err, res) { 19 | if (err) throw err 20 | res.pipe(process.stdout) // `res` is a stream 21 | })
-------------------------------------------------------------------------------- /tests/util/get-commit.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const util = require('util') 4 | const execute = util.promisify(require('child_process').exec) 5 | 6 | const getIpfsCommit = async () => { 7 | const out = await execute(`${__dirname}/getCommit.sh ${__dirname}/../../`) 8 | return out.stdout.replace(/\n$/, '') 9 | } 10 | const getBranchName = async () => { 11 | const out = await execute(`${__dirname}/getBranch.sh ${__dirname}/../../`) 12 | return out.stdout.replace(/\n$/, '') 13 | } 14 | module.exports = { 15 | getIpfsCommit, 16 | getBranchName }
-------------------------------------------------------------------------------- /infrastructure/ci/dockerfile: -------------------------------------------------------------------------------- 1 | FROM circleci/node:dubnium-browsers 2 | 3 | USER root 4 | RUN apt-get update \ 5 | && apt-get -y install --no-install-recommends \ 6 | python-all-dev python-pip rsync \ 7 | python-yaml python-jinja2 python-httplib2 python-paramiko python-pkg-resources python-keyczar \ 8 | && apt-get clean \ 9 | && rm -rf /var/cache/apt/archives/* /var/lib/apt/lists/* 10 | 11 | RUN pip install ansible && \ 12 | pip install passlib && \ 13 | pip install docker-compose 14 | 15 | USER circleci 16 | 17 | RUN ansible-galaxy install nickjj.docker && \ 18 | ansible-galaxy install geerlingguy.nodejs
-------------------------------------------------------------------------------- /tests/browser/src/App.css: -------------------------------------------------------------------------------- 1 | .App { 2 | text-align: center; 3 | } 4 | 5 | .App-logo { 6 | animation: App-logo-spin infinite 20s linear; 7 | height: 40vmin; 8 | } 9 | 10 | .App-header { 11 | background-color: #282c34; 12 | min-height: 100vh; 13 | display: flex; 14 | flex-direction: column; 15 | align-items: center; 16 | justify-content: center; 17 | font-size: calc(10px + 2vmin); 18 | color: white; 19 | } 20 | 21 | .App-link { 22 | color: #61dafb; 23 | } 24 | 25 | @keyframes App-logo-spin { 26 | from { 27 | transform: rotate(0deg); 28 | } 29 | to { 30 | transform: rotate(360deg); 31 | } 32 | }
-------------------------------------------------------------------------------- /infrastructure/playbooks/roles/logging/templates/filebeat.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | filebeat.inputs: 3 | - type: log 4 | enabled: true 5 | paths: 6 | - /var/log/runner.log 7 | - /var/log/runner.error.log 8 | filebeat.config.modules: 9 | path: ${path.config}/modules.d/*.yml 10 | reload.enabled: false 11 | setup.template.settings: 12 | index.number_of_shards: 3 13 | setup.kibana: 14 | 
output.logstash: 15 | hosts: ["logstash.locotorp.info:5045"] 16 | ssl.certificate_authorities: ["/etc/filebeat/ca.crt"] 17 | processors: 18 | - add_host_metadata: ~ 19 | - add_cloud_metadata: ~ 20 | - decode_json_fields: 21 | fields: ["message"] 22 | target: ""
-------------------------------------------------------------------------------- /runner/lib/schema/benchmark.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const clinicOperation = { 4 | type: 'array', 5 | items: { 6 | type: 'object', 7 | properties: { 8 | fileSet: { type: 'string' } 9 | } 10 | } 11 | } 12 | const benchmarkResponse = { 13 | $id: 'benchmarkResponse', 14 | 200: { 15 | description: 'Successful response', 16 | type: 'array', 17 | items: { 18 | type: 'object', 19 | properties: { 20 | name: { type: 'string' }, 21 | doctor: clinicOperation, 22 | flame: clinicOperation, 23 | bubbleProf: clinicOperation 24 | } 25 | } 26 | } 27 | } 28 | 29 | module.exports = { 30 | benchmarkResponse 31 | }
-------------------------------------------------------------------------------- /tests/getIpfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IPFS_PATH=$1 4 | COMMIT=$2 5 | 6 | cd ${IPFS_PATH} 7 | if [ ! -d js-ipfs/.git ] 8 | then 9 | echo "> No git repo for js-ipfs, cloning..." 10 | git clone https://github.com/ipfs/js-ipfs.git 2>&1 11 | cd js-ipfs 12 | else 13 | echo "> Git repo for js-ipfs found, updating..." 14 | cd js-ipfs 15 | git checkout master 2>&1 16 | git pull 2>&1 17 | fi 18 | 19 | if [ -z "$COMMIT" ] 20 | then 21 | echo "> using MASTER" 22 | else 23 | echo "> using commit: $COMMIT" 24 | git config --global advice.detachedHead false 25 | git checkout $COMMIT 2>&1 26 | fi 27 | echo "run npm install for js-ipfs" 28 | source ~/.nvm/nvm.sh 29 | npm install --loglevel=error
-------------------------------------------------------------------------------- /tests/util/create-files.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { generateFiles, verifyTestFiles } = require('../lib/fixtures') 4 | 5 | /** 6 | * This utility will verify or create files needed for the tests. 7 | * The config is at ../lib/fixtures.file 8 | * 9 | * @async 10 | * @function verifyAndCreateFiles 11 | */ 12 | const verifyAndCreateFiles = async () => { 13 | const valid = await verifyTestFiles() 14 | if (!valid) { 15 | console.log('Some files missing. Generating files') 16 | await generateFiles() 17 | } else { 18 | console.log('Files Verified') 19 | } 20 | } 21 | if (require.main === module) { 22 | verifyAndCreateFiles() 23 | } 24 | module.exports = verifyAndCreateFiles
-------------------------------------------------------------------------------- /infrastructure/playbooks/roles/redeploy/templates/runner.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=IPFS benchmark runner 3 | Requires=network.target 4 | After=network.target 5 | [Service] 6 | ExecStart=/bin/bash -c "IPFS_PASSWORD=$(cat /etc/pwd.txt) /usr/bin/node {{ remote_folder }}/runner/index.js" 7 | User={{ remote_user }} 8 | Restart=always 9 | Environment="STAGE=remote" 10 | Environment="BENCHMARK_USER={{ remote_user }}" 11 | Environment="LOGLEVEL=debug" 12 | Environment="CLINIC=true" 13 | Environment="HOSTNAME=benchmarks.ipfs.team" 14 | Environment="DATADIR=/data/tmp" 15 | EnvironmentFile=-/etc/environment 16 | StandardOutput=file:/var/log/runner.log 17 | StandardError=file:/var/log/runner.error.log 18 | [Install] 19 | WantedBy=multi-user.target
-------------------------------------------------------------------------------- /infrastructure/playbooks/roles/logging/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add the filebeat apt key 3 | shell: | 4 | wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add - 5 | - name: Add filebeat apt repository 6 | become: yes 7 | apt_repository: 8 | repo: deb https://artifacts.elastic.co/packages/6.x/apt stable main 9 | state: present 10 | - name: Update repositories cache and install filebeat 11 | become: yes 12 | apt: 13 | name: 14 | - filebeat 15 | update_cache: true 16 | state: present 17 | - name: Ensure filebeat config file 18 | become: yes 19 | template: 20 | src: filebeat.yml.j2 21 | dest: /etc/filebeat/filebeat.yml 22 | mode: "u=rwx,g=r,o=r" 23 | notify: 24 | - reload filebeat
-------------------------------------------------------------------------------- /tests/browser/README.md: -------------------------------------------------------------------------------- 1 | ## Browser Benchmarks 2 | 3 | This is a single-page app that contains benchmarks for running IPFS in the browser. 4 | The idea is that each test has a button to activate the test and give feedback on the time it takes. 5 | 6 | This app is integrated with the benchmark runner, where the results are fed to the IPFS dashboard. 7 | 8 | 9 | ### `npm run build` 10 | 11 | Builds the app for production to the `build` folder.
12 | It correctly bundles React in production mode and optimizes the build for the best performance. 13 | 14 | The build is minified and the filenames include the hashes.
15 | Your app is ready to be deployed! 16 | 17 | The build/index.html is available via web browser and does not need a web server to view the test table. 18 | 19 | 20 |
-------------------------------------------------------------------------------- /scripts/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage info 4 | usage() { 5 | FILESTRING="/infrastructure/deploy/docker-compose.yaml" 6 | if [[ $0 == *"runProdEnv"* ]];then 7 | FILESTRING+=" plus overrides from /infrastructure/deploy/docker-compose.prod.yaml" 8 | fi 9 | cat << EOF 10 | 11 | Usage: ${0##*/} [COMMAND] 12 | 13 | Run a docker-compose COMMAND for local purposes using the compose file 14 | $FILESTRING 15 | COMMAND any of the supported COMMANDs from https://docs.docker.com/compose/reference/overview/ 16 | 17 | EOF 18 | } 19 | 20 | checkParam () { 21 | if [ -z "$1" ]; then 22 | usage 23 | exit 1 24 | else 25 | OP=$1 26 | fi 27 | } 28 | 29 | FILES="-f $SCRIPTPATH/../infrastructure/deploy/docker-compose.yaml -f $SCRIPTPATH/../infrastructure/deploy/docker-compose.prod.yaml"
-------------------------------------------------------------------------------- /tests/upload-ipfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OUTPATH=$1 4 | TESTNAME=$2 5 | 6 | # Usage info 7 | usage() { 8 | cat << EOF 9 | 10 | Usage: ${0##*/} <outputPath> <testName> 11 | 12 | Upload a directory to ipfs and store the SHA in a json file at <outputPath>/<testName>.json 13 | 14 | outputPath something like '/tmp/out' 15 | TestName something like 'localAdd' 16 | 17 | EOF 18 | } 19 | 20 | if [ -z "$TESTNAME" ] || [ -z "$OUTPATH" ] 21 | then 22 | usage 23 | exit 1 24 | fi 25 | 26 | # doesn't work yet 27 | # ipfs-cluster-ctl --host /dnsaddr/cluster.ipfs.io --basic-auth $IPFSUSER:$IPFSPWD add /tmp/out/$TESTNAME 28 | SHA="c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" 29 | mkdir -p $OUTPATH 30 | # echo some sha for now 31 | echo "Writing IPFS sha to $OUTPATH/$TESTNAME.json" 32 | echo "{ \"sha\": \"$SHA\" }" > $OUTPATH/$TESTNAME.json
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tests/node_modules 2 | tests/browser/node_modules 3 | runner/node_modules 4 | runner/test/db 5 | local/data 6 | tests/dirHash.txt 7 | tests/out 8 | tests/data 9 | tests/package-lock.json 10 | tests/browser/package-lock.json 11 | js-ipfs 12 | infrastructure/inventory/inventory.local.yaml 13 | data 14 | infrastructure/playbooks/*.retry 15 | *clinic* 16 | tests/node_trace.*.log 17 | 18 | # browser dependencies 19 | tests/browser/node_modules 20 | tests/browser/.pnp 21 | tests/browser/.pnp.js 22 | 23 | # testing 24 | tests/browser/coverage 25 | 26 | # production 27 | tests/browser/build 28 | 29 | # misc 30 | tests/browser/.DS_Store 31 | tests/browser/.env.local 32 | tests/browser/.env.development.local 33 | tests/browser/.env.test.local 34 | tests/browser/.env.production.local 35 | 36 | tests/browser/npm-debug.log* 37 | tests/browser/yarn-debug.log* 38 | tests/browser/yarn-error.log*
-------------------------------------------------------------------------------- /tests/test.template: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const { build } = require('./schema/results') 5 | const { file } = require('./lib/fixtures') 6 | const run = require('./lib/runner') 7 | 8 | 
async function ****TEST NAME***** (node, name, warmup, fileSet, version) { 9 | 10 | 11 | *** SETUP **** 12 | const filePath = await file(fileSet) 13 | const start = process.hrtime() 14 | 15 | **** ADD ACTION to be tested **** 16 | 17 | const end = process.hrtime(start) 18 | 19 | // Pass in test output to build and return 20 | 21 | return build({ 22 | name: name, 23 | warmup: warmup, 24 | file: filePath, 25 | meta: { version: version }, 26 | description: '****DESCRIPTION****', 27 | file_set: fileSet, 28 | duration: { 29 | s: end[0], 30 | ms: end[1] / 1000000 31 | }, 32 | subtest: subtest 33 | }) 34 | } 35 | 36 | run(****TEST NAME***** )
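For illustration, a hypothetical benchmark built from this template might look like the following sketch; the test name `exampleLocalAdd`, the `node.add(filePath)` call and the description string are assumptions for the example (the real tests, such as local-add.js, follow this same shape):

```js
'use strict'

const { build } = require('./schema/results')
const { file } = require('./lib/fixtures')
const run = require('./lib/runner')

// Hypothetical example: measure how long it takes to add one fixture file to a local node.
async function exampleLocalAdd (node, name, warmup, fileSet, version) {
  // SETUP: resolve the fixture path for the requested file set
  const filePath = await file(fileSet)
  const start = process.hrtime()
  // ACTION under test; the add API on `node` is assumed here for the sketch
  await node.add(filePath)
  const end = process.hrtime(start)
  return build({
    name: name,
    warmup: warmup,
    file: filePath,
    meta: { version: version },
    description: 'Example: add a file to the local repo',
    file_set: fileSet,
    duration: {
      s: end[0],
      ms: end[1] / 1000000
    }
  })
}

run(exampleLocalAdd)
```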
-------------------------------------------------------------------------------- /runner/retrieve.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const Rsync = require('rsync') 4 | const fs = require('fs') 5 | 6 | module.exports = (config, run, targetDir) => { 7 | const targetPath = `${targetDir}/${run.benchmarkName}/${run.operation}/${run.fileSet}` 8 | fs.mkdirSync(targetPath, { recursive: true }) 9 | var rsync = new Rsync() 10 | .flags('avz') 11 | .shell(`ssh -i ${config.benchmarks.key}`) 12 | .source(`${config.benchmarks.user}@${config.benchmarks.host}:${config.outFolder}/${run.benchmarkName}/`) 13 | .destination(targetPath) 14 | config.log.info(`Retrieving the clinic files with [${rsync.command()}]`) 15 | return new Promise((resolve, reject) => { 16 | rsync.execute(function (error, code, cmd) { 17 | if (error) { 18 | reject(error) 19 | } else { 20 | config.log.info(code) 21 | config.log.info(cmd) 22 | resolve(targetPath) 23 | } 24 | }) 25 | }) 26 | }
-------------------------------------------------------------------------------- /infrastructure/deploy/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.2" 2 | services: 3 | influxdb: 4 | image: influxdb:1.7-alpine 5 | volumes: 6 | - type: volume 7 | source: grafana 8 | target: /var/lib/influxdb 9 | volume: 10 | nocopy: true 11 | ports: 12 | - "8086:8086" 13 | environment: 14 | INFLUXDB_DB: benchmarks 15 | grafana: 16 | image: grafana/grafana:latest 17 | ports: 18 | - "3000:3000" 19 | volumes: 20 | - type: volume 21 | source: grafana 22 | target: /var/lib/grafana 23 | volume: 24 | nocopy: true 25 | depends_on: 26 | - influxdb 27 | links: 28 | - influxdb 29 | environment: 30 | GF_SERVER_ROOT_URL: http://localhost 31 | GF_AUTH_ANONYMOUS_ENABLED: 'true' 32 | GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer 33 | rendezvous: 34 | image: libp2p/websocket-star-rendezvous:release 35 | ports: 36 | - "9090:9090" 37 | environment: 38 | DISABLE_METRICS: 1 39 | volumes: 40 | influx: 41 | grafana:
-------------------------------------------------------------------------------- /tests/init-node.http.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { build } = require('./schema/results') 4 | const run = require('./lib/runner') 5 | const NodeFactory = require('./lib/node-factory') 6 | 7 | async function initializeNodeHttp (node, name, warmup, fileSet, version) { 8 | const start = process.hrtime() 9 | const nodeFactory = new NodeFactory() 10 | await nodeFactory.add('http', { 11 | 'Addresses': { 12 | 'API': '/ip4/127.0.0.1/tcp/6012', 13 | 'Gateway': '/ip4/127.0.0.1/tcp/9191', 14 | 'Swarm': [ 15 | '/ip4/0.0.0.0/tcp/7012', 16 | '/ip4/127.0.0.1/tcp/3022/ws' 17 | ] 18 | }, 19 | 'Bootstrap': [] 20 | }, { 'empty-repo': true }) 21 | const end = process.hrtime(start) 22 | await nodeFactory.stop('http') 23 | return build({ 24 | name: name, 25 | warmup: warmup, 26 | file: '', 27 | meta: { version: version }, 28 | description: 'Initialize node without pre-generated key', 29 | file_set: '', 30 | duration: { s: end[0], 31 | ms: end[1] / 1000000 } 32 | }) 33 | } 34 | 35 | run(initializeNodeHttp)
-------------------------------------------------------------------------------- /tests/unit/output.test.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const t = require('tap') 4 | const { createFilename, write } = require('../lib/output') 5 | const { resultsDTO, build, validate } = require('../schema/results') 6 | const test = t.test 7 | 8 | test('validate - results ', t => { 9 | t.plan(2) 10 | const e = validate(resultsDTO) 11 | t.equal(e, true) 12 | t.pass() 13 | }) 14 | 15 | test('validate - invalid results ', t => { 16 | t.plan(2) 17 | const valid = validate({ name: 0 }) 18 | t.equal(valid, false) 19 | t.pass() 20 | }) 21 | 22 | test('validate - create filename ', t => { 23 | t.plan(1) 24 | createFilename('out', resultsDTO) 25 | t.pass() 26 | }) 27 | 28 | test('validate - create error dir filename ', t => { 29 | t.plan(1) 30 | createFilename('out/error', { invalidefile: 'yes' }) 31 | t.pass() 32 | }) 33 | 34 | test('validate - write filename ', t => { 35 | t.plan(1) 36 | write(resultsDTO) 37 | t.pass() 38 | }) 39 | 40 | test('validate - write file to error directory ', t => { 41 | t.plan(1) 42 | write(build({ name: 0 })) 43 | t.pass() 44 | })
-------------------------------------------------------------------------------- /tests/unit/fixtures.test.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const t = require('tap') 4 | const { file, isDirectory } = require('../lib/fixtures') 5 | const test = t.test 6 | 7 | test('Files - file exists ', async t => { 8 | t.plan(2) 9 | const filepath = await file('one4mbfile') 10 | t.equal(filepath.includes('one4mbfile.txt'), true) 11 | t.pass() 12 | }) 13 | test('Files - file does not exist ', async t => { 14 | t.plan(2) 15 | const filepath = await file('NoOneKBFile') 16 | t.equal(typeof filepath === 'undefined', true) 17 | t.pass() 18 | }) 19 | test('Is not a Directory ', async t => { 20 | t.plan(2) 21 | const results = await isDirectory('OneKBFile') 22 | 23 | t.assert(!results) 24 | t.pass() 25 | }) 26 | 27 | test('Is a Directory ', async t => { 28 | t.plan(2) 29 | const results = await isDirectory('hundred1kbfile') 30 | 31 | t.assert(results) 32 | t.pass() 33 | }) 34 | test('File set ', async t => { 35 | t.plan(2) 36 | const results = await file('hundred1kbfile') 37 | console.log(results.length) 38 | t.assert(results.length === 100) 39 | t.pass() 40 | })
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) Protocol Labs, Inc. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 |
-------------------------------------------------------------------------------- /runner/lib/schema/get.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const getResponse = { 4 | $id: 'getResponse', 5 | 200: { 6 | description: 'Successful response', 7 | type: 'array', 8 | items: { 9 | type: 'object', 10 | properties: { 11 | jobId: { type: 'integer' }, 12 | work: { 13 | type: 'object', 14 | properties: { 15 | commit: { type: 'string' }, 16 | clinic: { 17 | type: 'object', 18 | properties: { 19 | enabled: { type: 'boolean' } 20 | } 21 | }, 22 | benchmarks: { 23 | type: 'object', 24 | properties: { 25 | tests: { 26 | type: 'array', 27 | items: { type: 'string' } 28 | } 29 | } 30 | }, 31 | remote: { type: 'boolean' }, 32 | nightly: { type: 'boolean' } 33 | } 34 | }, 35 | status: { type: 'string' }, 36 | queued: { type: 'string' }, 37 | started: { type: 'string' } 38 | } 39 | } 40 | } 41 | } 42 | 43 | module.exports = { 44 | getResponse 45 | }
-------------------------------------------------------------------------------- /tests/browser/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "browser", 3 | "version": "0.1.0", 4 | "private": true, 5 | "homepage": "./", 6 | "config": { 7 | "repoPath": "./data/ipfs/" 8 | }, 9 | "dependencies": { 10 | "browser-process-hrtime": "^1.0.0", 11 | "filereader-stream": "^2.0.0", 12 | "ipfs": "file:../../js-ipfs", 13 | "ipfs-css": "^0.12.0", 14 | "libp2p": "^0.24.4", 15 | "libp2p-websockets": "^0.12.2", 16 | "react": "^16.6.3", 17 | "react-dom": "^16.6.3", 18 | "react-router-dom": "^4.3.1", 19 | "react-scripts": "2.1.1", 20 | "react-table": "^6.8.6", 21 | "stream-iterators-utils": "^0.1.0", 22 | "tachyons": "^4.11.1", 23 | "util": "^0.11.1", 24 | "util.promisify": "^1.0.0", 25 | "webcrypto-shim": "^0.1.4" 26 | }, 27 | "scripts": { 28 | "start": "REACT_APP_REMOTE=true react-scripts start", 29 | "build": "REACT_APP_REMOTE=true react-scripts build", 30 | "startDev": "react-scripts start", 31 | "buildDev": "react-scripts build", 32 | "test": "react-scripts test", 33 | "eject": "react-scripts eject" 34 | }, 35 | "browserslist": [ 36 | ">0.2%", 37 | "not dead", 38 | "not ie <= 11", 39 | "not op_mini all" 40 | ] 41 | } 42 | --------------------------------------------------------------------------------
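Note that the `"ipfs": "file:../../js-ipfs"` dependency above assumes a js-ipfs checkout at the repository root. A minimal sketch of preparing that checkout and building the browser app with the scripts from this package.json (run from the repository root; the clone step mirrors tests/getIpfs.sh and tests/util/build-browser-test.sh):

```sh
# clone or update js-ipfs at the repository root (optionally pass a commit as $2)
./tests/getIpfs.sh "$(pwd)"

# install dependencies and produce the static build in tests/browser/build
cd tests/browser
npm install
npm run build
```
--------------------------------------------------------------------------------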
/infrastructure/playbooks/roles/common/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create remote_folder 3 | become: yes 4 | file: 5 | path: "{{ remote_folder }}" 6 | state: directory 7 | mode: 0755 8 | owner: ubuntu 9 | group: docker 10 | recurse: yes 11 | - name: create influxdb and grafana data dirs 12 | become: yes 13 | file: 14 | path: "{{ item.path }}" 15 | owner: "{{ item.uid | default('ubuntu') }}" 16 | group: "{{ item.guid | default('docker') }}" 17 | state: directory 18 | recurse: yes 19 | mode: 0755 20 | with_items: 21 | - { path: '/data/influxdb' } 22 | - { path: '/data/grafana', uid: 472, guid: docker } 23 | - { path: '/data/ipfs-db' } 24 | - { path: '/data/tmp' } 25 | - name: install ansible 26 | pip: 27 | name: ansible 28 | - name: install ansible modules 29 | become: no 30 | shell: ansible-galaxy install geerlingguy.nodejs 31 | - name: install ipfs-cluster-ctl 32 | shell: | 33 | wget https://dist.ipfs.io/ipfs-cluster-ctl/v0.7.0/ipfs-cluster-ctl_v0.7.0_linux-amd64.tar.gz 34 | tar zxvf ipfs-cluster-ctl_v0.7.0_linux-amd64.tar.gz 35 | cp ipfs-cluster-ctl/ipfs-cluster-ctl /usr/local/bin 36 | chmod +x /usr/local/bin/ipfs-cluster-ctl 37 | args: 38 | creates: /usr/local/bin/ipfs-cluster-ctl
-------------------------------------------------------------------------------- /tests/browser/src/components/initialize-node.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import IPFS from 'ipfs' 3 | import hrtime from 'browser-process-hrtime' 4 | import uuidv1 from 'uuid/v1' 5 | import 'react-table/react-table.css' 6 | import { once } from 'stream-iterators-utils' 7 | import getId from './getId' 8 | import localState from './localState' 9 | import TestRow from './test-row' 10 | class InitializeNode extends React.Component { 11 | constructor (props) { 12 | super(props) 13 | this.state = localState 14 | } 15 | 16 | async test (e) { 17 | // Create the IPFS node instance 18 | const start = hrtime() 19 | const node = new IPFS({ repo: String(uuidv1()) }) 20 | node.on('ready', () => { 21 | }) 22 | await once(node, 'ready') 23 | const delta = hrtime(start) 24 | const results = await getId(node, delta, this.state) 25 | this.setState(results) 26 | } 27 | render () { 28 | const name = 'initializeNode' 29 | const description = 'Initialize an IPFS node' 30 | return ( 31 | <TestRow 32 | type='button' 33 | name={name} 34 | description={description} 35 | test={(e) => this.test(e)} 36 | results={this.state} 37 | /> 38 | ) 39 | } 40 | } 41 | export default InitializeNode
-------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## repository organization 2 | ``` 3 | ├── docs 4 | ├── infrastructure 5 | │   ├── grafana 6 | │   ├── inventory 7 | │   ├── local 8 | │   └── playbooks 9 | ├── runner 10 | ├── scripts 11 | └── tests 12 | ``` 13 | 14 | ### docs 15 | Documentation and images for usage of this repository 16 | 17 | ### infrastructure 18 | * _grafana_: Any [Grafana](https://grafana.com/) configurations for dashboards and such. 19 | * _inventory_: Defines the hosts this project is targeting for production 20 | * _local_: Any infrastructure parts needed to run this in a local system. Requires `docker-compose` to be installed. It will run containers for [Grafana](https://grafana.com/) and [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/). See below for more details. 
21 | * _playbooks_: [Ansible](https://www.ansible.com/) playbooks run by the runner to provision the target benchmark system. 22 | 23 | ### runner 24 | Houses the `runner` code and configuration. `npm install` should be run in this folder. 25 | Use `npm test` to execute tests (TBD) 26 | 27 | ### scripts 28 | Various convenience scripts to make running things a bit easier 29 | 30 | ### tests 31 | Houses the code for the benchmarks. `npm install` should be run in this folder. 32 | Use `npm test` to execute tests
-------------------------------------------------------------------------------- /tests/browser/src/components/table.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import AddLocalFile from './add-local-file' 3 | import InitializeNode from './initialize-node' 4 | import PeerTransfer from './peer-transfer' 5 | import './table.css' 6 | export default class Table extends React.Component { 7 | render () { 8 | return ( 9 | <div className='table'> 10 | <div> 11 | <div> 12 | <div>Start</div> <div>Description</div> <div>About Node</div> <div>Results</div> 13 | </div> 14 | <div> 15 | <AddLocalFile /> 16 | <InitializeNode /> 17 | <PeerTransfer /> 18 | </div> 19 | </div> 20 | </div> 21 | ) 22 | } 23 | }
-------------------------------------------------------------------------------- /runner/local.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { spawn } = require('child_process') 4 | 5 | const config = require('./config') 6 | 7 | const run = (shell, name) => { 8 | config.log.info(`Running [${shell}] locally`) 9 | let args = shell.split(' ') 10 | let cmd = args[0] 11 | args.shift() 12 | 13 | return new Promise((resolve, reject) => { 14 | if (!shell) return reject(Error('shell command required')) 15 | let cmdInstance = spawn(cmd, args) 16 | let stdOut = '' 17 | let stdErr = '' 18 | const commandLogger = config.log.child({command: cmd}) 19 | cmdInstance.stdout.on('data', (data) => { 20 | commandLogger.debug(data.toString()) 21 | stdOut += data 22 | }) 23 | cmdInstance.stderr.on('data', (data) => { 24 | commandLogger.error(data.toString()) 25 | stdErr += data 26 | }) 27 | cmdInstance.on('close', (code) => { 28 | commandLogger.debug(`-- main command end ${code} --`) 29 | if (code === 0) { 30 | resolve(stdOut) 31 | } else { 32 | commandLogger.error('error', code) 33 | commandLogger.error(stdErr) 34 | reject(new Error(stdErr)) 35 | } 36 | }) 37 | cmdInstance.on('error', (err) => { 38 | commandLogger.error(err, 'Local command error') 39 | }) 40 | }) 41 | } 42 | 43 | module.exports = { 44 | run: run 45 | }
-------------------------------------------------------------------------------- /tests/browser/src/components/test-row.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | 3 | export default class TestRow extends React.Component { 4 | render () { 5 | return ( 6 | <div className='test-row'> 7 | {this.props.type === 'button' ? <button className={this.props.name} onClick={(e) => this.props.test(e)}>Start</button> 8 | : <input className={this.props.name} type='file' onChange={(e) => this.props.test(e)} />} 9 | <div>{this.props.description}</div> 10 | <div>ID:{this.props.results.id}</div> 11 | <div>IPFS version: {this.props.results.version}</div> 12 | <div>IPFS protocol version:{this.props.results.protocol_version}</div> 13 | <div> 14 | <div className={`${this.props.name}_s_${this.props.results.ready}`}>secs:{this.props.results.time_s}</div> 15 | <div className={`${this.props.name}_ms_${this.props.results.ready}`}>milli:{this.props.results.time_ms}</div> 16 | </div></div> ) 17 | } 18 | }
-------------------------------------------------------------------------------- /runner/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "runner", 3 | "version": "0.1.0", 4 | "description": "runner for benchmark tests", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "tap test/*.js", 8 | "lint": "standard --verbose", 9 | "start": "node index.js" 10 | }, 11 | "contributors": [ 12 | "Matteo Collina ", 13 | "Alex Knol ", 14 | "Ron Litzenberegr " 15 | ], 16 | "license": "MIT", 17 | "dependencies": { 18 | "ajv": "^6.5.5", 19 | "compressing": "^1.4.0", 20 | "fastify": "^1.13.0", 21 | "fastify-swagger": "^1.1.1", 22 | "folder-hash": "^2.1.2", 23 | "influx": "^5.0.7", 24 | "level-jobs": "^2.1.0", 25 | "leveldown": "^4.0.1", 26 | "levelup": "^4.0.0", 27 | "lodash": "^4.17.11", 28 | "make-promises-safe": "^3.0.0", 29 | "memdown": "^3.0.0", 30 | "moment": "^2.22.2", 31 | "nock-exec": "^0.1.0", 32 | "node-schedule": "^1.3.1", 33 | "pino": "^5.8.1", 34 | "pino-multi-stream": "^4.0.1", 35 | "pino-pretty": "^2.2.3", 36 | "rmfr": "^2.0.0", 37 | "rsync": "^0.6.1", 38 | "simple-get": "^3.0.3", 39 | "ssh-exec-plus": "^2.0.1", 40 | "yaml": "^1.0.2", 41 | "yargs": "^12.0.5" 42 | }, 43 | "devDependencies": { 44 | "standard": "^11.0.0", 45 | "tap": "^12.1.1" 46 | } 47 | }
-------------------------------------------------------------------------------- /runner/ipfs.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const _ = require('lodash') 4 | const config = require('./config') 5 | const local = require('./local') 6 | const fs = require('fs') 7 | const util = require('util') 8 | const fsStat = util.promisify(fs.stat) 9 | 10 | const store = async (localpath) => { 11 | // return new Promise(async (resolve, reject) => { 12 | if (config.ipfs.network.password) { 13 | try { 14 | await fsStat(localpath) 15 | const shellCommand = `ipfs-cluster-ctl --host ${config.ipfs.network.address} --enc json --basic-auth ${config.ipfs.network.user}:${config.ipfs.network.password} add -r ${localpath}` 16 | config.log.debug(shellCommand) 17 | return await local.run(shellCommand) 18 | } catch (e) { 19 | throw e 20 | } 21 | } else { 22 | throw Error('Env var IPFS_PASSWORD not set! 
Upload failed') 23 | } 24 | // }) 25 | } 26 | 27 | const parse = (outString, name) => { 28 | config.log.info(`Return sha for: ${name}`) 29 | name = String(name) 30 | let retVal = '' 31 | var parts = outString.match(/({\n.*\n.*\n.*\n})/g) 32 | config.log.debug(parts) 33 | let almostJson = parts.join(',') 34 | config.log.debug(almostJson) 35 | let arrOut = JSON.parse(`[${almostJson}]`) 36 | config.log.debug(arrOut) 37 | retVal = _.find(arrOut, { name: name }).cid 38 | return retVal 39 | } 40 | 41 | module.exports = { 42 | store, 43 | parse 44 | } 45 | -------------------------------------------------------------------------------- /runner/compress.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const util = require('util') 5 | const rmfr = require('rmfr') 6 | const stat = util.promisify(fs.stat) 7 | const readDir = util.promisify(fs.readdir) 8 | const compressing = require('compressing') 9 | const config = require('./config') 10 | 11 | const _tgzDir = async (source, target) => { 12 | if (source && target) { 13 | config.log.info(`Compressing [${source}] to [${target}]`) 14 | await compressing.tgz.compressDir(source, target) 15 | return { result: 'ok' } 16 | } else { 17 | config.log.error(`compress.tgz - Source [${source}] and Target [${target}] are required`) 18 | } 19 | } 20 | 21 | const clinicFiles = async (path) => { 22 | try { 23 | let contents = await readDir(path) 24 | // find the dir 25 | let clinicDir 26 | for (let node of contents) { 27 | let stats = await stat(`${path}/${node}`) 28 | if (stats.isDirectory()) { 29 | clinicDir = node 30 | break 31 | } 32 | } 33 | if (clinicDir) { 34 | await _tgzDir(`${path}/${clinicDir}`, `${path}/${clinicDir}.tar.gz`) 35 | await rmfr(`${path}/${clinicDir}`) 36 | } else { 37 | config.log.error(`No clinic directory found in ${path}`) 38 | } 39 | } catch (e) { 40 | config.log.error(e) 41 | throw e 42 | } 43 | } 44 | 45 | module.exports = { 46 | _tgzDir, 47 | clinicFiles 48 | } 49 | -------------------------------------------------------------------------------- /infrastructure/playbooks/roles/security/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Allow SSH in UFW 3 | ufw: 4 | rule: allow 5 | port: ssh 6 | proto: tcp 7 | comment: Allow ssh access 8 | 9 | - name: limit ssh brute force 10 | ufw: 11 | rule: limit 12 | port: ssh 13 | proto: tcp 14 | comment: Limit ssh access 15 | 16 | - name: Allow access to http port 80 and include a comment 17 | ufw: 18 | rule: allow 19 | proto: tcp 20 | port: http 21 | comment: Allow http access 22 | 23 | - name: Allow access to https port 443 and include a comment 24 | ufw: 25 | rule: allow 26 | proto: tcp 27 | port: https 28 | comment: Allow https access 29 | 30 | - name: Allow access to https port 9090 and include a comment 31 | ufw: 32 | rule: allow 33 | proto: tcp 34 | port: '9090' 35 | src: '147.75.33.155' 36 | comment: Allow rendezvous server access from benchmark machine 37 | 38 | - name: Create a network with options 39 | docker_network: 40 | name: my-bridge 41 | driver_options: 42 | com.docker.network.bridge.name: my-bridge 43 | 44 | - name: Allow access to http port 9000 on the host include a comment 45 | ufw: 46 | rule: allow 47 | proto: tcp 48 | port: '9000' 49 | direction: in 50 | interface: my-bridge 51 | comment: Allow https access 52 | 53 | - name: Set firewall default policy 54 | ufw: 55 | state: enabled 56 | policy: reject 57 | become: yes 
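For reference, a plausible way to apply these playbooks against the hosts in infrastructure/inventory/inventory.yaml is sketched below (an assumption for illustration: Ansible plus the nickjj.docker and geerlingguy.nodejs galaxy roles installed locally, as in infrastructure/ci/dockerfile):

```sh
# provision the controller host (logging, security, docker, node and common roles)
ansible-playbook -i infrastructure/inventory/inventory.yaml \
  infrastructure/playbooks/controller.yaml --tags prepare

# sync code changes and refresh the runner service only
ansible-playbook -i infrastructure/inventory/inventory.yaml \
  infrastructure/playbooks/controller.yaml --tags redeploy
```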
-------------------------------------------------------------------------------- /infrastructure/playbooks/controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: controllers 3 | remote_user: "{{remote_user}}" 4 | become_method: sudo 5 | serial: 1 6 | gather_facts: False 7 | pre_tasks: 8 | - name: Install python for Ansible 9 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal) 10 | register: test 11 | become: yes 12 | changed_when: test.stdout 13 | - setup: 14 | tasks: 15 | - import_role: 16 | name: logging 17 | become: yes 18 | tags: ["prepare","logging"] 19 | - import_role: 20 | name: security 21 | become: yes 22 | tags: ["prepare","security"] 23 | - import_role: 24 | name: "nickjj.docker" 25 | tags: ["prepare","docker"] 26 | become: yes 27 | - import_role: 28 | name: geerlingguy.nodejs 29 | become: yes 30 | tags: ["prepare","node"] 31 | - import_role: 32 | name: common 33 | become: yes 34 | tags: ["prepare","common"] 35 | - name: copy code 36 | become: no 37 | synchronize: 38 | src: ../../../ 39 | dest: "{{ remote_folder }}" 40 | checksum: yes 41 | rsync_opts: 42 | - "--exclude=runner/node_modules" 43 | - "--exclude=tests/node_modules" 44 | - "--exclude=tests/browser/node_modules" 45 | - "--exclude=tests/package-lock.json" 46 | - "--exclude=tests/fixtures/*" 47 | - import_role: 48 | name: redeploy 49 | become: yes 50 | tags: ["redeploy"] 51 | -------------------------------------------------------------------------------- /tests/config/default-config-browser.json: -------------------------------------------------------------------------------- 1 | { 2 | "Identity": { 3 | "PeerID": "", 4 | "PrivKey": "" 5 | }, 6 | "Datastore": { 7 | "Type": "", 8 | "Path": "", 9 | "StorageMax": "", 10 | "StorageGCWatermark": 0, 11 | "GCPeriod": "", 12 | "Params": null, 13 | "NoSync": false 14 | }, 15 | "Addresses": { 16 | "Swarm": [ 17 | "/ip4/127.0.0.1/tcp/0" 18 | ], 19 | "API": "/ip4/127.0.0.1/tcp/0", 20 | "Gateway": "/ip4/127.0.0.1/tcp/0" 21 | }, 22 | "Mounts": { 23 | "IPFS": "/ipfs", 24 | "IPNS": "/ipns", 25 | "FuseAllowOther": false 26 | }, 27 | "Version": { 28 | "Current": "jsipfs-dev", 29 | "Check": "error", 30 | "CheckDate": "0001-01-01T00:00:00Z", 31 | "CheckPeriod": "172800000000000", 32 | "AutoUpdate": "minor" 33 | }, 34 | "Discovery": { 35 | "MDNS": { 36 | "Enabled": false, 37 | "Interval": 10 38 | }, 39 | "webRTCStar": {} 40 | }, 41 | "Ipns": { 42 | "RepublishPeriod": "", 43 | "RecordLifetime": "", 44 | "ResolveCacheSize": 128 45 | }, 46 | "Bootstrap": [], 47 | "Tour": { 48 | "Last": "" 49 | }, 50 | "Gateway": { 51 | "HTTPHeaders": null, 52 | "RootRedirect": "", 53 | "Writable": false 54 | }, 55 | "SupernodeRouting": { 56 | "Servers": [] 57 | }, 58 | "API": { 59 | "HTTPHeaders": null 60 | }, 61 | "Swarm": { 62 | "AddrFilters": null 63 | }, 64 | "Log": { 65 | "MaxSizeMB": 250, 66 | "MaxBackups": 1, 67 | "MaxAgeDays": 0 68 | } 69 | } -------------------------------------------------------------------------------- /tests/browser/src/components/add-local-file.js: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import IPFS from 'ipfs' 3 | import hrtime from 'browser-process-hrtime' 4 | import uuidv1 from 'uuid/v1' 5 | import 'react-table/react-table.css' 6 | import { once } from 'stream-iterators-utils' 7 | import fileReaderStream from 'filereader-stream' 8 | import getId from './getId' 9 | import localState from './localState' 10 | 
import TestRow from './test-row' 11 | class AddLocalFile extends React.Component { 12 |   constructor (props) { 13 |     super(props) 14 |     this.state = localState 15 |   } 16 | 17 |   async test (e) { 18 |     // Create the IPFS node instance 19 |     const node = new IPFS({ repo: String(uuidv1()) }) 20 |     const fileArray = [...e.target.files] 21 |     node.on('ready', () => {}) 22 |     await once(node, 'ready') 23 |     const start = hrtime() 24 |     for (let i = 0; i < fileArray.length; i++) { 25 |       const readStream = fileReaderStream(fileArray[i]) 26 |       node.add ? await node.add(readStream) : await node.files.add(readStream) 27 |     } 28 |     const delta = hrtime(start) 29 |     const results = await getId(node, delta, this.state) 30 |     this.setState(results) 31 |   } 32 |   render () { 33 |     const name = 'addLocalFile' 34 |     const description = 'Add local files' 35 |     return ( 36 | 42 |     ) 43 |   } 44 | } 45 | export default AddLocalFile 46 | -------------------------------------------------------------------------------- /tests/init-node.browser.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { build } = require('./schema/results') 4 | const run = require('./lib/runner') 5 | 6 | /** 7 |  * Initialize an IPFS peer benchmark test in the browser. 8 |  * js0 -> js0 - A local test from one JS IPFS node to the same node 9 |  * @async 10 |  * @function initializeNodeBrowser 11 |  * @param {array} node - An array of headless browsers that contain IPFS tests. 12 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 13 |  * @param {boolean} warmup - Not implemented. 14 |  * @param {string} fileSet - Describes file or list of files used for the test. 15 |  * @param {string} version - Version of IPFS used in benchmark.
16 |  * @return {Promise} The data from the benchmark 17 |  */ 18 | async function initializeNodeBrowser (node, name, warmup, fileSet, version) { 19 |   const page = node[0].page 20 |   await page.click('.initializeNode') 21 |   const t = await page.waitFor('.initializeNode_s_ready') 22 |   const element = await page.waitFor('.initializeNode_ms_ready') 23 |   const timeS = await page.evaluate(t => t.textContent, t) 24 |   const timeMs = await page.evaluate(element => element.textContent, element) 25 |   return build({ 26 |     name: name, 27 |     warmup: 'off', 28 |     file: '', 29 |     meta: { version: version }, 30 |     description: 'Node initialization (local) js0 -> js0', 31 |     file_set: 'none', 32 |     duration: { s: parseInt(timeS.split(':')[1]), 33 |       ms: parseInt(timeMs.split(':')[1]) / 1000000 } 34 |   }) 35 | } 36 | 37 | run(initializeNodeBrowser, 1, 'browser') 38 | -------------------------------------------------------------------------------- /infrastructure/deploy/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 |     listen 80; 3 |     server_name benchmarks.ipfs.team; 4 | 5 |     location / { 6 |         return 301 https://$host$request_uri; 7 |     } 8 | 9 |     location /.well-known/acme-challenge/ { 10 |         root /var/www/certbot; 11 |     } 12 | } 13 | 14 | server { 15 |     listen 443 ssl; 16 |     listen [::]:443 ssl ipv6only=on; 17 |     server_name benchmarks.ipfs.team; 18 | 19 |     ssl_certificate /etc/letsencrypt/live/benchmarks.ipfs.team/fullchain.pem; 20 |     ssl_certificate_key /etc/letsencrypt/live/benchmarks.ipfs.team/privkey.pem; 21 | 22 |     include /etc/letsencrypt/options-ssl-nginx.conf; 23 |     ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; 24 | 25 |     location /runner { 26 |         proxy_http_version 1.1; 27 |         proxy_set_header Upgrade $http_upgrade; 28 |         proxy_set_header Connection 'upgrade'; 29 |         proxy_set_header Host $host; 30 |         keepalive_timeout 10m; 31 |         proxy_connect_timeout 600s; 32 |         proxy_send_timeout 600s; 33 |         proxy_read_timeout 600s; 34 |         proxy_cache_bypass $http_upgrade; 35 |         proxy_set_header X-Real-IP $remote_addr; 36 |         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 37 |         proxy_set_header Host $http_host; 38 |         rewrite ^/runner/(.*) /$1 break; 39 |         proxy_pass http://runner:9000/; 40 |     } 41 | 42 |     location / { 43 |         proxy_pass http://graf:3000; 44 |         proxy_set_header Host $host; 45 |         proxy_set_header X-Real-IP $remote_addr; 46 |         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 47 |     } 48 | 49 | } -------------------------------------------------------------------------------- /runner/test/parse.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const tap = require('tap') 4 | 5 | const outString = 6 | `{ 7 |   "name": "localAdd/15967.clinic-doctor/15967.clinic-doctor-processstat", 8 |   "cid": "QmV4SAi1Ynz3BcRb2KjSTGgDVMN1mRCy7GJkC8nvW3TkmP", 9 |   "size": 6566 10 | } 11 | { 12 |   "name": "localAdd/15967.clinic-doctor/15967.clinic-doctor-systeminfo", 13 |   "cid": "QmePZR5RKWquWP4b642E2Ffu3MDTLEUzcdprq8GMfPuVUb", 14 |   "size": 481 15 | } 16 | { 17 |   "name": "localAdd/15967.clinic-doctor/15967.clinic-doctor-traceevent", 18 |   "cid": "QmPhn82fiHVDtz3RysWK3zryge4bfNEjsr5Frh2gQSb692", 19 |   "size": 2225617 20 | } 21 | { 22 |   "name": "localAdd/15967.clinic-doctor.html", 23 |   "cid": "QmUJpPekYvP5zrULE6u9oG1Uww7ZeD7wbpRX2Qe2TjTAo4", 24 |   "size": 260252 25 | } 26 | { 27 |   "name": "localAdd/15967.clinic-doctor", 28 |   "cid": "QmNTfogTCu7v6f3XaghhcmvpiC3RqnLhs9AeBoKqDjTeyU", 29 |   "size": 2232890 30 | } 31 | { 32 |   "name": "localAdd",
33 |   "cid": "QmWvMq2tkQJUqUs5pyBWQ2fCVR714pSN285fqd2rFwYti6", 34 |   "size": 2493278 35 | }` 36 | 37 | const outString2 = 38 | `{ 39 |   "name": "1546104181577", 40 |   "cid": "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn", 41 |   "size": 4 42 | }` 43 | 44 | // the test subject 45 | const ipfs = require('../ipfs.js') 46 | 47 | tap.test('multi object string', async (t) => { 48 |   const sha = ipfs.parse(outString, 'localAdd') 49 |   tap.equal(sha, 'QmWvMq2tkQJUqUs5pyBWQ2fCVR714pSN285fqd2rFwYti6') 50 |   t.end() 51 | }) 52 | 53 | tap.test('single object string', async (t) => { 54 |   const sha = ipfs.parse(outString2, '1546104181577') 55 |   tap.equal(sha, 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn') 56 |   t.end() 57 | }) 58 | -------------------------------------------------------------------------------- /tests/local-add.go.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const os = require('os') 4 | const util = require('util') 5 | const execute = util.promisify(require('child_process').exec) 6 | const { file } = require('./lib/fixtures.js') 7 | const { build } = require('./schema/results') 8 | const run = require('./lib/runner') 9 | const conf = { tmpPath: os.tmpdir() } 10 | 11 | /** 12 |  * Add file benchmark using IPFS api add using the go daemon. 13 |  * go0 -> go0 - A local test from one Go IPFS node to the same node 14 |  * @async 15 |  * @function unixFsAddGo 16 |  * @param {array} peerArray - An array of IPFS peers used during the test. 17 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 18 |  * @param {boolean} warmup - Not implemented. 19 |  * @param {string} fileSet - Describes file or list of files used for the test. 20 |  * @param {string} version - Version of IPFS used in benchmark. 21 |  * @return {Promise} The data from the benchmark 22 |  */ 23 | 24 | const unixFsAddGo = async (peerArray, name, warmup, fileSet, version) => { 25 |   const filePath = await file(fileSet) 26 |   const start = process.hrtime() 27 |   let command = `export IPFS_PATH=${conf.tmpPath}/ipfs0 && ipfs add ${filePath} > /dev/null` 28 |   await execute(command) 29 |   const end = process.hrtime(start) 30 |   return build({ 31 |     name: name, 32 |     warmup: warmup, 33 |     file_set: fileSet, 34 |     file: filePath, 35 |     meta: { version: version, project: 'go-ipfs' }, 36 |     description: 'Add files (balanced). go0 -> go0', 37 |     duration: { 38 |       s: end[0], 39 |       ms: end[1] / 1000000 40 |     } 41 |   }, 'go') 42 | } 43 | 44 | run(unixFsAddGo, 1, 'go') 45 | -------------------------------------------------------------------------------- /tests/init-node.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { build } = require('./schema/results') 4 | const run = require('./lib/runner') 5 | const NodeFactory = require('./lib/node-factory') 6 | 7 | /** 8 |  * Initialize an IPFS peer benchmark. 9 |  * js0 -> js0 - A local test from one JS IPFS node to the same node 10 |  * @async 11 |  * @function initializeNode 12 |  * @param {array} node - An array of IPFS peers used during the test. 13 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 14 |  * @param {boolean} warmup - Not implemented. 15 |  * @param {string} fileSet - Describes file or list of files used for the test. 16 |  * @param {string} version - Version of IPFS used in benchmark.
17 |  * @return {Promise} The data from the benchmark 18 |  */ 19 | 20 | async function initializeNode (node, name, warmup, fileSet, version) { 21 |   const start = process.hrtime() 22 |   const nodeFactory = new NodeFactory() 23 |   await nodeFactory.add('nodejs', { 24 |     'Addresses': { 25 |       'API': '/ip4/127.0.0.1/tcp/6012', 26 |       'Gateway': '/ip4/127.0.0.1/tcp/9191', 27 |       'Swarm': [ 28 |         '/ip4/0.0.0.0/tcp/7012', 29 |         '/ip4/127.0.0.1/tcp/3022/ws' 30 |       ] 31 |     }, 32 |     'Bootstrap': [] 33 |   }, { 'empty-repo': true }) 34 |   const end = process.hrtime(start) 35 |   await nodeFactory.stop('nodejs') 36 |   return build({ 37 |     name: name, 38 |     warmup: warmup, 39 |     file: 'none', 40 |     meta: { version: version }, 41 |     description: 'Node initialization (local) js0 -> js0', 42 |     file_set: 'none', 43 |     duration: { s: end[0], 44 |       ms: end[1] / 1000000 } 45 |   }) 46 | } 47 | 48 | run(initializeNode) 49 | -------------------------------------------------------------------------------- /tests/local-add.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { file } = require('./lib/fixtures') 4 | const run = require('./lib/runner') 5 | const { build } = require('./schema/results') 6 | const fs = require('fs') 7 | const { description, strategy } = require('./config').parseParams() 8 | 9 | /** 10 |  * Add file benchmark using IPFS api add. 11 |  * js0 -> js0 - A local test from one JS IPFS node to the same node 12 |  * @async 13 |  * @function unixFsAdd 14 |  * @param {array} peerArray - An array of IPFS peers used during the test. 15 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 16 |  * @param {boolean} warmup - Not implemented. 17 |  * @param {string} fileSet - Describes file or list of files used for the test. 18 |  * @param {string} version - Version of IPFS used in benchmark. 19 |  * @return {Promise} The data from the benchmark 20 |  */ 21 | async function unixFsAdd (peerArray, name, warmup, fileSet, version) { 22 |   const filePath = await file(fileSet) 23 |   const fileStream = fs.createReadStream(filePath) 24 |   console.log(`    Adding files using strategy ${strategy}`) 25 |   const start = process.hrtime() 26 |   const peer = peerArray[0] 27 |   // output file and dashboard name will match strategy.
default is balanced 28 |   await peer.add(fileStream, { strategy: strategy }) 29 |   const end = process.hrtime(start) 30 |   return build({ 31 |     name: name, 32 |     warmup: warmup, 33 |     file: filePath, 34 |     meta: { version: version }, 35 |     description: `Add file ${description} js0 -> js0`, 36 |     file_set: fileSet, 37 |     duration: { 38 |       s: end[0], 39 |       ms: end[1] / 1000000 40 |     } 41 |   }) 42 | } 43 | 44 | run(unixFsAdd) 45 | -------------------------------------------------------------------------------- /tests/lib/runner.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | const NodeFactory = require('./node-factory') 3 | const config = require('../config') 4 | const clean = require('./clean') 5 | const { store } = require('./output') 6 | const genTests = require('../util/create-files') 7 | const { name } = require('../config').parseParams() 8 | async function runner (test, nodeCount = 2, type = 'nodejs', options) { 9 |   if (!config.verify) { 10 |     await genTests() 11 |   } 12 |   const arrResults = [] 13 |   const nodeFactory = new NodeFactory() 14 |   const node = [] 15 |   for (let i = 0; i < nodeCount; i++) { 16 |     try { 17 |       node.push(await nodeFactory.add(type, options, i)) 18 |     } catch (e) { 19 |       console.log(e) 20 |     } 21 |   } 22 |   const version = await node[0].version() 23 |   try { 24 |     for (let subTest of config.test[test.name]) { 25 |       if (config.fileSetParam) { 26 |         arrResults.push(await test(node, `${test.name}${name}`, subTest.warmup.toLowerCase(), config.fileSetParam, version)) 27 |       } else { 28 |         for (let fileSet of subTest.fileSet) { 29 |           arrResults.push(await test(node, `${test.name}${name}`, subTest.warmup.toLowerCase(), fileSet, version)) 30 |         } 31 |       } 32 |     } 33 |   } catch (err) { 34 |     if (err.code === 'ENOENT') { 35 |       console.log('ERROR -- Run "npm run generateFiles" then run test again.') 36 |       await nodeFactory.stop(type) 37 |       clean.peerRepos() 38 |       process.exit(1) 39 |     } 40 |     console.log(err) 41 |     console.log(err.message) 42 |     await nodeFactory.stop(type) 43 |     clean.peerRepos() 44 |     process.exit(1) 45 |   } 46 |   store(arrResults) 47 |   await nodeFactory.stop(type) 48 |   clean.peerRepos() 49 | } 50 | module.exports = runner 51 | -------------------------------------------------------------------------------- /tests/add-multi-kb.browser.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { build } = require('./schema/results') 4 | const run = require('./lib/runner') 5 | const { file } = require('./lib/fixtures') 6 | /** 7 |  * Add many small files benchmark using IPFS api add in the browser. 8 |  * js0 -> js0 - A local test from one JS IPFS node to the same node 9 |  * @async 10 |  * @function addMultiKbBrowser 11 |  * @param {array} node - An array of headless browsers that contain IPFS tests. 12 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 13 |  * @param {boolean} warmup - Not implemented. 14 |  * @param {string} fileSet - Describes file or list of files used for the test. 15 |  * @param {string} version - Version of IPFS used in benchmark.
16 |  * @return {Promise} The data from the benchmark 17 |  */ 18 | async function addMultiKbBrowser (node, name, warmup, fileSet, version) { 19 |   const filePath = await file(fileSet) 20 |   const page = node[0].page 21 |   await page.reload() 22 |   const elementHandle = await page.$('.addLocalFile') 23 |   await elementHandle.uploadFile(...filePath) 24 |   const t = await page.waitFor('.addLocalFile_s_ready') 25 |   const element = await page.waitFor('.addLocalFile_ms_ready') 26 |   const timeS = await page.evaluate(t => t.textContent, t) 27 |   const timeMs = await page.evaluate(element => element.textContent, element) 28 |   return build({ 29 |     name: name, 30 |     warmup: 'off', 31 |     file: filePath, 32 |     meta: { version: version }, 33 |     description: 'Add many files (local) js0 -> js0', 34 |     file_set: fileSet, 35 |     duration: { s: parseInt(timeS.split(':')[1]), 36 |       ms: parseInt(timeMs.split(':')[1]) / 1000000 } 37 |   }) 38 | } 39 | 40 | run(addMultiKbBrowser, 1, 'browser') 41 | -------------------------------------------------------------------------------- /tests/package.json: -------------------------------------------------------------------------------- 1 | { 2 |   "name": "ipfs-benchmarks", 3 |   "version": "0.1.0", 4 |   "description": "Benchmarks for js-ipfs", 5 |   "engines": { 6 |     "node": ">=10.0.0" 7 |   }, 8 |   "main": "index.js", 9 |   "config": { 10 |     "repoPath": "./data/ipfs/" 11 |   }, 12 |   "scripts": { 13 |     "test": "npm run generateFiles && tap -J unit/*.test.js", 14 |     "test-local": "tap -J unit/*.test.js", 15 |     "lint": "standard --verbose", 16 |     "generateFiles": "node util/create-files", 17 |     "build-browser": "./util/build-browser-test.sh", 18 |     "benchmark": " npm run generateFiles && node local-add.js && node local-extract.js && node local-transfer && node multi-peer-transfer" 19 |   }, 20 |   "contributors": [ 21 |     "Matteo Collina ", 22 |     "Alex Knol ", 23 |     "Ron Litzenberger " 24 |   ], 25 |   "license": "ISC", 26 |   "dependencies": { 27 |     "ajv": "^6.5.5", 28 |     "cli-table3": "^0.5.1", 29 |     "eslint": "5.6.0", 30 |     "fluent-schema": "^0.2.0", 31 |     "go-ipfs-dep": "^0.4.18", 32 |     "ipfs": "^0.34.0", 33 |     "ipfs-http-client": "^28.1.0", 34 |     "ipfsd-ctl": "^0.40.2", 35 |     "libp2p-mplex": "^0.8.4", 36 |     "libp2p-secio": "^0.11.1", 37 |     "libp2p-spdy": "^0.13.1", 38 |     "libp2p-tcp": "^0.13.0", 39 |     "libp2p-websockets": "^0.12.2", 40 |     "minimist": "^1.2.0", 41 |     "os": "^0.1.1", 42 |     "peer-id": "^0.12.0", 43 |     "pretty-hrtime": "^1.0.3", 44 |     "promise-retry": "^1.1.1", 45 |     "proper-lockfile": "^3.2.0", 46 |     "puppeteer": "^1.11.0", 47 |     "rimraf": "^2.6.2", 48 |     "stream-iterators-utils": "^0.1.0", 49 |     "uuid": "^3.3.2" 50 |   }, 51 |   "devDependencies": { 52 |     "standard": "^12.0.1", 53 |     "tap": "^12.1.0" 54 |   } 55 | } 56 | -------------------------------------------------------------------------------- /tests/local-extract.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const { build } = require('./schema/results') 5 | const { file } = require('./lib/fixtures') 6 | const run = require('./lib/runner') 7 | const { once } = require('stream-iterators-utils') 8 | 9 | /** 10 |  * Retrieve a file from a local peer using catReadableStream. 11 |  * js0 -> js0 - A local test from one JS IPFS node to the same node 12 |  * @async 13 |  * @function localExtract 14 |  * @param {array} peerArray - An array of IPFS peers used during the test. 15 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name.
16 |  * @param {boolean} warmup - Not implemented. 17 |  * @param {string} fileSet - Describes file or list of files used for the test. 18 |  * @param {string} version - Version of IPFS used in benchmark. 19 |  * @return {Promise} The data from the benchmark 20 |  */ 21 | async function localExtract (peerArray, name, warmup, fileSet, version) { 22 |   const filePath = await file(fileSet) 23 |   const fileStream = fs.createReadStream(filePath) 24 |   const peer = peerArray[0] 25 |   const inserted = await peer.add(fileStream) 26 |   const start = process.hrtime() 27 |   let stream = peer.catReadableStream(inserted[0].hash) 28 |   // end of stream 29 |   stream.resume() 30 | 31 |   // we cannot use end-of-stream/pump for some reason here 32 |   // investigate. 33 |   // https://github.com/ipfs/js-ipfs/issues/1774 34 |   await once(stream, 'end') 35 | 36 |   const end = process.hrtime(start) 37 |   return build({ 38 |     name: 'localExtract', 39 |     warmup: warmup, 40 |     file: filePath, 41 |     meta: { version: version }, 42 |     description: 'Cat file (local) js0 -> js0', 43 |     file_set: fileSet, 44 |     duration: { s: end[0], 45 |       ms: end[1] / 1000000 } 46 |   }) 47 | } 48 | run(localExtract) 49 | -------------------------------------------------------------------------------- /tests/add-multi-kb.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const { build } = require('./schema/results') 5 | const { file } = require('./lib/fixtures') 6 | const run = require('./lib/runner') 7 | const { description, strategy } = require('./config').parseParams() 8 | 9 | /** 10 |  * Add many small files benchmark using IPFS api add. 11 |  * js0 -> js0 - A local test from one JS IPFS node to the same node 12 |  * @async 13 |  * @function addMultiKb 14 |  * @param {array} node - An array of IPFS peers used during the test. 15 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 16 |  * @param {boolean} warmup - Not implemented. 17 |  * @param {string} fileSet - Describes file or list of files used for the test. 18 |  * @param {string} version - Version of IPFS used in benchmark. 19 |  * @return {Promise} The data from the benchmark 20 |  */ 21 | async function addMultiKb (node, name, warmup, fileSet, version) { 22 |   const fileArr = await file(fileSet) 23 |   console.log(`    Adding files using strategy ${strategy}`) 24 |   const start = process.hrtime() 25 |   const peer = node[0] 26 | 27 |   // output file and dashboard name will match strategy.
default is balanced 28 | 29 |   for (var i = 0, len = fileArr.length; i < len; i++) { 30 |     const fileStream = fs.createReadStream(fileArr[i]) 31 |     await peer.add(fileStream, { strategy: strategy }) 32 |   } 33 |   const end = process.hrtime(start) 34 | 35 |   // Pass in test output to build and return 36 | 37 |   return build({ 38 |     name: name, 39 |     warmup: warmup, 40 |     file: fileSet, 41 |     meta: { version: version }, 42 |     description: `Add many small files ${description} js0 -> js0`, 43 |     file_set: fileSet, 44 |     duration: { s: end[0], 45 |       ms: end[1] / 1000000 } 46 |   }) 47 | } 48 | run(addMultiKb) 49 | -------------------------------------------------------------------------------- /tests/local-add.browser.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { build } = require('./schema/results') 4 | const run = require('./lib/runner') 5 | const { file } = require('./lib/fixtures') 6 | const { description, strategy } = require('./config').parseParams() 7 | 8 | /** 9 |  * Add file benchmark using IPFS api add in the browser. 10 |  * js0 -> js0 - A local test from one JS IPFS node to the same node 11 |  * @async 12 |  * @function unixFsAddBrowser 13 |  * @param {array} browser - An array of headless browsers that contain IPFS tests. 14 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 15 |  * @param {boolean} warmup - Not implemented. 16 |  * @param {string} fileSet - Describes file or list of files used for the test. 17 |  * @param {string} version - Version of IPFS used in benchmark. 18 |  * @return {Promise} The data from the benchmark 19 |  */ 20 | async function unixFsAddBrowser (browser, name, warmup, fileSet, version) { 21 |   console.log(`    Adding files using strategy ${strategy}`) 22 |   const filePath = await file(fileSet) 23 |   const page = browser[0].page 24 |   await page.reload() 25 |   const elementHandle = await page.$('.addLocalFile') 26 |   await elementHandle.uploadFile(filePath) 27 |   const t = await page.waitFor('.addLocalFile_s_ready') 28 |   const element = await page.waitFor('.addLocalFile_ms_ready') 29 |   const timeS = await page.evaluate(t => t.textContent, t) 30 |   const timeMs = await page.evaluate(element => element.textContent, element) 31 |   return build({ 32 |     name: name, 33 |     warmup: 'off', 34 |     file: filePath, 35 |     meta: { version: version }, 36 |     description: `Add file ${description} js0 -> js0`, 37 |     file_set: fileSet, 38 |     duration: { s: parseInt(timeS.split(':')[1]), 39 |       ms: parseInt(timeMs.split(':')[1]) / 1000000 } 40 |   }) 41 | } 42 | 43 | run(unixFsAddBrowser, 1, 'browser') 44 | -------------------------------------------------------------------------------- /tests/peer-transfer.browser.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { build } = require('./schema/results') 4 | const run = require('./lib/runner') 5 | const { file } = require('./lib/fixtures') 6 | const { description } = require('./config').parseParams() 7 | 8 | /** 9 |  * Retrieve a file between two peers in the browser. 10 |  * js0 -> js1 - A test between two JS IPFS nodes 11 |  * @async 12 |  * @function peerTransferBrowser 13 |  * @param {array} browser - An array of headless browsers that contain IPFS tests. 14 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 15 |  * @param {boolean} warmup - Not implemented.
16 |  * @param {string} fileSet - Describes file or list of files used for the test. 17 |  * @param {string} version - Version of IPFS used in benchmark. 18 |  * @return {Promise} The data from the benchmark 19 |  */ 20 | async function peerTransferBrowser (browser, name, warmup, fileSet, version) { 21 |   const filePath = await file(fileSet) 22 |   const page = browser[0].page 23 |   await page.reload() 24 |   page.on('console', msg => console.log('PAGE LOG:', msg._text)) 25 |   const elementHandle = await page.$('.peerTransfer') 26 |   await elementHandle.uploadFile(filePath) 27 |   const t = await page.waitFor('.peerTransfer_s_ready', { timeout: 200000 }) 28 |   const element = await page.waitFor('.peerTransfer_ms_ready') 29 |   const timeS = await page.evaluate(t => t.textContent, t) 30 |   const timeMs = await page.evaluate(element => element.textContent, element) 31 |   return build({ 32 |     name: name, 33 |     warmup: 'off', 34 |     file: filePath, 35 |     meta: { version: version }, 36 |     description: `Cat file ${description} js0 -> js1`, 37 |     file_set: fileSet, 38 |     duration: { s: parseInt(timeS.split(':')[1]), 39 |       ms: parseInt(timeMs.split(':')[1]) / 1000000 } 40 |   }) 41 | } 42 | 43 | run(peerTransferBrowser, 1, 'browser') 44 | -------------------------------------------------------------------------------- /runner/lib/schema/add.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const addBody = { 4 |   $id: 'addBody', 5 |   type: 'object', 6 |   properties: { 7 |     commit: { 8 |       type: 'string', 9 |       description: 'Commit of the js-IPFS library' 10 |     }, 11 |     nightly: { 12 |       type: 'boolean', 13 |       default: false, 14 |       description: 'Set the "nightly" flag in the metadata' 15 |     }, 16 |     tag: { 17 |       type: 'string', 18 |       description: 'Arbitrary metadata that gets propagated to the metrics DB as "tag"' 19 |     }, 20 |     benchmarks: { 21 |       type: 'object', 22 |       properties: { 23 |         tests: { 24 |           type: 'array', 25 |           items: { 26 |             type: 'string' 27 |           }, 28 |           description: 'Names of benchmark tests to be run' 29 |         } 30 |       } 31 |     }, 32 |     clinic: { 33 |       type: 'object', 34 |       properties: { 35 |         enabled: { type: 'boolean', default: false }, 36 |         tests: { 37 |           type: 'array', 38 |           items: { 39 |             type: 'string' 40 |           } 41 |         } 42 |       } 43 |     } 44 |   }, 45 |   required: ['commit'] 46 | } 47 | 48 | const addResponse = { 49 |   $id: 'addResponse', 50 |   200: { 51 |     description: 'Successful response', 52 |     type: 'object', 53 |     properties: { 54 |       commit: { type: 'string' }, 55 |       clinic: { 56 |         type: 'object', 57 |         properties: { 58 |           enabled: { type: 'boolean' } 59 |         } 60 |       }, 61 |       benchmarks: { 62 |         type: 'object', 63 |         properties: { 64 |           tests: { 65 |             type: 'array', 66 |             items: { type: 'string' } 67 |           } 68 |         } 69 |       }, 70 |       remote: { type: 'boolean' }, 71 |       nightly: { type: 'boolean' }, 72 |       tag: { type: 'string' }, 73 |       id: { type: 'integer' } 74 |     } 75 |   } 76 | } 77 | 78 | module.exports = { 79 |   addBody, 80 |   addResponse 81 | } 82 | -------------------------------------------------------------------------------- /tests/local-transfer.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const { file } = require('./lib/fixtures.js') 5 | const { build } = require('./schema/results') 6 | const run = require('./lib/runner') 7 | const { once } = require('stream-iterators-utils') 8 | const { description } = require('./config').parseParams() 9 | 10 | /** 11 |  * Cat a file between two peers using catReadableStream.
12 |  * js0 -> js1 - A test between two JS IPFS nodes 13 |  * @async 14 |  * @function localTransfer 15 |  * @param {array} peerArray - An array of IPFS peers used during the test. 16 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 17 |  * @param {boolean} warmup - Not implemented. 18 |  * @param {string} fileSet - Describes file or list of files used for the test. 19 |  * @param {string} version - Version of IPFS used in benchmark. 20 |  * @return {Promise} The data from the benchmark 21 |  */ 22 | const localTransfer = async (peerArray, name, warmup, fileSet, version) => { 23 |   const filePath = await file(fileSet) 24 |   const fileStream = fs.createReadStream(filePath) 25 |   const peerA = peerArray[0] 26 |   const peerB = peerArray[1] 27 |   const peerAId = await peerA.id() 28 |   peerB.swarm.connect(peerAId.addresses[0]) 29 |   const inserted = await peerA.add(fileStream) 30 |   const start = process.hrtime() 31 |   let stream = peerB.catReadableStream(inserted[0].hash) 32 |   // end of stream 33 |   stream.resume() 34 | 35 |   // we cannot use end-of-stream/pump for some reason here 36 |   // investigate. 37 |   // https://github.com/ipfs/js-ipfs/issues/1774 38 |   await once(stream, 'end') 39 | 40 |   const end = process.hrtime(start) 41 |   return build({ 42 |     name: name, 43 |     warmup: warmup, 44 |     file_set: fileSet, 45 |     file: filePath, 46 |     meta: { version: version }, 47 |     description: `Cat file ${description} js0 -> js1`, 48 |     duration: { 49 |       s: end[0], 50 |       ms: end[1] / 1000000 51 |     } 52 |   }) 53 | } 54 | 55 | run(localTransfer, 3) 56 | -------------------------------------------------------------------------------- /infrastructure/deploy/docker-compose.prod.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | networks: 3 |   default: 4 |     external: 5 |       name: my-bridge 6 | services: 7 |   influxdb: 8 |     image: influxdb:1.7-alpine 9 |     volumes: 10 |       - /data/influxdb:/var/lib/influxdb 11 |     ports: 12 |       - "8086:8086" 13 |     environment: 14 |       INFLUXDB_DB: benchmarks 15 | #    networks: 16 | #      - my_bridge 17 |     restart: always 18 |   grafana: 19 |     image: grafana/grafana:latest 20 |     ports: 21 |       - "3000:3000" 22 |     volumes: 23 |       - /data/grafana:/var/lib/grafana 24 |     depends_on: 25 |       - influxdb 26 |     links: 27 |       - influxdb 28 | #    networks: 29 | #      - my_bridge 30 |     environment: 31 |       GF_SERVER_ROOT_URL: https://benchmarks.ipfs.team 32 |       GF_SECURITY_ADMIN_USER: ${GF_ADMIN_USER} 33 |       GF_SECURITY_ADMIN_PASSWORD: ${GF_ADMIN_PASSWORD} 34 |       GF_AUTH_ANONYMOUS_ENABLED: "true" 35 |     restart: always 36 |   nginx: 37 |     image: nginx:alpine 38 |     ports: 39 |       - "80:80" 40 |       - "443:443" 41 |     links: 42 |       - grafana:graf 43 |     volumes: 44 |       - ./nginx/nginx.conf:/etc/nginx/conf.d/default.conf 45 |       - ./data/nginx:/etc/nginx/conf.d 46 |       - ./data/certbot/conf:/etc/letsencrypt 47 |       - ./data/certbot/www:/var/www/certbot 48 |     depends_on: 49 |       - grafana 50 | #    networks: 51 | #      - my_bridge 52 |     restart: always 53 |     command: 54 |       - /bin/sh 55 |       - -c 56 |       - | 57 |         ip -4 route list match 0/0 | awk '{print $$3" runner"}' >> /etc/hosts 58 |         cat /etc/hosts 59 |         exec nginx -g 'daemon off;' 60 |   certbot: 61 |     image: certbot/certbot 62 |     volumes: 63 |       - ./data/certbot/conf:/etc/letsencrypt 64 |       - ./data/certbot/www:/var/www/certbot 65 |     entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'" 66 |     restart: always 67 | #    networks: 68 | #      - my_bridge 69 |   rendezvous: 70 |     image: libp2p/websocket-star-rendezvous:release 71
| ports: 72 |       - "9090:9090" 73 |     environment: 74 |       DISABLE_METRICS: 1 75 | -------------------------------------------------------------------------------- /infrastructure/deploy/README.md: -------------------------------------------------------------------------------- 1 | ## Deploying the Controller IPFS benchmarks 2 | 3 | ![Controller](controller.png) 4 | 5 | The runner and its supporting applications are deployed using [docker-compose](https://docs.docker.com/compose/reference/overview/#command-options-overview-and-help). To run a minimal local development setup you need the following parts: 6 | 1. Nginx 7 | 2. InfluxDB 8 | 3. Grafana 9 | 10 | To run the stack locally all you need is the [docker-compose.yaml](docker-compose.yaml) file. The runner runs on the host, listening on port 9000, outside of Docker. The [Ansible](/infrastructure/playbooks/README.md) playbook takes care of creating a Docker network that is allowed to access port 9000 on the host. 11 | For production, there is an nginx reverse proxy to route traffic to `Grafana` on `/` and expose the runner's endpoint on `/runner`. 12 | In production we add the [docker-compose.prod.yaml](docker-compose.prod.yaml) to configure and run the production setup, which adds two more parts: 13 | 14 | 4. nginx 15 | 5. certbot 16 | 17 | SSL certificate provisioning for production is handled with [certbot](https://github.com/certbot/certbot). Convenience scripts to run the different docker-compose setups are located under [/scripts](/scripts/README.md) 18 | 19 | ### Certbot initialization 20 | Nginx is configured to use certificates and will fail if the certificate files are not present. On a new deployment, no certificates will be available, so before the first run a dummy certificate should be installed with [sudo ./init-letsencrypt.sh](./init-letsencrypt.sh). This installs a self-signed certificate and starts nginx; certbot then initiates a handshake with Let's Encrypt to retrieve the real certificate and replaces the self-signed one with it. 21 | 22 | Let's Encrypt only works with publicly resolvable domains. 23 | 24 | ## Continuous Integration and Deployment 25 | This project is being watched by CircleCI. At the end of each successful integration run, the runner and test code are redeployed to the controller VM. 26 | The definition of the tests and deployment is contained in the [CircleCI](/.circleci/config.yml) configuration file.
For more details, see the [playbook](/infrastructure/playbooks/README.md) doc. -------------------------------------------------------------------------------- /tests/extract-js2.go.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const os = require('os') 4 | const fs = require('fs') 5 | const { build } = require('./schema/results') 6 | const { file } = require('./lib/fixtures') 7 | const run = require('./lib/runner') 8 | const NodeFactory = require('./lib/node-factory') 9 | const util = require('util') 10 | const execute = util.promisify(require('child_process').exec) 11 | const conf = { tmpPath: os.tmpdir() } 12 | const { description } = require('./config').parseParams() 13 | const argv = require('minimist')(process.argv.slice(2)) 14 | 15 | async function extractJs2Go (ipfs, name, warmup, fileSet, version) { 16 |   // Runner returns the NodeJS ipfs but we need to create the Go ipfs 17 |   const nodeFactory = new NodeFactory() 18 |   try { 19 |     await nodeFactory.add('go') 20 |   } catch (e) { 21 |     console.log(e) 22 |     return 23 |   } 24 |   const filePath = await file(fileSet) 25 |   const fileStream = fs.createReadStream(filePath) 26 |   const peer = ipfs[0] 27 |   const inserted = await peer.add(fileStream) 28 | 29 |   const peerId = await peer.id() 30 |   const protocol = argv.t === 'ws' ? 'ws' : 'tcp' 31 |   // output file and dashboard name will match strategy. default is balanced 32 |   name = protocol === 'ws' ? `${name}Ws` : name 33 |   const id = protocol === 'ws' ? 2 : 0 34 |   let command = `export IPFS_PATH=${conf.tmpPath}/ipfs0 && ipfs swarm connect ${peerId.addresses[id]}` 35 |   try { 36 |     await execute(command) 37 |   } catch (e) { 38 |     console.log(e) 39 |     await nodeFactory.stop('go') 40 |     return 41 |   } 42 | 43 |   const start = process.hrtime() 44 |   command = `export IPFS_PATH=${conf.tmpPath}/ipfs0 && ipfs cat ${inserted[0].hash} > /dev/null` 45 |   try { 46 |     await execute(command) 47 |   } catch (e) { 48 |     console.log(e) 49 |     await nodeFactory.stop('go') 50 |     return 51 |   } 52 |   const end = process.hrtime(start) 53 |   await nodeFactory.stop('go') 54 |   return build({ 55 |     name: name, 56 |     warmup: warmup, 57 |     file: filePath, 58 |     meta: { version: version }, 59 |     description: `Cat file ${description}`, 60 |     file_set: fileSet, 61 |     duration: { s: end[0], 62 |       ms: end[1] / 1000000 } 63 |   }) 64 | } 65 | run(extractJs2Go) 66 | -------------------------------------------------------------------------------- /infrastructure/grafana/README.md: -------------------------------------------------------------------------------- 1 | # Dashboard 2 | 3 | The latest dashboard JSON export is stored in this directory. 4 | 5 | ## Metrics 6 | For each benchmark test carried out, there is a graph in the dashboard: 7 | * local transfer 8 | * local add 9 | * local extract 10 | * multi peer transfer 11 | 12 | Each graph contains 4 lines, one for every test combination: 13 | * empty repo / 10k file 14 | * empty repo / 1m file 15 | * populated repo / 10k file 16 | * populated repo / 1m file 17 | 18 | Each of these metrics is backed by an InfluxDB query. Queries are constructed using Grafana's built-in query constructor, which provides autocomplete and instant feedback.
An example of one of those raw queries is: 19 | ``` 20 | "SELECT mean("duration") FROM "localTransfer" WHERE ("project" = 'js-ipfs' AND "testClass" = 'smallfile' AND "subTest" = 'empty-repo') AND time >= now() - 7d GROUP BY time(15m) fill(none);SELECT mean("duration") FROM "localTransfer" WHERE ("project" = 'js-ipfs' AND "testClass" = 'largefile' AND "subTest" = 'empty-repo') AND time >= now() - 7d GROUP BY time(15m) fill(none);SELECT mean("duration") FROM "localTransfer" WHERE ("project" = 'js-ipfs' AND "testClass" = 'largefile' AND "subTest" = 'populated-repo') AND time >= now() - 7d GROUP BY time(15m) fill(none);SELECT mean("duration") FROM "localTransfer" WHERE ("project" = 'js-ipfs' AND "testClass" = 'smallfile' AND "subTest" = 'populated-repo') AND time >= now() - 7d GROUP BY time(15m) fill(none)" 21 | ``` 22 | ## Adding new metrics 23 | The easiest way to add a new metric to the dashboard is by duplicating an existing graph and modifying the query. 24 | You need to be logged in as `admin` to be able to do this. 25 | Duplicating is done by clicking the title of a graph and selecting `duplicate`, as in the image below. 26 | ![Duplicating](duplicate-graph.png) 27 | A new graph will appear in the next available space with the exact same contents. 28 | Clicking the title and selecting `edit` will allow you to change the queries. Each line in the graph is backed by an InfluxDB query. Just click the second field after `FROM` to change the measurement source for the metric in the graph. In the image below you can see an example of that. 29 | ![Query](select-measurement.png) 30 | Do this for all queries and save the graph with a meaningful comment. -------------------------------------------------------------------------------- /tests/config/default-config.json: -------------------------------------------------------------------------------- 1 | [ {"libp2p": { 2 |   "modules": { 3 |     "transport": [], 4 |     "streamMuxer": [], 5 |     "connEncryption": [], 6 |     "peerDiscovery": [] 7 |     } 8 |   }, 9 |   "config":{ 10 |     "Addresses": { 11 |       "API": "/ip4/127.0.0.1/tcp/5012", 12 |       "Gateway": "/ip4/127.0.0.1/tcp/9091", 13 |       "Swarm": [ 14 |         "/ip4/0.0.0.0/tcp/4012", 15 |         "/ip4/0.0.0.0/tcp/4022/ws" 16 |       ] 17 |     }, 18 |     "Bootstrap": [] 19 |   } 20 | }, 21 | {"libp2p": { 22 |   "modules": { 23 |     "transport": [], 24 |     "streamMuxer": [], 25 |     "connEncryption": [], 26 |     "peerDiscovery": [] 27 |     } 28 |   }, 29 |   "config":{ 30 |     "Addresses": { 31 |       "API": "/ip4/127.0.0.1/tcp/5013", 32 |       "Gateway": "/ip4/127.0.0.1/tcp/9092", 33 |       "Swarm": [ 34 |         "/ip4/0.0.0.0/tcp/4013", 35 |         "/ip4/127.0.0.1/tcp/4023/ws" 36 |       ] 37 |     }, 38 |     "Bootstrap": [] 39 |   } 40 | }, 41 | {"libp2p": { 42 |   "modules": { 43 |     "transport": [], 44 |     "streamMuxer": [], 45 |     "connEncryption": [], 46 |     "peerDiscovery": [] 47 |     } 48 |   }, 49 |   "config":{ 50 |     "Addresses": { 51 |       "API": "/ip4/127.0.0.1/tcp/5014", 52 |       "Gateway": "/ip4/127.0.0.1/tcp/9093", 53 |       "Swarm": [ 54 |         "/ip4/0.0.0.0/tcp/4014", 55 |         "/ip4/127.0.0.1/tcp/4024/ws" 56 |       ] 57 |     }, 58 |     "Bootstrap": [] 59 |   } 60 | }, 61 | {"libp2p": { 62 |   "modules": { 63 |     "transport": [], 64 |     "streamMuxer": [], 65 |     "connEncryption": [], 66 |     "peerDiscovery": [] 67 |     } 68 |   }, 69 |   "config":{ 70 |     "Addresses": { 71 |       "API": "/ip4/127.0.0.1/tcp/5015", 72 |       "Gateway": "/ip4/127.0.0.1/tcp/9094", 73 |       "Swarm": [ 74 |         "/ip4/0.0.0.0/tcp/4015", 75 |         "/ip4/127.0.0.1/tcp/4025/ws" 76 |       ] 77 |     }, 78 |     "Bootstrap": [] 79 |   } 80 | }, 81 | {"libp2p": { 82 |   "modules": { 83 |     "transport": [], 84 |     "streamMuxer": [], 85 |
"connEncryption": [], 86 | "peerDiscovery": [] 87 | } 88 | }, 89 | "config":{ 90 | "Addresses": { 91 | "API": "/ip4/127.0.0.1/tcp/5016", 92 | "Gateway": "/ip4/127.0.0.1/tcp/9095", 93 | "Swarm": [ 94 | "/ip4/0.0.0.0/tcp/4016", 95 | "/ip4/127.0.0.1/tcp/4026/ws" 96 | ] 97 | }, 98 | "Bootstrap": [] 99 | } 100 | } 101 | ] -------------------------------------------------------------------------------- /tests/pubsub-message.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { build } = require('./schema/results') 4 | const run = require('./lib/runner') 5 | const promiseRetry = require('promise-retry') 6 | 7 | /** 8 | * Pubsub publish & receive a message 9 | * js0 -> js1 - A test between two JS IPFS node 10 | * @async 11 | * @function pubsubMessage 12 | * @param {array} peerArray - An array of IPFS peers used during the test. 13 | * @param {string} name - Name of the test used as sending results to the file with same name and data point in dashboard. 14 | * @param {boolean} warmup - Not implemented. 15 | * @param {string} fileSet - Describes file or list of files used for the test. 16 | * @param {string} version - Version of IPFS used in benchmark. 17 | * @return {Promise} The data from the benchamrk 18 | */ 19 | async function pubsubMessage (peerArray, name, warmup, fileSet, version) { 20 | const topic = 'ipfs-benchmark' 21 | const peerA = peerArray[0] 22 | const peerB = peerArray[1] 23 | 24 | // connect peers 25 | const peerAId = await peerA.id() 26 | const peerBId = await peerB.id() 27 | 28 | await peerB.swarm.connect(peerAId.addresses[0]) 29 | 30 | return new Promise((resolve, reject) => { 31 | const start = process.hrtime() 32 | 33 | // Subscribe topic 34 | peerB.pubsub.subscribe(topic, () => { 35 | const end = process.hrtime(start) 36 | resolve(build({ 37 | name: 'pubsubMessage', 38 | warmup: warmup, 39 | file: 'none', 40 | meta: { version: version }, 41 | description: 'Pubsub publish & receive a message. js0 -> js1', 42 | file_set: 'none', 43 | duration: { 44 | s: end[0], 45 | ms: end[1] / 1000000 46 | } 47 | })) 48 | }) 49 | 50 | // wait for peerA to know about peerB subscription 51 | let peers 52 | promiseRetry(async (retry, number) => { 53 | peers = await peerA.pubsub.peers(topic) 54 | if (peers.length && peers.includes(peerBId.id)) { 55 | return Promise.resolve() 56 | } else { 57 | retry() 58 | } 59 | }).then(() => { 60 | // Publish 61 | return peerA.pubsub.publish(topic, Buffer.from('data')) 62 | }).catch((err) => reject(err)) 63 | }) 64 | } 65 | 66 | run(pubsubMessage, 2, 'nodejs', { EXPERIMENTAL: { 67 | pubsub: true 68 | } }) 69 | -------------------------------------------------------------------------------- /infrastructure/playbooks/README.md: -------------------------------------------------------------------------------- 1 | # Ansible playbooks 2 | 3 | There are two main playbooks: 4 | 1. [controller.yml](/infrastructure/playbooks/controller.yaml) 5 | 2. [benchmarks.yml](/infrastructure/playbooks/benchmarks.yaml) 6 | 7 | The controller playbook prepares a host with the following components: 8 | * Node.js 9 | * Docker 10 | * Ansible 11 | * Directories with proper rights to support the `Runner` 12 | 13 | The controller playbook is also used by CI to deploy a new runner. For this purpose all tasks tagged with `prepare` are skipped. To prepare or upgrade a Controller VM, run the playbook in its entirety. 
14 | ``` 15 | ansible-playbook -i infrastructure/inventory/inventory.yaml infrastructure/playbooks/controller.yaml 16 | ``` 17 | Tags can be skipped by adding this to the command line: 18 | ``` 19 | --skip-tags "prepare" 20 | ``` 21 | 22 | ## Controller 23 | ### Host access 24 | Since the runner is not dockerized, the nginx proxy running inside docker needs to be allowed access to the host, where the `Runner` is running. This is done by creating a docker interface named `my-bridge` and opening the firewall port 9000 on the host for traffic from that bridge. 25 | 26 | ### Runner process 27 | The runner is managed with [systemd](https://wiki.debian.org/systemd) and the proper files are installed by this playbook. When the process fails, it's automatically restarted. The `redeploy` part of the playbook uses systemctl to restart the runner after the new code has been copied and `npm install` was run. The deployment command used by CircleCI is: 28 | ``` 29 | ansible-playbook -i infrastructure/inventory/inventory.yaml infrastructure/playbooks/controller.yaml --skip-tags "prepare" 30 | ``` 31 | When you log in to the VM you can tail the logs with: 32 | ``` 33 | journalctl -u runner -f 34 | ``` 35 | 36 | ### Minion 37 | The runner uses the `benchmarks` playbook to provision minions to be able to run benchmark tests. The `controller` playbook takes care of provisioning the Controller so the Runner has all the tools needed to carry out this task. 38 | 39 | The playbook installs: 40 | * Node.js 41 | * The testing code (synced via rsync) 42 | 43 | The test code is copied including the node_modules directory. Node.js is installed as a normal user via [nvm](https://github.com/creationix/nvm), and the entire testing process runs under a normal user account. -------------------------------------------------------------------------------- /tests/lib/output.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const util = require('util') 5 | const path = require('path') 6 | const fsWriteFile = util.promisify(fs.writeFile) 7 | const fsMakeDir = util.promisify(fs.mkdir) 8 | const fsExists = util.promisify(fs.access) 9 | const { validate } = require('../schema/results.js') 10 | const Table = require('cli-table3') 11 | const folder = process.env.OUT_FOLDER || path.join(__dirname, '/../out') 12 | 13 | async function store (data) { 14 |   if (Array.isArray(data)) { 15 |     if (process.env.REMOTE === 'true' && (process.env.STAGE !== 'local' || process.env.DASHBOARD === 'local')) { 16 |       console.log('Writing output in a single file') 17 |       write(data) 18 |     } else { 19 |       console.log('Writing output in multiple files') 20 |       var table = new Table({ 21 |         head: ['Test', 'Warmup', 'Description', 'File Set', 'Duration'], colWidths: [40, 10, 60, 20] 22 |       }) 23 |       for (let testResult of data) { 24 |         table.push([testResult.name, testResult.warmup, testResult.description, testResult.file_set, `s:${testResult.duration.s} ms: ${testResult.duration.ms}`]) 25 |         write(testResult) 26 |       } 27 |       console.log(table.toString()) 28 |     } 29 |   } else { 30 |     throw Error('"store" requires an array') 31 |   } 32 | } 33 | 34 | async function write (data) { 35 |   const name = await createFilename(folder, data) 36 |   if (validate(data)) { 37 |     await fsWriteFile(`${name}.json`, JSON.stringify(data, null, 2)) 38 |   } else { 39 |     const name = await createFilename(`${folder}/error`, data) 40 |     await fsWriteFile(`${name}.json`, JSON.stringify(data, null, 2)) 41 |   } 42 | } 43 |
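// buildName: on a remote/CI run all results share a single file named after the test, while local runs get a timestamped file per run so repeated runs do not overwrite earlier output.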
44 | const buildName = (data) => { 45 | if (process.env.REMOTE === 'true' && (process.env.STAGE !== 'local' || process.env.DASHBOARD === 'local')) { 46 | return `${folder}/${data[0].name || 'undefined'}` 47 | } else { 48 | return `${folder}/${data.name || 'undefined'}-${new Date().toISOString()}` 49 | } 50 | } 51 | 52 | async function createFilename (folder, data) { 53 | try { 54 | await fsExists(folder) 55 | return buildName(data) 56 | } catch (err) { 57 | try { 58 | await fsMakeDir(folder) 59 | return buildName(data) 60 | } catch (err) { 61 | return buildName(data) 62 | } 63 | } 64 | } 65 | module.exports = { 66 | createFilename, 67 | write, 68 | store 69 | } 70 | -------------------------------------------------------------------------------- /runner/provision.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const path = require('path') 4 | const config = require('./config') 5 | const { hashElement } = require('folder-hash') 6 | const local = require('./local') 7 | const remote = require('./remote') 8 | const fs = require('fs') 9 | const hashFile = '/dirHash.txt' 10 | 11 | const options = { 12 | folders: { include: ['.*'] }, 13 | files: { include: ['deploy.txt', '*.js', '*.*/*js', '*.json', '**/*.json', '*.sh'] } 14 | } 15 | 16 | const dirHash = (dir) => { 17 | config.log.info('Creating a hash over the [../tests] folder:') 18 | return hashElement(dir, options) 19 | } 20 | 21 | const writeHash = (hash, dir) => { 22 | let hashPath = path.join(dir, hashFile) 23 | try { 24 | fs.writeFileSync(hashPath, hash.hash) 25 | config.log.info(`Written local hash: [${hash.hash}]`) 26 | } catch (e) { 27 | throw Error(e) 28 | } 29 | } 30 | 31 | const checkHash = async (hashPath) => { 32 | try { 33 | let hash = await remote.run(`cat ${hashPath}`) 34 | config.log.info(`Remote hash is: [${hash}]`) 35 | return hash 36 | } catch (e) { 37 | if (e.message.includes('No such file or directory')) { 38 | return '' 39 | } else { 40 | config.log.error(e) 41 | throw Error(e) 42 | } 43 | } 44 | } 45 | 46 | const cloneIpfsRemote = async (commit) => { 47 | try { 48 | let out = await remote.run(`${config.benchmarks.remotePath}getIpfs.sh ${config.ipfs.path} ${commit || ''}`) 49 | config.log.info(out) 50 | return 51 | } catch (e) { 52 | config.log.error(e) 53 | throw Error(e) 54 | } 55 | } 56 | 57 | const ensure = async (commit) => { 58 | try { 59 | let hash = await dirHash(config.benchmarks.path) 60 | await writeHash(hash, config.benchmarks.path) 61 | let remotehash = await checkHash(path.join(config.benchmarks.remotePath, hashFile)) 62 | if (remotehash !== hash.hash) { 63 | config.log.info(`Tests on [${config.benchmarks.host}] are out of sync, updating...`) 64 | let ansible = await local.run(config.provison.command) 65 | config.log.info(ansible) 66 | } else { 67 | config.log.info(`Tests on [${config.benchmarks.host}] are up to date.`) 68 | } 69 | // provision required commit of ipfs 70 | await cloneIpfsRemote(commit) 71 | } catch (e) { 72 | throw Error(e) 73 | } 74 | } 75 | 76 | module.exports = { 77 | dirHash: dirHash, 78 | ensure: ensure, 79 | writeHash: writeHash 80 | } 81 | -------------------------------------------------------------------------------- /tests/extract-go2.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const os = require('os') 4 | const { build } = require('./schema/results') 5 | const { file } = require('./lib/fixtures') 6 | const run = require('./lib/runner') 7 | const { once } 
= require('stream-iterators-utils') 8 | const NodeFactory = require('./lib/node-factory') 9 | const util = require('util') 10 | const execute = util.promisify(require('child_process').exec) 11 | const conf = { tmpPath: os.tmpdir() } 12 | const { description } = require('./config').parseParams() 13 | const argv = require('minimist')(process.argv.slice(2)) 14 | 15 | async function extractGo2Js (ipfs, name, warmup, fileSet, version) { 16 |   // Runner returns the NodeJS ipfs but we need to create the Go ipfs 17 |   const nodeFactory = new NodeFactory() 18 |   try { 19 |     await nodeFactory.add('go') 20 |   } catch (e) { 21 |     console.log(e) 22 |     return 23 |   } 24 |   const filePath = await file(fileSet) 25 |   const peer = ipfs[0] 26 | 27 |   const peerId = await peer.id() 28 |   const protocol = argv.t === 'ws' ? 'ws' : 'tcp' 29 |   // output file and dashboard name will match strategy. default is balanced 30 |   name = protocol === 'ws' ? `${name}Ws` : name 31 |   console.log(peerId) 32 |   let command = `export IPFS_PATH=${conf.tmpPath}/ipfs0 && ipfs swarm connect ${peerId.addresses[0]} > /dev/null` 33 |   try { 34 |     await execute(command) 35 |   } catch (e) { 36 |     console.log(e) 37 |     await nodeFactory.stop('go') 38 |     return 39 |   } 40 |   // redirect stderr to /dev/null because file-processing progress is written to stderr and can overflow exec's maxBuffer 41 |   const addCommand = `export IPFS_PATH=${conf.tmpPath}/ipfs0 && ipfs add ${filePath} 2> /dev/null` 42 |   let stdout 43 |   try { 44 |     ({ stdout } = await execute(addCommand)) 45 |   } catch (e) { 46 |     console.log(e) 47 |     await nodeFactory.stop('go') 48 |     return 49 |   } 50 |   const start = process.hrtime() 51 |   let stream = peer.catReadableStream(stdout.split(' ')[1]) 52 |   // end of stream 53 |   stream.resume() 54 |   await once(stream, 'end') 55 |   const end = process.hrtime(start) 56 |   await nodeFactory.stop('go') 57 |   return build({ 58 |     name: name, 59 |     warmup: warmup, 60 |     file: filePath, 61 |     meta: { version: version }, 62 |     description: `Cat file ${description}`, 63 |     file_set: fileSet, 64 |     duration: { s: end[0], 65 |       ms: end[1] / 1000000 } 66 |   }) 67 | } 68 | run(extractGo2Js) 69 | -------------------------------------------------------------------------------- /runner/remote.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const remoteExec = require('ssh-exec-plus') 4 | const config = require('./config') 5 | const sshConf = { 6 |   user: config.benchmarks.user, 7 |   host: config.benchmarks.host, 8 |   key: config.benchmarks.key, 9 |   timeout: 1000 * 60 * 3 10 | } 11 | 12 | const run = (shell, name) => { 13 |   config.log.info(`Running [${shell}] on host [${config.benchmarks.host}] for user [${config.benchmarks.user}] using [${config.benchmarks.key}]`) 14 |   const commandLogger = config.log.child({command: name || shell}) 15 |   return new Promise((resolve, reject) => { 16 |     let mainStream = remoteExec(shell, sshConf) 17 |     let cmdOutput = '' 18 |     mainStream.setEncoding('utf-8') 19 |     mainStream.on('data', (data) => { 20 |       commandLogger.info(data) 21 |       cmdOutput += data 22 |     }) 23 |     mainStream.on('warn', (data) => { 24 |       commandLogger.error(data) 25 |     }) 26 |     mainStream.on('end', () => { 27 |       commandLogger.debug('-- main command end --') 28 |       // if name is provided we assume it's a json file we read and pass back as the command's result.
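      // (that file is produced by the benchmark's output module, which writes <name>.json into the out folder on the benchmark host)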
29 |       if (name) { 30 |         let retrieveCommand = `cat ${config.outFolder}/${name}.json` 31 |         const retrieveLogger = config.log.child({command: retrieveCommand}) 32 |         config.log.info(`running [${retrieveCommand}] on [${config.benchmarks.host}]`) 33 |         let retrieveStream = remoteExec(retrieveCommand, sshConf) 34 |         let jsonResponse = '' 35 |         retrieveStream.setEncoding('utf-8') 36 |         retrieveStream.on('data', (data) => { 37 |           retrieveLogger.info(data) 38 |           jsonResponse += data 39 |         }) 40 |         retrieveStream.on('warn', (data) => { 41 |           retrieveLogger.error(data) 42 |         }) 43 |         retrieveStream.on('end', () => { 44 |           retrieveLogger.debug('-- retrieve command end --') 45 |           try { 46 |             let objResults = JSON.parse(jsonResponse) 47 |             retrieveLogger.debug(objResults) 48 |             resolve(objResults) 49 |           } catch (e) { 50 |             retrieveLogger.error(e) 51 |             retrieveLogger.error(jsonResponse) 52 |             resolve(e) 53 |           } 54 |         }) 55 |         retrieveStream.on('error', (err) => { 56 |           retrieveLogger.error(err, 'Retrievestream error') 57 |         }) 58 |       } else { 59 |         resolve(cmdOutput) 60 |       } 61 |     }) 62 |     mainStream.on('error', (err) => { 63 |       commandLogger.error(err, 'Mainstream error') 64 |     }) 65 |   }) 66 | } 67 | 68 | module.exports = { 69 |   run: run 70 | } 71 | -------------------------------------------------------------------------------- /tests/multi-peer-transfer.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const { file } = require('./lib/fixtures.js') 5 | const { build } = require('./schema/results') 6 | const { once } = require('stream-iterators-utils') 7 | const run = require('./lib/runner') 8 | const { description } = require('./config').parseParams() 9 | 10 | /** 11 |  * With the same file inserted into 4 peers, this test captures the time for a 5th peer to retrieve the file from the swarm using catReadableStream. 12 |  * js01234 -> js5 - A test from multiple JS IPFS nodes to a single JS IPFS node 13 |  * @async 14 |  * @function multiPeerTransfer 15 |  * @param {array} node - An array of IPFS peers used during the test. 16 |  * @param {string} name - Name of the test; results are written to a file and a dashboard data point of the same name. 17 |  * @param {boolean} warmup - Not implemented. 18 |  * @param {string} fileSet - Describes file or list of files used for the test. 19 |  * @param {string} version - Version of IPFS used in benchmark. 20 |  * @return {Promise} The data from the benchmark 21 |  */ 22 | const multiPeerTransfer = async (node, name, warmup, fileSet, version) => { 23 |   const filePath = await file(fileSet) 24 |   const fileStream = fs.createReadStream(filePath) 25 |   const peerA = node[0] 26 |   const peerB = node[1] 27 |   const peerC = node[2] 28 |   const peerD = node[3] 29 |   const peerE = node[4] 30 |   const peerAId = await peerA.id() 31 |   const peerBId = await peerB.id() 32 |   const peerCId = await peerC.id() 33 |   const peerDId = await peerD.id() 34 |   const inserted = await peerA.add(fileStream) // a read stream can only be consumed once, so each peer gets a fresh one 35 |   await peerB.add(fs.createReadStream(filePath)) 36 |   await peerC.add(fs.createReadStream(filePath)) 37 |   await peerD.add(fs.createReadStream(filePath)) 38 |   peerE.swarm.connect(peerAId.addresses[0]) 39 |   peerE.swarm.connect(peerBId.addresses[0]) 40 |   peerE.swarm.connect(peerCId.addresses[0]) 41 |   peerE.swarm.connect(peerDId.addresses[0]) 42 |   const start = process.hrtime() 43 |   let stream = peerE.catReadableStream(inserted[0].hash) 44 |   // end of stream 45 |   stream.resume() 46 | 47 |   // we cannot use end-of-stream/pump for some reason here 48 |   // investigate.
49 | // https://github.com/ipfs/js-ipfs/issues/1774 50 | await once(stream, 'end') 51 | const end = process.hrtime(start) 52 | 53 | return build({ 54 | name: name, 55 | warmup: warmup, 56 | file_set: fileSet, 57 | file: filePath, 58 | description: `Cat file ${description} js01234 -> js5`, 59 | meta: { version: version }, 60 | duration: { 61 | s: end[0], 62 | ms: end[1] / 1000000 63 | } 64 | }) 65 | } 66 | 67 | run(multiPeerTransfer, 5) 68 | -------------------------------------------------------------------------------- /runner/test/compress.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const os = require('os') 5 | const util = require('util') 6 | const tap = require('tap') 7 | const mkDir = util.promisify(fs.mkdir) 8 | const writeFile = util.promisify(fs.writeFile) 9 | const readDir = util.promisify(fs.readdir) 10 | const stat = util.promisify(fs.stat) 11 | const rmfr = require('rmfr') 12 | const now = Date.now() 13 | const clinicFile = '1234.clinic-bubbleprof' 14 | const tmpDir = `${os.tmpdir()}/${now}-compress-test` 15 | const dataDir = `${tmpDir}/${clinicFile}` 16 | const compress = require('../compress') 17 | 18 | const createTestFiles = async () => { 19 | await mkDir(tmpDir) 20 | await writeFile(`${tmpDir}/${clinicFile}.html`, 'clinic') 21 | await mkDir(dataDir) 22 | await writeFile(`${dataDir}/1.txt`, '1') 23 | await writeFile(`${dataDir}/2.txt`, '2') 24 | await writeFile(`${dataDir}/3.txt`, '3') 25 | await writeFile(`${dataDir}/4.txt`, '4') 26 | } 27 | 28 | const cleanup = async () => { 29 | await rmfr(tmpDir) 30 | } 31 | 32 | tap.test('compress dir', async (t) => { 33 | await createTestFiles() 34 | await compress._tgzDir(tmpDir, `${tmpDir}/file.tar.gz`) 35 | const contents = await readDir(tmpDir) 36 | let found = false 37 | for (let node of contents) { 38 | if (node === 'file.tar.gz') found = true 39 | } 40 | tap.equal(found, true, 'found compressed file') 41 | await cleanup() 42 | t.done() 43 | }) 44 | 45 | tap.test('compress clinic files', async (t) => { 46 | await createTestFiles() 47 | await compress.clinicFiles(tmpDir) 48 | try { 49 | await stat(`${tmpDir}/${clinicFile}`) 50 | } catch (e) { 51 | tap.match(e.message, 'no such file or directory') 52 | } 53 | let htmlStats = await stat(`${tmpDir}/${clinicFile}.html`) 54 | tap.type(htmlStats, 'object') 55 | let targzStats = await stat(`${tmpDir}/${clinicFile}.tar.gz`) 56 | tap.type(targzStats, 'object') 57 | await cleanup() 58 | t.done() 59 | }) 60 | 61 | tap.test('fails if missing', async (t) => { 62 | try { 63 | await compress.clinicFiles(tmpDir) 64 | } catch (e) { 65 | tap.match(e.message, 'no such file or directory') 66 | } 67 | try { 68 | await stat(`${tmpDir}/${clinicFile}`) 69 | } catch (e) { 70 | tap.match(e.message, 'no such file or directory') 71 | } 72 | try { 73 | await stat(`${tmpDir}/${clinicFile}.html`) 74 | } catch (e) { 75 | tap.match(e.message, 'no such file or directory') 76 | } 77 | try { 78 | await stat(`${tmpDir}/${clinicFile}.tar.gz`) 79 | } catch (e) { 80 | tap.match(e.message, 'no such file or directory') 81 | } 82 | t.done() 83 | }) 84 | -------------------------------------------------------------------------------- /infrastructure/deploy/init-letsencrypt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | domains=( "benchmarks.ipfs.team" ) 4 | rsa_key_size=4096 5 | data_path="./data/certbot" 6 | email="alex.knol@nearform.com" #Adding a valid address is 
strongly recommended
7 | staging=1 #Set to 1 if you're just testing your setup to avoid hitting request limits
8 | FILES="-f docker-compose.yaml -f docker-compose.prod.yaml"
9 |
10 | echo "### Preparing directories in $data_path ..."
11 | rm -Rf "$data_path"
12 | mkdir -p "$data_path/www"
13 | mkdir -p "$data_path/conf/live/$domains"
14 |
15 |
16 | echo "### Creating dummy certificate ..."
17 | path="/etc/letsencrypt/live/$domains"
18 | mkdir -p "$path"
19 | docker-compose $FILES run --rm --entrypoint "\
20 | openssl req -x509 -nodes -newkey rsa:1024 -days 1\
21 | -keyout '$path/privkey.pem' \
22 | -out '$path/fullchain.pem' \
23 | -subj '/CN=localhost'" certbot
24 |
25 |
26 | echo "### Downloading recommended HTTPS parameters ..."
27 | curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf > "$data_path/conf/options-ssl-nginx.conf"
28 | curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot/ssl-dhparams.pem > "$data_path/conf/ssl-dhparams.pem"
29 |
30 |
31 | echo "### Starting nginx ..."
32 | docker-compose $FILES up -d nginx
33 |
34 |
35 | echo "### Deleting dummy certificate ..."
36 | sudo rm -Rf "$data_path/conf/live"
37 |
38 | echo "### Downloading recommended TLS options ..."
39 | curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf > "$data_path/conf/options-ssl-nginx.conf"
40 | curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot/ssl-dhparams.pem > "$data_path/conf/ssl-dhparams.pem"
41 |
42 |
43 | echo "### Requesting initial certificate ..."
44 |
45 | #Join $domains to -d args
46 | domain_args=""
47 | for domain in "${domains[@]}"; do
48 | domain_args="$domain_args -d $domain"
49 | done
50 |
51 | #Select appropriate email arg
52 | case "$email" in
53 | "") email_arg="--register-unsafely-without-email" ;;
54 | *) email_arg="--email $email" ;;
55 | esac
56 |
57 | #Enable staging mode if needed
58 | if [ "$staging" != "0" ]; then staging_arg="--staging"; fi
59 |
60 | docker-compose $FILES run --rm --entrypoint "\
61 | certbot certonly --webroot -w /var/www/certbot \
62 | $staging_arg \
63 | $email_arg \
64 | $domain_args \
65 | --rsa-key-size $rsa_key_size \
66 | --agree-tos \
67 | --force-renewal" certbot
68 |
69 | docker-compose $FILES stop nginx
70 |
71 | echo "changing ownership of $data_path to ${UID}:docker"
72 | sudo chown ${UID}:docker -R $data_path
73 |
--------------------------------------------------------------------------------
/runner/test/configBenchmarks.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const tap = require('tap')
4 | const configBenchmarks = require('../lib/configBenchmarks')
5 |
6 | tap.test('construct a single test', async (t) => {
7 | let benchmarks = configBenchmarks.constructTests(
8 | 'remote',
9 | false,
10 | ['unixFsAddBrowser_balanced']
11 | )
12 | tap.equal(benchmarks[0].name, 'unixFsAddBrowser_balanced', 'check test name')
13 | tap.equal(benchmarks.length, 1, 'should be 1 test')
14 | tap.contains(benchmarks[0].benchmark, 'local-add.browser.js', 'check test file')
15 | t.end()
16 | })
17 |
18 | tap.test('construct 2 tests', async (t) => {
19 | let benchmarks = configBenchmarks.constructTests(
20 | 'remote',
21 | false,
22 | ['unixFsAddBrowser_balanced', 'addMultiKbBrowser_balanced']
23 | )
24 | tap.equal(benchmarks[0].name, 'unixFsAddBrowser_balanced', 'check first test name')
25 | tap.equal(benchmarks[1].name,
'addMultiKbBrowser_balanced', 'check second test name')
26 | tap.equal(benchmarks.length, 2, 'should be 2 tests')
27 | t.end()
28 | })
29 |
30 | tap.test('construct 1 non-existing and 1 existing test', async (t) => {
31 | let benchmarks = configBenchmarks.constructTests(
32 | 'remote',
33 | false,
34 | ['blahblah', 'addMultiKbBrowser_balanced']
35 | )
36 | tap.equal(benchmarks[0].name, 'addMultiKbBrowser_balanced', 'check first and only test name')
37 | tap.equal(benchmarks.length, 1, 'should be 1 test')
38 | t.end()
39 | })
40 |
41 | tap.test('non-existent test name', async (t) => {
42 | let benchmarks = configBenchmarks.constructTests(
43 | 'remote',
44 | false,
45 | ['blablah']
46 | )
47 | tap.equal(benchmarks.length, 0, 'there should be no test')
48 | t.end()
49 | })
50 |
51 | tap.test('default tests', async (t) => {
52 | let benchmarks = configBenchmarks.constructTests(
53 | 'remote',
54 | false
55 | )
56 | tap.equal(benchmarks.length, configBenchmarks.testAbstracts.length, 'compare # constructed to # abstracts')
57 | t.end()
58 | })
59 |
60 | tap.test('construct a single test with clinic', async (t) => {
61 | let benchmarks = configBenchmarks.constructTests(
62 | 'remote',
63 | true,
64 | ['unixFsAddBrowser_balanced']
65 | )
66 | tap.equal(benchmarks[0].name, 'unixFsAddBrowser_balanced', 'check test name')
67 | tap.equal(benchmarks.length, 1, 'should be 1 test')
68 | tap.equal(typeof benchmarks[0]['doctor'] === 'undefined', false, 'contains doctor property')
69 | tap.equal(typeof benchmarks[0]['flame'] === 'undefined', false, 'contains flame property')
70 | tap.equal(typeof benchmarks[0]['bubbleProf'] === 'undefined', false, 'contains bubbleProf property')
71 | tap.equal(typeof benchmarks[0]['blahblah'] === 'undefined', true, 'does not contain blahblah property')
72 | t.end()
73 | })
74 |
--------------------------------------------------------------------------------
/tests/browser/src/components/peer-transfer.js:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import IPFS from 'ipfs'
3 | import hrtime from 'browser-process-hrtime'
4 | import uuidv1 from 'uuid/v1'
5 | import 'react-table/react-table.css'
6 | import { once } from 'stream-iterators-utils'
7 | import fileReaderStream from 'filereader-stream'
8 | import getId from './getId'
9 | import localState from './localState'
10 | import TestRow from './test-row'
11 | import WS from 'libp2p-websockets'
12 | import MPLEX from 'libp2p-mplex'
13 | class PeerTransfer extends React.Component {
14 | constructor (props) {
15 | super(props)
16 | this.state = localState
17 | }
18 |
19 | async test (e) {
20 | const server = process.env.REACT_APP_REMOTE === 'true' ?
'benchmarks.ipfs.team' : 'ws-star.discovery.libp2p.io'
21 | const fileArray = [...e.target.files]
22 | // Create the IPFS node instance
23 | const node = new IPFS({ repo: String(uuidv1()),
24 | config: { 'libp2p': {
25 | 'modules': {
26 | transport: [WS],
27 | streamMuxer: [MPLEX],
28 | connEncryption: [],
29 | 'peerDiscovery': []
30 | }
31 | },
32 | Addresses: {
33 | Swarm: [
34 | `/dnsaddr/${server}/tcp/9090/ws/p2p-websocket-star/`
35 | ]
36 | }
37 | }
38 | })
39 | const node2 = new IPFS({ repo: String(uuidv1()),
40 | config: { 'libp2p': {
41 | 'modules': {
42 | transport: [WS],
43 | streamMuxer: [MPLEX],
44 | connEncryption: [],
45 | 'peerDiscovery': []
46 | }
47 | },
48 | Addresses: {
49 | Swarm: [
50 | `/dnsaddr/${server}/tcp/9090/ws/p2p-websocket-star/`
51 | ]
52 | }
53 | }
54 | })
55 | node.on('ready', () => {})
56 | node2.on('ready', () => {})
57 | await once(node, 'ready')
58 | await once(node2, 'ready')
59 | const nodeId = await node.id()
60 | await node2.swarm.connect(nodeId.addresses[0])
61 | const fileStream = fileReaderStream(fileArray[0])
62 | const inserted = node.add ? await node.add(fileStream) : await node.files.add(fileStream)
63 | const start = hrtime()
64 | let stream = node2.catReadableStream ? node2.catReadableStream(inserted[0].hash) : node2.files.catReadableStream(inserted[0].hash)
65 | stream.resume()
66 | await once(stream, 'end')
67 | const delta = hrtime(start)
68 | const results = await getId(node, delta, this.state)
69 | this.setState(results)
70 | }
71 | render () {
72 | const name = 'peerTransfer'
73 | const description = 'Transfer files between peers'
74 | return (
75 |
81 | )
82 | }
83 | }
84 | export default PeerTransfer
85 |
--------------------------------------------------------------------------------
/infrastructure/README.md:
--------------------------------------------------------------------------------
1 | # Provisioning
2 |
3 | Ansible is used as the provisioning tool for the host running as the controller. The playbook for that is located in `playbooks/controller.yaml`. The playbook targets the hosts under `controllers` in `inventory/inventory.yaml`. To access the host, it uses the ssh key referenced in `inventory/group_vars/controller` by the `ansible_ssh_private_key_file` variable. The playbook also requires you to provide a password for the Grafana (action) user.
4 |
5 | ## Prerequisites
6 |
7 | Install the required packages for your OS:
8 |
9 | * [python3](https://realpython.com/installing-python/)
10 | * [passlib](https://passlib.readthedocs.io/en/stable/install.html)
11 | * [Ansible](https://www.ansible.com/)
12 | * Ansible roles:
13 | * [geerlingguy.nodejs](https://github.com/geerlingguy/ansible-role-nodejs)
14 | * [nickjj.docker](https://github.com/nickjj/ansible-docker)
15 |
16 | A typical way to run the playbook would be:
17 |
18 | ```sh
19 | ansible-playbook -i infrastructure/inventory/inventory.yaml infrastructure/playbooks/controller.yaml --extra-vars "action_user_pw=test"
20 | ```
21 |
22 | ## Architecture
23 |
24 | The diagram below describes the production setup.
25 |
26 | ![Production diagram](prod-infrastructure.png)
27 |
28 | Production comprises two hosts:
29 |
30 | * The `controller` is a virtual machine running the `runner`, the `datastore` and the `dashboard`.
31 | * The `minion` is a bare-metal machine that runs the benchmarks.
32 |
33 | ### Controller
34 |
35 | The controller runs the [runner](../runner/) as a daemon in a docker container and exposes an endpoint to trigger a benchmark run with parameters, as illustrated below.
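For illustration only, a trigger request against the production endpoint could look like the sketch below. The `/runner/` path prefix and the `x-ipfs-benchmarks-api-key` header follow `.circleci/config.yml` and the runner's docs, and the payload fields mirror the runner's `POST /` schema; treat the concrete values as placeholders.

```js
// Hypothetical trigger sketch: queue a benchmark run on the controller.
const https = require('https')

const body = JSON.stringify({ commit: 'adfy3hk', clinic: { enabled: true } })
const req = https.request('https://benchmarks.ipfs.team/runner/', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'x-ipfs-benchmarks-api-key': process.env.APIKEY
  }
}, (res) => res.pipe(process.stdout)) // echoes the queued task as JSON
req.end(body)
```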
36 |
37 | The datastore, `InfluxDB`, and the dashboard, `Grafana`, also run inside containers, all managed with [docker-compose](https://docs.docker.com/compose/).
38 |
39 | The runner can be triggered from a remote resource, but it is also triggered at least once a day on a schedule to ensure there is a data point for each day.
40 |
41 | The runner is responsible for running the latest benchmark tests on the `minion`. [Ansible](https://www.ansible.com/) is used to manage the provisioning state of the `minion` before each run. The benchmarks write the test results to a file determined by a parameter. After the run, this file is retrieved and parsed by the runner. The results are then written to the datastore as well as stored on the IPFS network.
42 |
43 | ### Minion
44 |
45 | This bare-metal machine is kept as clean as possible to enable reliable results from the benchmarks. Tests are run over `ssh` from the `controller`.
46 |
47 | After the benchmark test has been run to get an accurate result, the same test is run again using the clinic tools to provide in-depth analysis.
48 |
49 | Tests are kept up to date before each run and are always run in sequence. After each test, any generated files and resources are retrieved by the runner and cleaned up after successful reception.
50 |
--------------------------------------------------------------------------------
/tests/config/index.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | const uuidv1 = require('uuid/v1')
3 | const guid = process.env.GUID || uuidv1()
4 | const fileSetParam = (process.env.FILESET && process.env.FILESET.toLowerCase()) || false
5 | const verify = process.env.VERIFYOFF && process.env.VERIFYOFF.toLowerCase() === 'true'
6 | const argv = require('minimist')(process.argv.slice(2))
7 |
8 | const tests = { 'unixFsAdd': [{
9 | 'warmup': 'Off',
10 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] } ],
11 | 'localExtract': [{
12 | 'warmup': 'Off',
13 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] } ],
14 | 'localTransfer': [{
15 | 'warmup': 'Off',
16 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] } ],
17 | 'multiPeerTransfer': [{
18 | 'warmup': 'Off',
19 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] } ],
20 | 'addMultiKb': [{
21 | 'warmup': 'Off',
22 | 'fileSet': ['Hundred1KBFile'] } ],
23 | 'initializeNode': [{
24 | 'warmup': 'Off',
25 | 'fileSet': ['None'] }],
26 | 'initializeNodeBrowser': [{
27 | 'warmup': 'Off',
28 | 'fileSet': ['None'] }],
29 | 'unixFsAddBrowser': [{
30 | 'warmup': 'Off',
31 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] }],
32 | 'addMultiKbBrowser': [{
33 | 'warmup': 'Off',
34 | 'fileSet': ['Hundred1KBFile'] } ],
35 | 'peerTransferBrowser': [{
36 | 'warmup': 'Off',
37 | 'fileSet': ['OneKBFile', 'OneMBFile'] }],
38 | 'unixFsAddGo': [{
39 | 'warmup': 'Off',
40 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] } ],
41 | 'extractJs2Go': [{
42 | 'warmup': 'Off',
43 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] } ],
44 | 'extractGo2Js': [{
45 | 'warmup': 'Off',
46 | 'fileSet': ['OneKBFile', 'OneMBFile', 'One4MBFile', 'One64MBFile'] } ],
47 | 'pubsubMessage': [{
48 | 'warmup': 'Off',
49 | 'fileSet': ['None'] }]
50 | }
51 | const parseParams = () => {
52 | let name = '_'
53 | let desc = '('
54 | let strategy = 'balanced'
55 | if (argv.s) {
56 | if (argv.s === 'trickle') {
57 | name = `${name}trickle`
58 | desc =
`${desc}trickle` 59 | strategy = 'trickle' 60 | } else { 61 | name = `${name}balanced` 62 | desc = `${desc}balanced` 63 | } 64 | } else { 65 | if (argv.t === 'ws') { 66 | name = `${name}ws_` 67 | desc = `${desc}websocket, ` 68 | } else { 69 | name = `${name}tcp_` 70 | desc = `${desc}tcp, ` 71 | } 72 | if (argv.m === 'spdy') { 73 | name = `${name}spdy` 74 | desc = `${desc}spdy` 75 | } else { 76 | name = `${name}mplex` 77 | desc = `${desc}mplex` 78 | } 79 | if (argv.e === 'secio') { 80 | name = `${name}_secio` 81 | desc = `${desc}, secio` 82 | } 83 | } 84 | desc = `${desc})` 85 | return { name: name, description: desc, strategy: strategy } 86 | } 87 | const config = { 88 | test: tests, 89 | fileSetParam: fileSetParam, 90 | verify: verify, 91 | guid: guid, 92 | parseParams: parseParams 93 | } 94 | 95 | module.exports = config 96 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | working_directory: ~/ipfs 5 | docker: 6 | - image: nearform/circleci-node-ansible:1.0.3 7 | steps: 8 | - checkout 9 | - run: 10 | name: update-npm 11 | command: 'sudo npm install -g npm@6' 12 | 13 | - restore_cache: 14 | key: dependency-runner-cache-{{ checksum "runner/package.json" }} 15 | - run: 16 | name: npm-install-runner 17 | command: cd runner && npm install 18 | - save_cache: 19 | key: dependency-runner-cache-{{ checksum "runner/package.json" }} 20 | paths: 21 | - ./runner/node_modules 22 | 23 | - restore_cache: 24 | key: dependency-benchmark-cache-{{ checksum "tests/package.json" }} 25 | - run: 26 | name: npm-install-benchmarks 27 | command: cd tests && npm install 28 | - save_cache: 29 | key: dependency-benchmark-cache-{{ checksum "tests/package.json" }} 30 | paths: 31 | - ./tests/node_modules 32 | 33 | - run: 34 | name: test-benchmarks 35 | command: cd tests && NODE_ENV=test npm test 36 | - run: 37 | name: test-runner 38 | command: cd runner && NODE_ENV=test npm test 39 | # Persist the specified paths (workspace/echo-output) into the workspace for use in downstream job. 40 | - persist_to_workspace: 41 | # Must be an absolute path, or relative path from working_directory. This is a directory on the container which is 42 | # taken to be the root directory of the workspace. 43 | root: ~/ipfs 44 | # Must be relative path from root 45 | paths: 46 | - . 
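# Note on the flow here: the deploy job below re-attaches the workspace persisted above
# on a fresh container, installs the deploy key from the IPFS_KEY environment variable,
# runs the controller playbook against production, and finally queues a runner restart
# through the HTTP API (the /runner/restart route exposed by runner/index.js).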
47 | deploy: 48 | working_directory: ~/ipfs 49 | docker: 50 | - image: nearform/circleci-node-ansible:1.0.3 51 | steps: 52 | - attach_workspace: 53 | at: ~/ipfs 54 | - run: 55 | name: get private key 56 | command: | 57 | mkdir ~/.ssh 58 | echo $IPFS_KEY | base64 -d > ~/.ssh/id_rsa_ipfs 59 | chmod 600 ~/.ssh/id_rsa_ipfs 60 | - run: 61 | name: deploy 62 | command: | 63 | ANSIBLE_HOST_KEY_CHECKING=False \ 64 | ansible-playbook -i infrastructure/inventory/inventory.yaml \ 65 | infrastructure/playbooks/controller.yaml \ 66 | --skip-tags "prepare" 67 | - run: 68 | name: write deploy.txt to trigger benchmark host sync on first run after deploy 69 | command: | 70 | ssh -i ~/.ssh/id_rsa_ipfs -o "StrictHostKeyChecking no" ubuntu@benchmarks.ipfs.team "echo $(date) > /data/ipfs-benchmarks/tests/deploy.txt" 71 | - run: 72 | name: schedule restart 73 | command: | 74 | curl -XPOST -H "x-ipfs-benchmarks-api-key: $APIKEY" -k https://benchmarks.ipfs.team/runner/restart 75 | 76 | workflows: 77 | version: 2 78 | build_and_test: 79 | jobs: 80 | - build 81 | - deploy: 82 | requires: 83 | - build 84 | filters: 85 | branches: 86 | only: master 87 | 88 | -------------------------------------------------------------------------------- /infrastructure/playbooks/benchmarks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: minions 3 | # become: yes 4 | vars: 5 | node_version: 10.13.0 6 | tasks: 7 | - name: Update repositories cache and install required packages 8 | become: yes 9 | apt: 10 | name: 11 | - git 12 | - build-essential 13 | - g++ 14 | - psmisc 15 | - golang-go 16 | - libpangocairo-1.0-0 17 | - libx11-xcb1 18 | - libxcomposite1 19 | - libxcursor1 20 | - libxdamage1 21 | - libxi6 22 | - libxtst6 23 | - libnss3 24 | - libcups2 25 | - libxss1 26 | - libxrandr2 27 | - libgconf2-4 28 | - libasound2 29 | - libatk1.0-0 30 | - libgtk-3-0 31 | update_cache: true 32 | state: present 33 | - name: Install nvm 34 | shell: | 35 | curl https://raw.githubusercontent.com/creationix/nvm/v0.7.0/install.sh | sh 36 | args: 37 | creates: /home/{{ ansible_user_id }}/.nvm/nvm.sh 38 | - name: Install node and set version 39 | shell: | 40 | /bin/bash -c "source ~/.nvm/nvm.sh && nvm install {{ node_version }} && nvm alias default {{ node_version }}" 41 | args: 42 | creates: /home/{{ ansible_user_id }}/.nvm/versions/node/v{{ node_version }} 43 | - name: Npm config 44 | shell: /bin/bash -c "source ~/.nvm/nvm.sh && npm config set package-lock false" 45 | - name: Install clinic and autocannon 46 | shell: > 47 | /bin/bash -c "source ~/.nvm/nvm.sh && npm i clinic autocannon -g" 48 | args: 49 | creates: /home/{{ ansible_user_id }}/.nvm/versions/node/v{{ node_version }}/bin/clinic 50 | - name: Install go-ipfs 51 | become: yes 52 | shell: > 53 | wget https://dist.ipfs.io/go-ipfs/{{go_ipfs_version}}/go-ipfs_{{go_ipfs_version}}_linux-386.tar.gz && \ 54 | tar xvfz go-ipfs_{{go_ipfs_version}}_linux-386.tar.gz && \ 55 | mv go-ipfs/ipfs /usr/local/bin/ipfs_{{go_ipfs_version}} && \ 56 | rm go-ipfs_{{go_ipfs_version}}_linux-386.tar.gz && \ 57 | rm -Rf go-ipfs && \ 58 | chmod +x /usr/local/bin/ipfs_{{go_ipfs_version}} && \ 59 | rm /usr/local/bin/ipfs && \ 60 | ln -s /usr/local/bin/ipfs_{{go_ipfs_version}} /usr/local/bin/ipfs 61 | args: 62 | creates: /usr/local/bin/ipfs_{{go_ipfs_version}} 63 | warn: false 64 | - file: 65 | path: ~/ipfs 66 | state: directory 67 | mode: 0755 68 | - name: copy tests 69 | synchronize: 70 | src: ../../tests 71 | dest: ~/ipfs/ 72 | rsync_opts: 73 | - 
"--exclude=node_modules" 74 | - "--exclude=data" 75 | - "--exclude=fixtures/*.txt" 76 | - name: Install npm modules 77 | shell: | 78 | /bin/bash -c "source ~/.nvm/nvm.sh && cd ~/ipfs/tests && rm -Rf node_modules && npm install" 79 | - name: generate test files 80 | shell: /bin/bash -c "source ~/.nvm/nvm.sh && cd ~/ipfs/tests && npm run generateFiles" 81 | - name: build-browser 82 | shell: /bin/bash -c "source ~/.nvm/nvm.sh && cd ~/ipfs/tests && npm run build-browser" -------------------------------------------------------------------------------- /tests/lib/node-factory.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { CreateNodeJs, CreateGo, CreateHttp, CreateBrowser } = require(`./create-node`) 4 | const IPFS = process.env.REMOTE === 'true' ? require('../../js-ipfs') : require('ipfs') 5 | 6 | class NodeFactory { 7 | constructor (ipfs) { 8 | this._ipfs = typeof ipfs !== 'undefined' ? ipfs : IPFS 9 | this._nodes = [] 10 | } 11 | 12 | async add (type, options, counter) { 13 | if (type === 'go') { 14 | const node = await this.addGo(options, counter) 15 | return node 16 | } 17 | if (type === 'nodejs') { 18 | const node = await this.addNodeJs(options, counter) 19 | return node 20 | } 21 | if (type === 'http') { 22 | const node = await this.addHttp(options) 23 | return node 24 | } 25 | if (type === 'browser') { 26 | const node = await this.addBrowser(options, counter) 27 | return node 28 | } 29 | } 30 | async addGo (config, counter) { 31 | const node = await CreateGo(config, this._ipfs, counter) 32 | this._nodes.push(node) 33 | return node 34 | } 35 | async addNodeJs (options, counter) { 36 | const node = await CreateNodeJs(options, this._ipfs, counter) 37 | this._nodes.push(node) 38 | return node 39 | } 40 | async addHttp (config, counter) { 41 | const node = await CreateHttp(config, this._ipfs, counter) 42 | this._nodes.push(node) 43 | return node 44 | } 45 | async addBrowser (config, counter) { 46 | const node = await CreateBrowser(config, this._ipfs, counter) 47 | this._nodes.push(node) 48 | return node 49 | } 50 | async stop (type) { 51 | switch (type) { 52 | case 'nodejs': 53 | await this.stopNodeJs() 54 | break 55 | case 'go': 56 | await this.stopGo() 57 | break 58 | case 'http': 59 | await this.stopHttp() 60 | break 61 | case 'browser': 62 | await this.stopBrowser() 63 | break 64 | } 65 | } 66 | 67 | async stopNodeJs () { 68 | for (let node of this._nodes) { 69 | try { 70 | await node.stop() 71 | } catch (e) { 72 | console.log(`Error stopping node: ${e}`) 73 | } 74 | } 75 | this._nodes.length = null 76 | } 77 | async stopGo () { 78 | for (let node of this._nodes) { 79 | try { 80 | await node.kill('SIGTERM') 81 | } catch (e) { 82 | console.log(`Error stopping node: ${e}`) 83 | } 84 | } 85 | this._nodes.length = null 86 | } 87 | async stopHttp () { 88 | for (let node of this._nodes) { 89 | try { 90 | await node.stop() 91 | } catch (e) { 92 | console.log(`Error stopping node: ${e}`) 93 | } 94 | } 95 | this._nodes.length = null 96 | } 97 | async stopBrowser () { 98 | for (let node of this._nodes) { 99 | try { 100 | await node.browser.close() 101 | } catch (e) { 102 | console.log(`Error stopping node: ${e}`) 103 | } 104 | } 105 | this._nodes.length = null 106 | } 107 | 108 | get () { 109 | return this._nodes 110 | } 111 | } 112 | 113 | module.exports = NodeFactory 114 | -------------------------------------------------------------------------------- /docker-compose.yml: 
--------------------------------------------------------------------------------
1 | version: '3.4'
2 |
3 | # This is an example docker-compose file for IPFS Cluster
4 | # It runs two Cluster peers (cluster0, cluster1) attached to two
5 | # IPFS daemons (ipfs0, ipfs1).
6 | #
7 | # It expects a "compose" subfolder, laid out as follows, where it will store
8 | # configurations and state permanently:
9 | #
10 | # compose/
11 | # |-- cluster0
12 | # |-- cluster1
13 | # |-- ipfs0
14 | # |-- ipfs1
15 | #
16 | #
17 | # During the first start, default configurations are created for all peers.
18 |
19 | services:
20 |
21 | ##################################################################################
22 | ## Cluster PEER 0 ################################################################
23 | ##################################################################################
24 |
25 | ipfs0:
26 | container_name: ipfs0
27 | image: ipfs/go-ipfs:release
28 | ports:
29 | - "4001:4001" # ipfs swarm
30 | # - "5001:5001" # expose if needed/wanted
31 | # - "8080:8080" # expose if needed/wanted
32 | volumes:
33 | - ./compose/ipfs0:/data/ipfs
34 |
35 | cluster0:
36 | container_name: cluster0
37 | image: ipfs/ipfs-cluster:latest
38 | depends_on:
39 | - ipfs0
40 | environment:
41 | CLUSTER_SECRET: ${CLUSTER_SECRET} # From shell variable
42 | IPFS_API: /dns4/ipfs0/tcp/5001
43 | ports:
44 | - "127.0.0.1:9094:9094" # API
45 | # - "9096:9096" # Cluster IPFS Proxy endpoint
46 | volumes:
47 | - ./compose/cluster0:/data/ipfs-cluster
48 |
49 | ##################################################################################
50 | ## Cluster PEER 1 ################################################################
51 | ##################################################################################
52 |
53 | ipfs1:
54 | container_name: ipfs1
55 | image: ipfs/go-ipfs:release
56 | ports:
57 | - "4101:4001" # ipfs swarm
58 | # - "5101:5001" # expose if needed/wanted
59 | # - "8180:8080" # expose if needed/wanted
60 | volumes:
61 | - ./compose/ipfs1:/data/ipfs
62 |
63 | # cluster1 bootstraps to cluster0 if not bootstrapped before
64 | cluster1:
65 | container_name: cluster1
66 | image: ipfs/ipfs-cluster:latest
67 | depends_on:
68 | - cluster0
69 | - ipfs1
70 | environment:
71 | CLUSTER_SECRET: ${CLUSTER_SECRET} # From shell variable
72 | IPFS_API: /dns4/ipfs1/tcp/5001
73 | ports:
74 | - "127.0.0.1:9194:9094" # API
75 | # - "9196:9096" # Cluster IPFS Proxy endpoint
76 | volumes:
77 | - ./compose/cluster1:/data/ipfs-cluster
78 | entrypoint:
79 | - "/sbin/tini"
80 | - "--"
81 | # Translation: if state folder does not exist, find cluster0 id and bootstrap
82 | # to it.
83 | command: >-
84 | sh -c '
85 | cmd="daemon --upgrade"
86 | if [ ! -d /data/ipfs-cluster/raft ]; then
87 | while ! ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id; do
88 | sleep 1
89 | done
90 | pid=`ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id | grep -o -E "^(\w+)"`
91 | sleep 10
92 | cmd="daemon --bootstrap /dns4/cluster0/tcp/9096/ipfs/$$pid"
93 | fi
94 | exec /usr/local/bin/entrypoint.sh $$cmd
95 | '
96 |
97 | # For adding more peers, copy PEER 1 and rename things to ipfs2, cluster2.
98 | # Keep bootstrapping to cluster0.
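# CLUSTER_SECRET must be exported in the shell before `docker-compose up`. One common
# way to generate a 32-byte hex secret (suggested in the ipfs-cluster docs; verify
# against the version you run) is:
#   export CLUSTER_SECRET=$(od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n')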
99 |
--------------------------------------------------------------------------------
/runner/test/queue.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const tap = require('tap')
4 | const levelup = require('levelup')
5 | const memdown = require('memdown')
6 |
7 | const stopFn = () => {
8 | console.log('test/queue.js ->', 'stopping')
9 | }
10 |
11 | const runner = (id, params, cb) => {
12 | return {
13 | id: id,
14 | params: params
15 | }
16 | }
17 |
18 | const wait = ms => new Promise((resolve, reject) => setTimeout(resolve, ms))
19 |
20 | // the test subject
21 | const Queue = require('../queue.js')
22 |
23 | const task1 = {
24 | commit: 'abcdef',
25 | clinic: 'off',
26 | remote: true
27 | }
28 | const task2 = {
29 | commit: '12345',
30 | clinic: 'on',
31 | remote: true
32 | }
33 | const task3 = {
34 | commit: '987654',
35 | clinic: 'on',
36 | remote: true
37 | }
38 |
39 | tap.test('add to queue', async (t) => {
40 | const db = levelup(memdown())
41 | let q = new Queue(stopFn, runner, db)
42 | let taskOne = q.add(task1)
43 | tap.equal(taskOne.commit, task1.commit, 'compare task1.commit')
44 | let taskTwo = q.add(task2)
45 | tap.equal(taskTwo.commit, task2.commit, 'compare task2.commit')
46 | t.end()
47 | })
48 |
49 | tap.test('get queue list', async (t) => {
50 | const db = levelup(memdown())
51 | let q = new Queue(stopFn, runner, db)
52 | let taskOne = q.add(task1)
53 | let taskTwo = q.add(task2)
54 | let status = q.getStatus()
55 | tap.equal(Object.entries(status).length, 2, 'check queue length')
56 | tap.equal(status[taskOne.id].work.commit, task1.commit, 'compare task1 on list')
57 | tap.equal(status[taskTwo.id].work.commit, task2.commit, 'compare task2 on list')
58 | t.end()
59 | })
60 |
61 | tap.test('check started', async (t) => {
62 | const db = levelup(memdown())
63 | const runner = async (params) => {
64 | await wait(500)
65 | }
66 | let q = new Queue(stopFn, runner, db)
67 | let taskOne = q.add(task1)
68 | let statusOne = q.getStatus()
69 | tap.equal(statusOne[taskOne.id].status, 'pending', 'task1 pending')
70 | await wait(300)
71 | let statusTwo = q.getStatus()
72 | tap.equal(statusTwo[taskOne.id].status, 'started', 'task1 started')
73 | await wait(500)
74 | let statusThree = q.getStatus()
75 | tap.equal(Object.keys(statusThree).length, 0, 'empty task list')
76 | t.end()
77 | })
78 |
79 | tap.test('drain queue', async (t) => {
80 | const db = levelup(memdown())
81 | const runner = async (params) => {
82 | await wait(500)
83 | }
84 | let q = new Queue(stopFn, runner, db)
85 | let taskOne = q.add(task1)
86 | let taskTwo = q.add(task2)
87 | let taskThree = q.add(task3)
88 | let status = q.getStatus()
89 | tap.equal(Object.entries(status).length, 3, 'check queue length')
90 | await wait(200)
91 | let newStatus = await q.drain()
92 | tap.equal(Object.keys(newStatus).length, 1, 'only the started task is left')
93 | t.end()
94 | })
95 |
96 | tap.test('schedule restart', async (t) => {
97 | const db = levelup(memdown())
98 | let q = new Queue(stopFn, runner, db)
99 | const restartTask = { restart: true }
100 | let taskOne = q.add(task1)
101 | let taskTwo = q.add(restartTask)
102 | let status = q.getStatus()
103 | tap.equal(Object.entries(status).length, 2, 'check queue length')
104 | tap.equal(status[taskOne.id].work.commit, task1.commit, 'compare task1 on list')
105 | tap.equal(status[taskTwo.id].work.restart, true, 'compare restart task in list')
106 | t.end()
107 | })
108 |
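The tests above double as usage documentation for the queue: its constructor takes a stop function, an async runner, and (optionally) a levelup-compatible store. A minimal sketch under that same contract, using the in-memory store from the tests (names and the runner body are illustrative):

```js
'use strict'

const levelup = require('levelup')
const memdown = require('memdown')
const Queue = require('../queue.js')

// The in-memory store keeps the sketch self-contained; when no store is
// passed, queue.js falls back to leveldown on disk.
const queue = new Queue(
  () => process.exit(0), // stop function, invoked for { restart: true } tasks
  async (params) => { /* kick off a benchmark run with params */ },
  levelup(memdown())
)

const task = queue.add({ commit: 'abcdef', clinic: 'off', remote: true })
console.log(queue.getStatus()[task.id].status) // 'pending' until the handler picks it up
```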
--------------------------------------------------------------------------------
/runner/persistence.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const Influx = require('influx')
4 | const moment = require('moment')
5 | const config = require('./config')
6 |
7 | const influx = new Influx.InfluxDB({
8 | host: config.influxdb.host,
9 | database: config.influxdb.db,
10 | schema: config.influxdb.schema
11 | })
12 |
13 | const parseDuration = (objDuration) => {
14 | let ms = ((objDuration.s * 1000) + objDuration.ms) // e.g. { s: 1, ms: 250.5 } -> 1250.5
15 | return parseFloat(ms)
16 | }
17 |
18 | const writePoints = (data) => {
19 | if (!Array.isArray(data)) {
20 | data = [data]
21 | }
22 | const timeStamp = moment().toDate()
23 | let payload = []
24 | for (let point of data) {
25 | config.log.info(`${point.name} data point: `, point)
26 | payload.push({
27 | measurement: point.name,
28 | tags: { warmup: point.warmup || 'tbd',
29 | commit: point.meta.commit || 'tbd',
30 | project: point.meta.project || 'tbd',
31 | file_set: point.file_set || 'tbd',
32 | version: point.meta.version.version || 'tbd',
33 | repo: point.meta.version.repo || 'tbd',
34 | guid: point.meta.guid || 'tbd',
35 | sha: point.meta.sha || 'tbd',
36 | branch: point.meta.branch || 'tbd',
37 | nightly: point.meta.nightly || false,
38 | tag: point.meta.tag || 'none'
39 | },
40 | fields: { duration: parseDuration(point.duration), ipfs_sha: point.meta.sha || 'no upload' },
41 | timestamp: timeStamp
42 | })
43 | payload.push({
44 | measurement: `${point.name}${config.benchmarks.measurements.memory}`,
45 | tags: { warmup: point.warmup || 'tbd',
46 | commit: point.meta.commit || 'tbd',
47 | project: point.meta.project || 'tbd',
48 | file_set: point.file_set || 'tbd',
49 | version: point.meta.version.version || 'tbd',
50 | repo: point.meta.version.repo || 'tbd',
51 | guid: point.meta.guid || 'tbd',
52 | sha: point.meta.sha || 'tbd',
53 | branch: point.meta.branch || 'tbd',
54 | nightly: point.meta.nightly || false
55 | },
56 | fields: { memory: point.memory, ipfs_sha: point.meta.sha || 'no upload' },
57 | timestamp: timeStamp
58 | })
59 | payload.push({
60 | measurement: `${point.name}${config.benchmarks.measurements.cpu}`,
61 | tags: { warmup: point.warmup || 'tbd',
62 | commit: point.meta.commit || 'tbd',
63 | project: point.meta.project || 'tbd',
64 | file_set: point.file_set || 'tbd',
65 | version: point.meta.version.version || 'tbd',
66 | repo: point.meta.version.repo || 'tbd',
67 | guid: point.meta.guid || 'tbd',
68 | sha: point.meta.sha || 'tbd',
69 | branch: point.meta.branch || 'tbd',
70 | nightly: point.meta.nightly || false
71 | },
72 | fields: { cpu: point.cpu, ipfs_sha: point.meta.sha || 'no upload' },
73 | timestamp: timeStamp
74 | })
75 | }
76 | return influx.writePoints(payload) // return the promise so store() can await it and catch write errors
77 | }
78 |
79 | const ensureDb = async (db) => {
80 | let names = await influx.getDatabaseNames()
81 | if (!names.includes(db)) {
82 | return influx.createDatabase(db)
83 | } else {
84 | return influx
85 | }
86 | }
87 |
88 | const store = async (result) => {
89 | try {
90 | await ensureDb('benchmarks')
91 | await writePoints(result)
92 | } catch (err) {
93 | config.log.error(err)
94 | }
95 | return true
96 | }
97 |
98 | module.exports = {
99 | store: store
100 | }
101 |
--------------------------------------------------------------------------------
/tests/schema/results.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const { FluentSchema } =
require('fluent-schema') 4 | const os = require('os') 5 | const Ajv = require('ajv') 6 | const config = require('../config') 7 | const ajv = new Ajv({ allErrors: true, useDefaults: true, removeAdditional: true }) 8 | const { getIpfsCommit, getBranchName } = require('../util/get-commit') 9 | const schema = FluentSchema() 10 | .id('ipfs') 11 | .title('IPFS Benchmarks') 12 | .description('IPFS benchmark results') 13 | .prop( 14 | 'name', 15 | FluentSchema() 16 | .asString() 17 | .default('Benchmark Test Name')) 18 | .required() 19 | .prop( 20 | 'warmup', 21 | FluentSchema() 22 | .asBoolean() 23 | .default(true)) 24 | .required() 25 | .prop( 26 | 'description', 27 | FluentSchema() 28 | .asString() 29 | .default('Description of test')) 30 | .prop( 31 | 'file_set', 32 | FluentSchema() 33 | .asString() 34 | .default('OneKBFile') 35 | ) 36 | .prop( 37 | 'date', 38 | FluentSchema() 39 | .asString() 40 | .default(new Date().toISOString()) 41 | ) 42 | .required() 43 | .definition( 44 | 'duration', 45 | FluentSchema() 46 | .prop('s', FluentSchema() 47 | .asInteger() 48 | .default(0)) 49 | .required() 50 | .prop('ms', FluentSchema() 51 | .asNumber() 52 | .default(0)) 53 | .required() 54 | ) 55 | .prop('duration') 56 | .ref('#definitions/duration') 57 | .definition( 58 | 'meta', 59 | FluentSchema() 60 | .prop('project', FluentSchema() 61 | .asString() 62 | .default('js-ipfs')) 63 | .prop('commit') 64 | .prop('version') 65 | ) 66 | .prop('meta') 67 | .ref('#definitions/meta') 68 | 69 | // TODO: use this until we get AJV to generate all defaults 70 | const resultsDTO = { 71 | 'name': 'test name', 72 | 'warmup': true, 73 | 'description': 'Description of benchmark', 74 | 'file_set': 'OneKBFile', 75 | 'date': 'date', 76 | 'file': 'file name', 77 | 'meta': { 78 | 'project': 'js-ipfs', 79 | 'commit': 'TBD', 80 | 'version': 'version of js-ipfs' 81 | }, 82 | 'duration': { 83 | 's': 0, 84 | 'ms': 0 85 | }, 86 | 'cpu': 'cpu', 87 | 'loadAvg': 'load average', 88 | 'memory': 'memory' 89 | } 90 | async function build (props, type = 'nodejs') { 91 | const results = { ...resultsDTO, ...props } 92 | results.cpu = getCpu() 93 | results.loadAvg = os.loadavg() 94 | results.memory = os.totalmem() - os.freemem() 95 | results.date = new Date() 96 | if (type !== 'go') { 97 | results.meta.project = 'js-ipfs' 98 | results.meta.commit = await getIpfsCommit() 99 | results.meta.branch = await getBranchName() 100 | } 101 | results.meta.guid = config.guid 102 | return results 103 | } 104 | const getCpu = () => { 105 | var cpus = os.cpus() 106 | let idle = 0 107 | for (let i = 0, len = cpus.length; i < len; i++) { 108 | let cpu = cpus[i] 109 | let total = 0 110 | for (let type in cpu.times) { 111 | total += cpu.times[type] 112 | } 113 | for (let type in cpu.times) { 114 | if (type === 'idle') { 115 | idle += Math.round(100 * cpu.times[type] / total) 116 | } 117 | } 118 | } 119 | console.log(`process time:${100 - (idle / cpus.length)} % of CPU(s)`) 120 | return 100 - (idle / cpus.length) 121 | } 122 | 123 | function validate (data) { 124 | const valid = ajv.validate(schema.valueOf(), data) 125 | return valid 126 | } 127 | 128 | module.exports = { schema, resultsDTO, build, validate } 129 | -------------------------------------------------------------------------------- /runner/README.md: -------------------------------------------------------------------------------- 1 | # Runner 2 | 3 | The [runner](../runner/) is the component that kicks off the tests and stores the results in a time series database for visualization. 
The runner runs as a daemon but can also be run ad-hoc. See below for the differences.
4 |
5 | ## Initial Setup of Grafana and InfluxDB
6 |
7 | ```bash
8 | > ./scripts/runLocalEnv.sh up
9 | ```
10 |
11 | Open http://localhost:3000/ in a browser. The default username/password combination is `admin/admin`. You will be asked to change that password after the initial login. Set up the datasource with type `InfluxDB` and use `http://influxdb:8086` as the URL. Next, import the dashboard from `infrastructure/grafana/dashboard.json` by hovering over the `+` icon on the left of your screen.
12 |
13 | ![Grafana import dashboard](import-hover.png)
14 |
15 | * All of the Grafana configuration is stored in a folder adjacent to this project's folder, named `/data/grafana`.
16 | * The data for InfluxDB is stored in a folder adjacent to this project's folder, named `/data/influxdb`.
17 |
18 | ## Run the dashboard and the `runner` locally and send results to InfluxDB
19 |
20 | If you're not running it yet:
21 |
22 | ```bash
23 | > ./scripts/runLocalEnv.sh up
24 | ```
25 |
26 | Keep docker running, and in another tab start the runner in one of two ways:
27 |
28 | ### Start as a daemon
29 |
30 | ```bash
31 | > LOG_PRETTY=true node runner/index.js
32 | {"level":30,"time":1543488515841,"msg":"Server listening at http://127.0.0.1:9000","pid":47039,"hostname":"mbpro.local","v":1}
33 | {"level":30,"time":1543488515843,"msg":"server listening on 9000","pid":47039,"hostname":"mbpro.local","v":1}
34 | ```
35 |
36 | A run can now be triggered by sending a POST to the daemon:
37 |
38 | ```bash
39 | > curl -XPOST -d '{"commit":"adfy3hk"}' \
40 | -H "Content-Type: application/json" \
41 | -H "x-ipfs-benchmarks-api-key: supersecret" \
42 | localhost:9000
43 | ```
44 |
45 | ### Ad-hoc run
46 |
47 | ```bash
48 | > LOG_PRETTY=true node runner/cli.js
49 | INFO [1543488677803] (47515 on mbpro.local): Running [OUT_FOLDER=/tmp/out REMOTE=true node /Users/elexy/projects/nearform/ipfs/benchmarks/tests/local-transfer.js] locally
50 | INFO [1543488688568] (47515 on mbpro.local): Creating a node..
51 | Swarm listening on /ip4/127.0.0.1/tcp/4014/ws/ipfs/QmYUDZWVCuahJfiVonM8GfftQ8rCDgMSvjM9zxpGANHEq2
52 | Swarm listening on /ip4/127.0.0.1/tcp/4012/ipfs/QmYUDZWVCuahJfiVonM8GfftQ8rCDgMSvjM9zxpGANHEq2
53 | Swarm listening on /ip4/127.94.0.1/tcp/4012/ipfs/QmYUDZWVCuahJfiVonM8GfftQ8rCDgMSvjM9zxpGANHEq2
54 | ...
55 | 3: { 56 | "measurement": "unixFS-extract", 57 | "tags": { 58 | "subTest": "populated-repo", 59 | "commit": "TBD", 60 | "project": "js-ipfs", 61 | "testClass": "largefile" 62 | }, 63 | "fields": { 64 | "duration": 19.077 65 | }, 66 | "timestamp": "2018-11-29T10:51:41.010Z" 67 | } 68 | ``` 69 | 70 | 71 | To view the Grafana dashboard: http://localhost:3000/ 72 | 73 | Use the default account admin/admin to login 74 | 75 | ## Runner configuration 76 | 77 | The runner can be configured with environment variables 78 | 79 | | name | default | function | 80 | |---|---|---| 81 | | NODE_ENV | `null` | Disable logging if `test` | 82 | | REMOTE_FOLDER | `~/ipfs/tests/` | The folder where tests are checked out on the benchmark host | 83 | | STAGE | `local` | `local` will skip provisioning a remote host, `remote` runs provisioning | 84 | | OUT_FOLDER | `/tmp/out` | Folder where tests write their results in a file named after the test | 85 | | INFLUX_HOST | `localhost` | Host name for InfluxDB to store metrics | 86 | | BENCHMARK_USER | `elexy` | Username for logging into the benchmark host | 87 | | BENCHMARK_KEY | `~/.ssh/id_rsa` | Path to the ssh identity to use for accessing the benchmark host | 88 | | API_KEY | `supersecret` | The api key for the http trigger to validate against | 89 | -------------------------------------------------------------------------------- /tests/config/default-config-go.json: -------------------------------------------------------------------------------- 1 | { 2 | "Datastore": { 3 | "StorageMax": "10GB", 4 | "StorageGCWatermark": 90, 5 | "GCPeriod": "1h", 6 | "Spec": { 7 | "mounts": [ 8 | { 9 | "child": { 10 | "path": "blocks", 11 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 12 | "sync": true, 13 | "type": "flatfs" 14 | }, 15 | "mountpoint": "/blocks", 16 | "prefix": "flatfs.datastore", 17 | "type": "measure" 18 | }, 19 | { 20 | "child": { 21 | "compression": "none", 22 | "path": "datastore", 23 | "type": "levelds" 24 | }, 25 | "mountpoint": "/", 26 | "prefix": "leveldb.datastore", 27 | "type": "measure" 28 | } 29 | ], 30 | "type": "mount" 31 | }, 32 | "HashOnRead": false, 33 | "BloomFilterSize": 0 34 | }, 35 | "Mounts": { 36 | "IPFS": "/ipfs", 37 | "IPNS": "/ipns", 38 | "FuseAllowOther": false 39 | }, 40 | "Discovery": { 41 | "MDNS": { 42 | "Enabled": true, 43 | "Interval": 10 44 | } 45 | }, 46 | "Routing": { 47 | "Type": "dht" 48 | }, 49 | "Ipns": { 50 | "RepublishPeriod": "", 51 | "RecordLifetime": "", 52 | "ResolveCacheSize": 128 53 | }, 54 | "Bootstrap": [ 55 | "/dnsaddr/bootstrap.libp2p.io/ipfs/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", 56 | "/dnsaddr/bootstrap.libp2p.io/ipfs/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", 57 | "/dnsaddr/bootstrap.libp2p.io/ipfs/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", 58 | "/dnsaddr/bootstrap.libp2p.io/ipfs/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", 59 | "/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", 60 | "/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM", 61 | "/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu", 62 | "/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64", 63 | "/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd", 64 | "/ip6/2604:a880:1:20::203:d001/tcp/4001/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM", 65 | "/ip6/2400:6180:0:d0::151:6001/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu", 66 | 
"/ip6/2604:a880:800:10::4a:5001/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64", 67 | "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd" 68 | ], 69 | "Gateway": { 70 | "HTTPHeaders": { 71 | "Access-Control-Allow-Headers": [ 72 | "X-Requested-With", 73 | "Range" 74 | ], 75 | "Access-Control-Allow-Methods": [ 76 | "GET" 77 | ], 78 | "Access-Control-Allow-Origin": [ 79 | "*" 80 | ] 81 | }, 82 | "RootRedirect": "", 83 | "Writable": false, 84 | "PathPrefixes": [], 85 | "APICommands": [] 86 | }, 87 | "API": { 88 | "HTTPHeaders": {} 89 | }, 90 | "Swarm": { 91 | "AddrFilters": null, 92 | "DisableBandwidthMetrics": false, 93 | "DisableNatPortMap": false, 94 | "DisableRelay": false, 95 | "EnableRelayHop": false, 96 | "ConnMgr": { 97 | "Type": "basic", 98 | "LowWater": 600, 99 | "HighWater": 900, 100 | "GracePeriod": "20s" 101 | } 102 | }, 103 | "Pubsub": { 104 | "Router": "", 105 | "DisableSigning": false, 106 | "StrictSignatureVerification": false 107 | }, 108 | "Reprovider": { 109 | "Interval": "12h", 110 | "Strategy": "all" 111 | }, 112 | "Experimental": { 113 | "FilestoreEnabled": false, 114 | "UrlstoreEnabled": false, 115 | "ShardingEnabled": false, 116 | "Libp2pStreamMounting": true, 117 | "P2pHttpProxy": false, 118 | "QUIC": false 119 | } 120 | } -------------------------------------------------------------------------------- /tests/lib/fixtures.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const path = require('path') 4 | const crypto = require('crypto') 5 | const util = require('util') 6 | const fs = require('fs') 7 | const fsWriteFile = util.promisify(fs.writeFile) 8 | const fsMakeDir = util.promisify(fs.mkdir) 9 | const fsExists = util.promisify(fs.access) 10 | const fsStat = util.promisify(fs.lstat) 11 | const fsReadDir = util.promisify(fs.readdir) 12 | const KB = 1024 13 | const MB = KB * 1024 14 | const GB = MB * 1024 15 | const files = [ 16 | { size: KB, name: 'onekbfile' }, 17 | { size: KB, name: 'hundred1kbfile', count: 100 }, 18 | { size: 62 * KB, name: 'one62kbfile' }, 19 | { size: 64 * KB, name: 'one64kbfile' }, 20 | { size: 512 * KB, name: 'one512kbfile' }, 21 | { size: 768 * KB, name: 'one768kbfile' }, 22 | { size: 1023 * KB, name: 'one1023kbfile' }, 23 | { size: MB, name: 'onembfile' }, 24 | { size: 4 * MB, name: 'one4mbfile' }, 25 | { size: 8 * MB, name: 'one8mbfile' }, 26 | { size: 64 * MB, name: 'one64mbfile' }, 27 | { size: 128 * MB, name: 'one128mbfile' }, 28 | { size: 512 * MB, name: 'one512mbfile' }, 29 | { size: GB, name: 'onegbfile' } 30 | 31 | ] 32 | async function generateFiles () { 33 | const testPath = path.join(__dirname, `../fixtures/`) 34 | for (let file of files) { 35 | if (file.count) { 36 | try { 37 | await fsExists(`${testPath}${file.name}`) 38 | } catch (err) { 39 | await fsMakeDir(`${testPath}${file.name}`) 40 | } 41 | for (let i = 0; i < file.count; i++) { 42 | write(crypto.randomBytes(file.size), `${file.name}/${file.name}-${i}`) 43 | } 44 | } else { 45 | write(crypto.randomBytes(file.size), file.name) 46 | } 47 | } 48 | } 49 | 50 | async function write (data, name, folder) { 51 | await fsWriteFile(path.join(__dirname, `../fixtures/${name}.txt`), data) 52 | console.log(`File ${name} created.`) 53 | } 54 | 55 | async function file (name) { 56 | const isDir = await isDirectory(name.toLowerCase()) 57 | if (!isDir) { 58 | const file = files.find((file) => { 59 | return file.name === name.toLowerCase() 60 | }) 61 | if (typeof file !== 
'undefined' && file) {
62 | return path.join(__dirname, `../fixtures/${file.name}.txt`)
63 | } else {
64 | if (name.includes(`/`)) {
65 | return path.join(__dirname, `../fixtures/${name.toLowerCase()}`)
66 | } else {
67 | return file
68 | }
69 | }
70 | } else {
71 | const arr = await fsReadDir(path.join(__dirname, `../fixtures/${name.toLowerCase()}`))
72 | const fullPath = arr.map((fileName) => {
73 | return path.join(__dirname, `../fixtures/${name.toLowerCase()}/${fileName.toLowerCase()}`)
74 | })
75 | return fullPath
76 | }
77 | }
78 |
79 | async function isDirectory (name) {
80 | try {
81 | const dir = path.join(__dirname, `../fixtures/${name.toLowerCase()}`)
82 | const stats = await fsStat(dir)
83 | return stats.isDirectory()
84 | } catch (e) {
85 | return false
86 | }
87 | }
88 |
89 | async function verifyTestFiles () {
90 | const fixtures = path.join(__dirname, `../fixtures`)
91 | try {
92 | await fsExists(fixtures)
93 | } catch (e) {
94 | await fsMakeDir(fixtures)
95 | }
96 | for (let f of files) {
97 | if (f.count) {
98 | console.log(`Verifying Directory ${f.name}`)
99 | const dir = await isDirectory(f.name)
100 | if (dir) {
101 | const fileArr = await file(f.name) // file() is async; without await the length check below always passed
102 | if (fileArr.length < f.count) {
103 | console.log(`Missing files in directory ${f.name}`)
104 | return false
105 | }
106 | } else {
107 | console.log(`Missing directory ${f.name}`)
108 | return false
109 | }
110 | } else {
111 | const filePath = await file(f.name)
112 | try {
113 | console.log(`Verifying File ${f.name}`)
114 | await fsExists(filePath)
115 | } catch (err) {
116 | console.log(`Missing ${f.name}`)
117 | return false
118 | }
119 | }
120 | }
121 | return true
122 | }
123 |
124 | module.exports = { generateFiles, file, isDirectory, verifyTestFiles }
125 |
--------------------------------------------------------------------------------
/runner/queue.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 |
3 | const levelup = require('levelup')
4 | const leveldown = require('leveldown')
5 | const Jobs = require('level-jobs')
6 | const config = require('./config')
7 |
8 | const getStatus = (queueStatus, params) => {
9 | let retVal = {}
10 | if (params.id) {
11 | if (queueStatus[params.id]) {
12 | retVal = Object.assign({}, queueStatus[params.id])
13 | } else {
14 | if (params.work) {
15 | retVal = {
16 | jobId: params.id,
17 | work: params.work
18 | }
19 | }
20 | }
21 | if (queueStatus[params.id]) {
22 | switch (params.status) {
23 | case 'pending':
24 | if (queueStatus[params.id].status !== 'pending') {
25 | retVal.status = 'pending'
26 | retVal.queued = new Date().toString()
27 | }
28 | break
29 | case 'started':
30 | if (queueStatus[params.id].status !== 'started') {
31 | retVal.status = 'started'
32 | retVal.started = new Date().toString()
33 | }
34 | break
35 | case 'error':
36 | if (queueStatus[params.id].status !== 'error') {
37 | retVal.status = 'error'
38 | retVal.queued = new Date().toString()
39 | }
40 | break
41 | }
42 | } else {
43 | retVal.status = 'pending'
44 | retVal.queued = new Date().toString()
45 | }
46 | }
47 | return retVal
48 | }
49 |
50 | class q {
51 | constructor (stopFn, runner, database) {
52 | let that = this
53 | this.stopFn = stopFn
54 | this.queueStatus = {}
55 | let dbRef = database
56 | if (!database) {
57 | dbRef = levelup(leveldown(`${config.dataDir}/${config.db}`))
58 | }
59 | this.q = Jobs(dbRef, this._handler(runner), 1)
60 | this.q.pendingStream().on('data', function (d) {
61 | config.log.info(that.queueStatus)
62 | that.queueStatus[d.key] = getStatus(that.queueStatus, {
63 | id: d.key,
64 | status: 'pending',
65 | work: d.value
66 | })
67 | config.log.info('Next job id: %s, work: %j', d.key, d.value)
68 | })
69 | this.q.runningStream().on('data', function (d) {
70 | that.queueStatus[d.key] = getStatus(that.queueStatus, {
71 | id: d.key,
72 | status: 'pending',
73 | work: d.value
74 | })
75 | config.log.info('Pending job id: %s, work: %j', d.key, d.value)
76 | })
77 | }
78 |
79 | _handler (run) {
80 | let that = this
81 | return async (id, params, cb) => {
82 | config.log.info('Started job id: %s, work: %j', id, params)
83 | that.queueStatus[id] = getStatus(that.queueStatus, { id: id, status: 'started' })
84 | try {
85 | if (params.restart) {
86 | this.stopFn(cb)
87 | } else {
88 | await run(params)
89 | config.log.info('Finished job id: %s, work: %j', id, params)
90 | if (that.queueStatus[id]) {
91 | delete that.queueStatus[id]
92 | }
93 | cb()
94 | }
95 | } catch (e) {
96 | config.log.error(e)
97 | if (that.queueStatus[id]) {
98 | that.queueStatus[id] = getStatus(that.queueStatus, { id: id, status: 'error' })
99 | }
100 | cb(e)
101 | }
102 | }
103 | }
104 |
105 | add (params) {
106 | let task = Object.assign({}, params)
107 | let jobId = this.q.push(params, function (err) {
108 | if (err) config.log.error('Error pushing work into the queue', err.stack)
109 | })
110 | this.queueStatus[jobId] = getStatus(this.queueStatus, {
111 | status: 'pending',
112 | id: `${jobId}`,
113 | work: params
114 | })
115 | task.id = jobId
116 | config.log.info(`Added job with [${JSON.stringify(task)}] to the queue`)
117 | return task
118 | }
119 |
120 | async drain () {
121 | return new Promise((resolve, reject) => {
122 | if (Object.values(this.queueStatus)) {
123 | let taskIds = []
124 | for (let task of Object.values(this.queueStatus)) {
125 | if (task.status !== 'started') {
126 | taskIds.push(task.jobId)
127 | delete this.queueStatus[task.jobId]
128 | }
129 | }
130 | this.q.delBatch(taskIds, (err) => {
131 | if (err) {
132 | config.log.error('Error deleting jobs', err.stack)
133 | reject(Error('Error deleting jobs'))
134 | } else {
135 | resolve(this.queueStatus)
136 | }
137 | })
138 | }
139 | })
140 | }
141 |
142 | getStatus () {
143 | return this.queueStatus
144 | }
145 | }
146 |
147 | module.exports = q
148 |
--------------------------------------------------------------------------------
/runner/index.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | require('make-promises-safe') // installs an 'unhandledRejection' handler
3 | const schedule = require('node-schedule')
4 | const fastify = require('fastify')({
5 | logger: true
6 | })
7 | const headerSchema = require('./lib/schema/header')
8 | const addSchema = require('./lib/schema/add')
9 | const getSchema = require('./lib/schema/get')
10 | const benchmarkSchema = require('./lib/schema/benchmark')
11 | const restartSchema = require('./lib/schema/restart')
12 | const config = require('./config')
13 | const runner = require('./runner')
14 | const Queue = require('./queue')
15 | const docs = {
16 | benchmarks: config.server.api.benchmarks,
17 | clinic: config.server.api.clinic
18 | }
19 | docs.benchmarks.txt = `Benchmarks run with their own set of files mandated by the type of test.`
20 | docs.clinic.txt = `For clinic runs you can request to run with a specific fileset.`
21 |
22 | // This function exits the main process, relying on the process manager to restart it
23 | // so that a new version of the runner can be
applied on next startup
24 | const stopFn = (cb) => {
25 | config.log.info('Exiting for restart.')
26 | cb()
27 | fastify.close(() => {
28 | process.exit(0)
29 | })
30 | }
31 |
32 | const queue = new Queue(stopFn, runner)
33 |
34 | if (config.server.schedule) {
35 | // run this every day at midnight, at least
36 | const cron = '0 0 * * *'
37 | config.log.info(`installing scheduled run with this schema: [${cron}]`)
38 | schedule.scheduleJob(cron, function () {
39 | queue.add({
40 | commit: '',
41 | clinic: {
42 | enabled: true
43 | },
44 | remote: true,
45 | nightly: true
46 | })
47 | })
48 | } else {
49 | config.log.info(`NOT installing scheduled run`)
50 | }
51 |
52 | fastify.register(require('fastify-swagger'), {
53 | routePrefix: '/docs',
54 | swagger: {
55 | info: {
56 | title: 'IPFS Runner API',
57 | description: 'Running benchmarks for IPFS projects. For more documentation see https://github.com/ipfs/benchmarks',
58 | version: '1.0.0'
59 | },
60 | host: `${config.server.hostname}:${config.server.port}${config.server.hostname === 'localhost' ? '' : '/runner'}`,
61 | schemes: ['http'],
62 | consumes: ['application/json'],
63 | produces: ['application/json']
64 | },
65 | exposeRoute: true
66 | })
67 |
68 | fastify.addSchema(headerSchema.headers)
69 | fastify.addSchema(addSchema.addBody)
70 | fastify.addSchema(addSchema.addResponse)
71 |
72 | // add a new task to the queue
73 | fastify.route({
74 | method: 'POST',
75 | url: '/',
76 | schema: {
77 | description: 'Add a job run to the queue.',
78 | body: 'addBody#',
79 | headers: 'protect#',
80 | response: 'addResponse#'
81 | },
82 | handler: async (request, reply) => {
83 | let task = queue.add({
84 | commit: request.body.commit,
85 | clinic: request.body.clinic,
86 | benchmarks: request.body.benchmarks,
87 | remote: true,
88 | nightly: request.body.nightly,
89 | tag: request.body.tag
90 | })
91 | return task
92 | }
93 | })
94 |
95 | fastify.addSchema(getSchema.getResponse)
96 |
97 | // list jobs in the queue
98 | fastify.route({
99 | method: 'GET',
100 | url: '/',
101 | schema: {
102 | description: 'List all jobs in the Queue',
103 | response: 'getResponse#'
104 | },
105 | handler: async (request, reply) => {
106 | let status = Object.values(queue.getStatus())
107 | fastify.log.info('getting queue status', status)
108 | return status
109 | }
110 | })
111 |
112 | fastify.addSchema(benchmarkSchema.benchmarkResponse)
113 |
114 | // list available benchmarks
115 | fastify.route({
116 | method: 'GET',
117 | url: '/benchmarks',
118 | schema: {
119 | description: 'List all available benchmarks',
120 | response: 'benchmarkResponse#'
121 | },
122 | handler: async (request, reply) => {
123 | return config.benchmarks.tests
124 | }
125 | })
126 |
127 | // we do want to be able to drain the queue
128 | fastify.route({
129 | method: 'POST',
130 | url: '/drain',
131 | schema: {
132 | description: 'Drain all non-active jobs from the queue and return the queue status',
133 | headers: 'protect#',
134 | response: 'getResponse#'
135 | },
136 | handler: async (request, reply) => {
137 | return queue.drain()
138 | }
139 | })
140 |
141 | fastify.addSchema(restartSchema.restartResponse)
142 |
143 | // after CD deploys new code we queue a restart of the runner
144 | fastify.route({
145 | method: 'POST',
146 | url: '/restart',
147 | schema: {
148 | description: 'Schedule a restart in the queue and return the scheduled job',
149 | headers: 'protect#',
150 | response: 'restartResponse#'
151 | },
152 | handler: async (request, reply) => {
153 | let task =
queue.add({
154 | restart: true
155 | })
156 | return task
157 | }
158 | })
159 | 
160 | // Run the server!
161 | const start = async () => {
162 | try {
163 | await fastify.listen(config.server.port, '0.0.0.0')
164 | fastify.log.info(`server listening on ${fastify.server.address().port}`)
165 | } catch (err) {
166 | fastify.log.error(err)
167 | process.exit(1)
168 | }
169 | }
170 | 
171 | start()
172 | -------------------------------------------------------------------------------- /tests/lib/create-node.js: --------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const { spawn } = require('child_process')
4 | const path = require('path')
5 | const rimraf = require('rimraf')
6 | const defaultConfig = require('../config/default-config.json')
7 | const defaultConfigGo = require('../config/default-config-go.json')
8 | const goConfigs = require('../config/go-configs.json')
9 | const fs = require('fs')
10 | const util = require('util')
11 | const fsMakeDir = util.promisify(fs.mkdir)
12 | const fsWriteFile = util.promisify(fs.writeFile)
13 | const rimrafPromise = util.promisify(rimraf)
14 | const privateKey = require('../config/private-key.json')
15 | const os = require('os')
16 | const conf = { tmpPath: os.tmpdir() }
17 | const { repoPath } = require('../package.json').config
18 | const ipfsClient = require('ipfs-http-client')
19 | const IPFSFactory = require('ipfsd-ctl')
20 | const uuidv1 = require('uuid/v1')
21 | const { once } = require('stream-iterators-utils')
22 | const puppeteer = require('puppeteer')
23 | const WS = require('libp2p-websockets')
24 | const MPLEX = require('libp2p-mplex')
25 | const TCP = require('libp2p-tcp')
26 | const SPDY = require('libp2p-spdy')
27 | const SECIO = require('libp2p-secio')
28 | const argv = require('minimist')(process.argv.slice(2))
29 | 
30 | const initRepo = async (path) => {
31 | let init = spawn('ipfs', ['init'], { env: Object.assign(process.env, { IPFS_PATH: path }) })
32 | init.stdout.on('data', (data) => {
33 | console.log(`stdout: ${data}`)
34 | })
35 | init.stderr.on('data', (errorData) => {
36 | console.error(`stderr: ${errorData}`)
37 | })
38 | init.on('close', (code, signal) => {
39 | console.log('Repo initialized')
40 | })
41 | await once(init, 'close')
42 | return init
43 | }
44 | const parseParams = (options) => {
45 | if (argv.t === 'ws') {
46 | options.libp2p.modules.transport.push(WS)
47 | } else {
48 | options.libp2p.modules.transport.push(TCP)
49 | }
50 | if (argv.m === 'spdy') {
51 | options.libp2p.modules.streamMuxer.push(SPDY)
52 | } else {
53 | options.libp2p.modules.streamMuxer.push(MPLEX)
54 | }
55 | if (argv.e === 'secio') {
56 | (options.libp2p.modules.connEncryption = options.libp2p.modules.connEncryption || []).push(SECIO) // SECIO is a connection encryption module, not a stream muxer
57 | }
58 | }
59 | const CreateNodeJs = async (opt, IPFS, count) => {
60 | const config = defaultConfig[count].config
61 | const libp2p = defaultConfig[count].libp2p
62 | const options = {
63 | init: { privateKey: privateKey[count].privKey },
64 | repo: `${repoPath}${String(uuidv1())}`,
65 | config,
66 | libp2p
67 | 
68 | }
69 | const newOptions = { ...options, ...opt }
70 | parseParams(newOptions)
71 | const node = new IPFS(newOptions)
72 | node.on('ready', () => {
73 | console.log('Node ready')
74 | })
75 | node.on('error', (err) => {
76 | console.error(err)
77 | })
78 | node.on('stop', () => {
79 | console.log('Node stopped')
80 | })
81 | await once(node, 'ready')
82 | return node
83 | }
84 | 
85 | const CreateBrowser = async (opt, IPFS, count) => {
86 | const testPath = path.join(__dirname,
`../browser/build/index.html`) 87 | const browser = await puppeteer.launch() 88 | const page = await browser.newPage() 89 | await page.goto(`file://${testPath}`) 90 | return { page: page, browser: browser, version: () => { return '1' } } 91 | } 92 | 93 | const CreateHttp = async (opt, IPFS, count) => { 94 | let client 95 | const factory = IPFSFactory.create() 96 | const spawn = util.promisify(factory.spawn).bind(factory) 97 | const _ipfsd = await spawn({ initOptions: { bits: 1024 } }) 98 | client = ipfsClient(_ipfsd.apiAddr) 99 | return client 100 | } 101 | 102 | const CreateGo = async (opt, IPFS, count = 0) => { 103 | const peerDir = `${conf.tmpPath}/ipfs${count}` 104 | const peerSpecificConf = goConfigs[count] 105 | const peerConf = Object.assign({}, defaultConfigGo, peerSpecificConf) 106 | await rimrafPromise(peerDir) 107 | await fsMakeDir(peerDir, { recursive: true }) 108 | await initRepo(peerDir) 109 | await fsWriteFile(`${peerDir}/config`, JSON.stringify(peerConf)) 110 | let peer = spawn('ipfs', ['daemon'], { env: Object.assign(process.env, { IPFS_PATH: peerDir }) }) 111 | peer.version = function () { return '1' } 112 | peer.addresses = '' 113 | peer.stdout.on('data', (data) => { 114 | let version = {} 115 | const addresses = [] 116 | if (data.includes('Swarm announcing')) { 117 | addresses.push(data.toString('utf8').split('Swarm announcing')[1]) 118 | peer.addresses = addresses 119 | } 120 | if (data.includes('go-ipfs version:')) { 121 | const stdArray = data.toString('utf8').split('\n') 122 | for (let item of stdArray) { 123 | if (item.includes('go-ipfs version:')) { 124 | version.version = item.split(':')[1].trim() 125 | } 126 | if (item.includes('Repo version:')) { 127 | version.repo = item.split(':')[1].trim() 128 | } 129 | } 130 | peer.version = function () { 131 | return version 132 | } 133 | } 134 | if (data.includes('Daemon is ready')) { 135 | peer.emit('done') 136 | } 137 | }) 138 | peer.stderr.on('data', (data) => { 139 | console.error(`${data}`) 140 | }) 141 | peer.on('close', (code, signal) => { 142 | console.error(`Daemon exited with code: ${code}`) 143 | }) 144 | peer.on('done', () => { 145 | console.log('Daemon is ready') 146 | }) 147 | await once(peer, 'done') 148 | 149 | return peer 150 | } 151 | 152 | module.exports = { 153 | CreateNodeJs, 154 | CreateGo, 155 | CreateBrowser, 156 | CreateHttp 157 | } 158 | -------------------------------------------------------------------------------- /tests/config/private-key.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "id": "QmSxYJBid9EUdV61T9dUELAsc2FncyQWaSEvm4ztcURiur", 3 | "privKey": "CAAS4AQwggJcAgEAAoGBAJGwCGgBEgVxzx6URFoTCm7UJHwDG+LFzolu56k/njk1E96u7zRjGZ+ihvSSfh5Na0IewXtO+X4lpVT91OQrZtX/if1vu8Vb2MZ2eWQ2dOkMxjSGt6E6Y2U/MCIH6PEJH4XofQ8MTzt3VKmZtNLfjc461KjXWEZb9bjHEYi9GfvZAgMBAAECgYEAjcMH+wQHoCqlSvElLazXew6MzetMiDbIiazUWTlhYfNG+Wmps4U22sIQpg2iESRuWTGKPc2UMm65WWGBdeDRt7FJWoA+tyoI0y57Jl4ohQyi1158S6C5sHaTWO7gX3K7zox5xoQ+ow5hnTsxXVgHJfmZttNihUJcQWsuzhkjGIECQQDcXERw5Ehvx46YZXspplh6Y5IiY2TiNvxoHP/3H7IGuBDPJkur4ROK0j5qOZnLoALyQZnCDMJJ2gRW7l/WEpDxAkEAqUAJeiBCO68mhSuajlGZy9PHd56RudFNg2dz5shYeeDsQ6UenXS/D4IMAntgmw/8AcAR/TtDXyhpQ2eF9KUZaQJACe62vwfrI+6wxLm+RXBUCKA4VAh64Z9s3RyBhRgOpDLjvxKQ0pyAjv9PBOa3we/ichz220JL95w2Gd0AwNtxYQJAGRjLzvY1nBAO0DR1CKGFArp2m4BB76HfspqGjzQqGniF0EoNzh9frXcFPOD6pEOshL3sbPZ6uQOPCFWzgWFx8QJAM19xWfr9RZSBqXMUKuk8YhK3ma/CQ1olqD4nHjwnKtvzTbLW/+4MMlRAyCPKvTicRPIB8NdsVrEtRl9JFr6WxA==", 4 | "pubKey": 
"CAASogEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJGwCGgBEgVxzx6URFoTCm7UJHwDG+LFzolu56k/njk1E96u7zRjGZ+ihvSSfh5Na0IewXtO+X4lpVT91OQrZtX/if1vu8Vb2MZ2eWQ2dOkMxjSGt6E6Y2U/MCIH6PEJH4XofQ8MTzt3VKmZtNLfjc461KjXWEZb9bjHEYi9GfvZAgMBAAE=" 5 | }, 6 | { 7 | "id": "QmNc18FMrKks42Nd2PVwtBHdSvmfkhcvANDBe9byVuXd3f", 8 | "privKey": "CAAS4QQwggJdAgEAAoGBAM7QJKkQQjN9y9qAvNQ4M1tTekRt1lLgTyGklo+9jlSog3sJnfKGWzYkz8mWQAHBTOc9xrH27nxTS/aY0ZCWpdxwsQ36L+gJSgla9qrtstZIURuIUUU8QuVaDhcXtGhkls5Br9L3gFneHJDvmzWWoehTIB2U3b2X69cczkeIzzAlAgMBAAECgYEAyMrDXTWA4NdY7ZsiOnUaP6frxLZTaQGJMqPG3mXu43j8tp9er5IMyqOw7gqnuEyBqt2KpUyWvTRuQ0yZ4uSwwHgDWG4OoBBt3KQ843AAqjxhPkO8/7arExyuJVgkvWiD3y0XaDWKfxMPjlGPAkJu98aobQbXMF2eq28TPaWDpEECQQDoMkl+Vcz0CGhrauZgzuQJMNMaED5ZLgmt0iZNCBSfvVDDwJJ4SdDCwaf2EQAWkfVq046zXfBVKd8dJusdxITVAkEA5AOzSVfMRRslMarJ9Uh892/F7R3J8GZHahHO6gXFgdspP2r3yJuQEKFjm/TBXoj4NSfg9q9nqRtazOelkz/mEQJBAIVOAYWHStA7V3978uFXRfHE1x1agAEA/kuNn7GP1w9SR+DqWOy4LCLzvTL21Z8fks8OwBlX8Bdkn5QD+O2xGjUCQBDDPOD9dWT3M+rM1YAUzisaqoBeGkR0IHfpyz5lIjoDtKqW4JR860BtvPbwLW8XvreZsO4xwvspqak6tm922SECQHdTKqH+JwmXnMgs6ZDszBzkr8Hu17zbMTFHZktqrjKAGo/+hI5XNJ42ZJkeFBN3KvZd+wkSKjiUufKMUcX5ZsM=", 9 | "pubKey": "CAASogEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM7QJKkQQjN9y9qAvNQ4M1tTekRt1lLgTyGklo+9jlSog3sJnfKGWzYkz8mWQAHBTOc9xrH27nxTS/aY0ZCWpdxwsQ36L+gJSgla9qrtstZIURuIUUU8QuVaDhcXtGhkls5Br9L3gFneHJDvmzWWoehTIB2U3b2X69cczkeIzzAlAgMBAAE=" 10 | }, 11 | { 12 | "id": "QmbZRmAfarFJYDUwa5WSQ1FazCQ72iA7Hp3qE2ZPhRF7is", 13 | "privKey": "CAAS4QQwggJdAgEAAoGBAK3GnPKwMAUA0iKRgi0CP2e7J5ocISAcp0lvLXDD/eHU+8r1nx3SAt/hjyKG+TAa7/xzzAXJ76bko5tiNIlFOiVYQ0DH5EKPSfiEsPYeXl/sMelE+u4eV9equ/fRpGCQB/Zt4Al2CZ6nqz2e00hp7UOUaKQ1niUv1UkQaucwNQhpAgMBAAECgYEAk1Asmpt7ibJBcSaTCR6/q2r/zmgMONGxitIh3ld3sOtBrLN9+OSgpTPsiCONuRcM3KE5kqq2a2+ltkKRMNEr0TdXCmO2DCSsniZur6QUU5SDqKvJom18iHAh3DnelNTcplJSJps9HvAdkgRG25OTd62Sx2uoLJbcBOksb7za59ECQQDpEAc05CgBBWCmeKFWFiMZ7f7JeGmvTU0jq7DDu9UYedWaQdHxUGlysYLuSMBfahO2FOpCxolJyoWS05sMA2rNAkEAvuDdacRfWRpzS4nEEq0yo3+ynntz047Cjmz0RmbhY1tMQF3O6cVbpj57mpSfk3xwm+cA4olr3H3g9jIxFSMMDQJBAOMYZh1IkB5mkgXEF1kd2vgiKplABqL1TMwMOQKQnRTrapKTvjFIbGeAxpbPNvwCDdDnUljcwduwRjWKkVLEZaUCQCoSTBsc4ls0Xkc/BU2MiUXKRqdx1HTD2ZQk5a6CzJjmrabeRlX2Xx4EVaAbtBSA8B0czHAiE/kUcboyE+lxwR0CQH+cHs/nUSpS00TYxeY3RPEHNzgt1gv6oBNkMP6R4k1aXHMMJfJn0lt9cGmiNK5aXZ7/5IA8Fjz9aoovUHPJ2qY=", 14 | "pubKey": "CAASogEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK3GnPKwMAUA0iKRgi0CP2e7J5ocISAcp0lvLXDD/eHU+8r1nx3SAt/hjyKG+TAa7/xzzAXJ76bko5tiNIlFOiVYQ0DH5EKPSfiEsPYeXl/sMelE+u4eV9equ/fRpGCQB/Zt4Al2CZ6nqz2e00hp7UOUaKQ1niUv1UkQaucwNQhpAgMBAAE=" 15 | }, 16 | { 17 | "id": "QmeKizHABFpHtEZoJiuZJmvVtC1yXi2MrRZ3UqgnqrUqet", 18 | "privKey": "CAAS4AQwggJcAgEAAoGBAKninycjHQ78497Tl9tNYfckoJVkQhgCplQwnPN4j/oQ22fMOw9DTnqNrovqGE4KvHQtRi4EAV8DhWPWGL2X4MQln67LZ2MrYkmgaaFVcQwjPDlVFcoY4hfYnjGDRSyF0cI1TYlqK4Q7LvSXQQ/pk+RfmqHun90+MwY5Nu8jUyXxAgMBAAECgYBfJOy9nozHuBpm0VpbGBulrn3BLUVW7ST6Qb+lHACGrKo0l8z731Y0jOUUtyaljp288x4650NdUzuBjoaMqGLYjiG9iKbfRsoBVSBItrVAyEWZTKeMpjjkILzkpKLc2GjV8G96J/6g/TYxiZxWUgcyfpHytZCvc1RCXb56K/CRgQJBANCfRrQN/NcE2esUjezdmNxSSltDL8UgsQBWukEz/iog6JniWJSQ/jzp8bhCnAxXMfGMrAwT1AvrpTVW+Qj+GR8CQQDQd0k354Rdc3nCCv3SYfYVi0oiNty99KnYt+qjGlJTK4FFOq84TegAyUK0fawCFLbafo0n74tJ6UOmzGB+Bg7vAkEAqW4BtO97PYFiHgp2bT1XDPAEFXROUpGHTtggsh2wuLQ7U+bAu/+5iu0Qc+4c3+Tc2ei+PZzI5Nl/Nf+ph0UeTwJAclV6KtcnmbmDxhERNRnVReunNSyx1N0jamE0p1AAZu0tcTLGdzMDU6sOkr96X4nM3/kYohaXZmoxWIIvSe84sQJAKUyT8k8fSfHlokrWEz5SVz0tPEDy93JZrie3iIrghVEtTB2S1h7IWJxqIDdxcUdMnK45eqfZ5DDJOv9ugVIlKA==", 19 | "pubKey": 
"CAASogEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAKninycjHQ78497Tl9tNYfckoJVkQhgCplQwnPN4j/oQ22fMOw9DTnqNrovqGE4KvHQtRi4EAV8DhWPWGL2X4MQln67LZ2MrYkmgaaFVcQwjPDlVFcoY4hfYnjGDRSyF0cI1TYlqK4Q7LvSXQQ/pk+RfmqHun90+MwY5Nu8jUyXxAgMBAAE=" 20 | }, 21 | { 22 | "id": "QmdX7uA1nQq1oghmxHhpKfb7VbvjETPd6DihW4w5Ht52Tt", 23 | "privKey": "CAAS3wQwggJbAgEAAoGBAL1sEY5BbZjRFpueXv4/QUpYp64wZ+VQeMmcbzxSJ73OKTPkJKSyKsK6gyC/5ym6nPZnlTgAsLZ3nCzt7Ka5bzV/74yFhgO5koB91umvOIG1rowOK3BpNJixtAYc/dSwmTpgnTB5zjZBoIMbvY0TZXPy4LQAvtBhGAqHnP/brVYzAgMBAAECgYAXmXAyffpOkLY1WyzQlkbnIVZ6wDMhgb4auC+vnziuUBIYEfNyxXPIYP5XRTFipIpLtKoFmShrA0nk2xuhxfaRx9qqkCYfkvsmbNHhYa43PezfGGK2LuTh365cwQWx5Lr3qLFRFI5IrKwFtkLM654MfJ+HzWY65eB4+dJQuuiHQQJBAOZJqJ1UumBiTbnRyc1sq8wLCTR32vhDGPENJ2QY4TMNWvHA9jgw0M23elkvYKhFhKY33unM7UdT/07UbVTGcdMCQQDSklgTsH7liNJK3HP0Pp9igJU3I/e3BBlNs/5QeUFLywGWZcgiWBkj3+4Bt125YjZjQ8Rpbzp6otBcxJrEEG4hAj8S4/fw8AJZ5bZ6h7sGHuP42O7oXuyXcOma3HlevKu8hFvxeOK5uUdnmKVwq3JCW3GGMQ8YHrdYIovO654MVjsCQAjHKPzJIsEWzw1n2z2XTrWgfpfp2+25MgTXOAbKxzoNoGdQnuMvoPyp3RYdOr77pSDGNm3ewSVTnDQBziHWPYECQQCSPtP/k94tIGXEx/VN2eRSgDCbpjeqWkBck3Er/5qrAYcnJG6RdDGpVb3XRy+s5JuqCzPegzWOONVJx3k18BEU", 24 | "pubKey": "CAASogEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAL1sEY5BbZjRFpueXv4/QUpYp64wZ+VQeMmcbzxSJ73OKTPkJKSyKsK6gyC/5ym6nPZnlTgAsLZ3nCzt7Ka5bzV/74yFhgO5koB91umvOIG1rowOK3BpNJixtAYc/dSwmTpgnTB5zjZBoIMbvY0TZXPy4LQAvtBhGAqHnP/brVYzAgMBAAE=" 25 | }] -------------------------------------------------------------------------------- /tests/config/go-configs.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "Identity": { 4 | "PeerID": "QmYgrJGcrKoEtrECPmFznr16mAoCw8U4G9JvNpeNfYYr23", 5 | "PrivKey": "CAASpwkwggSjAgEAAoIBAQDWN02R4i0BmcXcQ7qiZwF92DXFTg7nMXhp5Fkd4UB3Jio+kR1NHn6Q4FhYlrXj+M1RRuVwvVF9f0D6uWcWtNVBo11kDMULFgV1CyEi1rWHh/k2MRzK3gS4Fdbh2nLenaKxANqsLQTFgurFB+Qerly139FunZtaO1462KTrvNG7Cs4L8QgMT3bK+5lL+OWlxF+fQWsGkFb2QVNfGXKhjSRN7LKOeguPQsXM2LORv4FMw2A7EYepf9WPaxs/zwMwxP9Lvl1p7rnw7ZY3uRFjXREkJZ4oH0A/iLrxhnCy1DFmemZlB0BEGW+cjMJKcwBxWnd2dz2mqVtXpB1GH2B02lbLAgMBAAECggEBALmASnv6EF9CekSzofrgmDoR8P5bCizKeiu4wjNNzDVhp1Nm9qjBr3hlzhto5znzSlkHu8hVKSwz9mEoDE1tav7cKRNqu3dOKough6FTrP8gkH6NKRHEEPIYd2w9bxu4BhqhUrumgGtJY2Qb4T9wXnhgyUuwz8bjgkYTNT2u1S/SJUTnGTZgifgxnk3n1HLoEdRb+FbL5xvM+piChAutdKRvAlS4bAtBtFidNkXfIXEMwPipfA9SPMbOehqrbbN2FZRIGmGu1UI87+g0xQbxPy1JP2UkSJQhTLGWmoHoZcix4YOhL3qdYoaBHpcdWhcQRPvftTudtNgo7fJP/r7ReYECgYEA3XIvhvlu+HEZMoYAqXLo651ASt6+3FcFW0js7fL3S6QD76d0qN6MlFPLPekzGcnrD6XqQw/qO/T4FF7Aag+ZCv4nNVJnFuGwqAJl7zzIUxzOK1VI8ujvF1EZ1FPirD2ImGauT8Q9H8dcIDhHUxgLpQ9D31YrSmgMSj2Jh7jfO5cCgYEA96RPQwQJjw5WpNhGLA41bq7eySx9jOtb6cV/LDSIa6xGRJTpO/0U4wuz0y56eOd2lR+JCr/FnPRmLQSKoH8NdMwxisgGai6NLY23ZSWvx6JnoWIDWUf5/L+EKDR+AfF7CG3qbXN1+4Qx1kmeSJMSgKXThEyCd3fd8xmgqvtGtO0CgYBIbX9j+xMziPI/1xpCUcYiwfT0R/RO5ttmpuxvSyddAxESgkeir9906A8yeaJwDd2m6ZI8SulGoPG+39q5Fc7tjIDQbNow3rJD0Tz7yXqPzH0WKTfJ0yvYCXuUObeKtDmH9xWOG1YOncrg8udJIzUz0FFXq7xPAsb6RTRFQs8okwKBgHeE17RSaIKguvSIZSWyHduzjlatoFK47P1gkkY1fHnrBWboL8ECHmiOJ1YPIpDjtXm3ZJ7Je4iK9B1aCcX4S/hI7Mg8TcxFd2G6+f5ZOAMzwKxE77TRZjez/wagfwhVEbo5wmDczt57KMpxcXE5ej6YygMaIgWi1n4TqlWbPL7pAoGADW9fy4KFhO2fvlZKOelGDGpQm4TWA5xYGZPwx8hsl4P5G/aMip4p+aIwYns9xNL3JclgPoHCclaIBFOjoZZHiISfzzb7siSehbH642tqa2t9tVk1AGzGN5fXJBNbT/e7II43F84e4kDd0YB91tO6h0k9A0IXMCD693v4rWwhR54=" 6 | }, 7 | "Addresses": { 8 | "Swarm": [ 9 | "/ip4/0.0.0.0/tcp/4001", 10 | "/ip4/0.0.0.0/tcp/4002/ws" 11 | ], 12 | "Announce": [], 13 | "NoAnnounce": [], 14 | "API": "/ip4/127.0.0.1/tcp/5002", 15 | "Gateway": "/ip4/127.0.0.1/tcp/8081" 16 | }, 17 | "Experimental": 
{ 18 | "FilestoreEnabled": false, 19 | "UrlstoreEnabled": false, 20 | "ShardingEnabled": false, 21 | "Libp2pStreamMounting": true, 22 | "P2pHttpProxy": false, 23 | "QUIC": false 24 | } 25 | }, 26 | { 27 | "Identity": { 28 | "PeerID": "QmYgrJGcrKoEtrECPmFznr16mAoCw8U4G9JvNpeNfYYr23", 29 | "PrivKey": "CAASpwkwggSjAgEAAoIBAQDWN02R4i0BmcXcQ7qiZwF92DXFTg7nMXhp5Fkd4UB3Jio+kR1NHn6Q4FhYlrXj+M1RRuVwvVF9f0D6uWcWtNVBo11kDMULFgV1CyEi1rWHh/k2MRzK3gS4Fdbh2nLenaKxANqsLQTFgurFB+Qerly139FunZtaO1462KTrvNG7Cs4L8QgMT3bK+5lL+OWlxF+fQWsGkFb2QVNfGXKhjSRN7LKOeguPQsXM2LORv4FMw2A7EYepf9WPaxs/zwMwxP9Lvl1p7rnw7ZY3uRFjXREkJZ4oH0A/iLrxhnCy1DFmemZlB0BEGW+cjMJKcwBxWnd2dz2mqVtXpB1GH2B02lbLAgMBAAECggEBALmASnv6EF9CekSzofrgmDoR8P5bCizKeiu4wjNNzDVhp1Nm9qjBr3hlzhto5znzSlkHu8hVKSwz9mEoDE1tav7cKRNqu3dOKough6FTrP8gkH6NKRHEEPIYd2w9bxu4BhqhUrumgGtJY2Qb4T9wXnhgyUuwz8bjgkYTNT2u1S/SJUTnGTZgifgxnk3n1HLoEdRb+FbL5xvM+piChAutdKRvAlS4bAtBtFidNkXfIXEMwPipfA9SPMbOehqrbbN2FZRIGmGu1UI87+g0xQbxPy1JP2UkSJQhTLGWmoHoZcix4YOhL3qdYoaBHpcdWhcQRPvftTudtNgo7fJP/r7ReYECgYEA3XIvhvlu+HEZMoYAqXLo651ASt6+3FcFW0js7fL3S6QD76d0qN6MlFPLPekzGcnrD6XqQw/qO/T4FF7Aag+ZCv4nNVJnFuGwqAJl7zzIUxzOK1VI8ujvF1EZ1FPirD2ImGauT8Q9H8dcIDhHUxgLpQ9D31YrSmgMSj2Jh7jfO5cCgYEA96RPQwQJjw5WpNhGLA41bq7eySx9jOtb6cV/LDSIa6xGRJTpO/0U4wuz0y56eOd2lR+JCr/FnPRmLQSKoH8NdMwxisgGai6NLY23ZSWvx6JnoWIDWUf5/L+EKDR+AfF7CG3qbXN1+4Qx1kmeSJMSgKXThEyCd3fd8xmgqvtGtO0CgYBIbX9j+xMziPI/1xpCUcYiwfT0R/RO5ttmpuxvSyddAxESgkeir9906A8yeaJwDd2m6ZI8SulGoPG+39q5Fc7tjIDQbNow3rJD0Tz7yXqPzH0WKTfJ0yvYCXuUObeKtDmH9xWOG1YOncrg8udJIzUz0FFXq7xPAsb6RTRFQs8okwKBgHeE17RSaIKguvSIZSWyHduzjlatoFK47P1gkkY1fHnrBWboL8ECHmiOJ1YPIpDjtXm3ZJ7Je4iK9B1aCcX4S/hI7Mg8TcxFd2G6+f5ZOAMzwKxE77TRZjez/wagfwhVEbo5wmDczt57KMpxcXE5ej6YygMaIgWi1n4TqlWbPL7pAoGADW9fy4KFhO2fvlZKOelGDGpQm4TWA5xYGZPwx8hsl4P5G/aMip4p+aIwYns9xNL3JclgPoHCclaIBFOjoZZHiISfzzb7siSehbH642tqa2t9tVk1AGzGN5fXJBNbT/e7II43F84e4kDd0YB91tO6h0k9A0IXMCD693v4rWwhR54=" 30 | }, 31 | "Addresses": { 32 | "Swarm": [ 33 | "/ip4/0.0.0.0/tcp/4011", 34 | "/ip6/::/tcp/4011" 35 | ], 36 | "Announce": [], 37 | "NoAnnounce": [], 38 | "API": "/ip4/127.0.0.1/tcp/5011", 39 | "Gateway": "/ip4/127.0.0.1/tcp/8081" 40 | } 41 | }, 42 | { 43 | "Identity": { 44 | "PeerID": "QmScwAUZLjmXU9pJe4k4mwZKu7jpo9W6NqVfeFvUfV3Yk5", 45 | "PrivKey": 
"CAASqAkwggSkAgEAAoIBAQDgxZLlaJT3YzTiFvhgjAr+RHrOmpha+YF/5lH5ewUt/dJ/cx2lTuWTE+6JL6S/BhQ1Z+ene+2hcE6jq4kUqA74vcWHc3p9uvHWulNq3988GKJmVWH7plgqpvDyGwu2nCINVMnC78nDLnxUeUzwxinjam2YBt/ronFfOLU/kO3fhz5XdnPf3magExqL28xrLHQHL7+dOm1L8lONLYzz3JQ9+nGxAZPT6X88gwREn5Tx3+xVG7B8Qy889p/YN6sbQ7skJrW6QwzQXeV+26vlA6PaJvsnRJsJoKqSA8bP244hvdC8yLuEG/Ra65Z5iolowNW5yn+sQdadcF3mmXhyYtPZAgMBAAECggEBANvUX7a+cfMBs9brYEAMtzO3BeTKjfdrzpuoRsQY7RfuBtFfZL8oSp9t/v1s+1NYPqt807OaIO6jSEYKnib1tq7P60BuUH5nN98qnCsKiOndrfcnyoy0Q8ZzP3OJwAr46NTg9/MPUeTyseZsPDO/m3gvvr/Q+W8bkIXwkFN7Cypw5cTDkHoOzewqmtHqQRBYwDzwSmC05b7UBKsFmGl8LMjNVbYTdos4aOHrjmTX3po/7TwQz33PNbZ6SBiFMEiYMuHw7fqfPgMa8TEHH1KjhcB/FzutBTrO0D+e9+urkkADwWSquGgeF0edbBDzkj9tc5mn9e/DqsXK15rCTcCjRYECgYEA9nd9mty3yEqaGfYQOr533sfp8x26OrGaRQUCHd3fPxCELroe2Uk1JhOH4gS6RzV3t90AfGpsCyufu9h/JU1uckj1oINFMlZ30bXlM4a8dHFyy9ltXh6mMOxWPx7fgh/1Rm/nD5kI937y0Oga49SBRGlgEMdYHbshUI+VnnrknEsCgYEA6XdCghy3I0ZLF1lSVLtM3T6RTeZjq6+2fj19Fvc7vfXjiymWwplXvBs5T68QXH1Gj72Dk5ylN7o884Q0digIK5yM6dd3TPum/JqZXEZ+jnt46i5BULJoN+Vkto3zffc3FrpdZtOYbAeZedkjVRye9uSw68vConrTWBXbbhW3MesCgYAjMWDAEjAz62Mhvd8HZFcdepJPYrlK1D1hCZwJw7qy+JrHSVBKWoe6TiOjBGXM8TeNC4OcQF35IluGqR1OqEXTD9Py/YuN6oblbQGzX9PdFK39qahSPjIg3j5aeQAVRpbxXd0yfvmmzXvClMHyMk/bAujyvGQ6qpPROw90N7wZDQKBgBz3Ke5Ap8wdlYVxFJDkqBWvBJIYqncLluW0tUdbJzNbRX7niwfbxJHXAtZLDwmalV2SErgGsEzos1U3KtcaAmF+y9DbZbbMZhxjBfH34Q3GbesIEBx19g4xTLyFghc3y8LrHll6mSeWDcmbUhHmOibBmt22axCDkqMUGyReV1s/AoGBALw5pUlqMcJc35Hz1slA6k1Fft9dLi20YQzZzCqMJwVG0Motc0afZUJ6X684lULK9xlLw+SPlUeY9394lBWjx15EYTF0e+h7OPxPymUnKZmdhs+0dUcP3QHaC/OXg1tFJblvg2qt0ihxwV5D2NaLFxW3I/kYMTTIUuMMYkf1C5SO" 46 | }, 47 | "Addresses": { 48 | "Swarm": [ 49 | "/ip4/0.0.0.0/tcp/4020", 50 | "/ip6/::/tcp/4020" 51 | ], 52 | "Announce": [], 53 | "NoAnnounce": [], 54 | "API": "/ip4/127.0.0.1/tcp/5002", 55 | "Gateway": "/ip4/127.0.0.1/tcp/8082" 56 | } 57 | } 58 | ] -------------------------------------------------------------------------------- /runner/config.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const fs = require('fs') 4 | const os = require('os') 5 | const path = require('path') 6 | const YAML = require('yaml') 7 | const Influx = require('influx') 8 | const util = require('util') 9 | const Pino = require('pino') 10 | const PinoPretty = require('pino-pretty') 11 | const pinoms = require('pino-multi-stream').multistream 12 | const PinoGetPrettyStream = require('pino/lib/tools').getPrettyStream 13 | const mkDir = util.promisify(fs.mkdir) 14 | 15 | let pino 16 | 17 | const configBenchmarks = require('./lib/configBenchmarks') 18 | const inventoryPath = process.env.INVENTORY || path.join(__dirname, '../infrastructure/inventory/inventory.yaml') 19 | const playbookPath = path.join(__dirname, '../infrastructure/playbooks/benchmarks.yaml') 20 | const HOME = process.env.HOME || process.env.USERPROFILE 21 | const keyfile = path.join(HOME, '.ssh', 'id_rsa') 22 | const memorySuffix = '_memory' 23 | const cpuSuffix = '_cpu' 24 | const ipfsAddress = process.env.IPFS_ADDRESS || '/dnsaddr/cluster.ipfs.io' 25 | const ipfsUser = process.env.IPFS_USER || 'ipfsbenchmarks' 26 | const ipfsPassword = process.env.IPFS_PASSWORD || false 27 | const now = Date.now() 28 | const logDir = `${os.tmpdir()}/${now}` 29 | const logFile = `${logDir}/stdout.log` 30 | const logLevel = process.env.LOGLEVEL || 'info' 31 | const hostname = process.env.HOSTNAME || 'localhost' 32 | 33 | mkDir(`${logDir}`, { recursive: true }) 34 | 35 | // pretty logs in local 36 | if (process.env.NODE_ENV === 'test') { 37 
| pino = Pino({
38 | enabled: false
39 | })
40 | } else {
41 | let streams
42 | if (process.env.LOG_PRETTY && process.env.LOG_PRETTY === 'true') {
43 | let prettyStream = PinoGetPrettyStream({
44 | levelFirst: false,
45 | translateTime: true,
46 | colorize: true
47 | }, PinoPretty, process.stdout)
48 | 
49 | streams = [
50 | { stream: fs.createWriteStream(logFile) },
51 | { level: logLevel, stream: prettyStream }
52 | ]
53 | } else {
54 | streams = [
55 | { stream: fs.createWriteStream(logFile) },
56 | { level: logLevel, stream: process.stdout }
57 | ]
58 | }
59 | pino = Pino({ name: 'runner', level: 'debug' }, pinoms(streams))
60 | }
61 | pino.info(`logFile: ${logFile}`)
62 | 
63 | const getInventory = () => {
64 | return YAML.parse(fs.readFileSync(inventoryPath, 'utf8'))
65 | }
66 | 
67 | const getBenchmarkHostname = () => {
68 | return getInventory().all.children.minions.hosts
69 | }
70 | 
71 | let loc = 'remote'
72 | if (process.env.STAGE === 'local') {
73 | loc = 'local'
74 | }
75 | 
76 | const runClinic = process.env.CLINIC === 'ON' || process.env.CLINIC === 'true' // env vars are always strings, so comparing against the boolean true is unnecessary
77 | const tests = configBenchmarks.constructTests(loc, runClinic)
78 | 
79 | const config = {
80 | provison: {
81 | command: `ansible-playbook -i ${inventoryPath} --key-file ${keyfile} ${playbookPath}`
82 | },
83 | log: pino,
84 | stage: process.env.STAGE || 'local',
85 | outFolder: process.env.OUT_FOLDER || configBenchmarks.tmpOut,
86 | dataDir: process.env.DATADIR || './data/',
87 | logFile: logFile, // where we store all the stuff that is to be sent to IPFS
88 | now: now,
89 | db: 'ipfs-db',
90 | server: {
91 | port: 9000,
92 | apikey: process.env.API_KEY || 'supersecret',
93 | hostname: hostname,
94 | schedule: process.env.RUN_NIGHTLY !== 'false', // `RUN_NIGHTLY || true` was always truthy, even for RUN_NIGHTLY='false'
95 | api: {
96 | clinic: {
97 | operations: configBenchmarks.clinicOperations,
98 | filesets: configBenchmarks.clinicFilesets
99 | },
100 | benchmarks: {
101 | tests: configBenchmarks.testAbstracts
102 | }
103 | }
104 | },
105 | influxdb: {
106 | host: process.env.INFLUX_HOST || 'localhost',
107 | db: 'benchmarks',
108 | schema: [
109 | {
110 | measurement: tests[0].measurement,
111 | fields: {
112 | duration: Influx.FieldType.FLOAT,
113 | ipfs_sha: Influx.FieldType.STRING
114 | },
115 | tags: [
116 | 'warmup',
117 | 'commit',
118 | 'project',
119 | 'file_set',
120 | 'branch',
121 | 'guid',
122 | 'version',
123 | 'repo',
124 | 'sha',
125 | 'nightly',
126 | 'tag'
127 | ]
128 | },
129 | {
130 | measurement: `${tests[0].measurement}${memorySuffix}`,
131 | fields: {
132 | memory: Influx.FieldType.INTEGER,
133 | ipfs_sha: Influx.FieldType.STRING
134 | },
135 | tags: [
136 | 'warmup',
137 | 'commit',
138 | 'project',
139 | 'file_set',
140 | 'branch',
141 | 'guid',
142 | 'version',
143 | 'repo',
144 | 'sha',
145 | 'nightly',
146 | 'tag'
147 | ]
148 | },
149 | {
150 | measurement: `${tests[0].measurement}${cpuSuffix}`,
151 | fields: {
152 | cpu: Influx.FieldType.INTEGER,
153 | ipfs_sha: Influx.FieldType.STRING
154 | },
155 | tags: [
156 | 'warmup',
157 | 'commit',
158 | 'project',
159 | 'file_set',
160 | 'branch',
161 | 'guid',
162 | 'version',
163 | 'repo',
164 | 'sha',
165 | 'nightly'
166 | ]
167 | }
168 | ]
169 | },
170 | benchmarks: {
171 | clinic: runClinic,
172 | host: getBenchmarkHostname(),
173 | user: process.env.BENCHMARK_USER || 'elexy',
174 | key: process.env.BENCHMARK_KEY || keyfile,
175 | path: path.join(__dirname, '../tests'),
176 | remotePath: configBenchmarks.remoteTestsPath,
177 | tests: tests,
178 | cleanup:
`rm -Rf ${configBenchmarks.tmpOut}/*`,
179 | measurements: {
180 | memory: memorySuffix,
181 | cpu: cpuSuffix
182 | }
183 | },
184 | ipfs: {
185 | path: configBenchmarks.remoteIpfsPath,
186 | network: {
187 | address: ipfsAddress,
188 | user: ipfsUser,
189 | password: ipfsPassword
190 | }
191 | }
192 | }
193 | 
194 | config.log.debug(config.benchmarks.tests)
195 | 
196 | module.exports = config
197 | -------------------------------------------------------------------------------- /runner/lib/configBenchmarks.js: --------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const path = require('path')
4 | const _ = require('lodash')
5 | const remoteTestsPath = process.env.REMOTE_FOLDER || '~/ipfs/tests/'
6 | const remoteIpfsPath = process.env.REMOTE_FOLDER || '~/ipfs/'
7 | const uuidv1 = require('uuid/v1')
8 | const locations = ['local', 'remote']
9 | const clinicOperations = ['doctor', 'flame', 'bubbleProf']
10 | const clinicFilesets = ['One4MBFile', 'One64MBFile']
11 | const tmpOut = '/tmp/out'
12 | const params = `OUT_FOLDER=${tmpOut} REMOTE=true GUID=${uuidv1()} `
13 | const remotePreNode = `killall node 2>/dev/null; killall ipfs 2>/dev/null; source ~/.nvm/nvm.sh && `
14 | 
15 | const testAbstracts = [
16 | {
17 | name: 'localTransfer_tcp_mplex',
18 | file: 'local-transfer.js -t tcp -m mplex'
19 | },
20 | {
21 | name: 'localTransfer_ws_mplex',
22 | file: 'local-transfer.js -t ws -m mplex'
23 | },
24 | {
25 | name: 'localTransfer_tcp_mplex_secio',
26 | file: 'local-transfer.js -t tcp -m mplex -e secio'
27 | },
28 | {
29 | name: 'localTransfer_ws_mplex_secio',
30 | file: 'local-transfer.js -t ws -m mplex -e secio'
31 | },
32 | {
33 | name: 'localTransfer_tcp_spdy',
34 | file: 'local-transfer.js -t tcp -m spdy'
35 | },
36 | {
37 | name: 'localTransfer_ws_spdy',
38 | file: 'local-transfer.js -t ws -m spdy'
39 | },
40 | {
41 | name: 'localTransfer_tcp_spdy_secio',
42 | file: 'local-transfer.js -t tcp -m spdy -e secio'
43 | },
44 | {
45 | name: 'localTransfer_ws_spdy_secio',
46 | file: 'local-transfer.js -t ws -m spdy -e secio'
47 | },
48 | {
49 | name: 'unixFsAdd_balanced',
50 | file: 'local-add.js -s balanced'
51 | },
52 | {
53 | name: 'unixFsAdd_trickle',
54 | file: 'local-add.js -s trickle'
55 | },
56 | {
57 | name: 'localExtract',
58 | file: 'local-extract.js'
59 | },
60 | {
61 | name: 'multiPeerTransfer_tcp_mplex',
62 | file: 'multi-peer-transfer.js -t tcp -m mplex'
63 | },
64 | {
65 | name: 'multiPeerTransfer_ws_mplex',
66 | file: 'multi-peer-transfer.js -t ws -m mplex'
67 | },
68 | {
69 | name: 'multiPeerTransfer_tcp_mplex_secio',
70 | file: 'multi-peer-transfer.js -t tcp -m mplex -e secio'
71 | },
72 | {
73 | name: 'multiPeerTransfer_ws_mplex_secio',
74 | file: 'multi-peer-transfer.js -t ws -m mplex -e secio'
75 | },
76 | {
77 | name: 'multiPeerTransfer_tcp_spdy',
78 | file: 'multi-peer-transfer.js -t tcp -m spdy'
79 | },
80 | {
81 | name: 'multiPeerTransfer_ws_spdy',
82 | file: 'multi-peer-transfer.js -t ws -m spdy'
83 | },
84 | {
85 | name: 'multiPeerTransfer_tcp_spdy_secio',
86 | file: 'multi-peer-transfer.js -t tcp -m spdy -e secio'
87 | },
88 | {
89 | name: 'multiPeerTransfer_ws_spdy_secio',
90 | file: 'multi-peer-transfer.js -t ws -m spdy -e secio'
91 | },
92 | {
93 | name: 'addMultiKb_balanced',
94 | file: 'add-multi-kb.js -s balanced'
95 | },
96 | {
97 | name: 'addMultiKb_trickle',
98 | file: 'add-multi-kb.js -s trickle'
99 | },
100 | {
101 | name: 'initializeNodeBrowser',
102 | file: 'init-node.browser.js'
103 | },
104 | {
105 | name: 'unixFsAddBrowser_balanced',
106 | file: 'local-add.browser.js -s balanced'
107 | },
108 | {
109 | name: 'addMultiKbBrowser_balanced',
110 | file: 'add-multi-kb.browser.js -s balanced'
111 | },
112 | {
113 | name: 'unixFsAddGo_balanced',
114 | file: 'local-add.go.js -s balanced'
115 | },
116 | {
117 | name: 'peerTransferBrowser_ws_mplex',
118 | file: 'peer-transfer.browser.js -t ws -m mplex'
119 | },
120 | {
121 | name: 'pubsubMessage',
122 | file: 'pubsub-message.js'
123 | }
124 | ]
125 | 
126 | const clinicRuns = {
127 | doctor: {
128 | fileSets: ['One4MBFile', 'One64MBFile']
129 | },
130 | flame: {
131 | fileSets: ['One4MBFile', 'One64MBFile']
132 | },
133 | bubbleProf: {
134 | fileSets: ['One4MBFile']
135 | }
136 | }
137 | 
138 | const testDefaults = {
139 | path: {
140 | remote: remoteTestsPath,
141 | local: path.join(__dirname, '/../../tests') // the tests folder lives two levels up from runner/lib
142 | },
143 | params: params,
144 | remotePreCommand: remotePreNode
145 | }
146 | 
147 | const getCommand = (test, loc) => {
148 | return `${loc === 'remote' ? remotePreNode : ''} ${testDefaults.params} node ${testDefaults.path[loc]}/${test.file}`
149 | }
150 | 
151 | const getClinicCommands = (test, operation, loc) => {
152 | if (locations.includes(loc) && clinicOperations.includes(operation)) {
153 | let variations = []
154 | for (let fileSet of clinicRuns[operation].fileSets) {
155 | let shellCommand = `${loc === 'remote' ? remotePreNode : ''} FILESET="${fileSet}" clinic ${operation} --dest ${tmpOut}/${test.name}/ -- node ${testDefaults.path[loc]}/${test.file}`
156 | variations.push({
157 | command: shellCommand,
158 | fileSet: fileSet,
159 | benchmarkName: test.name,
160 | operation: operation
161 | })
162 | }
163 | return variations
164 | } else {
165 | throw Error(`getClinicCommands requires an operation from ${clinicOperations} and a location from ${locations}`)
166 | }
167 | }
168 | 
169 | const constructTests = (loc, doClinic, testNames) => {
170 | let tests = []
171 | let testItems = []
172 | if (testNames && testNames.length > 0) {
173 | testItems = testNames
174 | } else {
175 | testItems = testAbstracts
176 | }
177 | for (let testAbstract of testItems) {
178 | if (typeof testAbstract === 'string') {
179 | testAbstract = _.find(testAbstracts, { name: testAbstract })
180 | }
181 | if (testAbstract) {
182 | let test = {
183 | name: testAbstract.name,
184 | benchmark: getCommand(testAbstract, loc)
185 | }
186 | if (doClinic) {
187 | test.doctor = getClinicCommands(testAbstract, 'doctor', loc)
188 | test.flame = getClinicCommands(testAbstract, 'flame', loc)
189 | test.bubbleProf = getClinicCommands(testAbstract, 'bubbleProf', loc)
190 | }
191 | tests.push(test)
192 | }
193 | }
194 | return tests
195 | }
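196 | 
197 | // Illustrative example (not part of the original file): constructTests('local', false, ['localExtract'])
198 | // returns roughly [{ name: 'localExtract',
199 | //   benchmark: ' OUT_FOLDER=/tmp/out REMOTE=true GUID=<uuid>  node <localTestsPath>/local-extract.js' }]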
200 | 
201 | module.exports = {
202 | testAbstracts,
203 | constructTests,
204 | tmpOut,
205 | remoteIpfsPath,
206 | remoteTestsPath,
207 | clinicOperations,
208 | clinicFilesets
209 | }
210 | -------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # js-ipfs Benchmarks [![CircleCI](https://circleci.com/gh/ipfs/benchmarks.svg?style=svg)](https://circleci.com/gh/ipfs/benchmarks)
2 | 
3 | This is a set of benchmark tests that tracks [js-ipfs](https://github.com/ipfs/js-ipfs) performance in a Grafana [Dashboard](https://benchmarks.ipfs.team).
4 | 
5 | ## Purpose
6 | 
7 | The IPFS team needs a historical view of various performance metrics around `js-ipfs`
8 | and how it compares to the reference implementation written in `go`. This project
9 | implements benchmark tests for `js-ipfs` and publishes the results in a dashboard.
10 | The artifacts are also made available on the IPFS network. Over time the historical
11 | view will show how `js-ipfs` is (hopefully) closing the gap with the `go` implementation
12 | and which areas need improvement.
13 | 
14 | ![Architecture](architecture.png)
15 | 
16 | The goal is to provide immediate feedback and long-term performance tracking
17 | to developers and the community with an extremely low barrier to entry.
18 | The CI system integrating code changes will trigger benchmark runs, as well as a scheduled
19 | run every night. Each run will provide a URL where the results will be visible.
20 | 
21 | This project also makes it possible to run the tests locally against a development
22 | version of `js-ipfs`. Developers can then examine individual output files before
23 | submitting code to the community.
24 | 
25 | ## Documentation Index
26 | 
27 | * The [dashboard](infrastructure/grafana/README.md) documentation
28 | * [Architecture](infrastructure/README.md) of the `js-ipfs` benchmark system
29 | * Reference on how this [Repository](CONTRIBUTING.md) is organized
30 | * Using the [Runner](runner/README.md) to manage benchmark runs remotely, which includes an [API](https://benchmarks.ipfs.team/runner/docs/index.html) available [here](https://benchmarks.ipfs.team/runner)
31 | * Description of [tests](tests/README.md)
32 | * Convenience [scripts](scripts/README.md) for the docker-compose [deployment](infrastructure/deploy/README.md)
33 | * Overview [video](https://ipfs.io/ipfs/QmSZgcL7dyjcifZ5uJYmBDCCACfzQD5Ve2RFSoB4RdYATp) hosted on the IPFS network.
34 | * [Introduction to Clinic.js in the context of IPFS](https://github.com/ipfs/team-mgmt/issues/796) [Recording](https://nearform.zoom.us/recording/play/A-4Vn3jA5aeK9BCPwKCA44IfwpLZePIBlzvD1bUYF7JqTXnG2JptVaLEVcRUmQ1i)
35 | 
36 | ## Benchmarks on the web
37 | 
38 | The dashboard is available at [https://benchmarks.ipfs.team](https://benchmarks.ipfs.team) and can be viewed without a user account.
39 | A `Continuous Integration` server can trigger benchmark runs using the endpoint exposed on [https://benchmarks.ipfs.team/runner](https://benchmarks.ipfs.team/runner). A commit from the [js-ipfs](https://github.com/ipfs/js-ipfs) repository can be supplied to run the benchmarks against. An API key is also required to trigger a run; please check the [Runner](runner/README.md) docs on how to configure one. An example invocation using curl is provided below.
40 | 
41 | ```bash
42 | > curl -XPOST -d '{"commit":"adfy3hk"}' \
43 | -H "Content-Type: application/json" \
44 | -H "x-ipfs-benchmarks-api-key: " \
45 | https://benchmarks.ipfs.team/runner
46 | ```
47 | 
48 | The response provides links to the output produced by the benchmark tests:
49 | 
50 | ```
51 | TBD
52 | ```
53 | 
54 | For more details about the dashboard see the [Grafana](infrastructure/grafana/README.md) doc.
55 | 
56 | ## Quickstart
57 | 
58 | Clone the benchmarks repository and install:
59 | 
60 | ```bash
61 | > git clone https://github.com/ipfs/benchmarks.git
62 | > cd benchmarks/runner
63 | > npm install
64 | > cd ../tests
65 | > npm install
66 | ```
67 | 
68 | ### Generate test files
69 | 
70 | The files are defined in [fixtures](tests/lib/fixtures.js).
71 | 
72 | ```bash
73 | > npm run generateFiles
74 | ```
75 | 
76 | ### Add test files
77 | 
78 | Here is the file object for a single test:
79 | 
80 | ```js
81 | { size: KB, name: 'OneKBFile' }
82 | ```
83 | 
84 | To add multiple test files add a count property:
85 | 
86 | ```js
87 | { size: KB, name: 'OneHundredKBFile', count: 100 }
88 | ```
89 | 
90 | ### Run tests locally
91 | 
92 | From the `benchmarks/tests` directory:
93 | 
94 | ```bash
95 | > node local-add
96 | > node local-extract
97 | > node local-transfer
98 | ```
99 | 
100 | Run all benchmarks:
101 | 
102 | ```bash
103 | > npm run benchmark
104 | ```
105 | 
106 | Create a pre-generated private key:
107 | 
108 | ```bash
109 | > node util/create-privateKey
110 | ```
111 | 
112 | #### FILESET
113 | 
114 | Use the env variable `FILESET` to run a test against just that specific set of files. The options for `FILESET` are defined in the config.
115 | 
116 | ```bash
117 | > FILESET="One64MBFile" node local-add
118 | ```
119 | 
120 | #### VERIFYOFF
121 | 
122 | Use the env variable `VERIFYOFF=true` to skip verification of the pre-generated test files.
123 | 
124 | ```bash
125 | > VERIFYOFF=true node local-add
126 | ```
127 | 
128 | #### Run tests locally on a js-ipfs branch
129 | 
130 | Inside the `benchmarks/tests` dir is a script to pull down the master branch and install it:
131 | 
132 | ```bash
133 | > ./getIpfs.sh ../
134 | ```
135 | 
136 | The directory structure is now:
137 | ```
138 | ├── benchmarks
139 | ├──── js-ipfs
140 | ├──── tests
141 | ```
142 | 
143 | Run tests against the branch:
144 | 
145 | ```bash
146 | > cd benchmarks/tests
147 | > STAGE=local REMOTE=true node local-add
148 | ```
149 | 
150 | #### FLAGS
151 | 
152 | Below is a list of optional flags used by the tests to select a specific DAG strategy, transport, stream muxer, or connection encryption module in libp2p; an example invocation follows the list.
153 | 
154 | - `-s` DAG strategy (balanced | trickle)
155 | - `-t` Transport (tcp | ws)
156 | - `-m` Stream muxer (mplex | spdy)
157 | - `-e` Connection encryption (secio)
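158 | 
159 | For example, to run the local transfer benchmark over websockets with SPDY muxing and SECIO encryption (one of the combinations exercised by the runner's test matrix in `runner/lib/configBenchmarks.js`):
160 | 
161 | ```bash
162 | > node local-transfer -t ws -m spdy -e secio
163 | ```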
164 | 
165 | ### Adding new tests
166 | 
167 | See [README](tests/README.md).
168 | 
169 | ### Results
170 | 
171 | Results will be written to the `out` directory under `benchmarks/tests`.
172 | 
173 | * `name`: Name of the test
174 | * `warmup`: Flag for whether the repo was warmed up
175 | * `description`: Description of the benchmark
176 | * `fileSet`: Set of files used in the test
177 | * `date`: Date of the benchmark
178 | * `file`: Name of the file used in the benchmark
179 | * `meta.project`: Repo that is benchmarked
180 | * `meta.commit`: Commit used to trigger the benchmark
181 | * `meta.version`: Version of js-ipfs
182 | * `duration.s`: The number of seconds the benchmark took
183 | * `duration.ms`: The number of milliseconds the benchmark took
184 | * `cpu`: Information about the CPU the benchmark was run on
185 | * `loadAvg`: The load average of the machine
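186 | 
187 | As a rough sketch of the shape (the values below are illustrative only, not output from a real run; the authoritative schema lives in [tests/schema/results.js](tests/schema/results.js)), a single result object looks something like:
188 | 
189 | ```js
190 | {
191 | name: 'unixFsAdd', // all values in this object are illustrative
192 | warmup: true,
193 | description: 'Add files to a local repo',
194 | fileSet: 'One4MBFile',
195 | date: '2019-04-01T00:00:00.000Z',
196 | file: 'One4MBFile',
197 | meta: { project: 'js-ipfs', commit: 'adfy3hk', version: '0.34.4' },
198 | duration: { s: 1, ms: 234 },
199 | cpu: 'Intel(R) Xeon(R) CPU @ 2.30GHz',
200 | loadAvg: [0.5, 0.4, 0.3]
201 | }
202 | ```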
203 | 
204 | ## License
205 | 
206 | Copyright (c) Protocol Labs, Inc. under the MIT license. See [LICENSE file](./LICENSE) for details.
207 | -------------------------------------------------------------------------------- /runner/runner.js: --------------------------------------------------------------------------------
1 | 'use strict'
2 | 
3 | const rmfr = require('rmfr')
4 | const os = require('os')
5 | const util = require('util')
6 | const fs = require('fs')
7 | const writeFile = util.promisify(fs.writeFile)
8 | const fsReadfile = util.promisify(fs.readFile)
9 | const fsTruncate = util.promisify(fs.truncate)
10 | const mkDir = util.promisify(fs.mkdir)
11 | 
12 | const config = require('./config')
13 | const remote = require('./remote.js')
14 | const local = require('./local.js')
15 | const provision = require('./provision')
16 | const persistence = require('./persistence')
17 | const compress = require('./compress')
18 | const retrieve = require('./retrieve')
19 | const ipfs = require('./ipfs')
20 | const configBenchmarks = require('./lib/configBenchmarks')
21 | const runCommand = (command, name) => {
22 | if (config.stage === 'local') {
23 | return local.run(command, name)
24 | } else {
25 | return remote.run(command, name)
26 | }
27 | }
28 | 
29 | const enrichResultsMetas = (arrOfResultObjects, props) => {
30 | arrOfResultObjects.map((obj) => {
31 | if (Object.keys(props).length) {
32 | Object.assign(obj.meta, props)
33 | }
34 | return obj
35 | })
36 | }
37 | 
38 | // clear the log file
39 | const clearFile = async () => {
40 | const fd = fs.openSync(config.logFile, 'r+')
41 | await fsTruncate(fd)
42 | fs.closeSync(fd)
43 | }
44 | 
45 | const run = async (params) => {
46 | config.log.debug(params)
47 | // start with a clean logfile
48 | config.log.info(`Clearing logs at ${config.logFile}`)
49 | await clearFile()
50 | config.stage = params.remote ? 'remote' : 'local'
51 | let results = []
52 | const now = Date.now()
53 | const targetDir = `${config.dataDir}/${now}`
54 | config.log.info(`Target Directory: ${targetDir}`)
55 | await mkDir(`${targetDir}`, { recursive: true })
56 | if (config.stage !== 'local') {
57 | try {
58 | await provision.ensure(params.commit)
59 | } catch (e) {
60 | config.log.error(e)
61 | }
62 | }
63 | let benchmarks
64 | if (params.benchmarks && params.benchmarks.tests && params.benchmarks.tests.length) {
65 | const testsJson = JSON.stringify(params.benchmarks.tests)
66 | config.log.info(`Running benchmarks from parameters: ${testsJson}`)
67 | benchmarks = configBenchmarks.constructTests(config.stage, params.clinic.enabled, params.benchmarks.tests)
68 | config.log.debug(`The following benchmarks were constructed ${JSON.stringify(benchmarks)}`)
69 | if (!benchmarks.length) {
70 | config.log.error(`no valid benchmarks found in ${testsJson}`)
71 | }
72 | } else {
73 | config.log.info('Running ALL default benchmarks')
74 | benchmarks = config.benchmarks.tests
75 | }
76 | for (let test of benchmarks) {
77 | // first run the benchmark straight up
78 | try {
79 | await mkDir(`${targetDir}/${test.name}`, { recursive: true })
80 | let arrResult = await runCommand(test.benchmark, test.name)
81 | config.log.debug(`Writing results ${targetDir}/${test.name}/results.json`)
82 | await writeFile(`${targetDir}/${test.name}/results.json`, JSON.stringify(arrResult, null, 2))
83 | if (arrResult.length) {
84 | let objMeta = {}
85 | if (params.nightly) {
86 | objMeta.nightly = true
87 | }
88 | if (params.tag) {
89 | objMeta.tag = params.tag
90 | }
91 | enrichResultsMetas(arrResult, objMeta)
92 | results.push(arrResult)
93 | } else {
94 | config.log.info(`Skipping empty result array: ${arrResult}`)
95 | }
96 | } catch (e) {
97 | config.log.error(e)
98 | // TODO: maybe trigger an alert here ??
99 | }
100 | if (config.benchmarks.clinic || params.clinic.enabled) { // then run it with each of the clinic tools
101 | config.log.info(`Running clinic: default [${config.benchmarks.clinic}] param [${params.clinic.enabled}]`)
102 | try {
103 | for (let op of ['doctor', 'flame', 'bubbleProf']) {
104 | for (let run of test[op]) {
105 | config.log.debug(`${run.benchmarkName}`)
106 | await runCommand(run.command)
107 | // retrieve the clinic files
108 | const clinicOperationPath = await retrieve(config, run, targetDir)
109 | // cleanup clinic files remotely
110 | await runCommand(config.benchmarks.cleanup)
111 | // compress the clinic files
112 | await compress.clinicFiles(clinicOperationPath)
113 | }
114 | }
115 | } catch (e) {
116 | config.log.error(e)
117 | }
118 | } else {
119 | config.log.info(`not running clinic: default [${config.benchmarks.clinic}] param [${params.clinic.enabled}]`)
120 | }
121 | }
122 | let isUploaded = false
123 | let sha
124 | try {
125 | try {
126 | config.log.info(`Copying logs from ${config.logFile} to ${targetDir}/stdout.log`)
127 | const logs = await fsReadfile(config.logFile)
128 | await writeFile(`${targetDir}/stdout.log`, logs)
129 | } catch (err) {
130 | config.log.error(err, 'Error copying logs')
131 | }
132 | config.log.info(`Uploading ${targetDir} to IPFS network`)
133 | const storeOutput = await ipfs.store(targetDir)
134 | sha = ipfs.parse(storeOutput, now)
135 | config.log.info(`sha: ${sha}`)
136 | // add the sha to each measurement (previously this was re-applied once per result object)
137 | const _sha = sha || 'none'
138 | results.forEach((arrOfResultObjects) => {
139 | enrichResultsMetas(arrOfResultObjects, { sha: _sha })
140 | })
141 | isUploaded = true
142 | } catch (e) {
143 | config.log.error({ e }, 'Error storing on IPFS network')
144 | }
145 | config.log.debug(`Persisting results in DB`)
146 | for (let result of results) {
147 | config.log.debug(`DB store: ${JSON.stringify(result)}`)
148 | await persistence.store(result)
149 | }
150 | // cleanup tmpout
151 | if (isUploaded) {
152 | await rmfr(targetDir)
153 | config.log.info(`Files are uploaded to [https://cloudflare-ipfs.com/ipfs/${sha}]`)
154 | } else {
155 | config.log.error(`Files haven't been uploaded, not removing them from [${targetDir}]`)
156 | }
157 | }
158 | 
159 | module.exports = run
160 | --------------------------------------------------------------------------------