├── .gitignore
├── logger.js
├── .eslintrc.json
├── .github
│   └── workflows
│       └── lint.yml
├── autobench.yml
├── markdown-review.js
├── package.json
├── README.md
└── index.js
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | tags
3 | 
4 | # Testing purposes
5 | bench/
--------------------------------------------------------------------------------
/logger.js:
--------------------------------------------------------------------------------
1 | const debug = require('debug')
2 | 
3 | const logDebug = debug('autobench:debug')
4 | const logInfo = debug('autobench:info')
5 | const logError = debug('autobench:error')
6 | 
7 | module.exports = {
8 |   logDebug,
9 |   logInfo,
10 |   logError
11 | }
12 | 
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 |   "env": {
3 |     "commonjs": true,
4 |     "es2021": true,
5 |     "node": true
6 |   },
7 |   "extends": [
8 |     "standard"
9 |   ],
10 |   "parserOptions": {
11 |     "ecmaVersion": 12
12 |   },
13 |   "rules": {
14 |   }
15 | }
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 | on: pull_request
3 | 
4 | jobs:
5 |   lint:
6 |     name: lint
7 |     runs-on: ubuntu-latest
8 |     steps:
9 |       - uses: actions/checkout@v2
10 |       - uses: actions/setup-node@v2
11 |         with:
12 |           node-version: '14.x'
13 | 
14 |       - name: Install deps
15 |         run: yarn install
16 | 
17 |       - name: Run lint syntax validation
18 |         run: yarn run lint
--------------------------------------------------------------------------------
/autobench.yml:
--------------------------------------------------------------------------------
1 | name: 'Autobench Example'
2 | benchFolder: 'bench'
3 | url: 'http://localhost:3000'
4 | connections: 10
5 | pipelining: 1
6 | duration: 30 7 | benchmarks: 8 | - name: 'request 1' 9 | path: '/' 10 | method: 'POST' 11 | headers: 12 | Content-type: 'application/json' 13 | body: 14 | valid: true 15 | idReplacement: true 16 | 17 | - name: 'request 2' 18 | path: '/slow' 19 | method: 'GET' 20 | -------------------------------------------------------------------------------- /markdown-review.js: -------------------------------------------------------------------------------- 1 | class MarkdownReview { 2 | constructor () { 3 | this.reviewMessage = '' 4 | } 5 | 6 | addRequestChanges (route, output) { 7 | if (this.reviewMessage === '') { 8 | this.initRequestChangesMessage() 9 | } 10 | 11 | this.reviewMessage += ` 12 | --- 13 | The previous benchmark for ${route} was significantly performatic than from this PR. 14 | 15 | - **Router**: ${route} 16 | - **Requests Diff**: ${output.requests.difference} 17 | - **Throughput Diff**: ${output.throughput.difference} 18 | - **Latency Diff**: ${output.latency.difference} 19 | ` 20 | } 21 | 22 | initRequestChangesMessage () { 23 | this.reviewMessage = '## Performance Regression ⚠️\n' 24 | } 25 | 26 | hasReview () { 27 | return this.reviewMessage !== '' 28 | } 29 | } 30 | 31 | module.exports = { 32 | MarkdownReview 33 | } 34 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "autobench", 3 | "version": "1.4.0", 4 | "main": "index.js", 5 | "license": "MIT", 6 | "bin": { 7 | "autobench": "./index.js" 8 | }, 9 | "scripts": { 10 | "lint": "eslint .", 11 | "lint:fix": "eslint . 
--fix"
12 |   },
13 |   "repository": {
14 |     "type": "git",
15 |     "url": "git+https://github.com/RafaelGSS/autobench.git"
16 |   },
17 |   "author": "Rafael Gonzaga ",
18 |   "bugs": {
19 |     "url": "https://github.com/RafaelGSS/autobench/issues"
20 |   },
21 |   "homepage": "https://github.com/RafaelGSS/autobench#readme",
22 |   "dependencies": {
23 |     "ajv": "^7.2.3",
24 |     "autocannon": "^7.3.0",
25 |     "autocannon-compare": "^0.4.0",
26 |     "debug": "^4.3.1",
27 |     "js-yaml": "^4.0.0"
28 |   },
29 |   "devDependencies": {
30 |     "eslint": "^7.23.0",
31 |     "eslint-config-standard": "^16.0.2",
32 |     "eslint-plugin-import": "^2.22.1",
33 |     "eslint-plugin-node": "^11.1.0",
34 |     "eslint-plugin-promise": "^4.3.1"
35 |   }
36 | }
37 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # autobench
2 | 
3 | [![NPM version](https://img.shields.io/npm/v/autobench.svg?style=flat)](https://www.npmjs.com/package/autobench)
4 | [![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](https://standardjs.com/)
5 | 
6 | Automated benchmarking that avoids regressions in HTTP applications.
7 | 
8 | Wrap [`autocannon`](https://github.com/mcollina/autocannon) and [`autocannon-compare`](https://github.com/mcollina/autocannon-compare) in a box to automate and monitor HTTP routes.
9 | 
10 | ## Installation
11 | 
12 | This is a Node.js module available through the npm registry. It can be installed using the `npm` or `yarn` command line tools.
13 | 
14 | ```sh
15 | npm i autobench
16 | ```
17 | 
18 | or globally
19 | 
20 | ```sh
21 | npm i -g autobench
22 | ```
23 | 
24 | ## Usage
25 | 
26 | ```sh
27 | autobench 
28 | # or directly
29 | npx autobench 
30 | ```
31 | 
32 | Add the environment variable `DEBUG=autobench:*` to see the application logs. 
Example:
33 | 
34 | ```sh
35 | DEBUG=autobench:debug autobench compare
36 | DEBUG=autobench:info autobench compare
37 | DEBUG=autobench:* autobench compare
38 | ```
39 | 
40 | ### Config file
41 | 
42 | In order to use `autobench`, the project **must** have an `autobench.yml` as its config file.
43 | 
44 | The config file parameters are described below:
45 | 
46 | ```yaml
47 | # Name of project [OPTIONAL]
48 | name: 'Autobench Example'
49 | # Benchmarking folder to store and retrieve benchmarks. [REQUIRED]
50 | benchFolder: 'bench'
51 | # Root URL to perform the benchmarking. [REQUIRED] It can also be set via the `AUTOBENCH_URL` environment variable
52 | url: 'http://localhost:3000'
53 | # Number of connections. See https://github.com/mcollina/autocannon for further explanation. [OPTIONAL]
54 | connections: 10
55 | # Number of pipelining. See https://github.com/mcollina/autocannon for further explanation. [OPTIONAL]
56 | pipelining: 1
57 | # Duration of benchmark. See https://github.com/mcollina/autocannon for further explanation. [OPTIONAL]
58 | duration: 30
59 | # Group of routes to perform benchmarking. [REQUIRED]
60 | benchmarks:
61 |   # Benchmark route name. [REQUIRED]
62 |   - name: 'request 1'
63 |     # Route path. [REQUIRED]
64 |     path: '/'
65 |     # Method [OPTIONAL] - Default `GET`
66 |     method: 'POST'
67 |     # Headers to request [OPTIONAL]
68 |     headers:
69 |       Content-type: 'application/json'
70 |     # Body to request [OPTIONAL] - It's automatically parsed to a JSON object.
71 |     body:
72 |       example: 'true'
73 |       email: 'hey-[]@example.com'
74 |     # [OPTIONAL] when this field is set as `true` the `[]` is replaced with a generated HyperID at runtime
75 |     idReplacement: true
76 | 
77 |   - name: 'request 2'
78 |     path: '/slow'
79 | ```
80 | 
81 | See the [`autobench.yml`](./autobench.yml) file for examples.
82 | 
83 | ### Compare
84 | 
85 | Command to run the benchmark and compare it to the stored benchmark.
86 | It's required to have a previous benchmark stored in the `benchFolder`. 
See [Autobench Create](#create) to do it.
87 | 
88 | Options:
89 | 
90 | | Option | Description | Full command |
91 | | - | - | - |
92 | | -s | When a performance regression is identified, an `autobench-review.md` file is created with the summary | autobench compare -s |
93 | 
94 | ```sh
95 | autobench compare [-s]
96 | ```
97 | 
98 | The `autobench-review.md` looks like:
99 | 
100 | ```md
101 | ## Performance Regression ⚠️
102 | 
103 | ---
104 | The previous benchmark for request-1 was significantly more performant than this PR.
105 | 
106 | - **Router**: request-1
107 | - **Requests Diff**: 10%
108 | - **Throughput Diff**: 10%
109 | - **Latency Diff**: 10%
110 | 
111 | ---
112 | The previous benchmark for request-2 was significantly more performant than this PR.
113 | 
114 | - **Router**: request-2
115 | - **Requests Diff**: 20%
116 | - **Throughput Diff**: 20%
117 | - **Latency Diff**: 20%
118 | ```
119 | 
120 | ### Create
121 | 
122 | Command to store/override the results in the `benchFolder`.
123 | Usually, it should be used to update to the latest benchmarking result, for instance, after each merged PR.
124 | 
125 | ```sh
126 | autobench create
127 | ```
128 | 
129 | ## Examples
130 | 
131 | See [autobench-example](https://github.com/RafaelGSS/autobench-example) for further details.
132 | 
--------------------------------------------------------------------------------
/index.js:
--------------------------------------------------------------------------------
1 | #!
/usr/bin/env node 2 | 3 | 'use strict' 4 | 5 | const autocannon = require('autocannon') 6 | const compare = require('autocannon-compare') 7 | const Ajv = require('ajv').default 8 | const yaml = require('js-yaml') 9 | const fs = require('fs') 10 | const log = require('./logger') 11 | const { MarkdownReview } = require('./markdown-review') 12 | 13 | const ajv = new Ajv({ useDefaults: true }) 14 | 15 | function runBench (autocannonParams) { 16 | return new Promise((resolve, reject) => { 17 | autocannon(autocannonParams, (err, results) => { 18 | if (err) { 19 | reject(err) 20 | } else { 21 | resolve(results) 22 | } 23 | }) 24 | }) 25 | } 26 | 27 | function getPreviousBenchmark (benchFolder, name) { 28 | try { 29 | const previous = require(`${benchFolder}/${name}.json`) 30 | return previous 31 | } catch (e) { 32 | return undefined 33 | } 34 | } 35 | 36 | function normalizeBenchmarkName (name) { 37 | return name.replace(/ /g, '-') 38 | } 39 | 40 | function compareResults (results, benchFolder) { 41 | const review = new MarkdownReview() 42 | 43 | for (const [key, value] of results.entries()) { 44 | const previousBench = getPreviousBenchmark(benchFolder, key) 45 | if (previousBench) { 46 | const result = compare(previousBench, value) 47 | if (result.aWins) { 48 | review.addRequestChanges(key, result) 49 | } 50 | 51 | log.logDebug(`${key} new benchmark overall is ${result.equal ? 'equal' : result.aWins ? 'worst' : 'better'} to previous.`) 52 | } else { 53 | log.logInfo(`${key} doesn't has a previous benchmark to compare. 
Skipping.`) 54 | } 55 | } 56 | 57 | return review 58 | } 59 | 60 | function storeResults (results, benchFolder) { 61 | if (!fs.existsSync(benchFolder)) { 62 | log.logDebug(`Creating benchmark folder: '${benchFolder}'`) 63 | fs.mkdirSync(benchFolder) 64 | } 65 | 66 | for (const [key, value] of results.entries()) { 67 | fs.writeFileSync(`${benchFolder}/${key}.json`, JSON.stringify(value, null, 4)) 68 | } 69 | } 70 | 71 | function storeReview (review) { 72 | if (review.hasReview()) { 73 | fs.writeFileSync('./autobench-review.md', review.reviewMessage) 74 | } else { 75 | log.logInfo('Empty review.') 76 | } 77 | } 78 | 79 | function validateConfig (cfg) { 80 | const validate = ajv.compile({ 81 | type: 'object', 82 | properties: { 83 | name: { 84 | type: 'string' 85 | }, 86 | url: { 87 | type: 'string' 88 | }, 89 | benchFolder: { 90 | type: 'string' 91 | }, 92 | connections: { 93 | type: 'number', 94 | default: 10 95 | }, 96 | pipelining: { 97 | type: 'number', 98 | default: 1 99 | }, 100 | duration: { 101 | type: 'number', 102 | default: 30 103 | }, 104 | benchmarks: { 105 | type: 'array', 106 | items: { 107 | type: 'object', 108 | properties: { 109 | name: { 110 | type: 'string' 111 | }, 112 | path: { 113 | type: 'string' 114 | }, 115 | method: { 116 | type: 'string', 117 | default: 'GET' 118 | }, 119 | headers: { 120 | type: 'object' 121 | }, 122 | body: { 123 | type: 'object' 124 | }, 125 | idReplacement: { 126 | type: 'boolean', 127 | default: false 128 | } 129 | }, 130 | required: ['name', 'path', 'method'] 131 | } 132 | } 133 | }, 134 | required: ['name', 'url', 'benchmarks', 'benchFolder', 'duration', 'pipelining', 'connections'] 135 | }) 136 | 137 | if (validate(cfg)) { 138 | return cfg 139 | } else { 140 | log.logError('The autobench config file has errors', validate.errors) 141 | process.exit(1) 142 | } 143 | } 144 | 145 | function parseConfig () { 146 | try { 147 | const cfg = yaml.load(fs.readFileSync('./autobench.yml')) 148 | if (!cfg.url) { 149 | if 
(!process.env.AUTOBENCH_URL) { 150 | log.logError('URL not provided. You should provide the `url` in autobench config or by AUTOBENCH_URL env variable.') 151 | process.exit(1) 152 | } 153 | cfg.url = process.env.AUTOBENCH_URL 154 | } 155 | 156 | validateConfig(cfg) 157 | return cfg 158 | } catch (e) { 159 | log.logError('Not found `autobench.yml` file.') 160 | process.exit(1) 161 | } 162 | } 163 | 164 | async function main () { 165 | const args = process.argv.slice(2) 166 | 167 | if (args.length < 1) { 168 | console.error('Usage: autobench [compare | create]') 169 | process.exit(127) 170 | } 171 | 172 | if (args[0] !== 'create' && args[0] !== 'compare') { 173 | console.error(`Option ${args[0]} not recognized!`) 174 | process.exit(127) 175 | } 176 | 177 | const config = parseConfig() 178 | const results = new Map() 179 | for (const instanceCfg of config.benchmarks) { 180 | const benchConfig = { 181 | url: config.url, 182 | connections: config.connections, 183 | pipelining: config.pipelining, 184 | duration: config.duration, 185 | requests: [ 186 | { 187 | method: instanceCfg.method, 188 | headers: instanceCfg.headers, 189 | path: instanceCfg.path, 190 | body: JSON.stringify(instanceCfg.body) 191 | } 192 | ], 193 | idReplacement: instanceCfg.idReplacement 194 | } 195 | log.logDebug(`Running ${instanceCfg.name} with config: ${JSON.stringify(benchConfig, null, 2)}`) 196 | const result = await runBench(benchConfig) 197 | results.set(normalizeBenchmarkName(instanceCfg.name), result) 198 | } 199 | 200 | if (args[0] === 'create') { 201 | storeResults(results, `${process.cwd()}/${config.benchFolder}`) 202 | } 203 | 204 | if (args[0] === 'compare') { 205 | const review = compareResults(results, `${process.cwd()}/${config.benchFolder}`) 206 | log.logInfo(review.reviewMessage) 207 | if (args[1] === '-s') { 208 | storeReview(review) 209 | } 210 | } 211 | 212 | log.logInfo('Done!') 213 | } 214 | 215 | main() 216 | 
--------------------------------------------------------------------------------