├── .travis.yml ├── test └── 10.sync.js ├── lib ├── sync.js ├── benchmark.js ├── async.js └── result.js ├── package.json ├── index.js ├── LICENSE ├── .gitignore └── README.md /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - '6' 4 | - '7' 5 | - '8' 6 | - 'node' 7 | -------------------------------------------------------------------------------- /test/10.sync.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const { expect } = require('chai'); 3 | 4 | describe('Synchronous benchmarks', function () { 5 | it('should work'); 6 | }); 7 | -------------------------------------------------------------------------------- /lib/sync.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | module.exports = (fn, setup, duration) => { 4 | const { hrtime } = process; 5 | const noop = () => {}; 6 | const samples = []; 7 | duration += Date.now(); 8 | 9 | do { 10 | setup(); 11 | const t0 = hrtime(); 12 | noop(); // Measure the noop time so we can factor it out 13 | const t1 = hrtime(); 14 | fn(); 15 | const t2 = hrtime(); 16 | samples.push(t2[1] - t1[1] + (t2[0] - t1[0]) * 1e9 - (t1[1] - t0[1] + (t1[0] - t0[0]) * 1e9)); 17 | } while (Date.now() < duration); 18 | 19 | return samples; 20 | }; 21 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nodemark", 3 | "version": "0.3.0", 4 | "description": "A modern benchmarking library for Node.js", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "$(npm bin)/mocha --exit" 8 | }, 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/JoshuaWise/nodemark.git" 12 | }, 13 | "keywords": [ 14 | "benchmark", 15 | "performance", 16 | "perf", 17 | "benchmarkjs", 18 | "speed", 19 | "test" 
20 | ], 21 | "author": "Joshua Wise ", 22 | "license": "MIT", 23 | "bugs": { 24 | "url": "https://github.com/JoshuaWise/nodemark/issues" 25 | }, 26 | "homepage": "https://github.com/JoshuaWise/nodemark#readme", 27 | "devDependencies": { 28 | "chai": "^4.1.2", 29 | "mocha": "^5.0.0" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const benchmark = require('./lib/benchmark'); 3 | const makeAsync = fn => cb => { fn(); cb(); }; 4 | 5 | module.exports = (fn, setup, duration) => { 6 | if (typeof fn !== 'function') throw new TypeError('Expected benchmark subject to be a function'); 7 | if (setup == null) setup = fn.length ? cb => { cb(); } : () => {}; 8 | if (duration == null) duration = 3300; 9 | if (typeof setup !== 'function') throw new TypeError('Expected benchmark setup to be a function'); 10 | if (!Number.isInteger(duration)) throw new TypeError('Expected benchmark duration to be an integer'); 11 | if (duration <= 0) throw new TypeError('Expected benchmark duration to be positive'); 12 | if (!fn.length && !setup.length) return benchmark.sync(fn, setup, duration); 13 | if (!fn.length) fn = makeAsync(fn); 14 | if (!setup.length) setup = makeAsync(setup); 15 | return benchmark.async(fn, setup, duration); 16 | }; 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Joshua Wise 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit 
persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (http://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # Typescript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | 
.yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | 60 | -------------------------------------------------------------------------------- /lib/benchmark.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const Result = require('./result'); 3 | const async = require('./async'); 4 | const sync = require('./sync'); 5 | 6 | exports.sync = (fn, setup, duration) => { 7 | const allSamples = []; 8 | let totalCount = 0; 9 | duration += Date.now(); 10 | 11 | // We measure in chunks of 100ms to periodically work the garbage collector. 12 | // This acts like a "dither" to smooth out the fluctuating memory usage caused 13 | // by the benchmarking engine itself (https://en.wikipedia.org/wiki/Dither). 14 | do { 15 | sync(() => {}, () => {}, 10); // Fake/dither benchmark 16 | const samples = sync(fn, setup, 100); // Actual benchmark 17 | allSamples.push(samples); 18 | totalCount += samples.length; 19 | } while (Date.now() < duration || totalCount < 10) 20 | 21 | return new Result([].concat(...allSamples)); 22 | }; 23 | 24 | exports.async = (fn, setup, duration) => { 25 | const allSamples = []; 26 | let totalCount = 0; 27 | duration += Date.now(); 28 | 29 | // Here we do the same thing as commented in sync(). 
30 | const dither = () => async(cb => { cb(); }, cb => { cb(); }, 10); 31 | const actual = () => async(fn, setup, 100); 32 | const loop = (samples) => { 33 | allSamples.push(samples); 34 | totalCount += samples.length; 35 | if (Date.now() < duration || totalCount < 10) return dither().then(actual).then(loop); 36 | return new Result([].concat(...allSamples)); 37 | }; 38 | 39 | return dither().then(actual).then(loop); 40 | }; 41 | 42 | -------------------------------------------------------------------------------- /lib/async.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const async = (fn, setup, duration, callback) => { 4 | const { hrtime, nextTick } = process; 5 | const noop = cb => { cb(); }; 6 | const samples = []; 7 | duration += Date.now(); 8 | 9 | let t0; 10 | let t2; 11 | let noise = 0; 12 | let currentId = 0; 13 | 14 | const setupDone = (id) => (err) => { 15 | if (id !== currentId) return callback(new TypeError('The benchmark setup callback was invoked twice')); 16 | if (err) return callback(err); 17 | const noopCallback = noopDone(++currentId); 18 | t0 = hrtime(); 19 | noop(noopCallback); 20 | }; 21 | 22 | // We measure the noop function so we can factor it out later. 23 | // Much of the logic in this function is unnecessary for the noop case, but we 24 | // keep it to ensure that v8 doesn't optimize this function too differently. 
25 | const noopDone = (fake1) => (fake2) => { 26 | const t1 = hrtime(); 27 | if (fake1 !== currentId) return callback(new TypeError('THIS SHOULD NEVER HAPPEN')); 28 | if (fake2) return callback(fake2); 29 | noise = t1[1] - t0[1] + (t1[0] - t0[0]) * 1e9; 30 | const subjectCallback = subjectDone(++currentId); 31 | t2 = hrtime(); 32 | fn(subjectCallback); 33 | }; 34 | 35 | const subjectDone = (id) => (err) => { 36 | const t3 = hrtime(); 37 | if (id !== currentId) return callback(new TypeError('The benchmark subject callback was invoked twice')); 38 | if (err) return callback(err); 39 | samples.push(t3[1] - t2[1] + (t3[0] - t2[0]) * 1e9 - noise); 40 | currentId += 1; 41 | if (Date.now() < duration) nextTick(runIteration); 42 | else callback(null, samples); 43 | }; 44 | 45 | const runIteration = () => { setup(setupDone(currentId)); }; 46 | nextTick(runIteration); 47 | }; 48 | 49 | module.exports = (fn, setup, duration) => { 50 | let callback; 51 | const promise = new Promise((resolve, reject) => { callback = 52 | (err, samples) => err ? reject(err) : resolve(samples); }); 53 | async(fn, setup, duration, callback); 54 | return promise; 55 | }; 56 | -------------------------------------------------------------------------------- /lib/result.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const util = require('util'); 3 | const fMax = (a, b) => a > b ? a : b; 4 | const fMin = (a, b) => a < b ? 
a : b; 5 | const fSum = (a, b) => a + b; 6 | const fSumSqDiff = u => (a, b) => a + (b - u) * (b - u); 7 | const getMean = arr => arr.reduce(fSum, 0) / arr.length; 8 | const getMgnError = (arr, mean) => Math.sqrt(arr.reduce(fSumSqDiff(mean), 0)) / arr.length * 2; 9 | 10 | class BenchmarkResult { 11 | constructor(samples) { 12 | samples.splice(0, 5); // Remove samples gathered before v8 optimization 13 | this.mean = getMean(samples); 14 | this.error = getMgnError(samples, this.mean) / this.mean; 15 | this.max = Math.max(0, samples.reduce(fMax)); 16 | this.min = Math.max(0, samples.reduce(fMin)); 17 | this.count = samples.length; 18 | if (!(this.mean >= 10)) this.mean = this.error = this.max = this.min = NaN; 19 | Object.freeze(this); 20 | } 21 | nanoseconds(precision = 0) { 22 | return round(this.mean, precision); 23 | } 24 | microseconds(precision = 0) { 25 | return round(this.mean / 1e3, precision); 26 | } 27 | milliseconds(precision = 0) { 28 | return round(this.mean / 1e6, precision); 29 | } 30 | seconds(precision = 0) { 31 | return round(this.mean / 1e9, precision); 32 | } 33 | hz(precision = 0) { 34 | return round(1e9 / this.mean, precision); 35 | } 36 | sd(precision = 0) { 37 | return round(this.error * this.mean * Math.sqrt(this.count) / 2, precision); 38 | } 39 | toString(format = 'hz') { 40 | if (!unitMap[format]) throw new TypeError(`Unrecognized toString format: ${format}`); 41 | const value = String(this[format]()).replace(/\B(?=(?:\d{3})+$)/g, ','); 42 | const error = Math.round(this.error * 10000) / 100; 43 | return `${value}${unitMap[format]} \xb1${error}% (${this.count} samples)`; 44 | } 45 | [util.inspect.custom]() { 46 | return this.toString(); 47 | } 48 | } 49 | 50 | const unitMap = Object.assign(Object.create(null), { 51 | nanoseconds: 'ns', 52 | microseconds: '\u03bcs', 53 | milliseconds: 'ms', 54 | seconds: 's', 55 | hz: ' ops/sec', 56 | }); 57 | 58 | const round = (n, e) => { 59 | if (e >>> 0 !== e || e > 6) throw new TypeError('Rounding 
precision must be an integer between 0 and 6'); 60 | const p = Math.pow(10, e); 61 | return Math.round(n * p) / p; 62 | }; 63 | 64 | module.exports = BenchmarkResult; 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # nodemark [![Build Status](https://travis-ci.org/JoshuaWise/nodemark.svg?branch=master)](https://travis-ci.org/JoshuaWise/nodemark) 2 | A modern benchmarking library for Node.js, capable of generating statistically significant results. 3 | 4 | ## Installation 5 | ``` 6 | npm install --save-dev nodemark 7 | ``` 8 | 9 | ## Usage 10 | ```js 11 | const benchmark = require('nodemark'); 12 | 13 | const result = benchmark(myFunction, setupFunction); 14 | console.log(result); // => 14,114,886 ops/sec ±0.58% (7906233 samples) 15 | console.log(result.nanoseconds()); // => 71 16 | ``` 17 | 18 | ## Statistical Significance 19 | 20 | In benchmarking, it's important to generate statistically significant results. Thankfully, `nodemark` makes this easy: 21 | 22 | * The *margin of error* is calculated for you. 23 | * The noise caused by `nodemark` is factored out of the results. 24 | * The garbage collector is manipulated to prevent early runs from having an unfair advantage. 25 | * Executions done before v8 has a chance to optimize things ([JIT](https://en.wikipedia.org/wiki/Just-in-time_compilation)) are ignored. 26 | 27 | The combination of these things makes it a highly accurate measuring device. However, any benchmark done in JavaScript has its limits. If the average time measured by a benchmark is too small to be reliable (< 10ns), the results will be `NaN` in order to avoid providing misleading information. 28 | 29 | # API 30 | 31 | ## benchmark(*subject*, [*setup*, [*duration*]]) -> *benchmarkResult* 32 | 33 | Runs a new benchmark. This measures the performance of the `subject` function. 
If a `setup` function is provided, it will be invoked before every execution of `subject`. 34 | 35 | By default, the benchmark runs for about 3 seconds, but this can be overridden by passing a `duration` number (in milliseconds). Regardless of the desired duration, the benchmark will not finish until the `subject` has been run at least 10 times. 36 | 37 | Both `subject` and `setup` can run asynchronously by declaring a callback argument in their signature. If you do this, you must invoke the callback to indicate that the operation is complete. When running an asynchronous benchmark, this function returns a promise. However, because `subject` and `setup` use callbacks rather than promises, synchronous errors will not automatically be caught. 38 | 39 | ```js 40 | benchmark(callback => fs.readFile('foo.txt', callback)) 41 | .then(console.log); 42 | ``` 43 | 44 | > There is no plan to support promises in `subject` and `setup` because it would cause too much overhead and yield inaccurate results. 45 | 46 | ## class *BenchmarkResult* 47 | 48 | Each benchmark returns an immutable object describing the result of that benchmark. It has five properties: 49 | 50 | * `mean`, the average measured time in nanoseconds 51 | * `error`, the margin of error as a ratio of the mean 52 | * `max`, the slowest measured time in nanoseconds 53 | * `min`, the fastest measured time in nanoseconds 54 | * `count`, the number of times the subject was invoked and measured 55 | 56 | ### .nanoseconds([*precision*]) -> *number* 57 | 58 | Returns `this.mean`, rounded to the nearest whole number or the number of decimal places specified by `precision`. 59 | 60 | ### .microseconds([*precision*]) -> *number* 61 | 62 | Same as [.nanoseconds()](#nanosecondsprecision---number), but the value is in microseconds. 63 | 64 | ### .milliseconds([*precision*]) -> *number* 65 | 66 | Same as [.nanoseconds()](#nanosecondsprecision---number), but the value is in milliseconds. 
67 | 68 | ### .seconds([*precision*]) -> *number* 69 | 70 | Same as [.nanoseconds()](#nanosecondsprecision---number), but the value is in seconds. 71 | 72 | ### .hz([*precision*]) -> *number* 73 | 74 | Returns the average number of executions per second, rounded to the nearest whole number or the number of decimal places specified by `precision`. 75 | 76 | ### .sd([*precision*]) -> *number* 77 | 78 | Returns the standard deviation in nanoseconds, rounded to the nearest whole number or the number of decimal places specified by `precision`. 79 | 80 | ### .toString([*format*]) -> *string* 81 | 82 | Returns a nicely formatted string describing the result of the benchmark. By default, the `"hz"` format is used, which displays ops/sec, but you can optionally specify `"nanoseconds"`, `"microseconds"`, `"milliseconds"`, or `"seconds"` to change the displayed information. 83 | 84 | ## License 85 | 86 | [MIT](https://github.com/JoshuaWise/nodemark/blob/master/LICENSE) 87 | --------------------------------------------------------------------------------