├── .eslintignore ├── .eslintrc.json ├── .github └── workflows │ └── tests.yaml ├── .gitignore ├── .npmignore ├── .prettierignore ├── .prettierrc.json ├── CHANGELOG.md ├── LICENSE ├── README.md ├── bin └── tach.js ├── client ├── src │ └── bench.ts └── tsconfig.json ├── config.schema.json ├── images ├── about-tracing-small.png ├── screen1.png └── screen2.png ├── package-lock.json ├── package.json ├── src ├── browser.ts ├── cli.ts ├── config.ts ├── configfile.ts ├── csv.ts ├── defaults.ts ├── flags.ts ├── format.ts ├── github.ts ├── install.ts ├── json-output.ts ├── manual.ts ├── measure.ts ├── runner.ts ├── server.ts ├── specs.ts ├── stats.ts ├── test │ ├── browser_test.ts │ ├── config_test.ts │ ├── configfile_test.ts │ ├── csv_test.ts │ ├── data │ │ ├── 1_byte.txt │ │ ├── 3_bytes.txt │ │ ├── alt_npm_install_dir │ │ │ ├── node_modules │ │ │ │ └── dep1 │ │ │ │ │ ├── dep1-main.js │ │ │ │ │ └── package.json │ │ │ └── package.json │ │ ├── cpu-throttling-rate.json │ │ ├── delayed-callback.html │ │ ├── delayed-fcp.html │ │ ├── deprecated-horizons.json │ │ ├── for-loop.html │ │ ├── import-bare-module.html │ │ ├── import-bare-module.js │ │ ├── invalid-js.html │ │ ├── invalid-js.js │ │ ├── measurement-expression.json │ │ ├── multiple-measurements.html │ │ ├── multiple-measurements.json │ │ ├── mylib │ │ │ ├── mybench │ │ │ │ └── index.html │ │ │ ├── noindex │ │ │ │ └── other.html │ │ │ └── package.json │ │ ├── node_modules │ │ │ └── dep1 │ │ │ │ ├── dep1.js │ │ │ │ └── package.json │ │ ├── performance-measure.html │ │ ├── performance-measure.json │ │ ├── random-global.html │ │ ├── random-global.json │ │ ├── tracing-config.json │ │ └── window-size.html │ ├── e2e_test.ts │ ├── flags_test.ts │ ├── format_test.ts │ ├── install_test.ts │ ├── json-output_test.ts │ ├── server_test.ts │ ├── specs_test.ts │ ├── stats_test.ts │ ├── test_helpers.ts │ └── versions_test.ts ├── types.ts ├── types │ └── jstat │ │ └── index.d.ts ├── util.ts └── versions.ts ├── test └── mocha.opts ├── tsconfig-base.json └── tsconfig.json /.eslintignore: -------------------------------------------------------------------------------- 1 | /lib/ 2 | /client/lib/ 3 | /src/test/data/ 4 | /config.schema.json -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "eslint:recommended", 4 | "plugin:@typescript-eslint/eslint-recommended", 5 | "plugin:@typescript-eslint/recommended" 6 | ], 7 | "parser": "@typescript-eslint/parser", 8 | "parserOptions": { 9 | "ecmaVersion": 2020, 10 | "sourceType": "module" 11 | }, 12 | "plugins": ["@typescript-eslint", "eslint-plugin-no-only-tests"], 13 | "env": { 14 | "node": true 15 | }, 16 | "rules": { 17 | "@typescript-eslint/explicit-module-boundary-types": "off", 18 | "@typescript-eslint/no-non-null-assertion": "off", 19 | "no-constant-condition": "off", 20 | "no-empty": "off" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | test: 7 | strategy: 8 | matrix: 9 | job: [linux, macos] 10 | include: 11 | - job: linux 12 | os: ubuntu-latest 13 | browsers: chrome-headless, firefox-headless 14 | 15 | - job: macos 16 | os: macos-12 17 | browsers: safari 18 | 19 | runs-on: ${{ matrix.os }} 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 
| - uses: actions/setup-node@v3 24 | with: 25 | node-version: 16 26 | cache: npm 27 | 28 | - run: npm ci 29 | - run: npm run build 30 | 31 | - run: npm test 32 | env: 33 | TACHOMETER_E2E_TEST_BROWSERS: ${{ matrix.browsers }} 34 | TACHOMETER_E2E_TEST_SHOW_OUTPUT: true 35 | 36 | lint: 37 | runs-on: ubuntu-latest 38 | steps: 39 | - uses: actions/checkout@v3 40 | - uses: actions/setup-node@v3 41 | with: 42 | node-version: 16 43 | cache: npm 44 | - run: npm ci 45 | - run: npm run lint 46 | 47 | check-format: 48 | runs-on: ubuntu-latest 49 | steps: 50 | - uses: actions/checkout@v3 51 | - uses: actions/setup-node@v3 52 | with: 53 | node-version: 16 54 | cache: npm 55 | - run: npm ci 56 | - run: npm run format:check 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /node_modules/ 2 | lib/ 3 | .idea 4 | .vscode 5 | .DS_Store 6 | npm-debug.log 7 | *.tgz 8 | *.code-workspace 9 | logs -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | test/ 2 | src/test/ 3 | lib/test/ 4 | .prettierignore 5 | .prettierrc.json 6 | tsconfig-base.json 7 | tsconfig.json 8 | .eslintignore 9 | .eslintrc.json 10 | *.tgz 11 | *.code-workspace -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | /lib/ 2 | /client/lib/ 3 | /src/test/data/logs/ 4 | /src/test/data/invalid-js.js 5 | /config.schema.json -------------------------------------------------------------------------------- /.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "bracketSpacing": false 4 | } 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2019 Google LLC. All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /bin/tach.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * @license 5 | * Copyright 2019 Google LLC 6 | * SPDX-License-Identifier: BSD-3-Clause 7 | */ 8 | 9 | import {main} from '../lib/cli.js'; 10 | main(process.argv); 11 | -------------------------------------------------------------------------------- /client/src/bench.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2018 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | // Note: sync with runner/src/types.ts 8 | interface BenchmarkResponse { 9 | millis: number; 10 | } 11 | 12 | let startTime: number; 13 | export function start() { 14 | startTime = performance.now(); 15 | } 16 | 17 | export async function stop() { 18 | const end = performance.now(); 19 | const runtime = end - startTime; 20 | console.log('benchmark runtime', runtime, 'ms'); 21 | const response: BenchmarkResponse = { 22 | millis: runtime, 23 | }; 24 | fetch('/submitResults', { 25 | method: 'POST', 26 | headers: { 27 | 'Content-Type': 'application/json', 28 | }, 29 | body: JSON.stringify(response), 30 | }); 31 | } 32 | -------------------------------------------------------------------------------- /client/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig-base.json", 3 | "compilerOptions": { 4 | "outDir": "./lib", 5 | "lib": ["es2017", "esnext.asynciterable", "dom"] 6 | }, 7 | "include": ["src/**/*.ts"] 8 | } 9 | -------------------------------------------------------------------------------- /images/about-tracing-small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/tachometer/1d8775cfa4f09cc1afb5ffbd07b4ac37f3cc9e10/images/about-tracing-small.png -------------------------------------------------------------------------------- /images/screen1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/tachometer/1d8775cfa4f09cc1afb5ffbd07b4ac37f3cc9e10/images/screen1.png -------------------------------------------------------------------------------- /images/screen2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/tachometer/1d8775cfa4f09cc1afb5ffbd07b4ac37f3cc9e10/images/screen2.png -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "tachometer", 3 | "version": "0.7.1", 4 | "description": "Web benchmark runner", 5 | "main": "lib/cli.js", 6 | "directories": { 7 | "lib": "lib" 8 | }, 9 | "type": "module", 10 | "bin": { 11 | "tach": "bin/tach.js", 12 
| "tachometer": "bin/tach.js" 13 | }, 14 | "scripts": { 15 | "prepare": "if [ -f './tsconfig.json' ]; then npm run build; fi;", 16 | "build": "rimraf lib/ client/lib/ && mkdir lib && npm run generate-json-schema && tsc && tsc -p client/ && npm run lint", 17 | "generate-json-schema": "typescript-json-schema tsconfig.json ConfigFile --include src/configfile.ts --required --noExtraProps > config.schema.json", 18 | "lint": "eslint .", 19 | "format": "prettier --write .", 20 | "format:check": "prettier --check .", 21 | "test": "npm run test:unit && npm run test:e2e", 22 | "test:unit": "mocha \"lib/test/**/*_test.js\" --grep \".*e2e.*\" --invert", 23 | "test:e2e": "mocha \"lib/test/**/*_test.js\" --grep \".*e2e.*\"" 24 | }, 25 | "repository": { 26 | "type": "git", 27 | "url": "git+https://github.com/Polymer/tachometer.git" 28 | }, 29 | "author": "Google LLC", 30 | "license": "BSD-3-Clause", 31 | "bugs": { 32 | "url": "https://github.com/Polymer/tachometer/issues" 33 | }, 34 | "homepage": "https://github.com/Polymer/tachometer#readme", 35 | "installsOnDemand": [ 36 | "iedriver", 37 | "geckodriver", 38 | "chromedriver" 39 | ], 40 | "dependencies": { 41 | "ansi-escape-sequences": "^6.0.1", 42 | "command-line-args": "^5.0.2", 43 | "command-line-usage": "^6.1.0", 44 | "csv-stringify": "^6.2.0", 45 | "fs-extra": "^10.0.0", 46 | "get-stream": "^6.0.0", 47 | "got": "^12.1.0", 48 | "jsonschema": "^1.4.0", 49 | "jsonwebtoken": "^9.0.0", 50 | "jstat": "^1.9.2", 51 | "koa": "^2.11.0", 52 | "koa-bodyparser": "^4.2.1", 53 | "koa-mount": "^4.0.0", 54 | "koa-node-resolve": "^1.0.0-pre.8", 55 | "koa-send": "^5.0.0", 56 | "koa-static": "^5.0.0", 57 | "pkg-install": "^1.0.0", 58 | "pkg-up": "^4.0.0", 59 | "progress": "^2.0.3", 60 | "sanitize-filename": "^1.6.3", 61 | "selenium-webdriver": "^4.0.0-alpha.8", 62 | "semver": "^7.1.1", 63 | "source-map-support": "^0.5.16", 64 | "strip-ansi": "^7.0.1", 65 | "systeminformation": "^5.3.3", 66 | "table": "^6.0.7", 67 | "ua-parser-js": "^1.0.2" 68 | }, 69 | "devDependencies": { 70 | "@types/ansi-escape-sequences": "^4.0.0", 71 | "@types/babel__generator": "^7.6.1", 72 | "@types/chai": "^4.2.4", 73 | "@types/chai-as-promised": "^7.1.2", 74 | "@types/command-line-args": "^5.0.0", 75 | "@types/command-line-usage": "^5.0.1", 76 | "@types/fs-extra": "^9.0.1", 77 | "@types/got": "^9.6.8", 78 | "@types/jsonwebtoken": "^9.0.0", 79 | "@types/koa": "^2.0.51", 80 | "@types/koa-bodyparser": "^4.2.1", 81 | "@types/koa-mount": "^4.0.0", 82 | "@types/koa-send": "^4.1.2", 83 | "@types/koa-static": "^4.0.0", 84 | "@types/mocha": "^9.0.0", 85 | "@types/node-fetch": "^2.5.3", 86 | "@types/progress": "^2.0.3", 87 | "@types/rimraf": "^3.0.0", 88 | "@types/selenium-webdriver": "^4.0.11", 89 | "@types/semver": "^7.3.1", 90 | "@types/source-map-support": "^0.5.4", 91 | "@types/ua-parser-js": "^0.7.32", 92 | "@typescript-eslint/eslint-plugin": "^5.30.6", 93 | "@typescript-eslint/parser": "^5.30.6", 94 | "chai": "^4.2.0", 95 | "chai-as-promised": "^7.1.1", 96 | "eslint": "^8.19.0", 97 | "eslint-plugin-no-only-tests": "^2.6.0", 98 | "mocha": "^10.0.0", 99 | "node-fetch": "^3.2.8", 100 | "prettier": "^2.4.1", 101 | "rimraf": "^3.0.2", 102 | "typescript": "^4.0.2", 103 | "typescript-json-schema": "^0.54.0" 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/browser.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 
| */ 6 | 7 | import * as webdriver from 'selenium-webdriver'; 8 | import * as chrome from 'selenium-webdriver/chrome.js'; 9 | import * as edge from 'selenium-webdriver/edge.js'; 10 | import * as firefox from 'selenium-webdriver/firefox.js'; 11 | 12 | import {installOnDemand} from './install.js'; 13 | import {isHttpUrl} from './util.js'; 14 | 15 | /** Tachometer browser names. Often but not always equal to WebDriver's. */ 16 | export type BrowserName = 'chrome' | 'firefox' | 'safari' | 'edge' | 'ie'; 17 | 18 | /** Browsers we can drive. */ 19 | export const supportedBrowsers = new Set([ 20 | 'chrome', 21 | 'firefox', 22 | 'safari', 23 | 'edge', 24 | 'ie', 25 | ]); 26 | 27 | type WebdriverModuleName = 'chromedriver' | 'geckodriver' | 'iedriver'; 28 | 29 | // Note that the edgedriver package doesn't work on recent versions of 30 | // Windows 10, so users must manually install following Microsoft's 31 | // documentation. 32 | const browserWebdriverModules = new Map([ 33 | ['chrome', 'chromedriver'], 34 | ['firefox', 'geckodriver'], 35 | ['ie', 'iedriver'], 36 | ]); 37 | 38 | /** Cases where Tachometer's browser name scheme does not equal WebDriver's. */ 39 | const webdriverBrowserNames = new Map([ 40 | ['edge', 'MicrosoftEdge'], 41 | ['ie', 'internet explorer'], 42 | ]); 43 | 44 | /** Browsers that support headless mode. */ 45 | const headlessBrowsers = new Set(['chrome', 'firefox']); 46 | 47 | /** Browsers for which we can find the first contentful paint (FCP) time. */ 48 | export const fcpBrowsers = new Set(['chrome']); 49 | 50 | export interface BrowserConfig { 51 | /** Name of the browser. */ 52 | name: BrowserName; 53 | /** Whether to run in headless mode. */ 54 | headless: boolean; 55 | /** A remote WebDriver server to launch the browser from. */ 56 | remoteUrl?: string; 57 | /** Launch the browser window with these dimensions. */ 58 | windowSize: WindowSize; 59 | /** Path to custom browser binary. */ 60 | binary?: string; 61 | /** Additional binary arguments. */ 62 | addArguments?: string[]; 63 | /** WebDriver default binary arguments to omit. */ 64 | removeArguments?: string[]; 65 | /** CPU Throttling rate. (1 is no throttle, 2 is 2x slowdown, etc). */ 66 | cpuThrottlingRate?: number; 67 | /** Advanced preferences usually set from the about:config page. */ 68 | preferences?: {[name: string]: string | number | boolean}; 69 | /** Trace browser performance logs configuration */ 70 | trace?: TraceConfig; 71 | /** Path to profile directory to use instead of the default fresh one. */ 72 | profile?: string; 73 | } 74 | 75 | /** 76 | * Configuration to turn on performance tracing 77 | */ 78 | export interface TraceConfig { 79 | /** 80 | * The tracing categories the browser should log 81 | */ 82 | categories: string[]; 83 | 84 | /** 85 | * The directory to log performance traces to 86 | */ 87 | logDir: string; 88 | } 89 | 90 | export interface WindowSize { 91 | width: number; 92 | height: number; 93 | } 94 | 95 | /** 96 | * Create a deterministic unique string key for the given BrowserConfig. 97 | */ 98 | export function browserSignature(config: BrowserConfig): string { 99 | return JSON.stringify([ 100 | config.name, 101 | config.headless, 102 | config.remoteUrl ?? '', 103 | config.windowSize.width, 104 | config.windowSize.height, 105 | config.binary ?? '', 106 | config.addArguments ?? [], 107 | config.removeArguments ?? [], 108 | config.cpuThrottlingRate ?? 1, 109 | config.preferences ?? {}, 110 | config.profile ?? 
'', 111 | ]); 112 | } 113 | 114 | type BrowserConfigWithoutWindowSize = Pick< 115 | BrowserConfig, 116 | Exclude 117 | >; 118 | 119 | /** 120 | * Parse and validate a browser string specification. Examples: 121 | * 122 | * chrome 123 | * chrome-headless 124 | * chrome@ 125 | */ 126 | export function parseBrowserConfigString( 127 | str: string 128 | ): BrowserConfigWithoutWindowSize { 129 | let remoteUrl; 130 | const at = str.indexOf('@'); 131 | if (at !== -1) { 132 | remoteUrl = str.substring(at + 1); 133 | str = str.substring(0, at); 134 | } 135 | const headless = str.endsWith('-headless'); 136 | if (headless === true) { 137 | str = str.replace(/-headless$/, ''); 138 | } 139 | const name = str as BrowserName; 140 | const config: BrowserConfigWithoutWindowSize = {name, headless}; 141 | if (remoteUrl !== undefined) { 142 | config.remoteUrl = remoteUrl; 143 | } 144 | return config; 145 | } 146 | 147 | /** 148 | * Throw if any property of the given BrowserConfig is invalid. 149 | */ 150 | export function validateBrowserConfig({ 151 | name, 152 | headless, 153 | remoteUrl, 154 | windowSize, 155 | }: BrowserConfig) { 156 | if (!supportedBrowsers.has(name)) { 157 | throw new Error( 158 | `Browser ${name} is not supported, ` + 159 | `only ${[...supportedBrowsers].join(', ')} are currently supported.` 160 | ); 161 | } 162 | if (headless === true && !headlessBrowsers.has(name)) { 163 | throw new Error(`Browser ${name} does not support headless mode.`); 164 | } 165 | if (remoteUrl !== undefined && !isHttpUrl(remoteUrl)) { 166 | throw new Error(`Invalid browser remote URL "${remoteUrl}".`); 167 | } 168 | if (windowSize.width < 0 || windowSize.height < 0) { 169 | throw new Error(`Invalid window size, width and height must be >= 0.`); 170 | } 171 | } 172 | 173 | /** 174 | * Configure a WebDriver suitable for benchmarking the given browser. 175 | */ 176 | export async function makeDriver( 177 | config: BrowserConfig 178 | ): Promise { 179 | const browserName: BrowserName = config.name; 180 | const webdriverModuleName = browserWebdriverModules.get(browserName); 181 | 182 | if (webdriverModuleName != null) { 183 | await installOnDemand(webdriverModuleName); 184 | await import(webdriverModuleName); 185 | } 186 | 187 | const builder = new webdriver.Builder(); 188 | const webdriverName = webdriverBrowserNames.get(config.name) || config.name; 189 | builder.forBrowser(webdriverName); 190 | builder.setChromeOptions(chromeOpts(config)); 191 | builder.setFirefoxOptions(firefoxOpts(config)); 192 | if (config.remoteUrl !== undefined) { 193 | builder.usingServer(config.remoteUrl); 194 | } else if (config.name === 'edge') { 195 | // There appears to be bug where WebDriver doesn't automatically start or 196 | // find an Edge service and throws "Cannot read property 'start' of null" 197 | // so we need to start the service ourselves. 198 | // See https://stackoverflow.com/questions/48577924. 199 | builder.setEdgeService(new edge.ServiceBuilder()); 200 | } 201 | const driver = await builder.build(); 202 | if ( 203 | config.name === 'safari' || 204 | config.name === 'edge' || 205 | config.name === 'ie' 206 | ) { 207 | // Safari, Edge, and IE don't have flags we can use to launch with a given 208 | // window size, but webdriver can resize the window after we've started 209 | // up. Some versions of Safari have a bug where it is required to also 210 | // provide an x/y position (see 211 | // https://github.com/SeleniumHQ/selenium/issues/3796). 212 | const rect = 213 | config.name === 'safari' 214 | ? 
{...config.windowSize, x: 0, y: 0} 215 | : config.windowSize; 216 | await driver.manage().window().setRect(rect); 217 | } 218 | return driver; 219 | } 220 | 221 | function chromeOpts(config: BrowserConfig): chrome.Options { 222 | const opts = new chrome.Options(); 223 | if (config.binary) { 224 | opts.setChromeBinaryPath(config.binary); 225 | } 226 | if (config.headless === true) { 227 | opts.addArguments('--headless'); 228 | } 229 | if (config.addArguments) { 230 | opts.addArguments(...config.addArguments); 231 | } 232 | if (config.removeArguments) { 233 | opts.excludeSwitches(...config.removeArguments); 234 | } 235 | if (config.trace) { 236 | const loggingPrefs = new webdriver.logging.Preferences(); 237 | loggingPrefs.setLevel('browser', webdriver.logging.Level.ALL); 238 | loggingPrefs.setLevel('performance', webdriver.logging.Level.ALL); 239 | opts.setLoggingPrefs(loggingPrefs); 240 | 241 | opts.setPerfLoggingPrefs({ 242 | enableNetwork: true, 243 | enablePage: true, 244 | traceCategories: config.trace.categories.join(','), 245 | }); 246 | } 247 | const {width, height} = config.windowSize; 248 | opts.addArguments(`--window-size=${width},${height}`); 249 | if (config.profile) { 250 | opts.addArguments(`user-data-dir=${config.profile}`); 251 | } 252 | return opts; 253 | } 254 | 255 | function firefoxOpts(config: BrowserConfig): firefox.Options { 256 | const opts = new firefox.Options(); 257 | if (config.preferences) { 258 | for (const [name, value] of Object.entries(config.preferences)) { 259 | opts.setPreference(name, value); 260 | } 261 | } 262 | if (config.binary) { 263 | opts.setBinary(config.binary); 264 | } 265 | if (config.headless === true) { 266 | opts.addArguments('-headless'); 267 | } 268 | const {width, height} = config.windowSize; 269 | opts.addArguments(`-width=${width}`); 270 | opts.addArguments(`-height=${height}`); 271 | if (config.addArguments) { 272 | opts.addArguments(...config.addArguments); 273 | } 274 | if (config.profile) { 275 | // Note there is also a `-profile` flag for Firefox that could be set with 276 | // `addArguments`, but using that causes Selenium to timeout trying to 277 | // connect to the browser process. This `setProfile` method creates a 278 | // temporary copy of the profile. 279 | opts.setProfile(config.profile); 280 | } 281 | return opts; 282 | } 283 | 284 | /** 285 | * Open a new tab and switch to it. Assumes that the driver is on a page that 286 | * hasn't replaced `window.open` (e.g. the initial blank tab that we always 287 | * switch back to after running a benchmark). 288 | */ 289 | export async function openAndSwitchToNewTab( 290 | driver: webdriver.WebDriver, 291 | config: BrowserConfig 292 | ): Promise { 293 | // Chrome and Firefox add new tabs to the end of the handle list, but Safari 294 | // adds them to the beginning. Just look for the new one instead of making 295 | // any assumptions about this order. 296 | const tabsBefore = await driver.getAllWindowHandles(); 297 | if (tabsBefore.length !== 1) { 298 | throw new Error(`Expected only 1 open tab, got ${tabsBefore.length}`); 299 | } 300 | 301 | // "noopener=yes" prevents the new window from being able to access the 302 | // first window. We set that here because in Chrome (and perhaps other 303 | // browsers) we see a very significant improvement in the reliability of 304 | // measurements, in particular it appears to eliminate interference between 305 | // code across runs. It is likely this flag increases process isolation in a 306 | // way that prevents code caching across tabs. 
307 | await driver.executeScript('window.open("", "", "noopener=yes");'); 308 | // Firefox (and maybe other browsers) won't always report the new tab ID 309 | // immediately, so we'll need to poll for it. 310 | const maxRetries = 20; 311 | const retrySleepMs = 250; 312 | let retries = 0; 313 | let newTabId; 314 | while (true) { 315 | const tabsAfter = await driver.getAllWindowHandles(); 316 | const newTabs = tabsAfter.filter((tab) => tab !== tabsBefore[0]); 317 | if (newTabs.length === 1) { 318 | newTabId = newTabs[0]; 319 | break; 320 | } 321 | retries++; 322 | if (newTabs.length > 1 || retries > maxRetries) { 323 | throw new Error(`Expected to create 1 new tab, got ${newTabs.length}`); 324 | } 325 | await new Promise((resolve) => setTimeout(resolve, retrySleepMs)); 326 | } 327 | await driver.switchTo().window(newTabId); 328 | 329 | if (config.name === 'ie' || config.name === 'safari') { 330 | // For IE and Safari (with rel=noopener) we get a new window instead of a 331 | // new tab, so we need to resize every time. 332 | const rect = 333 | config.name === 'safari' 334 | ? {...config.windowSize, x: 0, y: 0} 335 | : config.windowSize; 336 | await driver.manage().window().setRect(rect); 337 | } 338 | type WithSendDevToolsCommand = { 339 | sendDevToolsCommand?: (command: string, config: unknown) => Promise; 340 | }; 341 | 342 | const driverWithSendDevToolsCommand = driver as WithSendDevToolsCommand; 343 | if ( 344 | driverWithSendDevToolsCommand.sendDevToolsCommand && 345 | config.cpuThrottlingRate !== undefined 346 | ) { 347 | // Enables CPU throttling to emulate slow CPUs. 348 | await driverWithSendDevToolsCommand.sendDevToolsCommand( 349 | 'Emulation.setCPUThrottlingRate', 350 | {rate: config.cpuThrottlingRate} 351 | ); 352 | } 353 | } 354 | -------------------------------------------------------------------------------- /src/cli.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import sourceMapSupport from 'source-map-support'; 8 | sourceMapSupport.install(); 9 | 10 | import * as path from 'path'; 11 | import ansi from 'ansi-escape-sequences'; 12 | import semver from 'semver'; 13 | 14 | import commandLineUsage from 'command-line-usage'; 15 | 16 | import {optDefs, parseFlags} from './flags.js'; 17 | import {BenchmarkSpec} from './types.js'; 18 | import {makeConfig} from './config.js'; 19 | import {Server} from './server.js'; 20 | import {ResultStatsWithDifferences} from './stats.js'; 21 | import { 22 | prepareVersionDirectory, 23 | makeServerPlans, 24 | installGitDependency, 25 | } from './versions.js'; 26 | import {manualMode} from './manual.js'; 27 | import {Runner} from './runner.js'; 28 | import {runNpm} from './util.js'; 29 | 30 | import {createRequire} from 'module'; 31 | const require = createRequire(import.meta.url); 32 | 33 | const installedVersion = (): string => 34 | // eslint-disable-next-line @typescript-eslint/no-var-requires 35 | require(path.join('..', 'package.json')).version; 36 | 37 | export async function main( 38 | argv: string[] 39 | ): Promise | undefined> { 40 | // Don't block anything on a network query to NPM. 
41 | const latestVersionPromise = latestVersionFromNpm(); 42 | let results; 43 | 44 | try { 45 | results = await realMain(argv); 46 | } catch (e) { 47 | console.error(e); 48 | process.exitCode = 1; 49 | } 50 | 51 | try { 52 | notifyIfOutdated(await latestVersionPromise); 53 | } catch (e) { 54 | // Don't set a non-zero exit code just because the NPM query failed. Maybe 55 | // we're behind a firewall and can't contact NPM. 56 | console.error(`\nFailed to check NPM for latest version:\n${e}`); 57 | } 58 | 59 | return results; 60 | } 61 | 62 | async function latestVersionFromNpm(): Promise { 63 | const stdout = await runNpm(['info', 'tachometer@latest', 'version']); 64 | return stdout.toString('utf8').trim(); 65 | } 66 | 67 | function notifyIfOutdated(latestVersion: string) { 68 | const iv = installedVersion(); 69 | if (semver.lt(iv, latestVersion)) { 70 | console.log( 71 | ansi.format(` 72 | [bold magenta]{Update available!} 73 | The latest version of tachometer is [green]{${latestVersion}} 74 | You are running version [yellow]{${iv}} 75 | See what's new at [cyan]{https://github.com/Polymer/tachometer/blob/master/CHANGELOG.md}`) 76 | ); 77 | } 78 | } 79 | 80 | async function realMain( 81 | argv: string[] 82 | ): Promise | undefined> { 83 | const opts = parseFlags(argv); 84 | 85 | if (opts.help) { 86 | console.log( 87 | commandLineUsage([ 88 | { 89 | header: 'tach', 90 | content: `v${installedVersion()}\nhttps://github.com/PolymerLabs/tachometer`, 91 | }, 92 | { 93 | header: 'Usage', 94 | content: ` 95 | Run a benchmark from a local file: 96 | $ tach foo.html 97 | 98 | Compare a benchmark with different URL parameters: 99 | $ tach foo.html?i=1 foo.html?i=2 100 | 101 | Benchmark index.html in a directory: 102 | $ tach foo/bar 103 | 104 | Benchmark a remote URL's First Contentful Paint time: 105 | $ tach http://example.com 106 | `, 107 | }, 108 | { 109 | header: 'Options', 110 | optionList: optDefs, 111 | }, 112 | ]) 113 | ); 114 | return; 115 | } 116 | 117 | if (opts.version) { 118 | console.log(installedVersion()); 119 | return; 120 | } 121 | 122 | const config = await makeConfig(opts); 123 | 124 | if (config.legacyJsonFile) { 125 | console.log( 126 | `Please use --json-file instead of --save. 
` + 127 | `--save will be removed in the next major version.` 128 | ); 129 | } 130 | 131 | const {plans, gitInstalls} = await makeServerPlans( 132 | config.root, 133 | opts['npm-install-dir'], 134 | config.benchmarks 135 | ); 136 | 137 | await Promise.all( 138 | gitInstalls.map((gitInstall) => 139 | installGitDependency(gitInstall, config.forceCleanNpmInstall) 140 | ) 141 | ); 142 | 143 | const servers = new Map(); 144 | const promises = []; 145 | for (const {npmInstalls, mountPoints, specs} of plans) { 146 | promises.push( 147 | ...npmInstalls.map((install) => 148 | prepareVersionDirectory( 149 | install, 150 | config.forceCleanNpmInstall, 151 | config.npmrc 152 | ) 153 | ) 154 | ); 155 | promises.push( 156 | (async () => { 157 | const server = await Server.start({ 158 | host: opts.host, 159 | ports: opts.port, 160 | root: config.root, 161 | npmInstalls, 162 | mountPoints, 163 | resolveBareModules: config.resolveBareModules, 164 | cache: config.mode !== 'manual', 165 | }); 166 | for (const spec of specs) { 167 | servers.set(spec, server); 168 | } 169 | })() 170 | ); 171 | } 172 | await Promise.all(promises); 173 | 174 | if (config.mode === 'manual') { 175 | await manualMode(config, servers); 176 | } else { 177 | const runner = new Runner(config, servers); 178 | try { 179 | return await runner.run(); 180 | } finally { 181 | const allServers = new Set([...servers.values()]); 182 | await Promise.all([...allServers].map((server) => server.close())); 183 | } 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /src/config.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import fsExtra from 'fs-extra'; 8 | import * as path from 'path'; 9 | 10 | import {fcpBrowsers} from './browser.js'; 11 | import {parseConfigFile, writeBackSchemaIfNeeded} from './configfile.js'; 12 | import * as defaults from './defaults.js'; 13 | import {Opts} from './flags.js'; 14 | import {CheckConfig, parseGithubCheckFlag} from './github.js'; 15 | import {specsFromOpts} from './specs.js'; 16 | import {AutoSampleConditions} from './stats.js'; 17 | import {BenchmarkSpec} from './types.js'; 18 | import {fileKind} from './util.js'; 19 | 20 | /** 21 | * Validated and fully specified configuration. 22 | */ 23 | export interface Config { 24 | root: string; 25 | sampleSize: number; 26 | timeout: number; 27 | benchmarks: BenchmarkSpec[]; 28 | autoSampleConditions: AutoSampleConditions; 29 | mode: 'automatic' | 'manual'; 30 | jsonFile: string; 31 | // TODO(aomarks) Remove this in next major version. 32 | legacyJsonFile: string; 33 | githubCheck?: CheckConfig; 34 | resolveBareModules: boolean; 35 | remoteAccessibleHost: string; 36 | forceCleanNpmInstall: boolean; 37 | npmrc?: string; 38 | csvFileStats: string; 39 | csvFileRaw: string; 40 | } 41 | 42 | export async function makeConfig(opts: Opts): Promise { 43 | // These options are only controlled by flags. 44 | const baseConfig: Partial = { 45 | mode: (opts.manual === true ? 'manual' : 'automatic') as 46 | | 'manual' 47 | | 'automatic', 48 | jsonFile: opts['json-file'], 49 | legacyJsonFile: opts['save'], 50 | csvFileStats: opts['csv-file'], 51 | csvFileRaw: opts['csv-file-raw'], 52 | forceCleanNpmInstall: opts['force-clean-npm-install'], 53 | npmrc: opts['npmrc'], 54 | githubCheck: opts['github-check'] 55 | ? 
parseGithubCheckFlag(opts['github-check']) 56 | : undefined, 57 | remoteAccessibleHost: opts['remote-accessible-host'], 58 | }; 59 | 60 | let config: Config; 61 | if (opts.config) { 62 | if (opts.root !== undefined) { 63 | throw new Error('--root cannot be specified when using --config'); 64 | } 65 | if (opts.browser !== undefined) { 66 | throw new Error('--browser cannot be specified when using --config'); 67 | } 68 | if (opts['sample-size'] !== undefined) { 69 | throw new Error('--sample-size cannot be specified when using --config'); 70 | } 71 | if (opts.timeout !== undefined) { 72 | throw new Error('--timeout cannot be specified when using --config'); 73 | } 74 | if (opts['auto-sample-conditions'] !== undefined) { 75 | throw new Error( 76 | '--auto-sample-conditions cannot be specified when using --config' 77 | ); 78 | } 79 | if (opts.measure !== undefined) { 80 | throw new Error('--measure cannot be specified when using --config'); 81 | } 82 | if (opts['resolve-bare-modules'] !== undefined) { 83 | throw new Error( 84 | '--resolve-bare-modules cannot be specified when using --config' 85 | ); 86 | } 87 | if (opts['window-size'] !== undefined) { 88 | throw new Error('--window-size cannot be specified when using --config'); 89 | } 90 | const rawConfigObj = await fsExtra.readJson(opts.config); 91 | const validatedConfigObj = await parseConfigFile(rawConfigObj, opts.config); 92 | 93 | await writeBackSchemaIfNeeded(rawConfigObj, opts.config); 94 | 95 | config = applyDefaults({ 96 | ...baseConfig, 97 | ...validatedConfigObj, 98 | }); 99 | } else { 100 | config = applyDefaults({ 101 | ...baseConfig, 102 | root: opts.root, 103 | sampleSize: opts['sample-size'], 104 | timeout: opts.timeout, 105 | autoSampleConditions: 106 | opts['auto-sample-conditions'] !== undefined 107 | ? parseAutoSampleConditions(opts['auto-sample-conditions'].split(',')) 108 | : undefined, 109 | benchmarks: await specsFromOpts(opts), 110 | resolveBareModules: opts['resolve-bare-modules'], 111 | }); 112 | } 113 | 114 | if (config.sampleSize <= 1) { 115 | throw new Error('--sample-size must be > 1'); 116 | } 117 | 118 | if (config.timeout < 0) { 119 | throw new Error('--timeout must be >= 0'); 120 | } 121 | 122 | if (config.benchmarks.length === 0) { 123 | throw new Error('No benchmarks matched with the given flags'); 124 | } 125 | 126 | for (const spec of config.benchmarks) { 127 | for (const measurement of spec.measurement) { 128 | if ( 129 | measurement.mode === 'performance' && 130 | measurement.entryName === 'first-contentful-paint' && 131 | !fcpBrowsers.has(spec.browser.name) 132 | ) { 133 | throw new Error( 134 | `Browser ${spec.browser.name} does not support the ` + 135 | `first contentful paint (FCP) measurement` 136 | ); 137 | } 138 | } 139 | } 140 | 141 | return config; 142 | } 143 | 144 | export function applyDefaults(partial: Partial): Config { 145 | return { 146 | benchmarks: partial.benchmarks !== undefined ? partial.benchmarks : [], 147 | csvFileStats: 148 | partial.csvFileStats !== undefined ? partial.csvFileStats : '', 149 | csvFileRaw: partial.csvFileRaw !== undefined ? partial.csvFileRaw : '', 150 | forceCleanNpmInstall: 151 | partial.forceCleanNpmInstall !== undefined 152 | ? partial.forceCleanNpmInstall 153 | : defaults.forceCleanNpmInstall, 154 | npmrc: partial.npmrc !== undefined ? partial.npmrc : '', 155 | githubCheck: partial.githubCheck, 156 | autoSampleConditions: 157 | partial.autoSampleConditions !== undefined 158 | ? 
partial.autoSampleConditions 159 | : parseAutoSampleConditions([...defaults.autoSampleConditions]), 160 | jsonFile: partial.jsonFile !== undefined ? partial.jsonFile : '', 161 | legacyJsonFile: 162 | partial.legacyJsonFile !== undefined ? partial.legacyJsonFile : '', 163 | sampleSize: 164 | partial.sampleSize !== undefined 165 | ? partial.sampleSize 166 | : defaults.sampleSize, 167 | mode: partial.mode !== undefined ? partial.mode : defaults.mode, 168 | remoteAccessibleHost: 169 | partial.remoteAccessibleHost !== undefined 170 | ? partial.remoteAccessibleHost 171 | : '', 172 | resolveBareModules: 173 | partial.resolveBareModules !== undefined 174 | ? partial.resolveBareModules 175 | : defaults.resolveBareModules, 176 | root: partial.root !== undefined ? partial.root : defaults.root, 177 | timeout: partial.timeout !== undefined ? partial.timeout : defaults.timeout, 178 | }; 179 | } 180 | 181 | /** 182 | * Derives the URL that we'll use to benchmark using the given HTML file or 183 | * directory on disk, relative to the root directory we'll be serving. Throws if 184 | * it's a file that doesn't exist, or a directory without an index.html. 185 | */ 186 | export async function urlFromLocalPath( 187 | rootDir: string, 188 | diskPath: string 189 | ): Promise { 190 | const serverRelativePath = path.relative(rootDir, diskPath); 191 | if (serverRelativePath.startsWith('..')) { 192 | throw new Error( 193 | 'File or directory is not accessible from server root: ' + diskPath 194 | ); 195 | } 196 | 197 | const kind = await fileKind(diskPath); 198 | if (kind === undefined) { 199 | throw new Error(`No such file or directory: ${diskPath}`); 200 | } 201 | 202 | let urlPath = `/${serverRelativePath.replace(path.win32.sep, '/')}`; 203 | if (kind === 'dir') { 204 | if ((await fileKind(path.join(diskPath, 'index.html'))) !== 'file') { 205 | throw new Error(`Directory did not contain an index.html: ${diskPath}`); 206 | } 207 | // We need a trailing slash when serving a directory. Our static server 208 | // will serve index.html at both /foo and /foo/, without redirects. But 209 | // these two forms will have baseURIs that resolve relative URLs 210 | // differently, and we want the form that would work the same as 211 | // /foo/index.html. 212 | urlPath += '/'; 213 | } 214 | return urlPath; 215 | } 216 | 217 | /** 218 | * Parse auto sample condition strings. 219 | */ 220 | export function parseAutoSampleConditions( 221 | strs: string[] 222 | ): AutoSampleConditions { 223 | const absolute = new Set(); 224 | const relative = new Set(); 225 | for (const str of strs) { 226 | if (!str.match(/^[-+]?(\d*\.)?\d+(ms|%)$/)) { 227 | throw new Error(`Invalid auto sample condition ${str}`); 228 | } 229 | 230 | let num; 231 | let absOrRel; 232 | const isPercent = str.endsWith('%'); 233 | if (isPercent === true) { 234 | num = Number(str.slice(0, -1)) / 100; 235 | absOrRel = relative; 236 | } else { 237 | // Otherwise ends with "ms". 238 | num = Number(str.slice(0, -2)); // Note that Number("+1") === 1 239 | absOrRel = absolute; 240 | } 241 | 242 | if (str.startsWith('+') || str.startsWith('-') || num === 0) { 243 | // If the sign was explicit (e.g. "+0.1", "-0.1") then we're only 244 | // interested in that signed condition. 245 | absOrRel.add(num); 246 | } else { 247 | // Otherwise (e.g. "0.1") we're interested in the condition as a 248 | // difference in either direction. 
249 | absOrRel.add(-num); 250 | absOrRel.add(num); 251 | } 252 | } 253 | return { 254 | absolute: [...absolute].sort((a, b) => a - b), 255 | relative: [...relative].sort((a, b) => a - b), 256 | }; 257 | } 258 | -------------------------------------------------------------------------------- /src/csv.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {stringify as csvStringify} from 'csv-stringify/sync'; 8 | 9 | import {ResultStatsWithDifferences} from './stats.js'; 10 | 11 | const precision = 5; 12 | 13 | /** 14 | * Format statistical results as a CSV file string. 15 | */ 16 | export function formatCsvStats(results: ResultStatsWithDifferences[]): string { 17 | // Note the examples in ./test/csv_test.ts should make this easier to 18 | // understand. 19 | const h1 = ['', '', '']; 20 | const h2 = ['', 'ms', '']; 21 | const h3 = ['', 'min', 'max']; 22 | const rows = []; 23 | for (const result of results) { 24 | h1.push(`vs ${result.result.name}`, '', '', ''); 25 | h2.push('% change', '', 'ms change', ''); 26 | h3.push('min', 'max', 'min', 'max'); 27 | const row = []; 28 | row.push( 29 | result.result.name, 30 | result.stats.meanCI.low.toFixed(precision), 31 | result.stats.meanCI.high.toFixed(precision) 32 | ); 33 | for (const diff of result.differences) { 34 | if (diff === null) { 35 | row.push('', '', '', ''); 36 | } else { 37 | row.push( 38 | (diff.relative.low * 100).toFixed(precision) + '%', 39 | (diff.relative.high * 100).toFixed(precision) + '%', 40 | diff.absolute.low.toFixed(precision), 41 | diff.absolute.high.toFixed(precision) 42 | ); 43 | } 44 | } 45 | rows.push(row); 46 | } 47 | return csvStringify([h1, h2, h3, ...rows]); 48 | } 49 | 50 | /** 51 | * Format raw sample results as a CSV file string. 52 | * 53 | * Columns correspond to benchmarks. Rows correspond to sample iterations. The 54 | * first row is headers containing the benchmark names. 
55 | * 56 | * For example: 57 | * 58 | * foo, bar, baz 59 | * 1.2, 5.5, 9.4 60 | * 1.8, 5.6, 9.1 61 | * 1.3, 5.2, 9.8 62 | */ 63 | export function formatCsvRaw(results: ResultStatsWithDifferences[]): string { 64 | const headers = []; 65 | const rows: Array = []; 66 | for (let r = 0; r < results.length; r++) { 67 | const {result} = results[r]; 68 | headers.push(result.name); 69 | for (let m = 0; m < result.millis.length; m++) { 70 | if (rows[m] === undefined) { 71 | rows[m] = []; 72 | } 73 | rows[m][r] = result.millis[m]; 74 | } 75 | } 76 | return csvStringify([headers, ...rows]); 77 | } 78 | -------------------------------------------------------------------------------- /src/defaults.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as path from 'path'; 8 | import {BrowserName} from './browser.js'; 9 | import {LocalUrl, Measurement, RemoteUrl} from './types.js'; 10 | 11 | export const windowWidth = 1024; 12 | export const windowHeight = 768; 13 | export const root = '.'; 14 | export const browserName: BrowserName = 'chrome'; 15 | export const headless = false; 16 | export const sampleSize = 50; 17 | export const timeout = 3; 18 | export const autoSampleConditions = ['0%'] as const; 19 | export const mode = 'automatic'; 20 | export const resolveBareModules = true; 21 | export const forceCleanNpmInstall = false; 22 | export const measurementExpression = 'window.tachometerResult'; 23 | export const traceLogDir = path.join(process.cwd(), 'logs'); 24 | export const traceCategories = [ 25 | 'blink', 26 | 'blink.user_timing', 27 | 'v8', 28 | 'v8.execute', 29 | 'disabled-by-default-v8.compile', 30 | // Seems to sometimes cause errors in Chrome's about:tracing 31 | // "disabled-by-default-v8.cpu_profiler", 32 | 'disabled-by-default-v8.gc', 33 | 'disabled-by-default-v8.turbofan', 34 | ]; 35 | 36 | export function measurement(url: LocalUrl | RemoteUrl): Measurement { 37 | if (url.kind === 'remote') { 38 | return { 39 | mode: 'performance', 40 | entryName: 'first-contentful-paint', 41 | }; 42 | } 43 | return {mode: 'callback'}; 44 | } 45 | -------------------------------------------------------------------------------- /src/flags.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as os from 'os'; 8 | import * as path from 'path'; 9 | 10 | import {supportedBrowsers} from './browser.js'; 11 | import * as defaults from './defaults.js'; 12 | import {CommandLineMeasurements, measurements} from './types.js'; 13 | 14 | import commandLineArgs from 'command-line-args'; 15 | import commandLineUsage from 'command-line-usage'; 16 | 17 | export const defaultInstallDir = path.join( 18 | os.tmpdir(), 19 | 'tachometer', 20 | 'versions' 21 | ); 22 | 23 | export const optDefs: commandLineUsage.OptionDefinition[] = [ 24 | { 25 | name: 'help', 26 | description: 'Show documentation', 27 | type: Boolean, 28 | defaultValue: false, 29 | }, 30 | { 31 | name: 'version', 32 | description: 'Show the installed version of tachometer', 33 | type: Boolean, 34 | defaultValue: false, 35 | }, 36 | { 37 | name: 'root', 38 | description: `Root directory to search for benchmarks (default ${defaults.root})`, 39 | type: String, 40 | }, 41 | { 42 | name: 'host', 43 | description: 'Which host to run on', 44 | type: String, 45 | 
defaultValue: '127.0.0.1', 46 | }, 47 | { 48 | name: 'remote-accessible-host', 49 | description: 50 | 'When using a browser over a remote WebDriver connection, ' + 51 | 'the URL that those browsers should use to access the local ' + 52 | 'tachometer server (default to value of --host).', 53 | type: String, 54 | defaultValue: '', 55 | }, 56 | { 57 | name: 'port', 58 | description: 59 | 'Which ports to run on (comma-delimited preference list, ' + 60 | '0 for random, default [8080, 8081, ..., 0])', 61 | type: (flag: string) => flag.split(',').map(Number), 62 | defaultValue: [8080, 8081, 8082, 8083, 0], 63 | }, 64 | { 65 | name: 'config', 66 | description: 'Path to JSON config file (see README for format)', 67 | type: String, 68 | defaultValue: '', 69 | }, 70 | { 71 | name: 'package-version', 72 | description: 'Specify an NPM package version to swap in (see README)', 73 | alias: 'p', 74 | type: String, 75 | defaultValue: [], 76 | lazyMultiple: true, 77 | }, 78 | { 79 | name: 'npm-install-dir', 80 | description: 81 | `Where to install custom package versions ` + 82 | `(default ${defaultInstallDir})`, 83 | type: String, 84 | defaultValue: defaultInstallDir, 85 | }, 86 | { 87 | name: 'force-clean-npm-install', 88 | description: 89 | `Always do a from-scratch NPM install when using custom ` + 90 | `package versions. If false (the default), NPM install directories ` + 91 | `will be re-used as long as the dependency versions haven't changed.`, 92 | type: Boolean, 93 | defaultValue: false, 94 | }, 95 | { 96 | name: 'npmrc', 97 | description: `.npmrc file to copy into the test install directory.`, 98 | type: String, 99 | defaultValue: '', 100 | }, 101 | { 102 | name: 'browser', 103 | description: 104 | 'Which browsers to launch in automatic mode, ' + 105 | `comma-delimited (${[...supportedBrowsers].join(', ')}) ` + 106 | `(default ${defaults.browserName})`, 107 | alias: 'b', 108 | type: String, 109 | }, 110 | { 111 | name: 'sample-size', 112 | description: 113 | 'Minimum number of times to run each benchmark' + 114 | ` (default ${defaults.sampleSize})`, 115 | alias: 'n', 116 | type: Number, 117 | }, 118 | { 119 | name: 'manual', 120 | description: "Don't run automatically, just show URLs and collect results", 121 | alias: 'm', 122 | type: Boolean, 123 | defaultValue: false, 124 | }, 125 | { 126 | name: 'json-file', 127 | description: 'Save benchmark results to this JSON file.', 128 | type: String, 129 | defaultValue: '', 130 | }, 131 | { 132 | name: 'save', 133 | description: 134 | 'Deprecated. Use --json-file instead. ' + 135 | 'Save benchmark JSON data to this file', 136 | alias: 's', 137 | type: String, 138 | defaultValue: '', 139 | }, 140 | { 141 | name: 'csv-file', 142 | description: 'Save benchmark results to this CSV file.', 143 | type: String, 144 | defaultValue: '', 145 | }, 146 | { 147 | name: 'csv-file-raw', 148 | description: 'Save raw benchmark measurement samples to this CSV file.', 149 | type: String, 150 | defaultValue: '', 151 | }, 152 | { 153 | name: 'measure', 154 | description: 155 | 'Which time interval to measure. 
Options:\n' + 156 | '* callback: call bench.start() and bench.stop() (default)\n' + 157 | '* global: set window.tachometerResult = \n' + 158 | '* fcp: first contentful paint', 159 | type: (str: string): string => { 160 | if (!measurements.has(str)) { 161 | throw new Error( 162 | `Expected --measure flag to be one of: ` + 163 | `${[...measurements.values()].join(', ')} ` + 164 | `but was '${str}'` 165 | ); 166 | } 167 | return str; 168 | }, 169 | }, 170 | { 171 | name: 'measurement-expression', 172 | description: 173 | 'Javascript expression to poll from page to retrieve global\n' + 174 | 'result. Only valid when --measure=global.', 175 | type: String, 176 | defaultValue: defaults.measurementExpression, 177 | }, 178 | { 179 | name: 'auto-sample-conditions', 180 | description: 181 | 'The degrees of difference to try and resolve when auto-sampling ' + 182 | '(milliseconds, comma-delimited, optionally signed, ' + 183 | `default ${defaults.autoSampleConditions.join(',')})`, 184 | type: String, 185 | }, 186 | { 187 | name: 'horizon', 188 | description: 'Deprecated alias for --auto-sample-conditions', 189 | type: String, 190 | }, 191 | { 192 | name: 'timeout', 193 | description: 194 | 'The maximum number of minutes to spend auto-sampling ' + 195 | `(default ${defaults.timeout}).`, 196 | type: Number, 197 | }, 198 | { 199 | name: 'github-check', 200 | description: 201 | 'Post benchmark results as a GitHub Check. A JSON object ' + 202 | 'with properties appId, installationId, repo, and commit.', 203 | type: String, 204 | defaultValue: '', 205 | }, 206 | { 207 | name: 'resolve-bare-modules', 208 | description: 209 | 'Whether to automatically convert ES module imports with ' + 210 | 'bare module specifiers to paths (default true).', 211 | type: booleanString('resolve-bare-modules'), 212 | typeLabel: 'true|false', 213 | }, 214 | { 215 | name: 'window-size', 216 | description: 217 | `"width,height" in pixels of the window to open for all browsers` + 218 | ` (default "${defaults.windowWidth},${defaults.windowHeight}").`, 219 | type: String, 220 | }, 221 | { 222 | name: 'trace', 223 | description: '', 224 | type: Boolean, 225 | }, 226 | { 227 | name: 'trace-log-dir', 228 | description: '', 229 | type: String, 230 | defaultValue: defaults.traceLogDir, 231 | }, 232 | { 233 | name: 'trace-cat', 234 | description: '', 235 | type: String, 236 | defaultValue: defaults.traceCategories.join(','), 237 | }, 238 | ]; 239 | 240 | export interface Opts { 241 | help: boolean; 242 | version: boolean; 243 | root: string | undefined; 244 | host: string; 245 | port: number[]; 246 | config: string; 247 | 'package-version': string[]; 248 | 'npm-install-dir': string; 249 | browser: string | undefined; 250 | 'sample-size': number | undefined; 251 | manual: boolean; 252 | save: string; 253 | measure: CommandLineMeasurements | undefined; 254 | 'measurement-expression': string | undefined; 255 | 'auto-sample-conditions': string | undefined; 256 | timeout: number | undefined; 257 | 'github-check': string; 258 | 'resolve-bare-modules': boolean | undefined; 259 | 'remote-accessible-host': string; 260 | 'window-size': string; 261 | 'force-clean-npm-install': boolean; 262 | npmrc?: string; 263 | 'csv-file': string; 264 | 'csv-file-raw': string; 265 | 'json-file': string; 266 | trace: boolean; 267 | 'trace-log-dir': string; 268 | 'trace-cat': string; 269 | 270 | // Extra arguments not associated with a flag are put here. These are our 271 | // benchmark names/URLs. 
272 | // 273 | // Note we could also define a flag and set `defaultOption: true`, but then 274 | // there would be two ways of specifying benchmark names/URLs. Also note the 275 | // _unknown property is defined in commandLineArgs.CommandLineOptions, but we 276 | // don't want to extend that because it includes `[propName: string]: any`. 277 | _unknown?: string[]; 278 | } 279 | 280 | interface OptsWithDeprecated extends Opts { 281 | horizon: string | undefined; 282 | } 283 | 284 | /** 285 | * Boolean flags that default to true are not supported 286 | * (https://github.com/75lb/command-line-args/issues/71). 287 | */ 288 | function booleanString(flagName: string): (str: string) => boolean { 289 | return (str: string) => { 290 | if (str === 'true' || str === '') { 291 | return true; 292 | } else if (str === 'false') { 293 | return false; 294 | } 295 | throw new Error( 296 | `Invalid --${flagName}. Expected true or false but was ${str}.` 297 | ); 298 | }; 299 | } 300 | 301 | /** 302 | * Parse the given CLI argument list. 303 | */ 304 | export function parseFlags(argv: string[]): Opts { 305 | const opts = commandLineArgs(optDefs, { 306 | partial: true, 307 | argv, 308 | }) as OptsWithDeprecated; 309 | // Note that when a flag is used but not set to a value (i.e. "tachometer 310 | // --resolve-bare-modules ..."), then the type function is not invoked, and 311 | // the value will be null. Since in default-false cases (which aren't 312 | // supported by command-line-args) that form should be true, we need to fix 313 | // those cases up after parsing. 314 | if (opts['resolve-bare-modules'] === null) { 315 | opts['resolve-bare-modules'] = true; 316 | } 317 | if (opts['horizon']) { 318 | if (opts['auto-sample-conditions']) { 319 | throw new Error( 320 | 'Please use only --auto-sample-conditions and not --horizons.' 321 | ); 322 | } 323 | console.warn( 324 | '\nNOTE: The --horizon flag has been renamed to --auto-sample-conditions.\n' + 325 | 'Please use --auto-sample-conditions going forward.\n' 326 | ); 327 | opts['auto-sample-conditions'] = opts['horizon']; 328 | delete opts['horizon']; 329 | } 330 | return opts as Opts; 331 | } 332 | -------------------------------------------------------------------------------- /src/format.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import stripAnsi from 'strip-ansi'; 8 | import * as table from 'table'; 9 | import {UAParser} from 'ua-parser-js'; 10 | import ansi from 'ansi-escape-sequences'; 11 | 12 | import { 13 | Difference, 14 | ConfidenceInterval, 15 | ResultStats, 16 | ResultStatsWithDifferences, 17 | } from './stats.js'; 18 | import {BenchmarkSpec, BenchmarkResult} from './types.js'; 19 | 20 | export const spinner = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'].map( 21 | (frame) => ansi.format(`[blue]{${frame}}`) 22 | ); 23 | 24 | /** 25 | * An abstraction for the various dimensions of data we display. 26 | */ 27 | interface Dimension { 28 | label: string; 29 | format: (r: ResultStats) => string; 30 | tableConfig?: table.ColumnUserConfig; 31 | } 32 | 33 | export interface ResultTable { 34 | dimensions: Dimension[]; 35 | results: ResultStats[]; 36 | } 37 | 38 | export interface AutomaticResults { 39 | fixed: ResultTable; 40 | unfixed: ResultTable; 41 | } 42 | 43 | /** 44 | * Create an automatic mode result table. 
45 | */ 46 | export function automaticResultTable(results: ResultStats[]): AutomaticResults { 47 | // Typically most dimensions for a set of results share the same value (e.g 48 | // because we're only running one benchmark, one browser, etc.). To save 49 | // horizontal space and make the results easier to read, we first show the 50 | // fixed values in one table, then the unfixed values in another. 51 | const fixed: Dimension[] = []; 52 | const unfixed: Dimension[] = []; 53 | 54 | const possiblyFixed = [ 55 | benchmarkDimension, 56 | versionDimension, 57 | browserDimension, 58 | sampleSizeDimension, 59 | bytesSentDimension, 60 | ]; 61 | 62 | for (const dimension of possiblyFixed) { 63 | const values = new Set(); 64 | for (const res of results) { 65 | values.add(dimension.format(res)); 66 | } 67 | if (values.size === 1) { 68 | fixed.push(dimension); 69 | } else { 70 | unfixed.push(dimension); 71 | } 72 | } 73 | 74 | // These are the primary observed results, so they always go in the main 75 | // result table, even if they happen to be the same in one run. 76 | unfixed.push(runtimeConfidenceIntervalDimension); 77 | if (results.length > 1) { 78 | // Create an NxN matrix comparing every result to every other result. 79 | const labelFn = makeUniqueLabelFn(results.map((result) => result.result)); 80 | for (let i = 0; i < results.length; i++) { 81 | unfixed.push({ 82 | label: `vs ${labelFn(results[i].result)}`, 83 | tableConfig: { 84 | alignment: 'right', 85 | }, 86 | format: (r: ResultStats & Partial) => { 87 | if (r.differences === undefined) { 88 | return ''; 89 | } 90 | const diff = r.differences[i]; 91 | if (diff === null) { 92 | return ansi.format('\n[gray]{-} '); 93 | } 94 | return formatDifference(diff); 95 | }, 96 | }); 97 | } 98 | } 99 | 100 | const fixedTable = {dimensions: fixed, results: [results[0]]}; 101 | const unfixedTable = {dimensions: unfixed, results}; 102 | return {fixed: fixedTable, unfixed: unfixedTable}; 103 | } 104 | 105 | /** 106 | * Format a terminal text result table where each result is a row: 107 | * 108 | * +--------+--------+ 109 | * | Header | Header | 110 | * +--------+--------+ 111 | * | Value | Value | 112 | * +--------+--------+ 113 | * | Value | Value | 114 | * +--------+--------+ 115 | */ 116 | export function verticalTermResultTable({ 117 | dimensions, 118 | results, 119 | }: ResultTable): string { 120 | const columns = dimensions.map((d) => d.tableConfig || {}); 121 | const rows = [ 122 | dimensions.map((d) => ansi.format(`[bold]{${d.label}}`)), 123 | ...results.map((r) => dimensions.map((d) => d.format(r))), 124 | ]; 125 | return table.table(rows, { 126 | border: table.getBorderCharacters('norc'), 127 | // The table library only accepts an object with numeric keys, not an array. 
128 |     // https://github.com/gajus/table/issues/134
129 |     columns: Object.fromEntries(Object.entries(columns)),
130 |   });
131 | }
132 | 
133 | /**
134 |  * Format a terminal text result table where each result is a column:
135 |  *
136 |  *     +--------+-------+-------+
137 |  *     | Header | Value | Value |
138 |  *     +--------+-------+-------+
139 |  *     | Header | Value | Value |
140 |  *     +--------+-------+-------+
141 |  */
142 | export function horizontalTermResultTable({
143 |   dimensions,
144 |   results,
145 | }: ResultTable): string {
146 |   const columns: table.ColumnUserConfig[] = [
147 |     {alignment: 'right'},
148 |     ...results.map((): table.ColumnUserConfig => ({alignment: 'left'})),
149 |   ];
150 |   const rows = dimensions.map((d) => {
151 |     return [
152 |       ansi.format(`[bold]{${d.label}}`),
153 |       ...results.map((r) => d.format(r)),
154 |     ];
155 |   });
156 |   return table.table(rows, {
157 |     border: table.getBorderCharacters('norc'),
158 |     // The table library only accepts an object with numeric keys, not an array.
159 |     // https://github.com/gajus/table/issues/134
160 |     columns: Object.fromEntries(Object.entries(columns)),
161 |   });
162 | }
163 | 
164 | /**
165 |  * Format an HTML result table where each result is a row:
166 |  *
167 |  *     <table>
168 |  *       <tr> <th>Header</th> <th>Header</th> </tr>
169 |  *       <tr> <td>Value</td> <td>Value</td> </tr>
170 |  *       <tr> <td>Value</td> <td>Value</td> </tr>
171 |  *     </table>
172 |  */
173 | export function verticalHtmlResultTable({
174 |   dimensions,
175 |   results,
176 | }: ResultTable): string {
177 |   const headers = dimensions.map((d) => `<th>${d.label}</th>`);
178 |   const rows = [];
179 |   for (const r of results) {
180 |     const cells = dimensions.map(
181 |       (d) => `<td>${ansiCellToHtml(d.format(r))}</td>`
182 |     );
183 |     rows.push(`<tr>${cells.join('')}</tr>`);
184 |   }
185 |   return `<table>
186 |   <tr>${headers.join('')}</tr>
187 |   ${rows.join('')}
188 | </table>`;
189 | }
190 | 
191 | /**
192 |  * Format an HTML result table where each result is a column:
193 |  *
194 |  *     <table>
195 |  *       <tr> <th>Header</th> <td>Value</td> <td>Value</td> </tr>
196 |  *       <tr> <th>Header</th> <td>Value</td> <td>Value</td> </tr>
197 |  *     </table>
198 |  */
199 | export function horizontalHtmlResultTable({
200 |   dimensions,
201 |   results,
202 | }: ResultTable): string {
203 |   const rows: string[] = [];
204 |   for (const d of dimensions) {
205 |     const cells = [
206 |       `<th>${d.label}</th>`,
207 |       ...results.map((r) => `<td>${ansiCellToHtml(d.format(r))}</td>`),
208 |     ];
209 |     rows.push(`<tr>${cells.join('')}</tr>`);
210 |   }
211 |   return `<table>${rows.join('')}</table>`;
212 | }
213 | 
214 | function ansiCellToHtml(ansi: string): string {
215 |   // For now, just remove ANSI color sequences and prevent line-breaks. We may
216 |   // want to add an htmlFormat method to each dimension object so that we can
217 |   // have more advanced control per dimension.
218 |   return stripAnsi(ansi).replace(/ /g, '&nbsp;');
219 | }
220 | 
221 | /**
222 |  * Format a confidence interval as "[low, high]".
223 |  */
224 | const formatConfidenceInterval = (
225 |   ci: ConfidenceInterval,
226 |   format: (n: number) => string
227 | ) => {
228 |   return ansi.format(`${format(ci.low)} [gray]{-} ${format(ci.high)}`);
229 | };
230 | 
231 | /**
232 |  * Prefix positive numbers with a red "+" and negative ones with a green "-".
233 |  */
234 | const colorizeSign = (n: number, format: (n: number) => string) => {
235 |   if (n > 0) {
236 |     return ansi.format(`[red bold]{+}${format(n)}`);
237 |   } else if (n < 0) {
238 |     // Negate the value so that we don't get a double negative sign.
239 |     return ansi.format(`[green bold]{-}${format(-n)}`);
240 |   } else {
241 |     return format(n);
242 |   }
243 | };
244 | 
245 | const benchmarkDimension: Dimension = {
246 |   label: 'Benchmark',
247 |   format: (r: ResultStats) => r.result.name,
248 | };
249 | 
250 | const versionDimension: Dimension = {
251 |   label: 'Version',
252 |   format: (r: ResultStats) => r.result.version || ansi.format('[gray]{<none>}'),
253 | };
254 | 
255 | const browserDimension: Dimension = {
256 |   label: 'Browser',
257 |   format: (r: ResultStats) => {
258 |     const browser = r.result.browser;
259 |     let s = browser.name;
260 |     if (browser.headless) {
261 |       s += '-headless';
262 |     }
263 |     if (browser.remoteUrl) {
264 |       s += `\n@${browser.remoteUrl}`;
265 |     }
266 |     if (r.result.userAgent !== '') {
267 |       // We'll only have a user agent when using the built-in static server.
268 |       // TODO Get UA from window.navigator.userAgent so we always have it.
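      // ua-parser-js extracts e.g. {name: 'Chrome', version: '103.0.0.0'} from
      // the raw user agent string; only the version number is appended here.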
269 | const ua = new UAParser(r.result.userAgent).getBrowser(); 270 | s += `\n${ua.version}`; 271 | } 272 | return s; 273 | }, 274 | }; 275 | 276 | const sampleSizeDimension: Dimension = { 277 | label: 'Sample size', 278 | format: (r: ResultStats) => r.result.millis.length.toString(), 279 | }; 280 | 281 | const bytesSentDimension: Dimension = { 282 | label: 'Bytes', 283 | format: (r: ResultStats) => (r.result.bytesSent / 1024).toFixed(2) + ' KiB', 284 | }; 285 | 286 | const runtimeConfidenceIntervalDimension: Dimension = { 287 | label: 'Avg time', 288 | tableConfig: { 289 | alignment: 'right', 290 | }, 291 | format: (r: ResultStats) => formatConfidenceInterval(r.stats.meanCI, milli), 292 | }; 293 | 294 | function formatDifference({absolute, relative}: Difference): string { 295 | let word, rel, abs; 296 | if (absolute.low > 0 && relative.low > 0) { 297 | word = `[bold red]{slower}`; 298 | rel = formatConfidenceInterval(relative, percent); 299 | abs = formatConfidenceInterval(absolute, milli); 300 | } else if (absolute.high < 0 && relative.high < 0) { 301 | word = `[bold green]{faster}`; 302 | rel = formatConfidenceInterval(negate(relative), percent); 303 | abs = formatConfidenceInterval(negate(absolute), milli); 304 | } else { 305 | word = `[bold blue]{unsure}`; 306 | rel = formatConfidenceInterval(relative, (n) => colorizeSign(n, percent)); 307 | abs = formatConfidenceInterval(absolute, (n) => colorizeSign(n, milli)); 308 | } 309 | 310 | return ansi.format(`${word}\n${rel}\n${abs}`); 311 | } 312 | 313 | function percent(n: number): string { 314 | return (n * 100).toFixed(0) + '%'; 315 | } 316 | 317 | function milli(n: number): string { 318 | return n.toFixed(2) + 'ms'; 319 | } 320 | 321 | function negate(ci: ConfidenceInterval): ConfidenceInterval { 322 | return { 323 | low: -ci.high, 324 | high: -ci.low, 325 | }; 326 | } 327 | 328 | /** 329 | * Create a function that will return the shortest unambiguous label for a 330 | * result, given the full array of results. 
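 *
 * For example, if every result shares the same benchmark name and version but
 * was collected in two different browsers, each label is just the browser
 * name (e.g. "chrome" vs "firefox").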
331 | */ 332 | function makeUniqueLabelFn( 333 | results: BenchmarkResult[] 334 | ): (result: BenchmarkResult) => string { 335 | const names = new Set(); 336 | const versions = new Set(); 337 | const browsers = new Set(); 338 | for (const result of results) { 339 | names.add(result.name); 340 | versions.add(result.version); 341 | browsers.add(result.browser.name); 342 | } 343 | return (result: BenchmarkResult) => { 344 | const fields: string[] = []; 345 | if (names.size > 1) { 346 | fields.push(result.name); 347 | } 348 | if (versions.size > 1) { 349 | fields.push(result.version); 350 | } 351 | if (browsers.size > 1) { 352 | fields.push(result.browser.name); 353 | } 354 | return fields.join('\n'); 355 | }; 356 | } 357 | 358 | /** 359 | * Create a function that will return the shortest unambiguous label for a 360 | * benchmark spec, given the full array of specs 361 | */ 362 | export function makeUniqueSpecLabelFn( 363 | specs: BenchmarkSpec[] 364 | ): (spec: BenchmarkSpec) => string { 365 | const names = new Set(); 366 | const versions = new Set(); 367 | const browsers = new Set(); 368 | for (const spec of specs) { 369 | names.add(spec.name); 370 | browsers.add(spec.browser.name); 371 | 372 | if (spec.url.kind === 'local' && spec.url.version !== undefined) { 373 | versions.add(spec.url.version.label); 374 | } 375 | } 376 | return (spec: BenchmarkSpec) => { 377 | const fields: string[] = []; 378 | if (names.size > 1) { 379 | if (spec.name.startsWith('http://')) { 380 | fields.push(spec.name.slice(6)); 381 | } else if (spec.name.startsWith('https://')) { 382 | fields.push(spec.name.slice(7)); 383 | } else { 384 | fields.push(spec.name); 385 | } 386 | } 387 | if ( 388 | versions.size > 1 && 389 | spec.url.kind === 'local' && 390 | spec.url.version !== undefined 391 | ) { 392 | fields.push(spec.url.version.label); 393 | } 394 | if (browsers.size > 1) { 395 | fields.push(spec.browser.name); 396 | } 397 | return fields.join('-'); 398 | }; 399 | } 400 | 401 | /** 402 | * A one-line summary of a benchmark, e.g. for a progress bar: 403 | * 404 | * chrome my-benchmark [@my-version] 405 | */ 406 | export function benchmarkOneLiner(spec: BenchmarkSpec) { 407 | let maybeVersion = ''; 408 | if (spec.url.kind === 'local' && spec.url.version !== undefined) { 409 | maybeVersion = ` [@${spec.url.version.label}]`; 410 | } 411 | return `${spec.browser.name} ${spec.name}${maybeVersion}`; 412 | } 413 | -------------------------------------------------------------------------------- /src/github.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import got from 'got'; 8 | import * as jsonwebtoken from 'jsonwebtoken'; 9 | 10 | /** 11 | * Configuration data needed to create a GitHub Check. 12 | * 13 | * GitHub Checks are attached to a particular commit in a particular repo, and 14 | * are created by GitHub Apps, which are installed into an org or repo. 15 | * 16 | * More info at https://developer.github.com/v3/apps/ 17 | * 18 | * Note that we do not currently manage a generally-accessible GitHub App. We 19 | * only support a fully self-service integration, whereby users are expected to 20 | * create their own GitHub App, install it to their repos, grant full power to 21 | * this binary to act as that App via a private key, and then piggyback on e.g. 22 | * Travis CI to actually run the benchmarks. 
This avoids the need to run any 23 | * services for the time being, but still lets us have our own standalone Check 24 | * tab in the GitHub UI. 25 | */ 26 | export interface CheckConfig { 27 | label: string; 28 | appId: number; 29 | installationId: number; 30 | repo: string; 31 | commit: string; 32 | } 33 | 34 | /** 35 | * Parse the --github-check flag. 36 | */ 37 | export function parseGithubCheckFlag(flag: string): CheckConfig { 38 | const parsed = JSON.parse(flag) as Partial; 39 | if ( 40 | !parsed.appId || 41 | !parsed.installationId || 42 | !parsed.repo || 43 | !parsed.commit 44 | ) { 45 | throw new Error( 46 | `Invalid --github-check flag. Must be a JSON object ` + 47 | `with properties: appId, installationId, repo, and commit.` 48 | ); 49 | } 50 | return { 51 | label: String(parsed.label || 'Tachometer Benchmarks'), 52 | appId: Number(parsed.appId), 53 | installationId: Number(parsed.installationId), 54 | repo: String(parsed.repo), 55 | commit: String(parsed.commit), 56 | }; 57 | } 58 | 59 | /** 60 | * Create a pending GitHub check object and return a function that will mark 61 | * the check completed with the given markdown. 62 | */ 63 | export async function createCheck( 64 | config: CheckConfig 65 | ): Promise<(markdown: string) => void> { 66 | const {label, appId, installationId, repo, commit} = config; 67 | 68 | // We can directly store our GitHub App private key as a secret Travis 69 | // environment variable (as opposed to committing it as a file and 70 | // configuring to Travis decrypt it), but we have to be careful with the 71 | // spaces and newlines that PEM files have, since Travis does a raw Bash 72 | // substitution when it sets the variable. 73 | // 74 | // Given a PEM file from GitHub, the following command will escape spaces 75 | // and newlines so that it can be safely pasted into the Travis UI. The 76 | // spaces will get unescaped by Bash, and we'll unescape newlines ourselves. 77 | // 78 | // cat .pem \ 79 | // | awk '{printf "%s\\\\n", $0}' | sed 's/ /\\ /g' 80 | const appPrivateKey = (process.env.GITHUB_APP_PRIVATE_KEY || '') 81 | .trim() 82 | .replace(/\\n/g, '\n'); 83 | if (appPrivateKey === '') { 84 | throw new Error( 85 | 'Missing or empty GITHUB_APP_PRIVATE_KEY environment variable, ' + 86 | 'which is required when using --github-check.' 87 | ); 88 | } 89 | const appToken = getAppToken(appId, appPrivateKey); 90 | const installationToken = await getInstallationToken({ 91 | installationId, 92 | appToken, 93 | }); 94 | 95 | // Create the initial Check Run run now, so that it will show up in the 96 | // GitHub UI as pending. 97 | const checkId = await createCheckRun({ 98 | label, 99 | repo, 100 | commit, 101 | installationToken, 102 | }); 103 | 104 | return (markdown: string) => 105 | completeCheckRun({label, repo, installationToken, checkId, markdown}); 106 | } 107 | 108 | /** 109 | * Create a JSON Web Token (https://tools.ietf.org/html/rfc7519), which allows 110 | * us to perform actions as a GitHub App. 111 | * 112 | * @param appId GitHub App ID. Can be found on the GitHub App settings page. 113 | * @param privateKey Text of a PEM private key. Can be generated from the GitHub 114 | * App settings page. 
More info at 115 | * https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/ 116 | */ 117 | function getAppToken(appId: number, privateKey: string): string { 118 | const expireMinutes = 10; 119 | const issuedTimestamp = Math.floor(Date.now() / 1000); 120 | const expireTimestamp = issuedTimestamp + expireMinutes * 60; 121 | const payload = { 122 | iss: appId, // (iss)uer 123 | iat: issuedTimestamp, // (i)ssued (at) 124 | exp: expireTimestamp, // (exp)iration time 125 | }; 126 | return jsonwebtoken.sign(payload, privateKey, {algorithm: 'RS256'}); 127 | } 128 | 129 | /** 130 | * Create an access token which allows us to perform actions as a GitHub App 131 | * Installation. 132 | */ 133 | async function getInstallationToken({ 134 | installationId, 135 | appToken, 136 | }: { 137 | installationId: number; 138 | appToken: string; 139 | }): Promise { 140 | const resp = await got.post( 141 | `https://api.github.com/installations/${installationId}/access_tokens`, 142 | { 143 | headers: { 144 | Accept: 'application/vnd.github.machine-man-preview+json', 145 | Authorization: `Bearer ${appToken}`, 146 | }, 147 | } 148 | ); 149 | const data = JSON.parse(resp.body) as {token: string}; 150 | return data.token; 151 | } 152 | 153 | /** 154 | * Create a new GitHub Check Run (a single invocation of a Check on some commit) 155 | * and return its identifier. 156 | */ 157 | async function createCheckRun({ 158 | label, 159 | repo, 160 | commit, 161 | installationToken, 162 | }: { 163 | label: string; 164 | repo: string; 165 | commit: string; 166 | installationToken: string; 167 | }): Promise { 168 | const resp = await got.post( 169 | `https://api.github.com/repos/${repo}/check-runs`, 170 | { 171 | headers: { 172 | Accept: 'application/vnd.github.antiope-preview+json', 173 | Authorization: `Bearer ${installationToken}`, 174 | }, 175 | // https://developer.github.com/v3/checks/runs/#parameters 176 | body: JSON.stringify({ 177 | head_sha: commit, 178 | name: label, 179 | }), 180 | } 181 | ); 182 | const data = JSON.parse(resp.body) as {id: string}; 183 | return data.id; 184 | } 185 | 186 | /** 187 | * Update a GitHub Check run with the given markdown text and mark it as 188 | * complete. 189 | */ 190 | async function completeCheckRun({ 191 | label, 192 | repo, 193 | installationToken, 194 | checkId, 195 | markdown, 196 | }: { 197 | label: string; 198 | repo: string; 199 | checkId: string; 200 | markdown: string; 201 | installationToken: string; 202 | }) { 203 | await got.patch( 204 | `https://api.github.com/repos/${repo}/check-runs/${checkId}`, 205 | { 206 | headers: { 207 | Accept: 'application/vnd.github.antiope-preview+json', 208 | Authorization: `Bearer ${installationToken}`, 209 | }, 210 | // https://developer.github.com/v3/checks/runs/#parameters-1 211 | body: JSON.stringify({ 212 | name: label, 213 | completed_at: new Date().toISOString(), 214 | // Note that in the future we will likely want to be able to report 215 | // a failing check (e.g. if there appears to be a difference greater 216 | // than some threshold). 
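        // 'neutral' marks the check as completed without a pass/fail verdict;
        // the Checks API also accepts conclusions such as 'success' and 'failure'.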
217 | conclusion: 'neutral', 218 | output: { 219 | title: label, 220 | summary: 'Benchmark results', 221 | text: markdown, 222 | }, 223 | }), 224 | } 225 | ); 226 | } 227 | -------------------------------------------------------------------------------- /src/install.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2020 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {exec} from 'child_process'; 8 | import {promisify} from 'util'; 9 | import {promises as fs} from 'fs'; 10 | import path from 'path'; 11 | import {install} from 'pkg-install'; 12 | import {pkgUp} from 'pkg-up'; 13 | 14 | const execPromise = promisify(exec); 15 | 16 | import * as url from 'url'; 17 | const __dirname = url.fileURLToPath(new URL('.', import.meta.url)); 18 | 19 | export type OnDemandDependencies = Map; 20 | 21 | /** 22 | * Asynchronously checks to see if a module is resolvable. This gives the 23 | * invoker information that is similar to what they would get from using 24 | * require.resolve. However, require.resolve is backed by an unclearable 25 | * internal cache, which this helper bypasses via a child process. 26 | * 27 | * @see https://github.com/nodejs/node/issues/31803 28 | */ 29 | export const assertResolvable = async (id: string) => { 30 | await execPromise( 31 | `"${process.execPath}" -e "require.resolve(process.env.ID)"`, 32 | { 33 | cwd: (await getPackageRoot()) || process.cwd(), 34 | env: {...process.env, ID: id}, 35 | } 36 | ); 37 | }; 38 | 39 | export interface ContainsOnDemandDependencies { 40 | [index: string]: unknown; 41 | installsOnDemand?: string[]; 42 | } 43 | 44 | export const getPackageJSONPath = async (): Promise => { 45 | // NOTE: This used to search starting with module.path, but module.path was 46 | // not added until Node.js v11. In order to preserve Node.js v10 compatibility 47 | // we use __dirname instead, which should be mostly the same thing (docs are 48 | // fuzzy on the specific differences, unfortunately). 49 | // @see https://nodejs.org/docs/latest/api/modules.html#modules_module_path 50 | // @see https://nodejs.org/docs/latest/api/modules.html#modules_dirname 51 | return pkgUp({cwd: __dirname}); 52 | }; 53 | 54 | export const getPackageRoot = async (): Promise => { 55 | const packageJSONPath = await getPackageJSONPath(); 56 | return packageJSONPath != null ? path.dirname(packageJSONPath) : null; 57 | }; 58 | 59 | /** 60 | * Extract a map of allowed "on-demand" dependencies from a given 61 | * package.json-shaped object. 62 | */ 63 | export const onDemandDependenciesFromPackageJSON = ( 64 | packageJSON: ContainsOnDemandDependencies 65 | ): OnDemandDependencies => { 66 | const onDemandDependencies = new Map(); 67 | 68 | const onDemandList: string[] = packageJSON?.installsOnDemand || []; 69 | 70 | for (const packageName of onDemandList) { 71 | onDemandDependencies.set(packageName, '*'); 72 | } 73 | 74 | return onDemandDependencies; 75 | }; 76 | 77 | /** 78 | * So-called "on-demand" dependencies are any packages that match the 79 | * following requirements: 80 | * 81 | * - They are enumerated in the non-normative package.json field 82 | * "installsOnDemand" 83 | * 84 | * This function resolves a map of package names and semver ranges including all 85 | * packages that match these requirements. 
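 *
 * For example, a package.json entry like `"installsOnDemand": ["chromedriver"]`
 * (the package name here is purely illustrative) makes that package eligible
 * for lazy installation with an implied "*" version range.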
86 | */ 87 | export const getOnDemandDependencies = (() => { 88 | let cached: OnDemandDependencies | null = null; 89 | return async (): Promise => { 90 | if (cached == null) { 91 | const packageJSONPath = await getPackageJSONPath(); 92 | 93 | if (packageJSONPath != null) { 94 | const rawPackageJSON = await fs.readFile(packageJSONPath, { 95 | encoding: 'utf-8', 96 | }); 97 | const packageJSON = JSON.parse( 98 | rawPackageJSON.toString() 99 | ) as ContainsOnDemandDependencies; 100 | 101 | cached = onDemandDependenciesFromPackageJSON(packageJSON); 102 | } 103 | } 104 | 105 | return cached!; 106 | }; 107 | })(); 108 | 109 | /** 110 | * Install an "on-demand" package, resolving after the package has been 111 | * installed. Only packages designated as installable on-demand can be 112 | * installed this way (see documentation for "getOnDemandDependenies" for more 113 | * details). An attempt to install any other package this way will be rejected. 114 | * 115 | * On-demand packages are installed to this package's node_modules directory. 116 | * Any package that can already be resolved from this package's root directory 117 | * will be skipped. 118 | */ 119 | export const installOnDemand = async (packageName: string) => { 120 | try { 121 | await assertResolvable(packageName); 122 | return; 123 | } catch (_error) {} 124 | 125 | let dependencies = new Map(); 126 | try { 127 | dependencies = await getOnDemandDependencies(); 128 | } catch (error) { 129 | console.error(error); 130 | } 131 | 132 | if (!dependencies.has(packageName)) { 133 | throw new Error( 134 | `Package "${packageName}" cannot be installed on demand. ${dependencies}` 135 | ); 136 | } 137 | 138 | const version = dependencies.get(packageName); 139 | 140 | await install( 141 | {[packageName]: version}, 142 | { 143 | stdio: 'inherit', 144 | cwd: (await getPackageRoot()) || process.cwd(), 145 | noSave: true, 146 | } 147 | ); 148 | 149 | console.log(`Package "${packageName}@${version} installed."`); 150 | }; 151 | -------------------------------------------------------------------------------- /src/json-output.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as systeminformation from 'systeminformation'; 8 | 9 | import {BrowserConfig} from './browser.js'; 10 | import {measurementName} from './measure.js'; 11 | import {ResultStatsWithDifferences} from './stats.js'; 12 | import {BenchmarkResult, Measurement} from './types.js'; 13 | 14 | export interface JsonOutputFile { 15 | benchmarks: Benchmark[]; 16 | } 17 | 18 | interface BrowserConfigResult extends BrowserConfig { 19 | userAgent?: string; 20 | } 21 | 22 | interface Benchmark { 23 | name: string; 24 | bytesSent: number; 25 | version?: string; 26 | measurement: Measurement; 27 | browser?: BrowserConfigResult; 28 | mean: ConfidenceInterval; 29 | differences: Array; 30 | samples: number[]; 31 | } 32 | 33 | interface Difference { 34 | absolute: ConfidenceInterval; 35 | percentChange: ConfidenceInterval; 36 | } 37 | 38 | interface ConfidenceInterval { 39 | low: number; 40 | high: number; 41 | } 42 | 43 | export function jsonOutput( 44 | results: ResultStatsWithDifferences[] 45 | ): JsonOutputFile { 46 | const benchmarks: Benchmark[] = []; 47 | for (const result of results) { 48 | const differences: Array = []; 49 | for (const difference of result.differences) { 50 | if (difference === null) { 51 | differences.push(null); 52 | } else { 53 
| differences.push({ 54 | absolute: { 55 | low: difference.absolute.low, 56 | high: difference.absolute.high, 57 | }, 58 | percentChange: { 59 | low: difference.relative.low * 100, 60 | high: difference.relative.high * 100, 61 | }, 62 | }); 63 | } 64 | } 65 | benchmarks.push({ 66 | name: result.result.name, 67 | bytesSent: result.result.bytesSent, 68 | version: result.result.version ? result.result.version : undefined, 69 | measurement: { 70 | name: measurementName(result.result.measurement), 71 | ...result.result.measurement, 72 | }, 73 | browser: { 74 | ...result.result.browser, 75 | userAgent: result.result.userAgent, 76 | }, 77 | mean: { 78 | low: result.stats.meanCI.low, 79 | high: result.stats.meanCI.high, 80 | }, 81 | differences, 82 | samples: result.result.millis, 83 | }); 84 | } 85 | return {benchmarks}; 86 | } 87 | 88 | // TODO(aomarks) Remove this in next major version. 89 | export interface LegacyJsonOutputFormat { 90 | benchmarks: BenchmarkResult[]; 91 | datetime: string; // YYYY-MM-DDTHH:mm:ss.sssZ 92 | system: { 93 | cpu: { 94 | manufacturer: string; 95 | model: string; 96 | family: string; 97 | speed: string; 98 | cores: number; 99 | }; 100 | load: { 101 | average: number; 102 | current: number; 103 | }; 104 | battery: { 105 | hasBattery: boolean; 106 | connected: boolean; 107 | }; 108 | memory: { 109 | total: number; 110 | free: number; 111 | used: number; 112 | active: number; 113 | available: number; 114 | }; 115 | }; 116 | } 117 | 118 | // TODO(aomarks) Remove this in next major version. 119 | export async function legacyJsonOutput( 120 | results: BenchmarkResult[] 121 | ): Promise { 122 | // TODO Add git info. 123 | const battery = await systeminformation.battery(); 124 | const cpu = await systeminformation.cpu(); 125 | const currentLoad = await systeminformation.currentLoad(); 126 | const memory = await systeminformation.mem(); 127 | return { 128 | benchmarks: results, 129 | datetime: new Date().toISOString(), 130 | system: { 131 | cpu: { 132 | manufacturer: cpu.manufacturer, 133 | model: cpu.model, 134 | family: cpu.family, 135 | speed: cpu.speed.toFixed(2), 136 | cores: cpu.cores, 137 | }, 138 | load: { 139 | average: currentLoad.avgLoad, 140 | current: currentLoad.currentLoad, 141 | }, 142 | battery: { 143 | hasBattery: battery.hasBattery, 144 | connected: battery.acConnected, 145 | }, 146 | memory: { 147 | total: memory.total, 148 | free: memory.free, 149 | used: memory.used, 150 | active: memory.active, 151 | available: memory.available, 152 | }, 153 | }, 154 | }; 155 | } 156 | -------------------------------------------------------------------------------- /src/manual.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import ansi from 'ansi-escape-sequences'; 8 | import {Server} from './server.js'; 9 | import {Config} from './config.js'; 10 | import {specUrl} from './specs.js'; 11 | import {BenchmarkSpec} from './types.js'; 12 | 13 | /** 14 | * Let the user run benchmarks manually. This process will not exit until 15 | * the user sends a termination signal. 
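 *
 * Each benchmark URL is printed to the console; whenever a page reports a
 * result via the callback API, the measured time in milliseconds is logged.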
16 | */ 17 | export async function manualMode( 18 | config: Config, 19 | servers: Map 20 | ) { 21 | if ( 22 | config.csvFileStats || 23 | config.csvFileRaw || 24 | config.jsonFile || 25 | config.legacyJsonFile 26 | ) { 27 | throw new Error(`Can't save results in manual mode`); 28 | } 29 | 30 | console.log('\nVisit these URLs in any browser:'); 31 | const allServers = new Set([...servers.values()]); 32 | for (const spec of config.benchmarks) { 33 | console.log(); 34 | if (spec.url.kind === 'local') { 35 | console.log( 36 | `${spec.name}${spec.url.queryString}` + 37 | (spec.url.version !== undefined 38 | ? ` [@${spec.url.version.label}]` 39 | : '') 40 | ); 41 | } 42 | console.log(ansi.format(`[yellow]{${specUrl(spec, servers, config)}}`)); 43 | } 44 | 45 | for (const server of [...allServers]) { 46 | (async function () { 47 | while (true) { 48 | const result = await server.nextResults(); 49 | server.endSession(); 50 | console.log(`${result.millis.toFixed(3)} ms`); 51 | } 52 | })(); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/measure.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2020 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as webdriver from 'selenium-webdriver'; 8 | 9 | import {Server} from './server.js'; 10 | import {Measurement, PerformanceEntryMeasurement} from './types.js'; 11 | import {throwUnreachable} from './util.js'; 12 | 13 | /** 14 | * Try to take a measurement in milliseconds from the given browser. Returns 15 | * undefined if the measurement is not available (which may just mean we need to 16 | * wait some more time). 17 | */ 18 | export async function measure( 19 | driver: webdriver.WebDriver, 20 | measurement: Measurement, 21 | server: Server | undefined 22 | ): Promise { 23 | switch (measurement.mode) { 24 | case 'callback': 25 | if (server === undefined) { 26 | throw new Error('Internal error: no server for spec'); 27 | } 28 | return (await server.nextResults()).millis; 29 | case 'expression': 30 | return queryForExpression(driver, measurement.expression); 31 | case 'performance': 32 | return queryForPerformanceEntry(driver, measurement); 33 | } 34 | throwUnreachable( 35 | measurement, 36 | `Internal error: unknown measurement type ` + JSON.stringify(measurement) 37 | ); 38 | } 39 | 40 | /** 41 | * https://developer.mozilla.org/en-US/docs/Web/API/PerformanceEntry 42 | * 43 | * Note a more complete interface for this is defined in the standard 44 | * lib.dom.d.ts, but we don't want to depend on that since it would make all 45 | * DOM types ambiently defined. 46 | */ 47 | interface PerformanceEntry { 48 | entryType: 49 | | 'frame' 50 | | 'navigation' 51 | | 'resource' 52 | | 'mark' 53 | | 'measure' 54 | | 'paint' 55 | | 'longtask'; 56 | name: string; 57 | startTime: number; 58 | duration: number; 59 | } 60 | 61 | /** 62 | * Query the browser for the Performance Entry matching the given criteria. 63 | * Returns undefined if no matching entry is found. Throws if the performance 64 | * entry has an unsupported type. If there are multiple entries matching the 65 | * same criteria, returns only the first one. 
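 *
 * For example, a page that calls performance.measure('render', 'start', 'end')
 * can be measured with a config like {mode: 'performance', entryName: 'render'},
 * which reports that entry's duration.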
66 | */ 67 | async function queryForPerformanceEntry( 68 | driver: webdriver.WebDriver, 69 | measurement: PerformanceEntryMeasurement 70 | ): Promise { 71 | const escaped = escapeStringLiteral(measurement.entryName); 72 | const script = `return window.performance.getEntriesByName(\`${escaped}\`);`; 73 | const entries = (await driver.executeScript(script)) as PerformanceEntry[]; 74 | if (entries.length === 0) { 75 | return undefined; 76 | } 77 | if (entries.length > 1) { 78 | console.log( 79 | 'WARNING: Found multiple performance marks/measurements with name ' + 80 | `"${measurement.entryName}". This likely indicates an error. ` + 81 | 'Picking the first one.' 82 | ); 83 | } 84 | const entry = entries[0]; 85 | switch (entry.entryType) { 86 | case 'measure': 87 | return entry.duration; 88 | case 'mark': 89 | case 'paint': 90 | return entry.startTime; 91 | default: 92 | // We may want to support other entry types, but we'll need to investigate 93 | // how to interpret them, and we may need additional criteria to decide 94 | // which exact numbers to report from them. 95 | throw new Error( 96 | `Performance entry type not supported: ${entry.entryType}` 97 | ); 98 | } 99 | } 100 | 101 | /** 102 | * Execute the given expression in the browser and return the result, if it is a 103 | * positive number. If null or undefined, returns undefined. If some other type, 104 | * throws. 105 | */ 106 | async function queryForExpression( 107 | driver: webdriver.WebDriver, 108 | expression: string 109 | ): Promise { 110 | const result = (await driver.executeScript( 111 | `return (${expression});` 112 | )) as unknown; 113 | if (result !== undefined && result !== null) { 114 | if (typeof result !== 'number') { 115 | throw new Error( 116 | `'${expression}' was type ` + `${typeof result}, expected number.` 117 | ); 118 | } 119 | if (result < 0) { 120 | throw new Error(`'${expression}' was negative: ${result}`); 121 | } 122 | return result; 123 | } 124 | } 125 | 126 | /** 127 | * Escape a string such that it can be safely embedded in a JavaScript template 128 | * literal (backtick string). 129 | */ 130 | function escapeStringLiteral(unescaped: string): string { 131 | return unescaped 132 | .replace(/\\/g, '\\\\') 133 | .replace(/`/g, '\\`') 134 | .replace(/\$/g, '\\$'); 135 | } 136 | 137 | /** 138 | * Return a good-enough label for the given measurement, to disambiguate cases 139 | * where there are multiple measurements on the same page. 140 | */ 141 | export function measurementName(measurement: Measurement): string { 142 | if (measurement.name) { 143 | return measurement.name; 144 | } 145 | 146 | switch (measurement.mode) { 147 | case 'callback': 148 | return 'callback'; 149 | case 'expression': 150 | return measurement.expression; 151 | case 'performance': 152 | return measurement.entryName === 'first-contentful-paint' 153 | ? 
'fcp' 154 | : measurement.entryName; 155 | } 156 | throwUnreachable( 157 | measurement, 158 | `Internal error: unknown measurement type ` + JSON.stringify(measurement) 159 | ); 160 | } 161 | -------------------------------------------------------------------------------- /src/server.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as http from 'http'; 8 | import * as net from 'net'; 9 | import * as path from 'path'; 10 | import {Stream} from 'stream'; 11 | 12 | import Koa from 'koa'; 13 | import mount from 'koa-mount'; 14 | import send from 'koa-send'; 15 | import getStream from 'get-stream'; 16 | import serve from 'koa-static'; 17 | import bodyParser from 'koa-bodyparser'; 18 | import {nodeResolve} from 'koa-node-resolve'; 19 | 20 | import {BenchmarkResponse, Deferred} from './types.js'; 21 | import {NpmInstall} from './versions.js'; 22 | 23 | import * as url from 'url'; 24 | const __dirname = url.fileURLToPath(new URL('.', import.meta.url)); 25 | 26 | export interface ServerOpts { 27 | host: string; 28 | ports: number[]; 29 | root: string; 30 | npmInstalls: NpmInstall[]; 31 | mountPoints: MountPoint[]; 32 | resolveBareModules: boolean; 33 | cache: boolean; 34 | } 35 | 36 | export interface MountPoint { 37 | diskPath: string; 38 | urlPath: string; 39 | } 40 | 41 | const clientLib = path.resolve(__dirname, '..', 'client', 'lib'); 42 | 43 | export interface Session { 44 | bytesSent: number; 45 | userAgent: string; 46 | } 47 | 48 | export class Server { 49 | readonly url: string; 50 | readonly port: number; 51 | private readonly server: net.Server; 52 | private session: Session = {bytesSent: 0, userAgent: ''}; 53 | private deferredResults = new Deferred(); 54 | private readonly urlCache = new Map< 55 | string, 56 | { 57 | status: number; 58 | headers: {[key: string]: string}; 59 | body: string | null | undefined; 60 | } 61 | >(); 62 | 63 | static start(opts: ServerOpts): Promise { 64 | const server = http.createServer(); 65 | const ports = [...opts.ports]; 66 | 67 | return new Promise((resolve, reject) => { 68 | const tryNextPort = () => { 69 | if (ports.length === 0) { 70 | reject(`No ports available, tried: ${opts.ports.join(', ')}`); 71 | } else { 72 | server.listen({host: opts.host, port: ports.shift()}); 73 | } 74 | }; 75 | 76 | server.on('listening', () => resolve(new Server(server, opts))); 77 | 78 | server.on('error', (e: {code?: string}) => { 79 | if (e.code === 'EADDRINUSE' || e.code === 'EACCES') { 80 | tryNextPort(); 81 | } else { 82 | reject(e); 83 | } 84 | }); 85 | 86 | tryNextPort(); 87 | }); 88 | } 89 | 90 | constructor(server: http.Server, opts: ServerOpts) { 91 | this.server = server; 92 | const app = new Koa(); 93 | 94 | app.use(bodyParser()); 95 | app.use(mount('/submitResults', this.submitResults.bind(this))); 96 | app.use(this.instrumentRequests.bind(this)); 97 | if (opts.cache) { 98 | app.use(this.cache.bind(this)); 99 | } 100 | app.use(this.serveBenchLib.bind(this)); 101 | 102 | if (opts.resolveBareModules === true) { 103 | const npmRoot = 104 | opts.npmInstalls.length > 0 105 | ? 
opts.npmInstalls[0].installDir 106 | : opts.root; 107 | 108 | app.use( 109 | nodeResolve({ 110 | root: npmRoot, 111 | // TODO Use default logging options after issues resolved: 112 | // https://github.com/Polymer/koa-node-resolve/issues/16 113 | // https://github.com/Polymer/koa-node-resolve/issues/17 114 | logger: false, 115 | }) 116 | ); 117 | } 118 | for (const {diskPath, urlPath} of opts.mountPoints) { 119 | app.use(mount(urlPath, serve(diskPath, {index: 'index.html'}))); 120 | } 121 | 122 | this.server.on('request', app.callback()); 123 | const address = this.server.address() as net.AddressInfo; 124 | let host = address.address; 125 | if (address.family === 'IPv6') { 126 | host = `[${host}]`; 127 | } 128 | this.port = address.port; 129 | this.url = `http://${host}:${this.port}`; 130 | } 131 | 132 | /** 133 | * Mark the end of one session, return the data instrumented from it, and 134 | * begin a new session. 135 | */ 136 | endSession(): Session { 137 | const session = this.session; 138 | this.session = {bytesSent: 0, userAgent: ''}; 139 | this.deferredResults = new Deferred(); 140 | return session; 141 | } 142 | 143 | async nextResults(): Promise { 144 | return this.deferredResults.promise; 145 | } 146 | 147 | async close() { 148 | return new Promise((resolve, reject) => { 149 | this.server.close((error: unknown) => { 150 | if (error) { 151 | reject(error); 152 | } else { 153 | resolve(); 154 | } 155 | }); 156 | }); 157 | } 158 | 159 | private async instrumentRequests( 160 | ctx: Koa.Context, 161 | next: () => Promise 162 | ): Promise { 163 | const session = this.session; 164 | if (session === undefined) { 165 | return next(); 166 | } 167 | 168 | session.userAgent = ctx.headers['user-agent'] ?? ''; 169 | // Note this assumes serial runs, as we guarantee in automatic mode. 170 | // If we ever wanted to support parallel requests, we would require 171 | // some kind of session tracking. 172 | await next(); 173 | if (typeof ctx.response.length === 'number') { 174 | session.bytesSent += ctx.response.length; 175 | } else if (ctx.status === 200) { 176 | console.log( 177 | `No response length for 200 response for ${ctx.url}, ` + 178 | `byte count may be inaccurate.` 179 | ); 180 | } 181 | } 182 | 183 | /** 184 | * Cache all downstream middleware responses by URL in memory. This is 185 | * especially helpful when bare module resolution is enabled, because that 186 | * requires expensive parsing of all HTML and JavaScript that we really don't 187 | * want to do for every benchmark sample. 188 | */ 189 | private async cache(ctx: Koa.Context, next: () => Promise) { 190 | const entry = this.urlCache.get(ctx.url); 191 | if (entry !== undefined) { 192 | ctx.response.set(entry.headers); 193 | ctx.response.body = entry.body; 194 | // Note we must set status after we set body, because when we set body to 195 | // undefined (which happens on e.g. 404s), Koa overrides the status to 196 | // 204. 197 | ctx.response.status = entry.status; 198 | return; 199 | } 200 | 201 | await next(); 202 | const body = ctx.response.body as 203 | | string 204 | | Buffer 205 | | Stream 206 | | null 207 | | undefined; 208 | let bodyString; 209 | if (typeof body === 'string') { 210 | bodyString = body; 211 | } else if (Buffer.isBuffer(body)) { 212 | bodyString = body.toString(); 213 | } else if (isStream(body)) { 214 | bodyString = await getStream(body); 215 | // We consumed the stream. 
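      // Re-assign the buffered string as the body so this first response can
      // still be served; later requests for the same URL hit the cache below.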
216 | ctx.response.body = bodyString; 217 | } else if (body === null || body === undefined) { 218 | // The static middleware sets no body for errors. Koa automatically 219 | // creates a body for errors later. Just cache as-is so that the same 220 | // thing happens on cache hits. 221 | bodyString = body; 222 | } else { 223 | throw new Error(`Unknown response type ${typeof body} for ${ctx.url}`); 224 | } 225 | this.urlCache.set(ctx.url, { 226 | body: bodyString, 227 | status: ctx.response.status, 228 | headers: ctx.response.headers as {[key: string]: string}, 229 | }); 230 | } 231 | 232 | private async serveBenchLib(ctx: Koa.Context, next: () => Promise) { 233 | if (ctx.path === '/bench.js') { 234 | await send(ctx, 'bench.js', {root: clientLib}); 235 | } else { 236 | await next(); 237 | } 238 | } 239 | 240 | private async submitResults(ctx: Koa.Context) { 241 | this.deferredResults.resolve(ctx.request.body as BenchmarkResponse); 242 | ctx.body = 'ok'; 243 | } 244 | } 245 | 246 | function isStream(value: unknown): value is Stream { 247 | return ( 248 | value !== null && 249 | typeof value === 'object' && 250 | typeof (value as {pipe: (() => unknown) | undefined}).pipe === 'function' 251 | ); 252 | } 253 | -------------------------------------------------------------------------------- /src/specs.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as path from 'path'; 8 | 9 | import { 10 | parseBrowserConfigString, 11 | TraceConfig, 12 | validateBrowserConfig, 13 | WindowSize, 14 | } from './browser.js'; 15 | import {Config, urlFromLocalPath} from './config.js'; 16 | import * as defaults from './defaults.js'; 17 | import {Opts} from './flags.js'; 18 | import {Server} from './server.js'; 19 | import { 20 | BenchmarkSpec, 21 | LocalUrl, 22 | Measurement, 23 | PackageVersion, 24 | RemoteUrl, 25 | } from './types.js'; 26 | import {isHttpUrl, throwUnreachable} from './util.js'; 27 | import {parsePackageVersions} from './versions.js'; 28 | 29 | /** 30 | * Derive the set of benchmark specifications we should run according to the 31 | * given options, which may require checking the layout on disk of the 32 | * benchmarks/ directory. 33 | */ 34 | export async function specsFromOpts(opts: Opts): Promise { 35 | let windowSize: WindowSize; 36 | if (opts['window-size']) { 37 | const match = opts['window-size'].match(/^(\d+),(\d+)$/); 38 | if (match === null) { 39 | throw new Error( 40 | `Invalid --window-size flag, must match "width,height, " ` + 41 | `but was "${opts['window-size']}"` 42 | ); 43 | } 44 | windowSize = { 45 | width: Number(match[1]), 46 | height: Number(match[2]), 47 | }; 48 | } else { 49 | windowSize = { 50 | width: defaults.windowWidth, 51 | height: defaults.windowHeight, 52 | }; 53 | } 54 | 55 | let trace: TraceConfig | undefined; 56 | if (opts['trace']) { 57 | const rawLogDir = opts['trace-log-dir']; 58 | trace = { 59 | categories: opts['trace-cat'].split(','), 60 | logDir: path.isAbsolute(rawLogDir) 61 | ? 
rawLogDir 62 | : path.join(process.cwd(), rawLogDir), 63 | }; 64 | } 65 | 66 | const browserStrings = new Set( 67 | (opts.browser || defaults.browserName) 68 | .replace(/\s+/, '') 69 | .split(',') 70 | .filter((b) => b !== '') 71 | ); 72 | if (browserStrings.size === 0) { 73 | throw new Error('At least one --browser must be specified'); 74 | } 75 | const browsers = [...browserStrings].map((str) => { 76 | const config = { 77 | ...parseBrowserConfigString(str), 78 | windowSize, 79 | }; 80 | if (trace) { 81 | config.trace = trace; 82 | } 83 | validateBrowserConfig(config); 84 | return config; 85 | }); 86 | 87 | const specs: BenchmarkSpec[] = []; 88 | 89 | const versions: Array = parsePackageVersions( 90 | opts['package-version'] 91 | ); 92 | if (versions.length === 0) { 93 | versions.push(undefined); 94 | } 95 | 96 | let measurement: Measurement | undefined; 97 | if (opts.measure === 'callback') { 98 | measurement = { 99 | mode: 'callback', 100 | }; 101 | } else if (opts.measure === 'fcp') { 102 | measurement = { 103 | mode: 'performance', 104 | entryName: 'first-contentful-paint', 105 | }; 106 | } else if (opts.measure === 'global') { 107 | measurement = { 108 | mode: 'expression', 109 | expression: 110 | opts['measurement-expression'] || defaults.measurementExpression, 111 | }; 112 | } else if (opts.measure !== undefined) { 113 | throwUnreachable( 114 | opts.measure, 115 | `Internal error: unknown measure ${JSON.stringify(opts.measure)}` 116 | ); 117 | } 118 | 119 | // Benchmark paths/URLs are the bare arguments not associated with a flag, so 120 | // they are found in _unknown. 121 | for (const argStr of opts._unknown || []) { 122 | const arg = parseBenchmarkArgument(argStr); 123 | 124 | if (arg.kind === 'remote') { 125 | const url: RemoteUrl = { 126 | kind: 'remote', 127 | url: arg.url, 128 | }; 129 | 130 | for (const browser of browsers) { 131 | const spec: BenchmarkSpec = { 132 | name: arg.alias || arg.url, 133 | browser, 134 | measurement: [ 135 | measurement === undefined ? defaults.measurement(url) : measurement, 136 | ], 137 | url, 138 | }; 139 | specs.push(spec); 140 | } 141 | } else { 142 | const root = opts.root || defaults.root; 143 | const urlPath = await urlFromLocalPath(root, arg.diskPath); 144 | let name = arg.alias; 145 | if (name === undefined) { 146 | const serverRelativePath = path.relative(root, arg.diskPath); 147 | name = serverRelativePath.replace(/\\/g, '/'); 148 | } 149 | for (const browser of browsers) { 150 | for (const version of versions) { 151 | const url: LocalUrl = { 152 | kind: 'local', 153 | urlPath, 154 | queryString: arg.queryString, 155 | version, 156 | }; 157 | const spec: BenchmarkSpec = { 158 | name, 159 | browser, 160 | measurement: [ 161 | measurement === undefined 162 | ? 
defaults.measurement(url) 163 | : measurement, 164 | ], 165 | url, 166 | }; 167 | specs.push(spec); 168 | } 169 | } 170 | } 171 | } 172 | 173 | return specs; 174 | } 175 | 176 | function parseBenchmarkArgument( 177 | str: string 178 | ): 179 | | {kind: 'remote'; url: string; alias?: string} 180 | | {kind: 'local'; diskPath: string; queryString: string; alias?: string} { 181 | if (isHttpUrl(str)) { 182 | // http://example.com 183 | return { 184 | kind: 'remote', 185 | url: str, 186 | }; 187 | } 188 | 189 | if (str.includes('=')) { 190 | const eq = str.indexOf('='); 191 | const maybeUrl = str.substring(eq + 1); 192 | if (isHttpUrl(maybeUrl)) { 193 | // foo=http://example.com 194 | return { 195 | kind: 'remote', 196 | url: maybeUrl, 197 | alias: str.substring(0, eq), 198 | }; 199 | } 200 | } 201 | 202 | let queryString = ''; 203 | if (str.includes('?')) { 204 | // a/b.html?a=b 205 | // foo=a/b.html?a=b 206 | const q = str.indexOf('?'); 207 | queryString = str.substring(q); 208 | str = str.substring(0, q); 209 | } 210 | 211 | let alias = undefined; 212 | if (str.includes('=')) { 213 | // foo=a/b.html?a=b 214 | // foo=a/b.html 215 | const eq = str.indexOf('='); 216 | alias = str.substring(0, eq); 217 | str = str.substring(eq + 1); 218 | } 219 | 220 | // a/b.html 221 | // a/b.html?a=b 222 | // foo=a/b.html 223 | // foo=a/b.html?a=b 224 | return { 225 | kind: 'local', 226 | alias, 227 | diskPath: str, 228 | queryString: queryString, 229 | }; 230 | } 231 | 232 | export function specUrl( 233 | spec: BenchmarkSpec, 234 | servers: Map, 235 | config: Config 236 | ): string { 237 | if (spec.url.kind === 'remote') { 238 | return spec.url.url; 239 | } 240 | const server = servers.get(spec); 241 | if (server === undefined) { 242 | throw new Error('Internal error: no server for spec'); 243 | } 244 | if ( 245 | config.remoteAccessibleHost !== '' && 246 | spec.browser.remoteUrl !== undefined 247 | ) { 248 | return ( 249 | 'http://' + 250 | config.remoteAccessibleHost + 251 | ':' + 252 | server.port + 253 | spec.url.urlPath + 254 | spec.url.queryString 255 | ); 256 | } 257 | return server.url + spec.url.urlPath + spec.url.queryString; 258 | } 259 | -------------------------------------------------------------------------------- /src/stats.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {BenchmarkResult} from './types.js'; 8 | import jstat from 'jstat'; 9 | 10 | interface Distribution { 11 | mean: number; 12 | variance: number; 13 | } 14 | 15 | export interface ConfidenceInterval { 16 | low: number; 17 | high: number; 18 | } 19 | 20 | export interface SummaryStats { 21 | size: number; 22 | mean: number; 23 | meanCI: ConfidenceInterval; 24 | variance: number; 25 | standardDeviation: number; 26 | relativeStandardDeviation: number; 27 | } 28 | 29 | export interface ResultStats { 30 | result: BenchmarkResult; 31 | stats: SummaryStats; 32 | } 33 | 34 | export interface ResultStatsWithDifferences extends ResultStats { 35 | differences: Array; 36 | } 37 | 38 | export interface Difference { 39 | absolute: ConfidenceInterval; 40 | relative: ConfidenceInterval; 41 | } 42 | 43 | export function summaryStats(data: number[]): SummaryStats { 44 | const size = data.length; 45 | const sum = sumOf(data); 46 | const mean = sum / size; 47 | const squareResiduals = data.map((val) => (val - mean) ** 2); 48 | // n - 1 due to https://en.wikipedia.org/wiki/Bessel%27s_correction 
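  // (dividing by n - 1 rather than n gives an unbiased estimate of the
  // population variance when it is computed from a sample)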
49 | const variance = sumOf(squareResiduals) / (size - 1); 50 | const stdDev = Math.sqrt(variance); 51 | return { 52 | size, 53 | mean, 54 | meanCI: confidenceInterval95( 55 | samplingDistributionOfTheMean({mean, variance}, size), 56 | size 57 | ), 58 | variance, 59 | standardDeviation: stdDev, 60 | // aka coefficient of variation 61 | relativeStandardDeviation: stdDev / mean, 62 | }; 63 | } 64 | 65 | /** 66 | * Compute a 95% confidence interval for the given distribution. 67 | */ 68 | function confidenceInterval95( 69 | {mean, variance}: Distribution, 70 | size: number 71 | ): ConfidenceInterval { 72 | // http://www.stat.yale.edu/Courses/1997-98/101/confint.htm 73 | const t = jstat.studentt.inv(1 - 0.05 / 2, size - 1); 74 | const stdDev = Math.sqrt(variance); 75 | const margin = t * stdDev; 76 | return { 77 | low: mean - margin, 78 | high: mean + margin, 79 | }; 80 | } 81 | 82 | /** 83 | * Return whether the given confidence interval contains a value. 84 | */ 85 | export function intervalContains( 86 | interval: ConfidenceInterval, 87 | value: number 88 | ): boolean { 89 | return value >= interval.low && value <= interval.high; 90 | } 91 | 92 | export interface AutoSampleConditions { 93 | absolute: number[]; 94 | relative: number[]; 95 | } 96 | 97 | /** 98 | * Return whether all difference confidence intervals are unambiguously located 99 | * on one side or the other of all given auto sample conditions. 100 | * 101 | * For example, given the conditions 0 and 1: 102 | * 103 | * <---> true 104 | * <---> false 105 | * <---> true 106 | * <---> false 107 | * <---> true 108 | * <-----------> false 109 | * 110 | * |-------|-------|-------| ms difference 111 | * -1 0 1 2 112 | */ 113 | export function autoSampleConditionsResolved( 114 | resultStats: ResultStatsWithDifferences[], 115 | conditions: AutoSampleConditions 116 | ): boolean { 117 | for (const {differences} of resultStats) { 118 | if (differences === undefined) { 119 | continue; 120 | } 121 | // TODO We may want to offer more control over which particular set of 122 | // differences we care about resolving. For the moment, a condition of 1% 123 | // means we'll try to resolve a 1% difference pairwise in both directions. 124 | for (const diff of differences) { 125 | if (diff === null) { 126 | continue; 127 | } 128 | for (const condition of conditions.absolute) { 129 | if (intervalContains(diff.absolute, condition)) { 130 | return false; 131 | } 132 | } 133 | for (const condition of conditions.relative) { 134 | if (intervalContains(diff.relative, condition)) { 135 | return false; 136 | } 137 | } 138 | } 139 | } 140 | return true; 141 | } 142 | 143 | function sumOf(data: number[]): number { 144 | return data.reduce((acc, cur) => acc + cur); 145 | } 146 | 147 | /** 148 | * Given an array of results, return a new array of results where each result 149 | * has additional statistics describing how it compares to each other result. 150 | */ 151 | export function computeDifferences( 152 | stats: ResultStats[] 153 | ): ResultStatsWithDifferences[] { 154 | return stats.map((result) => { 155 | return { 156 | ...result, 157 | differences: stats.map((other) => 158 | other === result ? 
null : computeDifference(other.stats, result.stats) 159 | ), 160 | }; 161 | }); 162 | } 163 | 164 | export function computeDifference( 165 | a: SummaryStats, 166 | b: SummaryStats 167 | ): Difference { 168 | const meanA = samplingDistributionOfTheMean(a, a.size); 169 | const meanB = samplingDistributionOfTheMean(b, b.size); 170 | const diffAbs = samplingDistributionOfAbsoluteDifferenceOfMeans(meanA, meanB); 171 | const diffRel = samplingDistributionOfRelativeDifferenceOfMeans(meanA, meanB); 172 | // We're assuming sample sizes are equal. If they're not for some reason, be 173 | // conservative and use the smaller one for the t-distribution's degrees of 174 | // freedom (since that will lead to a wider confidence interval). 175 | const minSize = Math.min(a.size, b.size); 176 | return { 177 | absolute: confidenceInterval95(diffAbs, minSize), 178 | relative: confidenceInterval95(diffRel, minSize), 179 | }; 180 | } 181 | 182 | /** 183 | * Estimates the sampling distribution of the mean. This models the distribution 184 | * of the means that we would compute under repeated samples of the given size. 185 | */ 186 | function samplingDistributionOfTheMean( 187 | dist: Distribution, 188 | sampleSize: number 189 | ): Distribution { 190 | // http://onlinestatbook.com/2/sampling_distributions/samp_dist_mean.html 191 | // http://www.stat.yale.edu/Courses/1997-98/101/sampmn.htm 192 | return { 193 | mean: dist.mean, 194 | // Error shrinks as sample size grows. 195 | variance: dist.variance / sampleSize, 196 | }; 197 | } 198 | 199 | /** 200 | * Estimates the sampling distribution of the difference of means (b-a). This 201 | * models the distribution of the difference between two means that we would 202 | * compute under repeated samples under the given two sampling distributions of 203 | * means. 204 | */ 205 | function samplingDistributionOfAbsoluteDifferenceOfMeans( 206 | a: Distribution, 207 | b: Distribution 208 | ): Distribution { 209 | // http://onlinestatbook.com/2/sampling_distributions/samplingdist_diff_means.html 210 | // http://www.stat.yale.edu/Courses/1997-98/101/meancomp.htm 211 | return { 212 | mean: b.mean - a.mean, 213 | // The error from both input sampling distributions of means accumulate. 214 | variance: a.variance + b.variance, 215 | }; 216 | } 217 | 218 | /** 219 | * Estimates the sampling distribution of the relative difference of means 220 | * ((b-a)/a). This models the distribution of the relative difference between 221 | * two means that we would compute under repeated samples under the given two 222 | * sampling distributions of means. 223 | */ 224 | function samplingDistributionOfRelativeDifferenceOfMeans( 225 | a: Distribution, 226 | b: Distribution 227 | ): Distribution { 228 | // http://blog.analytics-toolkit.com/2018/confidence-intervals-p-values-percent-change-relative-difference/ 229 | // Note that the above article also prevents an alternative calculation for a 230 | // confidence interval for relative differences, but the one chosen here is 231 | // is much simpler and passes our stochastic tests, so it seems sufficient. 
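  // This matches a first-order (delta method) approximation for the ratio:
  // Var((b - a) / a) ≈ (b²·Var(a) + a²·Var(b)) / a⁴, evaluated at the means,
  // which is exactly the variance expression below.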
232 | return { 233 | mean: (b.mean - a.mean) / a.mean, 234 | variance: 235 | (a.variance * b.mean ** 2 + b.variance * a.mean ** 2) / a.mean ** 4, 236 | }; 237 | } 238 | -------------------------------------------------------------------------------- /src/test/browser_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {suite, test} from 'mocha'; 9 | 10 | import { 11 | BrowserName, 12 | parseBrowserConfigString, 13 | validateBrowserConfig, 14 | } from '../browser.js'; 15 | import * as defaults from '../defaults.js'; 16 | 17 | suite('browser', () => { 18 | suite('parseBrowserConfigString', () => { 19 | test('chrome', () => { 20 | assert.deepEqual(parseBrowserConfigString('chrome'), { 21 | name: 'chrome', 22 | headless: false, 23 | }); 24 | }); 25 | 26 | test('chrome-headless', () => { 27 | assert.deepEqual(parseBrowserConfigString('chrome-headless'), { 28 | name: 'chrome', 29 | headless: true, 30 | }); 31 | }); 32 | 33 | test('firefox', () => { 34 | assert.deepEqual(parseBrowserConfigString('firefox'), { 35 | name: 'firefox', 36 | headless: false, 37 | }); 38 | }); 39 | 40 | test('firefox-headless', () => { 41 | assert.deepEqual(parseBrowserConfigString('firefox-headless'), { 42 | name: 'firefox', 43 | headless: true, 44 | }); 45 | }); 46 | 47 | test('safari', () => { 48 | assert.deepEqual(parseBrowserConfigString('safari'), { 49 | name: 'safari', 50 | headless: false, 51 | }); 52 | }); 53 | 54 | test('chrome remote', () => { 55 | assert.deepEqual(parseBrowserConfigString('chrome@http://example.com'), { 56 | name: 'chrome', 57 | headless: false, 58 | remoteUrl: 'http://example.com', 59 | }); 60 | }); 61 | 62 | test('chrome-headless remote', () => { 63 | assert.deepEqual( 64 | parseBrowserConfigString('chrome-headless@http://example.com'), 65 | { 66 | name: 'chrome', 67 | headless: true, 68 | remoteUrl: 'http://example.com', 69 | } 70 | ); 71 | }); 72 | }); 73 | 74 | suite('validateBrowserConfig', () => { 75 | const defaultBrowser = { 76 | name: defaults.browserName, 77 | headless: false, 78 | windowSize: { 79 | width: defaults.windowWidth, 80 | height: defaults.windowHeight, 81 | }, 82 | }; 83 | 84 | test('unsupported browser', () => { 85 | assert.throws( 86 | () => 87 | validateBrowserConfig({ 88 | ...defaultBrowser, 89 | name: 'potato' as BrowserName, 90 | }), 91 | /browser potato is not supported/i 92 | ); 93 | }); 94 | 95 | test('headless not supported', () => { 96 | assert.throws( 97 | () => 98 | validateBrowserConfig({ 99 | ...defaultBrowser, 100 | name: 'safari', 101 | headless: true, 102 | }), 103 | /browser safari does not support headless/i 104 | ); 105 | }); 106 | 107 | test('empty remote url', () => { 108 | assert.throws( 109 | () => 110 | validateBrowserConfig({ 111 | ...defaultBrowser, 112 | remoteUrl: '', 113 | }), 114 | /invalid browser remote url ""/i 115 | ); 116 | }); 117 | 118 | test('invalid remote url', () => { 119 | assert.throws( 120 | () => 121 | validateBrowserConfig({ 122 | ...defaultBrowser, 123 | remoteUrl: 'potato', 124 | }), 125 | /invalid browser remote url "potato"/i 126 | ); 127 | }); 128 | 129 | test('invalid window size', () => { 130 | assert.throws( 131 | () => 132 | validateBrowserConfig({ 133 | ...defaultBrowser, 134 | windowSize: { 135 | width: -1, 136 | height: -1, 137 | }, 138 | }), 139 | /invalid window size/i 140 | ); 141 | }); 142 | }); 143 | }); 144 | 
-------------------------------------------------------------------------------- /src/test/config_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {suite, suiteSetup, suiteTeardown, test} from 'mocha'; 9 | 10 | import {Config, makeConfig, parseAutoSampleConditions} from '../config.js'; 11 | import {parseFlags} from '../flags.js'; 12 | 13 | import {testData} from './test_helpers.js'; 14 | 15 | suite('makeConfig', function () { 16 | let prevCwd: string; 17 | 18 | suiteSetup(() => { 19 | prevCwd = process.cwd(); 20 | process.chdir(testData); 21 | }); 22 | 23 | suiteTeardown(() => { 24 | process.chdir(prevCwd); 25 | }); 26 | 27 | async function checkConfig(argv: string[], expected: Config) { 28 | const actual = await makeConfig(parseFlags(argv)); 29 | assert.deepEqual(actual, expected); 30 | } 31 | 32 | test('local file with all defaults', async () => { 33 | const argv = ['random-global.html']; 34 | const expected: Config = { 35 | mode: 'automatic', 36 | npmrc: '', 37 | sampleSize: 50, 38 | timeout: 3, 39 | root: '.', 40 | resolveBareModules: true, 41 | forceCleanNpmInstall: false, 42 | autoSampleConditions: {absolute: [], relative: [0]}, 43 | remoteAccessibleHost: '', 44 | jsonFile: '', 45 | legacyJsonFile: '', 46 | csvFileStats: '', 47 | csvFileRaw: '', 48 | githubCheck: undefined, 49 | benchmarks: [ 50 | { 51 | browser: { 52 | headless: false, 53 | name: 'chrome', 54 | windowSize: { 55 | height: 768, 56 | width: 1024, 57 | }, 58 | }, 59 | measurement: [ 60 | { 61 | mode: 'callback', 62 | }, 63 | ], 64 | name: 'random-global.html', 65 | url: { 66 | kind: 'local', 67 | queryString: '', 68 | urlPath: '/random-global.html', 69 | version: undefined, 70 | }, 71 | }, 72 | ], 73 | }; 74 | await checkConfig(argv, expected); 75 | }); 76 | 77 | test('config file', async () => { 78 | const argv = ['--config=random-global.json']; 79 | const expected: Config = { 80 | mode: 'automatic', 81 | npmrc: '', 82 | sampleSize: 50, 83 | timeout: 3, 84 | root: testData, 85 | resolveBareModules: true, 86 | forceCleanNpmInstall: false, 87 | autoSampleConditions: {absolute: [], relative: [0]}, 88 | remoteAccessibleHost: '', 89 | jsonFile: '', 90 | legacyJsonFile: '', 91 | csvFileStats: '', 92 | csvFileRaw: '', 93 | // TODO(aomarks) Be consistent about undefined vs unset. 94 | githubCheck: undefined, 95 | benchmarks: [ 96 | { 97 | browser: { 98 | headless: false, 99 | name: 'chrome', 100 | windowSize: { 101 | height: 768, 102 | width: 1024, 103 | }, 104 | }, 105 | measurement: [ 106 | { 107 | mode: 'callback', 108 | }, 109 | ], 110 | // TODO(aomarks) Why does this have a forward-slash? 
111 | name: '/random-global.html', 112 | url: { 113 | kind: 'local', 114 | queryString: '', 115 | urlPath: '/random-global.html', 116 | }, 117 | }, 118 | ], 119 | }; 120 | await checkConfig(argv, expected); 121 | }); 122 | 123 | test('config file with --manual', async () => { 124 | const argv = ['--config=random-global.json', '--manual']; 125 | const expected: Config = { 126 | mode: 'manual', 127 | npmrc: '', 128 | sampleSize: 50, 129 | timeout: 3, 130 | root: testData, 131 | resolveBareModules: true, 132 | forceCleanNpmInstall: false, 133 | autoSampleConditions: {absolute: [], relative: [0]}, 134 | remoteAccessibleHost: '', 135 | jsonFile: '', 136 | legacyJsonFile: '', 137 | csvFileStats: '', 138 | csvFileRaw: '', 139 | githubCheck: undefined, 140 | benchmarks: [ 141 | { 142 | browser: { 143 | headless: false, 144 | name: 'chrome', 145 | windowSize: { 146 | height: 768, 147 | width: 1024, 148 | }, 149 | }, 150 | measurement: [ 151 | { 152 | mode: 'callback', 153 | }, 154 | ], 155 | // TODO(aomarks) Why does this have a forward-slash? 156 | name: '/random-global.html', 157 | url: { 158 | kind: 'local', 159 | queryString: '', 160 | urlPath: '/random-global.html', 161 | }, 162 | }, 163 | ], 164 | }; 165 | await checkConfig(argv, expected); 166 | }); 167 | 168 | test('config file with output files and force clean install', async () => { 169 | const argv = [ 170 | '--config=random-global.json', 171 | '--csv-file=stats.csv', 172 | '--csv-file-raw=raw.csv', 173 | '--json-file=out.json', 174 | '--force-clean-npm-install', 175 | ]; 176 | const expected: Config = { 177 | mode: 'automatic', 178 | npmrc: '', 179 | csvFileStats: 'stats.csv', 180 | csvFileRaw: 'raw.csv', 181 | jsonFile: 'out.json', 182 | legacyJsonFile: '', 183 | forceCleanNpmInstall: true, 184 | 185 | sampleSize: 50, 186 | timeout: 3, 187 | root: testData, 188 | resolveBareModules: true, 189 | autoSampleConditions: {absolute: [], relative: [0]}, 190 | remoteAccessibleHost: '', 191 | // TODO(aomarks) Be consistent about undefined vs unset. 192 | githubCheck: undefined, 193 | benchmarks: [ 194 | { 195 | browser: { 196 | headless: false, 197 | name: 'chrome', 198 | windowSize: { 199 | height: 768, 200 | width: 1024, 201 | }, 202 | }, 203 | measurement: [ 204 | { 205 | mode: 'callback', 206 | }, 207 | ], 208 | // TODO(aomarks) Why does this have a forward-slash? 209 | name: '/random-global.html', 210 | url: { 211 | kind: 'local', 212 | queryString: '', 213 | urlPath: '/random-global.html', 214 | }, 215 | }, 216 | ], 217 | }; 218 | await checkConfig(argv, expected); 219 | }); 220 | 221 | test('config file horizons is converted to autoSampleConditions', async () => { 222 | const argv = ['--config=deprecated-horizons.json']; 223 | const expected: Config = { 224 | mode: 'automatic', 225 | npmrc: '', 226 | csvFileStats: '', 227 | csvFileRaw: '', 228 | jsonFile: '', 229 | legacyJsonFile: '', 230 | forceCleanNpmInstall: false, 231 | 232 | sampleSize: 50, 233 | timeout: 3, 234 | root: testData, 235 | resolveBareModules: true, 236 | autoSampleConditions: {absolute: [], relative: [-0.1, 0, 0.1]}, 237 | remoteAccessibleHost: '', 238 | // TODO(aomarks) Be consistent about undefined vs unset. 239 | githubCheck: undefined, 240 | benchmarks: [ 241 | { 242 | browser: { 243 | headless: false, 244 | name: 'chrome', 245 | windowSize: { 246 | height: 768, 247 | width: 1024, 248 | }, 249 | }, 250 | measurement: [ 251 | { 252 | mode: 'callback', 253 | }, 254 | ], 255 | // TODO(aomarks) Why does this have a forward-slash? 
256 | name: '/random-global.html', 257 | url: { 258 | kind: 'local', 259 | queryString: '', 260 | urlPath: '/random-global.html', 261 | }, 262 | }, 263 | ], 264 | }; 265 | await checkConfig(argv, expected); 266 | }); 267 | }); 268 | 269 | suite('parseAutoSampleConditions', function () { 270 | test('0ms', () => { 271 | assert.deepEqual(parseAutoSampleConditions(['0ms']), { 272 | absolute: [0], 273 | relative: [], 274 | }); 275 | }); 276 | 277 | test('0.1ms', () => { 278 | assert.deepEqual(parseAutoSampleConditions(['0.1ms']), { 279 | absolute: [-0.1, 0.1], 280 | relative: [], 281 | }); 282 | }); 283 | 284 | test('+0.1ms', () => { 285 | assert.deepEqual(parseAutoSampleConditions(['+0.1ms']), { 286 | absolute: [0.1], 287 | relative: [], 288 | }); 289 | }); 290 | 291 | test('-0.1ms', () => { 292 | assert.deepEqual(parseAutoSampleConditions(['-0.1ms']), { 293 | absolute: [-0.1], 294 | relative: [], 295 | }); 296 | }); 297 | 298 | test('0ms,0.1,1ms', () => { 299 | assert.deepEqual(parseAutoSampleConditions(['0ms', '0.1ms', '1ms']), { 300 | absolute: [-1, -0.1, 0, 0.1, 1], 301 | relative: [], 302 | }); 303 | }); 304 | 305 | test('0%', () => { 306 | assert.deepEqual(parseAutoSampleConditions(['0%']), { 307 | absolute: [], 308 | relative: [0], 309 | }); 310 | }); 311 | 312 | test('1%', () => { 313 | assert.deepEqual(parseAutoSampleConditions(['1%']), { 314 | absolute: [], 315 | relative: [-0.01, 0.01], 316 | }); 317 | }); 318 | 319 | test('+1%', () => { 320 | assert.deepEqual(parseAutoSampleConditions(['+1%']), { 321 | absolute: [], 322 | relative: [0.01], 323 | }); 324 | }); 325 | 326 | test('-1%', () => { 327 | assert.deepEqual(parseAutoSampleConditions(['-1%']), { 328 | absolute: [], 329 | relative: [-0.01], 330 | }); 331 | }); 332 | 333 | test('0%,1%,10%', () => { 334 | assert.deepEqual(parseAutoSampleConditions(['0%', '1%', '10%']), { 335 | absolute: [], 336 | relative: [-0.1, -0.01, 0, 0.01, 0.1], 337 | }); 338 | }); 339 | 340 | test('0ms,0.1ms,1ms,0%,1%,10%', () => { 341 | assert.deepEqual( 342 | parseAutoSampleConditions(['0ms', '0.1ms', '1ms', '0%', '1%', '10%']), 343 | { 344 | absolute: [-1, -0.1, 0, 0.1, 1], 345 | relative: [-0.1, -0.01, 0, 0.01, 0.1], 346 | } 347 | ); 348 | }); 349 | 350 | test('throws on nonsense', () => { 351 | assert.throws(() => parseAutoSampleConditions(['sailboat'])); 352 | }); 353 | 354 | test('throws on ambiguous unit', () => { 355 | assert.throws(() => parseAutoSampleConditions(['4'])); 356 | }); 357 | }); 358 | -------------------------------------------------------------------------------- /src/test/csv_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {suite, test} from 'mocha'; 9 | 10 | import {ConfigFile} from '../configfile.js'; 11 | import {formatCsvRaw, formatCsvStats} from '../csv.js'; 12 | import {fakeResults} from './test_helpers.js'; 13 | 14 | /** 15 | * It's hard to visually verify raw CSV output, so this lets us align the 16 | * columns visually, but then remove that padding before comparison. 
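 * For example (an illustrative input, not taken from the tests below):
 *   removePadding(' foo , 1.0 \n bar , 2.0 ') === 'foo,1.0\nbar,2.0\n'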
17 | */ 18 | const removePadding = (readable: string): string => 19 | readable 20 | .replace(/ *, */g, ',') 21 | .replace(/ *\n */gm, '\n') 22 | .trim() + '\n'; 23 | 24 | suite('csv', () => { 25 | test('stats: 2x2 matrix with quoting', async () => { 26 | const config: ConfigFile = { 27 | benchmarks: [ 28 | { 29 | name: 'foo', 30 | url: 'http://example.com?foo', 31 | }, 32 | { 33 | name: 'bar,baz', 34 | url: 'http://example.com?bar,baz', 35 | }, 36 | ], 37 | }; 38 | const results = await fakeResults(config); 39 | const actual = formatCsvStats(results); 40 | const expected = removePadding(` 41 | , , , vs foo, , , , "vs bar,baz", , , 42 | , ms, , % change, , ms change, , % change, , ms change, 43 | , min, max, min, max, min, max, min, max, min, max 44 | foo, 8.56459, 11.43541, , , , , -58.02419%, -41.97581%, -12.02998, -7.97002 45 | "bar,baz", 18.56459, 21.43541, 67.90324%, 132.09676%, 7.97002, 12.02998, , , , 46 | `); 47 | assert.equal(actual, expected); 48 | }); 49 | 50 | test('raw samples: 2x2 matrix with quoting', async () => { 51 | const config: ConfigFile = { 52 | sampleSize: 4, 53 | benchmarks: [ 54 | { 55 | name: 'foo', 56 | url: 'http://example.com?foo', 57 | }, 58 | { 59 | name: 'bar,baz', 60 | url: 'http://example.com?bar,baz', 61 | }, 62 | { 63 | name: 'qux', 64 | url: 'http://example.com?qux', 65 | }, 66 | ], 67 | }; 68 | const results = await fakeResults(config); 69 | const actual = formatCsvRaw(results); 70 | const expected = removePadding(` 71 | foo, "bar,baz", qux 72 | 5, 15, 25 73 | 5, 15, 25 74 | 15, 25, 35 75 | 15, 25, 35 76 | `); 77 | assert.equal(actual, expected); 78 | }); 79 | }); 80 | -------------------------------------------------------------------------------- /src/test/data/1_byte.txt: -------------------------------------------------------------------------------- 1 | 1 -------------------------------------------------------------------------------- /src/test/data/3_bytes.txt: -------------------------------------------------------------------------------- 1 | 123 -------------------------------------------------------------------------------- /src/test/data/alt_npm_install_dir/node_modules/dep1/dep1-main.js: -------------------------------------------------------------------------------- 1 | export const dep1 = 1111; 2 | -------------------------------------------------------------------------------- /src/test/data/alt_npm_install_dir/node_modules/dep1/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "main": "dep1-main.js" 3 | } 4 | -------------------------------------------------------------------------------- /src/test/data/alt_npm_install_dir/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": true, 3 | "dependencies": { 4 | "dep1": "0.0.0" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /src/test/data/cpu-throttling-rate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/Polymer/tachometer/master/config.schema.json", 3 | "sampleSize": 30, 4 | "timeout": 0, 5 | "benchmarks": [ 6 | { 7 | "name": "x1", 8 | "url": "for-loop.html?n=1000", 9 | "browser": { 10 | "name": "chrome", 11 | "headless": true, 12 | "cpuThrottlingRate": 1 13 | } 14 | }, 15 | { 16 | "name": "x2", 17 | "url": "for-loop.html?n=1000", 18 | "browser": { 19 | "name": "chrome", 20 | "headless": true, 21 | "cpuThrottlingRate": 2 22 | } 23 | 
}, 24 | { 25 | "name": "x4", 26 | "url": "for-loop.html?n=1000", 27 | "browser": { 28 | "name": "chrome", 29 | "headless": true, 30 | "cpuThrottlingRate": 4 31 | } 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /src/test/data/delayed-callback.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 7 | 8 | 9 | 10 | delayed bench.start/stop test 11 | 12 | 13 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /src/test/data/delayed-fcp.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 8 | 9 | 10 | 11 | delayed first-contentful-paint test 12 | 13 | 14 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /src/test/data/deprecated-horizons.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/Polymer/tachometer/master/config.schema.json", 3 | "benchmarks": [ 4 | { 5 | "url": "random-global.html" 6 | } 7 | ], 8 | "horizons": ["0%", "10%"] 9 | } 10 | -------------------------------------------------------------------------------- /src/test/data/for-loop.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | 9 | for loop test 10 | 11 | 12 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /src/test/data/import-bare-module.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /src/test/data/import-bare-module.js: -------------------------------------------------------------------------------- 1 | import {dep1} from 'dep1'; 2 | -------------------------------------------------------------------------------- /src/test/data/invalid-js.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /src/test/data/invalid-js.js: -------------------------------------------------------------------------------- 1 | this is not valid javascript 2 | -------------------------------------------------------------------------------- /src/test/data/measurement-expression.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/Polymer/tachometer/master/config.schema.json", 3 | "sampleSize": 10, 4 | "timeout": 0, 5 | "root": "..", 6 | "benchmarks": [ 7 | { 8 | "name": "one-plus-one", 9 | "url": "for-loop.html", 10 | "measurement": "global", 11 | "measurementExpression": "1+1", 12 | "browser": { 13 | "name": "chrome", 14 | "headless": true 15 | } 16 | }, 17 | { 18 | "name": "two-plus-two", 19 | "url": "for-loop.html", 20 | "measurement": "global", 21 | "measurementExpression": "2+2", 22 | "browser": { 23 | "name": "chrome", 24 | "headless": true 25 | } 26 | } 27 | ] 28 | } 29 | -------------------------------------------------------------------------------- /src/test/data/multiple-measurements.html: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 7 | multiple performance measure test 8 | 9 | 10 | 11 | 25 | 26 | 27 | 
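The inline script of `multiple-measurements.html` is not preserved in this listing (the markup was stripped). Judging from the `multiple-measurements.json` config that follows, which reads `performance` entries named `foo` and `bar` and passes `?foo=20&bar=60`, the page's script plausibly looks something like the sketch below; this is a hedged reconstruction for orientation, not the original file.

```ts
// Hypothetical sketch only -- the real page's script is not shown in this dump.
// Reads ?foo=<ms>&bar=<ms> and emits one performance.measure() entry per name.
const params = new URLSearchParams(window.location.search);
for (const name of ['foo', 'bar']) {
  const ms = Number(params.get(name) ?? '0');
  performance.mark(`${name}-start`);
  setTimeout(() => performance.measure(name, `${name}-start`), ms);
}
```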
-------------------------------------------------------------------------------- /src/test/data/multiple-measurements.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/Polymer/tachometer/master/config.schema.json", 3 | "sampleSize": 10, 4 | "timeout": 0, 5 | "benchmarks": [ 6 | { 7 | "name": "multi", 8 | "url": "multiple-measurements.html?foo=20&bar=60", 9 | "measurement": [ 10 | { 11 | "mode": "performance", 12 | "entryName": "foo" 13 | }, 14 | { 15 | "mode": "performance", 16 | "entryName": "bar" 17 | } 18 | ], 19 | "browser": { 20 | "name": "chrome", 21 | "headless": true 22 | } 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /src/test/data/mylib/mybench/index.html: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/test/data/mylib/noindex/other.html: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/test/data/mylib/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "mylib": "0.0.0", 4 | "otherlib": "0.0.0" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /src/test/data/node_modules/dep1/dep1.js: -------------------------------------------------------------------------------- 1 | export const dep1 = 1111; 2 | -------------------------------------------------------------------------------- /src/test/data/node_modules/dep1/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "main": "dep1.js" 3 | } 4 | -------------------------------------------------------------------------------- /src/test/data/performance-measure.html: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 7 | performance measure test 8 | 9 | 10 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /src/test/data/performance-measure.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/Polymer/tachometer/master/config.schema.json", 3 | "sampleSize": 10, 4 | "timeout": 0, 5 | "benchmarks": [ 6 | { 7 | "name": "20", 8 | "url": "performance-measure.html?wait=20", 9 | "measurement": { 10 | "mode": "performance", 11 | "entryName": "foo" 12 | }, 13 | "browser": { 14 | "name": "chrome", 15 | "headless": true 16 | } 17 | }, 18 | { 19 | "name": "60", 20 | "url": "performance-measure.html?wait=60", 21 | "measurement": { 22 | "mode": "performance", 23 | "entryName": "foo" 24 | }, 25 | "browser": { 26 | "name": "chrome", 27 | "headless": true 28 | } 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /src/test/data/random-global.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 7 | 8 | 9 | 10 | random window.tachometerResult test 11 | 12 | 13 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /src/test/data/random-global.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": 
"https://raw.githubusercontent.com/Polymer/tachometer/master/config.schema.json", 3 | "benchmarks": [ 4 | { 5 | "url": "random-global.html" 6 | } 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /src/test/data/tracing-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/Polymer/tachometer/master/config.schema.json", 3 | "sampleSize": 2, 4 | "timeout": 0, 5 | "benchmarks": [ 6 | { 7 | "name": "bench1", 8 | "url": "for-loop.html", 9 | "browser": { 10 | "name": "chrome", 11 | "headless": true, 12 | "trace": { 13 | "logDir": "src/test/data/logs" 14 | } 15 | } 16 | }, 17 | { 18 | "name": "bench2", 19 | "url": "for-loop.html", 20 | "browser": { 21 | "name": "chrome", 22 | "headless": true, 23 | "trace": { 24 | "logDir": "src/test/data/logs" 25 | } 26 | } 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /src/test/data/window-size.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | 7 | 8 | 9 | window height test 10 | 11 | 12 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /src/test/e2e_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {existsSync} from 'fs'; 9 | import {suite, test} from 'mocha'; 10 | import * as path from 'path'; 11 | import rimraf from 'rimraf'; 12 | import {main} from '../cli.js'; 13 | import {ConfidenceInterval} from '../stats.js'; 14 | import {testData} from './test_helpers.js'; 15 | 16 | // Set this environment variable to change the browsers under test. 17 | let browsers = (process.env.TACHOMETER_E2E_TEST_BROWSERS || '') 18 | .split(',') 19 | .map((b) => b.trim()) 20 | .filter((b) => b.length > 0); 21 | 22 | if (browsers.length === 0) { 23 | browsers = ['chrome-headless', 'firefox-headless']; 24 | if (process.platform === 'darwin') { 25 | browsers.push('safari'); 26 | } 27 | } 28 | 29 | /** 30 | * Test function wrapper to suppress tachometer's stdout/stderr output. Note we 31 | * can't use setup and teardown for this purpose, because mocha logs each test 32 | * pass/fail status before teardown runs, so then we'd suppress that too. 33 | */ 34 | const hideOutput = (test: () => Promise) => async () => { 35 | const realStdoutWrite = process.stdout.write; 36 | const realStderrWrite = process.stderr.write; 37 | if (!process.env.TACHOMETER_E2E_TEST_SHOW_OUTPUT) { 38 | process.stdout.write = () => true; 39 | process.stderr.write = () => true; 40 | } 41 | try { 42 | await test(); 43 | } finally { 44 | process.stdout.write = realStdoutWrite; 45 | process.stderr.write = realStderrWrite; 46 | } 47 | }; 48 | 49 | function ciAverage(ci: ConfidenceInterval): number { 50 | return (ci.high + ci.low) / 2; 51 | } 52 | 53 | function rimrafAsync(path: string) { 54 | return new Promise(function (resolve, reject) { 55 | rimraf(path, {}, (error) => { 56 | if (error) { 57 | reject(error); 58 | } else { 59 | resolve(); 60 | } 61 | }); 62 | }); 63 | } 64 | 65 | suite('e2e', function () { 66 | // We're launching real browsers and running multiple samples. 
67 | this.timeout(1000 * 60 * 2); 68 | 69 | for (const browser of browsers) { 70 | suite(browser, function () { 71 | test( 72 | 'window.tachometerResult', 73 | hideOutput(async function () { 74 | const avgA = 1; 75 | const minA = avgA - 0.1; 76 | const maxA = avgA + 0.1; 77 | 78 | const avgB = 2; 79 | const minB = avgB - 0.1; 80 | const maxB = avgB + 0.1; 81 | 82 | const argv = [ 83 | `--browser=${browser}`, 84 | '--measure=global', 85 | '--sample-size=20', 86 | '--timeout=0', 87 | path.join(testData, 'random-global.html') + 88 | `?min=${minA}&max=${maxA}`, 89 | path.join(testData, 'random-global.html') + 90 | `?min=${minB}&max=${maxB}`, 91 | ]; 92 | 93 | const actual = await main(argv); 94 | assert.isDefined(actual); 95 | assert.lengthOf(actual!, 2); 96 | const [a, b] = actual!; 97 | const diffAB = a.differences[1]!; 98 | const diffBA = b.differences[0]!; 99 | 100 | assert.closeTo(a.stats.mean, avgA, 0.1); 101 | assert.closeTo(b.stats.mean, avgB, 0.1); 102 | assert.closeTo(ciAverage(diffAB.absolute), avgA - avgB, 0.1); 103 | assert.closeTo(ciAverage(diffBA.absolute), avgB - avgA, 0.1); 104 | assert.closeTo(ciAverage(diffAB.relative), (avgA - avgB) / avgB, 0.1); 105 | assert.closeTo(ciAverage(diffBA.relative), (avgB - avgA) / avgA, 0.1); 106 | }) 107 | ); 108 | 109 | test( 110 | 'measurement expression', 111 | hideOutput(async function () { 112 | const avgA = 1; 113 | const minA = avgA - 0.1; 114 | const maxA = avgA + 0.1; 115 | 116 | const avgB = 2; 117 | const minB = avgB - 0.1; 118 | const maxB = avgB + 0.1; 119 | 120 | const argv = [ 121 | `--browser=${browser}`, 122 | '--measure=global', 123 | '--sample-size=20', 124 | '--timeout=0', 125 | `--measurement-expression=window.customResult`, 126 | path.join(testData, 'random-global.html') + 127 | `?min=${minA}&max=${maxA}&customResult=true`, 128 | path.join(testData, 'random-global.html') + 129 | `?min=${minB}&max=${maxB}&customResult=true`, 130 | ]; 131 | 132 | const actual = await main(argv); 133 | assert.isDefined(actual); 134 | assert.lengthOf(actual!, 2); 135 | const [a, b] = actual!; 136 | const diffAB = a.differences[1]!; 137 | const diffBA = b.differences[0]!; 138 | 139 | assert.closeTo(a.stats.mean, avgA, 0.1); 140 | assert.closeTo(b.stats.mean, avgB, 0.1); 141 | assert.closeTo(ciAverage(diffAB.absolute), avgA - avgB, 0.1); 142 | assert.closeTo(ciAverage(diffBA.absolute), avgB - avgA, 0.1); 143 | assert.closeTo(ciAverage(diffAB.relative), (avgA - avgB) / avgB, 0.1); 144 | assert.closeTo(ciAverage(diffBA.relative), (avgB - avgA) / avgA, 0.1); 145 | }) 146 | ); 147 | 148 | test( 149 | 'measurement expression via config file', 150 | hideOutput(async function () { 151 | const argv = [ 152 | `--config=${path.join(testData, 'measurement-expression.json')}`, 153 | ]; 154 | const actual = await main(argv); 155 | assert.isDefined(actual); 156 | assert.lengthOf(actual!, 2); 157 | const [a, b] = actual!; 158 | assert.equal(a.stats.mean, 2); 159 | assert.equal(b.stats.mean, 4); 160 | }) 161 | ); 162 | 163 | test( 164 | 'bench.start/stop', 165 | hideOutput(async function () { 166 | const delayA = 20; 167 | const delayB = 60; 168 | 169 | const argv = [ 170 | `--browser=${browser}`, 171 | '--measure=callback', 172 | '--sample-size=30', 173 | '--timeout=0', 174 | path.join(testData, 'delayed-callback.html') + `?delay=${delayA}`, 175 | path.join(testData, 'delayed-callback.html') + `?delay=${delayB}`, 176 | ]; 177 | 178 | const actual = await main(argv); 179 | assert.isDefined(actual); 180 | assert.lengthOf(actual!, 2); 181 | const [a, b] = 
actual!; 182 | const diffAB = a.differences[1]!; 183 | const diffBA = b.differences[0]!; 184 | 185 | // We can't be very precise with expectations here, since setTimeout 186 | // can be quite variable on a resource starved machine (e.g. some of 187 | // our CI builds). 188 | assert.isAtLeast(a.stats.mean, delayA); 189 | assert.isAtLeast(b.stats.mean, delayB); 190 | assert.isBelow(ciAverage(diffAB.absolute), 0); 191 | assert.isAbove(ciAverage(diffBA.absolute), 0); 192 | assert.isBelow(ciAverage(diffAB.relative), 0); 193 | assert.isAbove(ciAverage(diffBA.relative), 0); 194 | }) 195 | ); 196 | 197 | test( 198 | 'performance entry', 199 | hideOutput(async function () { 200 | const delayA = 20; 201 | const delayB = 60; 202 | 203 | // TODO(aomarks) This isn't actually testing each browser, since 204 | // the browser is hard coded in the config file. Generate the JSON 205 | // file dynamically instead. 206 | const argv = [ 207 | `--config=${path.join(testData, 'performance-measure.json')}`, 208 | ]; 209 | 210 | const actual = await main(argv); 211 | assert.isDefined(actual); 212 | assert.lengthOf(actual!, 2); 213 | const [a, b] = actual!; 214 | const diffAB = a.differences[1]!; 215 | const diffBA = b.differences[0]!; 216 | 217 | // We can't be very precise with expectations here, since 218 | // setTimeout can be quite variable on a resource starved machine 219 | // (e.g. some of our CI builds). 220 | assert.isAtLeast(a.stats.mean, delayA); 221 | assert.isAtLeast(b.stats.mean, delayB); 222 | assert.isBelow(ciAverage(diffAB.absolute), 0); 223 | assert.isAbove(ciAverage(diffBA.absolute), 0); 224 | assert.isBelow(ciAverage(diffAB.relative), 0); 225 | assert.isAbove(ciAverage(diffBA.relative), 0); 226 | }) 227 | ); 228 | 229 | test( 230 | 'multiple measurements', 231 | hideOutput(async function () { 232 | const delayA = 20; 233 | const delayB = 60; 234 | 235 | const argv = [ 236 | `--config=${path.join(testData, 'multiple-measurements.json')}`, 237 | ]; 238 | 239 | const actual = await main(argv); 240 | assert.isDefined(actual); 241 | assert.lengthOf(actual!, 2); 242 | const [a, b] = actual!; 243 | const diffAB = a.differences[1]!; 244 | const diffBA = b.differences[0]!; 245 | 246 | // We can't be very precise with expectations here, since 247 | // setTimeout can be quite variable on a resource starved machine 248 | // (e.g. some of our CI builds). 249 | assert.isAtLeast(a.stats.mean, delayA); 250 | assert.isAtLeast(b.stats.mean, delayB); 251 | assert.isBelow(ciAverage(diffAB.absolute), 0); 252 | assert.isAbove(ciAverage(diffBA.absolute), 0); 253 | assert.isBelow(ciAverage(diffAB.relative), 0); 254 | assert.isAbove(ciAverage(diffBA.relative), 0); 255 | }) 256 | ); 257 | 258 | // Only Chrome supports FCP and CPU throttling. 
259 | if (browser.startsWith('chrome')) { 260 | test( 261 | 'fcp', 262 | hideOutput(async function () { 263 | const delayA = 20; 264 | const delayB = 60; 265 | 266 | const argv = [ 267 | `--browser=${browser}`, 268 | '--measure=fcp', 269 | '--sample-size=10', 270 | '--timeout=0', 271 | path.join(testData, 'delayed-fcp.html') + `?delay=${delayA}`, 272 | path.join(testData, 'delayed-fcp.html') + `?delay=${delayB}`, 273 | ]; 274 | 275 | const actual = await main(argv); 276 | assert.isDefined(actual); 277 | assert.lengthOf(actual!, 2); 278 | const [a, b] = actual!; 279 | const diffAB = a.differences[1]!; 280 | const diffBA = b.differences[0]!; 281 | 282 | // We can't be very precise with expectations here, since FCP is 283 | // so variable, but we can check that FCP takes at least as long 284 | // as our setTimeout delays, and that A paints before than B. 285 | assert.isAtLeast(a.stats.mean, delayA); 286 | assert.isAtLeast(b.stats.mean, delayB); 287 | assert.isBelow(ciAverage(diffAB.absolute), 0); 288 | assert.isAbove(ciAverage(diffBA.absolute), 0); 289 | assert.isBelow(ciAverage(diffAB.relative), 0); 290 | assert.isAbove(ciAverage(diffBA.relative), 0); 291 | }) 292 | ); 293 | 294 | test( 295 | 'cpu throttling rate', 296 | hideOutput(async function () { 297 | const argv = [ 298 | `--config=${path.join(testData, 'cpu-throttling-rate.json')}`, 299 | ]; 300 | const actual = await main(argv); 301 | assert.isDefined(actual); 302 | assert.lengthOf(actual!, 3); 303 | const [x1, x2, x4] = actual!; 304 | // The CPU throttling factors don't precisely result in the same 305 | // measured slowdown (though roughly close), so let's just check 306 | // that the rankings we expect hold. 307 | assert.isAbove(x2.stats.mean, x1.stats.mean); 308 | assert.isAbove(x4.stats.mean, x2.stats.mean); 309 | }) 310 | ); 311 | 312 | test( 313 | 'tracing', 314 | hideOutput(async function () { 315 | const logDir = path.join(testData, 'logs'); 316 | if (existsSync(logDir)) { 317 | await rimrafAsync(logDir); 318 | } 319 | const argv = [ 320 | `--config=${path.join(testData, 'tracing-config.json')}`, 321 | ]; 322 | const actual = await main(argv); 323 | assert.isDefined(actual); 324 | assert.lengthOf(actual!, 2); 325 | assert.isTrue(existsSync(logDir)); 326 | assert.isTrue(existsSync(path.join(logDir, 'bench1'))); 327 | assert.isTrue(existsSync(path.join(logDir, 'bench2'))); 328 | }) 329 | ); 330 | } 331 | 332 | test( 333 | 'window size', 334 | hideOutput(async function () { 335 | const width = 1024; 336 | const height = 768; 337 | const argv = [ 338 | `--browser=${browser}`, 339 | '--measure=global', 340 | '--sample-size=2', 341 | '--timeout=0', 342 | `--window-size=${width},${height}`, 343 | path.join(testData, 'window-size.html'), 344 | ]; 345 | // We're measuring window.innerWidth and height, so depending on 346 | // how much extra chrome the browser is rendering, we'll get 347 | // something smaller. 200 pixels seems to cover all the variation. 
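          // (Worked out for the 1024x768 request above:
          //   upperBound = 1024 * 768         = 786,432
          //   lowerBound = 1024 * (768 - 200) = 581,632.)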
348 | const lowerBound = width * (height - 200); 349 | const upperBound = width * height; 350 | 351 | const actual = await main(argv); 352 | assert.isDefined(actual); 353 | assert.lengthOf(actual!, 1); 354 | const {stats} = actual![0]; 355 | assert.isAtMost(stats.mean, upperBound); 356 | assert.isAtLeast(stats.mean, lowerBound); 357 | }) 358 | ); 359 | }); 360 | } 361 | }); 362 | -------------------------------------------------------------------------------- /src/test/flags_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {suite, test} from 'mocha'; 9 | 10 | import {parseFlags} from '../flags.js'; 11 | 12 | suite('flags', () => { 13 | suite('parseFlags', () => { 14 | suite('--resolve-bare-modules', () => { 15 | test('unset is undefined', () => { 16 | const actual = parseFlags([]); 17 | assert.isUndefined(actual['resolve-bare-modules']); 18 | }); 19 | 20 | test('set but empty is true', () => { 21 | const argv = ['--resolve-bare-modules']; 22 | const actual = parseFlags(argv); 23 | assert.isTrue(actual['resolve-bare-modules']); 24 | }); 25 | 26 | test('true is true', () => { 27 | const argv = ['--resolve-bare-modules=true']; 28 | const actual = parseFlags(argv); 29 | assert.isTrue(actual['resolve-bare-modules']); 30 | }); 31 | 32 | test('false is false', () => { 33 | const argv = ['--resolve-bare-modules=false']; 34 | const actual = parseFlags(argv); 35 | assert.isFalse(actual['resolve-bare-modules']); 36 | }); 37 | 38 | test('potato errors', () => { 39 | const argv = ['--resolve-bare-modules=potato']; 40 | assert.throw(() => parseFlags(argv), /invalid --resolve-bare-modules/i); 41 | }); 42 | 43 | test('--horizon is converted to --auto-sample-conditions', () => { 44 | const argv = ['--horizon=0,10%']; 45 | const actual = parseFlags(argv); 46 | assert.equal(actual['auto-sample-conditions'], '0,10%'); 47 | }); 48 | 49 | test('Error to use both --horizon and --auto-sample-conditions', () => { 50 | const argv = ['--horizon=0,10%', '--auto-sample-conditions=0,10%']; 51 | assert.throw( 52 | () => parseFlags(argv), 53 | 'Please use only --auto-sample-conditions and not --horizons' 54 | ); 55 | }); 56 | }); 57 | }); 58 | }); 59 | -------------------------------------------------------------------------------- /src/test/format_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {suite, suiteSetup, suiteTeardown, test} from 'mocha'; 9 | import * as path from 'path'; 10 | import stripAnsi from 'strip-ansi'; 11 | 12 | import {ConfigFile} from '../configfile.js'; 13 | import { 14 | automaticResultTable, 15 | horizontalTermResultTable, 16 | verticalTermResultTable, 17 | } from '../format.js'; 18 | import {fakeResults, testData} from './test_helpers.js'; 19 | 20 | /** 21 | * Given a config file object, generates fake measurement results, and returns 22 | * the terminal formatted result table that would be printed (minus color etc. 23 | * formatting). 
24 | */ 25 | async function fakeResultTable(configFile: ConfigFile): Promise { 26 | const results = await fakeResults(configFile); 27 | const {fixed, unfixed} = automaticResultTable(results); 28 | return stripAnsi( 29 | horizontalTermResultTable(fixed) + '\n' + verticalTermResultTable(unfixed) 30 | ); 31 | } 32 | 33 | suite('format', () => { 34 | let prevCwd: string; 35 | suiteSetup(() => { 36 | prevCwd = process.cwd(); 37 | process.chdir(path.join(testData, 'mylib')); 38 | }); 39 | 40 | suiteTeardown(() => { 41 | process.chdir(prevCwd); 42 | }); 43 | 44 | test('1 remote', async () => { 45 | const config: ConfigFile = { 46 | benchmarks: [ 47 | { 48 | url: 'http://example.com', 49 | browser: { 50 | name: 'chrome', 51 | }, 52 | }, 53 | ], 54 | }; 55 | 56 | const actual = await fakeResultTable(config); 57 | const expected = ` 58 | ┌─────────────┬────────────────────┐ 59 | │ Benchmark │ http://example.com │ 60 | ├─────────────┼────────────────────┤ 61 | │ Version │ │ 62 | ├─────────────┼────────────────────┤ 63 | │ Browser │ chrome │ 64 | │ │ 75.0.3770.100 │ 65 | ├─────────────┼────────────────────┤ 66 | │ Sample size │ 50 │ 67 | ├─────────────┼────────────────────┤ 68 | │ Bytes │ 1.00 KiB │ 69 | └─────────────┴────────────────────┘ 70 | 71 | ┌──────────────────┐ 72 | │ Avg time │ 73 | ├──────────────────┤ 74 | │ 8.56ms - 11.44ms │ 75 | └──────────────────┘ 76 | `; 77 | assert.equal(actual, expected.trim() + '\n'); 78 | }); 79 | 80 | test('2 remote, 2 browsers', async () => { 81 | const config: ConfigFile = { 82 | benchmarks: [ 83 | { 84 | url: 'http://example.com', 85 | browser: { 86 | name: 'chrome', 87 | }, 88 | }, 89 | { 90 | url: 'http://example.com', 91 | browser: { 92 | name: 'firefox', 93 | }, 94 | }, 95 | ], 96 | }; 97 | 98 | const actual = await fakeResultTable(config); 99 | const expected = ` 100 | ┌─────────────┬────────────────────┐ 101 | │ Benchmark │ http://example.com │ 102 | ├─────────────┼────────────────────┤ 103 | │ Version │ │ 104 | ├─────────────┼────────────────────┤ 105 | │ Sample size │ 50 │ 106 | └─────────────┴────────────────────┘ 107 | 108 | ┌───────────────┬──────────┬───────────────────┬──────────────────┬──────────────────┐ 109 | │ Browser │ Bytes │ Avg time │ vs chrome │ vs firefox │ 110 | ├───────────────┼──────────┼───────────────────┼──────────────────┼──────────────────┤ 111 | │ chrome │ 1.00 KiB │ 8.56ms - 11.44ms │ │ faster │ 112 | │ 75.0.3770.100 │ │ │ - │ 42% - 58% │ 113 | │ │ │ │ │ 7.97ms - 12.03ms │ 114 | ├───────────────┼──────────┼───────────────────┼──────────────────┼──────────────────┤ 115 | │ firefox │ 2.00 KiB │ 18.56ms - 21.44ms │ slower │ │ 116 | │ 60.0 │ │ │ 68% - 132% │ - │ 117 | │ │ │ │ 7.97ms - 12.03ms │ │ 118 | └───────────────┴──────────┴───────────────────┴──────────────────┴──────────────────┘ 119 | `; 120 | assert.equal(actual, expected.trim() + '\n'); 121 | }); 122 | 123 | test('remote and local, with query params, without labels', async () => { 124 | const config: ConfigFile = { 125 | benchmarks: [ 126 | { 127 | url: 'http://example.com?p=bar', 128 | browser: { 129 | name: 'chrome', 130 | }, 131 | }, 132 | { 133 | url: 'mybench/index.html?p=bar', 134 | browser: { 135 | name: 'chrome', 136 | }, 137 | }, 138 | ], 139 | }; 140 | 141 | const actual = await fakeResultTable(config); 142 | const expected = ` 143 | ┌─────────────┬───────────────┐ 144 | │ Version │ │ 145 | ├─────────────┼───────────────┤ 146 | │ Browser │ chrome │ 147 | │ │ 75.0.3770.100 │ 148 | ├─────────────┼───────────────┤ 149 | │ Sample size │ 50 │ 150 | 
└─────────────┴───────────────┘ 151 | 152 | ┌───────────────────────────┬──────────┬───────────────────┬─────────────────────────────┬──────────────────────────────┐ 153 | │ Benchmark │ Bytes │ Avg time │ vs http://example.com?p=bar │ vs /mybench/index.html?p=bar │ 154 | ├───────────────────────────┼──────────┼───────────────────┼─────────────────────────────┼──────────────────────────────┤ 155 | │ http://example.com?p=bar │ 1.00 KiB │ 8.56ms - 11.44ms │ │ faster │ 156 | │ │ │ │ - │ 42% - 58% │ 157 | │ │ │ │ │ 7.97ms - 12.03ms │ 158 | ├───────────────────────────┼──────────┼───────────────────┼─────────────────────────────┼──────────────────────────────┤ 159 | │ /mybench/index.html?p=bar │ 2.00 KiB │ 18.56ms - 21.44ms │ slower │ │ 160 | │ │ │ │ 68% - 132% │ - │ 161 | │ │ │ │ 7.97ms - 12.03ms │ │ 162 | └───────────────────────────┴──────────┴───────────────────┴─────────────────────────────┴──────────────────────────────┘ 163 | `; 164 | assert.equal(actual, expected.trim() + '\n'); 165 | }); 166 | 167 | test('remote and local, with query params, with labels', async () => { 168 | const config: ConfigFile = { 169 | benchmarks: [ 170 | { 171 | name: 'foo', 172 | url: 'http://example.com?p=bar', 173 | browser: { 174 | name: 'chrome', 175 | }, 176 | }, 177 | { 178 | name: 'bar', 179 | url: 'mybench/index.html?p=bar', 180 | browser: { 181 | name: 'chrome', 182 | }, 183 | }, 184 | ], 185 | }; 186 | 187 | const actual = await fakeResultTable(config); 188 | const expected = ` 189 | ┌─────────────┬───────────────┐ 190 | │ Version │ │ 191 | ├─────────────┼───────────────┤ 192 | │ Browser │ chrome │ 193 | │ │ 75.0.3770.100 │ 194 | ├─────────────┼───────────────┤ 195 | │ Sample size │ 50 │ 196 | └─────────────┴───────────────┘ 197 | 198 | ┌───────────┬──────────┬───────────────────┬──────────────────┬──────────────────┐ 199 | │ Benchmark │ Bytes │ Avg time │ vs foo │ vs bar │ 200 | ├───────────┼──────────┼───────────────────┼──────────────────┼──────────────────┤ 201 | │ foo │ 1.00 KiB │ 8.56ms - 11.44ms │ │ faster │ 202 | │ │ │ │ - │ 42% - 58% │ 203 | │ │ │ │ │ 7.97ms - 12.03ms │ 204 | ├───────────┼──────────┼───────────────────┼──────────────────┼──────────────────┤ 205 | │ bar │ 2.00 KiB │ 18.56ms - 21.44ms │ slower │ │ 206 | │ │ │ │ 68% - 132% │ - │ 207 | │ │ │ │ 7.97ms - 12.03ms │ │ 208 | └───────────┴──────────┴───────────────────┴──────────────────┴──────────────────┘ 209 | `; 210 | assert.equal(actual, expected.trim() + '\n'); 211 | }); 212 | }); 213 | -------------------------------------------------------------------------------- /src/test/install_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {existsSync, promises as fs} from 'fs'; 9 | import {suite, test} from 'mocha'; 10 | import * as os from 'os'; 11 | import * as path from 'path'; 12 | 13 | import { 14 | assertResolvable, 15 | onDemandDependenciesFromPackageJSON, 16 | } from '../install.js'; 17 | 18 | suite('install', () => { 19 | suite('onDemandDependenciesFromPackageJSON', () => { 20 | test('only includes packages enumerated in "installsOnDemand"', () => { 21 | const dependencies = onDemandDependenciesFromPackageJSON({ 22 | devDependencies: {foo: '*'}, 23 | dependencies: {bar: '*'}, 24 | installsOnDemand: ['baz'], 25 | }); 26 | 27 | assert.isFalse(dependencies.has('foo')); 28 | assert.isFalse(dependencies.has('bar')); 29 | 
assert.isTrue(dependencies.has('baz')); 30 | }); 31 | }); 32 | 33 | suite('assertResolvable', () => { 34 | test('resolves for resolvable module specifiers', async () => { 35 | await assertResolvable('chai'); 36 | }); 37 | 38 | test('rejects for not-resolvable module specifiers', async () => { 39 | let rejected = false; 40 | 41 | try { 42 | await assertResolvable('./definitely-not-resolvable.js'); 43 | } catch { 44 | rejected = true; 45 | } 46 | 47 | assert.isTrue(rejected); 48 | }); 49 | 50 | test('eventually resolves a module that was installed asynchronously', async () => { 51 | let rejected = false; 52 | const someModulePath = path.join(os.tmpdir(), 'foo.js'); 53 | if (existsSync(someModulePath)) { 54 | await fs.unlink(someModulePath); 55 | } 56 | 57 | try { 58 | await assertResolvable(someModulePath); 59 | } catch { 60 | rejected = true; 61 | } 62 | 63 | assert.isTrue(rejected); 64 | 65 | await fs.writeFile(someModulePath, 'console.log("hi")'); 66 | 67 | await assertResolvable(someModulePath); 68 | 69 | await fs.unlink(someModulePath); 70 | }); 71 | }); 72 | }); 73 | -------------------------------------------------------------------------------- /src/test/json-output_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {suite, test} from 'mocha'; 9 | 10 | import {ConfigFile} from '../configfile.js'; 11 | import {jsonOutput, JsonOutputFile} from '../json-output.js'; 12 | import {fakeResults} from './test_helpers.js'; 13 | 14 | /** 15 | * We include the full precision statistics in the JSON output, but it's silly 16 | * to check them in this test to high precision. This function walks the JSON 17 | * output object (or any object) and reduces the precision of any number it 18 | * finds by rounding to the given number of decimal places. 
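 * For example (illustrative): roundPlacesAll({mean: 1.23456, samples: [2.71828]}, 3)
 * returns {mean: 1.235, samples: [2.718]}.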
19 | */ 20 | function roundPlacesAll(val: unknown, places: number): unknown { 21 | if (typeof val === 'number') { 22 | val = roundPlaces(val, places); 23 | } else if (Array.isArray(val)) { 24 | for (let i = 0; i < val.length; i++) { 25 | val[i] = roundPlacesAll(val[i], places); 26 | } 27 | } else if (typeof val === 'object' && val !== null && val !== undefined) { 28 | const obj = val as {[key: string]: unknown}; 29 | for (const p of Object.getOwnPropertyNames(obj)) { 30 | obj[p] = roundPlacesAll(obj[p], places); 31 | } 32 | } 33 | return val; 34 | } 35 | 36 | function roundPlaces(num: number, places: number): number { 37 | return Math.round(num * 10 ** places) / 10 ** places; 38 | } 39 | 40 | suite('jsonOutput', () => { 41 | test('2x2 matrix', async () => { 42 | const config: ConfigFile = { 43 | benchmarks: [ 44 | { 45 | name: 'foo', 46 | url: 'http://example.com?foo', 47 | }, 48 | { 49 | name: 'bar', 50 | url: 'http://example.com?bar', 51 | }, 52 | ], 53 | }; 54 | const results = await fakeResults(config); 55 | const actual = jsonOutput(results); 56 | const expected: JsonOutputFile = { 57 | benchmarks: [ 58 | { 59 | name: 'foo', 60 | bytesSent: 1024, 61 | version: undefined, 62 | measurement: { 63 | name: 'fcp', 64 | mode: 'performance', 65 | entryName: 'first-contentful-paint', 66 | }, 67 | browser: { 68 | name: 'chrome', 69 | headless: false, 70 | windowSize: {width: 1024, height: 768}, 71 | userAgent: 72 | 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 73 | }, 74 | samples: [...new Array(25).fill(5), ...new Array(25).fill(15)], 75 | mean: { 76 | low: 8.56459, 77 | high: 11.43541, 78 | }, 79 | differences: [ 80 | null, 81 | { 82 | absolute: { 83 | low: -12.02998, 84 | high: -7.97002, 85 | }, 86 | percentChange: { 87 | low: -58.02419, 88 | high: -41.97581, 89 | }, 90 | }, 91 | ], 92 | }, 93 | { 94 | name: 'bar', 95 | bytesSent: 2048, 96 | version: undefined, 97 | measurement: { 98 | name: 'fcp', 99 | mode: 'performance', 100 | entryName: 'first-contentful-paint', 101 | }, 102 | browser: { 103 | name: 'chrome', 104 | headless: false, 105 | windowSize: {width: 1024, height: 768}, 106 | userAgent: 107 | 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 108 | }, 109 | samples: [...new Array(25).fill(15), ...new Array(25).fill(25)], 110 | mean: { 111 | low: 18.56459, 112 | high: 21.43541, 113 | }, 114 | differences: [ 115 | { 116 | absolute: { 117 | low: 7.97002, 118 | high: 12.02998, 119 | }, 120 | percentChange: { 121 | low: 67.90324, 122 | high: 132.09676, 123 | }, 124 | }, 125 | null, 126 | ], 127 | }, 128 | ], 129 | }; 130 | assert.deepEqual(roundPlacesAll(actual, 5), expected); 131 | }); 132 | 133 | test('2x2 matrix with multiple measurements', async () => { 134 | const config: ConfigFile = { 135 | benchmarks: [ 136 | { 137 | name: 'foo', 138 | url: 'http://example.com?foo', 139 | measurement: [ 140 | {name: 'Metric 1', mode: 'performance', entryName: 'metric1'}, 141 | {name: 'Metric 2', mode: 'performance', entryName: 'metric2'}, 142 | ], 143 | }, 144 | { 145 | name: 'bar', 146 | url: 'http://example.com?bar', 147 | measurement: [ 148 | {name: 'Metric 1', mode: 'performance', entryName: 'metric1'}, 149 | {name: 'Metric 2', mode: 'performance', entryName: 'metric2'}, 150 | ], 151 | }, 152 | ], 153 | }; 154 | const results = await fakeResults(config); 155 | const actual = jsonOutput(results); 156 | const expected: JsonOutputFile = { 157 | benchmarks: [ 158 | { 159 | 
name: 'foo [Metric 1]', 160 | bytesSent: 1024, 161 | version: undefined, 162 | measurement: { 163 | name: 'Metric 1', 164 | mode: 'performance', 165 | entryName: 'metric1', 166 | }, 167 | browser: { 168 | name: 'chrome', 169 | headless: false, 170 | windowSize: {width: 1024, height: 768}, 171 | userAgent: 172 | 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 173 | }, 174 | samples: [...new Array(25).fill(5), ...new Array(25).fill(15)], 175 | mean: {low: 8.56459, high: 11.43541}, 176 | differences: [ 177 | null, 178 | { 179 | absolute: {high: 2.02998, low: -2.02998}, 180 | percentChange: {high: 20.29978, low: -20.29978}, 181 | }, 182 | { 183 | absolute: {low: -12.02998, high: -7.97002}, 184 | percentChange: {low: -58.02419, high: -41.97581}, 185 | }, 186 | { 187 | absolute: {high: -7.97002, low: -12.02998}, 188 | percentChange: {high: -41.97581, low: -58.02419}, 189 | }, 190 | ], 191 | }, 192 | { 193 | name: 'foo [Metric 2]', 194 | bytesSent: 1024, 195 | version: undefined, 196 | measurement: { 197 | name: 'Metric 2', 198 | mode: 'performance', 199 | entryName: 'metric2', 200 | }, 201 | browser: { 202 | name: 'chrome', 203 | headless: false, 204 | windowSize: {width: 1024, height: 768}, 205 | userAgent: 206 | 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 207 | }, 208 | samples: [...new Array(25).fill(5), ...new Array(25).fill(15)], 209 | mean: {high: 11.43541, low: 8.56459}, 210 | differences: [ 211 | { 212 | absolute: {high: 2.02998, low: -2.02998}, 213 | percentChange: {high: 20.29978, low: -20.29978}, 214 | }, 215 | null, 216 | { 217 | absolute: {high: -7.97002, low: -12.02998}, 218 | percentChange: {high: -41.97581, low: -58.02419}, 219 | }, 220 | { 221 | absolute: {high: -7.97002, low: -12.02998}, 222 | percentChange: {high: -41.97581, low: -58.02419}, 223 | }, 224 | ], 225 | }, 226 | { 227 | name: 'bar [Metric 1]', 228 | bytesSent: 2048, 229 | version: undefined, 230 | measurement: { 231 | name: 'Metric 1', 232 | mode: 'performance', 233 | entryName: 'metric1', 234 | }, 235 | browser: { 236 | name: 'chrome', 237 | headless: false, 238 | windowSize: {width: 1024, height: 768}, 239 | userAgent: 240 | 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 241 | }, 242 | samples: [...new Array(25).fill(15), ...new Array(25).fill(25)], 243 | mean: {low: 18.56459, high: 21.43541}, 244 | differences: [ 245 | { 246 | absolute: {low: 7.97002, high: 12.02998}, 247 | percentChange: {low: 67.90324, high: 132.09676}, 248 | }, 249 | { 250 | absolute: {high: 12.02998, low: 7.97002}, 251 | percentChange: {high: 132.09676, low: 67.90324}, 252 | }, 253 | null, 254 | { 255 | absolute: {high: 2.02998, low: -2.02998}, 256 | percentChange: {high: 10.14989, low: -10.14989}, 257 | }, 258 | ], 259 | }, 260 | { 261 | name: 'bar [Metric 2]', 262 | bytesSent: 2048, 263 | version: undefined, 264 | measurement: { 265 | name: 'Metric 2', 266 | mode: 'performance', 267 | entryName: 'metric2', 268 | }, 269 | browser: { 270 | name: 'chrome', 271 | headless: false, 272 | windowSize: {width: 1024, height: 768}, 273 | userAgent: 274 | 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 275 | }, 276 | samples: [...new Array(25).fill(15), ...new Array(25).fill(25)], 277 | mean: {low: 18.56459, high: 21.43541}, 278 | differences: [ 279 | { 280 | absolute: {high: 12.02998, low: 7.97002}, 281 | 
percentChange: {high: 132.09676, low: 67.90324}, 282 | }, 283 | { 284 | absolute: {high: 12.02998, low: 7.97002}, 285 | percentChange: {high: 132.09676, low: 67.90324}, 286 | }, 287 | { 288 | absolute: {high: 2.02998, low: -2.02998}, 289 | percentChange: {high: 10.14989, low: -10.14989}, 290 | }, 291 | null, 292 | ], 293 | }, 294 | ], 295 | }; 296 | assert.deepEqual(roundPlacesAll(actual, 5), expected); 297 | }); 298 | }); 299 | -------------------------------------------------------------------------------- /src/test/server_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import fsExtra from 'fs-extra'; 9 | import {setup, suite, teardown, test} from 'mocha'; 10 | import fetch from 'node-fetch'; 11 | import * as path from 'path'; 12 | 13 | import {Server} from '../server.js'; 14 | 15 | import {testData} from './test_helpers.js'; 16 | 17 | suite('server', () => { 18 | let server: Server; 19 | 20 | setup(async () => { 21 | server = await Server.start({ 22 | host: 'localhost', 23 | ports: [0], // random 24 | root: testData, 25 | resolveBareModules: true, 26 | npmInstalls: [], 27 | mountPoints: [ 28 | { 29 | diskPath: testData, 30 | urlPath: '/', 31 | }, 32 | ], 33 | cache: true, 34 | }); 35 | }); 36 | 37 | teardown(async () => { 38 | await server.close(); 39 | }); 40 | 41 | test('serves bench.js library', async () => { 42 | const res = await fetch(`${server.url}/bench.js`); 43 | assert.equal(res.status, 200); 44 | assert.include(await res.text(), 'performance.now()'); 45 | }); 46 | 47 | suite('bare modules', () => { 48 | test('resolves specifier in JS file', async () => { 49 | const res = await fetch(`${server.url}/import-bare-module.js`); 50 | assert.equal(res.status, 200); 51 | const body = await res.text(); 52 | assert.include(body, 'node_modules/dep1/dep1.js'); 53 | assert.notInclude(body, `'dep1'`); 54 | }); 55 | 56 | test('resolves specifier in HTML file', async () => { 57 | const res = await fetch(`${server.url}/import-bare-module.html`); 58 | assert.equal(res.status, 200); 59 | const body = await res.text(); 60 | assert.include(body, 'node_modules/dep1/dep1.js'); 61 | assert.notInclude(body, `'dep1'`); 62 | }); 63 | 64 | test('serves invalid JS unchanged', async () => { 65 | const res = await fetch(`${server.url}/invalid-js.js`); 66 | assert.equal(res.status, 200); 67 | const body = await res.text(); 68 | assert.include(body, 'this is not valid javascript'); 69 | }); 70 | 71 | test('serves invalid JS in HTML unchanged', async () => { 72 | const res = await fetch(`${server.url}/invalid-js.html`); 73 | assert.equal(res.status, 200); 74 | const body = await res.text(); 75 | assert.include(body, 'this is not valid javascript'); 76 | }); 77 | }); 78 | 79 | suite('bare modules with custom npm installs', async () => { 80 | setup(async () => { 81 | const installDir = path.join(testData, 'alt_npm_install_dir'); 82 | const packageJson = fsExtra.readJSONSync( 83 | path.join(installDir, 'package.json') 84 | ); 85 | 86 | // Close the base server and replace it with a custom server that is 87 | // configured with a custom npm install directory 88 | await server.close(); 89 | 90 | server = await Server.start({ 91 | host: 'localhost', 92 | ports: [0], // random 93 | root: testData, 94 | resolveBareModules: true, 95 | npmInstalls: [{installDir, packageJson}], 96 | mountPoints: [ 97 | { 98 | diskPath: testData, 99 | 
urlPath: '/', 100 | }, 101 | ], 102 | cache: true, 103 | }); 104 | }); 105 | 106 | test('resolves specifier in JS file to alt file', async () => { 107 | const res = await fetch(`${server.url}/import-bare-module.js`); 108 | assert.equal(res.status, 200); 109 | const body = await res.text(); 110 | assert.include(body, 'node_modules/dep1/dep1-main.js'); 111 | assert.notInclude(body, `/dep1.js'`); 112 | assert.notInclude(body, `'dep1'`); 113 | }); 114 | 115 | test('resolves specifier in HTML file to alt file', async () => { 116 | const res = await fetch(`${server.url}/import-bare-module.html`); 117 | assert.equal(res.status, 200); 118 | const body = await res.text(); 119 | assert.include(body, 'node_modules/dep1/dep1-main.js'); 120 | assert.notInclude(body, `/dep1.js'`); 121 | assert.notInclude(body, `'dep1'`); 122 | }); 123 | }); 124 | 125 | test('records bytes served in session', async () => { 126 | let session; 127 | 128 | await fetch(`${server.url}/1_byte.txt`); 129 | session = server.endSession(); 130 | assert.equal(session.bytesSent, 1); 131 | 132 | await fetch(`${server.url}/1_byte.txt`); 133 | await fetch(`${server.url}/3_bytes.txt`); 134 | session = server.endSession(); 135 | assert.equal(session.bytesSent, 4); 136 | 137 | session = server.endSession(); 138 | assert.equal(session.bytesSent, 0); 139 | }); 140 | }); 141 | -------------------------------------------------------------------------------- /src/test/specs_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as chai from 'chai'; 8 | import chaiAsPromised from 'chai-as-promised'; 9 | import {suite, suiteSetup, suiteTeardown, test} from 'mocha'; 10 | import * as path from 'path'; 11 | 12 | import * as defaults from '../defaults.js'; 13 | import {optDefs, Opts} from '../flags.js'; 14 | import {specsFromOpts} from '../specs.js'; 15 | import {BenchmarkSpec} from '../types.js'; 16 | 17 | import {testData} from './test_helpers.js'; 18 | 19 | import commandLineArgs from 'command-line-args'; 20 | 21 | import * as url from 'url'; 22 | const __dirname = url.fileURLToPath(new URL('.', import.meta.url)); 23 | 24 | chai.use(chaiAsPromised); 25 | const {assert} = chai; 26 | 27 | const parse = (argv: string[]) => 28 | commandLineArgs(optDefs, {argv, partial: true}) as Opts; 29 | 30 | const defaultBrowser = { 31 | name: defaults.browserName, 32 | headless: false, 33 | windowSize: { 34 | width: defaults.windowWidth, 35 | height: defaults.windowHeight, 36 | }, 37 | }; 38 | 39 | suite('specsFromOpts', () => { 40 | let prevCwd: string; 41 | suiteSetup(() => { 42 | prevCwd = process.cwd(); 43 | process.chdir(path.join(testData, 'mylib')); 44 | }); 45 | 46 | suiteTeardown(() => { 47 | process.chdir(prevCwd); 48 | }); 49 | 50 | test('nothing', async () => { 51 | const actual = await specsFromOpts(parse([])); 52 | assert.deepEqual(actual, []); 53 | }); 54 | 55 | test('remote url', async () => { 56 | const argv = ['http://example.com']; 57 | const actual = await specsFromOpts(parse(argv)); 58 | const expected: BenchmarkSpec[] = [ 59 | { 60 | name: 'http://example.com', 61 | url: { 62 | kind: 'remote', 63 | url: 'http://example.com', 64 | }, 65 | browser: defaultBrowser, 66 | measurement: [ 67 | { 68 | mode: 'performance', 69 | entryName: 'first-contentful-paint', 70 | }, 71 | ], 72 | }, 73 | ]; 74 | assert.deepEqual(actual, expected); 75 | }); 76 | 77 | test('remote url with label', async () => 
{ 78 | const argv = ['potato=http://example.com']; 79 | const actual = await specsFromOpts(parse(argv)); 80 | const expected: BenchmarkSpec[] = [ 81 | { 82 | name: 'potato', 83 | url: { 84 | kind: 'remote', 85 | url: 'http://example.com', 86 | }, 87 | browser: defaultBrowser, 88 | measurement: [ 89 | { 90 | mode: 'performance', 91 | entryName: 'first-contentful-paint', 92 | }, 93 | ], 94 | }, 95 | ]; 96 | assert.deepEqual(actual, expected); 97 | }); 98 | 99 | test('local file', async () => { 100 | const argv = ['mybench/index.html']; 101 | const actual = await specsFromOpts(parse(argv)); 102 | const expected: BenchmarkSpec[] = [ 103 | { 104 | name: 'mybench/index.html', 105 | url: { 106 | kind: 'local', 107 | urlPath: '/mybench/index.html', 108 | queryString: '', 109 | version: undefined, 110 | }, 111 | browser: defaultBrowser, 112 | measurement: [ 113 | { 114 | mode: 'callback', 115 | }, 116 | ], 117 | }, 118 | ]; 119 | assert.deepEqual(actual, expected); 120 | }); 121 | 122 | test('local absolute file', async () => { 123 | const argv = [path.resolve('mybench/index.html')]; 124 | const actual = await specsFromOpts(parse(argv)); 125 | const expected: BenchmarkSpec[] = [ 126 | { 127 | name: 'mybench/index.html', 128 | url: { 129 | kind: 'local', 130 | urlPath: '/mybench/index.html', 131 | queryString: '', 132 | version: undefined, 133 | }, 134 | browser: defaultBrowser, 135 | measurement: [ 136 | { 137 | mode: 'callback', 138 | }, 139 | ], 140 | }, 141 | ]; 142 | assert.deepEqual(actual, expected); 143 | }); 144 | 145 | test('local file with label', async () => { 146 | const argv = ['potato=mybench/index.html']; 147 | const actual = await specsFromOpts(parse(argv)); 148 | const expected: BenchmarkSpec[] = [ 149 | { 150 | name: 'potato', 151 | url: { 152 | kind: 'local', 153 | urlPath: '/mybench/index.html', 154 | queryString: '', 155 | version: undefined, 156 | }, 157 | browser: defaultBrowser, 158 | measurement: [ 159 | { 160 | mode: 'callback', 161 | }, 162 | ], 163 | }, 164 | ]; 165 | assert.deepEqual(actual, expected); 166 | }); 167 | 168 | test('local directory', async () => { 169 | const argv = ['mybench/']; 170 | const actual = await specsFromOpts(parse(argv)); 171 | const expected: BenchmarkSpec[] = [ 172 | { 173 | name: 'mybench', 174 | url: { 175 | kind: 'local', 176 | urlPath: '/mybench/', 177 | queryString: '', 178 | version: undefined, 179 | }, 180 | browser: defaultBrowser, 181 | measurement: [ 182 | { 183 | mode: 'callback', 184 | }, 185 | ], 186 | }, 187 | ]; 188 | assert.deepEqual(actual, expected); 189 | }); 190 | 191 | test('local directory with query params', async () => { 192 | const argv = ['mybench?foo=bar']; 193 | const actual = await specsFromOpts(parse(argv)); 194 | const expected: BenchmarkSpec[] = [ 195 | { 196 | name: 'mybench', 197 | url: { 198 | kind: 'local', 199 | urlPath: '/mybench/', 200 | queryString: '?foo=bar', 201 | version: undefined, 202 | }, 203 | browser: defaultBrowser, 204 | measurement: [ 205 | { 206 | mode: 'callback', 207 | }, 208 | ], 209 | }, 210 | ]; 211 | assert.deepEqual(actual, expected); 212 | }); 213 | 214 | test('local directory with query params and label', async () => { 215 | const argv = ['potato=mybench?foo=bar']; 216 | const actual = await specsFromOpts(parse(argv)); 217 | const expected: BenchmarkSpec[] = [ 218 | { 219 | name: 'potato', 220 | url: { 221 | kind: 'local', 222 | urlPath: '/mybench/', 223 | queryString: '?foo=bar', 224 | version: undefined, 225 | }, 226 | browser: defaultBrowser, 227 | measurement: [ 228 | { 229 | 
mode: 'callback', 230 | }, 231 | ], 232 | }, 233 | ]; 234 | assert.deepEqual(actual, expected); 235 | }); 236 | 237 | test('local directory with versions', async () => { 238 | const argv = [ 239 | 'mybench', 240 | '--package-version=mylib@1.0.0', 241 | '--package-version=v2=mylib@2.0.0', 242 | ]; 243 | const actual = await specsFromOpts(parse(argv)); 244 | const expected: BenchmarkSpec[] = [ 245 | { 246 | name: 'mybench', 247 | url: { 248 | kind: 'local', 249 | urlPath: '/mybench/', 250 | queryString: '', 251 | version: { 252 | label: 'mylib@1.0.0', 253 | dependencyOverrides: { 254 | mylib: '1.0.0', 255 | }, 256 | }, 257 | }, 258 | browser: defaultBrowser, 259 | measurement: [ 260 | { 261 | mode: 'callback', 262 | }, 263 | ], 264 | }, 265 | { 266 | name: 'mybench', 267 | url: { 268 | kind: 'local', 269 | urlPath: '/mybench/', 270 | queryString: '', 271 | version: { 272 | label: 'v2', 273 | dependencyOverrides: { 274 | mylib: '2.0.0', 275 | }, 276 | }, 277 | }, 278 | browser: defaultBrowser, 279 | measurement: [ 280 | { 281 | mode: 'callback', 282 | }, 283 | ], 284 | }, 285 | ]; 286 | assert.deepEqual(actual, expected); 287 | }); 288 | 289 | suite('errors', () => { 290 | test('no such file', async () => { 291 | const argv = ['not-a-file']; 292 | await assert.isRejected(specsFromOpts(parse(argv)), /no such file/i); 293 | }); 294 | 295 | test('not accessible from server root', async () => { 296 | const argv = [path.resolve(__dirname, '..', '..')]; 297 | await assert.isRejected( 298 | specsFromOpts(parse(argv)), 299 | /not accessible from server root/i 300 | ); 301 | }); 302 | 303 | test('did not contain an index.html', async () => { 304 | const argv = ['noindex']; 305 | await assert.isRejected( 306 | specsFromOpts(parse(argv)), 307 | /did not contain an index\.html/i 308 | ); 309 | }); 310 | 311 | test('browser not supported', async () => { 312 | const argv = ['mybench', '--browser=potato']; 313 | await assert.isRejected( 314 | specsFromOpts(parse(argv)), 315 | /browser potato is not supported/i 316 | ); 317 | }); 318 | }); 319 | }); 320 | -------------------------------------------------------------------------------- /src/test/stats_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import jstat from 'jstat'; 8 | import {assert} from 'chai'; 9 | import {suite, test} from 'mocha'; 10 | import {summaryStats, computeDifference, intervalContains} from '../stats.js'; 11 | 12 | suite('statistics', function () { 13 | test('confidence intervals', function () { 14 | this.timeout(4 * 60_000); // Lots of arithmetic. 15 | 16 | // Increasing the number of trials increases the precision of our long-term 17 | // estimate of the proportion of correct confidence intervals (see below). 18 | // Empirically, this lets us reliably assert the proportion +/- 0.01. 19 | const numTrials = 20_000; 20 | 21 | // How many randomized configurations of hypothetical benchmarks to test. 22 | // More is better, but since we need a lot of trials, each scenario can take 23 | // many seconds. 24 | const numScenarios = 10; 25 | 26 | for (let s = 0; s < numScenarios; s++) { 27 | // Pick random parameters for our true distributions (imagine as the 28 | // underlying characteristics of some hypothetical benchmarks), and a 29 | // random number of samples to draw from those distributions (imagine as 30 | // the value of the --sample-size flag). 
31 | const trueMeanA = randFloat(0.01, 1000); 32 | const trueMeanB = randFloat(0.01, 1000); 33 | const trueAbsoluteDifference = trueMeanB - trueMeanA; 34 | const trueRelativeDifference = (trueMeanB - trueMeanA) / trueMeanA; 35 | const stdDevA = randFloat(0.01, 10); 36 | const stdDevB = randFloat(0.01, 10); 37 | const sampleSize = randInt(50, 1000); 38 | 39 | // Imagine each trial as an end-to-end invocation of the benchmark runner. 40 | // Keep track of how often our confidence interval contains the true mean. 41 | let numGoodA = 0; 42 | let numGoodB = 0; 43 | let numGoodAbsoluteDiff = 0; 44 | let numGoodRelativeDiff = 0; 45 | for (let t = 0; t < numTrials; t++) { 46 | // TODO It does not theoretically matter if our underlying data is 47 | // normally distributed. Test with some other underlying distributions, 48 | // e.g. poisson, to verify. 49 | const valuesA = randNormalValues(sampleSize, trueMeanA, stdDevA); 50 | const valuesB = randNormalValues(sampleSize, trueMeanB, stdDevB); 51 | const statsA = summaryStats(valuesA); 52 | const statsB = summaryStats(valuesB); 53 | const difference = computeDifference(statsA, statsB); 54 | if (intervalContains(statsA.meanCI, trueMeanA)) { 55 | numGoodA++; 56 | } 57 | if (intervalContains(statsB.meanCI, trueMeanB)) { 58 | numGoodB++; 59 | } 60 | if (intervalContains(difference.absolute, trueAbsoluteDifference)) { 61 | numGoodAbsoluteDiff++; 62 | } 63 | if (intervalContains(difference.relative, trueRelativeDifference)) { 64 | numGoodRelativeDiff++; 65 | } 66 | } 67 | 68 | // We should expect, since we are using confidence = 0.95, that over the 69 | // long-run, the confidence intervals we generate should contain the true 70 | // mean 95% of the time (this is the definition of a confidence interval). 71 | assert.closeTo(numGoodA / numTrials, 0.95, 0.01); 72 | assert.closeTo(numGoodB / numTrials, 0.95, 0.01); 73 | assert.closeTo(numGoodAbsoluteDiff / numTrials, 0.95, 0.01); 74 | assert.closeTo(numGoodRelativeDiff / numTrials, 0.95, 0.01); 75 | } 76 | }); 77 | }); 78 | 79 | /** 80 | * Generate random numbers from the normal distribution with the given mean and 81 | * standard deviation. 82 | */ 83 | const randNormalValues = ( 84 | size: number, 85 | mean: number, 86 | stdDev: number 87 | ): number[] => { 88 | // Note jstat.randn generates random numbers from the standard normal 89 | // distribution (which has mean 0 and standard deviation 1) hence we must 90 | // transform it to our distribution. 91 | const [vals] = jstat.randn(1, size); 92 | return vals.map((v) => v * stdDev + mean); 93 | }; 94 | 95 | /** 96 | * Min inclusive, max exclusive. 97 | * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/random#Examples 98 | */ 99 | const randFloat = (min: number, max: number): number => 100 | Math.random() * (max - min) + min; 101 | 102 | /** 103 | * Min inclusive, max exclusive. 
104 | * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/random#Examples 105 | */ 106 | const randInt = (min: number, max: number): number => { 107 | min = Math.ceil(min); 108 | max = Math.floor(max); 109 | return Math.floor(Math.random() * (max - min + 1)) + min; 110 | }; 111 | -------------------------------------------------------------------------------- /src/test/test_helpers.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import * as path from 'path'; 8 | 9 | import {applyDefaults} from '../config.js'; 10 | import {ConfigFile, parseConfigFile} from '../configfile.js'; 11 | import { 12 | computeDifferences, 13 | ResultStatsWithDifferences, 14 | summaryStats, 15 | } from '../stats.js'; 16 | 17 | import * as url from 'url'; 18 | const __dirname = url.fileURLToPath(new URL('.', import.meta.url)); 19 | 20 | /** 21 | * Absolute location on disk of our test data directory. 22 | */ 23 | export const testData = path.resolve( 24 | __dirname, 25 | '..', 26 | '..', 27 | 'src', 28 | 'test', 29 | 'data' 30 | ); 31 | 32 | const userAgents = new Map([ 33 | [ 34 | 'chrome', 35 | 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 36 | ], 37 | [ 38 | 'firefox', 39 | 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0', 40 | ], 41 | ]); 42 | 43 | /** 44 | * Given a config file object, generates fake measurement results, where the 45 | * measurement and byte size for each benchmark is based on its index in the 46 | * list of benchmarks (+10ms and +1KiB for each index). 47 | */ 48 | export async function fakeResults( 49 | configFile: ConfigFile 50 | ): Promise<ResultStatsWithDifferences[]> { 51 | const config = applyDefaults( 52 | await parseConfigFile(configFile, 'tachometer.json') 53 | ); 54 | const results = []; 55 | for (let i = 0; i < config.benchmarks.length; i++) { 56 | const {name, url, browser, measurement} = config.benchmarks[i]; 57 | const averageMillis = (i + 1) * 10; 58 | const bytesSent = (i + 1) * 1024; 59 | const millis = [ 60 | // Split the sample size in half to add +/- 5ms variance, just to make 61 | // things a little more interesting. 62 | ...new Array(Math.floor(config.sampleSize / 2)).fill(averageMillis - 5), 63 | ...new Array(Math.ceil(config.sampleSize / 2)).fill(averageMillis + 5), 64 | ]; 65 | for ( 66 | let measurementIndex = 0; 67 | measurementIndex < measurement.length; 68 | measurementIndex++ 69 | ) { 70 | const resultName = 71 | measurement.length === 1 72 | ? name 73 | : `${name} [${measurement[measurementIndex].name}]`; 74 | results.push({ 75 | stats: summaryStats(millis), 76 | result: { 77 | name: resultName, 78 | measurement: measurement[measurementIndex], 79 | measurementIndex, 80 | queryString: url.kind === 'local' ? url.queryString : '', 81 | version: 82 | url.kind === 'local' && url.version !== undefined 83 | ? 
url.version.label 84 | : '', 85 | millis, 86 | bytesSent, 87 | browser, 88 | userAgent: userAgents.get(browser.name) || '', 89 | }, 90 | }); 91 | } 92 | } 93 | return computeDifferences(results); 94 | } 95 | -------------------------------------------------------------------------------- /src/test/versions_test.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {assert} from 'chai'; 8 | import {suite, test} from 'mocha'; 9 | import * as path from 'path'; 10 | 11 | import * as defaults from '../defaults.js'; 12 | import {BenchmarkSpec} from '../types.js'; 13 | import { 14 | hashStrings, 15 | makeServerPlans, 16 | ServerPlan, 17 | tachometerVersion, 18 | } from '../versions.js'; 19 | import {testData} from './test_helpers.js'; 20 | 21 | const defaultBrowser = { 22 | name: defaults.browserName, 23 | headless: false, 24 | windowSize: { 25 | width: defaults.windowWidth, 26 | height: defaults.windowHeight, 27 | }, 28 | }; 29 | 30 | suite('versions', () => { 31 | suite('makeServerPlans', async () => { 32 | test('various', async () => { 33 | const specs: BenchmarkSpec[] = [ 34 | // mybench running with two custom versions. 35 | { 36 | name: 'mybench', 37 | url: { 38 | kind: 'local', 39 | urlPath: '/mylib/mybench/', 40 | version: { 41 | label: 'v1', 42 | dependencyOverrides: { 43 | mylib: '1.0.0', 44 | }, 45 | }, 46 | queryString: '', 47 | }, 48 | measurement: [ 49 | { 50 | mode: 'performance', 51 | entryName: 'first-contentful-paint', 52 | }, 53 | ], 54 | browser: defaultBrowser, 55 | }, 56 | { 57 | name: 'mybench', 58 | url: { 59 | kind: 'local', 60 | urlPath: '/mylib/mybench/', 61 | version: { 62 | label: 'v2', 63 | dependencyOverrides: { 64 | mylib: '2.0.0', 65 | }, 66 | }, 67 | queryString: '', 68 | }, 69 | measurement: [ 70 | { 71 | mode: 'performance', 72 | entryName: 'first-contentful-paint', 73 | }, 74 | ], 75 | browser: defaultBrowser, 76 | }, 77 | 78 | // mybench and other bench only need the default server. 79 | { 80 | name: 'mybench', 81 | url: { 82 | kind: 'local', 83 | urlPath: '/mylib/mybench/', 84 | queryString: '', 85 | }, 86 | measurement: [ 87 | { 88 | mode: 'performance', 89 | entryName: 'first-contentful-paint', 90 | }, 91 | ], 92 | browser: defaultBrowser, 93 | }, 94 | { 95 | name: 'otherbench', 96 | url: { 97 | kind: 'local', 98 | urlPath: '/otherlib/otherbench/', 99 | queryString: '', 100 | }, 101 | measurement: [ 102 | { 103 | mode: 'performance', 104 | entryName: 'first-contentful-paint', 105 | }, 106 | ], 107 | browser: defaultBrowser, 108 | }, 109 | 110 | // A remote URL doesn't need a server. 
111 | { 112 | name: 'http://example.com', 113 | url: { 114 | kind: 'remote', 115 | url: 'http://example.com', 116 | }, 117 | measurement: [ 118 | { 119 | mode: 'performance', 120 | entryName: 'first-contentful-paint', 121 | }, 122 | ], 123 | browser: defaultBrowser, 124 | }, 125 | ]; 126 | 127 | const tempDir = '/tmp'; 128 | const {plans: actualPlans, gitInstalls: actualGitInstalls} = 129 | await makeServerPlans(testData, tempDir, specs); 130 | 131 | const v1Hash = hashStrings( 132 | tachometerVersion, 133 | path.join(testData, 'mylib', 'package.json'), 134 | JSON.stringify([ 135 | ['mylib', '1.0.0'], 136 | ['otherlib', '0.0.0'], 137 | ]) 138 | ); 139 | const v2Hash = hashStrings( 140 | tachometerVersion, 141 | path.join(testData, 'mylib', 'package.json'), 142 | JSON.stringify([ 143 | ['mylib', '2.0.0'], 144 | ['otherlib', '0.0.0'], 145 | ]) 146 | ); 147 | 148 | const expectedPlans: ServerPlan[] = [ 149 | { 150 | specs: [specs[2], specs[3]], 151 | npmInstalls: [], 152 | mountPoints: [ 153 | { 154 | diskPath: testData, 155 | urlPath: '/', 156 | }, 157 | ], 158 | }, 159 | 160 | { 161 | specs: [specs[0]], 162 | npmInstalls: [ 163 | { 164 | installDir: path.join(tempDir, v1Hash), 165 | packageJson: { 166 | private: true, 167 | dependencies: { 168 | mylib: '1.0.0', 169 | otherlib: '0.0.0', 170 | }, 171 | }, 172 | }, 173 | ], 174 | mountPoints: [ 175 | { 176 | diskPath: path.join(tempDir, v1Hash, 'node_modules'), 177 | urlPath: '/mylib/node_modules', 178 | }, 179 | { 180 | diskPath: testData, 181 | urlPath: '/', 182 | }, 183 | ], 184 | }, 185 | 186 | { 187 | specs: [specs[1]], 188 | npmInstalls: [ 189 | { 190 | installDir: path.join(tempDir, v2Hash), 191 | packageJson: { 192 | private: true, 193 | dependencies: { 194 | mylib: '2.0.0', 195 | otherlib: '0.0.0', 196 | }, 197 | }, 198 | }, 199 | ], 200 | mountPoints: [ 201 | { 202 | diskPath: path.join(tempDir, v2Hash, 'node_modules'), 203 | urlPath: '/mylib/node_modules', 204 | }, 205 | { 206 | diskPath: testData, 207 | urlPath: '/', 208 | }, 209 | ], 210 | }, 211 | ]; 212 | 213 | assert.deepEqual(actualPlans, expectedPlans); 214 | assert.deepEqual(actualGitInstalls, []); 215 | }); 216 | 217 | /** 218 | * Regression test for https://github.com/Polymer/tachometer/issues/82 219 | * where the node_modules/ directory was being mounted at the 220 | * "//node_modules" URL. 
221 | */ 222 | test('node_modules as direct child of root dir', async () => { 223 | const specs: BenchmarkSpec[] = [ 224 | { 225 | name: 'mybench', 226 | url: { 227 | kind: 'local', 228 | urlPath: '/mybench/', 229 | version: { 230 | label: 'v1', 231 | dependencyOverrides: {mylib: '1.0.0'}, 232 | }, 233 | queryString: '', 234 | }, 235 | measurement: [ 236 | { 237 | mode: 'performance', 238 | entryName: 'first-contentful-paint', 239 | }, 240 | ], 241 | browser: defaultBrowser, 242 | }, 243 | ]; 244 | 245 | const tempDir = '/tmp'; 246 | const {plans: actualPlans, gitInstalls: actualGitInstalls} = 247 | await makeServerPlans(path.join(testData, 'mylib'), tempDir, specs); 248 | 249 | const v1Hash = hashStrings( 250 | tachometerVersion, 251 | path.join(testData, 'mylib', 'package.json'), 252 | JSON.stringify([ 253 | ['mylib', '1.0.0'], 254 | ['otherlib', '0.0.0'], 255 | ]) 256 | ); 257 | const expectedPlans: ServerPlan[] = [ 258 | { 259 | specs: [specs[0]], 260 | npmInstalls: [ 261 | { 262 | installDir: path.join(tempDir, v1Hash), 263 | packageJson: { 264 | private: true, 265 | dependencies: { 266 | mylib: '1.0.0', 267 | otherlib: '0.0.0', 268 | }, 269 | }, 270 | }, 271 | ], 272 | mountPoints: [ 273 | { 274 | diskPath: path.join(tempDir, v1Hash, 'node_modules'), 275 | urlPath: '/node_modules', 276 | }, 277 | { 278 | diskPath: path.join(testData, 'mylib'), 279 | urlPath: '/', 280 | }, 281 | ], 282 | }, 283 | ]; 284 | 285 | assert.deepEqual(actualPlans, expectedPlans); 286 | assert.deepEqual(actualGitInstalls, []); 287 | }); 288 | }); 289 | }); 290 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {BrowserConfig} from './browser.js'; 8 | 9 | export class Deferred<T> { 10 | readonly promise: Promise<T>; 11 | resolve!: (value: T) => void; 12 | reject!: (error: Error) => void; 13 | 14 | constructor() { 15 | this.promise = new Promise((resolve, reject) => { 16 | this.resolve = resolve; 17 | this.reject = reject; 18 | }); 19 | } 20 | } 21 | 22 | /** 23 | * A mapping from NPM package name to version specifier, as used in a 24 | * package.json's "dependencies" and "devDependencies". 25 | */ 26 | export interface PackageDependencyMap { 27 | [pkg: string]: string; 28 | } 29 | 30 | /** 31 | * Tachometer's extensions to the NPM "dependencies" field, which allows for 32 | * more advanced configurations. 33 | */ 34 | export interface ExtendedPackageDependencyMap { 35 | [pkg: string]: string | GitDependency; 36 | } 37 | 38 | /** 39 | * Configuration for cloning a Git repo at some ref with an optional package 40 | * sub-path for monorepos, for use as an NPM dependency. 41 | */ 42 | export interface GitDependency { 43 | kind: 'git'; 44 | // The git repository to clone. Any valid `git clone <repository>` argument 45 | // (e.g. "git@github.com:webcomponents/polyfills.git"). 46 | repo: string; 47 | // The branch, tag, or SHA to checkout (e.g. "master", "my-feature"). 48 | ref: string; 49 | // For monorepos or other unusual file layouts, the path relative to the root 50 | // of the git repo where the "package.json" for the appropriate package can be 51 | // found (e.g. "packages/shadycss"). 52 | subdir?: string; 53 | // Install, bootstrap, build, etc. commands to run before installing this 54 | // package as a dependency (e.g. ["npm install", "npm run build"]). 
55 | setupCommands?: string[]; 56 | } 57 | 58 | /** 59 | * The descriptor of a package version as specified by the --package-version 60 | * flag. 61 | */ 62 | export interface PackageVersion { 63 | label: string; 64 | dependencyOverrides: ExtendedPackageDependencyMap; 65 | } 66 | 67 | /** The subset of the format of an NPM package.json file we care about. */ 68 | export interface NpmPackageJson { 69 | private: boolean; 70 | dependencies: PackageDependencyMap; 71 | } 72 | 73 | /** The kinds of intervals we can measure. */ 74 | export type Measurement = 75 | | CallbackMeasurement 76 | | PerformanceEntryMeasurement 77 | | ExpressionMeasurement; 78 | 79 | export interface MeasurementBase { 80 | name?: string; 81 | } 82 | 83 | export interface CallbackMeasurement extends MeasurementBase { 84 | mode: 'callback'; 85 | } 86 | 87 | export interface PerformanceEntryMeasurement extends MeasurementBase { 88 | mode: 'performance'; 89 | entryName: string; 90 | } 91 | 92 | export interface ExpressionMeasurement extends MeasurementBase { 93 | mode: 'expression'; 94 | expression: string; 95 | } 96 | 97 | export type CommandLineMeasurements = 'callback' | 'fcp' | 'global'; 98 | 99 | export const measurements = new Set(['callback', 'fcp', 'global']); 100 | 101 | /** A specification of a benchmark to run. */ 102 | export interface BenchmarkSpec { 103 | url: LocalUrl | RemoteUrl; 104 | measurement: Measurement[]; 105 | name: string; 106 | browser: BrowserConfig; 107 | } 108 | 109 | export interface LocalUrl { 110 | kind: 'local'; 111 | version?: PackageVersion; 112 | urlPath: string; 113 | queryString: string; 114 | } 115 | 116 | export interface RemoteUrl { 117 | kind: 'remote'; 118 | url: string; 119 | } 120 | 121 | // Note: sync with client/src/index.ts 122 | export interface BenchmarkResponse { 123 | millis: number; 124 | } 125 | 126 | /** 127 | * Benchmark results for a particular measurement on a particular page, across 128 | * all samples. 129 | */ 130 | export interface BenchmarkResult { 131 | /** 132 | * Label for this result. When there is more than one per page, this will 133 | * contain both the page and measurement labels as "page [measurement]". 134 | */ 135 | name: string; 136 | /** 137 | * The measurement that produced this result 138 | */ 139 | measurement: Measurement; 140 | /** 141 | * A single page can return multiple measurements. The offset into the array 142 | * of measurements in the spec that this particular result corresponds to. 143 | */ 144 | measurementIndex: number; 145 | /** 146 | * Millisecond measurements for each sample. 
147 | */ 148 | millis: number[]; 149 | queryString: string; 150 | version: string; 151 | browser: BrowserConfig; 152 | userAgent: string; 153 | bytesSent: number; 154 | } 155 | -------------------------------------------------------------------------------- /src/types/jstat/index.d.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2022 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | declare module 'jstat' { 8 | /** https://jstat.github.io/all.html#jStat.studentt.inv */ 9 | export const studentt: { 10 | inv(p: number, dof: number): number; 11 | }; 12 | 13 | /** https://jstat.github.io/all.html#randn */ 14 | export function randn(n: number, m: number): [number[]]; 15 | } 16 | -------------------------------------------------------------------------------- /src/util.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * @license 3 | * Copyright 2019 Google LLC 4 | * SPDX-License-Identifier: BSD-3-Clause 5 | */ 6 | 7 | import {execFile, ExecFileOptions} from 'child_process'; 8 | import fsExtra from 'fs-extra'; 9 | import {URL} from 'url'; 10 | import {promisify} from 'util'; 11 | 12 | /** Return whether the given string is a valid HTTP URL. */ 13 | export function isHttpUrl(str: string): boolean { 14 | try { 15 | const url = new URL(str); 16 | // Note an absolute Windows file path will parse as a URL (e.g. 17 | // 'C:\\foo\\bar' => {protocol: 'c:', pathname: '\\foo\\bar', ...}) 18 | return url.protocol === 'http:' || url.protocol === 'https:'; 19 | } catch (e) { 20 | return false; 21 | } 22 | } 23 | 24 | export async function fileKind( 25 | path: string 26 | ): Promise<'file' | 'dir' | undefined> { 27 | try { 28 | const stat = await fsExtra.stat(path); 29 | if (stat.isDirectory()) { 30 | return 'dir'; 31 | } 32 | if (stat.isFile()) { 33 | return 'file'; 34 | } 35 | } catch (e) { 36 | if ((e as Error & {code?: string}).code === 'ENOENT') { 37 | return undefined; 38 | } 39 | throw e; 40 | } 41 | } 42 | 43 | const npmCmd = process.platform === 'win32' ? 'npm.cmd' : 'npm'; 44 | export async function runNpm( 45 | args: string[], 46 | options?: ExecFileOptions 47 | ): Promise<string> { 48 | return promisify(execFile)(npmCmd, args, options).then(({stdout}) => stdout); 49 | } 50 | 51 | /** 52 | * Promisified version of setTimeout. 53 | */ 54 | export const wait = (ms: number) => 55 | new Promise((resolve) => setTimeout(resolve, ms)); 56 | 57 | /** 58 | * A function that should never be called. But if it somehow is anyway, throw an 59 | * exception with the given message. 
60 | */ 61 | export function throwUnreachable(_unreachable: never, message: string): void { 62 | throw new Error(message); 63 | } 64 | -------------------------------------------------------------------------------- /test/mocha.opts: -------------------------------------------------------------------------------- 1 | --ui tdd 2 | --require source-map-support/register 3 | lib/test/**/*_test.js 4 | -------------------------------------------------------------------------------- /tsconfig-base.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2017", 4 | "module": "es2020", 5 | "moduleResolution": "node", 6 | "esModuleInterop": true, 7 | 8 | "strict": true, 9 | "noUnusedLocals": true, 10 | "noUnusedParameters": true, 11 | "preserveConstEnums": true, 12 | 13 | "lib": ["esnext", "esnext.asynciterable"], 14 | 15 | "declaration": true, 16 | "sourceMap": true, 17 | "pretty": true 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig-base.json", 3 | "compilerOptions": { 4 | "outDir": "./lib", 5 | "typeRoots": ["./src/types"] 6 | }, 7 | "include": ["src/**/*.ts"] 8 | } 9 | --------------------------------------------------------------------------------