├── .gitignore ├── version.ts ├── .github ├── FUNDING.yml └── workflows │ └── ci.yml ├── docs ├── imgs │ ├── prettyBenchingResult_example.png │ ├── prettyBenchingHistory_progress_delta.png │ ├── prettyBenchingHistory_result_card_delta.png │ ├── prettyBenchingProgress_example_finished.png │ ├── prettyBenchingProgress_example_running.png │ ├── prettyBenchingProgress_example_threshold.png │ ├── prettyBenchingResult_example_full_extra.png │ ├── prettyBenchingResult_example_indicator.png │ ├── prettyBenchingResult_example_indicators.png │ ├── prettyBenchingResult_example_threshold.png │ ├── prettyBenchingProgress_example_indicators.png │ ├── prettyBenchingResult_example_threshold_line.png │ └── prettyBenchingResult_example_extrametrics_line.png ├── showcase │ ├── README.md │ ├── showcase_create.ts │ ├── showcase_read.ts │ ├── showcase.txt │ └── benchmark_result_input.json ├── prettyBenchmarkDown │ ├── pr_benchmark.yml │ ├── pr_benchmark_output.md │ └── pr_benchmarks.ts └── snapper.ts ├── tests ├── test_deps.ts ├── test_helpers.ts ├── mod_test.ts ├── colorer_test.ts ├── lib_test.ts ├── common_test.ts ├── table_test.ts └── utils_test.ts ├── deps.ts ├── colorer.ts ├── mod.ts ├── types.ts ├── benchmarks └── benchmark.ts ├── pretty_benchmark_result.ts ├── utils.ts ├── example.ts ├── table.ts ├── common.ts ├── history_extensions.ts ├── pretty_benchmark_progress.ts ├── benchmark_result_card.ts ├── pretty_benchmark_down.ts ├── pretty_benchmark_history.ts └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | 3 | cov/* 4 | cov.lcov -------------------------------------------------------------------------------- /version.ts: -------------------------------------------------------------------------------- 1 | export const VERSION = "0.3.3"; 2 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | ko_fi: littletof 2 | custom: ['https://www.buymeacoffee.com/littletof', 'https://coindrop.to/littletof'] 3 | -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingResult_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingResult_example.png -------------------------------------------------------------------------------- /tests/test_deps.ts: -------------------------------------------------------------------------------- 1 | export { 2 | assert, 3 | assertEquals, 4 | assertThrows, 5 | } from "https://deno.land/std@0.91.0/testing/asserts.ts"; 6 | -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingHistory_progress_delta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingHistory_progress_delta.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingHistory_result_card_delta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingHistory_result_card_delta.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingProgress_example_finished.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingProgress_example_finished.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingProgress_example_running.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingProgress_example_running.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingProgress_example_threshold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingProgress_example_threshold.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingResult_example_full_extra.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingResult_example_full_extra.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingResult_example_indicator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingResult_example_indicator.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingResult_example_indicators.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingResult_example_indicators.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingResult_example_threshold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingResult_example_threshold.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingProgress_example_indicators.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingProgress_example_indicators.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingResult_example_threshold_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingResult_example_threshold_line.png -------------------------------------------------------------------------------- /docs/imgs/prettyBenchingResult_example_extrametrics_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/littletof/prettyBenching/HEAD/docs/imgs/prettyBenchingResult_example_extrametrics_line.png -------------------------------------------------------------------------------- /deps.ts: -------------------------------------------------------------------------------- 1 | export { 2 | bench, 3 | clearBenchmarks, 4 | ProgressState, 5 | runBenchmarks, 6 | } from "https://deno.land/std@0.91.0/testing/bench.ts"; 7 | export type { 8 | BenchmarkResult, 9 | BenchmarkRunProgress, 10 | BenchmarkRunResult, 11 | } from "https://deno.land/std@0.91.0/testing/bench.ts"; 12 | 13 | export * as colors from "https://deno.land/std@0.91.0/fmt/colors.ts"; 14 | -------------------------------------------------------------------------------- /docs/showcase/README.md: -------------------------------------------------------------------------------- 1 | This is mainly a maintenance module, its main purpose is to have an example output, so the progression of the looks can be observed. 2 | 3 | The main dataset shouldt be changed, so the example stays consistent. There may be reasons to change, for example to showcase a new option better. 4 | To create a new dataset run: 5 | 6 | ```ts 7 | deno run --allow-read=./docs/showcase --allow-write=./docs/showcase --allow-hrtime --unstable .\docs\showcase\showcase_create.ts 8 | ``` 9 | 10 | If you have a dataset, than you can run this, to produce the generated showcase file. 11 | 12 | ```ts 13 | deno run --allow-read=./docs/showcase --allow-write=./docs/showcase --allow-hrtime --unstable .\docs\showcase\showcase_read.ts 14 | ``` 15 | 16 | ### TODO 17 | 18 | A helper script, to make the docs images more easily and to keep them up-to-date. 19 | -------------------------------------------------------------------------------- /colorer.ts: -------------------------------------------------------------------------------- 1 | import { colors } from "./deps.ts"; 2 | 3 | export class Colorer { 4 | doColor = true; 5 | 6 | setColorEnabled(to: boolean) { 7 | this.doColor = to; 8 | } 9 | 10 | black = (str: string) => this.color(str, colors.black); 11 | blue = (str: string) => this.color(str, colors.blue); 12 | cyan = (str: string) => this.color(str, colors.cyan); 13 | gray = (str: string) => this.color(str, colors.gray); 14 | green = (str: string) => this.color(str, colors.green); 15 | magenta = (str: string) => this.color(str, colors.magenta); 16 | red = (str: string) => this.color(str, colors.red); 17 | white = (str: string) => this.color(str, colors.white); 18 | yellow = (str: string) => this.color(str, colors.yellow); 19 | 20 | private color(str: string, colorFn: (str: string) => string): string { 21 | return this.doColor ? colorFn(str) : str; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /tests/test_helpers.ts: -------------------------------------------------------------------------------- 1 | import { assertThrows } from "./test_deps.ts"; 2 | 3 | export interface TestCase { 4 | input: T; 5 | result?: K; 6 | // deno-lint-ignore no-explicit-any 7 | exception?: { error?: any; msg?: string }; 8 | desc?: string; 9 | } 10 | 11 | export function testEach( 12 | name: string, 13 | input: TestCase[], 14 | fn: (testCase: TestCase) => void, 15 | os?: "only" | "skip", 16 | ) { 17 | input.forEach((input, i) => { 18 | Deno.test({ 19 | name: `${name} [${i + 1}]`, 20 | fn() { 21 | if (input.exception) { 22 | assertThrows( 23 | () => fn(input), 24 | input.exception?.error, 25 | input.exception?.msg, 26 | ); 27 | } else { 28 | fn(input); 29 | } 30 | }, 31 | only: os === "only", 32 | ignore: os === "skip", 33 | }); 34 | }); 35 | } 36 | -------------------------------------------------------------------------------- /docs/prettyBenchmarkDown/pr_benchmark.yml: -------------------------------------------------------------------------------- 1 | name: Benchmark PR 2 | on: [pull_request] 3 | jobs: 4 | build: 5 | name: ubuntu-latest 6 | runs-on: ubuntu-latest 7 | timeout-minutes: 5 8 | steps: 9 | - name: Clone repository 10 | uses: actions/checkout@v2 11 | - name: Install deno 12 | uses: denolib/setup-deno@master 13 | with: 14 | deno-version: 1.8.2 15 | - name: Bench It 16 | run: deno run --allow-hrtime ./benchmarks/pr_benchmarks.ts | tee ./benchmarks/pr_benchmark.md 17 | # Feel free to remove this 'Add link' step 18 | - name: Add link 19 | run: echo "

@prettyBenching

" | tee -a ./benchmarks/pr_benchmark.md 20 | - uses: harupy/comment-on-pr@master 21 | env: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | with: 24 | filename: ../../benchmarks/pr_benchmark.md 25 | -------------------------------------------------------------------------------- /mod.ts: -------------------------------------------------------------------------------- 1 | export type { BenchIndicator, Threshold, Thresholds } from "./types.ts"; 2 | 3 | export { 4 | calculateExtraMetrics, 5 | calculateStdDeviation, 6 | getThresholdResultsFrom, 7 | } from "./common.ts"; 8 | 9 | export { prettyBenchmarkResult } from "./pretty_benchmark_result.ts"; 10 | export type { prettyBenchmarkResultOptions } from "./pretty_benchmark_result.ts"; 11 | 12 | export { prettyBenchmarkProgress } from "./pretty_benchmark_progress.ts"; 13 | export type { prettyBenchmarkProgressOptions } from "./pretty_benchmark_progress.ts"; 14 | 15 | export { 16 | defaultColumns, 17 | extraMetricsColumns, 18 | indicatorColumn, 19 | prettyBenchmarkDown, 20 | thresholdResultColumn, 21 | thresholdsColumn, 22 | } from "./pretty_benchmark_down.ts"; 23 | export type { 24 | ColumnDefinition, 25 | GroupDefinition, 26 | prettyBenchmarkDownOptions, 27 | } from "./pretty_benchmark_down.ts"; 28 | 29 | export { 30 | calculateThresholds, 31 | prettyBenchmarkHistory, 32 | } from "./pretty_benchmark_history.ts"; 33 | export type { 34 | BenchmarkHistory, 35 | BenchmarkHistoryItem, 36 | BenchmarkHistoryRunItem, 37 | Delta, 38 | prettyBenchmarkHistoryOptions, 39 | strictHistoryRules, 40 | } from "./pretty_benchmark_history.ts"; 41 | 42 | export { 43 | deltaColumn, 44 | deltaProgressRowExtra, 45 | deltaResultInfoCell, 46 | historyColumns, 47 | } from "./history_extensions.ts"; 48 | -------------------------------------------------------------------------------- /tests/mod_test.ts: -------------------------------------------------------------------------------- 1 | import { assert, assertEquals } from "./test_deps.ts"; 2 | import * as mod from "../mod.ts"; 3 | 4 | Deno.test({ 5 | name: "public API assertions", 6 | fn() { 7 | assert(mod != null); 8 | assertEquals(typeof mod.prettyBenchmarkResult, "function"); 9 | assertEquals(typeof mod.prettyBenchmarkProgress, "function"); 10 | 11 | assertEquals(typeof mod.prettyBenchmarkDown, "function"); 12 | assertEquals(typeof mod.indicatorColumn, "function"); 13 | assertEquals(typeof mod.thresholdsColumn, "function"); 14 | assertEquals(typeof mod.thresholdResultColumn, "function"); 15 | assertEquals(typeof mod.extraMetricsColumns, "function"); 16 | assertEquals(typeof mod.defaultColumns, "function"); 17 | 18 | assertEquals(typeof mod.calculateExtraMetrics, "function"); 19 | assertEquals(typeof mod.calculateStdDeviation, "function"); 20 | assertEquals(typeof mod.calculateThresholds, "function"); 21 | assertEquals(typeof mod.getThresholdResultsFrom, "function"); 22 | 23 | assertEquals(typeof mod.prettyBenchmarkHistory, "function"); 24 | assertEquals(typeof mod.deltaResultInfoCell, "function"); 25 | assertEquals(typeof mod.deltaProgressRowExtra, "function"); 26 | assertEquals(typeof mod.deltaColumn, "function"); 27 | assertEquals(typeof mod.historyColumns, "function"); 28 | 29 | assertEquals(Object.keys(mod).length, 17); 30 | }, 31 | }); 32 | -------------------------------------------------------------------------------- /types.ts: -------------------------------------------------------------------------------- 1 | /** Defines an indicator that should be used for the matching benches */ 2 | export interface BenchIndicator { 3 | /** Selects which benches the indicator should be used for, by matching the `RegExp` against the bench names */ 4 | benches: RegExp; 5 | /** Modifies the default indicator character to the returned string. 6 | * 7 | * *Note*: If color functions are used, the `nocolor` option doesnt affect them 8 | * @param str: The default indicator char 9 | */ 10 | modFn?: ( 11 | str: string, 12 | ) => string | { indicator: string; visibleLength: number }; 13 | /** Defines a color that should be assosiated with the matching benches. A simple std color function should be provided. 14 | * 15 | * *Note*: If color functions are used, the `nocolor` option doesnt affect them 16 | */ 17 | color?: (str: string) => string; 18 | } 19 | 20 | /** Defines a threshold, which has three sections. 21 | * 22 | * 1. *`0` <= && <= `green`* --> `green` section 23 | * 2. *`green` < && <= `yellow`* --> `yellow` section 24 | * 3. *`yellow` < && <= infinite* --> `red` section 25 | */ 26 | export interface Threshold { 27 | /** Defines the upper limit of the `green` section in milliseconds. */ 28 | green: number; 29 | /** Deifnes the upper limit of the `yellow` section in milliseconds. */ 30 | yellow: number; 31 | } 32 | 33 | /** Defines `Threshold`-s for specific benches. Each `key` of this object should correspond to a specific benchmark's `name`*/ 34 | export interface Thresholds { 35 | [key: string]: Threshold; 36 | } 37 | -------------------------------------------------------------------------------- /benchmarks/benchmark.ts: -------------------------------------------------------------------------------- 1 | // deno-lint-ignore-file 2 | 3 | import { 4 | bench, 5 | runBenchmarks, 6 | } from "https://deno.land/std@0.91.0/testing/bench.ts"; 7 | import { join } from "https://deno.land/std@0.91.0/path/mod.ts"; 8 | import { prettyBenchmarkResult } from "../mod.ts"; 9 | import { colors } from "../deps.ts"; 10 | 11 | console.log( 12 | new URL( 13 | join("..", "docs", "showcase", "benchmark_result_input.json"), 14 | import.meta.url, 15 | ).href, 16 | ); 17 | 18 | await Deno.permissions.request({ name: "hrtime" }); 19 | 20 | const inputJSONPath = join("docs", "showcase", "benchmark_result_input.json"); 21 | const result = await Deno.permissions.request({ 22 | name: "read", 23 | path: inputJSONPath, 24 | }); 25 | if (result.state !== "granted") { 26 | console.error( 27 | colors.red("Can't run without input data for the benchmark. Exiting..."), 28 | ); 29 | Deno.exit(1); 30 | } 31 | 32 | const data = JSON.parse(Deno.readTextFileSync(inputJSONPath)); 33 | 34 | const resultFn = prettyBenchmarkResult( 35 | { 36 | nocolor: false, 37 | thresholds: { 38 | "multiple-runs": { green: 76, yellow: 82 }, 39 | "benchmark-start": { green: 2, yellow: 3 }, 40 | }, 41 | indicators: [ 42 | { 43 | benches: /multiple-runs/, 44 | color: colors.magenta, 45 | modFn: (str) => "🚀", 46 | }, 47 | ], 48 | parts: { threshold: true, extraMetrics: true, graph: true }, 49 | }, 50 | ); 51 | 52 | bench({ 53 | name: "benchCard", 54 | runs: 1, 55 | func(b) { 56 | b.start(); 57 | resultFn(data as any); 58 | b.stop(); 59 | }, 60 | }); 61 | 62 | // current 5.589ms / 1000 63 | 64 | await runBenchmarks(); 65 | -------------------------------------------------------------------------------- /tests/colorer_test.ts: -------------------------------------------------------------------------------- 1 | import { testEach } from "./test_helpers.ts"; 2 | import { assertEquals } from "./test_deps.ts"; 3 | 4 | import { Colorer } from "../colorer.ts"; 5 | import { colors } from "../deps.ts"; 6 | 7 | testEach<{ fn: (str: string) => string; str: string }, (str: string) => string>( 8 | "colorer.colors", 9 | [ 10 | { input: { fn: new Colorer().black, str: "test" }, result: colors.black }, 11 | { input: { fn: new Colorer().blue, str: "test" }, result: colors.blue }, 12 | { input: { fn: new Colorer().cyan, str: "test" }, result: colors.cyan }, 13 | { input: { fn: new Colorer().gray, str: "test" }, result: colors.gray }, 14 | { input: { fn: new Colorer().green, str: "test" }, result: colors.green }, 15 | { 16 | input: { fn: new Colorer().magenta, str: "test" }, 17 | result: colors.magenta, 18 | }, 19 | { input: { fn: new Colorer().red, str: "test" }, result: colors.red }, 20 | { input: { fn: new Colorer().white, str: "test" }, result: colors.white }, 21 | { input: { fn: new Colorer().yellow, str: "test" }, result: colors.yellow }, 22 | 23 | { input: { fn: new Colorer().yellow, str: "🦕" }, result: colors.yellow }, 24 | { input: { fn: new Colorer().blue, str: "🚀" }, result: colors.blue }, 25 | { input: { fn: new Colorer().red, str: "⚗️" }, result: colors.red }, 26 | ], 27 | (testCase) => { 28 | assertEquals( 29 | testCase.input.fn(testCase.input.str), 30 | testCase.result!(testCase.input.str), 31 | testCase.desc, 32 | ); 33 | }, 34 | ); 35 | 36 | Deno.test({ 37 | name: "colorer.doColor", 38 | fn() { 39 | const c = new Colorer(); 40 | assertEquals(c.green("test"), colors.green("test")); 41 | c.setColorEnabled(false); 42 | assertEquals(c.green("test"), "test"); 43 | }, 44 | }); 45 | -------------------------------------------------------------------------------- /pretty_benchmark_result.ts: -------------------------------------------------------------------------------- 1 | import { Colorer } from "./colorer.ts"; 2 | import { 3 | getResultCard, 4 | prettyBenchmarkCardResultOptions, 5 | } from "./benchmark_result_card.ts"; 6 | import type { BenchmarkRunResult } from "./deps.ts"; 7 | 8 | interface CommonOptions { 9 | /** Overrides the default output function, which is `console.log`. */ 10 | outputFn?: (log: string) => unknown; 11 | } 12 | 13 | /** Defines how the resulting output should look like. */ 14 | export type prettyBenchmarkResultOptions = 15 | & CommonOptions 16 | & (prettyBenchmarkCardResultOptions); 17 | 18 | const c: Colorer = new Colorer(); 19 | 20 | /** Returns a function that expects a `BenchmarkRunResult`, which than prints 21 | * the results in a nicely formatted way, based on the provided `options`. 22 | * 23 | * Typical basic usage: 24 | * 25 | * ```ts 26 | * // add benches, then 27 | * runBenchmarks().then(prettyBenchmarkResult()); 28 | * ``` 29 | * . 30 | */ 31 | export function prettyBenchmarkResult( 32 | /** Defines how the output should look like */ 33 | options?: prettyBenchmarkResultOptions, 34 | ) { 35 | return (result: BenchmarkRunResult) => 36 | _prettyBenchmarkResultCb( 37 | result, 38 | options, 39 | ); 40 | } 41 | 42 | function _prettyBenchmarkResultCb( 43 | results: BenchmarkRunResult, 44 | options?: prettyBenchmarkResultOptions, 45 | ) { 46 | if (options?.nocolor) { 47 | c.setColorEnabled(false); 48 | } 49 | 50 | const output = results.results.map((r) => { 51 | // TODO switch on options.type 52 | return getResultCard(r, c, options); 53 | }).join("\n"); 54 | 55 | typeof options?.outputFn == "function" 56 | ? options.outputFn(output) 57 | : console.log(output); 58 | 59 | if (options?.nocolor) { 60 | c.setColorEnabled(true); 61 | } 62 | 63 | return results; 64 | } 65 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | jobs: 10 | build: 11 | name: ${{ matrix.config.kind }} ${{ matrix.config.os }} 12 | runs-on: ${{ matrix.config.os }} 13 | timeout-minutes: 5 14 | strategy: 15 | matrix: 16 | config: 17 | - os: macOS-latest 18 | kind: test 19 | cov: false 20 | - os: windows-latest 21 | kind: test 22 | cov: false 23 | - os: ubuntu-latest 24 | kind: test 25 | cov: true 26 | - os: ubuntu-latest 27 | kind: lint 28 | cov: false 29 | steps: 30 | - name: Clone repository 31 | uses: actions/checkout@v2 32 | - name: Install deno 33 | uses: denolib/setup-deno@master 34 | with: 35 | deno-version: 1.8.2 36 | - name: Check lint 37 | if: matrix.config.kind == 'lint' 38 | run: deno lint --unstable 39 | - name: Check formatting 40 | if: matrix.config.kind == 'lint' 41 | run: deno fmt --check --ignore='./README.md,./docs' 42 | 43 | - name: Test ${{matrix.config.cov && '(with coverage)' || ''}} 44 | if: matrix.config.kind == 'test' 45 | run: deno test ${{matrix.config.cov && '--coverage=./cov --unstable' || ''}} 46 | - name: Generate lcov 47 | if: matrix.config.cov 48 | run: deno coverage --exclude=test --unstable --lcov ./cov > cov.lcov 49 | - name: Upload lcov 50 | if: matrix.config.cov 51 | uses: coverallsapp/github-action@v1.1.2 52 | with: 53 | github-token: ${{ secrets.GITHUB_TOKEN }} 54 | path-to-lcov: ./cov.lcov 55 | 56 | - name: Release 57 | uses: softprops/action-gh-release@v1 58 | if: | 59 | matrix.config.kind == 'test' && 60 | github.repository == 'littletof/prettyBenching' && 61 | startsWith(github.ref, 'refs/tags/') 62 | env: 63 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 64 | with: 65 | draft: true 66 | -------------------------------------------------------------------------------- /docs/showcase/showcase_create.ts: -------------------------------------------------------------------------------- 1 | // deno-lint-ignore-file 2 | 3 | import { prettyBenchmarkProgress } from "../../mod.ts"; 4 | 5 | import { bench, BenchmarkRunProgress, runBenchmarks } from "../../deps.ts"; 6 | 7 | await Deno.permissions.request({name: 'hrtime'}); 8 | 9 | const result = await Deno.permissions.request({name: 'write', path: './docs/showcase/'}); 10 | 11 | if(result.state !== 'granted') { 12 | console.error('Write permission is needed to write results to json. Exiting...'); 13 | Deno.exit(1); 14 | } 15 | 16 | bench({ 17 | name: "finished", 18 | runs: 100, 19 | func(b): void { 20 | b.start(); 21 | for (let i = 0; i < 1e6; i++); 22 | b.stop(); 23 | }, 24 | }); 25 | 26 | bench({ 27 | name: "benchmark-start", 28 | runs: 1, 29 | func(b): void { 30 | b.start(); 31 | for (let i = 0; i < 1e6; i++); 32 | b.stop(); 33 | }, 34 | }); 35 | 36 | bench({ 37 | name: "multiple-runs", 38 | runs: 100, 39 | func(b): void { 40 | b.start(); 41 | for (let i = 0; i < 1e8; i++); 42 | b.stop(); 43 | }, 44 | }); 45 | 46 | const progress: any[] = []; 47 | 48 | runBenchmarks({ silent: true }, (progress) => { 49 | prettyBenchmarkProgress()(progress), write(progress); 50 | }).then((x) => saveresult(x)) 51 | .catch( 52 | (e) => { 53 | console.error(e); 54 | }, 55 | ); 56 | 57 | function saveresult(result: any) { 58 | Deno.writeTextFileSync( 59 | "./docs/showcase/benchmark_progress_inputs.json", 60 | prettyJSON(progress), 61 | ); 62 | Deno.writeTextFileSync( 63 | "./docs/showcase/benchmark_result_input.json", 64 | prettyJSON(result), 65 | ); 66 | } 67 | 68 | function write(prog: BenchmarkRunProgress) { 69 | progress.push(prog); 70 | } 71 | 72 | function prettyJSON(obj: any) { 73 | if (typeof obj === "string") { 74 | obj = JSON.parse(obj); 75 | } 76 | const output = JSON.stringify(obj, function (k, v) { 77 | if (k === "measuredRunsMs" && v instanceof Array) { 78 | return JSON.stringify(v); 79 | } 80 | return v; 81 | }, 2) 82 | .replace(/\"\[/g, "[") 83 | .replace(/\]\"/g, "]") 84 | .replace(/\"\{/g, "{") 85 | .replace(/\}\"/g, "}"); 86 | 87 | return output; 88 | } 89 | -------------------------------------------------------------------------------- /utils.ts: -------------------------------------------------------------------------------- 1 | import { stripColor } from "./common.ts"; 2 | 3 | export function getTimePadSize() { 4 | return 12; // TODO 5 | } 6 | 7 | export function getTimePrecision() { 8 | return usingHrTime() ? 4 : 0; 9 | } 10 | 11 | export function usingHrTime(): boolean { 12 | // would need unstable for Deno.permissions.query({name: "hrtime"}); 13 | return isFloat(performance.now()); 14 | } 15 | 16 | export function isFloat(num: number) { 17 | return num % 1 !== 0; 18 | } 19 | 20 | export function padEndVisible(str: string, to: number, char = " ") { 21 | return str.padEnd(to + lDiff(str), char); 22 | } 23 | 24 | export function padStartVisible(str: string, to: number, char = " ") { 25 | return str.padStart(to + lDiff(str), char); 26 | } 27 | 28 | export function num(num: number, force?: boolean) { 29 | return isFloat(num) || force ? num.toFixed(4) : `${num}`; 30 | } 31 | 32 | export function perc(num: number) { 33 | return (num % 1 !== 0 && num < 99.95) ? num.toFixed(1) : num.toFixed(); 34 | } 35 | 36 | /** returns a float value with dynamic precision. for a (5+from) digit number no floating points, for a (4+form) digit one 1 decimal, ... up to 4 decimals */ 37 | export function rtime(num: number, from = 0) { 38 | const log = Math.max(Math.floor(Math.log10(num)), 0); 39 | const defPrec = isFloat(num) ? 4 : 0; 40 | return num.toFixed(Math.max(defPrec - Math.max(log - from, 0), 0)); 41 | } 42 | 43 | /** How many chars will the string shrink in the console, when rendered, compared to original length 44 | * An common emoji is 2 chars, and is 2 chars wide when rendered, so lDiff is 0 for it. 45 | * Of course there are exceptions to this like the alombic. 46 | */ 47 | export function lDiff(str: string) { 48 | const escaped = stripColor(str); 49 | return str.length - escaped.length; 50 | } 51 | 52 | /** returns an array, that contains each index where the regexp mathes the string */ 53 | export function matchWithIndex(line: string, regexp: RegExp) { 54 | const indexes = []; 55 | let match; 56 | while ((match = regexp.exec(line)) != null) { 57 | indexes.push(match.index); 58 | 59 | if (indexes.length > line.length) { 60 | throw Error( 61 | "Too many matches. Something bad with the regexp. Did you forgot the global? / /g", 62 | ); 63 | } 64 | } 65 | return indexes; 66 | } 67 | -------------------------------------------------------------------------------- /tests/lib_test.ts: -------------------------------------------------------------------------------- 1 | import { prettyBenchmarkProgress, prettyBenchmarkResult } from "../mod.ts"; 2 | import { 3 | bench, 4 | BenchmarkResult, 5 | BenchmarkRunProgress, 6 | BenchmarkRunResult, 7 | clearBenchmarks, 8 | runBenchmarks, 9 | } from "../deps.ts"; 10 | import { 11 | GroupDefinition, 12 | prettyBenchmarkDown, 13 | } from "../pretty_benchmark_down.ts"; 14 | 15 | import type { Thresholds } from "../types.ts"; 16 | 17 | // deno-lint-ignore camelcase 18 | import { benchmark_progress, benchmark_result } from "./test_data.ts"; 19 | 20 | const thresholds: Thresholds = { 21 | "Sorting arrays": { green: 0.60, yellow: 1 }, 22 | "Rotating arrays": { green: 1.85, yellow: 2 }, 23 | "Standing out": { green: 0.5, yellow: 0.74 }, 24 | }; 25 | 26 | Deno.test({ 27 | name: "commandlineBenching", 28 | fn: function (): void { 29 | const progressFn = prettyBenchmarkProgress({ thresholds }); 30 | const resultFn = prettyBenchmarkResult({ thresholds }); 31 | 32 | benchmark_progress.forEach((p) => progressFn(p as BenchmarkRunProgress)); 33 | resultFn(benchmark_result); 34 | }, 35 | }); 36 | 37 | Deno.test({ 38 | name: "without options", 39 | fn: function (): void { 40 | const progressFn = prettyBenchmarkProgress(); 41 | const resultFn = prettyBenchmarkResult(); 42 | 43 | benchmark_progress.forEach((p) => progressFn(p as BenchmarkRunProgress)); 44 | resultFn(benchmark_result); 45 | }, 46 | }); 47 | 48 | Deno.test({ 49 | name: "nocolor", 50 | fn: function (): void { 51 | const progressFn = prettyBenchmarkProgress({ nocolor: true }); 52 | const resultFn = prettyBenchmarkResult({ nocolor: true }); 53 | 54 | benchmark_progress.forEach((p) => progressFn(p as BenchmarkRunProgress)); 55 | resultFn(benchmark_result); 56 | }, 57 | }); 58 | 59 | Deno.test({ 60 | name: "prettyBenchmarkDown - issue #10 - Empty group results", 61 | fn: function (): void { 62 | clearBenchmarks(); 63 | 64 | dummyBench("Bench1"); 65 | dummyBench("Bench2"); 66 | dummyBench("Bench3"); 67 | 68 | runBenchmarks({ silent: true }) 69 | .then(prettyBenchmarkDown(() => {/* do not print */}, { 70 | groups: [ 71 | { 72 | include: /noBenchLikeThis/, 73 | name: "Fails on v0.2.0", 74 | description: ( 75 | gr: BenchmarkResult[], 76 | g: GroupDefinition, 77 | rr: BenchmarkRunResult, 78 | ) => `${gr.length}${gr.length}${rr.results.length}`, 79 | }, 80 | ], 81 | })); 82 | }, 83 | }); 84 | 85 | function dummyBench(name: string, runs = 1): void { 86 | bench({ 87 | name, 88 | runs, 89 | func(b) { 90 | b.start(); 91 | b.stop(); 92 | }, 93 | }); 94 | } 95 | -------------------------------------------------------------------------------- /tests/common_test.ts: -------------------------------------------------------------------------------- 1 | import { testEach } from "./test_helpers.ts"; 2 | import { assertEquals } from "./test_deps.ts"; 3 | 4 | import { disjunct, intersect, stripColor } from "../common.ts"; 5 | import { colors } from "../deps.ts"; 6 | 7 | testEach<{ a: unknown[]; b: unknown[] }, unknown[]>("common.intersect", [ 8 | { input: { a: [], b: [] }, result: [] }, 9 | { input: { a: [1, 2, 3], b: [4, 5, 6] }, result: [] }, 10 | { input: { a: [1, 2, 3], b: [3, 4, 5] }, result: [3] }, 11 | { input: { a: [1, 2, 3], b: [1, 2, 3] }, result: [1, 2, 3] }, 12 | { input: { a: [1, 2, 3], b: [2, 3, 4] }, result: [2, 3] }, 13 | { input: { a: [1, 1, 1, 1, 2], b: [1] }, result: [1, 1, 1, 1] }, 14 | { input: { a: [1, 2], b: [1, 1, 1, 1] }, result: [1] }, 15 | { input: { a: [1, 2], b: [1, 1, 1, 2] }, result: [1, 2] }, 16 | ], (testCase) => { 17 | assertEquals( 18 | intersect(testCase.input.a, testCase.input.b), 19 | testCase.result, 20 | testCase.desc, 21 | ); 22 | }); 23 | 24 | testEach<{ a: unknown[]; b: unknown[] }, unknown[]>("common.disjunct", [ 25 | { input: { a: [], b: [] }, result: [] }, 26 | { input: { a: [1, 2, 3], b: [4, 5, 6] }, result: [1, 2, 3] }, 27 | { input: { a: [1, 2, 3], b: [3, 4, 5] }, result: [1, 2] }, 28 | { input: { a: [1, 2, 3], b: [1, 2, 3] }, result: [] }, 29 | { input: { a: [1, 2, 3], b: [2, 3, 4] }, result: [1] }, 30 | { input: { a: [1, 1, 1, 1, 2], b: [1] }, result: [2] }, 31 | { input: { a: [1, 2], b: [1, 1, 1, 1] }, result: [2] }, 32 | { input: { a: [1, 2], b: [1, 1, 1, 2] }, result: [] }, 33 | ], (testCase) => { 34 | assertEquals( 35 | disjunct(testCase.input.a, testCase.input.b), 36 | testCase.result, 37 | testCase.desc, 38 | ); 39 | }); 40 | 41 | testEach("common.stripColor", [ 42 | { input: "", result: "" }, 43 | 44 | { input: colors.bgBlack("test"), result: "test" }, 45 | { input: colors.bgBlue("test"), result: "test" }, 46 | { input: colors.bgCyan("test"), result: "test" }, 47 | { input: colors.bgGreen("test"), result: "test" }, 48 | { input: colors.bgMagenta("test"), result: "test" }, 49 | { input: colors.bgRed("test"), result: "test" }, 50 | { input: colors.bgWhite("test"), result: "test" }, 51 | { input: colors.bgYellow("test"), result: "test" }, 52 | 53 | { input: colors.black("test"), result: "test" }, 54 | { input: colors.blue("test"), result: "test" }, 55 | { input: colors.cyan("test"), result: "test" }, 56 | { input: colors.green("test"), result: "test" }, 57 | { input: colors.magenta("test"), result: "test" }, 58 | { input: colors.red("test"), result: "test" }, 59 | { input: colors.white("test"), result: "test" }, 60 | { input: colors.yellow("test"), result: "test" }, 61 | 62 | { 63 | input: colors.red("testing " + colors.green("test")), 64 | result: "testing test", 65 | }, 66 | { input: colors.green("⚗️"), result: "⚗️" }, 67 | { input: colors.red("⚗️ " + colors.green("⚗️")), result: "⚗️ ⚗️" }, 68 | ], (testCase) => { 69 | assertEquals(stripColor(testCase.input), testCase.result, testCase.desc); 70 | }); 71 | 72 | // TODO calculateExtraMetrics 73 | // TODO calculateStdDeviation 74 | // TODO getBenchIndicator 75 | // TODO getInThresholdRange 76 | // TODO getTimeColor 77 | // TODO substrColored 78 | -------------------------------------------------------------------------------- /example.ts: -------------------------------------------------------------------------------- 1 | import { 2 | BenchIndicator, 3 | prettyBenchmarkHistory, 4 | prettyBenchmarkProgress, 5 | prettyBenchmarkResult, 6 | } from "https://deno.land/x/pretty_benching@v0.3.3/mod.ts"; 7 | 8 | import { 9 | bench, 10 | runBenchmarks, 11 | } from "https://deno.land/std@0.91.0/testing/bench.ts"; 12 | 13 | import * as colors from "https://deno.land/std@0.91.0/fmt/colors.ts"; 14 | import { 15 | deltaProgressRowExtra, 16 | deltaResultInfoCell, 17 | } from "./history_extensions.ts"; 18 | 19 | bench({ 20 | name: "Sorting arrays", 21 | runs: 4000, 22 | func(b): void { 23 | b.start(); 24 | new Array(10000).fill(Math.random()).sort(); 25 | b.stop(); 26 | }, 27 | }); 28 | 29 | bench({ 30 | name: "Rotating arrays", 31 | runs: 1000, 32 | func(b): void { 33 | b.start(); 34 | let a = new Array(500); 35 | for (let i = 0; i < 500; i++) { 36 | a.pop(); 37 | a = a.reverse(); 38 | } 39 | b.stop(); 40 | }, 41 | }); 42 | 43 | bench({ 44 | name: "Proving NP==P", 45 | runs: 1, 46 | func(b): void { 47 | b.start(); 48 | for (let i = 0; i < 1e9 / 5; i++) { 49 | const NPeP = Math.random() === Math.random(); 50 | } 51 | b.stop(); 52 | }, 53 | }); 54 | 55 | bench({ 56 | name: "Counting stars_long", 57 | runs: 1000, 58 | func(b): void { 59 | b.start(); 60 | const a = []; 61 | for (let i = 0; i < 1e12; i++) { 62 | a.push(i); 63 | } 64 | b.stop(); 65 | }, 66 | }); 67 | 68 | bench({ 69 | name: "Standing out", 70 | runs: 1000, 71 | func(b): void { 72 | b.start(); 73 | new Array(10000).fill(Math.random()).sort(); 74 | b.stop(); 75 | }, 76 | }); 77 | 78 | const historicData = 79 | '{"history":[{"date":"2020-09-18T10:43:57.695Z","benchmarks":{"Sorting arrays":{"measuredRunsAvgMs":0.4617750250000041,"runsCount":4000,"totalMs":1847.1001000000165},"Rotating arrays":{"measuredRunsAvgMs":2.2363983999999726,"runsCount":1000,"totalMs":2236.3983999999728},"Proving NP==P":{"measuredRunsAvgMs":5763.3927,"runsCount":1,"totalMs":5763.3927}}}]}'; 80 | const history = new prettyBenchmarkHistory(JSON.parse(historicData)); 81 | 82 | const thresholds = { 83 | "Rotating arrays": { green: 2.5, yellow: 3.4 }, 84 | "Proving NP==P": { green: 4600, yellow: 5500 }, 85 | }; 86 | 87 | const indicators: BenchIndicator[] = [ 88 | { 89 | benches: /Rotating arrays/, 90 | modFn: () => "🚀", 91 | }, 92 | { benches: /NP/, modFn: colors.yellow, color: colors.blue }, 93 | { 94 | benches: /Standing/, 95 | modFn: () => colors.bgRed("%"), 96 | color: colors.magenta, 97 | }, 98 | ]; 99 | 100 | runBenchmarks( 101 | { silent: true, skip: /_long/ }, 102 | prettyBenchmarkProgress( 103 | { 104 | indicators, 105 | thresholds, 106 | rowExtras: deltaProgressRowExtra(history), 107 | }, 108 | ), 109 | ).then( 110 | prettyBenchmarkResult( 111 | { 112 | thresholds, 113 | indicators, 114 | infoCell: deltaResultInfoCell(history), 115 | parts: { 116 | extraMetrics: true, 117 | threshold: true, 118 | graph: true, 119 | graphBars: 5, 120 | }, 121 | }, 122 | ), 123 | ); 124 | -------------------------------------------------------------------------------- /docs/showcase/showcase_read.ts: -------------------------------------------------------------------------------- 1 | // deno-lint-ignore-file 2 | 3 | import { prettyBenchmarkProgress, prettyBenchmarkResult } from "../../mod.ts"; 4 | import { join } from "https://deno.land/std@0.91.0/path/mod.ts"; 5 | import { 6 | BenchmarkRunProgress, 7 | ProgressState, 8 | } from "https://deno.land/std@0.91.0/testing/bench.ts"; 9 | 10 | const pathBase = join(".", "docs", "showcase"); 11 | 12 | await Deno.permissions.request({name: 'hrtime'}); 13 | const readResult = await Deno.permissions.request({name: 'read', path: pathBase}); 14 | if(readResult.state !== 'granted') { 15 | console.error('Can\'t run without input data for the benchmark. Exiting...'); 16 | Deno.exit(1); 17 | } 18 | const writeResult = await Deno.permissions.request({name: 'write', path: pathBase}); 19 | if(writeResult.state !== 'granted') { 20 | console.error('Can\'t save result without write permission. Exiting...'); 21 | Deno.exit(1); 22 | } 23 | 24 | const progressData: any[] = readJsonSync( 25 | join(pathBase, "benchmark_progress_inputs.json"), 26 | ) as any; 27 | const resultData = readJsonSync(join(pathBase, "benchmark_result_input.json")); 28 | 29 | const nocolor = true; 30 | 31 | const fid = await Deno.open( 32 | join(pathBase, "showcase.txt"), 33 | { create: true, write: true }, 34 | ); 35 | 36 | const pg = prettyBenchmarkProgress({ 37 | nocolor, 38 | thresholds: { "multiple-runs": { green: 76, yellow: 82 } }, 39 | indicators: [{ benches: /multiple-runs/, modFn: (str) => "%" }], 40 | outputFn: (str: string) => 41 | Deno.writeSync(fid.rid, new TextEncoder().encode(`${str}\n`)), 42 | }); 43 | 44 | const originalSTD = Deno.stdout.writeSync; 45 | const originalLog = globalThis.console.log; 46 | 47 | /*globalThis.console.log = (...args: unknown[]) => 48 | Deno.writeSync(fid.rid, new TextEncoder().encode(`${args[0] ? args[0] + "\n" : "\n"}`)); 49 | 50 | Deno.stdout.writeSync = (p: Uint8Array): number => { 51 | Deno.writeSync(fid.rid, p); 52 | return 0; 53 | };*/ 54 | 55 | progressData.forEach((pd: BenchmarkRunProgress) => { 56 | if ( 57 | (pd.state === ProgressState.BenchmarkingStart || 58 | pd.state === ProgressState.BenchmarkingEnd) || 59 | (pd.state === ProgressState.BenchResult && 60 | [...pd.results].reverse()[0].name == "finished") || 61 | (pd.state === ProgressState.BenchStart && 62 | pd.running?.name == "benchmark-start") || 63 | (pd.state === ProgressState.BenchPartialResult && 64 | pd.running?.measuredRunsMs.length == 52 && 65 | pd.running?.name == "multiple-runs") 66 | ) { 67 | pg(pd); 68 | } else { 69 | if ( 70 | pd.state === ProgressState.BenchResult && 71 | [...pd.results].reverse()[0].name != "benchmark-start" 72 | ) { 73 | // Deno.stdout.writeSync(new TextEncoder().encode("\n")); 74 | } 75 | } 76 | }); 77 | 78 | let resultLog: string = ""; 79 | const resultFn = prettyBenchmarkResult( 80 | { 81 | nocolor: true, 82 | outputFn: (log?: string) => resultLog = log!, 83 | thresholds: { "multiple-runs": { green: 76, yellow: 82 } }, 84 | indicators: [{ benches: /multiple-runs/, modFn: (str) => "%" }], 85 | parts: { extraMetrics: true, graphBars: 5, graph: true, threshold: true }, 86 | }, 87 | ); 88 | resultFn(resultData as any); 89 | 90 | Deno.writeSync( 91 | fid.rid, 92 | new TextEncoder().encode(`${"-".repeat(60)}${"-".repeat(60)}\n\n`), 93 | ); // separator line 94 | 95 | Deno.writeSync(fid.rid, new TextEncoder().encode(`${resultLog}`)); 96 | 97 | fid.close(); 98 | 99 | function readJsonSync(path: string) { 100 | return JSON.parse(Deno.readTextFileSync(path)); 101 | } 102 | -------------------------------------------------------------------------------- /docs/prettyBenchmarkDown/pr_benchmark_output.md: -------------------------------------------------------------------------------- 1 | # An example benchMarkdown 2 | 3 | This markdown was generated with the use of `prettyBenchmarkDown`. 4 | 5 | Check out how to generate a file like this: [pr_benchmarks.ts](https://github.com/littletof/prettyBenching/blob/docs/prettyBenchmarkDown/pr_benchmarks.ts) 6 | 7 | If you use a function for the `description` or `afterTables`, you can process the results here as well: 8 | 9 | > In this benchmark 7 benches were run, 1 filtered. 10 | 11 | ## Default columns and dynamic text 12 | 13 | This is a group's `description`. 14 | Here you can see what the default columns are, and how you can use a `function` as `description` or `afterTable` inside a group 15 | 16 | |Name|Runs|Total (ms)|Average (ms)| 17 | |:--|--:|--:|--:| 18 | |Sorting arrays|4000|1801.169|0.450| 19 | |Rotating arrays|1000|2021.054|2.021| 20 | 21 | This is a group's `afterTable`. 22 | Here you can access eg. the group name: `Default columns and dynamic text`, benchmarks in this group: `2` of them here, or the whole `BenchmarkRunResult`: `7` benchmarks total 23 | 24 | ## Custom columns 25 | 26 | |Name|Runs|Total (ms)|CustomTotal|Formatter|Undefined|Bad Config| 27 | |:--|--:|--:|:--|:-:|:-:|:-:| 28 | |Sorting arrays|4000|1801.169|1801.16940|syarra gnitroS:rettamroF <-|-|*| 29 | |Rotating arrays|1000|2021.054|2021.05430|syarra gnitatoR:rettamroF <-|-|*| 30 | 31 | If you see `-`, that means the value there was `undefined`, if you see `*` it means that column is badly configured, no `formatter` or `propertyKey` was defined. 32 | 33 | ## Predefiend columns 34 | 35 | Here you can see, what the predefined columns are. 36 | 37 | You can add the `indicators` and `thresholds` that you use in `prettyBenchmarkProgress` and `prettyBenchmarkResults`. 38 | 39 | You can see, how you can rename columns like with `Thresholds+` 40 | ||Name|Runs|Total (ms)|Average (ms)|Thresholds|Thresholds+|| 41 | |:-:|:--|--:|--:|--:|--:|--:|:-:| 42 | | |Rotating other things|1000|2143.992|2.144|-|-|-| 43 | |🎹|Rotating arrays|1000|2021.054|2.021|<= 3.5 ✅
<= 4.4 🔶
> 4.4 🔴
|<= 3.5 ✅ 🠴
<= 4.4 🔶 
> 4.4 🔴 
|✅| 44 | |%|Proving NP==P|1|4384.908|4384.908|<= 4141 ✅
<= 6000 🔶
> 6000 🔴
|<= 4141 ✅ 
<= 6000 🔶 🠴
> 6000 🔴 
|🔶| 45 | |🚀|Standing out|1000|375.708|0.376|<= 0.3 ✅
<= 0.33 🔶
> 0.33 🔴
|<= 0.3 ✅ 
<= 0.33 🔶 
> 0.33 🔴 🠴
|🔴| 46 | 47 | 48 | ## Extra metrics 49 | 50 | You can add `extraMetrics` columns too. In its `metrics` array you can define which columns you want. If you set `ignoreSingleRuns` to `true`, it wont show values on rows, where runCount is 1. 51 | |Name|Runs|Total (ms)|Average (ms)|Min|Max|Mean|Median|Std deviation| 52 | |:--|--:|--:|--:|--:|--:|--:|--:|--:| 53 | |Sorting arrays|4000|1801.169|0.450|0.305|1.632|0.969|0.401|0.143| 54 | |Rotating other things|1000|2143.992|2.144|1.757|4.585|3.171|2.082|0.314| 55 | |Rotating arrays|1000|2021.054|2.021|1.748|3.549|2.648|2.000|0.156| 56 | |Proving NP==P|1|4384.908|4384.908|-|-|-|-|-| 57 | |Standing out|1000|375.708|0.376|0.299|0.762|0.531|0.360|0.058| 58 | 59 | 60 | ## Ungrouped benches 61 | 62 | |Name|Runs|Total (ms)|Average (ms)| 63 | |:--|--:|--:|--:| 64 | |Ungrouped 1|1000|2290.916|2.291| 65 | |Ungrouped 2|1000|2101.089|2.101| 66 | 67 | 68 | 69 | --- 70 | 71 | This is the `afterTables`. This behaves the same as `description`, it just puts this at the bottom of the markdown. 72 | Here its defined with a simple string. 73 | 74 | Check out the Github Action, which comments a markdown like this on PRs: [pr_benchmarks.yml](https://github.com/littletof/prettyBenching/blob/docs/prettyBenchmarkDown/pr_benchmarks.yml). 75 | 76 | You can find an example repo that uses it [here](https://github.com/littletof/pretty-benching-action/pull/2) 77 | 78 | -------------------------------------------------------------------------------- /docs/showcase/showcase.txt: -------------------------------------------------------------------------------- 1 | 2 | ▒▒▒▒▒▒▒▒ Starting benchmarking 3 | ▒▒▒▒▒▒▒▒ Benchmarks queued: [ 3] filtered: [ 0] 4 | 5 |  Benched [finished --------------------------------] Runs: [ 100] Total time: [ 42.9500ms] Avg: [ 0.4295ms] 6 | Running [benchmark-start -------------------------] a total of [ 1] times 7 |  Running %[multiple-runs ---------------------------] [ 52/ 100] [======================52=% ] 8 | 9 | ▒▒▒▒▒▒▒▒ Benchmarking finished 10 | 11 | ------------------------------------------------------------------------------------------------------------------------ 12 | 13 | ┌───────────────────────────────────────────────────────────────────────────────────────────┐ 14 | │ Benchmark name: finished │ 15 | ├───────────────────────┬──────────────────────────────┬────────────────────────────────────┤ 16 | │ Total runs: 100 │ Total time: 42.9500 ms │ Avg time: 0.4295 ms │ 17 | ├───────────────────────┼────────────────────┬─────────┴───────────┬────────────────────────┤ 18 | │ min: 0.3284 ms │ max: 2.1839 ms │ mean: 1.2561 ms │ median: 0.3668 ms │ 19 | ├───────────────────────┴───────┬────────────┴─────────────────────┴────────────────────────┤ 20 | │ │ │ 21 | │ 0.3284 ms _[ 97][ 97%] │========================================================= │ 22 | │ 0.6995 ms _[ 1][ 1%] │= │ 23 | │ 1.0706 ms _[ 0][ 0%] │ │ 24 | │ 1.4417 ms _[ 0][ 0%] │ │ 25 | │ 1.8128 ms _[ 2][ 2%] │== │ 26 | │ │ │ 27 | └───────────────────────────────┴───────────────────────────────────────────────────────────┘ 28 | 29 | ┌───────────────────────────────────────────────────────────────────────────────────────────┐ 30 | │ Benchmark name: benchmark-start │ 31 | ├───────────────────────┬──────────────────────────────┬────────────────────────────────────┤ 32 | │ Total runs: 1 │ Total time: 1.9790 ms │ │ 33 | └───────────────────────┴──────────────────────────────┴────────────────────────────────────┘ 34 | 35 | ┌───────────────────────────────────────────────────────────────────────────────────────────┐ 36 | │ % Benchmark name: multiple-runs │ 37 | ├───────────────────────┬──────────────────────────────┬────────────────────────────────────┤ 38 | │ Total runs: 100 │ Total time: 7534.1828 ms │ Avg time: 75.3418 ms │ 39 | ├───────────────────────┼────────────────────┬─────────┴───────────┬────────────────────────┤ 40 | │ min: 69.8122 ms │ max: 88.2104 ms │ mean: 79.0113 ms │ median: 74.9466 ms │ 41 | ├───────────────────────┴────────────────────┴─────────────────────┴────────────────────────┤ 42 | │ Thresholds: 0 ========== 76 ========== 82 ========== ∞ │ 43 | ├───────────────────────────────┬───────────────────────────────────────────────────────────┤ 44 | │ │ │ 45 | │ 69.812 ms _[ 26][ 26%] │========================== │ 46 | │ 73.492 ms _[ 56][56.0%] │======================================================== │ 47 | │ 77.171 ms _[ 14][14.0%] │============== │ 48 | │ 80.851 ms _[ 3][ 3%] │=== │ 49 | │ 84.531 ms _[ 1][ 1%] │= │ 50 | │ │ │ 51 | └───────────────────────────────┴───────────────────────────────────────────────────────────┘ 52 | -------------------------------------------------------------------------------- /tests/table_test.ts: -------------------------------------------------------------------------------- 1 | import { testEach } from "./test_helpers.ts"; 2 | import { assertEquals } from "./test_deps.ts"; 3 | import { TableBuilder } from "../table.ts"; 4 | 5 | testEach<{ w: number; ops: (tb: TableBuilder) => void }, string>( 6 | "table", 7 | [ 8 | { 9 | input: { 10 | w: 1, 11 | ops: (tb) => { 12 | tb.line("1"); 13 | }, 14 | }, 15 | result: `┌─┐\n│1│\n└─┘\n`, 16 | }, 17 | { 18 | input: { 19 | w: 5, 20 | ops: (tb) => { 21 | tb.line("1"); 22 | }, 23 | }, 24 | result: `┌─────┐\n│1 │\n└─────┘\n`, 25 | }, 26 | { 27 | input: { 28 | w: 5, 29 | ops: (tb) => { 30 | tb.line("1"); 31 | tb.line("2"); 32 | }, 33 | }, 34 | result: `┌─────┐\n│1 │\n│2 │\n└─────┘\n`, 35 | }, 36 | { 37 | input: { 38 | w: 5, 39 | ops: (tb) => { 40 | tb.line("1"); 41 | tb.separator(); 42 | tb.line("2"); 43 | }, 44 | }, 45 | result: `┌─────┐\n│1 │\n├─────┤\n│2 │\n└─────┘\n`, 46 | }, 47 | { 48 | input: { 49 | w: 2, 50 | ops: (tb) => { 51 | tb.line("1"); 52 | tb.line("2"); 53 | tb.separator(); 54 | tb.line("3"); 55 | }, 56 | }, 57 | result: `┌──┐\n│1 │\n│2 │\n├──┤\n│3 │\n└──┘\n`, 58 | }, 59 | { 60 | input: { 61 | w: 3, 62 | ops: (tb) => { 63 | tb.cellLine("1", "2"); 64 | }, 65 | }, 66 | result: `┌─┬─┐\n│1│2│\n└─┴─┘\n`, 67 | }, 68 | { 69 | input: { 70 | w: 3, 71 | ops: (tb) => { 72 | tb.cellLine("1", "2"); 73 | tb.cellLine("4", "5"); 74 | }, 75 | }, 76 | result: `┌─┬─┐\n│1│2│\n│4│5│\n└─┴─┘\n`, 77 | }, 78 | { 79 | input: { 80 | w: 3, 81 | ops: (tb) => { 82 | tb.cellLine("1", "2"); 83 | tb.separator(); 84 | tb.cellLine("4", "5"); 85 | }, 86 | }, 87 | result: `┌─┬─┐\n│1│2│\n├─┼─┤\n│4│5│\n└─┴─┘\n`, 88 | }, 89 | { 90 | input: { 91 | w: 6, 92 | ops: (tb) => { 93 | tb.cellLine("1", "2"); 94 | tb.separator(); 95 | tb.cellLine("4 ", "5"); 96 | }, 97 | }, 98 | result: `┌─┬────┐\n│1│2 │\n├─┴──┬─┤\n│4 │5│\n└────┴─┘\n`, 99 | }, 100 | { 101 | input: { 102 | w: 9, 103 | ops: (tb) => { 104 | tb.line(" 1 "); 105 | tb.separator(); 106 | tb.cellLine("2 ", " 3 ", " 4"); 107 | tb.separator(); 108 | tb.cellLine(" 5 ", " 6 "); 109 | }, 110 | }, 111 | result: 112 | `┌─────────┐\n│ 1 │\n├──┬───┬──┤\n│2 │ 3 │ 4│\n├──┴─┬─┴──┤\n│ 5 │ 6 │\n└────┴────┘\n`, 113 | }, 114 | { 115 | input: { 116 | w: 9, 117 | ops: (tb) => { 118 | tb.line(" 1 "); 119 | tb.separator(); 120 | tb.cellLine("2 ", " 3 ", " 4"); 121 | tb.separator(); 122 | tb.cellLine(" 5 ", " 6 "); 123 | tb.separator(); 124 | tb.line(" 7 │ 8 "); 125 | }, 126 | }, 127 | result: 128 | `┌─────────┐\n│ 1 │\n├──┬───┬──┤\n│2 │ 3 │ 4│\n├──┴─┬─┴──┤\n│ 5 │ 6 │\n├────┼────┤\n│ 7 │ 8 │\n└────┴────┘\n`, 129 | }, 130 | ], 131 | (testCase) => { 132 | const tb = new TableBuilder( 133 | testCase.input.w, 134 | ((str) => str), 135 | ); 136 | testCase.input.ops(tb); 137 | const out = tb.build(); 138 | // console.log("\n" + out); 139 | assertEquals(out, testCase.result); 140 | 141 | // TODO fix color testing 142 | /* const tbCol = new TableBuilder( 143 | testCase.input.w, 144 | colors.green, 145 | ); 146 | testCase.input.ops(tbCol); 147 | const outCol = tbCol.build(); 148 | console.log(outCol); 149 | const colored = testCase.result?.replaceAll(/([┌┬─┐│└┴─┘├┼┤])/g, colors.green('$1')); 150 | console.log(colored); 151 | assertEquals(outCol, colored); */ 152 | }, 153 | ); 154 | -------------------------------------------------------------------------------- /docs/showcase/benchmark_result_input.json: -------------------------------------------------------------------------------- 1 | { 2 | "filtered": 0, 3 | "results": [ 4 | { 5 | "name": "finished", 6 | "totalMs": 42.94999999999891, 7 | "runsCount": 100, 8 | "measuredRunsAvgMs": 0.4294999999999891, 9 | "measuredRunsMs": [2.183899999999994,2.160400000000209,0.34340000000020154,0.38040000000000873,0.33019999999987704,0.39229999999997744,0.3350999999997839,0.39259999999990214,0.33890000000019427,0.396900000000187,0.3661000000001877,0.3735999999998967,0.3365000000001146,0.391399999999976,0.389699999999948,0.5087999999998374,0.3956999999998061,0.4498000000000957,0.3423999999999978,0.5325000000000273,0.34390000000007603,0.3471999999999298,0.3509999999998854,0.3528999999998632,0.34159999999997126,0.3442000000000007,0.453499999999849,0.8410999999998694,0.3588999999999487,0.33829999999989013,0.34120000000029904,0.41229999999995925,0.46910000000002583,0.3386999999997897,0.4335000000000946,0.3544999999999163,0.39019999999982247,0.33950000000004366,0.34819999999990614,0.4493999999999687,0.3427000000001499,0.39390000000003056,0.3409999999998945,0.3686999999999898,0.34170000000017353,0.6050000000000182,0.6212000000000444,0.36850000000004,0.3950999999999567,0.3490999999999076,0.3490999999999076,0.42409999999995307,0.33809999999994034,0.5733000000000175,0.3428000000001248,0.34149999999999636,0.4614999999998872,0.34940000000005966,0.5427999999999429,0.37540000000012697,0.375,0.3405999999999949,0.4333999999998923,0.3631000000000313,0.5549999999998363,0.3308999999999287,0.3812000000000353,0.36740000000008877,0.3283999999998741,0.4838000000002012,0.3430999999998221,0.613800000000083,0.350400000000036,0.33809999999994034,0.3386999999997897,0.34359999999992397,0.3689999999999145,0.3404000000000451,0.36590000000001055,0.3392999999998665,0.4079999999999018,0.33809999999994034,0.39990000000011605,0.3285000000000764,0.37370000000009895,0.3432999999999993,0.3708999999998923,0.3589000000001761,0.4516999999998461,0.580199999999877,0.44810000000006767,0.33950000000004366,0.41210000000000946,0.3387000000000171,0.33720000000016626,0.3760000000002037,0.3445999999999003,0.4679999999998472,0.37039999999979045,0.3464000000001306] 10 | }, 11 | { 12 | "name": "benchmark-start", 13 | "totalMs": 1.9790000000002692, 14 | "runsCount": 1, 15 | "measuredRunsAvgMs": 1.9790000000002692, 16 | "measuredRunsMs": [1.9790000000002692] 17 | }, 18 | { 19 | "name": "multiple-runs", 20 | "totalMs": 7534.1828, 21 | "runsCount": 100, 22 | "measuredRunsAvgMs": 75.34182799999999, 23 | "measuredRunsMs": [74.73080000000004,82.97389999999996,73.89879999999994,73.05279999999993,71.49330000000009,78.04950000000008,72.7528000000002,77.10359999999991,74.48660000000018,74.05899999999974,75.71950000000015,75.9378999999999,79.91649999999981,75.6256000000003,78.07940000000008,73.25859999999966,73.64789999999994,73.83489999999983,71.1917999999996,73.66379999999981,76.5648000000001,76.62339999999995,77.10069999999996,80.13359999999966,76.65250000000015,75.37180000000035,81.29840000000013,81.29010000000062,71.91309999999976,73.50849999999991,76.23790000000008,72.54550000000017,76.79799999999977,73.80490000000009,71.83169999999973,75.32359999999971,76.5003999999999,75.37299999999959,78.49479999999949,78.09299999999985,76.2548999999999,72.46939999999995,76.55429999999978,73.19880000000012,77.48030000000017,74.83590000000004,71.63010000000031,73.56829999999991,72.55540000000019,79.71690000000035,74.18040000000019,74.79969999999958,75.58839999999964,74.70179999999982,74.63600000000042,75.99379999999928,75.35320000000047,73.91100000000006,76.06829999999991,77.57130000000052,71.71759999999995,76.46670000000086,73.08349999999973,75.36140000000069,73.95130000000063,73.56580000000031,72.85710000000017,74.13209999999981,74.94409999999971,73.17950000000019,73.41650000000027,70.47379999999976,74.94920000000002,80.3098,69.8121999999994,72.78079999999954,76.16159999999945,72.75410000000011,74.86470000000008,74.11390000000029,73.38289999999961,72.10059999999976,75.65459999999894,88.21039999999994,75.91660000000047,72.14120000000003,79.12910000000011,75.64069999999992,76.10610000000088,76.6180000000004,76.65399999999863,75.30389999999898,73.13900000000103,77.372800000001,74.6872000000003,74.3107,80.54730000000018,73.11759999999958,77.07690000000002,78.17259999999987] 24 | } 25 | ] 26 | } -------------------------------------------------------------------------------- /table.ts: -------------------------------------------------------------------------------- 1 | import { disjunct, intersect, stripColor } from "./common.ts"; 2 | import { lDiff, matchWithIndex, padEndVisible } from "./utils.ts"; 3 | 4 | const separatorToken = "#&imaseparator&#"; 5 | 6 | enum chars { 7 | top = "─", 8 | topmid = "┬", 9 | topleft = "┌", 10 | topright = "┐", 11 | bottom = "─", 12 | bottommid = "┴", 13 | bottomleft = "└", 14 | bottomright = "┘", 15 | left = "│", 16 | midleft = "├", 17 | mid = "─", 18 | midmid = "┼", 19 | right = "│", 20 | midright = "┤", 21 | middle = "│", 22 | } 23 | 24 | type colorFunction = (str: string) => string; 25 | 26 | interface charset { 27 | start: string; 28 | stop: string; 29 | line: string; 30 | is: string; 31 | } 32 | 33 | export class TableBuilder { 34 | width: number; 35 | lines: string[] = []; 36 | colorFn: colorFunction; 37 | tempColorFn: colorFunction; 38 | 39 | constructor(width: number, colorFn: colorFunction) { 40 | this.width = width; 41 | this.colorFn = this.tempColorFn = colorFn; 42 | } 43 | 44 | line(line: string) { 45 | this.lines.push(this.tableLine(line)); 46 | return this; 47 | } 48 | 49 | cellLine(...cells: string[]) { 50 | if (cells.length == 1) { 51 | cells.push(""); 52 | } 53 | this.lines.push(this.tableLine(cells.join(this.tempColorFn(chars.middle)))); 54 | this.tempColorFn = this.colorFn; 55 | return this; 56 | } 57 | 58 | separator() { 59 | this.lines.push(separatorToken); 60 | return this; 61 | } 62 | 63 | color(colorFn: colorFunction) { 64 | this.colorFn = this.tempColorFn = colorFn; 65 | return this; 66 | } 67 | 68 | tc(colorFn: colorFunction) { // TODO reconsider 69 | this.tempColorFn = colorFn; 70 | return this; 71 | } 72 | 73 | build() { 74 | let result = ""; 75 | result += this.getHSeparator(undefined, this.lines[0]); 76 | this.lines.forEach((l, i, a) => { 77 | if (l === separatorToken) { 78 | if (i === a.length - 1) { // last line is separator token, remove it so table end connects properly 79 | this.lines.splice(i, 1); 80 | return; 81 | } 82 | result += this.getHSeparator(this.lines[i - 1], this.lines[i + 1]); 83 | return; 84 | } 85 | result += l; 86 | }); 87 | result += this.getHSeparator(this.lines[this.lines.length - 1], undefined); 88 | return result; 89 | } 90 | 91 | private getHSeparator(topLine?: string, bottomLine?: string) { 92 | const topc = topLine 93 | ? this.getWSeparatorPositions(stripColor(topLine)) 94 | : []; 95 | const bottomc = bottomLine 96 | ? this.getWSeparatorPositions(stripColor(bottomLine)) 97 | : []; 98 | const inter = intersect(topc, bottomc) as number[]; 99 | const topd = disjunct(topc, inter) as number[]; 100 | const bottomd = disjunct(bottomc, inter) as number[]; 101 | 102 | let lineBase = middlecharset(); 103 | 104 | if (topc.length == 0) { 105 | lineBase = topcharset(); 106 | } else if (bottomc.length == 0) { 107 | lineBase = bottomcharset(); 108 | } 109 | 110 | const crosses: { i: number; t: chars }[] = []; 111 | inter.forEach((ind: number) => 112 | !this.isCap(ind) && crosses.push({ i: ind, t: chars.midmid }) 113 | ); 114 | topd.forEach((ind: number) => 115 | !this.isCap(ind) && crosses.push({ i: ind, t: chars.bottommid }) 116 | ); 117 | bottomd.forEach((ind: number) => 118 | !this.isCap(ind) && crosses.push({ i: ind, t: chars.topmid }) 119 | ); 120 | 121 | return this.tableLine(undefined, crosses, lineBase); 122 | } 123 | 124 | private tableLine( 125 | content?: string, 126 | crosses?: { i: number; t: chars }[], 127 | chars: charset = tableLinecharset(), 128 | ) { 129 | const line = padEndVisible( 130 | `${this.colorFn(chars.start)}${content || 131 | this.colorFn(chars.line.repeat(this.width))}`, 132 | this.width + 1, 133 | ) + `${this.colorFn(chars.stop)}\n`; 134 | const lineArray = line.split(""); 135 | if (crosses) { 136 | crosses.forEach(({ i, t }) => { 137 | const colDiff = lDiff(line.substr(0, i)); 138 | lineArray.splice(i + colDiff, 1, t); 139 | }); 140 | } 141 | return lineArray.join(""); 142 | } 143 | 144 | private isCap(index: number) { 145 | return (index == 0 || index == this.width + 1); 146 | } 147 | 148 | private getWSeparatorPositions(line: string) { 149 | /* stash 150 | return matchWithIndex(line, /│/g).map(i => { 151 | return i - lDiff(line.slice(0, i)); 152 | }); */ 153 | return matchWithIndex(line, /│/g); 154 | } 155 | } 156 | 157 | const bottomcharset = () => ({ 158 | start: chars.bottomleft, 159 | stop: chars.bottomright, 160 | line: chars.bottom, 161 | is: chars.bottommid, 162 | }); 163 | const topcharset = () => ({ 164 | start: chars.topleft, 165 | stop: chars.topright, 166 | line: chars.top, 167 | is: chars.topmid, 168 | }); 169 | const middlecharset = () => ({ 170 | start: chars.midleft, 171 | stop: chars.midright, 172 | line: chars.mid, 173 | is: chars.midmid, 174 | }); 175 | const tableLinecharset = () => ({ 176 | start: chars.left, 177 | stop: chars.right, 178 | line: chars.mid, 179 | is: chars.midmid, 180 | }); 181 | -------------------------------------------------------------------------------- /common.ts: -------------------------------------------------------------------------------- 1 | import { colors } from "./deps.ts"; 2 | import { padStartVisible } from "./utils.ts"; 3 | import type { BenchIndicator, Thresholds } from "./types.ts"; 4 | import type { BenchmarkResult, BenchmarkRunResult } from "./deps.ts"; 5 | 6 | const { green, yellow, red, white } = colors; 7 | 8 | export function getTimeColor( 9 | name: string, 10 | time: number, 11 | nocolor?: boolean, 12 | thresholds?: Thresholds, 13 | ) { 14 | // if nocolor is set, than return a the same string without coloring 15 | if (nocolor) { 16 | return (str: string) => str; 17 | } 18 | 19 | const inRange = getInThresholdRange(name, time, thresholds); 20 | 21 | return [yellow, green, yellow, red][inRange || 0]; 22 | } 23 | 24 | export function getInThresholdRange( 25 | name: string, 26 | time: number, 27 | thresholds?: Thresholds, 28 | ): null | 1 | 2 | 3 { 29 | const th = thresholds && thresholds[name]; 30 | 31 | if (th) { 32 | if (time <= th.green) return 1; 33 | if (time <= th.yellow) return 2; 34 | if (th.yellow < time) return 3; 35 | } 36 | return null; 37 | } 38 | 39 | /** Gets the correct indicator for the named bench */ 40 | export function getIndicator( 41 | name: string, 42 | indicators?: BenchIndicator[], 43 | ) { 44 | if (indicators && indicators.length > 0) { 45 | const indChar = "▒▒"; 46 | const indicator = indicators.find(({ benches }) => benches.test(name)); 47 | if (indicator) { 48 | if (typeof indicator.modFn == "function") { 49 | const modded = indicator.modFn(indChar); 50 | return modded; // str or object 51 | } else { 52 | return false; // has indicator but no modFn 53 | } 54 | } 55 | } 56 | 57 | return undefined; 58 | } 59 | 60 | /** Handles the padding of indicators to specific lengths */ 61 | export function getPaddedIndicator( 62 | name: string, 63 | toLength: number, 64 | indicators?: BenchIndicator[], 65 | noIndicator: string = " ".repeat(toLength), 66 | ) { 67 | const indicator = getIndicator(name, indicators); 68 | if (indicator) { 69 | let newIndicator = ""; 70 | 71 | if ( 72 | typeof indicator === "object" && indicator.indicator && 73 | !isNaN(indicator.visibleLength) 74 | ) { 75 | newIndicator = padStartVisible( 76 | `${indicator.indicator}`, 77 | toLength + 78 | (stripColor(indicator.indicator).length - indicator.visibleLength), 79 | ); 80 | } else { //simple string 81 | newIndicator = padStartVisible(`${indicator}`, toLength); 82 | } 83 | 84 | return newIndicator; 85 | } 86 | 87 | return noIndicator; 88 | } 89 | 90 | /** strips terminal color */ 91 | export function stripColor(str: string) { 92 | // deno-lint-ignore no-control-regex 93 | return str.replace(/\x1b\[[0-9\;]*m/g, ""); 94 | } 95 | 96 | export function substrColored(str: string, length: number) { 97 | let visibleLength = 0; 98 | let cutStr = ""; 99 | const sa = [...str]; 100 | 101 | for (let i = 0; i < sa.length; i++) { 102 | const cs = sa[i]; 103 | if (cs === "\x1b") { 104 | const colorMod = str.slice(i, sa.indexOf("m", i) + 1); 105 | cutStr += colorMod; 106 | 107 | i = sa.indexOf("m", i); 108 | } else { 109 | if (visibleLength < length) { 110 | cutStr += cs; 111 | visibleLength++; 112 | } 113 | } 114 | } 115 | 116 | return cutStr; 117 | } 118 | 119 | export function intersect(a: unknown[], b: unknown[]) { 120 | return a.filter((value) => -1 !== b.indexOf(value)); 121 | } 122 | 123 | export function disjunct(base: unknown[], dis: unknown[]) { 124 | return base.filter((value) => -1 === dis.indexOf(value)); 125 | } 126 | 127 | /** Calculates the `min`, `max`, `mean` (as (`max`+`min`)/2) and `median` from the `measuredRunsMs` array. */ 128 | export function calculateExtraMetrics(result: BenchmarkResult) { 129 | const max = Math.max(...result.measuredRunsMs); 130 | const min = Math.min(...result.measuredRunsMs); 131 | const mean = (max + min) / 2; // not as avg 132 | 133 | const sorted = [...result.measuredRunsMs].sort(); 134 | const middle = Math.floor(sorted.length / 2); 135 | const median = sorted.length == 0 136 | ? 0 137 | : (sorted.length % 2 !== 0 ? sorted[middle] 138 | : (sorted[middle - 1] + sorted[middle]) / 2); 139 | 140 | return { 141 | max, 142 | min, 143 | mean, 144 | median, 145 | }; 146 | } 147 | 148 | /** Calculates the `standard deviation` from the `measuredRunsMs` array. */ 149 | export function calculateStdDeviation(result: BenchmarkResult) { 150 | const sorted = [...result.measuredRunsMs].sort(); 151 | const stdDeviation = Math.sqrt( 152 | sorted.map((x) => Math.pow(x - result.measuredRunsAvgMs, 2)).reduce(( 153 | a, 154 | b, 155 | ) => a + b) / sorted.length, 156 | ); 157 | 158 | return stdDeviation; 159 | } 160 | 161 | /** Returns the range into which the benchmarks with had a threshold set fell. */ 162 | export function getThresholdResultsFrom( 163 | runResult: BenchmarkRunResult, 164 | thresholds: Thresholds, 165 | ) { 166 | const thResults: { [key: string]: "red" | "yellow" | "green" } = {}; 167 | runResult.results.forEach((r) => { 168 | const t = thresholds[r.name]; 169 | if (t) { 170 | thResults[r.name] = r.measuredRunsAvgMs > t.green 171 | ? (r.measuredRunsAvgMs > t.yellow ? "red" : "yellow") 172 | : "green"; 173 | } 174 | }); 175 | 176 | return thResults; 177 | } 178 | -------------------------------------------------------------------------------- /history_extensions.ts: -------------------------------------------------------------------------------- 1 | import { BenchmarkResult, colors } from "./deps.ts"; 2 | import { rtime } from "./utils.ts"; 3 | import { stripColor } from "./common.ts"; 4 | 5 | import type { prettyBenchmarkProgressOptions } from "./pretty_benchmark_progress.ts"; 6 | import type { prettyBenchmarkResultOptions } from "./pretty_benchmark_result.ts"; 7 | import type { ColumnDefinition } from "./pretty_benchmark_down.ts"; 8 | import type { 9 | DeltaKey, 10 | prettyBenchmarkHistory, 11 | } from "./pretty_benchmark_history.ts"; 12 | 13 | /** Returns the calculated delta for the specific benchmark in a formatted string. 14 | * 15 | * Meant to be used as: 16 | * ```ts 17 | * prettyBenchmarkProgress({rowExtras: deltaProgressRowExtra(history)}); 18 | * ``` 19 | * .*/ 20 | export function deltaProgressRowExtra(history: prettyBenchmarkHistory) { 21 | return ( 22 | result: BenchmarkResult, 23 | options?: prettyBenchmarkProgressOptions, 24 | ) => { 25 | let deltaString = getCliDeltaString(history, result); 26 | 27 | if (options?.nocolor) { 28 | deltaString = stripColor(deltaString); 29 | } 30 | 31 | return ` [${deltaString}]`; 32 | }; 33 | } 34 | 35 | /** Returns the calculated delta for the specific benchmark in a formatted string. 36 | * 37 | * Meant to be used as: 38 | * ```ts 39 | * prettyBenchmarkResult({infoCell: deltaResultInfoCell(history)}); 40 | * ``` 41 | * .*/ 42 | export function deltaResultInfoCell(history: prettyBenchmarkHistory) { 43 | return (result: BenchmarkResult, options?: prettyBenchmarkResultOptions) => { 44 | let deltaString = getCliDeltaString(history, result); 45 | 46 | if (options?.nocolor) { 47 | deltaString = stripColor(deltaString); 48 | } 49 | 50 | return ` ${deltaString}`; 51 | }; 52 | } 53 | 54 | /** Defines a delta column, which shows the changes for the benchmark. Shows `-` when there was no previous measurements for the benchmark in the history. 55 | * 56 | * Calculates delta on `measuredRunsAvgMs` by default, which can be changed in the options with `key`.*/ 57 | export function deltaColumn( 58 | history: prettyBenchmarkHistory, 59 | options?: { key: DeltaKey }, 60 | ): ColumnDefinition { 61 | const workingKey = options?.key || "measuredRunsAvgMs"; 62 | 63 | return { 64 | title: `Change in ${options?.key || "average"}`, 65 | formatter: (result, cd) => { 66 | const delta = history.getDeltaForBenchmark(result, [workingKey]); 67 | if (delta) { 68 | const perc = (delta[workingKey as string].percent * 100).toFixed(0); 69 | const diff = rtime(Math.abs(delta.measuredRunsAvgMs.amount)); 70 | 71 | const notSpaceChar = " "; 72 | const smallSpace = " "; 73 | if (delta[workingKey as string].amount > 0) { 74 | return `🔺 ${`+${perc}`.padStart(4, notSpaceChar)}% (${ 75 | diff.padStart(6) 76 | }ms)`; 77 | } else { 78 | return `🟢${smallSpace} ${perc.padStart(4, notSpaceChar)}% (${ 79 | diff.padStart(6) 80 | }ms)`; 81 | } 82 | } 83 | return "-"; 84 | }, 85 | }; 86 | } 87 | 88 | /** Defines a column for each different `runBenchmarks` results in the history. 89 | * 90 | * The title of the columns are the dates they were run or the `id`'s of them if they are present. 91 | * 92 | * Shows `measuredRunsAvgMs` by default, which can be changed in the options with `key`.*/ 93 | export function historyColumns( 94 | history: prettyBenchmarkHistory, 95 | options?: { 96 | key?: DeltaKey; 97 | titleFormatter?: (dateString: string, id?: string) => string; 98 | }, 99 | ): ColumnDefinition[] { 100 | if (history.getData().history.length === 0) { 101 | return []; 102 | } 103 | 104 | const dateFormatter = (dateString: string) => { 105 | const parsedDate = new Date(dateString); 106 | return parsedDate.toISOString().split("T").join("
").replace(/Z/, ""); 107 | }; 108 | 109 | return history.getData().history.map((run) => { 110 | return { 111 | title: (typeof options?.titleFormatter === "function" 112 | ? options.titleFormatter(run.date, run.id) 113 | : run.id ?? dateFormatter(run.date)), 114 | toFixed: 3, 115 | align: "right", 116 | formatter: (result: BenchmarkResult) => { 117 | if (!run.benchmarks[result.name]) { 118 | return "-"; 119 | } 120 | 121 | const workingKey = options?.key ?? "measuredRunsAvgMs"; 122 | 123 | if (workingKey === "measuredRunsAvgMs" || workingKey === "totalMs") { 124 | //deno-lint-ignore no-explicit-any 125 | return (run.benchmarks[result.name] as any)[workingKey as any] || "-"; 126 | } else { 127 | return run.benchmarks[result.name].extras?.[workingKey] || "-"; 128 | } 129 | }, 130 | }; 131 | }); 132 | } 133 | 134 | function getCliDeltaString( 135 | history: prettyBenchmarkHistory, 136 | result: BenchmarkResult, 137 | ) { 138 | const delta = history.getDeltaForBenchmark(result); 139 | let deltaString = `${colors.gray(" ▪ no history ▪ ".padEnd(19))}`; 140 | if (delta) { 141 | const perc = (delta.measuredRunsAvgMs.percent * 100).toFixed(0); 142 | const diff = rtime(Math.abs(delta.measuredRunsAvgMs.amount)); 143 | 144 | if (delta.measuredRunsAvgMs.amount > 0) { 145 | deltaString = `${ 146 | colors.red(` ▲ ${`+${perc}`.padStart(4)}% (${diff.padStart(6)}ms)`) 147 | }`; 148 | } else { 149 | deltaString = `${ 150 | colors.green(` ▼ ${perc.padStart(4)}% (${diff.padStart(6)}ms)`) 151 | }`; 152 | } 153 | } 154 | 155 | return deltaString; 156 | } 157 | -------------------------------------------------------------------------------- /docs/prettyBenchmarkDown/pr_benchmarks.ts: -------------------------------------------------------------------------------- 1 | import { 2 | ColumnDefinition, 3 | defaultColumns, 4 | extraMetricsColumns, 5 | GroupDefinition, 6 | indicatorColumn, 7 | prettyBenchmarkDown, 8 | thresholdResultColumn, 9 | thresholdsColumn, 10 | } from "https://deno.land/x/pretty_benching@v0.3.3/pretty_benchmark_down.ts"; 11 | 12 | import { 13 | bench, 14 | BenchmarkResult, 15 | BenchmarkRunResult, 16 | runBenchmarks, 17 | } from "https://deno.land/std@0.91.0/testing/bench.ts"; 18 | 19 | import * as colors from "https://deno.land/std@0.91.0/fmt/colors.ts"; 20 | import type { BenchIndicator, Thresholds } from "../../types.ts"; 21 | 22 | bench({ 23 | name: "Sorting arrays", 24 | runs: 4000, 25 | func(b): void { 26 | b.start(); 27 | new Array(10000).fill(Math.random()).sort(); 28 | b.stop(); 29 | }, 30 | }); 31 | 32 | bench({ 33 | name: "Ungrouped 1", 34 | runs: 1000, 35 | func(b): void { 36 | b.start(); 37 | let a = new Array(500); 38 | for (let i = 0; i < 500; i++) { 39 | a.pop(); 40 | a = a.reverse(); 41 | } 42 | b.stop(); 43 | }, 44 | }); 45 | 46 | bench({ 47 | name: "Ungrouped 2", 48 | runs: 1000, 49 | func(b): void { 50 | b.start(); 51 | let a = new Array(500); 52 | for (let i = 0; i < 500; i++) { 53 | a.pop(); 54 | a = a.reverse(); 55 | } 56 | b.stop(); 57 | }, 58 | }); 59 | 60 | bench({ 61 | name: "Rotating other things", 62 | runs: 1000, 63 | func(b): void { 64 | b.start(); 65 | let a = new Array(500); 66 | for (let i = 0; i < 500; i++) { 67 | a.pop(); 68 | a = a.reverse(); 69 | } 70 | b.stop(); 71 | }, 72 | }); 73 | 74 | bench({ 75 | name: "Rotating arrays", 76 | runs: 1000, 77 | func(b): void { 78 | b.start(); 79 | let a = new Array(500); 80 | for (let i = 0; i < 500; i++) { 81 | a.pop(); 82 | a = a.reverse(); 83 | } 84 | b.stop(); 85 | }, 86 | }); 87 | 88 | bench({ 89 | name: "Proving NP==P", 90 | runs: 1, 91 | func(b): void { 92 | b.start(); 93 | for (let i = 0; i < 1e9 / 5; i++) { 94 | const NPeP = Math.random() === Math.random(); 95 | } 96 | b.stop(); 97 | }, 98 | }); 99 | 100 | bench({ 101 | name: "Counting stars_long", 102 | runs: 1000, 103 | func(b): void { 104 | b.start(); 105 | const a = []; 106 | for (let i = 0; i < 1e12; i++) { 107 | a.push(i); 108 | } 109 | b.stop(); 110 | }, 111 | }); 112 | 113 | bench({ 114 | name: "Standing out", 115 | runs: 1000, 116 | func(b): void { 117 | b.start(); 118 | new Array(10000).fill(Math.random()).sort(); 119 | b.stop(); 120 | }, 121 | }); 122 | 123 | const thresholds: Thresholds = { 124 | "Rotating arrays": { green: 3.5, yellow: 4.4 }, 125 | "Sorting arrays": { green: 0.5, yellow: 2 }, 126 | "Proving NP==P": { green: 4141, yellow: 6000 }, 127 | "Standing out": { green: 0.300, yellow: 0.330 }, 128 | }; 129 | 130 | const indicators: BenchIndicator[] = [ 131 | { benches: /NP/, modFn: () => colors.magenta("%") }, 132 | { benches: /array/, modFn: () => "🎹" }, 133 | { 134 | benches: /Standing/, 135 | modFn: () => "🚀", 136 | color: colors.magenta, 137 | }, 138 | ]; 139 | 140 | runBenchmarks( 141 | { silent: true, skip: /_long/ }, 142 | ) 143 | .then(prettyBenchmarkDown( 144 | console.log, 145 | { 146 | title: "An example benchMarkdown", 147 | description: (runResult: BenchmarkRunResult) => 148 | `This markdown was generated with the use of \`prettyBenchmarkDown\`.\nIf you use a function for the \`description\` or \`afterTables\`, you can process the results here as well: \n\n > In this benchmark ${runResult.results.length} benches were run, ${runResult.filtered} were filtered.`, 149 | afterTables: 150 | "\n---\n\nThis is the `afterTables`. This behaves the same as \`description\`, it just puts this at the bottom of the markdown.\nHere its defined with a simple string.\n\nCheck out the Github Action, which comments a markdown like this on PRs: $link", 151 | groups: [ 152 | { 153 | include: /array/, 154 | name: "Default columns and dynamic text", 155 | description: 156 | "This is a group's \`description\`.\nHere you can see what the default columns are, and how you can use a `function` as `description` or `afterTable` inside a group", 157 | afterTable: ( 158 | gr: BenchmarkResult[], 159 | g: GroupDefinition, 160 | rr: BenchmarkRunResult, 161 | ) => 162 | `This is a group's \`afterTable\`.\nHere you can access eg. the group name: \`${g.name}\`, benchmarks in this group: \`${gr.length}\` of them here, or the whole \`BenchmarkRunResult\`: \`${rr.results.length}\` benchmarks total`, 163 | columns: [...defaultColumns()], 164 | }, 165 | { 166 | include: /array/, 167 | name: "Custom columns", 168 | afterTable: 169 | "If you see `-`, that means the value there was `undefined`, if you see `*` it means that column is badly configured, no `formatter` or `propertyKey` was defined.", 170 | columns: [ 171 | ...defaultColumns(["name", "runsCount", "totalMs"]), 172 | { 173 | title: "CustomTotal", 174 | propertyKey: "totalMs", 175 | toFixed: 5, 176 | align: "left", 177 | }, 178 | { 179 | title: "Formatter", 180 | formatter: (r: BenchmarkResult, cd: ColumnDefinition) => 181 | `${r.name.split("").reverse().join("")}:${ 182 | cd.title.split("").reverse().join("") 183 | } <-`, 184 | }, 185 | { 186 | title: "Undefined", 187 | propertyKey: "dontHaveOneLikeThis", 188 | }, 189 | { 190 | title: "Bad Config", 191 | }, 192 | ], 193 | }, 194 | { 195 | include: /otating|Proving|Standing/, 196 | name: "Predefiend columns", 197 | description: 198 | "Here you can see, what the predefined columns are.\n\nYou can add the `indicators` and `thresholds` that you use in `prettyBenchmarkProgress` and `prettyBenchmarkResults`.\n\nYou can see, how you can rename columns like with `Thresholds+`", 199 | columns: [ 200 | indicatorColumn(indicators), 201 | ...defaultColumns(), 202 | thresholdsColumn(thresholds), 203 | { ...thresholdsColumn(thresholds, true), title: "Thresholds+" }, 204 | thresholdResultColumn(thresholds), 205 | ], 206 | }, 207 | { 208 | include: /i/, 209 | name: "Extra metrics", 210 | description: 211 | "You can add `extraMetrics` columns too. In its `metrics` array you can define which columns you want. If you set `ignoreSingleRuns` to `true`, it wont show values on rows, where runCount is 1.", 212 | columns: [ 213 | ...defaultColumns(), 214 | ...extraMetricsColumns({ ignoreSingleRuns: true }), 215 | ], 216 | }, 217 | ], 218 | }, 219 | )); 220 | -------------------------------------------------------------------------------- /pretty_benchmark_progress.ts: -------------------------------------------------------------------------------- 1 | import { Colorer } from "./colorer.ts"; 2 | import { BenchmarkResult, ProgressState } from "./deps.ts"; 3 | import { getPaddedIndicator, getTimeColor } from "./common.ts"; 4 | import { getTimePadSize, num, padEndVisible, usingHrTime } from "./utils.ts"; 5 | 6 | import type { BenchmarkRunProgress, BenchmarkRunResult } from "./deps.ts"; 7 | import type { BenchIndicator, Thresholds } from "./types.ts"; 8 | 9 | const headerPadding = "▒▒▒▒▒▒▒▒"; 10 | const c: Colorer = new Colorer(); 11 | 12 | /** Defines how the resulting output should look like. */ 13 | export interface prettyBenchmarkProgressOptions { 14 | /** If provided, the results will be colored accordingly */ 15 | thresholds?: Thresholds; 16 | /** If provided, the indicators will be placed before the specific benches */ 17 | indicators?: BenchIndicator[]; 18 | /** Adds the returned string at the end of each finished benchmark row */ 19 | rowExtras?: ( 20 | result: BenchmarkResult, 21 | options: prettyBenchmarkProgressOptions, 22 | ) => string; 23 | /** Strips all default colors from the output. 24 | * 25 | * *Note*: it doesnt strip the colors that come through user defined `thresholds` and `indicators` */ 26 | nocolor?: boolean; 27 | /** Overrides the default output function, which is `console.log`. */ 28 | outputFn?: (log: string) => unknown; 29 | } 30 | 31 | /** Returns a function that expects `BenchmarkRunProgress` object, which than prints 32 | * the benchmarking progress in a nicely formatted way, based on the provided `options`. 33 | * 34 | * Typical basic usage: 35 | * 36 | * ```ts 37 | * // add benches, then 38 | * runBenchmarks({silent: true}, prettyBenchmarkProgress()); 39 | * ``` 40 | * . 41 | */ 42 | export function prettyBenchmarkProgress( 43 | /** Defines how the output should look like */ 44 | options?: prettyBenchmarkProgressOptions, 45 | ) { 46 | if (options?.nocolor) c.setColorEnabled(false); 47 | 48 | return (progress: BenchmarkRunProgress) => 49 | _prettyBenchmarkProgress(progress, options); 50 | } 51 | 52 | function _prettyBenchmarkProgress( 53 | progress: BenchmarkRunProgress, 54 | options?: prettyBenchmarkProgressOptions, 55 | ) { 56 | const up1Line = "\x1B[1A"; 57 | const out = typeof options?.outputFn === "function" 58 | ? options.outputFn 59 | : console.log; 60 | 61 | // Started benching 62 | if (progress.state === ProgressState.BenchmarkingStart) { 63 | const line = startBenchingLine(progress, options); 64 | out(line); 65 | return; 66 | } 67 | 68 | // Starting bench run 69 | if (progress.state === ProgressState.BenchStart) { 70 | const line = startingBenchmarkLine(progress, options); 71 | out(`${line}\t`); 72 | return; 73 | } 74 | 75 | // Multiple run bench partial result 76 | if (progress.state === ProgressState.BenchPartialResult) { 77 | const line = runningBenchmarkLine(progress, options); 78 | out(`${up1Line}\r${line}\t`); 79 | return; 80 | } 81 | 82 | // Bench run result 83 | if (progress.state === ProgressState.BenchResult) { 84 | const line = finishedBenchmarkLine(progress, options); 85 | const appended = typeof options?.rowExtras === "function" 86 | ? options.rowExtras([...progress.results].reverse()[0], options) 87 | : ""; 88 | 89 | out(`${up1Line}\r${line}${appended}`); 90 | return; 91 | } 92 | 93 | // Finished benching 94 | if (progress.state === ProgressState.BenchmarkingEnd) { 95 | if (progress.running) { 96 | out("\n"); // Double empty line 97 | out( 98 | c.red( 99 | `${headerPadding} Benchmarking failed\n${headerPadding} An error was thrown while running benchmark [${progress.running.name}]\n`, 100 | ), 101 | ); 102 | return; 103 | } 104 | out(""); // Empty line 105 | considerPrecise(progress); 106 | const cyanHeader = `${c.cyan(headerPadding)}`; 107 | out(`${cyanHeader} Benchmarking finished\n`); 108 | return; 109 | } 110 | } 111 | 112 | function considerPrecise(result: BenchmarkRunResult) { 113 | if ( 114 | !usingHrTime() && 115 | !!result.results.find(({ measuredRunsAvgMs }) => measuredRunsAvgMs < 10) 116 | ) { 117 | const yellowHeader = `${c.yellow(headerPadding)}`; 118 | console.log( 119 | `${yellowHeader} Consider running benchmarks with ${ 120 | c.yellow(`--allow-hrtime`) 121 | } for a more precise measurement`, 122 | ); 123 | } 124 | } 125 | 126 | function startingBenchmarkLine( 127 | progress: BenchmarkRunProgress, 128 | options?: prettyBenchmarkProgressOptions, 129 | ): string { 130 | const fullName = benchNameFormatted(progress.running!.name, options); 131 | const fullTimes = `[${ 132 | c.yellow(progress.running!.runsCount.toString().padStart(7)) 133 | }]`; 134 | 135 | return `Running ${fullName} a total of ${fullTimes} times`; 136 | } 137 | 138 | function runningBenchmarkLine( 139 | progress: BenchmarkRunProgress, 140 | options?: prettyBenchmarkProgressOptions, 141 | ): string { 142 | const percent = Math.round( 143 | progress.running!.measuredRunsMs.length / progress.running!.runsCount * 100, 144 | ); 145 | 146 | const fullName = benchNameFormatted(progress.running!.name, options); 147 | 148 | const maxBarLength = 48; // needs to be even 149 | const progressBar = Array(Math.ceil(percent / 100 * maxBarLength)).fill("=") 150 | .join("").padEnd( 151 | maxBarLength, 152 | ); 153 | 154 | const inserted = progressBar.substr(0, maxBarLength / 2 - 2) + 155 | c.white( 156 | `${percent.toString().padEnd(2)}${ 157 | percent == 100 ? "" : c.green(progressBar.substr(maxBarLength / 2, 1)) 158 | }%`, 159 | ) + progressBar.substr(maxBarLength / 2 + 2); 160 | 161 | const fullProgressBar = `${c.yellow("[")}${c.green(inserted)}${ 162 | c.yellow("]") 163 | }`; 164 | 165 | const progressDone = `${ 166 | progress.running!.measuredRunsMs.length.toString().padStart(6) 167 | }`; 168 | const progressTotal = `${progress.running!.runsCount.toString().padStart(6)}`; 169 | const progressCount = `[${c.green(progressDone)}/${c.yellow(progressTotal)}]`; 170 | 171 | return `Running ${fullName} ${progressCount} ${fullProgressBar}`; 172 | } 173 | 174 | function finishedBenchmarkLine( 175 | progress: BenchmarkRunProgress, 176 | options?: prettyBenchmarkProgressOptions, 177 | ): string { 178 | const result = [...progress.results].reverse()[0]; 179 | 180 | const fullName = benchNameFormatted(result.name, options); 181 | 182 | const fullCount = `Runs: [${ 183 | c.yellow((result.runsCount || 1).toString().padStart(7)) 184 | }]`; 185 | 186 | const fullTotalTime = `Total time: [${ 187 | c.yellow( 188 | num(result.totalMs).padStart(getTimePadSize()), 189 | ) 190 | }${c.gray("ms")}]`; 191 | 192 | const avgTime = result.measuredRunsAvgMs; 193 | const paddedAvgTime = num(avgTime, true).padStart(getTimePadSize()); 194 | const colorFn = getTimeColor( 195 | result.name, 196 | avgTime, 197 | options?.nocolor, 198 | options?.thresholds, 199 | ); 200 | const coloredTime = colorFn(paddedAvgTime); 201 | const fullAverage = `Avg: [${coloredTime}${c.gray("ms")}]`; 202 | 203 | return `Benched ${fullName} ${fullCount} ${fullTotalTime} ${fullAverage}`; 204 | } 205 | 206 | function startBenchingLine( 207 | progress: BenchmarkRunProgress, 208 | options?: prettyBenchmarkProgressOptions, 209 | ): string { 210 | const cyanHeader = `${c.cyan(headerPadding)}`; 211 | const fullQueued = `Benchmarks queued: [${ 212 | c.yellow(progress.queued!.length.toString().padStart(5)) 213 | }]`; 214 | const fullFiltered = c.gray( 215 | ` filtered: [${progress.filtered.toString().padStart(5)}]`, 216 | ); 217 | 218 | return `\n${cyanHeader} Starting benchmarking\n${cyanHeader} ${fullQueued} ${fullFiltered}\n`; 219 | } 220 | 221 | function benchNameFormatted( 222 | name: string, 223 | options?: prettyBenchmarkProgressOptions, 224 | ) { 225 | let ob = "["; 226 | let clb = "]"; 227 | if (options?.indicators) { 228 | const indicator = options.indicators.find(({ benches }) => 229 | benches.test(name) 230 | ); 231 | if (typeof indicator?.color === "function") { 232 | ob = indicator.color(ob); 233 | clb = indicator.color(clb); 234 | } 235 | } 236 | 237 | return `${ 238 | getPaddedIndicator(name, options?.indicators ? 2 : 0, options?.indicators) 239 | }` + 240 | `${ob}${c.cyan(name)} ${ 241 | c.gray(padEndVisible("", 40 - name.length, "-")) 242 | }${clb}`; 243 | } 244 | -------------------------------------------------------------------------------- /benchmark_result_card.ts: -------------------------------------------------------------------------------- 1 | import { 2 | calculateExtraMetrics, 3 | getPaddedIndicator, 4 | getTimeColor, 5 | substrColored, 6 | } from "./common.ts"; 7 | 8 | import { padEndVisible, padStartVisible, perc, rtime } from "./utils.ts"; 9 | 10 | import { TableBuilder } from "./table.ts"; 11 | import type { Colorer } from "./colorer.ts"; 12 | import type { BenchmarkResult } from "./deps.ts"; 13 | import type { BenchIndicator, Thresholds } from "./types.ts"; 14 | 15 | /** Defines the options for card formatted results */ 16 | export interface prettyBenchmarkCardResultOptions { 17 | // type: "card"; TODO when multiple options 18 | /** If provided, the measured values will be colored accordingly in the card. Also needed, if `parts.threshold` is set to `true` */ 19 | thresholds?: Thresholds; 20 | /** If provided, the indicators will be placed for the specific benches */ 21 | indicators?: BenchIndicator[]; 22 | /** Strips all default colors from the output. 23 | * 24 | * *Note*: it doesnt strip the colors that come through user defined `thresholds` and `indicators` */ 25 | nocolor?: boolean; 26 | /** Overrides the default card `parts` option, which is ```{graph: true, graphBars: 5}``` */ 27 | parts?: { 28 | /** Adds extra calculated metrics line to the card, which consists of `min`, `max`, `mean as ((min+max)/2)` and `median` */ 29 | extraMetrics?: boolean; 30 | /** Add a line, where the threshold ranges are shown. Only shown, when a `Threshold` was provided for the specific benchmark. */ 31 | threshold?: boolean; 32 | /** Add a graph, that shows the distribution of the runs. It's only shown above `9` runs */ 33 | graph?: boolean; 34 | /** Defines how many groups the distribution graph should use. */ 35 | graphBars?: number; 36 | }; 37 | /** Add a cell with the generated content at the end of the header row of the result card. Overflowing text is cut. */ 38 | infoCell?: ( 39 | result: BenchmarkResult, 40 | options: prettyBenchmarkCardResultOptions, 41 | ) => string; 42 | } 43 | 44 | const tab = " "; 45 | const indPlaceholder = "˘˘˘˘"; 46 | let c: Colorer; 47 | 48 | export function getResultCard( 49 | result: BenchmarkResult, 50 | colorer: Colorer, 51 | options?: prettyBenchmarkCardResultOptions, 52 | ) { 53 | c = colorer; 54 | 55 | const defaultOptions: prettyBenchmarkCardResultOptions = { 56 | parts: { graph: true, graphBars: 5 }, 57 | }; 58 | 59 | // define default options and default parts 60 | options = options || defaultOptions; 61 | if (!options.parts) { 62 | options.parts = defaultOptions.parts; 63 | } 64 | 65 | const tableColor = getTableColor(result.name, options?.indicators); 66 | const tb = new TableBuilder(91, tableColor); 67 | 68 | const needsThreshold = options.parts!.threshold && !!options.thresholds && 69 | Object.keys(options.thresholds).length != 0; 70 | 71 | prettyBenchmarkHeader(tb, result, options); 72 | if (result.runsCount == 1) { 73 | prettyBenchmarkSingleRunMetrics(tb, result, options); 74 | needsThreshold && prettyBenchmarkThresholdLine(tb, result, options); 75 | } else { 76 | prettyBenchmarkMultipleRunMetrics(tb, result, options); 77 | options.parts!.extraMetrics && 78 | prettyBenchmarkMultipleRunCalcedMetrics(tb, result, options); 79 | needsThreshold && prettyBenchmarkThresholdLine(tb, result, options); 80 | if (options.parts!.graph && result.runsCount >= 10) { 81 | prettyBenchmarkMultipleRunGraph(tb, result, options); 82 | } 83 | } 84 | 85 | let table = tb.build(); 86 | 87 | // replace the indicator placeholder with the correct indicator 88 | table = table.replace( 89 | indPlaceholder, 90 | getPaddedIndicator( 91 | result.name, 92 | indPlaceholder.length - 1, 93 | options?.indicators, 94 | ) + " ", 95 | ); 96 | 97 | return table; 98 | } 99 | 100 | function prettyBenchmarkHeader( 101 | tb: TableBuilder, 102 | r: BenchmarkResult, 103 | options: prettyBenchmarkCardResultOptions, 104 | ) { 105 | const head = `${indPlaceholder}${`Benchmark name: ${ 106 | c.cyan(r.name.padEnd(43)) 107 | }`}`; 108 | 109 | if (typeof options?.infoCell === "function") { 110 | let infoCell = options.infoCell(r, options); 111 | infoCell = substrColored(infoCell, 27); 112 | 113 | tb.cellLine(head, infoCell); 114 | } else { 115 | tb.line(head); 116 | } 117 | 118 | tb.separator(); 119 | } 120 | 121 | function prettyBenchmarkSingleRunMetrics( 122 | tb: TableBuilder, 123 | result: BenchmarkResult, 124 | options: prettyBenchmarkCardResultOptions, 125 | ) { 126 | const totalRuns = `Total runs: ${c.yellow("1".padEnd(7))}`; 127 | const timeColor = getTimeColor( 128 | result.name, 129 | result.totalMs, 130 | options.nocolor, 131 | options.thresholds, 132 | ); 133 | const totalMS = `Total time: ${ 134 | padEndVisible(`${timeColor(rtime(result.totalMs, 4))} ms`, 16) 135 | }`; 136 | 137 | tb.cellLine(`${tab}${totalRuns}`, ` ${totalMS}`, ""); 138 | tb.separator(); 139 | } 140 | 141 | function prettyBenchmarkThresholdLine( 142 | tb: TableBuilder, 143 | result: BenchmarkResult, 144 | options: prettyBenchmarkCardResultOptions, 145 | ) { 146 | const threshold = options.thresholds && options.thresholds[result.name]; 147 | if (threshold) { 148 | const sep = "=".repeat(10); 149 | tb.line( 150 | `${tab}Thresholds: ${c.green(`0 ${sep} ${rtime(threshold.green)}`)} ${ 151 | c.yellow(`${sep} ${rtime(threshold.yellow)}`) 152 | } ${c.red(`${sep} ∞`)}`, 153 | ); 154 | tb.separator(); 155 | } 156 | } 157 | 158 | function prettyBenchmarkMultipleRunMetrics( 159 | tb: TableBuilder, 160 | result: BenchmarkResult, 161 | options: prettyBenchmarkCardResultOptions, 162 | ) { 163 | const totalRuns = `Total runs: ${ 164 | padEndVisible(c.yellow((result.runsCount).toString()), 7) 165 | }`; 166 | const totalMS = `Total time: ${ 167 | padEndVisible(`${c.yellow(rtime(result.totalMs, 4))} ms`, 16) 168 | }`; 169 | 170 | const timeColor = getTimeColor( 171 | result.name, 172 | result.measuredRunsAvgMs, 173 | options.nocolor, 174 | options.thresholds, 175 | ); 176 | const avgRun = `Avg time: ${ 177 | padEndVisible(`${timeColor(rtime(result.measuredRunsAvgMs, 4))} ms`, 8) 178 | }`; 179 | 180 | tb.cellLine(`${tab}${totalRuns}`, ` ${totalMS}`, ` ${avgRun}`); 181 | tb.separator(); 182 | } 183 | 184 | function prettyBenchmarkMultipleRunCalcedMetrics( 185 | tb: TableBuilder, 186 | result: BenchmarkResult, 187 | options: prettyBenchmarkCardResultOptions, 188 | ) { 189 | const { max, min, mean, median } = calculateExtraMetrics(result); 190 | 191 | const minColor = getTimeColor( 192 | result.name, 193 | min, 194 | options.nocolor, 195 | options.thresholds, 196 | ); 197 | const maxColor = getTimeColor( 198 | result.name, 199 | max, 200 | options.nocolor, 201 | options.thresholds, 202 | ); 203 | const meanColor = getTimeColor( 204 | result.name, 205 | mean, 206 | options.nocolor, 207 | options.thresholds, 208 | ); 209 | const medianColor = getTimeColor( 210 | result.name, 211 | median, 212 | options.nocolor, 213 | options.thresholds, 214 | ); 215 | 216 | tb.cellLine( 217 | `${tab}min: ${minColor(timeStr(min))} `, 218 | ` max: ${maxColor(timeStr(max))} `, 219 | ` mean: ${meanColor(timeStr(mean))} `, 220 | ` median: ${medianColor(timeStr(median))} `, 221 | ); 222 | tb.separator(); 223 | } 224 | 225 | function prettyBenchmarkMultipleRunGraph( 226 | tb: TableBuilder, 227 | result: BenchmarkResult, 228 | options: prettyBenchmarkCardResultOptions, 229 | ) { 230 | const barsCount = options.parts!.graphBars || 5; 231 | 232 | const max = Math.max(...result.measuredRunsMs); 233 | const min = Math.min(...result.measuredRunsMs); 234 | const unit = (max - min) / barsCount; 235 | const r = result.measuredRunsMs.reduce((prev, runMs, i, a) => { 236 | prev[Math.min(Math.floor(((runMs - min) / unit)), barsCount - 1)]++; 237 | 238 | return prev; 239 | }, new Array(barsCount).fill(0)); 240 | 241 | tb.tc(c.gray).cellLine(" ".repeat(31)); 242 | 243 | const rMax = Math.max(...r); 244 | const maxBarLength = 58; 245 | r.forEach((r: number, i: number) => { 246 | let rc = r; 247 | const rp = r / result.runsCount * 100; 248 | if (rMax > maxBarLength) { 249 | rc = Math.ceil(rp / 100 * maxBarLength); 250 | } 251 | 252 | const groupHead = min + i * unit; 253 | const bar = Array(rc).fill("=").join(""); 254 | 255 | const colorFn = getTimeColor( 256 | result.name, 257 | groupHead, 258 | options.nocolor, 259 | options.thresholds, 260 | ); 261 | 262 | const fullBar = colorFn(bar); 263 | 264 | const count = r.toString().padStart(6); 265 | const percent = perc(rp).padStart(4) + "%"; 266 | 267 | const barHeader = ` ${ 268 | padStartVisible( 269 | `${rtime(groupHead)} ms`, 270 | Math.max(rtime(max).length, 12), 271 | ) 272 | } _[${count}][${percent}] `; 273 | 274 | tb.tc(c.gray).cellLine(barHeader, fullBar); 275 | }); 276 | 277 | tb.tc(c.gray).cellLine(" ".repeat(31)); 278 | tb.separator(); 279 | } 280 | 281 | function timeStr(time: number, from = 3) { 282 | return padEndVisible(`${rtime(time, from)} ${c.white("ms")} `, 9 + 4); // TODO gray ms? 283 | } 284 | 285 | function getTableColor(name: string, indicators?: BenchIndicator[]) { 286 | if (indicators && indicators.length > 0) { 287 | const indicator = indicators.find(({ benches }) => benches.test(name)); 288 | return !!indicator && typeof indicator.color == "function" 289 | ? indicator.color 290 | : c.green; 291 | } 292 | 293 | return c.green; 294 | } 295 | -------------------------------------------------------------------------------- /docs/snapper.ts: -------------------------------------------------------------------------------- 1 | // deno-lint-ignore-file 2 | 3 | import { snap } from 'https://deno.land/x/snapper@v0.0.5/mod.ts'; 4 | import type { SnapParams } from 'https://deno.land/x/snapper@v0.0.5/mod.ts'; 5 | import { join } from "https://deno.land/std@0.91.0/path/win32.ts"; 6 | import { deltaProgressRowExtra, deltaResultInfoCell, prettyBenchmarkHistory, prettyBenchmarkProgress, prettyBenchmarkProgressOptions, prettyBenchmarkResult, prettyBenchmarkResultOptions } from "../mod.ts"; 7 | import { BenchmarkRunProgress, colors, ProgressState } from "../deps.ts"; 8 | 9 | async function generateDocsImages() { 10 | const pathBase = join(".", "docs", "showcase"); 11 | 12 | await Deno.permissions.request({name: 'hrtime'}); 13 | const readResult = await Deno.permissions.request({name: 'read', path: pathBase}); 14 | if(readResult.state !== 'granted') { 15 | console.error('Can\'t run without input data for the benchmark. Exiting...'); 16 | Deno.exit(1); 17 | } 18 | const writeResult = await Deno.permissions.request({name: 'write', path: pathBase}); 19 | if(writeResult.state !== 'granted') { 20 | console.error('Can\'t save result without write permission. Exiting...'); 21 | Deno.exit(1); 22 | } 23 | 24 | const resultData = readJsonSync(join(pathBase, "benchmark_result_input.json")); 25 | 26 | const postFix = '.png'; 27 | 28 | const thresholds = {"multiple-runs": { green: 76, yellow: 82 }}; 29 | const indicators = [{benches: /multiple-runs/, color: colors.magenta,modFn: () => ({indicator: "🚀 ", visibleLength: 2})}]; 30 | const history = new prettyBenchmarkHistory({ 31 | history: [{date: new Date().toISOString(), 32 | benchmarks: { 33 | runs100ForIncrementX1e6: {measuredRunsAvgMs: 4.235, totalMs: 4235, runsCount: 1000}, 34 | for100ForIncrementX1e6: {measuredRunsAvgMs: 5.4405, totalMs: 5440.5, runsCount: 1000}, 35 | "multiple-runs": {measuredRunsAvgMs: 133.2134, totalMs: 13321.34, runsCount: 100} 36 | }}] 37 | }); 38 | 39 | /* Result cards */ 40 | 41 | const optionsSet: {name: string, options: prettyBenchmarkResultOptions}[] = [ // TODO remove "snap" postfix 42 | {name: 'docs/imgs/prettyBenchingResult_example_threshold', options: {thresholds}}, 43 | {name: 'docs/imgs/prettyBenchingResult_example_indicator', options: {indicators}}, 44 | {name: 'docs/imgs/prettyBenchingResult_example_indicators', options: {thresholds, indicators}}, 45 | {name: 'docs/imgs/prettyBenchingResult_example_full_extra', options: {thresholds, indicators: [{benches: /multiple-runs/, color: colors.blue,modFn: () => colors.bgYellow(colors.bold(colors.black("%")))}], parts: {extraMetrics: true,threshold: true,graph: true,graphBars: 10 }}}, 46 | {name: 'docs/imgs/prettyBenchingResult_example_extrametrics_line', options: {thresholds, parts: {extraMetrics: true}}}, 47 | {name: 'docs/imgs/prettyBenchingResult_example_threshold_line', options: {thresholds, parts: {threshold: true, graph: true}}}, 48 | {name: 'docs/imgs/prettyBenchingHistory_result_card_delta', options: {infoCell: deltaResultInfoCell(history)}}, 49 | ]; 50 | 51 | const mr = {filtered: 0, results: [resultData.results[2]]}; 52 | const snaps: SnapParams[] = []; 53 | snaps.push(...optionsSet.map(op => { 54 | let result = ""; 55 | prettyBenchmarkResult({outputFn: (res) => result += res, ...op.options})(mr); 56 | result = result.replace(/\n$/, ''); 57 | return {content: result, imageSavePath: op.name,viewport:{width: 780}} as SnapParams 58 | })); 59 | 60 | let result = ""; 61 | prettyBenchmarkResult({outputFn: (res) => result += res})(resultData); 62 | snaps.push({content: result.replace(/\n$/, ''), imageSavePath: "docs/imgs/prettyBenchingResult_example", viewport: {width: 780}}); 63 | 64 | /* Progress */ 65 | 66 | const progressCases = [ 67 | {name: 'docs/imgs/prettyBenchingProgress_example_running', state: buildProgressState([ 68 | fakeStart(10,0), 69 | fakeBenched('runs100ForIncrementX1e6', 100, 44.2054, 0.4421), 70 | fakeBenched('for100ForIncrementX1e6', 100, 45.1917, 0.4519), 71 | fakeProgress('for100ForIncrementx1e8', 100, 54) 72 | ]), options: {}}, 73 | {name: 'docs/imgs/prettyBenchingProgress_example_finished', state: buildProgressState([ 74 | fakeStart(8,0), 75 | fakeBenched('runs100ForIncrementX1e6', 100, 44.2054, 0.4421), 76 | fakeBenched('for100ForIncrementX1e6', 100, 45.1917, 0.4519), 77 | fakeBenched('for100ForIncrementx1e8', 100, 3758.5008, 37.5850), 78 | 79 | fakeBenched('forIncrementX1e9', 1, 722.7738, 722.7738), 80 | fakeBenched('forIncrementX1e9x2', 1, 12620.5453, 12620.5453), 81 | fakeBenched('single', 1, 0.0183, 0.0183), 82 | fakeBenched('multiple', 2, 0.0513, 0.0256), 83 | fakeBenched('custom', 100, 18.9176, 0.0189), 84 | fakeFinished() 85 | ]), options: {}}, 86 | {name: 'docs/imgs/prettyBenchingProgress_example_indicators', state: buildProgressState([ 87 | fakeBenched('runs100ForIncrementX1e6', 100, 44.2054, 0.4421), 88 | fakeBenched('for100ForIncrementX1e6', 100, 45.1917, 0.4519), 89 | 90 | fakeBenched('forIncrementX1e9', 1, 722.7738, 722.7738), 91 | fakeBenched('single', 1, 0.0183, 0.0183), 92 | fakeBenched('multiple', 2, 0.0513, 0.0256), 93 | fakeBenched('custom', 100, 18.9176, 0.0189), 94 | ]), options: { 95 | indicators: [ 96 | { benches: /100/, modFn: () => colors.bgRed('#') }, 97 | { benches: /for/, modFn: () => colors.red('#') }, 98 | { benches: /multiple/, modFn: colors.cyan}, 99 | { benches: /custom/, modFn: () => colors.bgYellow(colors.black("%")) } 100 | ] 101 | } as prettyBenchmarkProgressOptions}, 102 | {name: 'docs/imgs/prettyBenchingProgress_example_threshold', state: buildProgressState([ 103 | fakeStart(5,3), 104 | fakeBenched('runs100ForIncrementX1e6', 100, 68, 0.68), 105 | fakeBenched('for100ForIncrementX1e6', 100, 108, 1.08), 106 | fakeBenched('for100ForIncrementx1e8', 100, 9056, 90.56), 107 | fakeBenched('forIncrementX1e9', 1, 848, 848), 108 | fakeBenched('forIncrementX1e9x2', 1, 14904,14904), 109 | fakeFinished() 110 | ]), options: { 111 | thresholds: { 112 | "for100ForIncrementX1e6": {green: 0.85, yellow: 1}, 113 | "for100ForIncrementX1e8": {green: 84, yellow: 93}, 114 | "forIncrementX1e9": {green: 900, yellow: 800}, 115 | "forIncrementX1e9x2": {green: 15000, yellow: 18000}, 116 | } 117 | } as prettyBenchmarkProgressOptions}, 118 | {name: 'docs/imgs/prettyBenchingHistory_progress_delta', state: buildProgressState([ 119 | fakeBenched('runs100ForIncrementX1e6', 1000, 2831.9055, 2.8319), 120 | fakeBenched('for100ForIncrementX1e6', 1000, 5687.3089, 5.6873), 121 | fakeBenched('for100ForIncrementx1e8', 1000, 2754.4350, 2.7544), 122 | ]), options: { 123 | rowExtras: deltaProgressRowExtra(history) 124 | } as prettyBenchmarkProgressOptions, viewport: {width: 1200}} 125 | ]; 126 | 127 | snaps.push(...progressCases.map(c => { 128 | let result = ""; 129 | const progressFnc = prettyBenchmarkProgress({outputFn: (res) => result += (res.indexOf('Benchmarking finished') !== -1 ? '\n\n':'') + res, ...c.options}); 130 | 131 | const states = buildProgressState(c.state); 132 | states.forEach(state => progressFnc(state)); 133 | 134 | result = result.replace(/^\x1B\[1A\r/, '').replace(/^\n/, '').replace(/\n$/, ''); //replace first up1Cl causing empty line 135 | 136 | return {content: result, imageSavePath: c.name,viewport:{width: 1000, ...c.viewport}} as SnapParams; 137 | }) as any) 138 | 139 | 140 | await snap(snaps.map(s => ({...s, imageSavePath: s.imageSavePath + postFix})), {verbose: true, viewport: {deviceScaleFactor: 1}}); 141 | } 142 | 143 | function readJsonSync(path: string) { 144 | return JSON.parse(Deno.readTextFileSync(path)); 145 | } 146 | 147 | function buildProgressState(fakeStates: any[]): BenchmarkRunProgress[] { 148 | return fakeStates.reduce((pv: any[], cv: any[]) => {return pv.concat(cv);}, []); 149 | } 150 | 151 | function fakeStart (queued: number, filtered: number) { 152 | return [{ 153 | queued: new Array(queued), 154 | results: [], 155 | filtered: filtered, 156 | state: ProgressState.BenchmarkingStart 157 | }]; 158 | } 159 | 160 | function fakeFinished () { 161 | return [{ 162 | queued: [], 163 | results: [], 164 | filtered: 0, 165 | state: ProgressState.BenchmarkingEnd 166 | }]; 167 | } 168 | 169 | function fakeBenched(name: string, runsCount: number, totalMs: number, avg: number) { 170 | return [ 171 | // ...fakeProgress(name, runsCount, 0), 172 | {queued: [],results: [{name, totalMs, runsCount, measuredRunsAvgMs: avg, measuredRunsMs:[]}],filtered: 0,state: ProgressState.BenchResult} 173 | ] 174 | }; 175 | function fakeProgress(name: string, runsCount: number, progress: number){ 176 | return [ 177 | // {state: ProgressState.BenchStart,filtered: 0,queued: [],results: [],running: {name,runsCount,measuredRunsMs: []}}, 178 | {queued: [],results: [],filtered: 0,running: {name,runsCount,measuredRunsMs: new Array(progress)},state: ProgressState.BenchPartialResult} 179 | ] 180 | }; 181 | 182 | if(import.meta.main) { 183 | generateDocsImages(); 184 | } 185 | -------------------------------------------------------------------------------- /tests/utils_test.ts: -------------------------------------------------------------------------------- 1 | import { testEach } from "./test_helpers.ts"; 2 | import { assertEquals } from "./test_deps.ts"; 3 | 4 | import { 5 | isFloat, 6 | lDiff, 7 | matchWithIndex, 8 | num, 9 | padEndVisible, 10 | padStartVisible, 11 | perc, 12 | rtime, 13 | } from "../utils.ts"; 14 | import { colors } from "../deps.ts"; 15 | 16 | testEach("utils.perc", [ 17 | { input: 0.10098, result: "0.1", desc: "should convert to 1dec precision" }, 18 | { input: 20.00098, result: "20.0", desc: "should convert to 1dec precision" }, 19 | { input: 0.263548, result: "0.3", desc: "should round up values" }, 20 | { input: 65.263548, result: "65.3", desc: "should round up values" }, 21 | { input: 99.94, result: "99.9", desc: "should only display 100% as integer" }, 22 | { input: 99.9999, result: "100", desc: "should display 100% as integer" }, 23 | { input: 99.95, result: "100", desc: "should display 100% as integer" }, 24 | ], (testCase) => { 25 | assertEquals(perc(testCase.input), testCase.result, testCase.desc); 26 | }); 27 | 28 | testEach("utils.isFloat", [ 29 | { input: 0.10098, result: true }, 30 | { input: 20.00098, result: true }, 31 | { input: 4, result: false }, 32 | { input: 65.000000, result: false }, 33 | { input: 99.000, result: false }, 34 | ], (testCase) => { 35 | assertEquals(isFloat(testCase.input), testCase.result, testCase.desc); 36 | }); 37 | 38 | testEach<{ value: number; from?: number }, string>("utils.rtime", [ 39 | { input: { value: 0.121244 }, result: "0.1212" }, 40 | { input: { value: 10.121244 }, result: "10.121" }, 41 | { input: { value: 100.121244 }, result: "100.12" }, 42 | { input: { value: 1000.121244 }, result: "1000.1" }, 43 | { input: { value: 10000.121244 }, result: "10000" }, 44 | { input: { value: 100000.121244 }, result: "100000" }, 45 | 46 | { input: { value: 0.121244, from: 2 }, result: "0.1212" }, 47 | { input: { value: 10.121244, from: 2 }, result: "10.1212" }, 48 | { input: { value: 100.121244, from: 2 }, result: "100.1212" }, 49 | { input: { value: 1000.121244, from: 2 }, result: "1000.121" }, 50 | { input: { value: 10000.121244, from: 2 }, result: "10000.12" }, 51 | { input: { value: 100000.121244, from: 2 }, result: "100000.1" }, 52 | { input: { value: 1000000.121244, from: 2 }, result: "1000000" }, 53 | ], (testCase) => { 54 | assertEquals( 55 | rtime(testCase.input.value, testCase.input.from), 56 | testCase.result, 57 | testCase.desc, 58 | ); 59 | }); 60 | 61 | testEach<{ str: string; regexp: RegExp }, number[]>("utils.matchWithIndex", [ 62 | { 63 | input: { str: "abababa", regexp: /b/ }, 64 | exception: { 65 | msg: 66 | "Too many matches. Something bad with the regexp. Did you forgot the global? / /g", 67 | }, 68 | desc: "should throw exception before infinity loop", 69 | }, 70 | { input: { str: "abababa", regexp: /b/g }, result: [1, 3, 5] }, 71 | { input: { str: "abababa", regexp: /a/g }, result: [0, 2, 4, 6] }, 72 | { input: { str: "ababcac", regexp: /ab/g }, result: [0, 2] }, 73 | // TODO something wrong? { input: {str: 'aaabcac', regexp: /aa/g}, result: [0,1]}, 74 | // { input: {str: '', regexp: / /}, result: []}, 75 | ], (testCase) => { 76 | assertEquals( 77 | matchWithIndex(testCase.input.str, testCase.input.regexp), 78 | testCase.result, 79 | testCase.desc, 80 | ); 81 | }); 82 | 83 | testEach("utils.num", [ 84 | { input: 0.10098, result: "0.1010" }, 85 | { input: 0.111, result: "0.1110" }, 86 | { input: 111, result: "111" }, 87 | { input: 111.000, result: "111" }, 88 | { input: 111.00009, result: "111.0001" }, 89 | ], (testCase) => { 90 | assertEquals(num(testCase.input), testCase.result, testCase.desc); 91 | }); 92 | 93 | testEach("utils.lDiff", [ 94 | { input: "[", result: 0 }, 95 | { input: "[[", result: 0 }, 96 | { input: ".[", result: 0 }, 97 | { input: colors.red(""), result: 10 }, 98 | { input: colors.red("test"), result: 10 }, 99 | { input: colors.blue(colors.red("")), result: 20 }, 100 | { input: colors.blue(colors.red("test")), result: 20 }, 101 | 102 | { input: colors.blue("another" + colors.red("test")), result: 20 }, 103 | { 104 | input: colors.blue(colors.green("green") + colors.red("test")), 105 | result: 30, 106 | }, 107 | { 108 | input: colors.blue(colors.green("green") + "and some" + colors.red("test")), 109 | result: 30, 110 | }, 111 | 112 | { input: "#", result: 0 }, 113 | { 114 | input: "⚗", 115 | result: 0, 116 | desc: "some chars are icons without any extra char", 117 | }, 118 | { 119 | input: "⚗\uFE0E", 120 | result: 0, 121 | desc: "should calc extra char so it behaves like emojis", 122 | }, 123 | { input: "⚗\uFE0E⚗\uFE0E", result: 0 }, 124 | { input: "‼️", result: 0, desc: "this has \uFE0E hidden char too." }, 125 | { input: "\u{1F9EA}", result: 0 }, 126 | { input: "🧪⚗️🈴🚀🦕⚗", result: 0 }, 127 | { input: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E", result: 0 }, 128 | { input: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E⚗\uFE0E", result: 0 }, 129 | { input: colors.green("⚗\uFE0E"), result: 10 }, 130 | { input: colors.green("‼️‼️"), result: 10 }, 131 | { input: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), result: 10 }, 132 | { input: colors.green("⚗\uFE0E ") + colors.red(" ⚗\uFE0E"), result: 20 }, 133 | { 134 | input: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E") + "-⚗\uFE0E-" + 135 | colors.blue("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), 136 | result: 20, 137 | }, 138 | ], (testCase) => { 139 | assertEquals(lDiff(testCase.input), testCase.result /* , testCase.desc */); 140 | }); 141 | 142 | testEach<{ str: string; to: number }, string>("utils.padStartVisible", [ 143 | { input: { str: "", to: 4 }, result: "...." }, 144 | { input: { str: "#", to: 4 }, result: "...#" }, 145 | { input: { str: "⚗", to: 4 }, result: "...⚗" }, 146 | { input: { str: "⚗\uFE0E", to: 4 }, result: "..⚗\uFE0E" }, 147 | { input: { str: "⚗\uFE0E⚗\uFE0E", to: 4 }, result: "⚗\uFE0E⚗\uFE0E" }, 148 | { input: { str: "‼️", to: 4 }, result: "..‼️" }, 149 | { input: { str: "\u{1F9EA}", to: 4 }, result: "..\u{1F9EA}" }, // 6 150 | { input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗", to: 9 }, result: "\u{1F9EA}⚗️🈴🚀🦕⚗" }, 151 | { input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗", to: 15 }, result: "....\u{1F9EA}⚗️🈴🚀🦕⚗" }, 152 | { 153 | input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E", to: 15 }, 154 | result: "...\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E", 155 | }, 156 | { 157 | input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E⚗\uFE0E", to: 15 }, 158 | result: ".\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E⚗\uFE0E", 159 | }, 160 | { 161 | input: { str: colors.green("⚗\uFE0E"), to: 4 }, 162 | result: ".." + colors.green("⚗\uFE0E"), 163 | }, // 11 164 | { 165 | input: { str: colors.green("‼️‼️"), to: 5 }, 166 | result: "." + colors.green("‼️‼️"), 167 | }, 168 | { 169 | input: { str: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), to: 15 }, 170 | result: "..." + colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), 171 | }, 172 | { 173 | input: { str: colors.green("⚗\uFE0E_") + colors.red(".⚗\uFE0E"), to: 7 }, 174 | result: "." + colors.green("⚗\uFE0E_") + colors.red(".⚗\uFE0E"), 175 | }, 176 | { 177 | input: { 178 | str: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E") + "-⚗\uFE0E-" + 179 | colors.blue("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), 180 | to: 31, 181 | }, 182 | result: "..." + colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E") + "-⚗\uFE0E-" + 183 | colors.blue("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), 184 | }, 185 | ], (testCase) => { 186 | assertEquals( 187 | padStartVisible(testCase.input.str, testCase.input.to, "."), 188 | testCase.result, 189 | testCase.desc, 190 | ); 191 | }); 192 | 193 | testEach<{ str: string; to: number }, string>("utils.padEndVisible", [ 194 | { input: { str: "", to: 4 }, result: "...." }, 195 | { input: { str: "%", to: 4 }, result: "%..." }, 196 | { 197 | input: { str: Array(8).fill("=").join(""), to: 11 }, 198 | result: "========...", 199 | }, 200 | { 201 | input: { str: colors.green(Array(8).fill("=").join("")), to: 11 }, 202 | result: colors.green("========") + "...", 203 | }, 204 | { input: { str: "#", to: 4 }, result: "#..." }, 205 | { input: { str: "⚗", to: 4 }, result: "⚗..." }, 206 | { input: { str: "⚗\uFE0E", to: 4 }, result: "⚗\uFE0E.." }, // 6 207 | { input: { str: "⚗\uFE0E⚗\uFE0E", to: 5 }, result: "⚗\uFE0E⚗\uFE0E." }, 208 | { input: { str: "‼️", to: 4 }, result: "‼️.." }, 209 | { input: { str: "\u{1F9EA}", to: 4 }, result: "\u{1F9EA}.." }, 210 | { input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗", to: 9 }, result: "\u{1F9EA}⚗️🈴🚀🦕⚗" }, 211 | { input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗", to: 15 }, result: "\u{1F9EA}⚗️🈴🚀🦕⚗...." }, 212 | { 213 | input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E", to: 15 }, 214 | result: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E...", 215 | }, 216 | { 217 | input: { str: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E⚗\uFE0E", to: 15 }, 218 | result: "\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E⚗\uFE0E.", 219 | }, 220 | { 221 | input: { str: colors.green("⚗\uFE0E"), to: 4 }, 222 | result: colors.green("⚗\uFE0E") + "..", 223 | }, // 14 224 | { 225 | input: { str: colors.green("‼️‼️"), to: 5 }, 226 | result: colors.green("‼️‼️") + ".", 227 | }, 228 | { 229 | input: { str: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), to: 15 }, 230 | result: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E") + "...", 231 | }, 232 | { 233 | input: { str: colors.green("⚗\uFE0E_") + colors.red(".⚗\uFE0E"), to: 7 }, 234 | result: colors.green("⚗\uFE0E_") + colors.red(".⚗\uFE0E") + ".", 235 | }, 236 | { 237 | input: { 238 | str: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E") + "-⚗\uFE0E-" + 239 | colors.blue("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E"), 240 | to: 31, 241 | }, 242 | result: colors.green("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E") + "-⚗\uFE0E-" + 243 | colors.blue("\u{1F9EA}⚗️🈴🚀🦕⚗\uFE0E") + "...", 244 | }, 245 | ], (testCase) => { 246 | assertEquals( 247 | padEndVisible(testCase.input.str, testCase.input.to, "."), 248 | testCase.result, 249 | testCase.desc, 250 | ); 251 | }); 252 | -------------------------------------------------------------------------------- /pretty_benchmark_down.ts: -------------------------------------------------------------------------------- 1 | import { 2 | calculateExtraMetrics, 3 | calculateStdDeviation, 4 | getInThresholdRange, 5 | getPaddedIndicator, 6 | stripColor, 7 | } from "./common.ts"; 8 | import type { BenchmarkResult, BenchmarkRunResult } from "./deps.ts"; 9 | import type { BenchIndicator, Thresholds } from "./types.ts"; 10 | 11 | /** Defines how the generated markdown should look like. */ 12 | export interface prettyBenchmarkDownOptions { 13 | /** Defines a `# title` for the markdown */ 14 | title?: string; 15 | /** Defines a section right after the `title`. When a `function` is provided, it receives the run's results */ 16 | description?: string | ((results: BenchmarkRunResult) => string); 17 | /** Defines a section at the end of the markdown. When a `function` is provided, it receives the run's results */ 18 | afterTables?: string | ((results: BenchmarkRunResult) => string); 19 | /** Defines `groups`, into which the benchmarks will be groupped. 20 | * Any benchmark result, that wasn't added into any group will be collected into one table called `Ungroupped`. 21 | * One benchmark can be in multiple groups. */ 22 | groups?: GroupDefinition[]; 23 | /** Defines the columns of the markdown tables. */ 24 | columns?: ColumnDefinition[]; 25 | } 26 | 27 | /** Defines one column of the markdown table. */ 28 | export interface ColumnDefinition { 29 | /** Defines the title of the column */ 30 | title: string; 31 | /** Defines which property of the `BenchmarkResult` should be displayed, if no `formatter` is defined. 32 | * 33 | * *Note*: custom `propertyKey`-s can be used, but values has to be manually mapped onto each `BenchmarkResult`. */ 34 | propertyKey?: string; 35 | /** Defines how the column should be aligned. Defaults to `center` */ 36 | align?: "left" | "center" | "right"; 37 | /** Calls `number.toFixed(x)` with this value, when defined and the cell value is a `number`. 38 | * 39 | * Also used on `formatter` output values. */ 40 | toFixed?: number; 41 | /** Allows to calculate custom cell values based on the `BenchmarkResult`, and its own `ColumnDefinition`. 42 | * 43 | * The value will be `-` for falsy values, and `*` when no `propertyKey` and `formatter` was provided 44 | * 45 | * Its favoured above `propertyKey`, when both is defined.*/ 46 | formatter?: (result: BenchmarkResult, columnDef: ColumnDefinition) => string; 47 | } 48 | 49 | /** Defines a group in the markdown. */ 50 | export interface GroupDefinition { 51 | /** Collects the benchmarks into the group, which `name` mathes the `RegExp` */ 52 | include: RegExp; 53 | /** Defines the name of the group, which will be a `## Title` in the markdown */ 54 | name: string; 55 | /** Defines the columns of the markdown table in the specific group. Overrides root and default column options. */ 56 | columns?: ColumnDefinition[]; 57 | /** Defines a section right after the `group title`. When a `function` is provided, it receives the run's results 58 | * for the benchmarks in the group, the group's definition and the overall benchmark results. */ 59 | description?: 60 | | string 61 | | (( 62 | groupResults: BenchmarkResult[], 63 | group: GroupDefinition, 64 | runResults: BenchmarkRunResult, 65 | ) => string); 66 | /** Defines a section at the end of the `group`. When a `function` is provided, it receives the run's results 67 | * for the benchmarks in the group, the group's definition and the overall benchmark results. */ 68 | afterTable?: 69 | | string 70 | | (( 71 | groupResults: BenchmarkResult[], 72 | group: GroupDefinition, 73 | runResults: BenchmarkRunResult, 74 | ) => string); 75 | } 76 | 77 | /** Returns a function that expects a `BenchmarkRunResult`, which than prints 78 | * the results in a nicely formatted `markdown`, based on the provided `options`. 79 | * 80 | * Without `options`, one markdown table will be generated, containing all the bench results. 81 | * 82 | * Typical basic usage: 83 | * 84 | * ```ts 85 | * // add benches, then 86 | * runBenchmarks().then(prettyBenchmarkDown(console.log)); 87 | * // or write to file 88 | * runBenchmarks().then(prettyBenchmarkDown((markdown: string) => { Deno.writeTextFileSync("./benchmark.md", markdown); }); 89 | * ``` 90 | * . 91 | */ 92 | export function prettyBenchmarkDown( 93 | /** Defines the output function which will be called with the generated string markdown output.*/ 94 | outputFn: (out: string) => void, 95 | /** Defines how the output should look like */ 96 | options?: prettyBenchmarkDownOptions, 97 | ) { 98 | return (result: BenchmarkRunResult) => 99 | _prettyBenchmarkDown(result, outputFn, options); 100 | } 101 | 102 | function _prettyBenchmarkDown( 103 | runResult: BenchmarkRunResult, 104 | outputFn: (out: string) => void, 105 | options?: prettyBenchmarkDownOptions, 106 | ) { 107 | let markdown = ""; 108 | 109 | if (options?.title) { 110 | markdown += `# ${options.title}\n\n`; 111 | } 112 | 113 | markdown += stringOrFunction(options?.description, runResult) + "\n"; 114 | 115 | if (options?.groups && options.groups.length > 0) { 116 | const grouppedResults: { 117 | [key: string]: GroupDefinition & { items: BenchmarkResult[] }; 118 | } = {}; 119 | const unmatched: GroupDefinition & { items: BenchmarkResult[] } = { 120 | name: "Ungrouped benches", 121 | items: [], 122 | // deno-lint-ignore no-explicit-any 123 | } as any; 124 | 125 | runResult.results.forEach((r) => { 126 | let matched = false; 127 | options.groups?.forEach((g, i) => { 128 | if (r.name.match(g.include)) { 129 | const groupProp = `${g.name}_${i}`; 130 | 131 | if (!grouppedResults[groupProp]) { 132 | grouppedResults[groupProp] = { ...g, items: [] }; 133 | } 134 | 135 | grouppedResults[groupProp].items.push(r); 136 | 137 | matched = true; 138 | } 139 | }); 140 | 141 | if (!matched) { 142 | if ( 143 | !unmatched.items.some((i: BenchmarkResult) => 144 | i.name === r.name && i.totalMs === r.totalMs 145 | ) 146 | ) { 147 | // if isnt already added, add to unmatched group 148 | unmatched.items.push(r); 149 | } 150 | } 151 | }); 152 | 153 | grouppedResults[`${unmatched.name}_${options.groups.length}`] = unmatched; 154 | 155 | const optionsGroup = [...options.groups]; 156 | if (unmatched.items.length > 0) { 157 | optionsGroup.push(unmatched); 158 | } 159 | 160 | optionsGroup.forEach((g, i) => { // to keep order works from options. 161 | markdown += `## ${g.name}\n\n`; 162 | 163 | const resultGroup = grouppedResults[`${g.name}_${i}`]; 164 | 165 | markdown += stringOrFunction( 166 | g.description, 167 | resultGroup?.items || [], 168 | g, 169 | runResult, 170 | ); 171 | 172 | if (resultGroup) { 173 | markdown += headerRow(options, g); 174 | resultGroup.items.forEach((r: BenchmarkResult) => { 175 | markdown += tableRow(r, options, g); 176 | }); 177 | } else { 178 | markdown += "> No benchmarks in this group.\n"; 179 | } 180 | 181 | markdown += "\n"; 182 | 183 | markdown += stringOrFunction( 184 | g.afterTable, 185 | resultGroup?.items || [], 186 | g, 187 | runResult, 188 | ) + "\n"; 189 | }); 190 | } else { 191 | markdown += headerRow(options); 192 | runResult.results.forEach((r) => { 193 | markdown += tableRow(r, options); 194 | }); 195 | markdown += "\n"; 196 | } 197 | 198 | markdown += stringOrFunction(options?.afterTables, runResult) + "\n"; 199 | 200 | outputFn(markdown); 201 | 202 | return runResult; 203 | } 204 | 205 | const defaultColumnsArray: ColumnDefinition[] = [ 206 | { title: "Name", propertyKey: "name", align: "left" }, 207 | { title: "Runs", propertyKey: "runsCount", align: "right" }, 208 | { title: "Total (ms)", propertyKey: "totalMs", align: "right", toFixed: 3 }, 209 | { 210 | title: "Average (ms)", 211 | propertyKey: "measuredRunsAvgMs", 212 | align: "right", 213 | toFixed: 3, 214 | }, 215 | ]; 216 | 217 | /** Defines the default `ColumnDefinitions`, which are `Name`, `Runs`, `Total (ms)` and `Average (ms)` */ 218 | export function defaultColumns( 219 | columns?: ("name" | "runsCount" | "totalMs" | "measuredRunsAvgMs")[], 220 | ): ColumnDefinition[] { 221 | if (columns) { 222 | return [...defaultColumnsArray].filter((dc) => 223 | (columns as string[]).indexOf(dc.propertyKey!) !== -1 224 | ); 225 | } else { 226 | return [...defaultColumnsArray]; 227 | } 228 | } 229 | 230 | /** Defines a column which contains the indicators for the benchmarks, where provided. 231 | * 232 | * *Note*: colors are stripped from the indicators in markdown */ 233 | export function indicatorColumn( 234 | indicators: BenchIndicator[], 235 | ): ColumnDefinition { 236 | return { 237 | title: "", 238 | formatter: (result: BenchmarkResult, cd: ColumnDefinition) => { 239 | return stripColor(getPaddedIndicator(result.name, 0, indicators)); 240 | }, 241 | }; 242 | } 243 | 244 | /** Defines a threshold result column, which shows into which range the benchmark fell. Shows `-` when no `Threshold` was provided for the given benchmark. */ 245 | export function thresholdResultColumn(thresholds: Thresholds) { 246 | return { 247 | title: "", 248 | formatter: (result: BenchmarkResult, cd: ColumnDefinition) => { 249 | const inRange = getInThresholdRange( 250 | result.name, 251 | result.measuredRunsAvgMs, 252 | thresholds, 253 | ); 254 | 255 | return ["-", "✅", "🔶", "🔴"][inRange || 0]; 256 | }, 257 | }; 258 | } 259 | 260 | /** Defines a threshold result column, which shows the threshold ranges for the benchmark. Shows `-` when no `Threshold` was provided for the given benchmark. 261 | * 262 | * If `indicateResult` is set, it shows in the same cell into which range the benchmark fell.*/ 263 | export function thresholdsColumn( 264 | thresholds: Thresholds, 265 | indicateResult?: boolean, 266 | ) { 267 | return { 268 | title: "Thresholds", 269 | align: "right", 270 | formatter: (result: BenchmarkResult, cd: ColumnDefinition) => { 271 | let value = ""; 272 | const inRange = getInThresholdRange( 273 | result.name, 274 | result.measuredRunsAvgMs, 275 | thresholds, 276 | ); 277 | const th = thresholds && thresholds[result.name]; 278 | 279 | if (!th) { 280 | return "-"; 281 | } 282 | 283 | const indicator = " 🠴"; 284 | const placeholder = " "; 285 | 286 | value += `<= ${th.green} ✅` + 287 | (indicateResult ? (inRange === 1 ? indicator : placeholder) : "") + 288 | "
"; 289 | value += `<= ${th.yellow} 🔶` + 290 | (indicateResult ? (inRange === 2 ? indicator : placeholder) : "") + 291 | "
"; 292 | value += ` > ${th.yellow} 🔴` + 293 | (indicateResult ? (inRange === 3 ? indicator : placeholder) : ""); 294 | 295 | value += "
"; 296 | 297 | return value; 298 | }, 299 | }; 300 | } 301 | 302 | /** Defines **multiple** columns, which contain extra calculated values, like `max`, `min`, `mean`, `median`, `stdDeviation`. 303 | * 304 | * Can be used like: 305 | * ```ts 306 | * columns: [...extraMetricsColumns()] 307 | * ``` 308 | * . */ 309 | export function extraMetricsColumns( 310 | options?: { 311 | /** Defines which metrics it should include */ 312 | metrics?: ("max" | "min" | "mean" | "median" | "stdDeviation")[]; 313 | /** If set, `-` will be placed into cells, where the benchmark was only run once. */ 314 | ignoreSingleRuns?: boolean; 315 | }, 316 | ): ColumnDefinition[] { 317 | const columns: ColumnDefinition[] = []; 318 | 319 | const selected = options?.metrics || 320 | ["min", "max", "mean", "median", "stdDeviation"]; 321 | 322 | selected.forEach((s) => { 323 | if (s === "stdDeviation") { 324 | columns.push({ 325 | title: "Std deviation", 326 | align: "right", 327 | toFixed: 3, 328 | formatter: (result: BenchmarkResult, cd: ColumnDefinition) => { 329 | if (options?.ignoreSingleRuns && result.runsCount === 1) { 330 | return "-"; 331 | } 332 | 333 | const calced = calculateStdDeviation(result); 334 | return `${calced}`; 335 | }, 336 | }); 337 | } else { 338 | columns.push({ 339 | title: s.charAt(0).toUpperCase() + s.slice(1), // capitalise 340 | align: "right", 341 | toFixed: 3, 342 | formatter: (result: BenchmarkResult, cd: ColumnDefinition) => { 343 | if (options?.ignoreSingleRuns && result.runsCount === 1) { 344 | return "-"; 345 | } 346 | 347 | const calced = calculateExtraMetrics(result); 348 | return `${calced[s]}`; 349 | }, 350 | }); 351 | } 352 | }); 353 | 354 | return columns; 355 | } 356 | 357 | function stringOrFunction( 358 | // deno-lint-ignore no-explicit-any 359 | value?: ((...params: any[]) => string) | string, 360 | // deno-lint-ignore no-explicit-any 361 | ...params: any[] 362 | ) { 363 | if (!value) { 364 | return ""; 365 | } 366 | 367 | if (typeof value === "function") { 368 | return `${value(...params)}\n`; 369 | } else { 370 | return `${value}\n`; 371 | } 372 | } 373 | 374 | function headerRow( 375 | options?: prettyBenchmarkDownOptions, 376 | group?: GroupDefinition, 377 | ) { 378 | let titles = "|"; 379 | let alignments = "|"; 380 | 381 | const columns: ColumnDefinition[] = group?.columns || options?.columns || 382 | defaultColumns(); 383 | 384 | columns.forEach((c) => { 385 | titles += `${c.title}|`; 386 | alignments += `${alignment(c.align)}|`; 387 | }); 388 | 389 | return `${titles}\n${alignments}\n`; 390 | } 391 | 392 | function tableRow( 393 | result: BenchmarkResult, 394 | options?: prettyBenchmarkDownOptions, 395 | group?: GroupDefinition, 396 | ) { 397 | let values = `|`; 398 | 399 | const columns: ColumnDefinition[] = group?.columns || options?.columns || 400 | defaultColumns(); 401 | 402 | columns.forEach((c) => { 403 | let value = null; 404 | if (typeof c.formatter === "function") { 405 | value = `${c.formatter(result, c)}`; 406 | } else { 407 | if (!c.propertyKey) { 408 | value = "*"; // this means no formatter function and no propertyKey was defined. 409 | } else { 410 | // deno-lint-ignore no-explicit-any 411 | const vc = (result as any)[c.propertyKey]; 412 | value = typeof vc !== "undefined" ? vc : "-"; 413 | } 414 | } 415 | 416 | if (!isNaN(parseFloat(value)) && !isNaN(c.toFixed!)) { 417 | value = parseFloat(value).toFixed(c.toFixed); 418 | } 419 | 420 | values += `${value}|`; 421 | }); 422 | 423 | return `${values}\n`; 424 | } 425 | 426 | function alignment(mode?: "left" | "center" | "right") { 427 | if (mode === "right") { 428 | return "--:"; 429 | } 430 | if (!mode || mode === "center") { 431 | return ":-:"; 432 | } 433 | if (mode === "left") { 434 | return ":--"; 435 | } 436 | } 437 | -------------------------------------------------------------------------------- /pretty_benchmark_history.ts: -------------------------------------------------------------------------------- 1 | import type { BenchmarkResult, BenchmarkRunResult } from "./deps.ts"; 2 | import type { Threshold, Thresholds } from "./types.ts"; 3 | 4 | /** Defines the rules on what and how the history should contain. 5 | * 6 | * @template T The type that is calculated with `benchExtras` function and stored in each benchmarks' `extras`. 7 | * @template K The type that is calculated with `runExtras` function and stored in each runs' `runExtras`. 8 | */ 9 | export interface prettyBenchmarkHistoryOptions { 10 | /** Allow storing low precision measurements */ 11 | easeOnlyHrTime?: boolean; 12 | /** Turns on strict mode. Setting it to boolean `true` gives the same result as setting each rule to `true` in the rules `object`. */ 13 | strict?: boolean | strictHistoryRules; 14 | /** Throw an error, when **any** benchmark has lower runsCount than the set value. */ 15 | minRequiredRuns?: number; 16 | /** Saves the `measuredRunsMs` array for each benchmark. 17 | * 18 | * **WARNING** this could result in a very big history file overtime. 19 | * 20 | * Consider calculating necessary values before save instead with `benchExtras` or `runExtras`.*/ 21 | saveIndividualRuns?: boolean; 22 | /** Saves the returned `object` for each benchmark into it's `extras` property. */ 23 | benchExtras?: (result: BenchmarkResult) => T; 24 | /** Saves the returned `object` for each run into it's `runExtras` property. */ 25 | runExtras?: (runResult: BenchmarkRunResult) => K; 26 | } 27 | 28 | /** Defines which strict rules to use. */ 29 | export type strictHistoryRules = { 30 | /** Throw an error, when previously saved benchmark is missing from the current set when calling `addResults`. Ignored on the very first set of benchmarks. */ 31 | noRemoval?: boolean; 32 | /** Throw an error, when previously not saved benchmark is added to the current set when calling `addResults`. Ignored on the very first set of benchmarks. */ 33 | noAddition?: boolean; 34 | /** Throw an error, when the `runsCount` changes for a benchmark from the previous run's `runsCount`. Ignored on new benchmarks. */ 35 | noRunsCountChange?: boolean; 36 | }; 37 | 38 | /** Represents the stored historic benchmark data 39 | * 40 | * @template T The type that is calculated with `benchExtras` function and stored in each benchmarks' `extras`. 41 | * @template K The type that is calculated with `runExtras` function and stored in each runs' `runExtras`. 42 | */ 43 | export interface BenchmarkHistory { 44 | /** The individual runs' values */ 45 | history: BenchmarkHistoryItem[]; 46 | } 47 | 48 | /** Represents the results of one `runBenchmarks` run. 49 | * 50 | * @template T The type that is calculated with `benchExtras` function and stored in each benchmarks' `extras`. 51 | * @template K The type that is calculated with `runExtras` function and stored in each runs' `runExtras`. 52 | */ 53 | export interface BenchmarkHistoryItem { 54 | /** The date of the measurement */ 55 | date: string; 56 | /** User provided identifier for the run */ 57 | id?: string; 58 | /** The object calculated by `runExtras` function if provided in the options */ 59 | runExtras?: K; 60 | 61 | /** The individual benchmarks' results for the specific run. */ 62 | benchmarks: { 63 | [key: string]: BenchmarkHistoryRunItem; 64 | }; 65 | } 66 | 67 | /** Represents the results of one benchmark's single run. 68 | * 69 | * @template T The type that is calculated with `benchExtras` function and stored in each benchmarks' `extras`. 70 | */ 71 | export interface BenchmarkHistoryRunItem { 72 | /** The total time it took to run a given bechmark */ 73 | totalMs: number; 74 | /** Times the benchmark was run in succession. */ 75 | runsCount: number; 76 | /** The average time of running the benchmark in milliseconds. */ 77 | measuredRunsAvgMs: number; 78 | /** The individual measurements in milliseconds it took to run the benchmark. 79 | * 80 | * Gets saved only, when `saveIndividualRuns` is set in the options. */ 81 | measuredRunsMs?: number[]; 82 | /** The object calculated by `benchExtras` function if provided in the options. */ 83 | extras?: T; 84 | } 85 | 86 | /** Represent the change in a variable's value. */ 87 | export interface Delta { 88 | /** The change in percents. */ 89 | percent: number; 90 | /** The actual change */ 91 | amount: number; 92 | } 93 | 94 | export type DeltaKey = (keyof T | "measuredRunsAvgMs" | "totalMs"); 95 | 96 | /** Handles and enforces the set rules on the historic benchmarking data. 97 | * 98 | * Typical usage: 99 | * ```ts 100 | * // add benches, then 101 | * 102 | * let historicData; 103 | * try { 104 | * historicData = JSON.parse(Deno.readTextFileSync("./benchmarks/history.json")); 105 | * } catch { 106 | * // Decide whether you want to proceed with no history 107 | * console.warn("⚠ cant read history file"); 108 | * } 109 | * 110 | * const history = new prettyBenchmarkHistory(historicData, { 111 | * //options 112 | * }); 113 | * 114 | * runBenchmarks().then((results: BenchmarkRunResult) => { 115 | * history.addResults(results); 116 | * Deno.writeTextFileSync("./benchmarks/history.json", history.getDataString()); 117 | * }); 118 | * ``` 119 | * 120 | * **Note** 121 | * 122 | * The saving and loading of the generated data is the user's responsibility, this class is not doing any file handling. See examples for more info. */ 123 | export class prettyBenchmarkHistory { 124 | private data!: BenchmarkHistory; 125 | private options?: prettyBenchmarkHistoryOptions; 126 | 127 | constructor( 128 | /** The previously saved historic data. */ 129 | previousData?: BenchmarkHistory, 130 | options?: prettyBenchmarkHistoryOptions< 131 | T, 132 | K 133 | >, 134 | ) { 135 | this.options = options; 136 | 137 | if (previousData) { 138 | this.load(previousData); 139 | } else { 140 | this.init(); 141 | } 142 | } 143 | 144 | private init() { 145 | this.data = { history: [] }; 146 | } 147 | 148 | private load(previousData: BenchmarkHistory) { 149 | this.data = previousData; 150 | } 151 | 152 | /** Stores the run's result into the historic data, enforces all set rules on the results. */ 153 | addResults(runResults: BenchmarkRunResult, options?: { 154 | /** Helps to identify the specific run, besides the date.*/ 155 | id?: string; 156 | /** Overrides the current date */ 157 | date?: Date | string; 158 | }) { 159 | const date: string = options?.date 160 | ? (typeof options.date === "string" 161 | ? options.date 162 | : options.date.toISOString()) 163 | : new Date().toISOString(); 164 | 165 | const duplicateNames = runResults.results.filter((r) => 166 | runResults.results.filter((rc) => rc.name === r.name).length > 1 167 | ); 168 | if (duplicateNames.length !== 0) { 169 | throw new Error( 170 | `Names must be unique to be able to store them. Colliding names: [${ 171 | [...new Set(duplicateNames.map((b) => b.name)).values()].join(", ") 172 | }].`, 173 | ); 174 | } 175 | 176 | if (this.options?.minRequiredRuns) { 177 | const notEnoughRuns = runResults.results.filter((r) => 178 | r.runsCount < this.options?.minRequiredRuns! || 179 | r.measuredRunsMs.length < this.options?.minRequiredRuns! 180 | ); 181 | if (notEnoughRuns.length !== 0) { 182 | throw new Error( 183 | `Minimum required runs (${this.options 184 | ?.minRequiredRuns}) was not fullfilled by benchmarks: [${ 185 | notEnoughRuns.map((r) => `"${r.name}" (${r.runsCount})`).join(", ") 186 | }]. The minimum required runs can be set with 'minRequiredRuns' option.`, 187 | ); 188 | } 189 | } 190 | 191 | if (!this.options?.easeOnlyHrTime) { 192 | const isHrTime = (ms: number) => ms % 1 !== 0; 193 | if (runResults.results.some((r) => !isHrTime(r.totalMs))) { // TODO consider: check on a subset of measurements too. 194 | throw new Error( 195 | `Seems like you are trying to add results, that were measured without the --allow-hrtime flag. You can bypass this check with the 'easeOnlyHrTime' option.`, 196 | ); 197 | } 198 | } 199 | 200 | if (this.options?.strict) { 201 | const strictIsBooleanTrue = typeof this.options?.strict === "boolean" && 202 | this.options?.strict; 203 | 204 | const hasDataAlready = Object.keys(this.data.history).length !== 0; 205 | if (hasDataAlready) { // strict has no effect on first set of results. 206 | const errors = []; 207 | 208 | const prevBenchmarks = this.getBenchmarkNames(); 209 | 210 | prevBenchmarks.forEach((pb) => { 211 | const benchInResults = runResults.results.find((r) => r.name === pb); 212 | if ( 213 | strictIsBooleanTrue || 214 | (this.options?.strict as strictHistoryRules).noRemoval 215 | ) { 216 | if (!benchInResults) { 217 | errors.push( 218 | `Missing benchmark named "${pb}" in current results. Set 'strict' or 'strict.noRemoval' option to false to bypass this check.`, 219 | ); 220 | } 221 | } 222 | 223 | if ( 224 | strictIsBooleanTrue || 225 | (this.options?.strict as strictHistoryRules).noRunsCountChange 226 | ) { 227 | const prevRuns = this.data.history.filter((h) => h.benchmarks[pb]); 228 | if (benchInResults && prevRuns.length > 0) { 229 | const lastRun = prevRuns.reverse()[0].benchmarks[pb]; 230 | if (lastRun.runsCount !== benchInResults.runsCount) { 231 | errors.push( 232 | `Runs count of benchmark "${pb}" (${benchInResults.runsCount}) doesnt match the previous runs count (${lastRun.runsCount}). Set 'strict' or 'strict.noRunsCountChange' option to false to bypass this check.`, 233 | ); 234 | } 235 | } 236 | } 237 | }); 238 | 239 | if ( 240 | strictIsBooleanTrue || 241 | (this.options?.strict as strictHistoryRules).noAddition 242 | ) { 243 | const newBenches = runResults.results.filter((r) => 244 | prevBenchmarks.indexOf(r.name) === -1 245 | ); 246 | if (newBenches.length !== 0) { 247 | errors.push( 248 | `Adding new benches is not allowed after the initial set of benchmarks. New benches: [${ 249 | newBenches.map((b) => b.name) 250 | }]. Set 'strict' or 'strict.noAddition' option to false to bypass this check.`, 251 | ); 252 | } 253 | } 254 | 255 | // TODO consider: checking changes in extras 256 | 257 | if (errors.length !== 0) { 258 | throw new Error( 259 | `Errors while trying to add new results to history: \n${ 260 | errors.join("\n") 261 | }`, 262 | ); 263 | } 264 | } 265 | } 266 | 267 | const benchmarks: { [key: string]: BenchmarkHistoryRunItem } = {}; 268 | runResults.results.forEach((r) => { 269 | benchmarks[r.name] = { 270 | measuredRunsAvgMs: r.measuredRunsAvgMs, 271 | runsCount: r.runsCount, 272 | totalMs: r.totalMs, 273 | measuredRunsMs: this.options?.saveIndividualRuns 274 | ? r.measuredRunsMs 275 | : undefined, 276 | extras: this.options?.benchExtras && this.options.benchExtras(r), 277 | }; 278 | }); 279 | 280 | this.data.history.push({ 281 | date: date, 282 | id: options?.id, 283 | runExtras: this.options?.runExtras && this.options.runExtras(runResults), 284 | benchmarks: benchmarks, 285 | }); 286 | 287 | // TODO! cant initiate date if a different dateformat is used in the string 288 | this.data.history = this.data.history.sort((a, b) => { 289 | return new Date(a.date).getTime() - new Date(b.date).getTime(); 290 | }); 291 | 292 | return this; 293 | } 294 | 295 | /** Calculates `deltas` for each benchmark in the provided `BenchmarkRunResult` for each provided property key. 296 | * 297 | * Keys are either `measuredRunsAvgMs`, `totalMs` or point to `number` properties of the calculated `extras`. 298 | * Error is thrown, when a key points to a non number property. 299 | * No delta is calculated for key's which are not present in the `extras` 300 | * 301 | * Returns `false` for a given benchmark when there is no history for it. */ 302 | getDeltasFrom( 303 | results: BenchmarkRunResult, 304 | keys: DeltaKey[] = ["measuredRunsAvgMs", "totalMs"], 305 | ): { [key: string]: { [key: string]: Delta } } { 306 | const deltas: { [key: string]: { [key: string]: Delta } } = {}; 307 | 308 | results.results.forEach((r) => { 309 | const d = this.getDeltaForBenchmark(r, keys); 310 | if (d) { 311 | deltas[r.name] = d; 312 | } 313 | }); 314 | 315 | return deltas; 316 | } 317 | 318 | /** Calculates `deltas` for given `BenchmarkResult` for each provided property key. 319 | * 320 | * Keys are either `measuredRunsAvgMs`, `totalMs` or point to `number` properties of the calculated `extras`. 321 | * Error is thrown, when a key points to a non number property. 322 | * No delta is calculated for key's which are not present in the `extras` 323 | * 324 | * Returns `false` when there is no history for the given benchmark. */ 325 | getDeltaForBenchmark( 326 | result: BenchmarkResult, 327 | keys: DeltaKey[] = ["measuredRunsAvgMs", "totalMs"], 328 | ) { 329 | const prevResults = this.data.history.filter((h) => 330 | h.benchmarks[result.name] 331 | ); 332 | const lastResult = prevResults.length > 0 333 | ? prevResults[prevResults.length - 1].benchmarks[result.name] 334 | : undefined; 335 | 336 | if (!lastResult) { // no previous result for this benchmark 337 | return false; 338 | } 339 | 340 | const currentResultExtras = this.options?.benchExtras && 341 | this.options.benchExtras(result); 342 | 343 | // deno-lint-ignore no-explicit-any 344 | const calcDelta = (current: any, prev: any, key: any) => { 345 | if (typeof current[key] !== "number" || typeof prev[key] !== "number") { 346 | throw new Error( 347 | `Type of value selected by key "${key}" must be number`, 348 | ); 349 | } 350 | 351 | const diff = current[key] - prev[key]; 352 | const percDiff = diff / prev[key]; 353 | 354 | return { 355 | percent: percDiff, 356 | amount: diff, 357 | }; 358 | }; 359 | 360 | const deltas: { [key: string]: Delta } = {}; 361 | 362 | keys.forEach((key) => { 363 | if (key === "measuredRunsAvgMs" || key === "totalMs") { 364 | deltas[key as string] = calcDelta(result, lastResult, key); 365 | } else { 366 | if ( 367 | !currentResultExtras || typeof currentResultExtras[key] === undefined 368 | ) { 369 | throw new Error( 370 | `No property named "${key}" in calculated extras for the currently measured benchmark named "${result.name}".`, 371 | ); 372 | } 373 | 374 | if (!lastResult.extras || !lastResult.extras[key]) { // TODO consider throwing 375 | return false; 376 | } 377 | 378 | deltas[key as string] = calcDelta( 379 | currentResultExtras, 380 | lastResult.extras, 381 | key, 382 | ); 383 | } 384 | }); 385 | 386 | return deltas; 387 | } 388 | 389 | /** Returns a copy of the historic data. */ 390 | getData(): BenchmarkHistory { 391 | // no complex objects so should be enough 392 | return JSON.parse(JSON.stringify(this.data)); 393 | } 394 | 395 | /** Returns the historic data in a pretty-printed JSON string */ 396 | getDataString() { 397 | return JSON.stringify(this.getData(), null, 2); 398 | } 399 | 400 | /** Returns every benchmark's name that is in the historic data. */ 401 | getBenchmarkNames() { 402 | return [ 403 | ...new Set( 404 | this.data.history.map((h) => Object.keys(h.benchmarks)).flat(), 405 | ), 406 | ]; 407 | } 408 | } 409 | 410 | /** Calculates `Thresholds` from the historic data for each benchmark. 411 | * 412 | * **EXPERIMENTAL** The default way of calculating may change, if you relay on thresholds provide your calculation so it wont change unexpectedly 413 | * 414 | * The default way the thresholds are calculated: 415 | * * only calculate threshold for benchmark, which has at least `5` previous runs 416 | * * `green` is the (minimum of the measured `measuredRunsAvgMs`) * `1.1` 417 | * * `yellow` is the (maximum of the measured `measuredRunsAvgMs`) * `1.2` 418 | * 419 | * This can be overridden with the options.*/ 420 | export function calculateThresholds( 421 | history: prettyBenchmarkHistory, 422 | options?: { 423 | minProceedingRuns?: number; 424 | calculate?: (runs: BenchmarkHistoryItem[]) => Threshold; 425 | }, 426 | ): Thresholds { 427 | const benchmarkNames = history.getBenchmarkNames(); 428 | const data = history.getData(); 429 | const thresholds: Thresholds = {}; 430 | 431 | benchmarkNames.forEach((bn) => { 432 | const runs = data.history.filter((h) => h.benchmarks[bn]); 433 | 434 | if (runs.length < (options?.minProceedingRuns ?? 5)) { 435 | return; 436 | } 437 | 438 | if (typeof options?.calculate === "function") { 439 | thresholds[bn] = options.calculate(runs); 440 | } else { 441 | const green = Math.min(...runs.map((r) => 442 | r.benchmarks[bn].measuredRunsAvgMs 443 | )) * 1.1; 444 | const yellow = Math.max(...runs.map((r) => 445 | r.benchmarks[bn].measuredRunsAvgMs 446 | )) * 1.2; 447 | 448 | thresholds[bn] = { green, yellow }; 449 | } 450 | }); 451 | 452 | return thresholds; 453 | } 454 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # prettyBenching 2 | 3 | A simple Deno library, that gives you pretty benchmarking progress and results in the commandline 4 | 5 | [![version](https://img.shields.io/badge/0.3.3-brightgreen?logo=v&logoColor=white&labelColor=gray)](https://deno.land/x/pretty_benching@v0.3.3) 6 | 7 | [![deno version](https://img.shields.io/badge/deno%201.8.2-success?logo=deno&logoColor=black&labelColor=white&color=black)](https://github.com/denoland/deno) 8 | [![deno/std version](https://img.shields.io/badge/deno/std%200.91.0-success?logo=deno&logoColor=black&labelColor=white&color=black)](https://deno.land/std@0.91.0) 9 | [![documentation](https://img.shields.io/badge/docs-blue?logo=deno&logoColor=black&labelColor=white&color=blue)](https://doc.deno.land/https/deno.land/x/pretty_benching/mod.ts) 10 | 11 | [![Build Status](https://github.com/littletof/prettyBenching/workflows/CI/badge.svg)](https://github.com/littletof/prettyBenching/actions?query=workflow%3ACI) 12 | [![Coverage Status](https://coveralls.io/repos/github/littletof/prettyBenching/badge.svg)](https://coveralls.io/github/littletof/prettyBenching) 13 | ![maintained](https://img.shields.io/maintenance/yes/2022) 14 | [![snapper_deno](https://img.shields.io/badge/-snapper%20%F0%9F%93%B7-%230DBC79)](https://github.com/littletof/snapper) 15 | 16 | ## Jump to 17 | 18 | - [prettyBenchmarkProgress](#prettybenchmarkprogress) 19 | - [prettyBenchmarkResults](#prettybenchmarkresults) 20 | - [prettyBenchmarkDown](#prettybenchmarkdown) [![deno version](https://img.shields.io/badge/Github_Action-4e4e4e?logo=github)](#as-a-github-action) 21 | - [prettyBenchmarkHistory](#prettybenchmarkhistory) 22 | - [Roadmap](#roadmap) 23 | 24 | ## Try it out 25 | 26 | This runs a short benchmark to showcase the module live. 27 | 28 | ```sh 29 | deno run -r --allow-hrtime https://deno.land/x/pretty_benching/example.ts 30 | ``` 31 | 32 | ## Getting started 33 | 34 | Add the following to your `deps.ts` 35 | 36 | ```ts 37 | export { 38 | prettyBenchmarkResult, 39 | prettyBenchmarkProgress, 40 | prettyBenchmarkDown, 41 | prettyBenchingHistory 42 | } from 'https://deno.land/x/pretty_benching@v0.3.3/mod.ts'; 43 | ``` 44 | 45 | or just simply import it directly: 46 | 47 | ```ts 48 | import { prettyBenchmarkResult, prettyBenchmarkProgress, prettyBenchmarkDown, prettyBenchingHistory } from 'https://deno.land/x/pretty_benching@v0.3.3/mod.ts'; 49 | ``` 50 | 51 | ## Note 52 | 53 | Using Deno's `--allow-hrtime` flag when running your code will result in a more precise benchmarking, because than float milliseconds will be used for measurement instead of integer. 54 | 55 | You can use `nocolor` in the options of both `prettyBenchmarkProgress` and `prettyBenchmarkResult` to turn off the coloring on the output. 56 | It doesn't interfere with the Deno's `fmt` color settings. 57 | 58 | # prettyBenchmarkProgress 59 | 60 | Prints the Deno `runBenchmarks()` method's `progressCb` callback values in a nicely readable format. 61 | 62 | ### Usage 63 | 64 | Simply add it to `runBenchmarks()` like below and you are good to go. Using `silent: true` is encouraged, so the default logs don't interfere 65 | 66 | ```ts 67 | await runBenchmarks({ silent: true }, prettyBenchmarkProgress()) 68 | ``` 69 | 70 | The output would look something like this during running: 71 | 72 | ![running](docs/imgs/prettyBenchingProgress_example_running.png) 73 | 74 | End when finished: 75 | 76 | ![finished](docs/imgs/prettyBenchingProgress_example_finished.png) 77 | 78 | ### Thresholds 79 | 80 | You can define thresholds to specific benchmarks and than the times of the runs will be colored respectively 81 | 82 | ```ts 83 | const thresholds: Thresholds = { 84 | "for100ForIncrementX1e6": {green: 0.85, yellow: 1}, 85 | "for100ForIncrementX1e8": {green: 84, yellow: 93}, 86 | "forIncrementX1e9": {green: 900, yellow: 800}, 87 | "forIncrementX1e9x2": {green: 15000, yellow: 18000}, 88 | } 89 | 90 | runBenchmarks({ silent: true }, prettyBenchmarkProgress({thresholds})) 91 | ``` 92 | 93 | ![threshold](docs/imgs/prettyBenchingProgress_example_threshold.png) 94 | 95 | ### Indicators 96 | 97 | You can use indicators, which help you categorise your benchmarks. You can change the character which gets added before the benchmark. 98 | 99 | ```ts 100 | const indicators: BenchIndicator[] = [ 101 | { benches: /100/, modFn: colors.bgRed }, 102 | { benches: /for/, modFn: colors.red }, 103 | { benches: /custom/, modFn: () => colors.bgYellow(colors.black("%")) }, // changes indicator char 104 | ]; 105 | ``` 106 | 107 | ![indicator](docs/imgs/prettyBenchingProgress_example_indicators.png) 108 | 109 | # prettyBenchmarkResults 110 | 111 | Prints the Deno `runBenchmarks()` method's result in a nicely readable format. 112 | 113 | ### Usage 114 | 115 | Simply call `prettyBenchmarkResult` with the desired settings. 116 | 117 | Setting the `nocolor` option to `true` will remove all the built in coloring. Its usefull, if you log it somewhere or save the output to a file. It won't interfere with Deno's `fmt` color settings. 118 | 119 | Use the `silent: true` flag in `runBenchmarks`, if you dont want to see the default output 120 | 121 | ```ts 122 | // ...add benches... 123 | 124 | runBenchmarks({silent: true}) 125 | .then(prettyBenchmarkResult()) 126 | .catch((e: any) => { 127 | console.error(e.stack); 128 | }); 129 | ``` 130 | 131 | The output would look something like this: 132 | 133 | ![example](docs/imgs/prettyBenchingResult_example.png) 134 | 135 | ### Thresholds 136 | 137 | You can define thresholds to specific benchmarks and than related things, like times or graph bars will be colored respectively. This can use the same thresholds object as in `prettyBenchmarkProgress`. 138 | 139 | ```ts 140 | const thresholds: Thresholds = { 141 | "multiple-runs": { green: 76, yellow: 82 }, 142 | "benchmark-start": { green: 2, yellow: 3 }, 143 | }; 144 | 145 | runBenchmarks().then(prettyBenchmarkResult({ thresholds })); 146 | ``` 147 | 148 | ![threshold](docs/imgs/prettyBenchingResult_example_threshold.png) 149 | 150 | ### Indicators 151 | 152 | You can use indicators, which help you categorise your benchmarks besides just their names. You can set what color the table should have. With `modFn` you can also change what color the marker should be, or even change the indicator icon like seen below (default is `#`). 153 | You can pass this object to `prettyBenchmarkProgress` too. 154 | 155 | ```ts 156 | const indicators: BenchIndicator[] = [ 157 | { 158 | benches: /multiple-runs/, 159 | color: colors.magenta, 160 | modFn: () => "🚀", 161 | } 162 | ]; 163 | 164 | runBenchmarks().then(prettyBenchmarkResult({ indicators })); 165 | ``` 166 | 167 | ![indicator](docs/imgs/prettyBenchingResult_example_indicators.png) 168 | 169 | ### Parts 170 | 171 | You can change what the result cards should contain with the `parts` object. Once you define it you have to set all parts you want. The default parts setting is `{ graph: true, graphBars: 5 }`. 172 | 173 | You can define what `parts` you want to use in the options, like this: 174 | 175 | ```ts 176 | prettyBenchmarkResult( 177 | { 178 | nocolor: false, 179 | thresholds, 180 | indicators, 181 | parts: { 182 | extraMetrics: true, 183 | threshold: true, 184 | graph: true, 185 | graphBars: 10, 186 | }, 187 | }, 188 | ) 189 | ``` 190 | 191 | Using all options: 192 | 193 | ![thresholdLine](docs/imgs/prettyBenchingResult_example_full_extra.png) 194 | 195 | ##### Extra metrics `{ extraMetrics: true }` 196 | 197 | Setting this will give you an extra row, which adds extra calculated values like `min`, `max`, `mean as ((min+max)/2)` , `median`. 198 | 199 | ![extraMetrics](docs/imgs/prettyBenchingResult_example_extrametrics_line.png) 200 | 201 | ##### Threshold `{ threshold: true }` 202 | 203 | > Need to have `thresholds` in the root of the `options` object, which have a matching threshold for the specific benchmark, otherwise it wont add it to the specific card. 204 | 205 | It simply show what the set thresholds for the benchmark. Can be usefull if `nocolor` is set to true. 206 | 207 | ![thresholdLine](docs/imgs/prettyBenchingResult_example_threshold_line.png) 208 | 209 | ##### Graph `{ graph: true, graphBars: 5 }` 210 | 211 | Adds a graph, which shows the distribution of the runs of the benchmark. 212 | > Only shows, when there are `10` or more runs set. 213 | 214 | The graph shows the results groupped into timeframes, where the groups frame start from the value on the head of its line, and end with excluding the value on the next line. 215 | 216 | With `graphBars` you can set how many bars it should show. Default is `5`. 217 | 218 | # prettyBenchmarkDown 219 | 220 | Generates a summary markdown from the results of the Deno `runBenchmarks()` method's result. 221 | 222 | >||Name|Runs|Total (ms)|Average (ms)|Thresholds|| 223 | >|:-:|:--|--:|--:|--:|--:|:-:| 224 | >| |Rotating other things|1000|2143.992|2.144|-|-| 225 | >|🎹|Rotating arrays|1000|2021.054|2.021|<= 3.5 ✅
<= 4.4 🔶
> 4.4 🔴
|✅| 226 | >|%|Proving NP==P|1|4384.908|4384.908|<= 4141 ✅
<= 6000 🔶
> 6000 🔴
|🔶| 227 | >|🚀|Standing out|1000|375.708|0.376|<= 0.3 ✅
<= 0.33 🔶
> 0.33 🔴
|🔴| 228 | 229 | A full example output: [pr_benchmark_output.md](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md) 230 | 231 | ### Usage 232 | 233 | Simply call `prettyBenchmarkDown` with the desired settings. 234 | 235 | ```ts 236 | // ...add benches... 237 | 238 | runBenchmarks() 239 | .then(prettyBenchmarkDown(console.log)) 240 | .catch((e: any) => { 241 | console.error(e.stack); 242 | }); 243 | ``` 244 | 245 | The first parameter of this function is an output function, where you cen recieve the generated markdown's text. In the example above it just print is to `console`. 246 | 247 | Without defining any options, it will generate one `markdown` table with one row for each benchmark. 248 | Something like this: 249 | 250 | > |Name|Runs|Total (ms)|Average (ms)| 251 | > |:--|--:|--:|--:| 252 | > |Sorting arrays|4000|1506.683|0.377| 253 | > |Rotating arrays|1000|1935.981|1.936| 254 | > |Proving NP==P|1|4194.431|4194.431| 255 | > |Standing out|1000|369.566|0.370| 256 | 257 | ##### Writing to a file 258 | 259 | ```ts 260 | runBenchmarks() 261 | .then(prettyBenchmarkDown( 262 | (markdown: string) => { Deno.writeTextFileSync("./benchmark.md", markdown); }, 263 | { /* ...options */ } 264 | )) 265 | .catch((e: any) => { 266 | console.error(e.stack); 267 | }); 268 | ``` 269 | 270 | > 🔽 Needs *--allow-write* flag to run 271 | 272 | ### Options 273 | 274 | You can fully customise the generated `markdown`. Add text, use predefined, or custom columns or group your benchmarks and define these per group. 275 | 276 | Here you can seen an example that showcases every option: [pr_benchmark_output.md](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md) 277 | It was generated with: [pr_benchmarks.ts](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmarks.ts) 278 | 279 | #### Extra texts 280 | 281 | * `options.title`: Defines a level 1 title (`# MyTitle`) on the top of the generated markdown 282 | * `options.description`: Defines a part, that is put before all of the result tables. If defined as a function, it recieves the `runBenchmarks` result, so it can be set dynamically. It also accepts a simple string as well. 283 | * `options.afterTables`: Defines a part, that is put after all of the result tables. If defined as a function, it recieves the `runBenchmarks` result, so it can be set dynamically. It also accepts a simple string as well. 284 | 285 | #### Columns ```options.columns```, ```group.columns``` 286 | 287 | You can customise, what columns you want to see in each table. To see what every column type generates check out the [example](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md) 288 | 289 | * If `not defined`, the generator uses the default columns defined by the module 290 | * If `defined`, you take full control, of what columns you want to see. The default columns are exported, and there are other premade columns for you to use. 291 | 292 | ##### defaultColumns(columns: string[]) [example](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md#default-columns-and-dynamic-text) 293 | 294 | ```ts 295 | columns: [ 296 | ...defaultColumns(), 297 | ...defaultColumns(['name', 'measuredRunsAvgMs']) 298 | ] 299 | ``` 300 | 301 | It includes `Name`, `Runs`, `Total (ms)` and `Average (ms)` columns, these are the default values of the `BenchmarkRunResult`. Filter them with an array of propertyKeys. 302 | 303 | ##### indicatorColumn(indicators: BenchIndicator[]) [example](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md#predefiend-columns) 304 | 305 | ```ts 306 | columns: [ 307 | indicatorColumn(indicators), 308 | ] 309 | ``` 310 | 311 | Defines a column, that contains the indicator for the given bench, if defined. Keep in mind, that it strips any color from the indicator. 312 | 313 | ##### thresholdsColumn(thresholds: Thresholds, indicateResult?: boolean) [example](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md#predefiend-columns) 314 | 315 | ```ts 316 | columns: [ 317 | thresholdsColumn(thresholds), // only shows the threshold ranges 318 | thresholdsColumn(thresholds, true), // shows the result in the cell too 319 | ] 320 | ``` 321 | 322 | Defines a column, that shows the threshold ranges for the given bench, if defined. If you set `indicateResult` to true, it shows in what range the benchmark fell, in the same cell. 323 | 324 | ##### thresholdResultColumn(thresholds: Thresholds) [example](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md#predefiend-columns) 325 | 326 | ```ts 327 | columns: [ 328 | thresholdResultColumn(thresholds), 329 | ] 330 | ``` 331 | 332 | Defines a column, that show into what threhold range the benchmark fell. 333 | 334 | ##### extraMetricsColumns(options?) [example](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md#extra-metrics) 335 | 336 | ```ts 337 | columns: [ 338 | ...extraMetricsColumns(), 339 | ...extraMetricsColumns({ ignoreSingleRuns: true }), // puts '-' in cells, where bench was only run once 340 | ...extraMetricsColumns({ metrics: ["max", "min", "mean", "median", "stdDeviation"] }), 341 | ] 342 | ``` 343 | 344 | Defines columns, that show extra calculated metrics like `min`, `max`, `mean`, `median`, `stdDeviation`. You can define which of these you want, in the `metrics` array. You can also tell it, to put `-` in the cells, where the benchmark was only run once with `ignoreSingleRuns`. 345 | 346 | ##### Custom columns [example](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark_output.md#custom-columns) 347 | 348 | ```ts 349 | columns: [ 350 | { 351 | title: 'CustomTotal', 352 | propertyKey: 'totalMs', 353 | toFixed: 5, 354 | align: 'left' 355 | }, 356 | { 357 | title: 'Formatter', 358 | formatter: (r: BenchmarkResult, cd: ColumnDefinition) => `${r.name}:${cd.title}` 359 | }, 360 | ] 361 | ``` 362 | 363 | When you need something else, you can define you own columns. You can put custom `ColumnDefinitions` into the `columns` array. 364 | 365 | * The simplest way, is to give it a `propertyKey`, and than it shows that value of the `BenchmarkResult`. You can use any key here, but you will have to put these values into the results manually. If a `result[propertyKey]` is `undefined`, than it puts a `-` into that cell. 366 | If your returned value is a `number`, than you can use `toFixed` to tell what precision you want to see. (It's ignored if value is not a number) 367 | 368 | * If your usecase is more complex, than you can use the `formatter` method, where you get the benchmark result, and you can return any value that you want from that. The predefined column types above use this method as well. 369 | 370 | ```ts 371 | interface ColumnDefinition { 372 | title: string; 373 | propertyKey?: string; 374 | align?: "left" | "center" | "right"; 375 | toFixed?: number; 376 | formatter?: (result: BenchmarkResult, columnDef: ColumnDefinition) => string; 377 | } 378 | ``` 379 | 380 | #### Groups ```options.groups``` 381 | 382 | ```ts 383 | groups: [ 384 | { 385 | include: /array/, 386 | name: "A group for arrays", 387 | description: "The array group's description", 388 | afterTable: (gr: BenchmarkResult[], g: GroupDefinition, rr: BenchmarkRunResult) => `Dynamic ${g.name}, ${gr.length}, ${rr.results.length}`, 389 | columns: [/* ... */] 390 | } 391 | ] 392 | ``` 393 | 394 | You can group your benches, so they are separated in your generated markdown. For this, you need to define `include` RegExp. Right now, every benchmark, that doesnt fit any group will be put into one table at the bottom, so if you dont want some filter them before manually. 395 | 396 | In each group you can define a `name` which will be a level 2 heading (`## Name`) before you group. 397 | 398 | You can also define `description` and `afterTable`, which behave the same like the ones in the root of options. 399 | 400 | If you want, you can have different columns in each group, if you define them in the groups `columns` array. 401 | 402 | ```ts 403 | interface GroupDefinition { 404 | include: RegExp; 405 | name: string; 406 | columns?: ColumnDefinition[]; 407 | description?: string | ((groupResults: BenchmarkResult[], group: GroupDefinition,runResults: BenchmarkRunResult ) => string); 408 | afterTable?: string | ((groupResults: BenchmarkResult[], group: GroupDefinition, runResults: BenchmarkRunResult ) => string); 409 | } 410 | ``` 411 | 412 | ### As a Github Action 413 | 414 | Use this in a github action, eg. comment benchmark results on PRs. 415 | 416 | You can see an example Github Action for this [here](https://github.com/littletof/prettyBenching/blob/master/docs/prettyBenchmarkDown/pr_benchmark.yml) or see it in use in a showcase [repo](https://github.com/littletof/pretty-benching-action/pull/2). 417 | 418 | # prettyBenchmarkHistory 419 | 420 | Helps to keep track of the results of the different `runBenchmarks()` runs historically. 421 | 422 | ### Usage 423 | 424 | > **Note** this module doesn't handle the loading and saving of the data from/to the disk. See examples. 425 | 426 | First, if you already have saved historic data, you need to load it from disk (or elsewhere). 427 | If no previous historicData is provided in the constructor, it starts a fresh, empty history. 428 | 429 | After it was initiated with the `options` and data, you can simply call `addResults` with the new results, and save them again into a file, using `getDataString()` which returns the historic data in a pretty printed JSON string. If you want to work on the data itself, call `getData()`. 430 | 431 | You are able to set some rules in the `options`, like to only allow to add a result, if every benchmark was run a minimum of x times, or if no benchmark was added or removed or had its `runsCount` changed since the previous run. 432 | 433 | By default it only allows to add results that were measured with `--allow-hrtime` flag, but this rule can be disabled. 434 | 435 | ```ts 436 | // add benches, then 437 | 438 | let historicData; 439 | try { 440 | historicData = JSON.parse(Deno.readTextFileSync("./benchmarks/history.json")); 441 | } catch(e) { 442 | // Decide whether you want to proceed with no history 443 | console.warn(`⚠ cant read history file. (${e.message})`); 444 | } 445 | 446 | const history = new prettyBenchmarkHistory(historicData, {/*options*/}); 447 | 448 | runBenchmarks().then((results: BenchmarkRunResult) => { 449 | history.addResults(results {id: "version_tag"}); 450 | Deno.writeTextFileSync("./benchmarks/history.json", history.getDataString()); 451 | }); 452 | ``` 453 | 454 | The resulting historic data would look something like this, based on the options: 455 | 456 | ```json 457 | { 458 | "history": [ 459 | { 460 | "date": "2020-09-12T20:28:36.812Z", 461 | "id": "v1.15.2", 462 | "benchmarks": { 463 | "RotateArrays": { 464 | "measuredRunsAvgMs": 0.061707600000003596, 465 | "runsCount": 500, 466 | "totalMs": 30.853800000001797, 467 | "extras": { 468 | "max": 0.45420000000001437, 469 | "min": 0.034700000000043474, 470 | "mean": 0.24445000000002892, 471 | "median": 0.04179999999996653, 472 | "std": 0.04731720894389344 473 | } 474 | }, 475 | "x3#14": { 476 | "measuredRunsAvgMs": 2.6682033000000036, 477 | "runsCount": 1000, 478 | "totalMs": 2668.2033000000038, 479 | "extras": { 480 | "max": 9.25019999999995, 481 | "min": 1.983299999999872, 482 | ... 483 | ``` 484 | 485 | ### Rules and options 486 | 487 | * **`easeOnlyHrTime`**: Allows storing low precision measurements, which where measured without `--allow-hrtime` flag 488 | * **`strict`**: Contains a set of rules, which are all enforced, if boolean `true` is set, but can be individually controlled if an object is provided: 489 | 490 | * **`noRemoval`**: Throw an error, when previously saved benchmark is missing from the current set when calling `addResults`. Ignored on the very first set of benchmarks. 491 | * **`noAddition`**: Throw an error, when previously not saved benchmark is added to the current set when calling `addResults`. Ignored on the very first set of benchmarks. 492 | * **`noRunsCountChange`**: Throw an error, when the `runsCount` changes for a benchmark from the previous run's `runsCount`. Ignored on new benchmarks. 493 | 494 | * **`minRequiredRuns`**: Throw an error, when **any** benchmark has lower runsCount than the set value. 495 | 496 | * **`saveIndividualRuns`**: Saves the `measuredRunsMs` array for each benchmark. 497 | **WARNING** this could result in a very big history file overtime. 498 | Consider calculating necessary values before save instead with `benchExtras` or `runExtras`. 499 | 500 | * **`benchExtras`**(result: BenchmarkResult) => T : Saves the returned `object` for each benchmark into it's `extras` property. 501 | 502 | * **`runExtras`**(runResult: BenchmarkRunResult) => K : Saves the returned `object` for each run into it's `runExtras` property. 503 | 504 | ### Methods 505 | 506 | * **`addResults`**: Stores the run's result into the historic data, enforces all set rules on the results. You can specify an `id` in the options to help identify the specific historic data besides the date. It useful for example to set it to the benchmarked module's version number. 507 | 508 | * **`getDeltasFrom`**: Calls `getDeltaForBenchmark` for each benchmark in the provided `BenchmarkRunResults` and returns the values as one object. 509 | 510 | * **`getDeltaForBenchmark`**: Calculates `deltas` for given `BenchmarkResult` for each provided property key. 511 | 512 | * **`getData`**: Returns a copy of the historic data. 513 | 514 | * **`getDataString`**: Returns the historic data in a pretty-printed JSON string. 515 | 516 | * **`getBenchmarkNames`**: Returns an array of each benchmark's name, which result is present in the historic data. 517 | 518 | ### Usecases 519 | 520 | * Show `deltas` in the different formats: 521 | 522 | * **`prettyBenchmarkProgress`**: 523 | ![prettyBenchingHistory_progress_delta](docs/imgs/prettyBenchingHistory_progress_delta.png) 524 |
525 | code 526 | 527 | ```ts 528 | const history = new prettyBenchmarkHistory(historicData, {/*options*/}); 529 | 530 | runBenchmarks({ silent: true }, prettyBenchmarkProgress( 531 | { rowExtras: deltaProgressRowExtra(history) } 532 | )); 533 | ``` 534 |
535 | 536 | * **`prettyBenchmarkResults`**: 537 | ![prettyBenchingHistory_result_card_delta](docs/imgs/prettyBenchingHistory_result_card_delta.png) 538 |
539 | code 540 | 541 | ```ts 542 | const history = new prettyBenchmarkHistory(historicData, {/*options*/}); 543 | 544 | runBenchmarks().then(prettyBenchmarkResult( 545 | { infoCell: deltaResultInfoCell(history) } 546 | )); 547 | ``` 548 |
549 | 550 | * **`prettyBenchmarkDown`**: 551 | 552 | |Name|Average (ms)|Change in average| 553 | |:-:|:-:|:-:| 554 | |x3#14|2.8319|🟢   -33% (1.3895ms)| 555 | |MZ/X|5.6873|🔺   +5% (0.2468ms)| 556 | |MZ/T|2.7544|-| 557 | 558 |
559 | code 560 | 561 | ```ts 562 | const history = new prettyBenchmarkHistory(historicData, {/*options*/}); 563 | 564 | runBenchmarks().then(prettyBenchmarkDown(console.log, { 565 | columns: [ 566 | ...defaultColumns(['name', 'measuredRunsAvgMs']), 567 | deltaColumn(history), 568 | ] 569 | })); 570 | ``` 571 |
572 | 573 | * Show each previous measurement as a column in a markdown table 574 | 575 | >|Name|2020-09-12
21:54:53.706|v0.5.6|v0.8.0|Current|Change in average| 576 | >|:-:|--:|--:|--:|:-:|:-:| 577 | >|historic|0.0704|0.0740|0.0904|0.0650|🟢   -28% (0.0254ms)| 578 | >|x3#14|6.1675|2.9979|4.2214|3.6275|🟢   -14% (0.5939ms)| 579 | >|MZ/X|-|3.3095|5.4405|7.4553|🔺  +37% (2.0147ms)| 580 | >|MZ/T|-|-|-|3.7763|-| 581 | 582 |
583 | code 584 | 585 | ```ts 586 | const history = new prettyBenchmarkHistory(historicData, {/*options*/}); 587 | 588 | runBenchmarks().then(prettyBenchmarkDown(console.log, { 589 | columns: [ 590 | { title: "Name", propertyKey: "name" }, 591 | ...historyColumns(history), 592 | { title: "Current", propertyKey: "measuredRunsAvgMs", toFixed: 4 }, 593 | deltaColumn(history), 594 | ] 595 | })); 596 | ``` 597 |
598 | 599 | * Calculate thresholds from the previous results: `calculateThresholds` [docs](https://doc.deno.land/https/deno.land/x/pretty_benching/mod.ts#calculateThresholds) 600 | * Github Actions: Save results on version tags, report benchmarking results as a comment on PR-s. 601 | * Fail/warn in CI on a PR if the `delta` is too big or benchmark is in red `threshold` with: `getDeltasFrom` and `getThresholdResultsFrom` 602 | 603 | # Roadmap 604 | 605 | #### BenchmarkProgress 606 | 607 | - [x] Add `indicator` options 608 | - [x] Add `nocolor` option 609 | - [x] Unify `indicator` option types, use `color` 610 | - [x] Add overridable output function like in benchmark results 611 | 612 | #### BenchmarkResults 613 | 614 | - [x] Overrideable output function 615 | - [x] Refactor outputting result in a single call 616 | - [x] Add `nocolor` option 617 | - [x] Fix graph 618 | - [x] Add `indicator` options like in progress 619 | - [x] Tidy up current benchmark results look 620 | - [x] Add options to define what parts are shown in the result cards. (eg. show graph, more calculated values like mean, ...) 621 | - [ ] Find a place in `extraMetrics` for `standard deviation`. 622 | - [ ] Add option to crop outlayer results from graph (maybe with a percent limit). 623 | - [ ] Add an option to have a minimalist result output, that resembles the final progress output, instead of the big cards. 624 | 625 | #### Historic data 626 | 627 | - [x] Add module to enable historic data save/read inside repo 628 | - [x] Make use of historic module, enable automatic calculating of thresholds from previous runs 629 | - [x] Option to use historic data, to tell if benchmarks got better or worse from previous runs. 630 | 631 | #### Operational 632 | 633 | - [x] Write README docs 634 | - [x] Separate `prettyBenchmarkResults` and `prettyBenchmarkProgress` into independently importable modules. 635 | - [x] Add the ability to follow the change on how the outputs look like. 636 | - [x] Refactor how optional `options` are handled 637 | - [x] Write JSDocs 638 | - [ ] Proper tests 639 | - [ ] Refactor README 640 | - [ ] Add showcase module, which helps to have consistent docs images 641 | - [ ] Make module contributor friendly 642 | --------------------------------------------------------------------------------