├── test
├── data
│ ├── config
│ │ ├── out.txt
│ │ └── external.json
│ ├── write
│ │ ├── .gitignore
│ │ ├── with-index-html
│ │ │ ├── index.html
│ │ │ └── original_data.js
│ │ └── data-dir
│ │ │ └── original_data.js
│ └── extract
│ │ ├── benchmarkjs_output.txt
│ │ ├── cargo_output.txt
│ │ ├── customSmallerIsBetter_output.txt
│ │ ├── customBiggerIsBetter_output.txt
│ │ ├── go_output.txt
│ │ ├── googlecpp_output.txt
│ │ ├── issue16_output.txt
│ │ ├── julia_output.json
│ │ ├── catch2_output.txt
│ │ ├── pytest_output.txt
│ │ └── pytest_several_units.json
├── default_index_html.spec.ts
├── git.spec.ts
├── config.spec.ts
└── extract.spec.ts
├── examples
├── catch2
│ ├── .gitignore
│ ├── fib.hpp
│ ├── catch2_bench.cpp
│ ├── CMakeLists.txt
│ └── README.md
├── julia
│ ├── .gitignore
│ ├── Project.toml
│ ├── fib.jl
│ ├── README.md
│ └── Manifest.toml
├── benchmarkjs
│ ├── .gitignore
│ ├── index.js
│ ├── bench.js
│ ├── package.json
│ └── README.md
├── cpp
│ ├── .gitignore
│ ├── fib.hpp
│ ├── bench.cpp
│ ├── Makefile
│ └── README.md
├── pytest
│ ├── .gitignore
│ ├── fib
│ │ └── __init__.py
│ ├── bench.py
│ ├── requirements.txt
│ ├── setup.py
│ └── README.md
├── go
│ ├── fib.go
│ ├── fib_test.go
│ └── README.md
├── rust
│ ├── src
│ │ └── lib.rs
│ ├── Cargo.toml
│ ├── benches
│ │ └── bench.rs
│ └── README.md
└── criterion-rs
│ ├── Cargo.toml
│ ├── src
│ └── lib.rs
│ ├── benches
│ └── bench.rs
│ └── README.md
├── .prettierignore
├── .codecov.yml
├── .yamllint.yml
├── tsconfig.spec.json
├── tsconfig.build.json
├── .gitignore
├── jest.config.js
├── .prettierrc.json
├── src
├── index.ts
├── git.ts
├── default_index_html.ts
├── config.ts
└── write.ts
├── tsconfig.json
├── .github
└── workflows
│ ├── minimal.yml
│ ├── go.yml
│ ├── rust.yml
│ ├── benchmarkjs.yml
│ ├── criterion-rs.yml
│ ├── cpp.yml
│ ├── catch2.yml
│ ├── commit-comment.yml
│ ├── pytest.yml
│ ├── julia.yml
│ └── ci.yml
├── LICENSE.txt
├── scripts
├── prepare-release.sh
└── ci_validate_modification.ts
├── package.json
├── .eslintrc.json
├── CONTRIBUTING.md
├── action.yml
└── CHANGELOG.md
/test/data/config/out.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/examples/catch2/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 |
--------------------------------------------------------------------------------
/test/data/config/external.json:
--------------------------------------------------------------------------------
1 | {}
2 |
--------------------------------------------------------------------------------
/examples/julia/.gitignore:
--------------------------------------------------------------------------------
1 | /output.json
2 |
--------------------------------------------------------------------------------
/examples/benchmarkjs/.gitignore:
--------------------------------------------------------------------------------
1 | /node_modules
2 | /package-lock.json
3 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | test/data
2 | coverage
3 | dist
4 | **/*.md
5 | **/*.txt
6 |
--------------------------------------------------------------------------------
/examples/cpp/.gitignore:
--------------------------------------------------------------------------------
1 | /benchmark
2 | /a.out
3 | /benchmark_result.json
4 |
--------------------------------------------------------------------------------
/examples/pytest/.gitignore:
--------------------------------------------------------------------------------
1 | /venv
2 | __pycache__
3 | /.benchmarks
4 | /.pytest_cache
5 |
--------------------------------------------------------------------------------
/examples/julia/Project.toml:
--------------------------------------------------------------------------------
1 | [deps]
2 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
3 |
--------------------------------------------------------------------------------
/test/data/write/.gitignore:
--------------------------------------------------------------------------------
1 | /data-dir/data.js
2 | /with-index-html/data.js
3 | /new-data-dir
4 |
--------------------------------------------------------------------------------
/test/data/write/with-index-html/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Hello!
4 |
5 |
--------------------------------------------------------------------------------
/.codecov.yml:
--------------------------------------------------------------------------------
1 | comment: off
2 |
3 | coverage:
4 | status:
5 | project: yes
6 | patch: no
7 | changes: no
8 |
--------------------------------------------------------------------------------
/examples/pytest/fib/__init__.py:
--------------------------------------------------------------------------------
def fib(n):
    """Return the n-th element of the sequence where fib(0) == fib(1) == 1.

    Intentionally naive (exponential-time) recursion: this function is the
    workload being benchmarked, not a utility.
    """
    return 1 if n <= 1 else fib(n - 2) + fib(n - 1)
5 |
--------------------------------------------------------------------------------
/.yamllint.yml:
--------------------------------------------------------------------------------
1 | extends: default
2 |
3 | rules:
4 | line-length: disable
5 | document-start: disable
6 | truthy: disable
7 |
--------------------------------------------------------------------------------
/examples/go/fib.go:
--------------------------------------------------------------------------------
1 | package fib
2 |
// Fib returns the u-th element of the sequence with Fib(0) == Fib(1) == 1.
// The doubly-recursive form is deliberate: it is the workload being measured.
func Fib(u uint) uint {
	if u > 1 {
		return Fib(u-2) + Fib(u-1)
	}
	return 1
}
9 |
--------------------------------------------------------------------------------
/examples/rust/src/lib.rs:
--------------------------------------------------------------------------------
/// Fibonacci-style sequence with `fib(0) == fib(1) == 1`.
///
/// Deliberately naive recursion: this is the workload under benchmark.
pub fn fib(u: u32) -> u32 {
    match u {
        0 | 1 => 1,
        _ => fib(u - 2) + fib(u - 1),
    }
}
8 |
--------------------------------------------------------------------------------
/tsconfig.spec.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig.json",
3 | "compilerOptions": {
4 | "types": ["jest", "node"]
5 | },
6 | "include": ["**/*.ts"]
7 | }
8 |
--------------------------------------------------------------------------------
/examples/benchmarkjs/index.js:
--------------------------------------------------------------------------------
// Naive doubly-recursive Fibonacci; fib(0) === fib(1) === 1.
// Exponential on purpose: this is the benchmark workload.
const fib = (n) => (n <= 1 ? 1 : fib(n - 2) + fib(n - 1));
7 |
8 | exports.fib = fib;
9 |
--------------------------------------------------------------------------------
/tsconfig.build.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig.json",
3 | "compilerOptions": {
4 | "types": ["node"]
5 | },
6 | "include": ["**/*.ts"],
7 | "exclude": ["test/**/*.ts"]
8 | }
9 |
--------------------------------------------------------------------------------
/examples/pytest/bench.py:
--------------------------------------------------------------------------------
# pytest-benchmark example: the injected `benchmark` fixture calls the target
# repeatedly and records timing statistics for each test.
from fib import fib
import pytest

# Benchmark the recursive fib at a small input.
def test_fib_10(benchmark):
    benchmark(fib, 10)

# Benchmark the recursive fib at a larger (much slower) input.
def test_fib_20(benchmark):
    benchmark(fib, 20)
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /node_modules
2 | /examples/rust/Cargo.lock
3 | /examples/rust/target
4 | /examples/criterion-rs/Cargo.lock
5 | /examples/criterion-rs/target
6 | /.nyc_output
7 | /coverage
8 | /dist
9 | /.idea
10 |
--------------------------------------------------------------------------------
/examples/cpp/fib.hpp:
--------------------------------------------------------------------------------
#if !defined FIB_HPP_INCLUDED
#define FIB_HPP_INCLUDED

// Naive doubly-recursive Fibonacci with fib(0) == fib(1) == 1.
// Exponential on purpose: it is the workload under benchmark.
int fib(int const i) {
    return i <= 1 ? 1 : fib(i - 2) + fib(i - 1);
}

#endif // FIB_HPP_INCLUDED
12 |
--------------------------------------------------------------------------------
/examples/catch2/fib.hpp:
--------------------------------------------------------------------------------
#if !defined FIB_HPP_INCLUDED
#define FIB_HPP_INCLUDED

// Naive doubly-recursive Fibonacci with fib(0) == fib(1) == 1.
// Exponential on purpose: it is the workload under benchmark.
int fib(int const i) {
    return i <= 1 ? 1 : fib(i - 2) + fib(i - 1);
}

#endif // FIB_HPP_INCLUDED
12 |
--------------------------------------------------------------------------------
/test/data/extract/benchmarkjs_output.txt:
--------------------------------------------------------------------------------
1 | fib(10) x 1,431,759 ops/sec ±0.74% (93 runs sampled)
2 | fib(20) x 12,146 ops/sec ±0.32% (96 runs sampled)
3 |
4 | this x line should be ignored
5 |
6 | createObjectBuffer with 200 comments x 81.61 ops/sec ±1.70% (69 runs sampled)
7 |
--------------------------------------------------------------------------------
/examples/rust/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "rust_example"
3 | version = "0.1.0"
authors = ["rhysd <lin90162@yahoo.co.jp>"]
5 | edition = "2018"
6 |
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 |
9 | [dependencies]
10 |
--------------------------------------------------------------------------------
/jest.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('ts-jest/dist/types').InitialOptionsTsJest} */
2 | module.exports = {
3 | globals: {
4 | 'ts-jest': {
5 | tsconfig: 'tsconfig.spec.json',
6 | },
7 | },
8 | preset: 'ts-jest',
9 | testEnvironment: 'node',
10 | };
11 |
--------------------------------------------------------------------------------
/.prettierrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "tabWidth": 4,
3 | "semi": true,
4 | "singleQuote": true,
5 | "trailingComma": "all",
6 | "printWidth": 120,
7 | "overrides": [
8 | {
9 | "files": ["**/*.{yml,json}"],
10 | "options": {
11 | "tabWidth": 2
12 | }
13 | }
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------
/examples/catch2/catch2_bench.cpp:
--------------------------------------------------------------------------------
1 | #include "fib.hpp"
2 | #define CATCH_CONFIG_MAIN
3 | #include
4 |
5 | TEST_CASE("Fibonacci") {
6 |
7 | // now let's benchmark:
8 | BENCHMARK("Fibonacci 10") { return fib(10); };
9 |
10 | BENCHMARK("Fibonacci 20") { return fib(20); };
11 | }
12 |
--------------------------------------------------------------------------------
/examples/go/fib_test.go:
--------------------------------------------------------------------------------
1 | package fib
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func BenchmarkFib10(b *testing.B) {
8 | for i := 0; i < b.N; i++ {
9 | var _ = Fib(10)
10 | }
11 | }
12 |
13 | func BenchmarkFib20(b *testing.B) {
14 | for i := 0; i < b.N; i++ {
15 | var _ = Fib(20)
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/examples/pytest/requirements.txt:
--------------------------------------------------------------------------------
1 | atomicwrites==1.3.0
2 | attrs==19.3.0
3 | importlib-metadata==0.23
4 | more-itertools==7.2.0
5 | packaging==19.2
6 | pluggy==0.13.0
7 | py==1.10.0
8 | py-cpuinfo==5.0.0
9 | pyparsing==2.4.5
10 | pytest==5.2.4
11 | pytest-benchmark==3.2.2
12 | six==1.13.0
13 | wcwidth==0.1.7
14 | zipp==0.6.0
15 |
--------------------------------------------------------------------------------
/test/data/extract/cargo_output.txt:
--------------------------------------------------------------------------------
1 |
2 | running 0 tests
3 |
4 | test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
5 |
6 |
7 | running 2 tests
8 | test bench_fib_10 ... bench: 135 ns/iter (+/- 24)
9 | test bench_fib_20 ... bench: 18,149 ns/iter (+/- 755)
10 |
11 | test result: ok. 0 passed; 0 failed; 0 ignored; 2 measured; 0 filtered out
12 |
13 |
--------------------------------------------------------------------------------
/examples/rust/benches/bench.rs:
--------------------------------------------------------------------------------
#![feature(test)]

extern crate test;

use rust_example::fib;
use test::Bencher;

/// Benchmark the naive recursive `fib` at n = 10.
#[bench]
fn bench_fib_10(b: &mut Bencher) {
    b.iter(|| {
        // `let _ =` discards the result; the call itself is the workload.
        let _ = fib(10);
    });
}

/// Benchmark the naive recursive `fib` at n = 20.
#[bench]
fn bench_fib_20(b: &mut Bencher) {
    b.iter(|| {
        let _ = fib(20);
    });
}
21 |
--------------------------------------------------------------------------------
/examples/pytest/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup

# Minimal packaging metadata so the local `fib` package is importable by bench.py.
setup(
    name='benchmark-example',
    version='0.0.0',
    url='https://github.com/benchmark-action/github-action-benchmark',
    author='rhysd ',  # NOTE(review): trailing space suggests an email was lost in extraction — confirm
    author_email='github@users.noreply.github.com',
    description='Benchmark example with timeit package',
    packages=['fib'],
)
12 |
--------------------------------------------------------------------------------
/examples/benchmarkjs/bench.js:
--------------------------------------------------------------------------------
// Benchmark.js suite measuring the naive recursive fib at two input sizes.
const Benchmark = require('benchmark');
const suite = new Benchmark.Suite();
const { fib } = require('./index');

suite
    .add('fib(10)', () => {
        fib(10);
    })
    .add('fib(20)', () => {
        fib(20);
    })
    // 'cycle' fires once per completed benchmark; print its summary line
    // (github-action-benchmark parses this stdout format).
    .on('cycle', (event) => {
        console.log(String(event.target));
    })
    .run();
16 |
--------------------------------------------------------------------------------
/examples/criterion-rs/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "criterion_example"
3 | version = "0.1.0"
authors = ["rhysd <lin90162@yahoo.co.jp>"]
5 | edition = "2018"
6 |
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 |
9 | [dependencies]
10 | criterion = "*"
11 |
12 | [lib]
13 | bench = false
14 |
15 | [[bench]]
16 | name = "bench"
17 | harness = false
18 |
--------------------------------------------------------------------------------
/examples/julia/fib.jl:
--------------------------------------------------------------------------------
using BenchmarkTools

# Naive doubly-recursive Fibonacci; fib(0) == fib(1) == 1.
fib(n) = n <= 1 ? 1 : fib(n - 2) + fib(n - 1)

# Group both workloads in one suite so they can be tuned and run together.
suite = BenchmarkGroup()

suite["fib"] = BenchmarkGroup(["tag1", "tag2"])

suite["fib"][10] = @benchmarkable fib(10)
suite["fib"][20] = @benchmarkable fib(20)

# Tune evaluation counts, run all benchmarks, then save the median estimate;
# the exported JSON is what the action consumes (see examples/julia/README.md).
tune!(suite)
results = run(suite, verbose = true)

BenchmarkTools.save("output.json", median(results))
16 |
--------------------------------------------------------------------------------
/examples/benchmarkjs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "benchmark-example",
3 | "private": true,
4 | "version": "0.0.0",
5 | "description": "",
6 | "main": "index.js",
7 | "scripts": {
8 | "test": "echo \"Error: no test specified\" && exit 1"
9 | },
10 | "author": "rhysd (https://rhysd.github.io/)",
11 | "license": "MIT",
12 | "devDependencies": {
13 | "benchmark": "^2.1.4"
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/test/data/extract/customSmallerIsBetter_output.txt:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "My Custom Smaller Is Better Benchmark - CPU Load",
4 | "unit": "Percent",
5 | "value": 50,
6 | "range": "5%",
7 | "extra": "My Optional Information for the tooltip"
8 | },
9 | {
10 | "name": "My Custom Smaller Is Better Benchmark - Memory Used",
11 | "unit": "Megabytes",
12 | "value": 100
13 | }
14 | ]
15 |
--------------------------------------------------------------------------------
/test/data/extract/customBiggerIsBetter_output.txt:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "My Custom Bigger Is Better Benchmark - Throughput",
4 | "unit": "req/s",
5 | "value": 70
6 | },
7 | {
8 | "name": "My Custom Bigger Is Better Benchmark - Free Memory",
9 | "unit": "Megabytes",
10 | "value": 150,
11 | "range": "3",
12 | "extra": "Optional Value #1: 25\nHelpful Num #2: 100\nAnything Else!"
13 | }
14 | ]
15 |
--------------------------------------------------------------------------------
/test/data/extract/go_output.txt:
--------------------------------------------------------------------------------
1 | goos: darwin
2 | goarch: amd64
3 | BenchmarkFib10-8 5000000 325 ns/op
4 | BenchmarkFib20 30000 40537.123 ns/op
5 | BenchmarkFib/my_tabled_benchmark_-_10-8 5000000 325 ns/op
6 | BenchmarkFib/my_tabled_benchmark_-_20 30000 40537.123 ns/op
7 | BenchmarkFib/my/tabled/benchmark_-_20 30001 40537.456 ns/op
8 | PASS
9 | ok _/Users/rhayasd/Develop/github.com/benchmark-action/github-action-benchmark/examples/go 3.614s
10 |
--------------------------------------------------------------------------------
/examples/criterion-rs/src/lib.rs:
--------------------------------------------------------------------------------
/// Fibonacci-style sequence with `fib(0) == fib(1) == 1`.
///
/// Deliberately naive recursion; compare with the iterative `fast_fib`.
pub fn fib(u: u32) -> u32 {
    match u {
        0 | 1 => 1,
        _ => fib(u - 2) + fib(u - 1),
    }
}
8 |
/// Iterative Fibonacci; agrees with the recursive `fib` for every input
/// (`fast_fib(0) == 1`, and each loop step advances the pair by one term).
pub fn fast_fib(n: u32) -> u32 {
    let (mut prev, mut cur) = (0, 1);
    for _ in 0..n {
        let next = prev + cur;
        prev = cur;
        cur = next;
    }
    cur
}
25 |
26 |
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
import * as core from '@actions/core';
import { configFromJobInput } from './config';
import { extractResult } from './extract';
import { writeBenchmark } from './write';

// Action entry point: read job inputs, extract the benchmark result from the
// configured tool's output, then write (and possibly compare/alert on) it.
// The three awaits are intentionally sequential — each step consumes the
// previous step's result.
async function main() {
    const config = await configFromJobInput();
    core.debug(`Config extracted from job: ${config}`);

    const bench = await extractResult(config);
    core.debug(`Benchmark result was extracted: ${bench}`);

    await writeBenchmark(bench, config);
    console.log('github-action-benchmark was run successfully!', '\nData:', bench);
}

// Any rejection marks the job failed via the Actions toolkit.
main().catch((e) => core.setFailed(e.message));
18 |
--------------------------------------------------------------------------------
/examples/cpp/bench.cpp:
--------------------------------------------------------------------------------
// Google Benchmark registration for the naive recursive fib().
#include "./fib.hpp"
#include "benchmark/benchmark.h"

static void fib_10(benchmark::State &state) {
    for (auto _ : state) {
        // Suppress optimization otherwise this line is removed by DCE
        int i = 10;
        benchmark::DoNotOptimize(i);
        benchmark::DoNotOptimize(fib(i));
    }
}

static void fib_20(benchmark::State &state) {
    for (auto _ : state) {
        // Same DCE guard as above, at the larger input.
        int i = 20;
        benchmark::DoNotOptimize(i);
        benchmark::DoNotOptimize(fib(i));
    }
}

// Register the function as a benchmark
BENCHMARK(fib_10);
BENCHMARK(fib_20);

// Run the benchmark
BENCHMARK_MAIN();
27 |
--------------------------------------------------------------------------------
/examples/julia/README.md:
--------------------------------------------------------------------------------
1 | # Julia example with `BenchmarkTools.jl`
2 |
Please read the [docs](https://juliaci.github.io/BenchmarkTools.jl/stable/manual/) of `BenchmarkTools.jl` first. Especially the [BenchmarkGroup](https://juliaci.github.io/BenchmarkTools.jl/stable/manual/#The-BenchmarkGroup-type) section. Generally speaking, we only need the `json` file exported by `BenchmarkTools.save`. You can check out the [`fib.jl`](./fib.jl) file for how to do it. A [workflow](../../.github/workflows/julia.yml) for this example is also provided to help you integrate it in your project.
4 |
**Note:** Currently we only support a test suite after applying an estimation (`minimum`, `median`, `mean`, `maximum`, `std`).
--------------------------------------------------------------------------------
/test/default_index_html.spec.ts:
--------------------------------------------------------------------------------
import { strict as A } from 'assert';
import * as cheerio from 'cheerio';
import { Parser as JsParser } from 'acorn';
import { DEFAULT_INDEX_HTML } from '../src/default_index_html';

describe('DEFAULT_INDEX_HTML', function () {
    it('is valid HTML and its script is valid as JavaScript', function () {
        // Verify HTML syntax: load the page with cheerio and locate the
        // inline script element by its id.
        const q = cheerio.load(DEFAULT_INDEX_HTML);
        const s = q('#main-script');
        A.ok(s);
        const src = s.html();
        A.ok(src);

        // Verify JavaScript syntax. It raises an error if invalid
        // (the A.ok(src) above guarantees src is non-null here).
        JsParser.parse(src as string);
    });
});
19 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "moduleResolution": "node",
5 | "outDir": "dist",
6 | "lib": ["esnext"],
7 | "preserveConstEnums": true,
8 | "noImplicitAny": true,
9 | "noImplicitReturns": true,
10 | "noImplicitThis": true,
11 | "noUnusedLocals": true,
12 | "noUnusedParameters": true,
13 | "noEmitOnError": true,
14 | "noFallthroughCasesInSwitch": true,
15 | "strict": true,
16 | "target": "es2019",
17 | "sourceMap": true,
18 | "esModuleInterop": true,
19 | "resolveJsonModule": true
20 | },
21 | "files": [],
22 | "include": [],
23 | "references": [
24 | {
25 | "path": "./tsconfig.app.json"
26 | },
27 | {
28 | "path": "./tsconfig.spec.json"
29 | }
30 | ],
31 | "exclude": ["node_modules"]
32 | }
33 |
--------------------------------------------------------------------------------
/examples/catch2/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.12)
2 | project("Catch2_bench")
3 |
4 | include(FetchContent)
5 |
6 | FetchContent_Declare(
7 | catch2
8 | GIT_REPOSITORY https://github.com/catchorg/Catch2.git
9 | GIT_TAG v2.11.0)
10 |
11 | FetchContent_GetProperties(catch2)
12 | if(NOT catch2_POPULATED)
13 | FetchContent_Populate(catch2)
14 | add_subdirectory(${catch2_SOURCE_DIR} ${catch2_BINARY_DIR})
15 | endif()
16 |
17 | add_executable(${PROJECT_NAME})
18 | target_sources(${PROJECT_NAME} PRIVATE catch2_bench.cpp)
19 | target_link_libraries(${PROJECT_NAME} Catch2::Catch2)
20 |
21 | target_compile_options(
22 | ${PROJECT_NAME}
23 | PRIVATE
        $<$<CXX_COMPILER_ID:MSVC>:/DCATCH_CONFIG_ENABLE_BENCHMARKING>
        $<$<OR:$<CXX_COMPILER_ID:GNU>,$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>>:-DCATCH_CONFIG_ENABLE_BENCHMARKING>
26 | )
27 |
--------------------------------------------------------------------------------
/.github/workflows/minimal.yml:
--------------------------------------------------------------------------------
1 | name: Example for minimal setup
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run minimal steps to run github-action-benchmark
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - uses: actions/setup-go@v1
18 | - name: Run benchmark
19 | run: cd examples/go && go test -bench 'BenchmarkFib' | tee output.txt
20 | - name: Download previous benchmark data
21 | uses: actions/cache@v1
22 | with:
23 | path: ./cache
24 | key: ${{ runner.os }}-benchmark
25 | - name: Store benchmark result
26 | uses: benchmark-action/github-action-benchmark@v1
27 | with:
28 | tool: 'go'
29 | output-file-path: examples/go/output.txt
30 | external-data-json-path: ./cache/benchmark-data.json
31 | fail-on-alert: true
32 |
--------------------------------------------------------------------------------
/examples/cpp/Makefile:
--------------------------------------------------------------------------------
1 | bench: a.out
2 | ./a.out
3 |
4 | json: a.out
5 | ./a.out --benchmark_format=json | tee benchmark_result.json
6 |
7 | a.out: benchmark/build/src/libbenchmark.a bench.cpp fib.hpp
8 | clang++ -std=c++14 -O3 -I ./benchmark/include -L ./benchmark/build/src/ -pthread bench.cpp -l benchmark
9 |
10 | benchmark/build/src/libbenchmark.a: benchmark/build benchmark/googletest
11 | cd ./benchmark/build && \
12 | cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_TESTING=true ../ && \
13 | make -j
14 |
15 | benchmark/build: benchmark
16 | mkdir -p benchmark/build
17 |
18 | benchmark:
19 | [ -d benchmark ] || git clone --depth=1 --single-branch --branch v1.5.0 https://github.com/google/benchmark.git benchmark
20 |
21 | benchmark/googletest: benchmark
22 | [ -d benchmark/googletest ] || git clone --depth=1 --single-branch --branch release-1.10.0 https://github.com/google/googletest.git benchmark/googletest
23 |
24 | clean:
25 | rm -rf a.out benchmark
26 |
27 | .PHONY: bench json clean
28 |
--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
1 | name: Go Example
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run Go benchmark example
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - uses: actions/setup-go@v1
18 | - name: Run benchmark
19 | run: cd examples/go && go test -bench 'BenchmarkFib' | tee output.txt
20 | - name: Store benchmark result
21 | uses: benchmark-action/github-action-benchmark@v1
22 | with:
23 | name: Go Benchmark
24 | tool: 'go'
25 | output-file-path: examples/go/output.txt
26 | github-token: ${{ secrets.GITHUB_TOKEN }}
27 | auto-push: true
28 | # Show alert with commit comment on detecting possible performance regression
29 | alert-threshold: '200%'
30 | comment-on-alert: true
31 | fail-on-alert: true
32 | alert-comment-cc-users: '@ktrz'
33 |
--------------------------------------------------------------------------------
/test/data/write/data-dir/original_data.js:
--------------------------------------------------------------------------------
1 | window.BENCHMARK_DATA = {
2 | "lastUpdate": 1574927128603,
3 | "repoUrl": "https://github.com/user/repo",
4 | "entries": {
5 | "Test benchmark": [
6 | {
7 | "commit": {
8 | "author": { "email": "dummy@example.com", "name": "User", "username": "user" },
9 | "committer": { "email": "dummy@example.com", "name": "User", "username": "user" },
10 | "distinct": false,
11 | "id": "prev commit id",
12 | "message": "dummy message",
13 | "timestamp": "dummy stamp",
14 | "tree_id": "dummy tree id",
15 | "url": "https://github.com/user/repo/commit/prev commit id"
16 | },
17 | "date": 1574927127603,
18 | "tool": "cargo",
19 | "benches": [{ "name": "bench_fib_10", "range": "± 20", "unit": "ns/iter", "value": 100 }]
20 | }
21 | ]
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/test/data/write/with-index-html/original_data.js:
--------------------------------------------------------------------------------
1 | window.BENCHMARK_DATA = {
2 | "lastUpdate": 1574927128603,
3 | "repoUrl": "https://github.com/user/repo",
4 | "entries": {
5 | "Test benchmark": [
6 | {
7 | "commit": {
8 | "author": { "email": "dummy@example.com", "name": "User", "username": "user" },
9 | "committer": { "email": "dummy@example.com", "name": "User", "username": "user" },
10 | "distinct": false,
11 | "id": "prev commit id",
12 | "message": "dummy message",
13 | "timestamp": "dummy stamp",
14 | "tree_id": "dummy tree id",
15 | "url": "https://github.com/user/repo/commit/prev commit id"
16 | },
17 | "date": 1574927127603,
18 | "tool": "cargo",
19 | "benches": [{ "name": "bench_fib_10", "range": "+/- 20", "unit": "ns/iter", "value": 100 }]
20 | }
21 | ]
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
1 | name: Rust Example
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run Rust benchmark example
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - run: rustup toolchain update nightly && rustup default nightly
18 | - name: Run benchmark
19 | run: cd examples/rust && cargo +nightly bench | tee output.txt
20 | - name: Store benchmark result
21 | uses: benchmark-action/github-action-benchmark@v1
22 | with:
23 | name: Rust Benchmark
24 | tool: 'cargo'
25 | output-file-path: examples/rust/output.txt
26 | github-token: ${{ secrets.GITHUB_TOKEN }}
27 | auto-push: true
28 | # Show alert with commit comment on detecting possible performance regression
29 | alert-threshold: '200%'
30 | comment-on-alert: true
31 | fail-on-alert: true
32 | alert-comment-cc-users: '@ktrz'
33 |
--------------------------------------------------------------------------------
/examples/criterion-rs/benches/bench.rs:
--------------------------------------------------------------------------------
// Criterion.rs benchmarks comparing the naive recursive `fib` with the
// iterative `fast_fib`.
#[macro_use]
extern crate criterion;
use criterion::{black_box, Criterion, BenchmarkId};
use criterion_example::{fib, fast_fib};

// Single-input benchmark; black_box keeps the input opaque to the optimizer.
fn bench_fib_10(c: &mut Criterion) {
    c.bench_function("BenchFib10", move |b| {
        b.iter(|| {
            let _ = fib(black_box(10));
        });
    });
}

fn bench_fib_20(c: &mut Criterion) {
    c.bench_function("BenchFib20", move |b| {
        b.iter(|| {
            let _ = fib(20);
        });
    });
}

// Parameterized group: runs both implementations at the same inputs so the
// report shows Recursive vs Iterative side by side.
fn bench_fibs(c: &mut Criterion) {
    let mut group = c.benchmark_group("Fibonacci");
    for i in [20, 21].iter() {
        group.bench_with_input(BenchmarkId::new("Recursive", i), i,
            |b, i| b.iter(|| fib(*i)));
        group.bench_with_input(BenchmarkId::new("Iterative", i), i,
            |b, i| b.iter(|| fast_fib(*i)));
    }
    group.finish();
}


criterion_group!(benches, bench_fib_10, bench_fib_20, bench_fibs);
criterion_main!(benches);
36 |
--------------------------------------------------------------------------------
/.github/workflows/benchmarkjs.yml:
--------------------------------------------------------------------------------
1 | name: Benchmark.js Example
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run JavaScript benchmark example
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - uses: actions/setup-node@v1
18 | - name: Run benchmark
19 | run: cd examples/benchmarkjs && npm install && node bench.js | tee output.txt
20 | - name: Store benchmark result
21 | uses: benchmark-action/github-action-benchmark@v1
22 | with:
23 | name: Benchmark.js Benchmark
24 | tool: 'benchmarkjs'
25 | output-file-path: examples/benchmarkjs/output.txt
26 | github-token: ${{ secrets.GITHUB_TOKEN }}
27 | auto-push: true
28 | # Show alert with commit comment on detecting possible performance regression
29 | alert-threshold: '200%'
30 | comment-on-alert: true
31 | fail-on-alert: true
32 | alert-comment-cc-users: '@ktrz'
33 |
--------------------------------------------------------------------------------
/.github/workflows/criterion-rs.yml:
--------------------------------------------------------------------------------
1 | name: Criterion.rs Example
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run Criterion.rs benchmark example
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - run: rustup toolchain update nightly && rustup default nightly
18 | - name: Run benchmark
19 | run: cd examples/criterion-rs && cargo +nightly bench -- --output-format bencher | tee output.txt
20 | - name: Store benchmark result
21 | uses: benchmark-action/github-action-benchmark@v1
22 | with:
23 | name: Rust Benchmark
24 | tool: 'cargo'
25 | output-file-path: examples/criterion-rs/output.txt
26 | github-token: ${{ secrets.GITHUB_TOKEN }}
27 | auto-push: true
28 | # Show alert with commit comment on detecting possible performance regression
29 | alert-threshold: '200%'
30 | comment-on-alert: true
31 | fail-on-alert: true
32 | alert-comment-cc-users: '@ktrz'
33 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | the MIT License
2 |
3 | Copyright (c) 2019 rhysd
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 | of the Software, and to permit persons to whom the Software is furnished to do so,
10 | subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
16 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
17 | PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
19 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
20 | THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
22 |
--------------------------------------------------------------------------------
/.github/workflows/cpp.yml:
--------------------------------------------------------------------------------
1 | name: C++ Example
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run C++ benchmark example
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Cache Benchmark library
18 | uses: actions/cache@v1
19 | with:
20 | path: examples/cpp/benchmark
21 | key: ${{ runner.os }}-googlebenchmark-v1.5.0
22 | - name: Run benchmark
23 | run: cd examples/cpp && make json
24 | - name: Store benchmark result
25 | uses: benchmark-action/github-action-benchmark@v1
26 | with:
27 | name: C++ Benchmark
28 | tool: 'googlecpp'
29 | output-file-path: examples/cpp/benchmark_result.json
30 | github-token: ${{ secrets.GITHUB_TOKEN }}
31 | auto-push: true
32 | # Show alert with commit comment on detecting possible performance regression
33 | alert-threshold: '200%'
34 | comment-on-alert: true
35 | fail-on-alert: true
36 | alert-comment-cc-users: '@ktrz'
37 |
--------------------------------------------------------------------------------
/.github/workflows/catch2.yml:
--------------------------------------------------------------------------------
1 | name: Catch2 C++ Example
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run C++ benchmark example
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Build and run benchmarks with Catch2
18 | run: |
19 | cd examples/catch2
20 | mkdir build && cd build
21 | cmake -DCMAKE_BUILD_TYPE=Release ..
22 | cmake --build . --config Release
23 | ./Catch2_bench | tee ../benchmark_result.txt
24 | - name: Store benchmark result
25 | uses: benchmark-action/github-action-benchmark@v1
26 | with:
27 | name: Catch2 Benchmark
28 | tool: 'catch2'
29 | output-file-path: examples/catch2/benchmark_result.txt
30 | github-token: ${{ secrets.GITHUB_TOKEN }}
31 | auto-push: true
32 | # Show alert with commit comment on detecting possible performance regression
33 | alert-threshold: '200%'
34 | comment-on-alert: true
35 | fail-on-alert: true
36 | alert-comment-cc-users: '@bernedom,@ktrz'
37 |
--------------------------------------------------------------------------------
/.github/workflows/commit-comment.yml:
--------------------------------------------------------------------------------
1 | name: Example for alert with commit comment
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run alert check without using Git branch
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - uses: actions/setup-go@v1
18 | - name: Run benchmark
19 | run: cd examples/go && go test -bench 'BenchmarkFib' | tee output.txt
20 | - name: Download previous benchmark data
21 | uses: actions/cache@v1
22 | with:
23 | path: ./cache
24 | key: ${{ runner.os }}-benchmark
25 | - name: Store benchmark result
26 | uses: benchmark-action/github-action-benchmark@v1
27 | with:
28 | name: Alert setup example with cache
29 | tool: 'go'
30 | output-file-path: examples/go/output.txt
31 | external-data-json-path: ./cache/benchmark-data.json
32 | github-token: ${{ secrets.GITHUB_TOKEN }}
33 | alert-threshold: '200%'
34 | comment-on-alert: true
35 | fail-on-alert: true
36 | alert-comment-cc-users: '@ktrz'
37 |
--------------------------------------------------------------------------------
/examples/criterion-rs/README.md:
--------------------------------------------------------------------------------
1 | # Rust Criterion example for benchmarking with `cargo bench`
2 |
3 | - [Workflow for this example](../../.github/workflows/criterion-rs.yml)
4 | - [Benchmark results on GitHub pages](https://benchmark-action.github.io/github-action-benchmark/dev/bench/)
5 |
6 | This directory shows how to use [`github-action-benchmark`](https://github.com/benchmark-action/github-action-benchmark)
7 | with [`criterion`](https://github.com/bheisler/criterion.rs).
8 |
9 | ## Run benchmarks
10 |
11 | Official documentation for usage of `cargo bench` with Criterion:
12 |
13 | https://github.com/bheisler/criterion.rs
14 |
15 | e.g.
16 |
17 | ```yaml
18 | - name: Run benchmark
19 | run: cargo bench -- --output-format bencher | tee output.txt
20 | ```
21 |
22 | Note that you should run the benchmarks using the bencher output format.
23 |
24 |
25 | ## Process benchmark results
26 |
27 | Store the benchmark results with step using the action. Please set `cargo` to `tool` input.
28 |
29 | ```yaml
30 | - name: Store benchmark result
31 | uses: benchmark-action/github-action-benchmark@v1
32 | with:
33 | tool: 'cargo'
34 | output-file-path: output.txt
35 | ```
36 |
37 | Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for common usage.
38 |
--------------------------------------------------------------------------------
/examples/go/README.md:
--------------------------------------------------------------------------------
1 | Go example for benchmarking with `go test -bench`
2 | =================================================
3 |
4 | - [Workflow for this example](../../.github/workflows/go.yml)
5 | - [Action log of this example](https://github.com/benchmark-action/github-action-benchmark/actions?query=workflow%3A%22Go+Example%22)
6 | - [Benchmark results on GitHub pages](https://benchmark-action.github.io/github-action-benchmark/dev/bench/)
7 |
8 | This directory shows how to use [`github-action-benchmark`](https://github.com/benchmark-action/github-action-benchmark)
9 | with `go test -bench` command.
10 |
11 | ## Run benchmarks
12 |
13 | Official documentation for usage of `go test -bench`:
14 |
15 | https://pkg.go.dev/testing#hdr-Benchmarks
16 |
17 | e.g.
18 |
19 | ```yaml
20 | - name: Run benchmark
21 | run: go test -bench 'Benchmark' | tee output.txt
22 | ```
23 |
24 | ## Process benchmark results
25 |
26 | Store the benchmark results with step using the action. Please set `go` to `tool` input.
27 |
28 | ```yaml
29 | - name: Store benchmark result
30 | uses: benchmark-action/github-action-benchmark@v1
31 | with:
32 | tool: 'go'
33 | output-file-path: output.txt
34 | ```
35 |
36 | Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for common usage.
37 |
--------------------------------------------------------------------------------
/.github/workflows/pytest.yml:
--------------------------------------------------------------------------------
1 | name: Python Example with pytest
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run pytest-benchmark benchmark example
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - uses: actions/setup-python@v2
18 | with:
19 | python-version: 3.9
20 | - name: Run benchmark
21 | run: |
22 | cd examples/pytest
23 | pip install -r requirements.txt
24 | pytest bench.py --benchmark-json output.json
25 | - name: Store benchmark result
26 | uses: benchmark-action/github-action-benchmark@v1
27 | with:
28 | name: Python Benchmark with pytest-benchmark
29 | tool: 'pytest'
30 | output-file-path: examples/pytest/output.json
31 | # GITHUB_TOKEN is sufficient here because the workflow grants 'contents: write' permission above
32 | github-token: ${{ secrets.GITHUB_TOKEN }}
33 | auto-push: true
34 | # Show alert with commit comment on detecting possible performance regression
35 | alert-threshold: '200%'
36 | comment-on-alert: true
37 | fail-on-alert: true
38 | alert-comment-cc-users: '@ktrz'
39 |
--------------------------------------------------------------------------------
/test/data/extract/googlecpp_output.txt:
--------------------------------------------------------------------------------
1 | {
2 | "context": {
3 | "date": "2019-11-29 21:26:59",
4 | "host_name": "Corgi.local",
5 | "executable": "./a.out",
6 | "num_cpus": 4,
7 | "mhz_per_cpu": 2700,
8 | "cpu_scaling_enabled": false,
9 | "caches": [
10 | {
11 | "type": "Data",
12 | "level": 1,
13 | "size": 32768,
14 | "num_sharing": 2
15 | },
16 | {
17 | "type": "Instruction",
18 | "level": 1,
19 | "size": 32768,
20 | "num_sharing": 2
21 | },
22 | {
23 | "type": "Unified",
24 | "level": 2,
25 | "size": 262144,
26 | "num_sharing": 2
27 | },
28 | {
29 | "type": "Unified",
30 | "level": 3,
31 | "size": 3145728,
32 | "num_sharing": 4
33 | }
34 | ],
35 | "load_avg": [1.68408,1.73779,2.02783],
36 | "library_build_type": "release"
37 | },
38 | "benchmarks": [
39 | {
40 | "name": "fib_10",
41 | "run_name": "fib_10",
42 | "run_type": "iteration",
43 | "repetitions": 0,
44 | "repetition_index": 0,
45 | "threads": 1,
46 | "iterations": 3070566,
47 | "real_time": 2.1498980114547953e+02,
48 | "cpu_time": 2.1365507206163295e+02,
49 | "time_unit": "ns"
50 | },
51 | {
52 | "name": "fib_20",
53 | "run_name": "fib_20",
54 | "run_type": "iteration",
55 | "repetitions": 0,
56 | "repetition_index": 0,
57 | "threads": 1,
58 | "iterations": 23968,
59 | "real_time": 2.7455600415007055e+04,
60 | "cpu_time": 2.7364903204272359e+04,
61 | "time_unit": "ns"
62 | }
63 | ]
64 | }
65 |
--------------------------------------------------------------------------------
/scripts/prepare-release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | version="$1"
6 |
7 | if [[ "$version" == "" ]]; then
8 | echo 'Release branch name must be given as first argument' >&2
9 | exit 1
10 | fi
11 |
12 | if [ ! -d .git ]; then
13 | echo 'This script must be run at root directory of this repository' >&2
14 | exit 1
15 | fi
16 |
17 | if ! git diff --quiet; then
18 | echo 'Working tree is dirty! Please ensure all changes are committed and working tree is clean' >&2
19 | exit 1
20 | fi
21 |
22 | if ! git diff --cached --quiet; then
23 | echo 'Git index is dirty! Please ensure all changes are committed and Git index is clean' >&2
24 | exit 1
25 | fi
26 |
27 | branch="$(git symbolic-ref --short HEAD)"
28 | if [[ "$branch" != "master" ]]; then
29 | echo 'Current branch is not master. Please move to master before running this script' >&2
30 | exit 1
31 | fi
32 |
33 | echo "Releasing to $version branch..."
34 |
35 | set -x
36 | npm install
37 | npm run build
38 | npm run lint
39 | npm test
40 | npm prune --production
41 |
42 | rm -rf .release
43 | mkdir -p .release
44 |
45 | cp action.yml src/*.js package.json package-lock.json .release/
46 | cp -R node_modules .release/node_modules
47 |
48 | git checkout "$version"
49 | git pull
50 | git rm -rf node_modules
51 | rm -rf node_modules # remove node_modules/.cache
52 | mkdir -p src
53 |
54 | mv .release/action.yml .
55 | mv .release/*.js ./src/
56 | mv .release/*.json .
57 | mv .release/node_modules .
58 |
59 | git add action.yml ./src/*.js package.json package-lock.json node_modules
60 | set +x
61 |
62 | echo "Done. Please check 'git diff --cached' to verify changes. If ok, add version tag and push it to remote"
63 |
--------------------------------------------------------------------------------
/examples/rust/README.md:
--------------------------------------------------------------------------------
1 | Rust example for benchmarking with `cargo bench`
2 | ================================================
3 |
4 | - [Workflow for this example](../../.github/workflows/rust.yml)
5 | - [Action log of this example](https://github.com/benchmark-action/github-action-benchmark/actions?query=workflow%3A%22Rust+Example%22)
6 | - [Benchmark results on GitHub pages](https://benchmark-action.github.io/github-action-benchmark/dev/bench/)
7 |
8 | This directory shows how to use [`github-action-benchmark`](https://github.com/benchmark-action/github-action-benchmark)
9 | with [`cargo bench`](https://doc.rust-lang.org/cargo/commands/cargo-bench.html).
10 |
11 | ## Run benchmarks
12 |
13 | Official documentation for usage of `cargo bench`:
14 |
15 | https://doc.rust-lang.org/unstable-book/library-features/test.html
16 |
17 | e.g.
18 |
19 | ```yaml
20 | - name: Run benchmark
21 | run: cargo +nightly bench | tee output.txt
22 | ```
23 |
24 | Note that `cargo bench` is available only with nightly toolchain.
25 |
26 | Note that this example does not use LTO for benchmarking because entire code in benchmark iteration
27 | will be removed as dead code. For normal use case, please enable it in `Cargo.toml` for production
28 | performance.
29 |
30 | ```yaml
31 | [profile.bench]
32 | lto = true
33 | ```
34 |
35 | ## Process benchmark results
36 |
37 | Store the benchmark results with step using the action. Please set `cargo` to `tool` input.
38 |
39 | ```yaml
40 | - name: Store benchmark result
41 | uses: benchmark-action/github-action-benchmark@v1
42 | with:
43 | tool: 'cargo'
44 | output-file-path: output.txt
45 | ```
46 |
47 | Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for common usage.
48 |
49 |
50 |
--------------------------------------------------------------------------------
/test/data/extract/issue16_output.txt:
--------------------------------------------------------------------------------
1 |
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 | bench.exe is a Catch v2.11.1 host application.
4 | Run with -? for options
5 |
6 | -------------------------------------------------------------------------------
7 | Fibonacci
8 | -------------------------------------------------------------------------------
9 | D:\a\bench_action_test\bench_action_test\catch2_bench.cpp(5)
10 | ...............................................................................
11 |
12 | benchmark name samples iterations estimated
13 | mean low mean high mean
14 | std dev low std dev high std dev
15 | -------------------------------------------------------------------------------
16 | Fibonacci 10 100 76353 0 ns
17 | 0 ns 0 ns 0 ns
18 | 0 ns 0 ns 0 ns
19 |
20 | Fibonacci 20 100 75814 0 ns
21 | 1 ns 1 ns 1 ns
22 | 0 ns 0 ns 0 ns
23 |
24 |
25 | ===============================================================================
26 | test cases: 1 | 1 passed
27 | assertions: - none -
28 |
29 |
--------------------------------------------------------------------------------
/examples/pytest/README.md:
--------------------------------------------------------------------------------
1 | Python example for benchmarking with [pytest-benchmark][tool]
2 | =============================================================
3 |
4 | - [Workflow for this example](../../.github/workflows/pytest.yml)
5 | - [Action log of this example](https://github.com/benchmark-action/github-action-benchmark/actions?query=workflow%3A%22Python+Example+with+pytest%22)
6 | - [Benchmark results on GitHub pages](https://benchmark-action.github.io/github-action-benchmark/dev/bench/)
7 |
8 | This directory shows how to use [`github-action-benchmark`](https://github.com/benchmark-action/github-action-benchmark)
9 | with [pytest-benchmark][tool].
10 |
11 | ## Run benchmarks
12 |
13 | Official documentation for usage of pytest-benchmark:
14 |
15 | https://pytest-benchmark.readthedocs.io/en/stable/
16 |
17 | Install dependencies with `venv` package using Python3.
18 |
19 | ```sh
20 | $ python -m venv venv
21 | $ source venv/bin/activate
22 | $ pip install pytest pytest-benchmark
23 | ```
24 |
25 | Prepare `bench.py` as follows:
26 |
27 | e.g.
28 |
29 | ```python
30 | import pytest
31 |
32 | def some_test_case(benchmark):
33 | benchmark(some_func, args)
34 | ```
35 |
36 | And run benchmarks with `--benchmark-json` in workflow. The JSON file will be an input to
37 | github-action-benchmark.
38 |
39 | e.g.
40 |
41 | ```yaml
42 | - name: Run benchmark
43 | run: pytest bench.py --benchmark-json output.json
44 | ```
45 |
46 | ## Process benchmark results
47 |
48 | Store the benchmark results with step using the action. Please set `pytest` to `tool` input.
49 |
50 | ```yaml
51 | - name: Store benchmark result
52 | uses: benchmark-action/github-action-benchmark@v1
53 | with:
54 | tool: 'pytest'
55 | output-file-path: output.json
56 | ```
57 |
58 | Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for common usage.
59 |
60 | [tool]: https://pypi.org/project/pytest-benchmark/
61 |
--------------------------------------------------------------------------------
/examples/benchmarkjs/README.md:
--------------------------------------------------------------------------------
1 | JavaScript example for benchmarking with [benchmark.js][tool]
2 | =============================================================
3 |
4 | - [Workflow for this example](../../.github/workflows/benchmarkjs.yml)
5 | - [Action log of this example](https://github.com/benchmark-action/github-action-benchmark/actions?query=workflow%3A%22Benchmark.js+Example%22)
6 | - [Benchmark results on GitHub pages](https://benchmark-action.github.io/github-action-benchmark/dev/bench/)
7 |
8 | This directory shows how to use [`github-action-benchmark`](https://github.com/benchmark-action/github-action-benchmark)
9 | with [benchmark.js][tool].
10 |
11 | ## Run benchmarks
12 |
13 | Official documentation for usage of benchmark.js:
14 |
15 | https://benchmarkjs.com/
16 |
17 | Prepare script `bench.js` as follows:
18 |
19 | e.g.
20 |
21 | ```javascript
22 | const Benchmark = require('benchmark');
23 | const suite = new Benchmark.Suite();
24 |
25 | suite
26 | .add('some test case', () => {
27 | // ...
28 | })
29 | .on('cycle', event => {
30 | // Output benchmark result by converting benchmark result to string
31 | console.log(String(event.target));
32 | })
33 | .run();
34 | ```
35 |
36 | Ensure the output includes string values converted from benchmark results.
37 | This action extracts measured values from the output.
38 |
39 | Run the script in workflow:
40 |
41 | e.g.
42 |
43 | ```yaml
44 | - name: Run benchmark
45 | run: node bench.js | tee output.txt
46 | ```
47 |
48 | ## Process benchmark results
49 |
50 | Store the benchmark results with step using the action. Please set `benchmarkjs` to `tool` input.
51 |
52 | ```yaml
53 | - name: Store benchmark result
54 | uses: benchmark-action/github-action-benchmark@v1
55 | with:
56 | tool: 'benchmarkjs'
57 | output-file-path: output.txt
58 | ```
59 |
60 | Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for common usage.
61 |
62 | [tool]: https://benchmarkjs.com/
63 |
--------------------------------------------------------------------------------
/.github/workflows/julia.yml:
--------------------------------------------------------------------------------
1 | name: Julia Example with BenchmarkTools.jl
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | permissions:
8 | contents: write
9 | deployments: write
10 |
11 | jobs:
12 | benchmark:
13 | name: Run julia benchmark example
14 | runs-on: ${{ matrix.os }}
15 | strategy:
16 | fail-fast: false
17 | matrix:
18 | version:
19 | - '1'
20 | os:
21 | - ubuntu-latest
22 | arch:
23 | - x64
24 | steps:
25 | - uses: actions/checkout@v2
26 | - uses: julia-actions/setup-julia@v1
27 | with:
28 | version: ${{ matrix.version }}
29 | arch: ${{ matrix.arch }}
30 | - uses: actions/cache@v1
31 | env:
32 | cache-name: cache-artifacts
33 | with:
34 | path: ~/.julia/artifacts
35 | key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
36 | restore-keys: |
37 | ${{ runner.os }}-test-${{ env.cache-name }}-
38 | ${{ env.cache-name }}-
39 | ${{ runner.os }}-test-
40 | ${{ runner.os }}-
41 | - name: Run benchmark
42 | run: |
43 | cd examples/julia
44 | julia --project --color=yes -e '
45 | using Pkg;
46 | Pkg.instantiate();
47 | include("fib.jl")'
48 | - name: Store benchmark result
49 | uses: benchmark-action/github-action-benchmark@v1
50 | with:
51 | name: Julia benchmark result
52 | tool: 'julia'
53 | output-file-path: examples/julia/output.json
54 | # GITHUB_TOKEN is sufficient here because the workflow grants 'contents: write' permission above
55 | github-token: ${{ secrets.GITHUB_TOKEN }}
56 | auto-push: true
57 | # Show alert with commit comment on detecting possible performance regression
58 | alert-threshold: '200%'
59 | comment-on-alert: true
60 | fail-on-alert: true
61 | alert-comment-cc-users: '@findmyway'
62 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "github-action-benchmark",
3 | "version": "0.0.0",
4 | "private": true,
5 | "description": "",
6 | "main": "dist/src/index.js",
7 | "scripts": {
8 | "build": "tsc -p tsconfig.build.json",
9 | "lint": "eslint '**/*.{ts,js}'",
10 | "fix": "eslint --fix '**/*.{ts,js}'",
11 | "format": "prettier -w **",
12 | "format:check": "prettier -c **",
13 | "test": "jest",
14 | "test:watch": "jest --watch",
15 | "coverage": "jest --coverage",
16 | "coverage:open": "jest --coverage && open ./coverage/lcov-report/index.html"
17 | },
18 | "repository": {
19 | "type": "git",
20 | "url": "git+https://github.com/benchmark-action/github-action-benchmark.git"
21 | },
22 | "keywords": [
23 | "github",
24 | "action",
25 | "benchmark"
26 | ],
27 | "author": "github-action-benchmark developers ",
28 | "license": "MIT",
29 | "bugs": {
30 | "url": "https://github.com/benchmark-action/github-action-benchmark/issues"
31 | },
32 | "homepage": "https://github.com/benchmark-action/github-action-benchmark#readme",
33 | "dependencies": {
34 | "@actions/core": "^1.2.6",
35 | "@actions/exec": "^1.0.3",
36 | "@actions/github": "^2.1.1",
37 | "@actions/io": "^1.0.2"
38 | },
39 | "devDependencies": {
40 | "@types/acorn": "^4.0.5",
41 | "@types/cheerio": "^0.22.16",
42 | "@types/deep-diff": "^1.0.0",
43 | "@types/deep-equal": "^1.0.1",
44 | "@types/jest": "^27.0.3",
45 | "@types/markdown-it": "0.0.9",
46 | "@types/node": "^13.9.1",
47 | "@types/rimraf": "^2.0.3",
48 | "@typescript-eslint/eslint-plugin": "^5.4.0",
49 | "@typescript-eslint/parser": "^5.4.0",
50 | "acorn": "^7.1.1",
51 | "cheerio": "^1.0.0-rc.3",
52 | "deep-diff": "^1.0.2",
53 | "deep-equal": "^2.0.1",
54 | "eslint": "^8.2.0",
55 | "eslint-config-prettier": "^8.3.0",
56 | "eslint-plugin-jest": "^25.2.4",
57 | "eslint-plugin-prettier": "^4.0.0",
58 | "jest": "^27.3.1",
59 | "markdown-it": "^10.0.0",
60 | "prettier": "^2.4.1",
61 | "rimraf": "^3.0.2",
62 | "ts-jest": "^27.0.7",
63 | "typescript": "^4.5.2"
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/examples/catch2/README.md:
--------------------------------------------------------------------------------
1 | C++ example for benchmarking with [Catch2 Framework][tool]
2 | ====================================================================
3 |
4 | - [Workflow for this example](../../.github/workflows/catch2.yml)
5 | - [Action log of this example](https://github.com/benchmark-action/github-action-benchmark/actions?query=workflow%3A%22Catch2+C%2B%2B+Example%22)
6 | - [Benchmark results on GitHub pages](https://benchmark-action.github.io/github-action-benchmark/dev/bench/)
7 |
8 | This directory shows how to use [`github-action-benchmark`][action] with [Catch2 Framework][tool].
9 |
10 |
11 |
12 | ## Run benchmarks
13 |
14 | Official documentation for usage of Catch2 Framework can be found in its repository:
15 |
16 | https://github.com/catchorg/Catch2
17 |
18 | Since Catch2 is a header-only test framework, you don't need to build it in advance.
19 | Download and put the headers in your `include` directory and write your benchmarks.
20 |
21 | ```cpp
22 | #define CATCH_CONFIG_MAIN
23 | #include <catch2/catch.hpp>
24 |
25 | TEST_CASE("Fibonacci") {
26 | // now let's benchmark:
27 | BENCHMARK("Some benchmark") {
28 | // Your benchmark goes here
29 | };
30 | }
31 | ```
32 |
33 | Build the source with C++ compiler and run the built executable to get the benchmark output.
34 | Ensure to use `console` reporter for this. `xml` reporter may be supported in the future.
35 |
36 |
37 |
38 | ## Process benchmark results
39 |
40 | Store the benchmark results with step using the action. Please set `catch2` to `tool` input.
41 |
42 | ```yaml
43 | - name: Store benchmark result
44 | uses: benchmark-action/github-action-benchmark@v1
45 | with:
46 | tool: 'catch2'
47 | output-file-path: benchmark_result.txt
48 | ```
49 |
50 | Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for common usage.
51 |
52 |
53 |
54 | ## Run this example
55 |
56 | To try this example, please use [cmake](./CMakeLists.txt) and `clang++`.
57 |
58 | ```sh
59 | $ mkdir build
60 | $ cd build
61 | $ cmake -DCMAKE_BUILD_TYPE=Release ..
62 | $ cmake --build . --config Release
63 | ```
64 |
65 | This will create `Catch2_bench` executable. The results are output to stdout.
66 |
67 | [tool]: https://github.com/catchorg/Catch2
68 | [action]: https://github.com/benchmark-action/github-action-benchmark
69 |
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "root": true,
3 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended", "prettier", "plugin:prettier/recommended"],
4 | "env": {
5 | "es6": true,
6 | "node": true
7 | },
8 | "rules": {
9 | "prefer-spread": "off",
10 | "eqeqeq": "error"
11 | },
12 | "ignorePatterns": ["examples/**", "dist/**"],
13 | "overrides": [
14 | {
15 | "files": ["**/*.ts"],
16 | "parser": "@typescript-eslint/parser",
17 | "parserOptions": {
18 | "project": ["./tsconfig.build.json", "./tsconfig.spec.json"]
19 | },
20 | "plugins": ["@typescript-eslint", "prettier"],
21 | "rules": {
22 | "@typescript-eslint/naming-convention": "error",
23 | "@typescript-eslint/no-floating-promises": "error",
24 | "@typescript-eslint/no-unnecessary-type-arguments": "error",
25 | "@typescript-eslint/no-non-null-assertion": "error",
26 | "@typescript-eslint/no-empty-interface": "error",
27 | "@typescript-eslint/restrict-plus-operands": "error",
28 | "@typescript-eslint/no-extra-non-null-assertion": "error",
29 | "@typescript-eslint/prefer-nullish-coalescing": "error",
30 | "@typescript-eslint/prefer-optional-chain": "error",
31 | "@typescript-eslint/ban-ts-comment": "error",
32 | "@typescript-eslint/prefer-includes": "error",
33 | "@typescript-eslint/prefer-for-of": "error",
34 | "@typescript-eslint/prefer-string-starts-ends-with": "error",
35 | "@typescript-eslint/prefer-readonly": "error",
36 | "@typescript-eslint/no-non-null-asserted-optional-chain": "error",
37 | "@typescript-eslint/await-thenable": "error",
38 | "@typescript-eslint/no-unnecessary-boolean-literal-compare": "error",
39 | "@typescript-eslint/switch-exhaustiveness-check": "error"
40 | }
41 | },
42 | {
43 | "files": ["test/*.ts"],
44 | "plugins": ["jest"],
45 | "env": {
46 | "jest/globals": true
47 | },
48 | "rules": {
49 | "@typescript-eslint/no-explicit-any": "off",
50 | "@typescript-eslint/no-var-requires": "off",
51 | "@typescript-eslint/ban-ts-comment": "warn",
52 | "@typescript-eslint/naming-convention": "off"
53 | }
54 | },
55 | {
56 | "files": ["**/*.js"],
57 | "env": {
58 | "browser": true
59 | },
60 | "rules": {}
61 | }
62 | ]
63 | }
64 |
--------------------------------------------------------------------------------
/examples/julia/Manifest.toml:
--------------------------------------------------------------------------------
1 | # This file is machine-generated - editing it directly is not advised
2 |
3 | julia_version = "1.7.0-rc3"
4 | manifest_format = "2.0"
5 |
6 | [[deps.Artifacts]]
7 | uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
8 |
9 | [[deps.BenchmarkTools]]
10 | deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"]
11 | git-tree-sha1 = "61adeb0823084487000600ef8b1c00cc2474cd47"
12 | uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
13 | version = "1.2.0"
14 |
15 | [[deps.CompilerSupportLibraries_jll]]
16 | deps = ["Artifacts", "Libdl"]
17 | uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
18 |
19 | [[deps.Dates]]
20 | deps = ["Printf"]
21 | uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
22 |
23 | [[deps.JSON]]
24 | deps = ["Dates", "Mmap", "Parsers", "Unicode"]
25 | git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
26 | uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
27 | version = "0.21.2"
28 |
29 | [[deps.Libdl]]
30 | uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
31 |
32 | [[deps.LinearAlgebra]]
33 | deps = ["Libdl", "libblastrampoline_jll"]
34 | uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
35 |
36 | [[deps.Logging]]
37 | uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
38 |
39 | [[deps.Mmap]]
40 | uuid = "a63ad114-7e13-5084-954f-fe012c677804"
41 |
42 | [[deps.OpenBLAS_jll]]
43 | deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
44 | uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
45 |
46 | [[deps.Parsers]]
47 | deps = ["Dates"]
48 | git-tree-sha1 = "ae4bbcadb2906ccc085cf52ac286dc1377dceccc"
49 | uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
50 | version = "2.1.2"
51 |
52 | [[deps.Printf]]
53 | deps = ["Unicode"]
54 | uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
55 |
56 | [[deps.Profile]]
57 | deps = ["Printf"]
58 | uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
59 |
60 | [[deps.Random]]
61 | deps = ["SHA", "Serialization"]
62 | uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
63 |
64 | [[deps.SHA]]
65 | uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
66 |
67 | [[deps.Serialization]]
68 | uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
69 |
70 | [[deps.SparseArrays]]
71 | deps = ["LinearAlgebra", "Random"]
72 | uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
73 |
74 | [[deps.Statistics]]
75 | deps = ["LinearAlgebra", "SparseArrays"]
76 | uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
77 |
78 | [[deps.UUIDs]]
79 | deps = ["Random", "SHA"]
80 | uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
81 |
82 | [[deps.Unicode]]
83 | uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
84 |
85 | [[deps.libblastrampoline_jll]]
86 | deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
87 | uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
88 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | Contributing to github-action-benchmark
2 | =======================================
3 |
4 | ## How to add new benchmark tool support
5 |
6 | Thank you for being interested in adding a support for new benchmarking tool.
7 |
8 | At first, please determine how to get benchmark output from the new benchmarking tool.
Some tools support multiple formats for outputting the results. In that case, please choose
the machine-friendly one. For example, if a tool supports both human-readable console output
11 | and machine-friendly JSON output, please choose the latter.
12 |
13 | Adding support for new benchmarking tools is welcome!
14 |
15 | 1. Add your tool name in `src/config.ts`
16 | 2. Implement the logic to extract benchmark results from output in `src/extract.ts`
17 | 3. Add tests for your tool under `test/*.ts`
18 | 4. Add your tool's color in `default_index_html.ts`
19 | 5. Add example project under `examples/` directory
20 | 6. Add workflow to run the example project under `.github/workflows/` directory
21 | 7. Update `.github/workflows/ci.yml` to check your tool works without an error (see below for needed changes)
22 | 8. Add README.md in the example project directory and update README.md at root directory
23 |
The important parts are steps 2 and 3.
25 |
26 | For example, here are commits to add support for `go test -bench`:
27 |
28 | - https://github.com/benchmark-action/github-action-benchmark/commit/6425d898fdfe2ab1540f1af3adb3f37a0ae623f3
29 | - https://github.com/benchmark-action/github-action-benchmark/commit/272a6808eff6b652247813089ab9aef4b8a2bd50
30 | - https://github.com/benchmark-action/github-action-benchmark/commit/3a25daca11153c62be23142120fc6c93b4bd411d
31 |
32 | And for another example, here are commits to add support for `pytest-benchmark`:
33 |
34 | - Implement and add example: https://github.com/benchmark-action/github-action-benchmark/commit/18c82f288b20de1538f8d7a1669221b545968f54
35 | - Add test: https://github.com/benchmark-action/github-action-benchmark/commit/eb449170566ff5882e75eeaeb637f17a302fbf7e
36 | - Add workflows for test and example: https://github.com/benchmark-action/github-action-benchmark/commit/1e4ebf2e9ecde9e7620661c60455b22837a2bdaf
37 | - Add documentation: https://github.com/benchmark-action/github-action-benchmark/commit/895f92f564521597492bd281cbf6c8efd39f628e
38 |
39 | Optional: If you add a new example workflow under `.github/workflows/`, you might want to add your
40 | user name to `alert-comment-cc-users` input like `alert-comment-cc-users: '@rhysd,@you'`.
41 |
If something is unclear to you, please ask questions by creating a new issue.
43 |
44 |
45 |
46 | ## How to create a new release
47 |
48 | 1. Run `$ bash scripts/prepare-release.sh v1`
49 | 2. Check changes with `git diff --cached`
50 | 3. If ok, create a new commit and tag it with `v1.x.y`
51 | 4. Push the tag and commit to `v1` remote repository and make a new release on GitHub
52 |
--------------------------------------------------------------------------------
/src/git.ts:
--------------------------------------------------------------------------------
1 | import { exec } from '@actions/exec';
2 | import * as core from '@actions/core';
3 | import * as github from '@actions/github';
4 |
// Captured result of a spawned process: the accumulated stdout/stderr text
// and the exit code (null until the process has finished, or on failure).
interface ExecResult {
    stdout: string;
    stderr: string;
    code: number | null;
}
10 |
11 | async function capture(cmd: string, args: string[]): Promise {
12 | const res: ExecResult = {
13 | stdout: '',
14 | stderr: '',
15 | code: null,
16 | };
17 |
18 | try {
19 | const code = await exec(cmd, args, {
20 | listeners: {
21 | stdout(data) {
22 | res.stdout += data.toString();
23 | },
24 | stderr(data) {
25 | res.stderr += data.toString();
26 | },
27 | },
28 | });
29 | res.code = code;
30 | return res;
31 | } catch (err) {
32 | const msg = `Command '${cmd}' failed with args '${args.join(' ')}': ${res.stderr}: ${err}`;
33 | core.debug(`@actions/exec.exec() threw an error: ${msg}`);
34 | throw new Error(msg);
35 | }
36 | }
37 |
38 | export async function cmd(...args: string[]): Promise {
39 | core.debug(`Executing Git: ${args.join(' ')}`);
40 | const userArgs = [
41 | '-c',
42 | 'user.name=github-action-benchmark',
43 | '-c',
44 | 'user.email=github@users.noreply.github.com',
45 | '-c',
46 | 'http.https://github.com/.extraheader=', // This config is necessary to support actions/checkout@v2 (#9)
47 | ];
48 | const res = await capture('git', userArgs.concat(args));
49 | if (res.code !== 0) {
50 | throw new Error(`Command 'git ${args.join(' ')}' failed: ${JSON.stringify(res)}`);
51 | }
52 | return res.stdout;
53 | }
54 |
55 | function getRemoteUrl(token: string): string {
56 | const { repo, owner } = github.context.repo;
57 | return `https://x-access-token:${token}@github.com/${owner}/${repo}.git`;
58 | }
59 |
60 | export async function push(token: string, branch: string, ...options: string[]): Promise {
61 | core.debug(`Executing 'git push' to branch '${branch}' with token and options '${options.join(' ')}'`);
62 |
63 | const remote = getRemoteUrl(token);
64 | let args = ['push', remote, `${branch}:${branch}`, '--no-verify'];
65 | if (options.length > 0) {
66 | args = args.concat(options);
67 | }
68 |
69 | return cmd(...args);
70 | }
71 |
72 | export async function pull(token: string | undefined, branch: string, ...options: string[]): Promise {
73 | core.debug(`Executing 'git pull' to branch '${branch}' with token and options '${options.join(' ')}'`);
74 |
75 | const remote = token !== undefined ? getRemoteUrl(token) : 'origin';
76 | let args = ['pull', remote, branch];
77 | if (options.length > 0) {
78 | args = args.concat(options);
79 | }
80 |
81 | return cmd(...args);
82 | }
83 |
--------------------------------------------------------------------------------
/test/data/extract/julia_output.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "Julia": "1.7.0-rc3",
4 | "BenchmarkTools": "1.0.0"
5 | },
6 | [
7 | [
8 | "BenchmarkGroup",
9 | {
10 | "data": {
11 | "fib": [
12 | "BenchmarkGroup",
13 | {
14 | "data": {
15 | "20": [
16 | "TrialEstimate",
17 | {
18 | "allocs": 0,
19 | "time": 31028.0,
20 | "memory": 0,
21 | "params": [
22 | "Parameters",
23 | {
24 | "gctrial": true,
25 | "time_tolerance": 0.05,
26 | "samples": 10000,
27 | "evals": 1,
28 | "gcsample": false,
29 | "seconds": 5.0,
30 | "overhead": 0.0,
31 | "memory_tolerance": 0.01
32 | }
33 | ],
34 | "gctime": 0.0
35 | }
36 | ],
37 | "10": [
38 | "TrialEstimate",
39 | {
40 | "allocs": 0,
41 | "time": 246.03846153846155,
42 | "memory": 0,
43 | "params": [
44 | "Parameters",
45 | {
46 | "gctrial": true,
47 | "time_tolerance": 0.05,
48 | "samples": 10000,
49 | "evals": 390,
50 | "gcsample": false,
51 | "seconds": 5.0,
52 | "overhead": 0.0,
53 | "memory_tolerance": 0.01
54 | }
55 | ],
56 | "gctime": 0.0
57 | }
58 | ]
59 | },
60 | "tags": [
61 | "tag1",
62 | "tag2"
63 | ]
64 | }
65 | ]
66 | },
67 | "tags": []
68 | }
69 | ]
70 | ]
71 | ]
72 |
--------------------------------------------------------------------------------
/examples/cpp/README.md:
--------------------------------------------------------------------------------
1 | C++ example for benchmarking with [Google Benchmark Framework][tool]
2 | ====================================================================
3 |
4 | - [Workflow for this example](../../.github/workflows/cpp.yml)
5 | - [Action log of this example](https://github.com/benchmark-action/github-action-benchmark/actions?query=workflow%3A%22C%2B%2B+Example%22)
6 | - [Benchmark results on GitHub pages](https://benchmark-action.github.io/github-action-benchmark/dev/bench/)
7 |
8 | This directory shows how to use [`github-action-benchmark`][action] with [Google Benchmark Framework][tool].
9 |
10 | ## Run benchmarks
11 |
12 | Official documentation for usage of Google Benchmark Framework:
13 |
14 | https://github.com/google/benchmark
15 |
16 | Build and install `benchmark` library and write up your benchmark suites following instructions in
17 | the above repository:
18 |
19 | ```cpp
20 | #include "benchmark/benchmark.h"
21 |
22 | static void bench1(benchmark::State &state) {
23 | for (auto _ : state) {
24 | // Your benchmark goes here
25 | }
26 | }
27 |
28 | // Register the function as a benchmark
29 | BENCHMARK(bench1);
30 |
31 | // Run the benchmark
32 | BENCHMARK_MAIN();
33 | ```
34 |
35 | Build the source with C++ compiler:
36 |
37 | ```sh
38 | $ clang++ -std=c++14 -O3 -l benchmark bench.cpp
39 | ```
40 |
41 | And run built executable to output the result in JSON format:
42 |
43 | ```sh
44 | $ ./a.out --benchmark_format=json | tee benchmark_result.json
45 | ```
46 |
47 | ## Process benchmark results
48 |
49 | Store the benchmark results with step using the action. Please set `googlecpp` to `tool` input.
50 |
51 | ```yaml
52 | - name: Store benchmark result
53 | uses: benchmark-action/github-action-benchmark@v1
54 | with:
55 | tool: 'googlecpp'
56 | output-file-path: benchmark_result.json
57 | ```
58 |
59 | Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for common usage.
60 |
61 | ## Run this example
62 |
63 | To try this example, please use [make](./Makefile) and `clang++`.
64 |
65 | ```sh
66 | $ make bench
67 | ```
68 |
69 | `bench` subcommand prepares all dependencies, compiles `bench.cpp` and runs benchmarks. The results
70 | are output to console.
71 |
72 | ```
73 | 2019-11-29 21:13:55
74 | Running ./a.out
75 | Run on (4 X 2700 MHz CPU s)
76 | CPU Caches:
77 | L1 Data 32K (x2)
78 | L1 Instruction 32K (x2)
79 | L2 Unified 262K (x2)
80 | L3 Unified 3145K (x1)
81 | Load Average: 1.66, 1.98, 2.49
82 | -----------------------------------------------------
83 | Benchmark Time CPU Iterations
84 | -----------------------------------------------------
85 | fib_10 210 ns 210 ns 3239181
86 | fib_20 27857 ns 27786 ns 25206
87 | ```
88 |
89 | To get JSON output for running [github-action-benchmark][action], please use another subcommand.
90 |
91 | ```sh
92 | $ make json
93 | ```
94 |
95 | `json` subcommand outputs the benchmark results in JSON format and generates `benchmark_result.json`
96 | file in current directory.
97 |
98 | [tool]: https://github.com/google/benchmark
99 | [action]: https://github.com/benchmark-action/github-action-benchmark
100 |
--------------------------------------------------------------------------------
/test/data/extract/catch2_output.txt:
--------------------------------------------------------------------------------
1 |
2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 | Catch2_bench is a Catch v2.11.0 host application.
4 | Run with -? for options
5 |
6 | -------------------------------------------------------------------------------
7 | Fibonacci
8 | -------------------------------------------------------------------------------
9 | /Users/rhayasd/Develop/github.com/benchmark-action/github-action-benchmark/examples/catch2/catch2_bench.cpp:5
10 | ...............................................................................
11 |
12 | benchmark name samples iterations estimated
13 | mean low mean high mean
14 | std dev low std dev high std dev
15 | -------------------------------------------------------------------------------
16 | Fibonacci 10 100 208 7.1968 ms
17 | 344 ns 341 ns 349 ns
18 | 19 ns 11 ns 29 ns
19 |
20 | Fibonacci 20 100 2 8.3712 ms
21 | 41.731 us 41.25 us 42.622 us
22 | 3.256 us 2.163 us 5.353 us
23 |
24 |
25 | -------------------------------------------------------------------------------
26 | More Fibonacci
27 | -------------------------------------------------------------------------------
28 | /Users/rhayasd/Develop/github.com/benchmark-action/github-action-benchmark/examples/catch2/catch2_bench.cpp:13
29 | ...............................................................................
30 |
31 | benchmark name samples iterations estimated
32 | mean low mean high mean
33 | std dev low std dev high std dev
34 | -------------------------------------------------------------------------------
35 | Fibonacci~ 5! 100 1961 7.0596 ms
36 | 36 ns 35 ns 37 ns
37 | 4 ns 3 ns 6 ns
38 |
39 | Fibonacci-15_bench 100 20 7.48 ms
40 | 3.789 us 3.734 us 3.888 us
41 | 362 ns 234 ns 539 ns
42 |
43 |
44 | ===============================================================================
45 | test cases: 2 | 2 passed
46 | assertions: - none -
47 |
48 |
--------------------------------------------------------------------------------
/action.yml:
--------------------------------------------------------------------------------
1 | name: 'Continuous Benchmark'
2 | author: 'github-action-benchmark developers '
3 | description: 'Continuous Benchmark using GitHub pages as dash board for keeping performance'
4 | branding:
5 | icon: 'fast-forward'
6 | color: 'blue'
7 |
8 | inputs:
9 | name:
10 | description: 'Name of the benchmark. This value must be identical among all benchmarks'
11 | required: true
12 | default: 'Benchmark'
13 | tool:
    description: 'Tool used to get benchmark output. One of "cargo", "go", "benchmarkjs", "pytest", "googlecpp", "catch2", "julia", "customBiggerIsBetter", "customSmallerIsBetter"'
15 | required: true
16 | output-file-path:
17 | description: 'A path to file which contains the benchmark output'
18 | required: true
19 | gh-pages-branch:
20 | description: 'Branch for gh-pages'
21 | required: true
22 | default: 'gh-pages'
23 | benchmark-data-dir-path:
24 | description: 'Path to directory which contains benchmark files on GitHub pages branch'
25 | required: true
26 | default: 'dev/bench'
27 | github-token:
28 | description: 'GitHub API token to pull/push GitHub pages branch and deploy GitHub pages. For public repository, this must be personal access token for now. Please read README.md for more details'
29 | required: false
30 | auto-push:
31 | description: 'Push GitHub Pages branch to remote automatically. This option requires github-token input'
32 | required: false
33 | default: false
34 | skip-fetch-gh-pages:
35 | description: 'Skip pulling GitHub Pages branch before generating an auto commit'
36 | required: false
37 | default: false
38 | comment-always:
39 | description: 'Leave a comment with benchmark result comparison. To enable this feature, github-token input must be given as well'
40 | required: false
41 | default: false
42 | save-data-file:
43 | description: 'Save the benchmark data to external file'
44 | required: false
45 | default: true
46 | comment-on-alert:
47 | description: 'Leave an alert comment when current benchmark result is worse than previous. Threshold is specified with alert-comment-threshold input. To enable this feature, github-token input must be given as well'
48 | required: false
49 | default: false
50 | alert-threshold:
51 | description: 'Threshold which determines if an alert should happen or not. Percentage value such as "150%". For example, 150% means that an alert happens when current benchmark result is 1.5x worse than previous'
52 | required: false
53 | default: '200%'
54 | fail-on-alert:
55 | description: 'Workflow fails when alert comment happens'
56 | required: false
57 | # Note: Set to false by default since this action does not push to remote by default. When workflow
58 | # fails and auto-push is not set, there is no chance to push the result to remote.
59 | default: false
60 | fail-threshold:
61 | description: 'Threshold which determines if the current workflow fails. Format is the same as alert-threshold input. If this value is not specified, the same value as alert-threshold is used'
62 | required: false
63 | alert-comment-cc-users:
64 | description: 'Comma separated GitHub user names which start with @ (e.g. "@foo,@bar"). They will be mentioned in commit comment for alert.'
65 | required: false
66 | external-data-json-path:
67 | description: 'JSON data file for storing benchmark results. When this input is set, github-action-benchmark no longer uses Git branch to store data. Instead, it reads and appends benchmark data from/to the file. User must store the file anywhere'
68 | required: false
69 | max-items-in-chart:
70 | description: 'Max data points in a benchmark chart to avoid making the chart too busy. Value must be unsigned integer. No limit by default'
71 | required: false
72 |
73 | runs:
74 | using: 'node12'
75 | main: 'dist/src/index.js'
76 |
--------------------------------------------------------------------------------
/test/data/extract/pytest_output.txt:
--------------------------------------------------------------------------------
1 | {
2 | "machine_info": {
3 | "node": "Corgi.local",
4 | "processor": "i386",
5 | "machine": "x86_64",
6 | "python_compiler": "Clang 11.0.0 (clang-1100.0.33.8)",
7 | "python_implementation": "CPython",
8 | "python_implementation_version": "3.7.5",
9 | "python_version": "3.7.5",
10 | "python_build": [
11 | "default",
12 | "Nov 1 2019 02:16:32"
13 | ],
14 | "release": "18.7.0",
15 | "system": "Darwin",
16 | "cpu": {
17 | "vendor_id": "GenuineIntel",
18 | "hardware": "unknown",
19 | "brand": "Intel(R) Core(TM) i7-4771 CPU @ 3.50GHz"
20 | }
21 | },
22 | "commit_info": {
23 | "id": "9d55ab0434c9088746ae2d134702d782bf088644",
24 | "time": "2019-11-16T12:10:30+09:00",
25 | "author_time": "2019-11-16T12:10:30+09:00",
26 | "dirty": true,
27 | "project": "github-action-benchmark",
28 | "branch": "pytest"
29 | },
30 | "benchmarks": [
31 | {
32 | "group": null,
33 | "name": "test_fib_10",
34 | "fullname": "bench.py::test_fib_10",
35 | "params": null,
36 | "param": null,
37 | "extra_info": {},
38 | "options": {
39 | "disable_gc": false,
40 | "timer": "perf_counter",
41 | "min_rounds": 5,
42 | "max_time": 1.0,
43 | "min_time": 5e-06,
44 | "warmup": false
45 | },
46 | "stats": {
47 | "min": 2.2067000000181025e-05,
48 | "max": 0.0003125999999999962,
49 | "mean": 2.408868133322941e-05,
50 | "stddev": 6.175090189861328e-06,
51 | "rounds": 38523,
52 | "median": 2.2300000000363696e-05,
53 | "iqr": 1.2670000003600634e-06,
54 | "q1": 2.2255999999831744e-05,
55 | "q3": 2.3523000000191807e-05,
56 | "iqr_outliers": 2896,
57 | "stddev_outliers": 2523,
58 | "outliers": "2523;2896",
59 | "ld15iqr": 2.2067000000181025e-05,
60 | "hd15iqr": 2.5426000000106086e-05,
61 | "ops": 41513.272817492856,
62 | "total": 0.9279682709999966,
63 | "data": [
64 | 3.215300000025678e-05,
65 | 2.8706999999794647e-05,
66 | 2.489499999969169e-05,
67 | 2.487300000009185e-05,
68 | "... omitted to keep this file small"
69 | ],
70 | "iterations": 1
71 | }
72 | },
73 | {
74 | "group": null,
75 | "name": "test_fib_20",
76 | "fullname": "bench.py::test_fib_20",
77 | "params": null,
78 | "param": null,
79 | "extra_info": {},
80 | "options": {
81 | "disable_gc": false,
82 | "timer": "perf_counter",
83 | "min_rounds": 5,
84 | "max_time": 1.0,
85 | "min_time": 5e-06,
86 | "warmup": false
87 | },
88 | "stats": {
89 | "min": 0.0027349359999999656,
90 | "max": 0.003903139999999805,
91 | "mean": 0.002985030672661863,
92 | "stddev": 0.0001745301654140968,
93 | "rounds": 278,
94 | "median": 0.0029605825000003083,
95 | "iqr": 0.00021968899999968983,
96 | "q1": 0.0028489629999999266,
97 | "q3": 0.0030686519999996165,
98 | "iqr_outliers": 5,
99 | "stddev_outliers": 89,
100 | "outliers": "89;5",
101 | "ld15iqr": 0.0027349359999999656,
102 | "hd15iqr": 0.0034360730000000395,
103 | "ops": 335.0049328331567,
104 | "total": 0.8298385269999979,
105 | "data": [
106 | 0.0032874509999998303,
107 | 0.00317211499999992,
108 | 0.0029934199999996913,
109 | 0.00316091800000029,
110 | "... omitted to keep this file small"
111 | ],
112 | "iterations": 1
113 | }
114 | }
115 | ],
116 | "datetime": "2019-11-17T02:22:16.817862",
117 | "version": "3.2.2"
118 | }
119 |
--------------------------------------------------------------------------------
/test/git.spec.ts:
--------------------------------------------------------------------------------
1 | import { deepStrictEqual as eq, notDeepStrictEqual as neq, strict as A } from 'assert';
2 | import { cmd, pull, push } from '../src/git';
3 |
// Shape of the options object the faked exec() receives; only the output
// stream listeners matter to these tests.
interface ExecOptions {
    listeners: {
        stdout(b: Buffer): void;
        stderr(b: Buffer): void;
    };
}
10 |
11 | class FakedExec {
12 | lastArgs: [string, string[], ExecOptions] | null;
13 | stdout: string;
14 | stderr: string | null;
15 | exitCode: number;
16 | error: string | null;
17 |
18 | constructor() {
19 | this.lastArgs = null;
20 | this.stdout = 'this is test';
21 | this.stderr = null;
22 | this.exitCode = 0;
23 | this.error = null;
24 | }
25 |
26 | reset() {
27 | this.lastArgs = null;
28 | this.stdout = 'this is test';
29 | this.stderr = null;
30 | this.exitCode = 0;
31 | this.error = null;
32 | }
33 | }
34 |
// Shared fake replacing @actions/exec; reset after each test case.
const fakedExec = new FakedExec();
// Minimal stand-in for github.context consumed by getRemoteUrl() in src/git.ts.
const gitHubContext = {
    repo: {
        repo: 'repo',
        owner: 'user',
    },
} as {
    repo: {
        repo: string;
        owner: string;
    };
};
47 |
// Replace @actions/exec with a fake that records the call arguments, feeds the
// configured stdout/stderr into the listeners, and resolves with the configured
// exit code (or rejects with the configured error).
jest.mock('@actions/exec', () => ({
    exec: (c: string, a: string[], o: ExecOptions) => {
        fakedExec.lastArgs = [c, a, o];
        o.listeners.stdout(Buffer.from(fakedExec.stdout));
        if (fakedExec.stderr !== null) {
            o.listeners.stderr(Buffer.from(fakedExec.stderr));
        }
        if (fakedExec.error === null) {
            return Promise.resolve(fakedExec.exitCode);
        } else {
            return Promise.reject(new Error(fakedExec.error));
        }
    },
}));
// Silence core.debug() calls made by src/git.ts during the tests.
jest.mock('@actions/core', () => ({
    debug: () => {
        /* do nothing */
    },
}));
// Serve the faked repository context to @actions/github consumers.
jest.mock('@actions/github', () => ({
    get context() {
        return gitHubContext;
    },
}));
72 |
// Assertion helper typed so TypeScript narrows the checked value after the call.
const ok: (x: any) => asserts x = A.ok;
// Git config arguments that cmd() in src/git.ts always prepends; tests compare
// recorded arguments against this prefix.
const userArgs = [
    '-c',
    'user.name=github-action-benchmark',
    '-c',
    'user.email=github@users.noreply.github.com',
    '-c',
    'http.https://github.com/.extraheader=',
];
82 |
// Tests for src/git.ts against the faked @actions/* modules above. Each case
// inspects fakedExec.lastArgs to verify the exact git invocation.
describe('git', function () {
    afterAll(function () {
        jest.unmock('@actions/exec');
        jest.unmock('@actions/core');
        jest.unmock('@actions/github');
    });

    // Restore the fake's default stdout/exit code/error between cases.
    afterEach(function () {
        fakedExec.reset();
    });

    describe('cmd()', function () {
        it('runs Git command successfully', async function () {
            const stdout = await cmd('log', '--oneline');
            const args = fakedExec.lastArgs;

            eq(stdout, 'this is test');
            ok(args);
            eq(args[0], 'git');
            eq(args[1], userArgs.concat(['log', '--oneline']));
            ok('listeners' in (args[2] as object));
        });

        it('raises an error when command returns non-zero exit code', async function () {
            fakedExec.exitCode = 101;
            await A.rejects(() => cmd('show'), /^Error: Command 'git show' failed: /);
            neq(fakedExec.lastArgs, null);
        });

        it('raises an error with stderr output', async function () {
            fakedExec.exitCode = 101;
            fakedExec.stderr = 'this is error output!';
            await A.rejects(() => cmd('show'), /this is error output!/);
        });

        it('raises an error when exec.exec() threw an error', async function () {
            fakedExec.error = 'this is error from exec.exec';
            fakedExec.stderr = 'this is stderr output!';
            // The thrown message must contain both the original error and stderr.
            await A.rejects(() => cmd('show'), /this is error from exec\.exec/);
            await A.rejects(() => cmd('show'), /this is stderr output!/);
        });
    });

    describe('push()', function () {
        it('runs `git push` with given branch and options', async function () {
            const stdout = await push('this-is-token', 'my-branch', 'opt1', 'opt2');
            const args = fakedExec.lastArgs;

            eq(stdout, 'this is test');
            ok(args);
            eq(args[0], 'git');
            eq(
                args[1],
                userArgs.concat([
                    'push',
                    'https://x-access-token:this-is-token@github.com/user/repo.git',
                    'my-branch:my-branch',
                    '--no-verify',
                    'opt1',
                    'opt2',
                ]),
            );
        });
    });

    describe('pull()', function () {
        it('runs `git pull` with given branch and options with token', async function () {
            const stdout = await pull('this-is-token', 'my-branch', 'opt1', 'opt2');
            const args = fakedExec.lastArgs;

            eq(stdout, 'this is test');
            ok(args);
            eq(args[0], 'git');
            eq(
                args[1],
                userArgs.concat([
                    'pull',
                    'https://x-access-token:this-is-token@github.com/user/repo.git',
                    'my-branch',
                    'opt1',
                    'opt2',
                ]),
            );
        });

        // Without a token, pull() must fall back to the 'origin' remote.
        it('runs `git pull` with given branch and options without token', async function () {
            const stdout = await pull(undefined, 'my-branch', 'opt1', 'opt2');
            const args = fakedExec.lastArgs;

            eq(stdout, 'this is test');
            ok(args);
            eq(args[0], 'git');
            eq(args[1], userArgs.concat(['pull', 'origin', 'my-branch', 'opt1', 'opt2']));
        });
    });
});
179 |
--------------------------------------------------------------------------------
/test/data/extract/pytest_several_units.json:
--------------------------------------------------------------------------------
1 | {
2 | "machine_info": {
3 | "node": "Corgi.local",
4 | "processor": "i386",
5 | "machine": "x86_64",
6 | "python_compiler": "Clang 8.1.0 (clang-802.0.42)",
7 | "python_implementation": "CPython",
8 | "python_implementation_version": "3.7.5",
9 | "python_version": "3.7.5",
10 | "python_build": ["default", "Nov 28 2019 13:59:33"],
11 | "release": "16.7.0",
12 | "system": "Darwin",
13 | "cpu": {
14 | "vendor_id": "GenuineIntel",
15 | "hardware": "unknown",
16 | "brand": "Intel(R) Core(TM) i5-5257U CPU @ 2.70GHz"
17 | }
18 | },
19 | "commit_info": {
20 | "id": "b8e9e2af2493a1ee2f3fa758a9572cd9972e6db7",
21 | "time": "2019-12-06T16:36:53+09:00",
22 | "author_time": "2019-12-06T16:30:53+09:00",
23 | "dirty": true,
24 | "project": "github-action-benchmark",
25 | "branch": "master"
26 | },
27 | "benchmarks": [
28 | {
29 | "group": null,
30 | "name": "test_fib_1",
31 | "fullname": "bench.py::test_fib_1",
32 | "params": null,
33 | "param": null,
34 | "extra_info": {},
35 | "options": {
36 | "disable_gc": false,
37 | "timer": "perf_counter",
38 | "min_rounds": 5,
39 | "max_time": 1,
40 | "min_time": 0.000005,
41 | "warmup": false
42 | },
43 | "stats": {
44 | "min": 1.422099999981441e-7,
45 | "max": 0.000001708769999999582,
46 | "mean": 1.4995610248628836e-7,
47 | "stddev": 2.9351731952139377e-8,
48 | "rounds": 68536,
49 | "median": 1.437600000020467e-7,
50 | "iqr": 6.699999977044534e-10,
51 | "q1": 1.4348000000108384e-7,
52 | "q3": 1.441499999987883e-7,
53 | "iqr_outliers": 10729,
54 | "stddev_outliers": 1999,
55 | "outliers": "1999;10729",
56 | "ld15iqr": 1.4247999999827953e-7,
57 | "hd15iqr": 1.4515999999797912e-7,
58 | "ops": 6668618.238403659,
59 | "total": 0.010277391440000352,
60 | "data": [
61 | 1.4538999999924586e-7,
62 | 1.4500999999889076e-7,
63 | 1.4345999999942904e-7,
64 | 1.4399000000109297e-7,
65 | "... omitted to keep this file small"
66 | ],
67 | "iterations": 100
68 | }
69 | },
70 | {
71 | "group": null,
72 | "name": "test_fib_10",
73 | "fullname": "bench.py::test_fib_10",
74 | "params": null,
75 | "param": null,
76 | "extra_info": {},
77 | "options": {
78 | "disable_gc": false,
79 | "timer": "perf_counter",
80 | "min_rounds": 5,
81 | "max_time": 1,
82 | "min_time": 0.000005,
83 | "warmup": false
84 | },
85 | "stats": {
86 | "min": 0.000027831000000144712,
87 | "max": 0.00019098799999994753,
88 | "mean": 0.00002885754012484424,
89 | "stddev": 0.000005235937482008476,
90 | "rounds": 20025,
91 | "median": 0.000028052999999861328,
92 | "iqr": 1.2500000012849455e-7,
93 | "q1": 0.00002800099999999972,
94 | "q3": 0.000028126000000128215,
95 | "iqr_outliers": 1664,
96 | "stddev_outliers": 510,
97 | "outliers": "510;1664",
98 | "ld15iqr": 0.000027831000000144712,
99 | "hd15iqr": 0.000028314000000140283,
100 | "ops": 34652.98828915334,
101 | "total": 0.5778722410000059,
102 | "data": [
103 | 0.00004389500000012703,
104 | 0.00003724299999996461,
105 | 0.000042176000000004876,
106 | 0.000039165000000007666,
107 | "... omitted to keep this file small"
108 | ],
109 | "iterations": 1
110 | }
111 | },
112 | {
113 | "group": null,
114 | "name": "test_fib_20",
115 | "fullname": "bench.py::test_fib_20",
116 | "params": null,
117 | "param": null,
118 | "extra_info": {},
119 | "options": {
120 | "disable_gc": false,
121 | "timer": "perf_counter",
122 | "min_rounds": 5,
123 | "max_time": 1,
124 | "min_time": 0.000005,
125 | "warmup": false
126 | },
127 | "stats": {
128 | "min": 0.0034516560000001917,
129 | "max": 0.005351305000000028,
130 | "mean": 0.003611916368852473,
131 | "stddev": 0.0003737982822178215,
132 | "rounds": 122,
133 | "median": 0.003471104000000169,
134 | "iqr": 0.00010164100000009668,
135 | "q1": 0.003456519999999852,
136 | "q3": 0.0035581609999999486,
137 | "iqr_outliers": 18,
138 | "stddev_outliers": 8,
139 | "outliers": "8;18",
140 | "ld15iqr": 0.0034516560000001917,
141 | "hd15iqr": 0.003772042000000031,
142 | "ops": 276.8613383807958,
143 | "total": 0.44065379700000173,
144 | "data": [
145 | 0.0034733610000001747,
146 | 0.005047906999999796,
147 | 0.0035023750000000575,
148 | 0.0035686130000001093,
149 | "... omitted to keep this file small"
150 | ],
151 | "iterations": 1
152 | }
153 | },
154 | {
155 | "group": null,
156 | "name": "test_sleep_2",
157 | "fullname": "bench.py::test_sleep_2",
158 | "params": null,
159 | "param": null,
160 | "extra_info": {},
161 | "options": {
162 | "disable_gc": false,
163 | "timer": "perf_counter",
164 | "min_rounds": 5,
165 | "max_time": 1,
166 | "min_time": 0.000005,
167 | "warmup": false
168 | },
169 | "stats": {
170 | "min": 2.001044877,
171 | "max": 2.005173128,
172 | "mean": 2.0038430469999997,
173 | "stddev": 0.0018776587251587858,
174 | "rounds": 5,
175 | "median": 2.005124842999999,
176 | "iqr": 0.002820521000000298,
177 | "q1": 2.0023191352499996,
178 | "q3": 2.00513965625,
179 | "iqr_outliers": 0,
180 | "stddev_outliers": 1,
181 | "outliers": "1;0",
182 | "ld15iqr": 2.001044877,
183 | "hd15iqr": 2.005173128,
184 | "ops": 0.49904108083570886,
185 | "total": 10.019215234999999,
186 | "data": [2.005173128, 2.0051284989999996, 2.005124842999999, 2.0027438879999995, 2.001044877],
187 | "iterations": 1
188 | }
189 | }
190 | ],
191 | "datetime": "2019-12-06T08:58:58.115974",
192 | "version": "3.2.2"
193 | }
194 |
--------------------------------------------------------------------------------
/src/default_index_html.ts:
--------------------------------------------------------------------------------
1 | export const DEFAULT_INDEX_HTML = String.raw`
2 |
3 |
4 |
5 |
6 |
86 | Benchmarks
87 |
88 |
89 |
90 |
100 |
101 |
106 |
107 |
108 |
109 |
277 |
278 |
279 | `;
280 |
--------------------------------------------------------------------------------
/src/config.ts:
--------------------------------------------------------------------------------
1 | import * as core from '@actions/core';
2 | import { promises as fs } from 'fs';
3 | import * as os from 'os';
4 | import * as path from 'path';
5 |
// Names of benchmark tools whose output this action can parse.
export type ToolType =
    | 'cargo'
    | 'go'
    | 'benchmarkjs'
    | 'pytest'
    | 'googlecpp'
    | 'catch2'
    | 'julia'
    | 'customBiggerIsBetter'
    | 'customSmallerIsBetter';
// Resolved action configuration derived from the workflow inputs
// (see action.yml for the corresponding input descriptions).
export interface Config {
    name: string;
    tool: ToolType;
    outputFilePath: string;
    ghPagesBranch: string;
    benchmarkDataDirPath: string;
    githubToken: string | undefined;
    autoPush: boolean;
    skipFetchGhPages: boolean;
    commentAlways: boolean;
    saveDataFile: boolean;
    commentOnAlert: boolean;
    alertThreshold: number;
    failOnAlert: boolean;
    failThreshold: number;
    alertCommentCcUsers: string[];
    externalDataJsonPath: string | undefined;
    maxItemsInChart: number | null; // null means no limit
}
35 |
/** Runtime list of accepted tool names; must stay in sync with the ToolType union. */
export const VALID_TOOLS: ToolType[] = [
    'cargo',
    'go',
    'benchmarkjs',
    'pytest',
    'googlecpp',
    'catch2',
    'julia',
    'customBiggerIsBetter',
    'customSmallerIsBetter',
];
// Unsigned decimal integer: digits only, no sign, no decimal point.
const RE_UINT = /^\d+$/;
48 |
49 | function validateToolType(tool: string): asserts tool is ToolType {
50 | if ((VALID_TOOLS as string[]).includes(tool)) {
51 | return;
52 | }
53 | throw new Error(`Invalid value '${tool}' for 'tool' input. It must be one of ${VALID_TOOLS}`);
54 | }
55 |
56 | function resolvePath(p: string): string {
57 | if (p.startsWith('~')) {
58 | const home = os.homedir();
59 | if (!home) {
60 | throw new Error(`Cannot resolve '~' in ${p}`);
61 | }
62 | p = path.join(home, p.slice(1));
63 | }
64 | return path.resolve(p);
65 | }
66 |
67 | async function resolveFilePath(p: string): Promise {
68 | p = resolvePath(p);
69 |
70 | let s;
71 | try {
72 | s = await fs.stat(p);
73 | } catch (e) {
74 | throw new Error(`Cannot stat '${p}': ${e}`);
75 | }
76 |
77 | if (!s.isFile()) {
78 | throw new Error(`Specified path '${p}' is not a file`);
79 | }
80 |
81 | return p;
82 | }
83 |
84 | async function validateOutputFilePath(filePath: string): Promise {
85 | try {
86 | return await resolveFilePath(filePath);
87 | } catch (err) {
88 | throw new Error(`Invalid value for 'output-file-path' input: ${err}`);
89 | }
90 | }
91 |
92 | function validateGhPagesBranch(branch: string) {
93 | if (branch) {
94 | return;
95 | }
96 | throw new Error(`Branch value must not be empty for 'gh-pages-branch' input`);
97 | }
98 |
99 | function validateBenchmarkDataDirPath(dirPath: string): string {
100 | try {
101 | return resolvePath(dirPath);
102 | } catch (e) {
103 | throw new Error(`Invalid value for 'benchmark-data-dir-path': ${e}`);
104 | }
105 | }
106 |
107 | function validateName(name: string) {
108 | if (name) {
109 | return;
110 | }
111 | throw new Error('Name must not be empty');
112 | }
113 |
114 | function validateGitHubToken(inputName: string, githubToken: string | undefined, todo: string) {
115 | if (!githubToken) {
116 | throw new Error(`'${inputName}' is enabled but 'github-token' is not set. Please give API token ${todo}`);
117 | }
118 | }
119 |
120 | function getBoolInput(name: string): boolean {
121 | const input = core.getInput(name);
122 | if (!input) {
123 | return false;
124 | }
125 | if (input !== 'true' && input !== 'false') {
126 | throw new Error(`'${name}' input must be boolean value 'true' or 'false' but got '${input}'`);
127 | }
128 | return input === 'true';
129 | }
130 |
131 | function getPercentageInput(name: string): number | null {
132 | const input = core.getInput(name);
133 | if (!input) {
134 | return null;
135 | }
136 | if (!input.endsWith('%')) {
137 | throw new Error(`'${name}' input must ends with '%' for percentage value (e.g. '200%')`);
138 | }
139 |
140 | const percentage = parseFloat(input.slice(0, -1)); // Omit '%' at last
141 | if (isNaN(percentage)) {
142 | throw new Error(`Specified value '${input.slice(0, -1)}' in '${name}' input cannot be parsed as float number`);
143 | }
144 |
145 | return percentage / 100;
146 | }
147 |
148 | function getCommaSeparatedInput(name: string): string[] {
149 | const input = core.getInput(name);
150 | if (!input) {
151 | return [];
152 | }
153 | return input.split(',').map((s) => s.trim());
154 | }
155 |
156 | function validateAlertCommentCcUsers(users: string[]) {
157 | for (const u of users) {
158 | if (!u.startsWith('@')) {
159 | throw new Error(`User name in 'alert-comment-cc-users' input must start with '@' but got '${u}'`);
160 | }
161 | }
162 | }
163 |
164 | async function isDir(path: string) {
165 | try {
166 | const s = await fs.stat(path);
167 | return s.isDirectory();
168 | } catch (_) {
169 | return false;
170 | }
171 | }
172 |
173 | async function validateExternalDataJsonPath(path: string | undefined, autoPush: boolean): Promise {
174 | if (!path) {
175 | return Promise.resolve(undefined);
176 | }
177 | if (autoPush) {
178 | throw new Error(
179 | 'auto-push must be false when external-data-json-path is set since this action reads/writes the given JSON file and never pushes to remote',
180 | );
181 | }
182 | try {
183 | const p = resolvePath(path);
184 | if (await isDir(p)) {
185 | throw new Error(`Specified path '${p}' must be file but it is actually directory`);
186 | }
187 | return p;
188 | } catch (err) {
189 | throw new Error(`Invalid value for 'external-data-json-path' input: ${err}`);
190 | }
191 | }
192 |
193 | function getUintInput(name: string): number | null {
194 | const input = core.getInput(name);
195 | if (!input) {
196 | return null;
197 | }
198 | if (!RE_UINT.test(input)) {
199 | throw new Error(`'${name}' input must be unsigned integer but got '${input}'`);
200 | }
201 | const i = parseInt(input, 10);
202 | if (isNaN(i)) {
203 | throw new Error(`Unsigned integer value '${input}' in '${name}' input was parsed as NaN`);
204 | }
205 | return i;
206 | }
207 |
208 | function validateMaxItemsInChart(max: number | null) {
209 | if (max !== null && max <= 0) {
210 | throw new Error(`'max-items-in-chart' input value must be one or more but got ${max}`);
211 | }
212 | }
213 |
214 | function validateAlertThreshold(alertThreshold: number | null, failThreshold: number | null): asserts alertThreshold {
215 | if (alertThreshold === null) {
216 | throw new Error("'alert-threshold' input must not be empty");
217 | }
218 | if (failThreshold && alertThreshold > failThreshold) {
219 | throw new Error(
220 | `'alert-threshold' value must be smaller than 'fail-threshold' value but got ${alertThreshold} > ${failThreshold}`,
221 | );
222 | }
223 | }
224 |
225 | export async function configFromJobInput(): Promise {
226 | const tool: string = core.getInput('tool');
227 | let outputFilePath: string = core.getInput('output-file-path');
228 | const ghPagesBranch: string = core.getInput('gh-pages-branch');
229 | let benchmarkDataDirPath: string = core.getInput('benchmark-data-dir-path');
230 | const name: string = core.getInput('name');
231 | const githubToken: string | undefined = core.getInput('github-token') || undefined;
232 | const autoPush = getBoolInput('auto-push');
233 | const skipFetchGhPages = getBoolInput('skip-fetch-gh-pages');
234 | const commentAlways = getBoolInput('comment-always');
235 | const saveDataFile = getBoolInput('save-data-file');
236 | const commentOnAlert = getBoolInput('comment-on-alert');
237 | const alertThreshold = getPercentageInput('alert-threshold');
238 | const failOnAlert = getBoolInput('fail-on-alert');
239 | const alertCommentCcUsers = getCommaSeparatedInput('alert-comment-cc-users');
240 | let externalDataJsonPath: undefined | string = core.getInput('external-data-json-path');
241 | const maxItemsInChart = getUintInput('max-items-in-chart');
242 | let failThreshold = getPercentageInput('fail-threshold');
243 |
244 | validateToolType(tool);
245 | outputFilePath = await validateOutputFilePath(outputFilePath);
246 | validateGhPagesBranch(ghPagesBranch);
247 | benchmarkDataDirPath = validateBenchmarkDataDirPath(benchmarkDataDirPath);
248 | validateName(name);
249 | if (autoPush) {
250 | validateGitHubToken('auto-push', githubToken, 'to push GitHub pages branch to remote');
251 | }
252 | if (commentAlways) {
253 | validateGitHubToken('comment-always', githubToken, 'to send commit comment');
254 | }
255 | if (commentOnAlert) {
256 | validateGitHubToken('comment-on-alert', githubToken, 'to send commit comment on alert');
257 | }
258 | validateAlertThreshold(alertThreshold, failThreshold);
259 | validateAlertCommentCcUsers(alertCommentCcUsers);
260 | externalDataJsonPath = await validateExternalDataJsonPath(externalDataJsonPath, autoPush);
261 | validateMaxItemsInChart(maxItemsInChart);
262 | if (failThreshold === null) {
263 | failThreshold = alertThreshold;
264 | }
265 |
266 | return {
267 | name,
268 | tool,
269 | outputFilePath,
270 | ghPagesBranch,
271 | benchmarkDataDirPath,
272 | githubToken,
273 | autoPush,
274 | skipFetchGhPages,
275 | commentAlways,
276 | saveDataFile,
277 | commentOnAlert,
278 | alertThreshold,
279 | failOnAlert,
280 | alertCommentCcUsers,
281 | externalDataJsonPath,
282 | maxItemsInChart,
283 | failThreshold,
284 | };
285 | }
286 |
--------------------------------------------------------------------------------
/scripts/ci_validate_modification.ts:
--------------------------------------------------------------------------------
1 | import * as path from 'path';
2 | import { promises as fs } from 'fs';
3 | import * as cp from 'child_process';
4 | import { DataJson, BenchmarkSuites, SCRIPT_PREFIX } from '../src/write';
5 | import { VALID_TOOLS } from '../src/config';
6 | import { Benchmark } from '../src/extract';
7 | import { diff, Diff, DiffNew, DiffEdit, DiffArray } from 'deep-diff';
8 | import deepEq = require('deep-equal');
9 |
10 | function help(): never {
11 | throw new Error('Usage: node ci_validate_modification.js before_data.js "bechmark name"');
12 | }
13 |
14 | async function exec(cmd: string): Promise {
15 | console.log(`+ ${cmd}`);
16 | return new Promise((resolve, reject) => {
17 | cp.exec(cmd, (err, stdout, stderr) => {
18 | if (err) {
19 | reject(new Error(`Exec '${cmd}' failed with error ${err.message}. Stderr: '${stderr}'`));
20 | return;
21 | }
22 | resolve(stdout);
23 | });
24 | });
25 | }
26 |
/**
 * Reads a data.js file written by the action and parses its payload as JSON.
 * The file begins with SCRIPT_PREFIX (imported from src/write), which is
 * stripped before JSON.parse.
 */
// NOTE(review): the return type lost its type argument during extraction;
// it should presumably be Promise<DataJson> — confirm against src/write.
async function readDataJson(file: string): Promise {
    const content = await fs.readFile(file, 'utf8');
    return JSON.parse(content.slice(SCRIPT_PREFIX.length));
}
31 |
32 | function validateDataJson(data: DataJson) {
33 | const { lastUpdate, repoUrl, entries: suites } = data;
34 | const now = Date.now();
35 | if (lastUpdate > now) {
36 | throw new Error(`Last update is not correct: ${lastUpdate} v.s. ${now}`);
37 | }
38 |
39 | if (!/^https:\/\/github\.com\/[^/]+\/github-action-benchmark$/.test(repoUrl)) {
40 | throw new Error(`repoUrl is not correct: ${repoUrl}`);
41 | }
42 |
43 | for (const benchName of Object.keys(suites)) {
44 | for (const suite of suites[benchName]) {
45 | const { commit, tool, date, benches } = suite;
46 | if (!(VALID_TOOLS as string[]).includes(tool)) {
47 | throw new Error(`Invalid tool ${tool}`);
48 | }
49 | if (
50 | !/^https:\/\/github\.com\/[^/]+\/github-action-benchmark\/commit\//.test(commit.url) &&
51 | !/\/pull\/\d+\/commits\/[a-f0-9]+$/.test(commit.url)
52 | ) {
53 | throw new Error(`Invalid commit url: ${commit.url}`);
54 | }
55 | if (!commit.url.endsWith(commit.id)) {
56 | throw new Error(`Commit ID ${commit.id} does not match to URL ${commit.url}`);
57 | }
58 | if (date > now) {
59 | throw new Error(`Benchmark date is not correct: ${date} v.s. ${now}`);
60 | }
61 | for (const bench of benches) {
62 | const { name, value, unit, range, extra } = bench;
63 | const json = JSON.stringify(bench);
64 | if (!name) {
65 | throw new Error(`Benchmark result name is invalid: ${name} (${json})`);
66 | }
67 | if (typeof value !== 'number' || isNaN(value)) {
68 | throw new Error(`Benchmark result value is invalid: ${value} (${json})`);
69 | }
70 | if (typeof unit !== 'string') {
71 | throw new Error(`Benchmark result unit is invalid: ${unit} (${json})`);
72 | }
73 | if (range && typeof range !== 'string') {
74 | throw new Error(`Benchmark result range is invalid: ${range} (${json})`);
75 | }
76 | if (extra && typeof extra !== 'string') {
77 | throw new Error(`Benchmark result extra is invalid: ${extra} (${json})`);
78 | }
79 | }
80 | }
81 | }
82 | }
83 |
84 | function assertNumberDiffEdit(diff: Diff): asserts diff is DiffEdit {
85 | if (diff.kind !== 'E') {
86 | throw new Error(`Given diff is not DiffEdit: ${JSON.stringify(diff)}`);
87 | }
88 | if (typeof diff.lhs !== 'number') {
89 | throw new Error(`Given DiffEdit's lhs is not for number: ${diff.lhs}`);
90 | }
91 | if (typeof diff.rhs !== 'number') {
92 | throw new Error(`Given DiffEdit's rhs is not for number: ${diff.rhs}`);
93 | }
94 | }
95 |
96 | function validateLastUpdateMod(diff: Diff) {
97 | assertNumberDiffEdit(diff);
98 | if (!deepEq(diff.path, ['lastUpdate'])) {
99 | throw new Error(`Not diff for lastUpdate: ${JSON.stringify(diff.path)}`);
100 | }
101 | const { lhs, rhs } = diff;
102 | if (lhs >= rhs) {
103 | throw new Error(`Update of datetime is not correct. New is older: ${lhs} v.s. ${rhs}`);
104 | }
105 | }
106 |
107 | function assertDiffArray(diff: Diff): asserts diff is DiffArray {
108 | if (diff.kind !== 'A') {
109 | throw new Error(`Given diff is not DiffArray: ${JSON.stringify(diff)}`);
110 | }
111 | }
112 |
113 | function assertDiffNewBench(diff: Diff): asserts diff is DiffNew {
114 | if (diff.kind !== 'N') {
115 | throw new Error(`Given diff is not DiffNew: ${JSON.stringify(diff)}`);
116 | }
117 | const { rhs } = diff;
118 | if (typeof rhs !== 'object' || rhs === null) {
119 | throw new Error(`DiffNew for Benchmark object is actually not a object: ${rhs}`);
120 | }
121 | for (const prop of ['commit', 'date', 'tool', 'benches']) {
122 | if (!(prop in rhs)) {
123 | throw new Error(`Not a valid benchmark object in DiffNew: ${JSON.stringify(rhs)}`);
124 | }
125 | }
126 | }
127 |
/**
 * Validates the diff that appends a new benchmark run for `expectedBenchName`:
 * the new entry must be appended last, must be newest by date, must use the
 * same tool as previous entries, and must not change any benchmark's unit.
 */
function validateBenchmarkResultMod(diff: Diff, expectedBenchName: string, afterSuites: BenchmarkSuites) {
    if (!(expectedBenchName in afterSuites)) {
        throw new Error(`data.js after action does not contain '${expectedBenchName}' benchmark`);
    }

    const benchSuites = afterSuites[expectedBenchName];
    if (benchSuites.length === 0) {
        throw new Error('Benchmark suite is empty after action');
    }

    if (diff.kind === 'N') {
        // Previous data does not exist. This case occurs only once when new tool support is added.
        // Ignore checks.
        return;
    }

    assertDiffArray(diff);

    if (!deepEq(diff.path, ['entries', expectedBenchName])) {
        throw new Error(`Diff path is not expected for adding new benchmark: ${JSON.stringify(diff.path)}`);
    }

    // Drill into the element-level diff describing the appended suite.
    diff = diff.item;
    assertDiffNewBench(diff);

    const added: Benchmark = diff.rhs;
    // The appended suite must be the last element of the stored series.
    const last = benchSuites[benchSuites.length - 1];
    if (last.commit.id !== added.commit.id) {
        throw new Error(
            `Newly added benchmark ${JSON.stringify(added)} is not the last one in data.js ${JSON.stringify(last)}`,
        );
    }

    for (const suite of benchSuites) {
        // Every stored suite must predate (or equal) the newly added one.
        if (suite.date > added.date) {
            throw new Error(`Older suite's date ${JSON.stringify(suite)} is newer than added ${JSON.stringify(added)}`);
        }

        if (suite.tool !== added.tool) {
            throw new Error(`Tool is different between ${JSON.stringify(suite)} and ${JSON.stringify(added)}`);
        }

        // A benchmark's unit must stay stable across runs for the chart to make sense.
        for (const addedBench of added.benches) {
            for (const prevBench of suite.benches) {
                if (prevBench.name === addedBench.name) {
                    if (prevBench.unit !== addedBench.unit) {
                        throw new Error(
                            `Unit is different between previous benchmark and newly added benchmark: ${JSON.stringify(
                                prevBench,
                            )} v.v. ${JSON.stringify(addedBench)}`,
                        );
                    }
                }
            }
        }
    }
}
185 |
/**
 * Entry point. Compares the data.js saved before the action ran with the one
 * committed to gh-pages afterwards, and verifies that exactly the expected
 * modifications happened: an updated lastUpdate plus one appended benchmark.
 */
async function main() {
    console.log('Start validating modifications by action with args', process.argv);

    if (process.argv.length !== 4) {
        help();
    }

    if (['-h', '--help'].includes(process.argv[2])) {
        help();
    }

    // Must run from the repository root: '.git' and data.js are resolved relatively.
    console.log('Checking pre-condition');
    const stats = await fs.stat(path.resolve('.git'));
    if (!stats.isDirectory()) {
        throw new Error('This script must be run at root directory of repository');
    }

    const beforeDataJs = path.resolve(process.argv[2]);
    const expectedBenchName = process.argv[3];

    console.log('Validating modifications by action');
    console.log(`  data.js before action: ${beforeDataJs}`);

    console.log('Reading data.js before action as JSON');
    const beforeJson = await readDataJson(beforeDataJs);

    console.log('Validating current branch');
    const branch = await exec('git rev-parse --abbrev-ref HEAD');
    // NOTE(review): exec() resolves with raw stdout which normally keeps a
    // trailing newline, so this strict equality likely never matches —
    // consider branch.trim(). TODO confirm.
    if (branch === 'gh-pages') {
        throw new Error(`Current branch is still on '${branch}'`);
    }

    console.log('Retrieving data.js after action');
    await exec('git checkout gh-pages');
    const latestCommitLog = await exec('git log -n 1');

    console.log('Validating auto commit');
    const commitLogLines = latestCommitLog.split('\n');

    // In 'git log' output, line index 1 is the 'Author:' line.
    const commitAuthorLine = commitLogLines[1];
    if (!commitAuthorLine.startsWith('Author: github-action-benchmark')) {
        throw new Error(`Unexpected auto commit author in log '${latestCommitLog}'`);
    }

    // Line index 4 is the first line of the commit message body.
    const commitMessageLine = commitLogLines[4];
    // Escape regex metacharacters in the benchmark name before embedding it.
    const reCommitMessage = new RegExp(
        `add ${expectedBenchName.replace(
            /[.*+?^=!:${}()|[\]/\\]/g,
            '\\$&',
        )} \\([^)]+\\) benchmark result for [0-9a-f]+$`,
    );
    if (!reCommitMessage.test(commitMessageLine)) {
        throw new Error(`Unexpected auto commit message in log '${latestCommitLog}'`);
    }

    const afterJson = await readDataJson('dev/bench/data.js');
    // Return to the previous branch so the working tree is left as found.
    await exec('git checkout -');

    console.log('Validating data.js both before/after action');
    validateDataJson(beforeJson);
    validateDataJson(afterJson);

    const diffs = diff(beforeJson, afterJson);
    console.log('Validating diffs:', diffs);

    // Exactly two changes are expected: lastUpdate and the appended benchmark.
    if (!diffs || diffs.length !== 2) {
        throw new Error('Number of diffs are incorrect. Exact 2 diffs are expected');
    }

    console.log('Validating lastUpdate modification');
    validateLastUpdateMod(diffs[0]);

    console.log('Validating benchmark result modification');
    validateBenchmarkResultMod(diffs[1], expectedBenchName, afterJson.entries);

    console.log('👌');
}
263 |
// Script entry: exit code 110 signals validation failure to the CI workflow.
main().catch((err) => {
    console.error(err);
    process.exit(110);
});
268 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | push:
4 | branches:
5 | - master
6 | pull_request:
7 | branches:
8 | - master
9 |
10 | jobs:
11 | rust:
12 | name: Run Rust benchmark example
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v2
16 | - uses: actions/setup-node@v1
17 | - uses: actions/cache@v1
18 | with:
19 | path: ~/.npm
20 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
21 | - run: npm ci
22 | - run: npm run build
23 | - name: Save previous data.js
24 | run: |
25 | git fetch origin gh-pages
26 | git checkout gh-pages
27 | cp ./dev/bench/data.js before_data.js
28 | git checkout -
29 | - run: rustup toolchain update nightly && rustup default nightly
30 | - name: Run benchmark
31 | run: cd examples/rust && cargo +nightly bench | tee output.txt
32 | - name: Store benchmark result
33 | uses: ./
34 | with:
35 | name: Rust Benchmark
36 | tool: 'cargo'
37 | output-file-path: examples/rust/output.txt
38 | skip-fetch-gh-pages: true
39 | fail-on-alert: true
40 | - run: node ./dist/scripts/ci_validate_modification.js before_data.js 'Rust Benchmark'
41 | go:
42 | name: Run Go benchmark example
43 | runs-on: ubuntu-latest
44 | steps:
45 | - uses: actions/checkout@v2
46 | - uses: actions/setup-node@v1
47 | - uses: actions/setup-go@v1
48 | - uses: actions/cache@v1
49 | with:
50 | path: ~/.npm
51 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
52 | - run: npm ci
53 | - run: npm run build
54 | - name: Save previous data.js
55 | run: |
56 | git fetch origin gh-pages
57 | git checkout gh-pages
58 | cp ./dev/bench/data.js before_data.js
59 | git checkout -
60 | - name: Run benchmark
61 | run: cd examples/go && go test -bench 'BenchmarkFib' | tee output.txt
62 | - name: Store benchmark result
63 | uses: ./
64 | with:
65 | name: Go Benchmark
66 | tool: 'go'
67 | output-file-path: examples/go/output.txt
68 | skip-fetch-gh-pages: true
69 | fail-on-alert: true
70 | - run: node ./dist/scripts/ci_validate_modification.js before_data.js 'Go Benchmark'
71 | benchmarkjs:
72 | name: Run JavaScript benchmark example
73 | runs-on: ubuntu-latest
74 | steps:
75 | - uses: actions/checkout@v2
76 | - uses: actions/setup-node@v1
77 | - uses: actions/cache@v1
78 | with:
79 | path: ~/.npm
80 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
81 | - run: npm ci
82 | - run: npm run build
83 | - name: Save previous data.js
84 | run: |
85 | git fetch origin gh-pages
86 | git checkout gh-pages
87 | cp ./dev/bench/data.js before_data.js
88 | git checkout -
89 | - name: Run benchmark
90 | run: cd examples/benchmarkjs && npm install && node bench.js | tee output.txt
91 | - name: Store benchmark result
92 | uses: ./
93 | with:
94 | name: Benchmark.js Benchmark
95 | tool: 'benchmarkjs'
96 | output-file-path: examples/benchmarkjs/output.txt
97 | skip-fetch-gh-pages: true
98 | fail-on-alert: true
99 | - run: node ./dist/scripts/ci_validate_modification.js before_data.js 'Benchmark.js Benchmark'
100 | pytest-benchmark:
101 | name: Run Pytest benchmark example
102 | runs-on: ubuntu-latest
103 | steps:
104 | - uses: actions/checkout@v2
105 | - uses: actions/setup-node@v1
106 | - uses: actions/setup-python@v2
107 | with:
108 | python-version: 3.9
109 | - uses: actions/cache@v1
110 | with:
111 | path: ~/.npm
112 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
113 | - run: npm ci
114 | - run: npm run build
115 | - name: Save previous data.js
116 | run: |
117 | git fetch origin gh-pages
118 | git checkout gh-pages
119 | cp ./dev/bench/data.js before_data.js
120 | git checkout -
121 | - name: Run benchmark
122 | run: |
123 | cd examples/pytest
124 | pip install -r requirements.txt
125 | pytest bench.py --benchmark-json output.json
126 | - name: Store benchmark result
127 | uses: ./
128 | with:
129 | name: Python Benchmark with pytest-benchmark
130 | tool: 'pytest'
131 | output-file-path: examples/pytest/output.json
132 | skip-fetch-gh-pages: true
133 | fail-on-alert: true
134 | - run: node ./dist/scripts/ci_validate_modification.js before_data.js 'Python Benchmark with pytest-benchmark'
135 | google-benchmark-framework:
136 | name: Run Google C++ Benchmark Framework example
137 | runs-on: ubuntu-latest
138 | steps:
139 | - uses: actions/checkout@v2
140 | - uses: actions/setup-node@v1
141 | - uses: actions/cache@v1
142 | with:
143 | path: ~/.npm
144 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
145 | - run: npm ci
146 | - run: npm run build
147 | - name: Save previous data.js
148 | run: |
149 | git fetch origin gh-pages
150 | git checkout gh-pages
151 | cp ./dev/bench/data.js before_data.js
152 | git checkout -
153 | - name: Cache Benchmark library
154 | uses: actions/cache@v1
155 | with:
156 | path: examples/cpp/benchmark
157 | key: ${{ runner.os }}-googlebenchmark-v1.5.0
158 | - name: Run benchmark
159 | run: |
160 | cd examples/cpp
161 | make json
162 | - name: Store benchmark result
163 | uses: ./
164 | with:
165 | name: C++ Benchmark
166 | tool: 'googlecpp'
167 | output-file-path: examples/cpp/benchmark_result.json
168 | skip-fetch-gh-pages: true
169 | fail-on-alert: true
170 | - run: node ./dist/scripts/ci_validate_modification.js before_data.js 'C++ Benchmark'
171 | catch2-framework:
172 | name: Run Catch2 C++ Benchmark Framework example
173 | runs-on: ubuntu-latest
174 | steps:
175 | - uses: actions/checkout@v2
176 | - uses: actions/setup-node@v1
177 | - uses: actions/cache@v1
178 | with:
179 | path: ~/.npm
180 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
181 | - run: npm ci
182 | - run: npm run build
183 | - name: Save previous data.js
184 | run: |
185 | git fetch origin gh-pages
186 | git checkout gh-pages
187 | cp ./dev/bench/data.js before_data.js
188 | git checkout -
189 | - name: Run benchmark
190 | run: |
191 | cd examples/catch2
192 | mkdir build && cd build
193 | cmake -DCMAKE_BUILD_TYPE=Release ..
194 | cmake --build . --config Release
195 | ./Catch2_bench > ../benchmark_result.txt
196 | - name: Store benchmark result
197 | uses: ./
198 | with:
199 | name: Catch2 Benchmark
200 | tool: 'catch2'
201 | output-file-path: examples/catch2/benchmark_result.txt
202 | skip-fetch-gh-pages: true
203 | fail-on-alert: true
204 | - run: node ./dist/scripts/ci_validate_modification.js before_data.js 'Catch2 Benchmark'
205 |
206 | julia-benchmark:
207 | name: Run Julia benchmark example
208 | runs-on: ubuntu-latest
209 | steps:
210 | - uses: actions/checkout@v2
211 | - uses: actions/setup-node@v1
212 | - uses: actions/cache@v1
213 | with:
214 | path: ~/.npm
215 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
216 | - run: npm ci
217 | - run: npm run build
218 | - name: Save previous data.js
219 | run: |
220 | git fetch origin gh-pages
221 | git checkout gh-pages
222 | cp ./dev/bench/data.js before_data.js
223 | git checkout -
224 | - uses: julia-actions/setup-julia@v1
225 | with:
226 | version: '1'
227 | arch: x64
228 | - name: Run benchmark
229 | run: |
230 | cd examples/julia
231 | julia --project --color=yes -e '
232 | using Pkg;
233 | Pkg.instantiate();
234 | include("fib.jl")'
235 | - name: Store benchmark result
236 | uses: ./
237 | with:
238 | name: Julia benchmark
tool: 'julia'
240 | output-file-path: examples/julia/output.json
241 | skip-fetch-gh-pages: true
242 | fail-on-alert: true
243 | - run: node ./dist/scripts/ci_validate_modification.js before_data.js 'Julia benchmark'
244 |
245 | only-alert-with-cache:
246 | name: Run alert check with actions/cache
247 | runs-on: ubuntu-latest
248 | steps:
249 | - uses: actions/checkout@v2
250 | - uses: actions/setup-node@v1
251 | - uses: actions/cache@v1
252 | with:
253 | path: ~/.npm
254 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
255 | - run: npm ci
256 | - run: npm run build
257 | - uses: actions/setup-go@v1
258 | - name: Run benchmark
259 | run: cd examples/go && go test -bench 'BenchmarkFib' | tee output.txt
260 | - name: Download previous benchmark data
261 | uses: actions/cache@v1
262 | with:
263 | path: ./cache
264 | key: ${{ runner.os }}-ci-cache-test
265 | - name: Store benchmark result
266 | uses: ./
267 | with:
268 | name: Alert setup example with cache
269 | tool: 'go'
270 | output-file-path: examples/go/output.txt
271 | external-data-json-path: ./cache/benchmark-data.json
272 | github-token: ${{ secrets.GITHUB_TOKEN }}
273 | alert-threshold: '200%'
274 | comment-on-alert: true
275 | fail-on-alert: true
276 | alert-comment-cc-users: '@ktrz'
277 | unit-tests:
278 | name: Run unit tests
279 | runs-on: ubuntu-latest
280 | steps:
281 | - uses: actions/checkout@v2
282 | - uses: actions/setup-node@v1
283 | with:
284 | node-version: 12
285 | - uses: actions/cache@v1
286 | with:
287 | path: ~/.npm
288 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
289 | - run: npm ci
290 | - run: npm run coverage
291 | - name: Apply yamllint
292 | run: |
293 | sudo pip install yamllint
294 | yamllint --strict .github/workflows
295 | - uses: codecov/codecov-action@v2
296 |
297 | lint:
298 | name: Run linting and formatting check
299 | runs-on: ubuntu-latest
300 | steps:
301 | - uses: actions/checkout@v2
302 | - uses: actions/setup-node@v1
303 | with:
304 | node-version: 12
305 | - uses: actions/cache@v1
306 | with:
307 | path: ~/.npm
308 | key: ${{ runner.os }}-node-${{ hashFiles('package-lock.json') }}
309 | - run: npm ci
310 | - run: npm run lint
311 | - run: npm run format:check
312 |
--------------------------------------------------------------------------------
/test/config.spec.ts:
--------------------------------------------------------------------------------
1 | import { strict as A } from 'assert';
2 | import * as path from 'path';
3 | import * as os from 'os';
4 | import { configFromJobInput, VALID_TOOLS } from '../src/config';
5 |
6 | type Inputs = { [name: string]: string };
7 |
8 | const inputs: Inputs = {};
9 | function mockInputs(newInputs: Inputs) {
10 | for (const name of Object.getOwnPropertyNames(inputs)) {
11 | delete inputs[name];
12 | }
13 | Object.assign(inputs, newInputs);
14 | }
15 |
// Replace @actions/core so each test controls every `getInput` value
// through the `inputs` store populated by mockInputs().
jest.mock('@actions/core', () => ({
    getInput: (name: string) => inputs[name],
}));
19 |
20 | describe('configFromJobInput()', function () {
21 | const cwd = process.cwd();
22 |
23 | beforeAll(function () {
24 | process.chdir(path.join(__dirname, 'data', 'config'));
25 | });
26 |
27 | afterAll(function () {
28 | jest.unmock('@actions/core');
29 | process.chdir(cwd);
30 | });
31 |
32 | const defaultInputs = {
33 | name: 'Benchmark',
34 | tool: 'cargo',
35 | 'output-file-path': 'out.txt',
36 | 'gh-pages-branch': 'gh-pages',
37 | 'benchmark-data-dir-path': '.',
38 | 'github-token': '',
39 | 'auto-push': 'false',
40 | 'skip-fetch-gh-pages': 'false',
41 | 'comment-on-alert': 'false',
42 | 'alert-threshold': '200%',
43 | 'fail-on-alert': 'false',
44 | 'alert-comment-cc-users': '',
45 | 'external-data-json-path': '',
46 | 'max-items-in-chart': '',
47 | };
48 |
49 | const validation_tests = [
50 | {
51 | what: 'wrong name',
52 | inputs: { ...defaultInputs, name: '' },
53 | expected: /^Error: Name must not be empty$/,
54 | },
55 | {
56 | what: 'wrong tool',
57 | inputs: { ...defaultInputs, tool: 'foo' },
58 | expected: /^Error: Invalid value 'foo' for 'tool' input/,
59 | },
60 | {
61 | what: 'output file does not exist',
62 | inputs: { ...defaultInputs, 'output-file-path': 'foo.txt' },
63 | expected: /^Error: Invalid value for 'output-file-path'/,
64 | },
65 | {
66 | what: 'output file is actually directory',
67 | inputs: { ...defaultInputs, 'output-file-path': '.' },
68 | expected: /Specified path '.*' is not a file/,
69 | },
70 | {
71 | what: 'wrong GitHub pages branch name',
72 | inputs: { ...defaultInputs, 'gh-pages-branch': '' },
73 | expected: /^Error: Branch value must not be empty/,
74 | },
75 | // Cannot check 'benchmark-data-dir-path' invalidation because it throws an error only when
76 | // current working directory is not obtainable.
77 | {
78 | what: 'auto-push is set but github-token is not set',
79 | inputs: { ...defaultInputs, 'auto-push': 'true', 'github-token': '' },
80 | expected: /'auto-push' is enabled but 'github-token' is not set/,
81 | },
82 | {
83 | what: 'auto-push is set to other than boolean',
84 | inputs: { ...defaultInputs, 'auto-push': 'hello', 'github-token': 'dummy' },
85 | expected: /'auto-push' input must be boolean value 'true' or 'false' but got 'hello'/,
86 | },
87 | {
88 | what: 'alert-threshold does not have percentage value',
89 | inputs: { ...defaultInputs, 'alert-threshold': '1.2' },
90 | expected: /'alert-threshold' input must ends with '%' for percentage value/,
91 | },
92 | {
93 | what: 'alert-threshold does not have correct percentage number',
94 | inputs: { ...defaultInputs, 'alert-threshold': 'foo%' },
95 | expected: /Specified value 'foo' in 'alert-threshold' input cannot be parsed as float number/,
96 | },
97 | {
98 | what: 'comment-on-alert is set but github-token is not set',
99 | inputs: { ...defaultInputs, 'comment-on-alert': 'true', 'github-token': '' },
100 | expected: /'comment-on-alert' is enabled but 'github-token' is not set/,
101 | },
102 | {
103 | what: 'user names in alert-comment-cc-users is not starting with @',
104 | inputs: { ...defaultInputs, 'alert-comment-cc-users': '@foo,bar' },
105 | expected: /User name in 'alert-comment-cc-users' input must start with '@' but got 'bar'/,
106 | },
107 | {
108 | what: 'external data file is actually directory',
109 | inputs: { ...defaultInputs, 'external-data-json-path': '.' },
110 | expected: /must be file but it is actually directory/,
111 | },
112 | {
113 | what: 'both external-data-json-path and auto-push are set at the same time',
114 | inputs: {
115 | ...defaultInputs,
116 | 'external-data-json-path': 'external.json',
117 | 'auto-push': 'true',
118 | 'github-token': 'dummy',
119 | },
120 | expected: /auto-push must be false when external-data-json-path is set/,
121 | },
122 | {
123 | what: 'invalid integer value for max-items-in-chart',
124 | inputs: {
125 | ...defaultInputs,
126 | 'max-items-in-chart': '3.14',
127 | },
128 | expected: /'max-items-in-chart' input must be unsigned integer but got '3.14'/,
129 | },
130 | {
131 | what: 'max-items-in-chart must not be zero',
132 | inputs: {
133 | ...defaultInputs,
134 | 'max-items-in-chart': '0',
135 | },
136 | expected: /'max-items-in-chart' input value must be one or more/,
137 | },
138 | {
139 | what: 'alert-threshold must not be empty',
140 | inputs: {
141 | ...defaultInputs,
142 | 'alert-threshold': '',
143 | },
144 | expected: /'alert-threshold' input must not be empty/,
145 | },
146 | {
147 | what: 'fail-threshold does not have percentage value',
148 | inputs: { ...defaultInputs, 'fail-threshold': '1.2' },
149 | expected: /'fail-threshold' input must ends with '%' for percentage value/,
150 | },
151 | {
152 | what: 'fail-threshold does not have correct percentage number',
153 | inputs: { ...defaultInputs, 'fail-threshold': 'foo%' },
154 | expected: /Specified value 'foo' in 'fail-threshold' input cannot be parsed as float number/,
155 | },
156 | {
157 | what: 'fail-threshold is smaller than alert-threshold',
158 | inputs: { ...defaultInputs, 'alert-threshold': '150%', 'fail-threshold': '120%' },
159 | expected: /'alert-threshold' value must be smaller than 'fail-threshold' value but got 1.5 > 1.2/,
160 | },
161 | ] as Array<{
162 | what: string;
163 | inputs: Inputs;
164 | expected: RegExp;
165 | }>;
166 |
167 | for (const test of validation_tests) {
168 | it(`validates ${test.what}`, async function () {
169 | mockInputs(test.inputs);
170 | await A.rejects(configFromJobInput, test.expected);
171 | });
172 | }
173 |
// Shape of the configuration values that configFromJobInput() is expected to
// resolve for a given set of action inputs. Path-valued config fields are not
// part of this shape: the assertion loop below only checks that they are
// absolute paths, not their exact values.
interface ExpectedResult {
    name: string;
    tool: string;
    ghPagesBranch: string;
    githubToken: string | undefined;
    autoPush: boolean;
    skipFetchGhPages: boolean;
    commentOnAlert: boolean;
    alertThreshold: number;
    failOnAlert: boolean;
    alertCommentCcUsers: string[];
    // True when the 'external-data-json-path' input is set; the test then only
    // verifies the resolved path is an absolute string.
    hasExternalDataJsonPath: boolean;
    maxItemsInChart: null | number;
    // null means the config's failThreshold is expected to fall back to the
    // alertThreshold value (see the assertion loop below).
    failThreshold: number | null;
}
189 |
190 | const defaultExpected: ExpectedResult = {
191 | name: 'Benchmark',
192 | tool: 'cargo',
193 | ghPagesBranch: 'gh-pages',
194 | autoPush: false,
195 | skipFetchGhPages: false,
196 | githubToken: undefined,
197 | commentOnAlert: false,
198 | alertThreshold: 2,
199 | failOnAlert: false,
200 | alertCommentCcUsers: [],
201 | hasExternalDataJsonPath: false,
202 | maxItemsInChart: null,
203 | failThreshold: null,
204 | };
205 |
206 | const returnedConfigTests = [
207 | ...VALID_TOOLS.map((tool: string) => ({
208 | what: 'valid tool ' + tool,
209 | inputs: { ...defaultInputs, tool },
210 | expected: { ...defaultExpected, tool },
211 | })),
212 | ...(
213 | [
214 | ['auto-push', 'autoPush'],
215 | ['skip-fetch-gh-pages', 'skipFetchGhPages'],
216 | ['comment-on-alert', 'commentOnAlert'],
217 | ['fail-on-alert', 'failOnAlert'],
218 | ] as const
219 | )
220 | .map(([name, prop]) =>
221 | ['true', 'false'].map((v) => ({
222 | what: `boolean input ${name} set to '${v}'`,
223 | inputs: { ...defaultInputs, 'github-token': 'dummy', [name]: v },
224 | expected: { ...defaultExpected, githubToken: 'dummy', [prop]: v === 'true' },
225 | })),
226 | )
227 | .flat(),
228 | {
229 | what: 'with specified name',
230 | inputs: { ...defaultInputs, name: 'My Name is...' },
231 | expected: { ...defaultExpected, name: 'My Name is...' },
232 | },
233 | {
234 | what: 'with specified GitHub Pages branch',
235 | inputs: { ...defaultInputs, 'gh-pages-branch': 'master' },
236 | expected: { ...defaultExpected, ghPagesBranch: 'master' },
237 | },
238 | ...[
239 | ['150%', 1.5],
240 | ['0%', 0],
241 | ['123.4%', 1.234],
242 | ].map(([v, e]) => ({
243 | what: `with alert threshold ${v}`,
244 | inputs: { ...defaultInputs, 'alert-threshold': v },
245 | expected: { ...defaultExpected, alertThreshold: e },
246 | })),
247 | ...[
248 | ['@foo', ['@foo']],
249 | ['@foo,@bar', ['@foo', '@bar']],
250 | ['@foo, @bar ', ['@foo', '@bar']],
251 | ].map(([v, e]) => ({
252 | what: `with comment CC users ${v}`,
253 | inputs: { ...defaultInputs, 'alert-comment-cc-users': v },
254 | expected: { ...defaultExpected, alertCommentCcUsers: e },
255 | })),
256 | {
257 | what: 'external JSON file',
258 | inputs: { ...defaultInputs, 'external-data-json-path': 'external.json' },
259 | expected: { ...defaultExpected, hasExternalDataJsonPath: true },
260 | },
261 | {
262 | what: 'max items in chart',
263 | inputs: { ...defaultInputs, 'max-items-in-chart': '50' },
264 | expected: { ...defaultExpected, maxItemsInChart: 50 },
265 | },
266 | {
267 | what: 'different failure threshold from alert threshold',
268 | inputs: { ...defaultInputs, 'fail-threshold': '300%' },
269 | expected: { ...defaultExpected, failThreshold: 3.0 },
270 | },
271 | {
272 | what: 'boolean value parsing an empty input as false',
273 | inputs: {
274 | ...defaultInputs,
275 | 'skip-fetch-gh-pages': '',
276 | 'comment-on-alert': '',
277 | 'fail-on-alert': '',
278 | },
279 | expected: defaultExpected,
280 | },
281 | ] as Array<{
282 | what: string;
283 | inputs: Inputs;
284 | expected: ExpectedResult;
285 | }>;
286 |
287 | for (const test of returnedConfigTests) {
288 | it(`returns validated config with ${test.what}`, async function () {
289 | mockInputs(test.inputs);
290 | const actual = await configFromJobInput();
291 | A.equal(actual.name, test.expected.name);
292 | A.equal(actual.tool, test.expected.tool);
293 | A.equal(actual.ghPagesBranch, test.expected.ghPagesBranch);
294 | A.equal(actual.githubToken, test.expected.githubToken);
295 | A.equal(actual.skipFetchGhPages, test.expected.skipFetchGhPages);
296 | A.equal(actual.commentOnAlert, test.expected.commentOnAlert);
297 | A.equal(actual.failOnAlert, test.expected.failOnAlert);
298 | A.equal(actual.alertThreshold, test.expected.alertThreshold);
299 | A.deepEqual(actual.alertCommentCcUsers, test.expected.alertCommentCcUsers);
300 | A.ok(path.isAbsolute(actual.outputFilePath), actual.outputFilePath);
301 | A.ok(path.isAbsolute(actual.benchmarkDataDirPath), actual.benchmarkDataDirPath);
302 | A.equal(actual.maxItemsInChart, test.expected.maxItemsInChart);
303 | if (test.expected.failThreshold === null) {
304 | A.equal(actual.failThreshold, test.expected.alertThreshold);
305 | } else {
306 | A.equal(actual.failThreshold, test.expected.failThreshold);
307 | }
308 |
309 | if (test.expected.hasExternalDataJsonPath) {
310 | A.equal(typeof actual.externalDataJsonPath, 'string');
311 | A.ok(path.isAbsolute(actual.externalDataJsonPath as string), actual.externalDataJsonPath);
312 | } else {
313 | A.equal(actual.externalDataJsonPath, undefined);
314 | }
315 | });
316 | }
317 |
318 | it('resolves relative paths in config', async function () {
319 | mockInputs({
320 | ...defaultInputs,
321 | 'output-file-path': 'out.txt',
322 | 'benchmark-data-dir-path': 'path/to/output',
323 | });
324 |
325 | const config = await configFromJobInput();
326 | A.equal(config.name, 'Benchmark');
327 | A.equal(config.tool, 'cargo');
328 | A.ok(path.isAbsolute(config.outputFilePath), config.outputFilePath);
329 | A.ok(config.outputFilePath.endsWith('out.txt'), config.outputFilePath);
330 | A.ok(path.isAbsolute(config.benchmarkDataDirPath), config.benchmarkDataDirPath);
331 | A.ok(config.benchmarkDataDirPath.endsWith('output'), config.benchmarkDataDirPath);
332 | });
333 |
334 | it('does not change abusolute paths in config', async function () {
335 | const outFile = path.resolve('out.txt');
336 | const dataDir = path.resolve('path/to/output');
337 | mockInputs({
338 | ...defaultInputs,
339 | 'output-file-path': outFile,
340 | 'benchmark-data-dir-path': dataDir,
341 | });
342 |
343 | const config = await configFromJobInput();
344 | A.equal(config.outputFilePath, outFile);
345 | A.equal(config.benchmarkDataDirPath, dataDir);
346 | });
347 |
348 | it('resolves home directory in output directory path', async function () {
349 | const home = os.homedir();
350 | const absCwd = process.cwd();
351 | if (!absCwd.startsWith(home)) {
352 | // Test was not run under home directory so "~" in paths cannot be tested
353 | fail('Test was not run under home directory so "~" in paths cannot be tested');
354 | }
355 |
356 | const cwd = path.join('~', absCwd.slice(home.length));
357 | const file = path.join(cwd, 'out.txt');
358 | const dir = path.join(cwd, 'outdir');
359 |
360 | mockInputs({
361 | ...defaultInputs,
362 | 'output-file-path': file,
363 | 'benchmark-data-dir-path': dir,
364 | });
365 |
366 | const config = await configFromJobInput();
367 | A.ok(path.isAbsolute(config.outputFilePath), config.outputFilePath);
368 | A.equal(config.outputFilePath, path.join(absCwd, 'out.txt'));
369 | A.ok(path.isAbsolute(config.benchmarkDataDirPath), config.benchmarkDataDirPath);
370 | A.equal(config.benchmarkDataDirPath, path.join(absCwd, 'outdir'));
371 | });
372 | });
373 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 |
2 | # [v1.10.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.10.0) - 28 Oct 2021
3 |
4 | - **New:** Allow user defined custom benchmarks (#81)
5 |
6 |
7 | # [v1.9.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.9.0) - 12 Oct 2021
8 |
9 | - **Fix:** manual and scheduled runs (#74)
10 |
11 |
12 | # [v1.8.1](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.8.1) - 10 Jun 2020
13 |
14 | - **Fix:** Allow `/` in `cargo bench` benchmark name (#26)
15 | - **New:** Add an example with [Criterion.rs](https://github.com/bheisler/criterion.rs) for Rust projects
16 |
17 | [Changes][v1.8.1]
18 |
19 |
20 |
21 | # [v1.8.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.8.0) - 17 Mar 2020
22 |
23 | - **New:** Added `comment-always` option to leave a comment of benchmarking results at the commit always. [Thanks @pksunkara](https://github.com/benchmark-action/github-action-benchmark/pull/17)
24 | - **New:** Added `save-data-file` option to skip saving data file. Setting `false` to this value is useful when you don't want to update Git repository. [Thanks @pksunkara](https://github.com/benchmark-action/github-action-benchmark/pull/17)
25 | - **Improve:** `+/-` is now replaced with `±`
26 | - **Improve:** Better formatting for floating point numbers
27 |
28 | [Changes][v1.8.0]
29 |
30 |
31 |
32 | # [v1.7.1](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.7.1) - 23 Feb 2020
33 |
34 | - **Fix:** Benchmark output parser could not parse `\r\n` as newline correctly (#16)
35 | - **Improve:** Prefer `@actions/github.GitHub` wrapper to `@octokit/rest.Octokit`
36 |
37 | [Changes][v1.7.1]
38 |
39 |
40 |
41 | # [v1.7.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.7.0) - 21 Jan 2020
42 |
43 | - **New:** Add [Catch2](https://github.com/catchorg/Catch2) support. Please read [the example](https://github.com/benchmark-action/github-action-benchmark/tree/master/examples/catch2) for more details. [Thanks @bernedom](https://github.com/benchmark-action/github-action-benchmark/pull/6)
44 | - **Fix:** Deploying to GitHub Pages did not work when checking out the repository with `actions/checkout@v2`
45 | - **Improve:** Update several dependencies including `@actions/*` packages
46 |
47 | [Changes][v1.7.0]
48 |
49 |
50 |
51 | # [v1.6.7](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.7) - 01 Jan 2020
52 |
53 | - **Fix:** Extracting the benchmark result value from `go test -bench` did not assume float numbers (Fixed [#5](https://github.com/benchmark-action/github-action-benchmark/issues/5))
54 | - **Fix:** Running this action on `pull_request` event caused an error since `head_commit` payload is not set at the event. In the case, now this action tries to extract the commit information from `pull_request` payload
55 |
56 | [Changes][v1.6.7]
57 |
58 |
59 |
60 | # [v1.6.6](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.6) - 23 Dec 2019
61 |
62 | - **Fix:** Parse floating numbers in the benchmark results from Benchmark.js. ([Thanks @Bnaya](https://github.com/benchmark-action/github-action-benchmark/pull/3))
63 |
64 | [Changes][v1.6.6]
65 |
66 |
67 |
68 | # [v1.6.5](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.5) - 19 Dec 2019
69 |
70 | - **Fix:** Titles are set to empty in auto-generated default `index.html`. To apply this fix, please remove current `index.html` in your GitHub Pages branch and run this action again
71 | - **Fix:** Skip fetching GitHub Pages branch before switching to the branch when `skip-fetch-gh-pages` is set to true
72 | - **Improve:** Explicitly note no action output from this action in README.md
73 |
74 | [Changes][v1.6.5]
75 |
76 |
77 |
78 | # [v1.6.4](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.4) - 16 Dec 2019
79 |
80 | - **Fix:** Supported [actions/checkout@v2](https://github.com/actions/checkout/releases/tag/v2.0.0)
81 | - **Improve:** Refactored `index.html` automatically generated when it does not exist
82 | - **Improve:** Update dependencies (`actions/github` v2)
83 |
84 | [Changes][v1.6.4]
85 |
86 |
87 |
88 | # [v1.6.3](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.3) - 08 Dec 2019
89 |
90 | - **Improve:** Tweak number of retries for more robust automatic `git push`
91 |
92 | [Changes][v1.6.3]
93 |
94 |
95 |
96 | # [v1.6.2](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.2) - 07 Dec 2019
97 |
98 | - **Fix:** Retry logic for `git push` did not work properly since stderr output was not included in error message
99 |
100 | [Changes][v1.6.2]
101 |
102 |
103 |
104 | # [v1.6.1](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.1) - 07 Dec 2019
105 |
106 | - **Fix:** Time unit of mean time in `pytest` benchmark results were always `sec`. Now time units are converted to `msec`, `usec` and `nsec` if necessary
107 | - **Fix:** Detecting rejection by remote on `git push` was not sufficient
108 | - **Improve:** Add a small link at right bottom of dashboard page to show this action provided the page
109 | - **Improve:** Showed at least 1 significant digit for threshold float values like `2.0`
110 | - **Improve:** Updated dependencies
111 |
112 |
113 | [Changes][v1.6.1]
114 |
115 |
116 |
117 | # [v1.6.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.6.0) - 04 Dec 2019
118 |
119 | - **New:** `fail-threshold` input was added. Format is the same as `alert-threshold`, but you can give different thresholds to sending a commit comment and making the workflow fail by giving different value to `fail-threshold` from `alert-threshold`. This value is optional. If omitted, `fail-threshold` value is the same as `alert-threshold`
120 | - **Improve:** Retry logic was improved on `git push` failed due to remote branch updates after `git pull`. Now this action retries entire process to update `gh-pages` branch when the remote rejected automatic `git push`. Previously this action tried to rebase the local onto the remote but it sometimes failed due to conflicts
121 |
122 | [Changes][v1.6.0]
123 |
124 |
125 |
126 | # [v1.5.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.5.0) - 30 Nov 2019
127 |
- **New:** Added `max-items-in-chart` input to limit the number of data points in a graph chart.
129 | - **New:** Supported [Google C++ Benchmark Framework](https://github.com/google/benchmark) for C++ projects. Please check [the example project](https://github.com/benchmark-action/github-action-benchmark/tree/master/examples/cpp) and [the example workflow](https://github.com/benchmark-action/github-action-benchmark/blob/master/.github/workflows/cpp.yml) to know the setup
130 | - **Fix:** Fix the order of graphs in the default `index.html`. To apply this fix, please remove `index.html` in your GitHub Pages branch and run your benchmark workflow again
131 | - **Improve:** Use the actions marketplace URL for the link to this action in commit comment
132 | - **Improve:** Updated dependencies
133 | - **Dev:** Added Many tests for checking the updates on a new benchmark result
134 | - **Dev:** Changed directory structure. Sources are now put in `src/` directory
135 |
136 | [Changes][v1.5.0]
137 |
138 |
139 |
140 | # [v1.4.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.4.0) - 23 Nov 2019
141 |
142 | - **New:** `external-data-json-path` input was added to support to put benchmark data externally rather than Git branch
143 | - By using this input and [actions/cache](https://github.com/actions/cache), you no longer need to use Git branch for this action if you only want performance alerts. Benchmark data is stored as workflow cache.
144 | - By this input, minimal setup for this action is much easier. Please read ['How to use' section](https://github.com/benchmark-action/github-action-benchmark#minimal-setup) in README.md.
145 |
146 | [Changes][v1.4.0]
147 |
148 |
149 |
150 | # [v1.3.2](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.3.2) - 23 Nov 2019
151 |
152 | - **Improve:** Styles in alert commit comment were improved
153 | - **Fix:** When benchmark name (with `name` input) contained spaces, URL for the workflow which detected performance regression was broken
154 |
155 | [Changes][v1.3.2]
156 |
157 |
158 |
159 | # [v1.3.1](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.3.1) - 21 Nov 2019
160 |
161 | - **Fix:** `git push` sometimes failed in the situation where `prepush` hook is set and runs unexpectedly. Now `git push` is run with `--no-verify` for pushing auto generated commit to remote.
162 |
163 | [Changes][v1.3.1]
164 |
165 |
166 |
167 | # [v1.3.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.3.0) - 21 Nov 2019
168 |
169 | - **New:** Alert feature was added :tada:
170 | - With this feature enabled, you can get alert commit comment or make workflow fail when possible performance regression is detected [like this](https://github.com/rhysd/github-action-benchmark/commit/077dde1c236baba9244caad4d9e82ea8399dae20#commitcomment-36047186)
171 | - `comment-on-alert` input was added to enable commit comment on alert. `github-token` input is necessary as well to use GitHub API. Unlike deploying GitHub Pages, `secrets.GITHUB_TOKEN` is sufficient for this purpose (if you don't use GitHub Pages). The input is set to `false` by default.
172 | - `fail-on-alert` input was added to mark running workflow fail on alert. The input is set to `false` by default.
  - `alert-threshold` input was added to specify the threshold for checking alerts. An alert is raised when the current result is worse than the previous one beyond this threshold. The value is a ratio such as `"200%"`. For example, when a benchmark result is 230 ns/iter and the previous one was 100 ns/iter, the current result is 230% of the previous one, which exceeds the threshold and triggers an alert.
174 | - Please read [documentation](https://github.com/benchmark-action/github-action-benchmark#use-this-action-with-alert-commit-comment) for setup
175 | - **New:** `alert-comment-cc-users` input was added to specify users mentioned in an alert commit comment so that they can easily notice it via GitHub notification
176 | - **New:** `skip-fetch-gh-pages` input was added to skip `git pull` which is automatically executed on public repo or when you set `github-token` on private repo.
177 | - **Improve:** E2E checks on CI were added
178 | - **Improve:** Updated dependencies
179 |
180 | [Changes][v1.3.0]
181 |
182 |
183 |
184 | # [v1.2.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.2.0) - 17 Nov 2019
185 |
186 | - **New:** Support [pytest-benchmark](https://pypi.org/project/pytest-benchmark/) for Python projects which use pytest
187 | - Benchmark value is how long one iteration takes (seconds/iter)
188 | - **Improve:** Show more extra data in tooltip which are specific to tools
189 | - Go
190 | - Iterations
191 | - Number of CPUs used
192 | - Benchmark.js
193 | - Number of samples
194 | - pytest-benchmark
195 | - Mean time
196 | - Number of rounds
197 |
198 | For reflecting the extra data improvement, please refresh your `index.html`. Remove current `index.html` in GitHub Pages branch and push the change to remote, then re-run your benchmark workflow.
199 |
200 | [Changes][v1.2.0]
201 |
202 |
203 |
204 | # [v1.1.4](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.1.4) - 16 Nov 2019
205 |
206 | - **Improve:** Title styles in default `index.html` which is generated when no `index.html` is in your GitHub Pages branch. If you want to update your `index.html` to the latest, please remove it and push to remote at first then re-run your workflow which will invoke github-action-benchmark
207 | - **Improve:** More metadata in `action.yml`. Now icon and its color are set.
208 |
209 | [Changes][v1.1.4]
210 |
211 |
212 |
213 | # [v1.1.3](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.1.3) - 16 Nov 2019
214 |
215 | - **Fix:** Retry failed when no Git user config is provided. Ensure to give bot user info to each `git` command invocations
216 |
217 | [Changes][v1.1.3]
218 |
219 |
220 |
221 | # [v1.1.2](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.1.2) - 16 Nov 2019
222 |
223 | - **Improve:** Added retry for `git push`. When remote GitHub Pages branch is updated after the current workflow had fetched the branch, `git push` will fail because the remote branch is not up-to-date. In the case this action will try to rebase onto the latest remote by `git pull --rebase` and `git push` again. This is useful when your multiple workflows may be trying to push GitHub Pages branch at the same timing. `auto-push` input must be set to `true` for this.
224 | - **Fix:** Description for `auto-push` was missing in `action.yml`
225 |
226 | [Changes][v1.1.2]
227 |
228 |
229 |
230 | # [v1.1.1](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.1.1) - 14 Nov 2019
231 |
232 | - **Improve:** More strict check for `auto-push` input. Now the value must be one of `true`, `false` (default value is `false`)
233 |
234 | [Changes][v1.1.1]
235 |
236 |
237 |
238 | # [v1.1.0](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.1.0) - 14 Nov 2019
239 |
240 | - **New:** Added `auto-push` input
241 | - If this value is set to `true`, this action pushes GitHub Pages branch to remote automatically. You no longer need to push the branch by yourself.
242 | - Below `github-token` input must be set for this
243 | - This input is optional. You can still push the branch by yourself if you want
244 | - Please read [documentation](https://github.com/benchmark-action/github-action-benchmark#how-to-use) for more details
245 | - **New:** Added `github-token` input
246 | - For doing some operations which requires GitHub API token, this input is necessary
247 | - pull from remote branch when your repository is private
248 | - push to remote branch
249 | - deploy and trigger GitHub Pages build
250 | - This input is optional. When you do none of above operations, this input is not necessary
251 | - `README.md` was updated to avoid [the issue on public repository](https://github.community/t/github-action-not-triggering-gh-pages-upon-push/16096) (#1)
252 |
253 | e.g.
254 |
255 | ```yaml
256 | - name: Store benchmark result
257 | uses: rhysd/github-action-benchmark@v1
258 | with:
259 | name: My Project Go Benchmark
260 | tool: 'go'
261 | output-file-path: output.txt
262 | github-token: ${{ secrets.PERSONAL_GITHUB_TOKEN }}
263 | auto-push: true
264 | ```
265 |
Note that you need to make a personal access token for deploying GitHub Pages from a GitHub Actions workflow. Please read `README.md` for more details.
267 |
268 | [Changes][v1.1.0]
269 |
270 |
271 |
272 | # [v1.0.2](https://github.com/benchmark-action/github-action-benchmark/releases/tag/v1.0.2) - 10 Nov 2019
273 |
274 | First release :tada:
275 |
276 | Please read documentation for getting started:
277 |
278 | https://github.com/benchmark-action/github-action-benchmark#readme
279 |
280 | [Changes][v1.0.2]
281 |
282 |
[v1.10.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.9.0...v1.10.0
[v1.9.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.8.1...v1.9.0
284 | [v1.8.1]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.8.0...v1.8.1
285 | [v1.8.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.7.1...v1.8.0
286 | [v1.7.1]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.7.0...v1.7.1
287 | [v1.7.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.7...v1.7.0
288 | [v1.6.7]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.6...v1.6.7
289 | [v1.6.6]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.5...v1.6.6
290 | [v1.6.5]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.4...v1.6.5
291 | [v1.6.4]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.3...v1.6.4
292 | [v1.6.3]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.2...v1.6.3
293 | [v1.6.2]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.1...v1.6.2
294 | [v1.6.1]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.6.0...v1.6.1
295 | [v1.6.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.5.0...v1.6.0
296 | [v1.5.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.4.0...v1.5.0
297 | [v1.4.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.3.2...v1.4.0
298 | [v1.3.2]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.3.1...v1.3.2
299 | [v1.3.1]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.3.0...v1.3.1
300 | [v1.3.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.2.0...v1.3.0
301 | [v1.2.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.1.4...v1.2.0
302 | [v1.1.4]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.1.3...v1.1.4
303 | [v1.1.3]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.1.2...v1.1.3
304 | [v1.1.2]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.1.1...v1.1.2
305 | [v1.1.1]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.1.0...v1.1.1
306 | [v1.1.0]: https://github.com/benchmark-action/github-action-benchmark/compare/v1.0.2...v1.1.0
307 | [v1.0.2]: https://github.com/benchmark-action/github-action-benchmark/tree/v1.0.2
308 |
309 |
310 |
--------------------------------------------------------------------------------
/test/extract.spec.ts:
--------------------------------------------------------------------------------
1 | import * as path from 'path';
2 | import { strict as A } from 'assert';
3 | import { Config, ToolType } from '../src/config';
4 | import { BenchmarkResult, extractResult } from '../src/extract';
5 |
// Default webhook payload used as the mocked GitHub context's payload.
// The index-signature cast lets tests assign arbitrary keys to it; the
// afterEach() hook below restores this object as the context payload.
const dummyWebhookPayload = {
    head_commit: {
        author: null,
        committer: null,
        id: '123456789abcdef',
        message: 'this is dummy',
        timestamp: 'dummy timestamp',
        url: 'https://github.com/dummy/repo',
    },
} as { [key: string]: any };
// Commit data returned by DummyGitHub.repos.getCommit(); declared with `let`
// so tests can swap in per-case commit data.
let dummyCommitData = {};
17 | class DummyGitHub {
18 | repos = {
19 | getCommit: () => {
20 | return {
21 | status: 200,
22 | data: dummyCommitData,
23 | };
24 | },
25 | };
26 | }
// Mocked value of the @actions/github `context` object. The payload starts as
// the default webhook payload and is reset to it by the afterEach() hook
// below; `ref` is a dummy value.
const dummyGitHubContext = {
    payload: dummyWebhookPayload,
    repo: {
        owner: 'dummy',
        repo: 'repo',
    },
    ref: 'abcd1234',
};
35 |
// Mock '@actions/github' with property getters so every read of `context` or
// `GitHub` observes the current module-level dummies defined above.
jest.mock('@actions/github', () => ({
    get context() {
        return dummyGitHubContext;
    },
    get GitHub() {
        return DummyGitHub;
    },
}));
44 |
45 | describe('extractResult()', function () {
46 | afterAll(function () {
47 | jest.unmock('@actions/github');
48 | });
49 |
50 | afterEach(function () {
51 | dummyGitHubContext.payload = dummyWebhookPayload;
52 | });
53 |
54 | const normalCases: Array<{
55 | tool: ToolType;
56 | expected: BenchmarkResult[];
57 | file?: string;
58 | }> = [
59 | {
60 | tool: 'cargo',
61 | expected: [
62 | {
63 | name: 'bench_fib_10',
64 | range: '± 24',
65 | unit: 'ns/iter',
66 | value: 135,
67 | },
68 | {
69 | name: 'bench_fib_20',
70 | range: '± 755',
71 | unit: 'ns/iter',
72 | value: 18149,
73 | },
74 | ],
75 | },
76 | {
77 | tool: 'catch2',
78 | expected: [
79 | {
80 | name: 'Fibonacci 10',
81 | range: '± 19',
82 | unit: 'ns',
83 | value: 344,
84 | extra: '100 samples\n208 iterations',
85 | },
86 | {
87 | name: 'Fibonacci 20',
88 | range: '± 3.256',
89 | unit: 'us',
90 | value: 41.731,
91 | extra: '100 samples\n2 iterations',
92 | },
93 | {
94 | name: 'Fibonacci~ 5!',
95 | range: '± 4',
96 | unit: 'ns',
97 | value: 36,
98 | extra: '100 samples\n1961 iterations',
99 | },
100 | {
101 | name: 'Fibonacci-15_bench',
102 | range: '± 362',
103 | unit: 'us',
104 | value: 3.789,
105 | extra: '100 samples\n20 iterations',
106 | },
107 | ],
108 | },
109 | {
110 | tool: 'go',
111 | expected: [
112 | {
113 | name: 'BenchmarkFib10',
114 | unit: 'ns/op',
115 | value: 325,
116 | extra: '5000000 times\n8 procs',
117 | },
118 | {
119 | name: 'BenchmarkFib20',
120 | unit: 'ns/op',
121 | value: 40537.123,
122 | extra: '30000 times',
123 | },
124 | {
125 | name: 'BenchmarkFib/my_tabled_benchmark_-_10',
126 | unit: 'ns/op',
127 | value: 325,
128 | extra: '5000000 times\n8 procs',
129 | },
130 | {
131 | name: 'BenchmarkFib/my_tabled_benchmark_-_20',
132 | unit: 'ns/op',
133 | value: 40537.123,
134 | extra: '30000 times',
135 | },
136 | {
137 | name: 'BenchmarkFib/my/tabled/benchmark_-_20',
138 | unit: 'ns/op',
139 | value: 40537.456,
140 | extra: '30001 times',
141 | },
142 | ],
143 | },
144 | {
145 | tool: 'benchmarkjs',
146 | expected: [
147 | {
148 | name: 'fib(10)',
149 | range: '±0.74%',
150 | unit: 'ops/sec',
151 | value: 1431759,
152 | extra: '93 samples',
153 | },
154 | {
155 | name: 'fib(20)',
156 | range: '±0.32%',
157 | unit: 'ops/sec',
158 | value: 12146,
159 | extra: '96 samples',
160 | },
161 | {
162 | name: 'createObjectBuffer with 200 comments',
163 | range: '±1.70%',
164 | unit: 'ops/sec',
165 | value: 81.61,
166 | extra: '69 samples',
167 | },
168 | ],
169 | },
170 | {
171 | tool: 'pytest',
172 | expected: [
173 | {
174 | name: 'bench.py::test_fib_10',
175 | range: 'stddev: 0.000006175090189861328',
176 | unit: 'iter/sec',
177 | value: 41513.272817492856,
178 | extra: 'mean: 24.08868133322941 usec\nrounds: 38523',
179 | },
180 | {
181 | name: 'bench.py::test_fib_20',
182 | range: 'stddev: 0.0001745301654140968',
183 | unit: 'iter/sec',
184 | value: 335.0049328331567,
185 | extra: 'mean: 2.9850306726618627 msec\nrounds: 278',
186 | },
187 | ],
188 | },
189 | {
190 | tool: 'googlecpp',
191 | expected: [
192 | {
193 | extra: 'iterations: 3070566\ncpu: 213.65507206163295 ns\nthreads: 1',
194 | name: 'fib_10',
195 | unit: 'ns/iter',
196 | value: 214.98980114547953,
197 | },
198 | {
199 | extra: 'iterations: 23968\ncpu: 27364.90320427236 ns\nthreads: 1',
200 | name: 'fib_20',
201 | unit: 'ns/iter',
202 | value: 27455.600415007055,
203 | },
204 | ],
205 | },
206 | {
207 | tool: 'pytest',
208 | file: 'pytest_several_units.json',
209 | expected: [
210 | {
211 | extra: 'mean: 149.95610248628836 nsec\nrounds: 68536',
212 | name: 'bench.py::test_fib_1',
213 | range: 'stddev: 2.9351731952139377e-8',
214 | unit: 'iter/sec',
215 | value: 6668618.238403659,
216 | },
217 | {
218 | name: 'bench.py::test_fib_10',
219 | range: 'stddev: 0.000005235937482008476',
220 | unit: 'iter/sec',
221 | value: 34652.98828915334,
222 | extra: 'mean: 28.85754012484424 usec\nrounds: 20025',
223 | },
224 | {
225 | name: 'bench.py::test_fib_20',
226 | range: 'stddev: 0.0003737982822178215',
227 | unit: 'iter/sec',
228 | value: 276.8613383807958,
229 | extra: 'mean: 3.611916368852473 msec\nrounds: 122',
230 | },
231 | {
232 | extra: 'mean: 2.0038430469999997 sec\nrounds: 5',
233 | name: 'bench.py::test_sleep_2',
234 | range: 'stddev: 0.0018776587251587858',
235 | unit: 'iter/sec',
236 | value: 0.49904108083570886,
237 | },
238 | ],
239 | },
240 | {
241 | tool: 'catch2',
242 | file: 'issue16_output.txt',
243 | expected: [
244 | {
245 | extra: '100 samples\n76353 iterations',
246 | name: 'Fibonacci 10',
247 | range: '± 0',
248 | unit: 'ns',
249 | value: 0,
250 | },
251 | {
252 | extra: '100 samples\n75814 iterations',
253 | name: 'Fibonacci 20',
254 | range: '± 0',
255 | unit: 'ns',
256 | value: 1,
257 | },
258 | ],
259 | },
260 | {
261 | tool: 'julia',
262 | file: 'julia_output.json',
263 | expected: [
264 | {
265 | extra: 'gctime=0\nmemory=0\nallocs=0\nparams={"gctrial":true,"time_tolerance":0.05,"samples":10000,"evals":390,"gcsample":false,"seconds":5,"overhead":0,"memory_tolerance":0.01}',
266 | name: 'fib/10',
267 | unit: 'ns',
268 | value: 246.03846153846155,
269 | },
270 | {
271 | extra: 'gctime=0\nmemory=0\nallocs=0\nparams={"gctrial":true,"time_tolerance":0.05,"samples":10000,"evals":1,"gcsample":false,"seconds":5,"overhead":0,"memory_tolerance":0.01}',
272 | name: 'fib/20',
273 | unit: 'ns',
274 | value: 31028,
275 | },
276 | ],
277 | },
278 | {
279 | tool: 'customBiggerIsBetter',
280 | expected: [
281 | {
282 | name: 'My Custom Bigger Is Better Benchmark - Throughput',
283 | unit: 'req/s',
284 | value: 70,
285 | range: undefined,
286 | extra: undefined,
287 | },
288 | {
289 | name: 'My Custom Bigger Is Better Benchmark - Free Memory',
290 | unit: 'Megabytes',
291 | value: 150,
292 | range: '3',
293 | extra: 'Optional Value #1: 25\nHelpful Num #2: 100\nAnything Else!',
294 | },
295 | ],
296 | },
297 | {
298 | tool: 'customSmallerIsBetter',
299 | expected: [
300 | {
301 | name: 'My Custom Smaller Is Better Benchmark - CPU Load',
302 | unit: 'Percent',
303 | value: 50,
304 | range: '5%',
305 | extra: 'My Optional Information for the tooltip',
306 | },
307 | {
308 | name: 'My Custom Smaller Is Better Benchmark - Memory Used',
309 | unit: 'Megabytes',
310 | value: 100,
311 | range: undefined,
312 | extra: undefined,
313 | },
314 | ],
315 | },
316 | ];
317 |
318 | for (const test of normalCases) {
319 | it(`extracts benchmark output from ${test.tool}`, async function () {
320 | const file = test.file ?? `${test.tool}_output.txt`;
321 | const outputFilePath = path.join(__dirname, 'data', 'extract', file);
322 | const config = {
323 | tool: test.tool,
324 | outputFilePath,
325 | } as Config;
326 | const bench = await extractResult(config);
327 |
328 | A.equal(bench.commit, dummyWebhookPayload.head_commit);
329 | A.ok(bench.date <= Date.now(), bench.date.toString());
330 | A.equal(bench.tool, test.tool);
331 | A.deepEqual(test.expected, bench.benches);
332 | });
333 | }
334 |
335 | it('raises an error on unexpected tool', async function () {
336 | const config = {
337 | tool: 'foo' as any,
338 | outputFilePath: path.join(__dirname, 'data', 'extract', 'go_output.txt'),
339 | } as Config;
340 | await A.rejects(extractResult(config), /^Error: FATAL: Unexpected tool: 'foo'$/);
341 | });
342 |
343 | it('raises an error when output file is not readable', async function () {
344 | const config = {
345 | tool: 'go',
346 | outputFilePath: 'path/does/not/exist.txt',
347 | } as Config;
348 | await A.rejects(extractResult(config));
349 | });
350 |
351 | it('raises an error when no output found', async function () {
352 | const config = {
353 | tool: 'cargo',
354 | outputFilePath: path.join(__dirname, 'data', 'extract', 'go_output.txt'),
355 | } as Config;
356 | await A.rejects(extractResult(config), /^Error: No benchmark result was found in /);
357 | });
358 |
359 | const toolSpecificErrorCases: Array<{
360 | it: string;
361 | tool: ToolType;
362 | file: string;
363 | expected: RegExp;
364 | }> = [
365 | ...(['pytest', 'googlecpp', 'customBiggerIsBetter', 'customSmallerIsBetter'] as const).map((tool) => ({
366 | it: `raises an error when output file is not in JSON with tool '${tool}'`,
367 | tool,
368 | file: 'go_output.txt',
369 | expected: /must be JSON file/,
370 | })),
371 | ];
372 |
373 | for (const t of toolSpecificErrorCases) {
374 | it(t.it, async function () {
375 | // Note: go_output.txt is not in JSON format!
376 | const outputFilePath = path.join(__dirname, 'data', 'extract', t.file);
377 | const config = { tool: t.tool, outputFilePath } as Config;
378 | await A.rejects(extractResult(config), t.expected);
379 | });
380 | }
381 |
    // When the workflow is triggered by a pull request there is no
    // `head_commit` in the payload; the commit info must be reconstructed
    // from the `pull_request` object instead.
    it('collects the commit information from pull_request payload as fallback', async function () {
        dummyGitHubContext.payload = {
            pull_request: {
                title: 'this is title',
                html_url: 'https://github.com/dummy/repo/pull/1',
                head: {
                    sha: 'abcdef0123456789',
                    user: {
                        login: 'user',
                    },
                    repo: {
                        updated_at: 'repo updated at timestamp',
                    },
                },
            },
        };
        const outputFilePath = path.join(__dirname, 'data', 'extract', 'go_output.txt');
        const config = {
            tool: 'go',
            outputFilePath,
        } as Config;
        const { commit } = await extractResult(config);
        // The PR head user's login is used for both the author name and username.
        const expectedUser = {
            name: 'user',
            username: 'user',
        };
        A.deepEqual(commit.author, expectedUser);
        A.deepEqual(commit.committer, expectedUser);
        A.equal(commit.id, 'abcdef0123456789');
        // The PR title stands in for the commit message.
        A.equal(commit.message, 'this is title');
        A.equal(commit.timestamp, 'repo updated at timestamp');
        // The commit URL is synthesized from the PR URL and the head SHA.
        A.equal(commit.url, 'https://github.com/dummy/repo/pull/1/commits/abcdef0123456789');
    });
415 |
416 | it('collects the commit information from current head via REST API as fallback when githubToken is provided', async function () {
417 | dummyGitHubContext.payload = {};
418 | dummyCommitData = {
419 | author: {
420 | login: 'testAuthorLogin',
421 | },
422 | committer: {
423 | login: 'testCommitterLogin',
424 | },
425 | commit: {
426 | author: {
427 | name: 'test author',
428 | date: 'author updated at timestamp',
429 | email: 'author@testdummy.com',
430 | },
431 | committer: {
432 | name: 'test committer',
433 | // We use the `author.date` instead.
434 | // date: 'committer updated at timestamp',
435 | email: 'committer@testdummy.com',
436 | },
437 | message: 'test message',
438 | },
439 | sha: 'abcd1234',
440 | html_url: 'https://github.com/dymmy/repo/commit/abcd1234',
441 | };
442 | const outputFilePath = path.join(__dirname, 'data', 'extract', 'go_output.txt');
443 | const config = {
444 | tool: 'go',
445 | outputFilePath,
446 | githubToken: 'abcd1234',
447 | } as Config;
448 |
449 | const { commit } = await extractResult(config);
450 |
451 | const expectedCommit = {
452 | id: 'abcd1234',
453 | message: 'test message',
454 | timestamp: 'author updated at timestamp',
455 | url: 'https://github.com/dymmy/repo/commit/abcd1234',
456 | author: {
457 | name: 'test author',
458 | username: 'testAuthorLogin',
459 | email: 'author@testdummy.com',
460 | },
461 | committer: {
462 | name: 'test committer',
463 | username: 'testCommitterLogin',
464 | email: 'committer@testdummy.com',
465 | },
466 | };
467 | A.deepEqual(commit, expectedCommit);
468 | });
469 |
470 | it('raises an error when commit information is not found in webhook payload and no githubToken is provided', async function () {
471 | dummyGitHubContext.payload = {};
472 | const outputFilePath = path.join(__dirname, 'data', 'extract', 'go_output.txt');
473 | const config = {
474 | tool: 'go',
475 | outputFilePath,
476 | } as Config;
477 | await A.rejects(extractResult(config), /^Error: No commit information is found in payload/);
478 | });
479 | });
480 |
--------------------------------------------------------------------------------
/src/write.ts:
--------------------------------------------------------------------------------
1 | import { promises as fs } from 'fs';
2 | import * as path from 'path';
3 | import * as io from '@actions/io';
4 | import * as core from '@actions/core';
5 | import * as github from '@actions/github';
6 | import * as git from './git';
7 | import { Benchmark, BenchmarkResult } from './extract';
8 | import { Config, ToolType } from './config';
9 | import { DEFAULT_INDEX_HTML } from './default_index_html';
10 |
// Mapping from benchmark suite name to its history of results (oldest first).
export type BenchmarkSuites = { [name: string]: Benchmark[] };
// Schema of the payload stored in data.js on the GitHub Pages branch (or in
// the external JSON file when external-data-json-path is configured).
export interface DataJson {
    lastUpdate: number; // Epoch milliseconds of the most recent update
    repoUrl: string; // HTML URL of the repository the data belongs to
    entries: BenchmarkSuites;
}

// data.js is a JavaScript file assigning the JSON payload to a global so the
// chart page can load it with a plain <script> tag.
export const SCRIPT_PREFIX = 'window.BENCHMARK_DATA = ';
// Fallback content used when no previous data file exists yet.
// NOTE(review): this object is spread shallowly at load sites; `entries` must
// be replaced with a fresh object there to avoid shared mutable state.
const DEFAULT_DATA_JSON = {
    lastUpdate: 0,
    repoUrl: '',
    entries: {},
};
24 |
25 | async function loadDataJs(dataPath: string): Promise {
26 | try {
27 | const script = await fs.readFile(dataPath, 'utf8');
28 | const json = script.slice(SCRIPT_PREFIX.length);
29 | const parsed = JSON.parse(json);
30 | core.debug(`Loaded data.js at ${dataPath}`);
31 | return parsed;
32 | } catch (err) {
33 | console.log(`Could not find data.js at ${dataPath}. Using empty default: ${err}`);
34 | return { ...DEFAULT_DATA_JSON };
35 | }
36 | }
37 |
38 | async function storeDataJs(dataPath: string, data: DataJson) {
39 | const script = SCRIPT_PREFIX + JSON.stringify(data, null, 2);
40 | await fs.writeFile(dataPath, script, 'utf8');
41 | core.debug(`Overwrote ${dataPath} for adding new data`);
42 | }
43 |
44 | async function addIndexHtmlIfNeeded(dir: string) {
45 | const indexHtml = path.join(dir, 'index.html');
46 | try {
47 | await fs.stat(indexHtml);
48 | core.debug(`Skipped to create default index.html since it is already existing: ${indexHtml}`);
49 | return;
50 | } catch (_) {
51 | // Continue
52 | }
53 |
54 | await fs.writeFile(indexHtml, DEFAULT_INDEX_HTML, 'utf8');
55 | await git.cmd('add', indexHtml);
56 | console.log('Created default index.html at', indexHtml);
57 | }
58 |
59 | function biggerIsBetter(tool: ToolType): boolean {
60 | switch (tool) {
61 | case 'cargo':
62 | return false;
63 | case 'go':
64 | return false;
65 | case 'benchmarkjs':
66 | return true;
67 | case 'pytest':
68 | return true;
69 | case 'googlecpp':
70 | return false;
71 | case 'catch2':
72 | return false;
73 | case 'julia':
74 | return false;
75 | case 'customBiggerIsBetter':
76 | return true;
77 | case 'customSmallerIsBetter':
78 | return false;
79 | }
80 | }
81 |
// A single detected performance regression: the current and previous
// measurements of the same benchmark, plus the normalized worseness ratio
// (> 1 means the current result is worse than the previous one).
interface Alert {
    current: BenchmarkResult;
    prev: BenchmarkResult;
    ratio: number;
}
87 |
88 | function findAlerts(curSuite: Benchmark, prevSuite: Benchmark, threshold: number): Alert[] {
89 | core.debug(`Comparing current:${curSuite.commit.id} and prev:${prevSuite.commit.id} for alert`);
90 |
91 | const alerts = [];
92 | for (const current of curSuite.benches) {
93 | const prev = prevSuite.benches.find((b) => b.name === current.name);
94 | if (prev === undefined) {
95 | core.debug(`Skipped because benchmark '${current.name}' is not found in previous benchmarks`);
96 | continue;
97 | }
98 |
99 | const ratio = biggerIsBetter(curSuite.tool)
100 | ? prev.value / current.value // e.g. current=100, prev=200
101 | : current.value / prev.value; // e.g. current=200, prev=100
102 |
103 | if (ratio > threshold) {
104 | core.warning(
105 | `Performance alert! Previous value was ${prev.value} and current value is ${current.value}.` +
106 | ` It is ${ratio}x worse than previous exceeding a ratio threshold ${threshold}`,
107 | );
108 | alerts.push({ current, prev, ratio });
109 | }
110 | }
111 |
112 | return alerts;
113 | }
114 |
115 | function getCurrentRepoMetadata() {
116 | const { repo, owner } = github.context.repo;
117 | return {
118 | name: repo,
119 | owner: {
120 | login: owner,
121 | },
122 | // eslint-disable-next-line @typescript-eslint/naming-convention
123 | html_url: `https://github.com/${owner}/${repo}`,
124 | };
125 | }
126 |
127 | function floatStr(n: number) {
128 | if (Number.isInteger(n)) {
129 | return n.toFixed(0);
130 | }
131 |
132 | if (n > 0.1) {
133 | return n.toFixed(2);
134 | }
135 |
136 | return n.toString();
137 | }
138 |
139 | function strVal(b: BenchmarkResult): string {
140 | let s = `\`${b.value}\` ${b.unit}`;
141 | if (b.range) {
142 | s += ` (\`${b.range}\`)`;
143 | }
144 | return s;
145 | }
146 |
147 | function commentFooter(): string {
148 | const repoMetadata = getCurrentRepoMetadata();
149 | const repoUrl = repoMetadata.html_url ?? '';
150 | const actionUrl = repoUrl + '/actions?query=workflow%3A' + encodeURIComponent(github.context.workflow);
151 |
152 | return `This comment was automatically generated by [workflow](${actionUrl}) using [github-action-benchmark](https://github.com/marketplace/actions/continuous-benchmark).`;
153 | }
154 |
155 | function buildComment(benchName: string, curSuite: Benchmark, prevSuite: Benchmark): string {
156 | const lines = [
157 | `# ${benchName}`,
158 | '',
159 | '',
160 | '',
161 | `| Benchmark suite | Current: ${curSuite.commit.id} | Previous: ${prevSuite.commit.id} | Ratio |`,
162 | '|-|-|-|-|',
163 | ];
164 |
165 | for (const current of curSuite.benches) {
166 | let line;
167 | const prev = prevSuite.benches.find((i) => i.name === current.name);
168 |
169 | if (prev) {
170 | const ratio = biggerIsBetter(curSuite.tool)
171 | ? prev.value / current.value // e.g. current=100, prev=200
172 | : current.value / prev.value;
173 |
174 | line = `| \`${current.name}\` | ${strVal(current)} | ${strVal(prev)} | \`${floatStr(ratio)}\` |`;
175 | } else {
176 | line = `| \`${current.name}\` | ${strVal(current)} | | |`;
177 | }
178 |
179 | lines.push(line);
180 | }
181 |
182 | // Footer
183 | lines.push('', ' ', '', commentFooter());
184 |
185 | return lines.join('\n');
186 | }
187 |
188 | function buildAlertComment(
189 | alerts: Alert[],
190 | benchName: string,
191 | curSuite: Benchmark,
192 | prevSuite: Benchmark,
193 | threshold: number,
194 | cc: string[],
195 | ): string {
196 | // Do not show benchmark name if it is the default value 'Benchmark'.
197 | const benchmarkText = benchName === 'Benchmark' ? '' : ` **'${benchName}'**`;
198 | const title = threshold === 0 ? '# Performance Report' : '# :warning: **Performance Alert** :warning:';
199 | const thresholdString = floatStr(threshold);
200 | const lines = [
201 | title,
202 | '',
203 | `Possible performance regression was detected for benchmark${benchmarkText}.`,
204 | `Benchmark result of this commit is worse than the previous benchmark result exceeding threshold \`${thresholdString}\`.`,
205 | '',
206 | `| Benchmark suite | Current: ${curSuite.commit.id} | Previous: ${prevSuite.commit.id} | Ratio |`,
207 | '|-|-|-|-|',
208 | ];
209 |
210 | for (const alert of alerts) {
211 | const { current, prev, ratio } = alert;
212 | const line = `| \`${current.name}\` | ${strVal(current)} | ${strVal(prev)} | \`${floatStr(ratio)}\` |`;
213 | lines.push(line);
214 | }
215 |
216 | // Footer
217 | lines.push('', commentFooter());
218 |
219 | if (cc.length > 0) {
220 | lines.push('', `CC: ${cc.join(' ')}`);
221 | }
222 |
223 | return lines.join('\n');
224 | }
225 |
226 | async function leaveComment(commitId: string, body: string, token: string) {
227 | core.debug('Sending comment:\n' + body);
228 |
229 | const repoMetadata = getCurrentRepoMetadata();
230 | const repoUrl = repoMetadata.html_url ?? '';
231 | const client = new github.GitHub(token);
232 | const res = await client.repos.createCommitComment({
233 | owner: repoMetadata.owner.login,
234 | repo: repoMetadata.name,
235 | // eslint-disable-next-line @typescript-eslint/naming-convention
236 | commit_sha: commitId,
237 | body,
238 | });
239 |
240 | const commitUrl = `${repoUrl}/commit/${commitId}`;
241 | console.log(`Comment was sent to ${commitUrl}. Response:`, res.status, res.data);
242 |
243 | return res;
244 | }
245 |
246 | async function handleComment(benchName: string, curSuite: Benchmark, prevSuite: Benchmark, config: Config) {
247 | const { commentAlways, githubToken } = config;
248 |
249 | if (!commentAlways) {
250 | core.debug('Comment check was skipped because comment-always is disabled');
251 | return;
252 | }
253 |
254 | if (!githubToken) {
255 | throw new Error("'comment-always' input is set but 'github-token' input is not set");
256 | }
257 |
258 | core.debug('Commenting about benchmark comparison');
259 |
260 | const body = buildComment(benchName, curSuite, prevSuite);
261 |
262 | await leaveComment(curSuite.commit.id, body, githubToken);
263 | }
264 |
// Checks the current suite against the previous one for regressions and,
// depending on configuration, comments on the commit and/or fails the
// workflow run by throwing.
async function handleAlert(benchName: string, curSuite: Benchmark, prevSuite: Benchmark, config: Config) {
    const { alertThreshold, githubToken, commentOnAlert, failOnAlert, alertCommentCcUsers, failThreshold } = config;

    if (!commentOnAlert && !failOnAlert) {
        core.debug('Alert check was skipped because both comment-on-alert and fail-on-alert were disabled');
        return;
    }

    const alerts = findAlerts(curSuite, prevSuite, alertThreshold);
    if (alerts.length === 0) {
        core.debug('No performance alert found happily');
        return;
    }

    core.debug(`Found ${alerts.length} alerts`);
    const body = buildAlertComment(alerts, benchName, curSuite, prevSuite, alertThreshold, alertCommentCcUsers);
    let message = body;
    let url = null;

    if (commentOnAlert) {
        if (!githubToken) {
            throw new Error("'comment-on-alert' input is set but 'github-token' input is not set");
        }
        const res = await leaveComment(curSuite.commit.id, body, githubToken);
        url = res.data.html_url;
        // Include a link to the posted comment in the failure message below.
        message = body + `\nComment was generated at ${url}`;
    }

    if (failOnAlert) {
        // Note: alertThreshold is smaller than failThreshold. It was checked in config.ts
        const len = alerts.length;
        const threshold = floatStr(failThreshold);
        const failures = alerts.filter((a) => a.ratio > failThreshold);
        if (failures.length > 0) {
            core.debug('Mark this workflow as fail since one or more fatal alerts found');
            if (failThreshold !== alertThreshold) {
                // Prepend message that explains how these alerts were detected with different thresholds
                message = `${failures.length} of ${len} alerts exceeded the failure threshold \`${threshold}\` specified by fail-threshold input:\n\n${message}`;
            }
            // Throwing marks the workflow run as failed with the alert body as the reason.
            throw new Error(message);
        } else {
            core.debug(
                `${len} alerts exceeding the alert threshold ${alertThreshold} were found but` +
                    ` all of them did not exceed the failure threshold ${threshold}`,
            );
        }
    }
}
313 |
314 | function addBenchmarkToDataJson(
315 | benchName: string,
316 | bench: Benchmark,
317 | data: DataJson,
318 | maxItems: number | null,
319 | ): Benchmark | null {
320 | const repoMetadata = getCurrentRepoMetadata();
321 | const htmlUrl = repoMetadata.html_url ?? '';
322 |
323 | let prevBench: Benchmark | null = null;
324 | data.lastUpdate = Date.now();
325 | data.repoUrl = htmlUrl;
326 |
327 | // Add benchmark result
328 | if (data.entries[benchName] === undefined) {
329 | data.entries[benchName] = [bench];
330 | core.debug(`No suite was found for benchmark '${benchName}' in existing data. Created`);
331 | } else {
332 | const suites = data.entries[benchName];
333 | // Get last suite which has different commit ID for alert comment
334 | for (const e of suites.slice().reverse()) {
335 | if (e.commit.id !== bench.commit.id) {
336 | prevBench = e;
337 | break;
338 | }
339 | }
340 |
341 | suites.push(bench);
342 |
343 | if (maxItems !== null && suites.length > maxItems) {
344 | suites.splice(0, suites.length - maxItems);
345 | core.debug(
346 | `Number of data items for '${benchName}' was truncated to ${maxItems} due to max-items-in-charts`,
347 | );
348 | }
349 | }
350 |
351 | return prevBench;
352 | }
353 |
354 | function isRemoteRejectedError(err: unknown) {
355 | if (err instanceof Error) {
356 | return ['[remote rejected]', '[rejected]'].some((l) => err.message.includes(l));
357 | }
358 | return false;
359 | }
360 |
361 | async function writeBenchmarkToGitHubPagesWithRetry(
362 | bench: Benchmark,
363 | config: Config,
364 | retry: number,
365 | ): Promise {
366 | const {
367 | name,
368 | tool,
369 | ghPagesBranch,
370 | benchmarkDataDirPath,
371 | githubToken,
372 | autoPush,
373 | skipFetchGhPages,
374 | maxItemsInChart,
375 | } = config;
376 | const dataPath = path.join(benchmarkDataDirPath, 'data.js');
377 | // FIXME: This payload is not available on `schedule:` or `workflow_dispatch:` events.
378 | const isPrivateRepo = github.context.payload.repository?.private ?? false;
379 |
380 | if (!skipFetchGhPages && (!isPrivateRepo || githubToken)) {
381 | await git.pull(githubToken, ghPagesBranch);
382 | } else if (isPrivateRepo && !skipFetchGhPages) {
383 | core.warning(
384 | "'git pull' was skipped. If you want to ensure GitHub Pages branch is up-to-date " +
385 | "before generating a commit, please set 'github-token' input to pull GitHub pages branch",
386 | );
387 | }
388 |
389 | await io.mkdirP(benchmarkDataDirPath);
390 |
391 | const data = await loadDataJs(dataPath);
392 | const prevBench = addBenchmarkToDataJson(name, bench, data, maxItemsInChart);
393 |
394 | await storeDataJs(dataPath, data);
395 |
396 | await git.cmd('add', dataPath);
397 | await addIndexHtmlIfNeeded(benchmarkDataDirPath);
398 | await git.cmd('commit', '-m', `add ${name} (${tool}) benchmark result for ${bench.commit.id}`);
399 |
400 | if (githubToken && autoPush) {
401 | try {
402 | await git.push(githubToken, ghPagesBranch);
403 | console.log(
404 | `Automatically pushed the generated commit to ${ghPagesBranch} branch since 'auto-push' is set to true`,
405 | );
406 | } catch (err: any) {
407 | if (!isRemoteRejectedError(err)) {
408 | throw err;
409 | }
410 | // Fall through
411 |
412 | core.warning(`Auto-push failed because the remote ${ghPagesBranch} was updated after git pull`);
413 |
414 | if (retry > 0) {
415 | core.debug('Rollback the auto-generated commit before retry');
416 | await git.cmd('reset', '--hard', 'HEAD~1');
417 |
418 | core.warning(
419 | `Retrying to generate a commit and push to remote ${ghPagesBranch} with retry count ${retry}...`,
420 | );
421 | return await writeBenchmarkToGitHubPagesWithRetry(bench, config, retry - 1); // Recursively retry
422 | } else {
423 | core.warning(`Failed to add benchmark data to '${name}' data: ${JSON.stringify(bench)}`);
424 | throw new Error(
425 | `Auto-push failed 3 times since the remote branch ${ghPagesBranch} rejected pushing all the time. Last exception was: ${err.message}`,
426 | );
427 | }
428 | }
429 | } else {
430 | core.debug(
431 | `Auto-push to ${ghPagesBranch} is skipped because it requires both 'github-token' and 'auto-push' inputs`,
432 | );
433 | }
434 |
435 | return prevBench;
436 | }
437 |
438 | async function writeBenchmarkToGitHubPages(bench: Benchmark, config: Config): Promise {
439 | const { ghPagesBranch, skipFetchGhPages } = config;
440 | if (!skipFetchGhPages) {
441 | await git.cmd('fetch', 'origin', `${ghPagesBranch}:${ghPagesBranch}`);
442 | }
443 | await git.cmd('switch', ghPagesBranch);
444 | try {
445 | return await writeBenchmarkToGitHubPagesWithRetry(bench, config, 10);
446 | } finally {
447 | // `git switch` does not work for backing to detached head
448 | await git.cmd('checkout', '-');
449 | }
450 | }
451 |
452 | async function loadDataJson(jsonPath: string): Promise {
453 | try {
454 | const content = await fs.readFile(jsonPath, 'utf8');
455 | const json: DataJson = JSON.parse(content);
456 | core.debug(`Loaded external JSON file at ${jsonPath}`);
457 | return json;
458 | } catch (err) {
459 | core.warning(
460 | `Could not find external JSON file for benchmark data at ${jsonPath}. Using empty default: ${err}`,
461 | );
462 | return { ...DEFAULT_DATA_JSON };
463 | }
464 | }
465 |
466 | async function writeBenchmarkToExternalJson(
467 | bench: Benchmark,
468 | jsonFilePath: string,
469 | config: Config,
470 | ): Promise {
471 | const { name, maxItemsInChart, saveDataFile } = config;
472 | const data = await loadDataJson(jsonFilePath);
473 | const prevBench = addBenchmarkToDataJson(name, bench, data, maxItemsInChart);
474 |
475 | if (!saveDataFile) {
476 | core.debug('Skipping storing benchmarks in external data file');
477 | return prevBench;
478 | }
479 |
480 | try {
481 | const jsonDirPath = path.dirname(jsonFilePath);
482 | await io.mkdirP(jsonDirPath);
483 | await fs.writeFile(jsonFilePath, JSON.stringify(data, null, 2), 'utf8');
484 | } catch (err) {
485 | throw new Error(`Could not store benchmark data as JSON at ${jsonFilePath}: ${err}`);
486 | }
487 |
488 | return prevBench;
489 | }
490 |
491 | export async function writeBenchmark(bench: Benchmark, config: Config) {
492 | const { name, externalDataJsonPath } = config;
493 | const prevBench = externalDataJsonPath
494 | ? await writeBenchmarkToExternalJson(bench, externalDataJsonPath, config)
495 | : await writeBenchmarkToGitHubPages(bench, config);
496 |
497 | // Put this after `git push` for reducing possibility to get conflict on push. Since sending
498 | // comment take time due to API call, do it after updating remote branch.
499 | if (prevBench === null) {
500 | core.debug('Alert check was skipped because previous benchmark result was not found');
501 | } else {
502 | await handleComment(name, bench, prevBench, config);
503 | await handleAlert(name, bench, prevBench, config);
504 | }
505 | }
506 |
--------------------------------------------------------------------------------