├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── CICD.yml
├── .gitignore
├── CHANGELOG.md
├── CITATION.cff
├── Cargo.lock
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── build.rs
├── doc
│   ├── execution-order.png
│   ├── execution-order.svg
│   ├── histogram.png
│   ├── hyperfine.1
│   ├── sponsors.md
│   ├── sponsors
│   │   └── warp-logo.png
│   └── whisker.png
├── scripts
│   ├── README.md
│   ├── advanced_statistics.py
│   ├── plot_benchmark_comparison.py
│   ├── plot_histogram.py
│   ├── plot_parametrized.py
│   ├── plot_progression.py
│   ├── plot_whisker.py
│   ├── ruff.toml
│   └── welch_ttest.py
├── src
│   ├── benchmark
│   │   ├── benchmark_result.rs
│   │   ├── executor.rs
│   │   ├── mod.rs
│   │   ├── relative_speed.rs
│   │   ├── scheduler.rs
│   │   └── timing_result.rs
│   ├── cli.rs
│   ├── command.rs
│   ├── error.rs
│   ├── export
│   │   ├── asciidoc.rs
│   │   ├── csv.rs
│   │   ├── json.rs
│   │   ├── markdown.rs
│   │   ├── markup.rs
│   │   ├── mod.rs
│   │   ├── orgmode.rs
│   │   └── tests.rs
│   ├── main.rs
│   ├── options.rs
│   ├── outlier_detection.rs
│   ├── output
│   │   ├── format.rs
│   │   ├── mod.rs
│   │   ├── progress_bar.rs
│   │   └── warnings.rs
│   ├── parameter
│   │   ├── mod.rs
│   │   ├── range_step.rs
│   │   └── tokenize.rs
│   ├── timer
│   │   ├── mod.rs
│   │   ├── unix_timer.rs
│   │   ├── wall_clock_timer.rs
│   │   └── windows_timer.rs
│   └── util
│       ├── exit_code.rs
│       ├── min_max.rs
│       ├── mod.rs
│       ├── number.rs
│       ├── randomized_environment_offset.rs
│       └── units.rs
└── tests
    ├── common.rs
    ├── example_input_file.txt
    ├── execution_order_tests.rs
    └── integration_tests.rs

/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   - package-ecosystem: cargo
4 |     directory: "/"
5 |     schedule:
6 |       interval: monthly
7 |       time: "04:00"
8 |       timezone: Europe/Berlin
9 |     open-pull-requests-limit: 2
10 |   - package-ecosystem: "github-actions"
11 |     directory: "/"
12 |     schedule:
13 |       interval: "daily"
14 | 
--------------------------------------------------------------------------------
/.github/workflows/CICD.yml:
--------------------------------------------------------------------------------
1 | name: CICD
2 | 
3 | env:
4 |   CICD_INTERMEDIATES_DIR: "_cicd-intermediates"
5 |   MSRV_FEATURES: ""
6 | 
7 | on:
8 |   workflow_dispatch:
9 |   pull_request:
10 |   push:
11 |     branches:
12 |       - master
13 |     tags:
14 |       - '*'
15 | 
16 | jobs:
17 |   crate_metadata:
18 |     name: Extract crate metadata
19 |     runs-on: ubuntu-latest
20 |     steps:
21 |       - uses: actions/checkout@v4
22 |       - name: Extract crate information
23 |         id: crate_metadata
24 |         run: |
25 |           cargo metadata --no-deps --format-version 1 | jq -r '
26 |             .packages[0] |
27 |             [
28 |               "name=" + .name,
29 |               "version=" + .version,
30 |               "maintainer=" + (.authors[0] // ""),
31 |               "homepage=" + (.homepage // ""),
32 |               "msrv=" + (.rust_version // ""),
33 |               "bin-name=" + ( (.targets[] | select(.kind[0] == "bin") | .name) // .name )
34 |             ] |
35 |             join("\n")
36 |           ' | tee -a $GITHUB_OUTPUT
37 |     outputs:
38 |       name: ${{ steps.crate_metadata.outputs.name }}
39 |       version: ${{ steps.crate_metadata.outputs.version }}
40 |       maintainer: ${{ steps.crate_metadata.outputs.maintainer }}
41 |       homepage: ${{ steps.crate_metadata.outputs.homepage }}
42 |       msrv: ${{ steps.crate_metadata.outputs.msrv }}
43 |       bin-name: ${{ steps.crate_metadata.outputs.bin-name }}
44 | 
45 |   ensure_cargo_fmt:
46 |     name: Ensure 'cargo fmt' has been run
47 |     runs-on: ubuntu-20.04
48 |     steps:
49 |       - uses: dtolnay/rust-toolchain@stable
50 |         with:
51 |           components: rustfmt
52 |       - uses: actions/checkout@v4
53 |       - run: cargo fmt -- --check
54 | 
55 |   min_version:
56 |     name: Minimum supported rust version
57 |     runs-on: ubuntu-20.04
58 |     needs: 
crate_metadata 59 | steps: 60 | - name: Checkout source code 61 | uses: actions/checkout@v4 62 | 63 | - name: Install rust toolchain (v${{ needs.crate_metadata.outputs.msrv }}) 64 | uses: dtolnay/rust-toolchain@master 65 | with: 66 | toolchain: ${{ needs.crate_metadata.outputs.msrv }} 67 | components: clippy 68 | - name: Run clippy (on minimum supported rust version to prevent warnings we can't fix) 69 | run: cargo clippy --locked --all-targets ${{ env.MSRV_FEATURES }} 70 | - name: Run tests 71 | run: cargo test --locked ${{ env.MSRV_FEATURES }} 72 | 73 | build: 74 | name: ${{ matrix.job.target }} (${{ matrix.job.os }}) 75 | runs-on: ${{ matrix.job.os }} 76 | needs: crate_metadata 77 | strategy: 78 | fail-fast: false 79 | matrix: 80 | job: 81 | - { target: aarch64-unknown-linux-gnu , os: ubuntu-20.04, use-cross: true } 82 | - { target: arm-unknown-linux-gnueabihf , os: ubuntu-20.04, use-cross: true } 83 | - { target: arm-unknown-linux-musleabihf, os: ubuntu-20.04, use-cross: true } 84 | - { target: i686-pc-windows-msvc , os: windows-2019 } 85 | - { target: i686-unknown-linux-gnu , os: ubuntu-20.04, use-cross: true } 86 | - { target: i686-unknown-linux-musl , os: ubuntu-20.04, use-cross: true } 87 | - { target: x86_64-apple-darwin , os: macos-13 } 88 | - { target: aarch64-apple-darwin , os: macos-14 } 89 | # - { target: x86_64-pc-windows-gnu , os: windows-2019 } 90 | - { target: x86_64-pc-windows-msvc , os: windows-2019 } 91 | - { target: x86_64-unknown-linux-gnu , os: ubuntu-20.04, use-cross: true } 92 | - { target: x86_64-unknown-linux-musl , os: ubuntu-20.04, use-cross: true } 93 | env: 94 | BUILD_CMD: cargo 95 | steps: 96 | - name: Checkout source code 97 | uses: actions/checkout@v4 98 | 99 | - name: Install prerequisites 100 | shell: bash 101 | run: | 102 | case ${{ matrix.job.target }} in 103 | arm-unknown-linux-*) sudo apt-get -y update ; sudo apt-get -y install gcc-arm-linux-gnueabihf ;; 104 | aarch64-unknown-linux-gnu) sudo apt-get -y update ; sudo apt-get -y install gcc-aarch64-linux-gnu ;; 105 | esac 106 | 107 | - name: Install Rust toolchain 108 | uses: dtolnay/rust-toolchain@stable 109 | with: 110 | targets: ${{ matrix.job.target }} 111 | 112 | - name: Install cross 113 | if: matrix.job.use-cross 114 | uses: taiki-e/install-action@v2 115 | with: 116 | tool: cross 117 | 118 | - name: Overwrite build command env variable 119 | if: matrix.job.use-cross 120 | shell: bash 121 | run: echo "BUILD_CMD=cross" >> $GITHUB_ENV 122 | 123 | - name: Show version information (Rust, cargo, GCC) 124 | shell: bash 125 | run: | 126 | set -x 127 | gcc --version || true 128 | rustup -V 129 | rustup toolchain list 130 | rustup default 131 | cargo -V 132 | rustc -V 133 | 134 | - name: Build 135 | shell: bash 136 | run: $BUILD_CMD build --locked --release --target=${{ matrix.job.target }} 137 | 138 | - name: Set binary name & path 139 | id: bin 140 | shell: bash 141 | run: | 142 | # Figure out suffix of binary 143 | EXE_suffix="" 144 | case ${{ matrix.job.target }} in 145 | *-pc-windows-*) EXE_suffix=".exe" ;; 146 | esac; 147 | 148 | # Setup paths 149 | BIN_NAME="${{ needs.crate_metadata.outputs.bin-name }}${EXE_suffix}" 150 | BIN_PATH="target/${{ matrix.job.target }}/release/${BIN_NAME}" 151 | 152 | # Let subsequent steps know where to find the binary 153 | echo "BIN_PATH=${BIN_PATH}" >> $GITHUB_OUTPUT 154 | echo "BIN_NAME=${BIN_NAME}" >> $GITHUB_OUTPUT 155 | 156 | - name: Set testing options 157 | id: test-options 158 | shell: bash 159 | run: | 160 | # test only library unit tests and binary for 
arm-type targets 161 | unset CARGO_TEST_OPTIONS 162 | unset CARGO_TEST_OPTIONS ; case ${{ matrix.job.target }} in arm-* | aarch64-*) CARGO_TEST_OPTIONS="--bin ${{ steps.bin.outputs.BIN_NAME }}" ;; esac; 163 | echo "CARGO_TEST_OPTIONS=${CARGO_TEST_OPTIONS}" >> $GITHUB_OUTPUT 164 | 165 | - name: Run tests 166 | shell: bash 167 | run: $BUILD_CMD test --locked --target=${{ matrix.job.target }} ${{ steps.test-options.outputs.CARGO_TEST_OPTIONS}} 168 | 169 | - name: Create tarball 170 | id: package 171 | shell: bash 172 | run: | 173 | PKG_suffix=".tar.gz" ; case ${{ matrix.job.target }} in *-pc-windows-*) PKG_suffix=".zip" ;; esac; 174 | PKG_BASENAME=${{ needs.crate_metadata.outputs.name }}-v${{ needs.crate_metadata.outputs.version }}-${{ matrix.job.target }} 175 | PKG_NAME=${PKG_BASENAME}${PKG_suffix} 176 | echo "PKG_NAME=${PKG_NAME}" >> $GITHUB_OUTPUT 177 | 178 | PKG_STAGING="${{ env.CICD_INTERMEDIATES_DIR }}/package" 179 | ARCHIVE_DIR="${PKG_STAGING}/${PKG_BASENAME}/" 180 | mkdir -p "${ARCHIVE_DIR}" 181 | mkdir -p "${ARCHIVE_DIR}/autocomplete" 182 | 183 | # Binary 184 | cp "${{ steps.bin.outputs.BIN_PATH }}" "$ARCHIVE_DIR" 185 | 186 | # README, LICENSE and CHANGELOG files 187 | cp "README.md" "LICENSE-MIT" "LICENSE-APACHE" "CHANGELOG.md" "$ARCHIVE_DIR" 188 | 189 | # Man page 190 | cp 'doc/${{ needs.crate_metadata.outputs.name }}.1' "$ARCHIVE_DIR" 191 | 192 | # Autocompletion files 193 | cp 'target/${{ matrix.job.target }}/release/build/${{ needs.crate_metadata.outputs.name }}'*/out/'${{ needs.crate_metadata.outputs.name }}.bash' "$ARCHIVE_DIR/autocomplete/" 194 | cp 'target/${{ matrix.job.target }}/release/build/${{ needs.crate_metadata.outputs.name }}'*/out/'${{ needs.crate_metadata.outputs.name }}.fish' "$ARCHIVE_DIR/autocomplete/" 195 | cp 'target/${{ matrix.job.target }}/release/build/${{ needs.crate_metadata.outputs.name }}'*/out/'_${{ needs.crate_metadata.outputs.name }}.ps1' "$ARCHIVE_DIR/autocomplete/" 196 | cp 'target/${{ matrix.job.target }}/release/build/${{ needs.crate_metadata.outputs.name }}'*/out/'_${{ needs.crate_metadata.outputs.name }}' "$ARCHIVE_DIR/autocomplete/" 197 | 198 | # base compressed package 199 | pushd "${PKG_STAGING}/" >/dev/null 200 | case ${{ matrix.job.target }} in 201 | *-pc-windows-*) 7z -y a "${PKG_NAME}" "${PKG_BASENAME}"/* | tail -2 ;; 202 | *) tar czf "${PKG_NAME}" "${PKG_BASENAME}"/* ;; 203 | esac; 204 | popd >/dev/null 205 | 206 | # Let subsequent steps know where to find the compressed package 207 | echo "PKG_PATH=${PKG_STAGING}/${PKG_NAME}" >> $GITHUB_OUTPUT 208 | 209 | - name: Create Debian package 210 | id: debian-package 211 | shell: bash 212 | if: startsWith(matrix.job.os, 'ubuntu') 213 | run: | 214 | COPYRIGHT_YEARS="2018 - "$(date "+%Y") 215 | DPKG_STAGING="${{ env.CICD_INTERMEDIATES_DIR }}/debian-package" 216 | DPKG_DIR="${DPKG_STAGING}/dpkg" 217 | mkdir -p "${DPKG_DIR}" 218 | 219 | DPKG_BASENAME=${{ needs.crate_metadata.outputs.name }} 220 | DPKG_CONFLICTS=${{ needs.crate_metadata.outputs.name }}-musl 221 | case ${{ matrix.job.target }} in *-musl*) DPKG_BASENAME=${{ needs.crate_metadata.outputs.name }}-musl ; DPKG_CONFLICTS=${{ needs.crate_metadata.outputs.name }} ;; esac; 222 | DPKG_VERSION=${{ needs.crate_metadata.outputs.version }} 223 | 224 | unset DPKG_ARCH 225 | case ${{ matrix.job.target }} in 226 | aarch64-*-linux-*) DPKG_ARCH=arm64 ;; 227 | arm-*-linux-*hf) DPKG_ARCH=armhf ;; 228 | i686-*-linux-*) DPKG_ARCH=i686 ;; 229 | x86_64-*-linux-*) DPKG_ARCH=amd64 ;; 230 | *) DPKG_ARCH=notset ;; 231 | esac; 232 | 233 | 
DPKG_NAME="${DPKG_BASENAME}_${DPKG_VERSION}_${DPKG_ARCH}.deb"
234 |           echo "DPKG_NAME=${DPKG_NAME}" >> $GITHUB_OUTPUT
235 | 
236 |           # Binary
237 |           install -Dm755 "${{ steps.bin.outputs.BIN_PATH }}" "${DPKG_DIR}/usr/bin/${{ steps.bin.outputs.BIN_NAME }}"
238 | 
239 |           # Man page
240 |           install -Dm644 'doc/${{ needs.crate_metadata.outputs.name }}.1' "${DPKG_DIR}/usr/share/man/man1/${{ needs.crate_metadata.outputs.name }}.1"
241 |           gzip -n --best "${DPKG_DIR}/usr/share/man/man1/${{ needs.crate_metadata.outputs.name }}.1"
242 | 
243 |           # Autocompletion files
244 |           install -Dm644 'target/${{ matrix.job.target }}/release/build/${{ needs.crate_metadata.outputs.name }}'*/out/'${{ needs.crate_metadata.outputs.name }}.bash' "${DPKG_DIR}/usr/share/bash-completion/completions/${{ needs.crate_metadata.outputs.name }}"
245 |           install -Dm644 'target/${{ matrix.job.target }}/release/build/${{ needs.crate_metadata.outputs.name }}'*/out/'${{ needs.crate_metadata.outputs.name }}.fish' "${DPKG_DIR}/usr/share/fish/vendor_completions.d/${{ needs.crate_metadata.outputs.name }}.fish"
246 |           install -Dm644 'target/${{ matrix.job.target }}/release/build/${{ needs.crate_metadata.outputs.name }}'*/out/'_${{ needs.crate_metadata.outputs.name }}' "${DPKG_DIR}/usr/share/zsh/vendor-completions/_${{ needs.crate_metadata.outputs.name }}"
247 | 
248 |           # README and LICENSE
249 |           install -Dm644 "README.md" "${DPKG_DIR}/usr/share/doc/${DPKG_BASENAME}/README.md"
250 |           install -Dm644 "LICENSE-MIT" "${DPKG_DIR}/usr/share/doc/${DPKG_BASENAME}/LICENSE-MIT"
251 |           install -Dm644 "LICENSE-APACHE" "${DPKG_DIR}/usr/share/doc/${DPKG_BASENAME}/LICENSE-APACHE"
252 |           install -Dm644 "CHANGELOG.md" "${DPKG_DIR}/usr/share/doc/${DPKG_BASENAME}/changelog"
253 |           gzip -n --best "${DPKG_DIR}/usr/share/doc/${DPKG_BASENAME}/changelog"
254 | 
255 |           cat > "${DPKG_DIR}/usr/share/doc/${DPKG_BASENAME}/copyright" <<EOF
    |           ...
    |           EOF
    | 
    |           cat > "${DPKG_DIR}/DEBIAN/control" <<EOF
    |           ...
    |           EOF
    | 
    |           DPKG_PATH="${DPKG_STAGING}/${DPKG_NAME}"
312 |           echo "DPKG_PATH=${DPKG_PATH}" >> $GITHUB_OUTPUT
313 | 
314 |           # build dpkg
315 |           fakeroot dpkg-deb --build "${DPKG_DIR}" "${DPKG_PATH}"
316 | 
317 |       - name: "Artifact upload: tarball"
318 |         uses: actions/upload-artifact@master
319 |         with:
320 |           name: ${{ steps.package.outputs.PKG_NAME }}
321 |           path: ${{ steps.package.outputs.PKG_PATH }}
322 | 
323 |       - name: "Artifact upload: Debian package"
324 |         uses: actions/upload-artifact@master
325 |         if: steps.debian-package.outputs.DPKG_NAME
326 |         with:
327 |           name: ${{ steps.debian-package.outputs.DPKG_NAME }}
328 |           path: ${{ steps.debian-package.outputs.DPKG_PATH }}
329 | 
330 |       - name: Check for release
331 |         id: is-release
332 |         shell: bash
333 |         run: |
334 |           unset IS_RELEASE ; if [[ $GITHUB_REF =~ ^refs/tags/v[0-9].* ]]; then IS_RELEASE='true' ; fi
335 |           echo "IS_RELEASE=${IS_RELEASE}" >> $GITHUB_OUTPUT
336 | 
337 |       - name: Publish archives and packages
338 |         uses: softprops/action-gh-release@v2
339 |         if: steps.is-release.outputs.IS_RELEASE
340 |         with:
341 |           files: |
342 |             ${{ steps.package.outputs.PKG_PATH }}
343 |             ${{ steps.debian-package.outputs.DPKG_PATH }}
344 |         env:
345 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
346 | 
347 |   winget:
348 |     name: Publish to Winget
349 |     runs-on: ubuntu-latest
350 |     needs: build
351 |     if: startsWith(github.ref, 'refs/tags/v')
352 |     steps:
353 |       - uses: vedantmgoyal2009/winget-releaser@v2
354 |         with:
355 |           identifier: sharkdp.hyperfine
356 |           installers-regex: '-pc-windows-msvc\.zip$'
357 |           token: ${{ secrets.WINGET_TOKEN }}
358 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | 
2 | /target/
3 | **/*.rs.bk
4 | 
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # v1.19.0
2 | 
3 | ## Features
4 | 
5 | - Add a new `--reference <command>` option to specify a reference command for the relative speed comparison, see #579, #577 and #744 (@sharkdp)
6 | - Add `--conclude` argument (analogous to `--prepare`), see #565 and #719 (@jackoconnordev)
7 | - Allow `--output=…` to appear once for each command, enabling use cases like `hyperfine --output=null my-cmd --output=./file.log my-cmd`, see #529 and #775 (@sharkdp)
8 | - The environment variable `$HYPERFINE_ITERATION` will now contain the current iteration number for each benchmarked command, see #775 (@sharkdp)
9 | - Add iteration information to failure error message, see #771 and #772 (@sharkdp)
10 | - Python scripts:
11 |   - Add legend modification parameters and an output DPI option, see #758 (@Spreadcat)
12 |   - Nicer whisker plot, see #727 (@serpent7776)
13 | 
14 | ## Bugfixes
15 | 
16 | - Fix ETA not being clearly visible on terminals with a block cursor, see #698 and #699 (@overclockworked64)
17 | - Fix zsh completions, see #717 (@xzfc)
18 | 
19 | ## Other
20 | 
21 | - Build binaries for aarch64-apple-darwin, see #728 (@Phault)
22 | - Various cleanups (@hamirmahal, @one230six)
23 | 
24 | # v1.18.0
25 | 
26 | ## Features
27 | 
28 | - Add support for microseconds via `--time-unit microsecond`, see #684 (@sharkdp)
29 | 
30 | ## Bugfixes
31 | 
32 | - Proper argument quoting on Windows CMD, see #296 and #678 (@PedroWitzel)
33 | 
34 | 
35 | # v1.17.0
36 | 
37 | ## Features
38 | 
39 | - Add new `--sort` option to control the order in the relative speed comparison and in markup export formats, see #601, #614, #655 (@sharkdp)
40 | - Parameters which are unused in the command line are now displayed in parentheses, see #600 and #644 (@sharkdp).
41 | - Added `--log-count` option for histogram plots, see `scripts/plot_histogram.py` (@sharkdp)
42 | 
43 | ## Changes
44 | 
45 | - Updated hyperfine to use `windows-sys` instead of the unmaintained `winapi`, see #624, #639, #636, #641 (@clemenswasser)
46 | - Silenced deprecation warning in Python scripts, see #633 (@nicovank)
47 | - Major update of the man page, see 0ce6578, #647 (@sharkdp)
48 | 
49 | ## Bugfixes
50 | 
51 | - Do not export intermediate results to stdout when using `-` as a file name, see #640 and #643 (@sharkdp)
52 | - Markup exporting does not fail if benchmark results are zero, see #642 (@sharkdp)
53 | 
54 | 
55 | # v1.16.1
56 | 
57 | ## Bugfixes
58 | 
59 | - Fix line-wrapping of `--help` text (@sharkdp)
60 | - Fix `--input=null` (@sharkdp)
61 | 
62 | 
63 | # v1.16.0
64 | 
65 | ## Features
66 | 
67 | - Added new `--input` option, see #541 and #563 (@snease)
68 | - Added possibility to specify `-` as the filename in the
69 |   `--export-*` options, see #615 and #623 (@humblepenguinn)
70 | 
71 | ## Changes
72 | 
73 | - Improve hints for outlier warnings if `--warmup` or `--prepare` are in use already,
74 |   see #570 (@sharkdp)
75 | 
76 | ## Bugfixes
77 | 
78 | - Fix uncolored output on Windows if `TERM` is not set, see #583 (@nabijaczleweli)
79 | - On Windows, only run `cmd.exe` with the `/C` option. Use `-c` for all other shells.
80 |   See #568 and #582 (@FilipAndersson245)
81 | 
82 | ## Other
83 | 
84 | - Thanks to @berombau for working on dependency upgrades, see #584
85 | - Fixed installation on Windows, see #595 and #596 (@AntoniosBarotsis)
86 | 
87 | 
88 | # v1.15.0
89 | 
90 | ## Features
91 | 
92 | - Disable colorized output in case of `TERM=dumb` or `NO_COLOR=1`, see #542 and #555 (@nabijaczleweli)
93 | - Add new (experimental) `--min-benchmarking-time <time>` option, see #527 (@sharkdp)
94 | 
95 | ## Bugfixes
96 | 
97 | - Fix user and kernel times on Windows, see #368 and #538 (@clemenswasser)
98 | 
99 | ## Other
100 | 
101 | - Improve `--help` texts of `--export-*` options, see #506 and #522 (@Engineer-of-Efficiency)
102 | 
103 | 
104 | # v1.14.0
105 | 
106 | ## Features
107 | 
108 | - Add a new `--output={null,pipe,inherit,<file>}` option to control
109 |   where the output of the benchmarked program is redirected (if at all),
110 |   see #377 and #509 (@tavianator, originally suggested by @BurntSushi)
111 | - Add Emacs org-mode as a new export format, see #491 (@ppaulweber)
112 | 
113 | 
114 | # v1.13.0
115 | 
116 | ## Features
117 | 
118 | - Added a new `--shell=none`/`-N` option to disable the intermediate
119 |   shell for executing the benchmarked commands. Hyperfine normally
120 |   measures and subtracts the shell spawning time, but the intermediate
121 |   shell always introduces a certain level of measurement noise. Using
122 |   `--shell=none`/`-N` allows users to benchmark very fast commands
123 |   (with a runtime on the order of a few milliseconds). See #336, #429,
124 |   and #487 (@cipriancraciun and @sharkdp)
125 | - Added `--setup`/`-s` option that can be used to run `make all` or
126 |   similar. It runs once per set of tests, like `--cleanup`/`-c` (@avar)
127 | - Added new `plot_progression.py` script to debug background interference
128 |   effects.
129 | 
130 | ## Changes
131 | 
132 | - Breaking change: the `-s` short option for `--style` is now used for
133 |   the new `--setup` option.
134 | - The environment offset randomization is now also available on Windows,
135 |   see #484
136 | 
137 | ## Other
138 | 
139 | - Improved documentation and test coverage, cleaned up code base for
140 |   future improvements.
141 | 
142 | 
143 | # v1.12.0
144 | 
145 | ## Features
146 | 
147 | - `--command-name` can now take parameter names from `--parameter-*` options, see #351 and #391 (@silathdiir)
148 | - Exit codes (or signals) are now printed in cases of command failures, see #342 (@KaindlJulian)
149 | - Exit codes are now part of the JSON output, see #371 (@JordiChauzi)
150 | - Colorized output should now be enabled on Windows by default, see #427
151 | 
152 | ## Changes
153 | 
154 | - When `--export-*` commands are used, result files are created before benchmark execution
155 |   to fail early in case of, e.g., wrong permissions. See #306 (@s1ck).
156 | - When `--export-*` options are used, result files are written after each individual
157 |   benchmark command instead of writing after all benchmarks have finished. See #306 (@s1ck).
158 | - Reduce number of shell startup time measurements from 200 to 50, generally speeding up benchmarks.
  See #378
159 | - User and system time are now in consistent time units, see #408 and #409 (@film42)
160 | 
161 | 
162 | 
163 | # v1.11.0
164 | 
165 | ## Features
166 | 
167 | - The `-L`/`--parameter-list` option can now be specified multiple times to
168 |   evaluate all possible combinations of the listed parameters:
169 | 
170 |   ``` bash
171 |   hyperfine -L number 1,2 -L letter a,b,c \
172 |       "echo {number}{letter}" \
173 |       "printf '%s\n' {number}{letter}"
174 |   # runs 12 benchmarks: 2 commands (echo and printf) times 6 combinations of
175 |   # the "letter" and "number" parameters
176 |   ```
177 | 
178 |   See: #253, #318 (@wchargin)
179 | 
180 | - Add CLI option to identify a command with a custom name, see #326 (@scampi)
181 | 
182 | ## Changes
183 | 
184 | - When parameters are used with `--parameter-list` or `--parameter-scan`, the JSON export format
185 |   now contains a dictionary `parameters` instead of a single key `parameter`. See #253, #318.
186 | - The `plot_parametrized.py` script now infers the parameter name, and its `--parameter-name`
187 |   argument has been deprecated. See #253, #318.
188 | 
189 | ## Bugfixes
190 | 
191 | - Fix a bug in the outlier detection which would only detect "slow outliers" but not the fast
192 |   ones (runs that are much faster than the rest of the benchmarking runs), see #329
193 | - Better error messages for very fast commands that would lead to inf/nan results in the relative
194 |   speed comparison, see #319
195 | - Show error message if `--warmup` or `--*runs` arguments cannot be parsed, see #337
196 | - Keep output colorized when the output is not interactive and `--style=full` or `--style=color` is used.
197 | 
198 | 
199 | # v1.10.0
200 | 
201 | ## Features
202 | 
203 | - Hyperfine now comes with shell completion files for Bash, Zsh, Fish
204 |   and PowerShell, see #290 (@four0000four).
205 | - Hyperfine now comes with a basic man page, see #257 (@cadeef)
206 | - During execution of benchmarks, hyperfine will now set a `HYPERFINE_RANDOMIZED_ENVIRONMENT_OFFSET` environment variable in order to randomize the memory layout. See #235 and #241 for references and details.
207 | - A few enhancements for the histogram plotting scripts and the
208 |   advanced statistics script
209 | - Updates for the `plot_whisker.py` script, see #275 (@ghaiklor)
210 | 
211 | ## Bugfixes
212 | 
213 | - Fix spin icon on Windows, see #229
214 | - A few typos have been fixed, see #292 (@McMartin)
215 | 
216 | ## Packaging
217 | 
218 | - `hyperfine` is now available on MacPorts for macOS, see #281 (@herbygillot)
219 | - `hyperfine` is now available on OpenBSD, see #289 (@minusf)
220 | 
221 | Package authors: note that Hyperfine now comes with a set of shell completion files and a man page (see above)
222 | 
223 | # v1.9.0
224 | 
225 | ## Features
226 | 
227 | - The new `--parameter-list <VAR> <VALUES>` option can be used to run
228 |   a parametrized benchmark on a user-specified list of values.
229 |   This is similar to `--parameter-scan <VAR> <MIN> <MAX>`, but doesn't
230 |   necessarily require numeric arguments.
231 | 232 | ``` bash 233 | hyperfine --parameter-list compiler "gcc,clang" \ 234 | "{compiler} -O2 main.cpp" 235 | ``` 236 | 237 | See: #227, #234 (@JuanPotato) 238 | 239 | - Added `none` as a possible choice for the `--style` option to 240 | run `hyperfine` without any output, see #193 (@knidarkness) 241 | 242 | - Added a few new scripts for plotting various types of benchmark 243 | results (https://github.com/sharkdp/hyperfine/tree/master/scripts) 244 | 245 | ## Changes 246 | 247 | - The `--prepare` command is now also run during the warmup 248 | phase, see #182 (@sseemayer) 249 | 250 | - Better estimation of the remaining benchmark time due to an update 251 | of the `indicatif` crate. 252 | 253 | ## Other 254 | 255 | - `hyperfine` is now available on NixOS, see #240 (@tuxinaut) 256 | 257 | # v1.8.0 258 | 259 | ## Features 260 | 261 | - The `--prepare ` option can now be specified multiple times to 262 | run specific preparation commands for each of the benchmarked programs: 263 | 264 | ``` bash 265 | hyperfine --prepare "make clean; git checkout master" "make" \ 266 | --prepare "make clean; git checkout feature" "make" 267 | ``` 268 | 269 | See: #216, #218 (@iamsauravsharma) 270 | 271 | - Added a new [`welch_ttest.py`](https://github.com/sharkdp/hyperfine/blob/master/scripts/welch_ttest.py) script to test whether or not the two benchmark 272 | results are the same, see #222 (@uetchy) 273 | 274 | - The Markdown export has been improved. The relative speed is now exported 275 | with a higher precision (see #208) and includes the standard deviation 276 | (see #225). 277 | 278 | ## Other 279 | 280 | - Improved documentation for [`scripts`](https://github.com/sharkdp/hyperfine/tree/master/scripts) folder (@matthieusb) 281 | 282 | # v1.7.0 283 | 284 | ## Features 285 | 286 | - Added a new `-D`,`--parameter-step-size` option that can be used to control 287 | the step size for `--parameter-scan` benchmarks. In addition, decimal numbers 288 | are now allowed for parameter scans. For example, the following command runs 289 | `sleep 0.3`, `sleep 0.5` and `sleep 0.7`: 290 | ``` bash 291 | hyperfine --parameter-scan delay 0.3 0.7 -D 0.2 'sleep {delay}' 292 | ``` 293 | For more details, see #184 (@piyushrungta25) 294 | 295 | ## Other 296 | 297 | - hyperfine is now in the official Alpine repositories, see #177 (@maxice8, @5paceToast) 298 | - hyperfine is now in the official Fedora repositories, see #196 (@ignatenkobrain) 299 | - hyperfine is now in the official Arch Linux repositories 300 | - hyperfine can be installed on FreeBSD, see #204 (@0mp) 301 | - Enabled LTO for slightly smaller binary sizes, see #179 (@Calinou) 302 | - Various small improvements all over the code base, see #194 (@phimuemue) 303 | 304 | # v1.6.0 305 | 306 | ## Features 307 | 308 | - Added a `-c, --cleanup ` option to execute `CMD` after the completion of all benchmarking runs for a given command. This is useful if the commands to be benchmarked produce artifacts that need to be cleaned up. See #91 (@RalfJung and @colinwahl) 309 | - Add parameter values (for `--parameter-scan` benchmarks) to exported CSV and JSON files. See #131 (@bbannier) 310 | - Added AsciiDoc export option, see #137 (@5paceToast) 311 | - The relative speed is now part of the Markdown export, see #127 (@mathiasrw and @sharkdp). 312 | - The *median* run time is now exported via CSV and JSON, see #171 (@hosewiejacke and @sharkdp). 313 | 314 | ## Other 315 | 316 | - Hyperfine has been updated to Rust 2018 (@AnderEnder). 
  The minimum supported Rust version is now 1.31.
317 | 
318 | # v1.5.0
319 | 
320 | ## Features
321 | 
322 | - Show the number of runs in `hyperfine`'s output (@tcmal)
323 | - Added two Python scripts to post-process exported benchmark results (see [`scripts/`](https://github.com/sharkdp/hyperfine/tree/master/scripts) folder)
324 | 
325 | ## Other
326 | 
327 | - Refined `--help` text for the `--export-*` flags (@psteinb)
328 | - Added Snapcraft file (@popey)
329 | - Small improvements in the progress bar "experience".
330 | 
331 | # v1.4.0
332 | 
333 | ## Features
334 | 
335 | - Added `-S`/`--shell` option to override the default shell, see #61 (@mqudsi and @jasonpeacock)
336 | - Added `-u`/`--time-unit` option to change the unit of time (`second` or `millisecond`), see #80 (@jasonpeacock)
337 | - Markdown export auto-selects time unit, see #71 (@jasonpeacock)
338 | 
339 | # v1.3.0
340 | 
341 | ## Features
342 | 
343 | - Compute and print standard deviation of the speed ratio, see #83 (@Shnatsel)
344 | - More compact output format, see #70 (@jasonpeacock)
345 | - Added `--style=color`, see #70 (@jasonpeacock)
346 | - Added options to specify the max/exact numbers of runs, see #77 (@orium)
347 | 
348 | ## Bugfixes
349 | 
350 | - Change Windows `cmd` interpreter to `cmd.exe` to prevent accidentally calling other programs, see #74 (@tathanhdinh)
351 | 
352 | ## Other
353 | 
354 | - Binary releases for Windows are now available, see #87
355 | 
356 | # v1.2.0
357 | 
358 | - Support parameters in preparation commands, see #68 (@siiptuo)
359 | - Updated dependencies, see #69. The minimum required Rust version is now 1.24.
360 | 
361 | # v1.1.0
362 | 
363 | * Added `--show-output` option (@chrisduerr and @sevagh)
364 | * Refactoring work (@stevepentland)
365 | 
366 | # v1.0.0
367 | 
368 | ## Features
369 | 
370 | * Support for various export formats like CSV, JSON and Markdown - see #38, #44, #49, #42 (@stevepentland)
371 | * Summary output that compares the different benchmarks, see #6 (@stevepentland)
372 | * Parameterized benchmarks via `-P`, `--parameter-scan <VAR> <MIN> <MAX>`, see #19
373 | 
374 | ## Thanks
375 | 
376 | I'd like to say a big THANK YOU to @stevepentland for implementing new features,
377 | for reviewing pull requests and for giving very valuable feedback.
378 | 
379 | # v0.5.0
380 | 
381 | * Proper Windows support (@stevepentland)
382 | * Added `--style auto/basic/nocolor/full` option (@stevepentland)
383 | * Correctly estimate the full execution time, see #27 (@rleungx)
384 | * Added Void Linux install instructions (@wpbirney)
385 | 
386 | # v0.4.0
387 | 
388 | - New `--style` option to disable output coloring and interactive CLI features, see #24 (@stevepentland)
389 | - Statistical outlier detection, see #23 and #18
390 | 
391 | # v0.3.0
392 | 
393 | ## Features
394 | 
395 | - In addition to 'real' (wall clock) time, Hyperfine can now also measure 'user' and 'system' time (see #5).
396 | - Added `--prepare` option that can be used to clear up disk caches before timing runs, for example (see #8).
397 | 
398 | ## Other
399 | 
400 | - [Arch Linux package](https://aur.archlinux.org/packages/hyperfine) for Hyperfine (@jD91mZM2).
401 | - Ubuntu/Debian packages are now available.
402 | 403 | # v0.2.0 404 | 405 | Initial public release 406 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | title: hyperfine 3 | message: >- 4 | If you use this software in scientific 5 | publications, please consider citing it using the 6 | metadata from this file. 7 | type: software 8 | authors: 9 | - given-names: David 10 | family-names: Peter 11 | email: mail@david-peter.de 12 | orcid: 'https://orcid.org/0000-0001-7950-9915' 13 | repository-code: 'https://github.com/sharkdp/hyperfine' 14 | abstract: A command-line benchmarking tool. 15 | license: MIT 16 | version: 1.16.1 17 | date-released: '2023-03-21' 18 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["David Peter "] 3 | categories = ["command-line-utilities"] 4 | description = "A command-line benchmarking tool" 5 | homepage = "https://github.com/sharkdp/hyperfine" 6 | license = "MIT OR Apache-2.0" 7 | name = "hyperfine" 8 | readme = "README.md" 9 | repository = "https://github.com/sharkdp/hyperfine" 10 | version = "1.19.0" 11 | edition = "2018" 12 | build = "build.rs" 13 | rust-version = "1.76.0" 14 | 15 | [features] 16 | # Use the nightly feature windows_process_extensions_main_thread_handle 17 | windows_process_extensions_main_thread_handle = [] 18 | 19 | [dependencies] 20 | colored = "2.1" 21 | indicatif = "=0.17.4" 22 | statistical = "1.0" 23 | csv = "1.3" 24 | serde = { version = "1.0", features = ["derive"] } 25 | serde_json = "1.0" 26 | rust_decimal = "1.36" 27 | rand = "0.8" 28 | shell-words = "1.0" 29 | thiserror = "2.0" 30 | anyhow = "1.0" 31 | 32 | [target.'cfg(not(windows))'.dependencies] 33 | libc = "0.2" 34 | 35 | [target.'cfg(windows)'.dependencies] 36 | windows-sys = { version = "0.59", features = [ 37 | "Win32_Foundation", 38 | "Win32_Security", 39 | "Win32_System_JobObjects", 40 | "Win32_System_LibraryLoader", 41 | "Win32_System_Threading", 42 | ] } 43 | 44 | [target.'cfg(all(windows, not(windows_process_extensions_main_thread_handle)))'.dependencies] 45 | once_cell = "1.19" 46 | 47 | [target.'cfg(target_os="linux")'.dependencies] 48 | nix = { version = "0.29", features = ["zerocopy"] } 49 | 50 | [dependencies.clap] 51 | version = "4" 52 | default-features = false 53 | features = [ 54 | "suggestions", 55 | "color", 56 | "wrap_help", 57 | "cargo", 58 | "help", 59 | "usage", 60 | "error-context", 61 | ] 62 | 63 | [dev-dependencies] 64 | approx = "0.5" 65 | assert_cmd = "2.0" 66 | insta = { version = "1.41.1", features = ["yaml"] } 67 | predicates = "3.0" 68 | tempfile = "3.14" 69 | 70 | [profile.dev.package] 71 | insta.opt-level = 3 72 | similar.opt-level = 3 73 | 74 | [build-dependencies] 75 | clap = "4.5.23" 76 | clap_complete = "4.2.1" 77 | 78 | [profile.release] 79 | lto = true 80 | strip = true 81 | codegen-units = 1 82 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018-2022 David Peter, and all hyperfine contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hyperfine 2 | [![CICD](https://github.com/sharkdp/hyperfine/actions/workflows/CICD.yml/badge.svg)](https://github.com/sharkdp/hyperfine/actions/workflows/CICD.yml) 3 | [![Version info](https://img.shields.io/crates/v/hyperfine.svg)](https://crates.io/crates/hyperfine) 4 | [中文](https://github.com/chinanf-boy/hyperfine-zh) 5 | 6 | A command-line benchmarking tool. 7 | 8 | **Demo**: Benchmarking [`fd`](https://github.com/sharkdp/fd) and 9 | [`find`](https://www.gnu.org/software/findutils/): 10 | 11 | ![hyperfine](https://i.imgur.com/z19OYxE.gif) 12 | 13 | ### Sponsors 14 | 15 | A special *thank you* goes to our biggest sponsor:
![Warp](doc/sponsors/warp-logo.png)
**Warp, the intelligent terminal**
Available on MacOS, Linux, Windows
24 | 
25 | ## Features
26 | 
27 | * Statistical analysis across multiple runs.
28 | * Support for arbitrary shell commands.
29 | * Constant feedback about the benchmark progress and current estimates.
30 | * Warmup runs can be executed before the actual benchmark.
31 | * Cache-clearing commands can be set up before each timing run.
32 | * Statistical outlier detection to detect interference from other programs and caching effects.
33 | * Export results to various formats: CSV, JSON, Markdown, AsciiDoc.
34 | * Parameterized benchmarks (e.g. vary the number of threads).
35 | * Cross-platform.
36 | 
37 | ## Usage
38 | 
39 | ### Basic benchmarks
40 | 
41 | To run a benchmark, you can simply call `hyperfine <command>...`. The argument(s) can be any
42 | shell command. For example:
43 | ```sh
44 | hyperfine 'sleep 0.3'
45 | ```
46 | 
47 | Hyperfine will automatically determine the number of runs to perform for each command. By default,
48 | it will perform *at least* 10 benchmarking runs and measure for at least 3 seconds. To change this,
49 | you can use the `-r`/`--runs` option:
50 | ```sh
51 | hyperfine --runs 5 'sleep 0.3'
52 | ```
53 | 
54 | If you want to compare the runtimes of different programs, you can pass multiple commands:
55 | ```sh
56 | hyperfine 'hexdump file' 'xxd file'
57 | ```
58 | 
59 | ### Warmup runs and preparation commands
60 | 
61 | For programs that perform a lot of disk I/O, the benchmarking results can be heavily influenced
62 | by disk caches and whether they are cold or warm.
63 | 
64 | If you want to run the benchmark on a warm cache, you can use the `-w`/`--warmup` option to
65 | perform a certain number of program executions before the actual benchmark:
66 | ```sh
67 | hyperfine --warmup 3 'grep -R TODO *'
68 | ```
69 | 
70 | Conversely, if you want to run the benchmark for a cold cache, you can use the `-p`/`--prepare`
71 | option to run a special command before *each* timing run. For example, to clear hard disk caches
72 | on Linux, you can run
73 | ```sh
74 | sync; echo 3 | sudo tee /proc/sys/vm/drop_caches
75 | ```
76 | To use this specific command with hyperfine, call `sudo -v` to temporarily gain sudo permissions
77 | and then call:
78 | ```sh
79 | hyperfine --prepare 'sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' 'grep -R TODO *'
80 | ```
81 | 
82 | ### Parameterized benchmarks
83 | 
84 | If you want to run a series of benchmarks where a single parameter is varied (say, the number of
85 | threads), you can use the `-P`/`--parameter-scan` option and call:
86 | ```sh
87 | hyperfine --prepare 'make clean' --parameter-scan num_threads 1 12 'make -j {num_threads}'
88 | ```
89 | This also works with decimal numbers. The `-D`/`--parameter-step-size` option can be used
90 | to control the step size:
91 | ```sh
92 | hyperfine --parameter-scan delay 0.3 0.7 -D 0.2 'sleep {delay}'
93 | ```
94 | This runs `sleep 0.3`, `sleep 0.5` and `sleep 0.7`.
95 | 
96 | For non-numeric parameters, you can also supply a list of values with the `-L`/`--parameter-list`
97 | option:
98 | ```
99 | hyperfine -L compiler gcc,clang '{compiler} -O2 main.cpp'
100 | ```
101 | 
102 | ### Intermediate shell
103 | 
104 | By default, commands are executed using a predefined shell (`/bin/sh` on Unix, `cmd.exe` on Windows).
105 | If you want to use a different shell, you can use the `-S, --shell` option:
106 | ```sh
107 | hyperfine --shell zsh 'for i in {1..10000}; do echo test; done'
108 | ```
109 | 
110 | Note that hyperfine always *corrects for the shell spawning time*.
To do this, it performs a calibration
111 | procedure where it runs the shell with an empty command (multiple times) to measure the startup time
112 | of the shell. It will then subtract this time from the total to show the actual time used by the command
113 | in question.
114 | 
115 | If you want to run a benchmark *without an intermediate shell*, you can use the `-N` or `--shell=none`
116 | option. This is helpful for very fast commands (< 5 ms) where the shell startup overhead correction would
117 | produce a significant amount of noise. Note that you cannot use shell syntax like `*` or `~` in this case.
118 | ```
119 | hyperfine -N 'grep TODO /home/user'
120 | ```
121 | 
122 | 
123 | ### Shell functions and aliases
124 | 
125 | If you are using bash, you can export shell functions to directly benchmark them with hyperfine:
126 | 
127 | ```bash
128 | my_function() { sleep 1; }
129 | export -f my_function
130 | hyperfine --shell=bash my_function
131 | ```
132 | 
133 | Otherwise, inline them into or source them from the benchmarked program:
134 | 
135 | ```sh
136 | hyperfine 'my_function() { sleep 1; }; my_function'
137 | 
138 | echo 'alias my_alias="sleep 1"' > /tmp/my_alias.sh
139 | hyperfine '. /tmp/my_alias.sh; my_alias'
140 | ```
141 | 
142 | ### Exporting results
143 | 
144 | Hyperfine has multiple options for exporting benchmark results to CSV, JSON, Markdown and other
145 | formats (see `--help` text for details).
146 | 
147 | #### Markdown
148 | 
149 | You can use the `--export-markdown <file>` option to create tables like the following:
150 | 
151 | | Command | Mean [s] | Min [s] | Max [s] | Relative |
152 | |:---|---:|---:|---:|---:|
153 | | `find . -iregex '.*[0-9]\.jpg$'` | 2.275 ± 0.046 | 2.243 | 2.397 | 9.79 ± 0.22 |
154 | | `find . -iname '*[0-9].jpg'` | 1.427 ± 0.026 | 1.405 | 1.468 | 6.14 ± 0.13 |
155 | | `fd -HI '.*[0-9]\.jpg$'` | 0.232 ± 0.002 | 0.230 | 0.236 | 1.00 |
156 | 
157 | #### JSON
158 | 
159 | The JSON output is useful if you want to analyze the benchmark results in more detail.
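    | 
    | For example, a minimal Python sketch along the following lines can summarize each
    | benchmark (this assumes the results were exported with `--export-json results.json`;
    | the `results`, `command`, `mean`, `stddev` and `times` fields are part of hyperfine's
    | JSON format):
    | 
    | ```python
    | import json
    | 
    | # Load a result file created with: hyperfine --export-json results.json
    | with open("results.json") as f:
    |     results = json.load(f)["results"]
    | 
    | # Each entry holds the benchmarked command line and its timing statistics in seconds
    | for b in results:
    |     print(f"{b['command']}: {b['mean']:.3f} s ± {b['stddev']:.3f} s ({len(b['times'])} runs)")
    | ```
    | 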
The 160 | [`scripts/`](https://github.com/sharkdp/hyperfine/tree/master/scripts) folder includes a lot 161 | of helpful Python programs to further analyze benchmark results and create helpful 162 | visualizations, like a histogram of runtimes or a whisker plot to compare 163 | multiple benchmarks: 164 | 165 | | ![](doc/histogram.png) | ![](doc/whisker.png) | 166 | |---:|---:| 167 | 168 | 169 | ### Detailed benchmark flowchart 170 | 171 | The following chart explains the execution order of various timing runs when using options 172 | like `--warmup`, `--prepare `, `--setup ` or `--cleanup `: 173 | 174 | ![](doc/execution-order.png) 175 | 176 | ## Installation 177 | 178 | [![Packaging status](https://repology.org/badge/vertical-allrepos/hyperfine.svg?columns=3&exclude_unsupported=1)](https://repology.org/project/hyperfine/versions) 179 | 180 | ### On Ubuntu 181 | 182 | Download the appropriate `.deb` package from the [Release page](https://github.com/sharkdp/hyperfine/releases) 183 | and install it via `dpkg`: 184 | ``` 185 | wget https://github.com/sharkdp/hyperfine/releases/download/v1.19.0/hyperfine_1.19.0_amd64.deb 186 | sudo dpkg -i hyperfine_1.19.0_amd64.deb 187 | ``` 188 | 189 | ### On Fedora 190 | 191 | On Fedora, hyperfine can be installed from the official repositories: 192 | 193 | ```sh 194 | dnf install hyperfine 195 | ``` 196 | 197 | ### On Alpine Linux 198 | 199 | On Alpine Linux, hyperfine can be installed [from the official repositories](https://pkgs.alpinelinux.org/packages?name=hyperfine): 200 | ``` 201 | apk add hyperfine 202 | ``` 203 | 204 | ### On Arch Linux 205 | 206 | On Arch Linux, hyperfine can be installed [from the official repositories](https://archlinux.org/packages/extra/x86_64/hyperfine/): 207 | ``` 208 | pacman -S hyperfine 209 | ``` 210 | 211 | ### On Debian Linux 212 | 213 | On Debian Linux, hyperfine can be installed [from the testing repositories](https://packages.debian.org/testing/main/hyperfine): 214 | ``` 215 | apt install hyperfine 216 | ``` 217 | 218 | ### On Exherbo Linux 219 | 220 | On Exherbo Linux, hyperfine can be installed [from the rust repositories](https://gitlab.exherbo.org/exherbo/rust/-/tree/master/packages/sys-apps/hyperfine): 221 | ``` 222 | cave resolve -x repository/rust 223 | cave resolve -x hyperfine 224 | ``` 225 | 226 | ### On Funtoo Linux 227 | 228 | On Funtoo Linux, hyperfine can be installed [from core-kit](https://github.com/funtoo/core-kit/tree/1.4-release/app-benchmarks/hyperfine): 229 | ``` 230 | emerge app-benchmarks/hyperfine 231 | ``` 232 | 233 | ### On NixOS 234 | 235 | On NixOS, hyperfine can be installed [from the official repositories](https://nixos.org/nixos/packages.html?query=hyperfine): 236 | ``` 237 | nix-env -i hyperfine 238 | ``` 239 | 240 | ### On Flox 241 | 242 | On Flox, hyperfine can be installed as follows. 243 | ``` 244 | flox install hyperfine 245 | ``` 246 | Hyperfine's version in Flox follows that of Nix. 
247 | 248 | ### On openSUSE 249 | 250 | On openSUSE, hyperfine can be installed [from the official repositories](https://software.opensuse.org/package/hyperfine): 251 | ``` 252 | zypper install hyperfine 253 | ``` 254 | 255 | ### On Void Linux 256 | 257 | Hyperfine can be installed via xbps 258 | 259 | ``` 260 | xbps-install -S hyperfine 261 | ``` 262 | 263 | ### On macOS 264 | 265 | Hyperfine can be installed via [Homebrew](https://brew.sh): 266 | ``` 267 | brew install hyperfine 268 | ``` 269 | 270 | Or you can install using [MacPorts](https://www.macports.org): 271 | ``` 272 | sudo port selfupdate 273 | sudo port install hyperfine 274 | ``` 275 | 276 | ### On FreeBSD 277 | 278 | Hyperfine can be installed via pkg: 279 | ``` 280 | pkg install hyperfine 281 | ``` 282 | 283 | ### On OpenBSD 284 | 285 | ``` 286 | doas pkg_add hyperfine 287 | ``` 288 | 289 | ### On Windows 290 | 291 | Hyperfine can be installed via [Chocolatey](https://community.chocolatey.org/packages/hyperfine), [Scoop](https://scoop.sh/#/apps?q=hyperfine&s=0&d=1&o=true&id=8f7c10f75ecf5f9e42a862c615257328e2f70f61), or [Winget](https://github.com/microsoft/winget-pkgs/tree/master/manifests/s/sharkdp/hyperfine): 292 | ``` 293 | choco install hyperfine 294 | ``` 295 | ``` 296 | scoop install hyperfine 297 | ``` 298 | ``` 299 | winget install hyperfine 300 | ``` 301 | 302 | ### With conda 303 | 304 | Hyperfine can be installed via [`conda`](https://conda.io/en/latest/) from the [`conda-forge`](https://anaconda.org/conda-forge/hyperfine) channel: 305 | ``` 306 | conda install -c conda-forge hyperfine 307 | ``` 308 | 309 | ### With cargo (Linux, macOS, Windows) 310 | 311 | Hyperfine can be installed from source via [cargo](https://doc.rust-lang.org/cargo/): 312 | ``` 313 | cargo install --locked hyperfine 314 | ``` 315 | 316 | Make sure that you use Rust 1.76 or newer. 317 | 318 | ### From binaries (Linux, macOS, Windows) 319 | 320 | Download the corresponding archive from the [Release page](https://github.com/sharkdp/hyperfine/releases). 321 | 322 | ## Alternative tools 323 | 324 | Hyperfine is inspired by [bench](https://github.com/Gabriella439/bench). 325 | 326 | ## Integration with other tools 327 | 328 | [Chronologer](https://github.com/dandavison/chronologer) is a tool that uses `hyperfine` to 329 | visualize changes in benchmark timings across your Git history. 330 | 331 | [Bencher](https://github.com/bencherdev/bencher) is a continuous benchmarking tool that supports `hyperfine` to 332 | track benchmarks and catch performance regressions in CI. 333 | 334 | Drop hyperfine JSON outputs onto the [Venz](https://try.venz.dev) chart to visualize the results, 335 | and manage hyperfine configurations. 336 | 337 | Make sure to check out the [`scripts` folder](https://github.com/sharkdp/hyperfine/tree/master/scripts) 338 | in this repository for a set of tools to work with `hyperfine` benchmark results. 339 | 340 | ## Origin of the name 341 | 342 | The name *hyperfine* was chosen in reference to the hyperfine levels of caesium 133 which play a crucial role in the 343 | [definition of our base unit of time](https://en.wikipedia.org/wiki/Second#History_of_definition) 344 | — the second. 345 | 346 | ## Citing hyperfine 347 | 348 | Thank you for considering to cite hyperfine in your research work. Please see the information 349 | in the sidebar on how to properly cite hyperfine. 350 | 351 | ## License 352 | 353 | `hyperfine` is dual-licensed under the terms of the MIT License and the Apache License 2.0. 
354 | 355 | See the [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT) files for details. 356 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | 3 | use clap_complete::{generate_to, Shell}; 4 | 5 | include!("src/cli.rs"); 6 | 7 | fn main() { 8 | let var = std::env::var_os("SHELL_COMPLETIONS_DIR").or_else(|| std::env::var_os("OUT_DIR")); 9 | let outdir = match var { 10 | None => return, 11 | Some(outdir) => outdir, 12 | }; 13 | fs::create_dir_all(&outdir).unwrap(); 14 | 15 | let mut command = build_command(); 16 | for shell in [ 17 | Shell::Bash, 18 | Shell::Fish, 19 | Shell::Zsh, 20 | Shell::PowerShell, 21 | Shell::Elvish, 22 | ] { 23 | generate_to(shell, &mut command, "hyperfine", &outdir).unwrap(); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /doc/execution-order.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharkdp/hyperfine/3cedcc38d0c430cbf38b4364b441c43a938d2bf3/doc/execution-order.png -------------------------------------------------------------------------------- /doc/histogram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharkdp/hyperfine/3cedcc38d0c430cbf38b4364b441c43a938d2bf3/doc/histogram.png -------------------------------------------------------------------------------- /doc/hyperfine.1: -------------------------------------------------------------------------------- 1 | .TH HYPERFINE 1 2 | .SH NAME 3 | hyperfine \- command\-line benchmarking tool 4 | .SH SYNOPSIS 5 | .B hyperfine 6 | .RB [ \-ihVN ] 7 | .RB [ \-\-warmup 8 | .IR NUM ] 9 | .RB [ \-\-min\-runs 10 | .IR NUM ] 11 | .RB [ \-\-max\-runs 12 | .IR NUM ] 13 | .RB [ \-\-runs 14 | .IR NUM ] 15 | .RB [ \-\-setup 16 | .IR CMD ] 17 | .RB [ \-\-prepare 18 | .IR CMD ] 19 | .RB [ \-\-conclude 20 | .IR CMD ] 21 | .RB [ \-\-cleanup 22 | .IR CMD ] 23 | .RB [ \-\-parameter\-scan 24 | .IR VAR 25 | .IR MIN 26 | .IR MAX ] 27 | .RB [ \-\-parameter\-step\-size 28 | .IR DELTA ] 29 | .RB [ \-\-parameter\-list 30 | .IR VAR 31 | .IR VALUES ] 32 | .RB [ \-\-shell 33 | .IR SHELL ] 34 | .RB [ \-\-style 35 | .IR TYPE ] 36 | .RB [ \-\-sort 37 | .IR METHOD ] 38 | .RB [ \-\-time-unit 39 | .IR UNIT ] 40 | .RB [ \-\-export\-asciidoc 41 | .IR FILE ] 42 | .RB [ \-\-export\-csv 43 | .IR FILE ] 44 | .RB [ \-\-export\-json 45 | .IR FILE ] 46 | .RB [ \-\-export\-markdown 47 | .IR FILE ] 48 | .RB [ \-\-export\-orgmode 49 | .IR FILE ] 50 | .RB [ \-\-output 51 | .IR WHERE ] 52 | .RB [ \-\-input 53 | .IR WHERE ] 54 | .RB [ \-\-command\-name 55 | .IR NAME ] 56 | .RI [ COMMAND... ] 57 | .SH DESCRIPTION 58 | A command\-line benchmarking tool which includes: 59 | .LP 60 | .RS 61 | * Statistical analysis across multiple runs 62 | .RE 63 | .RS 64 | * Support for arbitrary shell commands 65 | .RE 66 | .RS 67 | * Constant feedback about the benchmark progress and current estimates 68 | .RE 69 | .RS 70 | * Warmup runs can be executed before the actual benchmark 71 | .RE 72 | .RS 73 | * Cache-clearing commands can be set up before each timing run 74 | .RE 75 | .RS 76 | * Statistical outlier detection to detect interference from other programs and caching effects 77 | .RE 78 | .RS 79 | * Export results to various formats: CSV, JSON, Markdown, AsciiDoc 80 | .RE 81 | .RS 82 | * Parameterized benchmarks (e.g. 
vary the number of threads)
83 | .RE
84 | .SH OPTIONS
85 | .HP
86 | \fB\-w\fR, \fB\-\-warmup\fR \fINUM\fP
87 | .IP
88 | Perform \fINUM\fP warmup runs before the actual benchmark. This can be used
89 | to fill (disk) caches for I/O\-heavy programs.
90 | .HP
91 | \fB\-m\fR, \fB\-\-min\-runs\fR \fINUM\fP
92 | .IP
93 | Perform at least \fINUM\fP runs for each command. Default: 10.
94 | .HP
95 | \fB\-M\fR, \fB\-\-max\-runs\fR \fINUM\fP
96 | .IP
97 | Perform at most \fINUM\fP runs for each command. By default, there is no
98 | limit.
99 | .HP
100 | \fB\-r\fR, \fB\-\-runs\fR \fINUM\fP
101 | .IP
102 | Perform exactly \fINUM\fP runs for each command. If this option is not specified,
103 | \fBhyperfine\fR automatically determines the number of runs.
104 | .HP
105 | \fB\-s\fR, \fB\-\-setup\fR \fICMD...\fP
106 | .IP
107 | Execute \fICMD\fP once before each set of timing runs. This is useful
108 | for compiling your software with the provided parameters, or to do any
109 | other work that should happen once before a series of benchmark runs,
110 | not every time as would happen with the \fB\-\-prepare\fR option.
111 | .HP
112 | \fB\-p\fR, \fB\-\-prepare\fR \fICMD...\fP
113 | .IP
114 | Execute \fICMD\fP before each timing run. This is useful for clearing disk caches,
115 | for example.
116 | The \fB\-\-prepare\fR option can be specified once for all commands or multiple times,
117 | once for each command. In the latter case, each preparation command will be
118 | run prior to the corresponding benchmark command.
119 | .HP
\fB\-\-conclude\fR \fICMD...\fP
120 | .IP
121 | Execute \fICMD\fP after each timing run. This is useful for clearing disk caches,
122 | for example.
123 | The \fB\-\-conclude\fR option can be specified once for all commands or multiple times,
124 | once for each command. In the latter case, each conclusion command will be
125 | run after the corresponding benchmark command.
126 | .HP
127 | \fB\-c\fR, \fB\-\-cleanup\fR \fICMD...\fP
128 | .IP
129 | Execute \fICMD\fP after the completion of all benchmarking runs for each individual
130 | command to be benchmarked. This is useful if the commands to be benchmarked
131 | produce artifacts that need to be cleaned up. It only runs once after a series of
132 | benchmark runs, as opposed to the \fB\-\-conclude\fR option, which runs after
133 | every run.
134 | .HP
135 | \fB\-P\fR, \fB\-\-parameter\-scan\fR \fIVAR\fP \fIMIN\fP \fIMAX\fP
136 | .IP
137 | Perform benchmark runs for each value in the range \fIMIN..MAX\fP. Replaces the
138 | string '{\fIVAR\fP}' in each command by the current parameter value.
139 | .IP
140 | .RS
141 | Example:
142 | .RS
143 | \fBhyperfine\fR \fB\-P\fR threads 1 8 'make \-j {threads}'
144 | .RE
145 | .RE
146 | .IP
147 | This performs benchmarks for 'make \-j 1', 'make \-j 2', ..., 'make \-j 8'.
148 | .IP
149 | To have the value increase following different patterns, use shell
150 | arithmetic.
151 | .IP
152 | .RS
153 | Example:
154 | .RS
155 | \fBhyperfine\fR \fB\-P\fR size 0 3 'sleep $((2**{size}))'
156 | .RE
157 | .RE
158 | .IP
159 | This performs benchmarks with power\-of\-2 increases: 'sleep 1', 'sleep
160 | 2', 'sleep 4', ...
161 | .IP
162 | The exact syntax may vary depending on your shell and OS.
163 | .HP
164 | \fB\-D\fR, \fB\-\-parameter\-step\-size\fR \fIDELTA\fP
165 | .IP
166 | This argument requires \fB\-\-parameter\-scan\fR to be specified as well. Traverse the
167 | range \fIMIN..MAX\fP in steps of \fIDELTA\fP.
168 | .IP 169 | .RS 170 | Example: 171 | .RS 172 | \fBhyperfine\fR \fB\-P\fR delay 0.3 0.7 \fB\-D\fR 0.2 'sleep {delay}' 173 | .RE 174 | .RE 175 | .IP 176 | This performs benchmarks for 'sleep 0.3', 'sleep 0.5' and 'sleep 0.7'. 177 | .HP 178 | \fB\-L\fR, \fB\-\-parameter\-list\fR \fIVAR\fP \fIVALUES\fP 179 | .IP 180 | Perform benchmark runs for each value in the comma\-separated list of \fIVALUES\fP. 181 | Replaces the string '{\fIVAR\fP}' in each command by the current parameter value. 182 | .IP 183 | .RS 184 | Example: 185 | .RS 186 | \fBhyperfine\fR \fB\-L\fR compiler gcc,clang '{compiler} \-O2 main.cpp' 187 | .RE 188 | .RE 189 | .IP 190 | This performs benchmarks for 'gcc \-O2 main.cpp' and 'clang \-O2 main.cpp'. 191 | .IP 192 | The option can be specified multiple times to run benchmarks for all 193 | possible parameter combinations. 194 | .HP 195 | \fB\-S\fR, \fB\-\-shell\fR \fISHELL\fP 196 | .IP 197 | Set the shell to use for executing benchmarked commands. This can be 198 | the name or the path to the shell executable, or a full command line 199 | like "bash \fB\-\-norc\fR". It can also be set to "default" to explicitly 200 | select the default shell on this platform. Finally, this can also be 201 | set to "none" to disable the shell. In this case, commands will be 202 | executed directly. They can still have arguments, but more complex 203 | things like "sleep 0.1; sleep 0.2" are not possible without a shell. 204 | .HP 205 | \fB\-N\fR 206 | .IP 207 | An alias for '\-\-shell=none'. 208 | .HP 209 | \fB\-i\fR, \fB\-\-ignore\-failure\fR 210 | .IP 211 | Ignore non\-zero exit codes of the benchmarked programs. 212 | .HP 213 | \fB\-\-style\fR \fITYPE\fP 214 | .IP 215 | Set output style \fITYPE\fP (default: auto). Set this to 'basic' to disable output 216 | coloring and interactive elements. Set it to 'full' to enable all effects even 217 | if no interactive terminal was detected. Set this to 'nocolor' to keep the 218 | interactive output without any colors. Set this to 'color' to keep the colors 219 | without any interactive output. Set this to 'none' to disable all the output 220 | of the tool. 221 | .HP 222 | \fB\-\-sort\fR \fIMETHOD\fP 223 | .IP 224 | Specify the sort order of the speed comparison summary and the 225 | exported tables for markup formats (Markdown, AsciiDoc, org\-mode): 226 | .RS 227 | .IP "auto (default)" 228 | the speed comparison will be ordered by time and 229 | the markup tables will be ordered by command (input order). 230 | .IP "command" 231 | order benchmarks in the way they were specified 232 | .IP "mean\-time" 233 | order benchmarks by mean runtime 234 | .RE 235 | .HP 236 | \fB\-u\fR, \fB\-\-time\-unit\fR \fIUNIT\fP 237 | .IP 238 | Set the time unit to be used. Possible values: microsecond, millisecond, second. If 239 | the option is not given, the time unit is determined automatically. 240 | This option affects the standard output as well as all export formats 241 | except for CSV and JSON. 242 | .HP 243 | \fB\-\-export\-asciidoc\fR \fIFILE\fP 244 | .IP 245 | Export the timing summary statistics as an AsciiDoc table to the given \fIFILE\fP. 246 | The output time unit can be changed using the \fB\-\-time\-unit\fR option. 247 | .HP 248 | \fB\-\-export\-csv\fR \fIFILE\fP 249 | .IP 250 | Export the timing summary statistics as CSV to the given \fIFILE\fP. If you need the 251 | timing results for each individual run, use the JSON export format. 252 | The output time unit is always seconds. 
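.IP
.RS
For example, to export the summary as CSV and, in addition, the timings of the
individual runs as JSON (the file names here are only placeholders):
.RS
\fBhyperfine\fR \fB\-\-export\-csv\fR summary.csv \fB\-\-export\-json\fR runs.json 'sleep 0.5'
.RE
.RE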
.HP
254 | \fB\-\-export\-json\fR \fIFILE\fP
255 | .IP
256 | Export the timing summary statistics and timings of individual runs as JSON to
257 | the given \fIFILE\fP. The output time unit is always seconds.
258 | .HP
259 | \fB\-\-export\-markdown\fR \fIFILE\fP
260 | .IP
261 | Export the timing summary statistics as a Markdown table to the given \fIFILE\fP.
262 | The output time unit can be changed using the \fB\-\-time\-unit\fR option.
263 | .HP
264 | \fB\-\-export\-orgmode\fR \fIFILE\fP
265 | .IP
266 | Export the timing summary statistics as an Emacs org\-mode table to the
267 | given \fIFILE\fP. The output time unit can be changed using the \fB\-\-time\-unit\fR option.
268 | .HP
269 | \fB\-\-show\-output\fR
270 | .IP
271 | Print the stdout and stderr of the benchmark instead of suppressing it. This
272 | will increase the time it takes for benchmarks to run, so it should only be
273 | used for debugging purposes or when trying to benchmark output speed.
274 | .HP
275 | \fB\-\-output\fR \fIWHERE\fP
276 | .IP
277 | Control where the output of the benchmark is redirected. Note that
278 | some programs like 'grep' detect when standard output is \fI\,/dev/null\/\fP and
279 | apply certain optimizations. To avoid that, consider using
280 | \-\-output=pipe.
281 | .IP
282 | \fIWHERE\fP can be:
283 | .RS
284 | .IP null
285 | Redirect output to \fI\,/dev/null\/\fP (the default).
286 | .IP pipe
287 | Feed the output through a pipe before discarding it.
288 | .IP inherit
289 | Don't redirect the output at all (same as \&'\-\-show\-output').
290 | .IP <file>
291 | Write the output to the given file.
292 | .RE
293 | .IP
294 | This option can be specified once for all commands or multiple times,
295 | once for each command. Note: If you want to log the output of each and
296 | every iteration, you can use a shell redirection and the $HYPERFINE_ITERATION
297 | environment variable: 'my-command > output-${HYPERFINE_ITERATION}.log'
298 | .HP
299 | \fB\-\-input\fR \fIWHERE\fP
300 | .IP
301 | Control where the input of the benchmark comes from.
302 | .IP
303 | \fIWHERE\fP can be:
304 | .RS
305 | .IP null
306 | Read from \fI\,/dev/null\/\fP (the default).
307 | .IP <file>
308 | Read the input from the given file.
309 | .RE
310 | .HP
311 | \fB\-n\fR, \fB\-\-command\-name\fR \fINAME\fP
312 | .IP
313 | Give a meaningful \fINAME\fP to a command. This can be specified multiple times
314 | if several commands are benchmarked.
315 | .HP
316 | \fB\-h\fR, \fB\-\-help\fR
317 | .IP
318 | Print help
319 | .HP
320 | \fB\-V\fR, \fB\-\-version\fR
321 | .IP
322 | Print version
323 | .SH EXAMPLES
324 | .LP
325 | Basic benchmark of 'find . -name todo.txt':
326 | .RS
327 | .nf
328 | \fBhyperfine\fR 'find .
-name todo.txt'
329 | .fi
330 | .RE
331 | .LP
332 | Perform benchmarks for 'sleep 0.2' and 'sleep 3.2' with a minimum of 5 runs each:
333 | .RS
334 | .nf
335 | \fBhyperfine\fR \fB\-\-min\-runs\fR 5 'sleep 0.2' 'sleep 3.2'
336 | .fi
337 | .RE
338 | .LP
339 | Perform a benchmark of 'grep' with a warm disk cache by executing 3 runs up front that are not part
340 | of the measurement:
341 | .RS
342 | .nf
343 | \fBhyperfine\fR \fB\-\-warmup\fR 3 'grep -R TODO *'
344 | .fi
345 | .RE
346 | .LP
347 | Export the results of a parameter scan benchmark to a markdown table:
348 | .RS
349 | .nf
350 | \fBhyperfine\fR \fB\-\-export\-markdown\fR output.md \fB\-\-parameter-scan\fR time 1 5 'sleep {time}'
351 | .fi
352 | .RE
353 | .LP
354 | Demonstrate when each of \fB\-\-setup\fR, \fB\-\-prepare\fR, \fB\-\-conclude\fR, \fIcmd\fP and \fB\-\-cleanup\fR will run:
355 | .RS
356 | .nf
357 | \fBhyperfine\fR \fB\-L\fR n 1,2 \fB\-r\fR 2 \fB\-\-show-output\fR \\
358 | \fB\-\-setup\fR 'echo setup n={n}' \\
359 | \fB\-\-prepare\fR 'echo prepare={n}' \\
360 | \fB\-\-conclude\fR 'echo conclude={n}' \\
361 | \fB\-\-cleanup\fR 'echo cleanup n={n}' \\
362 | 'echo command n={n}'
363 | .fi
364 | .RE
365 | .RE
366 | .SH AUTHOR
367 | .LP
368 | David Peter
369 | .LP
370 | Source, bug tracker, and additional information can be found on GitHub:
371 | .I https://github.com/sharkdp/hyperfine
372 | -------------------------------------------------------------------------------- /doc/sponsors.md: --------------------------------------------------------------------------------
1 | ## Sponsors
2 |
3 | `hyperfine` development is sponsored by many individuals and companies. Thank you very much!
4 |
5 | Please note that being sponsored does not affect the independence of the `hyperfine`
6 | project or the maintainers' actions in any way.
7 | We remain impartial and continue to assess pull requests solely on merit - the
8 | features added, bugs solved, and effect on the overall complexity of the code.
9 | No issue will have a different priority based on the sponsorship status of the
10 | reporter.
11 |
12 | Contributions from anybody are most welcome.
13 |
14 | If you want to see our biggest sponsors, check the top of [`README.md`](../README.md#sponsors).
15 | -------------------------------------------------------------------------------- /doc/sponsors/warp-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharkdp/hyperfine/3cedcc38d0c430cbf38b4364b441c43a938d2bf3/doc/sponsors/warp-logo.png -------------------------------------------------------------------------------- /doc/whisker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharkdp/hyperfine/3cedcc38d0c430cbf38b4364b441c43a938d2bf3/doc/whisker.png -------------------------------------------------------------------------------- /scripts/README.md: --------------------------------------------------------------------------------
1 | This folder contains scripts that can be used in combination with hyperfine's `--export-json` option.
2 |
3 | ### Example:
4 |
5 | ```bash
6 | hyperfine 'sleep 0.020' 'sleep 0.021' 'sleep 0.022' --export-json sleep.json
7 | ./plot_whisker.py sleep.json
8 | ```
9 |
10 | ### Pre-requisites
11 |
12 | To make these scripts work, you will need `numpy`, `matplotlib` and `scipy`.
13 | 14 | If you have a Python package manager that understands [PEP-723](https://peps.python.org/pep-0723/) 15 | inline script requirements like [`uv`](https://github.com/astral-sh/uv) or [`pipx`](https://github.com/pypa/pipx), 16 | you can directly run the scripts using 17 | 18 | ```bash 19 | uv run plot_whisker.py sleep.json 20 | ``` 21 | 22 | Otherwise, install the dependencies via your system package manager or using `pip`: 23 | 24 | ```bash 25 | pip install numpy matplotlib scipy # pip3, if you are using python3 26 | ``` 27 | -------------------------------------------------------------------------------- /scripts/advanced_statistics.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # /// script 3 | # requires-python = ">=3.10" 4 | # dependencies = [ 5 | # "numpy", 6 | # ] 7 | # /// 8 | 9 | import argparse 10 | import json 11 | from enum import Enum 12 | 13 | import numpy as np 14 | 15 | 16 | class Unit(Enum): 17 | SECOND = 1 18 | MILLISECOND = 2 19 | 20 | def factor(self): 21 | match self: 22 | case Unit.SECOND: 23 | return 1 24 | case Unit.MILLISECOND: 25 | return 1e3 26 | 27 | def __str__(self): 28 | match self: 29 | case Unit.SECOND: 30 | return "s" 31 | case Unit.MILLISECOND: 32 | return "ms" 33 | 34 | 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument("file", help="JSON file with benchmark results") 37 | parser.add_argument( 38 | "--time-unit", 39 | help="The unit of time.", 40 | default="second", 41 | action="store", 42 | choices=["second", "millisecond"], 43 | dest="unit", 44 | ) 45 | args = parser.parse_args() 46 | 47 | unit = Unit.MILLISECOND if args.unit == "millisecond" else Unit.SECOND 48 | unit_str = str(unit) 49 | 50 | with open(args.file) as f: 51 | results = json.load(f)["results"] 52 | 53 | commands = [b["command"] for b in results] 54 | times = [b["times"] for b in results] 55 | 56 | for command, ts in zip(commands, times): 57 | ts = [t * unit.factor() for t in ts] 58 | 59 | p05 = np.percentile(ts, 5) 60 | p25 = np.percentile(ts, 25) 61 | p75 = np.percentile(ts, 75) 62 | p95 = np.percentile(ts, 95) 63 | 64 | iqr = p75 - p25 65 | 66 | print(f"Command '{command}'") 67 | print(f" runs: {len(ts):8d}") 68 | print(f" mean: {np.mean(ts):8.3f} {unit_str}") 69 | print(f" stddev: {np.std(ts, ddof=1):8.3f} {unit_str}") 70 | print(f" median: {np.median(ts):8.3f} {unit_str}") 71 | print(f" min: {np.min(ts):8.3f} {unit_str}") 72 | print(f" max: {np.max(ts):8.3f} {unit_str}") 73 | print() 74 | print(" percentiles:") 75 | print(f" P_05 .. P_95: {p05:.3f} {unit_str} .. {p95:.3f} {unit_str}") 76 | print( 77 | f" P_25 .. P_75: {p25:.3f} {unit_str} .. {p75:.3f} {unit_str} (IQR = {iqr:.3f} {unit_str})" 78 | ) 79 | print() 80 | -------------------------------------------------------------------------------- /scripts/plot_benchmark_comparison.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # /// script 3 | # requires-python = ">=3.10" 4 | # dependencies = [ 5 | # "matplotlib", 6 | # "pyqt6", 7 | # "numpy", 8 | # ] 9 | # /// 10 | 11 | """ 12 | This script shows `hyperfine` benchmark results as a bar plot grouped by command. 13 | Note all the input files must contain results for all commands. 
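Example usage (a sketch; the JSON files and group names are placeholders
for your own hyperfine exports):

    ./plot_benchmark_comparison.py before.json after.json --benchmark-names before after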
14 | """ 15 | 16 | import argparse 17 | import json 18 | import pathlib 19 | 20 | import matplotlib.pyplot as plt 21 | import numpy as np 22 | 23 | parser = argparse.ArgumentParser(description=__doc__) 24 | parser.add_argument( 25 | "files", nargs="+", type=pathlib.Path, help="JSON files with benchmark results" 26 | ) 27 | parser.add_argument("--title", help="Plot Title") 28 | parser.add_argument( 29 | "--benchmark-names", nargs="+", help="Names of the benchmark groups" 30 | ) 31 | parser.add_argument("-o", "--output", help="Save image to the given filename") 32 | 33 | args = parser.parse_args() 34 | 35 | commands = None 36 | data = [] 37 | inputs = [] 38 | 39 | if args.benchmark_names: 40 | assert len(args.files) == len( 41 | args.benchmark_names 42 | ), "Number of benchmark names must match the number of input files." 43 | 44 | for i, filename in enumerate(args.files): 45 | with open(filename) as f: 46 | results = json.load(f)["results"] 47 | benchmark_commands = [b["command"] for b in results] 48 | if commands is None: 49 | commands = benchmark_commands 50 | else: 51 | assert ( 52 | commands == benchmark_commands 53 | ), f"Unexpected commands in {filename}: {benchmark_commands}, expected: {commands}" 54 | data.append([round(b["mean"], 2) for b in results]) 55 | if args.benchmark_names: 56 | inputs.append(args.benchmark_names[i]) 57 | else: 58 | inputs.append(filename.stem) 59 | 60 | data = np.transpose(data) 61 | x = np.arange(len(inputs)) # the label locations 62 | width = 0.25 # the width of the bars 63 | 64 | fig, ax = plt.subplots(layout="constrained") 65 | fig.set_figheight(5) 66 | fig.set_figwidth(10) 67 | for i, command in enumerate(commands): 68 | offset = width * (i + 1) 69 | rects = ax.bar(x + offset, data[i], width, label=command) 70 | 71 | ax.set_xticks(x + 0.5, inputs) 72 | ax.grid(visible=True, axis="y") 73 | 74 | if args.title: 75 | plt.title(args.title) 76 | plt.xlabel("Benchmark") 77 | plt.ylabel("Time [s]") 78 | plt.legend(title="Command") 79 | 80 | if args.output: 81 | plt.savefig(args.output) 82 | else: 83 | plt.show() 84 | -------------------------------------------------------------------------------- /scripts/plot_histogram.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # /// script 3 | # requires-python = ">=3.10" 4 | # dependencies = [ 5 | # "matplotlib", 6 | # "pyqt6", 7 | # "numpy", 8 | # ] 9 | # /// 10 | 11 | """This program shows `hyperfine` benchmark results as a histogram.""" 12 | 13 | import argparse 14 | import json 15 | 16 | import matplotlib.pyplot as plt 17 | import numpy as np 18 | 19 | parser = argparse.ArgumentParser(description=__doc__) 20 | parser.add_argument("file", help="JSON file with benchmark results") 21 | parser.add_argument("--title", help="Plot title") 22 | parser.add_argument( 23 | "--labels", help="Comma-separated list of entries for the plot legend" 24 | ) 25 | parser.add_argument("--bins", help="Number of bins (default: auto)") 26 | parser.add_argument( 27 | "--legend-location", 28 | help="Location of the legend on plot (default: upper center)", 29 | choices=[ 30 | "upper center", 31 | "lower center", 32 | "right", 33 | "left", 34 | "best", 35 | "upper left", 36 | "upper right", 37 | "lower left", 38 | "lower right", 39 | "center left", 40 | "center right", 41 | "center", 42 | ], 43 | default="upper center", 44 | ) 45 | parser.add_argument( 46 | "--type", help="Type of histogram (*bar*, barstacked, step, stepfilled)" 47 | ) 48 | parser.add_argument("-o", "--output", 
help="Save image to the given filename.") 49 | parser.add_argument( 50 | "--t-min", metavar="T", help="Minimum time to be displayed (seconds)" 51 | ) 52 | parser.add_argument( 53 | "--t-max", metavar="T", help="Maximum time to be displayed (seconds)" 54 | ) 55 | parser.add_argument( 56 | "--log-count", 57 | help="Use a logarithmic y-axis for the event count", 58 | action="store_true", 59 | ) 60 | 61 | args = parser.parse_args() 62 | 63 | with open(args.file) as f: 64 | results = json.load(f)["results"] 65 | 66 | if args.labels: 67 | labels = args.labels.split(",") 68 | else: 69 | labels = [b["command"] for b in results] 70 | all_times = [b["times"] for b in results] 71 | 72 | t_min = float(args.t_min) if args.t_min else np.min(list(map(np.min, all_times))) 73 | t_max = float(args.t_max) if args.t_max else np.max(list(map(np.max, all_times))) 74 | 75 | bins = int(args.bins) if args.bins else "auto" 76 | histtype = args.type if args.type else "bar" 77 | 78 | plt.figure(figsize=(10, 5)) 79 | plt.hist( 80 | all_times, 81 | label=labels, 82 | bins=bins, 83 | histtype=histtype, 84 | range=(t_min, t_max), 85 | ) 86 | plt.legend( 87 | loc=args.legend_location, 88 | fancybox=True, 89 | shadow=True, 90 | prop={"size": 10, "family": ["Source Code Pro", "Fira Mono", "Courier New"]}, 91 | ) 92 | 93 | plt.xlabel("Time [s]") 94 | if args.title: 95 | plt.title(args.title) 96 | 97 | if args.log_count: 98 | plt.yscale("log") 99 | else: 100 | plt.ylim(0, None) 101 | 102 | if args.output: 103 | plt.savefig(args.output, dpi=600) 104 | else: 105 | plt.show() 106 | -------------------------------------------------------------------------------- /scripts/plot_parametrized.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # /// script 3 | # requires-python = ">=3.10" 4 | # dependencies = [ 5 | # "matplotlib", 6 | # "pyqt6", 7 | # ] 8 | # /// 9 | 10 | """This program shows parametrized `hyperfine` benchmark results as an 11 | errorbar plot.""" 12 | 13 | import argparse 14 | import json 15 | import sys 16 | 17 | import matplotlib.pyplot as plt 18 | 19 | parser = argparse.ArgumentParser(description=__doc__) 20 | parser.add_argument("file", help="JSON file with benchmark results", nargs="+") 21 | parser.add_argument( 22 | "--parameter-name", 23 | metavar="name", 24 | type=str, 25 | help="Deprecated; parameter names are now inferred from benchmark files", 26 | ) 27 | parser.add_argument( 28 | "--log-x", help="Use a logarithmic x (parameter) axis", action="store_true" 29 | ) 30 | parser.add_argument( 31 | "--log-time", help="Use a logarithmic time axis", action="store_true" 32 | ) 33 | parser.add_argument( 34 | "--titles", help="Comma-separated list of titles for the plot legend" 35 | ) 36 | parser.add_argument("-o", "--output", help="Save image to the given filename.") 37 | 38 | args = parser.parse_args() 39 | if args.parameter_name is not None: 40 | sys.stderr.write( 41 | "warning: --parameter-name is deprecated; names are inferred from " 42 | "benchmark results\n" 43 | ) 44 | 45 | 46 | def die(msg): 47 | sys.stderr.write(f"fatal: {msg}\n") 48 | sys.exit(1) 49 | 50 | 51 | def extract_parameters(results): 52 | """Return `(parameter_name: str, parameter_values: List[float])`.""" 53 | if not results: 54 | die("no benchmark data to plot") 55 | (names, values) = zip(*(unique_parameter(b) for b in results)) 56 | names = frozenset(names) 57 | if len(names) != 1: 58 | die( 59 | f"benchmarks must all have the same parameter name, but found: {sorted(names)}" 60 | 
) 61 | return (next(iter(names)), list(values)) 62 | 63 | 64 | def unique_parameter(benchmark): 65 | """Return the unique parameter `(name: str, value: float)`, or die.""" 66 | params_dict = benchmark.get("parameters", {}) 67 | if not params_dict: 68 | die("benchmarks must have exactly one parameter, but found none") 69 | if len(params_dict) > 1: 70 | die( 71 | f"benchmarks must have exactly one parameter, but found multiple: {sorted(params_dict)}" 72 | ) 73 | [(name, value)] = params_dict.items() 74 | return (name, float(value)) 75 | 76 | 77 | parameter_name = None 78 | 79 | for filename in args.file: 80 | with open(filename) as f: 81 | results = json.load(f)["results"] 82 | 83 | (this_parameter_name, parameter_values) = extract_parameters(results) 84 | if parameter_name is not None and this_parameter_name != parameter_name: 85 | die( 86 | f"files must all have the same parameter name, but found {parameter_name!r} vs. {this_parameter_name!r}" 87 | ) 88 | parameter_name = this_parameter_name 89 | 90 | times_mean = [b["mean"] for b in results] 91 | times_stddev = [b["stddev"] for b in results] 92 | 93 | plt.errorbar(x=parameter_values, y=times_mean, yerr=times_stddev, capsize=2) 94 | 95 | plt.xlabel(parameter_name) 96 | plt.ylabel("Time [s]") 97 | 98 | if args.log_time: 99 | plt.yscale("log") 100 | else: 101 | plt.ylim(0, None) 102 | 103 | if args.log_x: 104 | plt.xscale("log") 105 | 106 | if args.titles: 107 | plt.legend(args.titles.split(",")) 108 | 109 | if args.output: 110 | plt.savefig(args.output) 111 | else: 112 | plt.show() 113 | -------------------------------------------------------------------------------- /scripts/plot_progression.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # /// script 3 | # requires-python = ">=3.10" 4 | # dependencies = [ 5 | # "pyqt6", 6 | # "matplotlib", 7 | # "numpy", 8 | # ] 9 | # /// 10 | 11 | """This program shows `hyperfine` benchmark results in a sequential way 12 | in order to debug possible background interference, caching effects, 13 | thermal throttling and similar effects. 
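Example usage (a sketch; results.json stands for a file created with
hyperfine's --export-json option):

    ./plot_progression.py results.json --moving-average-width 10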
14 | """ 15 | 16 | import argparse 17 | import json 18 | 19 | import matplotlib.pyplot as plt 20 | import numpy as np 21 | 22 | 23 | def moving_average(times, num_runs): 24 | times_padded = np.pad( 25 | times, (num_runs // 2, num_runs - 1 - num_runs // 2), mode="edge" 26 | ) 27 | kernel = np.ones(num_runs) / num_runs 28 | return np.convolve(times_padded, kernel, mode="valid") 29 | 30 | 31 | parser = argparse.ArgumentParser(description=__doc__) 32 | parser.add_argument("file", help="JSON file with benchmark results") 33 | parser.add_argument("--title", help="Plot Title") 34 | parser.add_argument("-o", "--output", help="Save image to the given filename.") 35 | parser.add_argument( 36 | "-w", 37 | "--moving-average-width", 38 | type=int, 39 | metavar="num_runs", 40 | help="Width of the moving-average window (default: N/5)", 41 | ) 42 | parser.add_argument( 43 | "--no-moving-average", 44 | action="store_true", 45 | help="Do not show moving average curve", 46 | ) 47 | 48 | 49 | args = parser.parse_args() 50 | 51 | with open(args.file) as f: 52 | results = json.load(f)["results"] 53 | 54 | for result in results: 55 | label = result["command"] 56 | times = result["times"] 57 | num = len(times) 58 | nums = range(num) 59 | 60 | plt.scatter(x=nums, y=times, marker=".") 61 | plt.ylim([0, None]) 62 | plt.xlim([-1, num]) 63 | 64 | if not args.no_moving_average: 65 | moving_average_width = ( 66 | num // 5 if args.moving_average_width is None else args.moving_average_width 67 | ) 68 | 69 | average = moving_average(times, moving_average_width) 70 | plt.plot(nums, average, "-") 71 | 72 | if args.title: 73 | plt.title(args.title) 74 | 75 | legend = [] 76 | for result in results: 77 | legend.append(result["command"]) 78 | if not args.no_moving_average: 79 | legend.append("moving average") 80 | plt.legend(legend) 81 | 82 | plt.ylabel("Time [s]") 83 | 84 | if args.output: 85 | plt.savefig(args.output) 86 | else: 87 | plt.show() 88 | -------------------------------------------------------------------------------- /scripts/plot_whisker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # /// script 3 | # requires-python = ">=3.10" 4 | # dependencies = [ 5 | # "matplotlib", 6 | # "pyqt6", 7 | # ] 8 | # /// 9 | 10 | """This program shows `hyperfine` benchmark results as a box and whisker plot. 11 | 12 | Quoting from the matplotlib documentation: 13 | The box extends from the lower to upper quartile values of the data, with 14 | a line at the median. The whiskers extend from the box to show the range 15 | of the data. Flier points are those past the end of the whiskers. 
16 | """ 17 | 18 | import argparse 19 | import json 20 | 21 | import matplotlib.pyplot as plt 22 | 23 | parser = argparse.ArgumentParser(description=__doc__) 24 | parser.add_argument("file", help="JSON file with benchmark results") 25 | parser.add_argument("--title", help="Plot Title") 26 | parser.add_argument("--sort-by", choices=["median"], help="Sort method") 27 | parser.add_argument( 28 | "--labels", help="Comma-separated list of entries for the plot legend" 29 | ) 30 | parser.add_argument("-o", "--output", help="Save image to the given filename.") 31 | 32 | args = parser.parse_args() 33 | 34 | with open(args.file, encoding="utf-8") as f: 35 | results = json.load(f)["results"] 36 | 37 | if args.labels: 38 | labels = args.labels.split(",") 39 | else: 40 | labels = [b["command"] for b in results] 41 | times = [b["times"] for b in results] 42 | 43 | if args.sort_by == "median": 44 | medians = [b["median"] for b in results] 45 | indices = sorted(range(len(labels)), key=lambda k: medians[k]) 46 | labels = [labels[i] for i in indices] 47 | times = [times[i] for i in indices] 48 | 49 | plt.figure(figsize=(10, 6), constrained_layout=True) 50 | boxplot = plt.boxplot(times, vert=True, patch_artist=True) 51 | cmap = plt.get_cmap("rainbow") 52 | colors = [cmap(val / len(times)) for val in range(len(times))] 53 | 54 | for patch, color in zip(boxplot["boxes"], colors): 55 | patch.set_facecolor(color) 56 | 57 | if args.title: 58 | plt.title(args.title) 59 | plt.legend(handles=boxplot["boxes"], labels=labels, loc="best", fontsize="medium") 60 | plt.ylabel("Time [s]") 61 | plt.ylim(0, None) 62 | plt.xticks(list(range(1, len(labels) + 1)), labels, rotation=45) 63 | if args.output: 64 | plt.savefig(args.output) 65 | else: 66 | plt.show() 67 | -------------------------------------------------------------------------------- /scripts/ruff.toml: -------------------------------------------------------------------------------- 1 | target-version = "py310" 2 | 3 | [lint] 4 | extend-select = ["I", "UP", "RUF"] 5 | -------------------------------------------------------------------------------- /scripts/welch_ttest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # /// script 3 | # requires-python = ">=3.10" 4 | # dependencies = [ 5 | # "scipy", 6 | # ] 7 | # /// 8 | 9 | """This script performs Welch's t-test on a JSON export file with two 10 | benchmark results to test whether or not the two distributions are 11 | the same.""" 12 | 13 | import argparse 14 | import json 15 | import sys 16 | 17 | from scipy import stats 18 | 19 | parser = argparse.ArgumentParser(description=__doc__) 20 | parser.add_argument("file", help="JSON file with two benchmark results") 21 | args = parser.parse_args() 22 | 23 | with open(args.file) as f: 24 | results = json.load(f)["results"] 25 | 26 | if len(results) != 2: 27 | print("The input file has to contain exactly two benchmarks") 28 | sys.exit(1) 29 | 30 | a, b = (x["command"] for x in results[:2]) 31 | X, Y = (x["times"] for x in results[:2]) 32 | 33 | print(f"Command 1: {a}") 34 | print(f"Command 2: {b}\n") 35 | 36 | t, p = stats.ttest_ind(X, Y, equal_var=False) 37 | th = 0.05 38 | dispose = p < th 39 | print(f"t = {t:.3}, p = {p:.3}") 40 | print() 41 | 42 | if dispose: 43 | print(f"There is a difference between the two benchmarks (p < {th}).") 44 | else: 45 | print(f"The two benchmarks are almost the same (p >= {th}).") 46 | -------------------------------------------------------------------------------- 
/src/benchmark/benchmark_result.rs: --------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 |
3 | use serde::Serialize;
4 |
5 | use crate::util::units::Second;
6 |
7 | /// Set of values that will be exported.
8 | // NOTE: `serde` is used for JSON serialization, but not for CSV serialization due to the
9 | // `parameters` map. Update `src/hyperfine/export/csv.rs` with new fields, as appropriate.
10 | #[derive(Debug, Default, Clone, Serialize, PartialEq)]
11 | pub struct BenchmarkResult {
12 | /// The full command line of the program that is being benchmarked
13 | pub command: String,
14 |
15 | /// The full command line of the program that is being benchmarked, possibly including a list of
16 | /// parameters that were not used in the command line template.
17 | #[serde(skip_serializing)]
18 | pub command_with_unused_parameters: String,
19 |
20 | /// The average run time
21 | pub mean: Second,
22 |
23 | /// The standard deviation of all run times. Not available if only one run has been performed
24 | pub stddev: Option<Second>,
25 |
26 | /// The median run time
27 | pub median: Second,
28 |
29 | /// Time spent in user mode
30 | pub user: Second,
31 |
32 | /// Time spent in kernel mode
33 | pub system: Second,
34 |
35 | /// Minimum of all measured times
36 | pub min: Second,
37 |
38 | /// Maximum of all measured times
39 | pub max: Second,
40 |
41 | /// All run time measurements
42 | #[serde(skip_serializing_if = "Option::is_none")]
43 | pub times: Option<Vec<Second>>,
44 |
45 | /// Maximum memory usage of the process, in bytes
46 | #[serde(skip_serializing_if = "Option::is_none")]
47 | pub memory_usage_byte: Option<Vec<u64>>,
48 |
49 | /// Exit codes of all command invocations
50 | pub exit_codes: Vec<Option<i32>>,
51 |
52 | /// Parameter values for this benchmark
53 | #[serde(skip_serializing_if = "BTreeMap::is_empty")]
54 | pub parameters: BTreeMap<String, String>,
55 | }
56 | -------------------------------------------------------------------------------- /src/benchmark/executor.rs: --------------------------------------------------------------------------------
1 | #[cfg(windows)]
2 | use std::os::windows::process::CommandExt;
3 | use std::process::ExitStatus;
4 |
5 | use crate::command::Command;
6 | use crate::options::{
7 | CmdFailureAction, CommandInputPolicy, CommandOutputPolicy, Options, OutputStyleOption, Shell,
8 | };
9 | use crate::output::progress_bar::get_progress_bar;
10 | use crate::timer::{execute_and_measure, TimerResult};
11 | use crate::util::randomized_environment_offset;
12 | use crate::util::units::Second;
13 |
14 | use super::timing_result::TimingResult;
15 |
16 | use anyhow::{bail, Context, Result};
17 | use statistical::mean;
18 |
19 | pub enum BenchmarkIteration {
20 | NonBenchmarkRun,
21 | Warmup(u64),
22 | Benchmark(u64),
23 | }
24 |
25 | impl BenchmarkIteration {
26 | pub fn to_env_var_value(&self) -> Option<String> {
27 | match self {
28 | BenchmarkIteration::NonBenchmarkRun => None,
29 | BenchmarkIteration::Warmup(i) => Some(format!("warmup-{}", i)),
30 | BenchmarkIteration::Benchmark(i) => Some(format!("{}", i)),
31 | }
32 | }
33 | }
34 |
35 | pub trait Executor {
36 | /// Run the given command and measure the execution time
37 | fn run_command_and_measure(
38 | &self,
39 | command: &Command<'_>,
40 | iteration: BenchmarkIteration,
41 | command_failure_action: Option<CmdFailureAction>,
42 | output_policy: &CommandOutputPolicy,
43 | ) -> Result<(TimingResult, ExitStatus)>;
44 |
45 | /// Perform a calibration of this executor.
For example,
46 | /// when running commands through a shell, we need to
47 | /// measure the shell spawning time separately in order
48 | /// to subtract it from the full runtime later.
49 | fn calibrate(&mut self) -> Result<()>;
50 |
51 | /// Return the time overhead for this executor when
52 | /// performing a measurement. This should return the time
53 | /// that is being used in addition to the actual runtime
54 | /// of the command.
55 | fn time_overhead(&self) -> Second;
56 | }
57 |
58 | fn run_command_and_measure_common(
59 | mut command: std::process::Command,
60 | iteration: BenchmarkIteration,
61 | command_failure_action: CmdFailureAction,
62 | command_input_policy: &CommandInputPolicy,
63 | command_output_policy: &CommandOutputPolicy,
64 | command_name: &str,
65 | ) -> Result<TimerResult> {
66 | let stdin = command_input_policy.get_stdin()?;
67 | let (stdout, stderr) = command_output_policy.get_stdout_stderr()?;
68 | command.stdin(stdin).stdout(stdout).stderr(stderr);
69 |
70 | command.env(
71 | "HYPERFINE_RANDOMIZED_ENVIRONMENT_OFFSET",
72 | randomized_environment_offset::value(),
73 | );
74 |
75 | if let Some(value) = iteration.to_env_var_value() {
76 | command.env("HYPERFINE_ITERATION", value);
77 | }
78 |
79 | let result = execute_and_measure(command)
80 | .with_context(|| format!("Failed to run command '{command_name}'"))?;
81 |
82 | if command_failure_action == CmdFailureAction::RaiseError && !result.status.success() {
83 | let when = match iteration {
84 | BenchmarkIteration::NonBenchmarkRun => "a non-benchmark run".to_string(),
85 | BenchmarkIteration::Warmup(0) => "the first warmup run".to_string(),
86 | BenchmarkIteration::Warmup(i) => format!("warmup iteration {i}"),
87 | BenchmarkIteration::Benchmark(0) => "the first benchmark run".to_string(),
88 | BenchmarkIteration::Benchmark(i) => format!("benchmark iteration {i}"),
89 | };
90 | bail!(
91 | "{cause} in {when}. Use the '-i'/'--ignore-failure' option if you want to ignore this.
\
92 | Alternatively, use the '--show-output' option to debug what went wrong.",
93 | cause=result.status.code().map_or(
94 | "The process has been terminated by a signal".into(),
95 | |c| format!("Command terminated with non-zero exit code {c}")
96 |
97 | ),
98 | );
99 | }
100 |
101 | Ok(result)
102 | }
103 |
104 | pub struct RawExecutor<'a> {
105 | options: &'a Options,
106 | }
107 |
108 | impl<'a> RawExecutor<'a> {
109 | pub fn new(options: &'a Options) -> Self {
110 | RawExecutor { options }
111 | }
112 | }
113 |
114 | impl Executor for RawExecutor<'_> {
115 | fn run_command_and_measure(
116 | &self,
117 | command: &Command<'_>,
118 | iteration: BenchmarkIteration,
119 | command_failure_action: Option<CmdFailureAction>,
120 | output_policy: &CommandOutputPolicy,
121 | ) -> Result<(TimingResult, ExitStatus)> {
122 | let result = run_command_and_measure_common(
123 | command.get_command()?,
124 | iteration,
125 | command_failure_action.unwrap_or(self.options.command_failure_action),
126 | &self.options.command_input_policy,
127 | output_policy,
128 | &command.get_command_line(),
129 | )?;
130 |
131 | Ok((
132 | TimingResult {
133 | time_real: result.time_real,
134 | time_user: result.time_user,
135 | time_system: result.time_system,
136 | memory_usage_byte: result.memory_usage_byte,
137 | },
138 | result.status,
139 | ))
140 | }
141 |
142 | fn calibrate(&mut self) -> Result<()> {
143 | Ok(())
144 | }
145 |
146 | fn time_overhead(&self) -> Second {
147 | 0.0
148 | }
149 | }
150 |
151 | pub struct ShellExecutor<'a> {
152 | options: &'a Options,
153 | shell: &'a Shell,
154 | shell_spawning_time: Option<TimingResult>,
155 | }
156 |
157 | impl<'a> ShellExecutor<'a> {
158 | pub fn new(shell: &'a Shell, options: &'a Options) -> Self {
159 | ShellExecutor {
160 | shell,
161 | options,
162 | shell_spawning_time: None,
163 | }
164 | }
165 | }
166 |
167 | impl Executor for ShellExecutor<'_> {
168 | fn run_command_and_measure(
169 | &self,
170 | command: &Command<'_>,
171 | iteration: BenchmarkIteration,
172 | command_failure_action: Option<CmdFailureAction>,
173 | output_policy: &CommandOutputPolicy,
174 | ) -> Result<(TimingResult, ExitStatus)> {
175 | let on_windows_cmd = cfg!(windows) && *self.shell == Shell::Default("cmd.exe");
176 | let mut command_builder = self.shell.command();
177 | command_builder.arg(if on_windows_cmd { "/C" } else { "-c" });
178 |
179 | // Windows needs special treatment for its behavior on parsing cmd arguments
180 | if on_windows_cmd {
181 | #[cfg(windows)]
182 | command_builder.raw_arg(command.get_command_line());
183 | } else {
184 | command_builder.arg(command.get_command_line());
185 | }
186 |
187 | let mut result = run_command_and_measure_common(
188 | command_builder,
189 | iteration,
190 | command_failure_action.unwrap_or(self.options.command_failure_action),
191 | &self.options.command_input_policy,
192 | output_policy,
193 | &command.get_command_line(),
194 | )?;
195 |
196 | // Subtract shell spawning time
197 | if let Some(spawning_time) = self.shell_spawning_time {
198 | result.time_real = (result.time_real - spawning_time.time_real).max(0.0);
199 | result.time_user = (result.time_user - spawning_time.time_user).max(0.0);
200 | result.time_system = (result.time_system - spawning_time.time_system).max(0.0);
201 | }
202 |
203 | Ok((
204 | TimingResult {
205 | time_real: result.time_real,
206 | time_user: result.time_user,
207 | time_system: result.time_system,
208 | memory_usage_byte: result.memory_usage_byte,
209 | },
210 | result.status,
211 | ))
212 | }
213 |
214 | /// Measure the average shell spawning
time
215 | fn calibrate(&mut self) -> Result<()> {
216 | const COUNT: u64 = 50;
217 | let progress_bar = if self.options.output_style != OutputStyleOption::Disabled {
218 | Some(get_progress_bar(
219 | COUNT,
220 | "Measuring shell spawning time",
221 | self.options.output_style,
222 | ))
223 | } else {
224 | None
225 | };
226 |
227 | let mut times_real: Vec<Second> = vec![];
228 | let mut times_user: Vec<Second> = vec![];
229 | let mut times_system: Vec<Second> = vec![];
230 |
231 | for _ in 0..COUNT {
232 | // Just run the shell without any command
233 | let res = self.run_command_and_measure(
234 | &Command::new(None, ""),
235 | BenchmarkIteration::NonBenchmarkRun,
236 | None,
237 | &CommandOutputPolicy::Null,
238 | );
239 |
240 | match res {
241 | Err(_) => {
242 | let shell_cmd = if cfg!(windows) {
243 | format!("{} /C \"\"", self.shell)
244 | } else {
245 | format!("{} -c \"\"", self.shell)
246 | };
247 |
248 | bail!(
249 | "Could not measure shell execution time. Make sure you can run '{}'.",
250 | shell_cmd
251 | );
252 | }
253 | Ok((r, _)) => {
254 | times_real.push(r.time_real);
255 | times_user.push(r.time_user);
256 | times_system.push(r.time_system);
257 | }
258 | }
259 |
260 | if let Some(bar) = progress_bar.as_ref() {
261 | bar.inc(1)
262 | }
263 | }
264 |
265 | if let Some(bar) = progress_bar.as_ref() {
266 | bar.finish_and_clear()
267 | }
268 |
269 | self.shell_spawning_time = Some(TimingResult {
270 | time_real: mean(&times_real),
271 | time_user: mean(&times_user),
272 | time_system: mean(&times_system),
273 | memory_usage_byte: 0,
274 | });
275 |
276 | Ok(())
277 | }
278 |
279 | fn time_overhead(&self) -> Second {
280 | self.shell_spawning_time.unwrap().time_real
281 | }
282 | }
283 |
284 | #[derive(Clone)]
285 | pub struct MockExecutor {
286 | shell: Option<String>,
287 | }
288 |
289 | impl MockExecutor {
290 | pub fn new(shell: Option<String>) -> Self {
291 | MockExecutor { shell }
292 | }
293 |
294 | fn extract_time<S: AsRef<str>>(sleep_command: S) -> Second {
295 | assert!(sleep_command.as_ref().starts_with("sleep "));
296 | sleep_command
297 | .as_ref()
298 | .trim_start_matches("sleep ")
299 | .parse::<f64>()
300 | .unwrap()
301 | }
302 | }
303 |
304 | impl Executor for MockExecutor {
305 | fn run_command_and_measure(
306 | &self,
307 | command: &Command<'_>,
308 | _iteration: BenchmarkIteration,
309 | _command_failure_action: Option<CmdFailureAction>,
310 | _output_policy: &CommandOutputPolicy,
311 | ) -> Result<(TimingResult, ExitStatus)> {
312 | #[cfg(unix)]
313 | let status = {
314 | use std::os::unix::process::ExitStatusExt;
315 | ExitStatus::from_raw(0)
316 | };
317 |
318 | #[cfg(windows)]
319 | let status = {
320 | use std::os::windows::process::ExitStatusExt;
321 | ExitStatus::from_raw(0)
322 | };
323 |
324 | Ok((
325 | TimingResult {
326 | time_real: Self::extract_time(command.get_command_line()),
327 | time_user: 0.0,
328 | time_system: 0.0,
329 | memory_usage_byte: 0,
330 | },
331 | status,
332 | ))
333 | }
334 |
335 | fn calibrate(&mut self) -> Result<()> {
336 | Ok(())
337 | }
338 |
339 | fn time_overhead(&self) -> Second {
340 | match &self.shell {
341 | None => 0.0,
342 | Some(shell) => Self::extract_time(shell),
343 | }
344 | }
345 | }
346 |
347 | #[test]
348 | fn test_mock_executor_extract_time() {
349 | assert_eq!(MockExecutor::extract_time("sleep 0.1"), 0.1);
350 | }
351 | -------------------------------------------------------------------------------- /src/benchmark/mod.rs: --------------------------------------------------------------------------------
1 | pub mod benchmark_result;
2 | pub mod executor;
3 | pub mod relative_speed;
4 | pub mod scheduler;
5 | pub mod timing_result;
6 |
7 | use std::cmp;
8 |
9 | use crate::benchmark::executor::BenchmarkIteration;
10 | use crate::command::Command;
11 | use crate::options::{
12 | CmdFailureAction, CommandOutputPolicy, ExecutorKind, Options, OutputStyleOption,
13 | };
14 | use crate::outlier_detection::{modified_zscores, OUTLIER_THRESHOLD};
15 | use crate::output::format::{format_duration, format_duration_unit};
16 | use crate::output::progress_bar::get_progress_bar;
17 | use crate::output::warnings::{OutlierWarningOptions, Warnings};
18 | use crate::parameter::ParameterNameAndValue;
19 | use crate::util::exit_code::extract_exit_code;
20 | use crate::util::min_max::{max, min};
21 | use crate::util::units::Second;
22 | use benchmark_result::BenchmarkResult;
23 | use timing_result::TimingResult;
24 |
25 | use anyhow::{anyhow, Result};
26 | use colored::*;
27 | use statistical::{mean, median, standard_deviation};
28 |
29 | use self::executor::Executor;
30 |
31 | /// Threshold for warning about fast execution time
32 | pub const MIN_EXECUTION_TIME: Second = 5e-3;
33 |
34 | pub struct Benchmark<'a> {
35 | number: usize,
36 | command: &'a Command<'a>,
37 | options: &'a Options,
38 | executor: &'a dyn Executor,
39 | }
40 |
41 | impl<'a> Benchmark<'a> {
42 | pub fn new(
43 | number: usize,
44 | command: &'a Command<'a>,
45 | options: &'a Options,
46 | executor: &'a dyn Executor,
47 | ) -> Self {
48 | Benchmark {
49 | number,
50 | command,
51 | options,
52 | executor,
53 | }
54 | }
55 |
56 | /// Run setup, cleanup, or preparation commands
57 | fn run_intermediate_command(
58 | &self,
59 | command: &Command<'_>,
60 | error_output: &'static str,
61 | output_policy: &CommandOutputPolicy,
62 | ) -> Result<TimingResult> {
63 | self.executor
64 | .run_command_and_measure(
65 | command,
66 | executor::BenchmarkIteration::NonBenchmarkRun,
67 | Some(CmdFailureAction::RaiseError),
68 | output_policy,
69 | )
70 | .map(|r| r.0)
71 | .map_err(|_| anyhow!(error_output))
72 | }
73 |
74 | /// Run the command specified by `--setup`.
75 | fn run_setup_command(
76 | &self,
77 | parameters: impl IntoIterator<Item = ParameterNameAndValue<'a>>,
78 | output_policy: &CommandOutputPolicy,
79 | ) -> Result<TimingResult> {
80 | let command = self
81 | .options
82 | .setup_command
83 | .as_ref()
84 | .map(|setup_command| Command::new_parametrized(None, setup_command, parameters));
85 |
86 | let error_output = "The setup command terminated with a non-zero exit code. \
87 | Append ' || true' to the command if you are sure that this can be ignored.";
88 |
89 | Ok(command
90 | .map(|cmd| self.run_intermediate_command(&cmd, error_output, output_policy))
91 | .transpose()?
92 | .unwrap_or_default())
93 | }
94 |
95 | /// Run the command specified by `--cleanup`.
96 | fn run_cleanup_command(
97 | &self,
98 | parameters: impl IntoIterator<Item = ParameterNameAndValue<'a>>,
99 | output_policy: &CommandOutputPolicy,
100 | ) -> Result<TimingResult> {
101 | let command = self
102 | .options
103 | .cleanup_command
104 | .as_ref()
105 | .map(|cleanup_command| Command::new_parametrized(None, cleanup_command, parameters));
106 |
107 | let error_output = "The cleanup command terminated with a non-zero exit code. \
108 | Append ' || true' to the command if you are sure that this can be ignored.";
109 |
110 | Ok(command
111 | .map(|cmd| self.run_intermediate_command(&cmd, error_output, output_policy))
112 | .transpose()?
113 | .unwrap_or_default())
114 | }
115 |
116 | /// Run the command specified by `--prepare`.
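/// In contrast to `--setup` (run once per benchmark) and `--cleanup`
/// (run once after all timing runs), the preparation command is executed
/// before every single timing run, including warmup runs; see `run()` below.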
117 | fn run_preparation_command(
118 | &self,
119 | command: &Command<'_>,
120 | output_policy: &CommandOutputPolicy,
121 | ) -> Result<TimingResult> {
122 | let error_output = "The preparation command terminated with a non-zero exit code. \
123 | Append ' || true' to the command if you are sure that this can be ignored.";
124 |
125 | self.run_intermediate_command(command, error_output, output_policy)
126 | }
127 |
128 | /// Run the command specified by `--conclude`.
129 | fn run_conclusion_command(
130 | &self,
131 | command: &Command<'_>,
132 | output_policy: &CommandOutputPolicy,
133 | ) -> Result<TimingResult> {
134 | let error_output = "The conclusion command terminated with a non-zero exit code. \
135 | Append ' || true' to the command if you are sure that this can be ignored.";
136 |
137 | self.run_intermediate_command(command, error_output, output_policy)
138 | }
139 |
140 | /// Run the benchmark for a single command
141 | pub fn run(&self) -> Result<BenchmarkResult> {
142 | if self.options.output_style != OutputStyleOption::Disabled {
143 | println!(
144 | "{}{}: {}",
145 | "Benchmark ".bold(),
146 | (self.number + 1).to_string().bold(),
147 | self.command.get_name_with_unused_parameters(),
148 | );
149 | }
150 |
151 | let mut times_real: Vec<Second> = vec![];
152 | let mut times_user: Vec<Second> = vec![];
153 | let mut times_system: Vec<Second> = vec![];
154 | let mut memory_usage_byte: Vec<u64> = vec![];
155 | let mut exit_codes: Vec<Option<i32>> = vec![];
156 | let mut all_succeeded = true;
157 |
158 | let output_policy = &self.options.command_output_policies[self.number];
159 |
160 | let preparation_command = self.options.preparation_command.as_ref().map(|values| {
161 | let preparation_command = if values.len() == 1 {
162 | &values[0]
163 | } else {
164 | &values[self.number]
165 | };
166 | Command::new_parametrized(
167 | None,
168 | preparation_command,
169 | self.command.get_parameters().iter().cloned(),
170 | )
171 | });
172 |
173 | let run_preparation_command = || {
174 | preparation_command
175 | .as_ref()
176 | .map(|cmd| self.run_preparation_command(cmd, output_policy))
177 | .transpose()
178 | };
179 |
180 | let conclusion_command = self.options.conclusion_command.as_ref().map(|values| {
181 | let conclusion_command = if values.len() == 1 {
182 | &values[0]
183 | } else {
184 | &values[self.number]
185 | };
186 | Command::new_parametrized(
187 | None,
188 | conclusion_command,
189 | self.command.get_parameters().iter().cloned(),
190 | )
191 | });
192 | let run_conclusion_command = || {
193 | conclusion_command
194 | .as_ref()
195 | .map(|cmd| self.run_conclusion_command(cmd, output_policy))
196 | .transpose()
197 | };
198 |
199 | self.run_setup_command(self.command.get_parameters().iter().cloned(), output_policy)?;
200 |
201 | // Warmup phase
202 | if self.options.warmup_count > 0 {
203 | let progress_bar = if self.options.output_style != OutputStyleOption::Disabled {
204 | Some(get_progress_bar(
205 | self.options.warmup_count,
206 | "Performing warmup runs",
207 | self.options.output_style,
208 | ))
209 | } else {
210 | None
211 | };
212 |
213 | for i in 0..self.options.warmup_count {
214 | let _ = run_preparation_command()?;
215 | let _ = self.executor.run_command_and_measure(
216 | self.command,
217 | BenchmarkIteration::Warmup(i),
218 | None,
219 | output_policy,
220 | )?;
221 | let _ = run_conclusion_command()?;
222 | if let Some(bar) = progress_bar.as_ref() {
223 | bar.inc(1)
224 | }
225 | }
226 | if let Some(bar) = progress_bar.as_ref() {
227 | bar.finish_and_clear()
228 | }
229 | }
230 |
231 | // Set up progress bar (and spinner for initial
measurement) 232 | let progress_bar = if self.options.output_style != OutputStyleOption::Disabled { 233 | Some(get_progress_bar( 234 | self.options.run_bounds.min, 235 | "Initial time measurement", 236 | self.options.output_style, 237 | )) 238 | } else { 239 | None 240 | }; 241 | 242 | let preparation_result = run_preparation_command()?; 243 | let preparation_overhead = 244 | preparation_result.map_or(0.0, |res| res.time_real + self.executor.time_overhead()); 245 | 246 | // Initial timing run 247 | let (res, status) = self.executor.run_command_and_measure( 248 | self.command, 249 | BenchmarkIteration::Benchmark(0), 250 | None, 251 | output_policy, 252 | )?; 253 | let success = status.success(); 254 | 255 | let conclusion_result = run_conclusion_command()?; 256 | let conclusion_overhead = 257 | conclusion_result.map_or(0.0, |res| res.time_real + self.executor.time_overhead()); 258 | 259 | // Determine number of benchmark runs 260 | let runs_in_min_time = (self.options.min_benchmarking_time 261 | / (res.time_real 262 | + self.executor.time_overhead() 263 | + preparation_overhead 264 | + conclusion_overhead)) as u64; 265 | 266 | let count = { 267 | let min = cmp::max(runs_in_min_time, self.options.run_bounds.min); 268 | 269 | self.options 270 | .run_bounds 271 | .max 272 | .as_ref() 273 | .map(|max| cmp::min(min, *max)) 274 | .unwrap_or(min) 275 | }; 276 | 277 | let count_remaining = count - 1; 278 | 279 | // Save the first result 280 | times_real.push(res.time_real); 281 | times_user.push(res.time_user); 282 | times_system.push(res.time_system); 283 | memory_usage_byte.push(res.memory_usage_byte); 284 | exit_codes.push(extract_exit_code(status)); 285 | 286 | all_succeeded = all_succeeded && success; 287 | 288 | // Re-configure the progress bar 289 | if let Some(bar) = progress_bar.as_ref() { 290 | bar.set_length(count) 291 | } 292 | if let Some(bar) = progress_bar.as_ref() { 293 | bar.inc(1) 294 | } 295 | 296 | // Gather statistics (perform the actual benchmark) 297 | for i in 0..count_remaining { 298 | run_preparation_command()?; 299 | 300 | let msg = { 301 | let mean = format_duration(mean(×_real), self.options.time_unit); 302 | format!("Current estimate: {}", mean.to_string().green()) 303 | }; 304 | 305 | if let Some(bar) = progress_bar.as_ref() { 306 | bar.set_message(msg.to_owned()) 307 | } 308 | 309 | let (res, status) = self.executor.run_command_and_measure( 310 | self.command, 311 | BenchmarkIteration::Benchmark(i + 1), 312 | None, 313 | output_policy, 314 | )?; 315 | let success = status.success(); 316 | 317 | times_real.push(res.time_real); 318 | times_user.push(res.time_user); 319 | times_system.push(res.time_system); 320 | memory_usage_byte.push(res.memory_usage_byte); 321 | exit_codes.push(extract_exit_code(status)); 322 | 323 | all_succeeded = all_succeeded && success; 324 | 325 | if let Some(bar) = progress_bar.as_ref() { 326 | bar.inc(1) 327 | } 328 | 329 | run_conclusion_command()?; 330 | } 331 | 332 | if let Some(bar) = progress_bar.as_ref() { 333 | bar.finish_and_clear() 334 | } 335 | 336 | // Compute statistical quantities 337 | let t_num = times_real.len(); 338 | let t_mean = mean(×_real); 339 | let t_stddev = if times_real.len() > 1 { 340 | Some(standard_deviation(×_real, Some(t_mean))) 341 | } else { 342 | None 343 | }; 344 | let t_median = median(×_real); 345 | let t_min = min(×_real); 346 | let t_max = max(×_real); 347 | 348 | let user_mean = mean(×_user); 349 | let system_mean = mean(×_system); 350 | 351 | // Formatting and console output 352 | let (mean_str, 
time_unit) = format_duration_unit(t_mean, self.options.time_unit); 353 | let min_str = format_duration(t_min, Some(time_unit)); 354 | let max_str = format_duration(t_max, Some(time_unit)); 355 | let num_str = format!("{t_num} runs"); 356 | 357 | let user_str = format_duration(user_mean, Some(time_unit)); 358 | let system_str = format_duration(system_mean, Some(time_unit)); 359 | 360 | if self.options.output_style != OutputStyleOption::Disabled { 361 | if times_real.len() == 1 { 362 | println!( 363 | " Time ({} ≡): {:>8} {:>8} [User: {}, System: {}]", 364 | "abs".green().bold(), 365 | mean_str.green().bold(), 366 | " ", // alignment 367 | user_str.blue(), 368 | system_str.blue() 369 | ); 370 | } else { 371 | let stddev_str = format_duration(t_stddev.unwrap(), Some(time_unit)); 372 | 373 | println!( 374 | " Time ({} ± {}): {:>8} ± {:>8} [User: {}, System: {}]", 375 | "mean".green().bold(), 376 | "σ".green(), 377 | mean_str.green().bold(), 378 | stddev_str.green(), 379 | user_str.blue(), 380 | system_str.blue() 381 | ); 382 | 383 | println!( 384 | " Range ({} … {}): {:>8} … {:>8} {}", 385 | "min".cyan(), 386 | "max".purple(), 387 | min_str.cyan(), 388 | max_str.purple(), 389 | num_str.dimmed() 390 | ); 391 | } 392 | } 393 | 394 | // Warnings 395 | let mut warnings = vec![]; 396 | 397 | // Check execution time 398 | if matches!(self.options.executor_kind, ExecutorKind::Shell(_)) 399 | && times_real.iter().any(|&t| t < MIN_EXECUTION_TIME) 400 | { 401 | warnings.push(Warnings::FastExecutionTime); 402 | } 403 | 404 | // Check program exit codes 405 | if !all_succeeded { 406 | warnings.push(Warnings::NonZeroExitCode); 407 | } 408 | 409 | // Run outlier detection 410 | let scores = modified_zscores(×_real); 411 | 412 | let outlier_warning_options = OutlierWarningOptions { 413 | warmup_in_use: self.options.warmup_count > 0, 414 | prepare_in_use: self 415 | .options 416 | .preparation_command 417 | .as_ref() 418 | .map(|v| v.len()) 419 | .unwrap_or(0) 420 | > 0, 421 | }; 422 | 423 | if scores[0] > OUTLIER_THRESHOLD { 424 | warnings.push(Warnings::SlowInitialRun( 425 | times_real[0], 426 | outlier_warning_options, 427 | )); 428 | } else if scores.iter().any(|&s| s.abs() > OUTLIER_THRESHOLD) { 429 | warnings.push(Warnings::OutliersDetected(outlier_warning_options)); 430 | } 431 | 432 | if !warnings.is_empty() { 433 | eprintln!(" "); 434 | 435 | for warning in &warnings { 436 | eprintln!(" {}: {}", "Warning".yellow(), warning); 437 | } 438 | } 439 | 440 | if self.options.output_style != OutputStyleOption::Disabled { 441 | println!(" "); 442 | } 443 | 444 | self.run_cleanup_command(self.command.get_parameters().iter().cloned(), output_policy)?; 445 | 446 | Ok(BenchmarkResult { 447 | command: self.command.get_name(), 448 | command_with_unused_parameters: self.command.get_name_with_unused_parameters(), 449 | mean: t_mean, 450 | stddev: t_stddev, 451 | median: t_median, 452 | user: user_mean, 453 | system: system_mean, 454 | min: t_min, 455 | max: t_max, 456 | times: Some(times_real), 457 | memory_usage_byte: Some(memory_usage_byte), 458 | exit_codes, 459 | parameters: self 460 | .command 461 | .get_parameters() 462 | .iter() 463 | .map(|(name, value)| (name.to_string(), value.to_string())) 464 | .collect(), 465 | }) 466 | } 467 | } 468 | -------------------------------------------------------------------------------- /src/benchmark/relative_speed.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | 3 | use 
super::benchmark_result::BenchmarkResult;
4 | use crate::{options::SortOrder, util::units::Scalar};
5 |
6 | #[derive(Debug)]
7 | pub struct BenchmarkResultWithRelativeSpeed<'a> {
8 | pub result: &'a BenchmarkResult,
9 | pub relative_speed: Scalar,
10 | pub relative_speed_stddev: Option<Scalar>,
11 | pub is_reference: bool,
12 | // Less means faster
13 | pub relative_ordering: Ordering,
14 | }
15 |
16 | pub fn compare_mean_time(l: &BenchmarkResult, r: &BenchmarkResult) -> Ordering {
17 | l.mean.partial_cmp(&r.mean).unwrap_or(Ordering::Equal)
18 | }
19 |
20 | pub fn fastest_of(results: &[BenchmarkResult]) -> &BenchmarkResult {
21 | results
22 | .iter()
23 | .min_by(|&l, &r| compare_mean_time(l, r))
24 | .expect("at least one benchmark result")
25 | }
26 |
27 | fn compute_relative_speeds<'a>(
28 | results: &'a [BenchmarkResult],
29 | reference: &'a BenchmarkResult,
30 | sort_order: SortOrder,
31 | ) -> Vec<BenchmarkResultWithRelativeSpeed<'a>> {
32 | let mut results: Vec<_> = results
33 | .iter()
34 | .map(|result| {
35 | let is_reference = result == reference;
36 | let relative_ordering = compare_mean_time(result, reference);
37 |
38 | if result.mean == 0.0 {
39 | return BenchmarkResultWithRelativeSpeed {
40 | result,
41 | relative_speed: if is_reference { 1.0 } else { f64::INFINITY },
42 | relative_speed_stddev: None,
43 | is_reference,
44 | relative_ordering,
45 | };
46 | }
47 |
48 | let ratio = match relative_ordering {
49 | Ordering::Less => reference.mean / result.mean,
50 | Ordering::Equal => 1.0,
51 | Ordering::Greater => result.mean / reference.mean,
52 | };
53 |
54 | // https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
55 | // Covariance assumed to be 0, i.e. variables are assumed to be independent
56 | let ratio_stddev = match (result.stddev, reference.stddev) {
57 | (Some(result_stddev), Some(fastest_stddev)) => Some(
58 | ratio
59 | * ((result_stddev / result.mean).powi(2)
60 | + (fastest_stddev / reference.mean).powi(2))
61 | .sqrt(),
62 | ),
63 | _ => None,
64 | };
65 |
66 | BenchmarkResultWithRelativeSpeed {
67 | result,
68 | relative_speed: ratio,
69 | relative_speed_stddev: ratio_stddev,
70 | is_reference,
71 | relative_ordering,
72 | }
73 | })
74 | .collect();
75 |
76 | match sort_order {
77 | SortOrder::Command => {}
78 | SortOrder::MeanTime => {
79 | results.sort_unstable_by(|r1, r2| compare_mean_time(r1.result, r2.result));
80 | }
81 | }
82 |
83 | results
84 | }
85 |
86 | pub fn compute_with_check_from_reference<'a>(
87 | results: &'a [BenchmarkResult],
88 | reference: &'a BenchmarkResult,
89 | sort_order: SortOrder,
90 | ) -> Option<Vec<BenchmarkResultWithRelativeSpeed<'a>>> {
91 | if fastest_of(results).mean == 0.0 || reference.mean == 0.0 {
92 | return None;
93 | }
94 |
95 | Some(compute_relative_speeds(results, reference, sort_order))
96 | }
97 |
98 | pub fn compute_with_check(
99 | results: &[BenchmarkResult],
100 | sort_order: SortOrder,
101 | ) -> Option<Vec<BenchmarkResultWithRelativeSpeed>> {
102 | let fastest = fastest_of(results);
103 |
104 | if fastest.mean == 0.0 {
105 | return None;
106 | }
107 |
108 | Some(compute_relative_speeds(results, fastest, sort_order))
109 | }
110 |
111 | /// Same as compute_with_check, potentially resulting in relative speeds of infinity
112 | pub fn compute(
113 | results: &[BenchmarkResult],
114 | sort_order: SortOrder,
115 | ) -> Vec<BenchmarkResultWithRelativeSpeed> {
116 | let fastest = fastest_of(results);
117 |
118 | compute_relative_speeds(results, fastest, sort_order)
119 | }
120 |
121 | #[cfg(test)]
122 | fn create_result(name: &str, mean: Scalar) -> BenchmarkResult {
123 | use std::collections::BTreeMap;
124 |
125 | BenchmarkResult {
126 | command:
name.into(), 127 | command_with_unused_parameters: name.into(), 128 | mean, 129 | stddev: Some(1.0), 130 | median: mean, 131 | user: mean, 132 | system: 0.0, 133 | min: mean, 134 | max: mean, 135 | times: None, 136 | memory_usage_byte: None, 137 | exit_codes: Vec::new(), 138 | parameters: BTreeMap::new(), 139 | } 140 | } 141 | 142 | #[test] 143 | fn test_compute_relative_speed() { 144 | use approx::assert_relative_eq; 145 | 146 | let results = vec![ 147 | create_result("cmd1", 3.0), 148 | create_result("cmd2", 2.0), 149 | create_result("cmd3", 5.0), 150 | ]; 151 | 152 | let annotated_results = compute_with_check(&results, SortOrder::Command).unwrap(); 153 | 154 | assert_relative_eq!(1.5, annotated_results[0].relative_speed); 155 | assert_relative_eq!(1.0, annotated_results[1].relative_speed); 156 | assert_relative_eq!(2.5, annotated_results[2].relative_speed); 157 | } 158 | 159 | #[test] 160 | fn test_compute_relative_speed_with_reference() { 161 | use approx::assert_relative_eq; 162 | 163 | let results = vec![create_result("cmd2", 2.0), create_result("cmd3", 5.0)]; 164 | let reference = create_result("cmd2", 4.0); 165 | 166 | let annotated_results = 167 | compute_with_check_from_reference(&results, &reference, SortOrder::Command).unwrap(); 168 | 169 | assert_relative_eq!(2.0, annotated_results[0].relative_speed); 170 | assert_relative_eq!(1.25, annotated_results[1].relative_speed); 171 | } 172 | 173 | #[test] 174 | fn test_compute_relative_speed_for_zero_times() { 175 | let results = vec![create_result("cmd1", 1.0), create_result("cmd2", 0.0)]; 176 | 177 | let annotated_results = compute_with_check(&results, SortOrder::Command); 178 | 179 | assert!(annotated_results.is_none()); 180 | } 181 | -------------------------------------------------------------------------------- /src/benchmark/scheduler.rs: -------------------------------------------------------------------------------- 1 | use super::benchmark_result::BenchmarkResult; 2 | use super::executor::{Executor, MockExecutor, RawExecutor, ShellExecutor}; 3 | use super::{relative_speed, Benchmark}; 4 | use colored::*; 5 | use std::cmp::Ordering; 6 | 7 | use crate::command::{Command, Commands}; 8 | use crate::export::ExportManager; 9 | use crate::options::{ExecutorKind, Options, OutputStyleOption, SortOrder}; 10 | 11 | use anyhow::Result; 12 | 13 | pub struct Scheduler<'a> { 14 | commands: &'a Commands<'a>, 15 | options: &'a Options, 16 | export_manager: &'a ExportManager, 17 | results: Vec, 18 | } 19 | 20 | impl<'a> Scheduler<'a> { 21 | pub fn new( 22 | commands: &'a Commands, 23 | options: &'a Options, 24 | export_manager: &'a ExportManager, 25 | ) -> Self { 26 | Self { 27 | commands, 28 | options, 29 | export_manager, 30 | results: vec![], 31 | } 32 | } 33 | 34 | pub fn run_benchmarks(&mut self) -> Result<()> { 35 | let mut executor: Box = match self.options.executor_kind { 36 | ExecutorKind::Raw => Box::new(RawExecutor::new(self.options)), 37 | ExecutorKind::Mock(ref shell) => Box::new(MockExecutor::new(shell.clone())), 38 | ExecutorKind::Shell(ref shell) => Box::new(ShellExecutor::new(shell, self.options)), 39 | }; 40 | 41 | let reference = self 42 | .options 43 | .reference_command 44 | .as_ref() 45 | .map(|cmd| Command::new(None, cmd)); 46 | 47 | executor.calibrate()?; 48 | 49 | for (number, cmd) in reference.iter().chain(self.commands.iter()).enumerate() { 50 | self.results 51 | .push(Benchmark::new(number, cmd, self.options, &*executor).run()?); 52 | 53 | // We export results after each individual benchmark, because 54 | // we 
would risk losing them if a later benchmark fails. 55 | self.export_manager.write_results(&self.results, true)?; 56 | } 57 | 58 | Ok(()) 59 | } 60 | 61 | pub fn print_relative_speed_comparison(&self) { 62 | if self.options.output_style == OutputStyleOption::Disabled { 63 | return; 64 | } 65 | 66 | if self.results.len() < 2 { 67 | return; 68 | } 69 | 70 | let reference = self 71 | .options 72 | .reference_command 73 | .as_ref() 74 | .map(|_| &self.results[0]) 75 | .unwrap_or_else(|| relative_speed::fastest_of(&self.results)); 76 | 77 | if let Some(annotated_results) = relative_speed::compute_with_check_from_reference( 78 | &self.results, 79 | reference, 80 | self.options.sort_order_speed_comparison, 81 | ) { 82 | match self.options.sort_order_speed_comparison { 83 | SortOrder::MeanTime => { 84 | println!("{}", "Summary".bold()); 85 | 86 | let reference = annotated_results.iter().find(|r| r.is_reference).unwrap(); 87 | let others = annotated_results.iter().filter(|r| !r.is_reference); 88 | 89 | println!( 90 | " {} ran", 91 | reference.result.command_with_unused_parameters.cyan() 92 | ); 93 | 94 | for item in others { 95 | let stddev = if let Some(stddev) = item.relative_speed_stddev { 96 | format!(" ± {}", format!("{:.2}", stddev).green()) 97 | } else { 98 | "".into() 99 | }; 100 | let comparator = match item.relative_ordering { 101 | Ordering::Less => format!( 102 | "{}{} times slower than", 103 | format!("{:8.2}", item.relative_speed).bold().green(), 104 | stddev 105 | ), 106 | Ordering::Greater => format!( 107 | "{}{} times faster than", 108 | format!("{:8.2}", item.relative_speed).bold().green(), 109 | stddev 110 | ), 111 | Ordering::Equal => format!( 112 | " As fast ({}{}) as", 113 | format!("{:.2}", item.relative_speed).bold().green(), 114 | stddev 115 | ), 116 | }; 117 | println!( 118 | "{} {}", 119 | comparator, 120 | &item.result.command_with_unused_parameters.magenta() 121 | ); 122 | } 123 | } 124 | SortOrder::Command => { 125 | println!("{}", "Relative speed comparison".bold()); 126 | 127 | for item in annotated_results { 128 | println!( 129 | " {}{} {}", 130 | format!("{:10.2}", item.relative_speed).bold().green(), 131 | if item.is_reference { 132 | " ".into() 133 | } else if let Some(stddev) = item.relative_speed_stddev { 134 | format!(" ± {}", format!("{stddev:5.2}").green()) 135 | } else { 136 | " ".into() 137 | }, 138 | &item.result.command_with_unused_parameters, 139 | ); 140 | } 141 | } 142 | } 143 | } else { 144 | eprintln!( 145 | "{}: The benchmark comparison could not be computed as some benchmark times are zero. \ 146 | This could be caused by background interference during the initial calibration phase \ 147 | of hyperfine, in combination with very fast commands (faster than a few milliseconds). \ 148 | Try to re-run the benchmark on a quiet system. If you did not do so already, try the \ 149 | --shell=none/-N option. 
If it does not help either, you command is most likely too fast \ 150 | to be accurately benchmarked by hyperfine.", 151 | "Note".bold().red() 152 | ); 153 | } 154 | } 155 | 156 | pub fn final_export(&self) -> Result<()> { 157 | self.export_manager.write_results(&self.results, false) 158 | } 159 | } 160 | 161 | #[cfg(test)] 162 | fn generate_results(args: &[&'static str]) -> Result> { 163 | use crate::cli::get_cli_arguments; 164 | 165 | let args = ["hyperfine", "--debug-mode", "--style=none"] 166 | .iter() 167 | .chain(args); 168 | let cli_arguments = get_cli_arguments(args); 169 | let mut options = Options::from_cli_arguments(&cli_arguments)?; 170 | 171 | assert_eq!(options.executor_kind, ExecutorKind::Mock(None)); 172 | 173 | let commands = Commands::from_cli_arguments(&cli_arguments)?; 174 | let export_manager = ExportManager::from_cli_arguments( 175 | &cli_arguments, 176 | options.time_unit, 177 | options.sort_order_exports, 178 | )?; 179 | 180 | options.validate_against_command_list(&commands)?; 181 | 182 | let mut scheduler = Scheduler::new(&commands, &options, &export_manager); 183 | 184 | scheduler.run_benchmarks()?; 185 | Ok(scheduler.results) 186 | } 187 | 188 | #[test] 189 | fn scheduler_basic() -> Result<()> { 190 | insta::assert_yaml_snapshot!(generate_results(&["--runs=2", "sleep 0.123", "sleep 0.456"])?, @r#" 191 | - command: sleep 0.123 192 | mean: 0.123 193 | stddev: 0 194 | median: 0.123 195 | user: 0 196 | system: 0 197 | min: 0.123 198 | max: 0.123 199 | times: 200 | - 0.123 201 | - 0.123 202 | memory_usage_byte: 203 | - 0 204 | - 0 205 | exit_codes: 206 | - 0 207 | - 0 208 | - command: sleep 0.456 209 | mean: 0.456 210 | stddev: 0 211 | median: 0.456 212 | user: 0 213 | system: 0 214 | min: 0.456 215 | max: 0.456 216 | times: 217 | - 0.456 218 | - 0.456 219 | memory_usage_byte: 220 | - 0 221 | - 0 222 | exit_codes: 223 | - 0 224 | - 0 225 | "#); 226 | 227 | Ok(()) 228 | } 229 | -------------------------------------------------------------------------------- /src/benchmark/timing_result.rs: -------------------------------------------------------------------------------- 1 | use crate::util::units::Second; 2 | 3 | /// Results from timing a single command 4 | #[derive(Debug, Default, Copy, Clone)] 5 | pub struct TimingResult { 6 | /// Wall clock time 7 | pub time_real: Second, 8 | 9 | /// Time spent in user mode 10 | pub time_user: Second, 11 | 12 | /// Time spent in kernel mode 13 | pub time_system: Second, 14 | 15 | /// Maximum amount of memory used, in bytes 16 | pub memory_usage_byte: u64, 17 | } 18 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use std::num::{self, ParseFloatError, ParseIntError}; 2 | 3 | use rust_decimal::Error as DecimalError; 4 | use thiserror::Error; 5 | 6 | #[derive(Debug, Error)] 7 | pub enum ParameterScanError { 8 | #[error("Error while parsing parameter scan arguments ({0})")] 9 | ParseIntError(num::ParseIntError), 10 | #[error("Error while parsing parameter scan arguments ({0})")] 11 | ParseDecimalError(DecimalError), 12 | #[error("Empty parameter range")] 13 | EmptyRange, 14 | #[error("Parameter range is too large")] 15 | TooLarge, 16 | #[error("Zero is not a valid parameter step")] 17 | ZeroStep, 18 | #[error("A step size is required when the range bounds are floating point numbers. 
The step size can be specified with the '-D/--parameter-step-size ' parameter")] 19 | StepRequired, 20 | #[error("'--command-name' has been specified {0} times. It has to appear exactly once, or exactly {1} times (number of benchmarks)")] 21 | UnexpectedCommandNameCount(usize, usize), 22 | } 23 | 24 | impl From for ParameterScanError { 25 | fn from(e: num::ParseIntError) -> ParameterScanError { 26 | ParameterScanError::ParseIntError(e) 27 | } 28 | } 29 | 30 | impl From for ParameterScanError { 31 | fn from(e: DecimalError) -> ParameterScanError { 32 | ParameterScanError::ParseDecimalError(e) 33 | } 34 | } 35 | 36 | #[derive(Debug, Error)] 37 | pub enum OptionsError<'a> { 38 | #[error( 39 | "Conflicting requirements for the number of runs (empty range, min is larger than max)" 40 | )] 41 | EmptyRunsRange, 42 | #[error("Too many --command-name options: Expected {0} at most")] 43 | TooManyCommandNames(usize), 44 | #[error("'--command-name' has been specified {0} times. It has to appear exactly once, or exactly {1} times (number of benchmarks)")] 45 | UnexpectedCommandNameCount(usize, usize), 46 | #[error("Could not read numeric integer argument to '--{0}': {1}")] 47 | IntParsingError(&'a str, ParseIntError), 48 | #[error("Could not read numeric floating point argument to '--{0}': {1}")] 49 | FloatParsingError(&'a str, ParseFloatError), 50 | #[error("An empty command has been specified for the '--shell ' option")] 51 | EmptyShell, 52 | #[error("Failed to parse '--shell ' expression as command line: {0}")] 53 | ShellParseError(shell_words::ParseError), 54 | #[error("Unknown output policy '{0}'. Use './{0}' to output to a file named '{0}'.")] 55 | UnknownOutputPolicy(String), 56 | #[error("The file '{0}' specified as '--input' does not exist")] 57 | StdinDataFileDoesNotExist(String), 58 | } 59 | -------------------------------------------------------------------------------- /src/export/asciidoc.rs: -------------------------------------------------------------------------------- 1 | use super::markup::Alignment; 2 | use crate::export::markup::MarkupExporter; 3 | 4 | #[derive(Default)] 5 | pub struct AsciidocExporter {} 6 | 7 | impl MarkupExporter for AsciidocExporter { 8 | fn table_header(&self, cell_aligmnents: &[Alignment]) -> String { 9 | format!( 10 | "[cols=\"{}\"]\n|===", 11 | cell_aligmnents 12 | .iter() 13 | .map(|a| match a { 14 | Alignment::Left => "<", 15 | Alignment::Right => ">", 16 | }) 17 | .collect::>() 18 | .join(",") 19 | ) 20 | } 21 | 22 | fn table_footer(&self, _cell_aligmnents: &[Alignment]) -> String { 23 | "|===\n".to_string() 24 | } 25 | 26 | fn table_row(&self, cells: &[&str]) -> String { 27 | format!("\n| {} \n", cells.join(" \n| ")) 28 | } 29 | 30 | fn table_divider(&self, _cell_aligmnents: &[Alignment]) -> String { 31 | "".to_string() 32 | } 33 | 34 | fn command(&self, cmd: &str) -> String { 35 | format!("`{cmd}`") 36 | } 37 | } 38 | 39 | /// Check Asciidoc-based data row formatting 40 | #[test] 41 | fn test_asciidoc_exporter_table_data() { 42 | let exporter = AsciidocExporter::default(); 43 | let data = vec!["a", "b", "c"]; 44 | 45 | let actual = exporter.table_row(&data); 46 | let expect = "\n| a \n| b \n| c \n"; 47 | 48 | assert_eq!(expect, actual); 49 | } 50 | 51 | /// Check Asciidoc-based table header formatting 52 | #[test] 53 | fn test_asciidoc_exporter_table_header() { 54 | let exporter = AsciidocExporter::default(); 55 | let cells_alignment = [ 56 | Alignment::Left, 57 | Alignment::Right, 58 | Alignment::Right, 59 | Alignment::Right, 60 | Alignment::Right, 61 
| ]; 62 | 63 | let actual = exporter.table_header(&cells_alignment); 64 | let expect = "[cols=\"<,>,>,>,>\"]\n|==="; 65 | 66 | assert_eq!(expect, actual); 67 | } 68 | -------------------------------------------------------------------------------- /src/export/csv.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | 3 | use csv::WriterBuilder; 4 | 5 | use super::Exporter; 6 | use crate::benchmark::benchmark_result::BenchmarkResult; 7 | use crate::options::SortOrder; 8 | use crate::util::units::Unit; 9 | 10 | use anyhow::Result; 11 | 12 | #[derive(Default)] 13 | pub struct CsvExporter {} 14 | 15 | impl Exporter for CsvExporter { 16 | fn serialize( 17 | &self, 18 | results: &[BenchmarkResult], 19 | _unit: Option, 20 | _sort_order: SortOrder, 21 | ) -> Result> { 22 | let mut writer = WriterBuilder::new().from_writer(vec![]); 23 | 24 | { 25 | let mut headers: Vec> = [ 26 | // The list of times and exit codes cannot be exported to the CSV file - omit them. 27 | "command", "mean", "stddev", "median", "user", "system", "min", "max", 28 | ] 29 | .iter() 30 | .map(|x| Cow::Borrowed(x.as_bytes())) 31 | .collect(); 32 | if let Some(res) = results.first() { 33 | for param_name in res.parameters.keys() { 34 | headers.push(Cow::Owned(format!("parameter_{param_name}").into_bytes())); 35 | } 36 | } 37 | writer.write_record(headers)?; 38 | } 39 | 40 | for res in results { 41 | let mut fields = vec![Cow::Borrowed(res.command.as_bytes())]; 42 | for f in &[ 43 | res.mean, 44 | res.stddev.unwrap_or(0.0), 45 | res.median, 46 | res.user, 47 | res.system, 48 | res.min, 49 | res.max, 50 | ] { 51 | fields.push(Cow::Owned(f.to_string().into_bytes())) 52 | } 53 | for v in res.parameters.values() { 54 | fields.push(Cow::Borrowed(v.as_bytes())) 55 | } 56 | writer.write_record(fields)?; 57 | } 58 | 59 | Ok(writer.into_inner()?) 
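// Illustrative sketch (not part of the original source): for the two
// parametrized results in the test below, the serialized CSV starts with
//
//   command,mean,stddev,median,user,system,min,max,parameter_bar,parameter_foo
//   command_a,1,2,1,3,4,5,6,two,one
//
// The per-run times and exit codes are deliberately not part of this
// format (see the header comment above).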
60 | } 61 | } 62 | 63 | #[test] 64 | fn test_csv() { 65 | use std::collections::BTreeMap; 66 | let exporter = CsvExporter::default(); 67 | 68 | let results = vec![ 69 | BenchmarkResult { 70 | command: String::from("command_a"), 71 | command_with_unused_parameters: String::from("command_a"), 72 | mean: 1.0, 73 | stddev: Some(2.0), 74 | median: 1.0, 75 | user: 3.0, 76 | system: 4.0, 77 | min: 5.0, 78 | max: 6.0, 79 | times: Some(vec![7.0, 8.0, 9.0]), 80 | memory_usage_byte: None, 81 | exit_codes: vec![Some(0), Some(0), Some(0)], 82 | parameters: { 83 | let mut params = BTreeMap::new(); 84 | params.insert("foo".into(), "one".into()); 85 | params.insert("bar".into(), "two".into()); 86 | params 87 | }, 88 | }, 89 | BenchmarkResult { 90 | command: String::from("command_b"), 91 | command_with_unused_parameters: String::from("command_b"), 92 | mean: 11.0, 93 | stddev: Some(12.0), 94 | median: 11.0, 95 | user: 13.0, 96 | system: 14.0, 97 | min: 15.0, 98 | max: 16.5, 99 | times: Some(vec![17.0, 18.0, 19.0]), 100 | memory_usage_byte: None, 101 | exit_codes: vec![Some(0), Some(0), Some(0)], 102 | parameters: { 103 | let mut params = BTreeMap::new(); 104 | params.insert("foo".into(), "one".into()); 105 | params.insert("bar".into(), "seven".into()); 106 | params 107 | }, 108 | }, 109 | ]; 110 | 111 | let actual = String::from_utf8( 112 | exporter 113 | .serialize(&results, Some(Unit::Second), SortOrder::Command) 114 | .unwrap(), 115 | ) 116 | .unwrap(); 117 | 118 | insta::assert_snapshot!(actual, @r#" 119 | command,mean,stddev,median,user,system,min,max,parameter_bar,parameter_foo 120 | command_a,1,2,1,3,4,5,6,two,one 121 | command_b,11,12,11,13,14,15,16.5,seven,one 122 | "#); 123 | } 124 | -------------------------------------------------------------------------------- /src/export/json.rs: -------------------------------------------------------------------------------- 1 | use serde::*; 2 | use serde_json::to_vec_pretty; 3 | 4 | use super::Exporter; 5 | use crate::benchmark::benchmark_result::BenchmarkResult; 6 | use crate::options::SortOrder; 7 | use crate::util::units::Unit; 8 | 9 | use anyhow::Result; 10 | 11 | #[derive(Serialize, Debug)] 12 | struct HyperfineSummary<'a> { 13 | results: &'a [BenchmarkResult], 14 | } 15 | 16 | #[derive(Default)] 17 | pub struct JsonExporter {} 18 | 19 | impl Exporter for JsonExporter { 20 | fn serialize( 21 | &self, 22 | results: &[BenchmarkResult], 23 | _unit: Option, 24 | _sort_order: SortOrder, 25 | ) -> Result> { 26 | let mut output = to_vec_pretty(&HyperfineSummary { results }); 27 | if let Ok(ref mut content) = output { 28 | content.push(b'\n'); 29 | } 30 | 31 | Ok(output?) 
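// Illustrative sketch (not part of the original source): serde derives the
// field names from `HyperfineSummary` and `BenchmarkResult`, so the emitted
// document has roughly this shape:
//
//   { "results": [ { "command": "...", "mean": ..., "stddev": ..., ... } ] }
//
// The trailing newline appended above keeps the output friendly to
// line-oriented tools.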
32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/export/markdown.rs: -------------------------------------------------------------------------------- 1 | use crate::export::markup::MarkupExporter; 2 | 3 | use super::markup::Alignment; 4 | 5 | #[derive(Default)] 6 | pub struct MarkdownExporter {} 7 | 8 | impl MarkupExporter for MarkdownExporter { 9 | fn table_row(&self, cells: &[&str]) -> String { 10 | format!("| {} |\n", cells.join(" | ")) 11 | } 12 | 13 | fn table_divider(&self, cell_aligmnents: &[Alignment]) -> String { 14 | format!( 15 | "|{}\n", 16 | cell_aligmnents 17 | .iter() 18 | .map(|a| match a { 19 | Alignment::Left => ":---|", 20 | Alignment::Right => "---:|", 21 | }) 22 | .collect::() 23 | ) 24 | } 25 | 26 | fn command(&self, cmd: &str) -> String { 27 | format!("`{cmd}`") 28 | } 29 | } 30 | 31 | /// Check Markdown-based data row formatting 32 | #[test] 33 | fn test_markdown_formatter_table_data() { 34 | let formatter = MarkdownExporter::default(); 35 | 36 | assert_eq!(formatter.table_row(&["a", "b", "c"]), "| a | b | c |\n"); 37 | } 38 | 39 | /// Check Markdown-based horizontal line formatting 40 | #[test] 41 | fn test_markdown_formatter_table_divider() { 42 | let formatter = MarkdownExporter::default(); 43 | 44 | let divider = formatter.table_divider(&[Alignment::Left, Alignment::Right, Alignment::Left]); 45 | assert_eq!(divider, "|:---|---:|:---|\n"); 46 | } 47 | -------------------------------------------------------------------------------- /src/export/markup.rs: -------------------------------------------------------------------------------- 1 | use crate::benchmark::relative_speed::BenchmarkResultWithRelativeSpeed; 2 | use crate::benchmark::{benchmark_result::BenchmarkResult, relative_speed}; 3 | use crate::options::SortOrder; 4 | use crate::output::format::format_duration_value; 5 | use crate::util::units::Unit; 6 | 7 | use super::Exporter; 8 | use anyhow::Result; 9 | 10 | pub enum Alignment { 11 | Left, 12 | Right, 13 | } 14 | 15 | pub trait MarkupExporter { 16 | fn table_results(&self, entries: &[BenchmarkResultWithRelativeSpeed], unit: Unit) -> String { 17 | // prepare table header strings 18 | let notation = format!("[{}]", unit.short_name()); 19 | 20 | // prepare table cells alignment 21 | let cells_alignment = [ 22 | Alignment::Left, 23 | Alignment::Right, 24 | Alignment::Right, 25 | Alignment::Right, 26 | Alignment::Right, 27 | ]; 28 | 29 | // emit table header format 30 | let mut table = self.table_header(&cells_alignment); 31 | 32 | // emit table header data 33 | table.push_str(&self.table_row(&[ 34 | "Command", 35 | &format!("Mean {notation}"), 36 | &format!("Min {notation}"), 37 | &format!("Max {notation}"), 38 | "Relative", 39 | ])); 40 | 41 | // emit horizontal line 42 | table.push_str(&self.table_divider(&cells_alignment)); 43 | 44 | for entry in entries { 45 | let measurement = &entry.result; 46 | // prepare data row strings 47 | let cmd_str = measurement 48 | .command_with_unused_parameters 49 | .replace('|', "\\|"); 50 | let mean_str = format_duration_value(measurement.mean, Some(unit)).0; 51 | let stddev_str = if let Some(stddev) = measurement.stddev { 52 | format!(" ± {}", format_duration_value(stddev, Some(unit)).0) 53 | } else { 54 | "".into() 55 | }; 56 | let min_str = format_duration_value(measurement.min, Some(unit)).0; 57 | let max_str = format_duration_value(measurement.max, Some(unit)).0; 58 | let rel_str = format!("{:.2}", entry.relative_speed); 59 | let rel_stddev_str = if 
entry.is_reference { 60 | "".into() 61 | } else if let Some(stddev) = entry.relative_speed_stddev { 62 | format!(" ± {stddev:.2}") 63 | } else { 64 | "".into() 65 | }; 66 | 67 | // prepare table row entries 68 | table.push_str(&self.table_row(&[ 69 | &self.command(&cmd_str), 70 | &format!("{mean_str}{stddev_str}"), 71 | &min_str, 72 | &max_str, 73 | &format!("{rel_str}{rel_stddev_str}"), 74 | ])) 75 | } 76 | 77 | // emit table footer format 78 | table.push_str(&self.table_footer(&cells_alignment)); 79 | 80 | table 81 | } 82 | 83 | fn table_row(&self, cells: &[&str]) -> String; 84 | 85 | fn table_divider(&self, cell_aligmnents: &[Alignment]) -> String; 86 | 87 | fn table_header(&self, _cell_aligmnents: &[Alignment]) -> String { 88 | "".to_string() 89 | } 90 | 91 | fn table_footer(&self, _cell_aligmnents: &[Alignment]) -> String { 92 | "".to_string() 93 | } 94 | 95 | fn command(&self, size: &str) -> String; 96 | } 97 | 98 | fn determine_unit_from_results(results: &[BenchmarkResult]) -> Unit { 99 | if let Some(first_result) = results.first() { 100 | // Use the first BenchmarkResult entry to determine the unit for all entries. 101 | format_duration_value(first_result.mean, None).1 102 | } else { 103 | // Default to `Second`. 104 | Unit::Second 105 | } 106 | } 107 | 108 | impl Exporter for T { 109 | fn serialize( 110 | &self, 111 | results: &[BenchmarkResult], 112 | unit: Option, 113 | sort_order: SortOrder, 114 | ) -> Result> { 115 | let unit = unit.unwrap_or_else(|| determine_unit_from_results(results)); 116 | let entries = relative_speed::compute(results, sort_order); 117 | 118 | let table = self.table_results(&entries, unit); 119 | Ok(table.as_bytes().to_vec()) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/export/mod.rs: -------------------------------------------------------------------------------- 1 | use std::fs::{File, OpenOptions}; 2 | use std::io::Write; 3 | 4 | mod asciidoc; 5 | mod csv; 6 | mod json; 7 | mod markdown; 8 | mod markup; 9 | mod orgmode; 10 | #[cfg(test)] 11 | mod tests; 12 | 13 | use self::asciidoc::AsciidocExporter; 14 | use self::csv::CsvExporter; 15 | use self::json::JsonExporter; 16 | use self::markdown::MarkdownExporter; 17 | use self::orgmode::OrgmodeExporter; 18 | 19 | use crate::benchmark::benchmark_result::BenchmarkResult; 20 | use crate::options::SortOrder; 21 | use crate::util::units::Unit; 22 | 23 | use anyhow::{Context, Result}; 24 | use clap::ArgMatches; 25 | 26 | /// The desired form of exporter to use for a given file. 27 | #[derive(Clone)] 28 | pub enum ExportType { 29 | /// Asciidoc Table 30 | Asciidoc, 31 | 32 | /// CSV (comma separated values) format 33 | Csv, 34 | 35 | /// JSON format 36 | Json, 37 | 38 | /// Markdown table 39 | Markdown, 40 | 41 | /// Emacs org-mode tables 42 | Orgmode, 43 | } 44 | 45 | /// Interface for different exporters. 46 | trait Exporter { 47 | /// Export the given entries in the serialized form. 48 | fn serialize( 49 | &self, 50 | results: &[BenchmarkResult], 51 | unit: Option, 52 | sort_order: SortOrder, 53 | ) -> Result>; 54 | } 55 | 56 | pub enum ExportTarget { 57 | File(String), 58 | Stdout, 59 | } 60 | 61 | struct ExporterWithTarget { 62 | exporter: Box, 63 | target: ExportTarget, 64 | } 65 | 66 | /// Handles the management of multiple file exporters. 
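// Illustrative usage sketch (not part of the original source; see main.rs
// and scheduler.rs for the real call sites):
//
//   let manager = ExportManager::from_cli_arguments(&matches, time_unit, sort_order)?;
//   manager.write_results(&results, true)?;  // intermediate: file targets only
//   manager.write_results(&results, false)?; // final: stdout targets only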
67 | pub struct ExportManager {
68 | exporters: Vec<ExporterWithTarget>,
69 | time_unit: Option<Unit>,
70 | sort_order: SortOrder,
71 | }
72 |
73 | impl ExportManager {
74 | /// Build the ExportManager that will export the results specified
75 | /// in the given ArgMatches
76 | pub fn from_cli_arguments(
77 | matches: &ArgMatches,
78 | time_unit: Option<Unit>,
79 | sort_order: SortOrder,
80 | ) -> Result<Self> {
81 | let mut export_manager = Self {
82 | exporters: vec![],
83 | time_unit,
84 | sort_order,
85 | };
86 | {
87 | let mut add_exporter = |flag, exporttype| -> Result<()> {
88 | if let Some(filename) = matches.get_one::<String>(flag) {
89 | export_manager.add_exporter(exporttype, filename)?;
90 | }
91 | Ok(())
92 | };
93 | add_exporter("export-asciidoc", ExportType::Asciidoc)?;
94 | add_exporter("export-json", ExportType::Json)?;
95 | add_exporter("export-csv", ExportType::Csv)?;
96 | add_exporter("export-markdown", ExportType::Markdown)?;
97 | add_exporter("export-orgmode", ExportType::Orgmode)?;
98 | }
99 | Ok(export_manager)
100 | }
101 |
102 | /// Add an additional exporter to the ExportManager
103 | pub fn add_exporter(&mut self, export_type: ExportType, filename: &str) -> Result<()> {
104 | let exporter: Box<dyn Exporter> = match export_type {
105 | ExportType::Asciidoc => Box::<AsciidocExporter>::default(),
106 | ExportType::Csv => Box::<CsvExporter>::default(),
107 | ExportType::Json => Box::<JsonExporter>::default(),
108 | ExportType::Markdown => Box::<MarkdownExporter>::default(),
109 | ExportType::Orgmode => Box::<OrgmodeExporter>::default(),
110 | };
111 |
112 | self.exporters.push(ExporterWithTarget {
113 | exporter,
114 | target: if filename == "-" {
115 | ExportTarget::Stdout
116 | } else {
117 | let _ = File::create(filename)
118 | .with_context(|| format!("Could not create export file '{filename}'"))?;
119 | ExportTarget::File(filename.to_string())
120 | },
121 | });
122 |
123 | Ok(())
124 | }
125 |
126 | /// Write the given results to all Exporters. The 'intermediate' flag specifies
127 | /// whether this is being called while still performing benchmarks, or if this
128 | /// is the final call after all benchmarks have been finished. In the former case,
129 | /// results are written to all file targets (to always have them up to date, even
130 | /// if a benchmark fails). In the latter case, we only print to stdout targets (in
131 | /// order not to clutter the output of hyperfine with intermediate results).
132 | pub fn write_results(&self, results: &[BenchmarkResult], intermediate: bool) -> Result<()> {
133 | for e in &self.exporters {
134 | let content = || {
135 | e.exporter
136 | .serialize(results, self.time_unit, self.sort_order)
137 | };
138 |
139 | match e.target {
140 | ExportTarget::File(ref filename) => {
141 | if intermediate {
142 | write_to_file(filename, &content()?)?
143 | }
144 | }
145 | ExportTarget::Stdout => {
146 | if !intermediate {
147 | println!();
148 | println!("{}", String::from_utf8(content()?).unwrap());
149 | }
150 | }
151 | }
152 | }
153 | Ok(())
154 | }
155 | }
156 |
157 | /// Write the given content to a file with the specified name
158 | fn write_to_file(filename: &str, content: &[u8]) -> Result<()> {
159 | let mut file = OpenOptions::new().write(true).open(filename)?;
160 | file.write_all(content)
161 | .with_context(|| format!("Failed to export results to '{filename}'"))
162 | }
163 |
--------------------------------------------------------------------------------
/src/export/orgmode.rs:
--------------------------------------------------------------------------------
1 | use super::markup::Alignment;
2 | use crate::export::markup::MarkupExporter;
3 |
4 | #[derive(Default)]
5 | pub struct OrgmodeExporter {}
6 |
7 | impl MarkupExporter for OrgmodeExporter {
8 | fn table_row(&self, cells: &[&str]) -> String {
9 | format!(
10 | "| {} | {} |\n",
11 | cells.first().unwrap(),
12 | &cells[1..].join(" | ")
13 | )
14 | }
15 |
16 | fn table_divider(&self, cell_aligmnents: &[Alignment]) -> String {
17 | format!("|{}--|\n", "--+".repeat(cell_aligmnents.len() - 1))
18 | }
19 |
20 | fn command(&self, cmd: &str) -> String {
21 | format!("={cmd}=")
22 | }
23 | }
24 |
25 | /// Check Emacs org-mode data row formatting
26 | #[test]
27 | fn test_orgmode_formatter_table_data() {
28 | let exporter = OrgmodeExporter::default();
29 |
30 | let actual = exporter.table_row(&["a", "b", "c"]);
31 | let expect = "| a | b | c |\n";
32 |
33 | assert_eq!(expect, actual);
34 | }
35 |
36 | /// Check Emacs org-mode horizontal line formatting
37 | #[test]
38 | fn test_orgmode_formatter_table_line() {
39 | let exporter = OrgmodeExporter::default();
40 |
41 | let actual = exporter.table_divider(&[
42 | Alignment::Left,
43 | Alignment::Left,
44 | Alignment::Left,
45 | Alignment::Left,
46 | Alignment::Left,
47 | ]);
48 | let expect = "|--+--+--+--+--|\n";
49 |
50 | assert_eq!(expect, actual);
51 | }
52 |
--------------------------------------------------------------------------------
/src/export/tests.rs:
--------------------------------------------------------------------------------
1 | use super::Exporter;
2 | use crate::benchmark::benchmark_result::BenchmarkResult;
3 | use crate::export::asciidoc::AsciidocExporter;
4 | use crate::export::orgmode::OrgmodeExporter;
5 | use crate::util::units::Unit;
6 | use crate::{export::markdown::MarkdownExporter, options::SortOrder};
7 | use std::collections::BTreeMap;
8 |
9 | fn get_output<E: Exporter + Default>(
10 | results: &[BenchmarkResult],
11 | unit: Option<Unit>,
12 | sort_order: SortOrder,
13 | ) -> String {
14 | let exporter = E::default();
15 | String::from_utf8(exporter.serialize(results, unit, sort_order).unwrap()).unwrap()
16 | }
17 |
18 | /// Ensure the markup output includes the table header and the multiple
19 | /// benchmark results as a table. The list of actual times is not included
20 | /// in the output.
21 | ///
22 | /// This also demonstrates that the first entry's units (ms) are used to set
23 | /// the units for all entries when the time unit is not specified.
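// Illustrative sketch (not part of the original source): the fallback unit
// comes from `determine_unit_from_results` in markup.rs, which formats the
// *first* result's mean with an automatic unit, e.g.
//
//   format_duration_value(0.1057, None) -> ("105.7", Unit::MilliSecond)
//
// so every row of the table below is rendered in milliseconds.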
24 | #[test] 25 | fn test_markup_export_auto_ms() { 26 | let results = [ 27 | BenchmarkResult { 28 | command: String::from("sleep 0.1"), 29 | command_with_unused_parameters: String::from("sleep 0.1"), 30 | mean: 0.1057, 31 | stddev: Some(0.0016), 32 | median: 0.1057, 33 | user: 0.0009, 34 | system: 0.0011, 35 | min: 0.1023, 36 | max: 0.1080, 37 | times: Some(vec![0.1, 0.1, 0.1]), 38 | memory_usage_byte: None, 39 | exit_codes: vec![Some(0), Some(0), Some(0)], 40 | parameters: BTreeMap::new(), 41 | }, 42 | BenchmarkResult { 43 | command: String::from("sleep 2"), 44 | command_with_unused_parameters: String::from("sleep 2"), 45 | mean: 2.0050, 46 | stddev: Some(0.0020), 47 | median: 2.0050, 48 | user: 0.0009, 49 | system: 0.0012, 50 | min: 2.0020, 51 | max: 2.0080, 52 | times: Some(vec![2.0, 2.0, 2.0]), 53 | memory_usage_byte: None, 54 | exit_codes: vec![Some(0), Some(0), Some(0)], 55 | parameters: BTreeMap::new(), 56 | }, 57 | ]; 58 | 59 | insta::assert_snapshot!(get_output::(&results, None, SortOrder::Command), @r#" 60 | | Command | Mean [ms] | Min [ms] | Max [ms] | Relative | 61 | |:---|---:|---:|---:|---:| 62 | | `sleep 0.1` | 105.7 ± 1.6 | 102.3 | 108.0 | 1.00 | 63 | | `sleep 2` | 2005.0 ± 2.0 | 2002.0 | 2008.0 | 18.97 ± 0.29 | 64 | "#); 65 | 66 | insta::assert_snapshot!(get_output::(&results, None, SortOrder::Command), @r#" 67 | [cols="<,>,>,>,>"] 68 | |=== 69 | | Command 70 | | Mean [ms] 71 | | Min [ms] 72 | | Max [ms] 73 | | Relative 74 | 75 | | `sleep 0.1` 76 | | 105.7 ± 1.6 77 | | 102.3 78 | | 108.0 79 | | 1.00 80 | 81 | | `sleep 2` 82 | | 2005.0 ± 2.0 83 | | 2002.0 84 | | 2008.0 85 | | 18.97 ± 0.29 86 | |=== 87 | "#); 88 | 89 | insta::assert_snapshot!(get_output::(&results, None, SortOrder::Command), @r#" 90 | | Command | Mean [ms] | Min [ms] | Max [ms] | Relative | 91 | |--+--+--+--+--| 92 | | =sleep 0.1= | 105.7 ± 1.6 | 102.3 | 108.0 | 1.00 | 93 | | =sleep 2= | 2005.0 ± 2.0 | 2002.0 | 2008.0 | 18.97 ± 0.29 | 94 | "#); 95 | } 96 | 97 | /// This (again) demonstrates that the first entry's units (s) are used to set 98 | /// the units for all entries when the time unit is not given. 
99 | #[test] 100 | fn test_markup_export_auto_s() { 101 | let results = [ 102 | BenchmarkResult { 103 | command: String::from("sleep 2"), 104 | command_with_unused_parameters: String::from("sleep 2"), 105 | mean: 2.0050, 106 | stddev: Some(0.0020), 107 | median: 2.0050, 108 | user: 0.0009, 109 | system: 0.0012, 110 | min: 2.0020, 111 | max: 2.0080, 112 | times: Some(vec![2.0, 2.0, 2.0]), 113 | memory_usage_byte: None, 114 | exit_codes: vec![Some(0), Some(0), Some(0)], 115 | parameters: BTreeMap::new(), 116 | }, 117 | BenchmarkResult { 118 | command: String::from("sleep 0.1"), 119 | command_with_unused_parameters: String::from("sleep 0.1"), 120 | mean: 0.1057, 121 | stddev: Some(0.0016), 122 | median: 0.1057, 123 | user: 0.0009, 124 | system: 0.0011, 125 | min: 0.1023, 126 | max: 0.1080, 127 | times: Some(vec![0.1, 0.1, 0.1]), 128 | memory_usage_byte: None, 129 | exit_codes: vec![Some(0), Some(0), Some(0)], 130 | parameters: BTreeMap::new(), 131 | }, 132 | ]; 133 | 134 | insta::assert_snapshot!(get_output::(&results, None, SortOrder::Command), @r#" 135 | | Command | Mean [s] | Min [s] | Max [s] | Relative | 136 | |:---|---:|---:|---:|---:| 137 | | `sleep 2` | 2.005 ± 0.002 | 2.002 | 2.008 | 18.97 ± 0.29 | 138 | | `sleep 0.1` | 0.106 ± 0.002 | 0.102 | 0.108 | 1.00 | 139 | "#); 140 | 141 | insta::assert_snapshot!(get_output::(&results, None, SortOrder::Command), @r#" 142 | [cols="<,>,>,>,>"] 143 | |=== 144 | | Command 145 | | Mean [s] 146 | | Min [s] 147 | | Max [s] 148 | | Relative 149 | 150 | | `sleep 2` 151 | | 2.005 ± 0.002 152 | | 2.002 153 | | 2.008 154 | | 18.97 ± 0.29 155 | 156 | | `sleep 0.1` 157 | | 0.106 ± 0.002 158 | | 0.102 159 | | 0.108 160 | | 1.00 161 | |=== 162 | "#); 163 | 164 | insta::assert_snapshot!(get_output::(&results, None, SortOrder::Command), @r#" 165 | | Command | Mean [s] | Min [s] | Max [s] | Relative | 166 | |--+--+--+--+--| 167 | | =sleep 2= | 2.005 ± 0.002 | 2.002 | 2.008 | 18.97 ± 0.29 | 168 | | =sleep 0.1= | 0.106 ± 0.002 | 0.102 | 0.108 | 1.00 | 169 | "#); 170 | } 171 | 172 | /// This (again) demonstrates that the given time unit (ms) is used to set 173 | /// the units for all entries. 
174 | #[test] 175 | fn test_markup_export_manual_ms() { 176 | let timing_results = [ 177 | BenchmarkResult { 178 | command: String::from("sleep 2"), 179 | command_with_unused_parameters: String::from("sleep 2"), 180 | mean: 2.0050, 181 | stddev: Some(0.0020), 182 | median: 2.0050, 183 | user: 0.0009, 184 | system: 0.0012, 185 | min: 2.0020, 186 | max: 2.0080, 187 | times: Some(vec![2.0, 2.0, 2.0]), 188 | memory_usage_byte: None, 189 | exit_codes: vec![Some(0), Some(0), Some(0)], 190 | parameters: BTreeMap::new(), 191 | }, 192 | BenchmarkResult { 193 | command: String::from("sleep 0.1"), 194 | command_with_unused_parameters: String::from("sleep 0.1"), 195 | mean: 0.1057, 196 | stddev: Some(0.0016), 197 | median: 0.1057, 198 | user: 0.0009, 199 | system: 0.0011, 200 | min: 0.1023, 201 | max: 0.1080, 202 | times: Some(vec![0.1, 0.1, 0.1]), 203 | memory_usage_byte: None, 204 | exit_codes: vec![Some(0), Some(0), Some(0)], 205 | parameters: BTreeMap::new(), 206 | }, 207 | ]; 208 | 209 | insta::assert_snapshot!(get_output::(&timing_results, Some(Unit::MilliSecond), SortOrder::Command), @r#" 210 | | Command | Mean [ms] | Min [ms] | Max [ms] | Relative | 211 | |:---|---:|---:|---:|---:| 212 | | `sleep 2` | 2005.0 ± 2.0 | 2002.0 | 2008.0 | 18.97 ± 0.29 | 213 | | `sleep 0.1` | 105.7 ± 1.6 | 102.3 | 108.0 | 1.00 | 214 | "#); 215 | 216 | insta::assert_snapshot!(get_output::(&timing_results, Some(Unit::MilliSecond), SortOrder::Command), @r#" 217 | [cols="<,>,>,>,>"] 218 | |=== 219 | | Command 220 | | Mean [ms] 221 | | Min [ms] 222 | | Max [ms] 223 | | Relative 224 | 225 | | `sleep 2` 226 | | 2005.0 ± 2.0 227 | | 2002.0 228 | | 2008.0 229 | | 18.97 ± 0.29 230 | 231 | | `sleep 0.1` 232 | | 105.7 ± 1.6 233 | | 102.3 234 | | 108.0 235 | | 1.00 236 | |=== 237 | "#); 238 | 239 | insta::assert_snapshot!(get_output::(&timing_results, Some(Unit::MilliSecond), SortOrder::Command), @r#" 240 | | Command | Mean [ms] | Min [ms] | Max [ms] | Relative | 241 | |--+--+--+--+--| 242 | | =sleep 2= | 2005.0 ± 2.0 | 2002.0 | 2008.0 | 18.97 ± 0.29 | 243 | | =sleep 0.1= | 105.7 ± 1.6 | 102.3 | 108.0 | 1.00 | 244 | "#); 245 | } 246 | 247 | /// The given time unit (s) is used to set the units for all entries. 
248 | #[test] 249 | fn test_markup_export_manual_s() { 250 | let results = [ 251 | BenchmarkResult { 252 | command: String::from("sleep 2"), 253 | command_with_unused_parameters: String::from("sleep 2"), 254 | mean: 2.0050, 255 | stddev: Some(0.0020), 256 | median: 2.0050, 257 | user: 0.0009, 258 | system: 0.0012, 259 | min: 2.0020, 260 | max: 2.0080, 261 | times: Some(vec![2.0, 2.0, 2.0]), 262 | memory_usage_byte: None, 263 | exit_codes: vec![Some(0), Some(0), Some(0)], 264 | parameters: BTreeMap::new(), 265 | }, 266 | BenchmarkResult { 267 | command: String::from("sleep 0.1"), 268 | command_with_unused_parameters: String::from("sleep 0.1"), 269 | mean: 0.1057, 270 | stddev: Some(0.0016), 271 | median: 0.1057, 272 | user: 0.0009, 273 | system: 0.0011, 274 | min: 0.1023, 275 | max: 0.1080, 276 | times: Some(vec![0.1, 0.1, 0.1]), 277 | memory_usage_byte: None, 278 | exit_codes: vec![Some(0), Some(0), Some(0)], 279 | parameters: BTreeMap::new(), 280 | }, 281 | ]; 282 | 283 | insta::assert_snapshot!(get_output::(&results, Some(Unit::Second), SortOrder::Command), @r#" 284 | | Command | Mean [s] | Min [s] | Max [s] | Relative | 285 | |:---|---:|---:|---:|---:| 286 | | `sleep 2` | 2.005 ± 0.002 | 2.002 | 2.008 | 18.97 ± 0.29 | 287 | | `sleep 0.1` | 0.106 ± 0.002 | 0.102 | 0.108 | 1.00 | 288 | "#); 289 | 290 | insta::assert_snapshot!(get_output::(&results, Some(Unit::Second), SortOrder::MeanTime), @r#" 291 | | Command | Mean [s] | Min [s] | Max [s] | Relative | 292 | |:---|---:|---:|---:|---:| 293 | | `sleep 0.1` | 0.106 ± 0.002 | 0.102 | 0.108 | 1.00 | 294 | | `sleep 2` | 2.005 ± 0.002 | 2.002 | 2.008 | 18.97 ± 0.29 | 295 | "#); 296 | 297 | insta::assert_snapshot!(get_output::(&results, Some(Unit::Second), SortOrder::Command), @r#" 298 | [cols="<,>,>,>,>"] 299 | |=== 300 | | Command 301 | | Mean [s] 302 | | Min [s] 303 | | Max [s] 304 | | Relative 305 | 306 | | `sleep 2` 307 | | 2.005 ± 0.002 308 | | 2.002 309 | | 2.008 310 | | 18.97 ± 0.29 311 | 312 | | `sleep 0.1` 313 | | 0.106 ± 0.002 314 | | 0.102 315 | | 0.108 316 | | 1.00 317 | |=== 318 | "#); 319 | } 320 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr( 2 | all(windows, feature = "windows_process_extensions_main_thread_handle"), 3 | feature(windows_process_extensions_main_thread_handle) 4 | )] 5 | 6 | use std::env; 7 | 8 | use benchmark::scheduler::Scheduler; 9 | use cli::get_cli_arguments; 10 | use command::Commands; 11 | use export::ExportManager; 12 | use options::Options; 13 | 14 | use anyhow::Result; 15 | use colored::*; 16 | 17 | pub mod benchmark; 18 | pub mod cli; 19 | pub mod command; 20 | pub mod error; 21 | pub mod export; 22 | pub mod options; 23 | pub mod outlier_detection; 24 | pub mod output; 25 | pub mod parameter; 26 | pub mod timer; 27 | pub mod util; 28 | 29 | fn run() -> Result<()> { 30 | // Enabled ANSI colors on Windows 10 31 | #[cfg(windows)] 32 | colored::control::set_virtual_terminal(true).unwrap(); 33 | 34 | let cli_arguments = get_cli_arguments(env::args_os()); 35 | let mut options = Options::from_cli_arguments(&cli_arguments)?; 36 | let commands = Commands::from_cli_arguments(&cli_arguments)?; 37 | let export_manager = ExportManager::from_cli_arguments( 38 | &cli_arguments, 39 | options.time_unit, 40 | options.sort_order_exports, 41 | )?; 42 | 43 | options.validate_against_command_list(&commands)?; 44 | 45 | let mut scheduler = Scheduler::new(&commands, &options, 
&export_manager);
46 | scheduler.run_benchmarks()?;
47 | scheduler.print_relative_speed_comparison();
48 | scheduler.final_export()?;
49 |
50 | Ok(())
51 | }
52 |
53 | fn main() {
54 | match run() {
55 | Ok(_) => {}
56 | Err(e) => {
57 | eprintln!("{} {:#}", "Error:".red(), e);
58 | std::process::exit(1);
59 | }
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/src/outlier_detection.rs:
--------------------------------------------------------------------------------
1 | //! A module for statistical outlier detection.
2 | //!
3 | //! References:
4 | //! - Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and Handle Outliers",
5 | //! The ASQC Basic References in Quality Control: Statistical Techniques, Edward F. Mykytka,
6 | //! Ph.D., Editor.
7 |
8 | use statistical::median;
9 |
10 | /// Minimum modified Z-score for a datapoint to be an outlier. Here, 1.4826 is a factor that
11 | /// converts the MAD to an estimator for the standard deviation. The second factor is the number
12 | /// of standard deviations.
13 | pub const OUTLIER_THRESHOLD: f64 = 1.4826 * 10.0;
14 |
15 | /// Compute modified Z-scores for a given sample. An (unmodified) Z-score is defined by
16 | /// `(x_i - x_mean)/x_stddev` whereas the modified Z-score is defined by `(x_i - x_median)/MAD`
17 | /// where MAD is the median absolute deviation.
18 | ///
19 | /// References:
20 | /// -
21 | pub fn modified_zscores(xs: &[f64]) -> Vec<f64> {
22 | assert!(!xs.is_empty());
23 |
24 | // Compute sample median:
25 | let x_median = median(xs);
26 |
27 | // Compute the absolute deviations from the median:
28 | let deviations: Vec<f64> = xs.iter().map(|x| (x - x_median).abs()).collect();
29 |
30 | // Compute median absolute deviation:
31 | let mad = median(&deviations);
32 |
33 | // Handle MAD == 0 case
34 | let mad = if mad > 0.0 { mad } else { f64::EPSILON };
35 |
36 | // Compute modified Z-scores (x_i - x_median) / MAD
37 | xs.iter().map(|&x| (x - x_median) / mad).collect()
38 | }
39 |
40 | /// Return the number of outliers in a given sample. Outliers are defined as data points with a
41 | /// modified Z-score that is larger than `OUTLIER_THRESHOLD`.
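// Worked example (illustrative, not part of the original source): for the
// sample [1.0, 2.0, 3.0, 4.0] the median is 2.5, the absolute deviations are
// [1.5, 0.5, 0.5, 1.5], and the MAD is 1.0, so the modified Z-scores are
// [-1.5, -0.5, 0.5, 1.5], all well below OUTLIER_THRESHOLD (~14.83).
// For [10.0, 10.0, 10.0, 100.0] the MAD is 0.0, the f64::EPSILON fallback
// kicks in, and the last point gets a huge score, marking it as an outlier.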
42 | #[cfg(test)] 43 | pub fn num_outliers(xs: &[f64]) -> usize { 44 | if xs.is_empty() { 45 | return 0; 46 | } 47 | 48 | let scores = modified_zscores(xs); 49 | scores 50 | .iter() 51 | .filter(|&&s| s.abs() > OUTLIER_THRESHOLD) 52 | .count() 53 | } 54 | 55 | #[test] 56 | fn test_detect_outliers() { 57 | // Should not detect outliers in small samples 58 | assert_eq!(0, num_outliers(&[])); 59 | assert_eq!(0, num_outliers(&[50.0])); 60 | assert_eq!(0, num_outliers(&[1000.0, 0.0])); 61 | 62 | // Should not detect outliers in low-variance samples 63 | let xs = [-0.2, 0.0, 0.2]; 64 | assert_eq!(0, num_outliers(&xs)); 65 | 66 | // Should detect a single outlier 67 | let xs = [-0.2, 0.0, 0.2, 4.0]; 68 | assert_eq!(1, num_outliers(&xs)); 69 | 70 | // Should detect a single outlier 71 | let xs = [0.5, 0.30, 0.29, 0.31, 0.30]; 72 | assert_eq!(1, num_outliers(&xs)); 73 | 74 | // Should detect no outliers in sample drawn from normal distribution 75 | let xs = [ 76 | 2.33269488, 77 | 1.42195907, 78 | -0.57527698, 79 | -0.31293437, 80 | 2.2948158, 81 | 0.75813273, 82 | -1.0712388, 83 | -0.96394741, 84 | -1.15897446, 85 | 1.10976285, 86 | ]; 87 | assert_eq!(0, num_outliers(&xs)); 88 | 89 | // Should detect two outliers that were manually added 90 | let xs = [ 91 | 2.33269488, 92 | 1.42195907, 93 | -0.57527698, 94 | -0.31293437, 95 | 2.2948158, 96 | 0.75813273, 97 | -1.0712388, 98 | -0.96394741, 99 | -1.15897446, 100 | 1.10976285, 101 | 20.0, 102 | -500.0, 103 | ]; 104 | assert_eq!(2, num_outliers(&xs)); 105 | } 106 | 107 | #[test] 108 | fn test_detect_outliers_if_mad_becomes_0() { 109 | // See https://stats.stackexchange.com/q/339932 110 | let xs = [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0]; 111 | assert_eq!(1, num_outliers(&xs)); 112 | 113 | let xs = [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0]; 114 | assert_eq!(2, num_outliers(&xs)); 115 | } 116 | -------------------------------------------------------------------------------- /src/output/format.rs: -------------------------------------------------------------------------------- 1 | use crate::util::units::{Second, Unit}; 2 | 3 | /// Format the given duration as a string. The output-unit can be enforced by setting `unit` to 4 | /// `Some(target_unit)`. If `unit` is `None`, it will be determined automatically. 5 | pub fn format_duration(duration: Second, unit: Option) -> String { 6 | let (duration_fmt, _) = format_duration_unit(duration, unit); 7 | duration_fmt 8 | } 9 | 10 | /// Like `format_duration`, but returns the target unit as well. 11 | pub fn format_duration_unit(duration: Second, unit: Option) -> (String, Unit) { 12 | let (out_str, out_unit) = format_duration_value(duration, unit); 13 | 14 | (format!("{} {}", out_str, out_unit.short_name()), out_unit) 15 | } 16 | 17 | /// Like `format_duration`, but returns the target unit as well. 
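// Illustrative examples (mirroring the unit tests below): with `unit == None`
// the target unit is chosen from the magnitude of the duration:
//
//   format_duration_value(0.0005, None) -> ("500.0", Unit::MicroSecond)
//   format_duration_value(0.999, None)  -> ("999.0", Unit::MilliSecond)
//   format_duration_value(1.3, None)    -> ("1.300", Unit::Second)
//
// Passing `Some(unit)` forces that unit regardless of the magnitude.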
18 | pub fn format_duration_value(duration: Second, unit: Option) -> (String, Unit) { 19 | if (duration < 0.001 && unit.is_none()) || unit == Some(Unit::MicroSecond) { 20 | (Unit::MicroSecond.format(duration), Unit::MicroSecond) 21 | } else if (duration < 1.0 && unit.is_none()) || unit == Some(Unit::MilliSecond) { 22 | (Unit::MilliSecond.format(duration), Unit::MilliSecond) 23 | } else { 24 | (Unit::Second.format(duration), Unit::Second) 25 | } 26 | } 27 | 28 | #[test] 29 | fn test_format_duration_unit_basic() { 30 | let (out_str, out_unit) = format_duration_unit(1.3, None); 31 | 32 | assert_eq!("1.300 s", out_str); 33 | assert_eq!(Unit::Second, out_unit); 34 | 35 | let (out_str, out_unit) = format_duration_unit(1.0, None); 36 | 37 | assert_eq!("1.000 s", out_str); 38 | assert_eq!(Unit::Second, out_unit); 39 | 40 | let (out_str, out_unit) = format_duration_unit(0.999, None); 41 | 42 | assert_eq!("999.0 ms", out_str); 43 | assert_eq!(Unit::MilliSecond, out_unit); 44 | 45 | let (out_str, out_unit) = format_duration_unit(0.0005, None); 46 | 47 | assert_eq!("500.0 µs", out_str); 48 | assert_eq!(Unit::MicroSecond, out_unit); 49 | 50 | let (out_str, out_unit) = format_duration_unit(0.0, None); 51 | 52 | assert_eq!("0.0 µs", out_str); 53 | assert_eq!(Unit::MicroSecond, out_unit); 54 | 55 | let (out_str, out_unit) = format_duration_unit(1000.0, None); 56 | 57 | assert_eq!("1000.000 s", out_str); 58 | assert_eq!(Unit::Second, out_unit); 59 | } 60 | 61 | #[test] 62 | fn test_format_duration_unit_with_unit() { 63 | let (out_str, out_unit) = format_duration_unit(1.3, Some(Unit::Second)); 64 | 65 | assert_eq!("1.300 s", out_str); 66 | assert_eq!(Unit::Second, out_unit); 67 | 68 | let (out_str, out_unit) = format_duration_unit(1.3, Some(Unit::MilliSecond)); 69 | 70 | assert_eq!("1300.0 ms", out_str); 71 | assert_eq!(Unit::MilliSecond, out_unit); 72 | 73 | let (out_str, out_unit) = format_duration_unit(1.3, Some(Unit::MicroSecond)); 74 | 75 | assert_eq!("1300000.0 µs", out_str); 76 | assert_eq!(Unit::MicroSecond, out_unit); 77 | } 78 | -------------------------------------------------------------------------------- /src/output/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod format; 2 | pub mod progress_bar; 3 | pub mod warnings; 4 | -------------------------------------------------------------------------------- /src/output/progress_bar.rs: -------------------------------------------------------------------------------- 1 | use indicatif::{ProgressBar, ProgressStyle}; 2 | use std::time::Duration; 3 | 4 | use crate::options::OutputStyleOption; 5 | 6 | #[cfg(not(windows))] 7 | const TICK_SETTINGS: (&str, u64) = ("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏ ", 80); 8 | 9 | #[cfg(windows)] 10 | const TICK_SETTINGS: (&str, u64) = (r"+-x| ", 200); 11 | 12 | /// Return a pre-configured progress bar 13 | pub fn get_progress_bar(length: u64, msg: &str, option: OutputStyleOption) -> ProgressBar { 14 | let progressbar_style = match option { 15 | OutputStyleOption::Basic | OutputStyleOption::Color => ProgressStyle::default_bar(), 16 | _ => ProgressStyle::default_spinner() 17 | .tick_chars(TICK_SETTINGS.0) 18 | .template(" {spinner} {msg:<30} {wide_bar} ETA {eta_precise} ") 19 | .expect("no template error"), 20 | }; 21 | 22 | let progress_bar = match option { 23 | OutputStyleOption::Basic | OutputStyleOption::Color => ProgressBar::hidden(), 24 | _ => ProgressBar::new(length), 25 | }; 26 | progress_bar.set_style(progressbar_style); 27 | 
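// Keep the spinner animated at the interval from TICK_SETTINGS, even while
// the benchmarked command is still running and no manual updates arrive.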
progress_bar.enable_steady_tick(Duration::from_millis(TICK_SETTINGS.1)); 28 | progress_bar.set_message(msg.to_owned()); 29 | 30 | progress_bar 31 | } 32 | -------------------------------------------------------------------------------- /src/output/warnings.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use crate::benchmark::MIN_EXECUTION_TIME; 4 | use crate::output::format::format_duration; 5 | use crate::util::units::Second; 6 | 7 | pub struct OutlierWarningOptions { 8 | pub warmup_in_use: bool, 9 | pub prepare_in_use: bool, 10 | } 11 | 12 | /// A list of all possible warnings 13 | pub enum Warnings { 14 | FastExecutionTime, 15 | NonZeroExitCode, 16 | SlowInitialRun(Second, OutlierWarningOptions), 17 | OutliersDetected(OutlierWarningOptions), 18 | } 19 | 20 | impl fmt::Display for Warnings { 21 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 22 | match *self { 23 | Warnings::FastExecutionTime => write!( 24 | f, 25 | "Command took less than {:.0} ms to complete. Note that the results might be \ 26 | inaccurate because hyperfine can not calibrate the shell startup time much \ 27 | more precise than this limit. You can try to use the `-N`/`--shell=none` \ 28 | option to disable the shell completely.", 29 | MIN_EXECUTION_TIME * 1e3 30 | ), 31 | Warnings::NonZeroExitCode => write!(f, "Ignoring non-zero exit code."), 32 | Warnings::SlowInitialRun(time_first_run, ref options) => write!( 33 | f, 34 | "The first benchmarking run for this command was significantly slower than the \ 35 | rest ({time}). This could be caused by (filesystem) caches that were not filled until \ 36 | after the first run. {hints}", 37 | time=format_duration(time_first_run, None), 38 | hints=match (options.warmup_in_use, options.prepare_in_use) { 39 | (true, true) => "You are already using both the '--warmup' option as well \ 40 | as the '--prepare' option. Consider re-running the benchmark on a quiet system. \ 41 | Maybe it was a random outlier. Alternatively, consider increasing the warmup \ 42 | count.", 43 | (true, false) => "You are already using the '--warmup' option which helps \ 44 | to fill these caches before the actual benchmark. You can either try to \ 45 | increase the warmup count further or re-run this benchmark on a quiet system \ 46 | in case it was a random outlier. Alternatively, consider using the '--prepare' \ 47 | option to clear the caches before each timing run.", 48 | (false, true) => "You are already using the '--prepare' option which can \ 49 | be used to clear caches. If you did not use a cache-clearing command with \ 50 | '--prepare', you can either try that or consider using the '--warmup' option \ 51 | to fill those caches before the actual benchmark.", 52 | (false, false) => "You should consider using the '--warmup' option to fill \ 53 | those caches before the actual benchmark. Alternatively, use the '--prepare' \ 54 | option to clear the caches before each timing run." 55 | } 56 | ), 57 | Warnings::OutliersDetected(ref options) => write!( 58 | f, 59 | "Statistical outliers were detected. Consider re-running this benchmark on a quiet \ 60 | system without any interferences from other programs.{hint}", 61 | hint=if options.warmup_in_use && options.prepare_in_use { 62 | "" 63 | } else { 64 | " It might help to use the '--warmup' or '--prepare' options." 
65 | } 66 | ), 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/parameter/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::util::number::Number; 2 | use std::fmt::Display; 3 | 4 | pub mod range_step; 5 | pub mod tokenize; 6 | 7 | #[derive(Debug, Clone, PartialEq, Eq)] 8 | pub enum ParameterValue { 9 | Text(String), 10 | Numeric(Number), 11 | } 12 | 13 | impl Display for ParameterValue { 14 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 15 | let str = match self { 16 | ParameterValue::Text(ref value) => value.clone(), 17 | ParameterValue::Numeric(value) => value.to_string(), 18 | }; 19 | write!(f, "{str}") 20 | } 21 | } 22 | 23 | pub type ParameterNameAndValue<'a> = (&'a str, ParameterValue); 24 | -------------------------------------------------------------------------------- /src/parameter/range_step.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryInto; 2 | use std::ops::{Add, AddAssign, Div, Sub}; 3 | 4 | use crate::error::ParameterScanError; 5 | use crate::util::number::Number; 6 | 7 | pub trait Numeric: 8 | Add<Output = Self> 9 | + Sub<Output = Self> 10 | + Div<Output = Self> 11 | + AddAssign 12 | + PartialOrd 13 | + Copy 14 | + Clone 15 | + From<i32> 16 | + Into<Number> 17 | { 18 | } 19 | impl< 20 | T: Add<Output = T> 21 | + Sub<Output = T> 22 | + Div<Output = T> 23 | + AddAssign 24 | + PartialOrd 25 | + Copy 26 | + Clone 27 | + From<i32> 28 | + Into<Number>, 29 | > Numeric for T 30 | { 31 | } 32 | 33 | #[derive(Debug)] 34 | pub struct RangeStep<T> { 35 | state: T, 36 | end: T, 37 | step: T, 38 | } 39 | 40 | impl<T: Numeric> RangeStep<T> { 41 | pub fn new(start: T, end: T, step: T) -> Result<Self, ParameterScanError> { 42 | if end < start { 43 | return Err(ParameterScanError::EmptyRange); 44 | } 45 | 46 | if step == T::from(0) { 47 | return Err(ParameterScanError::ZeroStep); 48 | } 49 | 50 | const MAX_PARAMETERS: usize = 100_000; 51 | match range_step_size_hint(start, end, step) { 52 | (_, Some(size)) if size <= MAX_PARAMETERS => Ok(Self { 53 | state: start, 54 | end, 55 | step, 56 | }), 57 | _ => Err(ParameterScanError::TooLarge), 58 | } 59 | } 60 | } 61 | 62 | impl<T: Numeric> Iterator for RangeStep<T> { 63 | type Item = T; 64 | 65 | fn next(&mut self) -> Option<Self::Item> { 66 | if self.state > self.end { 67 | return None; 68 | } 69 | let return_val = self.state; 70 | self.state += self.step; 71 | 72 | Some(return_val) 73 | } 74 | 75 | fn size_hint(&self) -> (usize, Option<usize>) { 76 | range_step_size_hint(self.state, self.end, self.step) 77 | } 78 | } 79 | 80 | fn range_step_size_hint<T: Numeric>(start: T, end: T, step: T) -> (usize, Option<usize>) { 81 | if step == T::from(0) { 82 | return (usize::MAX, None); 83 | } 84 | 85 | let steps = (end - start + T::from(1)) / step; 86 | steps 87 | .into() 88 | .try_into() 89 | .map_or((usize::MAX, None), |u| (u, Some(u))) 90 | } 91 | 92 | #[cfg(test)] 93 | mod tests { 94 | use super::*; 95 | 96 | use rust_decimal::Decimal; 97 | use std::str::FromStr; 98 | 99 | #[test] 100 | fn test_integer_range() { 101 | let param_range: Vec<i32> = RangeStep::new(0, 10, 3).unwrap().collect(); 102 | 103 | assert_eq!(param_range.len(), 4); 104 | assert_eq!(param_range[0], 0); 105 | assert_eq!(param_range[3], 9); 106 | } 107 | 108 | #[test] 109 | fn test_decimal_range() { 110 | let param_min = Decimal::from(0); 111 | let param_max = Decimal::from(1); 112 | let step = Decimal::from_str("0.1").unwrap(); 113 | 114 | let param_range: Vec<Decimal> = RangeStep::new(param_min, param_max, step) 115 | .unwrap() 116 | .collect(); 117 | 118 | assert_eq!(param_range.len(), 11); 
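// 0.0 through 1.0 inclusive, in steps of 0.1, yields the 11 values checked below: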
119 | assert_eq!(param_range[0], Decimal::from(0)); 120 | assert_eq!(param_range[10], Decimal::from(1)); 121 | } 122 | 123 | #[test] 124 | fn test_range_step_validate() { 125 | let result = RangeStep::new(0, 10, 3); 126 | assert!(result.is_ok()); 127 | 128 | let result = RangeStep::new( 129 | Decimal::from(0), 130 | Decimal::from(1), 131 | Decimal::from_str("0.1").unwrap(), 132 | ); 133 | assert!(result.is_ok()); 134 | 135 | let result = RangeStep::new(11, 10, 1); 136 | assert_eq!(format!("{}", result.unwrap_err()), "Empty parameter range"); 137 | 138 | let result = RangeStep::new(0, 10, 0); 139 | assert_eq!( 140 | format!("{}", result.unwrap_err()), 141 | "Zero is not a valid parameter step" 142 | ); 143 | 144 | let result = RangeStep::new(0, 100_001, 1); 145 | assert_eq!( 146 | format!("{}", result.unwrap_err()), 147 | "Parameter range is too large" 148 | ); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/parameter/tokenize.rs: -------------------------------------------------------------------------------- 1 | pub fn tokenize(values: &str) -> Vec<String> { 2 | let mut tokens = vec![]; 3 | let mut buf = String::new(); 4 | 5 | let mut iter = values.chars(); 6 | while let Some(c) = iter.next() { 7 | match c { 8 | '\\' => match iter.next() { 9 | Some(c2 @ ',') | Some(c2 @ '\\') => { 10 | buf.push(c2); 11 | } 12 | Some(c2) => { 13 | buf.push('\\'); 14 | buf.push(c2); 15 | } 16 | None => buf.push('\\'), 17 | }, 18 | ',' => { 19 | tokens.push(buf); 20 | buf = String::new(); 21 | } 22 | _ => { 23 | buf.push(c); 24 | } 25 | }; 26 | } 27 | 28 | tokens.push(buf); 29 | 30 | tokens 31 | } 32 | 33 | #[test] 34 | fn test_tokenize_single_value() { 35 | assert_eq!(tokenize(r""), vec![""]); 36 | assert_eq!(tokenize(r"foo"), vec!["foo"]); 37 | assert_eq!(tokenize(r" "), vec![" "]); 38 | assert_eq!(tokenize(r"hello\, world!"), vec!["hello, world!"]); 39 | assert_eq!(tokenize(r"\,"), vec![","]); 40 | assert_eq!(tokenize(r"\,\,\,"), vec![",,,"]); 41 | assert_eq!(tokenize(r"\n"), vec![r"\n"]); 42 | assert_eq!(tokenize(r"\\"), vec![r"\"]); 43 | assert_eq!(tokenize(r"\\\,"), vec![r"\,"]); 44 | } 45 | 46 | #[test] 47 | fn test_tokenize_multiple_values() { 48 | assert_eq!(tokenize(r"foo,bar,baz"), vec!["foo", "bar", "baz"]); 49 | assert_eq!(tokenize(r"hello world,foo"), vec!["hello world", "foo"]); 50 | 51 | assert_eq!(tokenize(r"hello\,world!,baz"), vec!["hello,world!", "baz"]); 52 | } 53 | 54 | #[test] 55 | fn test_tokenize_empty_values() { 56 | assert_eq!(tokenize(r"foo,,bar"), vec!["foo", "", "bar"]); 57 | assert_eq!(tokenize(r",bar"), vec!["", "bar"]); 58 | assert_eq!(tokenize(r"bar,"), vec!["bar", ""]); 59 | assert_eq!(tokenize(r",,"), vec!["", "", ""]); 60 | } 61 | -------------------------------------------------------------------------------- /src/timer/mod.rs: -------------------------------------------------------------------------------- 1 | mod wall_clock_timer; 2 | 3 | #[cfg(windows)] 4 | mod windows_timer; 5 | 6 | #[cfg(not(windows))] 7 | mod unix_timer; 8 | 9 | #[cfg(target_os = "linux")] 10 | use nix::fcntl::{splice, SpliceFFlags}; 11 | #[cfg(target_os = "linux")] 12 | use std::fs::File; 13 | #[cfg(target_os = "linux")] 14 | use std::os::fd::AsFd; 15 | 16 | #[cfg(target_os = "windows")] 17 | use windows_sys::Win32::System::Threading::CREATE_SUSPENDED; 18 | 19 | use crate::util::units::Second; 20 | use wall_clock_timer::WallClockTimer; 21 | 22 | use std::io::Read; 23 | use std::process::{ChildStdout, Command, ExitStatus}; 24 | 25 | use 
anyhow::Result; 26 | 27 | #[cfg(not(windows))] 28 | #[derive(Debug, Copy, Clone)] 29 | struct CPUTimes { 30 | /// Total amount of time spent executing in user mode 31 | pub user_usec: i64, 32 | 33 | /// Total amount of time spent executing in kernel mode 34 | pub system_usec: i64, 35 | 36 | /// Maximum amount of memory used by the process, in bytes 37 | pub memory_usage_byte: u64, 38 | } 39 | 40 | /// Used to indicate the result of running a command 41 | #[derive(Debug, Copy, Clone)] 42 | pub struct TimerResult { 43 | pub time_real: Second, 44 | pub time_user: Second, 45 | pub time_system: Second, 46 | pub memory_usage_byte: u64, 47 | /// The exit status of the process 48 | pub status: ExitStatus, 49 | } 50 | 51 | /// Discard the output of a child process. 52 | fn discard(output: ChildStdout) { 53 | const CHUNK_SIZE: usize = 64 << 10; 54 | 55 | #[cfg(target_os = "linux")] 56 | { 57 | if let Ok(file) = File::create("/dev/null") { 58 | while let Ok(bytes) = splice( 59 | output.as_fd(), 60 | None, 61 | file.as_fd(), 62 | None, 63 | CHUNK_SIZE, 64 | SpliceFFlags::empty(), 65 | ) { 66 | if bytes == 0 { 67 | break; 68 | } 69 | } 70 | } 71 | } 72 | 73 | let mut output = output; 74 | let mut buf = [0; CHUNK_SIZE]; 75 | while let Ok(bytes) = output.read(&mut buf) { 76 | if bytes == 0 { 77 | break; 78 | } 79 | } 80 | } 81 | 82 | /// Execute the given command and return a timing summary 83 | pub fn execute_and_measure(mut command: Command) -> Result<TimerResult> { 84 | #[cfg(not(windows))] 85 | let cpu_timer = self::unix_timer::CPUTimer::start(); 86 | 87 | #[cfg(windows)] 88 | { 89 | use std::os::windows::process::CommandExt; 90 | 91 | // Create the process in a suspended state so that we don't miss any cpu time between process creation and `CPUTimer` start. 92 | command.creation_flags(CREATE_SUSPENDED); 93 | } 94 | 95 | let wallclock_timer = WallClockTimer::start(); 96 | let mut child = command.spawn()?; 97 | 98 | #[cfg(windows)] 99 | let cpu_timer = { 100 | // SAFETY: We created a suspended process 101 | unsafe { self::windows_timer::CPUTimer::start_suspended_process(&child) } 102 | }; 103 | 104 | if let Some(output) = child.stdout.take() { 105 | // Handle CommandOutputPolicy::Pipe 106 | discard(output); 107 | } 108 | 109 | let status = child.wait()?; 110 | 111 | let time_real = wallclock_timer.stop(); 112 | let (time_user, time_system, memory_usage_byte) = cpu_timer.stop(); 113 | 114 | Ok(TimerResult { 115 | time_real, 116 | time_user, 117 | time_system, 118 | memory_usage_byte, 119 | status, 120 | }) 121 | } 122 | -------------------------------------------------------------------------------- /src/timer/unix_timer.rs: -------------------------------------------------------------------------------- 1 | #![cfg(not(windows))] 2 | 3 | use std::convert::TryFrom; 4 | use std::mem; 5 | 6 | use crate::timer::CPUTimes; 7 | use crate::util::units::Second; 8 | 9 | #[derive(Debug, Copy, Clone)] 10 | pub struct CPUInterval { 11 | /// Total amount of time spent executing in user mode 12 | pub user: Second, 13 | 14 | /// Total amount of time spent executing in kernel mode 15 | pub system: Second, 16 | } 17 | 18 | pub struct CPUTimer { 19 | start_cpu: CPUTimes, 20 | } 21 | 22 | impl CPUTimer { 23 | pub fn start() -> Self { 24 | CPUTimer { 25 | start_cpu: get_cpu_times(), 26 | } 27 | } 28 | 29 | pub fn stop(&self) -> (Second, Second, u64) { 30 | let end_cpu = get_cpu_times(); 31 | let cpu_interval = cpu_time_interval(&self.start_cpu, &end_cpu); 32 | ( 33 | cpu_interval.user, 34 | cpu_interval.system, 35 | 
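// ru_maxrss is a high-water mark over the whole measurement, so reading it
// from the final snapshot alone is sufficient: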
end_cpu.memory_usage_byte, 36 | ) 37 | } 38 | } 39 | 40 | /// Read CPU execution times ('user' and 'system') 41 | fn get_cpu_times() -> CPUTimes { 42 | use libc::{getrusage, rusage, RUSAGE_CHILDREN}; 43 | 44 | let result: rusage = unsafe { 45 | let mut buf = mem::zeroed(); 46 | let success = getrusage(RUSAGE_CHILDREN, &mut buf); 47 | assert_eq!(0, success); 48 | buf 49 | }; 50 | 51 | const MICROSEC_PER_SEC: i64 = 1000 * 1000; 52 | 53 | // Linux and *BSD return the value in KibiBytes, Darwin flavors in bytes 54 | let max_rss_byte = if cfg!(target_os = "macos") || cfg!(target_os = "ios") { 55 | result.ru_maxrss 56 | } else { 57 | result.ru_maxrss * 1024 58 | }; 59 | 60 | #[allow(clippy::useless_conversion)] 61 | CPUTimes { 62 | user_usec: i64::from(result.ru_utime.tv_sec) * MICROSEC_PER_SEC 63 | + i64::from(result.ru_utime.tv_usec), 64 | system_usec: i64::from(result.ru_stime.tv_sec) * MICROSEC_PER_SEC 65 | + i64::from(result.ru_stime.tv_usec), 66 | memory_usage_byte: u64::try_from(max_rss_byte).unwrap_or(0), 67 | } 68 | } 69 | 70 | /// Compute the time intervals in between two `CPUTimes` snapshots 71 | fn cpu_time_interval(start: &CPUTimes, end: &CPUTimes) -> CPUInterval { 72 | CPUInterval { 73 | user: ((end.user_usec - start.user_usec) as f64) * 1e-6, 74 | system: ((end.system_usec - start.system_usec) as f64) * 1e-6, 75 | } 76 | } 77 | 78 | #[cfg(test)] 79 | use approx::assert_relative_eq; 80 | 81 | #[test] 82 | fn test_cpu_time_interval() { 83 | let t_a = CPUTimes { 84 | user_usec: 12345, 85 | system_usec: 54321, 86 | memory_usage_byte: 0, 87 | }; 88 | 89 | let t_b = CPUTimes { 90 | user_usec: 20000, 91 | system_usec: 70000, 92 | memory_usage_byte: 0, 93 | }; 94 | 95 | let t_zero = cpu_time_interval(&t_a, &t_a); 96 | assert!(t_zero.user.abs() < f64::EPSILON); 97 | assert!(t_zero.system.abs() < f64::EPSILON); 98 | 99 | let t_ab = cpu_time_interval(&t_a, &t_b); 100 | assert_relative_eq!(0.007655, t_ab.user); 101 | assert_relative_eq!(0.015679, t_ab.system); 102 | 103 | let t_ba = cpu_time_interval(&t_b, &t_a); 104 | assert_relative_eq!(-0.007655, t_ba.user); 105 | assert_relative_eq!(-0.015679, t_ba.system); 106 | } 107 | -------------------------------------------------------------------------------- /src/timer/wall_clock_timer.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use crate::util::units::Second; 4 | 5 | pub struct WallClockTimer { 6 | start: Instant, 7 | } 8 | 9 | impl WallClockTimer { 10 | pub fn start() -> WallClockTimer { 11 | WallClockTimer { 12 | start: Instant::now(), 13 | } 14 | } 15 | 16 | pub fn stop(&self) -> Second { 17 | let duration = self.start.elapsed(); 18 | duration.as_secs() as f64 + f64::from(duration.subsec_nanos()) * 1e-9 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/timer/windows_timer.rs: -------------------------------------------------------------------------------- 1 | #![cfg(windows)] 2 | #![warn(unsafe_op_in_unsafe_fn)] 3 | 4 | use std::{mem, os::windows::io::AsRawHandle, process, ptr}; 5 | 6 | use windows_sys::Win32::{ 7 | Foundation::{CloseHandle, HANDLE}, 8 | System::JobObjects::{ 9 | AssignProcessToJobObject, CreateJobObjectW, JobObjectBasicAccountingInformation, 10 | QueryInformationJobObject, JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, 11 | }, 12 | }; 13 | 14 | #[cfg(feature = "windows_process_extensions_main_thread_handle")] 15 | use std::os::windows::process::ChildExt; 16 | #[cfg(feature = 
"windows_process_extensions_main_thread_handle")] 17 | use windows_sys::Win32::System::Threading::ResumeThread; 18 | 19 | #[cfg(not(feature = "windows_process_extensions_main_thread_handle"))] 20 | use once_cell::sync::Lazy; 21 | #[cfg(not(feature = "windows_process_extensions_main_thread_handle"))] 22 | use windows_sys::{ 23 | s, w, 24 | Win32::{ 25 | Foundation::{NTSTATUS, STATUS_SUCCESS}, 26 | System::LibraryLoader::{GetModuleHandleW, GetProcAddress}, 27 | }, 28 | }; 29 | 30 | use crate::util::units::Second; 31 | 32 | const HUNDRED_NS_PER_MS: i64 = 10; 33 | 34 | #[cfg(not(feature = "windows_process_extensions_main_thread_handle"))] 35 | #[allow(non_upper_case_globals)] 36 | static NtResumeProcess: Lazy NTSTATUS> = 37 | Lazy::new(|| { 38 | // SAFETY: Getting the module handle for ntdll.dll is safe 39 | let ntdll = unsafe { GetModuleHandleW(w!("ntdll.dll")) }; 40 | assert!(ntdll != std::ptr::null_mut(), "GetModuleHandleW failed"); 41 | 42 | // SAFETY: The ntdll handle is valid 43 | let nt_resume_process = unsafe { GetProcAddress(ntdll, s!("NtResumeProcess")) }; 44 | 45 | // SAFETY: We transmute to the correct function signature 46 | unsafe { mem::transmute(nt_resume_process.unwrap()) } 47 | }); 48 | 49 | pub struct CPUTimer { 50 | job_object: HANDLE, 51 | } 52 | 53 | impl CPUTimer { 54 | pub unsafe fn start_suspended_process(child: &process::Child) -> Self { 55 | let child_handle = child.as_raw_handle() as HANDLE; 56 | 57 | // SAFETY: Creating a new job object is safe 58 | let job_object = unsafe { CreateJobObjectW(ptr::null_mut(), ptr::null_mut()) }; 59 | assert!( 60 | job_object != std::ptr::null_mut(), 61 | "CreateJobObjectW failed" 62 | ); 63 | 64 | // SAFETY: The job object handle is valid 65 | let ret = unsafe { AssignProcessToJobObject(job_object, child_handle) }; 66 | assert!(ret != 0, "AssignProcessToJobObject failed"); 67 | 68 | #[cfg(feature = "windows_process_extensions_main_thread_handle")] 69 | { 70 | // SAFETY: The main thread handle is valid 71 | let ret = unsafe { ResumeThread(child.main_thread_handle().as_raw_handle() as HANDLE) }; 72 | assert!(ret != u32::MAX, "ResumeThread failed"); 73 | } 74 | 75 | #[cfg(not(feature = "windows_process_extensions_main_thread_handle"))] 76 | { 77 | // Since we can't get the main thread handle on stable rust, we use 78 | // the undocumented but widely known `NtResumeProcess` function to 79 | // resume a process by it's handle. 80 | 81 | // SAFETY: The process handle is valid 82 | let ret = unsafe { NtResumeProcess(child_handle) }; 83 | assert!(ret == STATUS_SUCCESS, "NtResumeProcess failed"); 84 | } 85 | 86 | Self { job_object } 87 | } 88 | 89 | pub fn stop(&self) -> (Second, Second, u64) { 90 | let mut job_object_info = 91 | mem::MaybeUninit::::uninit(); 92 | 93 | // SAFETY: A valid job object got created in `start_suspended_process` 94 | let res = unsafe { 95 | QueryInformationJobObject( 96 | self.job_object, 97 | JobObjectBasicAccountingInformation, 98 | job_object_info.as_mut_ptr().cast(), 99 | mem::size_of::() as u32, 100 | ptr::null_mut(), 101 | ) 102 | }; 103 | 104 | if res != 0 { 105 | // SAFETY: The job object info got correctly initialized 106 | let job_object_info = unsafe { job_object_info.assume_init() }; 107 | 108 | // The `TotalUserTime` is "The total amount of user-mode execution time for 109 | // all active processes associated with the job, as well as all terminated processes no 110 | // longer associated with the job, in 100-nanosecond ticks." 
111 | let user: i64 = job_object_info.TotalUserTime / HUNDRED_NS_PER_USEC; 112 | 113 | // The `TotalKernelTime` is "The total amount of kernel-mode execution time 114 | // for all active processes associated with the job, as well as all terminated 115 | // processes no longer associated with the job, in 100-nanosecond ticks." 116 | let kernel: i64 = job_object_info.TotalKernelTime / HUNDRED_NS_PER_USEC; 117 | (user as f64 * 1e-6, kernel as f64 * 1e-6, 0) 118 | } else { 119 | (0.0, 0.0, 0) 120 | } 121 | } 122 | } 123 | 124 | impl Drop for CPUTimer { 125 | fn drop(&mut self) { 126 | // SAFETY: A valid job object got created in `start_suspended_process` 127 | unsafe { CloseHandle(self.job_object) }; 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /src/util/exit_code.rs: -------------------------------------------------------------------------------- 1 | use std::process::ExitStatus; 2 | 3 | #[cfg(unix)] 4 | pub fn extract_exit_code(status: ExitStatus) -> Option<i32> { 5 | use std::os::unix::process::ExitStatusExt; 6 | 7 | // From the ExitStatus::code documentation: 8 | // 9 | // "On Unix, this will return None if the process was terminated by a signal." 10 | // 11 | // In that case, ExitStatusExt::signal should never return None. 12 | // 13 | // To differentiate between "normal" exit codes and signals, we are using a technique 14 | // similar to bash (https://tldp.org/LDP/abs/html/exitcodes.html) and add 128 to the 15 | // signal value. 16 | status.code().or_else(|| status.signal().map(|s| s + 128)) 17 | } 18 | 19 | #[cfg(not(unix))] 20 | pub fn extract_exit_code(status: ExitStatus) -> Option<i32> { 21 | status.code() 22 | } 23 | -------------------------------------------------------------------------------- /src/util/min_max.rs: -------------------------------------------------------------------------------- 1 | /// A max function for f64's without NaNs 2 | pub fn max(vals: &[f64]) -> f64 { 3 | *vals 4 | .iter() 5 | .max_by(|a, b| a.partial_cmp(b).unwrap()) 6 | .unwrap() 7 | } 8 | 9 | /// A min function for f64's without NaNs 10 | pub fn min(vals: &[f64]) -> f64 { 11 | *vals 12 | .iter() 13 | .min_by(|a, b| a.partial_cmp(b).unwrap()) 14 | .unwrap() 15 | } 16 | 17 | #[test] 18 | fn test_max() { 19 | let assert_float_eq = |a: f64, b: f64| { 20 | assert!((a - b).abs() < f64::EPSILON); 21 | }; 22 | 23 | assert_float_eq(1.0, max(&[1.0])); 24 | assert_float_eq(-1.0, max(&[-1.0])); 25 | assert_float_eq(-1.0, max(&[-2.0, -1.0])); 26 | assert_float_eq(1.0, max(&[-1.0, 1.0])); 27 | assert_float_eq(1.0, max(&[-1.0, 1.0, 0.0])); 28 | } 29 | -------------------------------------------------------------------------------- /src/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod exit_code; 2 | pub mod min_max; 3 | pub mod number; 4 | pub mod randomized_environment_offset; 5 | pub mod units; 6 | -------------------------------------------------------------------------------- /src/util/number.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::fmt; 3 | 4 | use rust_decimal::prelude::ToPrimitive; 5 | use rust_decimal::Decimal; 6 | use serde::Serialize; 7 | 8 | #[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq)] 9 | #[serde(untagged)] 10 | pub enum Number { 11 | Int(i32), 12 | Decimal(Decimal), 13 | } 14 | 15 | impl fmt::Display for Number { 16 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 17 | match *self { 18 | 
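// Delegate to the inner type's Display impl so formatter flags
// (width, precision) are honored: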
Number::Int(i) => fmt::Display::fmt(&i, f), 19 | Number::Decimal(i) => fmt::Display::fmt(&i, f), 20 | } 21 | } 22 | } 23 | 24 | impl From<i32> for Number { 25 | fn from(x: i32) -> Number { 26 | Number::Int(x) 27 | } 28 | } 29 | 30 | impl From<Decimal> for Number { 31 | fn from(x: Decimal) -> Number { 32 | Number::Decimal(x) 33 | } 34 | } 35 | 36 | impl TryFrom<Number> for usize { 37 | type Error = (); 38 | 39 | fn try_from(numeric: Number) -> Result<Self, Self::Error> { 40 | match numeric { 41 | Number::Int(i) => usize::try_from(i).map_err(|_| ()), 42 | Number::Decimal(d) => match d.to_u64() { 43 | Some(u) => usize::try_from(u).map_err(|_| ()), 44 | None => Err(()), 45 | }, 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/util/randomized_environment_offset.rs: -------------------------------------------------------------------------------- 1 | /// Returns a string with a random length. This value will be set as an environment 2 | /// variable to account for offset effects. See [1] for more details. 3 | /// 4 | /// [1] Mytkowicz, 2009. Producing Wrong Data Without Doing Anything Obviously Wrong!. 5 | /// Sigplan Notices - SIGPLAN. 44. 265-276. 10.1145/1508284.1508275. 6 | pub fn value() -> String { 7 | "X".repeat(rand::random::<usize>() % 4096usize) 8 | } 9 | -------------------------------------------------------------------------------- /src/util/units.rs: -------------------------------------------------------------------------------- 1 | //! This module contains common units. 2 | 3 | pub type Scalar = f64; 4 | 5 | /// Type alias for unit of time 6 | pub type Second = Scalar; 7 | 8 | /// Supported time units 9 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 10 | pub enum Unit { 11 | Second, 12 | MilliSecond, 13 | MicroSecond, 14 | } 15 | 16 | impl Unit { 17 | /// The abbreviation of the Unit. 18 | pub fn short_name(self) -> String { 19 | match self { 20 | Unit::Second => String::from("s"), 21 | Unit::MilliSecond => String::from("ms"), 22 | Unit::MicroSecond => String::from("µs"), 23 | } 24 | } 25 | 26 | /// Returns the Second value formatted for the Unit. 27 | pub fn format(self, value: Second) -> String { 28 | match self { 29 | Unit::Second => format!("{value:.3}"), 30 | Unit::MilliSecond => format!("{:.1}", value * 1e3), 31 | Unit::MicroSecond => format!("{:.1}", value * 1e6), 32 | } 33 | } 34 | } 35 | 36 | #[test] 37 | fn test_unit_short_name() { 38 | assert_eq!("s", Unit::Second.short_name()); 39 | assert_eq!("ms", Unit::MilliSecond.short_name()); 40 | assert_eq!("µs", Unit::MicroSecond.short_name()); 41 | } 42 | 43 | // Note - the values are rounded when formatted. 
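// A small supplementary check (not part of the original suite) that makes the
// rounding explicit; it relies only on the `format` method defined above.
#[test]
fn test_unit_format_rounds() {
    // 1.0006 s rounds up at the displayed precision instead of being truncated
    assert_eq!("1.001", Unit::Second.format(1.0006));
    assert_eq!("1000.6", Unit::MilliSecond.format(1.0006));
}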
44 | #[test] 45 | fn test_unit_format() { 46 | let value: Second = 123.456789; 47 | assert_eq!("123.457", Unit::Second.format(value)); 48 | assert_eq!("123456.8", Unit::MilliSecond.format(value)); 49 | 50 | assert_eq!("1234.6", Unit::MicroSecond.format(0.00123456)); 51 | } 52 | -------------------------------------------------------------------------------- /tests/common.rs: -------------------------------------------------------------------------------- 1 | use std::process::Command; 2 | 3 | use assert_cmd::cargo::CommandCargoExt; 4 | 5 | pub fn hyperfine_raw_command() -> Command { 6 | let mut cmd = Command::cargo_bin("hyperfine").unwrap(); 7 | cmd.current_dir("tests/"); 8 | cmd 9 | } 10 | 11 | pub fn hyperfine() -> assert_cmd::Command { 12 | assert_cmd::Command::from_std(hyperfine_raw_command()) 13 | } 14 | -------------------------------------------------------------------------------- /tests/example_input_file.txt: -------------------------------------------------------------------------------- 1 | This text is part of a file 2 | --------------------------------------------------------------------------------