├── .github
│   └── workflows
│       └── test.yml
├── .gitignore
├── CMakeLists.txt
├── LICENSE.txt
├── README.md
├── VSOpenFileFromDirFilters.json
├── dev.cmake
├── example
│   ├── CMakeLists.txt
│   ├── basic.cpp
│   └── locks.cpp
├── include
│   └── picobench
│       └── picobench.hpp
├── test
│   ├── CMakeLists.txt
│   ├── README.md
│   ├── basic.cpp
│   ├── get_cpm.cmake
│   └── multi_cpp
│       ├── main.cpp
│       ├── picobench_configured.hpp
│       ├── suite_a.cpp
│       ├── suite_b.cpp
│       └── suite_b_cont.cpp
└── tools
    ├── CMakeLists.txt
    ├── README.md
    └── picobench.cpp

/.github/workflows/test.yml:
--------------------------------------------------------------------------------
 1 | # Copyright (c) Borislav Stanimirov
 2 | # SPDX-License-Identifier: MIT
 3 | #
 4 | name: Test
 5 | on:
 6 |   push:
 7 |     branches: [master]
 8 |   pull_request:
 9 |     branches: [master]
10 | jobs:
11 |   build-and-test:
12 |     runs-on: ${{ matrix.os }}
13 |     strategy:
14 |       matrix:
15 |         os: [ubuntu-latest, windows-latest, macos-latest]
16 |         type: [Debug, RelWithDebInfo]
17 |     steps:
18 |       - name: Clone
19 |         uses: actions/checkout@v3
20 |       - name: Install Ninja
21 |         uses: seanmiddleditch/gha-setup-ninja@v4
22 |       - name: VC Vars
23 |         # Setup vcvars on Windows
24 |         # MSVC's address sanitizer adds dependencies on several DLLs which are not in PATH
25 |         # vcvars will add them to PATH and allow msvc asan executables to run
26 |         if: matrix.os == 'windows-latest'
27 |         uses: ilammy/msvc-dev-cmd@v1
28 |       - name: Configure
29 |         run: cmake . -G Ninja -DCMAKE_BUILD_TYPE=${{ matrix.type }} -DSAN_ADDR=1
30 |       - name: Build
31 |         run: cmake --build . --config ${{ matrix.type }}
32 |       - name: Test
33 |         run: ctest -C ${{ matrix.type }} --output-on-failure
34 |       - name: Example
35 |         run: cmake --build . --config Release --target=picobench-run-example-basic
36 |         # TODO: ./picobench "sleep 0.3" "sleep 0.22"
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | build/
 2 | out/
 3 | 
 4 | # ides
 5 | .vs/
 6 | .vscode/
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
 1 | # Copyright (c) Borislav Stanimirov
 2 | # SPDX-License-Identifier: MIT
 3 | #
 4 | cmake_minimum_required(VERSION 3.10)
 5 | 
 6 | project(picobench)
 7 | 
 8 | if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
 9 |     # dev_mode is used below to make life easier for developers
10 |     # it enables some configurations and the defaults for building tests and
11 |     # examples which typically wouldn't be built if picobench is a subdirectory of
12 |     # another project
13 |     set(dev_mode ON)
14 | else()
15 |     set(dev_mode OFF)
16 | endif()
17 | 
18 | option(PICOBENCH_BUILD_TOOLS "picobench: build tools" ${dev_mode})
19 | option(PICOBENCH_BUILD_TESTS "picobench: build tests" ${dev_mode})
20 | option(PICOBENCH_BUILD_EXAMPLES "picobench: build examples" ${dev_mode})
21 | mark_as_advanced(PICOBENCH_BUILD_TOOLS PICOBENCH_BUILD_TESTS PICOBENCH_BUILD_EXAMPLES)
22 | 
23 | if(dev_mode)
24 |     include(./dev.cmake)
25 | endif()
26 | 
27 | add_library(picobench INTERFACE)
28 | add_library(picobench::picobench ALIAS picobench)
29 | target_include_directories(picobench INTERFACE include)
30 | 
31 | if(PICOBENCH_BUILD_TOOLS)
32 |     add_subdirectory(tools)
33 | endif()
34 | 
35 | if(PICOBENCH_BUILD_TESTS)
36 |     enable_testing()
37 |     add_subdirectory(test)
38 | endif()
39 | 
40 | if(PICOBENCH_BUILD_EXAMPLES)
41 |     add_subdirectory(example)
42 | endif()
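# A possible way to consume picobench from a parent project (a sketch, not part
# of the original file; the directory and target names other than
# picobench::picobench are hypothetical):
#
#   add_subdirectory(third_party/picobench)
#   add_executable(my_benchmarks my_benchmarks.cpp)
#   target_link_libraries(my_benchmarks PRIVATE picobench::picobench)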
43 | 
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
 1 | Copyright (c) 2017-2025 Borislav Stanimirov
 2 | 
 3 | Permission is hereby granted, free of charge, to any person obtaining a copy
 4 | of this software and associated documentation files (the "Software"), to deal
 5 | in the Software without restriction, including without limitation the rights
 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 7 | copies of the Software, and to permit persons to whom the Software is
 8 | furnished to do so, subject to the following conditions:
 9 | 
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | 
 2 | # picobench
 3 | [![Language](https://img.shields.io/badge/language-C++-blue.svg)](https://isocpp.org/) [![Standard](https://img.shields.io/badge/C%2B%2B-11-blue.svg)](https://en.wikipedia.org/wiki/C%2B%2B#Standardization) [![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 4 | 
 5 | [![Test](https://github.com/iboB/picobench/actions/workflows/test.yml/badge.svg)](https://github.com/iboB/picobench/actions/workflows/test.yml)
 6 | 
 7 | picobench is a tiny (micro) microbenchmarking library in a single header file.
 8 | 
 9 | It's designed to be easy to use and integrate and fast to compile while covering the most common features of a microbenchmarking library.
10 | 
11 | ## Example usage
12 | 
13 | Here's the complete code of a microbenchmark which compares adding elements to a `std::vector` with and without using `reserve`:
14 | 
15 | ```c++
16 | #define PICOBENCH_IMPLEMENT_WITH_MAIN
17 | #include "picobench/picobench.hpp"
18 | 
19 | #include <vector>
20 | #include <cstdlib> // for rand
21 | 
22 | // Benchmarking function written by the user:
23 | static void rand_vector(picobench::state& s)
24 | {
25 |     std::vector<int> v;
26 |     for (auto _ : s)
27 |     {
28 |         v.push_back(rand());
29 |     }
30 | }
31 | PICOBENCH(rand_vector); // Register the above function with picobench
32 | 
33 | // Another benchmarking function:
34 | static void rand_vector_reserve(picobench::state& s)
35 | {
36 |     std::vector<int> v;
37 |     v.reserve(s.iterations());
38 |     for (auto _ : s)
39 |     {
40 |         v.push_back(rand());
41 |     }
42 | }
43 | PICOBENCH(rand_vector_reserve);
44 | ```
45 | 
46 | The output of this benchmark might look like this:
47 | 
48 | ```
49 |  Name (* = baseline)      |   Dim   | Total ms  |  ns/op  |Baseline| Ops/second
50 | --------------------------|--------:|----------:|--------:|-------:|----------:
51 |  rand_vector *            |       8 |     0.001 |     167 |      - |  5974607.9
52 |  rand_vector_reserve      |       8 |     0.000 |      55 |  0.329 | 18181818.1
53 |  rand_vector *            |      64 |     0.004 |      69 |      - | 14343343.8
54 |  rand_vector_reserve      |      64 |     0.002 |      27 |  0.400 | 35854341.7
55 |  rand_vector *            |     512 |     0.017 |      33 |      - | 30192239.7
56 |  rand_vector_reserve      |     512 |     0.012 |      23 |  0.710 | 42496679.9
57 |  rand_vector *            |    4096 |     0.181 |      44 |      - | 22607850.9
58 |  rand_vector_reserve      |    4096 |     0.095 |      23 |  0.527 | 42891848.9
59 |  rand_vector *            |    8192 |     0.266 |      32 |      - | 30868196.3
60 |  rand_vector_reserve      |    8192 |     0.207 |      25 |  0.778 | 39668749.5
61 | ```
62 | 
63 | ...which tells us that using `reserve` yields a noticeable performance gain, though the effect becomes less pronounced as the number of inserted elements grows.
64 | 
65 | ## Documentation
66 | 
67 | To use picobench, you need to include `picobench.hpp` by either copying it into your project or adding this repo as a submodule to yours.
68 | 
69 | In one compilation unit (.cpp file) in the module (typically the benchmark executable) in which you use picobench, you need to define `PICOBENCH_IMPLEMENT_WITH_MAIN` (or `PICOBENCH_IMPLEMENT` if you want to write your own `main` function).
70 | 
71 | ### Creating benchmarks
72 | 
73 | A benchmark is a function which you've written with the signature `void (picobench::state& s)`. You need to register the function with the macro `PICOBENCH(func_name)` whose only argument is the function's name, as shown in the example above.
74 | 
75 | The library will run the benchmark function several times with different numbers of iterations, to simulate different problem spaces, and then collect the results in a report.
76 | 
77 | Typically a benchmark has a loop. To run the loop, use the `picobench::state` argument in a range-based for loop in your function. The time spent looping is measured for the benchmark. You can have initialization/deinitialization code outside of the loop and it won't be measured.
78 | 
79 | You can have multiple benchmarks in multiple files. All of them will be run when the executable starts.
80 | 
81 | Use `state::iterations`, as shown in the example, to base initialization on how many iterations the loop will make.
82 | 
83 | If you don't want the automatic time measurement, you can use `state::start_timer` and `state::stop_timer` to manually measure it, or use the RAII class `picobench::scope` for semi-automatic measurement.
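For instance, a semi-automatic measurement with `picobench::scope` might look like this (a minimal sketch; `make_test_input` and `process` are hypothetical user functions used only for illustration):

```c++
std::vector<int> make_test_input(int n); // hypothetical setup helper
void process(std::vector<int>& input, int i); // hypothetical code under test

static void benchmark_with_scope(picobench::state& s)
{
    auto input = make_test_input(s.iterations()); // setup code: not measured
    picobench::scope measure(s); // the constructor starts the timer
    for (int i = 0; i < s.iterations(); ++i)
    {
        process(input, i); // only this loop is measured
    }
} // the destructor of `measure` stops the timer here
PICOBENCH(benchmark_with_scope);
```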
 84 | 
 85 | Here's an example of a couple of benchmarks which don't use the range-based for loop for time measurement:
 86 | 
 87 | ```c++
 88 | void my_func(); // Function you want to benchmark
 89 | static void benchmark_my_func(picobench::state& s)
 90 | {
 91 |     s.start_timer(); // Manual start
 92 |     for (int i=0; i<s.iterations(); ++i)
201 |     s.set_result(reinterpret_cast<result_t>(new my_vector2(result)));
202 | }
203 | 
204 | bool compare_vectors(result_t a, result_t b)
205 | {
206 |     auto v1 = reinterpret_cast<const my_vector2*>(a);
207 |     auto v2 = reinterpret_cast<const my_vector2*>(b);
208 |     return v1->x == v2->x && v1->y == v2->y;
209 | }
210 | 
211 | ...
212 | 
213 | auto report = runner.generate_report(compare_vectors);
214 | 
215 | ```
216 | 
217 | 
218 | ### Other options
219 | 
220 | Other characteristics of a benchmark are:
221 | 
222 | * **Iterations**: (or "problem spaces") a vector of integers describing the set of iterations to be made for a benchmark. Set with `.iterations({i1, i2, i3...})`. The default is {8, 64, 512, 4096, 8192}.
223 | * **Label**: a string which is used for this benchmark in the report instead of the function name. Set with `.label("my label")`
224 | * **User data**: a user-defined number (`uintptr_t`) assigned to a benchmark which can be accessed via `state::user_data`
225 | 
226 | You can combine the options by concatenating them like this: `PICOBENCH(my_func).label("My Function").samples(2).iterations({1000, 10000, 50000});`
227 | 
228 | If you write your own main function, you can set the default iterations and samples for all benchmarks with `runner::set_default_state_iterations` and `runner::set_default_samples` *before* calling `runner::run_benchmarks`.
229 | 
230 | If you parse the command line or use the library-provided `main` function, you can also set the iterations and samples with command-line args:
231 | * `--iters=1000,5000,10000` will set the iterations for benchmarks which don't explicitly override them
232 | * `--samples=5` will set the samples for benchmarks which don't explicitly override them
233 | 
234 | ### Other command line arguments
235 | 
236 | If you're using the library-provided `main` function, it will also handle the following command line arguments:
237 | * `--out-fmt=<txt|con|csv>` - sets the output report format to full text (`txt`), concise text (`con`), or CSV (`csv`)
238 | * `--output=<filename>` - writes the output report to the given file (use `stdout` for standard output)
239 | * `--compare-results` - will compare results from benchmarks and trigger an error if they don't match
240 | 
241 | ### Misc
242 | 
243 | * The runner randomizes the order in which benchmarks are run. To get the same order on every run and every platform, pass an integer seed to `runner::run_benchmarks`.
244 | 
245 | Here's another example of a custom main function incorporating the above:
246 | 
247 | ```c++
248 | #define PICOBENCH_IMPLEMENT
249 | #include "picobench/picobench.hpp"
250 | ...
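// (illustrative comment, not in the original example: the "..." above stands
// for benchmark functions registered with PICOBENCH as usual, e.g.
//   static void bench_my_func(picobench::state& s) { for (auto _ : s) my_func(); }
//   PICOBENCH(bench_my_func);
// )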
251 | int main()
252 | {
253 |     // User-defined code which makes global initializations
254 |     custom_global_init();
255 | 
256 |     picobench::runner runner;
257 |     // Disregard command-line for simplicity
258 | 
259 |     // Two sets of iterations
260 |     runner.set_default_state_iterations({10000, 50000});
261 | 
262 |     // One sample per benchmark because the huge numbers are expected to compensate
263 |     // for external factors
264 |     runner.set_default_samples(1);
265 | 
266 |     // Run the benchmarks with some seed which guarantees the same order every time
267 |     runner.run_benchmarks(123);
268 |     auto report = runner.generate_report();
269 | 
270 |     // Output to some file
271 |     std::ofstream csv("my.csv");
272 |     report.to_csv(csv);
273 | 
274 |     return 0;
275 | }
276 | ```
277 | 
278 | ## Contributing
279 | 
280 | Contributions in the form of issues and pull requests are welcome.
281 | 
282 | ## License
283 | 
284 | This software is distributed under the MIT Software License.
285 | 
286 | See accompanying file LICENSE.txt or copy [here](https://opensource.org/licenses/MIT).
287 | 
288 | Copyright © 2017-2025 [Borislav Stanimirov](http://github.com/iboB)
--------------------------------------------------------------------------------
/VSOpenFileFromDirFilters.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "dirs": [
 3 |         ".git",
 4 |         ".vs",
 5 |         "build",
 6 |         "out"
 7 |     ]
 8 | }
--------------------------------------------------------------------------------
/dev.cmake:
--------------------------------------------------------------------------------
 1 | # Copyright (c) Borislav Stanimirov
 2 | # SPDX-License-Identifier: MIT
 3 | #
 4 | set(CMAKE_CXX_STANDARD 11)
 5 | set(CMAKE_CXX_EXTENSIONS OFF)
 6 | set(CMAKE_CXX_STANDARD_REQUIRED ON)
 7 | 
 8 | if(MSVC)
 9 |     set(icm_compiler_flags "-D_CRT_SECURE_NO_WARNINGS /Zc:__cplusplus /permissive-\
10 |         /w34100 /w34701 /w34702 /w34703 /w34706 /w34714 /w34913\
11 |         /wd4251 /wd4275"
12 |     )
13 | else()
14 |     set(icm_compiler_flags "-Wall -Wextra")
15 | endif()
16 | 
17 | if(SAN_ADDR)
18 |     if(MSVC)
19 |         set(icm_san_flags "/fsanitize=address")
20 |     elseif(APPLE)
21 |         # apple clang doesn't support the leak sanitizer
22 |         set(icm_san_flags "-fsanitize=address,undefined -pthread -g")
23 |     else()
24 |         set(icm_san_flags "-fsanitize=address,undefined,leak -pthread -g")
25 |     endif()
26 | endif()
27 | 
28 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${icm_compiler_flags} ${icm_san_flags}")
29 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${icm_compiler_flags} ${icm_san_flags}")
30 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${icm_san_flags}")
31 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${icm_san_flags}")
--------------------------------------------------------------------------------
/example/CMakeLists.txt:
--------------------------------------------------------------------------------
 1 | macro(pb_example name)
 2 |     set(tgt picobench-example-${name})
 3 |     add_executable(${tgt} ${ARGN})
 4 |     target_link_libraries(${tgt} picobench::picobench)
 5 |     set_target_properties(${tgt} PROPERTIES FOLDER example)
 6 |     add_custom_target(
 7 |         picobench-run-example-${name}
 8 |         COMMAND ${tgt}
 9 |     )
10 | endmacro()
11 | 
12 | pb_example(basic basic.cpp)
13 | pb_example(locks locks.cpp)
--------------------------------------------------------------------------------
/example/basic.cpp:
--------------------------------------------------------------------------------
 1 | #define PICOBENCH_DEBUG
 2 | #define PICOBENCH_IMPLEMENT_WITH_MAIN
 3 | #include "picobench/picobench.hpp"
 4 | 
 5 | #include <vector>
 6 | #include <deque>
 7 | #include <cstdlib>
 8 | 
 9 | void rand_vector(picobench::state& s)
10 | {
11 |     std::vector<int> v;
12 |     for (auto _ : s)
13 |     {
14 |         v.push_back(rand());
15 |     }
16 | }
17 | PICOBENCH(rand_vector);
18 | 
19 | void rand_vector_reserve(picobench::state& s)
20 | {
21 |     std::vector<int> v;
22 |     v.reserve(s.iterations());
23 |     for (auto _ : s)
24 |     {
25 |         v.push_back(rand());
26 |     }
27 | }
28 | PICOBENCH(rand_vector_reserve);
29 | 
30 | void rand_deque(picobench::state& s)
31 | {
32 |     std::deque<int> v;
33 |     for (auto _ : s)
34 |     {
35 |         v.push_back(rand());
36 |     }
37 | }
38 | PICOBENCH(rand_deque);
39 | 
--------------------------------------------------------------------------------
/example/locks.cpp:
--------------------------------------------------------------------------------
 1 | #define PICOBENCH_DEBUG
 2 | #define PICOBENCH_IMPLEMENT_WITH_MAIN
 3 | #define PICOBENCH_DEFAULT_ITERATIONS {1000, 10000, 100000, 1000000}
 4 | #include "picobench/picobench.hpp"
 5 | 
 6 | #include <future>
 7 | #include <mutex>
 8 | #include <atomic>
 9 | #include <thread>
10 | 
11 | volatile int sum;
12 | 
13 | template <typename Locker>
14 | int calc_sum(bool inc, const int n, Locker& lock)
15 | {
16 |     for(int i=0; i<n; ++i)
17 |     {
18 |         std::lock_guard<Locker> guard(lock);
19 |         if (inc) sum += 2;
20 |         else sum -= 3;
21 |     }
22 |     return n;
23 | }
24 | 
25 | template <typename Locker>
26 | void bench(picobench::state& s)
27 | {
28 |     Locker lock;
29 |     sum = 0;
30 |     picobench::scope time(s);
31 |     auto f = std::async(std::launch::async, std::bind(calc_sum<Locker>, true, s.iterations(), std::ref(lock)));
32 |     calc_sum(false, s.iterations(), lock);
33 |     f.wait();
34 |     s.set_result(picobench::result_t(sum));
35 | }
36 | 
37 | template <void (*Yield)()>
38 | struct spinlock
39 | {
40 |     void lock()
41 |     {
42 |         while(std::atomic_flag_test_and_set_explicit(
43 |             &spin_flag,
44 |             std::memory_order_acquire))
45 |             Yield();
46 |     }
47 | 
48 |     void unlock()
49 |     {
50 |         std::atomic_flag_clear_explicit(
51 |             &spin_flag,
52 |             std::memory_order_release);
53 |     }
54 | 
55 |     std::atomic_flag spin_flag = ATOMIC_FLAG_INIT;
56 | };
57 | 
58 | inline void noop() {}
59 | 
60 | using noop_spin = spinlock<noop>;
61 | using yield_spin = spinlock<std::this_thread::yield>;
62 | 
63 | using namespace std;
64 | 
65 | PICOBENCH(bench<mutex>);
66 | PICOBENCH(bench<noop_spin>);
67 | PICOBENCH(bench<yield_spin>);
68 | 
69 | #if defined(__X86_64__) || defined(__x86_64) || defined(_M_X64)
70 | #include <emmintrin.h>
71 | inline void pause() { _mm_pause(); }
72 | using pause_spin = spinlock<pause>;
73 | PICOBENCH(bench<pause_spin>);
74 | #endif
--------------------------------------------------------------------------------
/include/picobench/picobench.hpp:
--------------------------------------------------------------------------------
 1 | // picobench v2.08
 2 | // https://github.com/iboB/picobench
 3 | //
 4 | // A micro microbenchmarking library in a single header file
 5 | //
 6 | // SPDX-License-Identifier: MIT
 7 | //
 8 | // MIT License
 9 | //
10 | // Copyright(c) 2017-2025 Borislav Stanimirov
11 | //
12 | // Permission is hereby granted, free of charge, to any person obtaining a copy
13 | // of this software and associated documentation files(the "Software"), to deal
14 | // in the Software without restriction, including without limitation the rights
15 | // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
16 | // copies of the Software, and to permit persons to whom the Software is
17 | // furnished to do so, subject to the following conditions :
18 | //
19 | // The above copyright notice and this permission notice shall be included in all
20 | // copies or substantial portions of the Software.
21 | //
22 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
25 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 | // SOFTWARE.
29 | //
30 | //
31 | //                  VERSION HISTORY
32 | //
33 | //  2.08 (2025-04-04) Internal. This file was not affected
34 | //  2.07 (2024-03-06) * Text output is now markdown compatible
35 | //                    * Allow including picobench.hpp before defining
36 | //                      PICOBENCH_IMPLEMENT
37 | //  2.06 (2023-11-24) Internal. This file was not affected
38 | //  2.05 (2023-04-26) Fixed MinGW build
39 | //  2.04 (2023-04-12) Added CLI args to run specific benchmarks or suites
40 | //  2.03 (2023-03-21) * Added PICOBENCH_UNIQUE_SYM_SUFFIX
41 | //                    * Fixed several warnings
42 | //  2.02 (2023-02-16) * Fixed same-func warning if user data is different
43 | //                    * Macro PICOBENCH_NAMESPACE to change namespace
44 | //                    * Changed marking of baseline in human-readable reports
45 | //                    * Minor internal changes in strings
46 | //  2.01 (2019-03-03) * Fixed android build when binding to a single core
47 | //                    * Minor doc fixes
48 | //  2.00 (2018-10-30) * Breaking change! runner::run_benchmarks doesn't return
49 | //                      a report anymore. The report is generated by
50 | //                      runner::generate_report instead
51 | //                    * Breaking change! report_output_format doesn't accept
52 | //                      output streams as arguments. Use set_output_streams.
53 | //                    * Potentially breaking change (gcc and clang)! Always set
54 | //                      thread affinity to first core. Macro to turn this off.
 55 | //                    * Added runner::run which performs a full execution
 56 | //                    * Added benchmark results and results comparison
 57 | //                    * Added error enum
 58 | //                    * Macro option to allow a std::function as a benchmark
 59 | //                    * Macros for default iterations and samples
 60 | //                    * Allowing local registration of benchmarks in a runner
 61 | //                    * Added local_runner which doesn't consume registry
 62 | //                    * More force-inline functions in states
 63 | //                    * Fixed some potential compilation warnings
 64 | //                    * Removed tests from header
 65 | //                    * Anonymous namespace for impl-only classes and funcs
 66 | //                    * Added setters and getters for every config option
 67 | //  1.05 (2018-07-17) * Counting iterations of state
 68 | //                    * Optionally set thread affinity when running benchmarks
 69 | //                      so as not to miss cpu cycles with the high res clock
 70 | //  1.04 (2018-02-06) * User data for benchmarks, which can be seen from states
 71 | //                    * `add_custom_duration` to states so the user can modify time
 72 | //                    * Text table format fixes
 73 | //                    * Custom cmd opts in runner
 74 | //                    * --version CLI command
 75 | //  1.03 (2018-01-05) Added helper methods for easier browsing of reports
 76 | //  1.02 (2018-01-04) Added parsing of command line
 77 | //  1.01 (2018-01-03) * Only taking the fastest sample into account
 78 | //                    * Set default number of samples to 2
 79 | //                    * Added CSV output
 80 | //  1.00 (2018-01-01) Initial release
 81 | //  0.01 (2017-12-28) Initial prototype release
 82 | //
 83 | //
 84 | //                  EXAMPLE
 85 | //
 86 | // void my_function(); // the function you want to benchmark
 87 | //
 88 | // // write your benchmarking code in a function like this
 89 | // static void benchmark_my_function(picobench::state& state)
 90 | // {
 91 | //     // use the state in a range-based for loop to call your code
 92 | //     for (auto _ : state)
 93 | //         my_function();
 94 | // }
 95 | // // create a picobench with your benchmarking code
 96 | // PICOBENCH(benchmark_my_function);
 97 | //
 98 | //
 99 | //                  BASIC DOCUMENTATION
100 | //
101 | // A very brief usage guide follows. For more detailed documentation see the
102 | // README here: https://github.com/iboB/picobench/blob/master/README.md
103 | //
104 | // Simply include this file wherever you need.
105 | // You need to define PICOBENCH_IMPLEMENT_WITH_MAIN (or PICOBENCH_IMPLEMENT if
106 | // you want to write your own main function) in one compilation unit to have
107 | // the implementation compiled there.
108 | //
109 | // The benchmark code must be a `void (picobench::state&)` function which
110 | // you have written. Benchmarks are registered using the `PICOBENCH` macro
111 | // where the only argument is the function's name.
112 | //
113 | // You can have multiple benchmarks in multiple files. All will be run when the
114 | // executable starts.
115 | //
116 | // Typically a benchmark has a loop. To run the loop use the state argument in
117 | // a range-based for loop in your function. The time spent looping is measured
118 | // for the benchmark. You can have initialization/deinitialization code outside
119 | // of the loop and it won't be measured.
120 | //
121 | #if !defined(PICOBENCH_HPP_INCLUDED)
122 | #define PICOBENCH_HPP_INCLUDED
123 | 
124 | #include <cstdint>
125 | #include <chrono>
126 | #include <vector>
127 | 
128 | #if defined(PICOBENCH_STD_FUNCTION_BENCHMARKS)
129 | #   include <functional>
130 | #endif
131 | 
132 | #define PICOBENCH_VERSION 2.05
133 | #define PICOBENCH_VERSION_STR "2.05"
134 | 
135 | #if defined(PICOBENCH_DEBUG)
136 | #   include <cassert>
137 | #   define I_PICOBENCH_ASSERT assert
138 | #else
139 | #   define I_PICOBENCH_ASSERT(...)
140 | #endif
141 | 
142 | #if defined(__GNUC__)
143 | #   define PICOBENCH_INLINE __attribute__((always_inline))
144 | #elif defined(_MSC_VER)
145 | #   define PICOBENCH_INLINE __forceinline
146 | #else
147 | #   define PICOBENCH_INLINE inline
148 | #endif
149 | 
150 | #if !defined(PICOBENCH_NAMESPACE)
151 | #   define PICOBENCH_NAMESPACE picobench
152 | #endif
153 | 
154 | namespace PICOBENCH_NAMESPACE
155 | {
156 | 
157 | #if defined(_MSC_VER) || defined(__MINGW32__) || defined(PICOBENCH_TEST)
158 | struct high_res_clock
159 | {
160 |     typedef long long rep;
161 |     typedef std::nano period;
162 |     typedef std::chrono::duration<rep, period> duration;
163 |     typedef std::chrono::time_point<high_res_clock> time_point;
164 |     static const bool is_steady = true;
165 | 
166 |     static time_point now();
167 | };
168 | #else
169 | using high_res_clock = std::chrono::high_resolution_clock;
170 | #endif
171 | 
172 | using result_t = intptr_t;
173 | 
174 | class state
175 | {
176 | public:
177 |     explicit state(int num_iterations, uintptr_t user_data = 0)
178 |         : _user_data(user_data)
179 |         , _iterations(num_iterations)
180 |     {
181 |         I_PICOBENCH_ASSERT(_iterations > 0);
182 |     }
183 | 
184 |     int iterations() const { return _iterations; }
185 | 
186 |     int64_t duration_ns() const { return _duration_ns; }
187 |     void add_custom_duration(int64_t duration_ns) { _duration_ns += duration_ns; }
188 | 
189 |     uintptr_t user_data() const { return _user_data; }
190 | 
191 |     // optionally set result of benchmark
192 |     // this can be used as a value sink to prevent optimizations
193 |     // or a way to check whether benchmarks produce the same results
194 |     void set_result(uintptr_t data) { _result = data; }
195 |     result_t result() const { return _result; }
196 | 
197 |     PICOBENCH_INLINE
198 |     void start_timer()
199 |     {
200 |         _start = high_res_clock::now();
201 |     }
202 | 
203 |     PICOBENCH_INLINE
204 |     void stop_timer()
205 |     {
206 |         auto duration = high_res_clock::now() - _start;
207 |         _duration_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count();
208 |     }
209 | 
210 |     struct iterator
211 |     {
212 |         PICOBENCH_INLINE
213 |         iterator(state* parent)
214 |             : _counter(0)
215 |             , _lim(parent->iterations())
216 |             , _state(parent)
217 |         {
218 |             I_PICOBENCH_ASSERT(_counter < _lim);
219 |         }
220 | 
221 |         PICOBENCH_INLINE
222 |         iterator()
223 |             : _counter(0)
224 |             , _lim(0)
225 |             , _state(nullptr)
226 |         {}
227 | 
228 |         PICOBENCH_INLINE
229 |         iterator& operator++()
230 |         {
231 |             I_PICOBENCH_ASSERT(_counter < _lim);
232 |             ++_counter;
233 |             return *this;
234 |         }
235 | 
236 |         PICOBENCH_INLINE
237 |         bool operator!=(const iterator&) const
238 |         {
239 |             if (_counter < _lim) return true;
240 |             _state->stop_timer();
241 |             return false;
242 |         }
243 | 
244 |         PICOBENCH_INLINE
245 |         int operator*() const
246 |         {
247 |             return _counter;
248 |         }
249 | 
250 |     private:
251 |         int _counter;
252 |         const int _lim;
253 |         state* _state;
254 |     };
255 | 
256 |     PICOBENCH_INLINE
257 |     iterator begin()
258 |     {
259 |         start_timer();
260 |         return iterator(this);
261 |     }
262 | 
263 |     PICOBENCH_INLINE
264 |     iterator end()
265 |     {
266 |         return iterator();
267 |     }
268 | 
269 | private:
270 |     high_res_clock::time_point _start;
271 |     int64_t _duration_ns = 0;
272 |     uintptr_t _user_data;
273 |     int _iterations;
274 |     result_t _result = 0;
275 | };
276 | 
277 | // this can be used for manual measurement
278 | class scope
279 | {
280 | public:
281 |     PICOBENCH_INLINE
282 |     scope(state& s)
283 |         : _state(s)
284 |     {
285 |         _state.start_timer();
286 |     }
287 | 
288 |     PICOBENCH_INLINE
289 |     ~scope()
290 |     {
291 |         _state.stop_timer();
292 |     }
293 | private:
294 |     state& _state;
295 | };
296 | 
297 | #if defined(PICOBENCH_STD_FUNCTION_BENCHMARKS)
298 | using benchmark_proc = std::function<void(state&)>;
299 | #else
300 | using benchmark_proc = void(*)(state&);
301 | #endif
302 | 
303 | class benchmark
304 | {
305 | public:
306 |     const char* name() const { return _name; }
307 | 
308 |     benchmark& iterations(std::vector<int> data) { _state_iterations = std::move(data); return *this; }
309 |     benchmark& samples(int n) { _samples = n; return *this; }
310 |     benchmark& label(const char* label) { _name = label; return *this; }
311 |     benchmark& baseline(bool b = true) { _baseline = b; return *this; }
312 |     benchmark& user_data(uintptr_t data) { _user_data = data; return *this; }
313 | 
314 | protected:
315 |     friend class runner;
316 | 
317 |     benchmark(const char* name, benchmark_proc proc);
318 | 
319 |     const char* _name;
320 |     const benchmark_proc _proc;
321 |     bool _baseline = false;
322 | 
323 |     uintptr_t _user_data = 0;
324 |     std::vector<int> _state_iterations;
325 |     int _samples = 0;
326 | };
327 | 
328 | // used for globally registered functions
329 | // note that you can instantiate a runner and register local benchmarks for it alone
330 | class global_registry
331 | {
332 | public:
333 |     static int set_bench_suite(const char* name);
334 |     static benchmark& new_benchmark(const char* name, benchmark_proc proc);
335 | };
336 | 
337 | }
338 | 
339 | // Optionally define PICOBENCH_UNIQUE_SYM_SUFFIX to replace __LINE__ with something
340 | // non standard like __COUNTER__ in case you need multiple PICOBENCH macros in a
341 | // macro of yours
342 | #if !defined(PICOBENCH_UNIQUE_SYM_SUFFIX)
343 | #define PICOBENCH_UNIQUE_SYM_SUFFIX __LINE__
344 | #endif
345 | 
346 | #define I_PICOBENCH_PP_CAT(a, b) I_PICOBENCH_PP_INTERNAL_CAT(a, b)
347 | #define I_PICOBENCH_PP_INTERNAL_CAT(a, b) a##b
348 | 
349 | #define PICOBENCH_SUITE(name) \
350 |     static int I_PICOBENCH_PP_CAT(picobench_suite, PICOBENCH_UNIQUE_SYM_SUFFIX) = \
351 |     PICOBENCH_NAMESPACE::global_registry::set_bench_suite(name)
352 | 
353 | #define PICOBENCH(func) \
354 |     static auto& I_PICOBENCH_PP_CAT(picobench, PICOBENCH_UNIQUE_SYM_SUFFIX) = \
355 |     PICOBENCH_NAMESPACE::global_registry::new_benchmark(#func, func)
356 | 
357 | #if defined(PICOBENCH_IMPLEMENT_WITH_MAIN)
358 | #   define PICOBENCH_IMPLEMENT
359 | #   define PICOBENCH_IMPLEMENT_MAIN
360 | #endif
361 | 
362 | #endif // PICOBENCH_HPP_INCLUDED
363 | 
364 | #if defined(PICOBENCH_IMPLEMENT)
365 | 
366 | #include <algorithm>
367 | #include <cstdlib>
368 | #include <cstring>
369 | #include <fstream>
370 | #include <iomanip>
371 | #include <iostream>
372 | #include <map>
373 | #include <memory>
374 | #include <random>
375 | 
376 | #if defined(_WIN32)
377 | #   define WIN32_LEAN_AND_MEAN
378 | #   include <windows.h>
379 | #else
380 | #   if !defined(PICOBENCH_DONT_BIND_TO_ONE_CORE)
381 | #       if defined(__APPLE__)
382 | #           include <mach/mach.h>
383 | #       else
384 | #           include <sched.h>
385 | #       endif
386 | #   endif
387 | #endif
388 | 
389 | namespace PICOBENCH_NAMESPACE
390 | {
391 | 
392 | // namespace
393 | // {
394 | 
395 | enum error_t
396 | {
397 |     no_error,
398 |     error_bad_cmd_line_argument, // ill-formed command-line argument
399 |     error_unknown_cmd_line_argument, // command argument looks like a picobench one, but isn't
400 |     error_sample_compare, // benchmark produced different results across samples
401 |     error_benchmark_compare, // two benchmarks of the same suite and dimension produced different results
402 | };
403 | 
404 | class report
405 | {
406 | public:
407 |     struct benchmark_problem_space
408 |     {
409 |         int dimension; // number of iterations for the problem space
410 |         int samples; // number of samples taken
411 | 
int64_t total_time_ns; // fastest sample!!! 412 | result_t result; // result of fastest sample 413 | }; 414 | struct benchmark 415 | { 416 | const char* name; 417 | bool is_baseline; 418 | std::vector data; 419 | }; 420 | 421 | struct suite 422 | { 423 | const char* name; 424 | std::vector benchmarks; // benchmark view 425 | 426 | const benchmark* find_benchmark(const char* bname) const 427 | { 428 | for (auto& b : benchmarks) 429 | { 430 | if (strcmp(b.name, bname) == 0) 431 | return &b; 432 | } 433 | 434 | return nullptr; 435 | } 436 | 437 | const benchmark* find_baseline() const 438 | { 439 | for (auto& b : benchmarks) 440 | { 441 | if (b.is_baseline) 442 | return &b; 443 | } 444 | 445 | return nullptr; 446 | } 447 | }; 448 | 449 | std::vector suites; 450 | error_t error = no_error; 451 | 452 | const suite* find_suite(const char* name) const 453 | { 454 | for (auto& s : suites) 455 | { 456 | if (strcmp(s.name, name) == 0) 457 | return &s; 458 | } 459 | 460 | return nullptr; 461 | } 462 | 463 | void to_text(std::ostream& out) const 464 | { 465 | using namespace std; 466 | for (auto& suite : suites) 467 | { 468 | if (suite.name) 469 | { 470 | out << "## " << suite.name << ":\n"; 471 | } 472 | 473 | out.put('\n'); 474 | out << 475 | " Name (* = baseline) | Dim | Total ms | ns/op |Baseline| Ops/second\n"; 476 | out << 477 | "--------------------------|--------:|----------:|--------:|-------:|----------:\n"; 478 | 479 | auto problem_space_view = get_problem_space_view(suite); 480 | for (auto& ps : problem_space_view) 481 | { 482 | const problem_space_benchmark* baseline = nullptr; 483 | for (auto& bm : ps.second) 484 | { 485 | if (bm.is_baseline) 486 | { 487 | baseline = &bm; 488 | break; 489 | } 490 | } 491 | 492 | for (auto& bm : ps.second) 493 | { 494 | out << ' ' << bm.name; 495 | auto pad = 24 - int(strlen(bm.name)); 496 | if (bm.is_baseline) 497 | { 498 | out << " *"; 499 | pad -= 2; 500 | } 501 | for (int i = 0; i < pad; ++i) { 502 | out.put(' '); 503 | } 504 | 505 | out << " |" 506 | << setw(8) << ps.first << " |" 507 | << setw(10) << fixed << setprecision(3) << double(bm.total_time_ns) / 1000000.0 << " |"; 508 | 509 | auto ns_op = (bm.total_time_ns / ps.first); 510 | if (ns_op > 99999999) 511 | { 512 | int e = 0; 513 | while (ns_op > 999999) 514 | { 515 | ++e; 516 | ns_op /= 10; 517 | } 518 | out << ns_op << 'e' << e; 519 | } 520 | else 521 | { 522 | out << setw(8) << ns_op; 523 | } 524 | 525 | out << " |"; 526 | 527 | if (baseline == &bm) 528 | { 529 | out << " - |"; 530 | } 531 | else if (baseline) 532 | { 533 | out << setw(7) << fixed << setprecision(3) 534 | << double(bm.total_time_ns) / double(baseline->total_time_ns) << " |"; 535 | } 536 | else 537 | { 538 | // no baseline to compare to 539 | out << " ??? 
|"; 540 | } 541 | 542 | auto ops_per_sec = ps.first * (1000000000.0 / double(bm.total_time_ns)); 543 | out << setw(11) << fixed << setprecision(1) << ops_per_sec << "\n"; 544 | } 545 | } 546 | out.put('\n'); 547 | } 548 | } 549 | 550 | void to_text_concise(std::ostream& out) 551 | { 552 | using namespace std; 553 | for (auto& suite : suites) 554 | { 555 | if (suite.name) 556 | { 557 | out << "## " << suite.name << ":\n"; 558 | } 559 | 560 | out.put('\n'); 561 | out << 562 | " Name (* = baseline) | ns/op | Baseline | Ops/second\n"; 563 | out << 564 | "--------------------------|--------:|---------:|-----------:\n"; 565 | 566 | const benchmark* baseline = nullptr; 567 | for (auto& bm : suite.benchmarks) 568 | { 569 | if (bm.is_baseline) 570 | { 571 | baseline = &bm; 572 | break; 573 | } 574 | } 575 | I_PICOBENCH_ASSERT(baseline); 576 | int64_t baseline_total_time = 0; 577 | int baseline_total_iterations = 0; 578 | for (auto& d : baseline->data) 579 | { 580 | baseline_total_time += d.total_time_ns; 581 | baseline_total_iterations += d.dimension; 582 | } 583 | int64_t baseline_ns_per_op = baseline_total_time / baseline_total_iterations; 584 | 585 | for (auto& bm : suite.benchmarks) 586 | { 587 | out << ' ' << bm.name; 588 | auto pad = 24 - int(strlen(bm.name)); 589 | if (bm.is_baseline) 590 | { 591 | out << " *"; 592 | pad -= 2; 593 | } 594 | for (int i = 0; i < pad; ++i) { 595 | out.put(' '); 596 | } 597 | 598 | int64_t total_time = 0; 599 | int total_iterations = 0; 600 | for (auto& d : bm.data) 601 | { 602 | total_time += d.total_time_ns; 603 | total_iterations += d.dimension; 604 | } 605 | int64_t ns_per_op = total_time / total_iterations; 606 | 607 | out << " |" << setw(8) << ns_per_op << " |"; 608 | 609 | if (&bm == baseline) 610 | { 611 | out << " - |"; 612 | } 613 | else 614 | { 615 | out << setw(9) << fixed << setprecision(3) 616 | << double(ns_per_op) / double(baseline_ns_per_op) << " |"; 617 | } 618 | 619 | auto ops_per_sec = total_iterations * (1000000000.0 / double(total_time)); 620 | out << setw(12) << fixed << setprecision(1) << ops_per_sec << "\n"; 621 | } 622 | 623 | out.put('\n'); 624 | } 625 | } 626 | 627 | void to_csv(std::ostream& out, bool header = true) const 628 | { 629 | using namespace std; 630 | 631 | if (header) 632 | { 633 | out << "Suite,Benchmark,b,D,S,\"Total ns\",Result,\"ns/op\",Baseline\n"; 634 | } 635 | 636 | for (auto& suite : suites) 637 | { 638 | const benchmark* baseline = nullptr; 639 | for (auto& bm : suite.benchmarks) 640 | { 641 | if (bm.is_baseline) 642 | { 643 | baseline = &bm; 644 | break; 645 | } 646 | } 647 | I_PICOBENCH_ASSERT(baseline); 648 | 649 | for (auto& bm : suite.benchmarks) 650 | { 651 | for (auto& d : bm.data) 652 | { 653 | if (suite.name) 654 | { 655 | out << '"' << suite.name << '"';; 656 | } 657 | out << ",\"" << bm.name << "\","; 658 | if (&bm == baseline) 659 | { 660 | out << '*'; 661 | } 662 | out << ',' 663 | << d.dimension << ',' 664 | << d.samples << ',' 665 | << d.total_time_ns << ',' 666 | << d.result << ',' 667 | << (d.total_time_ns / d.dimension) << ','; 668 | 669 | if (baseline) 670 | { 671 | for (auto& bd : baseline->data) 672 | { 673 | if (bd.dimension == d.dimension) 674 | { 675 | out << fixed << setprecision(3) << (double(d.total_time_ns) / double(bd.total_time_ns)); 676 | } 677 | } 678 | } 679 | 680 | out << '\n'; 681 | } 682 | } 683 | } 684 | } 685 | 686 | struct problem_space_benchmark 687 | { 688 | const char* name; 689 | bool is_baseline; 690 | int64_t total_time_ns; // fastest sample!!! 
691 | result_t result; // result of fastest sample 692 | }; 693 | 694 | static std::map> get_problem_space_view(const suite& s) 695 | { 696 | std::map> res; 697 | for (auto& bm : s.benchmarks) 698 | { 699 | for (auto& d : bm.data) 700 | { 701 | auto& pvbs = res[d.dimension]; 702 | pvbs.push_back({ bm.name, bm.is_baseline, d.total_time_ns, d.result }); 703 | } 704 | } 705 | return res; 706 | } 707 | 708 | private: 709 | }; 710 | 711 | class benchmark_impl : public benchmark 712 | { 713 | public: 714 | benchmark_impl(const char* name, benchmark_proc proc) 715 | : benchmark(name, proc) 716 | {} 717 | 718 | private: 719 | friend class runner; 720 | 721 | // state 722 | std::vector _states; // length is _samples * _state_iterations.size() 723 | std::vector::iterator _istate; 724 | }; 725 | 726 | class picostring 727 | { 728 | public: 729 | picostring() = default; 730 | explicit picostring(const char* text) 731 | : str(text) 732 | , len(int(strlen(text))) 733 | {} 734 | picostring(const char* text, int len) 735 | : str(text) 736 | , len(len) 737 | {} 738 | 739 | const char* str; 740 | int len = 0; 741 | 742 | // checks whether other begins with this string 743 | bool is_start_of(const char* other) const 744 | { 745 | return strncmp(str, other, size_t(len)) == 0; 746 | } 747 | 748 | bool operator==(const picostring& other) const 749 | { 750 | if (len != other.len) return false; 751 | return strncmp(str, other.str, size_t(len)) == 0; 752 | } 753 | 754 | bool operator==(const char* other) const 755 | { 756 | return operator==(picostring(other)); 757 | } 758 | }; 759 | 760 | class null_streambuf : public std::streambuf 761 | { 762 | public: 763 | virtual int overflow(int c) override { return c; } 764 | }; 765 | 766 | struct null_stream : public std::ostream 767 | { 768 | null_stream() : std::ostream(&_buf) {} 769 | private: 770 | null_streambuf _buf; 771 | } cnull; 772 | 773 | enum class report_output_format 774 | { 775 | text, 776 | concise_text, 777 | csv, 778 | }; 779 | 780 | #if !defined(PICOBENCH_DEFAULT_ITERATIONS) 781 | # define PICOBENCH_DEFAULT_ITERATIONS { 8, 64, 512, 4096, 8192 } 782 | #endif 783 | 784 | #if !defined(PICOBENCH_DEFAULT_SAMPLES) 785 | # define PICOBENCH_DEFAULT_SAMPLES 2 786 | #endif 787 | 788 | using benchmarks_vector = std::vector>; 789 | struct rsuite 790 | { 791 | const char* name; 792 | benchmarks_vector benchmarks; 793 | }; 794 | 795 | class registry 796 | { 797 | public: 798 | benchmark& add_benchmark(const char* name, benchmark_proc proc) 799 | { 800 | auto b = new benchmark_impl(name, proc); 801 | benchmarks_for_current_suite().emplace_back(b); 802 | return *b; 803 | } 804 | 805 | void set_suite(const char* name) 806 | { 807 | _current_suite_name = name; 808 | } 809 | 810 | const char*& current_suite_name() 811 | { 812 | return _current_suite_name; 813 | } 814 | 815 | benchmarks_vector& benchmarks_for_current_suite() 816 | { 817 | for (auto& s : _suites) 818 | { 819 | if (s.name == _current_suite_name) 820 | return s.benchmarks; 821 | 822 | if (s.name && _current_suite_name && strcmp(s.name, _current_suite_name) == 0) 823 | return s.benchmarks; 824 | } 825 | _suites.push_back({ _current_suite_name, {} }); 826 | return _suites.back().benchmarks; 827 | } 828 | 829 | protected: 830 | friend class runner; 831 | const char* _current_suite_name = nullptr; 832 | std::vector _suites; 833 | }; 834 | 835 | registry& g_registry() 836 | { 837 | static registry r; 838 | return r; 839 | } 840 | 841 | class runner : public registry 842 | { 843 | public: 844 | runner(bool local = 
false) 845 | : _default_state_iterations(PICOBENCH_DEFAULT_ITERATIONS) 846 | , _default_samples(PICOBENCH_DEFAULT_SAMPLES) 847 | { 848 | if (!local) 849 | { 850 | _suites = std::move(g_registry()._suites); 851 | } 852 | } 853 | 854 | int run(int benchmark_random_seed = -1) 855 | { 856 | if (should_run()) 857 | { 858 | run_benchmarks(benchmark_random_seed); 859 | auto report = generate_report(); 860 | std::ostream* out = _stdout; 861 | std::ofstream fout; 862 | if (preferred_output_filename()) 863 | { 864 | fout.open(preferred_output_filename()); 865 | if (!fout.is_open()) 866 | { 867 | std::cerr << "Error: Could not open output file `" << preferred_output_filename() << "`\n"; 868 | return 1; 869 | } 870 | out = &fout; 871 | } 872 | 873 | switch (preferred_output_format()) 874 | { 875 | case report_output_format::text: 876 | report.to_text(*out); 877 | break; 878 | case report_output_format::concise_text: 879 | report.to_text_concise(*out); 880 | break; 881 | case report_output_format::csv: 882 | report.to_csv(*out); 883 | break; 884 | } 885 | } 886 | return error(); 887 | } 888 | 889 | void run_benchmarks(int random_seed = -1) 890 | { 891 | I_PICOBENCH_ASSERT(_error == no_error && _should_run); 892 | 893 | if (random_seed == -1) 894 | { 895 | random_seed = int(std::random_device()()); 896 | } 897 | 898 | std::minstd_rand rnd(random_seed); 899 | 900 | // vector of all benchmarks 901 | std::vector benchmarks; 902 | for (auto& suite : _suites) 903 | { 904 | // also identify a baseline in this loop 905 | // if there is no explicit one, set the first one as a baseline 906 | bool found_baseline = false; 907 | for (auto irb = suite.benchmarks.begin(); irb != suite.benchmarks.end(); ++irb) 908 | { 909 | auto& rb = *irb; 910 | rb->_states.clear(); // clear states so we can safely call run_benchmarks multiple times 911 | benchmarks.push_back(rb.get()); 912 | if (rb->_baseline) 913 | { 914 | found_baseline = true; 915 | } 916 | 917 | #if !defined(PICOBENCH_STD_FUNCTION_BENCHMARKS) 918 | // check for same func 919 | for (auto ib = irb+1; ib != suite.benchmarks.end(); ++ib) 920 | { 921 | auto& b = *ib; 922 | if (rb->_proc == b->_proc && rb->_user_data == b->_user_data) 923 | { 924 | *_stdwarn << "Warning: " << rb->name() << " and " << b->name() 925 | << " are benchmarks of the same function.\n"; 926 | } 927 | } 928 | #endif 929 | } 930 | 931 | if (!found_baseline && !suite.benchmarks.empty()) 932 | { 933 | suite.benchmarks.front()->_baseline = true; 934 | } 935 | } 936 | 937 | // initialize benchmarks 938 | for (auto b : benchmarks) 939 | { 940 | const std::vector& state_iterations = 941 | b->_state_iterations.empty() ? 
942 | _default_state_iterations : 943 | b->_state_iterations; 944 | 945 | if (b->_samples == 0) 946 | b->_samples = _default_samples; 947 | 948 | b->_states.reserve(state_iterations.size() * size_t(b->_samples)); 949 | 950 | // fill states while random shuffling them 951 | for (auto iters : state_iterations) 952 | { 953 | for (int i = 0; i < b->_samples; ++i) 954 | { 955 | auto index = rnd() % (b->_states.size() + 1); 956 | auto pos = b->_states.begin() + long(index); 957 | b->_states.emplace(pos, iters, b->_user_data); 958 | } 959 | } 960 | 961 | b->_istate = b->_states.begin(); 962 | } 963 | 964 | #if !defined(PICOBENCH_DONT_BIND_TO_ONE_CORE) 965 | // set thread affinity to first cpu 966 | // so the high resolution clock doesn't miss cycles 967 | { 968 | #if defined(_WIN32) 969 | SetThreadAffinityMask(GetCurrentThread(), 1); 970 | #elif defined(__APPLE__) 971 | thread_affinity_policy_data_t policy = {0}; 972 | thread_policy_set( 973 | pthread_mach_thread_np(pthread_self()), 974 | THREAD_AFFINITY_POLICY, 975 | (thread_policy_t)&policy, 1); 976 | #else 977 | cpu_set_t cpuset; 978 | CPU_ZERO(&cpuset); 979 | CPU_SET(0, &cpuset); 980 | 981 | sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); 982 | #endif 983 | } 984 | #endif 985 | 986 | // we run a random benchmark from it incrementing _istate for each 987 | // when _istate reaches _states.end(), we erase the benchmark 988 | // when the vector becomes empty, we're done 989 | while (!benchmarks.empty()) 990 | { 991 | auto i = benchmarks.begin() + long(rnd() % benchmarks.size()); 992 | auto& b = *i; 993 | 994 | b->_proc(*b->_istate); 995 | 996 | ++b->_istate; 997 | 998 | if (b->_istate == b->_states.end()) 999 | { 1000 | benchmarks.erase(i); 1001 | } 1002 | } 1003 | } 1004 | 1005 | // function to compare results 1006 | template > 1007 | report generate_report(CompareResult cmp = std::equal_to()) const 1008 | { 1009 | report rpt; 1010 | 1011 | rpt.suites.resize(_suites.size()); 1012 | auto rpt_suite = rpt.suites.begin(); 1013 | 1014 | for (auto& suite : _suites) 1015 | { 1016 | rpt_suite->name = suite.name; 1017 | 1018 | // build benchmark view 1019 | rpt_suite->benchmarks.resize(suite.benchmarks.size()); 1020 | auto rpt_benchmark = rpt_suite->benchmarks.begin(); 1021 | 1022 | for (auto& b : suite.benchmarks) 1023 | { 1024 | rpt_benchmark->name = b->_name; 1025 | rpt_benchmark->is_baseline = b->_baseline; 1026 | 1027 | const std::vector& state_iterations = 1028 | b->_state_iterations.empty() ? 
1029 | _default_state_iterations : 1030 | b->_state_iterations; 1031 | 1032 | rpt_benchmark->data.reserve(state_iterations.size()); 1033 | for (auto d : state_iterations) 1034 | { 1035 | rpt_benchmark->data.push_back({d, 0, 0ll, result_t(0)}); 1036 | } 1037 | 1038 | for (auto& state : b->_states) 1039 | { 1040 | for (auto& d : rpt_benchmark->data) 1041 | { 1042 | if (state.iterations() == d.dimension) 1043 | { 1044 | if (d.total_time_ns == 0 || d.total_time_ns > state.duration_ns()) 1045 | { 1046 | d.total_time_ns = state.duration_ns(); 1047 | d.result = state.result(); 1048 | } 1049 | 1050 | if (_compare_results_across_samples) 1051 | { 1052 | if (d.result != state.result() && !cmp(d.result, state.result())) 1053 | { 1054 | *_stderr << "Error: Two samples of " << b->name() << " @" << d.dimension << " produced different results: " 1055 | << d.result << " and " << state.result() << '\n'; 1056 | _error = error_sample_compare; 1057 | } 1058 | } 1059 | 1060 | ++d.samples; 1061 | } 1062 | } 1063 | } 1064 | 1065 | #if defined(PICOBENCH_DEBUG) 1066 | for (auto& d : rpt_benchmark->data) 1067 | { 1068 | I_PICOBENCH_ASSERT(d.samples == b->_samples); 1069 | } 1070 | #endif 1071 | 1072 | ++rpt_benchmark; 1073 | } 1074 | 1075 | ++rpt_suite; 1076 | } 1077 | 1078 | if (_compare_results_across_benchmarks) 1079 | { 1080 | for(auto& suite : rpt.suites) 1081 | { 1082 | auto psview = report::get_problem_space_view(suite); 1083 | 1084 | for (auto& space : psview) 1085 | { 1086 | I_PICOBENCH_ASSERT(!space.second.empty()); 1087 | 1088 | if (space.second.size() == 1) 1089 | { 1090 | auto& b = space.second.front(); 1091 | *_stdwarn << "Warning: Benchmark " << b.name << " @" << space.first 1092 | << " has a single instance and cannot be compared to others.\n"; 1093 | continue; 1094 | } 1095 | 1096 | auto result0 = space.second.front().result; 1097 | 1098 | for (auto& b : space.second) 1099 | { 1100 | if (result0 != b.result && !cmp(result0, b.result)) 1101 | { 1102 | auto& f = space.second.front(); 1103 | *_stderr << "Error: Benchmarks " << f.name << " and " << b.name 1104 | << " @" << space.first << " produce different results: " 1105 | << result0 << " and " << b.result << '\n'; 1106 | _error = error_benchmark_compare; 1107 | } 1108 | } 1109 | } 1110 | } 1111 | } 1112 | 1113 | return rpt; 1114 | } 1115 | 1116 | void set_default_state_iterations(const std::vector& data) 1117 | { 1118 | _default_state_iterations = data; 1119 | } 1120 | 1121 | const std::vector& default_state_iterations() const 1122 | { 1123 | return _default_state_iterations; 1124 | } 1125 | 1126 | void set_default_samples(int n) 1127 | { 1128 | _default_samples = n; 1129 | } 1130 | 1131 | int default_samples() const 1132 | { 1133 | return _default_samples; 1134 | } 1135 | 1136 | void add_cmd_opt(const char* cmd, const char* arg_desc, const char* cmd_desc, bool(*handler)(uintptr_t, const char*), uintptr_t user_data = 0) 1137 | { 1138 | cmd_line_option opt; 1139 | opt.cmd = picostring(cmd); 1140 | opt.arg_desc = picostring(arg_desc); 1141 | opt.desc = cmd_desc; 1142 | opt.handler = nullptr; 1143 | opt.user_data = user_data; 1144 | opt.user_handler = handler; 1145 | _opts.push_back(opt); 1146 | } 1147 | 1148 | // returns false if there were errors parsing the command line 1149 | // all args starting with prefix are parsed 1150 | // the others are ignored 1151 | bool parse_cmd_line(int argc, const char* const argv[], const char* cmd_prefix = "-") 1152 | { 1153 | _cmd_prefix = picostring(cmd_prefix); 1154 | 1155 | if (!_has_opts) 1156 | { 1157 | 
_opts.emplace_back("-iters=", "", 1158 | "Sets default iterations for benchmarks", 1159 | &runner::cmd_iters); 1160 | _opts.emplace_back("-samples=", "", 1161 | "Sets default number of samples for benchmarks", 1162 | &runner::cmd_samples); 1163 | _opts.emplace_back("-out-fmt=", "", 1164 | "Outputs text or concise or csv", 1165 | &runner::cmd_out_fmt); 1166 | _opts.emplace_back("-output=", "", 1167 | "Sets output filename or `stdout`", 1168 | &runner::cmd_output); 1169 | _opts.emplace_back("-compare-results", "", 1170 | "Compare benchmark results", 1171 | &runner::cmd_compare_results); 1172 | _opts.emplace_back("-no-run", "", 1173 | "Doesn't run benchmarks", 1174 | &runner::cmd_no_run); 1175 | _opts.emplace_back("-run-suite=", "", 1176 | "Runs only benchmarks from suite", 1177 | &runner::cmd_run_suite); 1178 | _opts.emplace_back("-run-only=", "", 1179 | "Runs only selected benchmarks", 1180 | &runner::cmd_run_only); 1181 | _opts.emplace_back("-list", "", 1182 | "Lists available benchmarks", 1183 | &runner::cmd_list); 1184 | _opts.emplace_back("-version", "", 1185 | "Show version info", 1186 | &runner::cmd_version); 1187 | _opts.emplace_back("-help", "", 1188 | "Prints help", 1189 | &runner::cmd_help); 1190 | _has_opts = true; 1191 | } 1192 | 1193 | for (int i = 1; i < argc; ++i) 1194 | { 1195 | if (!_cmd_prefix.is_start_of(argv[i])) 1196 | continue; 1197 | 1198 | auto arg = argv[i] + _cmd_prefix.len; 1199 | 1200 | bool found = false; 1201 | for (auto& opt : _opts) 1202 | { 1203 | if (opt.cmd.is_start_of(arg)) 1204 | { 1205 | found = true; 1206 | bool success = false; 1207 | if (opt.handler) 1208 | { 1209 | success = (this->*opt.handler)(arg + opt.cmd.len); 1210 | } 1211 | else 1212 | { 1213 | I_PICOBENCH_ASSERT(opt.user_handler); 1214 | success = opt.user_handler(opt.user_data, arg + opt.cmd.len); 1215 | } 1216 | 1217 | if (!success) 1218 | { 1219 | *_stderr << "Error: Bad command-line argument: " << argv[i] << "\n"; 1220 | _error = error_bad_cmd_line_argument; 1221 | return false; 1222 | } 1223 | break; 1224 | } 1225 | } 1226 | 1227 | if (!found) 1228 | { 1229 | *_stderr << "Error: Unknown command-line argument: " << argv[i] << "\n"; 1230 | _error = error_unknown_cmd_line_argument; 1231 | return false; 1232 | } 1233 | } 1234 | 1235 | return true; 1236 | } 1237 | 1238 | void set_should_run(bool set) { _should_run = set; } 1239 | bool should_run() const { return _error == no_error && _should_run; } 1240 | void set_error(error_t e) { _error = e; } 1241 | error_t error() const { return _error; } 1242 | 1243 | void set_output_streams(std::ostream& out, std::ostream& err) 1244 | { 1245 | _stdout = &out; 1246 | _stderr = &err; 1247 | _stdwarn = &out; 1248 | } 1249 | 1250 | void set_preferred_output_format(report_output_format fmt) { _output_format = fmt; } 1251 | report_output_format preferred_output_format() const { return _output_format; } 1252 | 1253 | // can be nullptr (run will interpret it as stdout) 1254 | void set_preferred_output_filename(const char* path) { _output_file = path; } 1255 | const char* preferred_output_filename() const { return _output_file; } 1256 | 1257 | void set_compare_results_across_samples(bool b) { _compare_results_across_samples = b; } 1258 | bool compare_results_across_samples() const { return _compare_results_across_samples; } 1259 | 1260 | void set_compare_results_across_benchmarks(bool b) { _compare_results_across_benchmarks = b; } 1261 | bool compare_results_across_benchmarks() const { return _compare_results_across_benchmarks; } 1262 | 1263 | private: 1264 | 
// runner's suites and benchmarks come from its parent: registry 1265 | 1266 | // state and configuration 1267 | mutable error_t _error = no_error; 1268 | bool _should_run = true; 1269 | 1270 | bool _compare_results_across_samples = false; 1271 | bool _compare_results_across_benchmarks = false; 1272 | 1273 | report_output_format _output_format = report_output_format::text; 1274 | const char* _output_file = nullptr; // nullptr means stdout 1275 | 1276 | std::ostream* _stdout = &std::cout; 1277 | std::ostream* _stderr = &std::cerr; 1278 | std::ostream* _stdwarn = &std::cout; 1279 | 1280 | // default data 1281 | 1282 | // default iterations per state per benchmark 1283 | std::vector _default_state_iterations; 1284 | 1285 | // default samples per benchmark 1286 | int _default_samples; 1287 | 1288 | // command line parsing 1289 | picostring _cmd_prefix; 1290 | typedef bool (runner::*cmd_handler)(const char*); // internal handler 1291 | typedef bool(*ext_handler)(uintptr_t user_data, const char* cmd_line); // external (user) handler 1292 | struct cmd_line_option 1293 | { 1294 | cmd_line_option() = default; 1295 | cmd_line_option(const char* c, const char* a, const char* d, cmd_handler h) 1296 | : cmd(c) 1297 | , arg_desc(a) 1298 | , desc(d) 1299 | , handler(h) 1300 | , user_data(0) 1301 | , user_handler(nullptr) 1302 | {} 1303 | picostring cmd; 1304 | picostring arg_desc; 1305 | const char* desc; 1306 | cmd_handler handler; // may be nullptr for external handlers 1307 | uintptr_t user_data; // passed as an argument to user handlers 1308 | ext_handler user_handler; 1309 | }; 1310 | bool _has_opts = false; // have opts been added to list 1311 | std::vector _opts; 1312 | 1313 | bool cmd_iters(const char* line) 1314 | { 1315 | std::vector iters; 1316 | auto p = line; 1317 | while (true) 1318 | { 1319 | auto i = int(strtoul(p, nullptr, 10)); 1320 | if (i <= 0) return false; 1321 | iters.push_back(i); 1322 | p = strchr(p + 1, ','); 1323 | if (!p) break; 1324 | ++p; 1325 | } 1326 | if (iters.empty()) return false; 1327 | _default_state_iterations = iters; 1328 | return true; 1329 | } 1330 | 1331 | bool cmd_samples(const char* line) 1332 | { 1333 | int samples = int(strtol(line, nullptr, 10)); 1334 | if (samples <= 0) return false; 1335 | _default_samples = samples; 1336 | return true; 1337 | } 1338 | 1339 | bool cmd_no_run(const char* line) 1340 | { 1341 | if (*line) return false; 1342 | _should_run = false; 1343 | return true; 1344 | } 1345 | 1346 | bool cmd_run_suite(const char* line) 1347 | { 1348 | auto new_end = std::remove_if(_suites.begin(), _suites.end(), [line](const rsuite& s) { 1349 | return !s.name || strcmp(s.name, line) != 0; 1350 | }); 1351 | _suites.erase(new_end, _suites.end()); 1352 | return true; 1353 | } 1354 | 1355 | bool cmd_run_only(const char* line) 1356 | { 1357 | std::vector names; 1358 | 1359 | auto p = line; 1360 | while (true) 1361 | { 1362 | const char* q = strchr(p, ','); 1363 | if (!q) q = p + strlen(p); 1364 | names.emplace_back(p, int(q - p)); 1365 | if (!*q) break; 1366 | p = q + 1; 1367 | } 1368 | 1369 | for (auto& s : _suites) 1370 | { 1371 | auto new_end = std::remove_if(s.benchmarks.begin(), s.benchmarks.end(), [&names](const std::unique_ptr& b) { 1372 | auto f = std::find(names.begin(), names.end(), b->name()); 1373 | return f == names.end(); 1374 | }); 1375 | s.benchmarks.erase(new_end, s.benchmarks.end()); 1376 | } 1377 | return true; 1378 | } 1379 | 1380 | bool cmd_list(const char* line) 1381 | { 1382 | if (*line) return false; 1383 | _should_run = false; 1384 
1380 |     bool cmd_list(const char* line)
1381 |     {
1382 |         if (*line) return false;
1383 |         _should_run = false;
1384 |         for (auto& suite : _suites)
1385 |         {
1386 |             if (suite.name)
1387 |             {
1388 |                 *_stdout << " " << suite.name << ":\n";
1389 |             }
1390 |             else
1391 |             {
1392 |                 *_stdout << " :\n";
1393 |             }
1394 |             for (auto& bench : suite.benchmarks)
1395 |             {
1396 |                 *_stdout << " " << bench->name() << "\n";
1397 |             }
1398 |         }
1399 |         _should_run = false;
1400 |         return true;
1401 |     }
1402 | 
1403 |     bool cmd_version(const char* line)
1404 |     {
1405 |         if (*line) return false;
1406 |         *_stdout << "picobench " PICOBENCH_VERSION_STR << "\n";
1407 |         _should_run = false;
1408 |         return true;
1409 |     }
1410 | 
1411 |     bool cmd_help(const char* line)
1412 |     {
1413 |         if (*line) return false;
1414 |         cmd_version(line);
1415 |         auto& cout = *_stdout;
1416 |         for (auto& opt : _opts)
1417 |         {
1418 |             cout << ' ' << _cmd_prefix.str << opt.cmd.str << opt.arg_desc.str;
1419 |             int w = 27 - (_cmd_prefix.len + opt.cmd.len + opt.arg_desc.len);
1420 |             for (int i = 0; i < w; ++i)
1421 |             {
1422 |                 cout.put(' ');
1423 |             }
1424 |             cout << opt.desc << "\n";
1425 |         }
1426 |         _should_run = false;
1427 |         return true;
1428 |     }
1429 | 
1430 |     bool cmd_out_fmt(const char* line)
1431 |     {
1432 |         if (strcmp(line, "txt") == 0)
1433 |         {
1434 |             _output_format = report_output_format::text;
1435 |         }
1436 |         else if (strcmp(line, "con") == 0)
1437 |         {
1438 |             _output_format = report_output_format::concise_text;
1439 |         }
1440 |         else if (strcmp(line, "csv") == 0)
1441 |         {
1442 |             _output_format = report_output_format::csv;
1443 |         }
1444 |         else
1445 |         {
1446 |             return false;
1447 |         }
1448 |         return true;
1449 |     }
1450 | 
1451 |     bool cmd_output(const char* line)
1452 |     {
1453 |         if (strcmp(line, "stdout") != 0)
1454 |         {
1455 |             _output_file = line;
1456 |         }
1457 |         else
1458 |         {
1459 |             _output_file = nullptr;
1460 |         }
1461 |         return true;
1462 |     }
1463 | 
1464 |     bool cmd_compare_results(const char* line)
1465 |     {
1466 |         if (*line) return false;
1467 |         _compare_results_across_samples = true;
1468 |         _compare_results_across_benchmarks = true;
1469 |         return true;
1470 |     }
1471 | };
1472 | 
1473 | class local_runner : public runner
1474 | {
1475 | public:
1476 |     local_runner() : runner(true)
1477 |     {}
1478 | };
1479 | 
1480 | // } // anonymous namespace
1481 | 
1482 | benchmark::benchmark(const char* name, benchmark_proc proc)
1483 |     : _name(name)
1484 |     , _proc(proc)
1485 | {}
1486 | 
1487 | benchmark& global_registry::new_benchmark(const char* name, benchmark_proc proc)
1488 | {
1489 |     return g_registry().add_benchmark(name, proc);
1490 | }
1491 | 
1492 | int global_registry::set_bench_suite(const char* name)
1493 | {
1494 |     g_registry().current_suite_name() = name;
1495 |     return 0;
1496 | }
1497 | 
1498 | #if (defined(_MSC_VER) || defined(__MINGW32__)) && !defined(PICOBENCH_TEST)
1499 | 
1500 | static const long long high_res_clock_freq = []() -> long long
1501 | {
1502 |     LARGE_INTEGER frequency;
1503 |     QueryPerformanceFrequency(&frequency);
1504 |     return frequency.QuadPart;
1505 | }();
1506 | 
1507 | high_res_clock::time_point high_res_clock::now()
1508 | {
1509 |     LARGE_INTEGER t;
1510 |     QueryPerformanceCounter(&t);
1511 |     return time_point(duration((t.QuadPart * rep(period::den)) / high_res_clock_freq));
1512 | }
1513 | #endif
1514 | }
1515 | 
1516 | #endif
1517 | 
1518 | #if defined(PICOBENCH_IMPLEMENT_MAIN)
1519 | int main(int argc, char* argv[])
1520 | {
1521 |     PICOBENCH_NAMESPACE::runner r;
1522 |     r.parse_cmd_line(argc, argv);
1523 |     return r.run();
1524 | }
1525 | #endif
1526 | 
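// A minimal sketch of how a user program would rely on the block above
// (assuming both implementation macros are defined in exactly one translation
// unit; the benchmark body is hypothetical):
//
//   #define PICOBENCH_IMPLEMENT
//   #define PICOBENCH_IMPLEMENT_MAIN
//   #include "picobench/picobench.hpp"
//
//   static void my_bench(picobench::state& s)
//   {
//       for (auto _ : s) { /* code to measure */ }
//   }
//   PICOBENCH(my_bench);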
1527 | #if defined(PICOBENCH_TEST)
1528 | 
1529 | // fake time keeping functions for the tests
1530 | namespace PICOBENCH_NAMESPACE
1531 | {
1532 | namespace test
1533 | {
1534 | 
1535 | 
1536 | void this_thread_sleep_for_ns(uint64_t ns);
1537 | 
1538 | template <class Rep, class Period>
1539 | void this_thread_sleep_for(const std::chrono::duration<Rep, Period>& duration)
1540 | {
1541 |     this_thread_sleep_for_ns(std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count());
1542 | }
1543 | 
1544 | #if defined(PICOBENCH_IMPLEMENT)
1545 | static struct fake_time
1546 | {
1547 |     uint64_t now;
1548 | } the_time;
1549 | 
1550 | void this_thread_sleep_for_ns(uint64_t ns)
1551 | {
1552 |     the_time.now += ns;
1553 | }
1554 | 
1555 | } // namespace test
1556 | 
1557 | high_res_clock::time_point high_res_clock::now()
1558 | {
1559 |     auto ret = time_point(duration(test::the_time.now));
1560 |     return ret;
1561 | #endif
1562 | } // dual purpose closing brace: ends namespace test without PICOBENCH_IMPLEMENT, or high_res_clock::now with it
1563 | }
1564 | #endif
1565 | 
--------------------------------------------------------------------------------
/test/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
2 | 
3 | include(./get_cpm.cmake)
4 | CPMAddPackage(gh:iboB/doctest-util@0.1.2)
5 | 
6 | macro(pb_test test)
7 |     add_doctest_lib_test(${test} picobench ${ARGN})
8 | endmacro()
9 | 
10 | pb_test(basic basic.cpp)
11 | pb_test(multi_cpp
12 |     multi_cpp/main.cpp
13 |     multi_cpp/suite_a.cpp
14 |     multi_cpp/suite_b.cpp
15 |     multi_cpp/suite_b_cont.cpp
16 | )
17 | 
--------------------------------------------------------------------------------
/test/README.md:
--------------------------------------------------------------------------------
1 | ## picobench Unit Tests
2 | 
3 | The unit tests use [doctest](https://github.com/onqtam/doctest), fetched via CPM as part of [doctest-util](https://github.com/iboB/doctest-util).
--------------------------------------------------------------------------------
/test/basic.cpp:
--------------------------------------------------------------------------------
1 | #include <doctest/doctest.h>
2 | 
3 | #define PICOBENCH_DEBUG
4 | #define PICOBENCH_TEST
5 | #define PICOBENCH_IMPLEMENT
6 | #include <picobench/picobench.hpp>
7 | 
8 | #include <map>
9 | #include <sstream>
10 | 
11 | using namespace picobench;
12 | using namespace std;
13 | 
14 | PICOBENCH_SUITE("test a");
15 | 
16 | static void a_a(picobench::state& s)
17 | {
18 |     for (auto _ : s)
19 |     {
20 |         test::this_thread_sleep_for_ns(10);
21 |     }
22 |     s.set_result(s.iterations() * 2);
23 | }
24 | PICOBENCH(a_a);
25 | 
26 | map<int, int> a_b_samples;
27 | static void a_b(picobench::state& s)
28 | {
29 |     uint64_t time = 11;
30 |     if (a_b_samples.find(s.iterations()) == a_b_samples.end())
31 |     {
32 |         // slower first time
33 |         time = 32;
34 |     }
35 | 
36 |     ++a_b_samples[s.iterations()];
37 |     for (auto _ : s)
38 |     {
39 |         test::this_thread_sleep_for_ns(time);
40 |     }
41 |     s.set_result(s.iterations() * 2);
42 | }
43 | PICOBENCH(a_b);
44 | 
45 | static void a_c(picobench::state& s)
46 | {
47 |     s.start_timer();
48 |     test::this_thread_sleep_for_ns((s.iterations() - 1) * 20);
49 |     s.stop_timer();
50 | 
51 |     s.add_custom_duration(20);
52 |     s.set_result(s.iterations() * 2);
53 | }
54 | PICOBENCH(a_c);
55 | 
56 | PICOBENCH_SUITE("test empty");
57 | 
58 | PICOBENCH_SUITE("test b");
59 | 
60 | static void b_a(picobench::state& s)
61 | {
62 |     CHECK(s.user_data() == 9088);
63 |     for (auto _ : s)
64 |     {
65 |         test::this_thread_sleep_for_ns(75);
66 |     }
67 | }
68 | PICOBENCH(b_a)
69 |     .iterations({20, 30, 50})
70 |     .user_data(9088);
71 | 
72 | map<int, int> b_b_samples;
73 | 
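// as with a_b_samples above, this map tracks how many times each iteration
// count has been run, so b_b can report a deliberately different time for
// the first sample of every dimension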
74 | static void b_b(picobench::state& s)
75 | {
76 |     uint64_t time = 111;
77 |     if (b_b_samples.find(s.iterations()) == b_b_samples.end())
78 |     {
79 |         // faster first time
80 |         time = 100;
81 |     }
82 | 
83 |     ++b_b_samples[s.iterations()];
84 |     picobench::scope scope(s);
85 |     test::this_thread_sleep_for_ns(s.iterations() * time);
86 | }
87 | PICOBENCH(b_b)
88 |     .baseline()
89 |     .label("something else")
90 |     .samples(15)
91 |     .iterations({10, 20, 30});
92 | 
93 | const picobench::report::suite& find_suite(const char* s, const picobench::report& r)
94 | {
95 |     auto suite = r.find_suite(s);
96 |     REQUIRE(suite);
97 |     return *suite;
98 | }
99 | 
100 | #define cntof(ar) (sizeof(ar) / sizeof(ar[0]))
101 | 
102 | TEST_CASE("[picobench] test utils")
103 | {
104 |     const char* ar[] = {"test", "123", "asdf"};
105 |     CHECK(cntof(ar) == 3);
106 | 
107 |     auto start = high_res_clock::now();
108 |     test::this_thread_sleep_for_ns(1234);
109 |     auto end = high_res_clock::now();
110 | 
111 |     auto duration = end - start;
112 |     CHECK(duration == std::chrono::nanoseconds(1234));
113 | 
114 |     start = high_res_clock::now();
115 |     test::this_thread_sleep_for(std::chrono::milliseconds(987));
116 |     end = high_res_clock::now();
117 |     duration = end - start;
118 |     CHECK(duration == std::chrono::milliseconds(987));
119 | }
120 | 
121 | TEST_CASE("[picobench] picostring")
122 | {
123 |     picostring str("test");
124 |     CHECK(str == "test");
125 |     CHECK(str.len == 4);
126 |     CHECK(!str.is_start_of("tes"));
127 |     CHECK(str.is_start_of("test"));
128 |     CHECK(str.is_start_of("test123"));
129 | }
130 | 
131 | TEST_CASE("[picobench] state")
132 | {
133 |     state s0(3);
134 |     CHECK(s0.iterations() == 3);
135 |     CHECK(s0.user_data() == 0);
136 | 
137 |     int i = 0;
138 |     for (auto _ : s0)
139 |     {
140 |         CHECK(_ == i);
141 |         ++i;
142 |         test::this_thread_sleep_for_ns(1);
143 |     }
144 |     CHECK(s0.duration_ns() == 3);
145 |     s0.add_custom_duration(5);
146 |     CHECK(s0.duration_ns() == 8);
147 | 
148 |     state s(2, 123);
149 |     CHECK(s.iterations() == 2);
150 |     CHECK(s.user_data() == 123);
151 | 
152 |     i = 0;
153 |     for (auto it = s.begin(); it != s.end(); ++it)
154 |     {
155 |         CHECK(*it == i);
156 |         ++i;
157 |         test::this_thread_sleep_for_ns(2);
158 |     }
159 |     CHECK(s.duration_ns() == 4);
160 | }
161 | 
162 | const vector<int> default_iters = { 8, 64, 512, 4096, 8192 };
163 | const int default_samples = 2;
164 | 
165 | TEST_CASE("[picobench] cmd line")
166 | {
167 |     {
168 |         local_runner r;
169 |         bool b = r.parse_cmd_line(0, {});
170 |         CHECK(b);
171 |         CHECK(r.should_run());
172 |         CHECK(r.error() == 0);
173 |         CHECK(r.default_state_iterations() == default_iters);
174 |         CHECK(r.default_samples() == default_samples);
175 |         CHECK(!r.preferred_output_filename());
176 |         CHECK(r.preferred_output_format() == report_output_format::text);
177 |         CHECK(!r.compare_results_across_benchmarks());
178 |         CHECK(!r.compare_results_across_samples());
179 |     }
180 | 
181 |     {
182 |         local_runner r;
183 |         ostringstream sout, serr;
184 |         r.set_output_streams(sout, serr);
185 |         const char* cmd_line[] = { "", "-asdf" };
186 |         bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "-");
187 |         CHECK(sout.str().empty());
188 |         CHECK(serr.str() == "Error: Unknown command-line argument: -asdf\n");
189 |         CHECK(!b);
190 |         CHECK(!r.should_run());
191 |         CHECK(r.error() == error_unknown_cmd_line_argument);
192 |     }
193 | 
194 |     {
195 |         local_runner r;
196 |         const char* cmd_line[] = { "", "--no-run", "--iters=1,2,3", "--samples=54", "--out-fmt=con", "--output=stdout" };
197 |         bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line);
198 |         CHECK(b);
199 |         CHECK(!r.should_run());
200 |         CHECK(r.error() == 0);
201 |         CHECK(r.default_samples() == 54);
202 |         CHECK(r.default_state_iterations() == vector<int>({ 1, 2, 3 }));
203 |         CHECK(!r.preferred_output_filename());
204 |         CHECK(r.preferred_output_format() == report_output_format::concise_text);
205 |         CHECK(!r.compare_results_across_benchmarks());
206 |         CHECK(!r.compare_results_across_samples());
207 |     }
208 | 
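    // with a custom prefix ("--pb" below), arguments that don't carry the
    // prefix (e.g. -other-cmd1) are left for the host program and don't
    // produce parse errors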
209 |     {
210 |         local_runner r;
211 |         const char* cmd_line[] = { "", "--pb-no-run", "--pb-iters=1000,2000,3000", "-other-cmd1", "--pb-samples=54",
212 |             "-other-cmd2", "--pb-out-fmt=csv", "--pb-output=foo.csv", "--pb-compare-results" };
213 |         bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "--pb");
214 |         CHECK(b);
215 |         CHECK(!r.should_run());
216 |         CHECK(r.error() == 0);
217 |         CHECK(r.default_samples() == 54);
218 |         CHECK(r.default_state_iterations() == vector<int>({ 1000, 2000, 3000 }));
219 |         CHECK(strcmp(r.preferred_output_filename(), "foo.csv") == 0);
220 |         CHECK(r.preferred_output_format() == report_output_format::csv);
221 |         CHECK(r.compare_results_across_benchmarks());
222 |         CHECK(r.compare_results_across_samples());
223 | 
224 |     }
225 | 
226 |     {
227 |         local_runner r;
228 |         ostringstream sout, serr;
229 |         r.set_output_streams(sout, serr);
230 |         const char* cmd_line[] = { "", "--samples=xxx" };
231 |         bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "-");
232 |         CHECK(sout.str().empty());
233 |         CHECK(serr.str() == "Error: Bad command-line argument: --samples=xxx\n");
234 |         CHECK(!b);
235 |         CHECK(!r.should_run());
236 |         CHECK(r.error() == error_bad_cmd_line_argument);
237 |         CHECK(r.default_samples() == default_samples);
238 |     }
239 | 
240 |     {
241 |         local_runner r;
242 |         ostringstream sout, serr;
243 |         r.set_output_streams(sout, serr);
244 |         const char* cmd_line[] = { "", "--iters=1,xxx,2" };
245 |         bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "-");
246 |         CHECK(sout.str().empty());
247 |         CHECK(serr.str() == "Error: Bad command-line argument: --iters=1,xxx,2\n");
248 |         CHECK(!b);
249 |         CHECK(!r.should_run());
250 |         CHECK(r.error() == error_bad_cmd_line_argument);
251 |         CHECK(r.default_state_iterations() == default_iters);
252 |     }
253 | 
254 |     {
255 |         local_runner r;
256 |         ostringstream sout, serr;
257 |         r.set_output_streams(sout, serr);
258 |         const char* cmd_line[] = { "", "--out-fmt=asdf" };
259 |         bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "-");
260 |         CHECK(sout.str().empty());
261 |         CHECK(serr.str() == "Error: Bad command-line argument: --out-fmt=asdf\n");
262 |         CHECK(!b);
263 |         CHECK(!r.should_run());
264 |         CHECK(r.error() == error_bad_cmd_line_argument);
265 |         CHECK(r.preferred_output_format() == report_output_format::text);
266 |     }
267 | 
268 | #define PB_VERSION_INFO "picobench " PICOBENCH_VERSION_STR "\n"
269 | 
270 |     {
271 |         const char* v = PB_VERSION_INFO;
272 | 
273 |         local_runner r;
274 |         ostringstream sout, serr;
275 |         r.set_output_streams(sout, serr);
276 |         const char* cmd_line[] = { "", "--pb-version" };
277 |         bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "--pb");
278 |         CHECK(sout.str() == v);
279 |         CHECK(serr.str().empty());
280 |         CHECK(b);
281 |         CHECK(!r.should_run());
282 |         CHECK(r.error() == 0);
283 |     }
284 | 
285 | #define PB_HELP \
286 |     " --pb-iters=<n1,n2,n3,...>  Sets default iterations for benchmarks\n" \
287 |     " --pb-samples=<n>           Sets default number of samples for benchmarks\n" \
288 |     " --pb-out-fmt=<txt|con|csv> Outputs text or concise or csv\n" \
289 |     " --pb-output=<filename>     Sets output filename or `stdout`\n" \
290 |     " --pb-compare-results       Compare benchmark results\n" \
291 |     " --pb-no-run                Doesn't run benchmarks\n" \
292 |     " --pb-run-suite=<suite>     Runs only benchmarks from suite\n" \
293 |     " --pb-run-only=<b1,b2,...>  Runs only selected benchmarks\n" \
294 |     " --pb-list                  Lists available benchmarks\n" \
295 |     " --pb-version               Show version info\n" \
296 |     " --pb-help                  Prints help\n"
297 | 
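    // the expected help text above mirrors runner::cmd_help, which pads each
    // option (prefix + command + argument description) to a 27-column field
    // before printing its description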
| " --pb-list Lists available benchmarks\n" \ 295 | " --pb-version Show version info\n" \ 296 | " --pb-help Prints help\n" 297 | 298 | { 299 | const char* help = 300 | PB_VERSION_INFO 301 | PB_HELP; 302 | 303 | local_runner r; 304 | ostringstream sout, serr; 305 | r.set_output_streams(sout, serr); 306 | const char* cmd_line[] = { "", "--pb-help" }; 307 | bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "--pb"); 308 | CHECK(sout.str() == help); 309 | CHECK(serr.str().empty()); 310 | CHECK(b); 311 | CHECK(!r.should_run()); 312 | CHECK(r.error() == 0); 313 | } 314 | 315 | { 316 | const char* help = 317 | PB_VERSION_INFO 318 | " --pb-cmd-hi Custom help\n" 319 | " --pb-cmd-bi=123 More custom help\n" 320 | PB_HELP; 321 | 322 | local_runner r; 323 | 324 | auto handler_hi = [](uintptr_t data, const char* cmd) -> bool { 325 | CHECK(data == 123); 326 | CHECK(*cmd == 0); 327 | return true; 328 | }; 329 | 330 | r.add_cmd_opt("-cmd-hi", "", "Custom help", handler_hi, 123); 331 | 332 | auto handler_bi = [](uintptr_t data, const char* cmd) -> bool { 333 | CHECK(data == 98); 334 | CHECK(strcmp(cmd, "123") == 0); 335 | return true; 336 | }; 337 | 338 | r.add_cmd_opt("-cmd-bi=", "123", "More custom help", handler_bi, 98); 339 | 340 | ostringstream sout, serr; 341 | r.set_output_streams(sout, serr); 342 | const char* cmd_line[] = { "", "--pb-help" }; 343 | bool b = r.parse_cmd_line(cntof(cmd_line), cmd_line, "--pb"); 344 | CHECK(sout.str() == help); 345 | CHECK(serr.str().empty()); 346 | CHECK(b); 347 | CHECK(!r.should_run()); 348 | CHECK(r.error() == 0); 349 | 350 | sout.str(std::string()); 351 | serr.str(std::string()); 352 | 353 | const char* cmd_line2[] = { "", "--zz-cmd-bi=123", "--zz-cmd-hi" }; 354 | b = r.parse_cmd_line(cntof(cmd_line2), cmd_line2, "--zz"); 355 | 356 | CHECK(sout.str().empty()); 357 | CHECK(serr.str().empty()); 358 | CHECK(b); 359 | CHECK(r.error() == 0); 360 | } 361 | } 362 | 363 | TEST_CASE("[picobench] test") 364 | { 365 | runner r; 366 | CHECK(r.default_state_iterations() == default_iters); 367 | CHECK(r.default_samples() == default_samples); 368 | 369 | r.set_compare_results_across_benchmarks(true); 370 | r.set_compare_results_across_samples(true); 371 | 372 | ostringstream sout; 373 | ostringstream serr; 374 | r.set_output_streams(sout, serr); 375 | 376 | r.run_benchmarks(); 377 | auto report = r.generate_report(); 378 | 379 | CHECK(serr.str().empty()); 380 | 381 | const char* warnings = 382 | "Warning: Benchmark something else @10 has a single instance and cannot be compared to others.\n" 383 | "Warning: Benchmark b_a @50 has a single instance and cannot be compared to others.\n"; 384 | CHECK(sout.str() == warnings); 385 | 386 | CHECK(report.suites.size() == 2); 387 | CHECK(!report.find_suite("asdf")); 388 | 389 | auto& a = find_suite("test a", report); 390 | CHECK(strcmp(a.name, "test a") == 0); 391 | CHECK(a.benchmarks.size() == 3); 392 | CHECK(!a.find_benchmark("b_a")); 393 | 394 | auto& aa = a.benchmarks[0]; 395 | CHECK(a.find_baseline() == &aa); 396 | CHECK(a.find_benchmark("a_a") == &aa); 397 | CHECK(strcmp(aa.name, "a_a") == 0); 398 | CHECK(aa.is_baseline); 399 | CHECK(aa.data.size() == r.default_state_iterations().size()); 400 | 401 | for (size_t i = 0; i 2 | 3 | #define PICOBENCH_IMPLEMENT 4 | #include "picobench_configured.hpp" 5 | 6 | #include 7 | #include 8 | 9 | using namespace std; 10 | 11 | std::map g_num_samples; 12 | 13 | const pb::report::suite& find_suite(const char* s, const pb::report& r) 14 | { 15 | auto suite = r.find_suite(s); 16 | REQUIRE(suite); 17 
20 | TEST_CASE("[picobench] multi cpp test")
21 | {
22 |     using namespace pb;
23 |     runner r;
24 | 
25 |     const vector<int> iters = { 100, 2000, 5000 };
26 |     r.set_default_state_iterations(iters);
27 | 
28 |     const int samples = 13;
29 |     r.set_default_samples(samples);
30 | 
31 |     r.run_benchmarks();
32 |     auto report = r.generate_report();
33 |     CHECK(report.suites.size() == 2);
34 | 
35 |     CHECK(g_num_samples.size() == iters.size());
36 |     size_t i = 0;
37 |     for (auto& elem : g_num_samples)
38 |     {
39 |         CHECK(elem.first == iters[i]);
40 |         CHECK(elem.second == samples);
41 |         ++i;
42 |     }
43 | 
44 |     auto& a = find_suite("suite a", report);
45 |     CHECK(strcmp(a.name, "suite a") == 0);
46 |     CHECK(a.benchmarks.size() == 2);
47 | 
48 |     auto& aa = a.benchmarks[0];
49 |     CHECK(strcmp(aa.name, "a_a") == 0);
50 |     CHECK(aa.is_baseline);
51 |     CHECK(aa.data.size() == iters.size());
52 | 
53 |     for (size_t i = 0; i < aa.data.size(); ++i)
--------------------------------------------------------------------------------
/test/multi_cpp/picobench_configured.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 | 
3 | // shared picobench configuration for the multi-cpp test sources
4 | #define PICOBENCH_TEST
5 | #define PICOBENCH_NAMESPACE pb
6 | 
7 | #include <picobench/picobench.hpp>
8 | 
--------------------------------------------------------------------------------
/test/multi_cpp/suite_a.cpp:
--------------------------------------------------------------------------------
1 | #include "picobench_configured.hpp"
2 | 
3 | PICOBENCH_SUITE("suite a");
4 | 
5 | static void a_a(pb::state& s)
6 | {
7 |     for (auto _ : s)
8 |     {
9 |         pb::test::this_thread_sleep_for_ns(10);
10 |     }
11 | }
12 | PICOBENCH(a_a);
13 | 
14 | static void a_b(pb::state& s)
15 | {
16 |     for (auto _ : s)
17 |     {
18 |         pb::test::this_thread_sleep_for_ns(15);
19 |     }
20 | }
21 | PICOBENCH(a_b);
--------------------------------------------------------------------------------
/test/multi_cpp/suite_b.cpp:
--------------------------------------------------------------------------------
1 | #include "picobench_configured.hpp"
2 | 
3 | PICOBENCH_SUITE("suite b");
4 | 
5 | static void a_a(pb::state& s)
6 | {
7 |     for (auto _ : s)
8 |     {
9 |         pb::test::this_thread_sleep_for_ns(15);
10 |     }
11 | }
12 | PICOBENCH(a_a);
13 | 
14 | static void a_b(size_t stime, pb::state& s)
15 | {
16 |     s.start_timer();
17 |     pb::test::this_thread_sleep_for_ns(s.iterations() * size_t(stime));
18 |     s.stop_timer();
19 | }
20 | PICOBENCH([](pb::state& s) { a_b(30, s); }).label("a_b").baseline();
--------------------------------------------------------------------------------
/test/multi_cpp/suite_b_cont.cpp:
--------------------------------------------------------------------------------
1 | #include "picobench_configured.hpp"
2 | #include <map>
3 | 
4 | extern std::map<int, int> g_num_samples;
5 | 
6 | PICOBENCH_SUITE("suite b");
7 | 
8 | static void b_a(pb::state& s)
9 | {
10 |     ++g_num_samples[s.iterations()];
11 |     for (auto _ : s)
12 |     {
13 |         pb::test::this_thread_sleep_for_ns(20);
14 |     }
15 | }
16 | PICOBENCH(b_a);
17 | 
18 | static void b_b(pb::state& s)
19 | {
20 |     s.start_timer();
21 |     pb::test::this_thread_sleep_for_ns(s.iterations() * size_t(25));
22 |     s.stop_timer();
23 | }
24 | PICOBENCH(b_b);
--------------------------------------------------------------------------------
/tools/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.5)
2 | 
3 | project(picobench-tools)
4 | 
5 | if(MSVC)
6 |     add_definitions(-D_CRT_SECURE_NO_WARNINGS=1)
7 | endif()
8 | 
9 | add_executable(picobench-cli picobench.cpp)
10 | target_link_libraries(picobench-cli picobench)
11 | set_target_properties(picobench-cli PROPERTIES OUTPUT_NAME picobench)
12 | set_target_properties(picobench-cli PROPERTIES FOLDER tools)
13 | 
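# note: the executable target is named picobench-cli because the name
# `picobench` is already taken by the library target it links against;
# OUTPUT_NAME still yields a `picobench` binary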
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
1 | ## picobench Tools
2 | 
3 | ### picobench.cpp
4 | 
5 | An executable which allows the user to benchmark commands.
6 | 
7 | Usage:
8 | 
9 | `$ picobench <cmd1> [<cmd2> ... <cmdN>] [args]`
10 | 
11 | The default number of iterations is one, and so is the default number of samples.
12 | 
13 | It supports the command-line arguments for the picobench library plus an additional one:
14 | 
15 | `--bfile=<filename>` - Sets a filename which lists the commands to test as benchmarks
16 | 
17 | The benchmark file format is:
18 | 
19 | ```
20 | title for baseline
21 | command line for baseline
22 | 
23 | title for benchmark 1
24 | command line for benchmark 1
25 | 
26 | [...]
27 | 
28 | title for benchmark N
29 | command line for benchmark N
30 | ```
31 | 
32 | Empty lines are ignored; benchmarks are read as consecutive pairs of title and command line. A concrete example file is sketched below.
33 | 
34 | Examples:
35 | 
36 | * `$ picobench "sleep 1" "sleep 1.2"`
37 | * `$ picobench --bfile=benchmarks.txt --samples=10 --output=data.csv --out-fmt=csv`
38 | 
39 | 
40 | 
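For illustration, a hypothetical `benchmarks.txt` for the second example above could contain:

```
baseline sleep
sleep 1

longer sleep
sleep 1.2
```

With `--samples=10` each command is then spawned ten times and the resulting report is written to `data.csv`.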
--------------------------------------------------------------------------------
/tools/picobench.cpp:
--------------------------------------------------------------------------------
1 | #include <cstring>
2 | 
3 | #if defined(_WIN32)
4 | #include <Windows.h>
5 | 
6 | template <typename T>
7 | void zm(T& data)
8 | {
9 |     ZeroMemory(&data, sizeof(T));
10 | }
11 | 
12 | int exec(const char* cmd)
13 | {
14 |     const short lim = _MAX_PATH + 10;
15 |     char cmd_line[lim];
16 |     strcpy(cmd_line, "cmd /c ");
17 |     strncat(cmd_line, cmd, _MAX_PATH);
18 | 
19 |     STARTUPINFO s_info;
20 |     zm(s_info);
21 |     s_info.cb = sizeof(STARTUPINFO);
22 |     s_info.dwFlags = STARTF_USESTDHANDLES;
23 | 
24 |     PROCESS_INFORMATION proc_info;
25 |     zm(proc_info);
26 | 
27 |     auto success = CreateProcessA(
28 |         nullptr,
29 |         &cmd_line[0],
30 |         nullptr,
31 |         nullptr,
32 |         TRUE,
33 |         0,
34 |         nullptr,
35 |         nullptr,
36 |         &s_info,
37 |         &proc_info);
38 | 
39 |     if (!success) return -1;
40 | 
41 |     // spin lock? doesn't seem to do anything different
42 |     // while (WAIT_TIMEOUT == WaitForSingleObject(proc_info.hProcess, 0));
43 |     WaitForSingleObject(proc_info.hProcess, INFINITE);
44 | 
45 |     DWORD exit_code;
46 |     success = GetExitCodeProcess(proc_info.hProcess, &exit_code);
47 | 
48 |     if (!success) return -1;
49 | 
50 |     CloseHandle(proc_info.hProcess);
51 |     CloseHandle(proc_info.hThread);
52 | 
53 |     return int(exit_code);
54 | }
55 | 
56 | #else
57 | 
58 | #include <cstdio>
59 | 
60 | int exec(const char* cmd)
61 | {
62 |     auto s = popen(cmd, "r");
63 |     if (!s) return -1;
64 |     return pclose(s);
65 | }
66 | 
67 | #endif
68 | 
69 | #include <iostream>
70 | #include <fstream>
71 | #include <climits>
72 | 
73 | #define PICOBENCH_DEBUG
74 | #define PICOBENCH_IMPLEMENT
75 | #include "picobench/picobench.hpp"
76 | 
77 | using namespace picobench;
78 | using namespace std;
79 | 
80 | // calculate nanoseconds to spawn an empty process
81 | // by running some empty commands and taking the minimum
82 | int64_t calc_spawn_time()
83 | {
84 |     const int lim = 50;
85 |     int64_t min_sample = LLONG_MAX;
86 |     for (int i = 0; i < lim; ++i)
87 |     {
88 |         auto start = high_res_clock::now();
89 |         exec("");
90 |         auto duration = high_res_clock::now() - start;
91 |         auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count();
92 |         if (ns < min_sample) min_sample = ns;
93 |     }
94 |     return min_sample;
95 | }
96 | 
97 | struct bench
98 | {
99 |     string name;
100 |     string cmd;
101 | };
102 | 
103 | vector<bench> benchmarks;
104 | static int64_t spawn_time;
105 | 
106 | void bench_proc(state& s)
107 | {
108 |     const char* cmd = benchmarks[s.user_data()].cmd.c_str();
109 |     for (auto _ : s)
110 |     {
111 |         exec(cmd);
112 |     }
113 | 
114 |     s.add_custom_duration(-spawn_time);
115 | };
116 | 
117 | bool parse_bfile(uintptr_t, const char* file)
118 | {
119 |     if (!*file)
120 |     {
121 |         cerr << "Error: bfile missing filename\n";
122 |         return false;
123 |     }
124 | 
125 |     ifstream fin(file);
126 | 
127 |     if (!fin)
128 |     {
129 |         cerr << "Error: Cannot open " << file << "\n";
130 |         return false;
131 |     }
132 | 
133 |     int iline = 0;
134 |     string line;
135 |     string name;
136 |     while (!fin.eof())
137 |     {
138 |         getline(fin, line);
139 |         bool empty = true;
140 |         for (auto& c : line)
141 |         {
142 |             if (!isspace(c))
143 |             {
144 |                 empty = false;
145 |                 break;
146 |             }
147 |         }
148 | 
149 |         if (empty) continue;
150 | 
151 |         ++iline;
152 |         // odd lines are benchmark names
153 |         // even lines are commands
154 |         if (iline & 1)
155 |         {
156 |             name = line;
157 |         }
158 |         else
159 |         {
160 |             benchmarks.push_back({ name, line });
161 |         }
162 |     }
163 |     return true;
164 | }
165 | 
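// typical invocations (see tools/README.md):
//   picobench "sleep 1" "sleep 1.2"
//   picobench --bfile=benchmarks.txt --samples=10 --output=data.csv --out-fmt=csv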
166 | int main(int argc, char* argv[])
167 | {
168 |     if (argc == 1)
169 |     {
170 |         cout << "picobench " PICOBENCH_VERSION_STR "\n";
171 |         cout << "Usage: picobench <cmd1> [<cmd2> ... <cmdN>] [args]\n";
172 |         cout << "Type 'picobench --help' for help.\n";
173 |         return 0;
174 |     }
175 | 
176 |     for (int i = 1; i < argc; ++i)
177 |     {
178 |         if (argv[i][0] != '-')
179 |         {
180 |             benchmarks.push_back({ argv[i], argv[i] });
181 |         }
182 |     }
183 | 
184 |     runner r;
185 |     r.set_default_state_iterations({ 1 });
186 |     r.set_default_samples(1);
187 | 
188 |     r.add_cmd_opt("-bfile=", "<filename>", "Set a file which lists benchmarks", parse_bfile);
189 | 
190 |     r.parse_cmd_line(argc, argv);
191 | 
192 |     if (!r.should_run()) return r.error();
193 | 
194 |     for (size_t i = 0; i < benchmarks.size(); ++i)
195 |     {
196 |         auto& b = benchmarks[i];
197 |         r.add_benchmark(b.name.c_str(), bench_proc).user_data(i);
198 |     }
199 | 
200 |     spawn_time = calc_spawn_time();
201 | 
202 |     r.run_benchmarks();
203 |     auto report = r.generate_report();
204 |     std::ostream* out = &std::cout;
205 |     std::ofstream fout;
206 |     if (r.preferred_output_filename())
207 |     {
208 |         fout.open(r.preferred_output_filename());
209 |         if (!fout.is_open())
210 |         {
211 |             std::cerr << "Error: Could not open output file `" << r.preferred_output_filename() << "`\n";
212 |             return 1;
213 |         }
214 |         out = &fout;
215 |     }
216 | 
217 |     switch (r.preferred_output_format())
218 |     {
219 |     case picobench::report_output_format::text:
220 |         report.to_text(*out);
221 |         break;
222 |     case picobench::report_output_format::concise_text:
223 |         report.to_text_concise(*out);
224 |         break;
225 |     case picobench::report_output_format::csv:
226 |         report.to_csv(*out);
227 |         break;
228 |     }
229 | 
230 |     return r.error();
231 | }
232 | 
--------------------------------------------------------------------------------