├── .gitignore ├── vendor ├── catch2 │ └── catch.cpp └── benchmark │ ├── .clang-format │ ├── cmake │ ├── Config.cmake.in │ ├── split_list.cmake │ ├── thread_safety_attributes.cpp │ ├── steady_clock.cpp │ ├── benchmark.pc.in │ ├── std_regex.cpp │ ├── llvm-toolchain.cmake │ ├── gnu_posix_regex.cpp │ ├── posix_regex.cpp │ ├── GetGitVersion.cmake │ ├── CXXFeatureCheck.cmake │ ├── AddCXXCompilerFlag.cmake │ └── HandleGTest.cmake │ ├── test │ ├── link_main_test.cc │ ├── templated_fixture_test.cc │ ├── benchmark_gtest.cc │ ├── statistics_gtest.cc │ ├── fixture_test.cc │ ├── donotoptimize_test.cc │ ├── report_aggregates_only_test.cc │ ├── memory_manager_test.cc │ ├── clobber_memory_assembly_test.cc │ ├── AssemblyTests.cmake │ ├── map_test.cc │ ├── display_aggregates_only_test.cc │ ├── cxx03_test.cc │ ├── BUILD │ ├── state_assembly_test.cc │ ├── options_test.cc │ ├── diagnostics_test.cc │ ├── filter_test.cc │ ├── multiple_ranges_test.cc │ ├── string_util_gtest.cc │ ├── basic_test.cc │ ├── donotoptimize_assembly_test.cc │ ├── register_benchmark_test.cc │ ├── skip_with_error_test.cc │ └── complexity_test.cc │ ├── tools │ ├── gbench │ │ ├── __init__.py │ │ ├── Inputs │ │ │ ├── test3_run0.json │ │ │ ├── test3_run1.json │ │ │ ├── test2_run.json │ │ │ ├── test1_run1.json │ │ │ └── test1_run2.json │ │ └── util.py │ └── strip_asm.py │ ├── WORKSPACE │ ├── src │ ├── sleep.h │ ├── benchmark_main.cc │ ├── benchmark_register.h │ ├── colorprint.h │ ├── counter.h │ ├── arraysize.h │ ├── timers.h │ ├── benchmark_api_internal.h │ ├── statistics.h │ ├── string_util.h │ ├── thread_manager.h │ ├── sleep.cc │ ├── log.h │ ├── thread_timer.h │ ├── complexity.h │ ├── counter.cc │ ├── internal_macros.h │ ├── check.h │ ├── reporter.cc │ ├── commandlineflags.h │ ├── CMakeLists.txt │ ├── re.h │ ├── csv_reporter.cc │ ├── mutex.h │ ├── colorprint.cc │ ├── console_reporter.cc │ ├── statistics.cc │ └── json_reporter.cc │ ├── releasing.md │ ├── .gitignore │ ├── BUILD.bazel │ ├── .travis-libcxx-setup.sh │ 
├── appveyor.yml │ ├── AUTHORS │ ├── CONTRIBUTING.md │ ├── CONTRIBUTORS │ ├── .ycm_extra_conf.py │ ├── docs │ └── AssemblyTests.md │ └── .travis.yml ├── CMakeLists.txt ├── LICENSE └── include └── rigtorp └── CharConv.h /.gitignore: -------------------------------------------------------------------------------- 1 | cmake-build-* 2 | .idea/ 3 | *~ -------------------------------------------------------------------------------- /vendor/catch2/catch.cpp: -------------------------------------------------------------------------------- 1 | #define CATCH_CONFIG_MAIN 2 | #include "catch.hpp" -------------------------------------------------------------------------------- /vendor/benchmark/.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | BasedOnStyle: Google 4 | ... 5 | 6 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/Config.cmake.in: -------------------------------------------------------------------------------- 1 | include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") 2 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/split_list.cmake: -------------------------------------------------------------------------------- 1 | macro(split_list listname) 2 | string(REPLACE ";" " " ${listname} "${${listname}}") 3 | endmacro() 4 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/thread_safety_attributes.cpp: -------------------------------------------------------------------------------- 1 | #define HAVE_THREAD_SAFETY_ATTRIBUTES 2 | #include "../src/mutex.h" 3 | 4 | int main() {} 5 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/steady_clock.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int main() { 4 | 
typedef std::chrono::steady_clock Clock; 5 | Clock::time_point tp = Clock::now(); 6 | ((void)tp); 7 | } 8 | -------------------------------------------------------------------------------- /vendor/benchmark/test/link_main_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | void BM_empty(benchmark::State& state) { 4 | for (auto _ : state) { 5 | benchmark::DoNotOptimize(state.iterations()); 6 | } 7 | } 8 | BENCHMARK(BM_empty); 9 | -------------------------------------------------------------------------------- /vendor/benchmark/tools/gbench/__init__.py: -------------------------------------------------------------------------------- 1 | """Google Benchmark tooling""" 2 | 3 | __author__ = 'Eric Fiselier' 4 | __email__ = 'eric@efcs.ca' 5 | __versioninfo__ = (0, 5, 0) 6 | __version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' 7 | 8 | __all__ = [] 9 | -------------------------------------------------------------------------------- /vendor/benchmark/WORKSPACE: -------------------------------------------------------------------------------- 1 | workspace(name = "com_github_google_benchmark") 2 | 3 | http_archive( 4 | name = "com_google_googletest", 5 | urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"], 6 | strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e", 7 | ) 8 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/benchmark.pc.in: -------------------------------------------------------------------------------- 1 | prefix=@CMAKE_INSTALL_PREFIX@ 2 | exec_prefix=${prefix} 3 | libdir=${prefix}/lib 4 | includedir=${prefix}/include 5 | 6 | Name: @PROJECT_NAME@ 7 | Description: Google microbenchmark framework 8 | Version: @VERSION@ 9 | 10 | Libs: -L${libdir} -lbenchmark 11 | Cflags: -I${includedir} 12 | 
-------------------------------------------------------------------------------- /vendor/benchmark/cmake/std_regex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | int main() { 4 | const std::string str = "test0159"; 5 | std::regex re; 6 | re = std::regex("^[a-z]+[0-9]+$", 7 | std::regex_constants::extended | std::regex_constants::nosubs); 8 | return std::regex_search(str, re) ? 0 : -1; 9 | } 10 | 11 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/llvm-toolchain.cmake: -------------------------------------------------------------------------------- 1 | find_package(LLVMAr REQUIRED) 2 | set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE) 3 | 4 | find_package(LLVMNm REQUIRED) 5 | set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE) 6 | 7 | find_package(LLVMRanLib REQUIRED) 8 | set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE) 9 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/gnu_posix_regex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | int main() { 4 | std::string str = "test0159"; 5 | regex_t re; 6 | int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); 7 | if (ec != 0) { 8 | return ec; 9 | } 10 | return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0; 11 | } 12 | 13 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/posix_regex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | int main() { 4 | std::string str = "test0159"; 5 | regex_t re; 6 | int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); 7 | if (ec != 0) { 8 | return ec; 9 | } 10 | int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? 
-1 : 0; 11 | regfree(&re); 12 | return ret; 13 | } 14 | 15 | -------------------------------------------------------------------------------- /vendor/benchmark/src/sleep.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_SLEEP_H_ 2 | #define BENCHMARK_SLEEP_H_ 3 | 4 | namespace benchmark { 5 | const int kNumMillisPerSecond = 1000; 6 | const int kNumMicrosPerMilli = 1000; 7 | const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; 8 | const int kNumNanosPerMicro = 1000; 9 | const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond; 10 | 11 | void SleepForMilliseconds(int milliseconds); 12 | void SleepForSeconds(double seconds); 13 | } // end namespace benchmark 14 | 15 | #endif // BENCHMARK_SLEEP_H_ 16 | -------------------------------------------------------------------------------- /vendor/benchmark/test/templated_fixture_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #include "benchmark/benchmark.h" 3 | 4 | #include 5 | #include 6 | 7 | template 8 | class MyFixture : public ::benchmark::Fixture { 9 | public: 10 | MyFixture() : data(0) {} 11 | 12 | T data; 13 | }; 14 | 15 | BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) { 16 | for (auto _ : st) { 17 | data += 1; 18 | } 19 | } 20 | 21 | BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) { 22 | for (auto _ : st) { 23 | data += 1.0; 24 | } 25 | } 26 | BENCHMARK_REGISTER_F(MyFixture, Bar); 27 | 28 | BENCHMARK_MAIN(); 29 | -------------------------------------------------------------------------------- /vendor/benchmark/releasing.md: -------------------------------------------------------------------------------- 1 | # How to release 2 | 3 | * Make sure you're on master and synced to HEAD 4 | * Ensure the project builds and tests run (sanity check only, obviously) 5 | * `parallel -j0 exec ::: test/*_test` can help ensure everything at least 6 | passes 7 | * 
Prepare release notes 8 | * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of 9 | commits between the last annotated tag and HEAD 10 | * Pick the most interesting. 11 | * Create a release through github's interface 12 | * Note this will create a lightweight tag. 13 | * Update this to an annotated tag: 14 | * `git pull --tags` 15 | * `git tag -a -f ` 16 | * `git push --force origin` 17 | -------------------------------------------------------------------------------- /vendor/benchmark/src/benchmark_main.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "benchmark/benchmark.h" 16 | 17 | BENCHMARK_MAIN(); 18 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.2) 2 | 3 | project(CharConv CXX) 4 | 5 | set(CMAKE_CXX_STANDARD 17) 6 | add_compile_options(-Wall -Wextra -Werror -pedantic) 7 | 8 | option(BENCHMARK_ENABLE_TESTING OFF) 9 | add_subdirectory(vendor/benchmark) 10 | 11 | enable_testing() 12 | 13 | add_library(catch vendor/catch2/catch.cpp) 14 | target_include_directories(catch PUBLIC vendor) 15 | 16 | add_library(CharConv INTERFACE) 17 | target_include_directories(CharConv INTERFACE include) 18 | 19 | add_executable(CharConvTest src/CharConvTest.cpp) 20 | target_link_libraries(CharConvTest CharConv catch) 21 | add_test(CharConvTest CharConvTest) 22 | 23 | add_executable(CharConvBenchmark src/CharConvBenchmark.cpp) 24 | target_link_libraries(CharConvBenchmark CharConv benchmark) 25 | -------------------------------------------------------------------------------- /vendor/benchmark/src/benchmark_register.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_REGISTER_H 2 | #define BENCHMARK_REGISTER_H 3 | 4 | #include 5 | 6 | #include "check.h" 7 | 8 | template 9 | void AddRange(std::vector* dst, T lo, T hi, int mult) { 10 | CHECK_GE(lo, 0); 11 | CHECK_GE(hi, lo); 12 | CHECK_GE(mult, 2); 13 | 14 | // Add "lo" 15 | dst->push_back(lo); 16 | 17 | static const T kmax = std::numeric_limits::max(); 18 | 19 | // Now space out the benchmarks in multiples of "mult" 20 | for (T i = 1; i < kmax / mult; i *= mult) { 21 | if (i >= hi) break; 22 | if (i > lo) { 23 | dst->push_back(i); 24 | } 25 | } 26 | 27 | // Add "hi" (if different from "lo") 28 | if (hi != lo) { 29 | dst->push_back(hi); 30 | } 31 | } 32 | 33 | #endif // BENCHMARK_REGISTER_H 34 | 
-------------------------------------------------------------------------------- /vendor/benchmark/src/colorprint.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_COLORPRINT_H_ 2 | #define BENCHMARK_COLORPRINT_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace benchmark { 9 | enum LogColor { 10 | COLOR_DEFAULT, 11 | COLOR_RED, 12 | COLOR_GREEN, 13 | COLOR_YELLOW, 14 | COLOR_BLUE, 15 | COLOR_MAGENTA, 16 | COLOR_CYAN, 17 | COLOR_WHITE 18 | }; 19 | 20 | std::string FormatString(const char* msg, va_list args); 21 | std::string FormatString(const char* msg, ...); 22 | 23 | void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, 24 | va_list args); 25 | void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...); 26 | 27 | // Returns true if stdout appears to be a terminal that supports colored 28 | // output, false otherwise. 29 | bool IsColorTerminal(); 30 | 31 | } // end namespace benchmark 32 | 33 | #endif // BENCHMARK_COLORPRINT_H_ 34 | -------------------------------------------------------------------------------- /vendor/benchmark/tools/gbench/Inputs/test3_run0.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_One", 12 | "iterations": 1000, 13 | "real_time": 10, 14 | "cpu_time": 100, 15 | "time_unit": "ns" 16 | }, 17 | { 18 | "name": "BM_Two", 19 | "iterations": 1000, 20 | "real_time": 9, 21 | "cpu_time": 90, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_Two", 26 | "iterations": 1000, 27 | "real_time": 8, 28 | "cpu_time": 80, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "short", 33 | "iterations": 1000, 34 | "real_time": 8, 35 | "cpu_time": 80, 36 | "time_unit": "ns" 37 | } 38 | ] 39 | } 40 | 
-------------------------------------------------------------------------------- /vendor/benchmark/tools/gbench/Inputs/test3_run1.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_One", 12 | "iterations": 1000, 13 | "real_time": 9, 14 | "cpu_time": 110, 15 | "time_unit": "ns" 16 | }, 17 | { 18 | "name": "BM_Two", 19 | "iterations": 1000, 20 | "real_time": 10, 21 | "cpu_time": 89, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_Two", 26 | "iterations": 1000, 27 | "real_time": 7, 28 | "cpu_time": 70, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "short", 33 | "iterations": 1000, 34 | "real_time": 8, 35 | "cpu_time": 80, 36 | "time_unit": "ns" 37 | } 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /vendor/benchmark/.gitignore: -------------------------------------------------------------------------------- 1 | *.a 2 | *.so 3 | *.so.?* 4 | *.dll 5 | *.exe 6 | *.dylib 7 | *.cmake 8 | !/cmake/*.cmake 9 | !/test/AssemblyTests.cmake 10 | *~ 11 | *.pyc 12 | __pycache__ 13 | 14 | # lcov 15 | *.lcov 16 | /lcov 17 | 18 | # cmake files. 19 | /Testing 20 | CMakeCache.txt 21 | CMakeFiles/ 22 | cmake_install.cmake 23 | 24 | # makefiles. 25 | Makefile 26 | 27 | # in-source build. 28 | bin/ 29 | lib/ 30 | /test/*_test 31 | 32 | # exuberant ctags. 33 | tags 34 | 35 | # YouCompleteMe configuration. 36 | .ycm_extra_conf.pyc 37 | 38 | # ninja generated files. 39 | .ninja_deps 40 | .ninja_log 41 | build.ninja 42 | install_manifest.txt 43 | rules.ninja 44 | 45 | # bazel output symlinks. 46 | bazel-* 47 | 48 | # out-of-source build top-level folders. 
49 | build/ 50 | _build/ 51 | build*/ 52 | 53 | # in-source dependencies 54 | /googletest/ 55 | 56 | # Visual Studio 2015/2017 cache/options directory 57 | .vs/ 58 | CMakeSettings.json 59 | -------------------------------------------------------------------------------- /vendor/benchmark/test/benchmark_gtest.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "../src/benchmark_register.h" 4 | #include "gmock/gmock.h" 5 | #include "gtest/gtest.h" 6 | 7 | namespace { 8 | 9 | TEST(AddRangeTest, Simple) { 10 | std::vector dst; 11 | AddRange(&dst, 1, 2, 2); 12 | EXPECT_THAT(dst, testing::ElementsAre(1, 2)); 13 | } 14 | 15 | TEST(AddRangeTest, Simple64) { 16 | std::vector dst; 17 | AddRange(&dst, static_cast(1), static_cast(2), 2); 18 | EXPECT_THAT(dst, testing::ElementsAre(1, 2)); 19 | } 20 | 21 | TEST(AddRangeTest, Advanced) { 22 | std::vector dst; 23 | AddRange(&dst, 5, 15, 2); 24 | EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); 25 | } 26 | 27 | TEST(AddRangeTest, Advanced64) { 28 | std::vector dst; 29 | AddRange(&dst, static_cast(5), static_cast(15), 2); 30 | EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); 31 | } 32 | 33 | } // end namespace 34 | -------------------------------------------------------------------------------- /vendor/benchmark/BUILD.bazel: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | config_setting( 4 | name = "windows", 5 | values = { 6 | "cpu": "x64_windows", 7 | }, 8 | visibility = [":__subpackages__"], 9 | ) 10 | 11 | cc_library( 12 | name = "benchmark", 13 | srcs = glob( 14 | [ 15 | "src/*.cc", 16 | "src/*.h", 17 | ], 18 | exclude = ["src/benchmark_main.cc"], 19 | ), 20 | hdrs = ["include/benchmark/benchmark.h"], 21 | linkopts = select({ 22 | ":windows": ["-DEFAULTLIB:shlwapi.lib"], 23 | "//conditions:default": ["-pthread"], 24 | }), 25 | strip_include_prefix = "include", 26 | visibility = 
["//visibility:public"], 27 | ) 28 | 29 | cc_library( 30 | name = "benchmark_main", 31 | srcs = ["src/benchmark_main.cc"], 32 | hdrs = ["include/benchmark/benchmark.h"], 33 | strip_include_prefix = "include", 34 | visibility = ["//visibility:public"], 35 | deps = [":benchmark"], 36 | ) 37 | 38 | cc_library( 39 | name = "benchmark_internal_headers", 40 | hdrs = glob(["src/*.h"]), 41 | visibility = ["//test:__pkg__"], 42 | ) 43 | -------------------------------------------------------------------------------- /vendor/benchmark/src/counter.h: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "benchmark/benchmark.h" 16 | 17 | namespace benchmark { 18 | 19 | // these counter-related functions are hidden to reduce API surface. 
20 | namespace internal { 21 | void Finish(UserCounters* l, int64_t iterations, double time, double num_threads); 22 | void Increment(UserCounters* l, UserCounters const& r); 23 | bool SameNames(UserCounters const& l, UserCounters const& r); 24 | } // end namespace internal 25 | 26 | } // end namespace benchmark 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Erik Rigtorp 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /vendor/benchmark/test/statistics_gtest.cc: -------------------------------------------------------------------------------- 1 | //===---------------------------------------------------------------------===// 2 | // statistics_test - Unit tests for src/statistics.cc 3 | //===---------------------------------------------------------------------===// 4 | 5 | #include "../src/statistics.h" 6 | #include "gtest/gtest.h" 7 | 8 | namespace { 9 | TEST(StatisticsTest, Mean) { 10 | EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0); 11 | EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5); 12 | EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0); 13 | } 14 | 15 | TEST(StatisticsTest, Median) { 16 | EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0); 17 | EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5); 18 | EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0); 19 | } 20 | 21 | TEST(StatisticsTest, StdDev) { 22 | EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0); 23 | EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0); 24 | EXPECT_FLOAT_EQ(benchmark::StatisticsStdDev({1.5, 2.4, 3.3, 4.2, 5.1}), 25 | 1.42302495); 26 | } 27 | 28 | } // end namespace 29 | -------------------------------------------------------------------------------- /vendor/benchmark/.travis-libcxx-setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Install a newer CMake version 4 | curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh 5 | chmod +x install-cmake.sh 6 | sudo ./install-cmake.sh --prefix=/usr/local --skip-license 7 | 8 | # Checkout LLVM sources 9 | git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source 10 | git clone --depth=1 
https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx 11 | git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi 12 | 13 | # Setup libc++ options 14 | if [ -z "$BUILD_32_BITS" ]; then 15 | export BUILD_32_BITS=OFF && echo disabling 32 bit build 16 | fi 17 | 18 | # Build and install libc++ (Use unstable ABI for better sanitizer coverage) 19 | mkdir llvm-build && cd llvm-build 20 | cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \ 21 | -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \ 22 | -DLIBCXX_ABI_UNSTABLE=ON \ 23 | -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \ 24 | -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \ 25 | ../llvm-source 26 | make cxx -j2 27 | sudo make install-cxxabi install-cxx 28 | cd ../ 29 | -------------------------------------------------------------------------------- /vendor/benchmark/src/arraysize.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_ARRAYSIZE_H_ 2 | #define BENCHMARK_ARRAYSIZE_H_ 3 | 4 | #include "internal_macros.h" 5 | 6 | namespace benchmark { 7 | namespace internal { 8 | // The arraysize(arr) macro returns the # of elements in an array arr. 9 | // The expression is a compile-time constant, and therefore can be 10 | // used in defining new arrays, for example. If you use arraysize on 11 | // a pointer by mistake, you will get a compile-time error. 12 | // 13 | 14 | // This template function declaration is used in defining arraysize. 15 | // Note that the function doesn't need an implementation, as we only 16 | // use its type. 17 | template 18 | char (&ArraySizeHelper(T (&array)[N]))[N]; 19 | 20 | // That gcc wants both of these prototypes seems mysterious. VC, for 21 | // its part, can't decide which to use (another mystery). Matching of 22 | // template overloads: the final frontier. 
23 | #ifndef COMPILER_MSVC 24 | template 25 | char (&ArraySizeHelper(const T (&array)[N]))[N]; 26 | #endif 27 | 28 | #define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array))) 29 | 30 | } // end namespace internal 31 | } // end namespace benchmark 32 | 33 | #endif // BENCHMARK_ARRAYSIZE_H_ 34 | -------------------------------------------------------------------------------- /vendor/benchmark/test/fixture_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #include "benchmark/benchmark.h" 3 | 4 | #include 5 | #include 6 | 7 | class MyFixture : public ::benchmark::Fixture { 8 | public: 9 | void SetUp(const ::benchmark::State& state) { 10 | if (state.thread_index == 0) { 11 | assert(data.get() == nullptr); 12 | data.reset(new int(42)); 13 | } 14 | } 15 | 16 | void TearDown(const ::benchmark::State& state) { 17 | if (state.thread_index == 0) { 18 | assert(data.get() != nullptr); 19 | data.reset(); 20 | } 21 | } 22 | 23 | ~MyFixture() { assert(data == nullptr); } 24 | 25 | std::unique_ptr data; 26 | }; 27 | 28 | BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { 29 | assert(data.get() != nullptr); 30 | assert(*data == 42); 31 | for (auto _ : st) { 32 | } 33 | } 34 | 35 | BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { 36 | if (st.thread_index == 0) { 37 | assert(data.get() != nullptr); 38 | assert(*data == 42); 39 | } 40 | for (auto _ : st) { 41 | assert(data.get() != nullptr); 42 | assert(*data == 42); 43 | } 44 | st.SetItemsProcessed(st.range(0)); 45 | } 46 | BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42); 47 | BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu(); 48 | 49 | BENCHMARK_MAIN(); 50 | -------------------------------------------------------------------------------- /vendor/benchmark/src/timers.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_TIMERS_H 2 | #define BENCHMARK_TIMERS_H 3 | 4 | #include 5 | #include 6 | 
7 | namespace benchmark { 8 | 9 | // Return the CPU usage of the current process 10 | double ProcessCPUUsage(); 11 | 12 | // Return the CPU usage of the children of the current process 13 | double ChildrenCPUUsage(); 14 | 15 | // Return the CPU usage of the current thread 16 | double ThreadCPUUsage(); 17 | 18 | #if defined(HAVE_STEADY_CLOCK) 19 | template 20 | struct ChooseSteadyClock { 21 | typedef std::chrono::high_resolution_clock type; 22 | }; 23 | 24 | template <> 25 | struct ChooseSteadyClock { 26 | typedef std::chrono::steady_clock type; 27 | }; 28 | #endif 29 | 30 | struct ChooseClockType { 31 | #if defined(HAVE_STEADY_CLOCK) 32 | typedef ChooseSteadyClock<>::type type; 33 | #else 34 | typedef std::chrono::high_resolution_clock type; 35 | #endif 36 | }; 37 | 38 | inline double ChronoClockNow() { 39 | typedef ChooseClockType::type ClockType; 40 | using FpSeconds = std::chrono::duration; 41 | return FpSeconds(ClockType::now().time_since_epoch()).count(); 42 | } 43 | 44 | std::string LocalDateTimeString(); 45 | 46 | } // end namespace benchmark 47 | 48 | #endif // BENCHMARK_TIMERS_H 49 | -------------------------------------------------------------------------------- /vendor/benchmark/src/benchmark_api_internal.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_API_INTERNAL_H 2 | #define BENCHMARK_API_INTERNAL_H 3 | 4 | #include "benchmark/benchmark.h" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | namespace benchmark { 13 | namespace internal { 14 | 15 | // Information kept per benchmark we may want to run 16 | struct Benchmark::Instance { 17 | std::string name; 18 | Benchmark* benchmark; 19 | AggregationReportMode aggregation_report_mode; 20 | std::vector arg; 21 | TimeUnit time_unit; 22 | int range_multiplier; 23 | bool use_real_time; 24 | bool use_manual_time; 25 | BigO complexity; 26 | BigOFunc* complexity_lambda; 27 | UserCounters counters; 28 | const std::vector* 
statistics; 29 | bool last_benchmark_instance; 30 | int repetitions; 31 | double min_time; 32 | size_t iterations; 33 | int threads; // Number of concurrent threads to us 34 | }; 35 | 36 | bool FindBenchmarksInternal(const std::string& re, 37 | std::vector* benchmarks, 38 | std::ostream* Err); 39 | 40 | bool IsZero(double n); 41 | 42 | ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); 43 | 44 | } // end namespace internal 45 | } // end namespace benchmark 46 | 47 | #endif // BENCHMARK_API_INTERNAL_H 48 | -------------------------------------------------------------------------------- /vendor/benchmark/test/donotoptimize_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include 4 | 5 | namespace { 6 | #if defined(__GNUC__) 7 | std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); 8 | #endif 9 | std::uint64_t double_up(const std::uint64_t x) { return x * 2; } 10 | } 11 | 12 | // Using DoNotOptimize on types like BitRef seem to cause a lot of problems 13 | // with the inline assembly on both GCC and Clang. 
14 | struct BitRef { 15 | int index; 16 | unsigned char &byte; 17 | 18 | public: 19 | static BitRef Make() { 20 | static unsigned char arr[2] = {}; 21 | BitRef b(1, arr[0]); 22 | return b; 23 | } 24 | private: 25 | BitRef(int i, unsigned char& b) : index(i), byte(b) {} 26 | }; 27 | 28 | int main(int, char*[]) { 29 | // this test verifies compilation of DoNotOptimize() for some types 30 | 31 | char buffer8[8] = ""; 32 | benchmark::DoNotOptimize(buffer8); 33 | 34 | char buffer20[20] = ""; 35 | benchmark::DoNotOptimize(buffer20); 36 | 37 | char buffer1024[1024] = ""; 38 | benchmark::DoNotOptimize(buffer1024); 39 | benchmark::DoNotOptimize(&buffer1024[0]); 40 | 41 | int x = 123; 42 | benchmark::DoNotOptimize(x); 43 | benchmark::DoNotOptimize(&x); 44 | benchmark::DoNotOptimize(x += 42); 45 | 46 | benchmark::DoNotOptimize(double_up(x)); 47 | 48 | // These tests are to e 49 | benchmark::DoNotOptimize(BitRef::Make()); 50 | BitRef lval = BitRef::Make(); 51 | benchmark::DoNotOptimize(lval); 52 | } 53 | -------------------------------------------------------------------------------- /vendor/benchmark/src/statistics.h: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Ismael Jimenez Martinez. All rights reserved. 2 | // Copyright 2017 Roman Lebedev. All rights reserved. 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | 16 | #ifndef STATISTICS_H_ 17 | #define STATISTICS_H_ 18 | 19 | #include 20 | 21 | #include "benchmark/benchmark.h" 22 | 23 | namespace benchmark { 24 | 25 | // Return a vector containing the mean, median and standard devation information 26 | // (and any user-specified info) for the specified list of reports. If 'reports' 27 | // contains less than two non-errored runs an empty vector is returned 28 | std::vector ComputeStats( 29 | const std::vector& reports); 30 | 31 | double StatisticsMean(const std::vector& v); 32 | double StatisticsMedian(const std::vector& v); 33 | double StatisticsStdDev(const std::vector& v); 34 | 35 | } // end namespace benchmark 36 | 37 | #endif // STATISTICS_H_ 38 | -------------------------------------------------------------------------------- /vendor/benchmark/test/report_aggregates_only_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #undef NDEBUG 3 | #include 4 | #include 5 | 6 | #include "benchmark/benchmark.h" 7 | #include "output_test.h" 8 | 9 | // Ok this test is super ugly. We want to check what happens with the file 10 | // reporter in the presence of ReportAggregatesOnly(). 11 | // We do not care about console output, the normal tests check that already. 12 | 13 | void BM_SummaryRepeat(benchmark::State& state) { 14 | for (auto _ : state) { 15 | } 16 | } 17 | BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); 18 | 19 | int main(int argc, char* argv[]) { 20 | const std::string output = GetFileReporterOutput(argc, argv); 21 | 22 | if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 || 23 | SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || 24 | SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 25 | 1 || 26 | SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 27 | 1) { 28 | std::cout << "Precondition mismatch. 
Expected to only find three " 29 | "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" 30 | "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " 31 | "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " 32 | "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire " 33 | "output:\n"; 34 | std::cout << output; 35 | return 1; 36 | } 37 | 38 | return 0; 39 | } 40 | -------------------------------------------------------------------------------- /vendor/benchmark/test/memory_manager_test.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "../src/check.h" 4 | #include "benchmark/benchmark.h" 5 | #include "output_test.h" 6 | 7 | class TestMemoryManager : public benchmark::MemoryManager { 8 | void Start() {} 9 | void Stop(Result* result) { 10 | result->num_allocs = 42; 11 | result->max_bytes_used = 42000; 12 | } 13 | }; 14 | 15 | void BM_empty(benchmark::State& state) { 16 | for (auto _ : state) { 17 | benchmark::DoNotOptimize(state.iterations()); 18 | } 19 | } 20 | BENCHMARK(BM_empty); 21 | 22 | ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}}); 23 | ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"}, 24 | {"\"run_name\": \"BM_empty\",$", MR_Next}, 25 | {"\"run_type\": \"iteration\",$", MR_Next}, 26 | {"\"iterations\": %int,$", MR_Next}, 27 | {"\"real_time\": %float,$", MR_Next}, 28 | {"\"cpu_time\": %float,$", MR_Next}, 29 | {"\"time_unit\": \"ns\",$", MR_Next}, 30 | {"\"allocs_per_iter\": %float,$", MR_Next}, 31 | {"\"max_bytes_used\": 42000$", MR_Next}, 32 | {"}", MR_Next}}); 33 | ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}}); 34 | 35 | 36 | int main(int argc, char *argv[]) { 37 | std::unique_ptr mm(new TestMemoryManager()); 38 | 39 | benchmark::RegisterMemoryManager(mm.get()); 40 | RunOutputTests(argc, argv); 41 | benchmark::RegisterMemoryManager(nullptr); 42 | } 43 | -------------------------------------------------------------------------------- 
/vendor/benchmark/appveyor.yml: -------------------------------------------------------------------------------- 1 | version: '{build}' 2 | 3 | image: Visual Studio 2017 4 | 5 | configuration: 6 | - Debug 7 | - Release 8 | 9 | environment: 10 | matrix: 11 | - compiler: msvc-15-seh 12 | generator: "Visual Studio 15 2017" 13 | 14 | - compiler: msvc-15-seh 15 | generator: "Visual Studio 15 2017 Win64" 16 | 17 | - compiler: msvc-14-seh 18 | generator: "Visual Studio 14 2015" 19 | 20 | - compiler: msvc-14-seh 21 | generator: "Visual Studio 14 2015 Win64" 22 | 23 | - compiler: msvc-12-seh 24 | generator: "Visual Studio 12 2013" 25 | 26 | - compiler: msvc-12-seh 27 | generator: "Visual Studio 12 2013 Win64" 28 | 29 | - compiler: gcc-5.3.0-posix 30 | generator: "MinGW Makefiles" 31 | cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin' 32 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 33 | 34 | matrix: 35 | fast_finish: true 36 | 37 | install: 38 | # git bash conflicts with MinGW makefiles 39 | - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%") 40 | - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%") 41 | 42 | build_script: 43 | - md _build -Force 44 | - cd _build 45 | - echo %configuration% 46 | - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON .. 47 | - cmake --build . 
--config %configuration% 48 | 49 | test_script: 50 | - ctest -c %configuration% --timeout 300 --output-on-failure 51 | 52 | artifacts: 53 | - path: '_build/CMakeFiles/*.log' 54 | name: logs 55 | - path: '_build/Testing/**/*.xml' 56 | name: test_results 57 | -------------------------------------------------------------------------------- /vendor/benchmark/test/clobber_memory_assembly_test.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef __clang__ 4 | #pragma clang diagnostic ignored "-Wreturn-type" 5 | #endif 6 | 7 | extern "C" { 8 | 9 | extern int ExternInt; 10 | extern int ExternInt2; 11 | extern int ExternInt3; 12 | 13 | } 14 | 15 | // CHECK-LABEL: test_basic: 16 | extern "C" void test_basic() { 17 | int x; 18 | benchmark::DoNotOptimize(&x); 19 | x = 101; 20 | benchmark::ClobberMemory(); 21 | // CHECK: leaq [[DEST:[^,]+]], %rax 22 | // CHECK: movl $101, [[DEST]] 23 | // CHECK: ret 24 | } 25 | 26 | // CHECK-LABEL: test_redundant_store: 27 | extern "C" void test_redundant_store() { 28 | ExternInt = 3; 29 | benchmark::ClobberMemory(); 30 | ExternInt = 51; 31 | // CHECK-DAG: ExternInt 32 | // CHECK-DAG: movl $3 33 | // CHECK: movl $51 34 | } 35 | 36 | // CHECK-LABEL: test_redundant_read: 37 | extern "C" void test_redundant_read() { 38 | int x; 39 | benchmark::DoNotOptimize(&x); 40 | x = ExternInt; 41 | benchmark::ClobberMemory(); 42 | x = ExternInt2; 43 | // CHECK: leaq [[DEST:[^,]+]], %rax 44 | // CHECK: ExternInt(%rip) 45 | // CHECK: movl %eax, [[DEST]] 46 | // CHECK-NOT: ExternInt2 47 | // CHECK: ret 48 | } 49 | 50 | // CHECK-LABEL: test_redundant_read2: 51 | extern "C" void test_redundant_read2() { 52 | int x; 53 | benchmark::DoNotOptimize(&x); 54 | x = ExternInt; 55 | benchmark::ClobberMemory(); 56 | x = ExternInt2; 57 | benchmark::ClobberMemory(); 58 | // CHECK: leaq [[DEST:[^,]+]], %rax 59 | // CHECK: ExternInt(%rip) 60 | // CHECK: movl %eax, [[DEST]] 61 | // CHECK: ExternInt2(%rip) 62 | // CHECK: 
movl %eax, [[DEST]] 63 | // CHECK: ret 64 | } 65 | -------------------------------------------------------------------------------- /vendor/benchmark/test/AssemblyTests.cmake: -------------------------------------------------------------------------------- 1 | 2 | include(split_list) 3 | 4 | set(ASM_TEST_FLAGS "") 5 | check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) 6 | if (BENCHMARK_HAS_O3_FLAG) 7 | list(APPEND ASM_TEST_FLAGS -O3) 8 | endif() 9 | 10 | check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG) 11 | if (BENCHMARK_HAS_G0_FLAG) 12 | list(APPEND ASM_TEST_FLAGS -g0) 13 | endif() 14 | 15 | check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) 16 | if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) 17 | list(APPEND ASM_TEST_FLAGS -fno-stack-protector) 18 | endif() 19 | 20 | split_list(ASM_TEST_FLAGS) 21 | string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER) 22 | 23 | macro(add_filecheck_test name) 24 | cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV}) 25 | add_library(${name} OBJECT ${name}.cc) 26 | set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}") 27 | set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s") 28 | add_custom_target(copy_${name} ALL 29 | COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py 30 | $ 31 | ${ASM_OUTPUT_FILE} 32 | BYPRODUCTS ${ASM_OUTPUT_FILE}) 33 | add_dependencies(copy_${name} ${name}) 34 | if (NOT ARG_CHECK_PREFIXES) 35 | set(ARG_CHECK_PREFIXES "CHECK") 36 | endif() 37 | foreach(prefix ${ARG_CHECK_PREFIXES}) 38 | add_test(NAME run_${name}_${prefix} 39 | COMMAND 40 | ${LLVM_FILECHECK_EXE} ${name}.cc 41 | --input-file=${ASM_OUTPUT_FILE} 42 | --check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER} 43 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) 44 | endforeach() 45 | endmacro() 46 | 47 | -------------------------------------------------------------------------------- /vendor/benchmark/test/map_test.cc: 
-------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include 4 | #include 5 | 6 | namespace { 7 | 8 | std::map ConstructRandomMap(int size) { 9 | std::map m; 10 | for (int i = 0; i < size; ++i) { 11 | m.insert(std::make_pair(std::rand() % size, std::rand() % size)); 12 | } 13 | return m; 14 | } 15 | 16 | } // namespace 17 | 18 | // Basic version. 19 | static void BM_MapLookup(benchmark::State& state) { 20 | const int size = static_cast(state.range(0)); 21 | std::map m; 22 | for (auto _ : state) { 23 | state.PauseTiming(); 24 | m = ConstructRandomMap(size); 25 | state.ResumeTiming(); 26 | for (int i = 0; i < size; ++i) { 27 | benchmark::DoNotOptimize(m.find(std::rand() % size)); 28 | } 29 | } 30 | state.SetItemsProcessed(state.iterations() * size); 31 | } 32 | BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); 33 | 34 | // Using fixtures. 35 | class MapFixture : public ::benchmark::Fixture { 36 | public: 37 | void SetUp(const ::benchmark::State& st) { 38 | m = ConstructRandomMap(static_cast(st.range(0))); 39 | } 40 | 41 | void TearDown(const ::benchmark::State&) { m.clear(); } 42 | 43 | std::map m; 44 | }; 45 | 46 | BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { 47 | const int size = static_cast(state.range(0)); 48 | for (auto _ : state) { 49 | for (int i = 0; i < size; ++i) { 50 | benchmark::DoNotOptimize(m.find(std::rand() % size)); 51 | } 52 | } 53 | state.SetItemsProcessed(state.iterations() * size); 54 | } 55 | BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12); 56 | 57 | BENCHMARK_MAIN(); 58 | -------------------------------------------------------------------------------- /vendor/benchmark/src/string_util.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_STRING_UTIL_H_ 2 | #define BENCHMARK_STRING_UTIL_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include "internal_macros.h" 8 | 9 | namespace 
benchmark { 10 | 11 | void AppendHumanReadable(int n, std::string* str); 12 | 13 | std::string HumanReadableNumber(double n, double one_k = 1024.0); 14 | 15 | std::string StrFormat(const char* format, ...); 16 | 17 | inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { 18 | return out; 19 | } 20 | 21 | template 22 | inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) { 23 | out << std::forward(f); 24 | return StrCatImp(out, std::forward(rest)...); 25 | } 26 | 27 | template 28 | inline std::string StrCat(Args&&... args) { 29 | std::ostringstream ss; 30 | StrCatImp(ss, std::forward(args)...); 31 | return ss.str(); 32 | } 33 | 34 | void ReplaceAll(std::string* str, const std::string& from, 35 | const std::string& to); 36 | 37 | #ifdef BENCHMARK_STL_ANDROID_GNUSTL 38 | /* 39 | * GNU STL in Android NDK lacks support for some C++11 functions, including 40 | * stoul, stoi, stod. We reimplement them here using C functions strtoul, 41 | * strtol, strtod. Note that reimplemented functions are in benchmark:: 42 | * namespace, not std:: namespace. 43 | */ 44 | unsigned long stoul(const std::string& str, size_t* pos = nullptr, 45 | int base = 10); 46 | int stoi(const std::string& str, size_t* pos = nullptr, int base = 10); 47 | double stod(const std::string& str, size_t* pos = nullptr); 48 | #else 49 | using std::stoul; 50 | using std::stoi; 51 | using std::stod; 52 | #endif 53 | 54 | } // end namespace benchmark 55 | 56 | #endif // BENCHMARK_STRING_UTIL_H_ 57 | -------------------------------------------------------------------------------- /vendor/benchmark/test/display_aggregates_only_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #undef NDEBUG 3 | #include 4 | #include 5 | 6 | #include "benchmark/benchmark.h" 7 | #include "output_test.h" 8 | 9 | // Ok this test is super ugly. We want to check what happens with the file 10 | // reporter in the presence of DisplayAggregatesOnly(). 
11 | // We do not care about console output, the normal tests check that already. 12 | 13 | void BM_SummaryRepeat(benchmark::State& state) { 14 | for (auto _ : state) { 15 | } 16 | } 17 | BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly(); 18 | 19 | int main(int argc, char* argv[]) { 20 | const std::string output = GetFileReporterOutput(argc, argv); 21 | 22 | if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 || 23 | SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 || 24 | SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || 25 | SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 26 | 1 || 27 | SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 28 | 1) { 29 | std::cout << "Precondition mismatch. Expected to only find 6 " 30 | "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" 31 | "\"name\": \"BM_SummaryRepeat/repeats:3\", " 32 | "\"name\": \"BM_SummaryRepeat/repeats:3\", " 33 | "\"name\": \"BM_SummaryRepeat/repeats:3\", " 34 | "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " 35 | "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " 36 | "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire " 37 | "output:\n"; 38 | std::cout << output; 39 | return 1; 40 | } 41 | 42 | return 0; 43 | } 44 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/GetGitVersion.cmake: -------------------------------------------------------------------------------- 1 | # - Returns a version string from Git tags 2 | # 3 | # This function inspects the annotated git tags for the project and returns a string 4 | # into a CMake variable 5 | # 6 | # get_git_version() 7 | # 8 | # - Example 9 | # 10 | # include(GetGitVersion) 11 | # get_git_version(GIT_VERSION) 12 | # 13 | # Requires CMake 2.8.11+ 14 | find_package(Git) 15 | 16 | if(__get_git_version) 17 | return() 18 | endif() 19 | set(__get_git_version INCLUDED) 
20 | 21 | function(get_git_version var) 22 | if(GIT_EXECUTABLE) 23 | execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 24 | WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} 25 | RESULT_VARIABLE status 26 | OUTPUT_VARIABLE GIT_VERSION 27 | ERROR_QUIET) 28 | if(${status}) 29 | set(GIT_VERSION "v0.0.0") 30 | else() 31 | string(STRIP ${GIT_VERSION} GIT_VERSION) 32 | string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION}) 33 | endif() 34 | 35 | # Work out if the repository is dirty 36 | execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh 37 | WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} 38 | OUTPUT_QUIET 39 | ERROR_QUIET) 40 | execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD -- 41 | WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} 42 | OUTPUT_VARIABLE GIT_DIFF_INDEX 43 | ERROR_QUIET) 44 | string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) 45 | if (${GIT_DIRTY}) 46 | set(GIT_VERSION "${GIT_VERSION}-dirty") 47 | endif() 48 | else() 49 | set(GIT_VERSION "v0.0.0") 50 | endif() 51 | 52 | message("-- git Version: ${GIT_VERSION}") 53 | set(${var} ${GIT_VERSION} PARENT_SCOPE) 54 | endfunction() 55 | -------------------------------------------------------------------------------- /vendor/benchmark/src/thread_manager.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_THREAD_MANAGER_H 2 | #define BENCHMARK_THREAD_MANAGER_H 3 | 4 | #include 5 | 6 | #include "benchmark/benchmark.h" 7 | #include "mutex.h" 8 | 9 | namespace benchmark { 10 | namespace internal { 11 | 12 | class ThreadManager { 13 | public: 14 | ThreadManager(int num_threads) 15 | : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} 16 | 17 | Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { 18 | return benchmark_mutex_; 19 | } 20 | 21 | bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { 22 | return start_stop_barrier_.wait(); 23 | } 24 | 25 | void 
NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { 26 | start_stop_barrier_.removeThread(); 27 | if (--alive_threads_ == 0) { 28 | MutexLock lock(end_cond_mutex_); 29 | end_condition_.notify_all(); 30 | } 31 | } 32 | 33 | void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { 34 | MutexLock lock(end_cond_mutex_); 35 | end_condition_.wait(lock.native_handle(), 36 | [this]() { return alive_threads_ == 0; }); 37 | } 38 | 39 | public: 40 | struct Result { 41 | int64_t iterations = 0; 42 | double real_time_used = 0; 43 | double cpu_time_used = 0; 44 | double manual_time_used = 0; 45 | int64_t complexity_n = 0; 46 | std::string report_label_; 47 | std::string error_message_; 48 | bool has_error_ = false; 49 | UserCounters counters; 50 | }; 51 | GUARDED_BY(GetBenchmarkMutex()) Result results; 52 | 53 | private: 54 | mutable Mutex benchmark_mutex_; 55 | std::atomic alive_threads_; 56 | Barrier start_stop_barrier_; 57 | Mutex end_cond_mutex_; 58 | Condition end_condition_; 59 | }; 60 | 61 | } // namespace internal 62 | } // namespace benchmark 63 | 64 | #endif // BENCHMARK_THREAD_MANAGER_H 65 | -------------------------------------------------------------------------------- /vendor/benchmark/AUTHORS: -------------------------------------------------------------------------------- 1 | # This is the official list of benchmark authors for copyright purposes. 2 | # This file is distinct from the CONTRIBUTORS files. 3 | # See the latter for an explanation. 4 | # 5 | # Names should be added to this file as: 6 | # Name or Organization 7 | # The email address is not required for organizations. 8 | # 9 | # Please keep the list sorted. 10 | 11 | Albert Pretorius 12 | Arne Beer 13 | Carto 14 | Christopher Seymour 15 | David Coeurjolly 16 | Deniz Evrenci 17 | Dirac Research 18 | Dominik Czarnota 19 | Eric Fiselier 20 | Eugene Zhuk 21 | Evgeny Safronov 22 | Federico Ficarelli 23 | Felix Homann 24 | Google Inc. 
25 | International Business Machines Corporation 26 | Ismael Jimenez Martinez 27 | Jern-Kuan Leong 28 | JianXiong Zhou 29 | Joao Paulo Magalhaes 30 | Jussi Knuuttila 31 | Kaito Udagawa 32 | Kishan Kumar 33 | Lei Xu 34 | Matt Clarkson 35 | Maxim Vafin 36 | MongoDB Inc. 37 | Nick Hutchinson 38 | Oleksandr Sochka 39 | Paul Redmond 40 | Radoslav Yovchev 41 | Roman Lebedev 42 | Shuo Chen 43 | Steinar H. Gunderson 44 | Stripe, Inc. 45 | Yixuan Qiu 46 | Yusuke Suzuki 47 | Zbigniew Skowron 48 | -------------------------------------------------------------------------------- /vendor/benchmark/test/cxx03_test.cc: -------------------------------------------------------------------------------- 1 | #undef NDEBUG 2 | #include 3 | #include 4 | 5 | #include "benchmark/benchmark.h" 6 | 7 | #if __cplusplus >= 201103L 8 | #error C++11 or greater detected. Should be C++03. 9 | #endif 10 | 11 | #ifdef BENCHMARK_HAS_CXX11 12 | #error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined. 13 | #endif 14 | 15 | void BM_empty(benchmark::State& state) { 16 | while (state.KeepRunning()) { 17 | volatile std::size_t x = state.iterations(); 18 | ((void)x); 19 | } 20 | } 21 | BENCHMARK(BM_empty); 22 | 23 | // The new C++11 interface for args/ranges requires initializer list support. 24 | // Therefore we provide the old interface to support C++03. 
25 | void BM_old_arg_range_interface(benchmark::State& state) { 26 | assert((state.range(0) == 1 && state.range(1) == 2) || 27 | (state.range(0) == 5 && state.range(1) == 6)); 28 | while (state.KeepRunning()) { 29 | } 30 | } 31 | BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6); 32 | 33 | template 34 | void BM_template2(benchmark::State& state) { 35 | BM_empty(state); 36 | } 37 | BENCHMARK_TEMPLATE2(BM_template2, int, long); 38 | 39 | template 40 | void BM_template1(benchmark::State& state) { 41 | BM_empty(state); 42 | } 43 | BENCHMARK_TEMPLATE(BM_template1, long); 44 | BENCHMARK_TEMPLATE1(BM_template1, int); 45 | 46 | template 47 | struct BM_Fixture : public ::benchmark::Fixture { 48 | }; 49 | 50 | BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { 51 | BM_empty(state); 52 | } 53 | BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { 54 | BM_empty(state); 55 | } 56 | 57 | void BM_counters(benchmark::State& state) { 58 | BM_empty(state); 59 | state.counters["Foo"] = 2; 60 | } 61 | BENCHMARK(BM_counters); 62 | 63 | BENCHMARK_MAIN(); 64 | -------------------------------------------------------------------------------- /vendor/benchmark/test/BUILD: -------------------------------------------------------------------------------- 1 | TEST_COPTS = [ 2 | "-pedantic", 3 | "-pedantic-errors", 4 | "-std=c++11", 5 | "-Wall", 6 | "-Wextra", 7 | "-Wshadow", 8 | # "-Wshorten-64-to-32", 9 | "-Wfloat-equal", 10 | "-fstrict-aliasing", 11 | ] 12 | 13 | PER_SRC_COPTS = ({ 14 | "cxx03_test.cc": ["-std=c++03"], 15 | # Some of the issues with DoNotOptimize only occur when optimization is enabled 16 | "donotoptimize_test.cc": ["-O3"], 17 | }) 18 | 19 | 20 | TEST_ARGS = ["--benchmark_min_time=0.01"] 21 | 22 | PER_SRC_TEST_ARGS = ({ 23 | "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"], 24 | }) 25 | 26 | cc_library( 27 | name = "output_test_helper", 28 | testonly = 1, 29 | srcs = 
["output_test_helper.cc"], 30 | hdrs = ["output_test.h"], 31 | copts = TEST_COPTS, 32 | deps = [ 33 | "//:benchmark", 34 | "//:benchmark_internal_headers", 35 | ], 36 | ) 37 | 38 | [ 39 | cc_test( 40 | name = test_src[:-len(".cc")], 41 | size = "small", 42 | srcs = [test_src], 43 | args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []), 44 | copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []), 45 | deps = [ 46 | ":output_test_helper", 47 | "//:benchmark", 48 | "//:benchmark_internal_headers", 49 | "@com_google_googletest//:gtest", 50 | ] + ( 51 | ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else [] 52 | ), 53 | # FIXME: Add support for assembly tests to bazel. 54 | # See Issue #556 55 | # https://github.com/google/benchmark/issues/556 56 | ) for test_src in glob(["*test.cc"], exclude = ["*_assembly_test.cc", "link_main_test.cc"]) 57 | ] 58 | 59 | cc_test( 60 | name = "link_main_test", 61 | size = "small", 62 | srcs = ["link_main_test.cc"], 63 | copts = TEST_COPTS, 64 | deps = ["//:benchmark_main"], 65 | ) 66 | -------------------------------------------------------------------------------- /vendor/benchmark/src/sleep.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "sleep.h" 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include "internal_macros.h" 22 | 23 | #ifdef BENCHMARK_OS_WINDOWS 24 | #include 25 | #endif 26 | 27 | namespace benchmark { 28 | #ifdef BENCHMARK_OS_WINDOWS 29 | // Window's Sleep takes milliseconds argument. 30 | void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } 31 | void SleepForSeconds(double seconds) { 32 | SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); 33 | } 34 | #else // BENCHMARK_OS_WINDOWS 35 | void SleepForMicroseconds(int microseconds) { 36 | struct timespec sleep_time; 37 | sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; 38 | sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; 39 | while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) 40 | ; // Ignore signals and wait for the full interval to elapse. 41 | } 42 | 43 | void SleepForMilliseconds(int milliseconds) { 44 | SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); 45 | } 46 | 47 | void SleepForSeconds(double seconds) { 48 | SleepForMicroseconds(static_cast(seconds * kNumMicrosPerSecond)); 49 | } 50 | #endif // BENCHMARK_OS_WINDOWS 51 | } // end namespace benchmark 52 | -------------------------------------------------------------------------------- /vendor/benchmark/tools/gbench/Inputs/test2_run.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_Hi", 12 | "iterations": 1234, 13 | "real_time": 42, 14 | "cpu_time": 24, 15 | "time_unit": "ms" 16 | }, 17 | { 18 | "name": "BM_Zero", 19 | "iterations": 1000, 20 | "real_time": 10, 21 | "cpu_time": 10, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_Zero/4", 26 | "iterations": 4000, 27 | "real_time": 40, 28 | "cpu_time": 
40, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "Prefix/BM_Zero", 33 | "iterations": 2000, 34 | "real_time": 20, 35 | "cpu_time": 20, 36 | "time_unit": "ns" 37 | }, 38 | { 39 | "name": "Prefix/BM_Zero/3", 40 | "iterations": 3000, 41 | "real_time": 30, 42 | "cpu_time": 30, 43 | "time_unit": "ns" 44 | }, 45 | { 46 | "name": "BM_One", 47 | "iterations": 5000, 48 | "real_time": 5, 49 | "cpu_time": 5, 50 | "time_unit": "ns" 51 | }, 52 | { 53 | "name": "BM_One/4", 54 | "iterations": 2000, 55 | "real_time": 20, 56 | "cpu_time": 20, 57 | "time_unit": "ns" 58 | }, 59 | { 60 | "name": "Prefix/BM_One", 61 | "iterations": 1000, 62 | "real_time": 10, 63 | "cpu_time": 10, 64 | "time_unit": "ns" 65 | }, 66 | { 67 | "name": "Prefix/BM_One/3", 68 | "iterations": 1500, 69 | "real_time": 15, 70 | "cpu_time": 15, 71 | "time_unit": "ns" 72 | }, 73 | { 74 | "name": "BM_Bye", 75 | "iterations": 5321, 76 | "real_time": 11, 77 | "cpu_time": 63, 78 | "time_unit": "ns" 79 | } 80 | ] 81 | } 82 | -------------------------------------------------------------------------------- /vendor/benchmark/src/log.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_LOG_H_ 2 | #define BENCHMARK_LOG_H_ 3 | 4 | #include 5 | #include 6 | 7 | #include "benchmark/benchmark.h" 8 | 9 | namespace benchmark { 10 | namespace internal { 11 | 12 | typedef std::basic_ostream&(EndLType)(std::basic_ostream&); 13 | 14 | class LogType { 15 | friend LogType& GetNullLogInstance(); 16 | friend LogType& GetErrorLogInstance(); 17 | 18 | // FIXME: Add locking to output. 
19 | template 20 | friend LogType& operator<<(LogType&, Tp const&); 21 | friend LogType& operator<<(LogType&, EndLType*); 22 | 23 | private: 24 | LogType(std::ostream* out) : out_(out) {} 25 | std::ostream* out_; 26 | BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); 27 | }; 28 | 29 | template 30 | LogType& operator<<(LogType& log, Tp const& value) { 31 | if (log.out_) { 32 | *log.out_ << value; 33 | } 34 | return log; 35 | } 36 | 37 | inline LogType& operator<<(LogType& log, EndLType* m) { 38 | if (log.out_) { 39 | *log.out_ << m; 40 | } 41 | return log; 42 | } 43 | 44 | inline int& LogLevel() { 45 | static int log_level = 0; 46 | return log_level; 47 | } 48 | 49 | inline LogType& GetNullLogInstance() { 50 | static LogType log(nullptr); 51 | return log; 52 | } 53 | 54 | inline LogType& GetErrorLogInstance() { 55 | static LogType log(&std::clog); 56 | return log; 57 | } 58 | 59 | inline LogType& GetLogInstanceForLevel(int level) { 60 | if (level <= LogLevel()) { 61 | return GetErrorLogInstance(); 62 | } 63 | return GetNullLogInstance(); 64 | } 65 | 66 | } // end namespace internal 67 | } // end namespace benchmark 68 | 69 | // clang-format off 70 | #define VLOG(x) \ 71 | (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ 72 | " ") 73 | // clang-format on 74 | #endif 75 | -------------------------------------------------------------------------------- /vendor/benchmark/src/thread_timer.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_THREAD_TIMER_H 2 | #define BENCHMARK_THREAD_TIMER_H 3 | 4 | #include "check.h" 5 | #include "timers.h" 6 | 7 | namespace benchmark { 8 | namespace internal { 9 | 10 | class ThreadTimer { 11 | public: 12 | ThreadTimer() = default; 13 | 14 | // Called by each thread 15 | void StartTimer() { 16 | running_ = true; 17 | start_real_time_ = ChronoClockNow(); 18 | start_cpu_time_ = ThreadCPUUsage(); 19 | } 20 | 21 | // Called by each thread 22 | void StopTimer() { 23 
| CHECK(running_); 24 | running_ = false; 25 | real_time_used_ += ChronoClockNow() - start_real_time_; 26 | // Floating point error can result in the subtraction producing a negative 27 | // time. Guard against that. 28 | cpu_time_used_ += std::max(ThreadCPUUsage() - start_cpu_time_, 0); 29 | } 30 | 31 | // Called by each thread 32 | void SetIterationTime(double seconds) { manual_time_used_ += seconds; } 33 | 34 | bool running() const { return running_; } 35 | 36 | // REQUIRES: timer is not running 37 | double real_time_used() { 38 | CHECK(!running_); 39 | return real_time_used_; 40 | } 41 | 42 | // REQUIRES: timer is not running 43 | double cpu_time_used() { 44 | CHECK(!running_); 45 | return cpu_time_used_; 46 | } 47 | 48 | // REQUIRES: timer is not running 49 | double manual_time_used() { 50 | CHECK(!running_); 51 | return manual_time_used_; 52 | } 53 | 54 | private: 55 | bool running_ = false; // Is the timer running 56 | double start_real_time_ = 0; // If running_ 57 | double start_cpu_time_ = 0; // If running_ 58 | 59 | // Accumulated time so far (does not contain current slice if running_) 60 | double real_time_used_ = 0; 61 | double cpu_time_used_ = 0; 62 | // Manually set iteration time. User sets this with SetIterationTime(seconds). 
63 | double manual_time_used_ = 0; 64 | }; 65 | 66 | } // namespace internal 67 | } // namespace benchmark 68 | 69 | #endif // BENCHMARK_THREAD_TIMER_H 70 | -------------------------------------------------------------------------------- /vendor/benchmark/test/state_assembly_test.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef __clang__ 4 | #pragma clang diagnostic ignored "-Wreturn-type" 5 | #endif 6 | 7 | // clang-format off 8 | extern "C" { 9 | extern int ExternInt; 10 | benchmark::State& GetState(); 11 | void Fn(); 12 | } 13 | // clang-format on 14 | 15 | using benchmark::State; 16 | 17 | // CHECK-LABEL: test_for_auto_loop: 18 | extern "C" int test_for_auto_loop() { 19 | State& S = GetState(); 20 | int x = 42; 21 | // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv 22 | // CHECK-NEXT: testq %rbx, %rbx 23 | // CHECK-NEXT: je [[LOOP_END:.*]] 24 | 25 | for (auto _ : S) { 26 | // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]: 27 | // CHECK-GNU-NEXT: subq $1, %rbx 28 | // CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax 29 | // CHECK-NEXT: jne .L[[LOOP_HEAD]] 30 | benchmark::DoNotOptimize(x); 31 | } 32 | // CHECK: [[LOOP_END]]: 33 | // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv 34 | 35 | // CHECK: movl $101, %eax 36 | // CHECK: ret 37 | return 101; 38 | } 39 | 40 | // CHECK-LABEL: test_while_loop: 41 | extern "C" int test_while_loop() { 42 | State& S = GetState(); 43 | int x = 42; 44 | 45 | // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]] 46 | // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]: 47 | while (S.KeepRunning()) { 48 | // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]] 49 | // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]] 50 | // CHECK: movq %[[IREG]], [[DEST:.*]] 51 | benchmark::DoNotOptimize(x); 52 | } 53 | // CHECK-DAG: movq [[DEST]], %[[IREG]] 54 | // CHECK-DAG: testq %[[IREG]], %[[IREG]] 55 | // CHECK-DAG: jne .L[[LOOP_BODY]] 56 | // CHECK-DAG: .L[[LOOP_HEADER]]: 57 
| 58 | // CHECK: cmpb $0 59 | // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]] 60 | // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv 61 | 62 | // CHECK: .L[[LOOP_END]]: 63 | // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv 64 | 65 | // CHECK: movl $101, %eax 66 | // CHECK: ret 67 | return 101; 68 | } 69 | -------------------------------------------------------------------------------- /vendor/benchmark/test/options_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | #include 3 | #include 4 | 5 | #if defined(NDEBUG) 6 | #undef NDEBUG 7 | #endif 8 | #include 9 | 10 | void BM_basic(benchmark::State& state) { 11 | for (auto _ : state) { 12 | } 13 | } 14 | 15 | void BM_basic_slow(benchmark::State& state) { 16 | std::chrono::milliseconds sleep_duration(state.range(0)); 17 | for (auto _ : state) { 18 | std::this_thread::sleep_for( 19 | std::chrono::duration_cast(sleep_duration)); 20 | } 21 | } 22 | 23 | BENCHMARK(BM_basic); 24 | BENCHMARK(BM_basic)->Arg(42); 25 | BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond); 26 | BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond); 27 | BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond); 28 | BENCHMARK(BM_basic)->Range(1, 8); 29 | BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8); 30 | BENCHMARK(BM_basic)->DenseRange(10, 15); 31 | BENCHMARK(BM_basic)->Args({42, 42}); 32 | BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}}); 33 | BENCHMARK(BM_basic)->MinTime(0.7); 34 | BENCHMARK(BM_basic)->UseRealTime(); 35 | BENCHMARK(BM_basic)->ThreadRange(2, 4); 36 | BENCHMARK(BM_basic)->ThreadPerCpu(); 37 | BENCHMARK(BM_basic)->Repetitions(3); 38 | 39 | void CustomArgs(benchmark::internal::Benchmark* b) { 40 | for (int i = 0; i < 10; ++i) { 41 | b->Arg(i); 42 | } 43 | } 44 | 45 | BENCHMARK(BM_basic)->Apply(CustomArgs); 46 | 47 | void BM_explicit_iteration_count(benchmark::State& state) { 48 | // 
Test that benchmarks specified with an explicit iteration count are 49 | // only run once. 50 | static bool invoked_before = false; 51 | assert(!invoked_before); 52 | invoked_before = true; 53 | 54 | // Test that the requested iteration count is respected. 55 | assert(state.max_iterations == 42); 56 | size_t actual_iterations = 0; 57 | for (auto _ : state) 58 | ++actual_iterations; 59 | assert(state.iterations() == state.max_iterations); 60 | assert(state.iterations() == 42); 61 | 62 | } 63 | BENCHMARK(BM_explicit_iteration_count)->Iterations(42); 64 | 65 | BENCHMARK_MAIN(); 66 | -------------------------------------------------------------------------------- /vendor/benchmark/src/complexity.h: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Ismael Jimenez Martinez. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Source project : https://github.com/ismaelJimenez/cpp.leastsq 16 | // Adapted to be used with google benchmark 17 | 18 | #ifndef COMPLEXITY_H_ 19 | #define COMPLEXITY_H_ 20 | 21 | #include 22 | #include 23 | 24 | #include "benchmark/benchmark.h" 25 | 26 | namespace benchmark { 27 | 28 | // Return a vector containing the bigO and RMS information for the specified 29 | // list of reports. If 'reports.size() < 2' an empty vector is returned. 
std::vector<BenchmarkReporter::Run> ComputeBigO(
    const std::vector<BenchmarkReporter::Run>& reports);

// This data structure will contain the result returned by MinimalLeastSq
//   - coef        : Estimated coefficient for the high-order term as
//                   interpolated from data.
//   - rms         : Normalized Root Mean Squared Error.
//   - complexity  : Scalability form (e.g. oN, oNLogN). In case a scalability
//                   form has been provided to MinimalLeastSq this will return
//                   the same value. In case BigO::oAuto has been selected, this
//                   parameter will return the best fitting curve detected.

struct LeastSq {
  // Default: no fit computed yet (zero coefficient/error, unknown form).
  LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}

  double coef;
  double rms;
  BigO complexity;
};

// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity);

}  // end namespace benchmark

#endif  // COMPLEXITY_H_
#include <cstdlib>
#include <stdexcept>

#include "../src/check.h"
#include "benchmark/benchmark.h"

#if defined(__GNUC__) && !defined(__EXCEPTIONS)
#define TEST_HAS_NO_EXCEPTIONS
#endif

// Abort handler installed in main(): turns a failed library CHECK into a
// catchable exception, or aborts outright when exceptions are unavailable.
void TestHandler() {
#ifndef TEST_HAS_NO_EXCEPTIONS
  throw std::logic_error("");
#else
  std::abort();
#endif
}

// Calls PauseTiming()/ResumeTiming() outside the benchmark loop and verifies
// that the misuse is diagnosed: the installed handler throws, which we catch;
// reaching std::abort() means the diagnostic did NOT fire.
void try_invalid_pause_resume(benchmark::State& state) {
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
  try {
    state.PauseTiming();
    std::abort();
  } catch (std::logic_error const&) {
  }
  try {
    state.ResumeTiming();
    std::abort();
  } catch (std::logic_error const&) {
  }
#else
  (void)state;  // avoid unused warning
#endif
}

// Exercises the diagnostics around the range-for loop form.  The invalid
// calls are attempted only on the first invocation, since the framework may
// re-run the benchmark function.
void BM_diagnostic_test(benchmark::State& state) {
  static bool called_once = false;

  if (called_once == false) try_invalid_pause_resume(state);

  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }

  if (called_once == false) try_invalid_pause_resume(state);

  called_once = true;
}
BENCHMARK(BM_diagnostic_test);


// Same checks as above, but using the explicit KeepRunning() loop form.
void BM_diagnostic_test_keep_running(benchmark::State& state) {
  static bool called_once = false;

  if (called_once == false) try_invalid_pause_resume(state);

  while(state.KeepRunning()) {
    benchmark::DoNotOptimize(state.iterations());
  }

  if (called_once == false) try_invalid_pause_resume(state);

  called_once = true;
}
BENCHMARK(BM_diagnostic_test_keep_running);

int main(int argc, char* argv[]) {
  // Route CHECK failures through TestHandler so the misuse is observable
  // instead of terminating the process.
  benchmark::internal::GetAbortHandler() = &TestHandler;
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
}
# - Compile and run code to check for C++ features
#
# This functions compiles a source file under the `cmake` folder
# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake
# environment
#
#  cxx_feature_check(<FLAG> [<VARIANT>])
#
# - Example
#
# include(CXXFeatureCheck)
# cxx_feature_check(STD_REGEX)
# Requires CMake 2.8.12+

if(__cxx_feature_check)
  return()
endif()
set(__cxx_feature_check INCLUDED)

function(cxx_feature_check FILE)
  string(TOLOWER ${FILE} FILE)
  string(TOUPPER ${FILE} VAR)
  string(TOUPPER "HAVE_${VAR}" FEATURE)
  # Honor a user/toolchain-provided HAVE_<VAR> override without re-testing.
  if (DEFINED HAVE_${VAR})
    set(HAVE_${VAR} 1 PARENT_SCOPE)
    add_definitions(-DHAVE_${VAR})
    return()
  endif()

  if (NOT DEFINED COMPILE_${FEATURE})
    message("-- Performing Test ${FEATURE}")
    if(CMAKE_CROSSCOMPILING)
      # When cross compiling we cannot execute the test binary, so only
      # try to compile it and assume success (exit code 0) if it builds.
      try_compile(COMPILE_${FEATURE}
              ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
              CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
              LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
      if(COMPILE_${FEATURE})
        message(WARNING
              "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
        set(RUN_${FEATURE} 0)
      else()
        set(RUN_${FEATURE} 1)
      endif()
    else()
      # FIX: the "-- Performing Test ${FEATURE}" status line was printed a
      # second time here, duplicating the message emitted above.
      try_run(RUN_${FEATURE} COMPILE_${FEATURE}
              ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
              CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
              LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
    endif()
  endif()

  # try_run convention: RUN_<FEATURE> is the exit code, 0 means success.
  if(RUN_${FEATURE} EQUAL 0)
    message("-- Performing Test ${FEATURE} -- success")
    set(HAVE_${VAR} 1 PARENT_SCOPE)
    add_definitions(-DHAVE_${VAR})
  else()
    if(NOT COMPILE_${FEATURE})
      message("-- Performing Test ${FEATURE} -- failed to compile")
    else()
      message("-- Performing Test ${FEATURE} -- compiled but failed to run")
    endif()
  endif()
endfunction()
r, but not in *l 53 | for (auto const& tc : r) { 54 | auto it = l->find(tc.first); 55 | if (it == l->end()) { 56 | (*l)[tc.first] = tc.second; 57 | } 58 | } 59 | } 60 | 61 | bool SameNames(UserCounters const& l, UserCounters const& r) { 62 | if (&l == &r) return true; 63 | if (l.size() != r.size()) { 64 | return false; 65 | } 66 | for (auto const& c : l) { 67 | if (r.find(c.first) == r.end()) { 68 | return false; 69 | } 70 | } 71 | return true; 72 | } 73 | 74 | } // end namespace internal 75 | } // end namespace benchmark 76 | -------------------------------------------------------------------------------- /vendor/benchmark/src/internal_macros.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_INTERNAL_MACROS_H_ 2 | #define BENCHMARK_INTERNAL_MACROS_H_ 3 | 4 | #include "benchmark/benchmark.h" 5 | 6 | /* Needed to detect STL */ 7 | #include 8 | 9 | // clang-format off 10 | 11 | #ifndef __has_feature 12 | #define __has_feature(x) 0 13 | #endif 14 | 15 | #if defined(__clang__) 16 | #if !defined(COMPILER_CLANG) 17 | #define COMPILER_CLANG 18 | #endif 19 | #elif defined(_MSC_VER) 20 | #if !defined(COMPILER_MSVC) 21 | #define COMPILER_MSVC 22 | #endif 23 | #elif defined(__GNUC__) 24 | #if !defined(COMPILER_GCC) 25 | #define COMPILER_GCC 26 | #endif 27 | #endif 28 | 29 | #if __has_feature(cxx_attributes) 30 | #define BENCHMARK_NORETURN [[noreturn]] 31 | #elif defined(__GNUC__) 32 | #define BENCHMARK_NORETURN __attribute__((noreturn)) 33 | #elif defined(COMPILER_MSVC) 34 | #define BENCHMARK_NORETURN __declspec(noreturn) 35 | #else 36 | #define BENCHMARK_NORETURN 37 | #endif 38 | 39 | #if defined(__CYGWIN__) 40 | #define BENCHMARK_OS_CYGWIN 1 41 | #elif defined(_WIN32) 42 | #define BENCHMARK_OS_WINDOWS 1 43 | #elif defined(__APPLE__) 44 | #define BENCHMARK_OS_APPLE 1 45 | #include "TargetConditionals.h" 46 | #if defined(TARGET_OS_MAC) 47 | #define BENCHMARK_OS_MACOSX 1 48 | #if defined(TARGET_OS_IPHONE) 49 | #define 
BENCHMARK_OS_IOS 1 50 | #endif 51 | #endif 52 | #elif defined(__FreeBSD__) 53 | #define BENCHMARK_OS_FREEBSD 1 54 | #elif defined(__NetBSD__) 55 | #define BENCHMARK_OS_NETBSD 1 56 | #elif defined(__OpenBSD__) 57 | #define BENCHMARK_OS_OPENBSD 1 58 | #elif defined(__linux__) 59 | #define BENCHMARK_OS_LINUX 1 60 | #elif defined(__native_client__) 61 | #define BENCHMARK_OS_NACL 1 62 | #elif defined(__EMSCRIPTEN__) 63 | #define BENCHMARK_OS_EMSCRIPTEN 1 64 | #elif defined(__rtems__) 65 | #define BENCHMARK_OS_RTEMS 1 66 | #elif defined(__Fuchsia__) 67 | #define BENCHMARK_OS_FUCHSIA 1 68 | #elif defined (__SVR4) && defined (__sun) 69 | #define BENCHMARK_OS_SOLARIS 1 70 | #endif 71 | 72 | #if defined(__ANDROID__) && defined(__GLIBCXX__) 73 | #define BENCHMARK_STL_ANDROID_GNUSTL 1 74 | #endif 75 | 76 | #if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ 77 | && !defined(__EXCEPTIONS) 78 | #define BENCHMARK_HAS_NO_EXCEPTIONS 79 | #endif 80 | 81 | #if defined(COMPILER_CLANG) || defined(COMPILER_GCC) 82 | #define BENCHMARK_MAYBE_UNUSED __attribute__((unused)) 83 | #else 84 | #define BENCHMARK_MAYBE_UNUSED 85 | #endif 86 | 87 | // clang-format on 88 | 89 | #endif // BENCHMARK_INTERNAL_MACROS_H_ 90 | -------------------------------------------------------------------------------- /vendor/benchmark/tools/gbench/Inputs/test1_run1.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_SameTimes", 12 | "iterations": 1000, 13 | "real_time": 10, 14 | "cpu_time": 10, 15 | "time_unit": "ns" 16 | }, 17 | { 18 | "name": "BM_2xFaster", 19 | "iterations": 1000, 20 | "real_time": 50, 21 | "cpu_time": 50, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_2xSlower", 26 | "iterations": 1000, 27 | "real_time": 50, 28 | 
"cpu_time": 50, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "BM_1PercentFaster", 33 | "iterations": 1000, 34 | "real_time": 100, 35 | "cpu_time": 100, 36 | "time_unit": "ns" 37 | }, 38 | { 39 | "name": "BM_1PercentSlower", 40 | "iterations": 1000, 41 | "real_time": 100, 42 | "cpu_time": 100, 43 | "time_unit": "ns" 44 | }, 45 | { 46 | "name": "BM_10PercentFaster", 47 | "iterations": 1000, 48 | "real_time": 100, 49 | "cpu_time": 100, 50 | "time_unit": "ns" 51 | }, 52 | { 53 | "name": "BM_10PercentSlower", 54 | "iterations": 1000, 55 | "real_time": 100, 56 | "cpu_time": 100, 57 | "time_unit": "ns" 58 | }, 59 | { 60 | "name": "BM_100xSlower", 61 | "iterations": 1000, 62 | "real_time": 100, 63 | "cpu_time": 100, 64 | "time_unit": "ns" 65 | }, 66 | { 67 | "name": "BM_100xFaster", 68 | "iterations": 1000, 69 | "real_time": 10000, 70 | "cpu_time": 10000, 71 | "time_unit": "ns" 72 | }, 73 | { 74 | "name": "BM_10PercentCPUToTime", 75 | "iterations": 1000, 76 | "real_time": 100, 77 | "cpu_time": 100, 78 | "time_unit": "ns" 79 | }, 80 | { 81 | "name": "BM_ThirdFaster", 82 | "iterations": 1000, 83 | "real_time": 100, 84 | "cpu_time": 100, 85 | "time_unit": "ns" 86 | }, 87 | { 88 | "name": "BM_BadTimeUnit", 89 | "iterations": 1000, 90 | "real_time": 0.4, 91 | "cpu_time": 0.5, 92 | "time_unit": "s" 93 | }, 94 | { 95 | "name": "BM_DifferentTimeUnit", 96 | "iterations": 1, 97 | "real_time": 1, 98 | "cpu_time": 1, 99 | "time_unit": "s" 100 | } 101 | ] 102 | } 103 | -------------------------------------------------------------------------------- /vendor/benchmark/tools/gbench/Inputs/test1_run2.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_SameTimes", 12 | "iterations": 1000, 13 | "real_time": 10, 14 | "cpu_time": 10, 
15 | "time_unit": "ns" 16 | }, 17 | { 18 | "name": "BM_2xFaster", 19 | "iterations": 1000, 20 | "real_time": 25, 21 | "cpu_time": 25, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_2xSlower", 26 | "iterations": 20833333, 27 | "real_time": 100, 28 | "cpu_time": 100, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "BM_1PercentFaster", 33 | "iterations": 1000, 34 | "real_time": 98.9999999, 35 | "cpu_time": 98.9999999, 36 | "time_unit": "ns" 37 | }, 38 | { 39 | "name": "BM_1PercentSlower", 40 | "iterations": 1000, 41 | "real_time": 100.9999999, 42 | "cpu_time": 100.9999999, 43 | "time_unit": "ns" 44 | }, 45 | { 46 | "name": "BM_10PercentFaster", 47 | "iterations": 1000, 48 | "real_time": 90, 49 | "cpu_time": 90, 50 | "time_unit": "ns" 51 | }, 52 | { 53 | "name": "BM_10PercentSlower", 54 | "iterations": 1000, 55 | "real_time": 110, 56 | "cpu_time": 110, 57 | "time_unit": "ns" 58 | }, 59 | { 60 | "name": "BM_100xSlower", 61 | "iterations": 1000, 62 | "real_time": 1.0000e+04, 63 | "cpu_time": 1.0000e+04, 64 | "time_unit": "ns" 65 | }, 66 | { 67 | "name": "BM_100xFaster", 68 | "iterations": 1000, 69 | "real_time": 100, 70 | "cpu_time": 100, 71 | "time_unit": "ns" 72 | }, 73 | { 74 | "name": "BM_10PercentCPUToTime", 75 | "iterations": 1000, 76 | "real_time": 110, 77 | "cpu_time": 90, 78 | "time_unit": "ns" 79 | }, 80 | { 81 | "name": "BM_ThirdFaster", 82 | "iterations": 1000, 83 | "real_time": 66.665, 84 | "cpu_time": 66.664, 85 | "time_unit": "ns" 86 | }, 87 | { 88 | "name": "BM_BadTimeUnit", 89 | "iterations": 1000, 90 | "real_time": 0.04, 91 | "cpu_time": 0.6, 92 | "time_unit": "s" 93 | }, 94 | { 95 | "name": "BM_DifferentTimeUnit", 96 | "iterations": 1, 97 | "real_time": 1, 98 | "cpu_time": 1, 99 | "time_unit": "ns" 100 | } 101 | ] 102 | } 103 | -------------------------------------------------------------------------------- /vendor/benchmark/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to 
contribute # 2 | 3 | We'd love to accept your patches and contributions to this project. There are 4 | just a few small guidelines you need to follow. 5 | 6 | 7 | ## Contributor License Agreement ## 8 | 9 | Contributions to any Google project must be accompanied by a Contributor 10 | License Agreement. This is not a copyright **assignment**, it simply gives 11 | Google permission to use and redistribute your contributions as part of the 12 | project. 13 | 14 | * If you are an individual writing original source code and you're sure you 15 | own the intellectual property, then you'll need to sign an [individual 16 | CLA][]. 17 | 18 | * If you work for a company that wants to allow you to contribute your work, 19 | then you'll need to sign a [corporate CLA][]. 20 | 21 | You generally only need to submit a CLA once, so if you've already submitted 22 | one (even if it was for a different project), you probably don't need to do it 23 | again. 24 | 25 | [individual CLA]: https://developers.google.com/open-source/cla/individual 26 | [corporate CLA]: https://developers.google.com/open-source/cla/corporate 27 | 28 | Once your CLA is submitted (or if you already submitted one for 29 | another Google project), make a commit adding yourself to the 30 | [AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part 31 | of your first [pull request][]. 32 | 33 | [AUTHORS]: AUTHORS 34 | [CONTRIBUTORS]: CONTRIBUTORS 35 | 36 | 37 | ## Submitting a patch ## 38 | 39 | 1. It's generally best to start by opening a new issue describing the bug or 40 | feature you're intending to fix. Even if you think it's relatively minor, 41 | it's helpful to know what people are working on. Mention in the initial 42 | issue that you are planning to work on that bug or feature so that it can 43 | be assigned to you. 44 | 45 | 1. Follow the normal process of [forking][] the project, and set up a new 46 | branch to work in.
It's important that each group of changes be done in 47 | separate branches in order to ensure that a pull request only includes the 48 | commits related to that bug or feature. 49 | 50 | 1. Do your best to have [well-formed commit messages][] for each change. 51 | This provides consistency throughout the project, and ensures that commit 52 | messages are able to be formatted properly by various git tools. 53 | 54 | 1. Finally, push the commits to your fork and submit a [pull request][]. 55 | 56 | [forking]: https://help.github.com/articles/fork-a-repo 57 | [well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html 58 | [pull request]: https://help.github.com/articles/creating-a-pull-request 59 | -------------------------------------------------------------------------------- /vendor/benchmark/src/check.h: -------------------------------------------------------------------------------- 1 | #ifndef CHECK_H_ 2 | #define CHECK_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "internal_macros.h" 9 | #include "log.h" 10 | 11 | namespace benchmark { 12 | namespace internal { 13 | 14 | typedef void(AbortHandlerT)(); 15 | 16 | inline AbortHandlerT*& GetAbortHandler() { 17 | static AbortHandlerT* handler = &std::abort; 18 | return handler; 19 | } 20 | 21 | BENCHMARK_NORETURN inline void CallAbortHandler() { 22 | GetAbortHandler()(); 23 | std::abort(); // fallback to enforce noreturn 24 | } 25 | 26 | // CheckHandler is the class constructed by failing CHECK macros. CheckHandler 27 | // will log information about the failures and abort when it is destructed. 28 | class CheckHandler { 29 | public: 30 | CheckHandler(const char* check, const char* file, const char* func, int line) 31 | : log_(GetErrorLogInstance()) { 32 | log_ << file << ":" << line << ": " << func << ": Check `" << check 33 | << "' failed. 
"; 34 | } 35 | 36 | LogType& GetLog() { return log_; } 37 | 38 | BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { 39 | log_ << std::endl; 40 | CallAbortHandler(); 41 | } 42 | 43 | CheckHandler& operator=(const CheckHandler&) = delete; 44 | CheckHandler(const CheckHandler&) = delete; 45 | CheckHandler() = delete; 46 | 47 | private: 48 | LogType& log_; 49 | }; 50 | 51 | } // end namespace internal 52 | } // end namespace benchmark 53 | 54 | // The CHECK macro returns a std::ostream object that can have extra information 55 | // written to it. 56 | #ifndef NDEBUG 57 | #define CHECK(b) \ 58 | (b ? ::benchmark::internal::GetNullLogInstance() \ 59 | : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ 60 | .GetLog()) 61 | #else 62 | #define CHECK(b) ::benchmark::internal::GetNullLogInstance() 63 | #endif 64 | 65 | // clang-format off 66 | // preserve whitespacing between operators for alignment 67 | #define CHECK_EQ(a, b) CHECK((a) == (b)) 68 | #define CHECK_NE(a, b) CHECK((a) != (b)) 69 | #define CHECK_GE(a, b) CHECK((a) >= (b)) 70 | #define CHECK_LE(a, b) CHECK((a) <= (b)) 71 | #define CHECK_GT(a, b) CHECK((a) > (b)) 72 | #define CHECK_LT(a, b) CHECK((a) < (b)) 73 | 74 | #define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps)) 75 | #define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps)) 76 | #define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps)) 77 | #define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) 78 | #define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) 79 | #define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) 80 | //clang-format on 81 | 82 | #endif // CHECK_H_ 83 | -------------------------------------------------------------------------------- /vendor/benchmark/test/filter_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | 
#include 10 | #include 11 | #include 12 | 13 | namespace { 14 | 15 | class TestReporter : public benchmark::ConsoleReporter { 16 | public: 17 | virtual bool ReportContext(const Context& context) { 18 | return ConsoleReporter::ReportContext(context); 19 | }; 20 | 21 | virtual void ReportRuns(const std::vector& report) { 22 | ++count_; 23 | ConsoleReporter::ReportRuns(report); 24 | }; 25 | 26 | TestReporter() : count_(0) {} 27 | 28 | virtual ~TestReporter() {} 29 | 30 | size_t GetCount() const { return count_; } 31 | 32 | private: 33 | mutable size_t count_; 34 | }; 35 | 36 | } // end namespace 37 | 38 | static void NoPrefix(benchmark::State& state) { 39 | for (auto _ : state) { 40 | } 41 | } 42 | BENCHMARK(NoPrefix); 43 | 44 | static void BM_Foo(benchmark::State& state) { 45 | for (auto _ : state) { 46 | } 47 | } 48 | BENCHMARK(BM_Foo); 49 | 50 | static void BM_Bar(benchmark::State& state) { 51 | for (auto _ : state) { 52 | } 53 | } 54 | BENCHMARK(BM_Bar); 55 | 56 | static void BM_FooBar(benchmark::State& state) { 57 | for (auto _ : state) { 58 | } 59 | } 60 | BENCHMARK(BM_FooBar); 61 | 62 | static void BM_FooBa(benchmark::State& state) { 63 | for (auto _ : state) { 64 | } 65 | } 66 | BENCHMARK(BM_FooBa); 67 | 68 | int main(int argc, char **argv) { 69 | bool list_only = false; 70 | for (int i = 0; i < argc; ++i) 71 | list_only |= std::string(argv[i]).find("--benchmark_list_tests") != 72 | std::string::npos; 73 | 74 | benchmark::Initialize(&argc, argv); 75 | 76 | TestReporter test_reporter; 77 | const size_t returned_count = 78 | benchmark::RunSpecifiedBenchmarks(&test_reporter); 79 | 80 | if (argc == 2) { 81 | // Make sure we ran all of the tests 82 | std::stringstream ss(argv[1]); 83 | size_t expected_return; 84 | ss >> expected_return; 85 | 86 | if (returned_count != expected_return) { 87 | std::cerr << "ERROR: Expected " << expected_return 88 | << " tests to match the filter but returned_count = " 89 | << returned_count << std::endl; 90 | return -1; 91 | } 92 | 
93 | const size_t expected_reports = list_only ? 0 : expected_return; 94 | const size_t reports_count = test_reporter.GetCount(); 95 | if (reports_count != expected_reports) { 96 | std::cerr << "ERROR: Expected " << expected_reports 97 | << " tests to be run but reported_count = " << reports_count 98 | << std::endl; 99 | return -1; 100 | } 101 | } 102 | 103 | return 0; 104 | } 105 | -------------------------------------------------------------------------------- /vendor/benchmark/CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | # People who have agreed to one of the CLAs and can contribute patches. 2 | # The AUTHORS file lists the copyright holders; this file 3 | # lists people. For example, Google employees are listed here 4 | # but not in AUTHORS, because Google holds the copyright. 5 | # 6 | # Names should be added to this file only after verifying that 7 | # the individual or the individual's organization has agreed to 8 | # the appropriate Contributor License Agreement, found here: 9 | # 10 | # https://developers.google.com/open-source/cla/individual 11 | # https://developers.google.com/open-source/cla/corporate 12 | # 13 | # The agreement for individuals can be filled out on the web. 14 | # 15 | # When adding J Random Contributor's name to this file, 16 | # either J's name or J's organization's name should be 17 | # added to the AUTHORS file, depending on whether the 18 | # individual or corporate CLA was used. 19 | # 20 | # Names should be added to this file as: 21 | # Name 22 | # 23 | # Please keep the list sorted. 
24 | 25 | Albert Pretorius 26 | Arne Beer 27 | Billy Robert O'Neal III 28 | Chris Kennelly 29 | Christopher Seymour 30 | David Coeurjolly 31 | Deniz Evrenci 32 | Dominic Hamon 33 | Dominik Czarnota 34 | Eric Fiselier 35 | Eugene Zhuk 36 | Evgeny Safronov 37 | Federico Ficarelli 38 | Felix Homann 39 | Ismael Jimenez Martinez 40 | Jern-Kuan Leong 41 | JianXiong Zhou 42 | Joao Paulo Magalhaes 43 | John Millikin 44 | Jussi Knuuttila 45 | Kai Wolf 46 | Kishan Kumar 47 | Kaito Udagawa 48 | Lei Xu 49 | Matt Clarkson 50 | Maxim Vafin 51 | Nick Hutchinson 52 | Oleksandr Sochka 53 | Pascal Leroy 54 | Paul Redmond 55 | Pierre Phaneuf 56 | Radoslav Yovchev 57 | Raul Marin 58 | Ray Glover 59 | Robert Guo 60 | Roman Lebedev 61 | Shuo Chen 62 | Tobias Ulvgård 63 | Tom Madams 64 | Yixuan Qiu 65 | Yusuke Suzuki 66 | Zbigniew Skowron 67 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/AddCXXCompilerFlag.cmake: -------------------------------------------------------------------------------- 1 | # - Adds a compiler flag if it is supported by the compiler 2 | # 3 | # This function checks that the supplied compiler flag is supported and then 4 | # adds it to the corresponding compiler flags 5 | # 6 | # add_cxx_compiler_flag( []) 7 | # 8 | # - Example 9 | # 10 | # include(AddCXXCompilerFlag) 11 | # add_cxx_compiler_flag(-Wall) 12 | # add_cxx_compiler_flag(-no-strict-aliasing RELEASE) 13 | # Requires CMake 2.6+ 14 | 15 | if(__add_cxx_compiler_flag) 16 | return() 17 | endif() 18 | set(__add_cxx_compiler_flag INCLUDED) 19 | 20 | include(CheckCXXCompilerFlag) 21 | 22 | function(mangle_compiler_flag FLAG OUTPUT) 23 | string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) 24 | string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) 25 | string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) 26 | string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) 27 | set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE) 28 | 
endfunction(mangle_compiler_flag) 29 | 30 | function(add_cxx_compiler_flag FLAG) 31 | mangle_compiler_flag("${FLAG}" MANGLED_FLAG) 32 | set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") 33 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") 34 | check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) 35 | set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") 36 | if(${MANGLED_FLAG}) 37 | set(VARIANT ${ARGV1}) 38 | if(ARGV1) 39 | string(TOUPPER "_${VARIANT}" VARIANT) 40 | endif() 41 | set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) 42 | endif() 43 | endfunction() 44 | 45 | function(add_required_cxx_compiler_flag FLAG) 46 | mangle_compiler_flag("${FLAG}" MANGLED_FLAG) 47 | set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") 48 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") 49 | check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) 50 | set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") 51 | if(${MANGLED_FLAG}) 52 | set(VARIANT ${ARGV1}) 53 | if(ARGV1) 54 | string(TOUPPER "_${VARIANT}" VARIANT) 55 | endif() 56 | set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) 57 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) 58 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) 59 | set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) 60 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE) 61 | else() 62 | message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") 63 | endif() 64 | endfunction() 65 | 66 | function(check_cxx_warning_flag FLAG) 67 | mangle_compiler_flag("${FLAG}" MANGLED_FLAG) 68 | set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") 69 | # Add -Werror to ensure the compiler generates an error if the warning flag 70 | # doesn't exist. 
71 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}") 72 | check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) 73 | set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") 74 | endfunction() 75 | -------------------------------------------------------------------------------- /vendor/benchmark/test/multiple_ranges_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | class MultipleRangesFixture : public ::benchmark::Fixture { 9 | public: 10 | MultipleRangesFixture() 11 | : expectedValues({{1, 3, 5}, 12 | {1, 3, 8}, 13 | {1, 3, 15}, 14 | {2, 3, 5}, 15 | {2, 3, 8}, 16 | {2, 3, 15}, 17 | {1, 4, 5}, 18 | {1, 4, 8}, 19 | {1, 4, 15}, 20 | {2, 4, 5}, 21 | {2, 4, 8}, 22 | {2, 4, 15}, 23 | {1, 7, 5}, 24 | {1, 7, 8}, 25 | {1, 7, 15}, 26 | {2, 7, 5}, 27 | {2, 7, 8}, 28 | {2, 7, 15}, 29 | {7, 6, 3}}) {} 30 | 31 | void SetUp(const ::benchmark::State& state) { 32 | std::vector ranges = {state.range(0), state.range(1), 33 | state.range(2)}; 34 | 35 | assert(expectedValues.find(ranges) != expectedValues.end()); 36 | 37 | actualValues.insert(ranges); 38 | } 39 | 40 | // NOTE: This is not TearDown as we want to check after _all_ runs are 41 | // complete. 
42 | virtual ~MultipleRangesFixture() { 43 | assert(actualValues.size() == expectedValues.size()); 44 | if (actualValues.size() != expectedValues.size()) { 45 | std::cout << "EXPECTED\n"; 46 | for (auto v : expectedValues) { 47 | std::cout << "{"; 48 | for (int64_t iv : v) { 49 | std::cout << iv << ", "; 50 | } 51 | std::cout << "}\n"; 52 | } 53 | std::cout << "ACTUAL\n"; 54 | for (auto v : actualValues) { 55 | std::cout << "{"; 56 | for (int64_t iv : v) { 57 | std::cout << iv << ", "; 58 | } 59 | std::cout << "}\n"; 60 | } 61 | } 62 | } 63 | 64 | std::set> expectedValues; 65 | std::set> actualValues; 66 | }; 67 | 68 | BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) { 69 | for (auto _ : state) { 70 | int64_t product = state.range(0) * state.range(1) * state.range(2); 71 | for (int64_t x = 0; x < product; x++) { 72 | benchmark::DoNotOptimize(x); 73 | } 74 | } 75 | } 76 | 77 | BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty) 78 | ->RangeMultiplier(2) 79 | ->Ranges({{1, 2}, {3, 7}, {5, 15}}) 80 | ->Args({7, 6, 3}); 81 | 82 | void BM_CheckDefaultArgument(benchmark::State& state) { 83 | // Test that the 'range()' without an argument is the same as 'range(0)'. 84 | assert(state.range() == state.range(0)); 85 | assert(state.range() != state.range(1)); 86 | for (auto _ : state) { 87 | } 88 | } 89 | BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}}); 90 | 91 | static void BM_MultipleRanges(benchmark::State& st) { 92 | for (auto _ : st) { 93 | } 94 | } 95 | BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}}); 96 | 97 | BENCHMARK_MAIN(); 98 | -------------------------------------------------------------------------------- /vendor/benchmark/src/reporter.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "benchmark/benchmark.h" 16 | #include "timers.h" 17 | 18 | #include 19 | 20 | #include 21 | #include 22 | #include 23 | 24 | #include "check.h" 25 | 26 | namespace benchmark { 27 | 28 | BenchmarkReporter::BenchmarkReporter() 29 | : output_stream_(&std::cout), error_stream_(&std::cerr) {} 30 | 31 | BenchmarkReporter::~BenchmarkReporter() {} 32 | 33 | void BenchmarkReporter::PrintBasicContext(std::ostream *out, 34 | Context const &context) { 35 | CHECK(out) << "cannot be null"; 36 | auto &Out = *out; 37 | 38 | Out << LocalDateTimeString() << "\n"; 39 | 40 | if (context.executable_name) 41 | Out << "Running " << context.executable_name << "\n"; 42 | 43 | const CPUInfo &info = context.cpu_info; 44 | Out << "Run on (" << info.num_cpus << " X " 45 | << (info.cycles_per_second / 1000000.0) << " MHz CPU " 46 | << ((info.num_cpus > 1) ? 
"s" : "") << ")\n"; 47 | if (info.caches.size() != 0) { 48 | Out << "CPU Caches:\n"; 49 | for (auto &CInfo : info.caches) { 50 | Out << " L" << CInfo.level << " " << CInfo.type << " " 51 | << (CInfo.size / 1000) << "K"; 52 | if (CInfo.num_sharing != 0) 53 | Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; 54 | Out << "\n"; 55 | } 56 | } 57 | 58 | if (info.scaling_enabled) { 59 | Out << "***WARNING*** CPU scaling is enabled, the benchmark " 60 | "real time measurements may be noisy and will incur extra " 61 | "overhead.\n"; 62 | } 63 | 64 | #ifndef NDEBUG 65 | Out << "***WARNING*** Library was built as DEBUG. Timings may be " 66 | "affected.\n"; 67 | #endif 68 | } 69 | 70 | // No initializer because it's already initialized to NULL. 71 | const char *BenchmarkReporter::Context::executable_name; 72 | 73 | BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {} 74 | 75 | std::string BenchmarkReporter::Run::benchmark_name() const { 76 | std::string name = run_name; 77 | if (run_type == RT_Aggregate) { 78 | name += "_" + aggregate_name; 79 | } 80 | return name; 81 | } 82 | 83 | double BenchmarkReporter::Run::GetAdjustedRealTime() const { 84 | double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); 85 | if (iterations != 0) new_time /= static_cast(iterations); 86 | return new_time; 87 | } 88 | 89 | double BenchmarkReporter::Run::GetAdjustedCPUTime() const { 90 | double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); 91 | if (iterations != 0) new_time /= static_cast(iterations); 92 | return new_time; 93 | } 94 | 95 | } // end namespace benchmark 96 | -------------------------------------------------------------------------------- /vendor/benchmark/src/commandlineflags.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_COMMANDLINEFLAGS_H_ 2 | #define BENCHMARK_COMMANDLINEFLAGS_H_ 3 | 4 | #include 5 | #include 6 | 7 | // Macro for referencing flags. 
8 | #define FLAG(name) FLAGS_##name 9 | 10 | // Macros for declaring flags. 11 | #define DECLARE_bool(name) extern bool FLAG(name) 12 | #define DECLARE_int32(name) extern int32_t FLAG(name) 13 | #define DECLARE_int64(name) extern int64_t FLAG(name) 14 | #define DECLARE_double(name) extern double FLAG(name) 15 | #define DECLARE_string(name) extern std::string FLAG(name) 16 | 17 | // Macros for defining flags. 18 | #define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val) 19 | #define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val) 20 | #define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val) 21 | #define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val) 22 | #define DEFINE_string(name, default_val, doc) \ 23 | std::string FLAG(name) = (default_val) 24 | 25 | namespace benchmark { 26 | // Parses 'str' for a 32-bit signed integer. If successful, writes the result 27 | // to *value and returns true; otherwise leaves *value unchanged and returns 28 | // false. 29 | bool ParseInt32(const std::string& src_text, const char* str, int32_t* value); 30 | 31 | // Parses a bool/Int32/string from the environment variable 32 | // corresponding to the given Google Test flag. 33 | bool BoolFromEnv(const char* flag, bool default_val); 34 | int32_t Int32FromEnv(const char* flag, int32_t default_val); 35 | double DoubleFromEnv(const char* flag, double default_val); 36 | const char* StringFromEnv(const char* flag, const char* default_val); 37 | 38 | // Parses a string for a bool flag, in the form of either 39 | // "--flag=value" or "--flag". 40 | // 41 | // In the former case, the value is taken as true if it passes IsTruthyValue(). 42 | // 43 | // In the latter case, the value is taken as true. 44 | // 45 | // On success, stores the value of the flag in *value, and returns 46 | // true. On failure, returns false without changing *value. 
47 | bool ParseBoolFlag(const char* str, const char* flag, bool* value); 48 | 49 | // Parses a string for an Int32 flag, in the form of 50 | // "--flag=value". 51 | // 52 | // On success, stores the value of the flag in *value, and returns 53 | // true. On failure, returns false without changing *value. 54 | bool ParseInt32Flag(const char* str, const char* flag, int32_t* value); 55 | 56 | // Parses a string for a Double flag, in the form of 57 | // "--flag=value". 58 | // 59 | // On success, stores the value of the flag in *value, and returns 60 | // true. On failure, returns false without changing *value. 61 | bool ParseDoubleFlag(const char* str, const char* flag, double* value); 62 | 63 | // Parses a string for a string flag, in the form of 64 | // "--flag=value". 65 | // 66 | // On success, stores the value of the flag in *value, and returns 67 | // true. On failure, returns false without changing *value. 68 | bool ParseStringFlag(const char* str, const char* flag, std::string* value); 69 | 70 | // Returns true if the string matches the flag. 71 | bool IsFlag(const char* str, const char* flag); 72 | 73 | // Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or 74 | // some non-alphanumeric character. As a special case, also returns true if 75 | // value is the empty string. 
76 | bool IsTruthyFlagValue(const std::string& value); 77 | } // end namespace benchmark 78 | 79 | #endif // BENCHMARK_COMMANDLINEFLAGS_H_ 80 | -------------------------------------------------------------------------------- /vendor/benchmark/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Allow the source files to find headers in src/ 2 | include_directories(${PROJECT_SOURCE_DIR}/src) 3 | 4 | if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) 5 | list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) 6 | list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) 7 | endif() 8 | 9 | file(GLOB 10 | SOURCE_FILES 11 | *.cc 12 | ${PROJECT_SOURCE_DIR}/include/benchmark/*.h 13 | ${CMAKE_CURRENT_SOURCE_DIR}/*.h) 14 | file(GLOB BENCHMARK_MAIN "benchmark_main.cc") 15 | foreach(item ${BENCHMARK_MAIN}) 16 | list(REMOVE_ITEM SOURCE_FILES "${item}") 17 | endforeach() 18 | 19 | add_library(benchmark ${SOURCE_FILES}) 20 | set_target_properties(benchmark PROPERTIES 21 | OUTPUT_NAME "benchmark" 22 | VERSION ${GENERIC_LIB_VERSION} 23 | SOVERSION ${GENERIC_LIB_SOVERSION} 24 | ) 25 | target_include_directories(benchmark PUBLIC 26 | $ 27 | ) 28 | 29 | # Link threads. 
30 | target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) 31 | find_library(LIBRT rt) 32 | if(LIBRT) 33 | target_link_libraries(benchmark ${LIBRT}) 34 | endif() 35 | 36 | # We need extra libraries on Windows 37 | if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") 38 | target_link_libraries(benchmark Shlwapi) 39 | endif() 40 | 41 | # We need extra libraries on Solaris 42 | if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") 43 | target_link_libraries(benchmark kstat) 44 | endif() 45 | 46 | # Benchmark main library 47 | add_library(benchmark_main "benchmark_main.cc") 48 | set_target_properties(benchmark_main PROPERTIES 49 | OUTPUT_NAME "benchmark_main" 50 | VERSION ${GENERIC_LIB_VERSION} 51 | SOVERSION ${GENERIC_LIB_SOVERSION} 52 | ) 53 | target_include_directories(benchmark PUBLIC 54 | $ 55 | ) 56 | target_link_libraries(benchmark_main benchmark) 57 | 58 | set(include_install_dir "include") 59 | set(lib_install_dir "lib/") 60 | set(bin_install_dir "bin/") 61 | set(config_install_dir "lib/cmake/${PROJECT_NAME}") 62 | set(pkgconfig_install_dir "lib/pkgconfig") 63 | 64 | set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") 65 | 66 | set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") 67 | set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") 68 | set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc") 69 | set(targets_export_name "${PROJECT_NAME}Targets") 70 | 71 | set(namespace "${PROJECT_NAME}::") 72 | 73 | include(CMakePackageConfigHelpers) 74 | write_basic_package_version_file( 75 | "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion 76 | ) 77 | 78 | configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) 79 | configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY) 80 | 81 | if (BENCHMARK_ENABLE_INSTALL) 82 | # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) 83 | install( 84 | TARGETS 
benchmark benchmark_main 85 | EXPORT ${targets_export_name} 86 | ARCHIVE DESTINATION ${lib_install_dir} 87 | LIBRARY DESTINATION ${lib_install_dir} 88 | RUNTIME DESTINATION ${bin_install_dir} 89 | INCLUDES DESTINATION ${include_install_dir}) 90 | 91 | install( 92 | DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" 93 | DESTINATION ${include_install_dir} 94 | FILES_MATCHING PATTERN "*.*h") 95 | 96 | install( 97 | FILES "${project_config}" "${version_config}" 98 | DESTINATION "${config_install_dir}") 99 | 100 | install( 101 | FILES "${pkg_config}" 102 | DESTINATION "${pkgconfig_install_dir}") 103 | 104 | install( 105 | EXPORT "${targets_export_name}" 106 | NAMESPACE "${namespace}" 107 | DESTINATION "${config_install_dir}") 108 | endif() 109 | -------------------------------------------------------------------------------- /vendor/benchmark/test/string_util_gtest.cc: -------------------------------------------------------------------------------- 1 | //===---------------------------------------------------------------------===// 2 | // statistics_test - Unit tests for src/statistics.cc 3 | //===---------------------------------------------------------------------===// 4 | 5 | #include "../src/string_util.h" 6 | #include "gtest/gtest.h" 7 | 8 | namespace { 9 | TEST(StringUtilTest, stoul) { 10 | { 11 | size_t pos = 0; 12 | EXPECT_EQ(0, benchmark::stoul("0", &pos)); 13 | EXPECT_EQ(1, pos); 14 | } 15 | { 16 | size_t pos = 0; 17 | EXPECT_EQ(7, benchmark::stoul("7", &pos)); 18 | EXPECT_EQ(1, pos); 19 | } 20 | { 21 | size_t pos = 0; 22 | EXPECT_EQ(135, benchmark::stoul("135", &pos)); 23 | EXPECT_EQ(3, pos); 24 | } 25 | #if ULONG_MAX == 0xFFFFFFFFul 26 | { 27 | size_t pos = 0; 28 | EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos)); 29 | EXPECT_EQ(10, pos); 30 | } 31 | #elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul 32 | { 33 | size_t pos = 0; 34 | EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos)); 35 | EXPECT_EQ(20, pos); 36 | } 37 | #endif 
38 | { 39 | size_t pos = 0; 40 | EXPECT_EQ(10, benchmark::stoul("1010", &pos, 2)); 41 | EXPECT_EQ(4, pos); 42 | } 43 | { 44 | size_t pos = 0; 45 | EXPECT_EQ(520, benchmark::stoul("1010", &pos, 8)); 46 | EXPECT_EQ(4, pos); 47 | } 48 | { 49 | size_t pos = 0; 50 | EXPECT_EQ(1010, benchmark::stoul("1010", &pos, 10)); 51 | EXPECT_EQ(4, pos); 52 | } 53 | { 54 | size_t pos = 0; 55 | EXPECT_EQ(4112, benchmark::stoul("1010", &pos, 16)); 56 | EXPECT_EQ(4, pos); 57 | } 58 | { 59 | size_t pos = 0; 60 | EXPECT_EQ(0xBEEF, benchmark::stoul("BEEF", &pos, 16)); 61 | EXPECT_EQ(4, pos); 62 | } 63 | { 64 | ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); 65 | } 66 | } 67 | 68 | TEST(StringUtilTest, stoi) { 69 | { 70 | size_t pos = 0; 71 | EXPECT_EQ(0, benchmark::stoi("0", &pos)); 72 | EXPECT_EQ(1, pos); 73 | } 74 | { 75 | size_t pos = 0; 76 | EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); 77 | EXPECT_EQ(3, pos); 78 | } 79 | { 80 | size_t pos = 0; 81 | EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); 82 | EXPECT_EQ(4, pos); 83 | } 84 | { 85 | size_t pos = 0; 86 | EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); 87 | EXPECT_EQ(4, pos); 88 | } 89 | { 90 | size_t pos = 0; 91 | EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); 92 | EXPECT_EQ(4, pos); 93 | } 94 | { 95 | size_t pos = 0; 96 | EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); 97 | EXPECT_EQ(4, pos); 98 | } 99 | { 100 | size_t pos = 0; 101 | EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); 102 | EXPECT_EQ(4, pos); 103 | } 104 | { 105 | size_t pos = 0; 106 | EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); 107 | EXPECT_EQ(4, pos); 108 | } 109 | { 110 | ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); 111 | } 112 | } 113 | 114 | TEST(StringUtilTest, stod) { 115 | { 116 | size_t pos = 0; 117 | EXPECT_EQ(0.0, benchmark::stod("0", &pos)); 118 | EXPECT_EQ(1, pos); 119 | } 120 | { 121 | size_t pos = 0; 122 | EXPECT_EQ(-84.0, benchmark::stod("-84", &pos)); 123 | EXPECT_EQ(3, pos); 
124 | } 125 | { 126 | size_t pos = 0; 127 | EXPECT_EQ(1234.0, benchmark::stod("1234", &pos)); 128 | EXPECT_EQ(4, pos); 129 | } 130 | { 131 | size_t pos = 0; 132 | EXPECT_EQ(1.5, benchmark::stod("1.5", &pos)); 133 | EXPECT_EQ(3, pos); 134 | } 135 | { 136 | size_t pos = 0; 137 | /* Note: exactly representable as double */ 138 | EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos)); 139 | EXPECT_EQ(8, pos); 140 | } 141 | { 142 | ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); 143 | } 144 | } 145 | 146 | } // end namespace 147 | -------------------------------------------------------------------------------- /vendor/benchmark/.ycm_extra_conf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import ycm_core 3 | 4 | # These are the compilation flags that will be used in case there's no 5 | # compilation database set (by default, one is not set). 6 | # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. 7 | flags = [ 8 | '-Wall', 9 | '-Werror', 10 | '-pedantic-errors', 11 | '-std=c++0x', 12 | '-fno-strict-aliasing', 13 | '-O3', 14 | '-DNDEBUG', 15 | # ...and the same thing goes for the magic -x option which specifies the 16 | # language that the files to be compiled are written in. This is mostly 17 | # relevant for c++ headers. 18 | # For a C project, you would set this to 'c' instead of 'c++'. 19 | '-x', 'c++', 20 | '-I', 'include', 21 | '-isystem', '/usr/include', 22 | '-isystem', '/usr/local/include', 23 | ] 24 | 25 | 26 | # Set this to the absolute path to the folder (NOT the file!) containing the 27 | # compile_commands.json file to use that instead of 'flags'. See here for 28 | # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html 29 | # 30 | # Most projects will NOT need to set this to anything; you can just change the 31 | # 'flags' list of compilation flags. Notice that YCM itself uses that approach. 
32 | compilation_database_folder = '' 33 | 34 | if os.path.exists( compilation_database_folder ): 35 | database = ycm_core.CompilationDatabase( compilation_database_folder ) 36 | else: 37 | database = None 38 | 39 | SOURCE_EXTENSIONS = [ '.cc' ] 40 | 41 | def DirectoryOfThisScript(): 42 | return os.path.dirname( os.path.abspath( __file__ ) ) 43 | 44 | 45 | def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): 46 | if not working_directory: 47 | return list( flags ) 48 | new_flags = [] 49 | make_next_absolute = False 50 | path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] 51 | for flag in flags: 52 | new_flag = flag 53 | 54 | if make_next_absolute: 55 | make_next_absolute = False 56 | if not flag.startswith( '/' ): 57 | new_flag = os.path.join( working_directory, flag ) 58 | 59 | for path_flag in path_flags: 60 | if flag == path_flag: 61 | make_next_absolute = True 62 | break 63 | 64 | if flag.startswith( path_flag ): 65 | path = flag[ len( path_flag ): ] 66 | new_flag = path_flag + os.path.join( working_directory, path ) 67 | break 68 | 69 | if new_flag: 70 | new_flags.append( new_flag ) 71 | return new_flags 72 | 73 | 74 | def IsHeaderFile( filename ): 75 | extension = os.path.splitext( filename )[ 1 ] 76 | return extension in [ '.h', '.hxx', '.hpp', '.hh' ] 77 | 78 | 79 | def GetCompilationInfoForFile( filename ): 80 | # The compilation_commands.json file generated by CMake does not have entries 81 | # for header files. So we do our best by asking the db for flags for a 82 | # corresponding source file, if any. If one exists, the flags for that file 83 | # should be good enough. 
84 | if IsHeaderFile( filename ): 85 | basename = os.path.splitext( filename )[ 0 ] 86 | for extension in SOURCE_EXTENSIONS: 87 | replacement_file = basename + extension 88 | if os.path.exists( replacement_file ): 89 | compilation_info = database.GetCompilationInfoForFile( 90 | replacement_file ) 91 | if compilation_info.compiler_flags_: 92 | return compilation_info 93 | return None 94 | return database.GetCompilationInfoForFile( filename ) 95 | 96 | 97 | def FlagsForFile( filename, **kwargs ): 98 | if database: 99 | # Bear in mind that compilation_info.compiler_flags_ does NOT return a 100 | # python list, but a "list-like" StringVec object 101 | compilation_info = GetCompilationInfoForFile( filename ) 102 | if not compilation_info: 103 | return None 104 | 105 | final_flags = MakeRelativePathsInFlagsAbsolute( 106 | compilation_info.compiler_flags_, 107 | compilation_info.compiler_working_dir_ ) 108 | else: 109 | relative_to = DirectoryOfThisScript() 110 | final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) 111 | 112 | return { 113 | 'flags': final_flags, 114 | 'do_cache': True 115 | } 116 | -------------------------------------------------------------------------------- /vendor/benchmark/test/basic_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #include "benchmark/benchmark.h" 3 | 4 | #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) 5 | 6 | void BM_empty(benchmark::State& state) { 7 | for (auto _ : state) { 8 | benchmark::DoNotOptimize(state.iterations()); 9 | } 10 | } 11 | BENCHMARK(BM_empty); 12 | BENCHMARK(BM_empty)->ThreadPerCpu(); 13 | 14 | void BM_spin_empty(benchmark::State& state) { 15 | for (auto _ : state) { 16 | for (int x = 0; x < state.range(0); ++x) { 17 | benchmark::DoNotOptimize(x); 18 | } 19 | } 20 | } 21 | BASIC_BENCHMARK_TEST(BM_spin_empty); 22 | BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu(); 23 | 24 | void BM_spin_pause_before(benchmark::State& 
state) { 25 | for (int i = 0; i < state.range(0); ++i) { 26 | benchmark::DoNotOptimize(i); 27 | } 28 | for (auto _ : state) { 29 | for (int i = 0; i < state.range(0); ++i) { 30 | benchmark::DoNotOptimize(i); 31 | } 32 | } 33 | } 34 | BASIC_BENCHMARK_TEST(BM_spin_pause_before); 35 | BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu(); 36 | 37 | void BM_spin_pause_during(benchmark::State& state) { 38 | for (auto _ : state) { 39 | state.PauseTiming(); 40 | for (int i = 0; i < state.range(0); ++i) { 41 | benchmark::DoNotOptimize(i); 42 | } 43 | state.ResumeTiming(); 44 | for (int i = 0; i < state.range(0); ++i) { 45 | benchmark::DoNotOptimize(i); 46 | } 47 | } 48 | } 49 | BASIC_BENCHMARK_TEST(BM_spin_pause_during); 50 | BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu(); 51 | 52 | void BM_pause_during(benchmark::State& state) { 53 | for (auto _ : state) { 54 | state.PauseTiming(); 55 | state.ResumeTiming(); 56 | } 57 | } 58 | BENCHMARK(BM_pause_during); 59 | BENCHMARK(BM_pause_during)->ThreadPerCpu(); 60 | BENCHMARK(BM_pause_during)->UseRealTime(); 61 | BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu(); 62 | 63 | void BM_spin_pause_after(benchmark::State& state) { 64 | for (auto _ : state) { 65 | for (int i = 0; i < state.range(0); ++i) { 66 | benchmark::DoNotOptimize(i); 67 | } 68 | } 69 | for (int i = 0; i < state.range(0); ++i) { 70 | benchmark::DoNotOptimize(i); 71 | } 72 | } 73 | BASIC_BENCHMARK_TEST(BM_spin_pause_after); 74 | BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu(); 75 | 76 | void BM_spin_pause_before_and_after(benchmark::State& state) { 77 | for (int i = 0; i < state.range(0); ++i) { 78 | benchmark::DoNotOptimize(i); 79 | } 80 | for (auto _ : state) { 81 | for (int i = 0; i < state.range(0); ++i) { 82 | benchmark::DoNotOptimize(i); 83 | } 84 | } 85 | for (int i = 0; i < state.range(0); ++i) { 86 | benchmark::DoNotOptimize(i); 87 | } 88 | } 89 | BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after); 90 | 
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu(); 91 | 92 | void BM_empty_stop_start(benchmark::State& state) { 93 | for (auto _ : state) { 94 | } 95 | } 96 | BENCHMARK(BM_empty_stop_start); 97 | BENCHMARK(BM_empty_stop_start)->ThreadPerCpu(); 98 | 99 | 100 | void BM_KeepRunning(benchmark::State& state) { 101 | size_t iter_count = 0; 102 | assert(iter_count == state.iterations()); 103 | while (state.KeepRunning()) { 104 | ++iter_count; 105 | } 106 | assert(iter_count == state.iterations()); 107 | } 108 | BENCHMARK(BM_KeepRunning); 109 | 110 | void BM_KeepRunningBatch(benchmark::State& state) { 111 | // Choose a prime batch size to avoid evenly dividing max_iterations. 112 | const size_t batch_size = 101; 113 | size_t iter_count = 0; 114 | while (state.KeepRunningBatch(batch_size)) { 115 | iter_count += batch_size; 116 | } 117 | assert(state.iterations() == iter_count); 118 | } 119 | BENCHMARK(BM_KeepRunningBatch); 120 | 121 | void BM_RangedFor(benchmark::State& state) { 122 | size_t iter_count = 0; 123 | for (auto _ : state) { 124 | ++iter_count; 125 | } 126 | assert(iter_count == state.max_iterations); 127 | } 128 | BENCHMARK(BM_RangedFor); 129 | 130 | // Ensure that StateIterator provides all the necessary typedefs required to 131 | // instantiate std::iterator_traits. 132 | static_assert(std::is_same< 133 | typename std::iterator_traits::value_type, 134 | typename benchmark::State::StateIterator::value_type>::value, ""); 135 | 136 | BENCHMARK_MAIN(); 137 | -------------------------------------------------------------------------------- /vendor/benchmark/src/re.h: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef BENCHMARK_RE_H_ 16 | #define BENCHMARK_RE_H_ 17 | 18 | #include "internal_macros.h" 19 | 20 | // clang-format off 21 | 22 | #if !defined(HAVE_STD_REGEX) && \ 23 | !defined(HAVE_GNU_POSIX_REGEX) && \ 24 | !defined(HAVE_POSIX_REGEX) 25 | // No explicit regex selection; detect based on builtin hints. 26 | #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE) 27 | #define HAVE_POSIX_REGEX 1 28 | #elif __cplusplus >= 199711L 29 | #define HAVE_STD_REGEX 1 30 | #endif 31 | #endif 32 | 33 | // Prefer C regex libraries when compiling w/o exceptions so that we can 34 | // correctly report errors. 35 | #if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ 36 | defined(BENCHMARK_HAVE_STD_REGEX) && \ 37 | (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) 38 | #undef HAVE_STD_REGEX 39 | #endif 40 | 41 | #if defined(HAVE_STD_REGEX) 42 | #include 43 | #elif defined(HAVE_GNU_POSIX_REGEX) 44 | #include 45 | #elif defined(HAVE_POSIX_REGEX) 46 | #include 47 | #else 48 | #error No regular expression backend was found! 49 | #endif 50 | 51 | // clang-format on 52 | 53 | #include 54 | 55 | #include "check.h" 56 | 57 | namespace benchmark { 58 | 59 | // A wrapper around the POSIX regular expression API that provides automatic 60 | // cleanup 61 | class Regex { 62 | public: 63 | Regex() : init_(false) {} 64 | 65 | ~Regex(); 66 | 67 | // Compile a regular expression matcher from spec. Returns true on success. 
68 | // 69 | // On failure (and if error is not nullptr), error is populated with a human 70 | // readable error message if an error occurs. 71 | bool Init(const std::string& spec, std::string* error); 72 | 73 | // Returns whether str matches the compiled regular expression. 74 | bool Match(const std::string& str); 75 | 76 | private: 77 | bool init_; 78 | // Underlying regular expression object 79 | #if defined(HAVE_STD_REGEX) 80 | std::regex re_; 81 | #elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) 82 | regex_t re_; 83 | #else 84 | #error No regular expression backend implementation available 85 | #endif 86 | }; 87 | 88 | #if defined(HAVE_STD_REGEX) 89 | 90 | inline bool Regex::Init(const std::string& spec, std::string* error) { 91 | #ifdef BENCHMARK_HAS_NO_EXCEPTIONS 92 | ((void)error); // suppress unused warning 93 | #else 94 | try { 95 | #endif 96 | re_ = std::regex(spec, std::regex_constants::extended); 97 | init_ = true; 98 | #ifndef BENCHMARK_HAS_NO_EXCEPTIONS 99 | } 100 | catch (const std::regex_error& e) { 101 | if (error) { 102 | *error = e.what(); 103 | } 104 | } 105 | #endif 106 | return init_; 107 | } 108 | 109 | inline Regex::~Regex() {} 110 | 111 | inline bool Regex::Match(const std::string& str) { 112 | if (!init_) { 113 | return false; 114 | } 115 | return std::regex_search(str, re_); 116 | } 117 | 118 | #else 119 | inline bool Regex::Init(const std::string& spec, std::string* error) { 120 | int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); 121 | if (ec != 0) { 122 | if (error) { 123 | size_t needed = regerror(ec, &re_, nullptr, 0); 124 | char* errbuf = new char[needed]; 125 | regerror(ec, &re_, errbuf, needed); 126 | 127 | // regerror returns the number of bytes necessary to null terminate 128 | // the string, so we move that when assigning to error. 
129 | CHECK_NE(needed, 0); 130 | error->assign(errbuf, needed - 1); 131 | 132 | delete[] errbuf; 133 | } 134 | 135 | return false; 136 | } 137 | 138 | init_ = true; 139 | return true; 140 | } 141 | 142 | inline Regex::~Regex() { 143 | if (init_) { 144 | regfree(&re_); 145 | } 146 | } 147 | 148 | inline bool Regex::Match(const std::string& str) { 149 | if (!init_) { 150 | return false; 151 | } 152 | return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; 153 | } 154 | #endif 155 | 156 | } // end namespace benchmark 157 | 158 | #endif // BENCHMARK_RE_H_ 159 | -------------------------------------------------------------------------------- /vendor/benchmark/cmake/HandleGTest.cmake: -------------------------------------------------------------------------------- 1 | 2 | include(split_list) 3 | 4 | macro(build_external_gtest) 5 | include(ExternalProject) 6 | set(GTEST_FLAGS "") 7 | if (BENCHMARK_USE_LIBCXX) 8 | if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") 9 | list(APPEND GTEST_FLAGS -stdlib=libc++) 10 | else() 11 | message(WARNING "Unsupported compiler (${CMAKE_CXX_COMPILER}) when using libc++") 12 | endif() 13 | endif() 14 | if (BENCHMARK_BUILD_32_BITS) 15 | list(APPEND GTEST_FLAGS -m32) 16 | endif() 17 | if (NOT "${CMAKE_CXX_FLAGS}" STREQUAL "") 18 | list(APPEND GTEST_FLAGS ${CMAKE_CXX_FLAGS}) 19 | endif() 20 | string(TOUPPER "${CMAKE_BUILD_TYPE}" GTEST_BUILD_TYPE) 21 | if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE") 22 | set(GTEST_BUILD_TYPE "DEBUG") 23 | endif() 24 | # FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where 25 | # -Werror=unused-function fires during the build on OS X. This is a temporary 26 | # workaround to keep our travis bots from failing. It should be removed 27 | # once gtest is fixed. 
# Note: The libraries are listed in reverse order of their dependencies.
66 | foreach(LIB gtest gmock gmock_main) 67 | add_library(${LIB} UNKNOWN IMPORTED) 68 | set_target_properties(${LIB} PROPERTIES 69 | IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX} 70 | INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS} 71 | INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}" 72 | ) 73 | add_dependencies(${LIB} googletest) 74 | list(APPEND GTEST_BOTH_LIBRARIES ${LIB}) 75 | endforeach() 76 | endmacro(build_external_gtest) 77 | 78 | if (BENCHMARK_ENABLE_GTEST_TESTS) 79 | if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest) 80 | set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest") 81 | set(INSTALL_GTEST OFF CACHE INTERNAL "") 82 | set(INSTALL_GMOCK OFF CACHE INTERNAL "") 83 | add_subdirectory(${CMAKE_SOURCE_DIR}/googletest) 84 | set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main) 85 | foreach(HEADER test mock) 86 | # CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES, so we 87 | # have to add the paths ourselves. 88 | set(HFILE g${HEADER}/g${HEADER}.h) 89 | set(HPATH ${GTEST_ROOT}/google${HEADER}/include) 90 | find_path(HEADER_PATH_${HEADER} ${HFILE} 91 | NO_DEFAULT_PATHS 92 | HINTS ${HPATH} 93 | ) 94 | if (NOT HEADER_PATH_${HEADER}) 95 | message(FATAL_ERROR "Failed to find header ${HFILE} in ${HPATH}") 96 | endif() 97 | list(APPEND GTEST_INCLUDE_DIRS ${HEADER_PATH_${HEADER}}) 98 | endforeach() 99 | elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES) 100 | build_external_gtest() 101 | else() 102 | find_package(GTest REQUIRED) 103 | find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h 104 | HINTS ${GTEST_INCLUDE_DIRS}) 105 | if (NOT GMOCK_INCLUDE_DIRS) 106 | message(FATAL_ERROR "Failed to find header gmock/gmock.h with hint ${GTEST_INCLUDE_DIRS}") 107 | endif() 108 | set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS}) 109 | # FIXME: We don't currently require the gmock library to build the tests, 110 | # and it's likely we won't find it, so we don't try. 
As long as we've 111 | # found the gmock/gmock.h header and gtest_main that should be good enough. 112 | endif() 113 | endif() 114 | -------------------------------------------------------------------------------- /vendor/benchmark/test/donotoptimize_assembly_test.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef __clang__ 4 | #pragma clang diagnostic ignored "-Wreturn-type" 5 | #endif 6 | 7 | extern "C" { 8 | 9 | extern int ExternInt; 10 | extern int ExternInt2; 11 | extern int ExternInt3; 12 | 13 | inline int Add42(int x) { return x + 42; } 14 | 15 | struct NotTriviallyCopyable { 16 | NotTriviallyCopyable(); 17 | explicit NotTriviallyCopyable(int x) : value(x) {} 18 | NotTriviallyCopyable(NotTriviallyCopyable const&); 19 | int value; 20 | }; 21 | 22 | struct Large { 23 | int value; 24 | int data[2]; 25 | }; 26 | 27 | } 28 | // CHECK-LABEL: test_with_rvalue: 29 | extern "C" void test_with_rvalue() { 30 | benchmark::DoNotOptimize(Add42(0)); 31 | // CHECK: movl $42, %eax 32 | // CHECK: ret 33 | } 34 | 35 | // CHECK-LABEL: test_with_large_rvalue: 36 | extern "C" void test_with_large_rvalue() { 37 | benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}}); 38 | // CHECK: ExternInt(%rip) 39 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]] 40 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 41 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 42 | // CHECK: ret 43 | } 44 | 45 | // CHECK-LABEL: test_with_non_trivial_rvalue: 46 | extern "C" void test_with_non_trivial_rvalue() { 47 | benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt)); 48 | // CHECK: mov{{l|q}} ExternInt(%rip) 49 | // CHECK: ret 50 | } 51 | 52 | // CHECK-LABEL: test_with_lvalue: 53 | extern "C" void test_with_lvalue() { 54 | int x = 101; 55 | benchmark::DoNotOptimize(x); 56 | // CHECK-GNU: movl $101, %eax 57 | // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]]) 58 | // CHECK: ret 59 | } 60 | 61 | // CHECK-LABEL: 
test_with_large_lvalue: 62 | extern "C" void test_with_large_lvalue() { 63 | Large L{ExternInt, {ExternInt, ExternInt}}; 64 | benchmark::DoNotOptimize(L); 65 | // CHECK: ExternInt(%rip) 66 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 67 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 68 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 69 | // CHECK: ret 70 | } 71 | 72 | // CHECK-LABEL: test_with_non_trivial_lvalue: 73 | extern "C" void test_with_non_trivial_lvalue() { 74 | NotTriviallyCopyable NTC(ExternInt); 75 | benchmark::DoNotOptimize(NTC); 76 | // CHECK: ExternInt(%rip) 77 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 78 | // CHECK: ret 79 | } 80 | 81 | // CHECK-LABEL: test_with_const_lvalue: 82 | extern "C" void test_with_const_lvalue() { 83 | const int x = 123; 84 | benchmark::DoNotOptimize(x); 85 | // CHECK: movl $123, %eax 86 | // CHECK: ret 87 | } 88 | 89 | // CHECK-LABEL: test_with_large_const_lvalue: 90 | extern "C" void test_with_large_const_lvalue() { 91 | const Large L{ExternInt, {ExternInt, ExternInt}}; 92 | benchmark::DoNotOptimize(L); 93 | // CHECK: ExternInt(%rip) 94 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 95 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 96 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 97 | // CHECK: ret 98 | } 99 | 100 | // CHECK-LABEL: test_with_non_trivial_const_lvalue: 101 | extern "C" void test_with_non_trivial_const_lvalue() { 102 | const NotTriviallyCopyable Obj(ExternInt); 103 | benchmark::DoNotOptimize(Obj); 104 | // CHECK: mov{{q|l}} ExternInt(%rip) 105 | // CHECK: ret 106 | } 107 | 108 | // CHECK-LABEL: test_div_by_two: 109 | extern "C" int test_div_by_two(int input) { 110 | int divisor = 2; 111 | benchmark::DoNotOptimize(divisor); 112 | return input / divisor; 113 | // CHECK: movl $2, [[DEST:.*]] 114 | // CHECK: idivl [[DEST]] 115 | // CHECK: ret 116 | } 117 | 118 | // CHECK-LABEL: test_inc_integer: 119 | extern "C" int test_inc_integer() { 120 | int x = 0; 121 | for (int i=0; i < 5; ++i) 122 | 
benchmark::DoNotOptimize(++x); 123 | // CHECK: movl $1, [[DEST:.*]] 124 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 125 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 126 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 127 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 128 | // CHECK-CLANG: movl [[DEST]], %eax 129 | // CHECK: ret 130 | return x; 131 | } 132 | 133 | // CHECK-LABEL: test_pointer_rvalue 134 | extern "C" void test_pointer_rvalue() { 135 | // CHECK: movl $42, [[DEST:.*]] 136 | // CHECK: leaq [[DEST]], %rax 137 | // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 138 | // CHECK: ret 139 | int x = 42; 140 | benchmark::DoNotOptimize(&x); 141 | } 142 | 143 | // CHECK-LABEL: test_pointer_const_lvalue: 144 | extern "C" void test_pointer_const_lvalue() { 145 | // CHECK: movl $42, [[DEST:.*]] 146 | // CHECK: leaq [[DEST]], %rax 147 | // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 148 | // CHECK: ret 149 | int x = 42; 150 | int * const xp = &x; 151 | benchmark::DoNotOptimize(xp); 152 | } 153 | 154 | // CHECK-LABEL: test_pointer_lvalue: 155 | extern "C" void test_pointer_lvalue() { 156 | // CHECK: movl $42, [[DEST:.*]] 157 | // CHECK: leaq [[DEST]], %rax 158 | // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]]) 159 | // CHECK: ret 160 | int x = 42; 161 | int *xp = &x; 162 | benchmark::DoNotOptimize(xp); 163 | } 164 | -------------------------------------------------------------------------------- /vendor/benchmark/tools/strip_asm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | strip_asm.py - Cleanup ASM output for the specified file 5 | """ 6 | 7 | from argparse import ArgumentParser 8 | import sys 9 | import os 10 | import re 11 | 12 | def find_used_labels(asm): 13 | found = set() 14 | label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") 15 | for l in asm.splitlines(): 16 | m = label_re.match(l) 17 | if m: 18 | found.add('.L%s' % m.group(1)) 19 | return found 20 | 21 | 
For example, Mach-O inserts an additional underscore at the beginning of
    names. This function removes that.
70 | """ 71 | parts = re.split(r'([a-zA-Z0-9_]+)', l) 72 | new_line = '' 73 | for tk in parts: 74 | if is_identifier(tk): 75 | if tk.startswith('__Z'): 76 | tk = tk[1:] 77 | elif tk.startswith('_') and len(tk) > 1 and \ 78 | tk[1].isalpha() and tk[1] != 'Z': 79 | tk = tk[1:] 80 | new_line += tk 81 | return new_line 82 | 83 | 84 | def process_asm(asm): 85 | """ 86 | Strip the ASM of unwanted directives and lines 87 | """ 88 | new_contents = '' 89 | asm = transform_labels(asm) 90 | 91 | # TODO: Add more things we want to remove 92 | discard_regexes = [ 93 | re.compile("\s+\..*$"), # directive 94 | re.compile("\s*#(NO_APP|APP)$"), #inline ASM 95 | re.compile("\s*#.*$"), # comment line 96 | re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive 97 | re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), 98 | ] 99 | keep_regexes = [ 100 | 101 | ] 102 | fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") 103 | for l in asm.splitlines(): 104 | # Remove Mach-O attribute 105 | l = l.replace('@GOTPCREL', '') 106 | add_line = True 107 | for reg in discard_regexes: 108 | if reg.match(l) is not None: 109 | add_line = False 110 | break 111 | for reg in keep_regexes: 112 | if reg.match(l) is not None: 113 | add_line = True 114 | break 115 | if add_line: 116 | if fn_label_def.match(l) and len(new_contents) != 0: 117 | new_contents += '\n' 118 | l = process_identifiers(l) 119 | new_contents += l 120 | new_contents += '\n' 121 | return new_contents 122 | 123 | def main(): 124 | parser = ArgumentParser( 125 | description='generate a stripped assembly file') 126 | parser.add_argument( 127 | 'input', metavar='input', type=str, nargs=1, 128 | help='An input assembly file') 129 | parser.add_argument( 130 | 'out', metavar='output', type=str, nargs=1, 131 | help='The output file') 132 | args, unknown_args = parser.parse_known_args() 133 | input = args.input[0] 134 | output = args.out[0] 135 | if not os.path.isfile(input): 136 | 
print(("ERROR: input file '%s' does not exist") % input) 137 | sys.exit(1) 138 | contents = None 139 | with open(input, 'r') as f: 140 | contents = f.read() 141 | new_contents = process_asm(contents) 142 | with open(output, 'w') as f: 143 | f.write(new_contents) 144 | 145 | 146 | if __name__ == '__main__': 147 | main() 148 | 149 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 150 | # kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; 151 | # kate: indent-mode python; remove-trailing-spaces modified; 152 | -------------------------------------------------------------------------------- /vendor/benchmark/src/csv_reporter.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "benchmark/benchmark.h" 16 | #include "complexity.h" 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | #include "check.h" 26 | #include "string_util.h" 27 | #include "timers.h" 28 | 29 | // File format reference: http://edoceo.com/utilitas/csv-file-format. 
30 | 31 | namespace benchmark { 32 | 33 | namespace { 34 | std::vector elements = { 35 | "name", "iterations", "real_time", "cpu_time", 36 | "time_unit", "bytes_per_second", "items_per_second", "label", 37 | "error_occurred", "error_message"}; 38 | } // namespace 39 | 40 | bool CSVReporter::ReportContext(const Context& context) { 41 | PrintBasicContext(&GetErrorStream(), context); 42 | return true; 43 | } 44 | 45 | void CSVReporter::ReportRuns(const std::vector& reports) { 46 | std::ostream& Out = GetOutputStream(); 47 | 48 | if (!printed_header_) { 49 | // save the names of all the user counters 50 | for (const auto& run : reports) { 51 | for (const auto& cnt : run.counters) { 52 | if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") 53 | continue; 54 | user_counter_names_.insert(cnt.first); 55 | } 56 | } 57 | 58 | // print the header 59 | for (auto B = elements.begin(); B != elements.end();) { 60 | Out << *B++; 61 | if (B != elements.end()) Out << ","; 62 | } 63 | for (auto B = user_counter_names_.begin(); 64 | B != user_counter_names_.end();) { 65 | Out << ",\"" << *B++ << "\""; 66 | } 67 | Out << "\n"; 68 | 69 | printed_header_ = true; 70 | } else { 71 | // check that all the current counters are saved in the name set 72 | for (const auto& run : reports) { 73 | for (const auto& cnt : run.counters) { 74 | if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") 75 | continue; 76 | CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) 77 | << "All counters must be present in each run. 
" 78 | << "Counter named \"" << cnt.first 79 | << "\" was not in a run after being added to the header"; 80 | } 81 | } 82 | } 83 | 84 | // print results for each run 85 | for (const auto& run : reports) { 86 | PrintRunData(run); 87 | } 88 | } 89 | 90 | void CSVReporter::PrintRunData(const Run& run) { 91 | std::ostream& Out = GetOutputStream(); 92 | 93 | // Field with embedded double-quote characters must be doubled and the field 94 | // delimited with double-quotes. 95 | std::string name = run.benchmark_name(); 96 | ReplaceAll(&name, "\"", "\"\""); 97 | Out << '"' << name << "\","; 98 | if (run.error_occurred) { 99 | Out << std::string(elements.size() - 3, ','); 100 | Out << "true,"; 101 | std::string msg = run.error_message; 102 | ReplaceAll(&msg, "\"", "\"\""); 103 | Out << '"' << msg << "\"\n"; 104 | return; 105 | } 106 | 107 | // Do not print iteration on bigO and RMS report 108 | if (!run.report_big_o && !run.report_rms) { 109 | Out << run.iterations; 110 | } 111 | Out << ","; 112 | 113 | Out << run.GetAdjustedRealTime() << ","; 114 | Out << run.GetAdjustedCPUTime() << ","; 115 | 116 | // Do not print timeLabel on bigO and RMS report 117 | if (run.report_big_o) { 118 | Out << GetBigOString(run.complexity); 119 | } else if (!run.report_rms) { 120 | Out << GetTimeUnitString(run.time_unit); 121 | } 122 | Out << ","; 123 | 124 | if (run.counters.find("bytes_per_second") != run.counters.end()) { 125 | Out << run.counters.at("bytes_per_second"); 126 | } 127 | Out << ","; 128 | if (run.counters.find("items_per_second") != run.counters.end()) { 129 | Out << run.counters.at("items_per_second"); 130 | } 131 | Out << ","; 132 | if (!run.report_label.empty()) { 133 | // Field with embedded double-quote characters must be doubled and the field 134 | // delimited with double-quotes. 
135 | std::string label = run.report_label; 136 | ReplaceAll(&label, "\"", "\"\""); 137 | Out << "\"" << label << "\""; 138 | } 139 | Out << ",,"; // for error_occurred and error_message 140 | 141 | // Print user counters 142 | for (const auto& ucn : user_counter_names_) { 143 | auto it = run.counters.find(ucn); 144 | if (it == run.counters.end()) { 145 | Out << ","; 146 | } else { 147 | Out << "," << it->second; 148 | } 149 | } 150 | Out << '\n'; 151 | } 152 | 153 | } // end namespace benchmark 154 | -------------------------------------------------------------------------------- /vendor/benchmark/src/mutex.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_MUTEX_H_ 2 | #define BENCHMARK_MUTEX_H_ 3 | 4 | #include 5 | #include 6 | 7 | #include "check.h" 8 | 9 | // Enable thread safety attributes only with clang. 10 | // The attributes can be safely erased when compiling with other compilers. 11 | #if defined(HAVE_THREAD_SAFETY_ATTRIBUTES) 12 | #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) 13 | #else 14 | #define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op 15 | #endif 16 | 17 | #define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) 18 | 19 | #define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) 20 | 21 | #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) 22 | 23 | #define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) 24 | 25 | #define ACQUIRED_BEFORE(...) \ 26 | THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) 27 | 28 | #define ACQUIRED_AFTER(...) \ 29 | THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) 30 | 31 | #define REQUIRES(...) \ 32 | THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) 33 | 34 | #define REQUIRES_SHARED(...) \ 35 | THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) 36 | 37 | #define ACQUIRE(...) 
// used directly because they do not provide the required annotations.
// Enter the barrier and wait until all other threads have also
  // entered the barrier. Returns true iff this is the last thread to
  // enter the barrier.
132 | bool createBarrier(MutexLock& ml) REQUIRES(lock_) { 133 | CHECK_LT(entered_, running_threads_); 134 | entered_++; 135 | if (entered_ < running_threads_) { 136 | // Wait for all threads to enter 137 | int phase_number_cp = phase_number_; 138 | auto cb = [this, phase_number_cp]() { 139 | return this->phase_number_ > phase_number_cp || 140 | entered_ == running_threads_; // A thread has aborted in error 141 | }; 142 | phase_condition_.wait(ml.native_handle(), cb); 143 | if (phase_number_ > phase_number_cp) return false; 144 | // else (running_threads_ == entered_) and we are the last thread. 145 | } 146 | // Last thread has reached the barrier 147 | phase_number_++; 148 | entered_ = 0; 149 | return true; 150 | } 151 | }; 152 | 153 | } // end namespace benchmark 154 | 155 | #endif // BENCHMARK_MUTEX_H_ 156 | -------------------------------------------------------------------------------- /vendor/benchmark/tools/gbench/util.py: -------------------------------------------------------------------------------- 1 | """util.py - General utilities for running, loading, and processing benchmarks 2 | """ 3 | import json 4 | import os 5 | import tempfile 6 | import subprocess 7 | import sys 8 | 9 | # Input file type enumeration 10 | IT_Invalid = 0 11 | IT_JSON = 1 12 | IT_Executable = 2 13 | 14 | _num_magic_bytes = 2 if sys.platform.startswith('win') else 4 15 | def is_executable_file(filename): 16 | """ 17 | Return 'True' if 'filename' names a valid file which is likely 18 | an executable. A file is considered an executable if it starts with the 19 | magic bytes for a EXE, Mach O, or ELF file. 
Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
80 | """ 81 | ftype, msg = classify_input_file(filename) 82 | if ftype == IT_Invalid: 83 | print("Invalid input file: %s" % msg) 84 | sys.exit(1) 85 | return ftype 86 | 87 | def find_benchmark_flag(prefix, benchmark_flags): 88 | """ 89 | Search the specified list of flags for a flag matching `` and 90 | if it is found return the arg it specifies. If specified more than once the 91 | last value is returned. If the flag is not found None is returned. 92 | """ 93 | assert prefix.startswith('--') and prefix.endswith('=') 94 | result = None 95 | for f in benchmark_flags: 96 | if f.startswith(prefix): 97 | result = f[len(prefix):] 98 | return result 99 | 100 | def remove_benchmark_flags(prefix, benchmark_flags): 101 | """ 102 | Return a new list containing the specified benchmark_flags except those 103 | with the specified prefix. 104 | """ 105 | assert prefix.startswith('--') and prefix.endswith('=') 106 | return [f for f in benchmark_flags if not f.startswith(prefix)] 107 | 108 | def load_benchmark_results(fname): 109 | """ 110 | Read benchmark output from a file and return the JSON object. 111 | REQUIRES: 'fname' names a file containing JSON benchmark output. 112 | """ 113 | with open(fname, 'r') as f: 114 | return json.load(f) 115 | 116 | 117 | def run_benchmark(exe_name, benchmark_flags): 118 | """ 119 | Run a benchmark specified by 'exe_name' with the specified 120 | 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve 121 | real time console output. 
The Benchmark library provides a number of functions whose primary
purpose is to affect assembly generation, including `DoNotOptimize`
and `ClobberMemory`.
7 | 8 | For these functions it's important to have tests that verify the 9 | correctness and quality of the implementation. This requires testing 10 | the code generated by the compiler. 11 | 12 | This document describes how the Benchmark library tests compiler output, 13 | as well as how to properly write new tests. 14 | 15 | 16 | ## Anatomy of a Test 17 | 18 | Writing a test has two steps: 19 | 20 | * Write the code you want to generate assembly for. 21 | * Add `// CHECK` lines to match against the verified assembly. 22 | 23 | Example: 24 | ```c++ 25 | 26 | // CHECK-LABEL: test_add: 27 | extern "C" int test_add() { 28 | extern int ExternInt; 29 | return ExternInt + 1; 30 | 31 | // CHECK: movl ExternInt(%rip), %eax 32 | // CHECK: addl %eax 33 | // CHECK: ret 34 | } 35 | 36 | ``` 37 | 38 | #### LLVM Filecheck 39 | 40 | [LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html) 41 | is used to test the generated assembly against the `// CHECK` lines 42 | specified in the tests source file. Please see the documentation 43 | linked above for information on how to write `CHECK` directives. 44 | 45 | #### Tips and Tricks: 46 | 47 | * Tests should match the minimal amount of output required to establish 48 | correctness. `CHECK` directives don't have to match on the exact next line 49 | after the previous match, so tests should omit checks for unimportant 50 | bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive) 51 | can be used to ensure a match occurs exactly after the previous match). 52 | 53 | * The tests are compiled with `-O3 -g0`. So we're only testing the 54 | optimized output. 55 | 56 | * The assembly output is further cleaned up using `tools/strip_asm.py`. 57 | This removes comments, assembler directives, and unused labels before 58 | the test is run. 
59 | 60 | * The generated and stripped assembly file for a test is output under 61 | `/test/.s` 62 | 63 | * Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes) 64 | to specify lines that should only match in certain situations. 65 | The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that 66 | are only expected to match Clang or GCC's output respectively. Normal 67 | `CHECK` lines match against all compilers. (Note: `CHECK-NOT` and 68 | `CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed 69 | `CHECK` lines) 70 | 71 | * Use `extern "C"` to disable name mangling for specific functions. This 72 | makes them easier to name in the `CHECK` lines. 73 | 74 | 75 | ## Problems Writing Portable Tests 76 | 77 | Writing tests which check the code generated by a compiler are 78 | inherently non-portable. Different compilers and even different compiler 79 | versions may generate entirely different code. The Benchmark tests 80 | must tolerate this. 81 | 82 | LLVM Filecheck provides a number of mechanisms to help write 83 | "more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax), 84 | allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables) 85 | for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive). 86 | 87 | #### Capturing Variables 88 | 89 | For example, say GCC stores a variable in a register but Clang stores 90 | it in memory. To write a test that tolerates both cases we "capture" 91 | the destination of the store, and then use the captured expression 92 | to write the remainder of the test. 
93 | 94 | ```c++ 95 | // CHECK-LABEL: test_div_no_op_into_shr: 96 | extern "C" int test_div_no_op_into_shr(int value) { 97 | int divisor = 2; 98 | benchmark::DoNotOptimize(divisor); // hide the value from the optimizer 99 | return value / divisor; 100 | 101 | // CHECK: movl $2, [[DEST:.*]] 102 | // CHECK: idivl [[DEST]] 103 | // CHECK: ret 104 | } 105 | ``` 106 | 107 | #### Using Regular Expressions to Match Differing Output 108 | 109 | Often tests require testing assembly lines which may subtly differ 110 | between compilers or compiler versions. A common example of this 111 | is matching stack frame addresses. In this case regular expressions 112 | can be used to match the differing bits of output. For example: 113 | 114 | ```c++ 115 | int ExternInt; 116 | struct Point { int x, y, z; }; 117 | 118 | // CHECK-LABEL: test_store_point: 119 | extern "C" void test_store_point() { 120 | Point p{ExternInt, ExternInt, ExternInt}; 121 | benchmark::DoNotOptimize(p); 122 | 123 | // CHECK: movl ExternInt(%rip), %eax 124 | // CHECK: movl %eax, -{{[0-9]+}}(%rsp) 125 | // CHECK: movl %eax, -{{[0-9]+}}(%rsp) 126 | // CHECK: movl %eax, -{{[0-9]+}}(%rsp) 127 | // CHECK: ret 128 | } 129 | ``` 130 | 131 | ## Current Requirements and Limitations 132 | 133 | The tests require Filecheck to be installed along the `PATH` of the 134 | build machine. Otherwise the tests will be disabled. 135 | 136 | Additionally, as mentioned in the previous section, codegen tests are 137 | inherently non-portable. Currently the tests are limited to: 138 | 139 | * x86_64 targets. 140 | * Compiled with GCC or Clang 141 | 142 | Further work could be done, at least on a limited basis, to extend the 143 | tests to other architectures and compilers (using `CHECK` prefixes). 144 | 145 | Furthermore, the tests fail for builds which specify additional flags 146 | that modify code generation, including `--coverage` or `-fsanitize=`.
147 | 148 | -------------------------------------------------------------------------------- /vendor/benchmark/src/colorprint.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "colorprint.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include "check.h" 25 | #include "internal_macros.h" 26 | 27 | #ifdef BENCHMARK_OS_WINDOWS 28 | #include 29 | #include 30 | #else 31 | #include 32 | #endif // BENCHMARK_OS_WINDOWS 33 | 34 | namespace benchmark { 35 | namespace { 36 | #ifdef BENCHMARK_OS_WINDOWS 37 | typedef WORD PlatformColorCode; 38 | #else 39 | typedef const char* PlatformColorCode; 40 | #endif 41 | 42 | PlatformColorCode GetPlatformColorCode(LogColor color) { 43 | #ifdef BENCHMARK_OS_WINDOWS 44 | switch (color) { 45 | case COLOR_RED: 46 | return FOREGROUND_RED; 47 | case COLOR_GREEN: 48 | return FOREGROUND_GREEN; 49 | case COLOR_YELLOW: 50 | return FOREGROUND_RED | FOREGROUND_GREEN; 51 | case COLOR_BLUE: 52 | return FOREGROUND_BLUE; 53 | case COLOR_MAGENTA: 54 | return FOREGROUND_BLUE | FOREGROUND_RED; 55 | case COLOR_CYAN: 56 | return FOREGROUND_BLUE | FOREGROUND_GREEN; 57 | case COLOR_WHITE: // fall through to default 58 | default: 59 | return 0; 60 | } 61 | #else 62 | switch (color) { 63 | case COLOR_RED: 
64 | return "1"; 65 | case COLOR_GREEN: 66 | return "2"; 67 | case COLOR_YELLOW: 68 | return "3"; 69 | case COLOR_BLUE: 70 | return "4"; 71 | case COLOR_MAGENTA: 72 | return "5"; 73 | case COLOR_CYAN: 74 | return "6"; 75 | case COLOR_WHITE: 76 | return "7"; 77 | default: 78 | return nullptr; 79 | }; 80 | #endif 81 | } 82 | 83 | } // end namespace 84 | 85 | std::string FormatString(const char* msg, va_list args) { 86 | // we might need a second shot at this, so pre-emptivly make a copy 87 | va_list args_cp; 88 | va_copy(args_cp, args); 89 | 90 | std::size_t size = 256; 91 | char local_buff[256]; 92 | auto ret = vsnprintf(local_buff, size, msg, args_cp); 93 | 94 | va_end(args_cp); 95 | 96 | // currently there is no error handling for failure, so this is hack. 97 | CHECK(ret >= 0); 98 | 99 | if (ret == 0) // handle empty expansion 100 | return {}; 101 | else if (static_cast(ret) < size) 102 | return local_buff; 103 | else { 104 | // we did not provide a long enough buffer on our first attempt. 105 | size = (size_t)ret + 1; // + 1 for the null byte 106 | std::unique_ptr buff(new char[size]); 107 | ret = vsnprintf(buff.get(), size, msg, args); 108 | CHECK(ret > 0 && ((size_t)ret) < size); 109 | return buff.get(); 110 | } 111 | } 112 | 113 | std::string FormatString(const char* msg, ...) { 114 | va_list args; 115 | va_start(args, msg); 116 | auto tmp = FormatString(msg, args); 117 | va_end(args); 118 | return tmp; 119 | } 120 | 121 | void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) { 122 | va_list args; 123 | va_start(args, fmt); 124 | ColorPrintf(out, color, fmt, args); 125 | va_end(args); 126 | } 127 | 128 | void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, 129 | va_list args) { 130 | #ifdef BENCHMARK_OS_WINDOWS 131 | ((void)out); // suppress unused warning 132 | 133 | const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); 134 | 135 | // Gets the current text color. 
136 | CONSOLE_SCREEN_BUFFER_INFO buffer_info; 137 | GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); 138 | const WORD old_color_attrs = buffer_info.wAttributes; 139 | 140 | // We need to flush the stream buffers into the console before each 141 | // SetConsoleTextAttribute call lest it affect the text that is already 142 | // printed but has not yet reached the console. 143 | fflush(stdout); 144 | SetConsoleTextAttribute(stdout_handle, 145 | GetPlatformColorCode(color) | FOREGROUND_INTENSITY); 146 | vprintf(fmt, args); 147 | 148 | fflush(stdout); 149 | // Restores the text color. 150 | SetConsoleTextAttribute(stdout_handle, old_color_attrs); 151 | #else 152 | const char* color_code = GetPlatformColorCode(color); 153 | if (color_code) out << FormatString("\033[0;3%sm", color_code); 154 | out << FormatString(fmt, args) << "\033[m"; 155 | #endif 156 | } 157 | 158 | bool IsColorTerminal() { 159 | #if BENCHMARK_OS_WINDOWS 160 | // On Windows the TERM variable is usually not set, but the 161 | // console there does support colors. 162 | return 0 != _isatty(_fileno(stdout)); 163 | #else 164 | // On non-Windows platforms, we rely on the TERM variable. This list of 165 | // supported TERM values is copied from Google Test: 166 | // . 
167 | const char* const SUPPORTED_TERM_VALUES[] = { 168 | "xterm", "xterm-color", "xterm-256color", 169 | "screen", "screen-256color", "tmux", 170 | "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", 171 | "linux", "cygwin", 172 | }; 173 | 174 | const char* const term = getenv("TERM"); 175 | 176 | bool term_supports_color = false; 177 | for (const char* candidate : SUPPORTED_TERM_VALUES) { 178 | if (term && 0 == strcmp(term, candidate)) { 179 | term_supports_color = true; 180 | break; 181 | } 182 | } 183 | 184 | return 0 != isatty(fileno(stdout)) && term_supports_color; 185 | #endif // BENCHMARK_OS_WINDOWS 186 | } 187 | 188 | } // end namespace benchmark 189 | -------------------------------------------------------------------------------- /vendor/benchmark/src/console_reporter.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | #include "benchmark/benchmark.h" 16 | #include "complexity.h" 17 | #include "counter.h" 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | #include "check.h" 28 | #include "colorprint.h" 29 | #include "commandlineflags.h" 30 | #include "internal_macros.h" 31 | #include "string_util.h" 32 | #include "timers.h" 33 | 34 | namespace benchmark { 35 | 36 | bool ConsoleReporter::ReportContext(const Context& context) { 37 | name_field_width_ = context.name_field_width; 38 | printed_header_ = false; 39 | prev_counters_.clear(); 40 | 41 | PrintBasicContext(&GetErrorStream(), context); 42 | 43 | #ifdef BENCHMARK_OS_WINDOWS 44 | if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) { 45 | GetErrorStream() 46 | << "Color printing is only supported for stdout on windows." 47 | " Disabling color printing\n"; 48 | output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); 49 | } 50 | #endif 51 | 52 | return true; 53 | } 54 | 55 | void ConsoleReporter::PrintHeader(const Run& run) { 56 | std::string str = FormatString("%-*s %13s %13s %10s", static_cast(name_field_width_), 57 | "Benchmark", "Time", "CPU", "Iterations"); 58 | if(!run.counters.empty()) { 59 | if(output_options_ & OO_Tabular) { 60 | for(auto const& c : run.counters) { 61 | str += FormatString(" %10s", c.first.c_str()); 62 | } 63 | } else { 64 | str += " UserCounters..."; 65 | } 66 | } 67 | str += "\n"; 68 | std::string line = std::string(str.length(), '-'); 69 | GetOutputStream() << line << "\n" << str << line << "\n"; 70 | } 71 | 72 | void ConsoleReporter::ReportRuns(const std::vector& reports) { 73 | for (const auto& run : reports) { 74 | // print the header: 75 | // --- if none was printed yet 76 | bool print_header = !printed_header_; 77 | // --- or if the format is tabular and this run 78 | // has different fields from the prev header 79 | print_header |= (output_options_ & OO_Tabular) && 80 | 
(!internal::SameNames(run.counters, prev_counters_)); 81 | if (print_header) { 82 | printed_header_ = true; 83 | prev_counters_ = run.counters; 84 | PrintHeader(run); 85 | } 86 | // As an alternative to printing the headers like this, we could sort 87 | // the benchmarks by header and then print. But this would require 88 | // waiting for the full results before printing, or printing twice. 89 | PrintRunData(run); 90 | } 91 | } 92 | 93 | static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, 94 | ...) { 95 | va_list args; 96 | va_start(args, fmt); 97 | out << FormatString(fmt, args); 98 | va_end(args); 99 | } 100 | 101 | void ConsoleReporter::PrintRunData(const Run& result) { 102 | typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); 103 | auto& Out = GetOutputStream(); 104 | PrinterFn* printer = (output_options_ & OO_Color) ? 105 | (PrinterFn*)ColorPrintf : IgnoreColorPrint; 106 | auto name_color = 107 | (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN; 108 | printer(Out, name_color, "%-*s ", name_field_width_, 109 | result.benchmark_name().c_str()); 110 | 111 | if (result.error_occurred) { 112 | printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", 113 | result.error_message.c_str()); 114 | printer(Out, COLOR_DEFAULT, "\n"); 115 | return; 116 | } 117 | 118 | const double real_time = result.GetAdjustedRealTime(); 119 | const double cpu_time = result.GetAdjustedCPUTime(); 120 | 121 | if (result.report_big_o) { 122 | std::string big_o = GetBigOString(result.complexity); 123 | printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(), 124 | cpu_time, big_o.c_str()); 125 | } else if (result.report_rms) { 126 | printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100, 127 | cpu_time * 100); 128 | } else { 129 | const char* timeLabel = GetTimeUnitString(result.time_unit); 130 | printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel, 131 | cpu_time, timeLabel); 132 | } 133 | 
134 | if (!result.report_big_o && !result.report_rms) { 135 | printer(Out, COLOR_CYAN, "%10lld", result.iterations); 136 | } 137 | 138 | for (auto& c : result.counters) { 139 | const std::size_t cNameLen = std::max(std::string::size_type(10), 140 | c.first.length()); 141 | auto const& s = HumanReadableNumber(c.second.value, c.second.oneK); 142 | if (output_options_ & OO_Tabular) { 143 | if (c.second.flags & Counter::kIsRate) { 144 | printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str()); 145 | } else { 146 | printer(Out, COLOR_DEFAULT, " %*s", cNameLen, s.c_str()); 147 | } 148 | } else { 149 | const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : ""; 150 | printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), 151 | unit); 152 | } 153 | } 154 | 155 | if (!result.report_label.empty()) { 156 | printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str()); 157 | } 158 | 159 | printer(Out, COLOR_DEFAULT, "\n"); 160 | } 161 | 162 | } // end namespace benchmark 163 | -------------------------------------------------------------------------------- /vendor/benchmark/test/register_benchmark_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #undef NDEBUG 3 | #include 4 | #include 5 | 6 | #include "../src/check.h" // NOTE: check.h is for internal use only! 7 | #include "benchmark/benchmark.h" 8 | 9 | namespace { 10 | 11 | class TestReporter : public benchmark::ConsoleReporter { 12 | public: 13 | virtual void ReportRuns(const std::vector& report) { 14 | all_runs_.insert(all_runs_.end(), begin(report), end(report)); 15 | ConsoleReporter::ReportRuns(report); 16 | } 17 | 18 | std::vector all_runs_; 19 | }; 20 | 21 | struct TestCase { 22 | std::string name; 23 | const char* label; 24 | // Note: not explicit as we rely on it being converted through ADD_CASES. 
25 | TestCase(const char* xname) : TestCase(xname, nullptr) {} 26 | TestCase(const char* xname, const char* xlabel) 27 | : name(xname), label(xlabel) {} 28 | 29 | typedef benchmark::BenchmarkReporter::Run Run; 30 | 31 | void CheckRun(Run const& run) const { 32 | // clang-format off 33 | CHECK(name == run.benchmark_name()) << "expected " << name << " got " 34 | << run.benchmark_name(); 35 | if (label) { 36 | CHECK(run.report_label == label) << "expected " << label << " got " 37 | << run.report_label; 38 | } else { 39 | CHECK(run.report_label == ""); 40 | } 41 | // clang-format on 42 | } 43 | }; 44 | 45 | std::vector ExpectedResults; 46 | 47 | int AddCases(std::initializer_list const& v) { 48 | for (auto N : v) { 49 | ExpectedResults.push_back(N); 50 | } 51 | return 0; 52 | } 53 | 54 | #define CONCAT(x, y) CONCAT2(x, y) 55 | #define CONCAT2(x, y) x##y 56 | #define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__}) 57 | 58 | } // end namespace 59 | 60 | typedef benchmark::internal::Benchmark* ReturnVal; 61 | 62 | //----------------------------------------------------------------------------// 63 | // Test RegisterBenchmark with no additional arguments 64 | //----------------------------------------------------------------------------// 65 | void BM_function(benchmark::State& state) { 66 | for (auto _ : state) { 67 | } 68 | } 69 | BENCHMARK(BM_function); 70 | ReturnVal dummy = benchmark::RegisterBenchmark( 71 | "BM_function_manual_registration", BM_function); 72 | ADD_CASES({"BM_function"}, {"BM_function_manual_registration"}); 73 | 74 | //----------------------------------------------------------------------------// 75 | // Test RegisterBenchmark with additional arguments 76 | // Note: GCC <= 4.8 do not support this form of RegisterBenchmark because they 77 | // reject the variadic pack expansion of lambda captures. 
78 | //----------------------------------------------------------------------------// 79 | #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK 80 | 81 | void BM_extra_args(benchmark::State& st, const char* label) { 82 | for (auto _ : st) { 83 | } 84 | st.SetLabel(label); 85 | } 86 | int RegisterFromFunction() { 87 | std::pair cases[] = { 88 | {"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}}; 89 | for (auto const& c : cases) 90 | benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second); 91 | return 0; 92 | } 93 | int dummy2 = RegisterFromFunction(); 94 | ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}); 95 | 96 | #endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK 97 | 98 | //----------------------------------------------------------------------------// 99 | // Test RegisterBenchmark with different callable types 100 | //----------------------------------------------------------------------------// 101 | 102 | struct CustomFixture { 103 | void operator()(benchmark::State& st) { 104 | for (auto _ : st) { 105 | } 106 | } 107 | }; 108 | 109 | void TestRegistrationAtRuntime() { 110 | #ifdef BENCHMARK_HAS_CXX11 111 | { 112 | CustomFixture fx; 113 | benchmark::RegisterBenchmark("custom_fixture", fx); 114 | AddCases({"custom_fixture"}); 115 | } 116 | #endif 117 | #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK 118 | { 119 | const char* x = "42"; 120 | auto capturing_lam = [=](benchmark::State& st) { 121 | for (auto _ : st) { 122 | } 123 | st.SetLabel(x); 124 | }; 125 | benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam); 126 | AddCases({{"lambda_benchmark", x}}); 127 | } 128 | #endif 129 | } 130 | 131 | // Test that all benchmarks, registered at either during static init or runtime, 132 | // are run and the results are passed to the reported. 
133 | void RunTestOne() { 134 | TestRegistrationAtRuntime(); 135 | 136 | TestReporter test_reporter; 137 | benchmark::RunSpecifiedBenchmarks(&test_reporter); 138 | 139 | typedef benchmark::BenchmarkReporter::Run Run; 140 | auto EB = ExpectedResults.begin(); 141 | 142 | for (Run const& run : test_reporter.all_runs_) { 143 | assert(EB != ExpectedResults.end()); 144 | EB->CheckRun(run); 145 | ++EB; 146 | } 147 | assert(EB == ExpectedResults.end()); 148 | } 149 | 150 | // Test that ClearRegisteredBenchmarks() clears all previously registered 151 | // benchmarks. 152 | // Also test that new benchmarks can be registered and ran afterwards. 153 | void RunTestTwo() { 154 | assert(ExpectedResults.size() != 0 && 155 | "must have at least one registered benchmark"); 156 | ExpectedResults.clear(); 157 | benchmark::ClearRegisteredBenchmarks(); 158 | 159 | TestReporter test_reporter; 160 | size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter); 161 | assert(num_ran == 0); 162 | assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end()); 163 | 164 | TestRegistrationAtRuntime(); 165 | num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter); 166 | assert(num_ran == ExpectedResults.size()); 167 | 168 | typedef benchmark::BenchmarkReporter::Run Run; 169 | auto EB = ExpectedResults.begin(); 170 | 171 | for (Run const& run : test_reporter.all_runs_) { 172 | assert(EB != ExpectedResults.end()); 173 | EB->CheckRun(run); 174 | ++EB; 175 | } 176 | assert(EB == ExpectedResults.end()); 177 | } 178 | 179 | int main(int argc, char* argv[]) { 180 | benchmark::Initialize(&argc, argv); 181 | 182 | RunTestOne(); 183 | RunTestTwo(); 184 | } 185 | -------------------------------------------------------------------------------- /vendor/benchmark/src/statistics.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Ismael Jimenez Martinez. All rights reserved. 2 | // Copyright 2017 Roman Lebedev. All rights reserved. 
3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | #include "benchmark/benchmark.h" 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include "check.h" 24 | #include "statistics.h" 25 | 26 | namespace benchmark { 27 | 28 | auto StatisticsSum = [](const std::vector& v) { 29 | return std::accumulate(v.begin(), v.end(), 0.0); 30 | }; 31 | 32 | double StatisticsMean(const std::vector& v) { 33 | if (v.empty()) return 0.0; 34 | return StatisticsSum(v) * (1.0 / v.size()); 35 | } 36 | 37 | double StatisticsMedian(const std::vector& v) { 38 | if (v.size() < 3) return StatisticsMean(v); 39 | std::vector copy(v); 40 | 41 | auto center = copy.begin() + v.size() / 2; 42 | std::nth_element(copy.begin(), center, copy.end()); 43 | 44 | // did we have an odd number of samples? 
45 | // if yes, then center is the median 46 | // it no, then we are looking for the average between center and the value 47 | // before 48 | if (v.size() % 2 == 1) return *center; 49 | auto center2 = copy.begin() + v.size() / 2 - 1; 50 | std::nth_element(copy.begin(), center2, copy.end()); 51 | return (*center + *center2) / 2.0; 52 | } 53 | 54 | // Return the sum of the squares of this sample set 55 | auto SumSquares = [](const std::vector& v) { 56 | return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); 57 | }; 58 | 59 | auto Sqr = [](const double dat) { return dat * dat; }; 60 | auto Sqrt = [](const double dat) { 61 | // Avoid NaN due to imprecision in the calculations 62 | if (dat < 0.0) return 0.0; 63 | return std::sqrt(dat); 64 | }; 65 | 66 | double StatisticsStdDev(const std::vector& v) { 67 | const auto mean = StatisticsMean(v); 68 | if (v.empty()) return mean; 69 | 70 | // Sample standard deviation is undefined for n = 1 71 | if (v.size() == 1) return 0.0; 72 | 73 | const double avg_squares = SumSquares(v) * (1.0 / v.size()); 74 | return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean))); 75 | } 76 | 77 | std::vector ComputeStats( 78 | const std::vector& reports) { 79 | typedef BenchmarkReporter::Run Run; 80 | std::vector results; 81 | 82 | auto error_count = 83 | std::count_if(reports.begin(), reports.end(), 84 | [](Run const& run) { return run.error_occurred; }); 85 | 86 | if (reports.size() - error_count < 2) { 87 | // We don't report aggregated data if there was a single run. 88 | return results; 89 | } 90 | 91 | // Accumulators. 92 | std::vector real_accumulated_time_stat; 93 | std::vector cpu_accumulated_time_stat; 94 | 95 | real_accumulated_time_stat.reserve(reports.size()); 96 | cpu_accumulated_time_stat.reserve(reports.size()); 97 | 98 | // All repetitions should be run with the same number of iterations so we 99 | // can take this information from the first benchmark. 
100 | int64_t const run_iterations = reports.front().iterations; 101 | // create stats for user counters 102 | struct CounterStat { 103 | Counter c; 104 | std::vector s; 105 | }; 106 | std::map counter_stats; 107 | for (Run const& r : reports) { 108 | for (auto const& cnt : r.counters) { 109 | auto it = counter_stats.find(cnt.first); 110 | if (it == counter_stats.end()) { 111 | counter_stats.insert({cnt.first, {cnt.second, std::vector{}}}); 112 | it = counter_stats.find(cnt.first); 113 | it->second.s.reserve(reports.size()); 114 | } else { 115 | CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags); 116 | } 117 | } 118 | } 119 | 120 | // Populate the accumulators. 121 | for (Run const& run : reports) { 122 | CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); 123 | CHECK_EQ(run_iterations, run.iterations); 124 | if (run.error_occurred) continue; 125 | real_accumulated_time_stat.emplace_back(run.real_accumulated_time); 126 | cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time); 127 | // user counters 128 | for (auto const& cnt : run.counters) { 129 | auto it = counter_stats.find(cnt.first); 130 | CHECK_NE(it, counter_stats.end()); 131 | it->second.s.emplace_back(cnt.second); 132 | } 133 | } 134 | 135 | // Only add label if it is same for all runs 136 | std::string report_label = reports[0].report_label; 137 | for (std::size_t i = 1; i < reports.size(); i++) { 138 | if (reports[i].report_label != report_label) { 139 | report_label = ""; 140 | break; 141 | } 142 | } 143 | 144 | for (const auto& Stat : *reports[0].statistics) { 145 | // Get the data from the accumulator to BenchmarkReporter::Run's. 
146 | Run data; 147 | data.run_name = reports[0].benchmark_name(); 148 | data.run_type = BenchmarkReporter::Run::RT_Aggregate; 149 | data.aggregate_name = Stat.name_; 150 | data.report_label = report_label; 151 | data.iterations = run_iterations; 152 | 153 | data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat); 154 | data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat); 155 | 156 | data.time_unit = reports[0].time_unit; 157 | 158 | // user counters 159 | for (auto const& kv : counter_stats) { 160 | const auto uc_stat = Stat.compute_(kv.second.s); 161 | auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, 162 | counter_stats[kv.first].c.oneK); 163 | data.counters[kv.first] = c; 164 | } 165 | 166 | results.push_back(data); 167 | } 168 | 169 | return results; 170 | } 171 | 172 | } // end namespace benchmark 173 | -------------------------------------------------------------------------------- /include/rigtorp/CharConv.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2018 Erik Rigtorp 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in all 12 | copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 20 | SOFTWARE. 21 | */ 22 | 23 | #pragma once 24 | 25 | #include 26 | #include 27 | #include 28 | 29 | namespace rigtorp { 30 | 31 | struct to_chars_result { 32 | char *ptr; 33 | std::errc ec; 34 | }; 35 | 36 | struct from_chars_result { 37 | const char *ptr; 38 | std::errc ec; 39 | }; 40 | 41 | namespace detail { 42 | 43 | static constexpr uint32_t powers_of_10_32[] = { 44 | UINT32_C(0), UINT32_C(10), UINT32_C(100), 45 | UINT32_C(1000), UINT32_C(10000), UINT32_C(100000), 46 | UINT32_C(1000000), UINT32_C(10000000), UINT32_C(100000000), 47 | UINT32_C(1000000000), 48 | }; 49 | 50 | static constexpr uint64_t powers_of_10_64[] = { 51 | UINT64_C(0), 52 | UINT64_C(10), 53 | UINT64_C(100), 54 | UINT64_C(1000), 55 | UINT64_C(10000), 56 | UINT64_C(100000), 57 | UINT64_C(1000000), 58 | UINT64_C(10000000), 59 | UINT64_C(100000000), 60 | UINT64_C(1000000000), 61 | UINT64_C(10000000000), 62 | UINT64_C(100000000000), 63 | UINT64_C(1000000000000), 64 | UINT64_C(10000000000000), 65 | UINT64_C(100000000000000), 66 | UINT64_C(1000000000000000), 67 | UINT64_C(10000000000000000), 68 | UINT64_C(100000000000000000), 69 | UINT64_C(1000000000000000000), 70 | UINT64_C(10000000000000000000), 71 | }; 72 | 73 | template constexpr unsigned to_chars_len(T value) noexcept { 74 | static_assert(std::is_unsigned::value); 75 | static_assert(std::numeric_limits::max() >= 76 | std::numeric_limits::digits); 77 | if constexpr (sizeof(T) <= sizeof(int)) { 78 | static_assert(sizeof(int) == 4); 79 | const unsigned t = (32 - __builtin_clz(value | 1)) * 1233 >> 12; 80 | return t - (value < powers_of_10_32[t]) + 1; 81 | } else { 82 | static_assert(sizeof(T) <= sizeof(long long)); 83 | static_assert(sizeof(long long) == 8); 84 | const unsigned t = (64 
- __builtin_clzll(value | 1)) * 1233 >> 12; 85 | return t - (value < powers_of_10_64[t]) + 1; 86 | } 87 | } 88 | 89 | template 90 | constexpr to_chars_result to_chars(char *first, char *last, T value) noexcept { 91 | static_assert(std::is_integral::value); 92 | using UT = std::make_unsigned_t; 93 | static_assert(sizeof(UT) == sizeof(T)); 94 | UT uvalue = value; 95 | if constexpr (std::is_signed::value) { 96 | if (value < 0) { 97 | if (__builtin_expect(first != last, 1)) { 98 | *first++ = '-'; 99 | uvalue = UT(~value) + UT(1); 100 | } 101 | } 102 | } 103 | const auto len = to_chars_len(uvalue); 104 | if (__builtin_expect(last - first < len, 0)) { 105 | return {last, std::errc::value_too_large}; 106 | } 107 | uint32_t pos = len - 1; 108 | while (uvalue >= 10) { 109 | const auto q = uvalue / 10; 110 | const auto r = uvalue % 10; 111 | first[pos--] = r + '0'; 112 | uvalue = q; 113 | } 114 | first[0] = uvalue + '0'; 115 | return {first + len, {}}; 116 | } 117 | 118 | template 119 | constexpr from_chars_result from_chars(const char *first, const char *last, 120 | T &value) noexcept { 121 | static_assert(std::is_integral::value); 122 | [[maybe_unused]] int sign = 1; 123 | if constexpr (std::is_signed::value) { 124 | if (first != last && *first == '-') { 125 | sign = -1; 126 | ++first; 127 | } 128 | } 129 | if (__builtin_expect(first == last, 0)) { 130 | return {first, std::errc::invalid_argument}; 131 | } 132 | std::make_unsigned_t res = 0; 133 | static_assert(sizeof(res) == sizeof(T)); 134 | while (first != last) { 135 | const uint8_t c = *first - '0'; 136 | if (__builtin_expect(c > 9, 0)) { 137 | return {first, std::errc::invalid_argument}; 138 | } 139 | if (__builtin_expect(__builtin_mul_overflow(res, 10, &res), 0) || 140 | __builtin_expect(__builtin_add_overflow(res, c, &res), 0)) { 141 | return {first, std::errc::result_out_of_range}; 142 | } 143 | ++first; 144 | } 145 | if constexpr (std::is_signed::value) { 146 | T tmp; 147 | if 
(__builtin_expect(__builtin_mul_overflow(res, sign, &tmp), 0)) { 148 | return {first, std::errc::result_out_of_range}; 149 | } 150 | value = tmp; 151 | } else { 152 | value = res; 153 | } 154 | return {first, {}}; 155 | } 156 | } // namespace detail 157 | 158 | constexpr inline to_chars_result to_chars(char *first, char *last, 159 | uint32_t value) noexcept { 160 | return detail::to_chars(first, last, value); 161 | } 162 | 163 | constexpr inline to_chars_result to_chars(char *first, char *last, 164 | int32_t value) noexcept { 165 | return detail::to_chars(first, last, value); 166 | } 167 | 168 | constexpr inline to_chars_result to_chars(char *first, char *last, 169 | uint64_t value) noexcept { 170 | return detail::to_chars(first, last, value); 171 | } 172 | 173 | constexpr inline to_chars_result to_chars(char *first, char *last, 174 | int64_t value) noexcept { 175 | return detail::to_chars(first, last, value); 176 | } 177 | 178 | constexpr inline from_chars_result 179 | from_chars(const char *first, const char *last, uint32_t &value) noexcept { 180 | return detail::from_chars(first, last, value); 181 | } 182 | 183 | constexpr inline from_chars_result 184 | from_chars(const char *first, const char *last, int32_t &value) noexcept { 185 | return detail::from_chars(first, last, value); 186 | } 187 | 188 | constexpr inline from_chars_result 189 | from_chars(const char *first, const char *last, uint64_t &value) noexcept { 190 | return detail::from_chars(first, last, value); 191 | } 192 | 193 | constexpr inline from_chars_result 194 | from_chars(const char *first, const char *last, int64_t &value) noexcept { 195 | return detail::from_chars(first, last, value); 196 | } 197 | } // namespace rigtorp -------------------------------------------------------------------------------- /vendor/benchmark/test/skip_with_error_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #undef NDEBUG 3 | #include 4 | #include 5 | 6 | #include 
"../src/check.h" // NOTE: check.h is for internal use only! 7 | #include "benchmark/benchmark.h" 8 | 9 | namespace { 10 | 11 | class TestReporter : public benchmark::ConsoleReporter { 12 | public: 13 | virtual bool ReportContext(const Context& context) { 14 | return ConsoleReporter::ReportContext(context); 15 | }; 16 | 17 | virtual void ReportRuns(const std::vector& report) { 18 | all_runs_.insert(all_runs_.end(), begin(report), end(report)); 19 | ConsoleReporter::ReportRuns(report); 20 | } 21 | 22 | TestReporter() {} 23 | virtual ~TestReporter() {} 24 | 25 | mutable std::vector all_runs_; 26 | }; 27 | 28 | struct TestCase { 29 | std::string name; 30 | bool error_occurred; 31 | std::string error_message; 32 | 33 | typedef benchmark::BenchmarkReporter::Run Run; 34 | 35 | void CheckRun(Run const& run) const { 36 | CHECK(name == run.benchmark_name()) 37 | << "expected " << name << " got " << run.benchmark_name(); 38 | CHECK(error_occurred == run.error_occurred); 39 | CHECK(error_message == run.error_message); 40 | if (error_occurred) { 41 | // CHECK(run.iterations == 0); 42 | } else { 43 | CHECK(run.iterations != 0); 44 | } 45 | } 46 | }; 47 | 48 | std::vector ExpectedResults; 49 | 50 | int AddCases(const char* base_name, std::initializer_list const& v) { 51 | for (auto TC : v) { 52 | TC.name = base_name + TC.name; 53 | ExpectedResults.push_back(std::move(TC)); 54 | } 55 | return 0; 56 | } 57 | 58 | #define CONCAT(x, y) CONCAT2(x, y) 59 | #define CONCAT2(x, y) x##y 60 | #define ADD_CASES(...) 
int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__) 61 | 62 | } // end namespace 63 | 64 | void BM_error_before_running(benchmark::State& state) { 65 | state.SkipWithError("error message"); 66 | while (state.KeepRunning()) { 67 | assert(false); 68 | } 69 | } 70 | BENCHMARK(BM_error_before_running); 71 | ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); 72 | 73 | void BM_error_before_running_batch(benchmark::State& state) { 74 | state.SkipWithError("error message"); 75 | while (state.KeepRunningBatch(17)) { 76 | assert(false); 77 | } 78 | } 79 | BENCHMARK(BM_error_before_running_batch); 80 | ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}}); 81 | 82 | void BM_error_before_running_range_for(benchmark::State& state) { 83 | state.SkipWithError("error message"); 84 | for (auto _ : state) { 85 | assert(false); 86 | } 87 | } 88 | BENCHMARK(BM_error_before_running_range_for); 89 | ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}}); 90 | 91 | void BM_error_during_running(benchmark::State& state) { 92 | int first_iter = true; 93 | while (state.KeepRunning()) { 94 | if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { 95 | assert(first_iter); 96 | first_iter = false; 97 | state.SkipWithError("error message"); 98 | } else { 99 | state.PauseTiming(); 100 | state.ResumeTiming(); 101 | } 102 | } 103 | } 104 | BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8); 105 | ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"}, 106 | {"/1/threads:2", true, "error message"}, 107 | {"/1/threads:4", true, "error message"}, 108 | {"/1/threads:8", true, "error message"}, 109 | {"/2/threads:1", false, ""}, 110 | {"/2/threads:2", false, ""}, 111 | {"/2/threads:4", false, ""}, 112 | {"/2/threads:8", false, ""}}); 113 | 114 | void BM_error_during_running_ranged_for(benchmark::State& state) { 115 | assert(state.max_iterations > 3 && "test requires at least a few 
iterations"); 116 | int first_iter = true; 117 | // NOTE: Users should not write the for loop explicitly. 118 | for (auto It = state.begin(), End = state.end(); It != End; ++It) { 119 | if (state.range(0) == 1) { 120 | assert(first_iter); 121 | first_iter = false; 122 | state.SkipWithError("error message"); 123 | // Test the unfortunate but documented behavior that the ranged-for loop 124 | // doesn't automatically terminate when SkipWithError is set. 125 | assert(++It != End); 126 | break; // Required behavior 127 | } 128 | } 129 | } 130 | BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5); 131 | ADD_CASES("BM_error_during_running_ranged_for", 132 | {{"/1/iterations:5", true, "error message"}, 133 | {"/2/iterations:5", false, ""}}); 134 | 135 | void BM_error_after_running(benchmark::State& state) { 136 | for (auto _ : state) { 137 | benchmark::DoNotOptimize(state.iterations()); 138 | } 139 | if (state.thread_index <= (state.threads / 2)) 140 | state.SkipWithError("error message"); 141 | } 142 | BENCHMARK(BM_error_after_running)->ThreadRange(1, 8); 143 | ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"}, 144 | {"/threads:2", true, "error message"}, 145 | {"/threads:4", true, "error message"}, 146 | {"/threads:8", true, "error message"}}); 147 | 148 | void BM_error_while_paused(benchmark::State& state) { 149 | bool first_iter = true; 150 | while (state.KeepRunning()) { 151 | if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { 152 | assert(first_iter); 153 | first_iter = false; 154 | state.PauseTiming(); 155 | state.SkipWithError("error message"); 156 | } else { 157 | state.PauseTiming(); 158 | state.ResumeTiming(); 159 | } 160 | } 161 | } 162 | BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8); 163 | ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"}, 164 | {"/1/threads:2", true, "error message"}, 165 | {"/1/threads:4", true, "error message"}, 166 | 
{"/1/threads:8", true, "error message"}, 167 | {"/2/threads:1", false, ""}, 168 | {"/2/threads:2", false, ""}, 169 | {"/2/threads:4", false, ""}, 170 | {"/2/threads:8", false, ""}}); 171 | 172 | int main(int argc, char* argv[]) { 173 | benchmark::Initialize(&argc, argv); 174 | 175 | TestReporter test_reporter; 176 | benchmark::RunSpecifiedBenchmarks(&test_reporter); 177 | 178 | typedef benchmark::BenchmarkReporter::Run Run; 179 | auto EB = ExpectedResults.begin(); 180 | 181 | for (Run const& run : test_reporter.all_runs_) { 182 | assert(EB != ExpectedResults.end()); 183 | EB->CheckRun(run); 184 | ++EB; 185 | } 186 | assert(EB == ExpectedResults.end()); 187 | 188 | return 0; 189 | } 190 | -------------------------------------------------------------------------------- /vendor/benchmark/.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: trusty 3 | language: cpp 4 | 5 | env: 6 | global: 7 | - /usr/local/bin:$PATH 8 | 9 | matrix: 10 | include: 11 | - compiler: gcc 12 | addons: 13 | apt: 14 | packages: 15 | - lcov 16 | env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage 17 | - compiler: gcc 18 | env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug 19 | - compiler: gcc 20 | env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release 21 | - compiler: gcc 22 | addons: 23 | apt: 24 | packages: 25 | - g++-multilib 26 | env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON 27 | - compiler: gcc 28 | addons: 29 | apt: 30 | packages: 31 | - g++-multilib 32 | env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON 33 | - compiler: gcc 34 | env: 35 | - INSTALL_GCC6_FROM_PPA=1 36 | - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug 37 | - ENABLE_SANITIZER=1 38 | - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold" 39 | - compiler: clang 40 | env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug 41 | - compiler: clang 42 | env: COMPILER=clang++ 
C_COMPILER=clang BUILD_TYPE=Release 43 | # Clang w/ libc++ 44 | - compiler: clang 45 | addons: 46 | apt: 47 | packages: 48 | clang-3.8 49 | env: 50 | - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug 51 | - LIBCXX_BUILD=1 52 | - EXTRA_FLAGS="-stdlib=libc++" 53 | - compiler: clang 54 | addons: 55 | apt: 56 | packages: 57 | clang-3.8 58 | env: 59 | - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release 60 | - LIBCXX_BUILD=1 61 | - EXTRA_FLAGS="-stdlib=libc++" 62 | # Clang w/ 32bit libc++ 63 | - compiler: clang 64 | addons: 65 | apt: 66 | packages: 67 | - clang-3.8 68 | - g++-multilib 69 | env: 70 | - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug 71 | - LIBCXX_BUILD=1 72 | - BUILD_32_BITS=ON 73 | - EXTRA_FLAGS="-stdlib=libc++ -m32" 74 | # Clang w/ 32bit libc++ 75 | - compiler: clang 76 | addons: 77 | apt: 78 | packages: 79 | - clang-3.8 80 | - g++-multilib 81 | env: 82 | - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release 83 | - LIBCXX_BUILD=1 84 | - BUILD_32_BITS=ON 85 | - EXTRA_FLAGS="-stdlib=libc++ -m32" 86 | # Clang w/ libc++, ASAN, UBSAN 87 | - compiler: clang 88 | addons: 89 | apt: 90 | packages: 91 | clang-3.8 92 | env: 93 | - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug 94 | - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address" 95 | - ENABLE_SANITIZER=1 96 | - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all" 97 | - UBSAN_OPTIONS=print_stacktrace=1 98 | # Clang w/ libc++ and MSAN 99 | - compiler: clang 100 | addons: 101 | apt: 102 | packages: 103 | clang-3.8 104 | env: 105 | - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug 106 | - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins 107 | - ENABLE_SANITIZER=1 108 | - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" 109 | # Clang w/ libc++ and MSAN 110 | - compiler: clang 111 | addons: 112 | apt: 113 | packages: 114 | 
clang-3.8 115 | env: 116 | - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo 117 | - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread 118 | - ENABLE_SANITIZER=1 119 | - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" 120 | - os: osx 121 | osx_image: xcode8.3 122 | compiler: clang 123 | env: 124 | - COMPILER=clang++ BUILD_TYPE=Debug 125 | - os: osx 126 | osx_image: xcode8.3 127 | compiler: clang 128 | env: 129 | - COMPILER=clang++ BUILD_TYPE=Release 130 | - os: osx 131 | osx_image: xcode8.3 132 | compiler: clang 133 | env: 134 | - COMPILER=clang++ BUILD_TYPE=Release BUILD_32_BITS=ON 135 | - os: osx 136 | osx_image: xcode8.3 137 | compiler: gcc 138 | env: 139 | - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug 140 | 141 | before_script: 142 | - if [ -n "${LIBCXX_BUILD}" ]; then 143 | source .travis-libcxx-setup.sh; 144 | fi 145 | - if [ -n "${ENABLE_SANITIZER}" ]; then 146 | export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF"; 147 | else 148 | export EXTRA_OPTIONS=""; 149 | fi 150 | - mkdir -p build && cd build 151 | 152 | before_install: 153 | - if [ -z "$BUILD_32_BITS" ]; then 154 | export BUILD_32_BITS=OFF && echo disabling 32 bit build; 155 | fi 156 | - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then 157 | sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test"; 158 | sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60"; 159 | fi 160 | 161 | install: 162 | - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then 163 | travis_wait sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6; 164 | fi 165 | - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then 166 | travis_wait sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools; 167 | sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/; 168 | fi 169 | - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then 170 | 
PATH=~/.local/bin:${PATH}; 171 | pip install --user --upgrade pip; 172 | travis_wait pip install --user cpp-coveralls; 173 | fi 174 | - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then 175 | rm -f /usr/local/include/c++; 176 | brew update; 177 | travis_wait brew install gcc@7; 178 | fi 179 | - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then 180 | sudo apt-get update -qq; 181 | sudo apt-get install -qq unzip; 182 | wget https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-linux-x86_64.sh --output-document bazel-installer.sh; 183 | travis_wait sudo bash bazel-installer.sh; 184 | fi 185 | - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then 186 | curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-darwin-x86_64.sh; 187 | travis_wait sudo bash bazel-installer.sh; 188 | fi 189 | 190 | script: 191 | - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} .. 192 | - make 193 | - ctest -C ${BUILD_TYPE} --output-on-failure 194 | - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/... 195 | 196 | after_success: 197 | - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then 198 | coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .; 199 | fi 200 | -------------------------------------------------------------------------------- /vendor/benchmark/src/json_reporter.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "benchmark/benchmark.h" 16 | #include "complexity.h" 17 | 18 | #include 19 | #include 20 | #include // for setprecision 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | #include "string_util.h" 28 | #include "timers.h" 29 | 30 | namespace benchmark { 31 | 32 | namespace { 33 | 34 | std::string FormatKV(std::string const& key, std::string const& value) { 35 | return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str()); 36 | } 37 | 38 | std::string FormatKV(std::string const& key, const char* value) { 39 | return StrFormat("\"%s\": \"%s\"", key.c_str(), value); 40 | } 41 | 42 | std::string FormatKV(std::string const& key, bool value) { 43 | return StrFormat("\"%s\": %s", key.c_str(), value ? 
"true" : "false"); 44 | } 45 | 46 | std::string FormatKV(std::string const& key, int64_t value) { 47 | std::stringstream ss; 48 | ss << '"' << key << "\": " << value; 49 | return ss.str(); 50 | } 51 | 52 | std::string FormatKV(std::string const& key, double value) { 53 | std::stringstream ss; 54 | ss << '"' << key << "\": "; 55 | 56 | const auto max_digits10 = std::numeric_limits::max_digits10; 57 | const auto max_fractional_digits10 = max_digits10 - 1; 58 | 59 | ss << std::scientific << std::setprecision(max_fractional_digits10) << value; 60 | return ss.str(); 61 | } 62 | 63 | int64_t RoundDouble(double v) { return static_cast(v + 0.5); } 64 | 65 | } // end namespace 66 | 67 | bool JSONReporter::ReportContext(const Context& context) { 68 | std::ostream& out = GetOutputStream(); 69 | 70 | out << "{\n"; 71 | std::string inner_indent(2, ' '); 72 | 73 | // Open context block and print context information. 74 | out << inner_indent << "\"context\": {\n"; 75 | std::string indent(4, ' '); 76 | 77 | std::string walltime_value = LocalDateTimeString(); 78 | out << indent << FormatKV("date", walltime_value) << ",\n"; 79 | 80 | if (Context::executable_name) { 81 | // windows uses backslash for its path separator, 82 | // which must be escaped in JSON otherwise it blows up conforming JSON 83 | // decoders 84 | std::string executable_name = Context::executable_name; 85 | ReplaceAll(&executable_name, "\\", "\\\\"); 86 | out << indent << FormatKV("executable", executable_name) << ",\n"; 87 | } 88 | 89 | CPUInfo const& info = context.cpu_info; 90 | out << indent << FormatKV("num_cpus", static_cast(info.num_cpus)) 91 | << ",\n"; 92 | out << indent 93 | << FormatKV("mhz_per_cpu", 94 | RoundDouble(info.cycles_per_second / 1000000.0)) 95 | << ",\n"; 96 | out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) 97 | << ",\n"; 98 | 99 | out << indent << "\"caches\": [\n"; 100 | indent = std::string(6, ' '); 101 | std::string cache_indent(8, ' '); 102 | for (size_t i = 0; i 
< info.caches.size(); ++i) { 103 | auto& CI = info.caches[i]; 104 | out << indent << "{\n"; 105 | out << cache_indent << FormatKV("type", CI.type) << ",\n"; 106 | out << cache_indent << FormatKV("level", static_cast(CI.level)) 107 | << ",\n"; 108 | out << cache_indent 109 | << FormatKV("size", static_cast(CI.size) * 1000u) << ",\n"; 110 | out << cache_indent 111 | << FormatKV("num_sharing", static_cast(CI.num_sharing)) 112 | << "\n"; 113 | out << indent << "}"; 114 | if (i != info.caches.size() - 1) out << ","; 115 | out << "\n"; 116 | } 117 | indent = std::string(4, ' '); 118 | out << indent << "],\n"; 119 | 120 | #if defined(NDEBUG) 121 | const char build_type[] = "release"; 122 | #else 123 | const char build_type[] = "debug"; 124 | #endif 125 | out << indent << FormatKV("library_build_type", build_type) << "\n"; 126 | // Close context block and open the list of benchmarks. 127 | out << inner_indent << "},\n"; 128 | out << inner_indent << "\"benchmarks\": [\n"; 129 | return true; 130 | } 131 | 132 | void JSONReporter::ReportRuns(std::vector const& reports) { 133 | if (reports.empty()) { 134 | return; 135 | } 136 | std::string indent(4, ' '); 137 | std::ostream& out = GetOutputStream(); 138 | if (!first_report_) { 139 | out << ",\n"; 140 | } 141 | first_report_ = false; 142 | 143 | for (auto it = reports.begin(); it != reports.end(); ++it) { 144 | out << indent << "{\n"; 145 | PrintRunData(*it); 146 | out << indent << '}'; 147 | auto it_cp = it; 148 | if (++it_cp != reports.end()) { 149 | out << ",\n"; 150 | } 151 | } 152 | } 153 | 154 | void JSONReporter::Finalize() { 155 | // Close the list of benchmarks and the top level object. 
156 | GetOutputStream() << "\n ]\n}\n"; 157 | } 158 | 159 | void JSONReporter::PrintRunData(Run const& run) { 160 | std::string indent(6, ' '); 161 | std::ostream& out = GetOutputStream(); 162 | out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; 163 | out << indent << FormatKV("run_name", run.run_name) << ",\n"; 164 | out << indent << FormatKV("run_type", [&run]() -> const char* { 165 | switch (run.run_type) { 166 | case BenchmarkReporter::Run::RT_Iteration: 167 | return "iteration"; 168 | case BenchmarkReporter::Run::RT_Aggregate: 169 | return "aggregate"; 170 | } 171 | BENCHMARK_UNREACHABLE(); 172 | }()) << ",\n"; 173 | if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) { 174 | out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n"; 175 | } 176 | if (run.error_occurred) { 177 | out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n"; 178 | out << indent << FormatKV("error_message", run.error_message) << ",\n"; 179 | } 180 | if (!run.report_big_o && !run.report_rms) { 181 | out << indent << FormatKV("iterations", run.iterations) << ",\n"; 182 | out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n"; 183 | out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime()); 184 | out << ",\n" 185 | << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); 186 | } else if (run.report_big_o) { 187 | out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) 188 | << ",\n"; 189 | out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) 190 | << ",\n"; 191 | out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; 192 | out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); 193 | } else if (run.report_rms) { 194 | out << indent << FormatKV("rms", run.GetAdjustedCPUTime()); 195 | } 196 | 197 | for (auto& c : run.counters) { 198 | out << ",\n" << indent << FormatKV(c.first, c.second); 199 | } 200 | 201 | if 
(run.has_memory_result) { 202 | out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter); 203 | out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used); 204 | } 205 | 206 | if (!run.report_label.empty()) { 207 | out << ",\n" << indent << FormatKV("label", run.report_label); 208 | } 209 | out << '\n'; 210 | } 211 | 212 | } // end namespace benchmark 213 | -------------------------------------------------------------------------------- /vendor/benchmark/test/complexity_test.cc: -------------------------------------------------------------------------------- 1 | #undef NDEBUG 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "benchmark/benchmark.h" 8 | #include "output_test.h" 9 | 10 | namespace { 11 | 12 | #define ADD_COMPLEXITY_CASES(...) \ 13 | int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__) 14 | 15 | int AddComplexityTest(std::string test_name, std::string big_o_test_name, 16 | std::string rms_test_name, std::string big_o) { 17 | SetSubstitutions({{"%name", test_name}, 18 | {"%bigo_name", big_o_test_name}, 19 | {"%rms_name", rms_test_name}, 20 | {"%bigo_str", "[ ]* %float " + big_o}, 21 | {"%bigo", big_o}, 22 | {"%rms", "[ ]*[0-9]+ %"}}); 23 | AddCases( 24 | TC_ConsoleOut, 25 | {{"^%bigo_name %bigo_str %bigo_str[ ]*$"}, 26 | {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. 
27 | {"^%rms_name %rms %rms[ ]*$", MR_Next}}); 28 | AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, 29 | {"\"run_name\": \"%name\",$", MR_Next}, 30 | {"\"run_type\": \"aggregate\",$", MR_Next}, 31 | {"\"aggregate_name\": \"BigO\",$", MR_Next}, 32 | {"\"cpu_coefficient\": %float,$", MR_Next}, 33 | {"\"real_coefficient\": %float,$", MR_Next}, 34 | {"\"big_o\": \"%bigo\",$", MR_Next}, 35 | {"\"time_unit\": \"ns\"$", MR_Next}, 36 | {"}", MR_Next}, 37 | {"\"name\": \"%rms_name\",$"}, 38 | {"\"run_name\": \"%name\",$", MR_Next}, 39 | {"\"run_type\": \"aggregate\",$", MR_Next}, 40 | {"\"aggregate_name\": \"RMS\",$", MR_Next}, 41 | {"\"rms\": %float$", MR_Next}, 42 | {"}", MR_Next}}); 43 | AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"}, 44 | {"^\"%bigo_name\"", MR_Not}, 45 | {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}}); 46 | return 0; 47 | } 48 | 49 | } // end namespace 50 | 51 | // ========================================================================= // 52 | // --------------------------- Testing BigO O(1) --------------------------- // 53 | // ========================================================================= // 54 | 55 | void BM_Complexity_O1(benchmark::State& state) { 56 | for (auto _ : state) { 57 | for (int i = 0; i < 1024; ++i) { 58 | benchmark::DoNotOptimize(&i); 59 | } 60 | } 61 | state.SetComplexityN(state.range(0)); 62 | } 63 | BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); 64 | BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(); 65 | BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) { 66 | return 1.0; 67 | }); 68 | 69 | const char *one_test_name = "BM_Complexity_O1"; 70 | const char *big_o_1_test_name = "BM_Complexity_O1_BigO"; 71 | const char *rms_o_1_test_name = "BM_Complexity_O1_RMS"; 72 | const char *enum_big_o_1 = "\\([0-9]+\\)"; 73 | // FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto 74 | // deduced. 
75 | // See https://github.com/google/benchmark/issues/272 76 | const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)"; 77 | const char *lambda_big_o_1 = "f\\(N\\)"; 78 | 79 | // Add enum tests 80 | ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, 81 | enum_big_o_1); 82 | 83 | // Add auto enum tests 84 | ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, 85 | auto_big_o_1); 86 | 87 | // Add lambda tests 88 | ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, 89 | lambda_big_o_1); 90 | 91 | // ========================================================================= // 92 | // --------------------------- Testing BigO O(N) --------------------------- // 93 | // ========================================================================= // 94 | 95 | std::vector ConstructRandomVector(int64_t size) { 96 | std::vector v; 97 | v.reserve(static_cast(size)); 98 | for (int i = 0; i < size; ++i) { 99 | v.push_back(static_cast(std::rand() % size)); 100 | } 101 | return v; 102 | } 103 | 104 | void BM_Complexity_O_N(benchmark::State& state) { 105 | auto v = ConstructRandomVector(state.range(0)); 106 | // Test worst case scenario (item not in vector) 107 | const int64_t item_not_in_vector = state.range(0) * 2; 108 | for (auto _ : state) { 109 | benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); 110 | } 111 | state.SetComplexityN(state.range(0)); 112 | } 113 | BENCHMARK(BM_Complexity_O_N) 114 | ->RangeMultiplier(2) 115 | ->Range(1 << 10, 1 << 16) 116 | ->Complexity(benchmark::oN); 117 | BENCHMARK(BM_Complexity_O_N) 118 | ->RangeMultiplier(2) 119 | ->Range(1 << 10, 1 << 16) 120 | ->Complexity([](int64_t n) -> double { return static_cast(n); }); 121 | BENCHMARK(BM_Complexity_O_N) 122 | ->RangeMultiplier(2) 123 | ->Range(1 << 10, 1 << 16) 124 | ->Complexity(); 125 | 126 | const char *n_test_name = "BM_Complexity_O_N"; 127 | const char *big_o_n_test_name = "BM_Complexity_O_N_BigO"; 128 | const 
char *rms_o_n_test_name = "BM_Complexity_O_N_RMS"; 129 | const char *enum_auto_big_o_n = "N"; 130 | const char *lambda_big_o_n = "f\\(N\\)"; 131 | 132 | // Add enum tests 133 | ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, 134 | enum_auto_big_o_n); 135 | 136 | // Add lambda tests 137 | ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, 138 | lambda_big_o_n); 139 | 140 | // ========================================================================= // 141 | // ------------------------- Testing BigO O(N*lgN) ------------------------- // 142 | // ========================================================================= // 143 | 144 | static void BM_Complexity_O_N_log_N(benchmark::State& state) { 145 | auto v = ConstructRandomVector(state.range(0)); 146 | for (auto _ : state) { 147 | std::sort(v.begin(), v.end()); 148 | } 149 | state.SetComplexityN(state.range(0)); 150 | } 151 | static const double kLog2E = 1.44269504088896340736; 152 | BENCHMARK(BM_Complexity_O_N_log_N) 153 | ->RangeMultiplier(2) 154 | ->Range(1 << 10, 1 << 16) 155 | ->Complexity(benchmark::oNLogN); 156 | BENCHMARK(BM_Complexity_O_N_log_N) 157 | ->RangeMultiplier(2) 158 | ->Range(1 << 10, 1 << 16) 159 | ->Complexity([](int64_t n) { return kLog2E * n * log(static_cast(n)); }); 160 | BENCHMARK(BM_Complexity_O_N_log_N) 161 | ->RangeMultiplier(2) 162 | ->Range(1 << 10, 1 << 16) 163 | ->Complexity(); 164 | 165 | const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N"; 166 | const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; 167 | const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS"; 168 | const char *enum_auto_big_o_n_lg_n = "NlgN"; 169 | const char *lambda_big_o_n_lg_n = "f\\(N\\)"; 170 | 171 | // Add enum tests 172 | ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, 173 | rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); 174 | 175 | // Add lambda tests 176 | ADD_COMPLEXITY_CASES(n_lg_n_test_name, 
big_o_n_lg_n_test_name, 177 | rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n); 178 | 179 | // ========================================================================= // 180 | // --------------------------- TEST CASES END ------------------------------ // 181 | // ========================================================================= // 182 | 183 | int main(int argc, char *argv[]) { RunOutputTests(argc, argv); } 184 | --------------------------------------------------------------------------------