├── QMJSTL for vs2015 ├── AlgorithTemplate.zip └── readMe ├── QMJSTL ├── AVL_tree.h ├── algorithm_qmj.h ├── allocator.h ├── deque_qmj.h ├── forward_list_qmj.h ├── hashfunction.h ├── hashtable.h ├── heap.h ├── iterator_qmj.h ├── list_qmj.h ├── map_qmj.h ├── numeric_qmj.h ├── queue_qmj.h ├── rb_tree.h ├── set_qmj.h ├── splay_tree.h ├── stack_qmj.h ├── type_traits_qmj.h ├── unordered_map_qmj.h ├── unordered_set_qmj.h └── vector_qmj.h ├── README.md ├── benchmark ├── .clang-format ├── .gitignore ├── .travis-libcxx-setup.sh ├── .travis.yml ├── .ycm_extra_conf.py ├── AUTHORS ├── BUILD.bazel ├── CMakeLists.txt ├── CONTRIBUTING.md ├── CONTRIBUTORS ├── LICENSE ├── README.md ├── WORKSPACE ├── appveyor.yml ├── cmake │ ├── AddCXXCompilerFlag.cmake │ ├── CXXFeatureCheck.cmake │ ├── Config.cmake.in │ ├── GetGitVersion.cmake │ ├── HandleGTest.cmake │ ├── benchmark.pc.in │ ├── gnu_posix_regex.cpp │ ├── llvm-toolchain.cmake │ ├── posix_regex.cpp │ ├── std_regex.cpp │ ├── steady_clock.cpp │ └── thread_safety_attributes.cpp ├── docs │ ├── AssemblyTests.md │ └── tools.md ├── include │ └── benchmark │ │ └── benchmark.h ├── mingw.py ├── releasing.md ├── src │ ├── CMakeLists.txt │ ├── arraysize.h │ ├── benchmark.cc │ ├── benchmark_api_internal.h │ ├── benchmark_register.cc │ ├── benchmark_register.h │ ├── check.h │ ├── colorprint.cc │ ├── colorprint.h │ ├── commandlineflags.cc │ ├── commandlineflags.h │ ├── complexity.cc │ ├── complexity.h │ ├── console_reporter.cc │ ├── counter.cc │ ├── counter.h │ ├── csv_reporter.cc │ ├── cycleclock.h │ ├── internal_macros.h │ ├── json_reporter.cc │ ├── log.h │ ├── mutex.h │ ├── re.h │ ├── reporter.cc │ ├── sleep.cc │ ├── sleep.h │ ├── statistics.cc │ ├── statistics.h │ ├── string_util.cc │ ├── string_util.h │ ├── sysinfo.cc │ ├── thread_manager.h │ ├── thread_timer.h │ ├── timers.cc │ └── timers.h ├── test │ ├── AssemblyTests.cmake │ ├── BUILD │ ├── CMakeLists.txt │ ├── basic_test.cc │ ├── benchmark_gtest.cc │ ├── benchmark_test.cc │ ├── clobber_memory_assembly_test.cc │ ├── complexity_test.cc │ ├── cxx03_test.cc │ ├── diagnostics_test.cc │ ├── donotoptimize_assembly_test.cc │ ├── donotoptimize_test.cc │ ├── filter_test.cc │ ├── fixture_test.cc │ ├── map_test.cc │ ├── multiple_ranges_test.cc │ ├── options_test.cc │ ├── output_test.h │ ├── output_test_helper.cc │ ├── register_benchmark_test.cc │ ├── reporter_output_test.cc │ ├── skip_with_error_test.cc │ ├── state_assembly_test.cc │ ├── statistics_gtest.cc │ ├── templated_fixture_test.cc │ ├── user_counters_tabular_test.cc │ └── user_counters_test.cc └── tools │ ├── compare.py │ ├── compare_bench.py │ ├── gbench │ ├── Inputs │ │ ├── test1_run1.json │ │ ├── test1_run2.json │ │ └── test2_run.json │ ├── __init__.py │ ├── report.py │ └── util.py │ └── strip_asm.py ├── image ├── RB_tree │ ├── RB_tree for map.png │ ├── multiset.png │ ├── nothing │ ├── set multiData.png │ └── set.png ├── algorithm │ ├── next_permutation.png │ ├── power.gif │ ├── rotate BIter.png │ ├── rotate_FIter.png │ ├── search_n.png │ ├── sort multi.png │ ├── sort random.png │ ├── sort sorted.png │ ├── stable_sort multi-sorted.png │ ├── stable_sort random.png │ └── stable_sort reverse.png ├── avl_tree │ ├── avl_tree random.png │ └── avl_tree.png ├── deque.xml ├── deque │ ├── deque- int.png │ ├── deque-not pod.png │ ├── qmj__deuqe.png │ └── std__deque.png ├── hashtable │ ├── hashtable.png │ ├── qmj_hashtable.png │ ├── qmj_hashtable1.png │ ├── unordered_multiset.png │ ├── unordered_set multiData.png │ └── unordered_set.png ├── heap │ ├── fib_heap.png │ ├── 
heap_sort.png │ ├── make_heap.png │ └── priority_queue.png ├── list │ ├── forward_list.png │ └── list.png └── vector │ └── vector.png └── test ├── build_run.sh ├── test_create_data.h ├── test_deque.cpp ├── test_forward_list.cpp ├── test_list.cpp ├── test_map.cpp ├── test_multimap.cpp ├── test_multiset.cpp ├── test_priority_queue.cpp ├── test_queue.cpp ├── test_set.cpp ├── test_stack.cpp ├── test_unordered_map.cpp ├── test_unordered_multimap.cpp ├── test_unordered_multiset.cpp ├── test_unordered_set.cpp └── test_vector.cpp /QMJSTL for vs2015/AlgorithTemplate.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/QMJSTL for vs2015/AlgorithTemplate.zip -------------------------------------------------------------------------------- /QMJSTL for vs2015/readMe: -------------------------------------------------------------------------------- 1 | ## After unzipping, open Visual Studio 2015 and click, in order: File -- Open -- Project/Solution -- AlgorithTemplate -- AlgorithTemplate.sln (double-click) 2 | ### All QMJSTL containers and algorithms live in the qmj:: namespace. Example of using map: 3 | qmj::map<std::string, int> str_mp; 4 | -------------------------------------------------------------------------------- /QMJSTL/hashfunction.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #ifndef _HASHFUNCTION_ 3 | #define _HASHFUNCTION_ 4 | 5 | #include <string> 6 | 7 | namespace qmj 8 | { 9 | template <typename Key> 10 | struct hash 11 | { 12 | }; 13 | 14 | template <> 15 | struct hash<std::string> 16 | { 17 | size_t operator()(const std::string &str) const 18 | { 19 | size_t h = 0; 20 | for (auto iter = str.cbegin(); iter != str.cend(); ++iter) 21 | h = (h << 2) + *iter; 22 | return h; 23 | } 24 | }; 25 | 26 | inline size_t hash_string(const char *s) 27 | { 28 | size_t h = 0; 29 | for (; *s; ++s) 30 | h = (h << 2) + *s; 31 | return (h); 32 | } 33 | 34 | template <> 35 | struct hash<char *> 36 | { 37 | size_t operator()(char *s) const { return hash_string(s); } 38 | }; 39 | 40 | template <> 41 | struct hash<const char *> 42 | { 43 | size_t operator()(const char *s) const { return hash_string(s); } 44 | }; 45 | 46 | template <> 47 | struct hash<char> 48 | { 49 | size_t operator()(char x) const { return x; } 50 | }; 51 | 52 | template <> 53 | struct hash<signed char> 54 | { 55 | size_t operator()(signed char x) const { return x; } 56 | }; 57 | 58 | template <> 59 | struct hash<unsigned char> 60 | { 61 | size_t operator()(unsigned char x) const { return x; } 62 | }; 63 | 64 | template <> 65 | struct hash<short> 66 | { 67 | size_t operator()(short x) const { return x; } 68 | }; 69 | 70 | template <> 71 | struct hash<unsigned short> 72 | { 73 | size_t operator()(unsigned short x) const { return x; } 74 | }; 75 | 76 | template <> 77 | struct hash<int> 78 | { 79 | size_t operator()(int x) const { return x; } 80 | }; 81 | 82 | template <> 83 | struct hash<unsigned int> 84 | { 85 | size_t operator()(unsigned int x) const { return x; } 86 | }; 87 | 88 | template <> 89 | struct hash<long> 90 | { 91 | size_t operator()(long x) const { return x; } 92 | }; 93 | 94 | template <> 95 | struct hash<unsigned long> 96 | { 97 | size_t operator()(unsigned long x) const { return x; } 98 | }; 99 | 100 | } // namespace qmj 101 | 102 | #endif -------------------------------------------------------------------------------- /QMJSTL/iterator_qmj.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #ifndef _ITERATOR_QMJ_ 3 | #define _ITERATOR_QMJ_ 4 | #include "type_traits_qmj.h" 5 | 6 | namespace qmj 7 | { 8 | template <typename Iter> 9 | inline typename qmj::iterator_traits<Iter>::iterator_category iterator_category( 10 |
const Iter &iter) 11 | { 12 | return typename qmj::iterator_traits<Iter>::iterator_category(); 13 | } 14 | 15 | template <typename Iter> 16 | inline typename qmj::iterator_traits<Iter>::value_type value_type( 17 | const Iter &iter) 18 | { 19 | return typename qmj::iterator_traits<Iter>::value_type(); 20 | } 21 | 22 | template <typename Iter> 23 | inline typename qmj::iterator_traits<Iter>::difference_type difference_type( 24 | const Iter &iter) 25 | { 26 | return typename qmj::iterator_traits<Iter>::difference_type(); 27 | } 28 | 29 | template <typename Iter> 30 | inline typename qmj::iterator_traits<Iter>::pointer pointer(const Iter &iter) 31 | { 32 | return typename qmj::iterator_traits<Iter>::pointer(); 33 | } 34 | 35 | template <typename Iter> 36 | inline typename qmj::iterator_traits<Iter>::reference reference( 37 | const Iter &iter) 38 | { 39 | return typename qmj::iterator_traits<Iter>::reference(); 40 | } 41 | } 42 | 43 | namespace qmj 44 | { 45 | template <typename Iter, typename difference> 46 | inline void advance_imple(Iter &iter, difference n, std::input_iterator_tag) 47 | { 48 | while (n--) 49 | ++iter; 50 | } 51 | 52 | template <typename Iter, typename difference> 53 | inline void advance_imple(Iter &iter, difference n, 54 | std::bidirectional_iterator_tag) 55 | { 56 | if (n >= 0) 57 | while (n--) 58 | ++iter; 59 | else 60 | while (n++) 61 | --iter; 62 | } 63 | 64 | template <typename Iter, typename difference> 65 | inline void advance_imple(Iter &iter, difference n, 66 | std::random_access_iterator_tag) 67 | { 68 | iter += n; 69 | } 70 | 71 | template <typename Iter, typename difference> 72 | inline void advance(Iter &iter, difference n) 73 | { 74 | advance_imple(iter, n, qmj::iterator_category(iter)); 75 | } 76 | 77 | template <typename Iter> 78 | inline typename qmj::iterator_traits<Iter>::difference_type distance_imple( 79 | Iter first, Iter last, std::input_iterator_tag) 80 | { 81 | typename qmj::iterator_traits<Iter>::difference_type n = 0; 82 | for (; first != last; ++first) 83 | ++n; 84 | return (n); 85 | } 86 | 87 | template <typename Iter> 88 | inline typename qmj::iterator_traits<Iter>::difference_type distance_imple( 89 | Iter first, Iter last, std::random_access_iterator_tag) 90 | { 91 | return last - first; 92 | } 93 | 94 | template <typename Iter> 95 | inline typename qmj::iterator_traits<Iter>::difference_type distance( 96 | Iter first, Iter last) 97 | { 98 | return distance_imple(first, last, qmj::iterator_category(first)); 99 | } 100 | } 101 | 102 | #endif 103 | -------------------------------------------------------------------------------- /QMJSTL/numeric_qmj.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #ifndef _NUMERIC_QMJ_ 3 | #define _NUMERIC_QMJ_ 4 | #include "iterator_qmj.h" 5 | #include "type_traits_qmj.h" 6 | 7 | namespace qmj { 8 | template 9 | inline value_type accumulate(Iter first, Iter last, value_type init, 10 | const Fn2& fn2) { 11 | for (; first != last; ++first) init = fn2(*first, init); 12 | return (init); 13 | } 14 | 15 | template 16 | inline value_type accumulate(Iter first, Iter last, value_type init) { 17 | return (_QMJ accumulate(first, last, init, std::plus<>())); 18 | } 19 | 20 | template 21 | OIter adjacent_difference(Iter first, Iter last, OIter dest) { 22 | return (_QMJ adjacent_difference(first, last, dest, std::minus<>())); 23 | } 24 | 25 | template 26 | inline OIter adjacent_difference(Iter first, Iter last, OIter dest, 27 | const Fn2& fn2) { 28 | if (first != last) { 29 | iter_val_t val = *first; 30 | for (*dest = val; ++first != last;) { 31 | iter_val_t tmp = *first; 32 | *++dest = fn2(tmp, val); 33 | val = std::move(tmp); 34 | } 35 | ++dest; 36 | } 37 | return (dest); 38 | } 39 | 40 | template 42 | inline value_type inner_product(Iter1 first1, Iter1 last1, Iter2 first2, 43 | value_type init, const Fn2_1& fn2_1, 44 | const Fn2_2& 
fn2_2) { 45 | for (; first1 != last1; ++first1, ++first2) 46 | init = fn2_1(init, fn2_2(*first1, *first2)); 47 | return (init); 48 | } 49 | 50 | template 51 | inline value_type inner_product(Iter1 first1, Iter1 last1, Iter2 first2, 52 | value_type init) { 53 | return (_QMJ inner_product(first1, last1, first2, init, std::plus<>(), 54 | std::multiplies<>())); 55 | } 56 | 57 | template 58 | inline OIter partial_sum(Iter first, Iter last, OIter dest, const Fn2& fn2) { 59 | if (first != last) { 60 | iter_val_t val = *first; 61 | for (*dest = val; ++first != last; *++dest = val) val = fn2(*first, val); 62 | ++dest; 63 | } 64 | return (dest); 65 | } 66 | 67 | template 68 | inline OIter partial_sum(Iter first, Iter last, OIter dest) { 69 | return (_QMJ partial_sum(first, last, dest, std::plus<>())); 70 | } 71 | 72 | template 73 | value_type power(value_type x, Integer n, const Fn2& fn2) { 74 | if (n <= 0) return (x); 75 | for (; !(n & 1);) { 76 | n >>= 1; 77 | x = fn2(x, x); 78 | } 79 | value_type result = x; 80 | for (n >>= 1; n != 0; n >>= 1) { 81 | x = fn2(x, x); 82 | if (n & 1) result = fn2(result, x); 83 | } 84 | return (result); 85 | } 86 | 87 | template 88 | value_type power(value_type x, Integer n) { 89 | return (_QMJ power(x, n, std::multiplies<>())); 90 | } 91 | 92 | template 93 | void iota(Iter first, Iter last, value_type val) { 94 | for (; first != last; ++first, ++val) *first = val; 95 | } 96 | } 97 | 98 | #endif 99 | -------------------------------------------------------------------------------- /QMJSTL/stack_qmj.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #ifndef _STACK_QMJ_ 3 | #define _STACK_QMJ_ 4 | #include 5 | #include "vector_qmj.h" 6 | 7 | namespace qmj 8 | { 9 | template > 10 | class stack 11 | { 12 | public: 13 | typedef typename Container::value_type value_type; 14 | typedef Container container_type; 15 | typedef typename container_type::size_type size_type; 16 | typedef typename container_type::reference reference; 17 | typedef typename container_type::const_reference const_reference; 18 | 19 | typedef stack self; 20 | 21 | stack() : c() {} 22 | 23 | template 24 | stack(Iter first, Iter last) : c(first, last) {} 25 | 26 | stack(const self &x) : c(x.c) {} 27 | 28 | stack(self &&x) : c(std::move(x.c)) {} 29 | 30 | explicit stack(Container &&c) : c(std::move(c)) {} 31 | 32 | explicit stack(const Container &cont) : c(cont) {} 33 | 34 | self &operator=(const self &x) 35 | { 36 | c = x.c; 37 | return (*this); 38 | } 39 | 40 | self &operator=(self &&x) 41 | { 42 | c = std::move(x.c); 43 | return (*this); 44 | } 45 | 46 | bool empty() const { return c.empty(); } 47 | 48 | reference top() { return c.back(); } 49 | 50 | const_reference top() const { return c.back(); } 51 | 52 | void push(const value_type &val) { c.push_back(val); } 53 | 54 | template 55 | void emplace(types &&... 
args) 56 | { 57 | c.emplace_back(std::forward(args)...); 58 | } 59 | 60 | void pop() { c.pop_back(); } 61 | 62 | size_t size() const { return c.size(); } 63 | 64 | value_type pop_top() 65 | { 66 | auto top = c.back(); 67 | c.pop_back(); 68 | return top; 69 | } 70 | 71 | void swap(self &x) noexcept { swap(c, x.c); } 72 | 73 | const Container &_get_constainer() const { return c; } 74 | 75 | private: 76 | Container c; 77 | }; 78 | 79 | template 80 | inline void swap(_QMJ stack &left, 81 | _QMJ stack &right) noexcept 82 | { 83 | left.swap(right); 84 | } 85 | 86 | template 87 | inline bool operator==(const _QMJ stack &left, 88 | const _QMJ stack &right) 89 | { 90 | return (left._get_constainer() == right._get_constainer()); 91 | } 92 | 93 | template 94 | inline bool operator!=(const _QMJ stack &left, 95 | const _QMJ stack &right) 96 | { 97 | return (left._get_constainer() != right._get_constainer()); 98 | } 99 | 100 | template 101 | inline bool operator<(const _QMJ stack &left, 102 | const _QMJ stack &right) 103 | { 104 | return (left._get_constainer() < right._get_constainer()); 105 | } 106 | 107 | template 108 | inline bool operator<=(const _QMJ stack &left, 109 | const _QMJ stack &right) 110 | { 111 | return (left._get_constainer() <= right._get_constainer()); 112 | } 113 | 114 | template 115 | inline bool operator>(const _QMJ stack &left, 116 | const _QMJ stack &right) 117 | { 118 | return (left._get_constainer() > right._get_constainer()); 119 | } 120 | 121 | template 122 | inline bool operator>=(const _QMJ stack &left, 123 | const _QMJ stack &right) 124 | { 125 | return (left._get_constainer() >= right._get_constainer()); 126 | } 127 | } // namespace qmj 128 | 129 | #endif 130 | -------------------------------------------------------------------------------- /benchmark/.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | BasedOnStyle: Google 4 | ... 5 | 6 | -------------------------------------------------------------------------------- /benchmark/.gitignore: -------------------------------------------------------------------------------- 1 | *.a 2 | *.so 3 | *.so.?* 4 | *.dll 5 | *.exe 6 | *.dylib 7 | *.cmake 8 | !/cmake/*.cmake 9 | !/test/AssemblyTests.cmake 10 | *~ 11 | *.pyc 12 | __pycache__ 13 | 14 | # lcov 15 | *.lcov 16 | /lcov 17 | 18 | # cmake files. 19 | /Testing 20 | CMakeCache.txt 21 | CMakeFiles/ 22 | cmake_install.cmake 23 | 24 | # makefiles. 25 | Makefile 26 | 27 | # in-source build. 28 | bin/ 29 | lib/ 30 | /test/*_test 31 | 32 | # exuberant ctags. 33 | tags 34 | 35 | # YouCompleteMe configuration. 36 | .ycm_extra_conf.pyc 37 | 38 | # ninja generated files. 39 | .ninja_deps 40 | .ninja_log 41 | build.ninja 42 | install_manifest.txt 43 | rules.ninja 44 | 45 | # bazel output symlinks. 46 | bazel-* 47 | 48 | # out-of-source build top-level folders. 
49 | build/ 50 | _build/ 51 | 52 | # in-source dependencies 53 | /googletest/ 54 | 55 | # Visual Studio 2015/2017 cache/options directory 56 | .vs/ 57 | CMakeSettings.json 58 | -------------------------------------------------------------------------------- /benchmark/.travis-libcxx-setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Install a newer CMake version 4 | curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh 5 | chmod +x install-cmake.sh 6 | sudo ./install-cmake.sh --prefix=/usr/local --skip-license 7 | 8 | # Checkout LLVM sources 9 | git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source 10 | git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx 11 | git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi 12 | 13 | # Setup libc++ options 14 | if [ -z "$BUILD_32_BITS" ]; then 15 | export BUILD_32_BITS=OFF && echo disabling 32 bit build 16 | fi 17 | 18 | # Build and install libc++ (Use unstable ABI for better sanitizer coverage) 19 | mkdir llvm-build && cd llvm-build 20 | cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \ 21 | -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \ 22 | -DLIBCXX_ABI_UNSTABLE=ON \ 23 | -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \ 24 | -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \ 25 | ../llvm-source 26 | make cxx -j2 27 | sudo make install-cxxabi install-cxx 28 | cd ../ 29 | -------------------------------------------------------------------------------- /benchmark/.ycm_extra_conf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import ycm_core 3 | 4 | # These are the compilation flags that will be used in case there's no 5 | # compilation database set (by default, one is not set). 6 | # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. 7 | flags = [ 8 | '-Wall', 9 | '-Werror', 10 | '-pedantic-errors', 11 | '-std=c++0x', 12 | '-fno-strict-aliasing', 13 | '-O3', 14 | '-DNDEBUG', 15 | # ...and the same thing goes for the magic -x option which specifies the 16 | # language that the files to be compiled are written in. This is mostly 17 | # relevant for c++ headers. 18 | # For a C project, you would set this to 'c' instead of 'c++'. 19 | '-x', 'c++', 20 | '-I', 'include', 21 | '-isystem', '/usr/include', 22 | '-isystem', '/usr/local/include', 23 | ] 24 | 25 | 26 | # Set this to the absolute path to the folder (NOT the file!) containing the 27 | # compile_commands.json file to use that instead of 'flags'. See here for 28 | # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html 29 | # 30 | # Most projects will NOT need to set this to anything; you can just change the 31 | # 'flags' list of compilation flags. Notice that YCM itself uses that approach. 
32 | compilation_database_folder = '' 33 | 34 | if os.path.exists( compilation_database_folder ): 35 | database = ycm_core.CompilationDatabase( compilation_database_folder ) 36 | else: 37 | database = None 38 | 39 | SOURCE_EXTENSIONS = [ '.cc' ] 40 | 41 | def DirectoryOfThisScript(): 42 | return os.path.dirname( os.path.abspath( __file__ ) ) 43 | 44 | 45 | def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): 46 | if not working_directory: 47 | return list( flags ) 48 | new_flags = [] 49 | make_next_absolute = False 50 | path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] 51 | for flag in flags: 52 | new_flag = flag 53 | 54 | if make_next_absolute: 55 | make_next_absolute = False 56 | if not flag.startswith( '/' ): 57 | new_flag = os.path.join( working_directory, flag ) 58 | 59 | for path_flag in path_flags: 60 | if flag == path_flag: 61 | make_next_absolute = True 62 | break 63 | 64 | if flag.startswith( path_flag ): 65 | path = flag[ len( path_flag ): ] 66 | new_flag = path_flag + os.path.join( working_directory, path ) 67 | break 68 | 69 | if new_flag: 70 | new_flags.append( new_flag ) 71 | return new_flags 72 | 73 | 74 | def IsHeaderFile( filename ): 75 | extension = os.path.splitext( filename )[ 1 ] 76 | return extension in [ '.h', '.hxx', '.hpp', '.hh' ] 77 | 78 | 79 | def GetCompilationInfoForFile( filename ): 80 | # The compilation_commands.json file generated by CMake does not have entries 81 | # for header files. So we do our best by asking the db for flags for a 82 | # corresponding source file, if any. If one exists, the flags for that file 83 | # should be good enough. 84 | if IsHeaderFile( filename ): 85 | basename = os.path.splitext( filename )[ 0 ] 86 | for extension in SOURCE_EXTENSIONS: 87 | replacement_file = basename + extension 88 | if os.path.exists( replacement_file ): 89 | compilation_info = database.GetCompilationInfoForFile( 90 | replacement_file ) 91 | if compilation_info.compiler_flags_: 92 | return compilation_info 93 | return None 94 | return database.GetCompilationInfoForFile( filename ) 95 | 96 | 97 | def FlagsForFile( filename, **kwargs ): 98 | if database: 99 | # Bear in mind that compilation_info.compiler_flags_ does NOT return a 100 | # python list, but a "list-like" StringVec object 101 | compilation_info = GetCompilationInfoForFile( filename ) 102 | if not compilation_info: 103 | return None 104 | 105 | final_flags = MakeRelativePathsInFlagsAbsolute( 106 | compilation_info.compiler_flags_, 107 | compilation_info.compiler_working_dir_ ) 108 | else: 109 | relative_to = DirectoryOfThisScript() 110 | final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) 111 | 112 | return { 113 | 'flags': final_flags, 114 | 'do_cache': True 115 | } 116 | -------------------------------------------------------------------------------- /benchmark/AUTHORS: -------------------------------------------------------------------------------- 1 | # This is the official list of benchmark authors for copyright purposes. 2 | # This file is distinct from the CONTRIBUTORS files. 3 | # See the latter for an explanation. 4 | # 5 | # Names should be added to this file as: 6 | # Name or Organization 7 | # The email address is not required for organizations. 8 | # 9 | # Please keep the list sorted. 10 | 11 | Albert Pretorius 12 | Arne Beer 13 | Carto 14 | Christopher Seymour 15 | David Coeurjolly 16 | Dirac Research 17 | Dominik Czarnota 18 | Eric Fiselier 19 | Eugene Zhuk 20 | Evgeny Safronov 21 | Felix Homann 22 | Google Inc. 
23 | International Business Machines Corporation 24 | Ismael Jimenez Martinez 25 | Jern-Kuan Leong 26 | JianXiong Zhou 27 | Joao Paulo Magalhaes 28 | Jussi Knuuttila 29 | Kaito Udagawa 30 | Kishan Kumar 31 | Lei Xu 32 | Matt Clarkson 33 | Maxim Vafin 34 | MongoDB Inc. 35 | Nick Hutchinson 36 | Oleksandr Sochka 37 | Paul Redmond 38 | Radoslav Yovchev 39 | Roman Lebedev 40 | Shuo Chen 41 | Steinar H. Gunderson 42 | Stripe, Inc. 43 | Yixuan Qiu 44 | Yusuke Suzuki 45 | Zbigniew Skowron 46 | -------------------------------------------------------------------------------- /benchmark/BUILD.bazel: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | config_setting( 4 | name = "windows", 5 | values = { 6 | "cpu": "x64_windows", 7 | }, 8 | visibility = [":__subpackages__"], 9 | ) 10 | 11 | cc_library( 12 | name = "benchmark", 13 | srcs = glob([ 14 | "src/*.cc", 15 | "src/*.h", 16 | ]), 17 | hdrs = ["include/benchmark/benchmark.h"], 18 | linkopts = select({ 19 | ":windows": ["-DEFAULTLIB:shlwapi.lib"], 20 | "//conditions:default": ["-pthread"], 21 | }), 22 | strip_include_prefix = "include", 23 | visibility = ["//visibility:public"], 24 | ) 25 | 26 | cc_library( 27 | name = "benchmark_internal_headers", 28 | hdrs = glob(["src/*.h"]), 29 | visibility = ["//test:__pkg__"], 30 | ) 31 | -------------------------------------------------------------------------------- /benchmark/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute # 2 | 3 | We'd love to accept your patches and contributions to this project. There are 4 | a just a few small guidelines you need to follow. 5 | 6 | 7 | ## Contributor License Agreement ## 8 | 9 | Contributions to any Google project must be accompanied by a Contributor 10 | License Agreement. This is not a copyright **assignment**, it simply gives 11 | Google permission to use and redistribute your contributions as part of the 12 | project. 13 | 14 | * If you are an individual writing original source code and you're sure you 15 | own the intellectual property, then you'll need to sign an [individual 16 | CLA][]. 17 | 18 | * If you work for a company that wants to allow you to contribute your work, 19 | then you'll need to sign a [corporate CLA][]. 20 | 21 | You generally only need to submit a CLA once, so if you've already submitted 22 | one (even if it was for a different project), you probably don't need to do it 23 | again. 24 | 25 | [individual CLA]: https://developers.google.com/open-source/cla/individual 26 | [corporate CLA]: https://developers.google.com/open-source/cla/corporate 27 | 28 | Once your CLA is submitted (or if you already submitted one for 29 | another Google project), make a commit adding yourself to the 30 | [AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part 31 | of your first [pull request][]. 32 | 33 | [AUTHORS]: AUTHORS 34 | [CONTRIBUTORS]: CONTRIBUTORS 35 | 36 | 37 | ## Submitting a patch ## 38 | 39 | 1. It's generally best to start by opening a new issue describing the bug or 40 | feature you're intending to fix. Even if you think it's relatively minor, 41 | it's helpful to know what people are working on. Mention in the initial 42 | issue that you are planning to work on that bug or feature so that it can 43 | be assigned to you. 44 | 45 | 1. Follow the normal process of [forking][] the project, and setup a new 46 | branch to work in. 
It's important that each group of changes be done in 47 | separate branches in order to ensure that a pull request only includes the 48 | commits related to that bug or feature. 49 | 50 | 1. Do your best to have [well-formed commit messages][] for each change. 51 | This provides consistency throughout the project, and ensures that commit 52 | messages are able to be formatted properly by various git tools. 53 | 54 | 1. Finally, push the commits to your fork and submit a [pull request][]. 55 | 56 | [forking]: https://help.github.com/articles/fork-a-repo 57 | [well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html 58 | [pull request]: https://help.github.com/articles/creating-a-pull-request 59 | -------------------------------------------------------------------------------- /benchmark/CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | # People who have agreed to one of the CLAs and can contribute patches. 2 | # The AUTHORS file lists the copyright holders; this file 3 | # lists people. For example, Google employees are listed here 4 | # but not in AUTHORS, because Google holds the copyright. 5 | # 6 | # Names should be added to this file only after verifying that 7 | # the individual or the individual's organization has agreed to 8 | # the appropriate Contributor License Agreement, found here: 9 | # 10 | # https://developers.google.com/open-source/cla/individual 11 | # https://developers.google.com/open-source/cla/corporate 12 | # 13 | # The agreement for individuals can be filled out on the web. 14 | # 15 | # When adding J Random Contributor's name to this file, 16 | # either J's name or J's organization's name should be 17 | # added to the AUTHORS file, depending on whether the 18 | # individual or corporate CLA was used. 19 | # 20 | # Names should be added to this file as: 21 | # Name 22 | # 23 | # Please keep the list sorted. 
24 | 25 | Albert Pretorius 26 | Arne Beer 27 | Billy Robert O'Neal III 28 | Chris Kennelly 29 | Christopher Seymour 30 | David Coeurjolly 31 | Dominic Hamon 32 | Dominik Czarnota 33 | Eric Fiselier 34 | Eugene Zhuk 35 | Evgeny Safronov 36 | Felix Homann 37 | Ismael Jimenez Martinez 38 | Jern-Kuan Leong 39 | JianXiong Zhou 40 | Joao Paulo Magalhaes 41 | John Millikin 42 | Jussi Knuuttila 43 | Kai Wolf 44 | Kishan Kumar 45 | Kaito Udagawa 46 | Lei Xu 47 | Matt Clarkson 48 | Maxim Vafin 49 | Nick Hutchinson 50 | Oleksandr Sochka 51 | Pascal Leroy 52 | Paul Redmond 53 | Pierre Phaneuf 54 | Radoslav Yovchev 55 | Raul Marin 56 | Ray Glover 57 | Robert Guo 58 | Roman Lebedev 59 | Shuo Chen 60 | Tobias Ulvgård 61 | Tom Madams 62 | Yixuan Qiu 63 | Yusuke Suzuki 64 | Zbigniew Skowron 65 | -------------------------------------------------------------------------------- /benchmark/WORKSPACE: -------------------------------------------------------------------------------- 1 | workspace(name = "com_github_google_benchmark") 2 | 3 | http_archive( 4 | name = "com_google_googletest", 5 | urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"], 6 | strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e", 7 | ) 8 | -------------------------------------------------------------------------------- /benchmark/appveyor.yml: -------------------------------------------------------------------------------- 1 | version: '{build}' 2 | 3 | image: Visual Studio 2017 4 | 5 | configuration: 6 | - Debug 7 | - Release 8 | 9 | environment: 10 | matrix: 11 | - compiler: msvc-15-seh 12 | generator: "Visual Studio 15 2017" 13 | 14 | - compiler: msvc-15-seh 15 | generator: "Visual Studio 15 2017 Win64" 16 | 17 | - compiler: msvc-14-seh 18 | generator: "Visual Studio 14 2015" 19 | 20 | - compiler: msvc-14-seh 21 | generator: "Visual Studio 14 2015 Win64" 22 | 23 | - compiler: msvc-12-seh 24 | generator: "Visual Studio 12 2013" 25 | 26 | - compiler: msvc-12-seh 27 | generator: "Visual Studio 12 2013 Win64" 28 | 29 | - compiler: gcc-5.3.0-posix 30 | generator: "MinGW Makefiles" 31 | cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin' 32 | APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 33 | 34 | matrix: 35 | fast_finish: true 36 | 37 | install: 38 | # git bash conflicts with MinGW makefiles 39 | - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%") 40 | - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%") 41 | 42 | build_script: 43 | - md _build -Force 44 | - cd _build 45 | - echo %configuration% 46 | - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON .. 47 | - cmake --build . 
--config %configuration% 48 | 49 | test_script: 50 | - ctest -c %configuration% --timeout 300 --output-on-failure 51 | 52 | artifacts: 53 | - path: '_build/CMakeFiles/*.log' 54 | name: logs 55 | - path: '_build/Testing/**/*.xml' 56 | name: test_results 57 | -------------------------------------------------------------------------------- /benchmark/cmake/AddCXXCompilerFlag.cmake: -------------------------------------------------------------------------------- 1 | # - Adds a compiler flag if it is supported by the compiler 2 | # 3 | # This function checks that the supplied compiler flag is supported and then 4 | # adds it to the corresponding compiler flags 5 | # 6 | # add_cxx_compiler_flag( []) 7 | # 8 | # - Example 9 | # 10 | # include(AddCXXCompilerFlag) 11 | # add_cxx_compiler_flag(-Wall) 12 | # add_cxx_compiler_flag(-no-strict-aliasing RELEASE) 13 | # Requires CMake 2.6+ 14 | 15 | if(__add_cxx_compiler_flag) 16 | return() 17 | endif() 18 | set(__add_cxx_compiler_flag INCLUDED) 19 | 20 | include(CheckCXXCompilerFlag) 21 | 22 | function(mangle_compiler_flag FLAG OUTPUT) 23 | string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) 24 | string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) 25 | string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) 26 | string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) 27 | set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE) 28 | endfunction(mangle_compiler_flag) 29 | 30 | function(add_cxx_compiler_flag FLAG) 31 | mangle_compiler_flag("${FLAG}" MANGLED_FLAG) 32 | set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") 33 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") 34 | check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) 35 | set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") 36 | if(${MANGLED_FLAG}) 37 | set(VARIANT ${ARGV1}) 38 | if(ARGV1) 39 | string(TOUPPER "_${VARIANT}" VARIANT) 40 | endif() 41 | set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) 42 | endif() 43 | endfunction() 44 | 45 | function(add_required_cxx_compiler_flag FLAG) 46 | mangle_compiler_flag("${FLAG}" MANGLED_FLAG) 47 | set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") 48 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") 49 | check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) 50 | set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") 51 | if(${MANGLED_FLAG}) 52 | set(VARIANT ${ARGV1}) 53 | if(ARGV1) 54 | string(TOUPPER "_${VARIANT}" VARIANT) 55 | endif() 56 | set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) 57 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) 58 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) 59 | set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) 60 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE) 61 | else() 62 | message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") 63 | endif() 64 | endfunction() 65 | 66 | function(check_cxx_warning_flag FLAG) 67 | mangle_compiler_flag("${FLAG}" MANGLED_FLAG) 68 | set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") 69 | # Add -Werror to ensure the compiler generates an error if the warning flag 70 | # doesn't exist. 
71 | set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}") 72 | check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) 73 | set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") 74 | endfunction() 75 | -------------------------------------------------------------------------------- /benchmark/cmake/CXXFeatureCheck.cmake: -------------------------------------------------------------------------------- 1 | # - Compile and run code to check for C++ features 2 | # 3 | # This functions compiles a source file under the `cmake` folder 4 | # and adds the corresponding `HAVE_[FILENAME]` flag to the CMake 5 | # environment 6 | # 7 | # cxx_feature_check( []) 8 | # 9 | # - Example 10 | # 11 | # include(CXXFeatureCheck) 12 | # cxx_feature_check(STD_REGEX) 13 | # Requires CMake 2.8.12+ 14 | 15 | if(__cxx_feature_check) 16 | return() 17 | endif() 18 | set(__cxx_feature_check INCLUDED) 19 | 20 | function(cxx_feature_check FILE) 21 | string(TOLOWER ${FILE} FILE) 22 | string(TOUPPER ${FILE} VAR) 23 | string(TOUPPER "HAVE_${VAR}" FEATURE) 24 | if (DEFINED HAVE_${VAR}) 25 | set(HAVE_${VAR} 1 PARENT_SCOPE) 26 | add_definitions(-DHAVE_${VAR}) 27 | return() 28 | endif() 29 | 30 | if (NOT DEFINED COMPILE_${FEATURE}) 31 | message("-- Performing Test ${FEATURE}") 32 | if(CMAKE_CROSSCOMPILING) 33 | try_compile(COMPILE_${FEATURE} 34 | ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp 35 | CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} 36 | LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) 37 | if(COMPILE_${FEATURE}) 38 | message(WARNING 39 | "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") 40 | set(RUN_${FEATURE} 0) 41 | else() 42 | set(RUN_${FEATURE} 1) 43 | endif() 44 | else() 45 | message("-- Performing Test ${FEATURE}") 46 | try_run(RUN_${FEATURE} COMPILE_${FEATURE} 47 | ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp 48 | CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} 49 | LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) 50 | endif() 51 | endif() 52 | 53 | if(RUN_${FEATURE} EQUAL 0) 54 | message("-- Performing Test ${FEATURE} -- success") 55 | set(HAVE_${VAR} 1 PARENT_SCOPE) 56 | add_definitions(-DHAVE_${VAR}) 57 | else() 58 | if(NOT COMPILE_${FEATURE}) 59 | message("-- Performing Test ${FEATURE} -- failed to compile") 60 | else() 61 | message("-- Performing Test ${FEATURE} -- compiled but failed to run") 62 | endif() 63 | endif() 64 | endfunction() 65 | -------------------------------------------------------------------------------- /benchmark/cmake/Config.cmake.in: -------------------------------------------------------------------------------- 1 | include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") 2 | -------------------------------------------------------------------------------- /benchmark/cmake/GetGitVersion.cmake: -------------------------------------------------------------------------------- 1 | # - Returns a version string from Git tags 2 | # 3 | # This function inspects the annotated git tags for the project and returns a string 4 | # into a CMake variable 5 | # 6 | # get_git_version() 7 | # 8 | # - Example 9 | # 10 | # include(GetGitVersion) 11 | # get_git_version(GIT_VERSION) 12 | # 13 | # Requires CMake 2.8.11+ 14 | find_package(Git) 15 | 16 | if(__get_git_version) 17 | return() 18 | endif() 19 | set(__get_git_version INCLUDED) 20 | 21 | function(get_git_version var) 22 | if(GIT_EXECUTABLE) 23 | execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 24 | RESULT_VARIABLE status 25 | 
OUTPUT_VARIABLE GIT_VERSION 26 | ERROR_QUIET) 27 | if(${status}) 28 | set(GIT_VERSION "v0.0.0") 29 | else() 30 | string(STRIP ${GIT_VERSION} GIT_VERSION) 31 | string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION}) 32 | endif() 33 | 34 | # Work out if the repository is dirty 35 | execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh 36 | OUTPUT_QUIET 37 | ERROR_QUIET) 38 | execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD -- 39 | OUTPUT_VARIABLE GIT_DIFF_INDEX 40 | ERROR_QUIET) 41 | string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) 42 | if (${GIT_DIRTY}) 43 | set(GIT_VERSION "${GIT_VERSION}-dirty") 44 | endif() 45 | else() 46 | set(GIT_VERSION "v0.0.0") 47 | endif() 48 | 49 | message("-- git Version: ${GIT_VERSION}") 50 | set(${var} ${GIT_VERSION} PARENT_SCOPE) 51 | endfunction() 52 | -------------------------------------------------------------------------------- /benchmark/cmake/HandleGTest.cmake: -------------------------------------------------------------------------------- 1 | 2 | macro(split_list listname) 3 | string(REPLACE ";" " " ${listname} "${${listname}}") 4 | endmacro() 5 | 6 | macro(build_external_gtest) 7 | include(ExternalProject) 8 | set(GTEST_FLAGS "") 9 | if (BENCHMARK_USE_LIBCXX) 10 | if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") 11 | list(APPEND GTEST_FLAGS -stdlib=libc++) 12 | else() 13 | message(WARNING "Unsupported compiler (${CMAKE_CXX_COMPILER}) when using libc++") 14 | endif() 15 | endif() 16 | if (BENCHMARK_BUILD_32_BITS) 17 | list(APPEND GTEST_FLAGS -m32) 18 | endif() 19 | if (NOT "${CMAKE_CXX_FLAGS}" STREQUAL "") 20 | list(APPEND GTEST_FLAGS ${CMAKE_CXX_FLAGS}) 21 | endif() 22 | string(TOUPPER "${CMAKE_BUILD_TYPE}" GTEST_BUILD_TYPE) 23 | if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE") 24 | set(GTEST_BUILD_TYPE "DEBUG") 25 | endif() 26 | # FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where 27 | # -Werror=unused-function fires during the build on OS X. This is a temporary 28 | # workaround to keep our travis bots from failing. It should be removed 29 | # once gtest is fixed. 
30 | if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") 31 | list(APPEND GTEST_FLAGS "-Wno-unused-function") 32 | endif() 33 | split_list(GTEST_FLAGS) 34 | set(EXCLUDE_FROM_ALL_OPT "") 35 | set(EXCLUDE_FROM_ALL_VALUE "") 36 | if (${CMAKE_VERSION} VERSION_GREATER "3.0.99") 37 | set(EXCLUDE_FROM_ALL_OPT "EXCLUDE_FROM_ALL") 38 | set(EXCLUDE_FROM_ALL_VALUE "ON") 39 | endif() 40 | ExternalProject_Add(googletest 41 | ${EXCLUDE_FROM_ALL_OPT} ${EXCLUDE_FROM_ALL_VALUE} 42 | GIT_REPOSITORY https://github.com/google/googletest.git 43 | GIT_TAG master 44 | PREFIX "${CMAKE_BINARY_DIR}/googletest" 45 | INSTALL_DIR "${CMAKE_BINARY_DIR}/googletest" 46 | CMAKE_CACHE_ARGS 47 | -DCMAKE_BUILD_TYPE:STRING=${GTEST_BUILD_TYPE} 48 | -DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER} 49 | -DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER} 50 | -DCMAKE_INSTALL_PREFIX:PATH= 51 | -DCMAKE_INSTALL_LIBDIR:PATH=/lib 52 | -DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS} 53 | -Dgtest_force_shared_crt:BOOL=ON 54 | ) 55 | 56 | ExternalProject_Get_Property(googletest install_dir) 57 | set(GTEST_INCLUDE_DIRS ${install_dir}/include) 58 | file(MAKE_DIRECTORY ${GTEST_INCLUDE_DIRS}) 59 | 60 | set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}") 61 | set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}") 62 | if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG") 63 | set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}") 64 | endif() 65 | 66 | # Use gmock_main instead of gtest_main because it initializes gtest as well. 67 | # Note: The libraries are listed in reverse order of their dependancies. 68 | foreach(LIB gtest gmock gmock_main) 69 | add_library(${LIB} UNKNOWN IMPORTED) 70 | set_target_properties(${LIB} PROPERTIES 71 | IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX} 72 | INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS} 73 | INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}" 74 | ) 75 | add_dependencies(${LIB} googletest) 76 | list(APPEND GTEST_BOTH_LIBRARIES ${LIB}) 77 | endforeach() 78 | endmacro(build_external_gtest) 79 | 80 | if (BENCHMARK_ENABLE_GTEST_TESTS) 81 | if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest) 82 | set(GTEST_ROOT "${CMAKE_SOURCE_DIR}/googletest") 83 | set(INSTALL_GTEST OFF CACHE INTERNAL "") 84 | set(INSTALL_GMOCK OFF CACHE INTERNAL "") 85 | add_subdirectory(${CMAKE_SOURCE_DIR}/googletest) 86 | set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main) 87 | foreach(HEADER test mock) 88 | # CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES, so we 89 | # have to add the paths ourselves. 90 | set(HFILE g${HEADER}/g${HEADER}.h) 91 | set(HPATH ${GTEST_ROOT}/google${HEADER}/include) 92 | find_path(HEADER_PATH_${HEADER} ${HFILE} 93 | NO_DEFAULT_PATHS 94 | HINTS ${HPATH} 95 | ) 96 | if (NOT HEADER_PATH_${HEADER}) 97 | message(FATAL "Failed to find header ${HFILE} in ${HPATH}") 98 | endif() 99 | list(APPEND GTEST_INCLUDE_DIRS ${HEADER_PATH_${HEADER}}) 100 | endforeach() 101 | elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES) 102 | build_external_gtest() 103 | else() 104 | find_package(GTest REQUIRED) 105 | find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h 106 | HINTS ${GTEST_INCLUDE_DIRS}) 107 | if (NOT GMOCK_INCLUDE_DIRS) 108 | message(FATAL "Failed to find header gmock/gmock.h with hint ${GTEST_INCLUDE_DIRS}") 109 | endif() 110 | set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS}) 111 | # FIXME: We don't currently require the gmock library to build the tests, 112 | # and it's likely we won't find it, so we don't try. As long as we've 113 | # found the gmock/gmock.h header and gtest_main that should be good enough. 
114 | endif() 115 | endif() 116 | -------------------------------------------------------------------------------- /benchmark/cmake/benchmark.pc.in: -------------------------------------------------------------------------------- 1 | prefix=@CMAKE_INSTALL_PREFIX@ 2 | exec_prefix=${prefix} 3 | libdir=${prefix}/lib 4 | includedir=${prefix}/include 5 | 6 | Name: @PROJECT_NAME@ 7 | Description: Google microbenchmark framework 8 | Version: @VERSION@ 9 | 10 | Libs: -L${libdir} -lbenchmark 11 | Cflags: -I${includedir} 12 | -------------------------------------------------------------------------------- /benchmark/cmake/gnu_posix_regex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | int main() { 4 | std::string str = "test0159"; 5 | regex_t re; 6 | int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); 7 | if (ec != 0) { 8 | return ec; 9 | } 10 | return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0; 11 | } 12 | 13 | -------------------------------------------------------------------------------- /benchmark/cmake/llvm-toolchain.cmake: -------------------------------------------------------------------------------- 1 | find_package(LLVMAr REQUIRED) 2 | set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE) 3 | 4 | find_package(LLVMNm REQUIRED) 5 | set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE) 6 | 7 | find_package(LLVMRanLib REQUIRED) 8 | set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE) 9 | -------------------------------------------------------------------------------- /benchmark/cmake/posix_regex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | int main() { 4 | std::string str = "test0159"; 5 | regex_t re; 6 | int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB); 7 | if (ec != 0) { 8 | return ec; 9 | } 10 | int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0; 11 | regfree(&re); 12 | return ret; 13 | } 14 | 15 | -------------------------------------------------------------------------------- /benchmark/cmake/std_regex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | int main() { 4 | const std::string str = "test0159"; 5 | std::regex re; 6 | re = std::regex("^[a-z]+[0-9]+$", 7 | std::regex_constants::extended | std::regex_constants::nosubs); 8 | return std::regex_search(str, re) ? 0 : -1; 9 | } 10 | 11 | -------------------------------------------------------------------------------- /benchmark/cmake/steady_clock.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int main() { 4 | typedef std::chrono::steady_clock Clock; 5 | Clock::time_point tp = Clock::now(); 6 | ((void)tp); 7 | } 8 | -------------------------------------------------------------------------------- /benchmark/cmake/thread_safety_attributes.cpp: -------------------------------------------------------------------------------- 1 | #define HAVE_THREAD_SAFETY_ATTRIBUTES 2 | #include "../src/mutex.h" 3 | 4 | int main() {} 5 | -------------------------------------------------------------------------------- /benchmark/docs/AssemblyTests.md: -------------------------------------------------------------------------------- 1 | # Assembly Tests 2 | 3 | The Benchmark library provides a number of functions whose primary 4 | purpose in to affect assembly generation, including `DoNotOptimize` 5 | and `ClobberMemory`. 
In addition there are other functions, 6 | such as `KeepRunning`, for which generating good assembly is paramount. 7 | 8 | For these functions it's important to have tests that verify the 9 | correctness and quality of the implementation. This requires testing 10 | the code generated by the compiler. 11 | 12 | This document describes how the Benchmark library tests compiler output, 13 | as well as how to properly write new tests. 14 | 15 | 16 | ## Anatomy of a Test 17 | 18 | Writing a test has two steps: 19 | 20 | * Write the code you want to generate assembly for. 21 | * Add `// CHECK` lines to match against the verified assembly. 22 | 23 | Example: 24 | ```c++ 25 | 26 | // CHECK-LABEL: test_add: 27 | extern "C" int test_add() { 28 | extern int ExternInt; 29 | return ExternInt + 1; 30 | 31 | // CHECK: movl ExternInt(%rip), %eax 32 | // CHECK: addl %eax 33 | // CHECK: ret 34 | } 35 | 36 | ``` 37 | 38 | #### LLVM Filecheck 39 | 40 | [LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html) 41 | is used to test the generated assembly against the `// CHECK` lines 42 | specified in the tests source file. Please see the documentation 43 | linked above for information on how to write `CHECK` directives. 44 | 45 | #### Tips and Tricks: 46 | 47 | * Tests should match the minimal amount of output required to establish 48 | correctness. `CHECK` directives don't have to match on the exact next line 49 | after the previous match, so tests should omit checks for unimportant 50 | bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive) 51 | can be used to ensure a match occurs exactly after the previous match). 52 | 53 | * The tests are compiled with `-O3 -g0`. So we're only testing the 54 | optimized output. 55 | 56 | * The assembly output is further cleaned up using `tools/strip_asm.py`. 57 | This removes comments, assembler directives, and unused labels before 58 | the test is run. 59 | 60 | * The generated and stripped assembly file for a test is output under 61 | `/test/.s` 62 | 63 | * Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes) 64 | to specify lines that should only match in certain situations. 65 | The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that 66 | are only expected to match Clang or GCC's output respectively. Normal 67 | `CHECK` lines match against all compilers. (Note: `CHECK-NOT` and 68 | `CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed 69 | `CHECK` lines) 70 | 71 | * Use `extern "C"` to disable name mangling for specific functions. This 72 | makes them easier to name in the `CHECK` lines. 73 | 74 | 75 | ## Problems Writing Portable Tests 76 | 77 | Writing tests which check the code generated by a compiler are 78 | inherently non-portable. Different compilers and even different compiler 79 | versions may generate entirely different code. The Benchmark tests 80 | must tolerate this. 81 | 82 | LLVM Filecheck provides a number of mechanisms to help write 83 | "more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax), 84 | allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables) 85 | for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive). 
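A minimal sketch of the last of those mechanisms, `CHECK-DAG`, which accepts its matches in any order (the function and the exact instructions below are illustrative, not taken from an existing test in this repository):

```c++
// CHECK-LABEL: test_store_two_ints:
extern "C" void test_store_two_ints() {
  extern int A, B;
  // Two independent stores: different compilers may emit them in either order.
  A = 1;
  B = 2;

  // CHECK-DAG: movl $1, A(%rip)
  // CHECK-DAG: movl $2, B(%rip)
  // CHECK: ret
}
```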
86 | 87 | #### Capturing Variables 88 | 89 | For example, say GCC stores a variable in a register but Clang stores 90 | it in memory. To write a test that tolerates both cases we "capture" 91 | the destination of the store, and then use the captured expression 92 | to write the remainder of the test. 93 | 94 | ```c++ 95 | // CHECK-LABEL: test_div_no_op_into_shr: 96 | extern "C" void test_div_no_op_into_shr(int value) { 97 | int divisor = 2; 98 | benchmark::DoNotOptimize(divisor); // hide the value from the optimizer 99 | return value / divisor; 100 | 101 | // CHECK: movl $2, [[DEST:.*]] 102 | // CHECK: idivl [[DEST]] 103 | // CHECK: ret 104 | } 105 | ``` 106 | 107 | #### Using Regular Expressions to Match Differing Output 108 | 109 | Often tests require testing assembly lines which may subtly differ 110 | between compilers or compiler versions. A common example of this 111 | is matching stack frame addresses. In this case regular expressions 112 | can be used to match the differing bits of output. For example: 113 | 114 | ```c++ 115 | int ExternInt; 116 | struct Point { int x, y, z; }; 117 | 118 | // CHECK-LABEL: test_store_point: 119 | extern "C" void test_store_point() { 120 | Point p{ExternInt, ExternInt, ExternInt}; 121 | benchmark::DoNotOptimize(p); 122 | 123 | // CHECK: movl ExternInt(%rip), %eax 124 | // CHECK: movl %eax, -{{[0-9]+}}(%rsp) 125 | // CHECK: movl %eax, -{{[0-9]+}}(%rsp) 126 | // CHECK: movl %eax, -{{[0-9]+}}(%rsp) 127 | // CHECK: ret 128 | } 129 | ``` 130 | 131 | ## Current Requirements and Limitations 132 | 133 | The tests require Filecheck to be installed along the `PATH` of the 134 | build machine. Otherwise the tests will be disabled. 135 | 136 | Additionally, as mentioned in the previous section, codegen tests are 137 | inherently non-portable. Currently the tests are limited to: 138 | 139 | * x86_64 targets. 140 | * Compiled with GCC or Clang 141 | 142 | Further work could be done, at least on a limited basis, to extend the 143 | tests to other architectures and compilers (using `CHECK` prefixes). 144 | 145 | Furthermore, the tests fail for builds which specify additional flags 146 | that modify code generation, including `--coverage` or `-fsanitize=`. 147 | 148 | -------------------------------------------------------------------------------- /benchmark/releasing.md: -------------------------------------------------------------------------------- 1 | # How to release 2 | 3 | * Make sure you're on master and synced to HEAD 4 | * Ensure the project builds and tests run (sanity check only, obviously) 5 | * `parallel -j0 exec ::: test/*_test` can help ensure everything at least 6 | passes 7 | * Prepare release notes 8 | * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of 9 | commits between the last annotated tag and HEAD 10 | * Pick the most interesting. 11 | * Create a release through github's interface 12 | * Note this will create a lightweight tag. 
13 | * Update this to an annotated tag: 14 | * `git pull --tags` 15 | * `git tag -a -f ` 16 | * `git push --force origin` 17 | -------------------------------------------------------------------------------- /benchmark/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Allow the source files to find headers in src/ 2 | include_directories(${PROJECT_SOURCE_DIR}/src) 3 | 4 | if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) 5 | list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) 6 | list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) 7 | endif() 8 | 9 | file(GLOB 10 | SOURCE_FILES 11 | *.cc 12 | ${PROJECT_SOURCE_DIR}/include/benchmark/*.h 13 | ${CMAKE_CURRENT_SOURCE_DIR}/*.h) 14 | 15 | add_library(benchmark ${SOURCE_FILES}) 16 | set_target_properties(benchmark PROPERTIES 17 | OUTPUT_NAME "benchmark" 18 | VERSION ${GENERIC_LIB_VERSION} 19 | SOVERSION ${GENERIC_LIB_SOVERSION} 20 | ) 21 | target_include_directories(benchmark PUBLIC 22 | $ 23 | ) 24 | 25 | # Link threads. 26 | target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) 27 | find_library(LIBRT rt) 28 | if(LIBRT) 29 | target_link_libraries(benchmark ${LIBRT}) 30 | endif() 31 | 32 | # We need extra libraries on Windows 33 | if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") 34 | target_link_libraries(benchmark Shlwapi) 35 | endif() 36 | 37 | # We need extra libraries on Solaris 38 | if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") 39 | target_link_libraries(benchmark kstat) 40 | endif() 41 | 42 | set(include_install_dir "include") 43 | set(lib_install_dir "lib/") 44 | set(bin_install_dir "bin/") 45 | set(config_install_dir "lib/cmake/${PROJECT_NAME}") 46 | set(pkgconfig_install_dir "lib/pkgconfig") 47 | 48 | set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") 49 | 50 | set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") 51 | set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") 52 | set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc") 53 | set(targets_export_name "${PROJECT_NAME}Targets") 54 | 55 | set(namespace "${PROJECT_NAME}::") 56 | 57 | include(CMakePackageConfigHelpers) 58 | write_basic_package_version_file( 59 | "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion 60 | ) 61 | 62 | configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) 63 | configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY) 64 | 65 | if (BENCHMARK_ENABLE_INSTALL) 66 | # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) 67 | install( 68 | TARGETS benchmark 69 | EXPORT ${targets_export_name} 70 | ARCHIVE DESTINATION ${lib_install_dir} 71 | LIBRARY DESTINATION ${lib_install_dir} 72 | RUNTIME DESTINATION ${bin_install_dir} 73 | INCLUDES DESTINATION ${include_install_dir}) 74 | 75 | install( 76 | DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" 77 | DESTINATION ${include_install_dir} 78 | FILES_MATCHING PATTERN "*.*h") 79 | 80 | install( 81 | FILES "${project_config}" "${version_config}" 82 | DESTINATION "${config_install_dir}") 83 | 84 | install( 85 | FILES "${pkg_config}" 86 | DESTINATION "${pkgconfig_install_dir}") 87 | 88 | install( 89 | EXPORT "${targets_export_name}" 90 | NAMESPACE "${namespace}" 91 | DESTINATION "${config_install_dir}") 92 | endif() 93 | -------------------------------------------------------------------------------- /benchmark/src/arraysize.h: 
-------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_ARRAYSIZE_H_ 2 | #define BENCHMARK_ARRAYSIZE_H_ 3 | 4 | #include "internal_macros.h" 5 | 6 | namespace benchmark { 7 | namespace internal { 8 | // The arraysize(arr) macro returns the # of elements in an array arr. 9 | // The expression is a compile-time constant, and therefore can be 10 | // used in defining new arrays, for example. If you use arraysize on 11 | // a pointer by mistake, you will get a compile-time error. 12 | // 13 | 14 | // This template function declaration is used in defining arraysize. 15 | // Note that the function doesn't need an implementation, as we only 16 | // use its type. 17 | template 18 | char (&ArraySizeHelper(T (&array)[N]))[N]; 19 | 20 | // That gcc wants both of these prototypes seems mysterious. VC, for 21 | // its part, can't decide which to use (another mystery). Matching of 22 | // template overloads: the final frontier. 23 | #ifndef COMPILER_MSVC 24 | template 25 | char (&ArraySizeHelper(const T (&array)[N]))[N]; 26 | #endif 27 | 28 | #define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array))) 29 | 30 | } // end namespace internal 31 | } // end namespace benchmark 32 | 33 | #endif // BENCHMARK_ARRAYSIZE_H_ 34 | -------------------------------------------------------------------------------- /benchmark/src/benchmark_api_internal.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_API_INTERNAL_H 2 | #define BENCHMARK_API_INTERNAL_H 3 | 4 | #include "benchmark/benchmark.h" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | namespace benchmark { 13 | namespace internal { 14 | 15 | // Information kept per benchmark we may want to run 16 | struct Benchmark::Instance { 17 | std::string name; 18 | Benchmark* benchmark; 19 | ReportMode report_mode; 20 | std::vector arg; 21 | TimeUnit time_unit; 22 | int range_multiplier; 23 | bool use_real_time; 24 | bool use_manual_time; 25 | BigO complexity; 26 | BigOFunc* complexity_lambda; 27 | UserCounters counters; 28 | const std::vector* statistics; 29 | bool last_benchmark_instance; 30 | int repetitions; 31 | double min_time; 32 | size_t iterations; 33 | int threads; // Number of concurrent threads to us 34 | }; 35 | 36 | bool FindBenchmarksInternal(const std::string& re, 37 | std::vector* benchmarks, 38 | std::ostream* Err); 39 | 40 | bool IsZero(double n); 41 | 42 | ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); 43 | 44 | } // end namespace internal 45 | } // end namespace benchmark 46 | 47 | #endif // BENCHMARK_API_INTERNAL_H 48 | -------------------------------------------------------------------------------- /benchmark/src/benchmark_register.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_REGISTER_H 2 | #define BENCHMARK_REGISTER_H 3 | 4 | #include 5 | 6 | #include "check.h" 7 | 8 | template 9 | void AddRange(std::vector* dst, T lo, T hi, int mult) { 10 | CHECK_GE(lo, 0); 11 | CHECK_GE(hi, lo); 12 | CHECK_GE(mult, 2); 13 | 14 | // Add "lo" 15 | dst->push_back(lo); 16 | 17 | static const T kmax = std::numeric_limits::max(); 18 | 19 | // Now space out the benchmarks in multiples of "mult" 20 | for (T i = 1; i < kmax / mult; i *= mult) { 21 | if (i >= hi) break; 22 | if (i > lo) { 23 | dst->push_back(i); 24 | } 25 | } 26 | 27 | // Add "hi" (if different from "lo") 28 | if (hi != lo) { 29 | dst->push_back(hi); 30 | } 31 | } 32 
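As a quick illustration of the argument-spacing logic in `AddRange` above: the sketch below paraphrases the loop without the `CHECK_*` preconditions so it compiles on its own (`AddRangeSketch` is not part of the library).

```c++
#include <iostream>
#include <limits>
#include <vector>

// Paraphrase of the AddRange loop above, minus the CHECK_* preconditions.
template <typename T>
void AddRangeSketch(std::vector<T>* dst, T lo, T hi, int mult) {
  dst->push_back(lo);                                   // always emit "lo"
  static const T kmax = std::numeric_limits<T>::max();
  for (T i = 1; i < kmax / mult; i *= mult) {           // powers of "mult"
    if (i >= hi) break;
    if (i > lo) dst->push_back(i);
  }
  if (hi != lo) dst->push_back(hi);                     // always emit "hi"
}

int main() {
  std::vector<int> args;
  AddRangeSketch(&args, 1, 100, 8);
  for (int v : args) std::cout << v << ' ';  // prints: 1 8 64 100
  std::cout << '\n';
}
```

In other words, a range is expanded into its two endpoints plus the powers of the multiplier that fall strictly between them.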
| 33 | #endif // BENCHMARK_REGISTER_H 34 | -------------------------------------------------------------------------------- /benchmark/src/check.h: -------------------------------------------------------------------------------- 1 | #ifndef CHECK_H_ 2 | #define CHECK_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "internal_macros.h" 9 | #include "log.h" 10 | 11 | namespace benchmark { 12 | namespace internal { 13 | 14 | typedef void(AbortHandlerT)(); 15 | 16 | inline AbortHandlerT*& GetAbortHandler() { 17 | static AbortHandlerT* handler = &std::abort; 18 | return handler; 19 | } 20 | 21 | BENCHMARK_NORETURN inline void CallAbortHandler() { 22 | GetAbortHandler()(); 23 | std::abort(); // fallback to enforce noreturn 24 | } 25 | 26 | // CheckHandler is the class constructed by failing CHECK macros. CheckHandler 27 | // will log information about the failures and abort when it is destructed. 28 | class CheckHandler { 29 | public: 30 | CheckHandler(const char* check, const char* file, const char* func, int line) 31 | : log_(GetErrorLogInstance()) { 32 | log_ << file << ":" << line << ": " << func << ": Check `" << check 33 | << "' failed. "; 34 | } 35 | 36 | LogType& GetLog() { return log_; } 37 | 38 | BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { 39 | log_ << std::endl; 40 | CallAbortHandler(); 41 | } 42 | 43 | CheckHandler& operator=(const CheckHandler&) = delete; 44 | CheckHandler(const CheckHandler&) = delete; 45 | CheckHandler() = delete; 46 | 47 | private: 48 | LogType& log_; 49 | }; 50 | 51 | } // end namespace internal 52 | } // end namespace benchmark 53 | 54 | // The CHECK macro returns a std::ostream object that can have extra information 55 | // written to it. 56 | #ifndef NDEBUG 57 | #define CHECK(b) \ 58 | (b ? 
::benchmark::internal::GetNullLogInstance() \ 59 | : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ 60 | .GetLog()) 61 | #else 62 | #define CHECK(b) ::benchmark::internal::GetNullLogInstance() 63 | #endif 64 | 65 | #define CHECK_EQ(a, b) CHECK((a) == (b)) 66 | #define CHECK_NE(a, b) CHECK((a) != (b)) 67 | #define CHECK_GE(a, b) CHECK((a) >= (b)) 68 | #define CHECK_LE(a, b) CHECK((a) <= (b)) 69 | #define CHECK_GT(a, b) CHECK((a) > (b)) 70 | #define CHECK_LT(a, b) CHECK((a) < (b)) 71 | 72 | #define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps)) 73 | #define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps)) 74 | #define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps)) 75 | #define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) 76 | #define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) 77 | #define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) 78 | 79 | #endif // CHECK_H_ 80 | -------------------------------------------------------------------------------- /benchmark/src/colorprint.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_COLORPRINT_H_ 2 | #define BENCHMARK_COLORPRINT_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace benchmark { 9 | enum LogColor { 10 | COLOR_DEFAULT, 11 | COLOR_RED, 12 | COLOR_GREEN, 13 | COLOR_YELLOW, 14 | COLOR_BLUE, 15 | COLOR_MAGENTA, 16 | COLOR_CYAN, 17 | COLOR_WHITE 18 | }; 19 | 20 | std::string FormatString(const char* msg, va_list args); 21 | std::string FormatString(const char* msg, ...); 22 | 23 | void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, 24 | va_list args); 25 | void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...); 26 | 27 | // Returns true if stdout appears to be a terminal that supports colored 28 | // output, false otherwise. 29 | bool IsColorTerminal(); 30 | 31 | } // end namespace benchmark 32 | 33 | #endif // BENCHMARK_COLORPRINT_H_ 34 | -------------------------------------------------------------------------------- /benchmark/src/commandlineflags.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_COMMANDLINEFLAGS_H_ 2 | #define BENCHMARK_COMMANDLINEFLAGS_H_ 3 | 4 | #include 5 | #include 6 | 7 | // Macro for referencing flags. 8 | #define FLAG(name) FLAGS_##name 9 | 10 | // Macros for declaring flags. 11 | #define DECLARE_bool(name) extern bool FLAG(name) 12 | #define DECLARE_int32(name) extern int32_t FLAG(name) 13 | #define DECLARE_int64(name) extern int64_t FLAG(name) 14 | #define DECLARE_double(name) extern double FLAG(name) 15 | #define DECLARE_string(name) extern std::string FLAG(name) 16 | 17 | // Macros for defining flags. 18 | #define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val) 19 | #define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val) 20 | #define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val) 21 | #define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val) 22 | #define DEFINE_string(name, default_val, doc) \ 23 | std::string FLAG(name) = (default_val) 24 | 25 | namespace benchmark { 26 | // Parses 'str' for a 32-bit signed integer. If successful, writes the result 27 | // to *value and returns true; otherwise leaves *value unchanged and returns 28 | // false. 
29 | bool ParseInt32(const std::string& src_text, const char* str, int32_t* value); 30 | 31 | // Parses a bool/Int32/string from the environment variable 32 | // corresponding to the given Google Test flag. 33 | bool BoolFromEnv(const char* flag, bool default_val); 34 | int32_t Int32FromEnv(const char* flag, int32_t default_val); 35 | double DoubleFromEnv(const char* flag, double default_val); 36 | const char* StringFromEnv(const char* flag, const char* default_val); 37 | 38 | // Parses a string for a bool flag, in the form of either 39 | // "--flag=value" or "--flag". 40 | // 41 | // In the former case, the value is taken as true if it passes IsTruthyValue(). 42 | // 43 | // In the latter case, the value is taken as true. 44 | // 45 | // On success, stores the value of the flag in *value, and returns 46 | // true. On failure, returns false without changing *value. 47 | bool ParseBoolFlag(const char* str, const char* flag, bool* value); 48 | 49 | // Parses a string for an Int32 flag, in the form of 50 | // "--flag=value". 51 | // 52 | // On success, stores the value of the flag in *value, and returns 53 | // true. On failure, returns false without changing *value. 54 | bool ParseInt32Flag(const char* str, const char* flag, int32_t* value); 55 | 56 | // Parses a string for a Double flag, in the form of 57 | // "--flag=value". 58 | // 59 | // On success, stores the value of the flag in *value, and returns 60 | // true. On failure, returns false without changing *value. 61 | bool ParseDoubleFlag(const char* str, const char* flag, double* value); 62 | 63 | // Parses a string for a string flag, in the form of 64 | // "--flag=value". 65 | // 66 | // On success, stores the value of the flag in *value, and returns 67 | // true. On failure, returns false without changing *value. 68 | bool ParseStringFlag(const char* str, const char* flag, std::string* value); 69 | 70 | // Returns true if the string matches the flag. 71 | bool IsFlag(const char* str, const char* flag); 72 | 73 | // Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or 74 | // some non-alphanumeric character. As a special case, also returns true if 75 | // value is the empty string. 76 | bool IsTruthyFlagValue(const std::string& value); 77 | } // end namespace benchmark 78 | 79 | #endif // BENCHMARK_COMMANDLINEFLAGS_H_ 80 | -------------------------------------------------------------------------------- /benchmark/src/complexity.h: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Ismael Jimenez Martinez. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
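For context on the flag machinery declared in `commandlineflags.h` above: the `DEFINE_*`/`DECLARE_*` macros simply paste a `FLAGS_` prefix onto a global variable (the documentation string is discarded by the expansion). Below is a minimal expansion written out by hand; the flag name `my_iterations` is purely illustrative, not a real library flag.

```c++
#include <cstdint>

// DEFINE_int32(my_iterations, 3, "how many iterations") expands to:
int32_t FLAGS_my_iterations = (3);

// DECLARE_int32(my_iterations), used from another translation unit, expands to:
// extern int32_t FLAGS_my_iterations;

// FLAG(my_iterations) token-pastes to FLAGS_my_iterations, so the flag is read
// like any other global:
int main() { return FLAGS_my_iterations == 3 ? 0 : 1; }
```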
14 | 15 | // Source project : https://github.com/ismaelJimenez/cpp.leastsq 16 | // Adapted to be used with google benchmark 17 | 18 | #ifndef COMPLEXITY_H_ 19 | #define COMPLEXITY_H_ 20 | 21 | #include 22 | #include 23 | 24 | #include "benchmark/benchmark.h" 25 | 26 | namespace benchmark { 27 | 28 | // Return a vector containing the bigO and RMS information for the specified 29 | // list of reports. If 'reports.size() < 2' an empty vector is returned. 30 | std::vector ComputeBigO( 31 | const std::vector& reports); 32 | 33 | // This data structure will contain the result returned by MinimalLeastSq 34 | // - coef : Estimated coeficient for the high-order term as 35 | // interpolated from data. 36 | // - rms : Normalized Root Mean Squared Error. 37 | // - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability 38 | // form has been provided to MinimalLeastSq this will return 39 | // the same value. In case BigO::oAuto has been selected, this 40 | // parameter will return the best fitting curve detected. 41 | 42 | struct LeastSq { 43 | LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {} 44 | 45 | double coef; 46 | double rms; 47 | BigO complexity; 48 | }; 49 | 50 | // Function to return an string for the calculated complexity 51 | std::string GetBigOString(BigO complexity); 52 | 53 | } // end namespace benchmark 54 | 55 | #endif // COMPLEXITY_H_ 56 | -------------------------------------------------------------------------------- /benchmark/src/counter.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
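To make the `LeastSq` fields in `complexity.h` above concrete: for a candidate curve f(n), the single-coefficient fit of measured times t is the least-squares solution of t ≈ coef · f(n), and the RMS field reports how well that curve explains the data. The sketch below is a standalone illustration of that idea, not the library's `MinimalLeastSq` (which lives in complexity.cc and is not part of this excerpt); normalizing the RMS by the mean time is just one common choice.

```c++
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

struct FitSketch {
  double coef;  // estimated coefficient of the high-order term
  double rms;   // normalized root-mean-squared error of the fit
};

// Least-squares fit of t ~= coef * f(n) for one candidate curve f.
FitSketch FitCurve(const std::vector<double>& n, const std::vector<double>& t,
                   double (*f)(double)) {
  double num = 0.0, den = 0.0;
  for (std::size_t i = 0; i < n.size(); ++i) {
    num += t[i] * f(n[i]);
    den += f(n[i]) * f(n[i]);
  }
  const double coef = num / den;

  double squared_err = 0.0, mean_t = 0.0;
  for (std::size_t i = 0; i < n.size(); ++i) {
    const double e = t[i] - coef * f(n[i]);
    squared_err += e * e;
    mean_t += t[i] / static_cast<double>(t.size());
  }
  return {coef, std::sqrt(squared_err / static_cast<double>(n.size())) / mean_t};
}

int main() {
  const std::vector<double> n = {1024, 2048, 4096};
  const std::vector<double> t = {10.0, 20.0, 40.0};  // made-up, linear-looking times
  const FitSketch lin = FitCurve(n, t, [](double x) { return x; });
  std::printf("oN fit: coef=%g rms=%g\n", lin.coef, lin.rms);  // coef ~0.00977, rms ~0
  return 0;
}
```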
14 | 15 | #include "counter.h" 16 | 17 | namespace benchmark { 18 | namespace internal { 19 | 20 | double Finish(Counter const& c, double cpu_time, double num_threads) { 21 | double v = c.value; 22 | if (c.flags & Counter::kIsRate) { 23 | v /= cpu_time; 24 | } 25 | if (c.flags & Counter::kAvgThreads) { 26 | v /= num_threads; 27 | } 28 | return v; 29 | } 30 | 31 | void Finish(UserCounters *l, double cpu_time, double num_threads) { 32 | for (auto &c : *l) { 33 | c.second.value = Finish(c.second, cpu_time, num_threads); 34 | } 35 | } 36 | 37 | void Increment(UserCounters *l, UserCounters const& r) { 38 | // add counters present in both or just in *l 39 | for (auto &c : *l) { 40 | auto it = r.find(c.first); 41 | if (it != r.end()) { 42 | c.second.value = c.second + it->second; 43 | } 44 | } 45 | // add counters present in r, but not in *l 46 | for (auto const &tc : r) { 47 | auto it = l->find(tc.first); 48 | if (it == l->end()) { 49 | (*l)[tc.first] = tc.second; 50 | } 51 | } 52 | } 53 | 54 | bool SameNames(UserCounters const& l, UserCounters const& r) { 55 | if (&l == &r) return true; 56 | if (l.size() != r.size()) { 57 | return false; 58 | } 59 | for (auto const& c : l) { 60 | if (r.find(c.first) == r.end()) { 61 | return false; 62 | } 63 | } 64 | return true; 65 | } 66 | 67 | } // end namespace internal 68 | } // end namespace benchmark 69 | -------------------------------------------------------------------------------- /benchmark/src/counter.h: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "benchmark/benchmark.h" 16 | 17 | namespace benchmark { 18 | 19 | // these counter-related functions are hidden to reduce API surface. 20 | namespace internal { 21 | void Finish(UserCounters *l, double time, double num_threads); 22 | void Increment(UserCounters *l, UserCounters const& r); 23 | bool SameNames(UserCounters const& l, UserCounters const& r); 24 | } // end namespace internal 25 | 26 | } //end namespace benchmark 27 | -------------------------------------------------------------------------------- /benchmark/src/csv_reporter.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
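A worked example of the counter post-processing implemented in `counter.cc` above: a counter flagged as a rate is divided by CPU time, and one flagged as a per-thread average is divided by the thread count. The sketch is standalone; the enum merely mirrors the roles of `Counter::kIsRate` and `Counter::kAvgThreads`, and the numbers are made up.

```c++
#include <cassert>

// Mirrors the flag handling in Finish() above.
enum FlagsSketch { kIsRate = 1 << 0, kAvgThreads = 1 << 1 };

double FinishSketch(double value, int flags, double cpu_time, double num_threads) {
  double v = value;
  if (flags & kIsRate) v /= cpu_time;         // report as a rate (per second)
  if (flags & kAvgThreads) v /= num_threads;  // report as a per-thread average
  return v;
}

int main() {
  // 1000 items processed in 2 s of CPU time by 4 threads:
  assert(FinishSketch(1000.0, kIsRate, 2.0, 4.0) == 500.0);                // items/s
  assert(FinishSketch(1000.0, kIsRate | kAvgThreads, 2.0, 4.0) == 125.0);  // items/s per thread
  return 0;
}
```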
14 | 15 | #include "benchmark/benchmark.h" 16 | #include "complexity.h" 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | #include "string_util.h" 26 | #include "timers.h" 27 | #include "check.h" 28 | 29 | // File format reference: http://edoceo.com/utilitas/csv-file-format. 30 | 31 | namespace benchmark { 32 | 33 | namespace { 34 | std::vector elements = { 35 | "name", "iterations", "real_time", "cpu_time", 36 | "time_unit", "bytes_per_second", "items_per_second", "label", 37 | "error_occurred", "error_message"}; 38 | } // namespace 39 | 40 | bool CSVReporter::ReportContext(const Context& context) { 41 | PrintBasicContext(&GetErrorStream(), context); 42 | return true; 43 | } 44 | 45 | void CSVReporter::ReportRuns(const std::vector & reports) { 46 | std::ostream& Out = GetOutputStream(); 47 | 48 | if (!printed_header_) { 49 | // save the names of all the user counters 50 | for (const auto& run : reports) { 51 | for (const auto& cnt : run.counters) { 52 | user_counter_names_.insert(cnt.first); 53 | } 54 | } 55 | 56 | // print the header 57 | for (auto B = elements.begin(); B != elements.end();) { 58 | Out << *B++; 59 | if (B != elements.end()) Out << ","; 60 | } 61 | for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) { 62 | Out << ",\"" << *B++ << "\""; 63 | } 64 | Out << "\n"; 65 | 66 | printed_header_ = true; 67 | } else { 68 | // check that all the current counters are saved in the name set 69 | for (const auto& run : reports) { 70 | for (const auto& cnt : run.counters) { 71 | CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) 72 | << "All counters must be present in each run. " 73 | << "Counter named \"" << cnt.first 74 | << "\" was not in a run after being added to the header"; 75 | } 76 | } 77 | } 78 | 79 | // print results for each run 80 | for (const auto& run : reports) { 81 | PrintRunData(run); 82 | } 83 | 84 | } 85 | 86 | void CSVReporter::PrintRunData(const Run & run) { 87 | std::ostream& Out = GetOutputStream(); 88 | 89 | // Field with embedded double-quote characters must be doubled and the field 90 | // delimited with double-quotes. 91 | std::string name = run.benchmark_name; 92 | ReplaceAll(&name, "\"", "\"\""); 93 | Out << '"' << name << "\","; 94 | if (run.error_occurred) { 95 | Out << std::string(elements.size() - 3, ','); 96 | Out << "true,"; 97 | std::string msg = run.error_message; 98 | ReplaceAll(&msg, "\"", "\"\""); 99 | Out << '"' << msg << "\"\n"; 100 | return; 101 | } 102 | 103 | // Do not print iteration on bigO and RMS report 104 | if (!run.report_big_o && !run.report_rms) { 105 | Out << run.iterations; 106 | } 107 | Out << ","; 108 | 109 | Out << run.GetAdjustedRealTime() << ","; 110 | Out << run.GetAdjustedCPUTime() << ","; 111 | 112 | // Do not print timeLabel on bigO and RMS report 113 | if (run.report_big_o) { 114 | Out << GetBigOString(run.complexity); 115 | } else if (!run.report_rms) { 116 | Out << GetTimeUnitString(run.time_unit); 117 | } 118 | Out << ","; 119 | 120 | if (run.bytes_per_second > 0.0) { 121 | Out << run.bytes_per_second; 122 | } 123 | Out << ","; 124 | if (run.items_per_second > 0.0) { 125 | Out << run.items_per_second; 126 | } 127 | Out << ","; 128 | if (!run.report_label.empty()) { 129 | // Field with embedded double-quote characters must be doubled and the field 130 | // delimited with double-quotes. 
131 | std::string label = run.report_label; 132 | ReplaceAll(&label, "\"", "\"\""); 133 | Out << "\"" << label << "\""; 134 | } 135 | Out << ",,"; // for error_occurred and error_message 136 | 137 | // Print user counters 138 | for (const auto &ucn : user_counter_names_) { 139 | auto it = run.counters.find(ucn); 140 | if(it == run.counters.end()) { 141 | Out << ","; 142 | } else { 143 | Out << "," << it->second; 144 | } 145 | } 146 | Out << '\n'; 147 | } 148 | 149 | } // end namespace benchmark 150 | -------------------------------------------------------------------------------- /benchmark/src/internal_macros.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_INTERNAL_MACROS_H_ 2 | #define BENCHMARK_INTERNAL_MACROS_H_ 3 | 4 | #include "benchmark/benchmark.h" 5 | 6 | #ifndef __has_feature 7 | #define __has_feature(x) 0 8 | #endif 9 | #ifndef __has_builtin 10 | #define __has_builtin(x) 0 11 | #endif 12 | 13 | #if defined(__clang__) 14 | #if !defined(COMPILER_CLANG) 15 | #define COMPILER_CLANG 16 | #endif 17 | #elif defined(_MSC_VER) 18 | #if !defined(COMPILER_MSVC) 19 | #define COMPILER_MSVC 20 | #endif 21 | #elif defined(__GNUC__) 22 | #if !defined(COMPILER_GCC) 23 | #define COMPILER_GCC 24 | #endif 25 | #endif 26 | 27 | #if __has_feature(cxx_attributes) 28 | #define BENCHMARK_NORETURN [[noreturn]] 29 | #elif defined(__GNUC__) 30 | #define BENCHMARK_NORETURN __attribute__((noreturn)) 31 | #elif defined(COMPILER_MSVC) 32 | #define BENCHMARK_NORETURN __declspec(noreturn) 33 | #else 34 | #define BENCHMARK_NORETURN 35 | #endif 36 | 37 | #if defined(__CYGWIN__) 38 | #define BENCHMARK_OS_CYGWIN 1 39 | #elif defined(_WIN32) 40 | #define BENCHMARK_OS_WINDOWS 1 41 | #elif defined(__APPLE__) 42 | #define BENCHMARK_OS_APPLE 1 43 | #include "TargetConditionals.h" 44 | #if defined(TARGET_OS_MAC) 45 | #define BENCHMARK_OS_MACOSX 1 46 | #if defined(TARGET_OS_IPHONE) 47 | #define BENCHMARK_OS_IOS 1 48 | #endif 49 | #endif 50 | #elif defined(__FreeBSD__) 51 | #define BENCHMARK_OS_FREEBSD 1 52 | #elif defined(__NetBSD__) 53 | #define BENCHMARK_OS_NETBSD 1 54 | #elif defined(__OpenBSD__) 55 | #define BENCHMARK_OS_OPENBSD 1 56 | #elif defined(__linux__) 57 | #define BENCHMARK_OS_LINUX 1 58 | #elif defined(__native_client__) 59 | #define BENCHMARK_OS_NACL 1 60 | #elif defined(__EMSCRIPTEN__) 61 | #define BENCHMARK_OS_EMSCRIPTEN 1 62 | #elif defined(__rtems__) 63 | #define BENCHMARK_OS_RTEMS 1 64 | #elif defined(__Fuchsia__) 65 | #define BENCHMARK_OS_FUCHSIA 1 66 | #elif defined (__SVR4) && defined (__sun) 67 | #define BENCHMARK_OS_SOLARIS 1 68 | #endif 69 | 70 | #if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ 71 | && !defined(__EXCEPTIONS) 72 | #define BENCHMARK_HAS_NO_EXCEPTIONS 73 | #endif 74 | 75 | #if defined(COMPILER_CLANG) || defined(COMPILER_GCC) 76 | #define BENCHMARK_MAYBE_UNUSED __attribute__((unused)) 77 | #else 78 | #define BENCHMARK_MAYBE_UNUSED 79 | #endif 80 | 81 | #if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable) 82 | #define BENCHMARK_UNREACHABLE() __builtin_unreachable() 83 | #elif defined(COMPILER_MSVC) 84 | #define BENCHMARK_UNREACHABLE() __assume(false) 85 | #else 86 | #define BENCHMARK_UNREACHABLE() ((void)0) 87 | #endif 88 | 89 | #endif // BENCHMARK_INTERNAL_MACROS_H_ 90 | -------------------------------------------------------------------------------- /benchmark/src/log.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_LOG_H_ 2 | #define 
BENCHMARK_LOG_H_ 3 | 4 | #include 5 | #include 6 | 7 | #include "benchmark/benchmark.h" 8 | 9 | namespace benchmark { 10 | namespace internal { 11 | 12 | typedef std::basic_ostream&(EndLType)(std::basic_ostream&); 13 | 14 | class LogType { 15 | friend LogType& GetNullLogInstance(); 16 | friend LogType& GetErrorLogInstance(); 17 | 18 | // FIXME: Add locking to output. 19 | template 20 | friend LogType& operator<<(LogType&, Tp const&); 21 | friend LogType& operator<<(LogType&, EndLType*); 22 | 23 | private: 24 | LogType(std::ostream* out) : out_(out) {} 25 | std::ostream* out_; 26 | BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); 27 | }; 28 | 29 | template 30 | LogType& operator<<(LogType& log, Tp const& value) { 31 | if (log.out_) { 32 | *log.out_ << value; 33 | } 34 | return log; 35 | } 36 | 37 | inline LogType& operator<<(LogType& log, EndLType* m) { 38 | if (log.out_) { 39 | *log.out_ << m; 40 | } 41 | return log; 42 | } 43 | 44 | inline int& LogLevel() { 45 | static int log_level = 0; 46 | return log_level; 47 | } 48 | 49 | inline LogType& GetNullLogInstance() { 50 | static LogType log(nullptr); 51 | return log; 52 | } 53 | 54 | inline LogType& GetErrorLogInstance() { 55 | static LogType log(&std::clog); 56 | return log; 57 | } 58 | 59 | inline LogType& GetLogInstanceForLevel(int level) { 60 | if (level <= LogLevel()) { 61 | return GetErrorLogInstance(); 62 | } 63 | return GetNullLogInstance(); 64 | } 65 | 66 | } // end namespace internal 67 | } // end namespace benchmark 68 | 69 | #define VLOG(x) \ 70 | (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ 71 | " ") 72 | 73 | #endif 74 | -------------------------------------------------------------------------------- /benchmark/src/mutex.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_MUTEX_H_ 2 | #define BENCHMARK_MUTEX_H_ 3 | 4 | #include 5 | #include 6 | 7 | #include "check.h" 8 | 9 | // Enable thread safety attributes only with clang. 10 | // The attributes can be safely erased when compiling with other compilers. 11 | #if defined(HAVE_THREAD_SAFETY_ATTRIBUTES) 12 | #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) 13 | #else 14 | #define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op 15 | #endif 16 | 17 | #define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) 18 | 19 | #define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) 20 | 21 | #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) 22 | 23 | #define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) 24 | 25 | #define ACQUIRED_BEFORE(...) \ 26 | THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) 27 | 28 | #define ACQUIRED_AFTER(...) \ 29 | THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) 30 | 31 | #define REQUIRES(...) \ 32 | THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) 33 | 34 | #define REQUIRES_SHARED(...) \ 35 | THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) 36 | 37 | #define ACQUIRE(...) \ 38 | THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) 39 | 40 | #define ACQUIRE_SHARED(...) \ 41 | THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) 42 | 43 | #define RELEASE(...) \ 44 | THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) 45 | 46 | #define RELEASE_SHARED(...) \ 47 | THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) 48 | 49 | #define TRY_ACQUIRE(...) 
\ 50 | THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) 51 | 52 | #define TRY_ACQUIRE_SHARED(...) \ 53 | THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) 54 | 55 | #define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) 56 | 57 | #define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) 58 | 59 | #define ASSERT_SHARED_CAPABILITY(x) \ 60 | THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) 61 | 62 | #define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) 63 | 64 | #define NO_THREAD_SAFETY_ANALYSIS \ 65 | THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) 66 | 67 | namespace benchmark { 68 | 69 | typedef std::condition_variable Condition; 70 | 71 | // NOTE: Wrappers for std::mutex and std::unique_lock are provided so that 72 | // we can annotate them with thread safety attributes and use the 73 | // -Wthread-safety warning with clang. The standard library types cannot be 74 | // used directly because they do not provided the required annotations. 75 | class CAPABILITY("mutex") Mutex { 76 | public: 77 | Mutex() {} 78 | 79 | void lock() ACQUIRE() { mut_.lock(); } 80 | void unlock() RELEASE() { mut_.unlock(); } 81 | std::mutex& native_handle() { return mut_; } 82 | 83 | private: 84 | std::mutex mut_; 85 | }; 86 | 87 | class SCOPED_CAPABILITY MutexLock { 88 | typedef std::unique_lock MutexLockImp; 89 | 90 | public: 91 | MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {} 92 | ~MutexLock() RELEASE() {} 93 | MutexLockImp& native_handle() { return ml_; } 94 | 95 | private: 96 | MutexLockImp ml_; 97 | }; 98 | 99 | class Barrier { 100 | public: 101 | Barrier(int num_threads) : running_threads_(num_threads) {} 102 | 103 | // Called by each thread 104 | bool wait() EXCLUDES(lock_) { 105 | bool last_thread = false; 106 | { 107 | MutexLock ml(lock_); 108 | last_thread = createBarrier(ml); 109 | } 110 | if (last_thread) phase_condition_.notify_all(); 111 | return last_thread; 112 | } 113 | 114 | void removeThread() EXCLUDES(lock_) { 115 | MutexLock ml(lock_); 116 | --running_threads_; 117 | if (entered_ != 0) phase_condition_.notify_all(); 118 | } 119 | 120 | private: 121 | Mutex lock_; 122 | Condition phase_condition_; 123 | int running_threads_; 124 | 125 | // State for barrier management 126 | int phase_number_ = 0; 127 | int entered_ = 0; // Number of threads that have entered this barrier 128 | 129 | // Enter the barrier and wait until all other threads have also 130 | // entered the barrier. Returns iff this is the last thread to 131 | // enter the barrier. 132 | bool createBarrier(MutexLock& ml) REQUIRES(lock_) { 133 | CHECK_LT(entered_, running_threads_); 134 | entered_++; 135 | if (entered_ < running_threads_) { 136 | // Wait for all threads to enter 137 | int phase_number_cp = phase_number_; 138 | auto cb = [this, phase_number_cp]() { 139 | return this->phase_number_ > phase_number_cp || 140 | entered_ == running_threads_; // A thread has aborted in error 141 | }; 142 | phase_condition_.wait(ml.native_handle(), cb); 143 | if (phase_number_ > phase_number_cp) return false; 144 | // else (running_threads_ == entered_) and we are the last thread. 
145 | } 146 | // Last thread has reached the barrier 147 | phase_number_++; 148 | entered_ = 0; 149 | return true; 150 | } 151 | }; 152 | 153 | } // end namespace benchmark 154 | 155 | #endif // BENCHMARK_MUTEX_H_ 156 | -------------------------------------------------------------------------------- /benchmark/src/re.h: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef BENCHMARK_RE_H_ 16 | #define BENCHMARK_RE_H_ 17 | 18 | #include "internal_macros.h" 19 | 20 | #if !defined(HAVE_STD_REGEX) && \ 21 | !defined(HAVE_GNU_POSIX_REGEX) && \ 22 | !defined(HAVE_POSIX_REGEX) 23 | // No explicit regex selection; detect based on builtin hints. 24 | #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE) 25 | #define HAVE_POSIX_REGEX 1 26 | #elif __cplusplus >= 199711L 27 | #define HAVE_STD_REGEX 1 28 | #endif 29 | #endif 30 | 31 | // Prefer C regex libraries when compiling w/o exceptions so that we can 32 | // correctly report errors. 33 | #if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ 34 | defined(BENCHMARK_HAVE_STD_REGEX) && \ 35 | (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) 36 | #undef HAVE_STD_REGEX 37 | #endif 38 | 39 | #if defined(HAVE_STD_REGEX) 40 | #include 41 | #elif defined(HAVE_GNU_POSIX_REGEX) 42 | #include 43 | #elif defined(HAVE_POSIX_REGEX) 44 | #include 45 | #else 46 | #error No regular expression backend was found! 47 | #endif 48 | #include 49 | 50 | #include "check.h" 51 | 52 | namespace benchmark { 53 | 54 | // A wrapper around the POSIX regular expression API that provides automatic 55 | // cleanup 56 | class Regex { 57 | public: 58 | Regex() : init_(false) {} 59 | 60 | ~Regex(); 61 | 62 | // Compile a regular expression matcher from spec. Returns true on success. 63 | // 64 | // On failure (and if error is not nullptr), error is populated with a human 65 | // readable error message if an error occurs. 66 | bool Init(const std::string& spec, std::string* error); 67 | 68 | // Returns whether str matches the compiled regular expression. 
69 | bool Match(const std::string& str); 70 | 71 | private: 72 | bool init_; 73 | // Underlying regular expression object 74 | #if defined(HAVE_STD_REGEX) 75 | std::regex re_; 76 | #elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) 77 | regex_t re_; 78 | #else 79 | #error No regular expression backend implementation available 80 | #endif 81 | }; 82 | 83 | #if defined(HAVE_STD_REGEX) 84 | 85 | inline bool Regex::Init(const std::string& spec, std::string* error) { 86 | #ifdef BENCHMARK_HAS_NO_EXCEPTIONS 87 | ((void)error); // suppress unused warning 88 | #else 89 | try { 90 | #endif 91 | re_ = std::regex(spec, std::regex_constants::extended); 92 | init_ = true; 93 | #ifndef BENCHMARK_HAS_NO_EXCEPTIONS 94 | } catch (const std::regex_error& e) { 95 | if (error) { 96 | *error = e.what(); 97 | } 98 | } 99 | #endif 100 | return init_; 101 | } 102 | 103 | inline Regex::~Regex() {} 104 | 105 | inline bool Regex::Match(const std::string& str) { 106 | if (!init_) { 107 | return false; 108 | } 109 | return std::regex_search(str, re_); 110 | } 111 | 112 | #else 113 | inline bool Regex::Init(const std::string& spec, std::string* error) { 114 | int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); 115 | if (ec != 0) { 116 | if (error) { 117 | size_t needed = regerror(ec, &re_, nullptr, 0); 118 | char* errbuf = new char[needed]; 119 | regerror(ec, &re_, errbuf, needed); 120 | 121 | // regerror returns the number of bytes necessary to null terminate 122 | // the string, so we move that when assigning to error. 123 | CHECK_NE(needed, 0); 124 | error->assign(errbuf, needed - 1); 125 | 126 | delete[] errbuf; 127 | } 128 | 129 | return false; 130 | } 131 | 132 | init_ = true; 133 | return true; 134 | } 135 | 136 | inline Regex::~Regex() { 137 | if (init_) { 138 | regfree(&re_); 139 | } 140 | } 141 | 142 | inline bool Regex::Match(const std::string& str) { 143 | if (!init_) { 144 | return false; 145 | } 146 | return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; 147 | } 148 | #endif 149 | 150 | } // end namespace benchmark 151 | 152 | #endif // BENCHMARK_RE_H_ 153 | -------------------------------------------------------------------------------- /benchmark/src/reporter.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
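A usage sketch for the `Regex` wrapper defined in `re.h` above. It assumes the library headers and one configured regex backend are available when built inside the project; the pattern and input strings are illustrative only.

```c++
#include <iostream>
#include <string>

#include "re.h"  // the wrapper shown above

int main() {
  benchmark::Regex re;
  std::string err;
  // Init() compiles the pattern (POSIX extended syntax in both backends) and
  // reports a human-readable message on failure.
  if (!re.Init("BM_.*", &err)) {
    std::cerr << "bad regex: " << err << "\n";
    return 1;
  }
  // Match() answers whether the string matches the compiled expression.
  std::cout << re.Match("BM_spin_empty/8") << "\n";  // 1
  std::cout << re.Match("SomethingElse") << "\n";    // 0
  return 0;
}
```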
14 | 15 | #include "benchmark/benchmark.h" 16 | #include "timers.h" 17 | 18 | #include 19 | 20 | #include 21 | #include 22 | #include 23 | 24 | #include "check.h" 25 | 26 | namespace benchmark { 27 | 28 | BenchmarkReporter::BenchmarkReporter() 29 | : output_stream_(&std::cout), error_stream_(&std::cerr) {} 30 | 31 | BenchmarkReporter::~BenchmarkReporter() {} 32 | 33 | void BenchmarkReporter::PrintBasicContext(std::ostream *out, 34 | Context const &context) { 35 | CHECK(out) << "cannot be null"; 36 | auto &Out = *out; 37 | 38 | Out << LocalDateTimeString() << "\n"; 39 | 40 | if (context.executable_name) 41 | Out << "Running " << context.executable_name << "\n"; 42 | 43 | const CPUInfo &info = context.cpu_info; 44 | Out << "Run on (" << info.num_cpus << " X " 45 | << (info.cycles_per_second / 1000000.0) << " MHz CPU " 46 | << ((info.num_cpus > 1) ? "s" : "") << ")\n"; 47 | if (info.caches.size() != 0) { 48 | Out << "CPU Caches:\n"; 49 | for (auto &CInfo : info.caches) { 50 | Out << " L" << CInfo.level << " " << CInfo.type << " " 51 | << (CInfo.size / 1000) << "K"; 52 | if (CInfo.num_sharing != 0) 53 | Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; 54 | Out << "\n"; 55 | } 56 | } 57 | 58 | if (info.scaling_enabled) { 59 | Out << "***WARNING*** CPU scaling is enabled, the benchmark " 60 | "real time measurements may be noisy and will incur extra " 61 | "overhead.\n"; 62 | } 63 | 64 | #ifndef NDEBUG 65 | Out << "***WARNING*** Library was built as DEBUG. Timings may be " 66 | "affected.\n"; 67 | #endif 68 | } 69 | 70 | // No initializer because it's already initialized to NULL. 71 | const char* BenchmarkReporter::Context::executable_name; 72 | 73 | BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {} 74 | 75 | double BenchmarkReporter::Run::GetAdjustedRealTime() const { 76 | double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); 77 | if (iterations != 0) new_time /= static_cast(iterations); 78 | return new_time; 79 | } 80 | 81 | double BenchmarkReporter::Run::GetAdjustedCPUTime() const { 82 | double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); 83 | if (iterations != 0) new_time /= static_cast(iterations); 84 | return new_time; 85 | } 86 | 87 | } // end namespace benchmark 88 | -------------------------------------------------------------------------------- /benchmark/src/sleep.cc: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All rights reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #include "sleep.h" 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include "internal_macros.h" 22 | 23 | #ifdef BENCHMARK_OS_WINDOWS 24 | #include 25 | #endif 26 | 27 | namespace benchmark { 28 | #ifdef BENCHMARK_OS_WINDOWS 29 | // Window's Sleep takes milliseconds argument. 
30 | void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } 31 | void SleepForSeconds(double seconds) { 32 | SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); 33 | } 34 | #else // BENCHMARK_OS_WINDOWS 35 | void SleepForMicroseconds(int microseconds) { 36 | struct timespec sleep_time; 37 | sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; 38 | sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; 39 | while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) 40 | ; // Ignore signals and wait for the full interval to elapse. 41 | } 42 | 43 | void SleepForMilliseconds(int milliseconds) { 44 | SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); 45 | } 46 | 47 | void SleepForSeconds(double seconds) { 48 | SleepForMicroseconds(static_cast(seconds * kNumMicrosPerSecond)); 49 | } 50 | #endif // BENCHMARK_OS_WINDOWS 51 | } // end namespace benchmark 52 | -------------------------------------------------------------------------------- /benchmark/src/sleep.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_SLEEP_H_ 2 | #define BENCHMARK_SLEEP_H_ 3 | 4 | namespace benchmark { 5 | const int kNumMillisPerSecond = 1000; 6 | const int kNumMicrosPerMilli = 1000; 7 | const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; 8 | const int kNumNanosPerMicro = 1000; 9 | const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond; 10 | 11 | void SleepForMilliseconds(int milliseconds); 12 | void SleepForSeconds(double seconds); 13 | } // end namespace benchmark 14 | 15 | #endif // BENCHMARK_SLEEP_H_ 16 | -------------------------------------------------------------------------------- /benchmark/src/statistics.h: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Ismael Jimenez Martinez. All rights reserved. 2 | // Copyright 2017 Roman Lebedev. All rights reserved. 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | #ifndef STATISTICS_H_ 17 | #define STATISTICS_H_ 18 | 19 | #include 20 | 21 | #include "benchmark/benchmark.h" 22 | 23 | namespace benchmark { 24 | 25 | // Return a vector containing the mean, median and standard devation information 26 | // (and any user-specified info) for the specified list of reports. 
If 'reports' 27 | // contains less than two non-errored runs an empty vector is returned 28 | std::vector ComputeStats( 29 | const std::vector& reports); 30 | 31 | double StatisticsMean(const std::vector& v); 32 | double StatisticsMedian(const std::vector& v); 33 | double StatisticsStdDev(const std::vector& v); 34 | 35 | } // end namespace benchmark 36 | 37 | #endif // STATISTICS_H_ 38 | -------------------------------------------------------------------------------- /benchmark/src/string_util.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_STRING_UTIL_H_ 2 | #define BENCHMARK_STRING_UTIL_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include "internal_macros.h" 8 | 9 | namespace benchmark { 10 | 11 | void AppendHumanReadable(int n, std::string* str); 12 | 13 | std::string HumanReadableNumber(double n, double one_k = 1024.0); 14 | 15 | std::string StrFormat(const char* format, ...); 16 | 17 | inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { 18 | return out; 19 | } 20 | 21 | template 22 | inline std::ostream& StrCatImp(std::ostream& out, First&& f, 23 | Rest&&... rest) { 24 | out << std::forward(f); 25 | return StrCatImp(out, std::forward(rest)...); 26 | } 27 | 28 | template 29 | inline std::string StrCat(Args&&... args) { 30 | std::ostringstream ss; 31 | StrCatImp(ss, std::forward(args)...); 32 | return ss.str(); 33 | } 34 | 35 | void ReplaceAll(std::string* str, const std::string& from, 36 | const std::string& to); 37 | 38 | } // end namespace benchmark 39 | 40 | #endif // BENCHMARK_STRING_UTIL_H_ 41 | -------------------------------------------------------------------------------- /benchmark/src/thread_manager.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_THREAD_MANAGER_H 2 | #define BENCHMARK_THREAD_MANAGER_H 3 | 4 | #include 5 | 6 | #include "benchmark/benchmark.h" 7 | #include "mutex.h" 8 | 9 | namespace benchmark { 10 | namespace internal { 11 | 12 | class ThreadManager { 13 | public: 14 | ThreadManager(int num_threads) 15 | : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} 16 | 17 | Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { 18 | return benchmark_mutex_; 19 | } 20 | 21 | bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { 22 | return start_stop_barrier_.wait(); 23 | } 24 | 25 | void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { 26 | start_stop_barrier_.removeThread(); 27 | if (--alive_threads_ == 0) { 28 | MutexLock lock(end_cond_mutex_); 29 | end_condition_.notify_all(); 30 | } 31 | } 32 | 33 | void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { 34 | MutexLock lock(end_cond_mutex_); 35 | end_condition_.wait(lock.native_handle(), 36 | [this]() { return alive_threads_ == 0; }); 37 | } 38 | 39 | public: 40 | struct Result { 41 | int64_t iterations = 0; 42 | double real_time_used = 0; 43 | double cpu_time_used = 0; 44 | double manual_time_used = 0; 45 | int64_t bytes_processed = 0; 46 | int64_t items_processed = 0; 47 | int64_t complexity_n = 0; 48 | std::string report_label_; 49 | std::string error_message_; 50 | bool has_error_ = false; 51 | UserCounters counters; 52 | }; 53 | GUARDED_BY(GetBenchmarkMutex()) Result results; 54 | 55 | private: 56 | mutable Mutex benchmark_mutex_; 57 | std::atomic alive_threads_; 58 | Barrier start_stop_barrier_; 59 | Mutex end_cond_mutex_; 60 | Condition end_condition_; 61 | }; 62 | 63 | } // namespace internal 64 | } // namespace benchmark 65 | 66 | #endif // 
BENCHMARK_THREAD_MANAGER_H 67 | -------------------------------------------------------------------------------- /benchmark/src/thread_timer.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_THREAD_TIMER_H 2 | #define BENCHMARK_THREAD_TIMER_H 3 | 4 | #include "check.h" 5 | #include "timers.h" 6 | 7 | namespace benchmark { 8 | namespace internal { 9 | 10 | class ThreadTimer { 11 | public: 12 | ThreadTimer() = default; 13 | 14 | // Called by each thread 15 | void StartTimer() { 16 | running_ = true; 17 | start_real_time_ = ChronoClockNow(); 18 | start_cpu_time_ = ThreadCPUUsage(); 19 | } 20 | 21 | // Called by each thread 22 | void StopTimer() { 23 | CHECK(running_); 24 | running_ = false; 25 | real_time_used_ += ChronoClockNow() - start_real_time_; 26 | // Floating point error can result in the subtraction producing a negative 27 | // time. Guard against that. 28 | cpu_time_used_ += std::max(ThreadCPUUsage() - start_cpu_time_, 0); 29 | } 30 | 31 | // Called by each thread 32 | void SetIterationTime(double seconds) { manual_time_used_ += seconds; } 33 | 34 | bool running() const { return running_; } 35 | 36 | // REQUIRES: timer is not running 37 | double real_time_used() { 38 | CHECK(!running_); 39 | return real_time_used_; 40 | } 41 | 42 | // REQUIRES: timer is not running 43 | double cpu_time_used() { 44 | CHECK(!running_); 45 | return cpu_time_used_; 46 | } 47 | 48 | // REQUIRES: timer is not running 49 | double manual_time_used() { 50 | CHECK(!running_); 51 | return manual_time_used_; 52 | } 53 | 54 | private: 55 | bool running_ = false; // Is the timer running 56 | double start_real_time_ = 0; // If running_ 57 | double start_cpu_time_ = 0; // If running_ 58 | 59 | // Accumulated time so far (does not contain current slice if running_) 60 | double real_time_used_ = 0; 61 | double cpu_time_used_ = 0; 62 | // Manually set iteration time. User sets this with SetIterationTime(seconds). 
63 | double manual_time_used_ = 0; 64 | }; 65 | 66 | } // namespace internal 67 | } // namespace benchmark 68 | 69 | #endif // BENCHMARK_THREAD_TIMER_H 70 | -------------------------------------------------------------------------------- /benchmark/src/timers.h: -------------------------------------------------------------------------------- 1 | #ifndef BENCHMARK_TIMERS_H 2 | #define BENCHMARK_TIMERS_H 3 | 4 | #include 5 | #include 6 | 7 | namespace benchmark { 8 | 9 | // Return the CPU usage of the current process 10 | double ProcessCPUUsage(); 11 | 12 | // Return the CPU usage of the children of the current process 13 | double ChildrenCPUUsage(); 14 | 15 | // Return the CPU usage of the current thread 16 | double ThreadCPUUsage(); 17 | 18 | #if defined(HAVE_STEADY_CLOCK) 19 | template 20 | struct ChooseSteadyClock { 21 | typedef std::chrono::high_resolution_clock type; 22 | }; 23 | 24 | template <> 25 | struct ChooseSteadyClock { 26 | typedef std::chrono::steady_clock type; 27 | }; 28 | #endif 29 | 30 | struct ChooseClockType { 31 | #if defined(HAVE_STEADY_CLOCK) 32 | typedef ChooseSteadyClock<>::type type; 33 | #else 34 | typedef std::chrono::high_resolution_clock type; 35 | #endif 36 | }; 37 | 38 | inline double ChronoClockNow() { 39 | typedef ChooseClockType::type ClockType; 40 | using FpSeconds = std::chrono::duration; 41 | return FpSeconds(ClockType::now().time_since_epoch()).count(); 42 | } 43 | 44 | std::string LocalDateTimeString(); 45 | 46 | } // end namespace benchmark 47 | 48 | #endif // BENCHMARK_TIMERS_H 49 | -------------------------------------------------------------------------------- /benchmark/test/AssemblyTests.cmake: -------------------------------------------------------------------------------- 1 | 2 | 3 | set(ASM_TEST_FLAGS "") 4 | check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) 5 | if (BENCHMARK_HAS_O3_FLAG) 6 | list(APPEND ASM_TEST_FLAGS -O3) 7 | endif() 8 | 9 | check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG) 10 | if (BENCHMARK_HAS_G0_FLAG) 11 | list(APPEND ASM_TEST_FLAGS -g0) 12 | endif() 13 | 14 | check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) 15 | if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG) 16 | list(APPEND ASM_TEST_FLAGS -fno-stack-protector) 17 | endif() 18 | 19 | split_list(ASM_TEST_FLAGS) 20 | string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER) 21 | 22 | macro(add_filecheck_test name) 23 | cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV}) 24 | add_library(${name} OBJECT ${name}.cc) 25 | set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}") 26 | set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s") 27 | add_custom_target(copy_${name} ALL 28 | COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py 29 | $ 30 | ${ASM_OUTPUT_FILE} 31 | BYPRODUCTS ${ASM_OUTPUT_FILE}) 32 | add_dependencies(copy_${name} ${name}) 33 | if (NOT ARG_CHECK_PREFIXES) 34 | set(ARG_CHECK_PREFIXES "CHECK") 35 | endif() 36 | foreach(prefix ${ARG_CHECK_PREFIXES}) 37 | add_test(NAME run_${name}_${prefix} 38 | COMMAND 39 | ${LLVM_FILECHECK_EXE} ${name}.cc 40 | --input-file=${ASM_OUTPUT_FILE} 41 | --check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER} 42 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) 43 | endforeach() 44 | endmacro() 45 | 46 | -------------------------------------------------------------------------------- /benchmark/test/BUILD: -------------------------------------------------------------------------------- 1 | TEST_COPTS = [ 2 | "-pedantic", 3 | "-pedantic-errors", 4 | "-std=c++11", 5 | 
"-Wall", 6 | "-Wextra", 7 | "-Wshadow", 8 | # "-Wshorten-64-to-32", 9 | "-Wfloat-equal", 10 | "-fstrict-aliasing", 11 | ] 12 | 13 | PER_SRC_COPTS = ({ 14 | "cxx03_test.cc": ["-std=c++03"], 15 | # Some of the issues with DoNotOptimize only occur when optimization is enabled 16 | "donotoptimize_test.cc": ["-O3"], 17 | }) 18 | 19 | 20 | TEST_ARGS = ["--benchmark_min_time=0.01"] 21 | 22 | PER_SRC_TEST_ARGS = ({ 23 | "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"], 24 | }) 25 | 26 | cc_library( 27 | name = "output_test_helper", 28 | testonly = 1, 29 | srcs = ["output_test_helper.cc"], 30 | hdrs = ["output_test.h"], 31 | copts = TEST_COPTS, 32 | deps = [ 33 | "//:benchmark", 34 | "//:benchmark_internal_headers", 35 | ], 36 | ) 37 | 38 | [ 39 | cc_test( 40 | name = test_src[:-len(".cc")], 41 | size = "small", 42 | srcs = [test_src], 43 | args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []), 44 | copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []), 45 | deps = [ 46 | ":output_test_helper", 47 | "//:benchmark", 48 | "//:benchmark_internal_headers", 49 | "@com_google_googletest//:gtest", 50 | ] + ( 51 | ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else [] 52 | ), 53 | # FIXME: Add support for assembly tests to bazel. 54 | # See Issue #556 55 | # https://github.com/google/benchmark/issues/556 56 | ) for test_src in glob(["*test.cc"], exclude = ["*_assembly_test.cc"]) 57 | ] 58 | -------------------------------------------------------------------------------- /benchmark/test/basic_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #include "benchmark/benchmark.h" 3 | 4 | #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) 5 | 6 | void BM_empty(benchmark::State& state) { 7 | for (auto _ : state) { 8 | benchmark::DoNotOptimize(state.iterations()); 9 | } 10 | } 11 | BENCHMARK(BM_empty); 12 | BENCHMARK(BM_empty)->ThreadPerCpu(); 13 | 14 | void BM_spin_empty(benchmark::State& state) { 15 | for (auto _ : state) { 16 | for (int x = 0; x < state.range(0); ++x) { 17 | benchmark::DoNotOptimize(x); 18 | } 19 | } 20 | } 21 | BASIC_BENCHMARK_TEST(BM_spin_empty); 22 | BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu(); 23 | 24 | void BM_spin_pause_before(benchmark::State& state) { 25 | for (int i = 0; i < state.range(0); ++i) { 26 | benchmark::DoNotOptimize(i); 27 | } 28 | for (auto _ : state) { 29 | for (int i = 0; i < state.range(0); ++i) { 30 | benchmark::DoNotOptimize(i); 31 | } 32 | } 33 | } 34 | BASIC_BENCHMARK_TEST(BM_spin_pause_before); 35 | BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu(); 36 | 37 | void BM_spin_pause_during(benchmark::State& state) { 38 | for (auto _ : state) { 39 | state.PauseTiming(); 40 | for (int i = 0; i < state.range(0); ++i) { 41 | benchmark::DoNotOptimize(i); 42 | } 43 | state.ResumeTiming(); 44 | for (int i = 0; i < state.range(0); ++i) { 45 | benchmark::DoNotOptimize(i); 46 | } 47 | } 48 | } 49 | BASIC_BENCHMARK_TEST(BM_spin_pause_during); 50 | BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu(); 51 | 52 | void BM_pause_during(benchmark::State& state) { 53 | for (auto _ : state) { 54 | state.PauseTiming(); 55 | state.ResumeTiming(); 56 | } 57 | } 58 | BENCHMARK(BM_pause_during); 59 | BENCHMARK(BM_pause_during)->ThreadPerCpu(); 60 | BENCHMARK(BM_pause_during)->UseRealTime(); 61 | BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu(); 62 | 63 | void BM_spin_pause_after(benchmark::State& state) { 64 | for (auto _ : state) { 
65 | for (int i = 0; i < state.range(0); ++i) { 66 | benchmark::DoNotOptimize(i); 67 | } 68 | } 69 | for (int i = 0; i < state.range(0); ++i) { 70 | benchmark::DoNotOptimize(i); 71 | } 72 | } 73 | BASIC_BENCHMARK_TEST(BM_spin_pause_after); 74 | BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu(); 75 | 76 | void BM_spin_pause_before_and_after(benchmark::State& state) { 77 | for (int i = 0; i < state.range(0); ++i) { 78 | benchmark::DoNotOptimize(i); 79 | } 80 | for (auto _ : state) { 81 | for (int i = 0; i < state.range(0); ++i) { 82 | benchmark::DoNotOptimize(i); 83 | } 84 | } 85 | for (int i = 0; i < state.range(0); ++i) { 86 | benchmark::DoNotOptimize(i); 87 | } 88 | } 89 | BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after); 90 | BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu(); 91 | 92 | void BM_empty_stop_start(benchmark::State& state) { 93 | for (auto _ : state) { 94 | } 95 | } 96 | BENCHMARK(BM_empty_stop_start); 97 | BENCHMARK(BM_empty_stop_start)->ThreadPerCpu(); 98 | 99 | 100 | void BM_KeepRunning(benchmark::State& state) { 101 | size_t iter_count = 0; 102 | while (state.KeepRunning()) { 103 | ++iter_count; 104 | } 105 | assert(iter_count == state.iterations()); 106 | } 107 | BENCHMARK(BM_KeepRunning); 108 | 109 | void BM_KeepRunningBatch(benchmark::State& state) { 110 | // Choose a prime batch size to avoid evenly dividing max_iterations. 111 | const size_t batch_size = 101; 112 | size_t iter_count = 0; 113 | while (state.KeepRunningBatch(batch_size)) { 114 | iter_count += batch_size; 115 | } 116 | assert(state.iterations() == iter_count); 117 | } 118 | BENCHMARK(BM_KeepRunningBatch); 119 | 120 | void BM_RangedFor(benchmark::State& state) { 121 | size_t iter_count = 0; 122 | for (auto _ : state) { 123 | ++iter_count; 124 | } 125 | assert(iter_count == state.max_iterations); 126 | } 127 | BENCHMARK(BM_RangedFor); 128 | 129 | // Ensure that StateIterator provides all the necessary typedefs required to 130 | // instantiate std::iterator_traits. 
131 | static_assert(std::is_same< 132 | typename std::iterator_traits<benchmark::State::StateIterator>::value_type, 133 | typename benchmark::State::StateIterator::value_type>::value, ""); 134 | 135 | BENCHMARK_MAIN(); 136 | -------------------------------------------------------------------------------- /benchmark/test/benchmark_gtest.cc: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "../src/benchmark_register.h" 4 | #include "gmock/gmock.h" 5 | #include "gtest/gtest.h" 6 | 7 | namespace { 8 | 9 | TEST(AddRangeTest, Simple) { 10 | std::vector<int> dst; 11 | AddRange(&dst, 1, 2, 2); 12 | EXPECT_THAT(dst, testing::ElementsAre(1, 2)); 13 | } 14 | 15 | TEST(AddRangeTest, Simple64) { 16 | std::vector<int64_t> dst; 17 | AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2); 18 | EXPECT_THAT(dst, testing::ElementsAre(1, 2)); 19 | } 20 | 21 | TEST(AddRangeTest, Advanced) { 22 | std::vector<int> dst; 23 | AddRange(&dst, 5, 15, 2); 24 | EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); 25 | } 26 | 27 | TEST(AddRangeTest, Advanced64) { 28 | std::vector<int64_t> dst; 29 | AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2); 30 | EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); 31 | } 32 | 33 | } // end namespace 34 | -------------------------------------------------------------------------------- /benchmark/test/clobber_memory_assembly_test.cc: -------------------------------------------------------------------------------- 1 | #include <benchmark/benchmark.h> 2 | 3 | #ifdef __clang__ 4 | #pragma clang diagnostic ignored "-Wreturn-type" 5 | #endif 6 | 7 | extern "C" { 8 | 9 | extern int ExternInt; 10 | extern int ExternInt2; 11 | extern int ExternInt3; 12 | 13 | } 14 | 15 | // CHECK-LABEL: test_basic: 16 | extern "C" void test_basic() { 17 | int x; 18 | benchmark::DoNotOptimize(&x); 19 | x = 101; 20 | benchmark::ClobberMemory(); 21 | // CHECK: leaq [[DEST:[^,]+]], %rax 22 | // CHECK: movl $101, [[DEST]] 23 | // CHECK: ret 24 | } 25 | 26 | // CHECK-LABEL: test_redundant_store: 27 | extern "C" void test_redundant_store() { 28 | ExternInt = 3; 29 | benchmark::ClobberMemory(); 30 | ExternInt = 51; 31 | // CHECK-DAG: ExternInt 32 | // CHECK-DAG: movl $3 33 | // CHECK: movl $51 34 | } 35 | 36 | // CHECK-LABEL: test_redundant_read: 37 | extern "C" void test_redundant_read() { 38 | int x; 39 | benchmark::DoNotOptimize(&x); 40 | x = ExternInt; 41 | benchmark::ClobberMemory(); 42 | x = ExternInt2; 43 | // CHECK: leaq [[DEST:[^,]+]], %rax 44 | // CHECK: ExternInt(%rip) 45 | // CHECK: movl %eax, [[DEST]] 46 | // CHECK-NOT: ExternInt2 47 | // CHECK: ret 48 | } 49 | 50 | // CHECK-LABEL: test_redundant_read2: 51 | extern "C" void test_redundant_read2() { 52 | int x; 53 | benchmark::DoNotOptimize(&x); 54 | x = ExternInt; 55 | benchmark::ClobberMemory(); 56 | x = ExternInt2; 57 | benchmark::ClobberMemory(); 58 | // CHECK: leaq [[DEST:[^,]+]], %rax 59 | // CHECK: ExternInt(%rip) 60 | // CHECK: movl %eax, [[DEST]] 61 | // CHECK: ExternInt2(%rip) 62 | // CHECK: movl %eax, [[DEST]] 63 | // CHECK: ret 64 | } 65 | -------------------------------------------------------------------------------- /benchmark/test/cxx03_test.cc: -------------------------------------------------------------------------------- 1 | #undef NDEBUG 2 | #include <cassert> 3 | #include <cstddef> 4 | 5 | #include "benchmark/benchmark.h" 6 | 7 | #if __cplusplus >= 201103L 8 | #error C++11 or greater detected. Should be C++03. 9 | #endif 10 | 11 | #ifdef BENCHMARK_HAS_CXX11 12 | #error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
13 | #endif 14 | 15 | void BM_empty(benchmark::State& state) { 16 | while (state.KeepRunning()) { 17 | volatile std::size_t x = state.iterations(); 18 | ((void)x); 19 | } 20 | } 21 | BENCHMARK(BM_empty); 22 | 23 | // The new C++11 interface for args/ranges requires initializer list support. 24 | // Therefore we provide the old interface to support C++03. 25 | void BM_old_arg_range_interface(benchmark::State& state) { 26 | assert((state.range(0) == 1 && state.range(1) == 2) || 27 | (state.range(0) == 5 && state.range(1) == 6)); 28 | while (state.KeepRunning()) { 29 | } 30 | } 31 | BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6); 32 | 33 | template <class T, class U> 34 | void BM_template2(benchmark::State& state) { 35 | BM_empty(state); 36 | } 37 | BENCHMARK_TEMPLATE2(BM_template2, int, long); 38 | 39 | template <class T> 40 | void BM_template1(benchmark::State& state) { 41 | BM_empty(state); 42 | } 43 | BENCHMARK_TEMPLATE(BM_template1, long); 44 | BENCHMARK_TEMPLATE1(BM_template1, int); 45 | 46 | template <class T> 47 | struct BM_Fixture : public ::benchmark::Fixture { 48 | }; 49 | 50 | BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { 51 | BM_empty(state); 52 | } 53 | BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { 54 | BM_empty(state); 55 | } 56 | 57 | void BM_counters(benchmark::State& state) { 58 | BM_empty(state); 59 | state.counters["Foo"] = 2; 60 | } 61 | BENCHMARK(BM_counters); 62 | 63 | BENCHMARK_MAIN(); 64 | -------------------------------------------------------------------------------- /benchmark/test/diagnostics_test.cc: -------------------------------------------------------------------------------- 1 | // Testing: 2 | // State::PauseTiming() 3 | // State::ResumeTiming() 4 | // Test that CHECK's within these functions diagnose when they are called 5 | // outside of the KeepRunning() loop. 6 | // 7 | // NOTE: Users should NOT include or use src/check.h. This is only done in 8 | // order to test library internals.
9 | 10 | #include <cstdlib> 11 | #include <stdexcept> 12 | 13 | #include "../src/check.h" 14 | #include "benchmark/benchmark.h" 15 | 16 | #if defined(__GNUC__) && !defined(__EXCEPTIONS) 17 | #define TEST_HAS_NO_EXCEPTIONS 18 | #endif 19 | 20 | void TestHandler() { 21 | #ifndef TEST_HAS_NO_EXCEPTIONS 22 | throw std::logic_error(""); 23 | #else 24 | std::abort(); 25 | #endif 26 | } 27 | 28 | void try_invalid_pause_resume(benchmark::State& state) { 29 | #if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS) 30 | try { 31 | state.PauseTiming(); 32 | std::abort(); 33 | } catch (std::logic_error const&) { 34 | } 35 | try { 36 | state.ResumeTiming(); 37 | std::abort(); 38 | } catch (std::logic_error const&) { 39 | } 40 | #else 41 | (void)state; // avoid unused warning 42 | #endif 43 | } 44 | 45 | void BM_diagnostic_test(benchmark::State& state) { 46 | static bool called_once = false; 47 | 48 | if (called_once == false) try_invalid_pause_resume(state); 49 | 50 | for (auto _ : state) { 51 | benchmark::DoNotOptimize(state.iterations()); 52 | } 53 | 54 | if (called_once == false) try_invalid_pause_resume(state); 55 | 56 | called_once = true; 57 | } 58 | BENCHMARK(BM_diagnostic_test); 59 | 60 | 61 | void BM_diagnostic_test_keep_running(benchmark::State& state) { 62 | static bool called_once = false; 63 | 64 | if (called_once == false) try_invalid_pause_resume(state); 65 | 66 | while(state.KeepRunning()) { 67 | benchmark::DoNotOptimize(state.iterations()); 68 | } 69 | 70 | if (called_once == false) try_invalid_pause_resume(state); 71 | 72 | called_once = true; 73 | } 74 | BENCHMARK(BM_diagnostic_test_keep_running); 75 | 76 | int main(int argc, char* argv[]) { 77 | benchmark::internal::GetAbortHandler() = &TestHandler; 78 | benchmark::Initialize(&argc, argv); 79 | benchmark::RunSpecifiedBenchmarks(); 80 | } 81 | -------------------------------------------------------------------------------- /benchmark/test/donotoptimize_assembly_test.cc: -------------------------------------------------------------------------------- 1 | #include <benchmark/benchmark.h> 2 | 3 | #ifdef __clang__ 4 | #pragma clang diagnostic ignored "-Wreturn-type" 5 | #endif 6 | 7 | extern "C" { 8 | 9 | extern int ExternInt; 10 | extern int ExternInt2; 11 | extern int ExternInt3; 12 | 13 | inline int Add42(int x) { return x + 42; } 14 | 15 | struct NotTriviallyCopyable { 16 | NotTriviallyCopyable(); 17 | explicit NotTriviallyCopyable(int x) : value(x) {} 18 | NotTriviallyCopyable(NotTriviallyCopyable const&); 19 | int value; 20 | }; 21 | 22 | struct Large { 23 | int value; 24 | int data[2]; 25 | }; 26 | 27 | } 28 | // CHECK-LABEL: test_with_rvalue: 29 | extern "C" void test_with_rvalue() { 30 | benchmark::DoNotOptimize(Add42(0)); 31 | // CHECK: movl $42, %eax 32 | // CHECK: ret 33 | } 34 | 35 | // CHECK-LABEL: test_with_large_rvalue: 36 | extern "C" void test_with_large_rvalue() { 37 | benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}}); 38 | // CHECK: ExternInt(%rip) 39 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]] 40 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 41 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 42 | // CHECK: ret 43 | } 44 | 45 | // CHECK-LABEL: test_with_non_trivial_rvalue: 46 | extern "C" void test_with_non_trivial_rvalue() { 47 | benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt)); 48 | // CHECK: mov{{l|q}} ExternInt(%rip) 49 | // CHECK: ret 50 | } 51 | 52 | // CHECK-LABEL: test_with_lvalue: 53 | extern "C" void test_with_lvalue() { 54 | int x = 101; 55 | benchmark::DoNotOptimize(x); 56 | // CHECK-GNU:
movl $101, %eax 57 | // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]]) 58 | // CHECK: ret 59 | } 60 | 61 | // CHECK-LABEL: test_with_large_lvalue: 62 | extern "C" void test_with_large_lvalue() { 63 | Large L{ExternInt, {ExternInt, ExternInt}}; 64 | benchmark::DoNotOptimize(L); 65 | // CHECK: ExternInt(%rip) 66 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 67 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 68 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 69 | // CHECK: ret 70 | } 71 | 72 | // CHECK-LABEL: test_with_non_trivial_lvalue: 73 | extern "C" void test_with_non_trivial_lvalue() { 74 | NotTriviallyCopyable NTC(ExternInt); 75 | benchmark::DoNotOptimize(NTC); 76 | // CHECK: ExternInt(%rip) 77 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 78 | // CHECK: ret 79 | } 80 | 81 | // CHECK-LABEL: test_with_const_lvalue: 82 | extern "C" void test_with_const_lvalue() { 83 | const int x = 123; 84 | benchmark::DoNotOptimize(x); 85 | // CHECK: movl $123, %eax 86 | // CHECK: ret 87 | } 88 | 89 | // CHECK-LABEL: test_with_large_const_lvalue: 90 | extern "C" void test_with_large_const_lvalue() { 91 | const Large L{ExternInt, {ExternInt, ExternInt}}; 92 | benchmark::DoNotOptimize(L); 93 | // CHECK: ExternInt(%rip) 94 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 95 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 96 | // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]]) 97 | // CHECK: ret 98 | } 99 | 100 | // CHECK-LABEL: test_with_non_trivial_const_lvalue: 101 | extern "C" void test_with_non_trivial_const_lvalue() { 102 | const NotTriviallyCopyable Obj(ExternInt); 103 | benchmark::DoNotOptimize(Obj); 104 | // CHECK: mov{{q|l}} ExternInt(%rip) 105 | // CHECK: ret 106 | } 107 | 108 | // CHECK-LABEL: test_div_by_two: 109 | extern "C" int test_div_by_two(int input) { 110 | int divisor = 2; 111 | benchmark::DoNotOptimize(divisor); 112 | return input / divisor; 113 | // CHECK: movl $2, [[DEST:.*]] 114 | // CHECK: idivl [[DEST]] 115 | // CHECK: ret 116 | } 117 | 118 | // CHECK-LABEL: test_inc_integer: 119 | extern "C" int test_inc_integer() { 120 | int x = 0; 121 | for (int i=0; i < 5; ++i) 122 | benchmark::DoNotOptimize(++x); 123 | // CHECK: movl $1, [[DEST:.*]] 124 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 125 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 126 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 127 | // CHECK: {{(addl \$1,|incl)}} [[DEST]] 128 | // CHECK-CLANG: movl [[DEST]], %eax 129 | // CHECK: ret 130 | return x; 131 | } 132 | 133 | // CHECK-LABEL: test_pointer_rvalue 134 | extern "C" void test_pointer_rvalue() { 135 | // CHECK: movl $42, [[DEST:.*]] 136 | // CHECK: leaq [[DEST]], %rax 137 | // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 138 | // CHECK: ret 139 | int x = 42; 140 | benchmark::DoNotOptimize(&x); 141 | } 142 | 143 | // CHECK-LABEL: test_pointer_const_lvalue: 144 | extern "C" void test_pointer_const_lvalue() { 145 | // CHECK: movl $42, [[DEST:.*]] 146 | // CHECK: leaq [[DEST]], %rax 147 | // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) 148 | // CHECK: ret 149 | int x = 42; 150 | int * const xp = &x; 151 | benchmark::DoNotOptimize(xp); 152 | } 153 | 154 | // CHECK-LABEL: test_pointer_lvalue: 155 | extern "C" void test_pointer_lvalue() { 156 | // CHECK: movl $42, [[DEST:.*]] 157 | // CHECK: leaq [[DEST]], %rax 158 | // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]]) 159 | // CHECK: ret 160 | int x = 42; 161 | int *xp = &x; 162 | benchmark::DoNotOptimize(xp); 163 | } 164 | -------------------------------------------------------------------------------- 
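Note on the two assembly tests above: through their CHECK comment lines they pin down that benchmark::DoNotOptimize keeps the tested value alive in the generated code and that benchmark::ClobberMemory keeps stores from being elided. For reference, a minimal usage sketch of the same two barriers inside an ordinary registered benchmark follows; the benchmark name, buffer size, and fill loop are illustrative assumptions, not code from this repository.

#include <vector>
#include <benchmark/benchmark.h>

// Hypothetical example (not part of this repo): the usual DoNotOptimize/ClobberMemory pairing.
static void BM_fill_buffer(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int> v(64);
    // Tell the compiler the buffer's address escapes, so the fill below cannot be dropped.
    benchmark::DoNotOptimize(v.data());
    for (int i = 0; i < 64; ++i) v[i] = i;
    // Treat the pending writes to v as visible side effects before the next iteration.
    benchmark::ClobberMemory();
  }
}
BENCHMARK(BM_fill_buffer);
BENCHMARK_MAIN();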
/benchmark/test/donotoptimize_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include 4 | 5 | namespace { 6 | #if defined(__GNUC__) 7 | std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); 8 | #endif 9 | std::uint64_t double_up(const std::uint64_t x) { return x * 2; } 10 | } 11 | 12 | // Using DoNotOptimize on types like BitRef seem to cause a lot of problems 13 | // with the inline assembly on both GCC and Clang. 14 | struct BitRef { 15 | int index; 16 | unsigned char &byte; 17 | 18 | public: 19 | static BitRef Make() { 20 | static unsigned char arr[2] = {}; 21 | BitRef b(1, arr[0]); 22 | return b; 23 | } 24 | private: 25 | BitRef(int i, unsigned char& b) : index(i), byte(b) {} 26 | }; 27 | 28 | int main(int, char*[]) { 29 | // this test verifies compilation of DoNotOptimize() for some types 30 | 31 | char buffer8[8] = ""; 32 | benchmark::DoNotOptimize(buffer8); 33 | 34 | char buffer20[20] = ""; 35 | benchmark::DoNotOptimize(buffer20); 36 | 37 | char buffer1024[1024] = ""; 38 | benchmark::DoNotOptimize(buffer1024); 39 | benchmark::DoNotOptimize(&buffer1024[0]); 40 | 41 | int x = 123; 42 | benchmark::DoNotOptimize(x); 43 | benchmark::DoNotOptimize(&x); 44 | benchmark::DoNotOptimize(x += 42); 45 | 46 | benchmark::DoNotOptimize(double_up(x)); 47 | 48 | // These tests are to e 49 | benchmark::DoNotOptimize(BitRef::Make()); 50 | BitRef lval = BitRef::Make(); 51 | benchmark::DoNotOptimize(lval); 52 | } 53 | -------------------------------------------------------------------------------- /benchmark/test/filter_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace { 14 | 15 | class TestReporter : public benchmark::ConsoleReporter { 16 | public: 17 | virtual bool ReportContext(const Context& context) { 18 | return ConsoleReporter::ReportContext(context); 19 | }; 20 | 21 | virtual void ReportRuns(const std::vector& report) { 22 | ++count_; 23 | ConsoleReporter::ReportRuns(report); 24 | }; 25 | 26 | TestReporter() : count_(0) {} 27 | 28 | virtual ~TestReporter() {} 29 | 30 | size_t GetCount() const { return count_; } 31 | 32 | private: 33 | mutable size_t count_; 34 | }; 35 | 36 | } // end namespace 37 | 38 | static void NoPrefix(benchmark::State& state) { 39 | for (auto _ : state) { 40 | } 41 | } 42 | BENCHMARK(NoPrefix); 43 | 44 | static void BM_Foo(benchmark::State& state) { 45 | for (auto _ : state) { 46 | } 47 | } 48 | BENCHMARK(BM_Foo); 49 | 50 | static void BM_Bar(benchmark::State& state) { 51 | for (auto _ : state) { 52 | } 53 | } 54 | BENCHMARK(BM_Bar); 55 | 56 | static void BM_FooBar(benchmark::State& state) { 57 | for (auto _ : state) { 58 | } 59 | } 60 | BENCHMARK(BM_FooBar); 61 | 62 | static void BM_FooBa(benchmark::State& state) { 63 | for (auto _ : state) { 64 | } 65 | } 66 | BENCHMARK(BM_FooBa); 67 | 68 | int main(int argc, char **argv) { 69 | bool list_only = false; 70 | for (int i = 0; i < argc; ++i) 71 | list_only |= std::string(argv[i]).find("--benchmark_list_tests") != 72 | std::string::npos; 73 | 74 | benchmark::Initialize(&argc, argv); 75 | 76 | TestReporter test_reporter; 77 | const size_t returned_count = 78 | benchmark::RunSpecifiedBenchmarks(&test_reporter); 79 | 80 | if (argc == 2) { 81 | // Make sure we ran all of the tests 82 | std::stringstream ss(argv[1]); 83 | 
size_t expected_return; 84 | ss >> expected_return; 85 | 86 | if (returned_count != expected_return) { 87 | std::cerr << "ERROR: Expected " << expected_return 88 | << " tests to match the filter but returned_count = " 89 | << returned_count << std::endl; 90 | return -1; 91 | } 92 | 93 | const size_t expected_reports = list_only ? 0 : expected_return; 94 | const size_t reports_count = test_reporter.GetCount(); 95 | if (reports_count != expected_reports) { 96 | std::cerr << "ERROR: Expected " << expected_reports 97 | << " tests to be run but reported_count = " << reports_count 98 | << std::endl; 99 | return -1; 100 | } 101 | } 102 | 103 | return 0; 104 | } 105 | -------------------------------------------------------------------------------- /benchmark/test/fixture_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #include "benchmark/benchmark.h" 3 | 4 | #include <cassert> 5 | #include <memory> 6 | 7 | class MyFixture : public ::benchmark::Fixture { 8 | public: 9 | void SetUp(const ::benchmark::State& state) { 10 | if (state.thread_index == 0) { 11 | assert(data.get() == nullptr); 12 | data.reset(new int(42)); 13 | } 14 | } 15 | 16 | void TearDown(const ::benchmark::State& state) { 17 | if (state.thread_index == 0) { 18 | assert(data.get() != nullptr); 19 | data.reset(); 20 | } 21 | } 22 | 23 | ~MyFixture() { assert(data == nullptr); } 24 | 25 | std::unique_ptr<int> data; 26 | }; 27 | 28 | BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { 29 | assert(data.get() != nullptr); 30 | assert(*data == 42); 31 | for (auto _ : st) { 32 | } 33 | } 34 | 35 | BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { 36 | if (st.thread_index == 0) { 37 | assert(data.get() != nullptr); 38 | assert(*data == 42); 39 | } 40 | for (auto _ : st) { 41 | assert(data.get() != nullptr); 42 | assert(*data == 42); 43 | } 44 | st.SetItemsProcessed(st.range(0)); 45 | } 46 | BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42); 47 | BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu(); 48 | 49 | BENCHMARK_MAIN(); 50 | -------------------------------------------------------------------------------- /benchmark/test/map_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include <cstdlib> 4 | #include <map> 5 | 6 | namespace { 7 | 8 | std::map<int, int> ConstructRandomMap(int size) { 9 | std::map<int, int> m; 10 | for (int i = 0; i < size; ++i) { 11 | m.insert(std::make_pair(std::rand() % size, std::rand() % size)); 12 | } 13 | return m; 14 | } 15 | 16 | } // namespace 17 | 18 | // Basic version. 19 | static void BM_MapLookup(benchmark::State& state) { 20 | const int size = static_cast<int>(state.range(0)); 21 | std::map<int, int> m; 22 | for (auto _ : state) { 23 | state.PauseTiming(); 24 | m = ConstructRandomMap(size); 25 | state.ResumeTiming(); 26 | for (int i = 0; i < size; ++i) { 27 | benchmark::DoNotOptimize(m.find(std::rand() % size)); 28 | } 29 | } 30 | state.SetItemsProcessed(state.iterations() * size); 31 | } 32 | BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); 33 | 34 | // Using fixtures.
35 | class MapFixture : public ::benchmark::Fixture { 36 | public: 37 | void SetUp(const ::benchmark::State& st) { 38 | m = ConstructRandomMap(static_cast<int>(st.range(0))); 39 | } 40 | 41 | void TearDown(const ::benchmark::State&) { m.clear(); } 42 | 43 | std::map<int, int> m; 44 | }; 45 | 46 | BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { 47 | const int size = static_cast<int>(state.range(0)); 48 | for (auto _ : state) { 49 | for (int i = 0; i < size; ++i) { 50 | benchmark::DoNotOptimize(m.find(std::rand() % size)); 51 | } 52 | } 53 | state.SetItemsProcessed(state.iterations() * size); 54 | } 55 | BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12); 56 | 57 | BENCHMARK_MAIN(); 58 | -------------------------------------------------------------------------------- /benchmark/test/multiple_ranges_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | 3 | #include <cassert> 4 | #include <iostream> 5 | #include <set> 6 | #include <vector> 7 | 8 | class MultipleRangesFixture : public ::benchmark::Fixture { 9 | public: 10 | MultipleRangesFixture() 11 | : expectedValues({{1, 3, 5}, 12 | {1, 3, 8}, 13 | {1, 3, 15}, 14 | {2, 3, 5}, 15 | {2, 3, 8}, 16 | {2, 3, 15}, 17 | {1, 4, 5}, 18 | {1, 4, 8}, 19 | {1, 4, 15}, 20 | {2, 4, 5}, 21 | {2, 4, 8}, 22 | {2, 4, 15}, 23 | {1, 7, 5}, 24 | {1, 7, 8}, 25 | {1, 7, 15}, 26 | {2, 7, 5}, 27 | {2, 7, 8}, 28 | {2, 7, 15}, 29 | {7, 6, 3}}) {} 30 | 31 | void SetUp(const ::benchmark::State& state) { 32 | std::vector<int64_t> ranges = {state.range(0), state.range(1), 33 | state.range(2)}; 34 | 35 | assert(expectedValues.find(ranges) != expectedValues.end()); 36 | 37 | actualValues.insert(ranges); 38 | } 39 | 40 | // NOTE: This is not TearDown as we want to check after _all_ runs are 41 | // complete. 42 | virtual ~MultipleRangesFixture() { 43 | assert(actualValues.size() == expectedValues.size()); 44 | if (actualValues.size() != expectedValues.size()) { 45 | std::cout << "EXPECTED\n"; 46 | for (auto v : expectedValues) { 47 | std::cout << "{"; 48 | for (int64_t iv : v) { 49 | std::cout << iv << ", "; 50 | } 51 | std::cout << "}\n"; 52 | } 53 | std::cout << "ACTUAL\n"; 54 | for (auto v : actualValues) { 55 | std::cout << "{"; 56 | for (int64_t iv : v) { 57 | std::cout << iv << ", "; 58 | } 59 | std::cout << "}\n"; 60 | } 61 | } 62 | } 63 | 64 | std::set<std::vector<int64_t>> expectedValues; 65 | std::set<std::vector<int64_t>> actualValues; 66 | }; 67 | 68 | BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) { 69 | for (auto _ : state) { 70 | int64_t product = state.range(0) * state.range(1) * state.range(2); 71 | for (int64_t x = 0; x < product; x++) { 72 | benchmark::DoNotOptimize(x); 73 | } 74 | } 75 | } 76 | 77 | BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty) 78 | ->RangeMultiplier(2) 79 | ->Ranges({{1, 2}, {3, 7}, {5, 15}}) 80 | ->Args({7, 6, 3}); 81 | 82 | void BM_CheckDefaultArgument(benchmark::State& state) { 83 | // Test that the 'range()' without an argument is the same as 'range(0)'.
84 | assert(state.range() == state.range(0)); 85 | assert(state.range() != state.range(1)); 86 | for (auto _ : state) { 87 | } 88 | } 89 | BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}}); 90 | 91 | static void BM_MultipleRanges(benchmark::State& st) { 92 | for (auto _ : st) { 93 | } 94 | } 95 | BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}}); 96 | 97 | BENCHMARK_MAIN(); 98 | -------------------------------------------------------------------------------- /benchmark/test/options_test.cc: -------------------------------------------------------------------------------- 1 | #include "benchmark/benchmark.h" 2 | #include <chrono> 3 | #include <thread> 4 | 5 | #if defined(NDEBUG) 6 | #undef NDEBUG 7 | #endif 8 | #include <cassert> 9 | 10 | void BM_basic(benchmark::State& state) { 11 | for (auto _ : state) { 12 | } 13 | } 14 | 15 | void BM_basic_slow(benchmark::State& state) { 16 | std::chrono::milliseconds sleep_duration(state.range(0)); 17 | for (auto _ : state) { 18 | std::this_thread::sleep_for( 19 | std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration)); 20 | } 21 | } 22 | 23 | BENCHMARK(BM_basic); 24 | BENCHMARK(BM_basic)->Arg(42); 25 | BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond); 26 | BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond); 27 | BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond); 28 | BENCHMARK(BM_basic)->Range(1, 8); 29 | BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8); 30 | BENCHMARK(BM_basic)->DenseRange(10, 15); 31 | BENCHMARK(BM_basic)->Args({42, 42}); 32 | BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}}); 33 | BENCHMARK(BM_basic)->MinTime(0.7); 34 | BENCHMARK(BM_basic)->UseRealTime(); 35 | BENCHMARK(BM_basic)->ThreadRange(2, 4); 36 | BENCHMARK(BM_basic)->ThreadPerCpu(); 37 | BENCHMARK(BM_basic)->Repetitions(3); 38 | 39 | void CustomArgs(benchmark::internal::Benchmark* b) { 40 | for (int i = 0; i < 10; ++i) { 41 | b->Arg(i); 42 | } 43 | } 44 | 45 | BENCHMARK(BM_basic)->Apply(CustomArgs); 46 | 47 | void BM_explicit_iteration_count(benchmark::State& state) { 48 | // Test that benchmarks specified with an explicit iteration count are 49 | // only run once. 50 | static bool invoked_before = false; 51 | assert(!invoked_before); 52 | invoked_before = true; 53 | 54 | // Test that the requested iteration count is respected.
55 | assert(state.max_iterations == 42); 56 | size_t actual_iterations = 0; 57 | for (auto _ : state) 58 | ++actual_iterations; 59 | assert(state.iterations() == state.max_iterations); 60 | assert(state.iterations() == 42); 61 | 62 | } 63 | BENCHMARK(BM_explicit_iteration_count)->Iterations(42); 64 | 65 | BENCHMARK_MAIN(); 66 | -------------------------------------------------------------------------------- /benchmark/test/state_assembly_test.cc: -------------------------------------------------------------------------------- 1 | #include <benchmark/benchmark.h> 2 | 3 | #ifdef __clang__ 4 | #pragma clang diagnostic ignored "-Wreturn-type" 5 | #endif 6 | 7 | extern "C" { 8 | extern int ExternInt; 9 | benchmark::State& GetState(); 10 | void Fn(); 11 | } 12 | 13 | using benchmark::State; 14 | 15 | // CHECK-LABEL: test_for_auto_loop: 16 | extern "C" int test_for_auto_loop() { 17 | State& S = GetState(); 18 | int x = 42; 19 | // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv 20 | // CHECK-NEXT: testq %rbx, %rbx 21 | // CHECK-NEXT: je [[LOOP_END:.*]] 22 | 23 | for (auto _ : S) { 24 | // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]: 25 | // CHECK-GNU-NEXT: subq $1, %rbx 26 | // CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax 27 | // CHECK-NEXT: jne .L[[LOOP_HEAD]] 28 | benchmark::DoNotOptimize(x); 29 | } 30 | // CHECK: [[LOOP_END]]: 31 | // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv 32 | 33 | // CHECK: movl $101, %eax 34 | // CHECK: ret 35 | return 101; 36 | } 37 | 38 | // CHECK-LABEL: test_while_loop: 39 | extern "C" int test_while_loop() { 40 | State& S = GetState(); 41 | int x = 42; 42 | 43 | // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]] 44 | // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]: 45 | while (S.KeepRunning()) { 46 | // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]] 47 | // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]] 48 | // CHECK: movq %[[IREG]], [[DEST:.*]] 49 | benchmark::DoNotOptimize(x); 50 | } 51 | // CHECK-DAG: movq [[DEST]], %[[IREG]] 52 | // CHECK-DAG: testq %[[IREG]], %[[IREG]] 53 | // CHECK-DAG: jne .L[[LOOP_BODY]] 54 | // CHECK-DAG: .L[[LOOP_HEADER]]: 55 | 56 | // CHECK: cmpb $0 57 | // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]] 58 | // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv 59 | 60 | // CHECK: .L[[LOOP_END]]: 61 | // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv 62 | 63 | // CHECK: movl $101, %eax 64 | // CHECK: ret 65 | return 101; 66 | } 67 | -------------------------------------------------------------------------------- /benchmark/test/statistics_gtest.cc: -------------------------------------------------------------------------------- 1 | //===---------------------------------------------------------------------===// 2 | // statistics_test - Unit tests for src/statistics.cc 3 | //===---------------------------------------------------------------------===// 4 | 5 | #include "../src/statistics.h" 6 | #include "gtest/gtest.h" 7 | 8 | namespace { 9 | TEST(StatisticsTest, Mean) { 10 | std::vector<double> Inputs; 11 | { 12 | Inputs = {42, 42, 42, 42}; 13 | double Res = benchmark::StatisticsMean(Inputs); 14 | EXPECT_DOUBLE_EQ(Res, 42.0); 15 | } 16 | { 17 | Inputs = {1, 2, 3, 4}; 18 | double Res = benchmark::StatisticsMean(Inputs); 19 | EXPECT_DOUBLE_EQ(Res, 2.5); 20 | } 21 | { 22 | Inputs = {1, 2, 5, 10, 10, 14}; 23 | double Res = benchmark::StatisticsMean(Inputs); 24 | EXPECT_DOUBLE_EQ(Res, 7.0); 25 | } 26 | } 27 | 28 | TEST(StatisticsTest, Median) { 29 | std::vector<double> Inputs; 30 | { 31 | Inputs = {42, 42, 42, 42}; 32 | double Res =
benchmark::StatisticsMedian(Inputs); 33 | EXPECT_DOUBLE_EQ(Res, 42.0); 34 | } 35 | { 36 | Inputs = {1, 2, 3, 4}; 37 | double Res = benchmark::StatisticsMedian(Inputs); 38 | EXPECT_DOUBLE_EQ(Res, 2.5); 39 | } 40 | { 41 | Inputs = {1, 2, 5, 10, 10}; 42 | double Res = benchmark::StatisticsMedian(Inputs); 43 | EXPECT_DOUBLE_EQ(Res, 5.0); 44 | } 45 | } 46 | 47 | TEST(StatisticsTest, StdDev) { 48 | std::vector<double> Inputs; 49 | { 50 | Inputs = {101, 101, 101, 101}; 51 | double Res = benchmark::StatisticsStdDev(Inputs); 52 | EXPECT_DOUBLE_EQ(Res, 0.0); 53 | } 54 | { 55 | Inputs = {1, 2, 3}; 56 | double Res = benchmark::StatisticsStdDev(Inputs); 57 | EXPECT_DOUBLE_EQ(Res, 1.0); 58 | } 59 | } 60 | 61 | } // end namespace 62 | -------------------------------------------------------------------------------- /benchmark/test/templated_fixture_test.cc: -------------------------------------------------------------------------------- 1 | 2 | #include "benchmark/benchmark.h" 3 | 4 | #include <cassert> 5 | #include <memory> 6 | 7 | template <typename T> 8 | class MyFixture : public ::benchmark::Fixture { 9 | public: 10 | MyFixture() : data(0) {} 11 | 12 | T data; 13 | }; 14 | 15 | BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) { 16 | for (auto _ : st) { 17 | data += 1; 18 | } 19 | } 20 | 21 | BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) { 22 | for (auto _ : st) { 23 | data += 1.0; 24 | } 25 | } 26 | BENCHMARK_REGISTER_F(MyFixture, Bar); 27 | 28 | BENCHMARK_MAIN(); 29 | -------------------------------------------------------------------------------- /benchmark/tools/compare_bench.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | compare_bench.py - Compare two benchmarks or their results and report the 4 | difference.
5 | """ 6 | import argparse 7 | from argparse import ArgumentParser 8 | import sys 9 | import gbench 10 | from gbench import util, report 11 | from gbench.util import * 12 | 13 | def check_inputs(in1, in2, flags): 14 | """ 15 | Perform checking on the user provided inputs and diagnose any abnormalities 16 | """ 17 | in1_kind, in1_err = classify_input_file(in1) 18 | in2_kind, in2_err = classify_input_file(in2) 19 | output_file = find_benchmark_flag('--benchmark_out=', flags) 20 | output_type = find_benchmark_flag('--benchmark_out_format=', flags) 21 | if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: 22 | print(("WARNING: '--benchmark_out=%s' will be passed to both " 23 | "benchmarks causing it to be overwritten") % output_file) 24 | if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: 25 | print("WARNING: passing --benchmark flags has no effect since both " 26 | "inputs are JSON") 27 | if output_type is not None and output_type != 'json': 28 | print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py`" 29 | " is not supported.") % output_type) 30 | sys.exit(1) 31 | 32 | 33 | def main(): 34 | parser = ArgumentParser( 35 | description='compare the results of two benchmarks') 36 | parser.add_argument( 37 | 'test1', metavar='test1', type=str, nargs=1, 38 | help='A benchmark executable or JSON output file') 39 | parser.add_argument( 40 | 'test2', metavar='test2', type=str, nargs=1, 41 | help='A benchmark executable or JSON output file') 42 | parser.add_argument( 43 | 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER, 44 | help='Arguments to pass when running benchmark executables' 45 | ) 46 | args, unknown_args = parser.parse_known_args() 47 | # Parse the command line flags 48 | test1 = args.test1[0] 49 | test2 = args.test2[0] 50 | if unknown_args: 51 | # should never happen 52 | print("Unrecognized positional argument arguments: '%s'" 53 | % unknown_args) 54 | exit(1) 55 | benchmark_options = args.benchmark_options 56 | check_inputs(test1, test2, benchmark_options) 57 | # Run the benchmarks and report the results 58 | json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options) 59 | json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options) 60 | output_lines = gbench.report.generate_difference_report(json1, json2) 61 | print('Comparing %s to %s' % (test1, test2)) 62 | for ln in output_lines: 63 | print(ln) 64 | 65 | 66 | if __name__ == '__main__': 67 | main() 68 | -------------------------------------------------------------------------------- /benchmark/tools/gbench/Inputs/test1_run1.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_SameTimes", 12 | "iterations": 1000, 13 | "real_time": 10, 14 | "cpu_time": 10, 15 | "time_unit": "ns" 16 | }, 17 | { 18 | "name": "BM_2xFaster", 19 | "iterations": 1000, 20 | "real_time": 50, 21 | "cpu_time": 50, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_2xSlower", 26 | "iterations": 1000, 27 | "real_time": 50, 28 | "cpu_time": 50, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "BM_1PercentFaster", 33 | "iterations": 1000, 34 | "real_time": 100, 35 | "cpu_time": 100, 36 | "time_unit": "ns" 37 | }, 38 | { 39 | "name": "BM_1PercentSlower", 40 | "iterations": 1000, 41 | "real_time": 100, 42 | 
"cpu_time": 100, 43 | "time_unit": "ns" 44 | }, 45 | { 46 | "name": "BM_10PercentFaster", 47 | "iterations": 1000, 48 | "real_time": 100, 49 | "cpu_time": 100, 50 | "time_unit": "ns" 51 | }, 52 | { 53 | "name": "BM_10PercentSlower", 54 | "iterations": 1000, 55 | "real_time": 100, 56 | "cpu_time": 100, 57 | "time_unit": "ns" 58 | }, 59 | { 60 | "name": "BM_100xSlower", 61 | "iterations": 1000, 62 | "real_time": 100, 63 | "cpu_time": 100, 64 | "time_unit": "ns" 65 | }, 66 | { 67 | "name": "BM_100xFaster", 68 | "iterations": 1000, 69 | "real_time": 10000, 70 | "cpu_time": 10000, 71 | "time_unit": "ns" 72 | }, 73 | { 74 | "name": "BM_10PercentCPUToTime", 75 | "iterations": 1000, 76 | "real_time": 100, 77 | "cpu_time": 100, 78 | "time_unit": "ns" 79 | }, 80 | { 81 | "name": "BM_ThirdFaster", 82 | "iterations": 1000, 83 | "real_time": 100, 84 | "cpu_time": 100, 85 | "time_unit": "ns" 86 | }, 87 | { 88 | "name": "BM_BadTimeUnit", 89 | "iterations": 1000, 90 | "real_time": 0.4, 91 | "cpu_time": 0.5, 92 | "time_unit": "s" 93 | }, 94 | { 95 | "name": "BM_DifferentTimeUnit", 96 | "iterations": 1, 97 | "real_time": 1, 98 | "cpu_time": 1, 99 | "time_unit": "s" 100 | } 101 | ] 102 | } 103 | -------------------------------------------------------------------------------- /benchmark/tools/gbench/Inputs/test1_run2.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_SameTimes", 12 | "iterations": 1000, 13 | "real_time": 10, 14 | "cpu_time": 10, 15 | "time_unit": "ns" 16 | }, 17 | { 18 | "name": "BM_2xFaster", 19 | "iterations": 1000, 20 | "real_time": 25, 21 | "cpu_time": 25, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_2xSlower", 26 | "iterations": 20833333, 27 | "real_time": 100, 28 | "cpu_time": 100, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "BM_1PercentFaster", 33 | "iterations": 1000, 34 | "real_time": 98.9999999, 35 | "cpu_time": 98.9999999, 36 | "time_unit": "ns" 37 | }, 38 | { 39 | "name": "BM_1PercentSlower", 40 | "iterations": 1000, 41 | "real_time": 100.9999999, 42 | "cpu_time": 100.9999999, 43 | "time_unit": "ns" 44 | }, 45 | { 46 | "name": "BM_10PercentFaster", 47 | "iterations": 1000, 48 | "real_time": 90, 49 | "cpu_time": 90, 50 | "time_unit": "ns" 51 | }, 52 | { 53 | "name": "BM_10PercentSlower", 54 | "iterations": 1000, 55 | "real_time": 110, 56 | "cpu_time": 110, 57 | "time_unit": "ns" 58 | }, 59 | { 60 | "name": "BM_100xSlower", 61 | "iterations": 1000, 62 | "real_time": 1.0000e+04, 63 | "cpu_time": 1.0000e+04, 64 | "time_unit": "ns" 65 | }, 66 | { 67 | "name": "BM_100xFaster", 68 | "iterations": 1000, 69 | "real_time": 100, 70 | "cpu_time": 100, 71 | "time_unit": "ns" 72 | }, 73 | { 74 | "name": "BM_10PercentCPUToTime", 75 | "iterations": 1000, 76 | "real_time": 110, 77 | "cpu_time": 90, 78 | "time_unit": "ns" 79 | }, 80 | { 81 | "name": "BM_ThirdFaster", 82 | "iterations": 1000, 83 | "real_time": 66.665, 84 | "cpu_time": 66.664, 85 | "time_unit": "ns" 86 | }, 87 | { 88 | "name": "BM_BadTimeUnit", 89 | "iterations": 1000, 90 | "real_time": 0.04, 91 | "cpu_time": 0.6, 92 | "time_unit": "s" 93 | }, 94 | { 95 | "name": "BM_DifferentTimeUnit", 96 | "iterations": 1, 97 | "real_time": 1, 98 | "cpu_time": 1, 99 | "time_unit": "ns" 100 | } 101 | ] 102 | } 103 | 
-------------------------------------------------------------------------------- /benchmark/tools/gbench/Inputs/test2_run.json: -------------------------------------------------------------------------------- 1 | { 2 | "context": { 3 | "date": "2016-08-02 17:44:46", 4 | "num_cpus": 4, 5 | "mhz_per_cpu": 4228, 6 | "cpu_scaling_enabled": false, 7 | "library_build_type": "release" 8 | }, 9 | "benchmarks": [ 10 | { 11 | "name": "BM_Hi", 12 | "iterations": 1234, 13 | "real_time": 42, 14 | "cpu_time": 24, 15 | "time_unit": "ms" 16 | }, 17 | { 18 | "name": "BM_Zero", 19 | "iterations": 1000, 20 | "real_time": 10, 21 | "cpu_time": 10, 22 | "time_unit": "ns" 23 | }, 24 | { 25 | "name": "BM_Zero/4", 26 | "iterations": 4000, 27 | "real_time": 40, 28 | "cpu_time": 40, 29 | "time_unit": "ns" 30 | }, 31 | { 32 | "name": "Prefix/BM_Zero", 33 | "iterations": 2000, 34 | "real_time": 20, 35 | "cpu_time": 20, 36 | "time_unit": "ns" 37 | }, 38 | { 39 | "name": "Prefix/BM_Zero/3", 40 | "iterations": 3000, 41 | "real_time": 30, 42 | "cpu_time": 30, 43 | "time_unit": "ns" 44 | }, 45 | { 46 | "name": "BM_One", 47 | "iterations": 5000, 48 | "real_time": 5, 49 | "cpu_time": 5, 50 | "time_unit": "ns" 51 | }, 52 | { 53 | "name": "BM_One/4", 54 | "iterations": 2000, 55 | "real_time": 20, 56 | "cpu_time": 20, 57 | "time_unit": "ns" 58 | }, 59 | { 60 | "name": "Prefix/BM_One", 61 | "iterations": 1000, 62 | "real_time": 10, 63 | "cpu_time": 10, 64 | "time_unit": "ns" 65 | }, 66 | { 67 | "name": "Prefix/BM_One/3", 68 | "iterations": 1500, 69 | "real_time": 15, 70 | "cpu_time": 15, 71 | "time_unit": "ns" 72 | }, 73 | { 74 | "name": "BM_Bye", 75 | "iterations": 5321, 76 | "real_time": 11, 77 | "cpu_time": 63, 78 | "time_unit": "ns" 79 | } 80 | ] 81 | } 82 | -------------------------------------------------------------------------------- /benchmark/tools/gbench/__init__.py: -------------------------------------------------------------------------------- 1 | """Google Benchmark tooling""" 2 | 3 | __author__ = 'Eric Fiselier' 4 | __email__ = 'eric@efcs.ca' 5 | __versioninfo__ = (0, 5, 0) 6 | __version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' 7 | 8 | __all__ = [] 9 | -------------------------------------------------------------------------------- /benchmark/tools/gbench/util.py: -------------------------------------------------------------------------------- 1 | """util.py - General utilities for running, loading, and processing benchmarks 2 | """ 3 | import json 4 | import os 5 | import tempfile 6 | import subprocess 7 | import sys 8 | 9 | # Input file type enumeration 10 | IT_Invalid = 0 11 | IT_JSON = 1 12 | IT_Executable = 2 13 | 14 | _num_magic_bytes = 2 if sys.platform.startswith('win') else 4 15 | def is_executable_file(filename): 16 | """ 17 | Return 'True' if 'filename' names a valid file which is likely 18 | an executable. A file is considered an executable if it starts with the 19 | magic bytes for a EXE, Mach O, or ELF file. 
20 | """ 21 | if not os.path.isfile(filename): 22 | return False 23 | with open(filename, mode='rb') as f: 24 | magic_bytes = f.read(_num_magic_bytes) 25 | if sys.platform == 'darwin': 26 | return magic_bytes in [ 27 | b'\xfe\xed\xfa\xce', # MH_MAGIC 28 | b'\xce\xfa\xed\xfe', # MH_CIGAM 29 | b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 30 | b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 31 | b'\xca\xfe\xba\xbe', # FAT_MAGIC 32 | b'\xbe\xba\xfe\xca' # FAT_CIGAM 33 | ] 34 | elif sys.platform.startswith('win'): 35 | return magic_bytes == b'MZ' 36 | else: 37 | return magic_bytes == b'\x7FELF' 38 | 39 | 40 | def is_json_file(filename): 41 | """ 42 | Returns 'True' if 'filename' names a valid JSON output file. 43 | 'False' otherwise. 44 | """ 45 | try: 46 | with open(filename, 'r') as f: 47 | json.load(f) 48 | return True 49 | except: 50 | pass 51 | return False 52 | 53 | 54 | def classify_input_file(filename): 55 | """ 56 | Return a tuple (type, msg) where 'type' specifies the classified type 57 | of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable 58 | string represeting the error. 59 | """ 60 | ftype = IT_Invalid 61 | err_msg = None 62 | if not os.path.exists(filename): 63 | err_msg = "'%s' does not exist" % filename 64 | elif not os.path.isfile(filename): 65 | err_msg = "'%s' does not name a file" % filename 66 | elif is_executable_file(filename): 67 | ftype = IT_Executable 68 | elif is_json_file(filename): 69 | ftype = IT_JSON 70 | else: 71 | err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename 72 | return ftype, err_msg 73 | 74 | 75 | def check_input_file(filename): 76 | """ 77 | Classify the file named by 'filename' and return the classification. 78 | If the file is classified as 'IT_Invalid' print an error message and exit 79 | the program. 80 | """ 81 | ftype, msg = classify_input_file(filename) 82 | if ftype == IT_Invalid: 83 | print("Invalid input file: %s" % msg) 84 | sys.exit(1) 85 | return ftype 86 | 87 | def find_benchmark_flag(prefix, benchmark_flags): 88 | """ 89 | Search the specified list of flags for a flag matching `` and 90 | if it is found return the arg it specifies. If specified more than once the 91 | last value is returned. If the flag is not found None is returned. 92 | """ 93 | assert prefix.startswith('--') and prefix.endswith('=') 94 | result = None 95 | for f in benchmark_flags: 96 | if f.startswith(prefix): 97 | result = f[len(prefix):] 98 | return result 99 | 100 | def remove_benchmark_flags(prefix, benchmark_flags): 101 | """ 102 | Return a new list containing the specified benchmark_flags except those 103 | with the specified prefix. 104 | """ 105 | assert prefix.startswith('--') and prefix.endswith('=') 106 | return [f for f in benchmark_flags if not f.startswith(prefix)] 107 | 108 | def load_benchmark_results(fname): 109 | """ 110 | Read benchmark output from a file and return the JSON object. 111 | REQUIRES: 'fname' names a file containing JSON benchmark output. 112 | """ 113 | with open(fname, 'r') as f: 114 | return json.load(f) 115 | 116 | 117 | def run_benchmark(exe_name, benchmark_flags): 118 | """ 119 | Run a benchmark specified by 'exe_name' with the specified 120 | 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve 121 | real time console output. 
122 | RETURNS: A JSON object representing the benchmark output 123 | """ 124 | output_name = find_benchmark_flag('--benchmark_out=', 125 | benchmark_flags) 126 | is_temp_output = False 127 | if output_name is None: 128 | is_temp_output = True 129 | thandle, output_name = tempfile.mkstemp() 130 | os.close(thandle) 131 | benchmark_flags = list(benchmark_flags) + \ 132 | ['--benchmark_out=%s' % output_name] 133 | 134 | cmd = [exe_name] + benchmark_flags 135 | print("RUNNING: %s" % ' '.join(cmd)) 136 | exitCode = subprocess.call(cmd) 137 | if exitCode != 0: 138 | print('TEST FAILED...') 139 | sys.exit(exitCode) 140 | json_res = load_benchmark_results(output_name) 141 | if is_temp_output: 142 | os.unlink(output_name) 143 | return json_res 144 | 145 | 146 | def run_or_load_benchmark(filename, benchmark_flags): 147 | """ 148 | Get the results for a specified benchmark. If 'filename' specifies 149 | an executable benchmark then the results are generated by running the 150 | benchmark. Otherwise 'filename' must name a valid JSON output file, 151 | which is loaded and the result returned. 152 | """ 153 | ftype = check_input_file(filename) 154 | if ftype == IT_JSON: 155 | return load_benchmark_results(filename) 156 | elif ftype == IT_Executable: 157 | return run_benchmark(filename, benchmark_flags) 158 | else: 159 | assert False # This branch is unreachable -------------------------------------------------------------------------------- /benchmark/tools/strip_asm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | strip_asm.py - Cleanup ASM output for the specified file 5 | """ 6 | 7 | from argparse import ArgumentParser 8 | import sys 9 | import os 10 | import re 11 | 12 | def find_used_labels(asm): 13 | found = set() 14 | label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") 15 | for l in asm.splitlines(): 16 | m = label_re.match(l) 17 | if m: 18 | found.add('.L%s' % m.group(1)) 19 | return found 20 | 21 | 22 | def normalize_labels(asm): 23 | decls = set() 24 | label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") 25 | for l in asm.splitlines(): 26 | m = label_decl.match(l) 27 | if m: 28 | decls.add(m.group(0)) 29 | if len(decls) == 0: 30 | return asm 31 | needs_dot = next(iter(decls))[0] != '.' 32 | if not needs_dot: 33 | return asm 34 | for ld in decls: 35 | asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm) 36 | return asm 37 | 38 | 39 | def transform_labels(asm): 40 | asm = normalize_labels(asm) 41 | used_decls = find_used_labels(asm) 42 | new_asm = '' 43 | label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") 44 | for l in asm.splitlines(): 45 | m = label_decl.match(l) 46 | if not m or m.group(0) in used_decls: 47 | new_asm += l 48 | new_asm += '\n' 49 | return new_asm 50 | 51 | 52 | def is_identifier(tk): 53 | if len(tk) == 0: 54 | return False 55 | first = tk[0] 56 | if not first.isalpha() and first != '_': 57 | return False 58 | for i in range(1, len(tk)): 59 | c = tk[i] 60 | if not c.isalnum() and c != '_': 61 | return False 62 | return True 63 | 64 | def process_identifiers(l): 65 | """ 66 | process_identifiers - process all identifiers and modify them to have 67 | consistent names across all platforms; specifically across ELF and MachO. 68 | For example, MachO inserts an additional understore at the beginning of 69 | names. This function removes that. 
70 | """ 71 | parts = re.split(r'([a-zA-Z0-9_]+)', l) 72 | new_line = '' 73 | for tk in parts: 74 | if is_identifier(tk): 75 | if tk.startswith('__Z'): 76 | tk = tk[1:] 77 | elif tk.startswith('_') and len(tk) > 1 and \ 78 | tk[1].isalpha() and tk[1] != 'Z': 79 | tk = tk[1:] 80 | new_line += tk 81 | return new_line 82 | 83 | 84 | def process_asm(asm): 85 | """ 86 | Strip the ASM of unwanted directives and lines 87 | """ 88 | new_contents = '' 89 | asm = transform_labels(asm) 90 | 91 | # TODO: Add more things we want to remove 92 | discard_regexes = [ 93 | re.compile("\s+\..*$"), # directive 94 | re.compile("\s*#(NO_APP|APP)$"), #inline ASM 95 | re.compile("\s*#.*$"), # comment line 96 | re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive 97 | re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), 98 | ] 99 | keep_regexes = [ 100 | 101 | ] 102 | fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") 103 | for l in asm.splitlines(): 104 | # Remove Mach-O attribute 105 | l = l.replace('@GOTPCREL', '') 106 | add_line = True 107 | for reg in discard_regexes: 108 | if reg.match(l) is not None: 109 | add_line = False 110 | break 111 | for reg in keep_regexes: 112 | if reg.match(l) is not None: 113 | add_line = True 114 | break 115 | if add_line: 116 | if fn_label_def.match(l) and len(new_contents) != 0: 117 | new_contents += '\n' 118 | l = process_identifiers(l) 119 | new_contents += l 120 | new_contents += '\n' 121 | return new_contents 122 | 123 | def main(): 124 | parser = ArgumentParser( 125 | description='generate a stripped assembly file') 126 | parser.add_argument( 127 | 'input', metavar='input', type=str, nargs=1, 128 | help='An input assembly file') 129 | parser.add_argument( 130 | 'out', metavar='output', type=str, nargs=1, 131 | help='The output file') 132 | args, unknown_args = parser.parse_known_args() 133 | input = args.input[0] 134 | output = args.out[0] 135 | if not os.path.isfile(input): 136 | print(("ERROR: input file '%s' does not exist") % input) 137 | sys.exit(1) 138 | contents = None 139 | with open(input, 'r') as f: 140 | contents = f.read() 141 | new_contents = process_asm(contents) 142 | with open(output, 'w') as f: 143 | f.write(new_contents) 144 | 145 | 146 | if __name__ == '__main__': 147 | main() 148 | 149 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 150 | # kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; 151 | # kate: indent-mode python; remove-trailing-spaces modified; 152 | -------------------------------------------------------------------------------- /image/RB_tree/RB_tree for map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/RB_tree/RB_tree for map.png -------------------------------------------------------------------------------- /image/RB_tree/multiset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/RB_tree/multiset.png -------------------------------------------------------------------------------- /image/RB_tree/nothing: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /image/RB_tree/set multiData.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/RB_tree/set multiData.png -------------------------------------------------------------------------------- /image/RB_tree/set.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/RB_tree/set.png -------------------------------------------------------------------------------- /image/algorithm/next_permutation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/next_permutation.png -------------------------------------------------------------------------------- /image/algorithm/power.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/power.gif -------------------------------------------------------------------------------- /image/algorithm/rotate BIter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/rotate BIter.png -------------------------------------------------------------------------------- /image/algorithm/rotate_FIter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/rotate_FIter.png -------------------------------------------------------------------------------- /image/algorithm/search_n.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/search_n.png -------------------------------------------------------------------------------- /image/algorithm/sort multi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/sort multi.png -------------------------------------------------------------------------------- /image/algorithm/sort random.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/sort random.png -------------------------------------------------------------------------------- /image/algorithm/sort sorted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/sort sorted.png -------------------------------------------------------------------------------- /image/algorithm/stable_sort multi-sorted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/stable_sort multi-sorted.png -------------------------------------------------------------------------------- /image/algorithm/stable_sort random.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/stable_sort random.png -------------------------------------------------------------------------------- /image/algorithm/stable_sort reverse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/algorithm/stable_sort reverse.png -------------------------------------------------------------------------------- /image/avl_tree/avl_tree random.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/avl_tree/avl_tree random.png -------------------------------------------------------------------------------- /image/avl_tree/avl_tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/avl_tree/avl_tree.png -------------------------------------------------------------------------------- /image/deque.xml: -------------------------------------------------------------------------------- 1 | UzV2zq1wL0osyPDNT0nNUTV2VTV2LsrPL4GwciucU3NyVI0MMlNUjV1UjYwMgFjVyA2HrCFY1qAgsSg1rwSLBiADYTaQg2Y1AA== -------------------------------------------------------------------------------- /image/deque/deque- int.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/deque/deque- int.png -------------------------------------------------------------------------------- /image/deque/deque-not pod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/deque/deque-not pod.png -------------------------------------------------------------------------------- /image/deque/qmj__deuqe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/deque/qmj__deuqe.png -------------------------------------------------------------------------------- /image/deque/std__deque.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/deque/std__deque.png -------------------------------------------------------------------------------- /image/hashtable/hashtable.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/hashtable/hashtable.png -------------------------------------------------------------------------------- /image/hashtable/qmj_hashtable.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/hashtable/qmj_hashtable.png -------------------------------------------------------------------------------- /image/hashtable/qmj_hashtable1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/hashtable/qmj_hashtable1.png -------------------------------------------------------------------------------- /image/hashtable/unordered_multiset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/hashtable/unordered_multiset.png -------------------------------------------------------------------------------- /image/hashtable/unordered_set multiData.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/hashtable/unordered_set multiData.png -------------------------------------------------------------------------------- /image/hashtable/unordered_set.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/hashtable/unordered_set.png -------------------------------------------------------------------------------- /image/heap/fib_heap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/heap/fib_heap.png -------------------------------------------------------------------------------- /image/heap/heap_sort.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/heap/heap_sort.png -------------------------------------------------------------------------------- /image/heap/make_heap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/heap/make_heap.png -------------------------------------------------------------------------------- /image/heap/priority_queue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/heap/priority_queue.png -------------------------------------------------------------------------------- /image/list/forward_list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/list/forward_list.png -------------------------------------------------------------------------------- /image/list/list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/list/list.png -------------------------------------------------------------------------------- /image/vector/vector.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MouJieQin/QMJSTL/25a060c9e31317dad1c36222c364904215f7115e/image/vector/vector.png -------------------------------------------------------------------------------- /test/build_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function rm_if_exit() 4 | { 5 | if [ -e $1 ] 6 | then 7 | rm $1 8 | fi 9 | 
} 10 | 11 | if [ $# -eq 1 ] 12 | then 13 | filename=`echo $1 | cut -d '.' -f 1` 14 | execfile='./'$filename 15 | rm_if_exit $execfile 16 | clang++ -std=c++14 $1 -lgtest -lgtest_main -lpthread -o $filename 17 | if [ -e $execfile ] 18 | then 19 | $execfile 20 | fi 21 | elif [ $# -eq 0 ] 22 | then 23 | rm_if_exit "./allcppfile.tmp" 24 | rm_if_exit "./build_fail.tmp" 25 | ls | grep cpp | grep test > allcppfile.tmp 26 | cat allcppfile.tmp | while read line 27 | do 28 | echo 29 | echo build and run $line ... 30 | echo 31 | filename=`echo $line | cut -d '.' -f 1` 32 | execfile='./'$filename 33 | rm_if_exit $execfile 34 | clang++ -std=c++14 $line -lgtest -lgtest_main -lpthread -o $filename 35 | if [ -e $execfile ] 36 | then 37 | $execfile 38 | else 39 | echo $line >> build_fail.tmp 40 | fi 41 | done 42 | rm allcppfile.tmp 43 | if [ -e "./build_fail.tmp" ] 44 | then 45 | echo "--------build-----error--------" 46 | cat build_fail.tmp 47 | echo "--------build-----error--------" 48 | rm build_fail.tmp 49 | fi 50 | else 51 | echo 'num of parameters error !' 52 | fi 53 | 54 | -------------------------------------------------------------------------------- /test/test_map.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/map_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_map 14 | : public Test_set_map_base< 15 | std::map, 16 | qmj::map> 17 | { 18 | }; 19 | 20 | class Test_map_int : public Test_map 21 | { 22 | }; 23 | 24 | class Test_map_string : public Test_map 25 | { 26 | }; 27 | 28 | class Test_map_pair : public Test_map, int> 29 | { 30 | }; 31 | 32 | TEST_F(Test_map_int, Test_assign) 33 | { 34 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 35 | test_assign(); 36 | } 37 | 38 | TEST_F(Test_map_int, Test_emplace_hint) 39 | { 40 | ASSERT_TRUE(load_data()) << "load data error"; 41 | test_emplace_hint(); 42 | } 43 | 44 | TEST_F(Test_map_int, Test_count) 45 | { 46 | ASSERT_TRUE(load_data()) << "load data error"; 47 | test_count(); 48 | } 49 | 50 | TEST_F(Test_map_int, Test_insert) 51 | { 52 | ASSERT_TRUE(load_data()) << "load data error"; 53 | test_insert(); 54 | } 55 | 56 | TEST_F(Test_map_int, Test_find) 57 | { 58 | ASSERT_TRUE(load_data()) << "load data error"; 59 | test_find(); 60 | } 61 | 62 | TEST_F(Test_map_int, Test_erase) 63 | { 64 | ASSERT_TRUE(load_data()) << "load data error"; 65 | test_erase(); 66 | } 67 | 68 | TEST_F(Test_map_int, Test_all) 69 | { 70 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 71 | test_assign(); 72 | test_emplace_hint(); 73 | test_erase(); 74 | test_count(); 75 | test_insert(); 76 | test_find(); 77 | } 78 | 79 | TEST_F(Test_map_string, Test_assign) 80 | { 81 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 82 | test_assign(); 83 | } 84 | 85 | TEST_F(Test_map_string, Test_emplace_hint) 86 | { 87 | ASSERT_TRUE(load_data()) << "load data error"; 88 | test_emplace_hint(); 89 | } 90 | 91 | TEST_F(Test_map_string, Test_count) 92 | { 93 | ASSERT_TRUE(load_data()) << "load data error"; 94 | test_count(); 95 | } 96 | 97 | TEST_F(Test_map_string, Test_insert) 98 | { 99 | ASSERT_TRUE(load_data()) << "load data error"; 100 | test_insert(); 101 | } 102 | 103 | TEST_F(Test_map_string, Test_find) 104 | { 105 | ASSERT_TRUE(load_data()) << "load data error"; 106 | test_find(); 107 | } 108 | 109 | TEST_F(Test_map_string, Test_erase) 110 | { 111 | ASSERT_TRUE(load_data()) << 
"load data error"; 112 | test_erase(); 113 | } 114 | 115 | TEST_F(Test_map_string, Test_all) 116 | { 117 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 118 | test_assign(); 119 | test_emplace_hint(); 120 | test_erase(); 121 | test_count(); 122 | test_insert(); 123 | test_find(); 124 | } 125 | 126 | TEST_F(Test_map_pair, Test_assign) 127 | { 128 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 129 | test_assign(); 130 | } 131 | 132 | TEST_F(Test_map_pair, Test_emplace_hint) 133 | { 134 | ASSERT_TRUE(load_data()) << "load data error"; 135 | test_emplace_hint(); 136 | } 137 | 138 | TEST_F(Test_map_pair, Test_count) 139 | { 140 | ASSERT_TRUE(load_data()) << "load data error"; 141 | test_count(); 142 | } 143 | 144 | TEST_F(Test_map_pair, Test_insert) 145 | { 146 | ASSERT_TRUE(load_data()) << "load data error"; 147 | test_insert(); 148 | } 149 | 150 | TEST_F(Test_map_pair, Test_find) 151 | { 152 | ASSERT_TRUE(load_data()) << "load data error"; 153 | test_find(); 154 | } 155 | 156 | TEST_F(Test_map_pair, Test_erase) 157 | { 158 | ASSERT_TRUE(load_data()) << "load data error"; 159 | test_erase(); 160 | } 161 | 162 | TEST_F(Test_map_pair, Test_all) 163 | { 164 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 165 | test_assign(); 166 | test_emplace_hint(); 167 | test_erase(); 168 | test_count(); 169 | test_insert(); 170 | test_find(); 171 | } 172 | 173 | } // namespace test 174 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_multimap.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/map_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_multimap 14 | : public Test_set_map_base< 15 | std::multimap, 16 | qmj::multimap> 17 | { 18 | }; 19 | 20 | class Test_multimap_int : public Test_multimap 21 | { 22 | }; 23 | 24 | class Test_multimap_string : public Test_multimap 25 | { 26 | }; 27 | 28 | class Test_multimap_pair : public Test_multimap, int> 29 | { 30 | }; 31 | 32 | TEST_F(Test_multimap_int, Test_assign) 33 | { 34 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 35 | test_assign(); 36 | } 37 | 38 | TEST_F(Test_multimap_int, Test_emplace_hint) 39 | { 40 | ASSERT_TRUE(load_data()) << "load data error"; 41 | test_emplace_hint(); 42 | } 43 | 44 | TEST_F(Test_multimap_int, Test_count) 45 | { 46 | ASSERT_TRUE(load_data()) << "load data error"; 47 | test_count(); 48 | } 49 | 50 | TEST_F(Test_multimap_int, Test_insert) 51 | { 52 | ASSERT_TRUE(load_data()) << "load data error"; 53 | test_insert(); 54 | } 55 | 56 | TEST_F(Test_multimap_int, Test_erase) 57 | { 58 | ASSERT_TRUE(load_data()) << "load data error"; 59 | test_erase(); 60 | } 61 | 62 | TEST_F(Test_multimap_int, Test_all) 63 | { 64 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 65 | test_assign(); 66 | test_emplace_hint(); 67 | test_erase(); 68 | test_count(); 69 | test_insert(); 70 | } 71 | 72 | TEST_F(Test_multimap_string, Test_assign) 73 | { 74 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 75 | test_assign(); 76 | } 77 | 78 | TEST_F(Test_multimap_string, Test_emplace_hint) 79 | { 80 | ASSERT_TRUE(load_data()) << "load data error"; 81 | test_emplace_hint(); 82 | } 83 | 84 | TEST_F(Test_multimap_string, Test_count) 85 | { 86 | ASSERT_TRUE(load_data()) << "load data error"; 87 | test_count(); 88 | } 89 | 90 | 
TEST_F(Test_multimap_string, Test_insert) 91 | { 92 | ASSERT_TRUE(load_data()) << "load data error"; 93 | test_insert(); 94 | } 95 | 96 | TEST_F(Test_multimap_string, Test_erase) 97 | { 98 | ASSERT_TRUE(load_data()) << "load data error"; 99 | test_erase(); 100 | } 101 | 102 | TEST_F(Test_multimap_string, Test_all) 103 | { 104 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 105 | test_assign(); 106 | test_emplace_hint(); 107 | test_erase(); 108 | test_count(); 109 | test_insert(); 110 | } 111 | 112 | TEST_F(Test_multimap_pair, Test_assign) 113 | { 114 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 115 | test_assign(); 116 | } 117 | 118 | TEST_F(Test_multimap_pair, Test_emplace_hint) 119 | { 120 | ASSERT_TRUE(load_data()) << "load data error"; 121 | test_emplace_hint(); 122 | } 123 | 124 | TEST_F(Test_multimap_pair, Test_count) 125 | { 126 | ASSERT_TRUE(load_data()) << "load data error"; 127 | test_count(); 128 | } 129 | 130 | TEST_F(Test_multimap_pair, Test_insert) 131 | { 132 | ASSERT_TRUE(load_data()) << "load data error"; 133 | test_insert(); 134 | } 135 | 136 | TEST_F(Test_multimap_pair, Test_erase) 137 | { 138 | ASSERT_TRUE(load_data()) << "load data error"; 139 | test_erase(); 140 | } 141 | 142 | TEST_F(Test_multimap_pair, Test_all) 143 | { 144 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 145 | test_assign(); 146 | test_emplace_hint(); 147 | test_erase(); 148 | test_count(); 149 | test_insert(); 150 | } 151 | 152 | } // namespace test 153 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_multiset.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/set_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_multiset 14 | : public Test_set_map_base< 15 | std::multiset, qmj::multiset> 16 | { 17 | }; 18 | 19 | class Test_multiset_int : public Test_multiset 20 | { 21 | }; 22 | 23 | class Test_multiset_string : public Test_multiset 24 | { 25 | }; 26 | 27 | class Test_multiset_pair : public Test_multiset> 28 | { 29 | }; 30 | 31 | TEST_F(Test_multiset_int, Test_assign) 32 | { 33 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 34 | test_assign(); 35 | } 36 | 37 | TEST_F(Test_multiset_int, Test_emplace_hint) 38 | { 39 | ASSERT_TRUE(load_data()) << "load data error"; 40 | test_emplace_hint(); 41 | } 42 | 43 | TEST_F(Test_multiset_int, Test_equal_range) 44 | { 45 | ASSERT_TRUE(load_data()) << "load data error"; 46 | test_equal_range(); 47 | } 48 | 49 | TEST_F(Test_multiset_int, Test_count) 50 | { 51 | ASSERT_TRUE(load_data()) << "load data error"; 52 | test_count(); 53 | } 54 | 55 | TEST_F(Test_multiset_int, Test_insert) 56 | { 57 | ASSERT_TRUE(load_data()) << "load data error"; 58 | test_insert(); 59 | } 60 | 61 | TEST_F(Test_multiset_int, Test_find) 62 | { 63 | ASSERT_TRUE(load_data()) << "load data error"; 64 | test_find(); 65 | } 66 | 67 | TEST_F(Test_multiset_int, Test_erase) 68 | { 69 | ASSERT_TRUE(load_data()) << "load data error"; 70 | test_erase(); 71 | } 72 | 73 | TEST_F(Test_multiset_int, Test_all) 74 | { 75 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 76 | test_assign(); 77 | test_emplace_hint(); 78 | test_equal_range(); 79 | test_erase(); 80 | test_count(); 81 | test_insert(); 82 | test_find(); 83 | } 84 | 85 | TEST_F(Test_multiset_string, Test_assign) 86 | { 87 | 
ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 88 | test_assign(); 89 | } 90 | 91 | TEST_F(Test_multiset_string, Test_emplace_hint) 92 | { 93 | ASSERT_TRUE(load_data()) << "load data error"; 94 | test_emplace_hint(); 95 | } 96 | 97 | TEST_F(Test_multiset_string, Test_equal_range) 98 | { 99 | ASSERT_TRUE(load_data()) << "load data error"; 100 | test_equal_range(); 101 | } 102 | 103 | TEST_F(Test_multiset_string, Test_count) 104 | { 105 | ASSERT_TRUE(load_data()) << "load data error"; 106 | test_count(); 107 | } 108 | 109 | TEST_F(Test_multiset_string, Test_insert) 110 | { 111 | ASSERT_TRUE(load_data()) << "load data error"; 112 | test_insert(); 113 | } 114 | 115 | TEST_F(Test_multiset_string, Test_find) 116 | { 117 | ASSERT_TRUE(load_data()) << "load data error"; 118 | test_find(); 119 | } 120 | 121 | TEST_F(Test_multiset_string, Test_erase) 122 | { 123 | ASSERT_TRUE(load_data()) << "load data error"; 124 | test_erase(); 125 | } 126 | 127 | TEST_F(Test_multiset_string, Test_all) 128 | { 129 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 130 | test_assign(); 131 | test_emplace_hint(); 132 | test_equal_range(); 133 | test_erase(); 134 | test_count(); 135 | test_insert(); 136 | test_find(); 137 | } 138 | 139 | TEST_F(Test_multiset_pair, Test_assign) 140 | { 141 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 142 | test_assign(); 143 | } 144 | 145 | TEST_F(Test_multiset_pair, Test_emplace_hint) 146 | { 147 | ASSERT_TRUE(load_data()) << "load data error"; 148 | test_emplace_hint(); 149 | } 150 | 151 | TEST_F(Test_multiset_pair, Test_equal_range) 152 | { 153 | ASSERT_TRUE(load_data()) << "load data error"; 154 | test_equal_range(); 155 | } 156 | 157 | TEST_F(Test_multiset_pair, Test_count) 158 | { 159 | ASSERT_TRUE(load_data()) << "load data error"; 160 | test_count(); 161 | } 162 | 163 | TEST_F(Test_multiset_pair, Test_insert) 164 | { 165 | ASSERT_TRUE(load_data()) << "load data error"; 166 | test_insert(); 167 | } 168 | 169 | TEST_F(Test_multiset_pair, Test_find) 170 | { 171 | ASSERT_TRUE(load_data()) << "load data error"; 172 | test_find(); 173 | } 174 | 175 | TEST_F(Test_multiset_pair, Test_erase) 176 | { 177 | ASSERT_TRUE(load_data()) << "load data error"; 178 | test_erase(); 179 | } 180 | 181 | TEST_F(Test_multiset_pair, Test_all) 182 | { 183 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 184 | test_assign(); 185 | test_emplace_hint(); 186 | test_equal_range(); 187 | test_erase(); 188 | test_count(); 189 | test_insert(); 190 | test_find(); 191 | } 192 | 193 | } // namespace test 194 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_priority_queue.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/queue_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_priority_queue 14 | : public Test_stack_queue_base, qmj::priority_queue> 15 | { 16 | }; 17 | 18 | class Test_priority_queue_int : public Test_priority_queue 19 | { 20 | }; 21 | 22 | class Test_priority_queue_string : public Test_priority_queue 23 | { 24 | }; 25 | 26 | class Test_priority_queue_pair : public Test_priority_queue> 27 | { 28 | }; 29 | 30 | TEST_F(Test_priority_queue_int, Test_assign) 31 | { 32 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 33 | test_assign(); 34 | } 35 | 36 | TEST_F(Test_priority_queue_int, Test_push) 37 | 
{ 38 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 39 | test_push(); 40 | } 41 | 42 | TEST_F(Test_priority_queue_int, Test_pop) 43 | { 44 | ASSERT_TRUE(load_data()) << "reset data error"; 45 | test_pop(); 46 | } 47 | 48 | TEST_F(Test_priority_queue_int, Test_top) 49 | { 50 | ASSERT_TRUE(load_data()) << "reset data error"; 51 | test_top(); 52 | } 53 | 54 | TEST_F(Test_priority_queue_int, Test_all) 55 | { 56 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 57 | test_assign(); 58 | test_push(); 59 | test_pop(); 60 | test_top(); 61 | } 62 | 63 | TEST_F(Test_priority_queue_string, Test_assign) 64 | { 65 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 66 | test_assign(); 67 | } 68 | 69 | 70 | TEST_F(Test_priority_queue_string, Test_push) 71 | { 72 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 73 | test_push(); 74 | } 75 | 76 | TEST_F(Test_priority_queue_string, Test_pop) 77 | { 78 | ASSERT_TRUE(load_data()) << "reset data error"; 79 | test_pop(); 80 | } 81 | 82 | TEST_F(Test_priority_queue_string, Test_top) 83 | { 84 | ASSERT_TRUE(load_data()) << "reset data error"; 85 | test_top(); 86 | } 87 | 88 | TEST_F(Test_priority_queue_string, Test_all) 89 | { 90 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 91 | test_assign(); 92 | test_push(); 93 | test_pop(); 94 | test_top(); 95 | } 96 | 97 | TEST_F(Test_priority_queue_pair, Test_assign) 98 | { 99 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 100 | test_assign(); 101 | } 102 | 103 | TEST_F(Test_priority_queue_pair, Test_push) 104 | { 105 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 106 | test_push(); 107 | } 108 | 109 | TEST_F(Test_priority_queue_pair, Test_pop) 110 | { 111 | ASSERT_TRUE(load_data()) << "reset data error"; 112 | test_pop(); 113 | } 114 | 115 | TEST_F(Test_priority_queue_pair, Test_top) 116 | { 117 | ASSERT_TRUE(load_data()) << "reset data error"; 118 | test_top(); 119 | } 120 | 121 | TEST_F(Test_priority_queue_pair, Test_all) 122 | { 123 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 124 | test_assign(); 125 | test_push(); 126 | test_pop(); 127 | test_top(); 128 | } 129 | 130 | 131 | } // namespace test 132 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_queue.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/queue_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_queue 14 | : public Test_stack_queue_base, qmj::queue> 15 | { 16 | public: 17 | void test_back() 18 | { 19 | EXPECT_EQ(this->std_con.back(), this->qmj_con.back()) 20 | << "not equal of back in queue"; 21 | } 22 | 23 | void test_front() 24 | { 25 | EXPECT_EQ(this->std_con.front(), this->qmj_con.front()) 26 | << "not equal of front in queue"; 27 | } 28 | }; 29 | 30 | class Test_queue_int : public Test_queue 31 | { 32 | }; 33 | 34 | class Test_queue_string : public Test_queue 35 | { 36 | }; 37 | 38 | class Test_queue_pair : public Test_queue> 39 | { 40 | }; 41 | 42 | TEST_F(Test_queue_int, Test_assign) 43 | { 44 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 45 | test_assign(); 46 | } 47 | 48 | TEST_F(Test_queue_int, Test_push) 49 | { 50 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 51 | test_push(); 52 | } 53 | 54 | TEST_F(Test_queue_int, Test_pop) 55 | { 56 | 
ASSERT_TRUE(load_data()) << "reset data error"; 57 | test_pop(); 58 | } 59 | 60 | TEST_F(Test_queue_int, Test_front) 61 | { 62 | ASSERT_TRUE(load_data()) << "reset data error"; 63 | test_front(); 64 | } 65 | 66 | TEST_F(Test_queue_int, Test_back) 67 | { 68 | ASSERT_TRUE(load_data()) << "reset data error"; 69 | test_back(); 70 | } 71 | 72 | TEST_F(Test_queue_int, Test_all) 73 | { 74 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 75 | test_assign(); 76 | test_push(); 77 | test_pop(); 78 | test_back(); 79 | test_front(); 80 | } 81 | 82 | TEST_F(Test_queue_string, Test_assign) 83 | { 84 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 85 | test_assign(); 86 | } 87 | 88 | TEST_F(Test_queue_string, Test_push) 89 | { 90 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 91 | test_push(); 92 | } 93 | 94 | TEST_F(Test_queue_string, Test_pop) 95 | { 96 | ASSERT_TRUE(load_data()) << "reset data error"; 97 | test_pop(); 98 | } 99 | 100 | TEST_F(Test_queue_string, Test_front) 101 | { 102 | ASSERT_TRUE(load_data()) << "reset data error"; 103 | test_front(); 104 | } 105 | 106 | TEST_F(Test_queue_string, Test_back) 107 | { 108 | ASSERT_TRUE(load_data()) << "reset data error"; 109 | test_back(); 110 | } 111 | 112 | TEST_F(Test_queue_string, Test_all) 113 | { 114 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 115 | test_assign(); 116 | test_push(); 117 | test_pop(); 118 | test_back(); 119 | test_front(); 120 | } 121 | 122 | TEST_F(Test_queue_pair, Test_assign) 123 | { 124 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 125 | test_assign(); 126 | } 127 | 128 | TEST_F(Test_queue_pair, Test_push) 129 | { 130 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 131 | test_push(); 132 | } 133 | 134 | TEST_F(Test_queue_pair, Test_pop) 135 | { 136 | ASSERT_TRUE(load_data()) << "reset data error"; 137 | test_pop(); 138 | } 139 | 140 | TEST_F(Test_queue_pair, Test_front) 141 | { 142 | ASSERT_TRUE(load_data()) << "reset data error"; 143 | test_front(); 144 | } 145 | 146 | TEST_F(Test_queue_pair, Test_back) 147 | { 148 | ASSERT_TRUE(load_data()) << "reset data error"; 149 | test_back(); 150 | } 151 | 152 | TEST_F(Test_queue_pair, Test_all) 153 | { 154 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 155 | test_assign(); 156 | test_push(); 157 | test_pop(); 158 | test_back(); 159 | test_front(); 160 | } 161 | 162 | } // namespace test 163 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_set.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/set_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_set 14 | : public Test_set_map_base< 15 | std::set, qmj::set> 16 | { 17 | }; 18 | 19 | class Test_set_int : public Test_set 20 | { 21 | }; 22 | 23 | class Test_set_string : public Test_set 24 | { 25 | }; 26 | 27 | class Test_set_pair:public Test_set> 28 | { 29 | }; 30 | 31 | TEST_F(Test_set_int, Test_assign) 32 | { 33 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 34 | test_assign(); 35 | } 36 | 37 | TEST_F(Test_set_int, Test_emplace_hint) 38 | { 39 | ASSERT_TRUE(load_data()) << "load data error"; 40 | test_emplace_hint(); 41 | } 42 | 43 | TEST_F(Test_set_int, Test_count) 44 | { 45 | ASSERT_TRUE(load_data()) << "load data error"; 46 | test_count(); 47 | } 48 | 49 | 
TEST_F(Test_set_int, Test_insert) 50 | { 51 | ASSERT_TRUE(load_data()) << "load data error"; 52 | test_insert(); 53 | } 54 | 55 | TEST_F(Test_set_int, Test_find) 56 | { 57 | ASSERT_TRUE(load_data()) << "load data error"; 58 | test_find(); 59 | } 60 | 61 | TEST_F(Test_set_int, Test_erase) 62 | { 63 | ASSERT_TRUE(load_data()) << "load data error"; 64 | test_erase(); 65 | } 66 | 67 | TEST_F(Test_set_int, Test_all) 68 | { 69 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 70 | test_assign(); 71 | test_emplace_hint(); 72 | test_erase(); 73 | test_count(); 74 | test_insert(); 75 | test_find(); 76 | } 77 | 78 | TEST_F(Test_set_string, Test_assign) 79 | { 80 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 81 | test_assign(); 82 | } 83 | 84 | TEST_F(Test_set_string, Test_emplace_hint) 85 | { 86 | ASSERT_TRUE(load_data()) << "load data error"; 87 | test_emplace_hint(); 88 | } 89 | 90 | TEST_F(Test_set_string, Test_count) 91 | { 92 | ASSERT_TRUE(load_data()) << "load data error"; 93 | test_count(); 94 | } 95 | 96 | TEST_F(Test_set_string, Test_insert) 97 | { 98 | ASSERT_TRUE(load_data()) << "load data error"; 99 | test_insert(); 100 | } 101 | 102 | TEST_F(Test_set_string, Test_find) 103 | { 104 | ASSERT_TRUE(load_data()) << "load data error"; 105 | test_find(); 106 | } 107 | 108 | TEST_F(Test_set_string, Test_erase) 109 | { 110 | ASSERT_TRUE(load_data()) << "load data error"; 111 | test_erase(); 112 | } 113 | 114 | TEST_F(Test_set_string, Test_all) 115 | { 116 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 117 | test_assign(); 118 | test_emplace_hint(); 119 | test_erase(); 120 | test_count(); 121 | test_insert(); 122 | test_find(); 123 | } 124 | 125 | TEST_F(Test_set_pair, Test_assign) 126 | { 127 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 128 | test_assign(); 129 | } 130 | 131 | TEST_F(Test_set_pair, Test_emplace_hint) 132 | { 133 | ASSERT_TRUE(load_data()) << "load data error"; 134 | test_emplace_hint(); 135 | } 136 | 137 | TEST_F(Test_set_pair, Test_count) 138 | { 139 | ASSERT_TRUE(load_data()) << "load data error"; 140 | test_count(); 141 | } 142 | 143 | TEST_F(Test_set_pair, Test_insert) 144 | { 145 | ASSERT_TRUE(load_data()) << "load data error"; 146 | test_insert(); 147 | } 148 | 149 | TEST_F(Test_set_pair, Test_find) 150 | { 151 | ASSERT_TRUE(load_data()) << "load data error"; 152 | test_find(); 153 | } 154 | 155 | TEST_F(Test_set_pair, Test_erase) 156 | { 157 | ASSERT_TRUE(load_data()) << "load data error"; 158 | test_erase(); 159 | } 160 | 161 | TEST_F(Test_set_pair, Test_all) 162 | { 163 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 164 | test_assign(); 165 | test_emplace_hint(); 166 | test_erase(); 167 | test_count(); 168 | test_insert(); 169 | test_find(); 170 | } 171 | 172 | } // namespace test 173 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_stack.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/stack_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_stack 14 | : public Test_stack_queue_base, qmj::stack> 15 | { 16 | }; 17 | 18 | class Test_stack_int : public Test_stack 19 | { 20 | }; 21 | 22 | class Test_stack_string : public Test_stack 23 | { 24 | }; 25 | 26 | class Test_stack_pair : public Test_stack> 27 | { 28 | }; 29 | 30 | 31 | TEST_F(Test_stack_int, 
Test_assign) 32 | { 33 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 34 | test_assign(); 35 | } 36 | 37 | TEST_F(Test_stack_int, Test_push) 38 | { 39 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 40 | test_push(); 41 | } 42 | 43 | TEST_F(Test_stack_int, Test_pop) 44 | { 45 | ASSERT_TRUE(load_data()) << "reset data error"; 46 | test_pop(); 47 | } 48 | 49 | TEST_F(Test_stack_int, Test_top) 50 | { 51 | ASSERT_TRUE(load_data()) << "reset data error"; 52 | test_top(); 53 | } 54 | 55 | TEST_F(Test_stack_int, Test_all) 56 | { 57 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 58 | test_assign(); 59 | test_push(); 60 | test_pop(); 61 | test_top(); 62 | } 63 | 64 | TEST_F(Test_stack_string, Test_assign) 65 | { 66 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 67 | test_assign(); 68 | } 69 | 70 | 71 | TEST_F(Test_stack_string, Test_push) 72 | { 73 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 74 | test_push(); 75 | } 76 | 77 | TEST_F(Test_stack_string, Test_pop) 78 | { 79 | ASSERT_TRUE(load_data()) << "reset data error"; 80 | test_pop(); 81 | } 82 | 83 | TEST_F(Test_stack_string, Test_top) 84 | { 85 | ASSERT_TRUE(load_data()) << "reset data error"; 86 | test_top(); 87 | } 88 | 89 | TEST_F(Test_stack_string, Test_all) 90 | { 91 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 92 | test_assign(); 93 | test_push(); 94 | test_pop(); 95 | test_top(); 96 | } 97 | 98 | TEST_F(Test_stack_pair, Test_assign) 99 | { 100 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 101 | test_assign(); 102 | } 103 | 104 | TEST_F(Test_stack_pair, Test_push) 105 | { 106 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 107 | test_push(); 108 | } 109 | 110 | TEST_F(Test_stack_pair, Test_pop) 111 | { 112 | ASSERT_TRUE(load_data()) << "reset data error"; 113 | test_pop(); 114 | } 115 | 116 | TEST_F(Test_stack_pair, Test_top) 117 | { 118 | ASSERT_TRUE(load_data()) << "reset data error"; 119 | test_top(); 120 | } 121 | 122 | TEST_F(Test_stack_pair, Test_all) 123 | { 124 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 125 | test_assign(); 126 | test_push(); 127 | test_pop(); 128 | test_top(); 129 | } 130 | 131 | 132 | } // namespace test 133 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_unordered_map.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/unordered_map_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_unordered_map 14 | : public Test_set_map_base< 15 | std::unordered_map, 16 | qmj::unordered_map> 17 | { 18 | }; 19 | 20 | class Test_unordered_map_int : public Test_unordered_map 21 | { 22 | }; 23 | 24 | class Test_unordered_map_string : public Test_unordered_map 25 | { 26 | }; 27 | 28 | class Test_unordered_map_pair 29 | : public Test_set_map_base< 30 | std::unordered_map, int, hash_pair>, 31 | qmj::unordered_map, int, hash_pair>> 32 | { 33 | }; 34 | 35 | TEST_F(Test_unordered_map_int, Test_assign) 36 | { 37 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 38 | test_assign(); 39 | } 40 | 41 | TEST_F(Test_unordered_map_int, Test_count) 42 | { 43 | ASSERT_TRUE(load_data()) << "load data error"; 44 | test_count(); 45 | } 46 | 47 | TEST_F(Test_unordered_map_int, Test_insert) 48 | { 49 | ASSERT_TRUE(load_data()) << "load data error"; 50 
| test_insert(); 51 | } 52 | 53 | TEST_F(Test_unordered_map_int, Test_find) 54 | { 55 | ASSERT_TRUE(load_data()) << "load data error"; 56 | test_find(); 57 | } 58 | 59 | TEST_F(Test_unordered_map_int, Test_erase) 60 | { 61 | ASSERT_TRUE(load_data()) << "load data error"; 62 | test_erase(); 63 | } 64 | 65 | TEST_F(Test_unordered_map_int, Test_all) 66 | { 67 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 68 | test_assign(); 69 | test_erase(); 70 | test_count(); 71 | test_insert(); 72 | test_find(); 73 | } 74 | 75 | TEST_F(Test_unordered_map_string, Test_assign) 76 | { 77 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 78 | test_assign(); 79 | } 80 | 81 | TEST_F(Test_unordered_map_string, Test_count) 82 | { 83 | ASSERT_TRUE(load_data()) << "load data error"; 84 | test_count(); 85 | } 86 | 87 | TEST_F(Test_unordered_map_string, Test_insert) 88 | { 89 | ASSERT_TRUE(load_data()) << "load data error"; 90 | test_insert(); 91 | } 92 | 93 | TEST_F(Test_unordered_map_string, Test_find) 94 | { 95 | ASSERT_TRUE(load_data()) << "load data error"; 96 | test_find(); 97 | } 98 | 99 | TEST_F(Test_unordered_map_string, Test_erase) 100 | { 101 | ASSERT_TRUE(load_data()) << "load data error"; 102 | test_erase(); 103 | } 104 | 105 | TEST_F(Test_unordered_map_string, Test_all) 106 | { 107 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 108 | test_assign(); 109 | test_erase(); 110 | test_count(); 111 | test_insert(); 112 | test_find(); 113 | } 114 | 115 | TEST_F(Test_unordered_map_pair, Test_assign) 116 | { 117 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 118 | test_assign(); 119 | } 120 | 121 | TEST_F(Test_unordered_map_pair, Test_count) 122 | { 123 | ASSERT_TRUE(load_data()) << "load data error"; 124 | test_count(); 125 | } 126 | 127 | TEST_F(Test_unordered_map_pair, Test_insert) 128 | { 129 | ASSERT_TRUE(load_data()) << "load data error"; 130 | test_insert(); 131 | } 132 | 133 | TEST_F(Test_unordered_map_pair, Test_find) 134 | { 135 | ASSERT_TRUE(load_data()) << "load data error"; 136 | test_find(); 137 | } 138 | 139 | TEST_F(Test_unordered_map_pair, Test_erase) 140 | { 141 | ASSERT_TRUE(load_data()) << "load data error"; 142 | test_erase(); 143 | } 144 | 145 | TEST_F(Test_unordered_map_pair, Test_all) 146 | { 147 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 148 | test_assign(); 149 | test_erase(); 150 | test_count(); 151 | test_insert(); 152 | test_find(); 153 | } 154 | 155 | } // namespace test 156 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_unordered_multimap.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/unordered_map_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_unordered_multimap 14 | : public Test_set_map_base< 15 | std::unordered_multimap, 16 | qmj::unordered_multimap> 17 | { 18 | }; 19 | 20 | class Test_unordered_multimap_int : public Test_unordered_multimap 21 | { 22 | }; 23 | 24 | class Test_unordered_multimap_string : public Test_unordered_multimap 25 | { 26 | }; 27 | 28 | class Test_unordered_multimap_pair 29 | : public Test_set_map_base< 30 | std::unordered_multimap, int, hash_pair>, 31 | qmj::unordered_multimap, int, hash_pair>> 32 | { 33 | }; 34 | 35 | TEST_F(Test_unordered_multimap_int, Test_assign) 36 | { 37 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data 
error"; 38 | test_assign(); 39 | } 40 | 41 | TEST_F(Test_unordered_multimap_int, Test_equal_range) 42 | { 43 | ASSERT_TRUE(load_data()) << "load data error"; 44 | test_equal_range(); 45 | } 46 | 47 | TEST_F(Test_unordered_multimap_int, Test_count) 48 | { 49 | ASSERT_TRUE(load_data()) << "load data error"; 50 | test_count(); 51 | } 52 | 53 | TEST_F(Test_unordered_multimap_int, Test_insert) 54 | { 55 | ASSERT_TRUE(load_data()) << "load data error"; 56 | test_insert(); 57 | } 58 | 59 | TEST_F(Test_unordered_multimap_int, Test_erase) 60 | { 61 | ASSERT_TRUE(load_data()) << "load data error"; 62 | test_erase(); 63 | } 64 | 65 | TEST_F(Test_unordered_multimap_int, Test_all) 66 | { 67 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 68 | test_assign(); 69 | test_equal_range(); 70 | test_erase(); 71 | test_count(); 72 | test_insert(); 73 | } 74 | 75 | TEST_F(Test_unordered_multimap_string, Test_assign) 76 | { 77 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 78 | test_assign(); 79 | } 80 | 81 | TEST_F(Test_unordered_multimap_string, Test_equal_range) 82 | { 83 | ASSERT_TRUE(load_data()) << "load data error"; 84 | test_equal_range(); 85 | } 86 | 87 | TEST_F(Test_unordered_multimap_string, Test_count) 88 | { 89 | ASSERT_TRUE(load_data()) << "load data error"; 90 | test_count(); 91 | } 92 | 93 | TEST_F(Test_unordered_multimap_string, Test_insert) 94 | { 95 | ASSERT_TRUE(load_data()) << "load data error"; 96 | test_insert(); 97 | } 98 | 99 | TEST_F(Test_unordered_multimap_string, Test_erase) 100 | { 101 | ASSERT_TRUE(load_data()) << "load data error"; 102 | test_erase(); 103 | } 104 | 105 | TEST_F(Test_unordered_multimap_string, Test_all) 106 | { 107 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 108 | test_assign(); 109 | test_equal_range(); 110 | test_erase(); 111 | test_count(); 112 | test_insert(); 113 | } 114 | 115 | TEST_F(Test_unordered_multimap_pair, Test_assign) 116 | { 117 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 118 | test_assign(); 119 | } 120 | 121 | TEST_F(Test_unordered_multimap_pair, Test_equal_range) 122 | { 123 | ASSERT_TRUE(load_data()) << "load data error"; 124 | test_equal_range(); 125 | } 126 | 127 | TEST_F(Test_unordered_multimap_pair, Test_count) 128 | { 129 | ASSERT_TRUE(load_data()) << "load data error"; 130 | test_count(); 131 | } 132 | 133 | TEST_F(Test_unordered_multimap_pair, Test_insert) 134 | { 135 | ASSERT_TRUE(load_data()) << "load data error"; 136 | test_insert(); 137 | } 138 | 139 | TEST_F(Test_unordered_multimap_pair, Test_erase) 140 | { 141 | ASSERT_TRUE(load_data()) << "load data error"; 142 | test_erase(); 143 | } 144 | 145 | TEST_F(Test_unordered_multimap_pair, Test_all) 146 | { 147 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 148 | test_assign(); 149 | test_equal_range(); 150 | test_erase(); 151 | test_count(); 152 | test_insert(); 153 | } 154 | 155 | } // namespace test 156 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_unordered_multiset.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/unordered_set_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_unordered_multiset 14 | : public Test_set_map_base< 15 | std::unordered_multiset, qmj::unordered_multiset> 16 | { 17 | }; 18 | 19 | class Test_unordered_multiset_int : public 
Test_unordered_multiset 20 | { 21 | }; 22 | 23 | class Test_unordered_multiset_string : public Test_unordered_multiset 24 | { 25 | }; 26 | 27 | class Test_unordered_multiset_pair 28 | : public Test_set_map_base< 29 | std::unordered_multiset, hash_pair>, 30 | qmj::unordered_multiset, hash_pair>> 31 | { 32 | }; 33 | 34 | TEST_F(Test_unordered_multiset_int, Test_assign) 35 | { 36 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 37 | test_assign(); 38 | } 39 | 40 | TEST_F(Test_unordered_multiset_int, Test_count) 41 | { 42 | ASSERT_TRUE(load_data()) << "load data error"; 43 | test_count(); 44 | } 45 | 46 | TEST_F(Test_unordered_multiset_int, Test_insert) 47 | { 48 | ASSERT_TRUE(load_data()) << "load data error"; 49 | test_insert(); 50 | } 51 | 52 | TEST_F(Test_unordered_multiset_int, Test_find) 53 | { 54 | ASSERT_TRUE(load_data()) << "load data error"; 55 | test_find(); 56 | } 57 | 58 | TEST_F(Test_unordered_multiset_int, Test_erase) 59 | { 60 | ASSERT_TRUE(load_data()) << "load data error"; 61 | test_erase(); 62 | } 63 | 64 | TEST_F(Test_unordered_multiset_int, Test_emplace_hint) 65 | { 66 | ASSERT_TRUE(load_data()) << "load data error"; 67 | test_emplace_hint(); 68 | } 69 | 70 | TEST_F(Test_unordered_multiset_int, Test_all) 71 | { 72 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 73 | test_assign(); 74 | test_equal_range(); 75 | test_erase(); 76 | test_count(); 77 | test_insert(); 78 | test_find(); 79 | } 80 | 81 | TEST_F(Test_unordered_multiset_string, Test_assign) 82 | { 83 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 84 | test_assign(); 85 | } 86 | 87 | TEST_F(Test_unordered_multiset_string, Test_equal_range) 88 | { 89 | ASSERT_TRUE(load_data()) << "load data error"; 90 | test_equal_range(); 91 | } 92 | 93 | TEST_F(Test_unordered_multiset_string, Test_count) 94 | { 95 | ASSERT_TRUE(load_data()) << "load data error"; 96 | test_count(); 97 | } 98 | 99 | TEST_F(Test_unordered_multiset_string, Test_insert) 100 | { 101 | ASSERT_TRUE(load_data()) << "load data error"; 102 | test_insert(); 103 | } 104 | 105 | TEST_F(Test_unordered_multiset_string, Test_find) 106 | { 107 | ASSERT_TRUE(load_data()) << "load data error"; 108 | test_find(); 109 | } 110 | 111 | TEST_F(Test_unordered_multiset_string, Test_erase) 112 | { 113 | ASSERT_TRUE(load_data()) << "load data error"; 114 | test_erase(); 115 | } 116 | 117 | TEST_F(Test_unordered_multiset_string, Test_all) 118 | { 119 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 120 | test_assign(); 121 | test_equal_range(); 122 | test_erase(); 123 | test_count(); 124 | test_insert(); 125 | test_find(); 126 | } 127 | 128 | TEST_F(Test_unordered_multiset_pair, Test_assign) 129 | { 130 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 131 | test_assign(); 132 | } 133 | 134 | TEST_F(Test_unordered_multiset_pair, Test_equal_range) 135 | { 136 | ASSERT_TRUE(load_data()) << "load data error"; 137 | test_equal_range(); 138 | } 139 | 140 | TEST_F(Test_unordered_multiset_pair, Test_count) 141 | { 142 | ASSERT_TRUE(load_data()) << "load data error"; 143 | test_count(); 144 | } 145 | 146 | TEST_F(Test_unordered_multiset_pair, Test_insert) 147 | { 148 | ASSERT_TRUE(load_data()) << "load data error"; 149 | test_insert(); 150 | } 151 | 152 | TEST_F(Test_unordered_multiset_pair, Test_find) 153 | { 154 | ASSERT_TRUE(load_data()) << "load data error"; 155 | test_find(); 156 | } 157 | 158 | TEST_F(Test_unordered_multiset_pair, Test_erase) 159 | { 160 | ASSERT_TRUE(load_data()) << "load data error"; 
161 | test_erase(); 162 | } 163 | 164 | TEST_F(Test_unordered_multiset_pair, Test_all) 165 | { 166 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 167 | test_assign(); 168 | test_equal_range(); 169 | test_erase(); 170 | test_count(); 171 | test_insert(); 172 | test_find(); 173 | } 174 | 175 | } // namespace test 176 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_unordered_set.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "../QMJSTL/unordered_set_qmj.h" 5 | #include "test_create_data.h" 6 | 7 | namespace qmj 8 | { 9 | namespace test 10 | { 11 | 12 | template 13 | class Test_unordered_set 14 | : public Test_set_map_base< 15 | std::unordered_set, qmj::unordered_set> 16 | { 17 | }; 18 | 19 | class Test_unordered_set_int : public Test_unordered_set 20 | { 21 | }; 22 | 23 | class Test_unordered_set_string : public Test_unordered_set 24 | { 25 | }; 26 | 27 | class Test_unordered_set_pair 28 | : public Test_set_map_base< 29 | std::unordered_set, hash_pair>, 30 | qmj::unordered_set, hash_pair>> 31 | { 32 | }; 33 | 34 | TEST_F(Test_unordered_set_int, Test_assign) 35 | { 36 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 37 | test_assign(); 38 | } 39 | 40 | TEST_F(Test_unordered_set_int, Test_count) 41 | { 42 | ASSERT_TRUE(load_data()) << "load data error"; 43 | test_count(); 44 | } 45 | 46 | TEST_F(Test_unordered_set_int, Test_insert) 47 | { 48 | ASSERT_TRUE(load_data()) << "load data error"; 49 | test_insert(); 50 | } 51 | 52 | TEST_F(Test_unordered_set_int, Test_find) 53 | { 54 | ASSERT_TRUE(load_data()) << "load data error"; 55 | test_find(); 56 | } 57 | 58 | TEST_F(Test_unordered_set_int, Test_erase) 59 | { 60 | ASSERT_TRUE(load_data()) << "load data error"; 61 | test_erase(); 62 | } 63 | 64 | TEST_F(Test_unordered_set_int, Test_all) 65 | { 66 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 67 | test_assign(); 68 | test_erase(); 69 | test_count(); 70 | test_insert(); 71 | test_find(); 72 | } 73 | 74 | TEST_F(Test_unordered_set_string, Test_assign) 75 | { 76 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 77 | test_assign(); 78 | } 79 | 80 | TEST_F(Test_unordered_set_string, Test_count) 81 | { 82 | ASSERT_TRUE(load_data()) << "load data error"; 83 | test_count(); 84 | } 85 | 86 | TEST_F(Test_unordered_set_string, Test_insert) 87 | { 88 | ASSERT_TRUE(load_data()) << "load data error"; 89 | test_insert(); 90 | } 91 | 92 | TEST_F(Test_unordered_set_string, Test_find) 93 | { 94 | ASSERT_TRUE(load_data()) << "load data error"; 95 | test_find(); 96 | } 97 | 98 | TEST_F(Test_unordered_set_string, Test_erase) 99 | { 100 | ASSERT_TRUE(load_data()) << "load data error"; 101 | test_erase(); 102 | } 103 | 104 | TEST_F(Test_unordered_set_string, Test_all) 105 | { 106 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 107 | test_assign(); 108 | test_erase(); 109 | test_count(); 110 | test_insert(); 111 | test_find(); 112 | } 113 | 114 | TEST_F(Test_unordered_set_pair, Test_assign) 115 | { 116 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 117 | test_assign(); 118 | } 119 | 120 | TEST_F(Test_unordered_set_pair, Test_count) 121 | { 122 | ASSERT_TRUE(load_data()) << "load data error"; 123 | test_count(); 124 | } 125 | 126 | TEST_F(Test_unordered_set_pair, Test_insert) 127 | { 128 | ASSERT_TRUE(load_data()) << "load data error"; 129 | test_insert(); 130 | } 131 | 132 | 
TEST_F(Test_unordered_set_pair, Test_find) 133 | { 134 | ASSERT_TRUE(load_data()) << "load data error"; 135 | test_find(); 136 | } 137 | 138 | TEST_F(Test_unordered_set_pair, Test_erase) 139 | { 140 | ASSERT_TRUE(load_data()) << "load data error"; 141 | test_erase(); 142 | } 143 | 144 | TEST_F(Test_unordered_set_pair, Test_all) 145 | { 146 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 147 | test_assign(); 148 | test_erase(); 149 | test_count(); 150 | test_insert(); 151 | test_find(); 152 | } 153 | 154 | } // namespace test 155 | } // namespace qmj -------------------------------------------------------------------------------- /test/test_vector.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "../QMJSTL/vector_qmj.h" 4 | #include "test_create_data.h" 5 | 6 | namespace qmj 7 | { 8 | namespace test 9 | { 10 | 11 | template 12 | class Test_vector 13 | : public Test_rand_container, qmj::vector> 14 | { 15 | }; 16 | 17 | class Test_vector_int : public Test_vector 18 | { 19 | }; 20 | 21 | class Test_vector_string : public Test_vector 22 | { 23 | }; 24 | 25 | class Test_vector_pair : public Test_vector> 26 | { 27 | }; 28 | 29 | TEST_F(Test_vector_int, Test_copy) 30 | { 31 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 32 | test_assign(); 33 | } 34 | 35 | TEST_F(Test_vector_int, Test_push_back) 36 | { 37 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 38 | test_push_back(); 39 | } 40 | 41 | TEST_F(Test_vector_int, Test_erase) 42 | { 43 | ASSERT_TRUE(load_data()) << "load data error"; 44 | test_erase(); 45 | } 46 | 47 | TEST_F(Test_vector_int, Test_insert) 48 | { 49 | ASSERT_TRUE(load_data()) << "load data error"; 50 | test_insert(); 51 | } 52 | 53 | TEST_F(Test_vector_int, Test_pop_back) 54 | { 55 | ASSERT_TRUE(load_data()) << "load data error"; 56 | test_pop_back(); 57 | } 58 | 59 | TEST_F(Test_vector_int, Test_resize) 60 | { 61 | ASSERT_TRUE(load_data()) << "load data error"; 62 | test_resize(); 63 | } 64 | 65 | TEST_F(Test_vector_int, Test_all) 66 | { 67 | ASSERT_TRUE(load_data()) << "load data error"; 68 | test_push_back(); 69 | test_erase(); 70 | test_insert(); 71 | test_pop_back(); 72 | test_resize(); 73 | } 74 | 75 | TEST_F(Test_vector_string, Test_copy) 76 | { 77 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 78 | test_assign(); 79 | } 80 | 81 | TEST_F(Test_vector_string, Test_push_back) 82 | { 83 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 84 | test_push_back(); 85 | } 86 | 87 | TEST_F(Test_vector_string, Test_erase) 88 | { 89 | ASSERT_TRUE(load_data()) << "load data error"; 90 | test_erase(); 91 | } 92 | 93 | TEST_F(Test_vector_string, Test_insert) 94 | { 95 | ASSERT_TRUE(load_data()) << "load data error"; 96 | test_insert(); 97 | } 98 | 99 | TEST_F(Test_vector_string, Test_pop_back) 100 | { 101 | ASSERT_TRUE(load_data()) << "load data error"; 102 | test_pop_back(); 103 | } 104 | 105 | TEST_F(Test_vector_string, Test_resize) 106 | { 107 | ASSERT_TRUE(load_data()) << "load data error"; 108 | test_resize(); 109 | } 110 | 111 | TEST_F(Test_vector_string, Test_all) 112 | { 113 | ASSERT_TRUE(load_data()) << "load data error"; 114 | test_push_back(); 115 | test_erase(); 116 | test_insert(); 117 | test_pop_back(); 118 | test_resize(); 119 | } 120 | 121 | TEST_F(Test_vector_pair, Test_copy) 122 | { 123 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 124 | test_assign(); 125 | } 126 | 127 | TEST_F(Test_vector_pair, 
Test_push_back) 128 | { 129 | ASSERT_TRUE(reset_data(TEST_DATASIZE)) << "reset data error"; 130 | test_push_back(); 131 | } 132 | 133 | TEST_F(Test_vector_pair, Test_erase) 134 | { 135 | ASSERT_TRUE(load_data()) << "load data error"; 136 | test_erase(); 137 | } 138 | 139 | TEST_F(Test_vector_pair, Test_insert) 140 | { 141 | ASSERT_TRUE(load_data()) << "load data error"; 142 | test_insert(); 143 | } 144 | 145 | TEST_F(Test_vector_pair, Test_pop_back) 146 | { 147 | ASSERT_TRUE(load_data()) << "load data error"; 148 | test_pop_back(); 149 | } 150 | 151 | TEST_F(Test_vector_pair, Test_resize) 152 | { 153 | ASSERT_TRUE(load_data()) << "load data error"; 154 | test_resize(); 155 | } 156 | 157 | TEST_F(Test_vector_pair, Test_all) 158 | { 159 | ASSERT_TRUE(load_data()) << "load data error"; 160 | test_push_back(); 161 | test_erase(); 162 | test_insert(); 163 | test_pop_back(); 164 | test_resize(); 165 | } 166 | 167 | } // namespace test 168 | } // namespace qmj --------------------------------------------------------------------------------
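A note on the test sources above: they all follow one comparison pattern. Each fixture keeps a std:: container and the corresponding qmj:: container, feeds both the same data through reset_data()/load_data() from test_create_data.h, and asserts after every operation (assign, insert, erase, find, push, pop, ...) that the two containers remain observably identical. The sketch below is a minimal, self-contained illustration of that pattern, not a copy of the real fixture bases (Test_rand_container, Test_set_map_base, Test_stack_queue_base), whose template parameter lists are not visible in this flattened listing; it assumes qmj::vector exposes the same interface as std::vector.

#include <gtest/gtest.h>
#include <string>
#include <vector>

#include "../QMJSTL/vector_qmj.h"

namespace qmj
{
namespace test
{

// Sketch of the side-by-side comparison used throughout test/:
// drive a std:: container and a qmj:: container with identical input,
// then compare their observable state element by element.
TEST(Example_vector_parity, Push_back_matches_std)
{
    std::vector<std::string> std_con;
    qmj::vector<std::string> qmj_con; // assumption: same interface as std::vector

    for (int i = 0; i != 1000; ++i)
    {
        std::string val = "item-" + std::to_string(i);
        std_con.push_back(val);
        qmj_con.push_back(val);
    }

    ASSERT_EQ(std_con.size(), qmj_con.size()) << "size mismatch after push_back";
    for (std::size_t i = 0; i != std_con.size(); ++i)
        EXPECT_EQ(std_con[i], qmj_con[i]) << "element mismatch at index " << i;
}

} // namespace test
} // namespace qmj

Following the conventions of test/build_run.sh, such a file (the name parity_sketch.cpp here is hypothetical) would be built and run with: clang++ -std=c++14 parity_sketch.cpp -lgtest -lgtest_main -lpthread -o parity_sketch && ./parity_sketch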