├── CMakeLists.txt ├── LICENSE ├── README.md ├── concurrent_hash_map ├── CMakeLists.txt ├── concurrent_hash_map.cpp └── concurrent_hash_map.hpp ├── examples ├── CMakeLists.txt ├── event_processor.cpp ├── stats.cpp └── user_session_cache.cpp ├── proposal.v2.html └── tests ├── CMakeLists.txt ├── Catch ├── CMakeLists.txt ├── LICENSE_1_0.txt ├── README.md └── single_include │ └── catch.hpp ├── pcg ├── CMakeLists.txt ├── pcg_extras.hpp ├── pcg_random.hpp └── pcg_uint128.hpp ├── stress-tests ├── CMakeLists.txt ├── benchmark.cpp ├── stress_checked.cpp ├── stress_unchecked.cpp └── test_util.hpp └── unit-tests ├── CMakeLists.txt ├── test_constructor.cpp ├── test_hash_properties.cpp ├── test_heterogeneous_compare.cpp ├── test_iterator.cpp ├── test_libcuckoo_bucket_container.cpp ├── test_locked_table.cpp ├── test_noncopyable_types.cpp ├── test_resize.cpp ├── test_runner.cpp ├── test_user_exceptions.cpp ├── unit_test_util.cpp └── unit_test_util.hpp /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.7) 2 | project(concurrent_hash_map LANGUAGES CXX) 3 | 4 | add_definitions(-std=c++2a) 5 | 6 | add_subdirectory(concurrent_hash_map) 7 | add_subdirectory(examples) 8 | enable_testing() 9 | add_subdirectory(tests) 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Concurrent hash map 2 | =========== 3 | STL compatible implementation of hash map optimized for concurrent access. 
For more details look to proposal D0652R0 4 | 5 | Usage example 6 | =========== 7 | 8 | #include "concurrent_hash_map.hpp" 9 | #include 10 | 11 | std::concurrent_unordered_map m; 12 | m.insert(std::make_pair("abc", 123)); 13 | m.update("abc", 124); 14 | assert(*m.find("abc") == 124); 15 | 16 | Licence 17 | =========== 18 | The prototype is based on libcuckoo originaly created by Carnegie Mellon University & Intel Corporation, but it have a different interface & functionality. 19 | 20 | Copyright (C) 2013, Carnegie Mellon University and Intel Corporation 21 | 22 | Licensed under the Apache License, Version 2.0 (the "License"); 23 | you may not use this file except in compliance with the License. 24 | You may obtain a copy of the License at 25 | 26 | http://www.apache.org/licenses/LICENSE-2.0 27 | 28 | Unless required by applicable law or agreed to in writing, software 29 | distributed under the License is distributed on an "AS IS" BASIS, 30 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 31 | See the License for the specific language governing permissions and 32 | limitations under the License. 33 | 34 | --------------------------- 35 | 36 | The third-party libraries have their own licenses, as detailed in their source 37 | files. 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Concurrent hash map 2 | =========== 3 | STL compatible implementation of hash map optimized for concurrent access. 
For more details look to proposal [D0652R0](http://apolukhin.github.io/papers/Concurrent%20and%20unordered.html) 4 | 5 | Usage example 6 | =========== 7 | ``` 8 | #include "concurrent_hash_map.hpp" 9 | #include 10 | 11 | std::concurrent_unordered_map m; 12 | m.emplace("abc", 123); 13 | m.update("abc", 124); 14 | assert(*m.find("abc") == 124); 15 | ``` 16 | 17 | Licence 18 | =========== 19 | The prototype is based on libcuckoo originaly created by Carnegie Mellon University & Intel Corporation, but it have a different interface & functionality. 20 | 21 | Copyright (C) 2013, Carnegie Mellon University and Intel Corporation 22 | 23 | Licensed under the Apache License, Version 2.0 (the "License"); 24 | you may not use this file except in compliance with the License. 25 | You may obtain a copy of the License at 26 | 27 | http://www.apache.org/licenses/LICENSE-2.0 28 | 29 | Unless required by applicable law or agreed to in writing, software 30 | distributed under the License is distributed on an "AS IS" BASIS, 31 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 32 | See the License for the specific language governing permissions and 33 | limitations under the License. 34 | 35 | --------------------------- 36 | 37 | The third-party libraries have their own licenses, as detailed in their source 38 | files. 
39 | -------------------------------------------------------------------------------- /concurrent_hash_map/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(concurrent_hash_map INTERFACE) 2 | 3 | target_include_directories(concurrent_hash_map INTERFACE 4 | $ 5 | $ 6 | ${Boost_SYNC_INCLUDE_DIRS} 7 | ) 8 | 9 | set(THREADS_PREFER_PTHREAD_FLAG ON) 10 | find_package(Threads REQUIRED) 11 | target_link_libraries(concurrent_hash_map INTERFACE Threads::Threads) 12 | 13 | install( 14 | FILES 15 | concurrent_hash_map.hpp 16 | DESTINATION 17 | ${CMAKE_INSTALL_PREFIX}/include 18 | ) 19 | -------------------------------------------------------------------------------- /concurrent_hash_map/concurrent_hash_map.cpp: -------------------------------------------------------------------------------- 1 | #include "concurrent_hash_map.hpp" 2 | -------------------------------------------------------------------------------- /examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(Boost COMPONENTS system thread REQUIRED) 2 | 3 | include_directories(..) 
4 | include_directories(${Boost_SYNC_INCLUDE_DIRS}) 5 | 6 | add_executable(user_session_cache user_session_cache.cpp) 7 | target_link_libraries(user_session_cache 8 | PRIVATE ${Boost_SYSTEM_LIBRARY} 9 | PRIVATE ${Boost_THREAD_LIBRARY} 10 | PRIVATE pthread 11 | ) 12 | 13 | add_executable(event_processor event_processor.cpp) 14 | target_link_libraries(event_processor 15 | PRIVATE ${Boost_SYSTEM_LIBRARY} 16 | PRIVATE ${Boost_THREAD_LIBRARY} 17 | PRIVATE pthread) 18 | 19 | add_executable(stats stats.cpp) 20 | target_link_libraries(stats 21 | PRIVATE ${Boost_SYSTEM_LIBRARY} 22 | PRIVATE ${Boost_THREAD_LIBRARY} 23 | PRIVATE pthread) 24 | -------------------------------------------------------------------------------- /examples/event_processor.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using namespace std; 10 | using event_id = unsigned long long; 11 | 12 | struct event_data { 13 | event_data() = default; 14 | event_data(event_data&&) = default; 15 | event_data(const event_data&) = delete; 16 | event_data& operator=(const event_data&) = delete; 17 | event_data& operator=(event_data&&) = default; 18 | 19 | time_t start = time(nullptr); 20 | std::string name; 21 | unsigned long long priority = 0; 22 | }; 23 | 24 | struct event_generator { 25 | std::string name; 26 | std::default_random_engine e; 27 | std::uniform_int_distribution dist; 28 | 29 | event_generator(const std::string& name, unsigned seed) 30 | : name(name), e(seed), dist(1, 10) 31 | { 32 | } 33 | 34 | std::pair> get_event() { 35 | event_data ev; 36 | ev.start = time(nullptr); 37 | ev.name = name; 38 | ev.priority = dist(e); 39 | return std::make_pair(dist(e), std::move(std::make_unique(std::move(ev)))); 40 | } 41 | }; 42 | 43 | std::vector get_event_generators() { 44 | std::vector result; 45 | for (int i = 0; i < 10; ++i) { 46 | result.emplace_back("Gen " + std::to_string(i), i); 
47 | } 48 | return result; 49 | } 50 | 51 | void process(event_id id, const event_data& data) { 52 | tm* start = localtime(&data.start); 53 | std::cout << "Id: " << id << " started at " << start->tm_hour << ":" << start->tm_min 54 | << " generator " << data.name << " priority " << data.priority << std::endl; 55 | } 56 | 57 | int main() { 58 | concurrent_unordered_map > events; 59 | 60 | // Getting unique events. 61 | auto event_generators = get_event_generators(); 62 | std::vector> handlers; 63 | for (auto& generator : event_generators) { 64 | auto res = std::async(std::launch::async, [&events, &generator]() { 65 | for (int i = 0; i < 10; ++i) { 66 | std::pair> event(generator.get_event()); 67 | 68 | events.emplace_or_visit(event.first, [&event](unique_ptr& v) { 69 | if (v && v->priority < event.second->priority) { 70 | std::swap(event.second, v); 71 | } 72 | }, make_unique()); 73 | } 74 | }); 75 | 76 | handlers.emplace_back(std::move(res)); 77 | } 78 | 79 | for (auto& handler : handlers) { 80 | handler.wait(); 81 | } 82 | 83 | auto v = events.make_unordered_map_view(true); 84 | for (auto& e : v) { 85 | process(e.first, *e.second); 86 | } 87 | 88 | return 0; 89 | } 90 | -------------------------------------------------------------------------------- /examples/stats.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | void process_stuff(size_t id) { 8 | std::cout << "Updated counter : " << id << std::endl; 9 | } 10 | 11 | void process_stats(std::concurrent_unordered_map::unordered_map_view&& view) { 12 | std::cout << "Statistic counters final state" << std::endl; 13 | for (auto i = view.begin(); i != view.end(); ++i) { 14 | std::cout << i->first << " " << i->second << std::endl; 15 | } 16 | } 17 | 18 | int main() { 19 | using namespace std; 20 | using id_t = unsigned long long; 21 | using use_count_t = size_t; 22 | 23 | concurrent_unordered_map stats; 24 | 25 | constexpr 
unsigned threads_count = 10; 26 | thread threads[threads_count]; 27 | for (auto& t: threads) { 28 | t = thread([&stats]() { 29 | std::default_random_engine e1; 30 | std::uniform_int_distribution uniform_dist(1, 5); 31 | for (auto i = 0; i < 10; ++i) { 32 | auto id = uniform_dist(e1); 33 | stats.emplace_or_visit( 34 | id, 35 | [](auto& v){ ++v; }, 36 | 0 37 | ); 38 | 39 | process_stuff(id); 40 | } 41 | }); 42 | } 43 | 44 | for (auto& t: threads) { 45 | t.join(); 46 | } 47 | 48 | process_stats(std::move(stats.make_unordered_map_view())); 49 | } 50 | -------------------------------------------------------------------------------- /examples/user_session_cache.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | using std::experimental::string_view; 9 | 10 | struct user_t { 11 | string name; 12 | size_t age; 13 | size_t view_count; 14 | }; 15 | 16 | void process_user(shared_ptr user, size_t additional_views) { 17 | user->view_count += additional_views; 18 | } 19 | 20 | auto get_new_user() { 21 | user_t victor({"victor", 24}); 22 | return make_pair("victor", make_shared(victor)); 23 | } 24 | 25 | auto get_request() { 26 | return make_pair("alex", 13); 27 | } 28 | 29 | void read_users_from_file(concurrent_unordered_map>::unordered_map_view& users) { 30 | user_t alex({"alex", 24}); 31 | user_t alice({"alice", 21}); 32 | users.insert(make_pair("alex", make_shared(alex))); 33 | users.insert(make_pair("alice", make_shared(alice))); 34 | } 35 | 36 | void cleanup(concurrent_unordered_map>::unordered_map_view& users) { 37 | users.clear(); 38 | } 39 | 40 | void dump_to_file(concurrent_unordered_map>::unordered_map_view& users) { 41 | 42 | } 43 | 44 | void count_statistics(concurrent_unordered_map>::unordered_map_view& users) { 45 | map stats; 46 | for (const auto& user : users) { 47 | stats[user.second->age]++; 48 | } 49 | 50 | cout << "User count by age 
stats" << endl; 51 | for (auto& stat : stats) { 52 | cout << stat.first << '=' << stat.second << endl; 53 | } 54 | cout << endl; 55 | } 56 | 57 | int main() { 58 | concurrent_unordered_map > users; 59 | // single threaded fill 60 | { 61 | auto unsafe_users = std::move(users.make_unordered_map_view()); 62 | read_users_from_file(unsafe_users); 63 | } 64 | 65 | constexpr unsigned threads_count = 10; 66 | // concurrent work: 67 | std::atomic b{threads_count * 100500}; 68 | thread threads[threads_count]; 69 | 70 | for (auto& t: threads) { 71 | // processing users 72 | t = thread([&users, &b]() { 73 | while (--b > 0) { 74 | auto [user_name, data] = get_request(); 75 | users.visit(user_name, [&data](const shared_ptr user) { process_user(user, data); }); 76 | } 77 | }); 78 | } 79 | 80 | // accepting users 81 | while (--b > 0) { 82 | auto [new_user_name, user] = get_new_user(); 83 | users.emplace(new_user_name, user); 84 | } 85 | 86 | for (auto& t: threads) { 87 | t.join(); 88 | } 89 | 90 | // single threaded processing: 91 | auto unsafe_users = std::move(users.make_unordered_map_view()); 92 | count_statistics(unsafe_users); 93 | dump_to_file(unsafe_users); 94 | cleanup(unsafe_users); 95 | } 96 | -------------------------------------------------------------------------------- /tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(pcg) 2 | add_subdirectory(Catch) 3 | add_subdirectory(stress-tests) 4 | add_subdirectory(unit-tests) 5 | -------------------------------------------------------------------------------- /tests/Catch/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(catch INTERFACE) 2 | target_include_directories(catch INTERFACE 3 | $ 4 | ) -------------------------------------------------------------------------------- /tests/Catch/LICENSE_1_0.txt: -------------------------------------------------------------------------------- 1 | 
Boost Software License - Version 1.0 - August 17th, 2003 2 | 3 | Permission is hereby granted, free of charge, to any person or organization 4 | obtaining a copy of the software and accompanying documentation covered by 5 | this license (the "Software") to use, reproduce, display, distribute, 6 | execute, and transmit the Software, and to prepare derivative works of the 7 | Software, and to permit third-parties to whom the Software is furnished to 8 | do so, all subject to the following: 9 | 10 | The copyright notices in the Software and this entire statement, including 11 | the above license grant, this restriction and the following disclaimer, 12 | must be included in all copies of the Software, in whole or in part, and 13 | all derivative works of the Software, unless such copies or derivative 14 | works are solely in the form of machine-executable object code generated by 15 | a source language processor. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT 20 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE 21 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, 22 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /tests/Catch/README.md: -------------------------------------------------------------------------------- 1 | ![catch logo](catch-logo-small.png) 2 | 3 | *v1.5.9* 4 | 5 | Build status (on Travis CI) [![Build Status](https://travis-ci.org/philsquared/Catch.png)](https://travis-ci.org/philsquared/Catch) 6 | 7 | The latest, single header, version can be downloaded directly using this link 8 | 9 | ## What's the Catch? 
10 | 11 | Catch stands for C++ Automated Test Cases in Headers and is a multi-paradigm automated test framework for C++ and Objective-C (and, maybe, C). It is implemented entirely in a set of header files, but is packaged up as a single header for extra convenience. 12 | 13 | ## How to use it 14 | This documentation comprises these three parts: 15 | 16 | * [Why do we need yet another C++ Test Framework?](docs/why-catch.md) 17 | * [Tutorial](docs/tutorial.md) - getting started 18 | * [Reference section](docs/Readme.md) - all the details 19 | 20 | ## More 21 | * Issues and bugs can be raised on the [Issue tracker on GitHub](https://github.com/philsquared/Catch/issues) 22 | * For discussion or questions please use [the dedicated Google Groups forum](https://groups.google.com/forum/?fromgroups#!forum/catch-forum) 23 | -------------------------------------------------------------------------------- /tests/pcg/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(pcg INTERFACE) 2 | target_include_directories(pcg INTERFACE 3 | $ 4 | ) 5 | -------------------------------------------------------------------------------- /tests/pcg/pcg_extras.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * PCG Random Number Generation for C++ 3 | * 4 | * Copyright 2014 Melissa O'Neill 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | * 18 | * For additional information about the PCG random number generation scheme, 19 | * including its license and other licensing options, visit 20 | * 21 | * http://www.pcg-random.org 22 | */ 23 | 24 | /* 25 | * This file provides support code that is useful for random-number generation 26 | * but not specific to the PCG generation scheme, including: 27 | * - 128-bit int support for platforms where it isn't available natively 28 | * - bit twiddling operations 29 | * - I/O of 128-bit and 8-bit integers 30 | * - Handling the evilness of SeedSeq 31 | * - Support for efficiently producing random numbers less than a given 32 | * bound 33 | */ 34 | 35 | #ifndef PCG_EXTRAS_HPP_INCLUDED 36 | #define PCG_EXTRAS_HPP_INCLUDED 1 37 | 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | 51 | #ifdef __GNUC__ 52 | #include 53 | #endif 54 | 55 | /* 56 | * Abstractions for compiler-specific directives 57 | */ 58 | 59 | #ifdef __GNUC__ 60 | #define PCG_NOINLINE __attribute__((noinline)) 61 | #else 62 | #define PCG_NOINLINE 63 | #endif 64 | 65 | /* 66 | * Some members of the PCG library use 128-bit math. When compiling on 64-bit 67 | * platforms, both GCC and Clang provide 128-bit integer types that are ideal 68 | * for the job. 69 | * 70 | * On 32-bit platforms (or with other compilers), we fall back to a C++ 71 | * class that provides 128-bit unsigned integers instead. It may seem 72 | * like we're reinventing the wheel here, because libraries already exist 73 | * that support large integers, but most existing libraries provide a very 74 | * generic multiprecision code, but here we're operating at a fixed size. 75 | * Also, most other libraries are fairly heavyweight. So we use a direct 76 | * implementation. Sadly, it's much slower than hand-coded assembly or 77 | * direct CPU support. 
78 | * 79 | */ 80 | #if __SIZEOF_INT128__ 81 | namespace pcg_extras { 82 | typedef __uint128_t pcg128_t; 83 | } 84 | #define PCG_128BIT_CONSTANT(high,low) \ 85 | ((pcg128_t(high) << 64) + low) 86 | #else 87 | #include "pcg_uint128.hpp" 88 | namespace pcg_extras { 89 | typedef pcg_extras::uint_x4 pcg128_t; 90 | } 91 | #define PCG_128BIT_CONSTANT(high,low) \ 92 | pcg128_t(high,low) 93 | #define PCG_EMULATED_128BIT_MATH 1 94 | #endif 95 | 96 | 97 | namespace pcg_extras { 98 | 99 | /* 100 | * We often need to represent a "number of bits". When used normally, these 101 | * numbers are never greater than 128, so an unsigned char is plenty. 102 | * If you're using a nonstandard generator of a larger size, you can set 103 | * PCG_BITCOUNT_T to have it define it as a larger size. (Some compilers 104 | * might produce faster code if you set it to an unsigned int.) 105 | */ 106 | 107 | #ifndef PCG_BITCOUNT_T 108 | typedef uint8_t bitcount_t; 109 | #else 110 | typedef PCG_BITCOUNT_T bitcount_t; 111 | #endif 112 | 113 | /* 114 | * C++ requires us to be able to serialize RNG state by printing or reading 115 | * it from a stream. Because we use 128-bit ints, we also need to be able 116 | * ot print them, so here is code to do so. 117 | * 118 | * This code provides enough functionality to print 128-bit ints in decimal 119 | * and zero-padded in hex. It's not a full-featured implementation. 
120 | */ 121 | 122 | template 123 | std::basic_ostream& 124 | operator<<(std::basic_ostream& out, pcg128_t value) 125 | { 126 | auto desired_base = out.flags() & out.basefield; 127 | bool want_hex = desired_base == out.hex; 128 | 129 | if (want_hex) { 130 | uint64_t highpart = uint64_t(value >> 64); 131 | uint64_t lowpart = uint64_t(value); 132 | auto desired_width = out.width(); 133 | if (desired_width > 16) { 134 | out.width(desired_width - 16); 135 | } 136 | if (highpart != 0 || desired_width > 16) 137 | out << highpart; 138 | CharT oldfill; 139 | if (highpart != 0) { 140 | out.width(16); 141 | oldfill = out.fill('0'); 142 | } 143 | auto oldflags = out.setf(decltype(desired_base){}, out.showbase); 144 | out << lowpart; 145 | out.setf(oldflags); 146 | if (highpart != 0) { 147 | out.fill(oldfill); 148 | } 149 | return out; 150 | } 151 | constexpr size_t MAX_CHARS_128BIT = 40; 152 | 153 | char buffer[MAX_CHARS_128BIT]; 154 | char* pos = buffer+sizeof(buffer); 155 | *(--pos) = '\0'; 156 | constexpr auto BASE = pcg128_t(10ULL); 157 | do { 158 | auto div = value / BASE; 159 | auto mod = static_cast(value - (div * BASE)); 160 | *(--pos) = '0' + mod; 161 | value = div; 162 | } while(value != pcg128_t(0ULL)); 163 | return out << pos; 164 | } 165 | 166 | template 167 | std::basic_istream& 168 | operator>>(std::basic_istream& in, pcg128_t& value) 169 | { 170 | typename std::basic_istream::sentry s(in); 171 | 172 | if (!s) 173 | return in; 174 | 175 | constexpr auto BASE = pcg128_t(10ULL); 176 | pcg128_t current(0ULL); 177 | bool did_nothing = true; 178 | bool overflow = false; 179 | for(;;) { 180 | CharT wide_ch = in.get(); 181 | if (!in.good()) 182 | break; 183 | auto ch = in.narrow(wide_ch, '\0'); 184 | if (ch < '0' || ch > '9') { 185 | in.unget(); 186 | break; 187 | } 188 | did_nothing = false; 189 | pcg128_t digit(uint32_t(ch - '0')); 190 | pcg128_t timesbase = current*BASE; 191 | overflow = overflow || timesbase < current; 192 | current = timesbase + digit; 193 | 
overflow = overflow || current < digit; 194 | } 195 | 196 | if (did_nothing || overflow) { 197 | in.setstate(std::ios::failbit); 198 | if (overflow) 199 | current = ~pcg128_t(0ULL); 200 | } 201 | 202 | value = current; 203 | 204 | return in; 205 | } 206 | 207 | /* 208 | * Likewise, if people use tiny rngs, we'll be serializing uint8_t. 209 | * If we just used the provided IO operators, they'd read/write chars, 210 | * not ints, so we need to define our own. We *can* redefine this operator 211 | * here because we're in our own namespace. 212 | */ 213 | 214 | template 215 | std::basic_ostream& 216 | operator<<(std::basic_ostream&out, uint8_t value) 217 | { 218 | return out << uint32_t(value); 219 | } 220 | 221 | template 222 | std::basic_istream& 223 | operator>>(std::basic_istream& in, uint8_t& target) 224 | { 225 | uint32_t value = 0xdecea5edU; 226 | in >> value; 227 | if (!in && value == 0xdecea5edU) 228 | return in; 229 | if (value > uint8_t(~0)) { 230 | in.setstate(std::ios::failbit); 231 | value = ~0U; 232 | } 233 | target = uint8_t(value); 234 | return in; 235 | } 236 | 237 | /* Unfortunately, the above functions don't get found in preference to the 238 | * built in ones, so we create some more specific overloads that will. 239 | * Ugh. 240 | */ 241 | 242 | inline std::ostream& operator<<(std::ostream& out, uint8_t value) 243 | { 244 | return pcg_extras::operator<< (out, value); 245 | } 246 | 247 | inline std::istream& operator>>(std::istream& in, uint8_t& value) 248 | { 249 | return pcg_extras::operator>> (in, value); 250 | } 251 | 252 | 253 | 254 | /* 255 | * Useful bitwise operations. 256 | */ 257 | 258 | /* 259 | * XorShifts are invertable, but they are someting of a pain to invert. 260 | * This function backs them out. It's used by the whacky "inside out" 261 | * generator defined later. 
262 | */ 263 | 264 | template 265 | inline itype unxorshift(itype x, bitcount_t bits, bitcount_t shift) 266 | { 267 | if (2*shift >= bits) { 268 | return x ^ (x >> shift); 269 | } 270 | itype lowmask1 = (itype(1U) << (bits - shift*2)) - 1; 271 | itype highmask1 = ~lowmask1; 272 | itype top1 = x; 273 | itype bottom1 = x & lowmask1; 274 | top1 ^= top1 >> shift; 275 | top1 &= highmask1; 276 | x = top1 | bottom1; 277 | itype lowmask2 = (itype(1U) << (bits - shift)) - 1; 278 | itype bottom2 = x & lowmask2; 279 | bottom2 = unxorshift(bottom2, bits - shift, shift); 280 | bottom2 &= lowmask1; 281 | return top1 | bottom2; 282 | } 283 | 284 | /* 285 | * Rotate left and right. 286 | * 287 | * In ideal world, compilers would spot idiomatic rotate code and convert it 288 | * to a rotate instruction. Of course, opinions vary on what the correct 289 | * idiom is and how to spot it. For clang, sometimes it generates better 290 | * (but still crappy) code if you define PCG_USE_ZEROCHECK_ROTATE_IDIOM. 291 | */ 292 | 293 | template 294 | inline itype rotl(itype value, bitcount_t rot) 295 | { 296 | constexpr bitcount_t bits = sizeof(itype) * 8; 297 | constexpr bitcount_t mask = bits - 1; 298 | #if PCG_USE_ZEROCHECK_ROTATE_IDIOM 299 | return rot ? (value << rot) | (value >> (bits - rot)) : value; 300 | #else 301 | return (value << rot) | (value >> ((- rot) & mask)); 302 | #endif 303 | } 304 | 305 | template 306 | inline itype rotr(itype value, bitcount_t rot) 307 | { 308 | constexpr bitcount_t bits = sizeof(itype) * 8; 309 | constexpr bitcount_t mask = bits - 1; 310 | #if PCG_USE_ZEROCHECK_ROTATE_IDIOM 311 | return rot ? (value >> rot) | (value << (bits - rot)) : value; 312 | #else 313 | return (value >> rot) | (value << ((- rot) & mask)); 314 | #endif 315 | } 316 | 317 | /* Unfortunately, both Clang and GCC sometimes perform poorly when it comes 318 | * to properly recognizing idiomatic rotate code, so for we also provide 319 | * assembler directives (enabled with PCG_USE_INLINE_ASM). 
Boo, hiss. 320 | * (I hope that these compilers get better so that this code can die.) 321 | * 322 | * These overloads will be preferred over the general template code above. 323 | */ 324 | #if PCG_USE_INLINE_ASM && __GNUC__ && (__x86_64__ || __i386__) 325 | 326 | inline uint8_t rotr(uint8_t value, bitcount_t rot) 327 | { 328 | asm ("rorb %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); 329 | return value; 330 | } 331 | 332 | inline uint16_t rotr(uint16_t value, bitcount_t rot) 333 | { 334 | asm ("rorw %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); 335 | return value; 336 | } 337 | 338 | inline uint32_t rotr(uint32_t value, bitcount_t rot) 339 | { 340 | asm ("rorl %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); 341 | return value; 342 | } 343 | 344 | #if __x86_64__ 345 | inline uint64_t rotr(uint64_t value, bitcount_t rot) 346 | { 347 | asm ("rorq %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); 348 | return value; 349 | } 350 | #endif // __x86_64__ 351 | 352 | #endif // PCG_USE_INLINE_ASM 353 | 354 | 355 | /* 356 | * The C++ SeedSeq concept (modelled by seed_seq) can fill an array of 357 | * 32-bit integers with seed data, but sometimes we want to produce 358 | * larger or smaller integers. 359 | * 360 | * The following code handles this annoyance. 361 | * 362 | * uneven_copy will copy an array of 32-bit ints to an array of larger or 363 | * smaller ints (actually, the code is general it only needing forward 364 | * iterators). The copy is identical to the one that would be performed if 365 | * we just did memcpy on a standard little-endian machine, but works 366 | * regardless of the endian of the machine (or the weirdness of the ints 367 | * involved). 368 | * 369 | * generate_to initializes an array of integers using a SeedSeq 370 | * object. It is given the size as a static constant at compile time and 371 | * tries to avoid memory allocation. If we're filling in 32-bit constants 372 | * we just do it directly. 
If we need a separate buffer and it's small, 373 | * we allocate it on the stack. Otherwise, we fall back to heap allocation. 374 | * Ugh. 375 | * 376 | * generate_one produces a single value of some integral type using a 377 | * SeedSeq object. 378 | */ 379 | 380 | /* uneven_copy helper, case where destination ints are less than 32 bit. */ 381 | 382 | template 383 | SrcIter uneven_copy_impl( 384 | SrcIter src_first, DestIter dest_first, DestIter dest_last, 385 | std::true_type) 386 | { 387 | typedef typename std::iterator_traits::value_type src_t; 388 | typedef typename std::iterator_traits::value_type dest_t; 389 | 390 | constexpr bitcount_t SRC_SIZE = sizeof(src_t); 391 | constexpr bitcount_t DEST_SIZE = sizeof(dest_t); 392 | constexpr bitcount_t DEST_BITS = DEST_SIZE * 8; 393 | constexpr bitcount_t SCALE = SRC_SIZE / DEST_SIZE; 394 | 395 | size_t count = 0; 396 | src_t value; 397 | 398 | while (dest_first != dest_last) { 399 | if ((count++ % SCALE) == 0) 400 | value = *src_first++; // Get more bits 401 | else 402 | value >>= DEST_BITS; // Move down bits 403 | 404 | *dest_first++ = dest_t(value); // Truncates, ignores high bits. 405 | } 406 | return src_first; 407 | } 408 | 409 | /* uneven_copy helper, case where destination ints are more than 32 bit. 
*/ 410 | 411 | template 412 | SrcIter uneven_copy_impl( 413 | SrcIter src_first, DestIter dest_first, DestIter dest_last, 414 | std::false_type) 415 | { 416 | typedef typename std::iterator_traits::value_type src_t; 417 | typedef typename std::iterator_traits::value_type dest_t; 418 | 419 | constexpr auto SRC_SIZE = sizeof(src_t); 420 | constexpr auto SRC_BITS = SRC_SIZE * 8; 421 | constexpr auto DEST_SIZE = sizeof(dest_t); 422 | constexpr auto SCALE = (DEST_SIZE+SRC_SIZE-1) / SRC_SIZE; 423 | 424 | while (dest_first != dest_last) { 425 | dest_t value(0UL); 426 | unsigned int shift = 0; 427 | 428 | for (size_t i = 0; i < SCALE; ++i) { 429 | value |= dest_t(*src_first++) << shift; 430 | shift += SRC_BITS; 431 | } 432 | 433 | *dest_first++ = value; 434 | } 435 | return src_first; 436 | } 437 | 438 | /* uneven_copy, call the right code for larger vs. smaller */ 439 | 440 | template 441 | inline SrcIter uneven_copy(SrcIter src_first, 442 | DestIter dest_first, DestIter dest_last) 443 | { 444 | typedef typename std::iterator_traits::value_type src_t; 445 | typedef typename std::iterator_traits::value_type dest_t; 446 | 447 | constexpr bool DEST_IS_SMALLER = sizeof(dest_t) < sizeof(src_t); 448 | 449 | return uneven_copy_impl(src_first, dest_first, dest_last, 450 | std::integral_constant{}); 451 | } 452 | 453 | /* generate_to, fill in a fixed-size array of integral type using a SeedSeq 454 | * (actually works for any random-access iterator) 455 | */ 456 | 457 | template 458 | inline void generate_to_impl(SeedSeq&& generator, DestIter dest, 459 | std::true_type) 460 | { 461 | generator.generate(dest, dest+size); 462 | } 463 | 464 | template 465 | void generate_to_impl(SeedSeq&& generator, DestIter dest, 466 | std::false_type) 467 | { 468 | typedef typename std::iterator_traits::value_type dest_t; 469 | constexpr auto DEST_SIZE = sizeof(dest_t); 470 | constexpr auto GEN_SIZE = sizeof(uint32_t); 471 | 472 | constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE; 473 | constexpr 
size_t FROM_ELEMS = 474 | GEN_IS_SMALLER 475 | ? size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE) 476 | : (size + (GEN_SIZE / DEST_SIZE) - 1) 477 | / ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER); 478 | // this odd code ^^^^^^^^^^^^^^^^^ is work-around for 479 | // a bug: http://llvm.org/bugs/show_bug.cgi?id=21287 480 | 481 | if (FROM_ELEMS <= 1024) { 482 | uint32_t buffer[FROM_ELEMS]; 483 | generator.generate(buffer, buffer+FROM_ELEMS); 484 | uneven_copy(buffer, dest, dest+size); 485 | } else { 486 | uint32_t* buffer = (uint32_t*) malloc(GEN_SIZE * FROM_ELEMS); 487 | generator.generate(buffer, buffer+FROM_ELEMS); 488 | uneven_copy(buffer, dest, dest+size); 489 | free(buffer); 490 | } 491 | } 492 | 493 | template 494 | inline void generate_to(SeedSeq&& generator, DestIter dest) 495 | { 496 | typedef typename std::iterator_traits::value_type dest_t; 497 | constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t); 498 | 499 | generate_to_impl(std::forward(generator), dest, 500 | std::integral_constant{}); 501 | } 502 | 503 | /* generate_one, produce a value of integral type using a SeedSeq 504 | * (optionally, we can have it produce more than one and pick which one 505 | * we want) 506 | */ 507 | 508 | template 509 | inline UInt generate_one(SeedSeq&& generator) 510 | { 511 | UInt result[N]; 512 | generate_to(std::forward(generator), result); 513 | return result[i]; 514 | } 515 | 516 | template 517 | auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) 518 | -> typename RngType::result_type 519 | { 520 | typedef typename RngType::result_type rtype; 521 | rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound) 522 | % upper_bound; 523 | for (;;) { 524 | rtype r = rng() - RngType::min(); 525 | if (r >= threshold) 526 | return r % upper_bound; 527 | } 528 | } 529 | 530 | template 531 | void shuffle(Iter from, Iter to, RandType&& rng) 532 | { 533 | typedef typename std::iterator_traits::difference_type delta_t; 534 | auto count = to - 
from; 535 | while (count > 1) { 536 | delta_t chosen(bounded_rand(rng, count)); 537 | --count; 538 | --to; 539 | using std::swap; 540 | swap(*(from+chosen), *to); 541 | } 542 | } 543 | 544 | /* 545 | * Although std::seed_seq is useful, it isn't everything. Often we want to 546 | * initialize a random-number generator some other way, such as from a random 547 | * device. 548 | * 549 | * Technically, it does not meet the requirements of a SeedSequence because 550 | * it lacks some of the rarely-used member functions (some of which would 551 | * be impossible to provide). However the C++ standard is quite specific 552 | * that actual engines only called the generate method, so it ought not to be 553 | * a problem in practice. 554 | */ 555 | 556 | template 557 | class seed_seq_from { 558 | private: 559 | RngType rng_; 560 | 561 | typedef uint_least32_t result_type; 562 | 563 | public: 564 | template 565 | seed_seq_from(Args&&... args) : 566 | rng_(std::forward(args)...) 567 | { 568 | // Nothing (else) to do... 569 | } 570 | 571 | template 572 | void generate(Iter start, Iter finish) 573 | { 574 | for (auto i = start; i != finish; ++i) 575 | *i = result_type(rng_()); 576 | } 577 | 578 | constexpr size_t size() const 579 | { 580 | return (sizeof(typename RngType::result_type) > sizeof(result_type) 581 | && RngType::max() > ~size_t(0UL)) 582 | ? ~size_t(0UL) 583 | : size_t(RngType::max()); 584 | } 585 | }; 586 | 587 | /* 588 | * Sometimes you might want a distinct seed based on when the program 589 | * was compiled. That way, a particular instance of the program will 590 | * behave the same way, but when recompiled it'll produce a different 591 | * value. 592 | */ 593 | 594 | template 595 | struct static_arbitrary_seed { 596 | private: 597 | static constexpr IntType fnv(IntType hash, const char* pos) { 598 | return *pos == '\0' 599 | ? 
hash 600 | : fnv((hash * IntType(16777619U)) ^ *pos, (pos+1)); 601 | } 602 | 603 | public: 604 | static constexpr IntType value = fnv(IntType(2166136261U ^ sizeof(IntType)), 605 | __DATE__ __TIME__ __FILE__); 606 | }; 607 | 608 | // Sometimes, when debugging or testing, it's handy to be able print the name 609 | // of a (in human-readable form). This code allows the idiom: 610 | // 611 | // cout << printable_typename() 612 | // 613 | // to print out my_foo_type_t (or its concrete type if it is a synonym) 614 | 615 | template 616 | struct printable_typename {}; 617 | 618 | template 619 | std::ostream& operator<<(std::ostream& out, printable_typename) { 620 | const char *implementation_typename = typeid(T).name(); 621 | #ifdef __GNUC__ 622 | int status; 623 | const char* pretty_name = 624 | abi::__cxa_demangle(implementation_typename, NULL, NULL, &status); 625 | if (status == 0) 626 | out << pretty_name; 627 | free((void*) pretty_name); 628 | if (status == 0) 629 | return out; 630 | #endif 631 | out << implementation_typename; 632 | return out; 633 | } 634 | 635 | } // namespace pcg_extras 636 | 637 | #endif // PCG_EXTRAS_HPP_INCLUDED 638 | -------------------------------------------------------------------------------- /tests/pcg/pcg_uint128.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * PCG Random Number Generation for C++ 3 | * 4 | * Copyright 2014 Melissa O'Neill 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/*
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For additional information about the PCG random number generation scheme,
 * including its license and other licensing options, visit
 *
 *     http://www.pcg-random.org
 */

/*
 * This code provides a C++ class that can provide 128-bit (or higher)
 * integers.  To produce 2K-bit integers, it uses two K-bit integers,
 * placed in a union that allows the code to also see them as four K/2 bit
 * integers (and access them either directly by name, or by index).
 *
 * It may seem like we're reinventing the wheel here, because several
 * libraries already exist that support large integers, but most existing
 * libraries provide a very generic multiprecision code, but here we're
 * operating at a fixed size.  Also, most other libraries are fairly
 * heavyweight.  So we use a direct implementation.  Sadly, it's much slower
 * than hand-coded assembly or direct CPU support.
 */

#ifndef PCG_UINT128_HPP_INCLUDED
#define PCG_UINT128_HPP_INCLUDED 1

#include <cstdint>
#include <cstdio>
#include <cassert>
#include <climits>
#include <utility>
#include <initializer_list>
#include <type_traits>

/*
 * We want to lay the type out the same way that a native type would be laid
 * out, which means we must know the machine's endian, at compile time.
 * This ugliness attempts to do so.
 */

#ifndef PCG_LITTLE_ENDIAN
    #if defined(__BYTE_ORDER__)
        #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            #define PCG_LITTLE_ENDIAN 1
        #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            #define PCG_LITTLE_ENDIAN 0
        #else
            #error __BYTE_ORDER__ does not match a standard endian, pick a side
        #endif
    #elif __LITTLE_ENDIAN__ || _LITTLE_ENDIAN
        #define PCG_LITTLE_ENDIAN 1
    #elif __BIG_ENDIAN__ || _BIG_ENDIAN
        #define PCG_LITTLE_ENDIAN 0
    #elif __x86_64 || __x86_64__ || __i386 || __i386__ || _M_IX86 || _M_X64
        #define PCG_LITTLE_ENDIAN 1
    #elif __powerpc__ || __POWERPC__ || __ppc__ || __PPC__ \
          || __m68k__ || __mc68000__
        #define PCG_LITTLE_ENDIAN 0
    #else
        #error Unable to determine target endianness
    #endif
#endif

namespace pcg_extras {

// Recent versions of GCC have intrinsics we can use to quickly calculate
// the number of leading and trailing zeros in a number.  If possible, we
// use them, otherwise we fall back to old-fashioned bit twiddling to figure
// them out.

#ifndef PCG_BITCOUNT_T
    typedef uint8_t bitcount_t;
#else
    typedef PCG_BITCOUNT_T bitcount_t;
#endif

/*
 * Provide some useful helper functions
 *      * flog2                 floor(log2(x))
 *      * trailingzeros         number of trailing zero bits
 */

#ifdef __GNUC__         // Any GNU-compatible compiler supporting C++11 has
                        // some useful intrinsics we can use.

inline bitcount_t flog2(uint32_t v)
{
    return 31 - __builtin_clz(v);
}

inline bitcount_t trailingzeros(uint32_t v)
{
    return __builtin_ctz(v);
}

inline bitcount_t flog2(uint64_t v)
{
#if UINT64_MAX == ULONG_MAX
    return 63 - __builtin_clzl(v);
#elif UINT64_MAX == ULLONG_MAX
    return 63 - __builtin_clzll(v);
#else
    #error Cannot find a function for uint64_t
#endif
}

inline bitcount_t trailingzeros(uint64_t v)
{
#if UINT64_MAX == ULONG_MAX
    return __builtin_ctzl(v);
#elif UINT64_MAX == ULLONG_MAX
    return __builtin_ctzll(v);
#else
    #error Cannot find a function for uint64_t
#endif
}

#else                   // Otherwise, we fall back to bit twiddling
                        // implementations

inline bitcount_t flog2(uint32_t v)
{
    // Based on code by Eric Cole and Mark Dickinson, which appears at
    // https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn

    static const uint8_t multiplyDeBruijnBitPos[32] = {
      0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
      8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31
    };

    v |= v >> 1; // first round down to one less than a power of 2
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;

    return multiplyDeBruijnBitPos[(uint32_t)(v * 0x07C4ACDDU) >> 27];
}

inline bitcount_t trailingzeros(uint32_t v)
{
    static const uint8_t multiplyDeBruijnBitPos[32] = {
      0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
      31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };

    return multiplyDeBruijnBitPos[((uint32_t)((v & -v) * 0x077CB531U)) >> 27];
}

inline bitcount_t flog2(uint64_t v)
{
    uint32_t high = v >> 32;
    uint32_t low  = uint32_t(v);

    return high ? 32+flog2(high) : flog2(low);
}

inline bitcount_t trailingzeros(uint64_t v)
{
    uint32_t high = v >> 32;
    uint32_t low  = uint32_t(v);

    return low ? trailingzeros(low) : trailingzeros(high)+32;
}

#endif

// ceil(log2(v)): one more than flog2 unless v is an exact power of two.
template <typename UInt>
inline bitcount_t clog2(UInt v)
{
    return flog2(v) + ((v & (-v)) != v);
}

// Returns x + y + carryin; *carryout reports whether the sum wrapped.
template <typename UInt>
inline UInt addwithcarry(UInt x, UInt y, bool carryin, bool* carryout)
{
    UInt half_result = y + carryin;
    UInt result = x + half_result;
    *carryout = (half_result < y) || (result < x);
    return result;
}

// Returns x - (y + carryin); *carryout reports whether a borrow occurred.
template <typename UInt>
inline UInt subwithcarry(UInt x, UInt y, bool carryin, bool* carryout)
{
    UInt half_result = y + carryin;
    UInt result = x - half_result;
    *carryout = (half_result < y) || (result > x);
    return result;
}


/*
 * A 4*K-bit unsigned integer built from four K-bit words (UInt) that can
 * also be viewed as two 2*K-bit halves (UIntX2).  The union lays the words
 * out so the whole value matches native endian ordering.
 *
 * NOTE(review): the word-level arithmetic below shifts by a literal 32, so
 * this effectively assumes UInt is 32 bits wide (uint_x4<uint32_t,uint64_t>).
 */
template <typename UInt, typename UIntX2>
class uint_x4 {
// private:
public:
    union {
#if PCG_LITTLE_ENDIAN
        struct {
            UInt v0, v1, v2, v3;
        } w;
        struct {
            UIntX2 v01, v23;
        } d;
#else
        struct {
            UInt v3, v2, v1, v0;
        } w;
        struct {
            UIntX2 v23, v01;
        } d;
#endif
        // For the array access versions, the code that uses the array
        // must handle endian itself.  Yuck.
        UInt wa[4];
        UIntX2 da[2];
    };

public:
    uint_x4() = default;

    constexpr uint_x4(UInt v3, UInt v2, UInt v1, UInt v0)
#if PCG_LITTLE_ENDIAN
       : w{v0, v1, v2, v3}
#else
       : w{v3, v2, v1, v0}
#endif
    {
        // Nothing (else) to do
    }

    constexpr uint_x4(UIntX2 v23, UIntX2 v01)
#if PCG_LITTLE_ENDIAN
       : d{v01,v23}
#else
       : d{v23,v01}
#endif
    {
        // Nothing (else) to do
    }

    // Implicit conversion from any integral type no wider than UIntX2;
    // the value fills the low half, the high half is zero.
    template<class Integral,
             typename std::enable_if<(std::is_integral<Integral>::value
                                      && sizeof(Integral) <= sizeof(UIntX2))
                                    >::type* = nullptr>
    constexpr uint_x4(Integral v01)
#if PCG_LITTLE_ENDIAN
       : d{UIntX2(v01),0UL}
#else
       : d{0UL,UIntX2(v01)}
#endif
    {
        // Nothing (else) to do
    }

    // Narrowing conversions keep the low-order bits only.
    explicit constexpr operator uint64_t() const
    {
        return d.v01;
    }

    explicit constexpr operator uint32_t() const
    {
        return w.v0;
    }

    explicit constexpr operator int() const
    {
        return w.v0;
    }

    explicit constexpr operator uint16_t() const
    {
        return w.v0;
    }

    explicit constexpr operator uint8_t() const
    {
        return w.v0;
    }

    // Whichever of unsigned long / unsigned long long is NOT uint64_t,
    // so that all standard unsigned types have a conversion.
    typedef typename std::conditional<std::is_same<uint64_t,
                                                   unsigned long>::value,
                                      unsigned long long,
                                      unsigned long>::type
            uint_missing_t;

    explicit constexpr operator uint_missing_t() const
    {
        return d.v01;
    }

    explicit constexpr operator bool() const
    {
        return d.v01 || d.v23;
    }

    template<typename U, typename V>
    friend uint_x4<U,V> operator*(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend std::pair< uint_x4<U,V>,uint_x4<U,V> >
        divmod(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend uint_x4<U,V> operator+(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend uint_x4<U,V> operator-(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend uint_x4<U,V> operator<<(const uint_x4<U,V>&, const bitcount_t);

    template<typename U, typename V>
    friend uint_x4<U,V> operator>>(const uint_x4<U,V>&, const bitcount_t);

    template<typename U, typename V>
    friend uint_x4<U,V> operator&(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend uint_x4<U,V> operator|(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend uint_x4<U,V> operator^(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bool operator==(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bool operator!=(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bool operator<(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bool operator<=(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bool operator>(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bool operator>=(const uint_x4<U,V>&, const uint_x4<U,V>&);

    template<typename U, typename V>
    friend uint_x4<U,V> operator~(const uint_x4<U,V>&);

    template<typename U, typename V>
    friend uint_x4<U,V> operator-(const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bitcount_t flog2(const uint_x4<U,V>&);

    template<typename U, typename V>
    friend bitcount_t trailingzeros(const uint_x4<U,V>&);

    // Compound assignments are all defined in terms of the free operators.
    uint_x4& operator*=(const uint_x4& rhs)
    {
        uint_x4 result = *this * rhs;
        return *this = result;
    }

    uint_x4& operator/=(const uint_x4& rhs)
    {
        uint_x4 result = *this / rhs;
        return *this = result;
    }

    uint_x4& operator%=(const uint_x4& rhs)
    {
        uint_x4 result = *this % rhs;
        return *this = result;
    }

    uint_x4& operator+=(const uint_x4& rhs)
    {
        uint_x4 result = *this + rhs;
        return *this = result;
    }

    uint_x4& operator-=(const uint_x4& rhs)
    {
        uint_x4 result = *this - rhs;
        return *this = result;
    }

    uint_x4& operator&=(const uint_x4& rhs)
    {
        uint_x4 result = *this & rhs;
        return *this = result;
    }

    uint_x4& operator|=(const uint_x4& rhs)
    {
        uint_x4 result = *this | rhs;
        return *this = result;
    }

    uint_x4& operator^=(const uint_x4& rhs)
    {
        uint_x4 result = *this ^ rhs;
        return *this = result;
    }

    uint_x4& operator>>=(bitcount_t shift)
    {
        uint_x4 result = *this >> shift;
        return *this = result;
    }

    uint_x4& operator<<=(bitcount_t shift)
    {
        uint_x4 result = *this << shift;
        return *this = result;
    }

};

// floor(log2(v)); scans from the most significant word down.
// Aborts if v == 0 (log of zero is undefined).
template<typename UInt, typename UIntX2>
bitcount_t flog2(const uint_x4<UInt,UIntX2>& v)
{
#if PCG_LITTLE_ENDIAN
    for (uint8_t i = 4; i !=0; /* dec in loop */) {
        --i;
#else
    for (uint8_t i = 0; i < 4; ++i) {
#endif
        if (v.wa[i] == 0)
             continue;
        return flog2(v.wa[i]) + (sizeof(UInt)*CHAR_BIT)*i;
    }
    abort();
}

// Number of trailing zero bits; returns the full width for v == 0.
template<typename UInt, typename UIntX2>
bitcount_t trailingzeros(const uint_x4<UInt,UIntX2>& v)
{
#if PCG_LITTLE_ENDIAN
    for (uint8_t i = 0; i < 4; ++i) {
#else
    for (uint8_t i = 4; i !=0; /* dec in loop */) {
        --i;
#endif
        if (v.wa[i] != 0)
            return trailingzeros(v.wa[i]) + (sizeof(UInt)*CHAR_BIT)*i;
    }
    return (sizeof(UInt)*CHAR_BIT)*4;
}

// Shift-and-subtract long division; returns {quotient, remainder}.
template <typename UInt, typename UIntX2>
std::pair< uint_x4<UInt,UIntX2>, uint_x4<UInt,UIntX2> >
    divmod(const uint_x4<UInt,UIntX2>& orig_dividend,
           const uint_x4<UInt,UIntX2>& divisor)
{
    // If the dividend is less than the divisor, the answer is always zero.
    // This takes care of boundary cases like 0/x (which would otherwise be
    // problematic because we can't take the log of zero.  (The boundary case
    // of division by zero is undefined.)
    if (orig_dividend < divisor)
        return { uint_x4<UInt,UIntX2>(0UL), orig_dividend };

    auto dividend = orig_dividend;

    auto log2_divisor  = flog2(divisor);
    auto log2_dividend = flog2(dividend);
    // assert(log2_dividend >= log2_divisor);
    bitcount_t logdiff = log2_dividend - log2_divisor;

    constexpr uint_x4<UInt,UIntX2> ONE(1UL);
    if (logdiff == 0)
        return { ONE, dividend - divisor };

    // Now we change the log difference to
    //     floor(log2(divisor)) - ceil(log2(dividend))
    // to ensure that we *underestimate* the result.
    logdiff -= 1;

    uint_x4<UInt,UIntX2> quotient(0UL);

    auto qfactor = ONE << logdiff;
    auto factor  = divisor << logdiff;

    do {
        dividend -= factor;
        quotient += qfactor;
        while (dividend < factor) {
            factor  >>= 1;
            qfactor >>= 1;
        }
    } while (dividend >= divisor);

    return { quotient, dividend };
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator/(const uint_x4<UInt,UIntX2>& dividend,
                               const uint_x4<UInt,UIntX2>& divisor)
{
    return divmod(dividend, divisor).first;
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator%(const uint_x4<UInt,UIntX2>& dividend,
                               const uint_x4<UInt,UIntX2>& divisor)
{
    return divmod(dividend, divisor).second;
}


// Schoolbook multiplication on 32-bit words, with the high cross terms
// folded in via the double-width view (result is truncated to 128 bits).
template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator*(const uint_x4<UInt,UIntX2>& a,
                               const uint_x4<UInt,UIntX2>& b)
{
    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
    bool carryin = false;
    bool carryout;
    UIntX2 a0b0 = UIntX2(a.w.v0) * UIntX2(b.w.v0);
    r.w.v0 = UInt(a0b0);
    r.w.v1 = UInt(a0b0 >> 32);

    UIntX2 a1b0 = UIntX2(a.w.v1) * UIntX2(b.w.v0);
    r.w.v2 = UInt(a1b0 >> 32);
    r.w.v1 = addwithcarry(r.w.v1, UInt(a1b0), carryin, &carryout);
    carryin = carryout;
    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
    carryin = carryout;
    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);

    UIntX2 a0b1 = UIntX2(a.w.v0) * UIntX2(b.w.v1);
    carryin = false;
    r.w.v2 = addwithcarry(r.w.v2, UInt(a0b1 >> 32), carryin, &carryout);
    carryin = carryout;
    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);

    carryin = false;
    r.w.v1 = addwithcarry(r.w.v1, UInt(a0b1), carryin, &carryout);
    carryin = carryout;
    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
    carryin = carryout;
    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);

    UIntX2 a1b1 = UIntX2(a.w.v1) * UIntX2(b.w.v1);
    carryin = false;
    r.w.v2 = addwithcarry(r.w.v2, UInt(a1b1), carryin, &carryout);
    carryin = carryout;
    r.w.v3 = addwithcarry(r.w.v3, UInt(a1b1 >> 32), carryin, &carryout);

    r.d.v23 += a.d.v01 * b.d.v23 + a.d.v23 * b.d.v01;

    return r;
}


template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator+(const uint_x4<UInt,UIntX2>& a,
                               const uint_x4<UInt,UIntX2>& b)
{
    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};

    bool carryin = false;
    bool carryout;
    r.w.v0 = addwithcarry(a.w.v0, b.w.v0, carryin, &carryout);
    carryin = carryout;
    r.w.v1 = addwithcarry(a.w.v1, b.w.v1, carryin, &carryout);
    carryin = carryout;
    r.w.v2 = addwithcarry(a.w.v2, b.w.v2, carryin, &carryout);
    carryin = carryout;
    r.w.v3 = addwithcarry(a.w.v3, b.w.v3, carryin, &carryout);

    return r;
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator-(const uint_x4<UInt,UIntX2>& a,
                               const uint_x4<UInt,UIntX2>& b)
{
    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};

    bool carryin = false;
    bool carryout;
    r.w.v0 = subwithcarry(a.w.v0, b.w.v0, carryin, &carryout);
    carryin = carryout;
    r.w.v1 = subwithcarry(a.w.v1, b.w.v1, carryin, &carryout);
    carryin = carryout;
    r.w.v2 = subwithcarry(a.w.v2, b.w.v2, carryin, &carryout);
    carryin = carryout;
    r.w.v3 = subwithcarry(a.w.v3, b.w.v3, carryin, &carryout);

    return r;
}


// Bitwise operations act independently on the two double-width halves.
template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator&(const uint_x4<UInt,UIntX2>& a,
                               const uint_x4<UInt,UIntX2>& b)
{
    return uint_x4<UInt,UIntX2>(a.d.v23 & b.d.v23, a.d.v01 & b.d.v01);
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator|(const uint_x4<UInt,UIntX2>& a,
                               const uint_x4<UInt,UIntX2>& b)
{
    return uint_x4<UInt,UIntX2>(a.d.v23 | b.d.v23, a.d.v01 | b.d.v01);
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator^(const uint_x4<UInt,UIntX2>& a,
                               const uint_x4<UInt,UIntX2>& b)
{
    return uint_x4<UInt,UIntX2>(a.d.v23 ^ b.d.v23, a.d.v01 ^ b.d.v01);
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator~(const uint_x4<UInt,UIntX2>& v)
{
    return uint_x4<UInt,UIntX2>(~v.d.v23, ~v.d.v01);
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator-(const uint_x4<UInt,UIntX2>& v)
{
    return uint_x4<UInt,UIntX2>(0UL,0UL) - v;
}

template <typename UInt, typename UIntX2>
bool operator==(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
{
    return (a.d.v01 == b.d.v01) && (a.d.v23 == b.d.v23);
}

template <typename UInt, typename UIntX2>
bool operator!=(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
{
    return !operator==(a,b);
}


template <typename UInt, typename UIntX2>
bool operator<(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
{
    return (a.d.v23 < b.d.v23)
           || ((a.d.v23 == b.d.v23) && (a.d.v01 < b.d.v01));
}

template <typename UInt, typename UIntX2>
bool operator>(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
{
    return operator<(b,a);
}

template <typename UInt, typename UIntX2>
bool operator<=(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
{
    return !(operator<(b,a));
}

template <typename UInt, typename UIntX2>
bool operator>=(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
{
    return !(operator<(a,b));
}



// Whole-word moves handle multiples of the word width; the shiftmod branch
// additionally carries bits across adjacent words.
template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator<<(const uint_x4<UInt,UIntX2>& v,
                                const bitcount_t shift)
{
    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
    const bitcount_t bits    = sizeof(UInt) * CHAR_BIT;
    const bitcount_t bitmask = bits - 1;
    const bitcount_t shiftdiv = shift / bits;
    const bitcount_t shiftmod = shift & bitmask;

    if (shiftmod) {
        UInt carryover = 0;
#if PCG_LITTLE_ENDIAN
        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
#else
        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
            --out, --in;
#endif
            r.wa[out] = (v.wa[in] << shiftmod) | carryover;
            carryover = (v.wa[in] >> (bits - shiftmod));
        }
    } else {
#if PCG_LITTLE_ENDIAN
        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
#else
        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
            --out, --in;
#endif
            r.wa[out] = v.wa[in];
        }
    }

    return r;
}

template <typename UInt, typename UIntX2>
uint_x4<UInt,UIntX2> operator>>(const uint_x4<UInt,UIntX2>& v,
                                const bitcount_t shift)
{
    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
    const bitcount_t bits    = sizeof(UInt) * CHAR_BIT;
    const bitcount_t bitmask = bits - 1;
    const bitcount_t shiftdiv = shift / bits;
    const bitcount_t shiftmod = shift & bitmask;

    if (shiftmod) {
        UInt carryover = 0;
#if PCG_LITTLE_ENDIAN
        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
            --out, --in;
#else
        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
#endif
            r.wa[out] = (v.wa[in] >> shiftmod) | carryover;
            carryover = (v.wa[in] << (bits - shiftmod));
        }
    } else {
#if PCG_LITTLE_ENDIAN
        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
            --out, --in;
#else
        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
#endif
            r.wa[out] = v.wa[in];
        }
    }

    return r;
}

} // namespace pcg_extras

#endif // PCG_UINT128_HPP_INCLUDED

// -----------------------------------------------------------------------------
// /tests/stress-tests/CMakeLists.txt (CMake, not C++ — preserved here as a
// comment; the file continues in the next chunk):
//   find_package(Boost COMPONENTS system thread REQUIRED)
//
//   include_directories(${Boost_INCLUDE_DIRS})
include_directories(${Libcuckoo_INCLUDE_DIRS}) 5 | include_directories(${Folly_INCLUDE_DIRS}) 6 | 7 | add_executable(stress_unchecked stress_unchecked.cpp) 8 | target_link_libraries(stress_unchecked 9 | PRIVATE concurrent_hash_map 10 | PRIVATE pcg 11 | PRIVATE ${Boost_SYSTEM_LIBRARY} 12 | ) 13 | 14 | add_test(NAME stress_unchecked COMMAND stress_unchecked) 15 | 16 | add_executable(stress_checked 17 | stress_checked.cpp 18 | ) 19 | target_link_libraries(stress_checked 20 | PRIVATE concurrent_hash_map 21 | PRIVATE pcg 22 | PRIVATE ${Boost_SYSTEM_LIBRARY} 23 | ) 24 | add_test(NAME stress_checked COMMAND stress_checked) 25 | 26 | link_directories(${Folly_LIB_DIRS}) 27 | add_executable(benchmark benchmark.cpp) 28 | target_link_libraries(benchmark 29 | PRIVATE ${Boost_SYSTEM_LIBRARY} 30 | PRIVATE ${Boost_THREAD_LIBRARY} 31 | PRIVATE concurrent_hash_map 32 | PRIVATE pcg 33 | PRIVATE folly 34 | PRIVATE glog 35 | PRIVATE dl 36 | PRIVATE double-conversion 37 | PRIVATE iberty 38 | ) 39 | add_test(NAME benchmark COMMAND benchmark) 40 | -------------------------------------------------------------------------------- /tests/stress-tests/benchmark.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | 14 | #include 15 | #include 16 | #include 17 | 18 | #include 19 | 20 | template< 21 | class Key, 22 | class Value, 23 | class Hash = std::hash, 24 | class KeyEqual = std::equal_to, 25 | class Allocator = std::allocator>, 26 | class Locable = std::mutex 27 | > 28 | class concurrent_unordered_map { 29 | public: 30 | using bucket_type = std::unordered_map; 31 | using allocator_type = typename bucket_type::allocator_type; 32 | using key_type = typename bucket_type::key_type; 33 | using mapped_type = typename bucket_type::mapped_type; 34 | using value_type = typename bucket_type::value_type; 35 | using 
size_type = typename bucket_type::size_type; 36 | using difference_type = typename bucket_type::difference_type; 37 | using hasher = typename bucket_type::hasher; 38 | using key_equal = typename bucket_type::key_equal; 39 | using reference = typename bucket_type::reference; 40 | using const_reference = typename bucket_type::const_reference; 41 | using pointer = typename bucket_type::pointer; 42 | using const_pointer = typename bucket_type::const_pointer; 43 | using local_iterator = typename bucket_type::iterator; 44 | using const_local_iterator = typename bucket_type::const_iterator; 45 | 46 | private: 47 | using bucket_allocator = typename std::allocator_traits::template rebind_alloc; 48 | using locable_allocator = typename std::allocator_traits::template rebind_alloc; 49 | 50 | hasher hash; 51 | key_equal equal; 52 | allocator_type allocator; 53 | std::vector buckets; 54 | mutable std::vector bucket_mutexes; 55 | 56 | public: 57 | static constexpr size_type DEFAULT_BUCKET_COUNT = 16; 58 | 59 | explicit concurrent_unordered_map(size_type bucket_count = DEFAULT_BUCKET_COUNT, 60 | const Hash& hash = Hash(), 61 | const KeyEqual& equal = KeyEqual(), 62 | const allocator_type& allocator = allocator_type()) : 63 | hash(hash), 64 | equal(equal), 65 | allocator(allocator), 66 | buckets(bucket_allocator()), 67 | bucket_mutexes(bucket_count, locable_allocator()) 68 | { 69 | assert(bucket_count > 0); 70 | for (size_type i = 0; i < bucket_count; ++i) { 71 | bucket_type bucket(bucket_count, hash, equal, allocator_type()); 72 | buckets.push_back(std::move(bucket)); 73 | } 74 | } 75 | 76 | explicit concurrent_unordered_map(const allocator_type& allocator) : 77 | concurrent_unordered_map(DEFAULT_BUCKET_COUNT, Hash(), KeyEqual(), allocator) 78 | { 79 | } 80 | template< class InputIt > 81 | concurrent_unordered_map(InputIt first, InputIt last, 82 | size_type bucket_count = DEFAULT_BUCKET_COUNT, 83 | const Hash& hash = Hash(), 84 | const KeyEqual& equal = KeyEqual(), 85 | const 
allocator_type& allocator = allocator_type()) : 86 | concurrent_unordered_map(bucket_count, hash, equal, allocator) 87 | { 88 | insert(first, last); 89 | } 90 | 91 | concurrent_unordered_map(std::initializer_list init, 92 | size_type bucket_count = DEFAULT_BUCKET_COUNT, 93 | const Hash& hash = Hash(), 94 | const KeyEqual& equal = KeyEqual(), 95 | const allocator_type& allocator = allocator_type()) : 96 | concurrent_unordered_map(bucket_count, hash, equal, allocator) 97 | { 98 | insert(init.begin(), init.end()); 99 | } 100 | 101 | concurrent_unordered_map(const concurrent_unordered_map&) = delete; 102 | 103 | concurrent_unordered_map(concurrent_unordered_map&& other) { 104 | swap(other); 105 | } 106 | allocator_type get_allocator() const { 107 | return allocator; 108 | } 109 | 110 | concurrent_unordered_map& operator=(const concurrent_unordered_map&) = delete; 111 | concurrent_unordered_map& operator=(std::initializer_list) = delete; 112 | 113 | concurrent_unordered_map& operator=(concurrent_unordered_map&& other) { 114 | swap(other); 115 | return *this; 116 | } 117 | bool empty() const { 118 | lock_all(); 119 | try { 120 | bool result = true; 121 | for (int i = 0; i < buckets.size(); ++i) { 122 | if (!buckets[i].empty()) { 123 | result = false; 124 | break; 125 | } 126 | } 127 | unlock_all(); 128 | return result; 129 | } catch(...) { 130 | unlock_all(); 131 | throw; 132 | } 133 | } 134 | 135 | size_type size() const { 136 | lock_all(); 137 | try { 138 | size_type result = 0; 139 | for (size_type i = 0; i < buckets.size(); ++i) { 140 | result += buckets[i].size(); 141 | } 142 | unlock_all(); 143 | return result; 144 | } catch(...) { 145 | unlock_all(); 146 | throw; 147 | } 148 | } 149 | 150 | 151 | void clear() { 152 | lock_all(); 153 | try { 154 | for (size_type i = 0; i < buckets.size(); ++i) { 155 | buckets[i].clear(); 156 | } 157 | unlock_all(); 158 | } catch(...) 
{ 159 | unlock_all(); 160 | throw; 161 | } 162 | } 163 | 164 | size_type max_size() const { 165 | return buckets[0].max_size() / buckets.size(); 166 | } 167 | 168 | template 169 | class reference_guard : public std::reference_wrapper { 170 | public: 171 | explicit reference_guard(T& t) noexcept : std::reference_wrapper(t) { 172 | } 173 | }; 174 | 175 | reference_guard operator[](const Key& key) const { 176 | size_type bucket_id = bucket(key); 177 | std::lock_guard guard(bucket_mutexes[bucket_id]); 178 | return reference_guard(buckets[bucket_id][key]); 179 | } 180 | 181 | reference_guard operator[](const Key& key) { 182 | size_type bucket_id = bucket(key); 183 | std::lock_guard guard(bucket_mutexes[bucket_id]); 184 | return reference_guard(buckets[bucket_id][key]); 185 | } 186 | 187 | reference_guard at(const Key& key) { 188 | size_type bucket_id = bucket(key); 189 | std::lock_guard guard(bucket_mutexes[bucket_id]); 190 | return reference_guard(buckets[bucket_id].at(key)); 191 | } 192 | 193 | reference_guard at(const Key& key) const { 194 | size_type bucket_id = bucket(key); 195 | std::lock_guard guard(bucket_mutexes[bucket_id]); 196 | return reference_guard(buckets[bucket_id].at(key)); 197 | } 198 | 199 | size_type count(const Key& key) const { 200 | size_type bucket_id = bucket(key); 201 | std::lock_guard guard(bucket_mutexes[bucket_id]); 202 | return buckets[bucket_id].count(key); 203 | } 204 | 205 | void swap(concurrent_unordered_map& other) { 206 | using mutex_list = std::vector; 207 | mutex_list mutexes; 208 | 209 | for (size_type i = 0; i < buckets.size(); ++i) { 210 | mutexes.push_back(&bucket_mutexes[i]); 211 | } 212 | 213 | for (size_type i = 0; i < other.buckets.size(); ++i) { 214 | mutexes.push_back(&other.bucket_mutexes[i]); 215 | } 216 | 217 | boost::indirect_iterator first(mutexes.begin()); 218 | boost::indirect_iterator last(mutexes.end()); 219 | boost::lock(first, last); 220 | 221 | try { 222 | this->bucket_mutexes.swap(other.bucket_mutexes); 223 | 
this->buckets.swap(other.buckets); 224 | std::swap(this->hash, other.hash); 225 | } catch(...) { 226 | unlock(first, last); 227 | throw; 228 | } 229 | } 230 | 231 | template 232 | class base_iterator : public boost::iterator_facade< 233 | base_iterator, 234 | Val, boost::forward_traversal_tag> 235 | { 236 | public: 237 | base_iterator() : bucket_idx(-1), Container(nullptr) { 238 | } 239 | 240 | explicit base_iterator(Container* base_ptr, BucketIterator local_iterator, int bucket_idx) : 241 | base_ptr(base_ptr), local_iterator(local_iterator), bucket_idx(bucket_idx) 242 | { 243 | } 244 | 245 | private: 246 | friend class boost::iterator_core_access; 247 | void increment() { 248 | bool local_finished = false; 249 | { 250 | std::lock_guard guard(bucket_mutexes[bucket_idx]); 251 | ++local_iterator; 252 | local_finished = local_iterator == base_ptr->buckets[bucket_idx].end(); 253 | } 254 | 255 | if (local_finished) { 256 | ++bucket_idx; 257 | if (bucket_idx < base_ptr->buckets.size()) { 258 | std::lock_guard guard(bucket_mutexes[bucket_idx]); 259 | local_iterator = base_ptr->buckets[bucket_idx].begin(); 260 | } 261 | } 262 | } 263 | 264 | bool equal(base_iterator const& other) const { 265 | return this->base_ptr == other.base_ptr 266 | && this->bucket_idx == other.bucket_idx 267 | && this->local_iterator == other.local_iterator; 268 | } 269 | 270 | Val& dereference() const { 271 | return *local_iterator; 272 | } 273 | 274 | BucketIterator local_iterator; 275 | int bucket_idx; 276 | Container* base_ptr; 277 | }; 278 | 279 | typedef base_iterator> iterator; 286 | 287 | typedef base_iterator> const_iterator; 294 | 295 | iterator find(const Key& key) { 296 | size_type bucket_id = bucket(key); 297 | std::lock_guard guard(bucket_mutexes[bucket_id]); 298 | auto rez = buckets[bucket_id].find(key); 299 | if (rez != buckets[bucket_id].end()) { 300 | return iterator(this, rez, bucket_id); 301 | } else { 302 | return iterator(this, buckets[buckets.size() - 1].end(), 
buckets.size()); 303 | } 304 | } 305 | 306 | const_iterator find(const Key& key) const { 307 | size_type bucket_id = bucket(key); 308 | std::lock_guard guard(bucket_mutexes[bucket_id]); 309 | auto rez = buckets[bucket_id].find(key); 310 | return const_iterator(this, rez, bucket_id); 311 | } 312 | 313 | std::pair equal_range(const Key& key) { 314 | size_type bucket_id = bucket(key); 315 | std::lock_guard guard(bucket_mutexes[bucket_id]); 316 | auto rez = buckets[bucket_id].equal_range(key); 317 | return make_pair(iterator(this, rez.first, bucket_id), 318 | iterator(this, rez.second, bucket_id)); 319 | } 320 | 321 | std::pair equal_range(const Key& key) const { 322 | size_type bucket_id = bucket(key); 323 | std::lock_guard guard(bucket_mutexes[bucket_id]); 324 | auto rez = buckets[bucket_id].equal_range(key); 325 | return make_pair(const_iterator(this, rez.first, bucket_id), 326 | const_iterator(this, rez.second, bucket_id)); 327 | } 328 | 329 | 330 | std::pair insert(const value_type& val) { 331 | size_type bucket_id = bucket(val.first); 332 | std::lock_guard guard(bucket_mutexes[bucket_id]); 333 | auto result = buckets[bucket_id].insert(val); 334 | return make_pair(iterator(this, result.first, bucket_id), result.second); 335 | } 336 | 337 | template 338 | std::pair insert(P&& val) { 339 | size_type bucket_id = bucket(val.first); 340 | std::lock_guard guard(bucket_mutexes[bucket_id]); 341 | auto result = buckets[bucket_id].insert(val); 342 | return make_pair(iterator(this, result.first, bucket_id), result.second); 343 | } 344 | 345 | template 346 | void insert(InputIterator first, InputIterator last) { 347 | for (auto i = first; i != last; ++i) { 348 | insert(*i); 349 | } 350 | } 351 | 352 | void insert(std::initializer_list items) { 353 | insert(items.begin(), items.end()); 354 | } 355 | template 356 | std::pair insert_or_assign(const key_type& key, M&& obj) { 357 | size_type bucket_id = bucket(key); 358 | std::lock_guard guard(bucket_mutexes[bucket_id]); 359 | 
auto iter = buckets[bucket_id].find(key); 360 | if (iter != buckets[bucket_id].end()) { 361 | iter->second = obj; 362 | return make_pair(iterator(this, iter, bucket_id), false); 363 | } 364 | 365 | return insert(std::make_pair(key, std::forward(obj))); 366 | } 367 | 368 | template 369 | bool emplace(K key, Args&&... args) { 370 | size_type bucket_id = bucket(key); 371 | std::lock_guard guard(bucket_mutexes[bucket_id]); 372 | return buckets[bucket_id].emplace(key, args...).second; 373 | } 374 | 375 | iterator erase(const_iterator pos) { 376 | size_type bucket_id = bucket(pos->first); 377 | std::lock_guard guard(bucket_mutexes[bucket_id]); 378 | auto next = buckets[bucket_id].erase(pos.local_iterator); 379 | return iterator(this, next, bucket_id); 380 | } 381 | 382 | iterator erase(const_iterator first, const_iterator last) { 383 | iterator result; 384 | for (auto i = first; i != last; ++i) { 385 | result = erase(i); 386 | } 387 | return result; 388 | } 389 | 390 | size_type erase(const key_type& key) { 391 | size_type bucket_id = bucket(key); 392 | std::lock_guard guard(bucket_mutexes[bucket_id]); 393 | return buckets[bucket_id].erase(key); 394 | } 395 | 396 | iterator begin() { 397 | std::lock_guard guard(bucket_mutexes[0]); 398 | return iterator(this, buckets[0].begin(), 0); 399 | } 400 | 401 | iterator end() { 402 | std::lock_guard guard(bucket_mutexes[buckets.size() - 1]); 403 | return iterator(this, buckets[buckets.size() - 1].end(), buckets.size()); 404 | } 405 | 406 | const_iterator cbegin() const { 407 | std::lock_guard guard(bucket_mutexes[0]); 408 | return iterator(this, buckets[0].cbegin(), 0); 409 | } 410 | 411 | const_iterator cend() const { 412 | std::lock_guard guard(bucket_mutexes[buckets.size() - 1]); 413 | return iterator(this, buckets[buckets.size() - 1].cend(), buckets.size()); 414 | } 415 | 416 | const_iterator begin() const { 417 | return cbegin(); 418 | } 419 | 420 | const_iterator end() const { 421 | return cend(); 422 | } 423 | 424 | 
size_type bucket_count() const { 425 | return buckets.size(); 426 | } 427 | 428 | size_type max_bucket_count() const { 429 | return bucket_count(); 430 | } 431 | size_type bucket_size(const size_type bucket_id) const { 432 | std::lock_guard guard(bucket_mutexes[bucket_id]); 433 | return buckets[bucket_id].size(); 434 | } 435 | 436 | size_type bucket(const Key& key) const { 437 | return hash(key) % buckets.size(); 438 | } 439 | 440 | float load_factor() const { 441 | float result = 0; 442 | for (size_type i = 0; i < buckets.size(); ++i) { 443 | std::lock_guard guard(bucket_mutexes[i]); 444 | result += buckets[i].load_factor(); 445 | } 446 | return result / buckets.size(); 447 | } 448 | 449 | float max_load_factor() const { 450 | float result = std::numeric_limits::min(); 451 | for (size_type i = 0; i < buckets.size(); ++i) { 452 | std::lock_guard guard(bucket_mutexes[i]); 453 | result = std::max(result, buckets[i].max_load_factor()); 454 | } 455 | return result; 456 | } 457 | 458 | void max_load_factor(float ml) { 459 | for (size_type i = 0; i < buckets.size(); ++i) { 460 | std::lock_guard guard(bucket_mutexes[i]); 461 | buckets[i].max_load_factor(ml); 462 | } 463 | } 464 | 465 | void rehash(size_type count) { 466 | size_type per_bucket_count = (count + buckets.size() - 1) / buckets.size(); 467 | for (size_type i = 0; i < buckets.size(); ++i) { 468 | std::lock_guard guard(bucket_mutexes[i]); 469 | buckets[i].rehash(per_bucket_count); 470 | } 471 | } 472 | 473 | void reserve(size_type count) { 474 | rehash(std::ceil(count / max_load_factor())); 475 | } 476 | 477 | hasher hash_function() const { 478 | return hash; 479 | } 480 | key_equal key_eq() const { 481 | return equal; 482 | } 483 | 484 | private: 485 | friend class base_iterator>; 492 | 493 | friend class base_iterator>; 500 | 501 | void lock_all() { 502 | boost::lock(bucket_mutexes.begin(), bucket_mutexes.end()); 503 | } 504 | 505 | void unlock_all() { 506 | unlock_all(bucket_mutexes.begin(), 
bucket_mutexes.end()); 507 | } 508 | 509 | template 510 | void unlock_all(InputIterator first, InputIterator last) { 511 | for (auto i = first; i != last; ++i) { 512 | i->unlock(); 513 | } 514 | } 515 | }; 516 | 517 | constexpr int TEST_ITERATIONS = 1000 * 1000; 518 | 519 | template 520 | void test_thread(M& m, Operation op, float write_ratio, unsigned seed) { 521 | std::minstd_rand e(seed); 522 | std::uniform_int_distribution key_dist(1, std::numeric_limits::max()); 523 | std::uniform_int_distribution value_dist(1, std::numeric_limits::max()); 524 | std::uniform_int_distribution write_ratio_dist(1, 100); 525 | for (int i = 0; i < TEST_ITERATIONS; ++i) { 526 | unsigned key = key_dist(e); 527 | unsigned val = value_dist(e); 528 | bool need_write = write_ratio_dist(e) <= static_cast(write_ratio * 100); 529 | op(m, key, val, need_write); 530 | } 531 | } 532 | 533 | long long get_time_ms() { 534 | return std::chrono::duration_cast( 535 | std::chrono::steady_clock::now().time_since_epoch()).count(); 536 | } 537 | 538 | template 539 | void run_test(M& m, Operation op, const std::string& description, 540 | unsigned thread_count, float write_ratio) { 541 | auto start_time = get_time_ms(); 542 | 543 | std::vector threads; 544 | static const unsigned magic_prime = 1000 * 1000 * 1000 + 7; 545 | threads.reserve(thread_count); 546 | for (unsigned i = 0; i < thread_count; ++i) { 547 | std::thread t([&m, op, write_ratio, i] { 548 | test_thread(m, op, write_ratio, i * magic_prime); 549 | }); 550 | threads.push_back(std::move(t)); 551 | } 552 | 553 | for (int i = 0; i < thread_count; ++i) { 554 | threads[i].join(); 555 | } 556 | 557 | std::cout << description << " test is done for " 558 | << get_time_ms() - start_time 559 | << "ms." 
<< std::endl; 560 | } 561 | 562 | int main(int argc, char **argv) { 563 | srand(13213); 564 | 565 | std::cout << "Single key access" << std::endl; 566 | for (unsigned write_faction_denominator : { 1, 2, 5, 10, 20 }) { 567 | std::cout << "Write fraction: 1/" << write_faction_denominator << std::endl; 568 | for (unsigned thread_count : { 1, 2, 4, 8, 16, 32, 64 }) { 569 | std::cout << "Thread count: " << thread_count << std::endl; 570 | 571 | boost::synchronized_value> m0; 572 | m0->reserve(TEST_ITERATIONS); 573 | run_test(m0, [](boost::synchronized_value>& m, 574 | unsigned key, unsigned val, bool need_write) { 575 | auto i = m->find(0); 576 | 577 | if (need_write) { 578 | m->emplace(0, val); 579 | } 580 | }, "Default syncronized map", thread_count, 1.0 / write_faction_denominator); 581 | 582 | std::concurrent_unordered_map m1(TEST_ITERATIONS); 583 | run_test(m1, [](std::concurrent_unordered_map& m, 584 | unsigned key, unsigned val, bool need_write) { 585 | unsigned old; 586 | m.find(0, old); 587 | 588 | if (need_write) { 589 | m.emplace(0, val); 590 | } 591 | }, "std concurrent hash map", thread_count, 1.0 / write_faction_denominator); 592 | 593 | concurrent_unordered_map m2(thread_count); 594 | m2.reserve(TEST_ITERATIONS); 595 | run_test(m2, [](concurrent_unordered_map& m, 596 | unsigned key, unsigned val, bool need_write) { 597 | m.find(0); 598 | 599 | if (need_write) { 600 | m.emplace(0, val); 601 | } 602 | }, "Collision list based syncronized map", thread_count, 1.0 / write_faction_denominator); 603 | cuckoohash_map m3(TEST_ITERATIONS); 604 | run_test(m3, [](cuckoohash_map& m, 605 | unsigned key, unsigned val, bool need_write) { 606 | unsigned old; 607 | m.find(0, old); 608 | 609 | if (need_write) { 610 | m.insert(0, val); 611 | } 612 | }, "libcuckoo hash map", thread_count, 1.0 / write_faction_denominator); 613 | folly::ConcurrentHashMap, 615 | std::equal_to, 616 | std::allocator, 16> m4(TEST_ITERATIONS); 617 | run_test(m4, [](folly::ConcurrentHashMap, 619 | 
std::equal_to, 620 | std::allocator, 16>& m, 621 | unsigned key, unsigned val, bool need_write) { 622 | m.find(0); 623 | 624 | if (need_write) { 625 | m.emplace(0, val); 626 | } 627 | }, "folly ConcurrentHashMap", thread_count, 1.0 / write_faction_denominator); 628 | 629 | } 630 | std::cout << std::endl; 631 | 632 | } 633 | 634 | std::cout << "Int key, Int value" << std::endl; 635 | 636 | for (unsigned write_faction_denominator : { 1, 2, 5, 10, 20 }) { 637 | std::cout << "Write fraction: 1/" << write_faction_denominator << std::endl; 638 | for (unsigned thread_count : { 1, 2, 4, 8, 16, 32, 64 }) { 639 | std::cout << "Thread count: " << thread_count << std::endl; 640 | 641 | boost::synchronized_value> m0; 642 | m0->reserve(TEST_ITERATIONS); 643 | run_test(m0, [](boost::synchronized_value>& m, 644 | unsigned key, unsigned val, bool need_write) { 645 | auto i = m->find(key); 646 | 647 | if (need_write) { 648 | m->emplace(key, val); 649 | } 650 | }, "Default syncronized map", thread_count, 1.0 / write_faction_denominator); 651 | 652 | std::concurrent_unordered_map m1(TEST_ITERATIONS); 653 | run_test(m1, [](std::concurrent_unordered_map& m, 654 | unsigned key, unsigned val, bool need_write) { 655 | unsigned old; 656 | m.find(key, old); 657 | 658 | if (need_write) { 659 | m.emplace(key, val); 660 | } 661 | }, "std concurrent hash map", thread_count, 1.0 / write_faction_denominator); 662 | 663 | concurrent_unordered_map m2(thread_count); 664 | m2.reserve(TEST_ITERATIONS); 665 | run_test(m2, [](concurrent_unordered_map& m, 666 | unsigned key, unsigned val, bool need_write) { 667 | m.find(key); 668 | 669 | if (need_write) { 670 | m.emplace(key, val); 671 | } 672 | }, "Collision list based syncronized map", thread_count, 1.0 / write_faction_denominator); 673 | cuckoohash_map m3(TEST_ITERATIONS); 674 | run_test(m3, [](cuckoohash_map& m, 675 | unsigned key, unsigned val, bool need_write) { 676 | unsigned old; 677 | m.find(key, old); 678 | 679 | if (need_write) { 680 | 
m.insert(key, val); 681 | } 682 | }, "libcuckoo hash map", thread_count, 1.0 / write_faction_denominator); 683 | folly::ConcurrentHashMap, 685 | std::equal_to, 686 | std::allocator, 16> m4(TEST_ITERATIONS); 687 | run_test(m4, [](folly::ConcurrentHashMap, 689 | std::equal_to, 690 | std::allocator, 16>& m, 691 | unsigned key, unsigned val, bool need_write) { 692 | m.find(key); 693 | 694 | if (need_write) { 695 | m.emplace(key, val); 696 | } 697 | }, "folly ConcurrentHashMap", thread_count, 1.0 / write_faction_denominator); 698 | 699 | } 700 | std::cout << std::endl; 701 | } 702 | 703 | std::cout << std::endl; 704 | std::cout << "string key, string value" << std::endl; 705 | for (unsigned write_faction_denominator : { 1, 2, 5, 10, 20 }) { 706 | std::cout << "Write fraction: 1/" << write_faction_denominator << std::endl; 707 | for (unsigned thread_count : { 1, 2, 4, 8, 16, 32, 64 }) { 708 | std::cout << "Thread count: " << thread_count << std::endl; 709 | 710 | boost::synchronized_value> m0; 711 | m0->reserve(TEST_ITERATIONS); 712 | run_test(m0, [](boost::synchronized_value>& m, 713 | unsigned key, unsigned val, bool need_write) { 714 | auto str_key = std::to_string(key); 715 | auto str_val = std::to_string(val); 716 | auto i = m->find(str_key); 717 | 718 | if (need_write) { 719 | m->emplace(str_key, str_val); 720 | } 721 | }, "Default syncronized map", thread_count, 1.0 / write_faction_denominator); 722 | 723 | std::concurrent_unordered_map m1(TEST_ITERATIONS); 724 | run_test(m1, [](std::concurrent_unordered_map& m, 725 | unsigned key, unsigned val, bool need_write) { 726 | auto str_key = std::to_string(key); 727 | auto str_val = std::to_string(val); 728 | 729 | std::string old; 730 | m.find(str_key, old); 731 | 732 | if (need_write) { 733 | m.emplace(str_key, str_val); 734 | } 735 | }, "std concurrent hash map", thread_count, 1.0 / write_faction_denominator); 736 | 737 | concurrent_unordered_map m2(thread_count); 738 | m2.reserve(TEST_ITERATIONS); 739 | run_test(m2, 
[](concurrent_unordered_map& m, 740 | unsigned key, unsigned val, bool need_write) { 741 | std::string str_key = std::to_string(key); 742 | std::string str_val = std::to_string(val); 743 | 744 | m.find(str_key); 745 | 746 | if (need_write) { 747 | m.emplace(str_key, str_val); 748 | } 749 | }, "Collision list based syncronized map", thread_count, 1.0 / write_faction_denominator); 750 | cuckoohash_map m3(TEST_ITERATIONS); 751 | run_test(m3, [](cuckoohash_map& m, 752 | unsigned key, unsigned val, bool need_write) { 753 | auto str_key = std::to_string(key); 754 | auto str_val = std::to_string(val); 755 | 756 | std::string old; 757 | m.find(str_key, old); 758 | 759 | if (need_write) { 760 | m.insert(str_key, str_val); 761 | } 762 | }, "libcuckoo hash map", thread_count, 1.0 / write_faction_denominator); 763 | folly::ConcurrentHashMap, 765 | std::equal_to, 766 | std::allocator, 16> m4(TEST_ITERATIONS); 767 | run_test(m4, [](folly::ConcurrentHashMap, 769 | std::equal_to, 770 | std::allocator, 16>& m, 771 | unsigned key, unsigned val, bool need_write) { 772 | auto str_key = std::to_string(key); 773 | auto str_val = std::to_string(val); 774 | 775 | m.find(str_key); 776 | 777 | if (need_write) { 778 | m.emplace(str_key, str_val); 779 | } 780 | }, "folly ConcurrentHashMap", thread_count, 1.0 / write_faction_denominator); 781 | 782 | } 783 | std::cout << std::endl; 784 | } 785 | 786 | return 0; 787 | } 788 | -------------------------------------------------------------------------------- /tests/stress-tests/stress_checked.cpp: -------------------------------------------------------------------------------- 1 | // Tests concurrent inserts, deletes, updates, and finds. The test makes sure 2 | // that multiple operations are not run on the same key, so that the accuracy of 3 | // the operations can be verified. 
4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #include 21 | #include 22 | 23 | #include "test_util.hpp" 24 | 25 | typedef uint32_t KeyType; 26 | typedef std::string KeyType2; 27 | typedef uint32_t ValType; 28 | typedef int32_t ValType2; 29 | 30 | // The number of keys to size the table with, expressed as a power of 31 | // 2. This can be set with the command line flag --power 32 | size_t g_power = 25; 33 | size_t g_numkeys; // Holds 2^power 34 | // The number of threads spawned for each type of operation. This can 35 | // be set with the command line flag --thread-num 36 | size_t g_thread_num = 4; 37 | // Whether to disable inserts or not. This can be set with the command 38 | // line flag --disable-inserts 39 | bool g_disable_inserts = false; 40 | // Whether to disable deletes or not. This can be set with the command 41 | // line flag --disable-deletes 42 | bool g_disable_deletes = false; 43 | // Whether to disable updates or not. This can be set with the command 44 | // line flag --disable-updates 45 | bool g_disable_updates = false; 46 | // Whether to disable finds or not. This can be set with the command 47 | // line flag --disable-finds 48 | bool g_disable_finds = false; 49 | // How many seconds to run the test for. This can be set with the 50 | // command line flag --time 51 | size_t g_test_len = 10; 52 | // The seed for the random number generator. 
If this isn't set to a 53 | // nonzero value with the --seed flag, the current time is used 54 | size_t g_seed = 0; 55 | // Whether to use strings as the key 56 | bool g_use_strings = false; 57 | 58 | std::atomic num_inserts = ATOMIC_VAR_INIT(0); 59 | std::atomic num_deletes = ATOMIC_VAR_INIT(0); 60 | std::atomic num_updates = ATOMIC_VAR_INIT(0); 61 | std::atomic num_finds = ATOMIC_VAR_INIT(0); 62 | 63 | template 64 | class AllEnvironment { 65 | public: 66 | AllEnvironment() 67 | : table(g_numkeys), table2(g_numkeys), keys(g_numkeys), vals(g_numkeys), 68 | vals2(g_numkeys), in_table(new bool[g_numkeys]), 69 | in_use(new std::atomic_flag[g_numkeys]), 70 | val_dist(std::numeric_limits::min(), 71 | std::numeric_limits::max()), 72 | val_dist2(std::numeric_limits::min(), 73 | std::numeric_limits::max()), 74 | ind_dist(0, g_numkeys - 1), finished(false) { 75 | // Sets up the random number generator 76 | if (g_seed == 0) { 77 | g_seed = static_cast(std::chrono::system_clock::now().time_since_epoch().count()); 78 | } 79 | std::cout << "seed = " << g_seed << std::endl; 80 | gen_seed = g_seed; 81 | // Fills in all the vectors except vals, which will be filled 82 | // in by the insertion threads. 
83 | for (size_t i = 0; i < g_numkeys; i++) { 84 | keys[i] = generateKey(i); 85 | in_table[i] = false; 86 | in_use[i].clear(); 87 | } 88 | } 89 | 90 | std::concurrent_unordered_map table; 91 | std::concurrent_unordered_map table2; 92 | std::vector keys; 93 | std::vector vals; 94 | std::vector vals2; 95 | std::unique_ptr in_table; 96 | std::unique_ptr in_use; 97 | std::uniform_int_distribution val_dist; 98 | std::uniform_int_distribution val_dist2; 99 | std::uniform_int_distribution ind_dist; 100 | size_t gen_seed; 101 | // When set to true, it signals to the threads to stop running 102 | std::atomic finished; 103 | }; 104 | 105 | template 106 | void stress_insert_thread(AllEnvironment *env) { 107 | pcg64_fast gen(env->gen_seed); 108 | while (!env->finished.load()) { 109 | // Pick a random number between 0 and g_numkeys. If that slot is 110 | // not in use, lock the slot. Insert a random value into both 111 | // tables. The inserts should only be successful if the key 112 | // wasn't in the table. 
If the inserts succeeded, check that 113 | // the insertion were actually successful with another find 114 | // operation, and then store the values in their arrays and 115 | // set in_table to true and clear in_use 116 | size_t ind = env->ind_dist(gen); 117 | if (!env->in_use[ind].test_and_set()) { 118 | KType k = env->keys[ind]; 119 | ValType v = env->val_dist(gen); 120 | ValType2 v2 = env->val_dist2(gen); 121 | bool res = env->table.emplace(k, v); 122 | bool res2 = env->table2.emplace(k, v2); 123 | EXPECT_NE(res, env->in_table[ind]); 124 | EXPECT_NE(res2, env->in_table[ind]); 125 | if (res) { 126 | EXPECT_EQ(v, env->table.find(k).value()); 127 | EXPECT_EQ(v2, env->table2.find(k).value()); 128 | env->vals[ind] = v; 129 | env->vals2[ind] = v2; 130 | env->in_table[ind] = true; 131 | num_inserts.fetch_add(2, std::memory_order_relaxed); 132 | } 133 | env->in_use[ind].clear(); 134 | } 135 | } 136 | } 137 | 138 | template 139 | void delete_thread(AllEnvironment *env) { 140 | pcg64_fast gen(env->gen_seed); 141 | while (!env->finished.load()) { 142 | // Run deletes on a random key, check that the deletes 143 | // succeeded only if the keys were in the table. 
If the 144 | // deletes succeeded, check that the keys are indeed not in 145 | // the tables anymore, and then set in_table to false 146 | size_t ind = env->ind_dist(gen); 147 | if (!env->in_use[ind].test_and_set()) { 148 | KType k = env->keys[ind]; 149 | auto res = env->table.erase(k); 150 | auto res2 = env->table2.erase(k); 151 | EXPECT_EQ(res, env->in_table[ind]); 152 | EXPECT_EQ(res2, env->in_table[ind]); 153 | if (res) { 154 | EXPECT_FALSE(static_cast(env->table.find(k))); 155 | EXPECT_FALSE(static_cast(env->table2.find(k))); 156 | env->in_table[ind] = false; 157 | num_deletes.fetch_add(2, std::memory_order_relaxed); 158 | } 159 | env->in_use[ind].clear(); 160 | } 161 | } 162 | } 163 | 164 | template 165 | void update_thread(AllEnvironment *env) { 166 | pcg64_fast gen(env->gen_seed); 167 | std::uniform_int_distribution third(0, 2); 168 | auto updatefn = [](ValType &v) { v += 3; }; 169 | auto updatefn2 = [](ValType2 &v) { v += 10; }; 170 | while (!env->finished.load()) { 171 | // Run updates, update_fns, or upserts on a random key, check 172 | // that the operations succeeded only if the keys were in the 173 | // table (or that they succeeded regardless if it's an 174 | // upsert). 
If successful, check that the keys are indeed in 175 | // the table with the new value, and then set in_table to true 176 | size_t ind = env->ind_dist(gen); 177 | if (!env->in_use[ind].test_and_set()) { 178 | KType k = env->keys[ind]; 179 | ValType v; 180 | ValType2 v2; 181 | bool res, res2; 182 | switch (third(gen)) { 183 | case 0: 184 | // update 185 | v = env->val_dist(gen); 186 | v2 = env->val_dist2(gen); 187 | res = static_cast(env->table.update(k, v)); 188 | res2 = static_cast(env->table2.update(k, v2)); 189 | EXPECT_EQ(res, env->in_table[ind]); 190 | EXPECT_EQ(res2, env->in_table[ind]); 191 | break; 192 | case 1: 193 | // update_fn 194 | v = env->vals[ind]; 195 | v2 = env->vals2[ind]; 196 | updatefn(v); 197 | updatefn2(v2); 198 | res = env->table.visit(k, updatefn); 199 | res2 = env->table2.visit(k, updatefn2); 200 | EXPECT_EQ(res, env->in_table[ind]); 201 | EXPECT_EQ(res2, env->in_table[ind]); 202 | break; 203 | case 2: 204 | // upsert 205 | if (env->in_table[ind]) { 206 | // Then it should run updatefn 207 | v = env->vals[ind]; 208 | v2 = env->vals2[ind]; 209 | updatefn(v); 210 | updatefn2(v2); 211 | } else { 212 | // Then it should run an insert 213 | v = env->val_dist(gen); 214 | v2 = env->val_dist2(gen); 215 | } 216 | // These upserts should always succeed, so set res and res2 to 217 | // true. 
218 | env->table.emplace_or_visit(k, updatefn, v); 219 | env->table2.emplace_or_visit(k, updatefn2, v2); 220 | res = res2 = true; 221 | env->in_table[ind] = true; 222 | break; 223 | default: 224 | throw std::logic_error("Impossible"); 225 | } 226 | if (res) { 227 | EXPECT_EQ(v, env->table.find(k).value()); 228 | EXPECT_EQ(v2, env->table2.find(k).value()); 229 | env->vals[ind] = v; 230 | env->vals2[ind] = v2; 231 | num_updates.fetch_add(2, std::memory_order_relaxed); 232 | } 233 | env->in_use[ind].clear(); 234 | } 235 | } 236 | } 237 | 238 | template 239 | void find_thread(AllEnvironment *env) { 240 | pcg64_fast gen(env->gen_seed); 241 | while (!env->finished.load()) { 242 | // Run finds on a random key and check that the presence of 243 | // the keys matches in_table 244 | size_t ind = env->ind_dist(gen); 245 | if (!env->in_use[ind].test_and_set()) { 246 | KType k = env->keys[ind]; 247 | auto res = env->table.find(k); 248 | EXPECT_EQ(env->in_table[ind], (bool) res); 249 | if (res) { 250 | EXPECT_EQ(env->vals[ind], res.value()); 251 | } 252 | 253 | auto res2 = env->table2.find(k); 254 | EXPECT_EQ(env->in_table[ind], (bool) res2); 255 | if (res2) { 256 | EXPECT_EQ(env->vals2[ind], res2.value()); 257 | } 258 | num_finds.fetch_add(2, std::memory_order_relaxed); 259 | env->in_use[ind].clear(); 260 | } 261 | } 262 | } 263 | 264 | // Spawns g_thread_num insert, delete, update, and find threads 265 | template 266 | void StressTest(AllEnvironment *env) { 267 | std::vector threads; 268 | for (size_t i = 0; i < g_thread_num; i++) { 269 | if (!g_disable_inserts) { 270 | threads.emplace_back(stress_insert_thread, env); 271 | } 272 | if (!g_disable_deletes) { 273 | threads.emplace_back(delete_thread, env); 274 | } 275 | if (!g_disable_updates) { 276 | threads.emplace_back(update_thread, env); 277 | } 278 | if (!g_disable_finds) { 279 | threads.emplace_back(find_thread, env); 280 | } 281 | } 282 | // Sleeps before ending the threads 283 | 
std::this_thread::sleep_for(std::chrono::seconds(g_test_len));
    env->finished.store(true);
    for (auto &thread : threads) {
        thread.join();
    }
    // Count how many slots the workers left marked as occupied and
    // cross-check that count against the table's own size.
    size_t numfilled = 0;
    for (size_t i = 0; i < g_numkeys; i++) {
        if (env->in_table[i]) {
            numfilled++;
        }
    }
    EXPECT_EQ(numfilled, env->table.make_unordered_map_view().size());
    std::cout << "----------Results----------" << std::endl;
    std::cout << "Number of inserts:\t" << num_inserts.load() << std::endl;
    std::cout << "Number of deletes:\t" << num_deletes.load() << std::endl;
    std::cout << "Number of updates:\t" << num_updates.load() << std::endl;
    std::cout << "Number of finds:\t" << num_finds.load() << std::endl;
}

// Parses the command-line options into the g_* globals, then runs the
// stress test with either string or integer keys.
int main(int argc, char **argv) {
    const char *args[] = {"--power", "--thread-num", "--time", "--seed"};
    size_t *arg_vars[] = {&g_power, &g_thread_num, &g_test_len, &g_seed};
    const char *arg_help[] = {
        "The number of keys to size the table with, expressed as a power of 2",
        "The number of threads to spawn for each type of operation",
        "The number of seconds to run the test for",
        "The seed for the random number generator"};
    const char *flags[] = {"--disable-inserts", "--disable-deletes",
                           "--disable-updates", "--disable-finds",
                           "--use-strings"};
    bool *flag_vars[] = {&g_disable_inserts, &g_disable_deletes,
                         &g_disable_updates, &g_disable_finds, &g_use_strings};
    const char *flag_help[] = {
        "If set, no inserts will be run", "If set, no deletes will be run",
        "If set, no updates will be run", "If set, no finds will be run",
        "If set, the key type of the map will be std::string"};
    parse_flags(argc, argv, "Runs a stress test on inserts, deletes, and finds",
                args, arg_vars, arg_help, sizeof(args) / sizeof(const char *),
                flags, flag_vars, flag_help,
sizeof(flags) / sizeof(const char *)); 324 | g_numkeys = 1U << g_power; 325 | 326 | if (g_use_strings) { 327 | auto *env = new AllEnvironment; 328 | StressTest(env); 329 | delete env; 330 | } else { 331 | auto *env = new AllEnvironment; 332 | StressTest(env); 333 | delete env; 334 | } 335 | return main_return_value; 336 | } 337 | -------------------------------------------------------------------------------- /tests/stress-tests/stress_unchecked.cpp: -------------------------------------------------------------------------------- 1 | // Tests all operations and iterators concurrently. It doesn't check any 2 | // operation for correctness, only making sure that everything completes without 3 | // crashing. 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | #include 20 | #include 21 | #include "test_util.hpp" 22 | 23 | typedef uint32_t KeyType; 24 | typedef std::string KeyType2; 25 | typedef uint32_t ValType; 26 | typedef int32_t ValType2; 27 | 28 | // The number of keys to size the table with, expressed as a power of 29 | // 2. This can be set with the command line flag --power 30 | size_t g_power = 24; 31 | size_t g_numkeys; // Holds 2^power 32 | // The number of threads spawned for each type of operation. This can 33 | // be set with the command line flag --thread-num 34 | size_t g_thread_num = 4; 35 | // Whether to disable inserts or not. This can be set with the command 36 | // line flag --disable-inserts 37 | bool g_disable_inserts = false; 38 | // Whether to disable deletes or not. This can be set with the command 39 | // line flag --disable-deletes 40 | bool g_disable_deletes = false; 41 | // Whether to disable updates or not. This can be set with the command 42 | // line flag --disable-updates 43 | bool g_disable_updates = false; 44 | // Whether to disable finds or not. 
This can be set with the command 45 | // line flag --disable-finds 46 | bool g_disable_finds = false; 47 | // Whether to disable resizes operations or not. This can be set with 48 | // the command line flag --disable-resizes 49 | bool g_disable_resizes = false; 50 | // Whether to disable iterator operations or not. This can be set with 51 | // the command line flag --disable-iterators 52 | bool g_disable_iterators = false; 53 | // Whether to disable statistic operations or not. This can be set with 54 | // the command line flag --disable-misc 55 | bool g_disable_misc = false; 56 | // Whether to disable clear operations or not. This can be set with 57 | // the command line flag --disable-clears 58 | bool g_disable_clears = false; 59 | // How many seconds to run the test for. This can be set with the 60 | // command line flag --time 61 | size_t g_test_len = 10; 62 | // The seed for the random number generator. If this isn't set to a 63 | // nonzero value with the --seed flag, the current time is used 64 | size_t g_seed = 0; 65 | // Whether to use strings as the key 66 | bool g_use_strings = false; 67 | 68 | template 69 | class AllEnvironment { 70 | public: 71 | AllEnvironment() : table(g_numkeys), table2(g_numkeys), finished(false) { 72 | // Sets up the random number generator 73 | if (g_seed == 0) { 74 | g_seed = static_cast(std::chrono::system_clock::now().time_since_epoch().count()); 75 | } 76 | std::cout << "seed = " << g_seed << std::endl; 77 | gen_seed = g_seed; 78 | } 79 | 80 | std::concurrent_unordered_map table; 81 | std::concurrent_unordered_map table2; 82 | size_t gen_seed; 83 | std::atomic finished; 84 | }; 85 | 86 | template 87 | void stress_insert_thread(AllEnvironment *env, size_t thread_seed) { 88 | std::uniform_int_distribution ind_dist; 89 | std::uniform_int_distribution val_dist; 90 | std::uniform_int_distribution val_dist2; 91 | pcg64_fast gen(thread_seed); 92 | while (!env->finished.load()) { 93 | // Insert a random key into the table 94 | auto k 
= generateKey(ind_dist(gen)); 95 | env->table.emplace(k, val_dist(gen)); 96 | env->table2.emplace(k, val_dist2(gen)); 97 | env->table.insert_or_assign(k, val_dist(gen)); 98 | env->table2.insert_or_assign(k, val_dist2(gen)); 99 | } 100 | } 101 | 102 | template 103 | void delete_thread(AllEnvironment *env, size_t thread_seed) { 104 | std::uniform_int_distribution ind_dist; 105 | pcg64_fast gen(thread_seed); 106 | while (!env->finished.load()) { 107 | // Run deletes on a random key. 108 | const auto k = generateKey(ind_dist(gen)); 109 | env->table.erase(k); 110 | env->table2.erase(k); 111 | } 112 | } 113 | 114 | template 115 | void update_thread(AllEnvironment *env, size_t thread_seed) { 116 | std::uniform_int_distribution ind_dist; 117 | std::uniform_int_distribution val_dist; 118 | std::uniform_int_distribution val_dist2; 119 | std::uniform_int_distribution third(0, 2); 120 | pcg64_fast gen(thread_seed); 121 | auto updatefn = [](ValType &v) { v += 3; }; 122 | while (!env->finished.load()) { 123 | // Run updates, update_funcs, or upserts on a random key. 124 | const auto k = generateKey(ind_dist(gen)); 125 | switch (third(gen)) { 126 | case 0: 127 | // update 128 | env->table.update(k, val_dist(gen)); 129 | env->table2.update(k, val_dist2(gen)); 130 | break; 131 | case 1: 132 | // update_fn 133 | env->table.visit(k, updatefn); 134 | env->table2.visit(k, [](ValType2 &v) { v += 10; }); 135 | break; 136 | case 2: 137 | env->table.emplace_or_visit(k, updatefn, val_dist(gen)); 138 | env->table2.emplace_or_visit(k, [](ValType2 &v) { v -= 50; }, val_dist2(gen)); 139 | break; 140 | default: 141 | throw std::logic_error("Impossible"); 142 | } 143 | } 144 | } 145 | 146 | template 147 | void find_thread(AllEnvironment *env, size_t thread_seed) { 148 | std::uniform_int_distribution ind_dist; 149 | pcg64_fast gen(thread_seed); 150 | ValType v; 151 | while (!env->finished.load()) { 152 | // Run finds on a random key. 
153 | const auto k = generateKey(ind_dist(gen)); 154 | env->table.find(k, v); 155 | try { 156 | env->table2.find(k); 157 | } catch (...) { 158 | } 159 | } 160 | } 161 | 162 | template 163 | void iterator_thread(AllEnvironment *env, size_t thread_seed) { 164 | pcg64_fast gen(thread_seed); 165 | // Runs an iteration operation at a random time 166 | const size_t sleep_time = gen() % g_test_len; 167 | std::this_thread::sleep_for(std::chrono::seconds(sleep_time)); 168 | if (env->finished.load()) { 169 | return; 170 | } 171 | auto lt = env->table2.make_unordered_map_view(true); 172 | for (auto &item : lt) { 173 | if (gen() & 1) { 174 | item.second++; 175 | } 176 | } 177 | } 178 | 179 | template 180 | void misc_thread(AllEnvironment *env) { 181 | // Runs all the misc functions 182 | pcg64_fast gen(g_seed); 183 | while (!env->finished.load()) { 184 | env->table.hash_function(); 185 | env->table.key_eq(); 186 | } 187 | } 188 | 189 | template 190 | void clear_thread(AllEnvironment *env, size_t thread_seed) { 191 | pcg64_fast gen(thread_seed); 192 | // Runs a clear operation at a random time 193 | const size_t sleep_time = gen() % g_test_len; 194 | std::this_thread::sleep_for(std::chrono::seconds(sleep_time)); 195 | if (env->finished.load()) { 196 | return; 197 | } 198 | env->table.clear(); 199 | } 200 | 201 | // Spawns thread_num threads for each type of operation 202 | template 203 | void StressTest(AllEnvironment *env) { 204 | std::vector threads; 205 | for (size_t i = 0; i < g_thread_num; i++) { 206 | if (!g_disable_inserts) { 207 | threads.emplace_back(stress_insert_thread, env, env->gen_seed++); 208 | } 209 | if (!g_disable_deletes) { 210 | threads.emplace_back(delete_thread, env, env->gen_seed++); 211 | } 212 | if (!g_disable_updates) { 213 | threads.emplace_back(update_thread, env, env->gen_seed++); 214 | } 215 | if (!g_disable_finds) { 216 | threads.emplace_back(find_thread, env, env->gen_seed++); 217 | } 218 | if (!g_disable_iterators) { 219 | 
threads.emplace_back(iterator_thread, env, env->gen_seed++); 220 | } 221 | if (!g_disable_misc) { 222 | threads.emplace_back(misc_thread, env); 223 | } 224 | if (!g_disable_clears) { 225 | threads.emplace_back(clear_thread, env, env->gen_seed++); 226 | } 227 | } 228 | // Sleeps before ending the threads 229 | std::this_thread::sleep_for(std::chrono::seconds(g_test_len)); 230 | env->finished.store(true); 231 | for (auto &thread : threads) { 232 | thread.join(); 233 | } 234 | std::cout << "----------Results----------" << std::endl; 235 | std::cout << "Final size:\t" << env->table.make_unordered_map_view().size() << std::endl; 236 | std::cout << "Final load factor:\t" << env->table.make_unordered_map_view().load_factor() << std::endl; 237 | } 238 | 239 | int main(int argc, char **argv) { 240 | const char *args[] = {"--power", "--thread-num", "--time", "--seed"}; 241 | size_t *arg_vars[] = {&g_power, &g_thread_num, &g_test_len, &g_seed}; 242 | const char *arg_help[] = { 243 | "The number of keys to size the table with, expressed as a power of 2", 244 | "The number of threads to spawn for each type of operation", 245 | "The number of seconds to run the test for", 246 | "The seed for the random number generator"}; 247 | const char *flags[] = { 248 | "--disable-inserts", "--disable-deletes", "--disable-updates", 249 | "--disable-finds", "--disable-resizes", "--disable-iterators", 250 | "--disable-misc", "--disable-clears", "--use-strings"}; 251 | bool *flag_vars[] = { 252 | &g_disable_inserts, &g_disable_deletes, &g_disable_updates, 253 | &g_disable_finds, &g_disable_resizes, &g_disable_iterators, 254 | &g_disable_misc, &g_disable_clears, &g_use_strings}; 255 | const char *flag_help[] = { 256 | "If set, no inserts will be run", 257 | "If set, no deletes will be run", 258 | "If set, no updates will be run", 259 | "If set, no finds will be run", 260 | "If set, no resize operations will be run", 261 | "If set, no iterator operations will be run", 262 | "If set, no misc 
functions will be run", 263 | "If set, no clears will be run", 264 | "If set, the key type of the map will be std::string"}; 265 | parse_flags(argc, argv, "Runs a stress test on inserts, deletes, and finds", 266 | args, arg_vars, arg_help, sizeof(args) / sizeof(const char *), 267 | flags, flag_vars, flag_help, 268 | sizeof(flags) / sizeof(const char *)); 269 | g_numkeys = 1U << g_power; 270 | 271 | if (g_use_strings) { 272 | auto *env = new AllEnvironment; 273 | StressTest(env); 274 | delete env; 275 | } else { 276 | auto *env = new AllEnvironment; 277 | StressTest(env); 278 | delete env; 279 | } 280 | return main_return_value; 281 | } 282 | -------------------------------------------------------------------------------- /tests/stress-tests/test_util.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _TEST_UTIL_HH 2 | #define _TEST_UTIL_HH 3 | 4 | // Utilities for running stress tests and benchmarks 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | std::mutex print_lock; 17 | int main_return_value = EXIT_SUCCESS; 18 | typedef std::lock_guard mutex_guard; 19 | 20 | // Prints a message if the two items aren't equal 21 | template 22 | inline void do_expect_equal(T x, const char *xname, U y, const char *yname, 23 | size_t line) { 24 | if (x != y) { 25 | mutex_guard m(print_lock); 26 | main_return_value = EXIT_FAILURE; 27 | std::cout << "ERROR:\t" << xname << "(" << x << ") does not equal " << yname 28 | << "(" << y << ") on line " << line << std::endl; 29 | } 30 | } 31 | #define EXPECT_EQ(x, y) do_expect_equal(x, #x, y, #y, __LINE__) 32 | 33 | // Prints a message if the two items are equal 34 | template 35 | inline void do_expect_not_equal(T x, const char *xname, U y, const char *yname, 36 | size_t line) { 37 | if (x == y) { 38 | mutex_guard m(print_lock); 39 | main_return_value = EXIT_FAILURE; 40 | std::cout << "ERROR:\t" << xname << "(" 
<< x << ") equals " << yname << "(" 41 | << y << ") on line " << line << std::endl; 42 | } 43 | } 44 | #define EXPECT_NE(x, y) do_expect_not_equal(x, #x, y, #y, __LINE__) 45 | 46 | // Prints a message if the item is false 47 | inline void do_expect_true(bool x, const char *xname, size_t line) { 48 | if (!x) { 49 | mutex_guard m(print_lock); 50 | main_return_value = EXIT_FAILURE; 51 | std::cout << "ERROR:\t" << xname << "(" << x << ") is false on line " 52 | << line << std::endl; 53 | } 54 | } 55 | #define EXPECT_TRUE(x) do_expect_true(x, #x, __LINE__) 56 | 57 | // Prints a message if the item is true 58 | inline void do_expect_false(bool x, const char *xname, size_t line) { 59 | if (x) { 60 | mutex_guard m(print_lock); 61 | main_return_value = EXIT_FAILURE; 62 | std::cout << "ERROR:\t" << xname << "(" << x << ") is true on line " << line 63 | << std::endl; 64 | } 65 | } 66 | #define EXPECT_FALSE(x) do_expect_false(x, #x, __LINE__) 67 | 68 | // Prints a message and exists if the two items aren't equal 69 | template 70 | inline void do_assert_equal(T x, const char *xname, U y, const char *yname, 71 | size_t line) { 72 | if (x != y) { 73 | mutex_guard m(print_lock); 74 | std::cout << "FATAL ERROR:\t" << xname << "(" << x << ") does not equal " 75 | << yname << "(" << y << ") on line " << line << std::endl; 76 | exit(EXIT_FAILURE); 77 | } 78 | } 79 | #define ASSERT_EQ(x, y) do_assert_equal(x, #x, y, #y, __LINE__) 80 | 81 | // Prints a message and exists if the item is false 82 | inline void do_assert_true(bool x, const char *xname, size_t line) { 83 | if (!x) { 84 | mutex_guard m(print_lock); 85 | std::cout << "FATAL ERROR:\t" << xname << "(" << x << ") is false on line " 86 | << line << std::endl; 87 | exit(EXIT_FAILURE); 88 | } 89 | } 90 | #define ASSERT_TRUE(x) do_assert_true(x, #x, __LINE__) 91 | 92 | // Parses boolean flags and flags with positive integer arguments 93 | void parse_flags(int argc, char **argv, const char *description, 94 | const char *args[], 
size_t *arg_vars[], const char *arg_help[], 95 | size_t arg_num, const char *flags[], bool *flag_vars[], 96 | const char *flag_help[], size_t flag_num) { 97 | 98 | errno = 0; 99 | for (int i = 0; i < argc; i++) { 100 | for (size_t j = 0; j < arg_num; j++) { 101 | if (strcmp(argv[i], args[j]) == 0) { 102 | if (i == argc - 1) { 103 | std::cerr << "You must provide a positive integer argument" 104 | << " after the " << args[j] << " argument" << std::endl; 105 | exit(EXIT_FAILURE); 106 | } else { 107 | size_t argval = strtoull(argv[i + 1], NULL, 10); 108 | if (errno != 0) { 109 | std::cerr << "The argument to " << args[j] 110 | << " must be a valid size_t" << std::endl; 111 | exit(EXIT_FAILURE); 112 | } else { 113 | *(arg_vars[j]) = argval; 114 | } 115 | } 116 | } 117 | } 118 | for (size_t j = 0; j < flag_num; j++) { 119 | if (strcmp(argv[i], flags[j]) == 0) { 120 | *(flag_vars[j]) = true; 121 | } 122 | } 123 | if (strcmp(argv[i], "--help") == 0) { 124 | std::cerr << description << std::endl; 125 | std::cerr << "Arguments:" << std::endl; 126 | for (size_t j = 0; j < arg_num; j++) { 127 | std::cerr << args[j] << " (default " << *arg_vars[j] << "):\t" 128 | << arg_help[j] << std::endl; 129 | } 130 | for (size_t j = 0; j < flag_num; j++) { 131 | std::cerr << flags[j] << " (default " 132 | << (*flag_vars[j] ? "true" : "false") << "):\t" 133 | << flag_help[j] << std::endl; 134 | } 135 | exit(0); 136 | } 137 | } 138 | } 139 | 140 | // generateKey is a function from a number to another given type, used to 141 | // generate keys for insertion. 142 | template T generateKey(size_t i) { return (T)i; } 143 | // This specialization returns a stringified representation of the given 144 | // integer, where the number is copied to the end of a long string of 'a's, in 145 | // order to make comparisons and hashing take time. 
// Redeclaration of the primary template (defined above) so the explicit
// specialization below is always preceded by a visible declaration.
template <class T> T generateKey(size_t i);

// Specialization for std::string keys.  The explicit template argument is
// required: T appears only in the return type, so it cannot be deduced.
// Keys shorter than min_length are left-padded with 'a's so that string
// comparison and hashing do a nontrivial amount of work in the benchmarks.
template <> std::string generateKey<std::string>(size_t n) {
  const size_t min_length = 100;
  const std::string num(std::to_string(n));
  if (num.size() >= min_length) {
    return num;
  }
  // Fill with 'a's, then overwrite the tail with the decimal digits.
  std::string ret(min_length, 'a');
  const size_t startret = min_length - num.size();
  for (size_t i = 0; i < num.size(); i++) {
    ret[i + startret] = num[i];
  }
  return ret;
}

// An overloaded class that does the inserts for different table types. Inserts
// with a value of 0.
template <class Table> class insert_thread {
public:
  typedef typename std::vector<typename Table::key_type>::iterator it_t;
  static void func(Table &table, it_t begin, it_t end) {
    for (; begin != end; begin++) {
      ASSERT_TRUE(table.insert(*begin, 0));
    }
  }
};

// An overloaded class that does the reads for different table types. It
// repeatedly searches for the keys in the given range until the time is up. All
// the keys we're searching for should either be in the table or not in the
// table, so we assert that.
template <class Table> class read_thread {
public:
  typedef typename std::vector<typename Table::key_type>::iterator it_t;
  static void func(Table &table, it_t begin, it_t end,
                   std::atomic<size_t> &counter, bool in_table,
                   std::atomic<bool> &finished) {
    typename Table::mapped_type v;
    // We keep track of our own local counter for reads, to avoid
    // over-burdening the shared atomic counter
    size_t reads = 0;
    while (!finished.load(std::memory_order_acquire)) {
      for (auto it = begin; it != end; it++) {
        if (finished.load(std::memory_order_acquire)) {
          counter.fetch_add(reads);
          return;
        }
        ASSERT_EQ(in_table, table.find(*it, v));
        reads++;
      }
    }
  }
};

// An overloaded class that does a mixture of reads and inserts for different
// table types.
It repeatedly searches for the keys in the given range until 201 | // everything has been inserted. 202 | template class read_insert_thread { 203 | public: 204 | typedef typename std::vector::iterator it_t; 205 | static void func(Table &table, it_t begin, it_t end, 206 | std::atomic &counter, const double insert_prob, 207 | const size_t start_seed) { 208 | typename Table::mapped_type v; 209 | pcg64_fast gen(start_seed); 210 | std::uniform_real_distribution dist(0.0, 1.0); 211 | auto inserter_it = begin; 212 | auto reader_it = begin; 213 | size_t ops = 0; 214 | while (inserter_it != end) { 215 | if (dist(gen) < insert_prob) { 216 | // Do an insert 217 | ASSERT_TRUE(table.insert(*inserter_it, 0)); 218 | ++inserter_it; 219 | } else { 220 | // Do a read 221 | ASSERT_EQ(table.find(*reader_it, v), (reader_it < inserter_it)); 222 | ++reader_it; 223 | if (reader_it == end) { 224 | reader_it = begin; 225 | } 226 | } 227 | ++ops; 228 | } 229 | counter.fetch_add(ops); 230 | } 231 | }; 232 | 233 | #endif // _TEST_UTIL_HH 234 | -------------------------------------------------------------------------------- /tests/unit-tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_executable(unit_tests 2 | test_constructor.cpp 3 | test_hash_properties.cpp 4 | test_heterogeneous_compare.cpp 5 | test_iterator.cpp 6 | test_noncopyable_types.cpp 7 | test_resize.cpp 8 | test_runner.cpp 9 | test_user_exceptions.cpp 10 | test_locked_table.cpp 11 | test_libcuckoo_bucket_container.cpp 12 | unit_test_util.cpp 13 | unit_test_util.hpp 14 | ) 15 | 16 | find_package(Boost COMPONENTS system REQUIRED) 17 | 18 | target_link_libraries(unit_tests 19 | PRIVATE concurrent_hash_map 20 | PRIVATE catch 21 | PRIVATE ${Boost_SYSTEM_LIBRARY} 22 | ) 23 | 24 | add_test(NAME unit_tests COMMAND unit_tests) 25 | -------------------------------------------------------------------------------- /tests/unit-tests/test_constructor.cpp: 
-------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include "unit_test_util.hpp" 8 | #include 9 | 10 | using int_int_table = std::concurrent_unordered_map; 11 | 12 | TEST_CASE("default size", "[constructor]") { 13 | int_int_table t; 14 | const auto& tbl = t.make_unordered_map_view(); 15 | REQUIRE(tbl.size() == 0); 16 | REQUIRE(tbl.empty()); 17 | REQUIRE(tbl.bucket_count() == 1UL << unit_test_internals_view::hashpower(t)); 18 | REQUIRE(tbl.load_factor() == 0); 19 | } 20 | 21 | TEST_CASE("given size", "[constructor]") { 22 | int_int_table t(1); 23 | const auto &tbl = t.make_unordered_map_view(); 24 | 25 | REQUIRE(tbl.size() == 0); 26 | REQUIRE(tbl.empty()); 27 | REQUIRE(tbl.bucket_count() == 1); 28 | REQUIRE(tbl.load_factor() == 0); 29 | } 30 | 31 | TEST_CASE("frees even with exceptions", "[constructor]") { 32 | using no_space_table = std::concurrent_unordered_map, 33 | std::hash, 34 | tracking_allocator>; 35 | // Should throw when allocating anything 36 | REQUIRE_THROWS_AS(no_space_table(1), std::bad_alloc); 37 | REQUIRE(get_unfreed_bytes() == 0); 38 | 39 | typedef int_int_table_with_allocator< 40 | tracking_allocator> 41 | some_space_table; 42 | // Should throw when allocating things after the bucket 43 | REQUIRE_THROWS_AS(some_space_table(1), std::bad_alloc); 44 | REQUIRE(get_unfreed_bytes() == 0); 45 | } 46 | 47 | struct StatefulHash { 48 | StatefulHash(int state_) : state(state_) {} 49 | 50 | size_t operator()(int x) const { return x; } 51 | 52 | int state; 53 | }; 54 | 55 | struct StatefulKeyEqual { 56 | StatefulKeyEqual(int state_) : state(state_) {} 57 | 58 | bool operator()(int x, int y) const { return x == y; } 59 | 60 | int state; 61 | }; 62 | 63 | template 64 | struct StatefulAllocator { 65 | using value_type = T; 66 | using pointer = T *; 67 | using const_pointer = const T *; 68 | using reference = T &; 69 | using const_reference = const T &; 70 | using 
size_type = size_t; 71 | using difference_type = ptrdiff_t; 72 | 73 | template 74 | struct rebind { 75 | using other = StatefulAllocator; 76 | }; 77 | 78 | StatefulAllocator() : state(0) {} 79 | 80 | StatefulAllocator(int state_) : state(state_) {} 81 | 82 | template 83 | StatefulAllocator(const StatefulAllocator &other) : state(other.state) {} 84 | 85 | T *allocate(size_t n) { return std::allocator().allocate(n); } 86 | 87 | void deallocate(T *p, size_t n) { std::allocator().deallocate(p, n); } 88 | 89 | template 90 | void construct(U *p, Args &&... args) { 91 | new((void *) p) U(std::forward(args)...); 92 | } 93 | 94 | template 95 | void destroy(U *p) { p->~U(); } 96 | 97 | StatefulAllocator select_on_container_copy_construction() const { 98 | return StatefulAllocator(); 99 | } 100 | 101 | using propagate_on_container_swap = std::integral_constant; 102 | 103 | int state; 104 | }; 105 | 106 | template 107 | bool operator==(const StatefulAllocator &a1, 108 | const StatefulAllocator &a2) { 109 | return a1.state == a2.state; 110 | } 111 | 112 | template 113 | bool operator!=(const StatefulAllocator &a1, 114 | const StatefulAllocator &a2) { 115 | return a1.state != a2.state; 116 | } 117 | 118 | using alloc_t = StatefulAllocator>; 119 | using tbl_t = 120 | std::concurrent_unordered_map; 121 | 122 | TEST_CASE("stateful components", "[constructor]") { 123 | tbl_t map(8, StatefulHash(10), StatefulKeyEqual(20), alloc_t(30)); 124 | REQUIRE(map.hash_function().state == 10); 125 | for (int i = 0; i < 100; ++i) { 126 | REQUIRE(map.hash_function()(i) == i); 127 | } 128 | REQUIRE(map.key_eq().state == 20); 129 | for (int i = 0; i < 100; ++i) { 130 | REQUIRE(map.key_eq()(i, i)); 131 | REQUIRE_FALSE(map.key_eq()(i, i + 1)); 132 | } 133 | REQUIRE(map.get_allocator().state == 30); 134 | } 135 | 136 | TEST_CASE("range constructor", "[constructor]") { 137 | std::array elems{{{1, 2}, {3, 4}, {5, 6}}}; 138 | tbl_t map(elems.begin(), elems.end(), 3, StatefulHash(10), 139 | 
StatefulKeyEqual(20), alloc_t(30)); 140 | REQUIRE(map.hash_function().state == 10); 141 | REQUIRE(map.key_eq().state == 20); 142 | REQUIRE(map.get_allocator().state == 30); 143 | for (int i = 1; i <= 5; i += 2) { 144 | REQUIRE(map.find(i).value() == i + 1); 145 | } 146 | } 147 | 148 | TEST_CASE("move constructor", "[constructor]") { 149 | tbl_t map(10, StatefulHash(10), StatefulKeyEqual(20), alloc_t(30)); 150 | map.emplace(10, 10); 151 | tbl_t map2(std::move(map)); 152 | const auto& m1 = map.make_unordered_map_view(); 153 | const auto& m2 = map2.make_unordered_map_view(); 154 | REQUIRE(m1.size() == 0); 155 | REQUIRE(m2.size() == 1); 156 | REQUIRE(m2.hash_function().state == 10); 157 | REQUIRE(m2.key_eq().state == 20); 158 | REQUIRE(m2.get_allocator().state == 30); 159 | } 160 | 161 | TEST_CASE("initializer list constructor", "[constructor]") { 162 | tbl_t map({{1, 2}, 163 | {3, 4}, 164 | {5, 6}}, 3, StatefulHash(10), StatefulKeyEqual(20), 165 | alloc_t(30)); 166 | const auto &m = map.make_unordered_map_view(); 167 | REQUIRE(m.hash_function().state == 10); 168 | REQUIRE(m.key_eq().state == 20); 169 | REQUIRE(m.get_allocator().state == 30); 170 | for (int i = 1; i <= 5; i += 2) { 171 | REQUIRE(m.find(i)->second == i + 1); 172 | } 173 | } 174 | 175 | TEST_CASE("swap maps", "[constructor]") { 176 | tbl_t map({{1, 2}}, 1, StatefulHash(10), StatefulKeyEqual(20), alloc_t(30)); 177 | tbl_t map2({{3, 4}}, 1, StatefulHash(40), StatefulKeyEqual(50), alloc_t(60)); 178 | map.swap(map2); 179 | 180 | { 181 | const auto &m1 = map.make_unordered_map_view(); 182 | const auto &m2 = map2.make_unordered_map_view(); 183 | 184 | REQUIRE(m1.size() == 1); 185 | REQUIRE(m1.hash_function().state == 40); 186 | REQUIRE(m1.key_eq().state == 50); 187 | REQUIRE(m1.get_allocator().state == 30); 188 | 189 | REQUIRE(m2.size() == 1); 190 | REQUIRE(m2.hash_function().state == 10); 191 | REQUIRE(m2.key_eq().state == 20); 192 | REQUIRE(m2.get_allocator().state == 60); 193 | } 194 | std::swap(map, map2); 
195 | 196 | { 197 | const auto &m1 = map.make_unordered_map_view(); 198 | const auto &m2 = map2.make_unordered_map_view(); 199 | 200 | REQUIRE(m1.size() == 1); 201 | REQUIRE(m1.hash_function().state == 10); 202 | REQUIRE(m1.key_eq().state == 20); 203 | REQUIRE(m1.get_allocator().state == 60); 204 | 205 | REQUIRE(m2.size() == 1); 206 | REQUIRE(m2.hash_function().state == 40); 207 | REQUIRE(m2.key_eq().state == 50); 208 | REQUIRE(m2.get_allocator().state == 30); 209 | } 210 | } 211 | 212 | TEST_CASE("move assign different allocators", "[constructor]") { 213 | tbl_t map({{1, 2}}, 1, StatefulHash(10), StatefulKeyEqual(20), alloc_t(30)); 214 | tbl_t map2({{3, 4}}, 1, StatefulHash(40), StatefulKeyEqual(50), alloc_t(60)); 215 | 216 | const auto &m1 = map.make_unordered_map_view(); 217 | const auto &m2 = map2.make_unordered_map_view(); 218 | 219 | map = std::move(map2); 220 | REQUIRE(m1.size() == 1); 221 | REQUIRE(m1.find(3)->second == 4); 222 | REQUIRE(m1.hash_function().state == 40); 223 | REQUIRE(m1.key_eq().state == 50); 224 | REQUIRE(m1.get_allocator().state == 60); 225 | 226 | REQUIRE(m2.hash_function().state == 40); 227 | REQUIRE(m2.key_eq().state == 50); 228 | REQUIRE(m2.get_allocator().state == 60); 229 | } 230 | 231 | TEST_CASE("move assign same allocators", "[constructor]") { 232 | tbl_t map({{1, 2}}, 1, StatefulHash(10), StatefulKeyEqual(20), alloc_t(30)); 233 | tbl_t map2({{3, 4}}, 1, StatefulHash(40), StatefulKeyEqual(50), alloc_t(30)); 234 | 235 | const auto &m1 = map.make_unordered_map_view(); 236 | const auto &m2 = map2.make_unordered_map_view(); 237 | 238 | map = std::move(map2); 239 | REQUIRE(m1.size() == 1); 240 | REQUIRE(m1.find(3)->second == 4); 241 | REQUIRE(m1.hash_function().state == 40); 242 | REQUIRE(m1.key_eq().state == 50); 243 | REQUIRE(m1.get_allocator().state == 30); 244 | 245 | REQUIRE(m2.size() == 0); 246 | REQUIRE(m2.hash_function().state == 40); 247 | REQUIRE(m2.key_eq().state == 50); 248 | REQUIRE(m2.get_allocator().state == 30); 249 | 
} 250 | 251 | TEST_CASE("initializer list assignment", "[constructor]") { 252 | tbl_t map({{1, 2}}, 1, StatefulHash(10), StatefulKeyEqual(20), alloc_t(30)); 253 | { 254 | const auto &m = map.make_unordered_map_view(); 255 | REQUIRE(m.find(1)->second == 2); 256 | } 257 | map = {{3, 4}}; 258 | { 259 | const auto &m = map.make_unordered_map_view(); 260 | REQUIRE(m.find(3)->second == 4); 261 | } 262 | } 263 | -------------------------------------------------------------------------------- /tests/unit-tests/test_hash_properties.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "unit_test_util.hpp" 4 | #include 5 | 6 | // Checks that the alt index function returns a different bucket, and can 7 | // recover the old bucket when called with the alternate bucket as the index. 8 | template 9 | void check_key(size_t hashpower, const typename concurrent_map::key_type &key) { 10 | auto hashfn = typename concurrent_map::hasher(); 11 | size_t hv = hashfn(key); 12 | auto partial = unit_test_internals_view::partial_key(hv); 13 | size_t bucket = 14 | unit_test_internals_view::index_hash(hashpower, hv); 15 | size_t alt_bucket = unit_test_internals_view::alt_index( 16 | hashpower, partial, bucket); 17 | size_t orig_bucket = unit_test_internals_view::alt_index( 18 | hashpower, partial, alt_bucket); 19 | 20 | REQUIRE(bucket != alt_bucket); 21 | REQUIRE(bucket == orig_bucket); 22 | } 23 | 24 | TEST_CASE("int alt index works correctly", "[hash properties]") { 25 | for (size_t hashpower = 10; hashpower < 15; ++hashpower) { 26 | for (int key = 0; key < 10000; ++key) { 27 | check_key(hashpower, key); 28 | } 29 | } 30 | } 31 | 32 | TEST_CASE("string alt index works correctly", "[hash properties]") { 33 | for (size_t hashpower = 10; hashpower < 15; ++hashpower) { 34 | for (int key = 0; key < 10000; ++key) { 35 | check_key(hashpower, std::to_string(key)); 36 | } 37 | } 38 | } 39 | 40 | TEST_CASE("hash with larger hashpower only 
adds top bits", 41 | "[hash properties]") { 42 | std::string key = "abc"; 43 | size_t hv = string_int_table::hasher()(key); 44 | for (size_t hashpower = 1; hashpower < 30; ++hashpower) { 45 | auto partial = unit_test_internals_view::partial_key(hv); 46 | size_t index_bucket1 = 47 | unit_test_internals_view::index_hash(hashpower, hv); 48 | size_t index_bucket2 = 49 | unit_test_internals_view::index_hash(hashpower + 1, hv); 50 | CHECK((index_bucket2 & ~(1L << hashpower)) == index_bucket1); 51 | 52 | size_t alt_bucket1 = unit_test_internals_view::alt_index( 53 | hashpower, partial, index_bucket1); 54 | size_t alt_bucket2 = unit_test_internals_view::alt_index( 55 | hashpower, partial, index_bucket2); 56 | 57 | CHECK((alt_bucket2 & ~(1L << hashpower)) == alt_bucket1); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /tests/unit-tests/test_heterogeneous_compare.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | size_t int_constructions; 6 | size_t copy_constructions; 7 | size_t destructions; 8 | size_t foo_comparisons; 9 | size_t int_comparisons; 10 | size_t foo_hashes; 11 | size_t int_hashes; 12 | 13 | class foo { 14 | public: 15 | int val; 16 | 17 | foo(int v) { 18 | ++int_constructions; 19 | val = v; 20 | } 21 | 22 | foo(const foo &x) { 23 | ++copy_constructions; 24 | val = x.val; 25 | } 26 | 27 | ~foo() { ++destructions; } 28 | }; 29 | 30 | class foo_eq { 31 | public: 32 | bool operator()(const foo &left, const foo &right) const { 33 | ++foo_comparisons; 34 | return left.val == right.val; 35 | } 36 | 37 | bool operator()(const foo &left, const int right) const { 38 | ++int_comparisons; 39 | return left.val == right; 40 | } 41 | }; 42 | 43 | class foo_hasher { 44 | public: 45 | size_t operator()(const foo &x) const { 46 | ++foo_hashes; 47 | return static_cast(x.val); 48 | } 49 | 50 | size_t operator()(const int x) const { 51 | ++int_hashes; 52 | 
return static_cast(x); 53 | } 54 | }; 55 | 56 | typedef std::concurrent_unordered_map foo_map; 57 | 58 | TEST_CASE("heterogeneous compare", "[heterogeneous compare]") { 59 | // setup code 60 | int_constructions = 0; 61 | copy_constructions = 0; 62 | destructions = 0; 63 | foo_comparisons = 0; 64 | int_comparisons = 0; 65 | foo_hashes = 0; 66 | int_hashes = 0; 67 | 68 | SECTION("emplace") { 69 | { 70 | foo_map map; 71 | map.emplace(0, true); 72 | } 73 | REQUIRE(int_constructions == 1); 74 | REQUIRE(copy_constructions == 0); 75 | REQUIRE(destructions == 1); 76 | REQUIRE(foo_comparisons == 0); 77 | REQUIRE(int_comparisons == 0); 78 | REQUIRE(foo_hashes == 0); 79 | REQUIRE(int_hashes == 1); 80 | } 81 | 82 | SECTION("foo emplace") { 83 | { 84 | foo_map map; 85 | map.emplace(foo(0), true); 86 | } 87 | REQUIRE(int_constructions == 1); 88 | REQUIRE(copy_constructions == 1); 89 | // One destruction of passed-in and moved argument, and one after the 90 | // table is destroyed. 91 | REQUIRE(destructions == 2); 92 | REQUIRE(foo_comparisons == 0); 93 | REQUIRE(int_comparisons == 0); 94 | REQUIRE(foo_hashes == 1); 95 | REQUIRE(int_hashes == 0); 96 | } 97 | 98 | SECTION("insert_or_assign") { 99 | { 100 | foo_map map; 101 | map.insert_or_assign(0, true); 102 | map.insert_or_assign(0, false); 103 | std::experimental::optional val; 104 | map.visit(0, [&val](bool map_value) { val = map_value; }); 105 | REQUIRE(val); 106 | REQUIRE_FALSE(val.value()); 107 | } 108 | REQUIRE(int_constructions == 2); 109 | REQUIRE(copy_constructions == 0); 110 | REQUIRE(destructions == 2); 111 | REQUIRE(foo_comparisons == 1); 112 | REQUIRE(int_comparisons == 1); 113 | REQUIRE(foo_hashes == 1); 114 | REQUIRE(int_hashes == 2); 115 | } 116 | 117 | SECTION("foo insert_or_assign") { 118 | { 119 | foo_map map; 120 | map.insert_or_assign(foo(0), true); 121 | map.insert_or_assign(foo(0), false); 122 | REQUIRE_FALSE(map.find(foo(0)).value()); 123 | } 124 | REQUIRE(int_constructions == 3); 125 | 
REQUIRE(copy_constructions == 1); 126 | // Three destructions of foo arguments, and one in table destruction 127 | REQUIRE(destructions == 4); 128 | REQUIRE(foo_comparisons == 2); 129 | REQUIRE(int_comparisons == 0); 130 | REQUIRE(foo_hashes == 3); 131 | REQUIRE(int_hashes == 0); 132 | } 133 | 134 | SECTION("find") { 135 | { 136 | foo_map map; 137 | map.emplace(0, true); 138 | std::experimental::optional val; 139 | map.visit(0, [&val](bool map_value) { val = map_value; }); 140 | REQUIRE(val); 141 | REQUIRE(val.value()); 142 | val = std::experimental::nullopt; 143 | map.visit(1, [&val](bool map_value) { val = map_value; }); 144 | REQUIRE_FALSE(val); 145 | } 146 | REQUIRE(int_constructions == 3); 147 | REQUIRE(copy_constructions == 0); 148 | REQUIRE(destructions == 3); 149 | REQUIRE(foo_comparisons == 1); 150 | REQUIRE(int_comparisons == 0); 151 | REQUIRE(foo_hashes == 2); 152 | REQUIRE(int_hashes == 1); 153 | } 154 | 155 | SECTION("foo find") { 156 | { 157 | foo_map map; 158 | map.emplace(0, true); 159 | REQUIRE((bool) map.find(foo(0))); 160 | REQUIRE_FALSE((bool) map.find(foo(1))); 161 | } 162 | REQUIRE(int_constructions == 3); 163 | REQUIRE(copy_constructions == 0); 164 | REQUIRE(destructions == 3); 165 | REQUIRE(foo_comparisons == 1); 166 | REQUIRE(int_comparisons == 0); 167 | REQUIRE(foo_hashes == 2); 168 | REQUIRE(int_hashes == 1); 169 | } 170 | 171 | SECTION("contains") { 172 | { 173 | foo_map map(2); 174 | // map.rehash(2); 175 | map.emplace(0, true); 176 | REQUIRE(map.find(0)); 177 | // Shouldn't do comparison because of different partial key 178 | REQUIRE(!map.find(4)); 179 | } 180 | REQUIRE(int_constructions == 3); 181 | REQUIRE(copy_constructions == 0); 182 | REQUIRE(destructions == 3); 183 | REQUIRE(foo_comparisons == 1); 184 | REQUIRE(int_comparisons == 0); 185 | REQUIRE(foo_hashes == 2); 186 | REQUIRE(int_hashes == 1); 187 | } 188 | 189 | SECTION("erase") { 190 | { 191 | foo_map map; 192 | map.emplace(0, true); 193 | REQUIRE(map.erase(0)); 194 | 
REQUIRE(!map.find(0)); 195 | } 196 | REQUIRE(int_constructions == 2); 197 | REQUIRE(copy_constructions == 0); 198 | REQUIRE(destructions == 2); 199 | REQUIRE(foo_comparisons == 0); 200 | REQUIRE(int_comparisons == 1); 201 | REQUIRE(foo_hashes == 1); 202 | REQUIRE(int_hashes == 2); 203 | } 204 | 205 | SECTION("update") { 206 | { 207 | foo_map map; 208 | map.emplace(0, true); 209 | REQUIRE(map.update(0, false)); 210 | REQUIRE(!map.find(0).value()); 211 | } 212 | REQUIRE(int_constructions == 2); 213 | REQUIRE(copy_constructions == 0); 214 | REQUIRE(destructions == 2); 215 | REQUIRE(foo_comparisons == 1); 216 | REQUIRE(int_comparisons == 1); 217 | REQUIRE(foo_hashes == 1); 218 | REQUIRE(int_hashes == 2); 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /tests/unit-tests/test_iterator.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "unit_test_util.hpp" 12 | 13 | TEST_CASE("iterator types", "[iterator]") { 14 | using Ltbl = int_int_table::unordered_map_view; 15 | using It = Ltbl::iterator; 16 | using ConstIt = Ltbl::const_iterator; 17 | 18 | const bool it_difference_type = 19 | std::is_same::value; 20 | const bool it_value_type = 21 | std::is_same::value; 22 | const bool it_pointer = std::is_same::value; 23 | const bool it_reference = std::is_same::value; 24 | const bool it_iterator_category = 25 | std::is_same::value; 27 | 28 | const bool const_it_difference_type = 29 | std::is_same::value; 30 | const bool const_it_value_type = 31 | std::is_same::value; 32 | const bool const_it_reference = 33 | std::is_same::value; 34 | const bool const_it_pointer = 35 | std::is_same::value; 36 | const bool const_it_iterator_category = 37 | std::is_same::value; 39 | 40 | REQUIRE(it_difference_type); 41 | REQUIRE(it_value_type); 42 | REQUIRE(it_pointer); 43 | 
// ---------------------------------------------------------------------------
// NOTE(review): continuation of tests/unit-tests/test_iterator.cpp, followed
// by the head of tests/unit-tests/test_libcuckoo_bucket_container.cpp (the
// allocator_wrapper/stateful_allocator template header — its bool template
// parameters were stripped by the extraction; the bare "template" tokens at
// "13 |" and "15 |" originally carried parameter lists).
// Covered tests: empty-table iteration (begin==end, cbegin/cend agreement),
// forward/backward walkthroughs in prefix and postfix form, iteration after
// moving the unordered_map_view, mutation through a non-const iterator, and
// implicit iterator -> const_iterator conversion.
// NOTE(review): in SECTION "walkthrough works after move" (upstream lines
// 120-130) the loop bound reads "lt.size()" AFTER "auto lt2 = std::move(lt)"
// — it iterates a moved-from view (the final check uses lt2.begin()).
// Presumably this should be lt2.size(); confirm against the original file
// before relying on this test.
// ---------------------------------------------------------------------------
REQUIRE(it_reference); 44 | REQUIRE(it_iterator_category); 45 | 46 | REQUIRE(const_it_difference_type); 47 | REQUIRE(const_it_value_type); 48 | REQUIRE(const_it_pointer); 49 | REQUIRE(const_it_reference); 50 | REQUIRE(const_it_iterator_category); 51 | } 52 | 53 | TEST_CASE("empty table iteration", "[iterator]") { 54 | int_int_table table; 55 | { 56 | auto lt = table.make_unordered_map_view(); 57 | REQUIRE(lt.begin() == lt.begin()); 58 | REQUIRE(lt.begin() == lt.end()); 59 | 60 | REQUIRE(lt.cbegin() == lt.begin()); 61 | REQUIRE(lt.begin() == lt.end()); 62 | 63 | REQUIRE(lt.cbegin() == lt.begin()); 64 | REQUIRE(lt.cend() == lt.end()); 65 | } 66 | } 67 | 68 | TEST_CASE("iterator walkthrough", "[iterator]") { 69 | int_int_table table; 70 | for (int i = 0; i < 10; ++i) { 71 | table.emplace(i, i); 72 | } 73 | 74 | SECTION("forward postfix walkthrough") { 75 | auto lt = table.make_unordered_map_view(); 76 | auto it = lt.cbegin(); 77 | for (size_t i = 0; i < lt.size(); ++i) { 78 | REQUIRE((*it).first == (*it).second); 79 | REQUIRE(it->first == it->second); 80 | auto old_it = it; 81 | REQUIRE(old_it == it++); 82 | } 83 | REQUIRE(it == lt.end()); 84 | } 85 | 86 | SECTION("forward prefix walkthrough") { 87 | auto lt = table.make_unordered_map_view(); 88 | auto it = lt.cbegin(); 89 | for (size_t i = 0; i < lt.size(); ++i) { 90 | REQUIRE((*it).first == (*it).second); 91 | REQUIRE(it->first == it->second); 92 | ++it; 93 | } 94 | REQUIRE(it == lt.end()); 95 | } 96 | 97 | SECTION("backwards postfix walkthrough") { 98 | auto lt = table.make_unordered_map_view(); 99 | auto it = lt.cend(); 100 | for (size_t i = 0; i < lt.size(); ++i) { 101 | auto old_it = it; 102 | REQUIRE(old_it == it--); 103 | REQUIRE((*it).first == (*it).second); 104 | REQUIRE(it->first == it->second); 105 | } 106 | REQUIRE(it == lt.begin()); 107 | } 108 | 109 | SECTION("backwards prefix walkthrough") { 110 | auto lt = table.make_unordered_map_view(); 111 | auto it = lt.cend(); 112 | for (size_t i = 0; i < 
lt.size(); ++i) { 113 | --it; 114 | REQUIRE((*it).first == (*it).second); 115 | REQUIRE(it->first == it->second); 116 | } 117 | REQUIRE(it == lt.begin()); 118 | } 119 | 120 | SECTION("walkthrough works after move") { 121 | auto lt = table.make_unordered_map_view(); 122 | auto it = lt.cend(); 123 | auto lt2 = std::move(lt); 124 | for (size_t i = 0; i < lt.size(); ++i) { 125 | --it; 126 | REQUIRE((*it).first == (*it).second); 127 | REQUIRE(it->first == it->second); 128 | } 129 | REQUIRE(it == lt2.begin()); 130 | } 131 | } 132 | 133 | TEST_CASE("iterator modification", "[iterator]") { 134 | int_int_table table; 135 | for (int i = 0; i < 10; ++i) { 136 | table.emplace(i, i); 137 | } 138 | 139 | auto lt = table.make_unordered_map_view(); 140 | for (auto it = lt.begin(); it != lt.end(); ++it) { 141 | it->second = it->second + 1; 142 | } 143 | 144 | auto it = lt.cbegin(); 145 | for (size_t i = 0; i < lt.size(); ++i) { 146 | REQUIRE(it->first == it->second - 1); 147 | ++it; 148 | } 149 | REQUIRE(it == lt.end()); 150 | } 151 | 152 | TEST_CASE("Cast iterator to const iterator", "[iterator]") { 153 | int_int_table table; 154 | for (int i = 0; i < 10; ++i) { 155 | table.emplace(i, i); 156 | } 157 | auto lt = table.make_unordered_map_view(); 158 | for (int_int_table::unordered_map_view::iterator it = lt.begin(); it != lt.end(); 159 | ++it) { 160 | REQUIRE(it->first == it->second); 161 | it->second++; 162 | int_int_table::unordered_map_view::const_iterator const_it = it; 163 | REQUIRE(it->first + 1 == it->second); 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /tests/unit-tests/test_libcuckoo_bucket_container.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "unit_test_util.hpp" 10 | #include 11 | 12 | template 14 | struct allocator_wrapper { 15 | template 16 | class stateful_allocator { 17 
// ---------------------------------------------------------------------------
// NOTE(review): body of the stateful_allocator test allocator plus the first
// bucket_container tests. The propagate_on_container_{copy_assignment,
// move_assignment,swap} typedefs are built from bool template parameters that
// the extraction stripped from the enclosing "template" headers.
// The allocator's id field is the observable state: copy-assign sets
// id = other.id + 1, move-assign sets id = other.id + 2, and
// select_on_container_copy_construction returns a copy with ++id — the later
// TEST_CASEs use these offsets to detect exactly which allocator-propagation
// path the container took. allocate/deallocate just delegate to a fresh
// std::allocator, so id never affects real memory behaviour.
// Covered tests: default construction (hashpower 2 => 4 buckets, all slots
// unoccupied), construction with a stateful allocator (id preserved), and
// copy construction (deep copy of occupied slot; copied container's allocator
// id is 5+1=6 via select_on_container_copy_construction).
// ---------------------------------------------------------------------------
| public: 18 | using value_type = T; 19 | using propagate_on_container_copy_assignment = 20 | std::integral_constant; 21 | using propagate_on_container_move_assignment = 22 | std::integral_constant; 23 | using propagate_on_container_swap = 24 | std::integral_constant; 25 | 26 | stateful_allocator() : id(0) {} 27 | 28 | stateful_allocator(const size_t &id_) : id(id_) {} 29 | 30 | stateful_allocator(const stateful_allocator &other) : id(other.id) {} 31 | 32 | template 33 | stateful_allocator(const stateful_allocator &other) : id(other.id) {} 34 | 35 | stateful_allocator &operator=(const stateful_allocator &a) { 36 | id = a.id + 1; 37 | return *this; 38 | } 39 | 40 | stateful_allocator &operator=(stateful_allocator &&a) { 41 | id = a.id + 2; 42 | return *this; 43 | } 44 | 45 | T *allocate(size_t n) { return std::allocator().allocate(n); } 46 | 47 | void deallocate(T *ptr, size_t n) { 48 | std::allocator().deallocate(ptr, n); 49 | } 50 | 51 | stateful_allocator select_on_container_copy_construction() const { 52 | stateful_allocator copy(*this); 53 | ++copy.id; 54 | return copy; 55 | } 56 | 57 | bool operator==(const stateful_allocator &other) { return id == other.id; } 58 | 59 | bool operator!=(const stateful_allocator &other) { return id != other.id; } 60 | 61 | size_t id; 62 | }; 63 | }; 64 | 65 | template 66 | using stateful_allocator = 67 | typename allocator_wrapper::template stateful_allocator; 68 | 69 | const size_t SLOT_PER_BUCKET = 4; 70 | 71 | template 72 | using testing_container = 73 | std::private_impl::bucket_container, int, Alloc, uint8_t, 74 | SLOT_PER_BUCKET>; 75 | 76 | using value_type = std::pair, int>; 77 | 78 | TEST_CASE("bucket container default constructor", "[bucket container]") { 79 | allocator_wrapper<>::stateful_allocator a; 80 | testing_container tc(2, a); 81 | REQUIRE(tc.hashpower() == 2); 82 | REQUIRE(tc.size() == 4); 83 | REQUIRE(tc.get_allocator().id == 0); 84 | for (size_t i = 0; i < tc.size(); ++i) { 85 | for (size_t j = 0; j < 
SLOT_PER_BUCKET; ++j) { 86 | REQUIRE_FALSE(tc[i].occupied(j)); 87 | } 88 | } 89 | } 90 | 91 | TEST_CASE("bucket container simple stateful allocator", "[bucket container]") { 92 | allocator_wrapper<>::stateful_allocator a(10); 93 | testing_container tc(2, a); 94 | REQUIRE(tc.hashpower() == 2); 95 | REQUIRE(tc.size() == 4); 96 | REQUIRE(tc.get_allocator().id == 10); 97 | } 98 | 99 | TEST_CASE("bucket container copy construction", "[bucket container]") { 100 | allocator_wrapper<>::stateful_allocator a(5); 101 | testing_container tc(2, a); 102 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 103 | testing_container tc2(tc); 104 | 105 | REQUIRE(tc[0].occupied(0)); 106 | REQUIRE(tc[0].partial(0) == 2); 107 | REQUIRE(*tc[0].key(0) == 10); 108 | REQUIRE(tc[0].mapped(0) == 5); 109 | REQUIRE(tc.get_allocator().id == 5); 110 | 111 | REQUIRE(tc2[0].occupied(0)); 112 | REQUIRE(tc2[0].partial(0) == 2); 113 | REQUIRE(*tc2[0].key(0) == 10); 114 | REQUIRE(tc2[0].mapped(0) == 5); 115 | REQUIRE(tc2.get_allocator().id == 6); 116 | } 117 | 118 | TEST_CASE("bucket container move construction", "[bucket container]") { 119 | allocator_wrapper<>::stateful_allocator a(5); 120 | testing_container tc(2, a); 121 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 122 | testing_container tc2(std::move(tc)); 123 | 124 | REQUIRE(tc2[0].occupied(0)); 125 | REQUIRE(tc2[0].partial(0) == 2); 126 | REQUIRE(*tc2[0].key(0) == 10); 127 | REQUIRE(tc2[0].mapped(0) == 5); 128 | REQUIRE(tc2.get_allocator().id == 5); 129 | } 130 | 131 | TEST_CASE("bucket container copy assignment with propagate", 132 | "[bucket container]") { 133 | allocator_wrapper::stateful_allocator a(5); 134 | testing_container tc(2, a); 135 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 136 | testing_container tc2(2, a); 137 | tc2.set_element(1, 0, 2, std::make_shared(10), 5); 138 | 139 | tc2 = tc; 140 | REQUIRE(tc2[0].occupied(0)); 141 | REQUIRE(tc2[0].partial(0) == 2); 142 | REQUIRE(*tc2[0].key(0) == 10); 143 | 
// ---------------------------------------------------------------------------
// NOTE(review): bucket_container assignment tests (copy assignment with and
// without allocator propagation; move assignment with propagation, without
// propagation with equal allocators, and without propagation with unequal
// allocators). The stripped template arguments on "allocator_wrapper::..."
// originally selected the propagate_on_container_* bools for each scenario.
// The shared_ptr use_count() checks are the key observations: use_count == 2
// proves copy assignment shared/copied the element (source still owns it),
// use_count == 1 proves move assignment transferred ownership. The unequal-
// allocator move case additionally checks the source element's key pointer is
// now empty (element-wise move, not buffer steal) while the destination keeps
// its own allocator (id == 4).
// ---------------------------------------------------------------------------
REQUIRE(tc2[0].key(0).use_count() == 2); 144 | REQUIRE(tc2[0].mapped(0) == 5); 145 | REQUIRE_FALSE(tc2[1].occupied(0)); 146 | 147 | REQUIRE(tc.get_allocator().id == 5); 148 | REQUIRE(tc2.get_allocator().id == 6); 149 | } 150 | 151 | TEST_CASE("bucket container copy assignment no propagate", 152 | "[bucket container]") { 153 | allocator_wrapper::stateful_allocator a(5); 154 | testing_container tc(2, a); 155 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 156 | testing_container tc2(2, a); 157 | tc2.set_element(1, 0, 2, std::make_shared(10), 5); 158 | 159 | tc2 = tc; 160 | REQUIRE(tc2[0].occupied(0)); 161 | REQUIRE(tc2[0].partial(0) == 2); 162 | REQUIRE(*tc2[0].key(0) == 10); 163 | REQUIRE(tc2[0].key(0).use_count() == 2); 164 | REQUIRE(tc2[0].mapped(0) == 5); 165 | REQUIRE_FALSE(tc2[1].occupied(0)); 166 | 167 | REQUIRE(tc.get_allocator().id == 5); 168 | REQUIRE(tc2.get_allocator().id == 5); 169 | } 170 | 171 | TEST_CASE("bucket container move assignment with propagate", 172 | "[bucket container]") { 173 | allocator_wrapper<>::stateful_allocator a(5); 174 | testing_container tc(2, a); 175 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 176 | testing_container tc2(2, a); 177 | tc2.set_element(1, 0, 2, std::make_shared(10), 5); 178 | 179 | tc2 = std::move(tc); 180 | REQUIRE(tc2[0].occupied(0)); 181 | REQUIRE(tc2[0].partial(0) == 2); 182 | REQUIRE(*tc2[0].key(0) == 10); 183 | REQUIRE(tc2[0].mapped(0) == 5); 184 | REQUIRE_FALSE(tc2[1].occupied(0)); 185 | REQUIRE(tc2.get_allocator().id == 7); 186 | } 187 | 188 | TEST_CASE("bucket container move assignment no propagate equal", 189 | "[bucket container]") { 190 | allocator_wrapper::stateful_allocator a(5); 191 | testing_container tc(2, a); 192 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 193 | testing_container tc2(2, a); 194 | tc2.set_element(1, 0, 2, std::make_shared(10), 5); 195 | 196 | tc2 = std::move(tc); 197 | REQUIRE(tc2[0].occupied(0)); 198 | REQUIRE(tc2[0].partial(0) == 2); 199 | 
REQUIRE(*tc2[0].key(0) == 10); 200 | REQUIRE(tc2[0].key(0).use_count() == 1); 201 | REQUIRE(tc2[0].mapped(0) == 5); 202 | REQUIRE_FALSE(tc2[1].occupied(0)); 203 | REQUIRE(tc2.get_allocator().id == 5); 204 | } 205 | 206 | TEST_CASE("bucket container move assignment no propagate unequal", 207 | "[bucket container]") { 208 | allocator_wrapper::stateful_allocator a(5); 209 | testing_container tc(2, a); 210 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 211 | allocator_wrapper::stateful_allocator a2(4); 212 | testing_container tc2(2, a2); 213 | tc2.set_element(1, 0, 2, std::make_shared(10), 5); 214 | 215 | tc2 = std::move(tc); 216 | REQUIRE(!tc2[1].occupied(0)); 217 | REQUIRE(tc2[0].occupied(0)); 218 | REQUIRE(tc2[0].partial(0) == 2); 219 | REQUIRE(*tc2[0].key(0) == 10); 220 | REQUIRE(tc2[0].key(0).use_count() == 1); 221 | REQUIRE(tc2[0].mapped(0) == 5); 222 | REQUIRE_FALSE(tc2[1].occupied(0)); 223 | REQUIRE(tc2.get_allocator().id == 4); 224 | 225 | REQUIRE(tc[0].occupied(0)); 226 | REQUIRE(tc[0].partial(0) == 2); 227 | REQUIRE_FALSE(tc[0].key(0)); 228 | } 229 | 230 | TEST_CASE("bucket container swap no propagate", "[bucket container]") { 231 | allocator_wrapper::stateful_allocator a(5); 232 | testing_container tc(2, a); 233 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 234 | testing_container tc2(2, a); 235 | tc2.set_element(1, 0, 2, std::make_shared(10), 5); 236 | 237 | tc.swap(tc2); 238 | 239 | REQUIRE(tc[1].occupied(0)); 240 | REQUIRE(tc[1].partial(0) == 2); 241 | REQUIRE(*tc[1].key(0) == 10); 242 | REQUIRE(tc[1].key(0).use_count() == 1); 243 | REQUIRE(tc[1].mapped(0) == 5); 244 | REQUIRE(tc.get_allocator().id == 5); 245 | 246 | REQUIRE(tc2[0].occupied(0)); 247 | REQUIRE(tc2[0].partial(0) == 2); 248 | REQUIRE(*tc2[0].key(0) == 10); 249 | REQUIRE(tc2[0].key(0).use_count() == 1); 250 | REQUIRE(tc2[0].mapped(0) == 5); 251 | REQUIRE(tc2.get_allocator().id == 5); 252 | } 253 | 254 | TEST_CASE("bucket container swap propagate", "[bucket container]") { 255 | 
// ---------------------------------------------------------------------------
// NOTE(review): tail of test_libcuckoo_bucket_container.cpp plus the head of
// tests/unit-tests/test_locked_table.cpp.
// Covered: "swap propagate" (swap with allocator propagation — both ids move
// from 5 to 7, i.e. one move-assign (+2) per side via the allocator's
// instrumented operator=); the throwing helper type exception_int, whose
// constructor, copy, assignment and DESTRUCTOR all throw while the static
// do_throw flag is set (note: a throwing destructor is intentional here to
// stress cleanup paths — never imitate in production code); a strong-
// exception-guarantee test for set_element (existing slot 0 stays intact,
// slot 1 stays unoccupied after the throw); and a copy-assignment-under-
// exception cleanup test. Then test_locked_table.cpp begins: typedef checks
// for unordered_map_view (the std::is_same expressions lost their template
// arguments in extraction) and move construction/assignment of the view
// (is_active checks are commented out upstream).
// ---------------------------------------------------------------------------
allocator_wrapper::stateful_allocator a(5); 256 | testing_container tc(2, a); 257 | tc.set_element(0, 0, 2, std::make_shared(10), 5); 258 | testing_container tc2(2, a); 259 | tc2.set_element(1, 0, 2, std::make_shared(10), 5); 260 | 261 | tc.swap(tc2); 262 | 263 | REQUIRE(tc[1].occupied(0)); 264 | REQUIRE(tc[1].partial(0) == 2); 265 | REQUIRE(*tc[1].key(0) == 10); 266 | REQUIRE(tc[1].key(0).use_count() == 1); 267 | REQUIRE(tc[1].mapped(0) == 5); 268 | REQUIRE(tc.get_allocator().id == 7); 269 | 270 | REQUIRE(tc2[0].occupied(0)); 271 | REQUIRE(tc2[0].partial(0) == 2); 272 | REQUIRE(*tc2[0].key(0) == 10); 273 | REQUIRE(tc2[0].key(0).use_count() == 1); 274 | REQUIRE(tc2[0].mapped(0) == 5); 275 | REQUIRE(tc2.get_allocator().id == 7); 276 | } 277 | 278 | struct exception_int { 279 | int x; 280 | static bool do_throw; 281 | 282 | exception_int(int x_) : x(x_) { maybe_throw(); } 283 | 284 | exception_int(const exception_int &other) : x(other.x) { maybe_throw(); } 285 | 286 | exception_int &operator=(const exception_int &other) { 287 | x = other.x; 288 | maybe_throw(); 289 | return *this; 290 | } 291 | 292 | ~exception_int() { maybe_throw(); } 293 | 294 | private: 295 | void maybe_throw() { 296 | if (do_throw) { 297 | throw std::runtime_error("thrown"); 298 | } 299 | } 300 | }; 301 | 302 | bool exception_int::do_throw = false; 303 | 304 | using exception_container = 305 | std::private_impl::bucket_container>, 307 | uint8_t, SLOT_PER_BUCKET>; 308 | 309 | TEST_CASE("set_element with throwing type maintains strong guarantee", 310 | "[bucket container]") { 311 | exception_container container(0, exception_container::allocator_type()); 312 | container.set_element(0, 0, 0, exception_int(10), 20); 313 | 314 | exception_int::do_throw = true; 315 | REQUIRE_THROWS_AS(container.set_element(0, 1, 0, 0, 0), std::runtime_error); 316 | exception_int::do_throw = false; 317 | 318 | REQUIRE(container[0].occupied(0)); 319 | REQUIRE(container[0].key(0).x == 10); 320 | 
REQUIRE(container[0].mapped(0) == 20); 321 | 322 | REQUIRE_FALSE(container[0].occupied(1)); 323 | } 324 | 325 | TEST_CASE("copy assignment with throwing type is destroyed properly", 326 | "[bucket container]") { 327 | exception_container container(0, exception_container::allocator_type()); 328 | container.set_element(0, 0, 0, exception_int(10), 20); 329 | exception_container other(0, exception_container::allocator_type()); 330 | 331 | exception_int::do_throw = true; 332 | REQUIRE_THROWS_AS(other = container, std::runtime_error); 333 | exception_int::do_throw = false; 334 | } 335 | -------------------------------------------------------------------------------- /tests/unit-tests/test_locked_table.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | 8 | #include "unit_test_util.hpp" 9 | 10 | TEST_CASE("locked_table typedefs", "[locked_table]") { 11 | using tbl = int_int_table; 12 | using ltbl = tbl::unordered_map_view; 13 | const bool key_type = std::is_same::value; 14 | const bool mapped_type = 15 | std::is_same::value; 16 | const bool value_type = 17 | std::is_same::value; 18 | const bool size_type = std::is_same::value; 19 | const bool hasher = std::is_same::value; 20 | const bool key_equal = std::is_same::value; 21 | const bool allocator_type = 22 | std::is_same::value; 23 | REQUIRE(key_type); 24 | REQUIRE(mapped_type); 25 | REQUIRE(value_type); 26 | REQUIRE(size_type); 27 | REQUIRE(hasher); 28 | REQUIRE(key_equal); 29 | REQUIRE(allocator_type); 30 | } 31 | 32 | TEST_CASE("locked_table move", "[locked_table]") { 33 | int_int_table tbl; 34 | 35 | SECTION("move constructor") { 36 | auto lt = tbl.make_unordered_map_view(); 37 | auto lt2(std::move(lt)); 38 | // REQUIRE(!lt.is_active()); 39 | // REQUIRE(lt2.is_active()); 40 | } 41 | 42 | SECTION("move assignment") { 43 | auto lt = tbl.make_unordered_map_view(); 44 | auto lt2 = std::move(lt); 45 | // 
// ---------------------------------------------------------------------------
// NOTE(review): test_locked_table.cpp continued — iterator stability across
// view moves, allocator/rehash info calls (documented upstream as lock-free),
// clear() idempotence, insert() of duplicate vs new keys through the view
// (with write-back visible via tbl.find after the view is released), and
// insert-lifetime tests on a unique_ptr-keyed table proving a successful
// insert consumes the moved-in pair while a failed insert leaves the caller's
// value_type intact. "unique_ptr_table" and its stripped template arguments
// come from unit_test_util.hpp — confirm exact instantiation there.
// NOTE(review): "lt.insert(std::move(std::make_pair(...)))" at upstream line
// 120 — the std::move around a prvalue is redundant (make_pair already yields
// an rvalue); harmless, but clang-tidy would flag it.
// ---------------------------------------------------------------------------
REQUIRE(!lt.is_active()); 46 | // REQUIRE(lt2.is_active()); 47 | } 48 | 49 | SECTION("iterators compare after table is moved") { 50 | auto lt1 = tbl.make_unordered_map_view(); 51 | auto it1 = lt1.begin(); 52 | auto it2 = lt1.begin(); 53 | REQUIRE(it1 == it2); 54 | auto lt2(std::move(lt1)); 55 | REQUIRE(it1 == it2); 56 | } 57 | } 58 | 59 | TEST_CASE("locked_table info", "[locked_table]") { 60 | int_int_table tbl; 61 | tbl.emplace(10, 10); 62 | auto lt = tbl.make_unordered_map_view(); 63 | 64 | // We should still be able to call table info operations on the 65 | // cuckoohash_map instance, because they shouldn't take locks. 66 | 67 | REQUIRE(lt.get_allocator() == tbl.get_allocator()); 68 | lt.rehash(5); 69 | } 70 | 71 | TEST_CASE("locked_table clear", "[locked_table]") { 72 | int_int_table tbl; 73 | tbl.emplace(10, 10); 74 | auto lt = tbl.make_unordered_map_view(); 75 | REQUIRE(lt.size() == 1); 76 | lt.clear(); 77 | REQUIRE(lt.size() == 0); 78 | lt.clear(); 79 | REQUIRE(lt.size() == 0); 80 | } 81 | 82 | TEST_CASE("locked_table insert duplicate", "[locked_table]") { 83 | int_int_table tbl; 84 | tbl.emplace(10, 10); 85 | { 86 | auto lt = tbl.make_unordered_map_view(); 87 | auto result = lt.insert(std::make_pair(10, 20)); 88 | REQUIRE(result.first->first == 10); 89 | REQUIRE(result.first->second == 10); 90 | REQUIRE_FALSE(result.second); 91 | result.first->second = 50; 92 | } 93 | REQUIRE(*tbl.find(10) == 50); 94 | } 95 | 96 | TEST_CASE("locked_table insert new key", "[locked_table]") { 97 | int_int_table tbl; 98 | tbl.emplace(10, 10); 99 | { 100 | auto lt = tbl.make_unordered_map_view(); 101 | auto result = lt.insert(std::make_pair(20, 20)); 102 | REQUIRE(result.first->first == 20); 103 | REQUIRE(result.first->second == 20); 104 | REQUIRE(result.second); 105 | result.first->second = 50; 106 | } 107 | REQUIRE(*tbl.find(10) == 10); 108 | REQUIRE(*tbl.find(20) == 50); 109 | } 110 | 111 | TEST_CASE("locked_table insert lifetime", "[locked_table]") { 112 | unique_ptr_table 
tbl; 113 | 114 | SECTION("Successful insert") { 115 | auto lt = tbl.make_unordered_map_view(); 116 | std::unique_ptr key(new int(20)); 117 | std::unique_ptr value(new int(20)); 118 | 119 | 120 | auto result = lt.insert(std::move(std::make_pair(std::move(key), std::move(value)))); 121 | REQUIRE(*result.first->first == 20); 122 | REQUIRE(*result.first->second == 20); 123 | REQUIRE(result.second); 124 | REQUIRE(!static_cast(key)); 125 | REQUIRE(!static_cast(value)); 126 | } 127 | 128 | SECTION("Unsuccessful insert") { 129 | tbl.emplace(std::unique_ptr(new int(20)), std::unique_ptr(new int(20))); 130 | auto lt = tbl.make_unordered_map_view(); 131 | std::unique_ptr key(new int(20)); 132 | std::unique_ptr value(new int(30)); 133 | unique_ptr_table::value_type val(std::move(key), std::move(value)); 134 | auto result = lt.insert(std::move(val)); 135 | REQUIRE(*result.first->first == 20); 136 | REQUIRE(*result.first->second == 20); 137 | REQUIRE(!result.second); 138 | REQUIRE(static_cast(val.first)); 139 | REQUIRE(static_cast(val.second)); 140 | } 141 | } 142 | 143 | TEST_CASE("locked_table erase", "[locked_table]") { 144 | int_int_table tbl; 145 | for (int i = 0; i < 5; ++i) { 146 | tbl.emplace(i, i); 147 | } 148 | using lt_t = int_int_table::unordered_map_view; 149 | 150 | SECTION("simple erase") { 151 | auto lt = tbl.make_unordered_map_view(); 152 | lt_t::const_iterator const_it; 153 | const_it = lt.find(0); 154 | REQUIRE(const_it != lt.end()); 155 | lt_t::const_iterator const_next = const_it; 156 | ++const_next; 157 | REQUIRE(static_cast(lt.erase(const_it)) == 158 | const_next); 159 | REQUIRE(lt.size() == 4); 160 | 161 | lt_t::iterator it; 162 | it = lt.find(1); 163 | lt_t::iterator next = it; 164 | ++next; 165 | REQUIRE(lt.erase(static_cast(it)) == next); 166 | REQUIRE(lt.size() == 3); 167 | 168 | REQUIRE(lt.erase(2) == 1); 169 | REQUIRE(lt.size() == 2); 170 | } 171 | 172 | SECTION("erase doesn't ruin this iterator") { 173 | auto lt = tbl.make_unordered_map_view(); 174 
// ---------------------------------------------------------------------------
// NOTE(review): test_locked_table.cpp continued — erase stability for the
// erasing iterator and for unrelated iterators, find() through both iterator
// and const_iterator (including begin/end boundary bookkeeping), at() with
// mutation and std::out_of_range on a missing key, and the start of the
// operator[] test. In the operator[] test, "REQUIRE(lt.at(11) == 0)" is valid
// only because the preceding "lt[11]" default-inserts key 11 — order matters.
// ---------------------------------------------------------------------------
| auto it = lt.begin(); 175 | auto next = it; 176 | ++next; 177 | REQUIRE(lt.erase(it) == next); 178 | ++it; 179 | REQUIRE(it->first >= 0); 180 | REQUIRE(it->first < 5); 181 | REQUIRE(it->second >= 0); 182 | REQUIRE(it->second < 5); 183 | } 184 | 185 | SECTION("erase doesn't ruin other iterators") { 186 | auto lt = tbl.make_unordered_map_view(); 187 | auto it0 = lt.find(0); 188 | auto it1 = lt.find(1); 189 | auto it2 = lt.find(2); 190 | auto it3 = lt.find(3); 191 | auto it4 = lt.find(4); 192 | auto next = it2; 193 | ++next; 194 | REQUIRE(lt.erase(it2) == next); 195 | REQUIRE(it0->first == 0); 196 | REQUIRE(it0->second == 0); 197 | REQUIRE(it1->first == 1); 198 | REQUIRE(it1->second == 1); 199 | REQUIRE(it3->first == 3); 200 | REQUIRE(it3->second == 3); 201 | REQUIRE(it4->first == 4); 202 | REQUIRE(it4->second == 4); 203 | } 204 | } 205 | 206 | TEST_CASE("locked_table find", "[locked_table]") { 207 | int_int_table tbl; 208 | using lt_t = int_int_table::unordered_map_view; 209 | auto lt = tbl.make_unordered_map_view(); 210 | for (int i = 0; i < 10; ++i) { 211 | REQUIRE(lt.insert(std::make_pair(i, i)).second); 212 | } 213 | bool found_begin_elem = false; 214 | bool found_last_elem = false; 215 | for (int i = 0; i < 10; ++i) { 216 | lt_t::iterator it = lt.find(i); 217 | lt_t::const_iterator const_it = lt.find(i); 218 | REQUIRE(it != lt.end()); 219 | REQUIRE(it->first == i); 220 | REQUIRE(it->second == i); 221 | REQUIRE(const_it != lt.end()); 222 | REQUIRE(const_it->first == i); 223 | REQUIRE(const_it->second == i); 224 | it->second++; 225 | if (it == lt.begin()) { 226 | found_begin_elem = true; 227 | } 228 | if (++it == lt.end()) { 229 | found_last_elem = true; 230 | } 231 | } 232 | REQUIRE(found_begin_elem); 233 | REQUIRE(found_last_elem); 234 | for (int i = 0; i < 10; ++i) { 235 | lt_t::iterator it = lt.find(i); 236 | REQUIRE(it->first == i); 237 | REQUIRE(it->second == i + 1); 238 | } 239 | } 240 | 241 | TEST_CASE("locked_table at", "[locked_table]") { 242 | 
int_int_table tbl; 243 | auto lt = tbl.make_unordered_map_view(); 244 | for (int i = 0; i < 10; ++i) { 245 | REQUIRE(lt.insert(std::make_pair(i, i)).second); 246 | } 247 | for (int i = 0; i < 10; ++i) { 248 | int &val = lt.at(i); 249 | const int &const_val = 250 | const_cast(lt).at(i); 251 | REQUIRE(val == i); 252 | REQUIRE(const_val == i); 253 | ++val; 254 | } 255 | for (int i = 0; i < 10; ++i) { 256 | REQUIRE(lt.at(i) == i + 1); 257 | } 258 | REQUIRE_THROWS_AS(lt.at(11), std::out_of_range); 259 | } 260 | 261 | TEST_CASE("locked_table operator[]", "[locked_table]") { 262 | int_int_table tbl; 263 | auto lt = tbl.make_unordered_map_view(); 264 | for (int i = 0; i < 10; ++i) { 265 | REQUIRE(lt.insert(std::make_pair(i, i)).second); 266 | } 267 | for (int i = 0; i < 10; ++i) { 268 | int &val = lt[i]; 269 | REQUIRE(val == i); 270 | ++val; 271 | } 272 | for (int i = 0; i < 10; ++i) { 273 | REQUIRE(lt[i] == i + 1); 274 | } 275 | REQUIRE(lt[11] == 0); 276 | REQUIRE(lt.at(11) == 0); 277 | } 278 | 279 | TEST_CASE("locked_table count", "[locked_table]") { 280 | int_int_table tbl; 281 | auto lt = tbl.make_unordered_map_view(); 282 | for (int i = 0; i < 10; ++i) { 283 | REQUIRE(lt.insert(std::make_pair(i, i)).second); 284 | } 285 | for (int i = 0; i < 10; ++i) { 286 | REQUIRE(lt.count(i) == 1); 287 | } 288 | REQUIRE(lt.count(11) == 0); 289 | } 290 | 291 | TEST_CASE("locked_table equal_range", "[locked_table]") { 292 | int_int_table tbl; 293 | using lt_t = int_int_table::unordered_map_view; 294 | auto lt = tbl.make_unordered_map_view(); 295 | for (int i = 0; i < 10; ++i) { 296 | REQUIRE(lt.insert(std::make_pair(i, i)).second); 297 | } 298 | for (int i = 0; i < 10; ++i) { 299 | std::pair it_range = lt.equal_range(i); 300 | REQUIRE(it_range.first->first == i); 301 | REQUIRE(++it_range.first == it_range.second); 302 | std::pair const_it_range = 303 | lt.equal_range(i); 304 | REQUIRE(const_it_range.first->first == i); 305 | REQUIRE(++const_it_range.first == const_it_range.second); 
// ---------------------------------------------------------------------------
// NOTE(review): tail of test_locked_table.cpp and head of
// tests/unit-tests/test_noncopyable_types.cpp.
// Covered: equal_range on a missing key (both iterators == end()); view
// equality/inequality across tables with different capacities but equal
// contents (40 vs 30 buckets compare equal — equality is content-based);
// check_all_locks_taken, which asserts try_lock fails on every lock while a
// view is held (currently disabled in "locked table holds locks after resize"
// — upstream TODO/FIXME says a locked-view accessor is missing, so only the
// insert/rehash exercising remains active). Then the noncopyable file starts:
// unique_ptr-keyed table helpers (check_key_eq visits by a fresh
// unique_ptr<int> key) and the insert/update and upsert (emplace_or_visit)
// tests. The stripped "template" on check_all_locks_taken's header and the
// unique_ptr template arguments must be restored from the repository.
// ---------------------------------------------------------------------------
306 | } 307 | auto it_range = lt.equal_range(11); 308 | REQUIRE(it_range.first == lt.end()); 309 | REQUIRE(it_range.second == lt.end()); 310 | } 311 | 312 | TEST_CASE("locked_table equality", "[locked_table]") { 313 | int_int_table tbl1(40); 314 | auto lt1 = tbl1.make_unordered_map_view(); 315 | for (int i = 0; i < 10; ++i) { 316 | lt1.insert(std::make_pair(i, i)); 317 | } 318 | 319 | int_int_table tbl2(30); 320 | auto lt2 = tbl2.make_unordered_map_view(); 321 | for (int i = 0; i < 10; ++i) { 322 | lt2.insert(std::make_pair(i, i)); 323 | } 324 | 325 | int_int_table tbl3(30); 326 | auto lt3 = tbl3.make_unordered_map_view(); 327 | for (int i = 0; i < 10; ++i) { 328 | lt3.insert(std::make_pair(i, i + 1)); 329 | } 330 | 331 | int_int_table tbl4(40); 332 | auto lt4 = tbl4.make_unordered_map_view(); 333 | for (int i = 0; i < 10; ++i) { 334 | lt4.insert(std::make_pair(i + 1, i)); 335 | } 336 | 337 | REQUIRE(lt1 == lt2); 338 | REQUIRE_FALSE(lt2 != lt1); 339 | 340 | REQUIRE(lt1 != lt3); 341 | REQUIRE_FALSE(lt3 == lt1); 342 | REQUIRE_FALSE(lt2 == lt3); 343 | REQUIRE(lt3 != lt2); 344 | 345 | REQUIRE(lt1 != lt4); 346 | REQUIRE(lt4 != lt1); 347 | REQUIRE_FALSE(lt3 == lt4); 348 | REQUIRE_FALSE(lt4 == lt3); 349 | } 350 | 351 | template 352 | void check_all_locks_taken(Table &tbl) { 353 | auto &locks = unit_test_internals_view::get_current_locks(tbl); 354 | for (size_t i = 0; i < locks.size(); ++i) { 355 | REQUIRE_FALSE(locks[i].try_lock(std::private_impl::LOCKING_ACTIVE())); 356 | } 357 | } 358 | 359 | TEST_CASE("locked table holds locks after resize", "[locked table]") { 360 | int_int_table tbl(4); 361 | auto lt = tbl.make_unordered_map_view(); 362 | //TODO: FIXME add a method with locked view 363 | // check_all_locks_taken(tbl); 364 | 365 | // After a cuckoo_fast_double, all locks are still taken 366 | for (int i = 0; i < 5; ++i) { 367 | lt.insert(std::make_pair(i, i)); 368 | } 369 | //check_all_locks_taken(tbl); 370 | 371 | // After a cuckoo_simple_expand, all locks are still 
taken 372 | lt.rehash(10); 373 | //check_all_locks_taken(tbl); 374 | } 375 | -------------------------------------------------------------------------------- /tests/unit-tests/test_noncopyable_types.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | #include "unit_test_util.hpp" 7 | 8 | using tbl = unique_ptr_table; 9 | using uptr = std::unique_ptr; 10 | 11 | const size_t TBL_INIT = 1; 12 | const size_t TBL_SIZE = TBL_INIT * std::private_impl::DEFAULT_SLOTS_PER_BUCKET * 2; 13 | 14 | void check_key_eq(tbl &tbl, int key, int expected_val) { 15 | size_t count = 0; 16 | tbl.visit(std::move(uptr(new int(key))), [expected_val, &count](const uptr &ptr) { 17 | ++count; 18 | REQUIRE(*ptr == expected_val); 19 | }); 20 | REQUIRE(count > 0); 21 | } 22 | 23 | TEST_CASE("noncopyable insert and update", "[noncopyable]") { 24 | tbl tbl(TBL_INIT); 25 | for (size_t i = 0; i < TBL_SIZE; ++i) { 26 | REQUIRE(tbl.emplace(std::move(uptr(new int(i))), std::move(uptr(new int(i))))); 27 | } 28 | for (size_t i = 0; i < TBL_SIZE; ++i) { 29 | check_key_eq(tbl, i, i); 30 | } 31 | for (size_t i = 0; i < TBL_SIZE; ++i) { 32 | tbl.update(uptr(new int(i)), uptr(new int(i + 1))); 33 | } 34 | for (size_t i = 0; i < TBL_SIZE; ++i) { 35 | check_key_eq(tbl, i, i + 1); 36 | } 37 | } 38 | 39 | TEST_CASE("noncopyable upsert", "[noncopyable]") { 40 | tbl tbl(TBL_INIT); 41 | auto increment = [](uptr &ptr) { *ptr += 1; }; 42 | for (size_t i = 0; i < TBL_SIZE; ++i) { 43 | tbl.emplace_or_visit(uptr(new int(i)), increment, uptr(new int(i))); 44 | } 45 | for (size_t i = 0; i < TBL_SIZE; ++i) { 46 | check_key_eq(tbl, i, i); 47 | } 48 | for (size_t i = 0; i < TBL_SIZE; ++i) { 49 | tbl.emplace_or_visit(uptr(new int(i)), increment, uptr(new int(i))); 50 | } 51 | for (size_t i = 0; i < TBL_SIZE; ++i) { 52 | check_key_eq(tbl, i, i + 1); 53 | } 54 | } 55 | 56 | TEST_CASE("noncopyable iteration", "[noncopyable]") { 57 | tbl 
// ---------------------------------------------------------------------------
// NOTE(review): rest of test_noncopyable_types.cpp and head of
// tests/unit-tests/test_resize.cpp.
// Covered: iteration over a unique_ptr table with in-place mutation via the
// unordered_map_view; a nested-table test storing unique_ptr-owned inner maps
// keyed by std::string (template arguments of concurrent_unordered_map were
// stripped by the extraction); insert-lifetime tests proving a successful
// emplace consumes the moved-in unique_ptrs and a failed one does not; and
// erase_and_visit with a visitor that decrements and erases at zero. Then
// test_resize.cpp begins: rehash on an empty table (hashpower moves 0 -> 20
// -> 1 via unit_test_internals_view) and the reserve_calc expectations
// (slots-per-bucket multiples mapping to hashpowers).
// ---------------------------------------------------------------------------
tbl(TBL_INIT); 58 | for (size_t i = 0; i < TBL_SIZE; ++i) { 59 | tbl.emplace(uptr(new int(i)), uptr(new int(i))); 60 | } 61 | { 62 | auto locked_tbl = tbl.make_unordered_map_view(); 63 | for (auto &kv : locked_tbl) { 64 | REQUIRE(*kv.first == *kv.second); 65 | *kv.second += 1; 66 | } 67 | } 68 | { 69 | auto locked_tbl = tbl.make_unordered_map_view(); 70 | for (auto &kv : locked_tbl) { 71 | REQUIRE(*kv.first == *kv.second - 1); 72 | } 73 | } 74 | } 75 | 76 | TEST_CASE("nested table", "[noncopyable]") { 77 | typedef std::concurrent_unordered_map inner_tbl; 78 | typedef std::concurrent_unordered_map> nested_tbl; 79 | nested_tbl tbl; 80 | std::string keys[] = {"abc", "def"}; 81 | for (std::string &k : keys) { 82 | tbl.emplace(std::string(k), nested_tbl::mapped_type(new inner_tbl)); 83 | tbl.emplace_or_visit(k, [&k](nested_tbl::mapped_type &t) { 84 | for (char c : k) { 85 | t->emplace(c, std::string(k)); 86 | } 87 | }); 88 | } 89 | for (std::string &k : keys) { 90 | REQUIRE(tbl.visit(k, [](nested_tbl::mapped_type &) {})); 91 | tbl.visit(k, [&k](nested_tbl::mapped_type &t) { 92 | for (char c : k) { 93 | REQUIRE(t->find(c) == k); 94 | } 95 | }); 96 | } 97 | } 98 | 99 | TEST_CASE("noncopyable insert lifetime") { 100 | tbl tbl; 101 | 102 | // Successful insert 103 | SECTION("Successful insert") { 104 | uptr key(new int(20)); 105 | uptr value(new int(20)); 106 | REQUIRE(tbl.emplace(std::move(key), std::move(value))); 107 | REQUIRE(!static_cast(key)); 108 | REQUIRE(!static_cast(value)); 109 | } 110 | 111 | // Unsuccessful insert 112 | SECTION("Unsuccessful insert") { 113 | tbl.emplace(uptr(new int(20)), uptr(new int(20))); 114 | uptr key(new int(20)); 115 | uptr value(new int(30)); 116 | REQUIRE_FALSE(tbl.emplace(std::move(key), std::move(value))); 117 | REQUIRE(static_cast(key)); 118 | REQUIRE(static_cast(value)); 119 | } 120 | } 121 | 122 | TEST_CASE("noncopyable erase_fn") { 123 | tbl tbl; 124 | tbl.emplace(uptr(new int(10)), uptr(new int(10))); 125 | auto 
decrement_and_erase = [](uptr &p) { 126 | --(*p); 127 | return *p == 0; 128 | }; 129 | uptr k(new int(10)); 130 | for (int i = 0; i < 9; ++i) { 131 | tbl.erase_and_visit(k, decrement_and_erase); 132 | REQUIRE(tbl.visit(k, [](tbl::mapped_type &) {})); 133 | } 134 | tbl.erase_and_visit(k, decrement_and_erase); 135 | REQUIRE_FALSE(tbl.visit(k, [](tbl::mapped_type &) {})); 136 | } 137 | -------------------------------------------------------------------------------- /tests/unit-tests/test_resize.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #include "unit_test_util.hpp" 6 | #include 7 | 8 | TEST_CASE("rehash empty table", "[resize]") { 9 | int_int_table table(1); 10 | REQUIRE(unit_test_internals_view::hashpower(table) == 0); 11 | 12 | table.make_unordered_map_view().rehash(20); 13 | REQUIRE(unit_test_internals_view::hashpower(table) == 20); 14 | 15 | table.make_unordered_map_view().rehash(1); 16 | REQUIRE(unit_test_internals_view::hashpower(table) == 1); 17 | } 18 | 19 | TEST_CASE("reserve calc", "[resize]") { 20 | const size_t slot_per_bucket = std::private_impl::DEFAULT_SLOTS_PER_BUCKET; 21 | REQUIRE(unit_test_internals_view::reserve_calc(0) == 0); 22 | REQUIRE(unit_test_internals_view::reserve_calc( 23 | 1 * slot_per_bucket) == 0); 24 | 25 | REQUIRE(unit_test_internals_view::reserve_calc( 26 | 2 * slot_per_bucket) == 1); 27 | REQUIRE(unit_test_internals_view::reserve_calc( 28 | 3 * slot_per_bucket) == 2); 29 | REQUIRE(unit_test_internals_view::reserve_calc( 30 | 4 * slot_per_bucket) == 2); 31 | REQUIRE(unit_test_internals_view::reserve_calc( 32 | 2500000 * slot_per_bucket) == 22); 33 | 34 | REQUIRE(unit_test_internals_view::reserve_calc( 35 | (1UL << 31) * slot_per_bucket) == 31); 36 | REQUIRE(unit_test_internals_view::reserve_calc( 37 | ((1UL << 31) + 1) * slot_per_bucket) == 32); 38 | 39 | REQUIRE(unit_test_internals_view::reserve_calc( 40 | (1UL << 61) * slot_per_bucket) == 61); 41 | 
// ---------------------------------------------------------------------------
// NOTE(review): tail of test_resize.cpp, all of test_runner.cpp (Catch main),
// and the head of tests/unit-tests/test_user_exceptions.cpp (whose
// exception_int is cut off at the end of this extract — its move-assignment
// operator continues past the visible text).
// Covered: the reserve_calc tail — note the inconsistent shift suffixes
// "(1UL << 61)" vs "((1ULL << 61) + 1)"; 1UL is only 32 bits on LLP64/ILP32
// targets, so 1UL << 61 is UB there — both should presumably be 1ULL (confirm
// against the repository). my_type counts destructor calls via a static to
// bound how many element destructions a resize performs (>= 8, <= 17 during
// growth, then exactly +9 at table teardown). non_relocatable_type keeps a
// pointer into its own buffer so a memmove-style relocation would be detected;
// the resize test verifies every stored element's pointerToBuffer still
// targets its own buffer after several capacity doublings. Its loop
// "for (int i = 0; i < num_elems; ++i)" mixes signed int with the size_t
// num_elems — benign here, but a -Wsign-compare warning. Template arguments
// of the concurrent_unordered_map instantiations and std::array were stripped
// by the extraction. maybe_throw/exception_int in test_user_exceptions throw
// based on file-global flags to inject failures at chosen special members.
// ---------------------------------------------------------------------------
REQUIRE(unit_test_internals_view::reserve_calc( 42 | ((1ULL << 61) + 1) * slot_per_bucket) == 62); 43 | } 44 | 45 | struct my_type { 46 | int x; 47 | 48 | ~my_type() { ++num_deletes; } 49 | 50 | static size_t num_deletes; 51 | }; 52 | 53 | size_t my_type::num_deletes = 0; 54 | 55 | TEST_CASE("Resizing number of frees", "[resize]") { 56 | my_type val{0}; 57 | size_t num_deletes_after_resize; 58 | { 59 | // Should allocate 2 buckets of 4 slots 60 | std::concurrent_unordered_map, std::equal_to, 61 | std::allocator>> 62 | map(8); 63 | for (int i = 0; i < 9; ++i) { 64 | map.emplace(i, val); 65 | } 66 | // All of the items should be moved during resize to the new region of 67 | // memory. Then up to 8 of them can be moved to their new bucket. 68 | REQUIRE(my_type::num_deletes >= 8); 69 | REQUIRE(my_type::num_deletes <= 17); 70 | num_deletes_after_resize = my_type::num_deletes; 71 | } 72 | REQUIRE(my_type::num_deletes == num_deletes_after_resize + 9); 73 | } 74 | 75 | // Taken from https://github.com/facebook/folly/blob/master/folly/docs/Traits.md 76 | class non_relocatable_type { 77 | public: 78 | std::array buffer; 79 | char *pointerToBuffer; 80 | 81 | non_relocatable_type() : pointerToBuffer(buffer.data()) {} 82 | 83 | non_relocatable_type(char c) : pointerToBuffer(buffer.data()) { 84 | buffer.fill(c); 85 | } 86 | 87 | non_relocatable_type(const non_relocatable_type &x) noexcept 88 | : buffer(x.buffer), pointerToBuffer(buffer.data()) {} 89 | 90 | non_relocatable_type &operator=(const non_relocatable_type &x) { 91 | buffer = x.buffer; 92 | return *this; 93 | } 94 | }; 95 | 96 | TEST_CASE("Resize on non-relocatable type", "[resize]") { 97 | std::concurrent_unordered_map, std::equal_to, 98 | std::allocator>> 99 | map(0); 100 | REQUIRE(unit_test_internals_view::hashpower(map) == 0); 101 | // Make it resize a few times to ensure the vector capacity has to actually 102 | // change when we resize the buckets 103 | const size_t num_elems = 16; 104 | for (int i = 0; i < 
num_elems; ++i) { 105 | map.emplace(i, 'a'); 106 | } 107 | // Make sure each pointer actually points to its buffer 108 | non_relocatable_type value; 109 | std::array ref; 110 | ref.fill('a'); 111 | auto lt = map.make_unordered_map_view(); 112 | for (const auto &kvpair : lt) { 113 | REQUIRE(ref == kvpair.second.buffer); 114 | REQUIRE(kvpair.second.pointerToBuffer == kvpair.second.buffer.data()); 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /tests/unit-tests/test_runner.cpp: -------------------------------------------------------------------------------- 1 | // This file will be the entry point for the test runner 2 | #define CATCH_CONFIG_MAIN 3 | #include 4 | -------------------------------------------------------------------------------- /tests/unit-tests/test_user_exceptions.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #include "unit_test_util.hpp" 6 | 7 | namespace test_user_exceptions { 8 | void maybe_throw(bool throw_exception) { 9 | if (throw_exception) { 10 | throw std::runtime_error("exception"); 11 | } 12 | } 13 | 14 | bool constructor_throw, move_throw, hash_throw, equality_throw; 15 | 16 | class exception_int { 17 | public: 18 | exception_int() { 19 | maybe_throw(constructor_throw); 20 | val = 0; 21 | } 22 | 23 | exception_int(size_t x) { 24 | maybe_throw(constructor_throw); 25 | val = x; 26 | } 27 | 28 | exception_int(const exception_int &i) { 29 | maybe_throw(constructor_throw); 30 | val = static_cast(i); 31 | } 32 | 33 | exception_int(exception_int &&i) { 34 | maybe_throw(constructor_throw || move_throw); 35 | val = static_cast(i); 36 | } 37 | 38 | exception_int &operator=(const exception_int &i) { 39 | maybe_throw(constructor_throw); 40 | val = static_cast(i); 41 | return *this; 42 | } 43 | 44 | exception_int &operator=(exception_int &&i) { 45 | maybe_throw(constructor_throw || move_throw); 46 | val = static_cast(i); 
47 | return *this; 48 | } 49 | 50 | operator size_t() const { return val; } 51 | 52 | private: 53 | size_t val; 54 | }; 55 | } 56 | 57 | namespace std { 58 | template<> 59 | struct hash { 60 | size_t operator()(const test_user_exceptions::exception_int &x) const { 61 | test_user_exceptions::maybe_throw(test_user_exceptions::hash_throw); 62 | return x; 63 | } 64 | }; 65 | 66 | template<> 67 | struct equal_to { 68 | bool operator()(const test_user_exceptions::exception_int &lhs, 69 | const test_user_exceptions::exception_int &rhs) const { 70 | test_user_exceptions::maybe_throw(test_user_exceptions::equality_throw); 71 | return static_cast(lhs) == static_cast(rhs); 72 | } 73 | }; 74 | } 75 | 76 | typedef std::concurrent_unordered_map, 77 | std::equal_to> 78 | exception_table; 79 | 80 | void check_iter_table(exception_table &tbl, size_t expectedSize) { 81 | auto lockedTable = tbl.make_unordered_map_view(); 82 | size_t actual_size = 0; 83 | for (auto it = lockedTable.begin(); it != lockedTable.end(); ++it) { 84 | ++actual_size; 85 | } 86 | REQUIRE(actual_size == expectedSize); 87 | } 88 | 89 | TEST_CASE("user exceptions", "[user_exceptions]") { 90 | test_user_exceptions::constructor_throw = test_user_exceptions::hash_throw = test_user_exceptions::equality_throw = 91 | test_user_exceptions::move_throw = false; 92 | // We don't use sub-sections because CATCH is not exactly thread-safe 93 | 94 | // "find/contains" 95 | { 96 | exception_table tbl; 97 | tbl.emplace(1, 1); 98 | tbl.emplace(2, 2); 99 | tbl.emplace(3, 3); 100 | test_user_exceptions::hash_throw = true; 101 | REQUIRE_THROWS_AS(tbl.find(3), std::runtime_error); 102 | test_user_exceptions::hash_throw = false; 103 | test_user_exceptions::equality_throw = true; 104 | REQUIRE_THROWS_AS(tbl.find(3), std::runtime_error); 105 | test_user_exceptions::equality_throw = false; 106 | REQUIRE(tbl.find(3) == std::experimental::make_optional(3UL)); 107 | check_iter_table(tbl, 3); 108 | } 109 | 110 | // "insert" 111 | { 112 | 
exception_table tbl; 113 | test_user_exceptions::constructor_throw = true; 114 | REQUIRE_THROWS_AS(tbl.emplace(100, 100), std::runtime_error); 115 | test_user_exceptions::constructor_throw = false; 116 | REQUIRE(tbl.emplace(100, 100)); 117 | check_iter_table(tbl, 1); 118 | } 119 | 120 | // "erase" 121 | { 122 | exception_table tbl; 123 | for (int i = 0; i < 10; ++i) { 124 | tbl.emplace(i, i); 125 | } 126 | test_user_exceptions::hash_throw = true; 127 | REQUIRE_THROWS_AS(tbl.erase(5), std::runtime_error); 128 | test_user_exceptions::hash_throw = false; 129 | test_user_exceptions::equality_throw = true; 130 | REQUIRE_THROWS_AS(tbl.erase(5), std::runtime_error); 131 | test_user_exceptions::equality_throw = false; 132 | REQUIRE(tbl.erase(5)); 133 | check_iter_table(tbl, 9); 134 | } 135 | 136 | // "update" 137 | { 138 | exception_table tbl; 139 | tbl.emplace(9, 9); 140 | tbl.emplace(10, 10); 141 | test_user_exceptions::hash_throw = true; 142 | REQUIRE_THROWS_AS(tbl.update(9, 10), std::runtime_error); 143 | test_user_exceptions::hash_throw = false; 144 | test_user_exceptions::equality_throw = true; 145 | REQUIRE_THROWS_AS(tbl.update(9, 10), std::runtime_error); 146 | test_user_exceptions::equality_throw = false; 147 | REQUIRE(tbl.update(9, 10)); 148 | check_iter_table(tbl, 2); 149 | } 150 | 151 | // "update_fn" 152 | { 153 | exception_table tbl; 154 | tbl.emplace(9, 9); 155 | tbl.emplace(10, 10); 156 | auto updater = [](size_t &val) { val++; }; 157 | test_user_exceptions::hash_throw = true; 158 | REQUIRE_THROWS_AS(tbl.visit(9, updater), std::runtime_error); 159 | test_user_exceptions::hash_throw = false; 160 | test_user_exceptions::equality_throw = true; 161 | REQUIRE_THROWS_AS(tbl.visit(9, updater), std::runtime_error); 162 | test_user_exceptions::equality_throw = false; 163 | REQUIRE(tbl.visit(9, updater)); 164 | check_iter_table(tbl, 2); 165 | } 166 | } 167 | -------------------------------------------------------------------------------- 
/tests/unit-tests/unit_test_util.cpp: -------------------------------------------------------------------------------- 1 | #include "unit_test_util.hpp" 2 | 3 | std::atomic& get_unfreed_bytes() { 4 | static std::atomic unfreed_bytes(0L); 5 | return unfreed_bytes; 6 | } 7 | -------------------------------------------------------------------------------- /tests/unit-tests/unit_test_util.hpp: -------------------------------------------------------------------------------- 1 | // Utilities for unit testing 2 | #ifndef UNIT_TEST_UTIL_HH_ 3 | #define UNIT_TEST_UTIL_HH_ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | // Returns a statically allocated value used to keep track of how many unfreed 17 | // bytes have been allocated. This value is shared across all threads. 18 | std::atomic& get_unfreed_bytes(); 19 | 20 | // We define a a allocator class that keeps track of how many unfreed bytes have 21 | // been allocated. Users can specify an optional bound for how many bytes can be 22 | // unfreed, and the allocator will fail if asked to allocate above that bound 23 | // (note that behavior with this bound with concurrent allocations will be hard 24 | // to deal with). A bound below 0 is inactive (the default is -1). 
25 | template 26 | struct tracking_allocator { 27 | using value_type = T; 28 | using pointer = T *; 29 | using const_pointer = const T *; 30 | using reference = T &; 31 | using const_reference = const T &; 32 | using size_type = size_t; 33 | using difference_type = ptrdiff_t; 34 | 35 | template 36 | struct rebind { 37 | using other = tracking_allocator; 38 | }; 39 | 40 | tracking_allocator() {} 41 | 42 | template 43 | tracking_allocator(const tracking_allocator&) {} 44 | 45 | T* allocate(size_t n) { 46 | const size_t bytes_to_allocate = sizeof(T) * n; 47 | if (BOUND >= 0 && get_unfreed_bytes() + bytes_to_allocate > BOUND) { 48 | throw std::bad_alloc(); 49 | } 50 | get_unfreed_bytes() += bytes_to_allocate; 51 | return std::allocator().allocate(n); 52 | } 53 | 54 | void deallocate(T* p, size_t n) { 55 | get_unfreed_bytes() -= (sizeof(T) * n); 56 | std::allocator().deallocate(p, n); 57 | } 58 | 59 | template 60 | void construct(U* p, Args &&... args) { 61 | new((void *) p) U(std::forward(args)...); 62 | } 63 | 64 | template 65 | void destroy(U* p) { p->~U(); } 66 | }; 67 | 68 | template 69 | bool operator==(const tracking_allocator &a1, 70 | const tracking_allocator &a2) { 71 | return true; 72 | } 73 | 74 | template 75 | bool operator!=(const tracking_allocator &a1, 76 | const tracking_allocator &a2) { 77 | return false; 78 | } 79 | 80 | using int_int_table = 81 | std::concurrent_unordered_map, std::equal_to, 82 | std::allocator>>; 83 | 84 | template 85 | using int_int_table_with_allocator = 86 | std::concurrent_unordered_map, std::equal_to, Alloc>; 87 | 88 | using string_int_table = 89 | std::concurrent_unordered_map, 90 | std::equal_to, 91 | std::allocator>>; 92 | 93 | namespace std { 94 | template 95 | struct hash> { 96 | size_t operator()(const unique_ptr& ptr) const { 97 | return std::hash()(*ptr); 98 | } 99 | 100 | size_t operator()(const T* ptr) const { return std::hash()(*ptr); } 101 | }; 102 | 103 | template 104 | struct equal_to> { 105 | bool 
operator()(const unique_ptr& ptr1, const unique_ptr& ptr2) const { 106 | return *ptr1 == *ptr2; 107 | } 108 | 109 | bool operator()(const T* ptr1, const unique_ptr& ptr2) const { 110 | return *ptr1 == *ptr2; 111 | } 112 | 113 | bool operator()(const unique_ptr& ptr1, const T* ptr2) const { 114 | return *ptr1 == *ptr2; 115 | } 116 | }; 117 | } 118 | 119 | template 120 | using unique_ptr_table = std::concurrent_unordered_map< 121 | std::unique_ptr, std::unique_ptr, std::hash>, 122 | std::equal_to>, 123 | std::allocator, std::unique_ptr>>>; 124 | 125 | // Some unit tests need access into certain private data members of the table. 126 | // This class is a friend of the table, so it can access those. 127 | 128 | class unit_test_internals_view { 129 | public: 130 | static const size_t IntIntBucketSize = sizeof( 131 | std::private_impl::bucket_container>, 132 | std::private_impl::partial_t, 133 | std::private_impl::DEFAULT_SLOTS_PER_BUCKET>::bucket); 134 | 135 | template 136 | static typename concurrent_map::partial_t partial_key(const size_t hv) { 137 | return concurrent_map::partial_key(hv); 138 | } 139 | 140 | template 141 | static size_t index_hash(const size_t hashpower, const size_t hv) { 142 | return concurrent_map::index_hash(hashpower, hv); 143 | } 144 | 145 | template 146 | static size_t alt_index(const size_t hashpower, 147 | const typename concurrent_map::partial_t partial, 148 | const size_t index) { 149 | return concurrent_map::alt_index(hashpower, partial, index); 150 | } 151 | 152 | template 153 | static size_t reserve_calc(size_t n) { 154 | return concurrent_map::reserve_calc(n); 155 | } 156 | 157 | template 158 | static typename concurrent_map::locks_t& get_current_locks(const concurrent_map& table) { 159 | return table.locks; 160 | } 161 | 162 | template 163 | static typename concurrent_map::size_type hashpower(const concurrent_map& table) { 164 | return table.hashpower(); 165 | } 166 | }; 167 | 168 | #endif // UNIT_TEST_UTIL_HH_ 169 | 
--------------------------------------------------------------------------------