├── .gitignore ├── .travis.yml ├── CMakeLists.txt ├── LICENSE_1_0.txt ├── README.md ├── TUTORIAL.md ├── bench └── bench.cpp ├── include └── indexed │ ├── Allocator.h │ ├── ArrayArena.h │ ├── ArrayArenaMT.h │ ├── BufAlloc.h │ ├── Config.h │ ├── MmapAlloc.h │ ├── NewAlloc.h │ ├── Pointer.h │ ├── SingleArenaConfig.h │ ├── SingleArenaConfigUniversal.h │ └── StackTop.h └── tests ├── CMakeLists.txt ├── CMakeLists.txt.in ├── intrusive_test.cpp ├── list_test.cpp ├── map_test.cpp ├── pointer_test.cpp └── unordered_test.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | /build* 2 | /*.kdev4 3 | 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: cpp 2 | 3 | jobs: 4 | include: 5 | - os: linux 6 | dist: xenial 7 | addons: 8 | apt: 9 | packages: 10 | - libboost-all-dev 11 | - os: osx 12 | addons: 13 | homebrew: 14 | packages: 15 | - boost 16 | 17 | script: mkdir build && cd build && cmake .. && cmake --build . && ctest 18 | 19 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.1) 2 | 3 | cmake_policy(SET CMP0057 NEW) 4 | 5 | project(Indexed) 6 | 7 | find_package(Boost 1.58 REQUIRED) 8 | find_package(Threads) 9 | 10 | set(CMAKE_CXX_STANDARD 11) 11 | 12 | enable_testing() 13 | 14 | add_library(indexed INTERFACE) 15 | 16 | file(GLOB INCLUDES include/indexed/*.h) 17 | 18 | target_sources(indexed INTERFACE 19 | ${INCLUDES} 20 | ) 21 | 22 | target_include_directories(indexed INTERFACE 23 | ${Boost_INCLUDE_DIRS} 24 | include 25 | ) 26 | 27 | add_subdirectory(tests) 28 | 29 | if(CMAKE_BUILD_TYPE STREQUAL "Release") 30 | 31 | add_executable(Bench bench/bench.cpp) 32 | 33 | target_link_libraries(Bench PRIVATE 34 | indexed 35 | Threads::Threads 36 | ) 37 | 38 | install(DIRECTORY include DESTINATION include) 39 | 40 | endif() 41 | -------------------------------------------------------------------------------- /LICENSE_1_0.txt: -------------------------------------------------------------------------------- 1 | Boost Software License - Version 1.0 - August 17th, 2003 2 | 3 | Permission is hereby granted, free of charge, to any person or organization 4 | obtaining a copy of the software and accompanying documentation covered by 5 | this license (the "Software") to use, reproduce, display, distribute, 6 | execute, and transmit the Software, and to prepare derivative works of the 7 | Software, and to permit third-parties to whom the Software is furnished to 8 | do so, all subject to the following: 9 | 10 | The copyright notices in the Software and this entire statement, including 11 | the above license grant, this restriction and the following disclaimer, 12 | must be included in all copies of the Software, in whole or in part, and 13 | all derivative works of the Software, unless such copies or derivative 14 | works are solely in the form of machine-executable object code generated by 15 | a source language processor. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT 20 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE 21 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, 22 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.com/mrshurik/indexed-allocator.svg?branch=master)](https://travis-ci.com/mrshurik/indexed-allocator) 2 | 3 | # Indexed allocator 4 | It’s a header-only C++ lib of special allocators for boost node-based containers. 5 | 6 | # Main features 7 | - decreases container’s memory overhead by reducing size of pointers used in the container nodes 8 | - allows to allocate containers on stack 9 | - allows to share containers between processes 10 | - quick container serialization (container’s memory is one buffer) 11 | 12 | ## How? 13 | Indexed allocator defines pointer class which stores pointers as 32- or 16-bit integers. 14 | 15 | ## Motivation 16 | std::map contains 3 pointers per node, a simple LRU-cache (map + list) contains 5 pointers per node. With 64-bit pointers the overhead is significant, especially when the payload is small, say you store pair or one pointer. 17 | 18 | ## Requirements 19 | - at least C++11 standard compilation 20 | - need to use only boost::container::slist/list/set/map, boost::unordered::set/map or boost::intrusive::slist/list/set/map containers 21 | 22 | ## Performance 23 | It depends on your use-case. On the one hand, the lib decreases memory footprint and improves cache locality, so it may lead to higher performance due to cache. On the other hand, pointer operations require more CPU instructions and do more memory jumps, so it may be slower. The lib contains a small benchmark, just use it. 24 | 25 | ## License 26 | This project is licensed under the [Boost Software License](LICENSE_1_0.txt) 27 | 28 | ## Author 29 | Dr. Alexander Bulovyatov. Please share your opinion and ideas to bulovyatov AT g m a i l . c o m 30 | 31 | ## Tutorial 32 | [TUTORIAL.md](TUTORIAL.md) 33 | -------------------------------------------------------------------------------- /TUTORIAL.md: -------------------------------------------------------------------------------- 1 | # Indexed Allocator tutorial 2 | 3 | ### Building the library 4 | It’s a header-only library, you don’t have to build and install anything, just set path to the include directory when building your project. 5 | 6 | ### Building and running the tests 7 | You need to have cmake and boost installed. Go to the project directory. 8 | ```sh 9 | $ mkdir build 10 | $ cd build 11 | $ cmake .. 12 | $ make 13 | $ make test 14 | ``` 15 | 16 | ### Building and running the benchmark 17 | You need to have cmake installed. Go to the build directory. 18 | ```sh 19 | $ cmake -DCMAKE_BUILD_TYPE=Release .. 20 | $ make 21 | $ ./Bench 22 | ``` 23 | 24 | ## Concepts 25 | Let’s briefly describe objects taking part in memory allocation: 26 | 27 | **Container** - a node-based boost container allocating Node objects. Example: boost::container::list. 28 | 29 | **Arena** - a memory buffer, array in memory where place for Node objects is allocated. The Arena is a “stateful malloc” returning indices instead of pointers. Arena is parametrized by IndexType used for the indices, it can only allocate objects of one size and the whole Arena memory is allocated at once. 30 | 31 | **Allocator** - a STL-compatible memory allocator needed for definition of a Container type. It redirects allocation to the Arena. 32 | 33 | **Pointer** - a pointer class which stores indices internally. It’s defined in the Allocator, so the Container replaces raw pointers with Pointers in Nodes, making Nodes smaller. 34 | 35 | **ArenaConfig** - a special class which defines how indices are mapped to raw pointers and back. The class contains static members only. Allocator and Pointer types are parametrized by an ArenaConfig type and so they know how to map indices to raw pointers. 36 | 37 | The library defines Pointer class which stores an unsigned integer of IndexedType. In order to convert a raw pointer to an integer and back the following assumptions have been made: 38 | - A raw pointer points to an object (Node) located either on a thread’s stack, or in the Arena, or in the Container object. Any other location is not supported. 39 | - Stack grows from higher addresses to smaller ones, this is true for most of modern CPUs. 40 | - The pointer must be aligned to, at least, sizeof(IndexType). 41 | - When the pointer points to an object in the Arena, the address must be as for the array, i.e. address == Arena.begin() + k * sizeof(Node). The raw pointer can’t point to something inside a Node. 42 | 43 | Under these assumptions 16-bit IndexType allows for 2^14 or 2^15 allocated objects, while 32-bit IndexType allows for 2^30 or 2^31 objects. There are other restrictions described below. 44 | 45 | Pointer objects store only an index, the rest is stored in static variables of the ArenaConfig, one data for all pointers: pointer to the top of a thread’s stack, pointer to the Arena, pointer to the Container. These pointers can be thread local, so at most one Arena per thread is supported. It’s the price of small pointers. 46 | 47 | ## Description of classes 48 | **ArrayArena** - a simple Arena, is not thread-safe, is parametrized by IndexType and Alloc. Alloc defines how real memory is allocated, the allocation happens on the first call to Arena::allocate(). There are following Alloc classes: NewAlloc - uses C++ operator new, MmapAlloc - uses OS memory pages, BufAlloc - uses an already allocated memory buffer. MmapAlloc allows to “reserve” memory instead of allocating it at once, the real memory is lazy allocated when the Arena grows, but the allocation granularity is 4 KB, which isn’t good for a small Container. 49 | 50 | **ArrayArenaMT** - the same as ArrayArena, but it’s thread-safe, designed to reuse/share Arena’s pool between several threads. It’s slower than ArrayArena due to extra synchronization overhead. 51 | 52 | **SingleArenaConfig** - ArenaConfig with assumption that a Node is located either on a stack, or in the Arena. As the result a Container object using this config can’t be located in heap, only on stack. For clarity, here “Container object is located on stack” means that the object itself (list) is located on the stack, while its Nodes are located in the Arena. The same SingleArenaConfig can be used by multiple Container instances. Also, it’s slightly faster than the other config type. SingleArenaConfig uses 1 bit in IndexType for an internal flag. There are SingleArenaConfigStatic and SingleArenaConfigPerThread, which use either static, or static thread local variables for stackTop and arena pointers. 53 | 54 | **SingleArenaConfigUniversal** - ArenaConfig with assumption that a Node is located either on a stack, or in the Arena, or in the Container object. It also supports the case when the Arena’s memory is located on the stack. As a disadvantage, only one (or per thread) Container instance is supported. It’s address must be given to the config before the Container is constructed. Usually it’s done automatically by the Allocator, except for the case of boost::intrusive containers when it must be done explicitly. SingleArenaConfigUniversal uses 2 bits in IndexType for internal flags. There are SingleArenaConfigUniversalStatic and SingleArenaConfigUniversalPerThread classes, which use either static, or static thread local variables for stackTop, arena and container pointers. 55 | 56 | **Allocator** - an STL-allocator, it’s parametrized by an ArenaConfig type. You need to define an Allocator type in order to define a Container type. Different Container types can be defined using the same ArenaConfig, but since the config uses one Arena, the Containers used at the same time must have equal size of Nodes. The Allocator contains pointer to the Arena, the pointer can be passed explicitly to the constructor or is obtained automatically from ArenaConfig::defaultArena(). 57 | 58 | ## Notes 59 | 60 | ### Boost unordered set/map containers 61 | They’re a bit special. First, for them you don’t need to use SingleArenaConfigUniversal even when the container is located in heap. Second, they need to allocate vector of buckets, which is resized from time to time. It’s not supported by the Allocator, so the Allocator rebinds to std::allocator for the bucket type. As the result, bucket memory is allocated via std::allocator. 62 | 63 | ### Stack and 16-bit IndexType 64 | Pointer class must be able to address objects on stack. When IndexType is uint16_t, there are only 14 or 15 bits available. With the default Node alignment = sizeof(IndexType) it gives only 32 KB or 64 KB. If the stack is deeper the code may fail. There are 2 ways to fix it. You can increase Node alignment, depending on your use-case Node can have 4 or 8 bytes alignment. Be careful. Another direction, instead of pointing to the top of a stack, you can set stackTop to address below it, to a function’s frame where the container is located or used. Be very careful. 65 | 66 | ### Debugging support 67 | Since the code is not trivial and relies on a few assumptions these assumptions and some pre/post-conditions are checked in asserts. When the code is compiled in Release mode (NDEBUG var is defined) the asserts are removed, if you need them in Release mode please define INDEXED_DEBUG=1. 68 | 69 | ### Code example 70 | ```C++ 71 | #include 72 | #include 73 | #include 74 | #include 75 | #include 76 | 77 | #include 78 | 79 | using namespace indexed; 80 | 81 | using Arena = ArrayArena; // 16-bit indexed Arena with new() 82 | 83 | namespace { 84 | // define your ArenaConfig via subclassing 85 | struct MyArenaConfig : public SingleArenaConfigUniversalStatic {}; 86 | } 87 | 88 | using ValueType = int; 89 | using Alloc = Allocator; 90 | using List = boost::container::list; 91 | 92 | void myFunction() { 93 | Arena myArena(10); // Arena with capacity 10 94 | MyArenaConfig::setArena(&myArena); // set Arena pointer in the config 95 | MyArenaConfig::setStackTop(getThreadStackTop()); // set pointer to the top of the stack 96 | List myList; // Alloc will use Arena from MyArenaConfig 97 | myList.push_back(1); // use list as usual 98 | } 99 | ``` 100 | 101 | ## FAQ 102 | **How to resize an Arena in order to grow or shrink Containers?** 103 | There is no easy way. The Arena’s capacity is fixed. You can only do the following trick. First, copy data from the containers to, say, a std::vector. Then, you need to destroy the containers or do container = Container(). Then, do arena.freeMemory() and arena.setCapacity(new). Now create new containers, if needed, and copy the data from the std::vector. 104 | 105 | **How to ensure that a Container has no allocated Nodes?** 106 | You may need it if you want to do arena.reset() or arena.freeMemory(). Simple container.clear() is not enough. Do container = Container(). 107 | -------------------------------------------------------------------------------- /bench/bench.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | #ifndef NDEBUG 29 | #warning You compile the benchmark not in Release mode! 30 | #endif 31 | 32 | using namespace std; 33 | using namespace indexed; 34 | 35 | using Arena = ArrayArena; 36 | using ArenaMT = ArrayArenaMT; 37 | 38 | namespace { 39 | 40 | struct ArenaConfig : public SingleArenaConfigStatic {}; 41 | struct ArenaConfigUniversal : public SingleArenaConfigUniversalStatic {}; 42 | struct ArenaConfigMT : public SingleArenaConfigPerThread {}; 43 | struct ArenaConfigTL : public SingleArenaConfigPerThread {}; 44 | 45 | } 46 | 47 | using Key = int; 48 | using Value = int; 49 | 50 | template 51 | struct Types { 52 | using Alloc = Allocator, Config>; 53 | using Map = boost::container::map, Alloc>; 54 | using UnMap = boost::unordered_map, equal_to, Alloc>; 55 | }; 56 | 57 | using Map = boost::container::map; 58 | using UnMap = boost::unordered_map; 59 | 60 | template 61 | struct Bench { 62 | 63 | using Arena = typename Config::Arena; 64 | 65 | typename Config::ArenaPtr arena; 66 | size_t n; 67 | size_t m; 68 | size_t repeat; 69 | size_t numThreads; 70 | bool runThreadLocal; 71 | bool dummy; 72 | 73 | template 74 | void map_quick_insert(const char name[], bool showOutput = true); 75 | 76 | template 77 | void map_query(const char name[], bool showOutput = true); 78 | 79 | template 80 | void map_insert_and_remove(const char name[], bool showOutput = true); 81 | 82 | template 83 | void runParallel(const char func[], const char text[]) { 84 | size_t numThreads = this->numThreads; 85 | using Func = function; 86 | Func f; 87 | string fname(func); 88 | if (fname == "map_quick_insert") { 89 | f = Func(&Bench::map_quick_insert); 90 | } else if (fname == "map_query") { 91 | f = Func(&Bench::map_query); 92 | } else if (fname == "map_insert_and_remove") { 93 | f = Func(&Bench::map_insert_and_remove); 94 | } else { 95 | throw runtime_error("can't find function"); 96 | } 97 | vector threads; 98 | threads.reserve(numThreads); 99 | for (size_t i = 0; i < numThreads; ++i) { 100 | threads.emplace_back([this, f, i, text]{ 101 | Config::setArena(this->arena); 102 | Config::setStackTop(getThreadStackTop()); 103 | f(*this, text, i == 0); 104 | }); 105 | } 106 | for (size_t i = 0; i < numThreads; ++i) { 107 | threads[i].join(); 108 | } 109 | } 110 | 111 | unique_ptr useLocalArenaIfNeeded(bool doDelete) { 112 | unique_ptr res; 113 | if (runThreadLocal) { 114 | res.reset(new Arena(n * m + 1, doDelete)); 115 | Config::setArena(res.get()); 116 | } 117 | return res; 118 | } 119 | 120 | }; 121 | 122 | template 123 | void benchSingleThread() { 124 | cout << endl << "Test in single thread mode with " << 125 | (is_same::value ? "standard" : "universal") << " ArenaConfig" << endl << endl; 126 | size_t n = 1024; 127 | size_t m = 1024; 128 | Arena arena(n * m + 1); 129 | Config::setArena(&arena); 130 | Config::setStackTop(getThreadStackTop()); 131 | Bench bench = {&arena, n, m, 3, 1, false}; 132 | 133 | using indexed = Types; 134 | using IndMap = typename indexed::Map; 135 | using IndUnMap = typename indexed::UnMap; 136 | 137 | // map 138 | arena.enableDelete(false); 139 | bench.template map_quick_insert("Insert with indexed map"); 140 | bench.template map_quick_insert("Insert with map"); 141 | arena.reset(); 142 | bench.template map_query("Query with indexed map"); 143 | bench.template map_query("Query with map"); 144 | arena.enableDelete(true); 145 | arena.reset(); 146 | bench.template map_insert_and_remove("Insert and remove with indexed map"); 147 | bench.template map_insert_and_remove("Insert and remove with map"); 148 | arena.freeMemory(); 149 | 150 | // unordered_map 151 | arena.enableDelete(false); 152 | bench.template map_quick_insert("Insert with unordered indexed map"); 153 | bench.template map_quick_insert("Insert with unordered map"); 154 | arena.reset(); 155 | bench.template map_query("Query with unordered indexed map"); 156 | bench.template map_query("Query with unordered map"); 157 | arena.enableDelete(true); 158 | arena.reset(); 159 | bench.template map_insert_and_remove("Insert and remove with unordered indexed map"); 160 | bench.template map_insert_and_remove("Insert and remove with unordered map"); 161 | arena.freeMemory(); 162 | 163 | cout << (bench.dummy ? "" : " ") << endl; 164 | } 165 | 166 | void benchMultiThreadShared() { 167 | size_t numThreads = 2; 168 | cout << endl << "Test in multithread mode with shared ArenaMT and " << numThreads << " threads" << endl << endl; 169 | size_t n = 1024; 170 | size_t m = 1024; 171 | ArenaMT arenaMT((n * m + 1) * numThreads); 172 | Bench bench = {&arenaMT, n, m, 3, numThreads, false}; 173 | 174 | using indexed = Types; 175 | 176 | // map 177 | arenaMT.enableDelete(false); 178 | bench.runParallel("map_query", "Query with indexed map"); 179 | bench.runParallel("map_query", "Query with map"); 180 | arenaMT.reset(); 181 | arenaMT.enableDelete(true); 182 | bench.runParallel("map_insert_and_remove", "Insert and remove with indexed map"); 183 | bench.runParallel("map_insert_and_remove", "Insert and remove with map"); 184 | arenaMT.freeMemory(); 185 | 186 | // unordered map 187 | arenaMT.enableDelete(false); 188 | bench.runParallel("map_query", "Query with indexed unordered map"); 189 | bench.runParallel("map_query", "Query with unordered map"); 190 | arenaMT.reset(); 191 | arenaMT.enableDelete(true); 192 | bench.runParallel("map_insert_and_remove", "Insert and remove with indexed unordered map"); 193 | bench.runParallel("map_insert_and_remove", "Insert and remove with unordered map"); 194 | arenaMT.freeMemory(); 195 | 196 | cout << (bench.dummy ? "" : " ") << endl; 197 | } 198 | 199 | void benchMultiThreadPerThread() { 200 | size_t numThreads = 2; 201 | cout << endl << "Test in multithread mode with thread local Arena and " << numThreads << " threads" << endl << endl; 202 | size_t n = 1024; 203 | size_t m = 1024; 204 | Bench bench = {nullptr, n, m, 3, numThreads, true}; 205 | 206 | using indexed = Types; 207 | 208 | // map 209 | bench.runParallel("map_quick_insert", "Insert with indexed map"); 210 | bench.runParallel("map_quick_insert", "Insert with map"); 211 | bench.runParallel("map_query", "Query with indexed map"); 212 | bench.runParallel("map_query", "Query with map"); 213 | bench.runParallel("map_insert_and_remove", "Insert and remove with indexed map"); 214 | bench.runParallel("map_insert_and_remove", "Insert and remove with map"); 215 | 216 | // unordered map 217 | bench.runParallel("map_quick_insert", "Insert with indexed unordered map"); 218 | bench.runParallel("map_quick_insert", "Insert with unordered map"); 219 | bench.runParallel("map_query", "Query with indexed unordered map"); 220 | bench.runParallel("map_query", "Query with unordered map"); 221 | bench.runParallel("map_insert_and_remove", "Insert and remove with indexed unordered map"); 222 | bench.runParallel("map_insert_and_remove", "Insert and remove with unordered map"); 223 | 224 | cout << (bench.dummy ? "" : " ") << endl; 225 | } 226 | 227 | int main() { 228 | #ifndef NDEBUG 229 | cout << "You run the benchmark compiled not in Release mode!" << endl; 230 | #endif 231 | try { 232 | benchSingleThread(); 233 | benchSingleThread(); 234 | benchMultiThreadPerThread(); 235 | benchMultiThreadShared(); 236 | } catch(const exception& ex) { 237 | cerr << "Bench exit with exception " << ex.what() << endl; 238 | return 1; 239 | } 240 | return 0; 241 | } 242 | 243 | template 244 | template 245 | void Bench::map_quick_insert(const char name[], bool showOutput) { 246 | auto locArena = useLocalArenaIfNeeded(false); 247 | size_t dummy = 0; 248 | auto start = chrono::high_resolution_clock::now(); 249 | for (size_t k = 0; k < repeat; ++k) { 250 | Config::getArena()->reset(); 251 | Map map; 252 | 253 | for (size_t i = 0; i < n; ++i) { 254 | for (size_t j = 0; j < m; ++j) { 255 | int key = int(j * n + i); 256 | map.emplace(key, 0); 257 | } 258 | } 259 | 260 | dummy += map.size(); 261 | } 262 | auto end = chrono::high_resolution_clock::now(); 263 | auto time = chrono::duration_cast(end - start).count(); 264 | 265 | if (showOutput) { 266 | cout << name << ": wall time " << time << endl; 267 | } 268 | this->dummy |= dummy; 269 | } 270 | 271 | template 272 | template 273 | void Bench::map_query(const char name[], bool showOutput) { 274 | auto locArena = useLocalArenaIfNeeded(false); 275 | Map map; 276 | for (size_t i = 0; i < n; ++i) { 277 | for (size_t j = 0; j < m; ++j) { 278 | int key = int(j * n + i); 279 | map.emplace(2 * key, 1); 280 | } 281 | } 282 | 283 | size_t dummy = 0; 284 | auto start = chrono::high_resolution_clock::now(); 285 | const Map& cmap = map; 286 | for (size_t k = 0; k < repeat; ++k) { 287 | for (size_t i = n; i > 0; --i) { 288 | for (size_t j = 0; j < m; ++j) { 289 | int key = int(j * n + i - 1); 290 | dummy += (*cmap.find(2 * key)).second; 291 | dummy += cmap.count(2 * key + 1); 292 | } 293 | } 294 | } 295 | auto end = chrono::high_resolution_clock::now(); 296 | auto time = chrono::duration_cast(end - start).count(); 297 | 298 | if (showOutput) { 299 | cout << name << ": wall time " << time << endl; 300 | } 301 | this->dummy |= dummy; 302 | } 303 | 304 | template 305 | template 306 | void Bench::map_insert_and_remove(const char name[], bool showOutput) { 307 | auto locArena = useLocalArenaIfNeeded(true); 308 | const size_t capacity = n * m; 309 | size_t dummy = 0; 310 | auto start = chrono::high_resolution_clock::now(); 311 | for (size_t k = 0; k < repeat; ++k) { 312 | Map map; 313 | 314 | for (size_t i = 0; i < n; ++i) { 315 | for (size_t j = 0; j < m; ++j) { 316 | int key = int(j * n + i); 317 | map.emplace(key, 0); 318 | } 319 | if (i % 2 == 1) { 320 | for (size_t j = 0; j < m; ++j) { 321 | int key = int(j * n + i - 1); 322 | map.erase(key); 323 | } 324 | for (size_t j = 0; j < m; ++j) { 325 | int key = int(j * n + i - 1 + capacity); 326 | map.emplace(key, 0); 327 | } 328 | } 329 | } 330 | 331 | dummy += map.size(); 332 | } 333 | auto end = chrono::high_resolution_clock::now(); 334 | auto time = chrono::duration_cast(end - start).count(); 335 | 336 | if (showOutput) { 337 | cout << name << ": wall time " << time << endl; 338 | } 339 | this->dummy |= dummy; 340 | } 341 | -------------------------------------------------------------------------------- /include/indexed/Allocator.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | 15 | // "hardcode" bucket type used in boost::unordered_* containers 16 | namespace boost { namespace unordered { namespace detail { 17 | 18 | template 19 | struct bucket; 20 | 21 | } } } 22 | 23 | namespace indexed { 24 | 25 | template 26 | class Allocator; 27 | 28 | namespace detail { 29 | 30 | template 31 | struct IsUnorderedBucket { 32 | static constexpr bool value = false; 33 | }; 34 | 35 | template 36 | struct IsUnorderedBucket> { 37 | static constexpr bool value = true; 38 | }; 39 | 40 | // needed in order to provide cast from Allocator to std::allocator 41 | template 42 | class StdAllocator : public std::allocator { 43 | public: 44 | StdAllocator() = default; 45 | 46 | template 47 | StdAllocator(const Allocator&) noexcept 48 | : StdAllocator() {} 49 | }; 50 | 51 | template 52 | class VoidAllocator { 53 | public: 54 | using propagate_on_container_copy_assignment = std::true_type; 55 | using propagate_on_container_move_assignment = std::true_type; 56 | using propagate_on_container_swap = std::true_type; 57 | 58 | template 59 | struct rebind { 60 | using other = typename std::conditional::value, 61 | typename ArenaConfig::template ArrayAllocator, 62 | Allocator>::type; 63 | }; 64 | 65 | protected: 66 | using ArenaPtr = typename ArenaConfig::ArenaPtr; 67 | static constexpr bool kAutoSetContainer = ArenaConfig::kAssignContainerFollowingAllocator; 68 | 69 | VoidAllocator() noexcept 70 | : m_arena(ArenaConfig::defaultArena()) { 71 | if (kAutoSetContainer) { 72 | ArenaConfig::setContainer(this); 73 | } 74 | } 75 | 76 | VoidAllocator(const ArenaPtr& arena) noexcept 77 | : m_arena(arena) {} 78 | 79 | ArenaPtr m_arena; 80 | }; 81 | 82 | } 83 | 84 | /** 85 | * @brief C++11 STL allocator class using indexed Pointer and Arena for allocation. 86 | * NOTE Can allocate only 1 element, can't allocate array of elements. 87 | * @tparam Type type of allocated object 88 | * @tparam ArenaConfig Arena config class with ArenaConfigInterface (see doc) 89 | */ 90 | template 91 | class Allocator : public detail::VoidAllocator { 92 | private: 93 | using Base = detail::VoidAllocator; 94 | using ArenaPtr = typename Base::ArenaPtr; 95 | using Base::kAutoSetContainer; 96 | using IndexType = typename ArenaConfig::IndexType; 97 | 98 | public: 99 | using value_type = Type; 100 | using pointer = Pointer; 101 | 102 | /** 103 | * @brief Create allocator using Arena obtained via ArenaConfig::defaultArena() 104 | */ 105 | Allocator() = default; 106 | 107 | /** 108 | * @brief Create allocator with given Arena 109 | * @param arena Pointer to Arena used for allocation (see doc) 110 | */ 111 | explicit Allocator(const ArenaPtr& arena) noexcept 112 | : Base(arena) {} 113 | 114 | Allocator(const Allocator& other) noexcept 115 | : Base(other) { 116 | if (kAutoSetContainer) { 117 | ArenaConfig::setContainer(this); 118 | } 119 | } 120 | 121 | template 122 | Allocator(const Allocator& alloc) noexcept 123 | : Base(alloc) { 124 | if (kAutoSetContainer) { 125 | ArenaConfig::setContainer(this); 126 | } 127 | } 128 | 129 | Allocator& operator=(const Allocator& other) noexcept { 130 | this->m_arena = other.m_arena; 131 | if (kAutoSetContainer) { 132 | ArenaConfig::setContainer(this); 133 | } 134 | return *this; 135 | } 136 | 137 | pointer allocate(size_t n) const { 138 | indexed_assert(n == 1 && "indexed::Allocator can't allocate/deallocate array"); 139 | IndexType arenaInd = this->m_arena->allocate(sizeof(Type)); 140 | return pointer(ArenaConfig::template arenaToPtrIndex(arenaInd)); 141 | } 142 | 143 | void deallocate(const pointer& ptr, size_t) const noexcept { 144 | IndexType arenaInd = ArenaConfig::template ptrToArenaIndex(ptr.get()); 145 | this->m_arena->deallocate(arenaInd, sizeof(Type)); 146 | } 147 | 148 | friend 149 | bool operator==(const Allocator& left, const Allocator& right) noexcept { 150 | return left.m_arena == right.m_arena; 151 | } 152 | 153 | friend 154 | bool operator!=(const Allocator& left, const Allocator& right) noexcept { 155 | return !(left == right); 156 | } 157 | }; 158 | 159 | } 160 | -------------------------------------------------------------------------------- /include/indexed/ArrayArena.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | namespace indexed { 17 | 18 | /** 19 | * @brief Arena assigning index to allocated memory blocks. It's supposed to be used by indexed::Allocator. 20 | * 21 | * Not thread-safe, use ArrayArenaMT if you need to access the same arena from multiple threads. 22 | * Index type limits Arena's maximal capacity, further limits can be imposed by ArenaConfig. 23 | * Can allocate objects of one size only, the size is fixed on the first object allocation. 24 | * Real memory is allocated for the whole capacity via Alloc on the first object allocation, 25 | * it's never released until the Arena destructor or freeMemory() is called. 26 | * Arena capacity must be set before the first allocation, it can be changed only after freeMemory(). 27 | * @tparam Index unsigned integer type used for pointer representation: uint16_t or uint32_t 28 | * @tparam Alloc class responsible for memory buffer allocation, e.g. NewAlloc 29 | */ 30 | template 31 | class ArrayArena : public Alloc { 32 | static_assert(std::is_same::value || 33 | std::is_same::value, "Index must be uint16_t or uint32_t"); 34 | 35 | public: 36 | using IndexType = Index; 37 | 38 | static constexpr bool kIsArrayArenaMT = false; 39 | 40 | /** 41 | * @brief Create Arena 42 | * @param capacity capacity in objects 43 | * @param enableDelete see enableDelete() 44 | * @param alloc object of Alloc type 45 | */ 46 | explicit ArrayArena(size_t capacity = 0, bool enableDelete = true, Alloc&& alloc = Alloc()) 47 | : Alloc(std::move(alloc)) 48 | , m_capacity(0) 49 | , m_elementSizeInIndex(0) 50 | , m_doDelete(enableDelete) 51 | , m_nextFree(0) 52 | , m_allocatedCount(0) 53 | , m_usedCapacity(0) { 54 | setCapacity(capacity); 55 | } 56 | 57 | ArrayArena(ArrayArena&&) = default; 58 | ArrayArena(const ArrayArena&) = delete; 59 | 60 | ArrayArena& operator=(ArrayArena&&) = default; 61 | ArrayArena& operator=(const ArrayArena&) = delete; 62 | 63 | /** 64 | * @brief start of allocated memory buffer 65 | */ 66 | char* begin() const noexcept { return static_cast(Alloc::getPtr()); } 67 | 68 | /** 69 | * @brief end of allocated memory buffer 70 | */ 71 | char* end() const noexcept { return begin() + elementSize() * m_capacity; } 72 | 73 | /** 74 | * @brief capacity of the Arena 75 | */ 76 | size_t capacity() const noexcept { return m_capacity; } 77 | 78 | /** 79 | * @brief peek size ever reached (mostly for debug) 80 | */ 81 | size_t usedCapacity() const noexcept { return m_usedCapacity; } 82 | 83 | /** 84 | * @brief number of alive objects = allocated - deallocated (mostly for debug) 85 | */ 86 | size_t allocatedCount() const noexcept { return m_allocatedCount; } 87 | 88 | /** 89 | * @brief size of allocated memory objects in bytes 90 | */ 91 | size_t elementSize() const noexcept { return m_elementSizeInIndex * sizeof(Index); } 92 | 93 | /** 94 | * @brief true if deletion is on, see enableDelete() 95 | */ 96 | bool deleteIsEnabled() const noexcept { return m_doDelete; } 97 | 98 | /** 99 | * @brief get pointer of object by index 100 | * @param index index returned by the Arena allocate() 101 | */ 102 | void* getElement(Index index) const noexcept { return getElementInt(index, elementSize()); } 103 | 104 | /** 105 | * @brief Enable/disable object deletion 106 | * When deletion is on index released after deallocate() is used in future allocate(). 107 | * When deletion is off, a new index is always assigned, allocate()/deallocate() is faster, 108 | * but Arena may require more capacity. 109 | * @param enable true - deletion is on 110 | */ 111 | void enableDelete(bool enable) noexcept { m_doDelete = enable; } 112 | 113 | /** 114 | * @brief set Arena capacity, must be done before the first allocation 115 | * @param capacity new capacity 116 | */ 117 | void setCapacity(size_t capacity) { 118 | if (capacity >= (1u << (sizeof(Index) * 8 - 1))) { 119 | throw std::length_error("indexed::ArrayArena capacity is too big for Index type"); 120 | } 121 | if (begin() != nullptr) { 122 | throw std::runtime_error("indexed::ArrayArena capacity must be set before allocation"); 123 | } 124 | m_capacity = Index(capacity); 125 | } 126 | 127 | /** 128 | * @brief Converts pointer to index 129 | * @param ptr pointer to element allocated with the Arena 130 | * @return index of the element in the Arena 131 | */ 132 | Index pointer_to(const void* ptr) const noexcept { 133 | size_t offset = static_cast(ptr) - begin(); 134 | Index pos = Index(uint32_t(offset / sizeof(Index)) / m_elementSizeInIndex); 135 | indexed_assert(elementSize() * pos == offset 136 | && "Attempt to create indexed::Pointer pointing inside an allocated Node, do you use iterator-> ?"); 137 | return pos + 1; 138 | } 139 | 140 | /** 141 | * @brief Allocate object in the Arena 142 | * @param typeSize size of the object in bytes 143 | * @return index assigned to the allocated object 144 | */ 145 | Index allocate(size_t typeSize) { 146 | indexed_assert((elementSize() == typeSize || elementSize() == 0) 147 | && "indexed::ArrayArena can't handle different-sized allocations"); 148 | Index index = 0; 149 | if (m_nextFree != 0) { 150 | index = m_nextFree; 151 | void* outPtr = getElementInt(index, typeSize); 152 | m_nextFree = *static_cast(outPtr); 153 | } else { 154 | if (m_usedCapacity == m_capacity) { 155 | throw std::bad_alloc(); 156 | } 157 | if (begin() == nullptr) { 158 | indexed_assert(typeSize % sizeof(Index) == 0 159 | && "indexed::ArrayArena elementSize must be multiple of Index size"); 160 | Alloc::malloc(typeSize * m_capacity); 161 | m_elementSizeInIndex = decltype(m_elementSizeInIndex)(typeSize / sizeof(Index)); 162 | indexed_assert(m_elementSizeInIndex == typeSize / sizeof(Index) 163 | && "indexed::ArrayArenaMT elementSize is too large"); 164 | } 165 | ++m_usedCapacity; 166 | index = m_usedCapacity; 167 | } 168 | ++m_allocatedCount; 169 | return index; 170 | } 171 | 172 | /** 173 | * @brief Deallocate object allocated before with the Arena 174 | * @param index index of the object obtained in allocate() 175 | * @param typeSize size of the object in bytes 176 | */ 177 | void deallocate(Index index, size_t typeSize) noexcept { 178 | --m_allocatedCount; 179 | if (m_allocatedCount == 0) { 180 | reset(); 181 | return; 182 | } 183 | if (m_doDelete) { 184 | void* ptr = getElementInt(index, typeSize); 185 | *static_cast(ptr) = m_nextFree; 186 | m_nextFree = index; 187 | } 188 | } 189 | 190 | /** 191 | * @brief Reset container to the "new" state, the memory isn't released, it's reused. 192 | * NOTE You should be sure that there are no allocated objects or they will never be used. 193 | */ 194 | void reset() noexcept { 195 | indexed_warning(m_allocatedCount == 0 && "ArrayArena::reset() is called while there are allocated objects"); 196 | m_nextFree = 0; 197 | m_allocatedCount = 0; 198 | m_usedCapacity = 0; 199 | } 200 | 201 | /** 202 | * @brief Reset the Arena and release its memory. New memory will be allocated on allocate(). 203 | * NOTE You should be sure that there are no allocated objects or they will never be used. 204 | */ 205 | void freeMemory() noexcept { 206 | m_elementSizeInIndex = 0; 207 | reset(); 208 | Alloc::free(); 209 | } 210 | 211 | ~ArrayArena() noexcept { 212 | indexed_warning(m_allocatedCount == 0 && "ArrayArena is destructed while there are allocated objects"); 213 | } 214 | 215 | private: 216 | void* getElementInt(Index index, size_t elementSize) const noexcept { 217 | indexed_assert(index > 0 && index <= m_usedCapacity && "indexed::Pointer is invalid"); 218 | return begin() + elementSize * (index - 1); 219 | } 220 | 221 | Index m_capacity; 222 | uint16_t m_elementSizeInIndex; // size / sizeof(Index) 223 | bool m_doDelete; 224 | Index m_nextFree; // slist of free elements 225 | Index m_allocatedCount; 226 | Index m_usedCapacity; 227 | }; 228 | 229 | } 230 | -------------------------------------------------------------------------------- /include/indexed/ArrayArenaMT.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | namespace indexed { 19 | 20 | namespace detail { 21 | 22 | template 23 | struct DoubleWidth; 24 | 25 | template <> 26 | struct DoubleWidth { 27 | using type = uint64_t; 28 | }; 29 | 30 | template <> 31 | struct DoubleWidth { 32 | using type = uint32_t; 33 | }; 34 | 35 | template 36 | class LockFreeSList { 37 | private: 38 | using IndexType = typename Arena::IndexType; 39 | using DoubleIndex = typename DoubleWidth::type; 40 | 41 | static constexpr uint8_t kBits = sizeof(IndexType) * 8; 42 | 43 | public: 44 | LockFreeSList() noexcept 45 | : m_head(0) {} 46 | 47 | void reset() noexcept { m_head = 0; } 48 | 49 | IndexType listLength(Arena& arena) const noexcept { 50 | IndexType len = 0; 51 | IndexType next = IndexType(m_head); 52 | while (next != 0) { 53 | ++len; 54 | void* freeSlot = arena.getElement(next); 55 | next = *static_cast(freeSlot); 56 | } 57 | return len; 58 | } 59 | 60 | IndexType pull(Arena& arena) noexcept { 61 | DoubleIndex headD = m_head; 62 | for(; ;) { 63 | IndexType head = IndexType(headD); 64 | if (head == 0) { 65 | return 0; 66 | } 67 | void* freeSlot = arena.getElement(head); 68 | IndexType futureHead = *static_cast(freeSlot); 69 | DoubleIndex futureHeadD = toDoubleIndex(futureHead, toStamp(headD) + 1); 70 | if (m_head.compare_exchange_strong(headD, futureHeadD)) { 71 | return head; 72 | } 73 | } 74 | } 75 | 76 | void push(IndexType free, Arena& arena) noexcept { 77 | void* freeSlot = arena.getElement(free); 78 | DoubleIndex headD = m_head; 79 | for(; ;) { 80 | *static_cast(freeSlot) = IndexType(headD); 81 | DoubleIndex futureHeadD = toDoubleIndex(free, toStamp(headD) + 1); 82 | if (m_head.compare_exchange_strong(headD, futureHeadD)) { 83 | break; 84 | } 85 | } 86 | } 87 | 88 | private: 89 | static DoubleIndex toStamp(DoubleIndex indexD) noexcept { return indexD >> kBits; } 90 | 91 | static DoubleIndex toDoubleIndex(IndexType index, DoubleIndex stamp) noexcept { return (stamp << kBits) | index; } 92 | 93 | std::atomic m_head; 94 | }; 95 | 96 | } 97 | 98 | /** 99 | * @brief Arena assigning index to allocated memory blocks. It's supposed to be used by indexed::Allocator. 100 | * 101 | * Thread-safe version of ArrayArena. Consider ArrayArena if you don't access the same Arena in MT mode. 102 | * Index type limits Arena's maximal capacity, further limits can be imposed by ArenaConfig. 103 | * Can allocate objects of one size only, the size is fixed on the first object allocation. 104 | * Real memory is allocated for the whole capacity via Alloc on the first object allocation, 105 | * it's never released until the Arena destructor or freeMemory() is called. 106 | * Arena capacity must be set before the first allocation, it can be changed only after freeMemory(). 107 | * 108 | * ArrayArenaMT avoids unnecessary synchronization for performance. It assumes that the first call in every 109 | * thread is allocate(), or at least, the first call to getElement() or pointer_to() in a thread A happens 110 | * after allocate() has been already called in a thread B. This assumption is met when the Arena is 111 | * accessed via Allocator class. 112 | * 113 | * @tparam Index unsigned integer type used for pointer representation: uint16_t or uint32_t 114 | * @tparam Alloc class responsible for memory buffer allocation, e.g. NewAlloc 115 | */ 116 | template 117 | class ArrayArenaMT : public Alloc { 118 | static_assert(std::is_same::value || 119 | std::is_same::value, "Index must be uint16_t or uint32_t"); 120 | 121 | public: 122 | using IndexType = Index; 123 | 124 | static constexpr bool kIsArrayArenaMT = true; 125 | 126 | /** 127 | * @brief Create Arena 128 | * @param capacity capacity in objects 129 | * @param enableDelete see enableDelete() 130 | * @param alloc object of Alloc type 131 | */ 132 | explicit ArrayArenaMT(size_t capacity = 0, bool enableDelete = true, Alloc&& alloc = Alloc()) 133 | : Alloc(std::move(alloc)) 134 | , m_capacity(0) 135 | , m_elementSizeInIndex(0) 136 | , m_doDelete(enableDelete) 137 | , m_isAllocError(false) 138 | , m_allocMutex() 139 | , m_freeList() 140 | , m_usedCapacity(0) { 141 | setCapacity(capacity); 142 | } 143 | 144 | ArrayArenaMT(const ArrayArenaMT&) = delete; 145 | 146 | ArrayArenaMT& operator=(const ArrayArenaMT&) = delete; 147 | 148 | /** 149 | * @brief start of allocated memory buffer 150 | */ 151 | char* begin() const noexcept { return static_cast(Alloc::getPtr()); } 152 | 153 | /** 154 | * @brief end of allocated memory buffer 155 | */ 156 | char* end() const noexcept { return begin() + elementSize() * m_capacity; } 157 | 158 | /** 159 | * @brief capacity of the Arena 160 | */ 161 | size_t capacity() const noexcept { return m_capacity; } 162 | 163 | /** 164 | * @brief peek size ever reached, not MT-safe (mostly for debug) 165 | */ 166 | size_t usedCapacity() const noexcept { return m_usedCapacity; } 167 | 168 | /** 169 | * @brief size of allocated memory objects in bytes 170 | */ 171 | size_t elementSize() const noexcept { return m_elementSizeInIndex * sizeof(Index); } 172 | 173 | /** 174 | * @brief true if deletion is on, see enableDelete() 175 | */ 176 | bool deleteIsEnabled() const noexcept { return m_doDelete; } 177 | 178 | /** 179 | * @brief get pointer of object by index 180 | * @param index index returned by the Arena allocate() 181 | */ 182 | void* getElement(Index index) const noexcept { return getElementInt(index, elementSize()); } 183 | 184 | /** 185 | * @brief Enable/disable object deletion 186 | * When deletion is on index released after deallocate() is used in future allocate(). 187 | * When deletion is off, a new index is always assigned, allocate()/deallocate() is faster, 188 | * but Arena may require more capacity. 189 | * @param enable true - deletion is on 190 | */ 191 | void enableDelete(bool enable) noexcept { m_doDelete = enable; } 192 | 193 | /** 194 | * @brief set Arena capacity, must be done before the first allocation. 195 | * NOTE The method is not MT-safe, read freeMemory() for details. 196 | * @param capacity new capacity 197 | */ 198 | void setCapacity(size_t capacity) { 199 | if (capacity >= (1u << (sizeof(Index) * 8 - 1))) { 200 | throw std::length_error("indexed::ArrayArenaMT capacity is too big for Index type"); 201 | } 202 | if (begin() != nullptr) { 203 | throw std::runtime_error("indexed::ArrayArenaMT capacity must be set before allocation"); 204 | } 205 | m_capacity = Index(capacity); 206 | } 207 | 208 | /** 209 | * @brief Converts pointer to index 210 | * @param ptr pointer to element allocated with the Arena 211 | * @return index of the element in the Arena 212 | */ 213 | Index pointer_to(const void* ptr) const noexcept { 214 | size_t offset = static_cast(ptr) - begin(); 215 | Index pos = Index(uint32_t(offset / sizeof(Index)) / m_elementSizeInIndex); 216 | indexed_assert(elementSize() * pos == offset 217 | && "Attempt to create indexed::Pointer pointing inside an allocated Node, do you use iterator-> ?"); 218 | return pos + 1; 219 | } 220 | 221 | /** 222 | * @brief Allocate object in the Arena 223 | * @param typeSize size of the object in bytes 224 | * @return index assigned to the allocated object 225 | */ 226 | Index allocate(size_t typeSize) { 227 | indexed_assert((elementSize() == 0 || elementSize() == typeSize) 228 | && "indexed::ArrayArenaMT can't handle different-sized allocations"); 229 | // NOTE should first check for m_doDelete? 230 | Index index = m_freeList.pull(*this); 231 | if (index == 0) { 232 | Index futureCapacity = ++m_usedCapacity; 233 | try { 234 | if (futureCapacity > m_capacity) { 235 | throw std::bad_alloc(); 236 | } 237 | if (begin() == nullptr) { 238 | allocateBuffer(typeSize); 239 | } 240 | } catch (const std::exception&) { 241 | --m_usedCapacity; 242 | throw; 243 | } 244 | index = futureCapacity; 245 | } 246 | return index; 247 | } 248 | 249 | /** 250 | * @brief Deallocate object allocated before with the Arena 251 | * @param index index of the object obtained in allocate() 252 | */ 253 | void deallocate(Index index, size_t) noexcept { 254 | if (m_doDelete) { 255 | m_freeList.push(index, *this); 256 | } 257 | } 258 | 259 | /** 260 | * @brief Reset container to the "new" state with no allocated objects. 261 | * The memory isn't released, it's reused. 262 | * NOTE You should be sure that there are no allocated objects or they will never be used. 263 | * NOTE The method is not MT-safe, read freeMemory() for details. 264 | */ 265 | void reset() noexcept { 266 | indexed_warning(m_usedCapacity == m_freeList.listLength(*this) 267 | && "ArrayArenaMT::reset() is called while there are allocated objects"); 268 | m_freeList.reset(); 269 | m_usedCapacity = 0; 270 | } 271 | 272 | /** 273 | * @brief Reset the Arena and release its memory. New memory will be allocated on allocate(). 274 | * NOTE You should be sure that there are no allocated objects or they will never be used. 275 | * NOTE The method is not MT-safe, it should be called only inside a critical section for 276 | * the threads sharing the Arena (or once they've joined in one thread). 277 | */ 278 | void freeMemory() noexcept { 279 | Alloc::free(); 280 | m_elementSizeInIndex = 0; 281 | m_isAllocError = false; 282 | reset(); 283 | } 284 | 285 | ~ArrayArenaMT() noexcept { 286 | indexed_warning(m_usedCapacity == m_freeList.listLength(*this) 287 | && "ArrayArenaMT is destructed while there are allocated objects"); 288 | } 289 | 290 | private: 291 | void* getElementInt(Index index, size_t elementSize) const noexcept { 292 | indexed_assert(index > 0 && index <= m_usedCapacity && "indexed::Pointer is invalid"); 293 | return begin() + elementSize * (index - 1); 294 | } 295 | 296 | void allocateBuffer(size_t typeSize) { 297 | std::lock_guard guard(m_allocMutex); 298 | if (m_isAllocError) { 299 | throw std::bad_alloc(); 300 | } 301 | if (begin() != nullptr) { 302 | return; 303 | } 304 | indexed_assert(typeSize % sizeof(Index) == 0 305 | && "indexed::ArrayArenaMT elementSize must be multiple of Index size"); 306 | try { 307 | Alloc::malloc(typeSize * m_capacity); 308 | } catch (const std::exception&) { 309 | m_isAllocError = true; 310 | throw; 311 | } 312 | m_elementSizeInIndex = decltype(m_elementSizeInIndex)(typeSize / sizeof(Index)); 313 | indexed_assert(m_elementSizeInIndex == typeSize / sizeof(Index) 314 | && "indexed::ArrayArenaMT elementSize is too large"); 315 | } 316 | 317 | Index m_capacity; 318 | uint16_t m_elementSizeInIndex; // size / sizeof(Index) 319 | bool m_doDelete; 320 | bool m_isAllocError; 321 | std::mutex m_allocMutex; 322 | detail::LockFreeSList m_freeList; 323 | std::atomic m_usedCapacity; 324 | }; 325 | 326 | } 327 | -------------------------------------------------------------------------------- /include/indexed/BufAlloc.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | namespace indexed { 15 | 16 | /** 17 | * @brief Helper class for ArrayArena. Uses already allocated memory buffer 18 | */ 19 | class BufAlloc { 20 | public: 21 | BufAlloc(void* bufBegin, size_t bufSize) noexcept 22 | : m_ptr(nullptr) 23 | , m_bufBegin(bufBegin) 24 | , m_bufSize(bufSize) {} 25 | 26 | protected: 27 | void malloc(size_t bytes) { 28 | if (bytes > m_bufSize) { 29 | throw std::bad_alloc(); 30 | } 31 | m_ptr = m_bufBegin; 32 | } 33 | 34 | void* getPtr() const noexcept { 35 | return m_ptr; 36 | } 37 | 38 | void free() noexcept { 39 | m_ptr = nullptr; 40 | } 41 | 42 | private: 43 | void* m_ptr; 44 | void* m_bufBegin; 45 | size_t m_bufSize; 46 | }; 47 | 48 | } 49 | 50 | -------------------------------------------------------------------------------- /include/indexed/Config.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #ifndef INDEXED_DEBUG 10 | #ifdef NDEBUG 11 | #define INDEXED_DEBUG 0 12 | #else 13 | #define INDEXED_DEBUG 1 14 | #endif 15 | #endif 16 | 17 | #if INDEXED_DEBUG == 1 18 | #include 19 | #define indexed_warning(arg) if (!(arg)) std::fprintf(stderr, "Warning `%s' triggered at %s:%d\n", \ 20 | #arg, __FILE__, __LINE__); else ((void)0) 21 | #ifdef NDEBUG 22 | #include 23 | #define indexed_assert(arg) if (!(arg)) { std::fprintf(stderr, "Assertion `%s' failed at %s:%d\n", \ 24 | #arg, __FILE__, __LINE__); std::abort(); } else ((void)0) 25 | #else 26 | #include 27 | #define indexed_assert(arg) assert(arg) 28 | #endif 29 | #else 30 | #define indexed_assert(arg) ((void)0) 31 | #define indexed_warning(arg) ((void)0) 32 | #endif 33 | -------------------------------------------------------------------------------- /include/indexed/MmapAlloc.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | 13 | namespace indexed { 14 | 15 | /** 16 | * @brief Helper class for ArrayArena. Allocates memory via mmap in pages 17 | */ 18 | class MmapAlloc { 19 | public: 20 | MmapAlloc() = default; 21 | 22 | protected: 23 | void malloc(size_t bytes) { 24 | m_memMapped = boost::interprocess::anonymous_shared_memory(bytes); 25 | } 26 | 27 | void* getPtr() const noexcept { 28 | return m_memMapped.get_address(); 29 | } 30 | 31 | void free() noexcept { 32 | m_memMapped = boost::interprocess::mapped_region(); 33 | } 34 | 35 | private: 36 | boost::interprocess::mapped_region m_memMapped; 37 | }; 38 | 39 | } 40 | -------------------------------------------------------------------------------- /include/indexed/NewAlloc.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | 13 | namespace indexed { 14 | 15 | /** 16 | * @brief Helper class for ArrayArena. Allocates memory via ::new 17 | */ 18 | class NewAlloc { 19 | public: 20 | NewAlloc() = default; 21 | 22 | protected: 23 | void malloc(size_t bytes) { 24 | m_memBlock.reset(new char[bytes]); 25 | } 26 | 27 | void* getPtr() const noexcept { 28 | return m_memBlock.get(); 29 | } 30 | 31 | void free() noexcept { 32 | m_memBlock.reset(); 33 | } 34 | 35 | private: 36 | std::unique_ptr m_memBlock; 37 | }; 38 | 39 | } 40 | -------------------------------------------------------------------------------- /include/indexed/Pointer.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | 13 | namespace indexed { 14 | 15 | namespace detail { 16 | 17 | template 18 | class VoidPointer { 19 | public: 20 | using IndexType = typename ArenaConfig::IndexType; 21 | 22 | protected: 23 | IndexType m_index; 24 | 25 | VoidPointer() = default; 26 | 27 | constexpr VoidPointer(std::nullptr_t) 28 | : m_index(0) {} 29 | 30 | VoidPointer(IndexType index) noexcept 31 | : m_index(index) {} 32 | 33 | static 34 | IndexType pointer_to(const void* ref) noexcept { 35 | return ArenaConfig::pointer_to(ref); 36 | } 37 | 38 | void* operator->() const noexcept { 39 | return ArenaConfig::getElement(m_index); 40 | } 41 | 42 | public: 43 | explicit operator bool() const noexcept { 44 | return m_index; 45 | } 46 | 47 | /** 48 | * @brief Get index integer (e.g. for atomic update) 49 | */ 50 | const IndexType& get() const noexcept { 51 | return m_index; 52 | } 53 | 54 | /** 55 | * @brief Get index integer (e.g. for atomic update) 56 | */ 57 | IndexType& get() noexcept { 58 | return m_index; 59 | } 60 | }; 61 | 62 | } 63 | 64 | /** 65 | * @brief C++11 pointer class using integer for storage. Usually you shouldn't use it directly. 66 | * NOTE No support for arrays and pointer arithmetic. 67 | * @tparam Type type of object it points to 68 | * @tparam ArenaConfig Arena config class with ArenaConfigInterface (see doc) 69 | */ 70 | template 71 | class Pointer : public detail::VoidPointer { 72 | private: 73 | using Base = detail::VoidPointer; 74 | 75 | // can't have reference to void, so change return type to int 76 | using TypeOrInt = typename std::conditional::value, int, Type>::type; 77 | 78 | public: 79 | static 80 | Pointer pointer_to(TypeOrInt& ref) noexcept { 81 | return Pointer(Base::pointer_to(&ref)); 82 | } 83 | 84 | Pointer() = default; 85 | 86 | constexpr Pointer(std::nullptr_t) 87 | : Base(nullptr) {} 88 | 89 | explicit Pointer(typename Base::IndexType index) noexcept 90 | : Base(index) {} 91 | 92 | // static_cast from void pointer 93 | explicit Pointer(const Pointer& p) noexcept 94 | : Base(p) {} 95 | 96 | template 97 | Pointer(const Pointer::value, ArenaConfig>::type>& p) noexcept 99 | : Base(p) {} 100 | 101 | Type* operator->() const noexcept { 102 | return static_cast(Base::operator->()); 103 | } 104 | 105 | TypeOrInt& operator*() const noexcept { 106 | return *operator->(); 107 | } 108 | 109 | friend 110 | bool operator==(const Pointer& left, const Pointer& right) noexcept { 111 | return left.m_index == right.m_index; 112 | } 113 | 114 | friend 115 | bool operator!=(const Pointer& left, const Pointer& right) noexcept { 116 | return !(left == right); 117 | } 118 | }; 119 | 120 | } 121 | -------------------------------------------------------------------------------- /include/indexed/SingleArenaConfig.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | 13 | namespace indexed { 14 | 15 | namespace detail { 16 | 17 | template 18 | class StdAllocator; 19 | 20 | template 21 | class ConfigStoreStatic { 22 | public: 23 | using Arena = ArenaType; 24 | 25 | protected: 26 | /** 27 | * @brief Arena used by Pointer and Allocator classes 28 | */ 29 | static ArenaType* arena; 30 | 31 | /** 32 | * @brief Pointer to the highest address of the thread's stack 33 | */ 34 | static void* stackTop; 35 | }; 36 | 37 | template 38 | ArenaType* ConfigStoreStatic::arena = nullptr; 39 | 40 | template 41 | void* ConfigStoreStatic::stackTop = nullptr; 42 | 43 | template 44 | class ConfigStorePerThread { 45 | public: 46 | using Arena = ArenaType; 47 | 48 | protected: 49 | /** 50 | * @brief Arena used by Pointer and Allocator classes, one or per thread 51 | */ 52 | static thread_local ArenaType* arena; 53 | 54 | /** 55 | * @brief Pointer to the highest address of the thread's stack (per thread) 56 | */ 57 | static thread_local void* stackTop; 58 | }; 59 | 60 | template 61 | thread_local ArenaType* ConfigStorePerThread::arena = nullptr; 62 | 63 | template 64 | thread_local void* ConfigStorePerThread::stackTop = nullptr; 65 | 66 | template 68 | class SingleArenaConfig : public ConfigStore { 69 | public: 70 | using Arena = typename ConfigStore::Arena; 71 | 72 | // API for Pointer 73 | using IndexType = typename Arena::IndexType; 74 | 75 | static_assert(sizeof(IndexType) >= 2, "IndexType uint8_t is not supported (not safe)"); 76 | 77 | private: 78 | static constexpr IndexType kOnStackFlag = 1u << (sizeof(IndexType) * 8 - 1); 79 | static constexpr ptrdiff_t kMaxStackSize = 2 * 1024 * 1024; 80 | 81 | using ConfigStore::arena; 82 | using ConfigStore::stackTop; 83 | 84 | public: 85 | // { API for Pointer 86 | static void* getElement(IndexType index) noexcept { 87 | return ((index & kOnStackFlag) != 0) 88 | ? static_cast(stackTop) - kNodeAlignment * (index ^ kOnStackFlag) 89 | : arena->getElement(index); 90 | } 91 | 92 | static IndexType pointer_to(const void* ptr) noexcept { 93 | ptrdiff_t stackOffset = static_cast(stackTop) - static_cast(ptr); 94 | IndexType index = 0; 95 | if ((stackOffset >= 0) && (stackOffset < kMaxStackSize)) { 96 | size_t offset = size_t(stackOffset) / kNodeAlignment; 97 | indexed_assert(offset < kOnStackFlag && "object is too deep in stack for indexed::IndexType"); 98 | indexed_assert(offset * kNodeAlignment == size_t(stackOffset) && "object alignment is wrong, check indexed::ArenaConfig::kAlignment"); 99 | index = IndexType(offset) | kOnStackFlag; 100 | } else { 101 | index = arena->pointer_to(ptr); 102 | } 103 | return index; 104 | } 105 | // } 106 | 107 | // { API for user 108 | 109 | /** 110 | * @brief No op for SingleArenaConfig 111 | */ 112 | static void setContainer(void* containerPtr) noexcept { } 113 | 114 | /** 115 | * @brief Always nullptr for SingleArenaConfig 116 | */ 117 | static void* getContainer() noexcept { return nullptr; } 118 | 119 | /** 120 | * @brief Set Arena used by Pointer and Allocator classes 121 | */ 122 | static void setArena(Arena* arenaPtr) noexcept { arena = arenaPtr; } 123 | 124 | /** 125 | * @brief Get Arena used by Pointer and Allocator classes 126 | */ 127 | static Arena* getArena() noexcept { return arena; } 128 | 129 | /** 130 | * @brief Set pointer to the highest address of the thread's stack 131 | */ 132 | static void setStackTop(void* stackTopPtr) noexcept { stackTop = stackTopPtr; } 133 | 134 | /** 135 | * @brief Get pointer to the highest address of the thread's stack 136 | */ 137 | static void* getStackTop() noexcept { return stackTop; } 138 | 139 | // } 140 | 141 | // { API for Allocator 142 | using ArenaPtr = typename ConfigStore::Arena*; 143 | 144 | // SingleArenaConfig doesn't need to track the container pointer 145 | static constexpr bool kAssignContainerFollowingAllocator = false; 146 | 147 | template 148 | using ArrayAllocator = StdAllocator; 149 | 150 | static ArenaPtr defaultArena() noexcept { return arena; } 151 | 152 | template 153 | static IndexType arenaToPtrIndex(IndexType fromArena) noexcept { return fromArena; } 154 | 155 | template 156 | static IndexType ptrToArenaIndex(IndexType fromPtr) noexcept { return fromPtr; } 157 | 158 | // } 159 | }; 160 | 161 | } 162 | 163 | /** 164 | * @brief ArenaConfig working with one Arena, single thread, stack or Arena as Node location support. 165 | * 166 | * NOTE Access to Pointers / Allocators defined with the config must be done from the same thread. 167 | * NOTE The case when a Node is located on heap, but not in Arena (e.g. in Container) is not supported. 168 | * NOTE The case when Arena storage is allocated on the stack is not supported. 169 | * All Pointers and Allocators defined with the config will use the Arena pointer in the config. 170 | * The pointer can be changed only when there are no Pointers / Allocators created with the old Arena. 171 | * @tparam ArenaType type of Arena it works with 172 | * @tparam ConfigClass user-defined class inherited from the config (unique for the used Allocator type) 173 | * @tparam kNodeAlignment (optional) alignment in bytes for any Pointer using this config 174 | */ 175 | template 176 | struct SingleArenaConfigStatic : 177 | detail::SingleArenaConfig, kNodeAlignment...> {}; 178 | 179 | /** 180 | * @brief ArenaConfig allowing one Arena per thread, many threads, stack or Arena(s) as Node location support. 181 | * 182 | * It's a MT-safe version of SingleArenaConfigStatic with thread local variables arena, stackTop. 183 | * NOTE The case when a Node is located on heap, but not in Arena (e.g. in Container) is not supported. 184 | * NOTE The case when Arena storage is allocated on the stack is not supported. 185 | * All Pointers and Allocators defined with the config will use the Arena pointer in the config. 186 | * The pointer can be changed only when there are no Pointers/Allocators created with the old Arena. 187 | * @tparam ArenaType type of Arena it works with 188 | * @tparam ConfigClass user-defined class inherited from the config (unique for the used Allocator type) 189 | * @tparam kNodeAlignment (optional) alignment in bytes for any Pointer using this config 190 | */ 191 | template 192 | struct SingleArenaConfigPerThread : 193 | detail::SingleArenaConfig, kNodeAlignment...> {}; 194 | 195 | } 196 | -------------------------------------------------------------------------------- /include/indexed/SingleArenaConfigUniversal.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | 13 | namespace indexed { 14 | 15 | namespace detail { 16 | 17 | template 18 | class StdAllocator; 19 | 20 | template 21 | class ConfigStoreUniversalStatic { 22 | public: 23 | using Arena = ArenaType; 24 | 25 | protected: 26 | /** 27 | * @brief Arena used by Pointer and Allocator classes 28 | */ 29 | static ArenaType* arena; 30 | 31 | /** 32 | * @brief Pointer to the highest address of the thread's stack 33 | */ 34 | static void* stackTop; 35 | 36 | /** 37 | * @brief Pointer to the start of the Container object 38 | */ 39 | static void* container; 40 | }; 41 | 42 | template 43 | ArenaType* ConfigStoreUniversalStatic::arena = nullptr; 44 | 45 | template 46 | void* ConfigStoreUniversalStatic::stackTop = nullptr; 47 | 48 | template 49 | void* ConfigStoreUniversalStatic::container = nullptr; 50 | 51 | template 52 | class ConfigStoreUniversalPerThread { 53 | public: 54 | using Arena = ArenaType; 55 | 56 | protected: 57 | /** 58 | * @brief Arena used by Pointer and Allocator classes, one or per thread 59 | */ 60 | static thread_local ArenaType* arena; 61 | 62 | /** 63 | * @brief Pointer to the highest address of the thread's stack (per thread) 64 | */ 65 | static thread_local void* stackTop; 66 | 67 | /** 68 | * @brief Pointer to the start of the Container object, one or per thread 69 | */ 70 | static thread_local void* container; 71 | }; 72 | 73 | template 74 | thread_local ArenaType* ConfigStoreUniversalPerThread::arena = nullptr; 75 | 76 | template 77 | thread_local void* ConfigStoreUniversalPerThread::stackTop = nullptr; 78 | 79 | template 80 | thread_local void* ConfigStoreUniversalPerThread::container = nullptr; 81 | 82 | template 85 | class SingleArenaConfigUniversal : public ConfigStore { 86 | public: 87 | using Arena = typename ConfigStore::Arena; 88 | 89 | // API for Pointer 90 | using IndexType = typename Arena::IndexType; 91 | 92 | static_assert(sizeof(IndexType) >= 2, "IndexType uint8_t is not supported (not safe)"); 93 | 94 | private: 95 | static constexpr IndexType kOnStackFlag = 1u << (sizeof(IndexType) * 8 - 1); 96 | static constexpr IndexType kContainerFlag = 1u << (sizeof(IndexType) * 8 - 2); 97 | static constexpr ptrdiff_t kMaxStackSize = 2 * 1024 * 1024; 98 | 99 | static_assert(!(kObjectSize == 0 && Arena::kIsArrayArenaMT), 100 | "ArrayArenaMT can be used only when kObjectSize is defined"); 101 | 102 | using ConfigStore::arena; 103 | using ConfigStore::stackTop; 104 | using ConfigStore::container; 105 | 106 | public: 107 | // { API for Pointer 108 | static void* getElement(IndexType index) noexcept { 109 | void* res = nullptr; 110 | if ((index & (kContainerFlag | kOnStackFlag)) == 0) { 111 | res = arena->getElement(index); 112 | } else { 113 | if ((index & kOnStackFlag) != 0) { 114 | res = static_cast(stackTop) - kNodeAlignment * (index ^ kOnStackFlag); 115 | } else { 116 | res = static_cast(container) + (index ^ kContainerFlag); 117 | } 118 | } 119 | return res; 120 | } 121 | 122 | static IndexType pointer_to(const void* ptr) noexcept { 123 | if (kObjectSize == 0) { 124 | if (ptr >= arena->begin() && ptr < arena->end()) { 125 | return arena->pointer_to(ptr); 126 | } 127 | } 128 | ptrdiff_t stackOffset = static_cast(stackTop) - static_cast(ptr); 129 | if ((stackOffset >= 0) && (stackOffset < kMaxStackSize)) { 130 | size_t offset = size_t(stackOffset) / kNodeAlignment; 131 | indexed_assert(offset < kOnStackFlag && "object is too deep in stack for indexed::IndexType"); 132 | indexed_assert(offset * kNodeAlignment == size_t(stackOffset) && "object alignment is wrong, check indexed::ArenaConfig::kAlignment"); 133 | return IndexType(offset) | kOnStackFlag; 134 | } 135 | ptrdiff_t containerOffset = static_cast(ptr) - static_cast(container); 136 | if (kObjectSize == 0) { 137 | indexed_assert(containerOffset >= 0 && containerOffset < 256 && "object isn't in container's body"); 138 | return IndexType(containerOffset) | kContainerFlag; 139 | } 140 | return ((containerOffset >= 0) && (containerOffset < ptrdiff_t(kObjectSize))) 141 | ? IndexType(containerOffset) | kContainerFlag 142 | : arena->pointer_to(ptr); 143 | } 144 | // } 145 | 146 | // { API for user 147 | 148 | /** 149 | * @brief Set pointer to the Container object 150 | */ 151 | static void setContainer(void* containerPtr) noexcept { container = containerPtr; } 152 | 153 | /** 154 | * @brief Get pointer to the Container object 155 | */ 156 | static void* getContainer() noexcept { return container; } 157 | 158 | /** 159 | * @brief Set Arena used by Pointer and Allocator classes 160 | */ 161 | static void setArena(Arena* arenaPtr) noexcept { arena = arenaPtr; } 162 | 163 | /** 164 | * @brief Get Arena used by Pointer and Allocator classes 165 | */ 166 | static Arena* getArena() noexcept { return arena; } 167 | 168 | /** 169 | * @brief Set pointer to the highest address of the thread's stack 170 | */ 171 | static void setStackTop(void* stackTopPtr) noexcept { stackTop = stackTopPtr; } 172 | 173 | /** 174 | * @brief Get pointer to the highest address of the thread's stack 175 | */ 176 | static void* getStackTop() noexcept { return stackTop; } 177 | 178 | // } 179 | 180 | // { API for Allocator 181 | using ArenaPtr = typename ConfigStore::Arena*; 182 | 183 | /** 184 | * @brief When true the container pointer is assigned to the Allocator 185 | * address, then it's reset on the Allocator copy or move. Nothing 186 | * is done automatically when the value is false. 187 | */ 188 | static constexpr bool kAssignContainerFollowingAllocator = true; 189 | 190 | template 191 | using ArrayAllocator = StdAllocator; 192 | 193 | static ArenaPtr defaultArena() noexcept { return arena; } 194 | 195 | template 196 | static IndexType arenaToPtrIndex(IndexType fromArena) noexcept { return fromArena; } 197 | 198 | template 199 | static IndexType ptrToArenaIndex(IndexType fromPtr) noexcept { return fromPtr; } 200 | 201 | // } 202 | }; 203 | 204 | } 205 | 206 | /** 207 | * @brief ArenaConfig working with one Arena, single thread, single Container, stack or Arena 208 | * or Container as Node location is supported. 209 | * 210 | * NOTE Access to Pointers / Allocators defined with the config must be done from the same thread. 211 | * NOTE The config supports only one Container at time. Once it's destucted the other one can be used. 212 | * All Pointers and Allocators defined with the config will use the Arena pointer in the config. 213 | * The pointer can be changed only when there are no Pointers / Allocators created with the old Arena. 214 | * The Container pointer can be changed only when the old Container is destucted. 215 | * @tparam ArenaType type of Arena it works with 216 | * @tparam ConfigClass user-defined class inherited from the config (unique for the used Allocator type) 217 | * @tparam kObjectSize (optional) Container object size in bytes, s.t. container + size doesn't overlap Arena. 218 | * @tparam kNodeAlignment (optional) alignment in bytes for any Pointer using this config 219 | */ 220 | template 221 | struct SingleArenaConfigUniversalStatic : 222 | detail::SingleArenaConfigUniversal, Params...> {}; 223 | 224 | /** 225 | * @brief ArenaConfig allowing one Arena per thread, many threads, Container per thread, stack or 226 | * Arena or Container as Node location is supported. 227 | * 228 | * It's a MT-safe version of SingleArenaConfigUniversalStatic with thread local arena, stackTop, container. 229 | * NOTE The config supports only one Container at time. Once it's destucted the other one can be used. 230 | * All Pointers and Allocators defined with the config will use the Arena pointer in the config. 231 | * The pointer can be changed only when there are no Pointers/Allocators created with the old Arena. 232 | * @tparam ArenaType type of Arena it works with 233 | * @tparam ConfigClass user-defined class inherited from the config (unique for the used Allocator type) 234 | * @tparam kObjectSize (optional) Container object size in bytes, s.t. container + size doesn't overlap Arena. 235 | * @tparam kNodeAlignment (optional) alignment in bytes for any Pointer using this config 236 | */ 237 | template 238 | struct SingleArenaConfigUniversalPerThread : 239 | detail::SingleArenaConfigUniversal, Params...> {}; 240 | 241 | } 242 | -------------------------------------------------------------------------------- /include/indexed/StackTop.h: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #pragma once 8 | 9 | #include 10 | 11 | #ifdef WIN32 12 | #include 13 | #include 14 | #else 15 | #include 16 | #include 17 | #endif 18 | 19 | namespace indexed { 20 | 21 | // If the given functions don't compile you may need to write your own getThreadStackTop() 22 | 23 | #ifdef WIN32 24 | 25 | #if !(_WIN32_WINNT >= 0x0602) 26 | #error "Thread API used here requires at least Windows 8" 27 | #endif 28 | 29 | /** 30 | * @brief Get pointer to a thread's stack highest address 31 | */ 32 | inline void* getThreadStackTop() { 33 | HANDLE_PTR lowLimit = 0; 34 | HANDLE_PTR highLimit = 0; 35 | GetCurrentThreadStackLimits(&lowLimit, &highLimit); 36 | return (void*)(highLimit); 37 | } 38 | 39 | #elif defined(__APPLE__) 40 | 41 | /** 42 | * @brief Get pointer to a thread's stack highest address 43 | */ 44 | inline void* getThreadStackTop() { 45 | return pthread_get_stackaddr_np(pthread_self()); 46 | } 47 | 48 | #else 49 | 50 | /** 51 | * @brief Get pointer to a thread's stack highest address 52 | */ 53 | inline void* getThreadStackTop() { 54 | const pthread_t id = pthread_self(); 55 | pthread_attr_t attr; 56 | int err = pthread_getattr_np(id, &attr); 57 | void* stackAddr = nullptr; 58 | size_t stackSize = 0; 59 | if (err == 0) { 60 | err = pthread_attr_getstack(&attr, &stackAddr, &stackSize); 61 | pthread_attr_destroy(&attr); 62 | } 63 | if (err == 0) { 64 | return static_cast(stackAddr) + stackSize; 65 | } else { 66 | throw std::runtime_error("getThreadStackTop() error due to pthread API"); 67 | } 68 | } 69 | 70 | #endif 71 | 72 | } 73 | -------------------------------------------------------------------------------- /tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | configure_file(CMakeLists.txt.in googletest-download/CMakeLists.txt) 2 | 3 | execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" . 4 | RESULT_VARIABLE result 5 | WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download ) 6 | 7 | if(result) 8 | message(FATAL_ERROR "CMake step for googletest failed: ${result}") 9 | endif() 10 | 11 | execute_process(COMMAND ${CMAKE_COMMAND} --build . 12 | RESULT_VARIABLE result 13 | WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download ) 14 | 15 | if(result) 16 | message(FATAL_ERROR "Build step for googletest failed: ${result}") 17 | endif() 18 | 19 | # Prevent overriding the parent project's compiler/linker 20 | # settings on Windows 21 | set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) 22 | 23 | # Add googletest directly to our build. This defines 24 | # the gtest and gtest_main targets. 25 | add_subdirectory(${CMAKE_CURRENT_BINARY_DIR}/googletest-src 26 | ${CMAKE_CURRENT_BINARY_DIR}/googletest-build 27 | EXCLUDE_FROM_ALL) 28 | 29 | set(TEST_SRC 30 | map_test.cpp 31 | unordered_test.cpp 32 | list_test.cpp 33 | intrusive_test.cpp 34 | pointer_test.cpp 35 | ) 36 | 37 | add_executable(indexed_tests ${TEST_SRC}) 38 | 39 | target_link_libraries(indexed_tests 40 | indexed 41 | gtest_main 42 | ) 43 | 44 | add_test(NAME indexed_tests COMMAND indexed_tests) 45 | -------------------------------------------------------------------------------- /tests/CMakeLists.txt.in: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.1) 2 | 3 | project(googletest-download NONE) 4 | 5 | include(ExternalProject) 6 | 7 | ExternalProject_Add(googletest 8 | URL "https://github.com/google/googletest/archive/release-1.10.0.zip" 9 | SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-src" 10 | BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-build" 11 | CONFIGURE_COMMAND "" 12 | BUILD_COMMAND "" 13 | INSTALL_COMMAND "" 14 | TEST_COMMAND "" 15 | ) 16 | -------------------------------------------------------------------------------- /tests/intrusive_test.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | 16 | #include 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | using namespace indexed; 25 | using namespace std; 26 | 27 | using Arena = ArrayArenaMT; 28 | 29 | // NOTE boost::intrusive::list must use Universal config when the container is allocated on heap 30 | // boost::unordered_map doesn't require Universal config 31 | namespace { 32 | // Emulate List, which is defined below. We just need to know its size. 33 | // We can't simply get sizeof(List), since List type depends on ArenaConfig. 34 | struct ListEquivalent { 35 | size_t size; 36 | Arena::IndexType ptr_head; 37 | Arena::IndexType ptr_tail; 38 | }; 39 | 40 | // We have to define kObjectSize for SingleArenaConfigUniversal since we use it together with ArrayArenaMT. 41 | // It's needed in order to avoid data races in ArrayArenaMT (see its doc). It's not needed for ArrayArena. 42 | struct ArenaConfig : public SingleArenaConfigUniversalStatic { 43 | // We want to call setContainer() on our own instead of automatic assignment 44 | static constexpr bool kAssignContainerFollowingAllocator = false; 45 | }; 46 | } 47 | 48 | using Key = int; 49 | using Value = int; 50 | 51 | struct LRUKey : boost::intrusive::list_base_hook>> { 52 | Key key; 53 | 54 | LRUKey() = default; 55 | 56 | LRUKey(const Key& other) 57 | : key(other) {} 58 | 59 | struct Hasher : public hash { 60 | size_t operator()(const LRUKey& val) const { return hash::operator()(val.key); } 61 | }; 62 | 63 | bool operator==(const LRUKey& other) const { return key == other.key; } 64 | }; 65 | 66 | using Pair = pair; 67 | using Alloc = Allocator; 68 | 69 | using List = boost::intrusive::list; 70 | using Map = boost::unordered_map, Alloc>; 71 | 72 | class LRUCache { 73 | private: 74 | struct Init { 75 | Init(List* list) { 76 | // Only list object contains nodes, unordered_map object doesn't. 77 | ArenaConfig::setContainer(list); 78 | } 79 | }; 80 | 81 | public: 82 | LRUCache(size_t capacity) 83 | : m_dummy(&m_list) 84 | , m_capacity(capacity) {} 85 | 86 | Map::const_iterator find(const LRUKey& key) const { return m_map.find(key); } 87 | 88 | Map::const_iterator begin() const { return m_map.begin(); } 89 | 90 | Map::const_iterator end() const { return m_map.end(); } 91 | 92 | Pair& getOrInsert(const Pair& p) { 93 | auto it = m_map.find(p.first); 94 | if (it != m_map.end()) { 95 | auto& node = it->first; 96 | if (&node != &m_list.back()) { 97 | m_list.erase(m_list.iterator_to(node)); 98 | m_list.push_back(const_cast(node)); 99 | } 100 | return *it; 101 | } 102 | return (m_list.size() == m_capacity) ? dropThenAdd(p) : add(p); 103 | } 104 | 105 | private: 106 | void dropOld() { 107 | auto& node = m_list.front(); 108 | m_list.pop_front(); 109 | m_map.erase(node); 110 | } 111 | 112 | Pair& add(const Pair& p) { 113 | auto pair = m_map.insert(p); 114 | m_list.push_back(const_cast(pair.first->first)); 115 | return *pair.first; 116 | } 117 | 118 | Pair& dropThenAdd(const Pair& p) { 119 | dropOld(); 120 | return add(p); 121 | } 122 | 123 | Init m_dummy; 124 | Map m_map; 125 | List m_list; 126 | size_t m_capacity; 127 | }; 128 | 129 | class End2EndIntrusiveTest : public ::testing::Test { 130 | protected: 131 | static constexpr size_t capacity = 10; 132 | 133 | struct Init { 134 | Init(Arena* arena) { 135 | ArenaConfig::setArena(arena); 136 | ArenaConfig::setStackTop(getThreadStackTop()); 137 | } 138 | }; 139 | 140 | Arena m_arena; 141 | Init m_dummy; // ArenaConfig must be initialized before map since map uses Alloc 142 | 143 | End2EndIntrusiveTest() 144 | : m_arena(capacity) 145 | , m_dummy(&m_arena) {} 146 | }; 147 | 148 | TEST_F(End2EndIntrusiveTest, fillLRU) { 149 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 150 | unique_ptr cache{new LRUCache(capacity)}; 151 | for (auto& p : elements) { 152 | cache->getOrInsert(p); 153 | } 154 | for (auto& p : elements) { 155 | auto it = cache->find(p.first); 156 | ASSERT_TRUE(it != cache->end()); 157 | EXPECT_EQ(*it, p); 158 | } 159 | } 160 | 161 | TEST_F(End2EndIntrusiveTest, dropOld) { 162 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 163 | LRUCache cache{4}; 164 | for (auto& p : elements) { 165 | cache.getOrInsert(p); 166 | } 167 | for (auto& p : elements) { 168 | auto it = cache.find(p.first); 169 | ASSERT_TRUE(it != cache.end()); 170 | EXPECT_EQ(*it, p); 171 | } 172 | Pair pair(6, -6); 173 | cache.getOrInsert(pair); 174 | EXPECT_TRUE(cache.find(1) == cache.end()); 175 | auto it = cache.find(pair.first); 176 | ASSERT_TRUE(it != cache.end()); 177 | EXPECT_EQ(*it, pair); 178 | } 179 | 180 | TEST_F(End2EndIntrusiveTest, dontDropOld) { 181 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 182 | LRUCache cache{4}; 183 | for (auto& p : elements) { 184 | cache.getOrInsert(p); 185 | } 186 | Pair pair(2, -6); 187 | cache.getOrInsert(pair); 188 | EXPECT_TRUE(cache.find(1) != cache.end()); 189 | auto it = cache.find(pair.first); 190 | ASSERT_TRUE(it != cache.end()); 191 | EXPECT_NE(*it, pair); 192 | EXPECT_EQ(*it, *(elements.begin() + 1)); 193 | } 194 | 195 | TEST_F(End2EndIntrusiveTest, nodeSize) { 196 | LRUCache cache{capacity}; 197 | cache.getOrInsert({1, 2}); 198 | size_t wsize = sizeof(size_t); 199 | size_t elementSize = 3 * sizeof(uint16_t) + 2 * sizeof(int) + wsize; 200 | elementSize = (elementSize + wsize - 1) / wsize * wsize; // round up 201 | // 24b for 64-bit 202 | EXPECT_EQ(elementSize, m_arena.elementSize()); // + 2 bytes per bucket 203 | } 204 | -------------------------------------------------------------------------------- /tests/list_test.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | #include 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | using namespace indexed; 24 | using namespace std; 25 | 26 | using Arena = ArrayArena; 27 | using ArenaBuf = ArrayArena; 28 | 29 | // NOTE boost::container::slist/list must use Universal config when the container is allocated on heap 30 | namespace { 31 | struct ArenaConfig : public SingleArenaConfigUniversalStatic {}; 32 | struct ArenaConfigStack : public SingleArenaConfigUniversalStatic {}; 33 | } 34 | 35 | using Value = int; 36 | using Alloc = Allocator; 37 | using List = boost::container::list; 38 | using AllocOnStack = Allocator; 39 | using ListOnStack = boost::container::list; 40 | 41 | class End2EndListTest : public ::testing::Test { 42 | protected: 43 | static constexpr size_t capacity = 10; 44 | 45 | struct Init { 46 | Init(Arena* arena) { 47 | ArenaConfig::setArena(arena); 48 | ArenaConfig::setStackTop(getThreadStackTop()); 49 | } 50 | }; 51 | 52 | Arena m_arena; 53 | Init m_dummy; // ArenaConfig must be initialized before map since map uses Alloc 54 | 55 | End2EndListTest() 56 | : m_arena(capacity) 57 | , m_dummy(&m_arena) {} 58 | }; 59 | 60 | TEST_F(End2EndListTest, insertInitList) { 61 | initializer_list elements = {7, 4, 2, 2, 49, 3, -1}; 62 | unique_ptr list{new List(elements)}; 63 | auto srcIt = elements.begin(); 64 | auto listIt = list->begin(); 65 | for (; srcIt != elements.end(); ++srcIt, ++listIt) { 66 | EXPECT_EQ(*srcIt, *listIt); 67 | } 68 | } 69 | 70 | TEST_F(End2EndListTest, push_back) { 71 | initializer_list elements = {7, 4, 2, 2, 49, 3, -1}; 72 | unique_ptr list{new List}; 73 | for (Value v : elements) { 74 | list->push_back(v); 75 | } 76 | auto srcIt = elements.begin(); 77 | auto listIt = list->begin(); 78 | for (; srcIt != elements.end(); ++srcIt, ++listIt) { 79 | EXPECT_EQ(*srcIt, *listIt); 80 | } 81 | } 82 | 83 | TEST_F(End2EndListTest, insert) { 84 | Value elements[] = {7, 4, 2, 2, 49, 3, -1}; 85 | size_t size = sizeof(elements) / sizeof(Value); 86 | List list; 87 | list.push_back(elements[0]); 88 | list.push_back(elements[size - 1]); 89 | for (size_t i = 1; i < size - 1; ++i) { 90 | list.insert(--list.end(), elements[i]); 91 | } 92 | ASSERT_TRUE(equal(elements, elements + size, list.begin())); 93 | } 94 | 95 | TEST_F(End2EndListTest, erase) { 96 | initializer_list elements = {7, 4, 2, 2, 49, 3, -1}; 97 | List list(elements); 98 | list.erase(find(list.begin(), list.end(), 2)); 99 | EXPECT_EQ(elements.size() - 1, list.size()); 100 | } 101 | 102 | TEST_F(End2EndListTest, passAllocExplicitly) { 103 | List list{Alloc(&m_arena)}; 104 | list.push_back(1); 105 | ASSERT_EQ(1, list.back()); 106 | } 107 | 108 | TEST_F(End2EndListTest, mapArenaResetOnContainerClear) { 109 | List list; 110 | EXPECT_EQ(0, m_arena.usedCapacity()); 111 | list.push_back(1); 112 | EXPECT_EQ(8, m_arena.elementSize()); 113 | EXPECT_EQ(1, m_arena.usedCapacity()); 114 | list.pop_front(); // or do map.clear() 115 | EXPECT_EQ(0, m_arena.usedCapacity()); // safe to do arena.freeMemory() 116 | m_arena.freeMemory(); 117 | } 118 | 119 | TEST_F(End2EndListTest, allocateOnStack) { 120 | int buf[8 * 10 / 4]; 121 | ArenaBuf arena(10, true, BufAlloc(buf, sizeof(buf))); 122 | 123 | ArenaConfigStack::setArena(&arena); 124 | ArenaConfigStack::setStackTop(getThreadStackTop()); 125 | 126 | initializer_list elements = {7, 4, 2, 2, 49, 3, -1}; 127 | ListOnStack list; 128 | 129 | for (Value v : elements) { 130 | list.push_back(v); 131 | } 132 | ASSERT_TRUE(equal(elements.begin(), elements.end(), list.begin())); 133 | } 134 | -------------------------------------------------------------------------------- /tests/map_test.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | #include 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | using namespace indexed; 24 | using namespace std; 25 | 26 | using Arena = ArrayArena; 27 | using ArenaBuf = ArrayArena; 28 | 29 | // NOTE boost::container::set/map must use Universal config when the container is allocated on heap 30 | namespace { 31 | struct ArenaConfig : public SingleArenaConfigUniversalStatic {}; 32 | struct ArenaConfigStack : public SingleArenaConfigUniversalStatic {}; 33 | } 34 | 35 | using Key = int; 36 | using Value = int; 37 | using Pair = pair; 38 | using Alloc = Allocator; 39 | using Map = boost::container::map, Alloc>; 40 | using AllocOnStack = Allocator; 41 | using MapOnStack = boost::container::map, AllocOnStack>; 42 | 43 | class End2EndMapTest : public ::testing::Test { 44 | protected: 45 | static constexpr size_t capacity = 10; 46 | 47 | struct Init { 48 | Init(Arena* arena) { 49 | ArenaConfig::setArena(arena); 50 | ArenaConfig::setStackTop(getThreadStackTop()); 51 | } 52 | }; 53 | 54 | Arena m_arena; 55 | Init m_dummy; // ArenaConfig must be initialized before map since map uses Alloc 56 | 57 | End2EndMapTest() 58 | : m_arena(capacity) 59 | , m_dummy(&m_arena) {} 60 | }; 61 | 62 | TEST_F(End2EndMapTest, insertInitList) { 63 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 64 | unique_ptr map{new Map(elements)}; 65 | auto srcIt = elements.begin(); 66 | auto mapIt = map->begin(); 67 | for (; srcIt != elements.end(); ++srcIt, ++mapIt) { 68 | EXPECT_EQ(*srcIt, *mapIt); 69 | } 70 | } 71 | 72 | TEST_F(End2EndMapTest, insert) { 73 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 74 | Map map; 75 | for (const Pair& p : elements) { 76 | map.insert(p); 77 | } 78 | for (auto srcIt = elements.begin(); srcIt != elements.end(); ++srcIt) { 79 | EXPECT_EQ(srcIt->second, (*map.find(srcIt->first)).second); 80 | } 81 | } 82 | 83 | TEST_F(End2EndMapTest, emplace) { 84 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 85 | Map map; 86 | for (const Pair& p : elements) { 87 | map.emplace(p.first, p.second); 88 | } 89 | ASSERT_TRUE(equal(elements.begin(), elements.end(), map.begin())); 90 | } 91 | 92 | TEST_F(End2EndMapTest, eraseByKey) { 93 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 94 | unique_ptr map{new Map(elements)}; 95 | for (const Pair& p : elements) { 96 | map->erase(p.first); 97 | } 98 | ASSERT_EQ(0, map->size()); 99 | } 100 | 101 | TEST_F(End2EndMapTest, eraseByIter) { 102 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 103 | unique_ptr map{new Map(elements.begin(), elements.end())}; 104 | for (const Pair& p : elements) { 105 | map->erase(map->find(p.first)); 106 | } 107 | ASSERT_EQ(0, map->size()); 108 | } 109 | 110 | TEST_F(End2EndMapTest, passAllocExplicitly) { 111 | Map map{Alloc(&m_arena)}; 112 | map.emplace(1, 2); 113 | auto it = map.find(1); 114 | ASSERT_TRUE(it != map.end()); 115 | ASSERT_EQ(2, (*it).second); 116 | } 117 | 118 | TEST_F(End2EndMapTest, passAllocAndCompareExplicitly) { 119 | Map map{less(), Alloc(&m_arena)}; 120 | map.emplace(1, 2); 121 | auto it = map.find(1); 122 | ASSERT_TRUE(it != map.end()); 123 | ASSERT_EQ(2, (*it).second); 124 | } 125 | 126 | TEST_F(End2EndMapTest, mapArenaResetOnContainerClear) { 127 | Map map; 128 | EXPECT_EQ(0, m_arena.usedCapacity()); 129 | map.emplace(1, 2); 130 | EXPECT_EQ(1, m_arena.usedCapacity()); 131 | map.erase(1); // or do map.clear() 132 | EXPECT_EQ(0, m_arena.usedCapacity()); // safe to do arena.freeMemory() 133 | m_arena.freeMemory(); 134 | } 135 | 136 | TEST_F(End2EndMapTest, allocateOnStack) { 137 | int buf[10 * 24 / 4]; 138 | ArenaBuf arena(10, true, BufAlloc(buf, sizeof(buf))); 139 | 140 | ArenaConfigStack::setArena(&arena); 141 | ArenaConfigStack::setStackTop(getThreadStackTop()); 142 | 143 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 144 | MapOnStack map; 145 | 146 | for (const Pair& p : elements) { 147 | map.insert(p); 148 | } 149 | for (auto srcIt = elements.begin(); srcIt != elements.end(); ++srcIt) { 150 | EXPECT_EQ(srcIt->second, (*map.find(srcIt->first)).second); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /tests/pointer_test.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include 16 | 17 | using namespace indexed; 18 | using namespace std; 19 | 20 | using Arena = ArrayArena; 21 | 22 | namespace { 23 | struct ArenaConfig : public SingleArenaConfigStatic {}; 24 | } 25 | 26 | class PointerTest : public ::testing::Test { 27 | protected: 28 | static constexpr size_t capacity = 12; 29 | 30 | struct Init { 31 | Init(Arena* arena) { 32 | ArenaConfig::setArena(arena); 33 | ArenaConfig::setStackTop(getThreadStackTop()); 34 | } 35 | }; 36 | 37 | Arena m_arena; 38 | Init m_dummy; 39 | 40 | PointerTest() 41 | : m_arena(capacity) 42 | , m_dummy(&m_arena) {} 43 | }; 44 | 45 | TEST_F(PointerTest, castToConstPointer) { 46 | Allocator alloc; 47 | Pointer ptr = alloc.allocate(1); 48 | Pointer ptrToConst = ptr; 49 | ptrToConst = ptr; 50 | EXPECT_NE(ptrToConst, nullptr); 51 | EXPECT_EQ(ptrToConst, ptr); 52 | alloc.deallocate(ptr, 1); 53 | } 54 | 55 | TEST_F(PointerTest, castToVoidPointer) { 56 | Allocator alloc; 57 | Pointer ptr = alloc.allocate(1); 58 | Pointer ptrToVoid = ptr; 59 | ptrToVoid = ptr; 60 | EXPECT_NE(ptrToVoid, nullptr); 61 | EXPECT_EQ(ptrToVoid, ptr); 62 | alloc.deallocate(ptr, 1); 63 | } 64 | 65 | TEST_F(PointerTest, castFromVoidPointer) { 66 | Allocator alloc; 67 | Pointer ptr = alloc.allocate(1); 68 | Pointer ptrToVoid = ptr; 69 | Pointer ptr2 = static_cast>(ptrToVoid); 70 | EXPECT_EQ(ptr2, ptr); 71 | alloc.deallocate(ptr, 1); 72 | } 73 | 74 | struct BaseClass { 75 | int member; 76 | }; 77 | 78 | struct DerivedClass : BaseClass {}; 79 | 80 | TEST_F(PointerTest, castToBasePointer) { 81 | Allocator alloc; 82 | Pointer ptr = alloc.allocate(1); 83 | Pointer ptrToBase = ptr; 84 | ptrToBase = ptr; 85 | EXPECT_EQ(ptrToBase, ptr); 86 | alloc.deallocate(ptr, 1); 87 | } 88 | 89 | TEST_F(PointerTest, castToDerivedPointer) { 90 | Allocator alloc; 91 | Pointer ptr = alloc.allocate(1); 92 | Pointer ptrToBase = ptr; 93 | Pointer ptr2 = static_cast>(ptrToBase); 94 | EXPECT_EQ(ptr2, ptr); 95 | alloc.deallocate(ptr, 1); 96 | } 97 | 98 | TEST_F(PointerTest, usePointerInArena) { 99 | Allocator alloc; 100 | Pointer ptr = alloc.allocate(1); 101 | *ptr = 1; 102 | EXPECT_EQ(*ptr, 1); 103 | EXPECT_EQ(ptr.operator->(), reinterpret_cast(m_arena.begin())); 104 | auto ptr2 = Pointer::pointer_to(*ptr); 105 | EXPECT_EQ(ptr, ptr2); 106 | alloc.deallocate(ptr, 1); 107 | } 108 | 109 | TEST_F(PointerTest, usePointerOnStack) { 110 | Pointer ptr = nullptr; 111 | int v = 1; 112 | ptr = ptr.pointer_to(v); 113 | EXPECT_EQ(ptr.operator->(), &v); 114 | EXPECT_EQ(*ptr, 1); 115 | *ptr = 2; 116 | EXPECT_EQ(v, 2); 117 | } 118 | -------------------------------------------------------------------------------- /tests/unordered_test.cpp: -------------------------------------------------------------------------------- 1 | 2 | // Copyright Alexander Bulovyatov 2018. 3 | // Distributed under the Boost Software License, Version 1.0. 4 | // (See accompanying file ../LICENSE_1_0.txt or copy at 5 | // https://www.boost.org/LICENSE_1_0.txt) 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | using namespace indexed; 22 | using namespace std; 23 | 24 | using Arena = ArrayArena; 25 | 26 | // NOTE boost::unordered_set/map can use non-Universal config even if the container is allocated on heap 27 | namespace { 28 | struct ArenaConfig : public SingleArenaConfigStatic {}; 29 | } 30 | 31 | using Key = int; 32 | using Value = int; 33 | using Pair = pair; 34 | using Alloc = Allocator; 35 | using Map = boost::unordered_map, std::equal_to, Alloc>; 36 | 37 | class End2EndUnorderedTest : public ::testing::Test { 38 | protected: 39 | static constexpr size_t capacity = 12; 40 | 41 | struct Init { 42 | Init(Arena* arena) { 43 | ArenaConfig::setArena(arena); 44 | ArenaConfig::setStackTop(getThreadStackTop()); 45 | } 46 | }; 47 | 48 | Arena m_arena; 49 | Init m_dummy; // ArenaConfig must be initialized before map since map uses Alloc 50 | 51 | End2EndUnorderedTest() 52 | : m_arena(capacity) 53 | , m_dummy(&m_arena) {} 54 | }; 55 | 56 | TEST_F(End2EndUnorderedTest, insertInitList) { 57 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 58 | unique_ptr map{new Map(elements)}; 59 | for (auto srcIt = elements.begin(); srcIt != elements.end(); ++srcIt) { 60 | EXPECT_EQ(srcIt->second, map->find(srcIt->first)->second); 61 | } 62 | } 63 | 64 | TEST_F(End2EndUnorderedTest, insert) { 65 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 66 | Map map; 67 | for (const Pair& p : elements) { 68 | map.insert(p); 69 | } 70 | for (auto srcIt = elements.begin(); srcIt != elements.end(); ++srcIt) { 71 | EXPECT_EQ(srcIt->second, map.find(srcIt->first)->second); 72 | } 73 | } 74 | 75 | TEST_F(End2EndUnorderedTest, emplace) { 76 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 77 | Map map; 78 | for (const Pair& p : elements) { 79 | map.emplace(p.first, p.second); 80 | } 81 | for (auto srcIt = elements.begin(); srcIt != elements.end(); ++srcIt) { 82 | EXPECT_EQ(srcIt->second, map.find(srcIt->first)->second); 83 | } 84 | } 85 | 86 | TEST_F(End2EndUnorderedTest, eraseByKey) { 87 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 88 | unique_ptr map{new Map(elements)}; 89 | for (const Pair& p : elements) { 90 | map->erase(p.first); 91 | } 92 | ASSERT_EQ(0, map->size()); 93 | } 94 | 95 | TEST_F(End2EndUnorderedTest, eraseByIter) { 96 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 97 | unique_ptr map{new Map(elements.begin(), elements.end())}; 98 | for (const Pair& p : elements) { 99 | map->erase(map->find(p.first)); 100 | } 101 | ASSERT_EQ(0, map->size()); 102 | } 103 | 104 | TEST_F(End2EndUnorderedTest, copyContainer) { 105 | initializer_list elements = {{1, -1}, {2, -2}, {3, -3}, {5, -5}}; 106 | unique_ptr map{new Map(elements.begin(), elements.end())}; 107 | Map another; 108 | another = *map; 109 | for (auto srcIt = elements.begin(); srcIt != elements.end(); ++srcIt) { 110 | EXPECT_EQ(srcIt->second, another.find(srcIt->first)->second); 111 | } 112 | } 113 | 114 | TEST_F(End2EndUnorderedTest, mapArenaNoResetOnContainerClear) { 115 | Map map; 116 | EXPECT_EQ(0, m_arena.usedCapacity()); 117 | map.emplace(1, 2); 118 | EXPECT_EQ(2, m_arena.usedCapacity()); // 1 node is extra allocated for internal use 119 | map.clear(); 120 | EXPECT_EQ(1, m_arena.allocatedCount()); // 1 internal node is still not freed 121 | EXPECT_EQ(2, m_arena.usedCapacity()); 122 | map = Map(); // force map to deallocate all 123 | EXPECT_EQ(0, m_arena.usedCapacity()); // safe to do arena.freeMemory() 124 | m_arena.freeMemory(); 125 | } 126 | 127 | TEST_F(End2EndUnorderedTest, passAllocExplicitly) { 128 | Map map{Alloc(&m_arena)}; 129 | map.emplace(1, 2); 130 | auto it = map.find(1); 131 | ASSERT_TRUE(it != map.end()); 132 | ASSERT_EQ(2, it->second); 133 | } 134 | 135 | TEST_F(End2EndUnorderedTest, passAndParamsExplicitly) { 136 | Map map(0, hash(), equal_to(), Alloc(&m_arena)); 137 | map.emplace(1, 2); 138 | auto it = map.find(1); 139 | ASSERT_TRUE(it != map.end()); 140 | ASSERT_EQ(2, it->second); 141 | } 142 | --------------------------------------------------------------------------------