├── .gitignore ├── LICENSE ├── README.md ├── concurrency ├── executor.h ├── latch.h ├── semaphore.h ├── shared_mutex.h └── spin_lock.h ├── pthread_wrapper ├── pthread_local_ptr.h ├── pthread_shared_mutex.h └── pthread_spinlock.h ├── test ├── test_executor.h ├── test_latch.h ├── test_lru_cache.h ├── test_pthread_local_ptr.h ├── test_queued_semaphore.h ├── test_semaphore.h ├── test_shared_mutex.h └── test_spin_lock.h └── util ├── bits ├── invoke.h ├── make_unique.h ├── rvalue_wrapper.h └── scope_guard.h ├── lru_cache.h └── util.h /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | *.a 27 | *.lib 28 | 29 | # Executables 30 | *.exe 31 | *.out 32 | *.app 33 | 34 | # IDE stuff 35 | .settings/ 36 | .project 37 | .cproject 38 | 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2017 Jason Shuo Zang 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # concurrency-cpp11 2 | **A concurrency toolbox for c++11, including a cached thread pool executor, a shared timed mutex, a fair semaphore and several other utilities.** 3 | 4 | This little library focuses on making C++11 multithreaded application development a little more comfortable, with several concurrency utilities that is designed to be easy to use. 5 | 6 | As an all header library, you can just drop the headers into your codebase and start using it 7 | using its simple API. No external dependencies. 8 | 9 | This project is released under BSD 2-Clause License. 10 | 11 | ### What you will find here 12 | 13 | + A thread pool executor with cached threads 14 | + An alternative, non-starving implementation of shared timed mutex for C++11 codebases 15 | + A fair, queued semaphore and a simple semaphore for higher throughput 16 | + A count-down latch 17 | + Several implementations of user-space spin locks 18 | + C++ wrappers for pthread spin lock and pthread shared mutex 19 | + A C++ wrapper for pthread_specific *for pre-c++11 code* to make pthread-specific-based thread 20 | local storage much easier to use (It does not require C++11) 21 | + A concurrent LRU Cache for caching objects in memory 22 | + Other miscellaneous utilites, including a complete implementation of C++14 std::make_unique, 23 | C++17 std::invoke, a rvalue wrapper for invoking function overloads taking rvalue reference 24 | parameters through std::bind, and a lambda-expression-based scope guard 25 | 26 | ### Prerequisites 27 | 28 | C++11, and only C++11. 29 | 30 | ### Examples 31 | 32 | #### Thread pool 33 | 34 | ```c++ 35 | auto exec = conc11::make_cached_thread_pool(); 36 | std::future f = exec->submit(func, param1, param2); 37 | f.get(); 38 | exec->shutdown(); 39 | exec->await_termination(); 40 | ``` 41 | 42 | #### Semaphore 43 | 44 | ```c++ 45 | using SemType = conc11::QueuedSemaphore; 46 | SemType* sem; 47 | int ctr; 48 | 49 | void thread_func() { 50 | SemaphoreGuard sg(*sem, 1); 51 | ctr += 1; 52 | } 53 | 54 | int main() { 55 | sem = new SemType(1); 56 | std::vector threads; 57 | for (int i = 0; i < 10; ++i) { 58 | threads.emplace(thread_func); 59 | } 60 | for (auto& th : threads) { 61 | th.join(); 62 | } 63 | assert(ctr==10); 64 | } 65 | ``` 66 | 67 | #### Shared mutex 68 | 69 | ```c++ 70 | std::vector shared_data; 71 | using SharedMutexType = conc11::SharedTimedMutex; 72 | 73 | void reader_func(SharedMutexType* smtx) { 74 | conc11::SharedLock lck(*smtx); 75 | // do readonly work with shared_data 76 | } 77 | 78 | void writer_func(SharedMutexType* smtx) { 79 | std::unique_lock lck(*smtx); 80 | // write to shared data 81 | } 82 | 83 | int main() { 84 | SharedMutexType smtx; 85 | std::thread reader_thread(reader_func, &smtx); 86 | std::thread writer_thread(writer_func, &smtx); 87 | reader_thread.join(); 88 | writer_thread.join(); 89 | } 90 | ``` 91 | 92 | #### Pthread local ptr 93 | 94 | ```c++ 95 | static conc11::PThreadLocalPtr> tlstr; // tlstr dereference to a thread-local 96 | // instance of std::vector created on first 97 | // dereference operation on each thread 98 | ``` 99 | 100 | #### LRU cache 101 | 102 | ```c++ 103 | conc11::LRUCache> cache(10); 104 | // Use conc11::BlockingLRUCache if multithread accessing is needed 105 | 106 | auto str1 = conc11::make_unique("STR1"); // OK to use std::make_unique if c++14 is 107 | auto str2 = conc11::make_unique("STR2"); // available 108 | cache.set(1, std::move(str1)); 109 | cache.set(2, std::move(str2)); 110 | 111 | std::string o1; 112 | bool ret = cache.get_copy_pointee(1, &o1); 113 | assert(ret); 114 | 115 | std::unique_ptr> o2; 116 | ret = cache.get_move(2, &o2); 117 | assert(ret); 118 | ``` 119 | 120 | -------------------------------------------------------------------------------- /concurrency/executor.h: -------------------------------------------------------------------------------- 1 | /** 2 | * executor.h 3 | */ 4 | #ifndef CONCURRENCY_EXECUTOR_H_ 5 | #define CONCURRENCY_EXECUTOR_H_ 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "../util/util.h" 12 | 13 | namespace conc11 { 14 | 15 | class ExecutorBase { 16 | protected: 17 | class TaskBase { 18 | public: 19 | virtual ~TaskBase(){ 20 | } 21 | 22 | virtual void operator()() = 0; 23 | protected: 24 | TaskBase() = default; 25 | 26 | TaskBase(const TaskBase&) = delete; 27 | TaskBase& operator=(const TaskBase&) = delete; 28 | }; 29 | 30 | template 31 | class Task : public TaskBase { 32 | public: 33 | explicit Task(std::packaged_task&& task) noexcept :task(std::move(task)) { 34 | } 35 | 36 | virtual void operator()() override { 37 | task(); 38 | } 39 | private: 40 | std::packaged_task task; 41 | }; 42 | 43 | template 44 | class UntrackableTask : public TaskBase { 45 | public: 46 | explicit UntrackableTask(Runnable&& r) noexcept:r(std::forward(r)) { 47 | } 48 | 49 | virtual void operator()() override { 50 | try { 51 | r(); 52 | } catch (...) { 53 | // Swallows exceptions from RunnableTask as there are no means to retrive neither 54 | // results nor exceptions 55 | } 56 | } 57 | 58 | private: 59 | typename std::remove_reference::type r; 60 | }; 61 | 62 | ExecutorBase() = default; 63 | ExecutorBase(const ExecutorBase&) = delete; 64 | ExecutorBase& operator=(const ExecutorBase&) = delete; 65 | }; 66 | 67 | /** 68 | * Thread pool executor with thread caching. 69 | * Several helper functions are also provided for constructing preconfigured thread pools. 70 | */ 71 | class ThreadPoolExecutor : public ExecutorBase{ 72 | public: 73 | ThreadPoolExecutor(size_t core_pool_size, 74 | size_t max_pool_size, 75 | std::chrono::nanoseconds::rep timeout_nanoseconds) : 76 | core_pool_size(core_pool_size), max_pool_size(max_pool_size), 77 | timeout_nanoseconds(timeout_nanoseconds), 78 | shut(false), active_count(0) { 79 | for (size_t i = 0; i < core_pool_size; ++i) { 80 | add_worker_locked(true); 81 | } 82 | size_t max_threads = core_pool_size < max_pool_size ? max_pool_size : core_pool_size; 83 | workers.reserve(max_threads); 84 | dead_workers.reserve(max_threads); 85 | } 86 | 87 | ~ThreadPoolExecutor() { 88 | if (!is_terminated()) { 89 | shutdown(); 90 | await_termination(); 91 | } 92 | // Destructor shall not be called from multiple threads so safe to use _locked methods. 93 | reap_dead_workers_locked(); 94 | } 95 | 96 | /** 97 | * Submits a callable and its parameters to be executed at some time in the future. 98 | * The callable object and parameters will be stored using std::bind. The retsult and possible 99 | * exceptions may be retrived with the returned std::future object. 100 | */ 101 | template 102 | auto submit(Callable&& c, Args&&... args) 103 | -> std::future(c), std::forward(args)...))> { 104 | using RetType = decltype( 105 | invoke(std::forward(c), std::forward(args)...)); 106 | 107 | if (shut.load()) { 108 | throw(std::system_error(std::make_error_code(std::errc::permission_denied))); 109 | } 110 | std::packaged_task task( 111 | std::bind(std::forward(c), std::forward(args)...)); 112 | std::future f = task.get_future(); 113 | auto uptr_task = conc11::make_unique>(std::move(task)); 114 | { 115 | std::lock_guard lock(main_lock); 116 | insert_task_locked(std::move(uptr_task)); 117 | } 118 | return f; 119 | } 120 | 121 | /** 122 | * Submits a callable and its parameters to be executed at some time in the future. Unlike 123 | * submit(), result cannot be retrived as no std::future will be returned. If an exception 124 | * occured during execution, the execution will be interrupted and the exception will be 125 | * lost. 126 | * For executing tasks that requires no tracking var std::future this alternative method of 127 | * submitting tasks yields considerably higher performance on some platforms including Linux. 128 | */ 129 | template 130 | void execute(Callable&& c, Args&&... args) { 131 | if (shut.load()) { 132 | throw(std::system_error(std::make_error_code(std::errc::permission_denied))); 133 | } 134 | auto bind_obj = std::bind(std::forward(c), std::forward(args)...); 135 | auto uptr_task = conc11::make_unique>( 136 | std::move(bind_obj)); 137 | { 138 | std::lock_guard lock(main_lock); 139 | insert_task_locked(std::move(uptr_task)); 140 | } 141 | } 142 | 143 | void shutdown() noexcept { 144 | std::lock_guard lock(main_lock); 145 | shut.store(true); 146 | if (is_terminated_locked()) { 147 | wait_cv.notify_all(); 148 | } else { 149 | cv.notify_all(); 150 | } 151 | } 152 | 153 | void await_termination() { 154 | if (is_terminated()) { 155 | return; 156 | } 157 | std::unique_lock lock(main_lock); 158 | if (is_terminated_locked()) { // double check in case last thread my exit between the 159 | // first check and now resulting in deadlock 160 | return; 161 | } 162 | wait_cv.wait(lock, [this](){return is_terminated_locked();}); 163 | } 164 | 165 | template 166 | bool await_termination_for(const std::chrono::duration& timeout_duration) { 167 | auto timeout_time = std::chrono::steady_clock::now() + timeout_duration; 168 | return await_termination_until(timeout_time); 169 | } 170 | 171 | template 172 | bool await_termination_until(const std::chrono::time_point& timeout_time) { 173 | if (is_terminated()) { 174 | return true; 175 | } 176 | std::unique_lock lock(main_lock); 177 | if (is_terminated_locked()) { 178 | return true; 179 | } 180 | return wait_cv.wait_until(lock, timeout_time, [this](){return is_terminated();}); 181 | } 182 | 183 | /** 184 | * Return number of living threads in the thread pool. 185 | */ 186 | size_t get_pool_size() noexcept { 187 | std::lock_guard lock(main_lock); 188 | return workers.size(); 189 | } 190 | 191 | /** 192 | * Return approximate active tasks being executed. 193 | */ 194 | size_t get_active_count() noexcept { 195 | return active_count.load(); 196 | } 197 | 198 | bool is_shutdown() { 199 | return shut.load(); 200 | } 201 | 202 | bool is_terminated() { 203 | std::lock_guard lock(main_lock); 204 | return is_terminated_locked(); 205 | } 206 | 207 | private: 208 | 209 | /** 210 | * Thread worker that manages a worker thread and will remove self from the workers array 211 | * when the thread exits. 212 | * Do not store pointer or reference to worker instances as they might be invalidated. Always 213 | * access from the queue. 214 | */ 215 | class Worker { 216 | public: 217 | Worker(ThreadPoolExecutor& executor, bool core, size_t worker_index, int id) noexcept: 218 | exec(executor), core(core), worker_index(worker_index), 219 | id(id), worker_thread(std::ref(*this)) { 220 | } 221 | Worker(const Worker&) = delete; 222 | Worker& operator=(const Worker&) = delete; 223 | 224 | /* 225 | * Worker func. Should be called only by the corresponding thread of this worker. 226 | */ 227 | void operator()() noexcept { 228 | run(); 229 | } 230 | 231 | void join() { 232 | worker_thread.join(); 233 | } 234 | 235 | int get_id() { 236 | return id; 237 | } 238 | 239 | private: 240 | 241 | /** 242 | * Fetch ont task from task queue. 243 | * Returns empty pointer when calling worker is non-core and timed out, or the executor 244 | * is shut down when waiting 245 | */ 246 | std::unique_ptr fetch_task_locked(std::unique_lock& lock) { 247 | // Fetch if queue not empty 248 | if (!exec.task_queue.empty()) { 249 | std::unique_ptr t(std::move(exec.task_queue.front())); 250 | exec.task_queue.pop(); 251 | return t; 252 | } 253 | // Wait until queue not empty, executor shut down or timed out 254 | if (core) { 255 | exec.cv.wait(lock, 256 | [this]() {return !exec.task_queue.empty() || exec.shut.load();}); 257 | } else { 258 | auto timeout_time = std::chrono::steady_clock::now() + exec.timeout_nanoseconds; 259 | exec.cv.wait_until(lock, timeout_time, 260 | [this]() {return !exec.task_queue.empty() || exec.shut.load();}); 261 | } 262 | // Fetch if queue not empty, otherwise return nullptr 263 | if (!exec.task_queue.empty()) { 264 | std::unique_ptr t(std::move(exec.task_queue.front())); 265 | exec.task_queue.pop(); 266 | return t; 267 | } else { 268 | return std::unique_ptr(); 269 | } 270 | } 271 | 272 | /** 273 | * Remove self by swapping state with last element in the worker array and decrease size. 274 | * Assumes the thread associated with this Worker instance owns the main lock. 275 | */ 276 | void remove_self_locked() { 277 | std::swap(exec.workers[worker_index], exec.workers.back()); 278 | exec.workers[worker_index]->worker_index = worker_index; 279 | exec.dead_workers.emplace_back(std::move(exec.workers.back())); 280 | exec.workers.pop_back(); 281 | } 282 | 283 | void run() { 284 | // There is a small period where worker_thread seen in the new thread is invalid. 285 | while (true) { // Worker main loop 286 | std::unique_lock lock(exec.main_lock); 287 | if (exec.shut.load() && exec.task_queue.empty()) { 288 | break; 289 | } 290 | std::unique_ptr task = fetch_task_locked(lock); 291 | if (!task) { 292 | break; 293 | } 294 | ++exec.active_count; 295 | lock.unlock(); 296 | (*task)(); 297 | // task.reset(); 298 | lock.lock(); 299 | exec.reap_dead_workers_locked(); 300 | --exec.active_count; 301 | } 302 | { 303 | // handle thread exit 304 | std::lock_guard lock(exec.main_lock); 305 | remove_self_locked(); 306 | if (exec.is_terminated_locked()) { 307 | // Last worker exit after shutdown, finalize executor and notify all waiters 308 | exec.wait_cv.notify_all(); 309 | } 310 | } 311 | } 312 | 313 | // The executor this worker belongs to 314 | ThreadPoolExecutor& exec; 315 | // Is core thread or not. Core threads do not exit after a timeout period. 316 | bool core = false; 317 | // Index of this worker instance in the worker list. 318 | size_t worker_index = 0; 319 | // Instance of std::thread corresponding to this 320 | int id; 321 | std::thread worker_thread; 322 | }; 323 | 324 | void insert_task_locked(std::unique_ptr&& task) { 325 | task_queue.emplace(std::move(task)); 326 | size_t idle_workers = workers.size() - active_count.load(); 327 | if (idle_workers == 0 && workers.size() < max_pool_size) { 328 | add_worker_locked(false); 329 | } 330 | cv.notify_one(); 331 | } 332 | 333 | void add_worker_locked(bool core) { 334 | static int id = 1; 335 | workers.emplace_back(new Worker(*this, core, workers.size(), id++)); 336 | } 337 | 338 | bool is_terminated_locked() { 339 | return shut.load() && task_queue.empty() && workers.empty(); 340 | } 341 | 342 | void reap_dead_workers_locked() { 343 | if (dead_workers.empty()) { 344 | return; 345 | } 346 | for (const auto &worker : dead_workers) { 347 | worker->join(); 348 | } 349 | dead_workers.clear(); 350 | } 351 | 352 | const size_t core_pool_size; 353 | const size_t max_pool_size; 354 | const std::chrono::nanoseconds timeout_nanoseconds; 355 | 356 | std::queue> task_queue; 357 | 358 | std::vector> workers; 359 | std::vector> dead_workers; 360 | 361 | std::mutex main_lock; 362 | std::condition_variable cv; 363 | std::condition_variable wait_cv; 364 | std::atomic shut; 365 | std::atomic active_count; 366 | }; 367 | 368 | // Helper functions for constructing thread pools. 369 | 370 | std::unique_ptr make_single_thread_executor() { 371 | return conc11::make_unique(1, 1, 0L); 372 | } 373 | 374 | std::unique_ptr make_fixed_thread_pool(size_t num_threads) { 375 | return conc11::make_unique(num_threads, num_threads, 0L); 376 | } 377 | 378 | static const size_t MAX_CACHED_THREADS = 1024; 379 | 380 | std::unique_ptr make_cached_thread_pool() { 381 | return conc11::make_unique(1, MAX_CACHED_THREADS, 10 * 1000000000LL); 382 | } 383 | 384 | template 385 | std::unique_ptr make_cached_thread_pool( 386 | const std::chrono::duration cached_thread_timeout_duration){ 387 | return conc11::make_unique(1, 388 | MAX_CACHED_THREADS, 389 | std::chrono::duration_cast(cached_thread_timeout_duration)); 390 | } 391 | 392 | } // namespace conc11 393 | 394 | #endif /* CONCURRENCY_EXECUTOR_H_ */ 395 | -------------------------------------------------------------------------------- /concurrency/latch.h: -------------------------------------------------------------------------------- 1 | /** 2 | * latch.h 3 | */ 4 | #ifndef CONCURRENCY_LATCH_H_ 5 | #define CONCURRENCY_LATCH_H_ 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace conc11 { 12 | 13 | /** 14 | * Single-use count down latch. 15 | */ 16 | class Latch { 17 | public: 18 | Latch(std::ptrdiff_t value):value(value) { 19 | } 20 | 21 | Latch(const Latch&) = delete; 22 | Latch& operator=(const Latch&) = delete; 23 | 24 | /** 25 | * Decrement the counter by 1 and wait for the counter to reach 0 if necessary. 26 | */ 27 | void count_down_and_wait() { 28 | auto v = value.load(std::memory_order_acquire); 29 | if (v <= 0) { 30 | return; 31 | } 32 | count_down(1); 33 | wait(); 34 | } 35 | 36 | /** 37 | * Decrement the counter by n. Calling count_down will not block the caller thread, unless 38 | * when the call is the one that caused the counter to reach 0, on which the caller thread 39 | * will need to acquire a mutex. 40 | */ 41 | void count_down(std::ptrdiff_t n) { 42 | auto v = value.fetch_sub(n, std::memory_order_acq_rel); 43 | if (0 < v && v <= n) { 44 | std::unique_lock lock(mtx); 45 | lock.unlock(); // Sync with the thread that already acquired lock but not yet waiting 46 | cv.notify_all(); 47 | } 48 | } 49 | 50 | /** 51 | * Returns true if the counter has reached 0. 52 | * If the counter is minus it is treated as 0. 53 | */ 54 | bool is_ready() const { 55 | auto v = value.load(std::memory_order_acquire); 56 | return v <= 0; 57 | } 58 | 59 | /** 60 | * Blocks the caller thread until the counter reaches 0, returns immediately if already 61 | * reached 0. 62 | */ 63 | void wait() const { 64 | auto v = value.load(std::memory_order_acquire); 65 | if (v <= 0) { 66 | return; 67 | } 68 | std::unique_lock lock(mtx); 69 | if (value.load(std::memory_order_acquire) <= 0) { 70 | return; 71 | } 72 | cv.wait(lock, [this](){return value.load(std::memory_order_acquire) <= 0;}); 73 | } 74 | 75 | private: 76 | std::atomic value; 77 | mutable std::mutex mtx; 78 | mutable std::condition_variable cv; 79 | }; 80 | 81 | } // namespace conc11 82 | 83 | #endif /* CONCURRENCY_LATCH_H_ */ 84 | -------------------------------------------------------------------------------- /concurrency/semaphore.h: -------------------------------------------------------------------------------- 1 | /* 2 | * semaphore.h 3 | */ 4 | 5 | #ifndef CONCURRENCY_SEMAPHORE_H_ 6 | #define CONCURRENCY_SEMAPHORE_H_ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | namespace conc11 { 17 | 18 | /** 19 | * A fair semaphore with an internal waiting queue. 20 | */ 21 | template 22 | class QueuedSemaphore { 23 | public: 24 | QueuedSemaphore() : 25 | QueuedSemaphore(0) { 26 | } 27 | 28 | explicit QueuedSemaphore(int initial_permits) : 29 | permits(initial_permits) { 30 | } 31 | 32 | QueuedSemaphore(const QueuedSemaphore&) = delete; 33 | QueuedSemaphore& operator=(const QueuedSemaphore&) = delete; 34 | 35 | void acquire() { 36 | acquire(1); 37 | } 38 | 39 | void acquire(unsigned int request) { 40 | try_acquire0(false, request, 41 | (std::chrono::time_point*) nullptr); 43 | } 44 | 45 | void release() { 46 | release(1); 47 | } 48 | 49 | void release(unsigned int request) { 50 | std::lock_guard lock(main_lock); 51 | permits += request; 52 | if (permits >= request_record_min()) { 53 | queue.wake_head(); 54 | } 55 | } 56 | 57 | /** 58 | * Untimed try_acquire. Note that untimed version of try_acquire is not fair. 59 | */ 60 | bool try_acquire() { 61 | return try_acquire(1); 62 | } 63 | 64 | bool try_acquire(unsigned int request) { 65 | std::lock_guard lock(main_lock); 66 | if (permits >= request) { 67 | permits -= request; 68 | return true; 69 | } else { 70 | return false; 71 | } 72 | } 73 | 74 | bool try_acquire_for(unsigned long millis, unsigned int micros) { 75 | return try_acquire_for(1, millis, micros); 76 | } 77 | 78 | bool try_acquire_for(unsigned int request, unsigned long millis, unsigned int micros) { 79 | std::chrono::steady_clock::time_point timeout_time = std::chrono::steady_clock::now() + 80 | std::chrono::milliseconds(millis) + std::chrono::microseconds(micros); 81 | return try_acquire0(true, request, &timeout_time); 82 | } 83 | 84 | template 85 | bool try_acquire_for(const std::chrono::duration& timeout_duration) { 86 | return try_acquire_for(1, timeout_duration); 87 | } 88 | 89 | template 90 | bool try_acquire_for(unsigned int request, 91 | const std::chrono::duration& timeout_duration) { 92 | std::chrono::steady_clock::time_point timeout_time = std::chrono::steady_clock::now() + 93 | timeout_duration; 94 | return try_acquire0(true, request, &timeout_time); 95 | } 96 | 97 | template 98 | bool try_acquire_until(const std::chrono::time_point &timeout_time) { 99 | return try_acquire_until(1, &timeout_time); 100 | } 101 | 102 | template 103 | bool try_acquire_until(unsigned int request, 104 | const std::chrono::time_point &timeout_time) { 105 | return try_acquire0(true, &timeout_time); 106 | } 107 | 108 | int available_permits() const noexcept { 109 | return permits.load(); 110 | } 111 | 112 | private: 113 | struct WaitNode { 114 | typename std::conditional::value, 115 | std::condition_variable, 116 | std::condition_variable_any>::type cv; 117 | bool wakeable = false; 118 | WaitNode *prev = nullptr; 119 | WaitNode *next = nullptr; 120 | }; 121 | 122 | /** 123 | * Implementation of the thread waiting queue, a cached linked list of waiting nodes. This 124 | * class is not thread safe and should be protected by the outer semaphore. 125 | */ 126 | class WaitQueue { 127 | public: 128 | 129 | WaitQueue() { 130 | alloc_cache(); 131 | } 132 | 133 | ~WaitQueue() { 134 | while (head) { 135 | WaitNode *next = head->next; 136 | delete head; 137 | head = next; 138 | } 139 | while (cache_head) { 140 | WaitNode *next = cache_head->next; 141 | delete cache_head; 142 | cache_head = next; 143 | } 144 | } 145 | 146 | WaitQueue(const WaitQueue &) = delete; 147 | WaitQueue &operator=(const WaitQueue &) = delete; 148 | 149 | /** 150 | * Enqueue a waiting node at the tail and return a pointer to it. 151 | * May cause memory allocation when cached wait nodes are depleted. 152 | */ 153 | WaitNode *enqueue() { 154 | if (!cache_head) { 155 | alloc_cache(); 156 | cur_queue_capacity <<= 1; 157 | } 158 | WaitNode *cur = cache_head; 159 | cache_head = cache_head->next; 160 | 161 | cur->wakeable = false; 162 | // cur->prev = nullptr; // already guaranteed 163 | cur->next = nullptr; 164 | if (!tail) { 165 | head = cur; 166 | tail = cur; 167 | } else { 168 | tail->next = cur; 169 | cur->prev = tail; 170 | tail = cur; 171 | } 172 | return cur; 173 | } 174 | 175 | /** 176 | * Dequeue WaitNode at head of the queue, returning it to the cache. 177 | */ 178 | void dequeue() { 179 | remove(head); 180 | } 181 | 182 | /** 183 | * Remove a WaitNode from the queue and return it to the cache. Undefined behaviour if node is 184 | * not already in the queue. Do nothing if node is nullptr. 185 | */ 186 | void remove(WaitNode *node) { 187 | if (!node) { 188 | return; 189 | } 190 | if (node == head && head == tail) { 191 | head = nullptr; 192 | tail = nullptr; 193 | } // handle special condition in which the last node is removed from the queue 194 | if (node == head) { 195 | head = node->next; 196 | } 197 | if (node == tail) { 198 | tail = node->prev; 199 | } 200 | if (node->prev) { 201 | node->prev->next = node->next; 202 | } 203 | if (node->next) { 204 | node->next->prev = node->prev; 205 | } 206 | 207 | node->prev = nullptr; 208 | node->next = cache_head; 209 | cache_head = node; 210 | } 211 | 212 | /** 213 | * Wake the thread waiting at head of the queue. 214 | */ 215 | void wake_head() { 216 | if (!head) { 217 | return; 218 | } 219 | head->wakeable = true; 220 | head->cv.notify_all(); 221 | } 222 | 223 | bool is_empty() { 224 | return (!head); 225 | } 226 | 227 | int num_waiting_nodes() { 228 | if (!head) { 229 | return 0; 230 | } else { 231 | int ctr = 0; 232 | WaitNode *p = head; 233 | while (p) { 234 | ctr += 1; 235 | p = p->next; 236 | } 237 | return ctr; 238 | } 239 | } 240 | 241 | private: 242 | 243 | void alloc_cache() { 244 | for (size_t i = 0; i < cur_queue_capacity; ++i) { 245 | WaitNode *cur = new WaitNode(); 246 | cur->next = cache_head; 247 | cache_head = cur; 248 | } 249 | } 250 | 251 | // Doubly linked list as the waiting queue 252 | WaitNode *head = nullptr; 253 | WaitNode *tail = nullptr; 254 | 255 | // **Forward** linked list of cached nodes 256 | WaitNode *cache_head = nullptr; 257 | 258 | size_t cur_queue_capacity = 256; 259 | }; 260 | 261 | template 262 | bool try_acquire0(bool timed, unsigned int request, 263 | const std::chrono::time_point *timeout_time) { 264 | std::unique_lock lock(main_lock); 265 | // printf("permits before acquire: %d\n", permits); 266 | if (permits >= (int) request && queue.is_empty()) { 267 | permits -= request; 268 | return true; 269 | } 270 | 271 | request_record_insert(request); 272 | if (!timed) { 273 | while (true) { 274 | WaitNode *wait_node = queue.enqueue(); 275 | wait_node->cv.wait(lock, [wait_node]() {return wait_node->wakeable;}); 276 | queue.dequeue(); 277 | if (permits >= (int) request) { 278 | break; 279 | } 280 | } 281 | } else { 282 | while (true) { 283 | WaitNode *wait_node = queue.enqueue(); 284 | bool timeout = !(wait_node->cv.wait_until(lock, *timeout_time, 285 | [wait_node]() {return wait_node->wakeable;})); 286 | if (timeout) { 287 | queue.remove(wait_node); 288 | return false; 289 | } 290 | queue.dequeue(); 291 | if (permits >= (int) request) { 292 | break; 293 | } 294 | } 295 | } 296 | request_record_remove(request); 297 | 298 | // When control reaches here current thread is at the head of the queue and 299 | // permits are enough 300 | permits -= request; 301 | if (permits >= request_record_min()) { 302 | queue.wake_head(); // propogate waking signal if there are permits left now 303 | } 304 | if (permits < 0) { 305 | printf("BOOM!"); 306 | std::terminate(); // BOOM when something went very wrong. Will be removed later. 307 | } 308 | 309 | return true; 310 | } 311 | 312 | void request_record_insert(unsigned int request) { 313 | auto iter = request_record.find(request); 314 | if (iter != request_record.end()) { 315 | iter->second += 1; 316 | } else { 317 | request_record.emplace(request, 1); 318 | } 319 | } 320 | 321 | void request_record_remove(unsigned int request) { 322 | auto iter = request_record.find(request); 323 | if (iter != request_record.end()) { 324 | if (iter->second == 1) { 325 | request_record.erase(iter); 326 | } else { 327 | iter->second -= 1; 328 | } 329 | } 330 | } 331 | 332 | int request_record_min() { 333 | if (request_record.begin() != request_record.end()) { 334 | return request_record.begin()->first; 335 | } else { 336 | return 0; 337 | } 338 | } 339 | 340 | std::atomic_int permits = 0; 341 | LockType main_lock; 342 | WaitQueue queue; 343 | std::map request_record; 344 | }; 345 | 346 | /** 347 | * A simple unfair semaphore. Unfair semaphores may starve threads as the thread awaken from 348 | * block waiting is randomly chosen. 349 | */ 350 | template 351 | class SimpleSemaphore { 352 | public: 353 | 354 | explicit SimpleSemaphore(int initial_permits) : 355 | count(initial_permits) { 356 | } 357 | 358 | SimpleSemaphore(const SimpleSemaphore&) = delete; 359 | SimpleSemaphore& operator=(const SimpleSemaphore&) = delete; 360 | 361 | void acquire() { 362 | acquire(1); 363 | } 364 | 365 | void acquire(unsigned int request) { 366 | std::unique_lock ul(mtx); 367 | while (count < (int) request) { 368 | cv.wait(ul); 369 | } 370 | count -= request; 371 | } 372 | 373 | void release() { 374 | release(1); 375 | } 376 | 377 | void release(unsigned int request) { 378 | { 379 | std::lock_guard lock(mtx); 380 | count += request; 381 | } 382 | cv.notify_all(); 383 | } 384 | 385 | bool try_acquire() { 386 | return try_acquire(1); 387 | } 388 | 389 | bool try_acquire(unsigned int request) { 390 | std::lock_guard lock(mtx); 391 | if (count >= (int) request) { 392 | count -= request; 393 | return true; 394 | } else { 395 | return false; 396 | } 397 | } 398 | 399 | bool try_acquire_for(unsigned long millis, unsigned int micros) { 400 | return try_acquire_for(1, millis, micros); 401 | } 402 | 403 | bool try_acquire_for(unsigned int request, unsigned long millis, unsigned int micros) { 404 | std::chrono::steady_clock::time_point timeout_time = std::chrono::steady_clock::now() + 405 | std::chrono::milliseconds(millis) + std::chrono::microseconds(micros); 406 | return try_acquire0(request, timeout_time); 407 | } 408 | 409 | template 410 | bool try_acquire_for(const std::chrono::duration& timeout_duration) { 411 | return try_acquire_for(1, timeout_duration); 412 | } 413 | 414 | template 415 | bool try_acquire_for(unsigned int request, 416 | const std::chrono::duration& timeout_duration) { 417 | std::chrono::steady_clock::time_point timeout_time = std::chrono::steady_clock::now() + 418 | timeout_duration; 419 | return try_acquire0(request, timeout_time); 420 | } 421 | 422 | template 423 | bool try_acquire_until(const std::chrono::time_point &timeout_time) { 424 | return try_acquire_until(1, timeout_time); 425 | } 426 | 427 | template 428 | bool try_acquire_until(unsigned int request, 429 | const std::chrono::time_point &timeout_time) { 430 | return try_acquire0(true, timeout_time); 431 | } 432 | 433 | int available_permits() const noexcept { 434 | return count.load(); 435 | } 436 | 437 | private: 438 | template 439 | bool try_acquire0(unsigned int permits, 440 | const std::chrono::time_point &timeout_time) { 441 | std::unique_lock ul(mtx); 442 | while (count < (int) permits) { 443 | if (cv.wait_until(ul, timeout_time) == std::cv_status::timeout) { 444 | return false; 445 | } 446 | } 447 | count -= permits; 448 | return true; 449 | } 450 | 451 | LockType mtx; 452 | typename std::conditional::value, 453 | std::condition_variable, 454 | std::condition_variable_any>::type cv; 455 | std::atomic_int count; 456 | }; 457 | 458 | /** 459 | * A semaphore wrapper class that provides convenient RAII semaphore owning mechanism during a 460 | * scoped block. Note that this can also be achieved by using SemaphoreLock with a std::lock_guard 461 | * and the effect is equivalent. 462 | */ 463 | template 464 | class SemaphoreGuard { 465 | public: 466 | using SemaphoreType = SemType; 467 | 468 | explicit SemaphoreGuard(SemType& semaphore, unsigned int request) : 469 | sem(semaphore), request(request) { 470 | sem.acquire(request); 471 | } 472 | 473 | SemaphoreGuard(const SemaphoreGuard&) = delete; 474 | SemaphoreGuard& operator=(const SemaphoreGuard&) = delete; 475 | 476 | ~SemaphoreGuard() { 477 | sem.release(request); 478 | } 479 | 480 | private: 481 | SemType& sem; 482 | const unsigned int request; 483 | }; 484 | 485 | /** 486 | * A semaphore adapter class that converts a semaphore (along with a fixed number of permit request) 487 | * to a TimedLockable class which can then be used with standard library components like 488 | * std::lock_guard and std::unique_lock, by forwarding lock, unlock, try_lock, try_lock_for and 489 | * try_lock_until calls to semaphore acquire, release, try_acquire, try_acquire_for and 490 | * try_acquire_until respectively. 491 | * This adapter class does not perform any RAII-style resource managing. Rather it allows RAII 492 | * managing of semaphores with existing standard library components. 493 | */ 494 | template 495 | class SemaphoreTimedLockableAdapter { 496 | public: 497 | using SemaphoreType = SemType; 498 | 499 | explicit SemaphoreTimedLockableAdapter(SemType& semaphore, unsigned int request) : 500 | sem(semaphore), request(request) { 501 | } 502 | 503 | SemaphoreTimedLockableAdapter(const SemaphoreTimedLockableAdapter&) = delete; 504 | SemaphoreTimedLockableAdapter& operator=(const SemaphoreTimedLockableAdapter&) = delete; 505 | 506 | ~SemaphoreTimedLockableAdapter() { 507 | } 508 | 509 | void lock() { 510 | sem.acquire(request); 511 | } 512 | 513 | void unlock() { 514 | sem.release(request); 515 | } 516 | 517 | bool try_lock() { 518 | return sem.try_acquire(request); 519 | } 520 | 521 | template 522 | bool try_lock_for(const std::chrono::duration& timeout_duration) { 523 | return sem.try_acquire_for(request, timeout_duration); 524 | } 525 | 526 | template 527 | bool try_lock_until(const std::chrono::time_point& timeout_time) { 528 | return sem.try_acquire_until(request, timeout_time); 529 | } 530 | 531 | private: 532 | SemType& sem; 533 | const unsigned int request; 534 | }; 535 | 536 | } // namespace conc11 537 | 538 | #endif /* CONCURRENCY_SEMAPHORE_H_ */ 539 | -------------------------------------------------------------------------------- /concurrency/shared_mutex.h: -------------------------------------------------------------------------------- 1 | /** 2 | * shared_mutex.h 3 | * Alternative implementations of shared mutex for C++11. Use C++14 std::shared_timed_mutex and 4 | * C++17 std::shared_mutex if available. 5 | */ 6 | #ifndef CONCURRENCY_SHARED_MUTEX_H_ 7 | #define CONCURRENCY_SHARED_MUTEX_H_ 8 | 9 | namespace conc11 { 10 | 11 | /** 12 | * An implementation of shared timed mutex that satisfies C++14 SharedTimedMutex concept and does 13 | * not starve readers or writers. 14 | */ 15 | class SharedTimedMutex { 16 | public: 17 | SharedTimedMutex() = default; 18 | 19 | SharedTimedMutex(const SharedTimedMutex&) = delete; 20 | SharedTimedMutex& operator=(const SharedTimedMutex&) = delete; 21 | 22 | // Execlusive lock 23 | void lock() { 24 | std::unique_lock lock(mtx); 25 | while (state & WRITER_ENTERED_MASK) { 26 | rgate.wait(lock); 27 | } 28 | state |= WRITER_ENTERED_MASK; 29 | while (state & NUM_READER_MASK) { 30 | wgate.wait(lock); 31 | } 32 | } 33 | 34 | void unlock() { 35 | { 36 | std::lock_guard lock(mtx); 37 | state &= ~WRITER_ENTERED_MASK; // should always set to 0 38 | } 39 | rgate.notify_all(); 40 | } 41 | 42 | bool try_lock() { 43 | std::unique_lock lock(mtx, std::try_to_lock); 44 | if (lock.owns_lock() && state == 0) { 45 | state = WRITER_ENTERED_MASK; 46 | return true; 47 | } else { 48 | return false; 49 | } 50 | } 51 | 52 | template 53 | bool try_lock_for(const std::chrono::duration& timeout_duration) { 54 | return try_lock_until(std::chrono::steady_clock::now() + timeout_duration); 55 | } 56 | 57 | template 58 | bool try_lock_until(const std::chrono::time_point& timeout_time) { 59 | // Untimed mutex blocking, but mutex normally should not take long to acquire 60 | std::unique_lock lock(mtx); 61 | if (!rgate.wait_until(lock, timeout_time, 62 | [this](){return !(state & WRITER_ENTERED_MASK);})) { 63 | return false; 64 | } 65 | state |= WRITER_ENTERED_MASK; 66 | if (!wgate.wait_until(lock, timeout_time, 67 | [this](){return !(state & NUM_READER_MASK);})) { 68 | state &= ~WRITER_ENTERED_MASK; 69 | lock.unlock(); 70 | rgate.notify_all(); 71 | return false; 72 | } 73 | return true; 74 | } 75 | 76 | void lock_shared() { 77 | std::unique_lock lock(mtx); 78 | while ((state & WRITER_ENTERED_MASK) || ((state & NUM_READER_MASK) == NUM_READER_MASK)) { 79 | rgate.wait(lock); 80 | } 81 | state += 1; 82 | } 83 | 84 | void unlock_shared() { 85 | std::unique_lock lock(mtx); 86 | state -= 1; 87 | uint_fast32_t num_readers_left = state & NUM_READER_MASK; 88 | if ((state & WRITER_ENTERED_MASK) && (num_readers_left == 0)) { 89 | lock.unlock(); 90 | wgate.notify_one(); 91 | } else if (num_readers_left == NUM_READER_MASK - 1) { 92 | lock.unlock(); 93 | rgate.notify_one(); 94 | } 95 | } 96 | 97 | bool try_lock_shared() { 98 | std::unique_lock lock(mtx, std::try_to_lock); 99 | if (lock.owns_lock() && !(state & WRITER_ENTERED_MASK) 100 | && ((state & NUM_READER_MASK) != NUM_READER_MASK)) { 101 | state += 1; 102 | return true; 103 | } else { 104 | return false; 105 | } 106 | } 107 | 108 | template 109 | bool try_lock_shared_for(const std::chrono::duration& timeout_duration) { 110 | return try_lock_shared_until(std::chrono::steady_clock::now() + timeout_duration); 111 | } 112 | 113 | template 114 | bool try_lock_shared_until(const std::chrono::time_point& timeout_time) { 115 | std::unique_lock lock(mtx); 116 | if (!rgate.wait_until(lock, timeout_time, 117 | [this](){return !(state & WRITER_ENTERED_MASK) && 118 | ((state & NUM_READER_MASK) != NUM_READER_MASK);})) { 119 | return false; 120 | } 121 | state += 1; 122 | return true; 123 | } 124 | 125 | private: 126 | static const uint_fast32_t WRITER_ENTERED_MASK = 1U << 31; 127 | static const uint_fast32_t NUM_READER_MASK = WRITER_ENTERED_MASK - 1; 128 | 129 | // Main lock 130 | std::mutex mtx; 131 | 132 | // Readers pass this gate has shared ownership. New readers and writers wait at this gate 133 | // if one writer has passed rgate. 134 | std::condition_variable rgate; 135 | 136 | // Writer pass this gate has exclusive ownership. Only the one writer passed rgate waits 137 | // at this gate for all remaining readers to leave. 138 | std::condition_variable wgate; 139 | 140 | // Combined state: The highest bit indicated weather a writer has entered (i.e. passed rgate), 141 | // lower 31 bits is number of active readers. 142 | uint_fast32_t state = 0; 143 | }; 144 | 145 | /** 146 | * An alternative implementation of C++14 SharedTimedMutex concept. 147 | * This implementation has higher read throughput when there are more readers but may 148 | * starves writers. 149 | * Use SharedTimedMutex instead if there is a chance that starvation is of concern. 150 | */ 151 | class ReaderPreferringSharedTimedMutex { 152 | public: 153 | ReaderPreferringSharedTimedMutex() = default; 154 | 155 | ReaderPreferringSharedTimedMutex(const ReaderPreferringSharedTimedMutex&) = delete; 156 | ReaderPreferringSharedTimedMutex& operator=(const ReaderPreferringSharedTimedMutex&) = delete; 157 | 158 | void lock() { 159 | std::unique_lock lock(mtx); 160 | while(state) { 161 | cv.wait(lock); 162 | } 163 | state = WRITER_ACTIVE_MASK; 164 | } 165 | 166 | void unlock() { 167 | { 168 | std::lock_guard lock(mtx); 169 | state = 0; 170 | } 171 | cv.notify_all(); 172 | } 173 | 174 | bool try_lock() { 175 | std::unique_lock lock(mtx, std::try_to_lock); 176 | if (lock.owns_lock() && state == 0) { 177 | state = WRITER_ACTIVE_MASK; 178 | return true; 179 | } else { 180 | return false; 181 | } 182 | } 183 | 184 | template 185 | bool try_lock_for(const std::chrono::duration& timeout_duration) { 186 | return try_lock_until(std::chrono::steady_clock::now() + timeout_duration); 187 | } 188 | 189 | template 190 | bool try_lock_until(const std::chrono::time_point& timeout_time) { 191 | std::unique_lock lock(mtx); 192 | if (!cv.wait_until(lock, timeout_time, [this](){return state == 0;})) { 193 | return false; 194 | } 195 | state = WRITER_ACTIVE_MASK; 196 | return true; 197 | } 198 | 199 | void lock_shared() { 200 | std::unique_lock lock(mtx); 201 | while((state & WRITER_ACTIVE_MASK) || ((state & NUM_READER_MASK) == NUM_READER_MASK)){ 202 | cv.wait(lock); 203 | } 204 | state += 1; 205 | } 206 | 207 | void unlock_shared() { 208 | std::unique_lock lock(mtx); 209 | state -= 1; 210 | if (state == 0) { 211 | lock.unlock(); 212 | cv.notify_all(); 213 | } 214 | } 215 | 216 | bool try_lock_shared() { 217 | std::unique_lock lock(mtx, std::try_to_lock); 218 | if (lock.owns_lock() 219 | && !(state & WRITER_ACTIVE_MASK) && 220 | ((state & NUM_READER_MASK) != NUM_READER_MASK)) { 221 | state += 1; 222 | return true; 223 | } else { 224 | return false; 225 | } 226 | } 227 | 228 | template 229 | bool try_lock_shared_for(const std::chrono::duration& timeout_duration) { 230 | return try_lock_shared_until(std::chrono::steady_clock::now() + timeout_duration); 231 | } 232 | 233 | template 234 | bool try_lock_shared_until(const std::chrono::time_point& timeout_time) { 235 | std::unique_lock lock(mtx); 236 | if (!cv.wait_until(lock, timeout_time, 237 | [this](){return !(state & WRITER_ACTIVE_MASK) && 238 | ((state & NUM_READER_MASK) != NUM_READER_MASK);})) { 239 | return false; 240 | } 241 | state += 1; 242 | return true; 243 | } 244 | 245 | private: 246 | static const uint_fast32_t WRITER_ACTIVE_MASK = 1U << 31; 247 | static const uint_fast32_t NUM_READER_MASK = WRITER_ACTIVE_MASK - 1; 248 | 249 | std::mutex mtx; 250 | std::condition_variable cv; 251 | uint_fast32_t state = 0; 252 | }; 253 | 254 | /** 255 | * An alternative implementation of C++14 shared_lock. Locks underlying shared mutex in shared 256 | * ownership mode. 257 | */ 258 | template 259 | class SharedLock { 260 | public: 261 | // Shared locking 262 | 263 | SharedLock() noexcept: mtx(nullptr), owns(false) { 264 | } 265 | 266 | SharedLock(SharedLock const&) = delete; 267 | SharedLock& operator=(SharedLock const&) = delete; 268 | 269 | SharedLock(SharedLock&& rhs) noexcept : SharedLock() { 270 | swap(rhs); 271 | } 272 | 273 | explicit SharedLock(Mutex& mutex) : mtx(&mutex), owns(true) { 274 | mutex.lock_shared(); 275 | } 276 | 277 | SharedLock(Mutex& mutex, std::defer_lock_t) noexcept: mtx(&mutex), owns(false) { 278 | } 279 | 280 | SharedLock(Mutex& mutex, std::try_to_lock_t): mtx(&mutex), owns(mutex.try_lock_shared()) { 281 | } 282 | 283 | SharedLock(Mutex& mutex, std::adopt_lock_t) noexcept: mtx(&mutex), owns(true) { 284 | } 285 | 286 | template 287 | SharedLock(Mutex& mutex, const std::chrono::duration& timeout_duration): 288 | mtx(&mutex) { 289 | owns = mtx->try_lock_shared_for(timeout_duration); 290 | } 291 | 292 | template 293 | SharedLock(Mutex& mutex, const std::chrono::time_point& timeout_time): 294 | mtx(&mutex) { 295 | owns = mtx->try_lock_shared_until(timeout_time); 296 | } 297 | 298 | ~SharedLock() { 299 | if (owns) { 300 | mtx->unlock_shared(); 301 | } 302 | } 303 | 304 | SharedLock& operator=(SharedLock&& rhs) noexcept { 305 | SharedLock tmp(std::move(rhs)); 306 | tmp.swap(*this); 307 | return *this; 308 | } 309 | 310 | void lock() { 311 | mtx->lock_shared(); 312 | owns = true; 313 | } 314 | 315 | void unlock() { 316 | mtx->unlock_shared(); 317 | owns = false; 318 | } 319 | 320 | bool try_lock() { 321 | owns = mtx->try_lock_shared(); 322 | return owns; 323 | } 324 | 325 | template 326 | bool try_lock_for(const std::chrono::duration& timeout_duration) { 327 | owns = mtx->try_lock_shared_for(timeout_duration); 328 | return owns; 329 | } 330 | 331 | template 332 | bool try_lock_until(const std::chrono::time_point& timeout_duration) { 333 | owns = mtx->try_lock_shared_until(timeout_duration); 334 | return owns; 335 | } 336 | 337 | void swap(SharedLock& rhs) noexcept { 338 | std::swap(mtx, rhs.mtx); 339 | std::swap(owns, rhs.owns); 340 | } 341 | 342 | Mutex* release() noexcept { 343 | owns = false; 344 | Mutex* ret = mtx; 345 | mtx = nullptr; 346 | return ret; 347 | } 348 | 349 | bool owns_lock() const noexcept { 350 | return owns; 351 | } 352 | 353 | explicit operator bool() const noexcept { 354 | return owns; 355 | } 356 | 357 | Mutex* mutex() const noexcept { 358 | return mtx; 359 | } 360 | 361 | private: 362 | Mutex* mtx; 363 | bool owns; 364 | }; 365 | 366 | } // namespace conc11 367 | 368 | #endif /* CONCURRENCY_SHARED_MUTEX_H_ */ 369 | -------------------------------------------------------------------------------- /concurrency/spin_lock.h: -------------------------------------------------------------------------------- 1 | /* 2 | * spin_lock.h 3 | */ 4 | 5 | #ifndef CONCURRENCY_SPINLOCK_H_ 6 | #define CONCURRENCY_SPINLOCK_H_ 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | namespace conc11 { 13 | 14 | static const uint_fast16_t SPIN_CYCLES_BEFORE_YIELD = 100; 15 | static const uint_fast16_t SPIN_CYCLES_BEFORE_YIELD_FAIR = 100; 16 | 17 | /** 18 | * A simple unfair spin lock that satisfies Lockable. 19 | */ 20 | class SpinLock { 21 | public: 22 | 23 | SpinLock() noexcept = default; 24 | SpinLock(const SpinLock &rhs) = delete; 25 | SpinLock &operator=(const SpinLock &rhs) = delete; 26 | 27 | void lock(){ 28 | uint_fast16_t patience = SPIN_CYCLES_BEFORE_YIELD; 29 | while(l.test_and_set(std::memory_order_acquire)) { 30 | patience--; 31 | if (!patience) { 32 | patience = SPIN_CYCLES_BEFORE_YIELD; 33 | std::this_thread::yield(); 34 | } 35 | } 36 | } 37 | 38 | void unlock() { 39 | l.clear(std::memory_order_release); 40 | } 41 | 42 | bool try_lock() { 43 | return !l.test_and_set(std::memory_order_acq_rel); 44 | } 45 | 46 | private: 47 | std::atomic_flag l = ATOMIC_FLAG_INIT; 48 | }; 49 | 50 | /** 51 | * A fair spin lock using ticket lock algorithm that satisfies BasicLockable. 52 | * This type of spin locks tends to become very slow under heavy contention. Consider using 53 | * SpinLock if fairness is not a huge concern. 54 | */ 55 | class FairSpinLock { 56 | public: 57 | FairSpinLock() noexcept: 58 | next(0), active(0) { 59 | } 60 | 61 | FairSpinLock(const FairSpinLock &rhs) = delete; 62 | FairSpinLock &operator=(const FairSpinLock &rhs) = delete; 63 | 64 | void lock() { 65 | unsigned int ticket = next.fetch_add(1, std::memory_order_acq_rel); 66 | uint_fast16_t patience = SPIN_CYCLES_BEFORE_YIELD_FAIR; 67 | while (active.load(std::memory_order_acquire) != ticket) { 68 | patience--; 69 | if (!patience) { 70 | patience = SPIN_CYCLES_BEFORE_YIELD_FAIR; 71 | std::this_thread::yield(); // can be insanely slow without yield 72 | } 73 | } 74 | } 75 | 76 | void unlock() { 77 | active.fetch_add(1, std::memory_order_release); 78 | } 79 | 80 | private: 81 | std::atomic_uint next; 82 | std::atomic_uint active; 83 | }; 84 | 85 | } // namespace conc11 86 | 87 | #endif /* CONCURRENCY_SPINLOCK_H_ */ 88 | -------------------------------------------------------------------------------- /pthread_wrapper/pthread_local_ptr.h: -------------------------------------------------------------------------------- 1 | /** 2 | * pthread_specific_ptr.h 3 | */ 4 | #ifndef PTHREAD_WRAPPER_PTHREAD_LOCAL_PTR_H_ 5 | #define PTHREAD_WRAPPER_PTHREAD_LOCAL_PTR_H_ 6 | 7 | #include 8 | #include 9 | #if __cplusplus >= 201103L 10 | #include 11 | #endif 12 | 13 | namespace conc11 { 14 | 15 | /** 16 | * A C++ wrapper utility for pthread_specific-based thread local storage that will make your life 17 | * a little easier with pre-C++11 thread local. 18 | * 19 | * Usage: Use this wrapper as static, global, file scope static or class static member, then use 20 | * operator* and operator-> to dereference like a pointer. You may also call get() to acquire the 21 | * pointer directly. The first get() or dereferencing will result in the allocation of a thread 22 | * specific object of type T. For pre-C++11 T must be DefaultConstructible. 23 | * With C++11 we have thread_local, but if you insist on trying this you will get a fancier 24 | * constructor that will take parameters of one of the constructers of T, and all thread specific 25 | * instances will be initialized with the parameters. 26 | * Other points to note: 27 | * 1) DO NOT pass reference wrappers to objects that might go out of scope before any further 28 | * thread creation, or it will become a dangling reference. 29 | * 2) The type of the parameters must EXACTLY match one of T's constructors. 30 | * 3) T's constructors that use rvalue references cannot be used 31 | */ 32 | template 33 | class PThreadLocalPtr { 34 | public: 35 | 36 | #if __cplusplus < 201103L 37 | explicit PThreadLocalPtr():valid(false) { 38 | #else 39 | template 40 | explicit PThreadLocalPtr(Args&&... args):valid(false) { 41 | #endif 42 | int ret; 43 | ret = pthread_key_create(&key, &PThreadLocalPtr::deleter); 44 | if (ret == 0) { 45 | valid = true; 46 | } 47 | #if __cplusplus >= 201103L 48 | initializer_func = std::bind(&PThreadLocalPtr::initializer, 49 | std::forward(args)...); 50 | #endif 51 | } 52 | 53 | #if __cplusplus >= 201103L 54 | PThreadLocalPtr(const PThreadLocalPtr &) = delete; 55 | PThreadLocalPtr &operator=(const PThreadLocalPtr &) = delete; 56 | #else 57 | PThreadLocalPtr(const PThreadLocalPtr &); 58 | PThreadLocalPtr &operator=(const PThreadLocalPtr &); 59 | #endif 60 | 61 | ~PThreadLocalPtr() { 62 | pthread_key_delete(key); 63 | } 64 | 65 | bool is_valid() const { 66 | return valid; 67 | } 68 | 69 | T* get() { 70 | T* t = static_cast(pthread_getspecific(key)); 71 | if (t) { 72 | return t; 73 | } 74 | #if __cplusplus >= 201103L 75 | t = initializer_func(); 76 | #else 77 | t = new T(); 78 | #endif 79 | pthread_setspecific(key, t); 80 | return t; 81 | } 82 | 83 | #if __cplusplus >= 201103L 84 | typename std::add_lvalue_reference::type operator *() { 85 | #else 86 | T& operator *() { 87 | #endif 88 | return *get(); 89 | } 90 | 91 | T* operator->() { 92 | return get(); 93 | } 94 | 95 | private: 96 | 97 | #if __cplusplus >= 201103L 98 | template 99 | static T* initializer(Args... args) { 100 | return new T(args...); 101 | } 102 | #endif 103 | 104 | static void deleter(void *t) { 105 | delete static_cast(t); 106 | } 107 | 108 | pthread_key_t key; 109 | bool valid; 110 | #if __cplusplus >= 201103L 111 | std::function initializer_func; 112 | #endif 113 | }; 114 | 115 | } // namespace conc11 116 | 117 | #endif /* PTHREAD_WRAPPER_PTHREAD_LOCAL_PTR_H_ */ 118 | -------------------------------------------------------------------------------- /pthread_wrapper/pthread_shared_mutex.h: -------------------------------------------------------------------------------- 1 | /** 2 | * pthread_shared_mutex.h 3 | */ 4 | #ifndef PTHREAD_WRAPPER_PTHREAD_SHARED_MUTEX_H_ 5 | #define PTHREAD_WRAPPER_PTHREAD_SHARED_MUTEX_H_ 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace conc11 { 14 | 15 | /** 16 | * Wrapper for pthread_rwlock 17 | */ 18 | class PThreadSharedMutex { 19 | public: 20 | PThreadSharedMutex() { 21 | int ret = pthread_rwlock_init(&rwlock_handle, NULL); 22 | if (ret == ENOMEM) { 23 | throw(std::bad_alloc()); 24 | } else if (ret == EAGAIN) { 25 | throw(std::system_error( 26 | std::make_error_code(std::errc::resource_unavailable_try_again))); 27 | } else if (ret == EPERM) { 28 | throw(std::system_error( 29 | std::make_error_code(std::errc::operation_not_permitted))); 30 | } 31 | // Errors not handled: EBUSY, EINVAL 32 | assert(ret == 0); 33 | } 34 | 35 | PThreadSharedMutex(const PThreadSharedMutex&) = delete; 36 | PThreadSharedMutex& operator=(const PThreadSharedMutex&) = delete; 37 | 38 | ~PThreadSharedMutex() { 39 | int ret = pthread_rwlock_destroy(&rwlock_handle); 40 | (void) ret; 41 | // Errors not handled: EBUSY, EINVAL 42 | assert(ret == 0); 43 | } 44 | 45 | void lock() { 46 | int ret = pthread_rwlock_wrlock(&rwlock_handle); 47 | if (ret == EDEADLK) { 48 | throw(std::system_error( 49 | std::make_error_code(std::errc::resource_deadlock_would_occur))); 50 | } 51 | // Errors not handled: EINVAL 52 | assert(ret == 0); 53 | } 54 | 55 | void unlock() { 56 | int ret = pthread_rwlock_unlock(&rwlock_handle); 57 | (void) ret; 58 | // Errors not handled: EPERM, EBUSY, EINVAL 59 | assert(ret == 0); 60 | } 61 | 62 | bool try_lock() { 63 | int ret = pthread_rwlock_trywrlock(&rwlock_handle); 64 | if (ret == EBUSY) { 65 | return false; 66 | } 67 | // Errors not handled: EINVAL 68 | assert(ret == 0); 69 | return true; 70 | } 71 | 72 | void lock_shared() { 73 | int ret; 74 | while (true) { 75 | ret = pthread_rwlock_rdlock(&rwlock_handle); 76 | if (ret != EAGAIN) { 77 | break; 78 | } 79 | } 80 | if (ret == EDEADLK) { 81 | throw(std::system_error( 82 | std::make_error_code(std::errc::resource_deadlock_would_occur))); 83 | } 84 | // Errors not handled: EINVAL 85 | assert(ret == 0); 86 | } 87 | 88 | bool try_lock_shared() { 89 | int ret = pthread_rwlock_tryrdlock(&rwlock_handle); 90 | if (ret == EBUSY || ret == EAGAIN) { 91 | return false; 92 | } 93 | // Errors not handled: EINVAL 94 | assert(ret == 0); 95 | return true; 96 | } 97 | 98 | void unlock_shared() { 99 | unlock(); 100 | } 101 | 102 | private: 103 | pthread_rwlock_t rwlock_handle; 104 | }; 105 | 106 | } // namespace conc11 107 | 108 | #endif /* PTHREAD_WRAPPER_PTHREAD_SHARED_MUTEX_H_ */ 109 | -------------------------------------------------------------------------------- /pthread_wrapper/pthread_spinlock.h: -------------------------------------------------------------------------------- 1 | /* 2 | * pthread_spin_lock.h 3 | */ 4 | #ifndef PTHREAD_WRAPPER_PTHREAD_SPINLOCK_H_ 5 | #define PTHREAD_WRAPPER_PTHREAD_SPINLOCK_H_ 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | namespace conc11 { 13 | 14 | /** 15 | * A thin wrapper for pthread_spin, satisfies Lockable. 16 | */ 17 | class PThreadSpinLockWrapper { 18 | public: 19 | PThreadSpinLockWrapper() { 20 | int ret = pthread_spin_init(&l, PTHREAD_PROCESS_PRIVATE); 21 | if (ret == ENOMEM) { 22 | throw(std::bad_alloc()); 23 | } else if (ret == EAGAIN) { 24 | throw(std::system_error( 25 | std::make_error_code(std::errc::resource_unavailable_try_again))); 26 | } 27 | // Errors not handled: EBUSY, EINVAL 28 | assert(ret == 0); 29 | } 30 | 31 | ~PThreadSpinLockWrapper() { 32 | int ret = pthread_spin_destroy(&l); 33 | (void) ret; 34 | // Errors not handled: EBUSY, EINVAL 35 | assert(ret == 0); 36 | } 37 | 38 | PThreadSpinLockWrapper(const PThreadSpinLockWrapper &rhs) = delete; 39 | PThreadSpinLockWrapper &operator=(const PThreadSpinLockWrapper &rhs) = delete; 40 | 41 | void lock() { 42 | int ret = pthread_spin_lock(&l); // undefined if caller has the lock 43 | if (ret == EDEADLK) { 44 | throw(std::system_error( 45 | std::make_error_code(std::errc::resource_deadlock_would_occur))); 46 | } 47 | // Errors not handled: EINVAL 48 | assert(ret == 0); 49 | } 50 | 51 | void unlock() { 52 | int ret = pthread_spin_unlock(&l); 53 | (void) ret; 54 | // Errors not handled: EINVAL, EPERM 55 | assert(ret == 0); 56 | } 57 | 58 | bool try_lock() { 59 | int ret = pthread_spin_trylock(&l); 60 | if (ret == EDEADLK || ret == EBUSY) { 61 | return false; 62 | } 63 | // Errors not handled: EINVAL 64 | assert(ret == 0); 65 | return true; 66 | } 67 | 68 | private: 69 | pthread_spinlock_t l; 70 | }; 71 | 72 | } 73 | // namespace conc11 74 | 75 | #endif /* PTHREAD_WRAPPER_PTHREAD_SPINLOCK_H_ */ 76 | -------------------------------------------------------------------------------- /test/test_executor.h: -------------------------------------------------------------------------------- 1 | /* 2 | * test_executor.h 3 | */ 4 | 5 | #ifndef TEST_TEST_EXECUTOR_H_ 6 | #define TEST_TEST_EXECUTOR_H_ 7 | 8 | #include 9 | #include 10 | #include "../concurrency/executor.h" 11 | 12 | namespace conc11 { 13 | 14 | namespace test { 15 | 16 | static int dummy_func(int x, int y) { 17 | return 233 + x + y; 18 | } 19 | 20 | class Foo { 21 | public: 22 | Foo(int x):x(x) { 23 | } 24 | virtual ~Foo() { 25 | } 26 | virtual int foo(int y) { 27 | return 233 + x + y; 28 | } 29 | private: 30 | int x; 31 | }; 32 | 33 | class FooFoo : public Foo{ 34 | public: 35 | FooFoo(int x):Foo(x){ 36 | } 37 | virtual int foo(int y) override { 38 | int ret = 2100 + Foo::foo(y); 39 | printf ("%d\n", ret); 40 | return ret; 41 | } 42 | }; 43 | 44 | void test_executor() { 45 | Foo f1(10000); 46 | // FooFoo* f2 = new FooFoo(1000000); 47 | auto exec = make_fixed_thread_pool(4); 48 | std::vector> futures; 49 | for (int i = 0; i < 100000; ++i) { 50 | futures.emplace_back(exec->submit(dummy_func, 1, 2)); 51 | // futures.emplace_back(exec.submit(&Foo::foo, &f1, 1)); 52 | // futures.emplace_back(exec.submit(&Foo::foo, f2, 1)); 53 | // futures.emplace_back(exec.submit(&Foo::foo, f2, 1)); 54 | // exec.execute(dummy_func, 1, 2); 55 | // exec.execute(&Foo::foo, f2, 1); 56 | } 57 | // for (auto &future : futures) { 58 | //// printf("%d\n", future.get()); 59 | // future.get(); 60 | // } 61 | // printf("active count %lu\n", exec.active_count.load()); 62 | exec->shutdown(); 63 | exec->await_termination(); 64 | } 65 | 66 | class BlockingCounter { 67 | public: 68 | void add(int x) { 69 | std::lock_guard lock(mtx); 70 | ctr += x; 71 | } 72 | 73 | int get() { 74 | std::lock_guard lock(mtx); 75 | return ctr; 76 | } 77 | 78 | private: 79 | std::mutex mtx; 80 | int ctr = 0; 81 | }; 82 | 83 | void test_executor_sync() { 84 | BlockingCounter bc; 85 | auto exec = make_cached_thread_pool(); 86 | std::vector> futures; 87 | for (int i = 0; i < 100000; ++i) { 88 | futures.emplace_back(exec->submit(&BlockingCounter::add, &bc, 1)); 89 | } 90 | for (auto &future : futures) { 91 | future.get(); 92 | } 93 | exec->shutdown(); 94 | exec->await_termination(); 95 | printf("Final value of blocking counter: %d\n", bc.get()); 96 | } 97 | 98 | class AtomicCounter { 99 | public: 100 | AtomicCounter() noexcept:ctr(0) { 101 | } 102 | 103 | void add(int x) { 104 | ctr.fetch_add(x); 105 | } 106 | 107 | int get() { 108 | return ctr.load(); 109 | } 110 | private: 111 | std::atomic ctr; 112 | }; 113 | 114 | void test_executor_atomic() { 115 | AtomicCounter ac; 116 | auto exec = make_cached_thread_pool(); 117 | std::vector> futures; 118 | for (int i = 0; i < 100000; ++i) { 119 | futures.emplace_back(exec->submit(&AtomicCounter::add, &ac, 1)); 120 | } 121 | for (auto &future : futures) { 122 | future.get(); 123 | } 124 | exec->shutdown(); 125 | exec->await_termination(); 126 | printf("Final value of atomic counter: %d\n", ac.get()); 127 | } 128 | 129 | void test_thread_pool_executor() { 130 | std::chrono::microseconds dur; 131 | conc11::timed_invoke(&dur, test_executor); 132 | printf("Micros elapsed test_executor(): %lu\n", dur.count()); 133 | 134 | conc11::timed_invoke(&dur, test_executor_sync); 135 | printf("Micros elapsed test_executor_sync(): %lu\n", dur.count()); 136 | 137 | conc11::timed_invoke(&dur, test_executor_atomic); 138 | printf("Micros elapsed test_executor_atomic(): %lu\n", dur.count()); 139 | } 140 | 141 | } // namespace test 142 | 143 | } // namespace conc11 144 | 145 | #endif /* TEST_TEST_EXECUTOR_H_ */ 146 | -------------------------------------------------------------------------------- /test/test_latch.h: -------------------------------------------------------------------------------- 1 | /** 2 | * test_latch.h 3 | */ 4 | #ifndef TEST_TEST_LATCH_H_ 5 | #define TEST_TEST_LATCH_H_ 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "../concurrency/latch.h" 12 | 13 | namespace conc11 { 14 | 15 | namespace test { 16 | 17 | static std::atomic ctr; 18 | 19 | void thread_func_waiter(Latch* latch) { 20 | volatile int i = 0; 21 | for (; i < 10000; ++i) 22 | ; 23 | latch->wait(); 24 | auto ctr_val = ctr.load(std::memory_order_relaxed); 25 | if (ctr_val != 10) { 26 | printf("ctr = %d\n", ctr_val); 27 | assert(false); 28 | } 29 | // printf("Thread awaken from latch\n"); 30 | } 31 | 32 | void thread_func_counter(Latch* latch) { 33 | volatile int i = 0; 34 | for (; i < 10000; ++i) 35 | ; 36 | ctr.fetch_add(1, std::memory_order_relaxed); 37 | latch->count_down(1); 38 | // printf("Counted down by 1\n"); 39 | } 40 | 41 | void test_latch() { 42 | Latch latch(10); 43 | std::vector waiters; 44 | std::vector counters; 45 | for (int i = 0; i < 10; ++i) { 46 | waiters.emplace_back(thread_func_waiter, &latch); 47 | } 48 | for (int i = 0; i < 10; ++i) { 49 | counters.emplace_back(thread_func_counter, &latch); 50 | } 51 | for (auto& th : waiters) { 52 | th.join(); 53 | } 54 | for (auto& th : counters) { 55 | th.join(); 56 | } 57 | } 58 | 59 | } // namespace test 60 | 61 | } // namespace conc11 62 | 63 | 64 | 65 | #endif /* TEST_TEST_LATCH_H_ */ 66 | -------------------------------------------------------------------------------- /test/test_lru_cache.h: -------------------------------------------------------------------------------- 1 | /** 2 | * test_lru_cache.h 3 | */ 4 | #ifndef TEST_TEST_LRU_CACHE_H_ 5 | #define TEST_TEST_LRU_CACHE_H_ 6 | 7 | #include 8 | #include 9 | #include 10 | #include "../util/lru_cache.h" 11 | #include "../util/util.h" 12 | 13 | namespace conc11 { 14 | 15 | namespace test { 16 | 17 | struct Key { 18 | explicit Key(int k) noexcept:k(k) { 19 | } 20 | 21 | Key(const Key& rhs) noexcept:k(rhs.k) { 22 | printf("Key copy constructed\n"); 23 | } 24 | 25 | Key(Key&& rhs) noexcept:k(rhs.k) { 26 | printf("Key move constructed\n"); 27 | } 28 | 29 | bool operator==(const Key& rhs) const { 30 | return k == rhs.k; 31 | } 32 | 33 | int k; 34 | }; 35 | 36 | } 37 | 38 | } 39 | 40 | namespace std { 41 | template<> 42 | struct hash { 43 | size_t operator()(const conc11::test::Key& x) const { 44 | return x.k; 45 | } 46 | }; 47 | } 48 | 49 | namespace conc11 { 50 | 51 | namespace test { 52 | 53 | static void test_lru_copy_pointee() { 54 | conc11::LRUCache> iml(1); 55 | auto m1 = conc11::make_unique(10); 56 | auto m2 = conc11::make_unique(20); 57 | iml.set(1, std::move(m1)); 58 | iml.set(2, std::move(m2)); 59 | int out = 0; 60 | iml.get_copy_pointee(1, &out); 61 | printf("get_copy_pointee with key 1: %d\n", out); 62 | iml.get_copy_pointee(2, &out); 63 | printf("get_copy_pointee with key 2: %d\n", out); 64 | } 65 | 66 | static void test_lru_copy_pointee_ptr() { 67 | conc11::LRUCache iml(1); 68 | auto m1 = new int(10); 69 | auto m2 = new int(20); 70 | iml.set(1, std::move(m1)); 71 | iml.set(2, std::move(m2)); 72 | int out = 0; 73 | iml.get_copy_pointee(1, &out); 74 | printf("get_copy_pointee with key 1: %d\n", out); 75 | iml.get_copy_pointee(2, &out); 76 | printf("get_copy_pointee with key 2: %d\n", out); 77 | } 78 | 79 | template 80 | static void thread_func(LRU* lru) { 81 | for (int i = 0; i < 100000; ++i) { 82 | lru->set(i % 337, i % 613); 83 | int dummy = 0; 84 | lru->get_copy(i, &dummy); 85 | } 86 | } 87 | 88 | static void test_blocking_lru() { 89 | conc11::BlockingLRUCache, std::equal_to, std::mutex> lru(100); 90 | std::vector threads; 91 | for (int i = 0; i < 10; ++i) { 92 | threads.emplace_back(&thread_func, &lru); 93 | } 94 | for (auto& th : threads) { 95 | th.join(); 96 | } 97 | } 98 | 99 | template 100 | static void thread_func_large_obj(LRU* lru) { 101 | for (int i = 0; i < 100000; ++i) { 102 | std::vector data; 103 | for (int j = 0; j < 4096; ++j) { 104 | data.emplace_back(j); 105 | } 106 | lru->set(i, std::move(data)); 107 | std::vector dummy; 108 | lru->get_copy(i, &dummy); 109 | } 110 | } 111 | 112 | static void test_blocking_lru_large_obj() { 113 | conc11::BlockingLRUCache> lru(10000); 114 | std::vector threads; 115 | for (int i = 0; i < 10; ++i) { 116 | threads.emplace_back(&thread_func_large_obj, &lru); 117 | } 118 | for (auto& th : threads) { 119 | th.join(); 120 | } 121 | } 122 | 123 | void run_test_lru_cache() { 124 | test_lru_copy_pointee(); 125 | test_lru_copy_pointee_ptr(); 126 | 127 | std::chrono::microseconds dur; 128 | conc11::timed_invoke(&dur, test_blocking_lru); 129 | printf("Micros elapsed: %lu\n", dur.count()); 130 | conc11::timed_invoke(&dur, test_blocking_lru_large_obj); 131 | printf("Micros elapsed: %lu\n", dur.count()); 132 | } 133 | 134 | } // namespace test 135 | 136 | } // namespace conc11 137 | 138 | #endif /* TEST_TEST_LRU_CACHE_H_ */ 139 | -------------------------------------------------------------------------------- /test/test_pthread_local_ptr.h: -------------------------------------------------------------------------------- 1 | /** 2 | * A simple wrapper of pthread_specific thread local storage to a smart-pointer-like object. 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #if __cplusplus >= 201103L 9 | #include 10 | #include 11 | #endif 12 | 13 | #include "../pthread_wrapper/pthread_local_ptr.h" 14 | 15 | namespace conc11 { 16 | 17 | namespace test { 18 | 19 | #if __cplusplus >= 201103L 20 | 21 | class PThreadLocalPtrCannotCreate { 22 | public: 23 | PThreadLocalPtrCannotCreate(std::string &&rref) : 24 | d(std::move(rref)) { 25 | } 26 | PThreadLocalPtrCannotCreate(const std::string &rhs) = delete; 27 | private: 28 | std::string d; 29 | }; 30 | 31 | // PThreadLocalPtr used as static member 32 | class StaticHolder { 33 | public: 34 | void print_address(int id) { 35 | printf("Thread id: %d, address of class static: %p\n", id, sp.get()); 36 | } 37 | 38 | private: 39 | static PThreadLocalPtr sp; 40 | }; 41 | PThreadLocalPtr StaticHolder::sp; 42 | 43 | // Another layer of function calls 44 | void thread_func_internal(int id) { 45 | std::string a="A"; 46 | static PThreadLocalPtr> pi(std::move(a), std::string("B")); 47 | printf("Thread id: %d, address of pi: %p\n", id, pi.get()); 48 | printf("pi = (%s, %s), a=%s\n", pi->first.c_str(), pi->second.c_str(), a.c_str()); 49 | // only a, b in the first thread will be moved since static only get 50 | // initialized once 51 | 52 | StaticHolder sh; 53 | sh.print_address(id); 54 | 55 | // static PThreadLocalPtr wontcompile1; 56 | // static PThreadLocalPtr wontcompile2(std::string("foo")); 57 | } 58 | 59 | void thread_func(int id) { 60 | std::string ini("INI"); 61 | static PThreadLocalPtr p(ini); 62 | printf("Thread id: %d, address of p: %p\n", id, p.get()); 63 | *p = "Thread id: " + std::to_string(id); 64 | printf("%s\n", p->c_str()); 65 | std::this_thread::sleep_for(std::chrono::milliseconds(50)); 66 | thread_func_internal(id); 67 | std::this_thread::sleep_for(std::chrono::milliseconds(50)); 68 | thread_func_internal(id); // Multiple entry to another function 69 | std::this_thread::sleep_for(std::chrono::milliseconds(50)); 70 | } 71 | 72 | void test_pthread_specific() { 73 | std::vector threads; 74 | for (int i = 0; i < 5; ++i) { 75 | threads.emplace_back(thread_func, i); 76 | } 77 | for (auto &th : threads) { 78 | th.join(); 79 | } 80 | } 81 | 82 | #else 83 | 84 | void* thread_func_pthread(void *id) { 85 | static PThreadLocalPtr p; 86 | *p = (long)id; 87 | printf("%ld\n", *p); 88 | printf("%p\n", p.get()); 89 | for (volatile int i = 100000000; i != 0; --i); 90 | return NULL; 91 | } 92 | 93 | void test_pthread_specific() { 94 | static const int NUM_THREADS = 10; 95 | pthread_t t[NUM_THREADS]; 96 | void *ret; 97 | for (long i=0; i 9 | #include 10 | #include 11 | 12 | #include "../concurrency/semaphore.h" 13 | 14 | namespace conc11 { 15 | 16 | namespace test { 17 | 18 | template 19 | void thread_func_b(Semaphore *sem, int id) { 20 | for (int i = 0; i < 100; ++i) { 21 | // printf("Acquiring\n"); 22 | SemaphoreGuard> sg(*sem, i % 20); 23 | // printf("Thread id: %d, Permits = %d, Waiting nodes = %d\n", id, bqs->permits, bqs->num_waiting_nodes()); 24 | // std::this_thread::sleep_for(std::chrono::milliseconds(3)); 25 | // printf("Releasing\n"); 26 | } 27 | // printf("Blocking thread %d out\n", id); 28 | } 29 | 30 | template 31 | void thread_func_nb(Semaphore *sem, int id) { 32 | for (int i = 0; i < 100; ++i) { 33 | while (!sem->try_acquire_for(10, 0)) 34 | ; 35 | // std::this_thread::sleep_for(std::chrono::milliseconds(3)); 36 | 37 | sem->release(); 38 | } 39 | // printf("Non-blocking thread %d out\n", id); 40 | } 41 | 42 | void test_queued_semaphore() { 43 | using SemaphoreType = conc11::QueuedSemaphore; 44 | static const int NUM_THREADS = 512; 45 | SemaphoreType sem(64); 46 | std::vector blocking_threads; 47 | for (int i = 0; i < NUM_THREADS; ++i) { 48 | blocking_threads.emplace_back(thread_func_b, &sem, i); 49 | } 50 | std::vector non_blocking_threads; 51 | for (int i = NUM_THREADS; i < NUM_THREADS * 2; ++i) { 52 | non_blocking_threads.emplace_back(thread_func_nb, &sem, i); 53 | } 54 | 55 | for (int i = 0; i < NUM_THREADS; ++i) { 56 | blocking_threads[i].join(); 57 | } 58 | for (int i = 0; i < NUM_THREADS; ++i) { 59 | non_blocking_threads[i].join(); 60 | } 61 | } 62 | 63 | } // namespace test 64 | 65 | } // namespace conc11 66 | 67 | #endif /* TEST_TEST_QUEUED_SEMAPHORE_H_ */ 68 | -------------------------------------------------------------------------------- /test/test_semaphore.h: -------------------------------------------------------------------------------- 1 | /* 2 | * test_semaphore.h 3 | */ 4 | 5 | #ifndef TEST_SEMAPHORE_H_ 6 | #define TEST_SEMAPHORE_H_ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "../concurrency/semaphore.h" 15 | #include "../concurrency/spin_lock.h" 16 | 17 | namespace conc11 { 18 | 19 | namespace test { 20 | 21 | using std::vector; 22 | using std::thread; 23 | using std::unique_ptr; 24 | 25 | static const int TICKS = 5; 26 | static const int THREAD_NUMBER = 100; 27 | 28 | template 29 | class ThreadFunctor { 30 | public: 31 | ThreadFunctor(int id, SemType* sem) : 32 | id(id), ctr(TICKS), sem(sem) { 33 | } 34 | void operator()() { 35 | while (ctr > 0) { 36 | sem->acquire(id); 37 | printf("thread id:%d, tick counter %d, remaining sem %d\n", 38 | id, ctr--, sem->available_permits()); 39 | if (sem->available_permits() < 0) { 40 | printf("ERROR! MINUS SEMAPHORES!!\n"); 41 | } 42 | // std::this_thread::sleep_for(std::chrono::milliseconds(50)); 43 | sem->release(id); 44 | } 45 | } 46 | private: 47 | int id; 48 | int ctr; 49 | SemType* sem; 50 | }; 51 | 52 | void test_semaphore() { 53 | using SemType = QueuedSemaphore; 54 | SemType* sem = new SemType(THREAD_NUMBER); 55 | vector > threads; 56 | for (int i = 0; i < THREAD_NUMBER; ++i) { 57 | ThreadFunctor func(i + 1, sem); 58 | thread* curThread = new thread(func); 59 | threads.push_back(unique_ptr(curThread)); 60 | } 61 | for (int i = 0; i < THREAD_NUMBER; ++i) { 62 | threads[i]->join(); 63 | } 64 | } 65 | 66 | } // namespace test 67 | 68 | } // namespace conc11 69 | #endif /* TEST_SEMAPHORE_H_ */ 70 | -------------------------------------------------------------------------------- /test/test_shared_mutex.h: -------------------------------------------------------------------------------- 1 | /* 2 | * file test_shared_mutex.h 3 | */ 4 | 5 | #ifndef TEST_TEST_SHARED_MUTEX_H_ 6 | #define TEST_TEST_SHARED_MUTEX_H_ 7 | 8 | #include 9 | #include 10 | #include 11 | #include "../concurrency/shared_mutex.h" 12 | #include "../pthread_wrapper/pthread_shared_mutex.h" 13 | 14 | namespace conc11 { 15 | 16 | namespace test { 17 | 18 | using SharedMutexType = conc11::ReaderPreferringSharedTimedMutex; 19 | 20 | void reader_func(int id, SharedMutexType *sm, int* shared_data) { 21 | for (int i = 0; i < 100; ++i) { 22 | conc11::SharedLock lock(*sm); 23 | // conc11::SharedLock lock(*sm, std::defer_lock); 24 | // while (!lock.try_lock_for(std::chrono::microseconds(1000))) { 25 | // printf("Reader %d\ttimeout\n", id); 26 | // } 27 | printf("Reader %d\t, %d\n", id, *shared_data); 28 | std::this_thread::sleep_for(std::chrono::microseconds(100)); 29 | lock.unlock(); 30 | std::this_thread::sleep_for(std::chrono::microseconds(500)); 31 | } 32 | } 33 | 34 | void writer_func(int id, SharedMutexType *sm, int* shared_data) { 35 | for (int i = 0; i < 100; ++i) { 36 | std::unique_lock lock(*sm); 37 | // std::unique_lock lock(*sm, std::defer_lock); 38 | // while (!lock.try_lock_for(std::chrono::microseconds(1000))) { 39 | // printf("Writer %d\ttimeout\n", id); 40 | // } 41 | *shared_data += 1; 42 | printf("Writer %d\t, %d\n", id, *shared_data); 43 | std::this_thread::sleep_for(std::chrono::microseconds(100)); 44 | lock.unlock(); 45 | std::this_thread::sleep_for(std::chrono::microseconds(500)); 46 | } 47 | } 48 | 49 | void reader_func_cont(int id, SharedMutexType* sm, std::map* shared_map) { 50 | for (int i = 0; i < 100; ++i) { 51 | conc11::SharedLock lock(*sm, std::defer_lock); 52 | 53 | // lock.lock(); 54 | 55 | // while (!lock.try_lock()) { 56 | // std::this_thread::yield(); 57 | // } 58 | 59 | while (!lock.try_lock_for(std::chrono::microseconds(1000))) { 60 | } 61 | 62 | if (shared_map->size() != 0) { 63 | printf("Reader %d, current size: %lu, current largest number: %s\n", 64 | id, shared_map->size(), (--shared_map->end())->second.c_str()); 65 | } else { 66 | printf("Reader %d, Empty map\n", id); 67 | } 68 | lock.unlock(); 69 | std::this_thread::sleep_for(std::chrono::microseconds(1000)); 70 | } 71 | } 72 | 73 | void writer_func_cont(int id, SharedMutexType* sm, std::map* shared_map) { 74 | for (int i = 0; i < 100; ++i) { 75 | std::unique_lock lock(*sm, std::defer_lock); 76 | 77 | // lock.lock(); 78 | 79 | // while (!lock.try_lock()) { 80 | // std::this_thread::yield(); 81 | // } 82 | 83 | while (!lock.try_lock_for(std::chrono::microseconds(1000))) { 84 | } 85 | 86 | int key = id * 100 + i; 87 | auto iter = shared_map->find(key); 88 | if (iter == shared_map->end()) { 89 | shared_map->emplace(key, std::to_string(key)); 90 | printf("Writer %d, inserted %d\n", id, key); 91 | } 92 | std::this_thread::sleep_for(std::chrono::microseconds(1000)); 93 | } 94 | } 95 | 96 | template 97 | void do_test_shared_mutex(SharedDataType* shared_data, ReaderFunc rf, WriterFunc wf, 98 | int num_readers, int num_writers) { 99 | std::vector readers; 100 | readers.reserve(num_readers); 101 | std::vector writers; 102 | writers.reserve(num_writers); 103 | 104 | SharedMutexType sm; 105 | 106 | for (int i = 0; i < num_readers; ++i) { 107 | readers.emplace_back(rf, i, &sm, shared_data); 108 | } 109 | for (int i = 0; i < num_writers; ++i) { 110 | writers.emplace_back(wf, i, &sm, shared_data); 111 | } 112 | 113 | for (auto& thread : readers) { 114 | thread.join(); 115 | } 116 | for (auto& thread : writers) { 117 | thread.join(); 118 | } 119 | } 120 | 121 | // Mainly for testing throughput 122 | void test_shared_mutex() { 123 | int shared_data = 0; 124 | do_test_shared_mutex(&shared_data, reader_func, writer_func, 256, 8); 125 | } 126 | 127 | // Mainly for testing correctness 128 | void test_shared_mutex_cont() { 129 | std::map shared_map; 130 | do_test_shared_mutex(&shared_map, reader_func_cont, writer_func_cont, 1, 64); 131 | printf("Final size: %lu\n", shared_map.size()); 132 | } 133 | 134 | } // namespace test 135 | 136 | } // namespace conc11 137 | 138 | #endif /* TEST_TEST_SHARED_MUTEX_H_ */ 139 | -------------------------------------------------------------------------------- /test/test_spin_lock.h: -------------------------------------------------------------------------------- 1 | /* 2 | * test_spin_lock.h 3 | */ 4 | 5 | #ifndef TEST_TEST_SPIN_LOCK_H_ 6 | #define TEST_TEST_SPIN_LOCK_H_ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "../concurrency/spin_lock.h" 14 | #include "../pthread_wrapper/pthread_spinlock.h" 15 | #include "../concurrency/semaphore.h" 16 | 17 | namespace conc11 { 18 | 19 | namespace test { 20 | 21 | static int g = 100000; 22 | conc11::SpinLock sl; 23 | conc11::PThreadSpinLockWrapper psw; 24 | conc11::FairSpinLock fsl; 25 | 26 | conc11::SimpleSemaphore sem(1); 27 | conc11::SemaphoreTimedLockableAdapter sem_lock(sem, 1); 28 | std::mutex m; 29 | 30 | inline void thread_function(int num) { 31 | for (int i = 0; i < num; ++i) { 32 | // std::lock_guard l(sl); 33 | std::lock_guard l(psw); 34 | // std::lock_guard l(fsl); 35 | // std::lock_guard l(sem_lock); 36 | // std::lock_guard l(m); 37 | g -= 1; 38 | volatile int j = 500; 39 | for (; j != 0; --j) 40 | ; // do some work 41 | // printf("%d\n", g); 42 | } 43 | } 44 | 45 | inline void test_spin_lock() { 46 | std::vector threads; 47 | for (int i = 0; i < 10; ++i) { 48 | threads.emplace_back(&thread_function, 10000); 49 | } 50 | for (auto &thread : threads) { 51 | thread.join(); 52 | } 53 | printf("This should be zero: %d\n", g); 54 | } 55 | 56 | } // namespace test 57 | 58 | } // namespace conc11 59 | 60 | #endif /* TEST_TEST_SPIN_LOCK_H_ */ 61 | -------------------------------------------------------------------------------- /util/bits/invoke.h: -------------------------------------------------------------------------------- 1 | /** 2 | * invoke.h 3 | */ 4 | #ifndef UTIL_BITS_INVOKE_H_ 5 | #define UTIL_BITS_INVOKE_H_ 6 | 7 | #include 8 | #include 9 | #include "scope_guard.h" 10 | 11 | namespace conc11 { 12 | 13 | // "Reimplement" C++17 std::invoke 14 | // DO NOT format this code with code formatters. They are just too stupid to format it correctly. 15 | namespace detail { 16 | template 17 | struct is_reference_wrapper : std::false_type {}; 18 | template 19 | struct is_reference_wrapper> : std::true_type {}; 20 | 21 | template 22 | auto _invoke(T Base::*pmf, Derived&& ref, Args&&... args) 23 | noexcept(noexcept((std::forward(ref).*pmf)(std::forward(args)...))) 24 | -> typename std::enable_if::value && 25 | std::is_base_of::type>::value, 26 | decltype((std::forward(ref).*pmf)(std::forward(args)...))> 27 | ::type { 28 | return (std::forward(ref).*pmf)(std::forward(args)...); 29 | } 30 | 31 | template 32 | auto _invoke(T Base::*pmf, RefWrap&& ref, Args&&... args) 33 | noexcept(noexcept((ref.get().*pmf)(std::forward(args)...))) 34 | -> typename std::enable_if::value && 35 | is_reference_wrapper::type>::value, 36 | decltype((ref.get().*pmf)(std::forward(args)...))>::type { 37 | return (ref.get().*pmf)(std::forward(args)...); 38 | } 39 | 40 | template 41 | auto _invoke(T Base::*pmf, Pointer&& ptr, Args&&... args) 42 | noexcept(noexcept(((*std::forward(ptr)).*pmf)(std::forward(args)...))) 43 | -> typename std::enable_if::value && 44 | !is_reference_wrapper::type>::value && 45 | !std::is_base_of::type>::value, 46 | decltype(((*std::forward(ptr)).*pmf)(std::forward(args)...))> 47 | ::type { 48 | return ((*std::forward(ptr)).*pmf)(std::forward(args)...); 49 | } 50 | 51 | template 52 | auto _invoke(T Base::*pmd, Derived&& ref) 53 | noexcept(noexcept(std::forward(ref).*pmd)) 54 | -> typename std::enable_if::value && 55 | std::is_base_of::type>::value, 56 | decltype(std::forward(ref).*pmd)>::type { 57 | return std::forward(ref).*pmd; 58 | } 59 | 60 | template 61 | auto _invoke(T Base::*pmd, RefWrap&& ref) 62 | noexcept(noexcept(ref.get().*pmd)) 63 | -> typename std::enable_if::value && 64 | is_reference_wrapper::type>::value, 65 | decltype(ref.get().*pmd)>::type { 66 | return ref.get().*pmd; 67 | } 68 | 69 | template 70 | auto _invoke(T Base::*pmd, Pointer&& ptr) 71 | noexcept(noexcept((*std::forward(ptr)).*pmd)) 72 | -> typename std::enable_if::value && 73 | !is_reference_wrapper::type>::value && 74 | !std::is_base_of::type>::value, 75 | decltype((*std::forward(ptr)).*pmd)>::type { 76 | return (*std::forward(ptr)).*pmd; 77 | } 78 | 79 | template 80 | auto _invoke(F&& f, Args&&... args) 81 | noexcept(noexcept(std::forward(f)(std::forward(args)...))) 82 | -> typename std::enable_if::type>::value, 83 | decltype(std::forward(f)(std::forward(args)...))>::type { 84 | return std::forward(f)(std::forward(args)...); 85 | } 86 | } // namespace detail 87 | 88 | /** 89 | * Invoke a callable. Should behave like C++17 std::invoke(). 90 | */ 91 | template< class F, class... ArgTypes > 92 | auto invoke(F&& f, ArgTypes&&... args) 93 | // exception specification for QoI 94 | noexcept(noexcept(detail::_invoke(std::forward(f), std::forward(args)...))) 95 | -> decltype(detail::_invoke(std::forward(f), std::forward(args)...)) { 96 | return detail::_invoke(std::forward(f), std::forward(args)...); 97 | } 98 | 99 | // Some extra gadgets based on conc11::invoke() 100 | 101 | namespace detail { 102 | template 103 | struct TimerEndFunctor{ 104 | TimerEndFunctor(std::chrono::duration* dur) : 105 | begin(std::chrono::steady_clock::now()), dur(dur) { 106 | } 107 | void operator()() { 108 | std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); 109 | *dur = std::chrono::duration_cast>( 110 | end - begin); 111 | } 112 | std::chrono::steady_clock::time_point begin; 113 | std::chrono::duration* dur; 114 | }; 115 | 116 | 117 | template 118 | ScopeGuardImpl> make_time_guard( 119 | std::chrono::duration *time_elapsed) { 120 | return make_scope_guard(TimerEndFunctor(time_elapsed)); 121 | } 122 | 123 | } // namespace detail 124 | 125 | /** 126 | * Invokes a callable and records its execution time. The first parameter is a pointer to a 127 | * std::chrono::duration for returning execution time, and the original return value of the 128 | * callable will be returned as the function return value. Second to last parameters match 129 | * those of C++17 std::invoke(). 130 | */ 131 | template 132 | auto timed_invoke(std::chrono::duration *time_elapsed, Callable c, Args&& ...args) 133 | noexcept(noexcept(invoke(std::forward(c), std::forward(args)...))) 134 | -> decltype(invoke(std::forward(c), std::forward(args)...)) { 135 | ScopeGuard time_guard = detail::make_time_guard(time_elapsed); 136 | return invoke(std::forward(c), std::forward(args)...); 137 | } 138 | 139 | } // namespace conc11 140 | 141 | #endif /* UTIL_BITS_INVOKE_H_ */ 142 | -------------------------------------------------------------------------------- /util/bits/make_unique.h: -------------------------------------------------------------------------------- 1 | /** 2 | * make_unique.h 3 | */ 4 | #ifndef UTIL_BITS_MAKE_UNIQUE_H_ 5 | #define UTIL_BITS_MAKE_UNIQUE_H_ 6 | 7 | #include 8 | #include 9 | 10 | namespace conc11 { 11 | 12 | /** 13 | * "Reimplement" C++14 make_unique for C++11. 14 | */ 15 | 16 | template 17 | struct is_array_known_bound : std::false_type { 18 | }; 19 | 20 | template 21 | struct is_array_known_bound : std::true_type { 22 | }; 23 | 24 | template 25 | inline auto make_unique(Args&&... args) 26 | -> typename std::enable_if::value, std::unique_ptr>::type { 27 | return std::unique_ptr(new T(std::forward(args)...)); 28 | } 29 | 30 | template 31 | inline auto make_unique(std::size_t size) 32 | -> typename std::enable_if::value && !is_array_known_bound::value, 33 | std::unique_ptr>::type { 34 | return std::unique_ptr(new typename std::remove_extent::type[size]); 35 | } 36 | 37 | template 38 | inline auto make_unique(Args&&... args) 39 | -> typename std::enable_if::value, void>::type = delete; 40 | 41 | } // namespace conc11 42 | 43 | #endif /* UTIL_BITS_MAKE_UNIQUE_H_ */ 44 | -------------------------------------------------------------------------------- /util/bits/rvalue_wrapper.h: -------------------------------------------------------------------------------- 1 | /** 2 | * rvalue_wrapper.h 3 | */ 4 | #ifndef UTIL_BITS_RVALUE_WRAPPER_H_ 5 | #define UTIL_BITS_RVALUE_WRAPPER_H_ 6 | 7 | namespace conc11 { 8 | 9 | /** 10 | * A wrapper for holding and passing rvalues through std::bind. The rvalue wrapped will be stored 11 | * inside the wrapper until the functor returned by std::bind is invoked, at when the stored rvalue 12 | * will be moved from. The functor can be invoked ONLY ONCE. 13 | */ 14 | template 15 | struct RValueWrapper { 16 | public: 17 | template::value>::type> 18 | explicit RValueWrapper(T &&t) noexcept : 19 | t(std::move(t)) { 20 | } 21 | RValueWrapper(const RValueWrapper &rhs) = default; 22 | RValueWrapper(RValueWrapper &&rhs) = default; 23 | RValueWrapper& operator=(const RValueWrapper &rhs) = default; 24 | RValueWrapper& operator=(RValueWrapper &&rhs) = default; 25 | 26 | template 27 | T &&operator()(Args ...) { 28 | return std::move(t); 29 | } 30 | private: 31 | T t; 32 | }; 33 | 34 | template::value>::type> 36 | RValueWrapper rval(T &&t) { 37 | return RValueWrapper(std::move(t)); 38 | } 39 | 40 | template 41 | RValueWrapper rval(T &t) { 42 | return RValueWrapper(T(t)); 43 | } 44 | 45 | } // namespace conc11 46 | 47 | namespace std { 48 | template 49 | struct is_bind_expression> : std::true_type { 50 | }; 51 | } 52 | 53 | #endif /* UTIL_BITS_RVALUE_WRAPPER_H_ */ 54 | -------------------------------------------------------------------------------- /util/bits/scope_guard.h: -------------------------------------------------------------------------------- 1 | /** 2 | * scope_guard.h 3 | */ 4 | #ifndef UTIL_BITS_SCOPE_GUARD_H_ 5 | #define UTIL_BITS_SCOPE_GUARD_H_ 6 | 7 | #include 8 | 9 | namespace conc11 { 10 | 11 | namespace detail { 12 | 13 | class ScopeGuardBase { 14 | public: 15 | void release() const { 16 | released = true; 17 | } 18 | 19 | protected: 20 | ScopeGuardBase() { 21 | } 22 | 23 | ScopeGuardBase(const ScopeGuardBase&) = delete; 24 | ScopeGuardBase& operator=(const ScopeGuardBase&) = delete; 25 | 26 | ScopeGuardBase(ScopeGuardBase&& rhs) noexcept:released(rhs.released) { 27 | rhs.released = true; 28 | } 29 | 30 | ScopeGuardBase& operator=(ScopeGuardBase&& rhs) noexcept { 31 | assert(this != &rhs); 32 | released = rhs.released; 33 | rhs.released = true; 34 | return *this; 35 | } 36 | 37 | ~ScopeGuardBase() { // Intentionally made non-virtual. Don't use ScopeGuardBase* directly. 38 | } 39 | 40 | bool is_released() const { 41 | return released; 42 | } 43 | 44 | private: 45 | mutable bool released = false; 46 | }; 47 | 48 | template 49 | class ScopeGuardImpl : public ScopeGuardBase { 50 | public: 51 | ScopeGuardImpl(Post&& post) : 52 | post(std::forward(post)) { 53 | } 54 | 55 | ScopeGuardImpl(Pre&& pre, Post&& post) : 56 | post(std::forward(post)) { 57 | pre(); 58 | } 59 | 60 | ScopeGuardImpl(const ScopeGuardImpl&) = delete; 61 | ScopeGuardImpl& operator=(const ScopeGuardImpl&) = delete; 62 | 63 | ScopeGuardImpl(ScopeGuardImpl&&) = default; 64 | ScopeGuardImpl& operator=(ScopeGuardImpl&&) = default; 65 | 66 | ~ScopeGuardImpl() { 67 | if (!is_released()) { 68 | post(); 69 | } 70 | } 71 | 72 | private: 73 | Post post; 74 | }; 75 | 76 | } // namespace detail 77 | 78 | typedef const detail::ScopeGuardBase& ScopeGuard; 79 | struct placeholder_t { 80 | }; 81 | 82 | template 83 | detail::ScopeGuardImpl make_scope_guard(Post&& post) { 84 | return detail::ScopeGuardImpl(std::forward(post)); 85 | } 86 | 87 | template 88 | detail::ScopeGuardImpl make_scope_guard(Pre&& pre, Post&& post) { 89 | return detail::ScopeGuardImpl(std::forward
(pre), std::forward(post));
90 | }
91 | 
92 | } // namespace conc11
93 | 
94 | #endif /* UTIL_BITS_SCOPE_GUARD_H_ */
95 | 


--------------------------------------------------------------------------------
/util/lru_cache.h:
--------------------------------------------------------------------------------
  1 | /*
  2 |  * lru_cache.h
  3 |  */
  4 | #ifndef UTIL_LRU_CACHE_H_
  5 | #define UTIL_LRU_CACHE_H_
  6 | 
  7 | #include 
  8 | #include 
  9 | #include 
 10 | #include 
 11 | 
 12 | namespace conc11 {
 13 | 
 14 | /**
 15 |  * An in memory LRU caching container for storing key-value pairs. Value type is required to be
 16 |  * Assignable.
 17 |  */
 18 | template, class Eql = std::equal_to>
 19 | class LRUCache {
 20 | public:
 21 |     LRUCache(size_t capacity) :
 22 |             mem(), capacity(capacity), lru_list_head(nullptr), lru_list_tail(nullptr) {
 23 |     }
 24 | 
 25 |     ~LRUCache() {
 26 |     }
 27 | 
 28 |     /**
 29 |      * Store or update a key-value pair.
 30 |      */
 31 |     template::value>::type,
 33 |             class = typename std::enable_if::value>::type>
 34 |     void set(UK&& key, UV&& value) {
 35 |         auto iter = mem.find(key);
 36 |         if (iter == mem.end()) {
 37 |             auto ret = mem.emplace(std::piecewise_construct,
 38 |                     std::forward_as_tuple(std::forward(key)),
 39 |                     std::forward_as_tuple(std::forward(value)));
 40 |             auto new_pair_iter = ret.first;
 41 |             list_insert_head(&(*new_pair_iter));
 42 |             if (mem.size() > capacity) {
 43 |                 gc1();
 44 |             }
 45 |         } else {
 46 |             iter->second.value = std::forward(value);
 47 |             list_move_to_head(&(*iter));
 48 |         }
 49 |     }
 50 | 
 51 |     /**
 52 |      * Returns pointer to the stored value, or nullptr if key not found.
 53 |      * The returned pointer may become invalidated after a set, erase or clear operation and
 54 |      * should not be stored.
 55 |      * If accessed from multiple threads, returned pointers should no longer be used after leaving
 56 |      * the critical region.
 57 |      */
 58 |     TV* get(const TK& key) {
 59 |         auto iter = mem.find(key);
 60 |         if (iter == mem.end()) {
 61 |             return nullptr;
 62 |         }
 63 |         list_move_to_head(&(*iter));
 64 |         return &(iter->second.value);
 65 |     }
 66 | 
 67 |     /**
 68 |      * Returns true and return a copy of the stored value var copy-assigning *out_value, or
 69 |      * return false if key not found.
 70 |      */
 71 |     bool get_copy(const TK& key, TV* out_value) {
 72 |         auto iter = mem.find(key);
 73 |         if (iter == mem.end()) {
 74 |             return false;
 75 |         }
 76 |         list_move_to_head(&(*iter));
 77 |         *out_value = iter->second.value;
 78 |         return true;
 79 |     }
 80 | 
 81 |     /**
 82 |      * If key is found, returns true and return the stored value var move-assigning *out_value,
 83 |      * then erase key from the cache. Return false if key not found.
 84 |      */
 85 |     bool get_move(const TK& key, TV* out_value) {
 86 |         auto iter = mem.find(key);
 87 |         if (iter == mem.end()) {
 88 |             return false;
 89 |         }
 90 |         *out_value = std::move(iter->second.value);
 91 |         list_release(&(*iter));
 92 |         mem.erase(iter);
 93 |         return true;
 94 |     }
 95 | 
 96 |     /**
 97 |      * If key is found, returns true and return the object pointed by value, a.k.a. (*value).
 98 |      * Returns false if key not found. This variant of get method can be used with smart pointers
 99 |      * like unique_ptr and make copies of pointees without having to copy the smart pointer.
100 |      */
101 |     template
102 |     bool get_copy_pointee(const TK& key,
103 |                           typename std::remove_reference<
104 |                                   decltype(*(std::declval()))
105 |                           >::type* out_value_pointee) {
106 |         auto iter = mem.find(key);
107 |         if (iter == mem.end()) {
108 |             return false;
109 |         }
110 |         list_move_to_head(&(*iter));
111 |         *out_value_pointee = *(iter->second.value);
112 |         return true;
113 |     }
114 | 
115 |     /**
116 |      * Return if the key exists. Does not count as a "usage" and will not affect LRU.
117 |      */
118 |     bool has_key(const TK& key) {
119 |         return (mem.find(key) != mem.end());
120 |     }
121 | 
122 |     /**
123 |      * Erase a key and its corresponding value from the cache. Return false if key not found.
124 |      */
125 |     bool erase(const TK& key) {
126 |         auto iter = mem.find(key);
127 |         if (iter == mem.end()) {
128 |             return false;
129 |         }
130 |         PairType* p = &(*iter);
131 |         list_release(p);
132 |         mem.erase(iter);
133 |         return true;
134 |     }
135 | 
136 |     /**
137 |      * Clear cache.
138 |      */
139 |     void clear() {
140 |         mem.clear();
141 |         lru_list_head = nullptr;
142 |         lru_list_tail = nullptr;
143 |     }
144 | 
145 | private:
146 | 
147 |     struct Entry;
148 |     using PairType = std::pair;
149 | 
150 |     struct Entry {
151 |         TV value;
152 |         PairType* parent;
153 |         PairType* child;
154 |         explicit Entry(const TV& t) :
155 |                 value(t), parent(nullptr), child(nullptr) {
156 |         }
157 |         explicit Entry(TV&& t) :
158 |                 value(std::move(t)), parent(nullptr), child(nullptr) {
159 |         }
160 |     };
161 | 
162 |     /**
163 |      * Insert a node pointed to by e to the head of the lru list.
164 |      * Undefined behaviour if e does not point to a new node not yet in the list.
165 |      */
166 |     void list_insert_head(PairType* e) {
167 |         if (!lru_list_head) {
168 |             lru_list_head = e;
169 |             lru_list_tail = e;
170 |             e->second.parent = nullptr;
171 |             e->second.child = nullptr;
172 |         } else {
173 |             e->second.parent = nullptr;
174 |             e->second.child = lru_list_head;
175 |             lru_list_head->second.parent = e;
176 |             lru_list_head = e;
177 |         }
178 |     }
179 | 
180 |     /**
181 |      * Release the node pointed to by e from the lru list without destroying the node.
182 |      * Undefined behaviour if e does not point to an existing node in the list.
183 |      */
184 |     void list_release(PairType* e) {
185 |         if (e->second.parent == nullptr) {
186 |             lru_list_head = e->second.child;
187 |         }
188 |         else {
189 |             e->second.parent->second.child = e->second.child;
190 |         }
191 |         if (e->second.child == nullptr) {
192 |             lru_list_tail = e->second.parent;
193 |         }
194 |         else {
195 |             e->second.child->second.parent = e->second.parent;
196 |         }
197 |     }
198 | 
199 |     void list_move_to_head(PairType* e) {
200 |         list_release(e);
201 |         list_insert_head(e);
202 |     }
203 | 
204 |     /**
205 |      * Delete the least recently used key-value pair.
206 |      */
207 |     void gc1() {
208 |         assert(lru_list_tail != nullptr);
209 |         PairType* e = lru_list_tail;
210 |         list_release(e);
211 |         mem.erase(e->first);
212 |     }
213 | 
214 |     std::unordered_map mem;
215 |     const uint32_t capacity;
216 |     PairType* lru_list_head;
217 |     PairType* lru_list_tail;
218 | };
219 | 
220 | /**
221 |  * A thread-safe blocking version of in-memory LRU cache.
222 |  * BlockingInMemoryLru does not provide get() method that returns pointers to stored objects as
223 |  * the pointers should not be used after leaving the critical region.
224 |  */
225 | template, class Eql = std::equal_to,
226 |         class Mutex = std::mutex>
227 | class BlockingLRUCache {
228 | public:
229 |     BlockingLRUCache(size_t capacity):unsynced_cache(capacity){
230 |     }
231 | 
232 |     BlockingLRUCache(const BlockingLRUCache&) = delete;
233 |     BlockingLRUCache& operator=(const BlockingLRUCache&) = delete;
234 | 
235 |     ~BlockingLRUCache() {
236 |     }
237 | 
238 |     template::value>::type,
240 |             class = typename std::enable_if::value>::type>
241 |     void set(UK&& key, UV&& value) {
242 |         std::lock_guard lock(mtx);
243 |         unsynced_cache.set(std::forward(key), std::forward(value));
244 |     }
245 | 
246 |     bool get_copy(const TK& key, TV* out_value) {
247 |         std::lock_guard lock(mtx);
248 |         return unsynced_cache.get_copy(key, out_value);
249 |     }
250 | 
251 |     bool get_move(const TK& key, TV* out_value) {
252 |         std::lock_guard lock(mtx);
253 |         return unsynced_cache.get_move(key, out_value);
254 |     }
255 | 
256 |     template
257 |     bool get_copy_pointee(const TK& key,
258 |                           typename std::remove_reference<
259 |                                   decltype(*(std::declval()))
260 |                           >::type* out_value_pointee) {
261 |         std::lock_guard lock(mtx);
262 |         return unsynced_cache.get_copy_pointee(key, out_value_pointee);
263 |     }
264 | 
265 |     bool erase(const TK& key) {
266 |         std::lock_guard lock(mtx);
267 |         return unsynced_cache.erase(key);
268 |     }
269 | 
270 |     void clear() {
271 |         std::lock_guard lock(mtx);
272 |         unsynced_cache.clear();
273 |     }
274 | 
275 | private:
276 |     LRUCache unsynced_cache;
277 |     Mutex mtx;
278 | };
279 | 
280 | } // namespace conc11
281 | 
282 | #endif /* UTIL_LRU_CACHE_H_ */
283 | 


--------------------------------------------------------------------------------
/util/util.h:
--------------------------------------------------------------------------------
 1 | /**
 2 |  * util.h
 3 |  */
 4 | 
 5 | #ifndef UTIL_UTIL_H_
 6 | #define UTIL_UTIL_H_
 7 | 
 8 | #include "bits/rvalue_wrapper.h"
 9 | #include "bits/scope_guard.h"
10 | #include "bits/invoke.h"
11 | #include "bits/make_unique.h"
12 | 
13 | #endif /* UTIL_UTIL_H_ */
14 | 


--------------------------------------------------------------------------------