├── .clang-format ├── .gitignore ├── LICENSE ├── Makefile.am ├── README.md ├── bootstrap ├── configure.ac ├── evenk ├── Makefile.am ├── backoff.h ├── basic.h ├── bounded_queue.h ├── conqueue.h ├── futex.h ├── spinlock.h ├── synch.h ├── synch_queue.h ├── task.h ├── thread.h └── thread_pool.h ├── m4 ├── ax_cxx_compile_stdcxx.m4 └── ax_cxx_compile_stdcxx_14.m4 └── tests ├── .gitignore ├── Makefile.am ├── lock-bench.cc ├── queue-bench.cc ├── shared-lock-test.cc ├── task-test.cc ├── thread-test.cc └── thread_pool-test.cc /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: LLVM 2 | 3 | Language: Cpp 4 | Standard: Cpp11 5 | 6 | ColumnLimit: 96 7 | MaxEmptyLinesToKeep: 2 8 | UseTab: Always 9 | 10 | TabWidth: 8 11 | IndentWidth: 8 12 | ContinuationIndentWidth: 8 13 | 14 | AccessModifierOffset: -8 15 | ConstructorInitializerIndentWidth: 8 16 | 17 | BreakBeforeBraces: Custom 18 | BraceWrapping: 19 | AfterClass: true 20 | AfterStruct: true 21 | AfterFunction: true 22 | BinPackArguments: false 23 | BinPackParameters: false 24 | 25 | AllowShortBlocksOnASingleLine: false 26 | AllowShortFunctionsOnASingleLine: Empty 27 | 28 | AlwaysBreakTemplateDeclarations: true 29 | AlwaysBreakAfterReturnType: TopLevelDefinitions 30 | 31 | ReflowComments: false 32 | 33 | SpaceAfterCStyleCast: true 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Various backup files 2 | *~ 3 | 4 | # IDE settings 5 | /.project 6 | /.cproject 7 | /.settings 8 | /nbproject 9 | 10 | # Autotools files 11 | Makefile 12 | Makefile.in 13 | .deps 14 | 15 | /aclocal.m4 16 | /autom4te.cache 17 | /configure 18 | /config.log 19 | /config.status 20 | /build-aux 21 | /evenk/config.h 22 | /evenk/config.h.in 23 | /evenk/stamp-h1 24 | 25 | # Compiled Object files 26 | *.slo 27 | *.lo 28 | *.o 29 | *.obj 30 | *.dSYM 31 | 32 | # 
Precompiled Headers 33 | *.gch 34 | *.pch 35 | 36 | # Compiled Dynamic libraries 37 | *.so 38 | *.dylib 39 | *.dll 40 | 41 | # Compiled Static libraries 42 | *.lai 43 | *.la 44 | *.a 45 | *.lib 46 | 47 | # Executables 48 | *.exe 49 | *.out 50 | *.app 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Aleksey Demakov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile.am: -------------------------------------------------------------------------------- 1 | 2 | SUBDIRS = evenk tests 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Evenk 2 | 3 | A C++14 library for concurrent programming. 4 | 5 | The primary target platform for now is Linux x86-64. Additionally it might 6 | be used on Mac OS X x86-64 but, due to the lack of the futex system call, some 7 | features do not function there. 8 | 9 | The library itself is header-only, so it can be used simply by copying 10 | the header files wherever you like to include them from. 11 | 12 | [ Currently the above is not true for "task.h" and "thread.h", these files 13 | depend on "config.h", which is generated with the steps described below. ] 14 | 15 | The build step is needed for tests. It is required to have on your system 16 | the following tools installed: automake, autoconf and, of course, make and 17 | gcc or clang. As soon as you have these, just run the following commands: 18 | 19 | ``` 20 | > ./bootstrap 21 | > ./configure 22 | > make 23 | ``` -------------------------------------------------------------------------------- /bootstrap: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | autoreconf -i -f -v 3 | -------------------------------------------------------------------------------- /configure.ac: -------------------------------------------------------------------------------- 1 | dnl Process this file with autoconf to produce a configure script. 2 | 3 | dnl Initialize autoconf. 
4 | AC_PREREQ(2.68) 5 | AC_INIT([Evenk], [0.0], [ademakov@gmail.com]) 6 | AC_CONFIG_MACRO_DIR([m4]) 7 | AC_CONFIG_AUX_DIR([build-aux]) 8 | AC_CONFIG_SRCDIR([evenk/futex.h]) 9 | AC_CONFIG_HEADERS([evenk/config.h]) 10 | 11 | dnl Initialize automake. 12 | AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects]) 13 | 14 | dnl Determine the host system type. 15 | AC_CANONICAL_HOST 16 | 17 | dnl check for API variants 18 | AC_GNU_SOURCE 19 | 20 | dnl Checks for programs. 21 | AC_PROG_CXX 22 | AC_PROG_CXX_C_O 23 | AM_PROG_AR 24 | AC_PROG_RANLIB 25 | AC_PROG_INSTALL 26 | 27 | dnl Checks for libraries. 28 | AC_SEARCH_LIBS([pthread_create], [pthread], 29 | [], [AC_MSG_ERROR(Cannot find pthread library)]) 30 | 31 | dnl Checks for header files. 32 | AC_CHECK_HEADERS([pthread.h sched.h]) 33 | AC_CHECK_HEADERS([linux/futex.h]) 34 | AC_CHECK_HEADERS([xmmintrin.h]) 35 | 36 | dnl Checks for typedefs, structures, and compiler characteristics. 37 | AX_CXX_COMPILE_STDCXX_14([noext], [mandatory]) 38 | 39 | dnl Checks for library functions. 
40 | AC_CHECK_FUNCS(pthread_setaffinity_np) 41 | 42 | dnl Check command line arguments 43 | 44 | AC_CONFIG_FILES([ 45 | Makefile 46 | evenk/Makefile 47 | tests/Makefile]) 48 | AC_OUTPUT 49 | -------------------------------------------------------------------------------- /evenk/Makefile.am: -------------------------------------------------------------------------------- 1 | 2 | include_HEADERS = \ 3 | backoff.h \ 4 | basic.h \ 5 | bounded_queue.h \ 6 | conqueue.h \ 7 | futex.h \ 8 | spinlock.h \ 9 | synch.h \ 10 | synch_queue.h \ 11 | task.h \ 12 | thread.h \ 13 | thread_pool.h 14 | -------------------------------------------------------------------------------- /evenk/backoff.h: -------------------------------------------------------------------------------- 1 | // 2 | // Busy-Waiting Backoff Utilities 3 | // 4 | // Copyright (c) 2015-2017 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 23 | // 24 | 25 | #ifndef EVENK_BACKOFF_H_ 26 | #define EVENK_BACKOFF_H_ 27 | 28 | #include 29 | #include 30 | #include 31 | 32 | #include 33 | #include 34 | 35 | namespace evenk { 36 | 37 | // 38 | // Pause routines for busy waiting. 39 | // 40 | 41 | struct cpu_cycle 42 | { 43 | void operator()(std::uint32_t n) noexcept 44 | { 45 | while (n--) 46 | std::atomic_signal_fence(std::memory_order_relaxed); 47 | } 48 | }; 49 | 50 | struct cpu_relax 51 | { 52 | void operator()(std::uint32_t n) noexcept 53 | { 54 | while (n--) 55 | ::_mm_pause(); 56 | } 57 | }; 58 | 59 | struct nanosleep 60 | { 61 | void operator()(std::uint32_t n) noexcept 62 | { 63 | ::timespec ts = {.tv_sec = 0, .tv_nsec = n}; 64 | ::nanosleep(&ts, NULL); 65 | } 66 | }; 67 | 68 | // 69 | // Back-off policies for busy waiting. 70 | // 71 | 72 | // 73 | // The return value for operator() is true if backoff ceiling is reached and 74 | // false otherwise. 
75 | // 76 | 77 | struct no_backoff 78 | { 79 | bool operator()() noexcept 80 | { 81 | return true; 82 | } 83 | }; 84 | 85 | struct yield_backoff 86 | { 87 | bool operator()() noexcept 88 | { 89 | std::this_thread::yield(); 90 | return false; 91 | } 92 | }; 93 | 94 | template 95 | struct const_backoff : Pause 96 | { 97 | bool operator()() noexcept 98 | { 99 | Pause::operator()(count); 100 | return false; 101 | } 102 | }; 103 | 104 | template 105 | class linear_backoff : Pause 106 | { 107 | public: 108 | bool operator()() noexcept 109 | { 110 | Pause::operator()(count_); 111 | count_ += step; 112 | if (count_ > ceiling) { 113 | count_ = ceiling; 114 | return true; 115 | } 116 | return false; 117 | } 118 | 119 | private: 120 | std::uint32_t count_ = 0; 121 | }; 122 | 123 | template 124 | class exponential_backoff : Pause 125 | { 126 | public: 127 | bool operator()() noexcept 128 | { 129 | Pause::operator()(count_); 130 | count_ += count_ + 1; 131 | if (count_ > ceiling) { 132 | count_ = ceiling; 133 | return true; 134 | } 135 | return false; 136 | } 137 | 138 | private: 139 | std::uint32_t count_ = 0; 140 | }; 141 | 142 | template 143 | struct proportional_backoff : Pause 144 | { 145 | bool operator()(std::uint32_t factor) noexcept 146 | { 147 | Pause::operator()(count *factor); 148 | return false; 149 | } 150 | }; 151 | 152 | template 153 | bool 154 | proportional_adapter(Backoff &backoff, std::uint32_t) noexcept 155 | { 156 | return backoff(); 157 | } 158 | 159 | template 160 | bool 161 | proportional_adapter(proportional_backoff &backoff, std::uint32_t factor) noexcept 162 | { 163 | return backoff(factor); 164 | } 165 | 166 | template 167 | class composite_backoff : FirstBackoff, SecondBackoff 168 | { 169 | public: 170 | composite_backoff(FirstBackoff a, SecondBackoff b) noexcept 171 | : FirstBackoff(a), SecondBackoff(b), use_second_{false} 172 | { 173 | } 174 | 175 | bool operator()() noexcept 176 | { 177 | if (use_second_) 178 | return 
SecondBackoff::operator()(); 179 | use_second_ = FirstBackoff::operator()(); 180 | return false; 181 | } 182 | 183 | private: 184 | bool use_second_; 185 | }; 186 | 187 | } // namespace evenk 188 | 189 | #endif // !EVENK_BACKOFF_H_ 190 | -------------------------------------------------------------------------------- /evenk/basic.h: -------------------------------------------------------------------------------- 1 | // 2 | // Basic Definitions 3 | // 4 | // Copyright (c) 2015-2018 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 
23 | // 24 | 25 | #ifndef EVENK_BASIC_H_ 26 | #define EVENK_BASIC_H_ 27 | 28 | #include 29 | #include 30 | #include 31 | 32 | namespace evenk { 33 | 34 | constexpr std::size_t cache_line_size = 64; 35 | 36 | [[noreturn]] inline void 37 | throw_system_error(int err_num) 38 | { 39 | throw std::system_error(err_num, std::system_category()); 40 | } 41 | 42 | [[noreturn]] inline void 43 | throw_system_error(int err_num, const char *what) 44 | { 45 | throw std::system_error(err_num, std::system_category(), what); 46 | } 47 | 48 | [[noreturn]] inline void 49 | throw_system_error(int err_num, const std::string &what) 50 | { 51 | throw std::system_error(err_num, std::system_category(), what); 52 | } 53 | 54 | inline void * 55 | aligned_alloc(std::size_t alignment, std::size_t size) 56 | { 57 | void *result; 58 | if (::posix_memalign(&result, alignment, size)) 59 | throw std::bad_alloc(); 60 | return result; 61 | } 62 | 63 | inline void * 64 | cache_aligned_alloc(std::size_t size) 65 | { 66 | return aligned_alloc(cache_line_size, size); 67 | } 68 | 69 | class non_copyable 70 | { 71 | protected: 72 | constexpr non_copyable() noexcept = default; 73 | ~non_copyable() noexcept = default; 74 | 75 | non_copyable(const non_copyable &) = delete; 76 | non_copyable &operator=(const non_copyable &) = delete; 77 | }; 78 | 79 | } // namespace evenk 80 | 81 | #endif // !EVENK_BASIC_H_ 82 | -------------------------------------------------------------------------------- /evenk/bounded_queue.h: -------------------------------------------------------------------------------- 1 | // 2 | // Fast Bounded Concurrent Queue 3 | // 4 | // Copyright (c) 2015-2018 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 23 | // 24 | 25 | #ifndef EVENK_BOUNDED_QUEUE_H_ 26 | #define EVENK_BOUNDED_QUEUE_H_ 27 | 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | 34 | #include "backoff.h" 35 | #include "basic.h" 36 | #include "conqueue.h" 37 | #include "futex.h" 38 | #include "synch.h" 39 | 40 | namespace evenk { 41 | namespace bounded_queue { 42 | 43 | // The type used to count ring slots. 44 | typedef std::uint32_t count_t; 45 | // The type used to mark ring slots and as a futex too. 46 | typedef std::uint32_t token_t; 47 | 48 | namespace detail { 49 | 50 | // Status flags for ring slots. 51 | enum status_t : token_t { 52 | // the slot contains a value 53 | status_valid = 1, 54 | // the slot contains invalid value 55 | status_invalid = 2, 56 | // there is a waiting thread on the slot (if futex-based) 57 | status_waiting = 4, 58 | // the slot is closed 59 | status_closed = 8, 60 | }; 61 | 62 | enum close_t : std::uint8_t 63 | { 64 | open = 0, 65 | closing = 1, 66 | closed = 2, 67 | }; 68 | 69 | // The bit mask that selects value status. 
70 | constexpr token_t status_mask = status_valid | status_invalid; 71 | 72 | // The bit mask that selects ticket number. 73 | constexpr token_t ticket_mask = ~(status_mask | status_waiting | status_closed); 74 | 75 | // The minimum queue size that allows combined slot status and ticket encoding. 76 | constexpr count_t min_size = 16; 77 | 78 | // Single-threaded slot counter. 79 | class counter 80 | { 81 | public: 82 | count_t load() const 83 | { 84 | return count_; 85 | } 86 | 87 | count_t fetch_add(count_t addend) 88 | { 89 | count_t count = count_; 90 | count_ += addend; 91 | return count; 92 | } 93 | 94 | bool try_increment(count_t count) 95 | { 96 | count_ = count + 1; 97 | return true; 98 | } 99 | 100 | private: 101 | count_t count_ = 0; 102 | }; 103 | 104 | // Multi-threaded slot counter. 105 | class atomic_counter 106 | { 107 | public: 108 | count_t load() const 109 | { 110 | return count_.load(std::memory_order_relaxed); 111 | } 112 | 113 | count_t fetch_add(count_t addend) 114 | { 115 | return count_.fetch_add(addend, std::memory_order_relaxed); 116 | } 117 | 118 | bool try_increment(count_t count) 119 | { 120 | return count_.compare_exchange_strong(count, count + 1, 121 | std::memory_order_relaxed, 122 | std::memory_order_relaxed); 123 | } 124 | 125 | private: 126 | std::atomic count_ = {0}; 127 | }; 128 | 129 | } // namespace detail 130 | 131 | class spin : protected std::atomic 132 | { 133 | public: 134 | using base = std::atomic; 135 | 136 | void init(token_t t) 137 | { 138 | store(t, std::memory_order_relaxed); 139 | } 140 | 141 | void close() 142 | { 143 | } 144 | 145 | token_t load() const 146 | { 147 | return base::load(std::memory_order_acquire); 148 | } 149 | 150 | token_t wait(token_t) 151 | { 152 | return base::load(std::memory_order_relaxed); 153 | } 154 | 155 | void wake(token_t t) 156 | { 157 | store(t, std::memory_order_release); 158 | } 159 | }; 160 | 161 | class yield : public spin 162 | { 163 | public: 164 | token_t wait(token_t) 165 | 
{ 166 | std::this_thread::yield(); 167 | return base::load(std::memory_order_relaxed); 168 | } 169 | }; 170 | 171 | class futex : public spin 172 | { 173 | public: 174 | void close() 175 | { 176 | token_t t = base::load(std::memory_order_relaxed); 177 | token_t x = t | detail::status_closed; 178 | while (!compare_exchange_weak( 179 | t, x, std::memory_order_relaxed, std::memory_order_relaxed)) 180 | x = t | detail::status_closed; 181 | if ((t & detail::status_waiting) != 0) 182 | futex_wake(*this, INT32_MAX); 183 | } 184 | 185 | token_t wait(token_t t) 186 | { 187 | token_t x = t | detail::status_waiting; 188 | if (compare_exchange_strong( 189 | t, x, std::memory_order_relaxed, std::memory_order_relaxed) || 190 | t == x) { 191 | futex_wait(*this, x); 192 | t = base::load(std::memory_order_relaxed); 193 | } 194 | return t; 195 | } 196 | 197 | void wake(token_t t) 198 | { 199 | t = exchange(t, std::memory_order_release); 200 | if ((t & detail::status_waiting) != 0) 201 | futex_wake(*this, INT32_MAX); 202 | } 203 | }; 204 | 205 | template 206 | class synch : public spin 207 | { 208 | public: 209 | using lock_type = typename Synch::lock_type; 210 | using cond_var_type = typename Synch::cond_var_type; 211 | using lock_owner_type = typename Synch::lock_owner_type; 212 | 213 | void close() 214 | { 215 | lock_owner_type guard(lock_); 216 | token_t t = base::load(std::memory_order_relaxed); 217 | token_t x = t | detail::status_closed; 218 | store(x, std::memory_order_relaxed); 219 | cond_.notify_all(); 220 | } 221 | 222 | token_t wait(token_t t) 223 | { 224 | lock_owner_type guard(lock_); 225 | token_t v = base::load(std::memory_order_relaxed); 226 | if (t == v) { 227 | cond_.wait(guard); 228 | v = base::load(std::memory_order_relaxed); 229 | } 230 | return v; 231 | } 232 | 233 | void wake(token_t t) 234 | { 235 | lock_owner_type guard(lock_); 236 | store(t, std::memory_order_relaxed); 237 | cond_.notify_all(); 238 | } 239 | 240 | private: 241 | lock_type lock_; 242 | 
cond_var_type cond_; 243 | }; 244 | 245 | template 246 | class ring : non_copyable 247 | { 248 | public: 249 | using value_type = Value; 250 | using reference = value_type &; 251 | using const_reference = const value_type &; 252 | 253 | static_assert(std::is_nothrow_default_constructible::value, 254 | "value_type must be nothrow-default-constructible"); 255 | static_assert(std::is_nothrow_destructible::value, 256 | "value_type must be nothrow-destructible"); 257 | 258 | ring(count_t size) : ring_{create(size)}, mask_{size - 1} 259 | { 260 | for (count_t i = 0; i < size; i++) 261 | ring_[i].init(i & detail::ticket_mask); 262 | } 263 | 264 | ~ring() 265 | { 266 | destroy(); 267 | } 268 | 269 | // 270 | // State operations 271 | // 272 | 273 | // FIXME: If there are some waiting producers when the queue is closed 274 | // should they be allowed to finish? There is a danger that if there are 275 | // no active consumers then such producers will hang indefinitely. But 276 | // canceling these producers seems unfair when consumers are just a bit 277 | // slow at the moment and sooner or later should unblock the producers. 278 | // If there are no consumers a bounded queue blocks producers by design. 279 | // Therefore close() probably shouldn't try to solve this case. 280 | void close() noexcept 281 | { 282 | const count_t size = mask_ + 1; 283 | 284 | // Protect against a possible case of concurrent close. Only one 285 | // thread may pass beyond the following CAS condition and perform 286 | // actual close operation. 287 | detail::close_t flag = detail::closing; 288 | if (closed_.compare_exchange_strong(flag, detail::open, std::memory_order_acquire, 289 | std::memory_order_relaxed)) 290 | return; 291 | 292 | // Remember the position of the last allowed producer. 293 | last_ = tail_.fetch_add(size); 294 | 295 | // Finish the close operation as such. 
296 | closed_.store(detail::closed, std::memory_order_release); 297 | 298 | // Wake up possibly sleeping producers and consumers. 299 | for (count_t i = 0; i < size; i++) { 300 | ring_slot &slot = ring_[(last_ + i) & mask_]; 301 | slot.close(); 302 | } 303 | } 304 | 305 | bool is_closed() const noexcept 306 | { 307 | return closed_.load(std::memory_order_relaxed); 308 | } 309 | 310 | bool is_empty() const noexcept 311 | { 312 | // Assume this is called with no concurrent push operations. 313 | count_t tail = tail_.load(std::memory_order_acquire); 314 | count_t head = head_.load(std::memory_order_relaxed); 315 | return std::make_signed_t(tail - head) <= 0; 316 | } 317 | 318 | bool is_full() const noexcept 319 | { 320 | // Assume this is called with no concurrent pop operations. 321 | count_t head = head_.load(std::memory_order_acquire); 322 | count_t tail = tail_.load(std::memory_order_relaxed); 323 | return std::make_signed_t(tail - head) > mask_; 324 | } 325 | 326 | static bool is_lock_free() noexcept 327 | { 328 | return false; 329 | } 330 | 331 | // 332 | // Basic operations 333 | // 334 | 335 | template 336 | void push(const value_type &value, Backoff... backoff) 337 | { 338 | auto status = wait_push(value, std::forward(backoff)...); 339 | if (status != queue_op_status::success) 340 | throw status; 341 | } 342 | 343 | template 344 | void push(value_type &&value, Backoff &&... backoff) 345 | { 346 | auto status = wait_push(std::move(value), std::forward(backoff)...); 347 | if (status != queue_op_status::success) 348 | throw status; 349 | } 350 | 351 | template 352 | value_type value_pop(Backoff &&... backoff) 353 | { 354 | value_type value; 355 | auto status = wait_pop(value, std::forward(backoff)...); 356 | if (status != queue_op_status::success) 357 | throw status; 358 | return std::move(value); 359 | } 360 | 361 | // 362 | // Waiting operations 363 | // 364 | 365 | template 366 | queue_op_status wait_push(const value_type &value, Backoff &&... 
backoff) 367 | { 368 | const count_t count = tail_.fetch_add(1); 369 | const token_t token = count & detail::ticket_mask; 370 | ring_slot &slot = ring_[count & mask_]; 371 | 372 | auto status = wait_tail(slot, count, token, std::forward(backoff)...); 373 | if (status != queue_op_status::success) 374 | return status; 375 | 376 | put_value(slot, token, value); 377 | return queue_op_status::success; 378 | } 379 | 380 | template 381 | queue_op_status wait_push(value_type &&value, Backoff &&... backoff) 382 | { 383 | const count_t count = tail_.fetch_add(1); 384 | const token_t token = count & detail::ticket_mask; 385 | ring_slot &slot = ring_[count & mask_]; 386 | 387 | auto status = wait_tail(slot, count, token, std::forward(backoff)...); 388 | if (status != queue_op_status::success) 389 | return status; 390 | 391 | put_value(slot, token, std::move(value)); 392 | return queue_op_status::success; 393 | } 394 | 395 | template 396 | queue_op_status wait_pop(value_type &value, Backoff &&... backoff) 397 | { 398 | for (;;) { 399 | const count_t count = head_.fetch_add(1); 400 | const token_t token = count & detail::ticket_mask; 401 | ring_slot &slot = ring_[count & mask_]; 402 | 403 | auto status = wait_head(slot, count, token, std::forward(backoff)...); 404 | if (status != queue_op_status::success) { 405 | if (status == queue_op_status::empty) { 406 | slot.wake(token + mask_ + 1); 407 | continue; 408 | } 409 | return status; 410 | } 411 | 412 | get_value(slot, token, value); 413 | return queue_op_status::success; 414 | } 415 | } 416 | 417 | // 418 | // Non-waiting operations 419 | // 420 | 421 | queue_op_status try_push(const value_type &value) 422 | { 423 | const count_t count = tail_.load(std::memory_order_relaxed); 424 | const token_t token = count & detail::ticket_mask; 425 | ring_slot &slot = ring_[count & mask_]; 426 | 427 | token_t t = slot.load(); 428 | if ((t & detail::ticket_mask) != token) { 429 | if (is_past_last(count)) 430 | return queue_op_status::closed; 
431 | return queue_op_status::empty; 432 | } 433 | 434 | if (!head_.try_increment(count)) 435 | return queue_op_status::full; 436 | 437 | put_value(slot, token, value); 438 | return queue_op_status::success; 439 | } 440 | 441 | queue_op_status try_push(value_type &&value) 442 | { 443 | const count_t count = tail_.load(std::memory_order_relaxed); 444 | const token_t token = count & detail::ticket_mask; 445 | ring_slot &slot = ring_[count & mask_]; 446 | 447 | token_t t = slot.load(); 448 | if ((t & detail::ticket_mask) != token) { 449 | if (is_past_last(count)) 450 | return queue_op_status::closed; 451 | return queue_op_status::empty; 452 | } 453 | 454 | if (!head_.try_increment(count)) 455 | return queue_op_status::full; 456 | 457 | put_value(slot, token, std::move(value)); 458 | return queue_op_status::success; 459 | } 460 | 461 | queue_op_status try_pop(value_type &value) 462 | { 463 | const count_t count = head_.load(std::memory_order_relaxed); 464 | const token_t token = count & detail::ticket_mask; 465 | ring_slot &slot = ring_[count & mask_]; 466 | 467 | token_t t = slot.load(); 468 | if ((t & detail::ticket_mask) != token || (t & detail::status_mask) == 0) { 469 | if (is_past_last(count)) 470 | return queue_op_status::closed; 471 | return queue_op_status::empty; 472 | } 473 | 474 | if (!head_.try_increment(count)) 475 | return queue_op_status::empty; 476 | 477 | if ((t & detail::status_valid) == 0) { 478 | slot.wake(token + mask_ + 1); 479 | return queue_op_status::empty; 480 | } 481 | 482 | get_value(slot, token, value); 483 | return queue_op_status::success; 484 | } 485 | 486 | #if 0 && ENABLE_QUEUE_NONBLOCKING_OPS 487 | // 488 | // Non-blocking operations 489 | // 490 | 491 | queue_op_status nonblocking_pop(value_type &value) 492 | { 493 | } 494 | #endif 495 | 496 | private: 497 | struct alignas(cache_line_size) ring_slot : public Slot 498 | { 499 | value_type value; 500 | }; 501 | 502 | static ring_slot* create(std::size_t size) 503 | { 504 | if (size < 
detail::min_size) 505 | throw std::invalid_argument( 506 | "bounded_queue size must be at least" + std::to_string(detail::min_size)); 507 | if ((size & (size - 1)) != 0) 508 | throw std::invalid_argument( 509 | "bounded_queue size must be a power of two"); 510 | 511 | void *ring = cache_aligned_alloc(size * sizeof(ring_slot)); 512 | return new (ring) ring_slot[size]; 513 | } 514 | 515 | void destroy() 516 | { 517 | const count_t size = mask_ + 1; 518 | for (count_t i = 0; i < size; i++) 519 | ring_[i].~ring_slot(); 520 | std::free(ring_); 521 | } 522 | 523 | // FIXME: If somebody incessantly does wait_push() or wait_pop() despite 524 | // getting queue_op_status::closed then after 2^31 calls this check will 525 | // produce a wrong result. 526 | bool is_past_last(count_t count) 527 | { 528 | if (closed_.load(std::memory_order_acquire) != detail::closed) 529 | return false; 530 | return std::make_signed_t(last_ - count) <= 0; 531 | } 532 | 533 | queue_op_status wait_tail(ring_slot &slot, const count_t count, const token_t token) 534 | { 535 | token_t t = slot.load(); 536 | while ((t & detail::ticket_mask) != token) { 537 | if (is_past_last(count)) 538 | return queue_op_status::closed; 539 | t = slot.wait(t); 540 | } 541 | return queue_op_status::success; 542 | } 543 | 544 | template 545 | queue_op_status wait_tail(ring_slot &slot, const count_t count, const token_t token, Backoff backoff) 546 | { 547 | bool waiting = false; 548 | token_t t = slot.load(); 549 | while ((t & detail::ticket_mask) != token) { 550 | if (is_past_last(count)) 551 | return queue_op_status::closed; 552 | if (waiting) { 553 | t = slot.wait(t); 554 | continue; 555 | } 556 | waiting = backoff(); 557 | t = slot.load(); 558 | } 559 | return queue_op_status::success; 560 | } 561 | 562 | queue_op_status wait_head(ring_slot &slot, count_t count, token_t token) 563 | { 564 | token_t t = slot.load(); 565 | while ((t & detail::ticket_mask) != token) { 566 | if (is_past_last(count)) 567 | return 
queue_op_status::closed; 568 | t = slot.wait(t); 569 | } 570 | while ((t & detail::status_mask) == 0) { 571 | if (is_past_last(count)) 572 | return queue_op_status::closed; 573 | t = slot.wait(t); 574 | } 575 | if ((t & detail::status_valid) == 0) 576 | return queue_op_status::empty; 577 | return queue_op_status::success; 578 | } 579 | 580 | template 581 | queue_op_status wait_head(ring_slot &slot, count_t count, token_t token, Backoff backoff) 582 | { 583 | bool waiting = false; 584 | token_t t = slot.load(); 585 | while ((t & detail::ticket_mask) != token) { 586 | if (is_past_last(count)) 587 | return queue_op_status::closed; 588 | if (waiting) { 589 | t = slot.wait(t); 590 | continue; 591 | } 592 | waiting = backoff(); 593 | t = slot.wait(t); 594 | } 595 | while ((t & detail::status_mask) == 0) { 596 | if (is_past_last(count)) 597 | return queue_op_status::closed; 598 | if (waiting) { 599 | t = slot.wait(t); 600 | continue; 601 | } 602 | waiting = backoff(); 603 | t = slot.wait(t); 604 | } 605 | if ((t & detail::status_valid) == 0) 606 | return queue_op_status::empty; 607 | return queue_op_status::success; 608 | } 609 | 610 | template ::value> * = 612 | nullptr> 613 | void put_value(ring_slot &slot, count_t token, const value_type &value) noexcept 614 | { 615 | slot.value = value; 616 | slot.wake(token + detail::status_valid); 617 | } 618 | 619 | template ::value> * = 621 | nullptr> 622 | void put_value(ring_slot &slot, token_t token, const value_type &value) 623 | { 624 | try { 625 | slot.value = value; 626 | slot.wake(token | detail::status_valid); 627 | } catch (...) 
{ 628 | slot.wake(token | detail::status_invalid); 629 | throw; 630 | } 631 | } 632 | 633 | template ::value> * = 635 | nullptr> 636 | void put_value(ring_slot &slot, token_t token, value_type &&value) noexcept 637 | { 638 | slot.value = std::move(value); 639 | slot.wake(token | detail::status_valid); 640 | } 641 | 642 | template ::value> * = 644 | nullptr> 645 | void put_value(ring_slot &slot, token_t token, value_type &&value) 646 | { 647 | try { 648 | slot.value = std::move(value); 649 | slot.wake(token | detail::status_valid); 650 | } catch (...) { 651 | slot.wake(token | detail::status_invalid); 652 | throw; 653 | } 654 | } 655 | 656 | template ::value> * = 658 | nullptr> 659 | void get_value(ring_slot &slot, token_t token, value_type &value) noexcept 660 | { 661 | value = std::move(slot.value); 662 | slot.wake(token + mask_ + 1); 663 | } 664 | 665 | template ::value> * = 667 | nullptr> 668 | void get_value(ring_slot &slot, token_t token, value_type &value) 669 | { 670 | try { 671 | value = std::move(slot.value); 672 | slot.wake(token + mask_ + 1); 673 | } catch (...) { 674 | slot.wake(token + mask_ + 1); 675 | throw; 676 | } 677 | } 678 | 679 | ring_slot *ring_; 680 | const count_t mask_; 681 | 682 | std::atomic closed_ = { detail::open }; 683 | count_t last_; 684 | 685 | alignas(cache_line_size) ConsumerCounter head_; 686 | alignas(cache_line_size) ProducerCounter tail_; 687 | }; 688 | 689 | // A single-producer single-consumer queue. 690 | template 691 | using spsc = ring; 692 | 693 | // A single-producer multi-consumer queue. 694 | template 695 | using spmc = ring; 696 | 697 | // A multi-producer single-consumer queue. 698 | template 699 | using mpsc = ring; 700 | 701 | // A multi-producer multi-consumer queue. 
702 | template 703 | using mpmc = ring; 704 | 705 | } // namespace bounded_queue 706 | } // namespace evenk 707 | 708 | #endif // !EVENK_BOUNDED_QUEUE_H_ 709 | -------------------------------------------------------------------------------- /evenk/conqueue.h: -------------------------------------------------------------------------------- 1 | // 2 | // Concurrent Queue Basics and Utilities 3 | // 4 | // Copyright (c) 2016-2017 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 
//

#ifndef EVENK_CONQUEUE_H_
#define EVENK_CONQUEUE_H_

//
// The code in this file is based on the following proposals:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0260r0.html
// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2017/p0260r1.html
//
// The second one removes nonblocking methods but it is not clear if this
// a permanent or temporary decision.
//

#include <iterator>
#include <type_traits>
#include <utility>

#define ENABLE_QUEUE_NONBLOCKING_OPS 1

namespace evenk {

#if ENABLE_QUEUE_NONBLOCKING_OPS
enum class queue_op_status { success = 0, empty, full, closed, busy };
#else
enum class queue_op_status { success = 0, empty, full, closed };
#endif

// Abstract interface for a concurrent queue of Value elements.
template <typename Value>
class queue_base
{
public:
	using value_type = Value;
	using reference = value_type &;
	using const_reference = const value_type &;

	virtual ~queue_base() noexcept {}

	// State operations
	virtual void close() noexcept = 0;
	virtual bool is_closed() const noexcept = 0;
	virtual bool is_empty() const noexcept = 0;
	virtual bool is_full() const noexcept = 0;
	virtual bool is_lock_free() const noexcept = 0;

	// Basic operations
	virtual void push(const value_type &) = 0;
	virtual void push(value_type &&) = 0;
	virtual value_type value_pop() = 0;

	// Waiting operations
	virtual queue_op_status wait_push(const value_type &) = 0;
	virtual queue_op_status wait_push(value_type &&) = 0;
	virtual queue_op_status wait_pop(value_type &) = 0;

	// Non-waiting operations
	virtual queue_op_status try_push(const value_type &) = 0;
	virtual queue_op_status try_push(value_type &&) = 0;
	virtual queue_op_status try_pop(value_type &) = 0;

#if ENABLE_QUEUE_NONBLOCKING_OPS
	// Non-blocking operations
	virtual queue_op_status nonblocking_push(const value_type &) = 0;
	virtual queue_op_status nonblocking_push(value_type &&) = 0;
	virtual queue_op_status nonblocking_pop(value_type &) = 0;
#endif
};

// Adapts a concrete queue type to the queue_base interface by forwarding
// every virtual call to a non-owned queue instance.
template <typename Queue>
class queue_wrapper : public virtual queue_base<typename Queue::value_type>
{
public:
	using queue_type = Queue;

	using value_type = typename queue_type::value_type;
	using reference = value_type &;
	using const_reference = const value_type &;

	queue_wrapper(queue_type *queue) noexcept : queue_(queue)
	{
	}
	queue_wrapper(queue_type &queue) noexcept : queue_(&queue)
	{
	}

	virtual ~queue_wrapper() noexcept
	{
	}

	// State operations
	virtual void close() noexcept override
	{
		queue_->close();
	}
	virtual bool is_closed() const noexcept override
	{
		return queue_->is_closed();
	}
	virtual bool is_empty() const noexcept override
	{
		return queue_->is_empty();
	}
	virtual bool is_full() const noexcept override
	{
		return queue_->is_full();
	}
	virtual bool is_lock_free() const noexcept override
	{
		return queue_->is_lock_free();
	}

	// Basic operations
	virtual void push(const value_type &value) override
	{
		queue_->push(value);
	}
	virtual void push(value_type &&value) override
	{
		queue_->push(std::move(value));
	}
	virtual value_type value_pop() override
	{
		return queue_->value_pop();
	}

	// Waiting operations
	virtual queue_op_status wait_push(const value_type &value) override
	{
		return queue_->wait_push(value);
	}
	virtual queue_op_status wait_push(value_type &&value) override
	{
		return queue_->wait_push(std::move(value));
	}
	virtual queue_op_status wait_pop(value_type &value) override
	{
		return queue_->wait_pop(value);
	}

	// Non-waiting operations
	virtual queue_op_status try_push(const value_type &value) override
	{
		return queue_->try_push(value);
	}
	virtual queue_op_status try_push(value_type &&value) override
	{
		return queue_->try_push(std::move(value));
	}
	virtual queue_op_status try_pop(value_type &value) override
	{
		return queue_->try_pop(value);
	}

#if ENABLE_QUEUE_NONBLOCKING_OPS
	// Non-blocking operations
	virtual queue_op_status nonblocking_push(const value_type &value) override
	{
		return queue_->nonblocking_push(value);
	}
	virtual queue_op_status nonblocking_push(value_type &&value) override
	{
		return queue_->nonblocking_push(std::move(value));
	}
	virtual queue_op_status nonblocking_pop(value_type &value) override
	{
		return queue_->nonblocking_pop(value);
	}
#endif // ENABLE_QUEUE_NONBLOCKING_OPS

private:
	queue_type *queue_;
};

namespace detail {

//
// A concurrent queue input iterator. Unlike the underlying queue a given
// iterator instance is not concurrent itself. However distinct iterator
// instances for the same queue can be used from different threads and so
// provide concurrent access.
//

template <typename Queue>
class queue_input_iterator
{
public:
	using queue_type = Queue;

	using iterator_category = std::input_iterator_tag;

	using value_type = typename queue_type::value_type;
	using difference_type = void;
	using pointer = const value_type *;
	using reference = const value_type &;

	static_assert(std::is_nothrow_default_constructible<value_type>::value,
		      "value_type must be nothrow-default-constructible");
	static_assert(std::is_nothrow_destructible<value_type>::value,
		      "value_type must be nothrow-destructible");

	queue_input_iterator(queue_type &queue) noexcept : queue_(&queue)
	{
		pop_value();
	}

	constexpr queue_input_iterator() noexcept = default;
	queue_input_iterator(const queue_input_iterator &other) = default;
	queue_input_iterator &operator=(const queue_input_iterator &other) = default;

	queue_input_iterator &operator++()
	{
		pop_value();
		return *this;
	}
	queue_input_iterator operator++(int)
	{
		queue_input_iterator other(*this);
		pop_value();
		return other;
	}

	pointer operator->() const noexcept
	{
		return &value_;
	}
	reference operator*() const noexcept
	{
		return value_;
	}

	bool operator==(const queue_input_iterator &rhs) const noexcept
	{
		return queue_ == rhs.queue_;
	}
	bool operator!=(const queue_input_iterator &rhs) const noexcept
	{
		return queue_ != rhs.queue_;
	}

private:
	// Fetch the next value; a closed queue turns this iterator into the
	// end-of-range (null queue) iterator.
	void pop_value()
	{
		auto status = queue_->wait_pop(value_);
		if (status == queue_op_status::closed)
			queue_ = nullptr;
	}

	queue_type *queue_ = nullptr;
	value_type value_;
};

template <typename Queue>
class queue_output_iterator
{
public:
	using queue_type = Queue;

	using iterator_category = std::output_iterator_tag;

	using value_type = typename queue_type::value_type;
	using difference_type = void;
	using pointer = void;
	using reference = void;

	queue_output_iterator(queue_type &queue) noexcept : queue_(&queue)
	{
	}

	constexpr queue_output_iterator() noexcept = default;
	queue_output_iterator(const queue_output_iterator &other) noexcept = default;
	queue_output_iterator &operator=(const queue_output_iterator &other) noexcept = default;

	queue_output_iterator &operator=(const value_type &value)
	{
		auto status = queue_->wait_push(value);
		if (status != queue_op_status::success) {
			queue_ = nullptr;
			throw status;
		}
		return *this;
	}
	queue_output_iterator &operator=(value_type &&value)
	{
		auto status = queue_->wait_push(std::move(value));
		if (status != queue_op_status::success) {
			queue_ = nullptr;
			throw status;
		}
		return *this;
	}

	// FIX: these accessors were declared const while returning a
	// non-const reference to *this, which is ill-formed once the
	// template is instantiated. An output iterator is mutated through
	// them anyway, so they are non-const here.
	queue_output_iterator &operator*() noexcept
	{
		return *this;
	}
	queue_output_iterator &operator++() noexcept
	{
		return *this;
	}
	queue_output_iterator &operator++(int) noexcept
	{
		return *this;
	}

	bool operator==(const queue_output_iterator &rhs) const noexcept
	{
		return queue_ == rhs.queue_;
	}
	bool operator!=(const queue_output_iterator &rhs) const noexcept
	{
		return queue_ != rhs.queue_;
	}

private:
	queue_type *queue_ = nullptr;
};

} // namespace detail

// The producer-side view of a queue: push operations and output iterators.
template <typename Queue>
class generic_queue_back
{
public:
	using queue_type = Queue;

	using value_type = typename queue_type::value_type;
	using reference = value_type &;
	using const_reference = const value_type &;

	using iterator = detail::queue_output_iterator<queue_type>;
	using const_iterator = const iterator;

	generic_queue_back(queue_type *queue) noexcept : queue_(queue)
	{
	}
	generic_queue_back(queue_type &queue) noexcept : queue_(&queue)
	{
	}

	generic_queue_back(const generic_queue_back &other) noexcept = default;
	generic_queue_back &operator=(const generic_queue_back &other) noexcept = default;

	// State operations
	void close() noexcept
	{
		queue_->close();
	}
	bool is_closed() const noexcept
	{
		return queue_->is_closed();
	}
	bool is_empty() const noexcept
	{
		return queue_->is_empty();
	}
	bool is_full() const noexcept
	{
		return queue_->is_full();
	}
	bool is_lock_free() const noexcept
	{
		return queue_->is_lock_free();
	}
	bool has_queue() const noexcept
	{
		return queue_ != nullptr;
	}

	// Iterators
	// FIX: construct the iterator from the underlying queue, not from
	// this wrapper -- queue_output_iterator's constructor takes
	// queue_type&, and no conversion from generic_queue_back exists.
	iterator begin() noexcept
	{
		return iterator(*queue_);
	}
	iterator end() noexcept
	{
		return iterator();
	}
	const_iterator cbegin() noexcept
	{
		return const_iterator(*queue_);
	}
	const_iterator cend() noexcept
	{
		return const_iterator();
	}

	// Basic operations
	void push(const value_type &value)
	{
		queue_->push(value);
	}
	void push(value_type &&value)
	{
		queue_->push(std::move(value));
	}

	// Waiting operations
	queue_op_status wait_push(const value_type &value)
	{
		return queue_->wait_push(value);
	}
	queue_op_status wait_push(value_type &&value)
	{
		return queue_->wait_push(std::move(value));
	}

	// Non-waiting operations
	queue_op_status try_push(const value_type &value)
	{
		return queue_->try_push(value);
	}
	queue_op_status try_push(value_type &&value)
	{
		return queue_->try_push(std::move(value));
	}

#if ENABLE_QUEUE_NONBLOCKING_OPS
	// Non-blocking operations
	queue_op_status nonblocking_push(const value_type &value)
	{
		return queue_->nonblocking_push(value);
	}
	queue_op_status nonblocking_push(value_type &&value)
	{
		return queue_->nonblocking_push(std::move(value));
	}
#endif

private:
	queue_type *queue_;
};

// The consumer-side view of a queue: pop operations and input iterators.
template <typename Queue>
class generic_queue_front
{
public:
	using queue_type = Queue;

	using value_type = typename queue_type::value_type;
	using reference = value_type &;
	using const_reference = const value_type &;

	using iterator = detail::queue_input_iterator<queue_type>;
	using const_iterator = const iterator;

	generic_queue_front(queue_type *queue) noexcept : queue_(queue)
	{
	}
	generic_queue_front(queue_type &queue) noexcept : queue_(&queue)
	{
	}

	generic_queue_front(const generic_queue_front &other) noexcept = default;
	generic_queue_front &operator=(const generic_queue_front &other) noexcept = default;

	// State operations
	void close() noexcept
	{
		queue_->close();
	}
	bool is_closed() const noexcept
	{
		return queue_->is_closed();
	}
	bool is_empty() const noexcept
	{
		return queue_->is_empty();
	}
	bool is_full() const noexcept
	{
		return queue_->is_full();
	}
	bool is_lock_free() const noexcept
	{
		return queue_->is_lock_free();
	}
	bool has_queue() const noexcept
	{
		return queue_ != nullptr;
	}

	// Iterators
	// FIX: construct from *queue_, see generic_queue_back::begin().
	iterator begin() noexcept
	{
		return iterator(*queue_);
	}
	iterator end() noexcept
	{
		return iterator();
	}
	const_iterator cbegin() noexcept
	{
		return const_iterator(*queue_);
	}
	const_iterator cend() noexcept
	{
		return const_iterator();
	}

	// Basic operations
	value_type value_pop()
	{
		return queue_->value_pop();
	}

	// Waiting operations
	queue_op_status wait_pop(value_type &value)
	{
		return queue_->wait_pop(value);
	}

	// Non-waiting operations
	queue_op_status try_pop(value_type &value)
	{
		return queue_->try_pop(value);
	}

#if ENABLE_QUEUE_NONBLOCKING_OPS
	// Non-blocking operations
	queue_op_status nonblocking_pop(value_type &value)
	{
		return queue_->nonblocking_pop(value);
	}
#endif

private:
	queue_type *queue_;
};

template <typename Value>
using queue_back = generic_queue_back<queue_base<Value>>;

template <typename Value>
using queue_front = generic_queue_front<queue_base<Value>>;

} // namespace evenk

#endif // !EVENK_CONQUEUE_H_
//
// Linux Futex Support
//
// Copyright (c) 2015 Aleksey Demakov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef EVENK_FUTEX_H_
#define EVENK_FUTEX_H_

#include <atomic>
#include <cerrno>
#include <cstdint>

#if __linux__
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#endif

namespace evenk {

typedef std::atomic<std::uint32_t> futex_t;

// Block until the futex word changes from the given value. Returns 0 on
// success or a negated errno value (-EAGAIN when the word differs from
// 'value' already, -ENOSYS on non-Linux systems).
inline int
futex_wait(futex_t &futex __attribute__((unused)), std::uint32_t value __attribute__((unused)))
{
#if __linux__
#if __x86_64__
	// Direct syscall via inline asm to avoid the libc wrapper overhead;
	// r10 (the timeout argument) is zeroed for an indefinite wait.
	unsigned result;
	__asm__ __volatile__("xor %%r10, %%r10\n\t"
			     "syscall"
			     : "=a"(result), "+m"(futex)
			     : "0"(SYS_futex), "D"(&futex), "S"(FUTEX_WAIT_PRIVATE), "d"(value)
			     : "cc", "rcx", "r10", "r11", "memory");
	return (result > (unsigned) -4096) ? (int) result : 0;
#else
	if (syscall(SYS_futex, &futex, FUTEX_WAIT_PRIVATE, value, NULL, NULL, 0) == -1)
		return -errno;
	else
		return 0;
#endif
#else
	return -ENOSYS;
#endif
}

// Wake up to 'count' waiters on the futex word. Returns 0 or a negated
// errno value.
inline int
futex_wake(futex_t &futex __attribute__((unused)), int count __attribute__((unused)))
{
#if __linux__
#if __x86_64__
	unsigned result;
	__asm__ __volatile__("syscall"
			     : "=a"(result), "+m"(futex)
			     : "0"(SYS_futex), "D"(&futex), "S"(FUTEX_WAKE_PRIVATE), "d"(count)
			     : "cc", "rcx", "r11");
	return (result > (unsigned) -4096) ? (int) result : 0;
#else
	if (syscall(SYS_futex, &futex, FUTEX_WAKE_PRIVATE, count, NULL, NULL, 0) == -1)
		return -errno;
	else
		return 0;
#endif
#else
	return -ENOSYS;
#endif
}

// Wake up to futex_count waiters and requeue up to queue_count further
// waiters onto 'queue' (unconditional FUTEX_REQUEUE).
inline int
futex_requeue(futex_t &futex __attribute__((unused)),
	      int futex_count __attribute__((unused)),
	      int queue_count __attribute__((unused)),
	      futex_t &queue __attribute__((unused)))
{
#if __linux__
#if __x86_64__
	unsigned result;
	register int arg4 __asm__("r10") = queue_count;
	register void *arg5 __asm__("r8") = &queue;
	__asm__ __volatile__("syscall"
			     : "=a"(result), "+m"(futex)
			     : "0"(SYS_futex),
			       "D"(&futex),
			       "S"(FUTEX_REQUEUE_PRIVATE),
			       "d"(futex_count),
			       "r"(arg4),
			       "r"(arg5)
			     : "cc", "rcx", "r11");
	return (result > (unsigned) -4096) ? (int) result : 0;
#else
	if (syscall(SYS_futex,
		    &futex,
		    FUTEX_REQUEUE_PRIVATE,
		    futex_count,
		    queue_count,
		    &queue,
		    0)
	    == -1)
		return -errno;
	else
		return 0;
#endif
#else
	return -ENOSYS;
#endif
}

// Same as above but fails with -EAGAIN unless the futex word still equals
// futex_value (FUTEX_CMP_REQUEUE).
inline int
futex_requeue(futex_t &futex __attribute__((unused)),
	      int futex_count __attribute__((unused)),
	      int queue_count __attribute__((unused)),
	      futex_t &queue __attribute__((unused)),
	      std::uint32_t futex_value __attribute__((unused)))
{
#if __linux__
#if __x86_64__
	unsigned result;
	register int arg4 __asm__("r10") = queue_count;
	register void *arg5 __asm__("r8") = &queue;
	register int arg6 __asm__("r9") = futex_value;
	__asm__ __volatile__("syscall"
			     : "=a"(result), "+m"(futex)
			     : "0"(SYS_futex),
			       "D"(&futex),
			       "S"(FUTEX_CMP_REQUEUE_PRIVATE),
			       "d"(futex_count),
			       "r"(arg4),
			       "r"(arg5),
			       "r"(arg6)
			     : "cc", "rcx", "r11");
	return (result > (unsigned) -4096) ? (int) result : 0;
#else
	if (syscall(SYS_futex,
		    &futex,
		    FUTEX_CMP_REQUEUE_PRIVATE,
		    futex_count,
		    queue_count,
		    &queue,
		    futex_value)
	    == -1)
		return -errno;
	else
		return 0;
#endif
#else
	return -ENOSYS;
#endif
}

} // namespace evenk

#endif // !EVENK_FUTEX_H_
//
// Spin Locks
//
// Copyright (c) 2015-2019 Aleksey Demakov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
23 | // 24 | 25 | #ifndef EVENK_SPINLOCK_H_ 26 | #define EVENK_SPINLOCK_H_ 27 | 28 | #include 29 | #include 30 | 31 | #include "backoff.h" 32 | #include "basic.h" 33 | 34 | namespace evenk { 35 | 36 | class spin_lock : non_copyable 37 | { 38 | public: 39 | void lock() noexcept 40 | { 41 | lock(no_backoff{}); 42 | } 43 | 44 | template 45 | void lock(Backoff backoff) noexcept 46 | { 47 | while (lock_.test_and_set(std::memory_order_acquire)) 48 | backoff(); 49 | } 50 | 51 | bool try_lock() noexcept 52 | { 53 | return !lock_.test_and_set(std::memory_order_acquire); 54 | } 55 | 56 | void unlock() noexcept 57 | { 58 | lock_.clear(std::memory_order_release); 59 | } 60 | 61 | private: 62 | std::atomic_flag lock_ = ATOMIC_FLAG_INIT; 63 | }; 64 | 65 | class tatas_lock : non_copyable 66 | { 67 | public: 68 | void lock() noexcept 69 | { 70 | lock(no_backoff{}); 71 | } 72 | 73 | template 74 | void lock(Backoff backoff) noexcept 75 | { 76 | while (lock_.exchange(true, std::memory_order_acquire)) { 77 | do 78 | backoff(); 79 | while (lock_.load(std::memory_order_relaxed)); 80 | } 81 | } 82 | 83 | bool try_lock() noexcept 84 | { 85 | return !lock_.exchange(true, std::memory_order_acquire); 86 | } 87 | 88 | void unlock() noexcept 89 | { 90 | lock_.store(false, std::memory_order_release); 91 | } 92 | 93 | private: 94 | std::atomic lock_ = ATOMIC_VAR_INIT(false); 95 | }; 96 | 97 | class ticket_lock : non_copyable 98 | { 99 | public: 100 | void lock() noexcept 101 | { 102 | lock(no_backoff{}); 103 | } 104 | 105 | template 106 | void lock(Backoff backoff) noexcept 107 | { 108 | base_type tail = tail_.fetch_add(1, std::memory_order_relaxed); 109 | for (;;) { 110 | base_type head = head_.load(std::memory_order_acquire); 111 | if (tail == head) 112 | break; 113 | proportional_adapter(backoff, static_cast(tail - head)); 114 | } 115 | } 116 | 117 | bool try_lock() noexcept 118 | { 119 | base_type head = head_.load(std::memory_order_acquire); 120 | base_type tail = 
tail_.load(std::memory_order_relaxed); 121 | return head == tail 122 | && tail_.compare_exchange_strong( 123 | tail, tail + 1, std::memory_order_relaxed); 124 | } 125 | 126 | void unlock() noexcept 127 | { 128 | base_type head = head_.load(std::memory_order_relaxed); 129 | head_.store(head + 1, std::memory_order_release); 130 | } 131 | 132 | private: 133 | using base_type = std::uint16_t; 134 | 135 | std::atomic head_ = ATOMIC_VAR_INIT(0); 136 | std::atomic tail_ = ATOMIC_VAR_INIT(0); 137 | }; 138 | 139 | class shared_ticket_lock : non_copyable 140 | { 141 | public: 142 | void lock() noexcept 143 | { 144 | lock(no_backoff{}); 145 | } 146 | 147 | template 148 | void lock(Backoff backoff) noexcept 149 | { 150 | base_type tail = tail_.fetch_add(exclusive_step, std::memory_order_relaxed); 151 | for (;;) { 152 | base_type head = head_.load(std::memory_order_acquire); 153 | if (tail == head) 154 | break; 155 | proportional_adapter(backoff, static_cast(tail - head)); 156 | } 157 | } 158 | 159 | bool try_lock() noexcept 160 | { 161 | base_type head = head_.load(std::memory_order_acquire); 162 | base_type tail = tail_.load(std::memory_order_relaxed); 163 | return head == tail 164 | && tail_.compare_exchange_strong( 165 | tail, tail + exclusive_step, std::memory_order_relaxed); 166 | 167 | } 168 | 169 | void unlock() noexcept 170 | { 171 | base_type head = head_.load(std::memory_order_relaxed); 172 | head_.store(head + exclusive_step, std::memory_order_release); 173 | } 174 | 175 | void lock_shared() noexcept 176 | { 177 | lock_shared(no_backoff{}); 178 | } 179 | 180 | template 181 | void lock_shared(Backoff backoff) noexcept 182 | { 183 | base_type tail = tail_.fetch_add(shared_step, std::memory_order_relaxed); 184 | for (tail &= exclusive_mask;;) { 185 | base_type head = head_.load(std::memory_order_acquire); 186 | if (tail == (head & exclusive_mask)) 187 | break; 188 | proportional_adapter(backoff, static_cast(tail - head)); 189 | } 190 | } 191 | 192 | bool 
try_lock_shared() noexcept 193 | { 194 | base_type head = head_.load(std::memory_order_acquire); 195 | base_type tail = tail_.load(std::memory_order_relaxed); 196 | return (head & exclusive_mask) == (tail & exclusive_mask) 197 | && tail_.compare_exchange_strong( 198 | tail, tail + shared_step, std::memory_order_relaxed); 199 | 200 | } 201 | 202 | void unlock_shared() noexcept 203 | { 204 | head_.fetch_add(shared_step, std::memory_order_release); 205 | } 206 | 207 | private: 208 | #if EVENK_SHARED_TICKET_TESTING 209 | #pragma message("using very small shared ticket size to trigger possible bugs with more probability") 210 | using base_type = std::uint8_t; 211 | #else 212 | using base_type = std::uint32_t; 213 | #endif 214 | static constexpr base_type shared_step = 1 << (8 * sizeof(base_type) / 2); 215 | static constexpr base_type exclusive_mask = shared_step - 1; 216 | static constexpr base_type exclusive_step = 1; 217 | 218 | std::atomic head_ = ATOMIC_VAR_INIT(0); 219 | std::atomic tail_ = ATOMIC_VAR_INIT(0); 220 | }; 221 | 222 | } // namespace evenk 223 | 224 | #endif // !EVENK_SPINLOCK_H_ 225 | -------------------------------------------------------------------------------- /evenk/synch.h: -------------------------------------------------------------------------------- 1 | // 2 | // Synchronization Primitives 3 | // 4 | // Copyright (c) 2015-2017 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial 
portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 23 | // 24 | 25 | #ifndef EVENK_SYNCH_H_ 26 | #define EVENK_SYNCH_H_ 27 | 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | #include 34 | 35 | #include "backoff.h" 36 | #include "basic.h" 37 | #include "futex.h" 38 | 39 | namespace evenk { 40 | 41 | // 42 | // Mutexes 43 | // 44 | 45 | class posix_mutex : non_copyable 46 | { 47 | public: 48 | using native_handle_type = pthread_mutex_t *; 49 | 50 | constexpr posix_mutex() noexcept = default; 51 | 52 | ~posix_mutex() noexcept 53 | { 54 | pthread_mutex_destroy(&mutex_); 55 | } 56 | 57 | void lock() 58 | { 59 | int rc = pthread_mutex_lock(&mutex_); 60 | if (rc) 61 | throw_system_error(rc, "pthread_mutex_lock()"); 62 | } 63 | 64 | bool try_lock() noexcept 65 | { 66 | return pthread_mutex_trylock(&mutex_) == 0; 67 | } 68 | 69 | void unlock() noexcept 70 | { 71 | pthread_mutex_unlock(&mutex_); 72 | } 73 | 74 | native_handle_type native_handle() noexcept 75 | { 76 | return &mutex_; 77 | } 78 | 79 | private: 80 | pthread_mutex_t mutex_ = PTHREAD_MUTEX_INITIALIZER; 81 | }; 82 | 83 | class futex_lock : non_copyable 84 | { 85 | public: 86 | using native_handle_type = futex_t &; 87 | 88 | constexpr futex_lock() noexcept = default; 89 | 90 | void lock() noexcept 91 | { 92 | lock(no_backoff{}); 93 | } 94 | 95 | template 96 | void lock(Backoff backoff) noexcept 97 | { 98 | std::uint32_t value = 0; 99 | while (!futex_.compare_exchange_strong( 100 | value, 1, 
std::memory_order_acquire, std::memory_order_relaxed)) { 101 | if (backoff()) { 102 | if (value == 2 103 | || futex_.exchange(2, std::memory_order_acquire)) { 104 | do 105 | futex_wait(futex_, 2); 106 | while (futex_.exchange(2, std::memory_order_acquire)); 107 | } 108 | break; 109 | } 110 | value = 0; 111 | } 112 | } 113 | 114 | bool try_lock() noexcept 115 | { 116 | std::uint32_t value = 0; 117 | return futex_.compare_exchange_strong( 118 | value, 1, std::memory_order_acquire, std::memory_order_relaxed); 119 | } 120 | 121 | void unlock() noexcept 122 | { 123 | if (futex_.fetch_sub(1, std::memory_order_release) != 1) { 124 | futex_.store(0, std::memory_order_relaxed); 125 | futex_wake(futex_, 1); 126 | } 127 | } 128 | 129 | native_handle_type native_handle() noexcept 130 | { 131 | return futex_; 132 | } 133 | 134 | private: 135 | futex_t futex_ = ATOMIC_VAR_INIT(0); 136 | }; 137 | 138 | // 139 | // Lock Guard 140 | // 141 | 142 | template 143 | class lock_guard : non_copyable 144 | { 145 | public: 146 | using mutex_type = Lock; 147 | 148 | lock_guard(mutex_type &mutex) : mutex_(&mutex), owns_lock_(false) 149 | { 150 | lock(); 151 | } 152 | 153 | template 154 | lock_guard(mutex_type &mutex, Backoff backoff) : mutex_(&mutex), owns_lock_(false) 155 | { 156 | lock(backoff); 157 | } 158 | 159 | lock_guard(mutex_type &mutex, std::adopt_lock_t) noexcept 160 | : mutex_(&mutex), owns_lock_(true) 161 | { 162 | } 163 | 164 | lock_guard(mutex_type &mutex, std::defer_lock_t) noexcept 165 | : mutex_(&mutex), owns_lock_(false) 166 | { 167 | } 168 | 169 | lock_guard(mutex_type &mutex, std::try_to_lock_t) noexcept 170 | : mutex_(&mutex), owns_lock_(false) 171 | { 172 | try_lock(); 173 | } 174 | 175 | ~lock_guard() noexcept 176 | { 177 | if (owns_lock_) 178 | mutex_->unlock(); 179 | } 180 | 181 | void lock() 182 | { 183 | if (owns_lock_) 184 | throw_system_error(int(std::errc::resource_deadlock_would_occur)); 185 | mutex_->lock(); 186 | owns_lock_ = true; 187 | } 188 | 189 | 
template 190 | void lock(Backoff backoff) 191 | { 192 | if (owns_lock_) 193 | throw_system_error(int(std::errc::resource_deadlock_would_occur)); 194 | mutex_->lock(backoff); 195 | owns_lock_ = true; 196 | } 197 | 198 | bool try_lock() 199 | { 200 | if (owns_lock_) 201 | throw_system_error(int(std::errc::resource_deadlock_would_occur)); 202 | owns_lock_ = mutex_->try_lock(); 203 | return owns_lock_; 204 | } 205 | 206 | void unlock() 207 | { 208 | if (!owns_lock_) 209 | throw_system_error(int(std::errc::operation_not_permitted)); 210 | mutex_->unlock(); 211 | owns_lock_ = false; 212 | } 213 | 214 | mutex_type *mutex() noexcept 215 | { 216 | return mutex_; 217 | } 218 | 219 | bool owns_lock() const noexcept 220 | { 221 | return owns_lock_; 222 | } 223 | 224 | private: 225 | mutex_type *mutex_; 226 | bool owns_lock_; 227 | }; 228 | 229 | // 230 | // Condition Variables 231 | // 232 | 233 | class posix_cond_var : non_copyable 234 | { 235 | public: 236 | using native_handle_type = pthread_cond_t *; 237 | 238 | constexpr posix_cond_var() noexcept = default; 239 | 240 | ~posix_cond_var() noexcept 241 | { 242 | pthread_cond_destroy(&cond_); 243 | } 244 | 245 | void wait(std::unique_lock &lock) noexcept 246 | { 247 | int rc = pthread_cond_wait(&cond_, lock.mutex()->native_handle()); 248 | if (rc) 249 | throw_system_error(rc, "pthread_cond_wait()"); 250 | } 251 | 252 | void notify_one() noexcept 253 | { 254 | pthread_cond_signal(&cond_); 255 | } 256 | 257 | void notify_all() noexcept 258 | { 259 | pthread_cond_broadcast(&cond_); 260 | } 261 | 262 | native_handle_type native_handle() noexcept 263 | { 264 | return &cond_; 265 | } 266 | 267 | private: 268 | pthread_cond_t cond_ = PTHREAD_COND_INITIALIZER; 269 | }; 270 | 271 | class futex_cond_var : non_copyable 272 | { 273 | public: 274 | constexpr futex_cond_var() noexcept = default; 275 | 276 | void wait(lock_guard &guard) noexcept 277 | { 278 | futex_lock *owner = guard.mutex(); 279 | if (owner_ != nullptr && owner_ != owner) 
280 | #if 0 281 | throw std::invalid_argument( 282 | "different locks used for the same condition variable."); 283 | #else 284 | std::terminate(); 285 | #endif 286 | owner_.store(owner, std::memory_order_relaxed); 287 | 288 | count_.fetch_add(1, std::memory_order_relaxed); 289 | std::atomic_thread_fence(std::memory_order_acq_rel); 290 | std::uint32_t value = futex_.load(std::memory_order_relaxed); 291 | 292 | owner->unlock(); 293 | 294 | futex_wait(futex_, value); 295 | 296 | futex_t &owner_futex = owner->native_handle(); 297 | count_.fetch_sub(1, std::memory_order_relaxed); 298 | while (owner_futex.exchange(2, std::memory_order_acquire)) 299 | futex_wait(owner_futex, 2); 300 | } 301 | 302 | void notify_one() noexcept 303 | { 304 | futex_.fetch_add(1, std::memory_order_acquire); 305 | if (count_.load(std::memory_order_relaxed)) 306 | futex_wake(futex_, 1); 307 | } 308 | 309 | void notify_all() noexcept 310 | { 311 | futex_.fetch_add(1, std::memory_order_acquire); 312 | if (count_.load(std::memory_order_relaxed)) { 313 | futex_lock *owner = owner_.load(std::memory_order_relaxed); 314 | if (owner) { 315 | futex_requeue(futex_, 316 | 1, 317 | std::numeric_limits::max(), 318 | owner->native_handle()); 319 | } 320 | } 321 | } 322 | 323 | private: 324 | futex_t futex_ = ATOMIC_VAR_INIT(0); 325 | futex_t count_ = ATOMIC_VAR_INIT(0); 326 | std::atomic owner_ = ATOMIC_VAR_INIT(nullptr); 327 | }; 328 | 329 | // 330 | // Synchronization Traits 331 | // 332 | 333 | struct std_synch 334 | { 335 | using lock_type = std::mutex; 336 | using cond_var_type = std::condition_variable; 337 | using lock_owner_type = std::unique_lock; 338 | }; 339 | 340 | struct posix_synch 341 | { 342 | using lock_type = posix_mutex; 343 | using cond_var_type = posix_cond_var; 344 | using lock_owner_type = std::unique_lock; 345 | }; 346 | 347 | struct futex_synch 348 | { 349 | using lock_type = futex_lock; 350 | using cond_var_type = futex_cond_var; 351 | using lock_owner_type = lock_guard; 352 | }; 353 
| 354 | #if __linux__ 355 | using default_synch = futex_synch; 356 | #else 357 | using default_synch = std_synch; 358 | #endif 359 | 360 | } // namespace evenk 361 | 362 | #endif // !EVENK_SYNCH_H_ 363 | -------------------------------------------------------------------------------- /evenk/synch_queue.h: -------------------------------------------------------------------------------- 1 | // 2 | // Simple Concurrent Queue 3 | // 4 | // Copyright (c) 2015-2017 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 
23 | // 24 | 25 | #ifndef EVENK_SYNCH_QUEUE_H_ 26 | #define EVENK_SYNCH_QUEUE_H_ 27 | 28 | #include 29 | 30 | #include "conqueue.h" 31 | #include "synch.h" 32 | 33 | namespace evenk { 34 | 35 | template > 36 | class synch_queue : non_copyable 37 | { 38 | public: 39 | using value_type = Value; 40 | using reference = value_type &; 41 | using const_reference = const value_type &; 42 | 43 | using lock_type = typename Synch::lock_type; 44 | using cond_var_type = typename Synch::cond_var_type; 45 | using lock_owner_type = typename Synch::lock_owner_type; 46 | 47 | using sequence_type = Sequence; 48 | 49 | synch_queue() noexcept : closed_(false) 50 | { 51 | } 52 | 53 | synch_queue(synch_queue &&other) noexcept : closed_(other.closed_) 54 | { 55 | std::swap(queue_, other.queue_); 56 | } 57 | 58 | // 59 | // State operations 60 | // 61 | 62 | void close() noexcept 63 | { 64 | lock_owner_type guard(lock_); 65 | closed_ = true; 66 | cond_.notify_all(); 67 | } 68 | 69 | bool is_closed() const noexcept 70 | { 71 | lock_owner_type guard(lock_); 72 | return closed_; 73 | } 74 | 75 | bool is_empty() const noexcept 76 | { 77 | lock_owner_type guard(lock_); 78 | return queue_.empty(); 79 | } 80 | 81 | bool is_full() const noexcept 82 | { 83 | return false; 84 | } 85 | 86 | static bool is_lock_free() noexcept 87 | { 88 | return false; 89 | } 90 | 91 | // 92 | // Basic operations 93 | // 94 | 95 | template 96 | void push(const value_type &value, Backoff &&... backoff) 97 | { 98 | auto status = wait_push(value, std::forward(backoff)...); 99 | if (status != queue_op_status::success) 100 | throw status; 101 | } 102 | 103 | template 104 | void push(value_type &&value, Backoff &&... backoff) 105 | { 106 | auto status = wait_push(std::move(value), std::forward(backoff)...); 107 | if (status != queue_op_status::success) 108 | throw status; 109 | } 110 | 111 | template 112 | value_type value_pop(Backoff &&... 
backoff) 113 | { 114 | value_type value; 115 | auto status = wait_pop(value, std::forward(backoff)...); 116 | if (status != queue_op_status::success) 117 | throw status; 118 | return std::move(value); 119 | } 120 | 121 | // 122 | // Waiting operations 123 | // 124 | 125 | template 126 | queue_op_status wait_push(const value_type &value, Backoff &&... backoff) 127 | { 128 | return try_push(value, std::forward(backoff)...); 129 | } 130 | 131 | template 132 | queue_op_status wait_push(value_type &&value, Backoff &&... backoff) 133 | { 134 | return try_push(std::move(value), std::forward(backoff)...); 135 | } 136 | 137 | template 138 | queue_op_status wait_pop(value_type &value, Backoff &&... backoff) 139 | { 140 | lock_owner_type guard(lock_, std::forward(backoff)...); 141 | auto status = locked_pop(value); 142 | while (status == queue_op_status::empty) { 143 | cond_.wait(guard); 144 | status = locked_pop(value); 145 | } 146 | return status; 147 | } 148 | 149 | // 150 | // Non-waiting operations 151 | // 152 | 153 | template 154 | queue_op_status try_push(const value_type &value, Backoff &&... backoff) 155 | { 156 | lock_owner_type guard(lock_, std::forward(backoff)...); 157 | return locked_push(value); 158 | } 159 | 160 | template 161 | queue_op_status try_push(value_type &&value, Backoff &&... backoff) 162 | { 163 | lock_owner_type guard(lock_, std::forward(backoff)...); 164 | return locked_push(std::move(value)); 165 | } 166 | 167 | template 168 | queue_op_status try_pop(value_type &value, Backoff &&... 
backoff) 169 | { 170 | lock_owner_type guard(lock_, std::forward(backoff)...); 171 | return locked_pop(value); 172 | } 173 | 174 | #if ENABLE_QUEUE_NONBLOCKING_OPS 175 | // 176 | // Non-blocking operations 177 | // 178 | 179 | queue_op_status nonblocking_push(const value_type &value) 180 | { 181 | lock_owner_type guard(lock_, std::try_to_lock); 182 | if (!guard.owns_lock()) 183 | return queue_op_status::busy; 184 | return locked_push(value); 185 | } 186 | 187 | queue_op_status nonblocking_push(value_type &&value) 188 | { 189 | lock_owner_type guard(lock_, std::try_to_lock); 190 | if (!guard.owns_lock()) 191 | return queue_op_status::busy; 192 | return locked_push(std::move(value)); 193 | } 194 | 195 | queue_op_status nonblocking_pop(value_type &value) 196 | { 197 | lock_owner_type guard(lock_, std::try_to_lock); 198 | if (!guard.owns_lock()) 199 | return queue_op_status::busy; 200 | return locked_pop(value); 201 | } 202 | #endif // ENABLE_QUEUE_NONBLOCKING_OPS 203 | 204 | private: 205 | queue_op_status locked_push(const value_type &value) 206 | { 207 | if (closed_) 208 | return queue_op_status::closed; 209 | 210 | queue_.push_back(value); 211 | cond_.notify_one(); 212 | return queue_op_status::success; 213 | } 214 | 215 | queue_op_status locked_push(value_type &&value) 216 | { 217 | if (closed_) 218 | return queue_op_status::closed; 219 | 220 | queue_.push_back(std::move(value)); 221 | cond_.notify_one(); 222 | return queue_op_status::success; 223 | } 224 | 225 | queue_op_status locked_pop(value_type &value) 226 | { 227 | if (queue_.empty()) { 228 | if (closed_) 229 | return queue_op_status::closed; 230 | return queue_op_status::empty; 231 | } 232 | 233 | value = std::move(queue_.front()); 234 | queue_.pop_front(); 235 | return queue_op_status::success; 236 | } 237 | 238 | bool closed_; 239 | mutable lock_type lock_; 240 | cond_var_type cond_; 241 | sequence_type queue_; 242 | }; 243 | 244 | } // namespace evenk 245 | 246 | #endif // !EVENK_SYNCH_QUEUE_H_ 247 | 
-------------------------------------------------------------------------------- /evenk/task.h: -------------------------------------------------------------------------------- 1 | // 2 | // Executable Tasks 3 | // 4 | // Copyright (c) 2017-2018 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 23 | // 24 | 25 | #include "config.h" 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | #ifndef EVENK_TASK_H_ 33 | #define EVENK_TASK_H_ 34 | 35 | // 36 | // A task is a very restricted version of std::function used together with 37 | // thread pools. It tries to reduce the need for dynamic memory allocation 38 | // and synchronization as much as possible. 39 | // 40 | // This is achieved by enforcing move-semantics while std::function may use 41 | // copy-semantics. 
At any given time, as a task goes from a client thread to 42 | // a thread pool's task queue and then to a worker thread, there is only one 43 | // current owner of the task. 44 | // 45 | // Memory allocation should be avoided for task move operations. There is no 46 | // memory allocation at all for small trivially-copyable target types (such 47 | // as function pointers, references to function objects, etc). And for other 48 | // target types memory should only be allocated when the task is constructed 49 | // from a target callable object and deallocated when the task is destructed. 50 | // 51 | // A task itself is a callable object without arguments. It is non-copyable. 52 | // However it is noexcept-movable. 53 | // 54 | // Also std::function implementations are usually optimized to efficiently 55 | // store member function pointers so they reserve fixed-size extra room for 56 | // this data type. For larger objects they have to use dynamically allocated 57 | // memory. But for tasks it is possible to specify the reserved memory size. 58 | // So it is possible to avoid allocation for larger objects. But by default 59 | // the reserved memory size is even smaller than for std::function. It is 60 | // just enough for a non-member function pointer or a reference to a more 61 | // complex type. 62 | // 63 | // The task allocator might be polymorphic (pmr). In such a case it moves 64 | // along with the task data. 65 | // 66 | // Note that as of C++17 standard the allocator support was removed from 67 | // std::function and it was never implemented in the e.g. gcc libstdc++ 68 | // library. So in this respect tasks provide an extended feature. 69 | // 70 | // An even more restricted task variation is also available: trivial_task. 71 | // This kind of tasks have a fixed maximum size and do not use allocators 72 | // at all. Also trivial_task can only handle trivially-copyable types. On 73 | // other hand they provide minimal overhead. 
In principle it is possible 74 | // to use them with other types (non-trivial or exceeding the maximum size) 75 | // if the function object is passed by reference. In this case the object 76 | // in question must be kept alive separately until the task is in use. 77 | // 78 | // Also before invocation a trivial_task requires a manual check of its 79 | // validity. Invoking invalid trivial_task leads to invalid memory access. 80 | // In contrast to this a task (and std::function) automatically handles 81 | // such cases by gently throwing std::bad_function_call(). 82 | // 83 | // Examples: 84 | // 85 | // void test() { ... } 86 | // void testN(int n) { ... } 87 | // struct testBig { 88 | // char data[48] = {0}; 89 | // void operator()() { ... } 90 | // }; 91 | // ... 92 | // // A simple task usage 93 | // auto task1 = evenk::task(test); 94 | // task1(); 95 | // 96 | // // A trivial task usage, has less overhead than the above 97 | // auto task2 = evenk::trivial_task(test); 98 | // task2(); 99 | // 100 | // // A large task usage, looks identically but uses allocation internally 101 | // auto task3 = evenk::task(testBig()); 102 | // task3(); 103 | // 104 | // // A large trivial task will not compile as is 105 | // //auto task4 = evenk::trivial_task(testBig()); 106 | // //task4(); 107 | // 108 | // // It requires either increased internal storage 109 | // auto task5 = evenk::trivial_task(testBig()); 110 | // task5(); 111 | // // ... or using a reference 112 | // testBig test6; 113 | // auto task6 = evenk::trivial_task(std::ref(test6)); 114 | // task6(); 115 | // 116 | // // The result of std::bind works smoothly with tasks 117 | // auto task7 = evenk::task(std::bind(testN, 42)); 118 | // task7(); 119 | // 120 | // // It is not trivially-copyable so a trivial_task will not compile 121 | // //auto task8 = evenk::trivial_task(std::bind(testN, 42)); 122 | // //task8(); 123 | // // ... 
but using a reference still works 124 | // auto test9 = std::bind(testN, 42); 125 | // auto task9 = evenk::trivial_task(std::ref(test9)); 126 | // task9(); 127 | // 128 | 129 | namespace evenk { 130 | 131 | static constexpr std::size_t fptr_size = sizeof(void (*)()); 132 | static constexpr std::size_t fptr_align = alignof(void (*)()); 133 | 134 | namespace detail { 135 | 136 | // A utility to adjust the size of reserved memory for tasks. 137 | static constexpr std::size_t 138 | task_memory_size(std::size_t size) 139 | { 140 | static_assert((fptr_align & (fptr_align - 1)) == 0, 141 | "function pointer alignment is not a power of two"); 142 | size = (size + fptr_align - 1) & ~(fptr_align - 1); 143 | return std::max(size, fptr_size); 144 | } 145 | 146 | // 147 | // Some template meta-programming stuff that reproduces C++11 std::result_of 148 | // or C++17 std::invoke_result in a compiler independent way. It is relatively 149 | // easy to do here as there is no need to support any arguments. 150 | // 151 | 152 | template 153 | auto task_invoke(F &&f) -> decltype(std::forward(f)()); 154 | 155 | template 156 | struct task_result_base 157 | { 158 | }; 159 | 160 | template 161 | struct task_result_base()))), F> 162 | { 163 | using type = decltype(task_invoke(std::declval())); 164 | }; 165 | 166 | template 167 | struct task_result : task_result_base 168 | { 169 | }; 170 | 171 | // 172 | // Helpers to manage different kinds of tasks uniformly. 
173 | // 174 | 175 | template 176 | struct task_adapter_small 177 | { 178 | Target *get(void *memory) 179 | { 180 | return static_cast(memory); 181 | } 182 | 183 | void move(void *memory, void *other_memory) 184 | { 185 | auto ptr = static_cast(memory); 186 | auto other_ptr = static_cast(other_memory); 187 | new (ptr) Target(std::move(*other_ptr)); 188 | other_ptr->~Target(); 189 | } 190 | 191 | void allocate(void *, Alloc &) {} 192 | 193 | void deallocate(void *, Alloc &) {} 194 | }; 195 | 196 | template 197 | struct task_adapter_large 198 | { 199 | Target *get(void *memory) 200 | { 201 | void **ptrptr = static_cast(memory); 202 | return static_cast(*ptrptr); 203 | } 204 | 205 | void move(void *memory, void *other_memory) 206 | { 207 | void **ptrptr = static_cast(memory); 208 | void **other_ptrptr = static_cast(other_memory); 209 | *ptrptr = *other_ptrptr; 210 | *other_ptrptr = nullptr; 211 | } 212 | 213 | void allocate(void *memory, Alloc &alloc) 214 | { 215 | auto ptrptr = static_cast(memory); 216 | *ptrptr = alloc.allocate(sizeof(Target)); 217 | } 218 | 219 | void deallocate(void *memory, Alloc &alloc) 220 | { 221 | auto ptrptr = static_cast(memory); 222 | alloc.deallocate(*ptrptr, sizeof(Target)); 223 | } 224 | }; 225 | 226 | template 227 | using task_adapter = typename std:: 228 | conditional, task_adapter_large>::type; 229 | 230 | } // namespace detail 231 | 232 | template 233 | class trivial_task 234 | { 235 | public: 236 | using result_type = R; 237 | 238 | static constexpr std::size_t memory_size = detail::task_memory_size(S); 239 | 240 | constexpr trivial_task() noexcept = default; 241 | constexpr trivial_task(std::nullptr_t) noexcept {} 242 | 243 | template 244 | trivial_task(Callable &&callable) 245 | : invoke_(&invoke::type>) 246 | { 247 | using target_type = typename std::decay::type; 248 | using target_result_type = typename detail::task_result::type; 249 | static_assert( 250 | std::is_void::value || 251 | std::is_same::value || 252 | 
std::is_convertible::value, 253 | "a trivial_task target result type mismatch"); 254 | static_assert(std::is_trivially_copyable::value, 255 | "a trivial_task target is not trivially copyable"); 256 | static_assert(std::is_trivially_destructible::value, 257 | "a trivial_task target is not trivially destructible"); 258 | static_assert(sizeof(target_type) <= sizeof(memory_), 259 | "a trivial_task target size limit is exceeded"); 260 | 261 | new (&memory_) target_type(std::forward(callable)); 262 | } 263 | 264 | trivial_task(trivial_task &&other) noexcept 265 | { 266 | other.swap(*this); 267 | } 268 | 269 | trivial_task &operator=(trivial_task &&other) noexcept 270 | { 271 | trivial_task(std::move(other)).swap(*this); 272 | return *this; 273 | } 274 | 275 | void swap(trivial_task &other) noexcept 276 | { 277 | std::swap(invoke_, other.invoke_); 278 | std::swap(memory_, other.memory_); 279 | } 280 | 281 | explicit operator bool() const noexcept 282 | { 283 | return invoke_ != nullptr; 284 | } 285 | 286 | result_type operator()() 287 | { 288 | return (*invoke_)(&memory_); 289 | } 290 | 291 | protected: 292 | using memory_type = char[memory_size]; 293 | using invoke_type = result_type (*)(void *); 294 | 295 | memory_type memory_ = {0}; 296 | invoke_type invoke_ = nullptr; 297 | 298 | constexpr trivial_task(std::nullptr_t, invoke_type invoke) noexcept : invoke_(invoke) {} 299 | 300 | private: 301 | template 302 | static result_type invoke(void *memory) 303 | { 304 | return (*static_cast(memory))(); 305 | } 306 | }; 307 | 308 | template > 309 | class task : private trivial_task 310 | { 311 | using base = trivial_task; 312 | 313 | public: 314 | using result_type = R; 315 | using allocator_type = A; 316 | 317 | using base::memory_size; 318 | 319 | constexpr task() noexcept : base(nullptr, invalid_invoke) {} 320 | constexpr task(std::nullptr_t) noexcept : base(nullptr, invalid_invoke) {} 321 | 322 | template 323 | task(Callable &&callable, const allocator_type &alloc = 
allocator_type()) 324 | : base(nullptr, &invoke::type>), wrapper_(alloc) 325 | { 326 | using target_type = typename std::decay::type; 327 | using target_result_type = typename detail::task_result::type; 328 | static_assert( 329 | std::is_void::value || 330 | std::is_same::value || 331 | std::is_convertible::value, 332 | "a task target result type mismatch"); 333 | 334 | #if EVENK_TASK_DEBUG 335 | printf("sizeof(memory_) = %zu, sizeof(callable_type) = %zu, task size: %zu\n", 336 | sizeof(base::memory_), 337 | sizeof(target_type), 338 | sizeof(*this)); 339 | #endif 340 | 341 | detail::task_adapter adapter; 342 | adapter.allocate(base::memory_, wrapper_); 343 | new (adapter.get(base::memory_)) target_type(std::forward(callable)); 344 | 345 | wrapper_.helper_ = &helper; 346 | } 347 | 348 | ~task() noexcept 349 | { 350 | if (operator bool()) 351 | wrapper_.helper_(this, nullptr); 352 | } 353 | 354 | task(task &&other) noexcept : base(nullptr, other.invoke_) 355 | { 356 | other.wrapper_.helper_(this, &other); 357 | std::swap(other.wrapper_.helper_, wrapper_.helper_); 358 | other.invoke_ = invalid_invoke; 359 | } 360 | 361 | task &operator=(task &&other) noexcept 362 | { 363 | task(std::move(other)).swap(*this); 364 | return *this; 365 | } 366 | 367 | void swap(task &other) noexcept 368 | { 369 | std::swap(*this, other); 370 | } 371 | 372 | explicit operator bool() const noexcept 373 | { 374 | return wrapper_.helper_ != nullptr; 375 | } 376 | 377 | using base::operator(); 378 | 379 | private: 380 | using helper_type = void (*)(task *, task *); 381 | 382 | // Use empty-base optimization trick to store the allocator instance. 
383 | struct helper_wrapper : allocator_type 384 | { 385 | helper_type helper_ = nullptr; 386 | 387 | helper_wrapper() = default; 388 | 389 | helper_wrapper(const allocator_type &allocator) 390 | : allocator_type(allocator) 391 | { 392 | } 393 | }; 394 | 395 | helper_wrapper wrapper_; 396 | 397 | static result_type invalid_invoke(void *) 398 | { 399 | throw std::bad_function_call(); 400 | } 401 | 402 | template 403 | static result_type invoke(void *memory) 404 | { 405 | detail::task_adapter adapter; 406 | return (*adapter.get(memory))(); 407 | } 408 | 409 | template 410 | static void helper(task *this_task, task *other_task) 411 | { 412 | detail::task_adapter adapter; 413 | if (other_task != nullptr) { 414 | adapter.move(this_task->memory_, other_task->memory_); 415 | } else { 416 | adapter.get(this_task->memory_)->~Target(); 417 | adapter.deallocate(this_task->memory_, this_task->wrapper_); 418 | } 419 | } 420 | }; 421 | 422 | } // namespace evenk 423 | 424 | #endif // !EVENK_TASK_H_ 425 | -------------------------------------------------------------------------------- /evenk/thread.h: -------------------------------------------------------------------------------- 1 | // 2 | // Single Thread with CPU Affinity Support 3 | // 4 | // Copyright (c) 2017 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 
15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 23 | // 24 | 25 | #ifndef EVENK_THREAD_H_ 26 | #define EVENK_THREAD_H_ 27 | 28 | #include "config.h" 29 | 30 | #include 31 | #include 32 | #include 33 | 34 | #include 35 | #ifdef HAVE_SCHED_H 36 | #include 37 | #endif 38 | 39 | #include "basic.h" 40 | 41 | namespace evenk { 42 | 43 | class thread : public std::thread 44 | { 45 | public: 46 | using cpuset_type = std::vector; 47 | 48 | thread() noexcept = default; 49 | 50 | template 51 | explicit thread(Func &&f, Args &&... args) 52 | : std::thread(std::forward(f), std::forward(args)...) 
53 | { 54 | } 55 | 56 | // Move from evenk::thread 57 | thread(thread &&other) noexcept 58 | { 59 | std::thread::swap(other); 60 | } 61 | thread &operator=(thread &&other) noexcept 62 | { 63 | std::thread::swap(other); 64 | return *this; 65 | } 66 | 67 | // Move from std::thread 68 | thread(std::thread &&other) noexcept 69 | { 70 | std::thread::swap(other); 71 | } 72 | thread &operator=(std::thread &&other) noexcept 73 | { 74 | std::thread::swap(other); 75 | return *this; 76 | } 77 | 78 | #if HAVE_PTHREAD_SETAFFINITY_NP 79 | 80 | void affinity(const cpuset_type &cpuset) 81 | { 82 | auto handle = native_handle(); 83 | if (!joinable()) 84 | throw_system_error(EINVAL, "affinity"); 85 | 86 | int cpu_num = cpuset.size(); 87 | // TODO: use CPU_ALLOC instead 88 | if (cpu_num > CPU_SETSIZE) 89 | cpu_num = CPU_SETSIZE; 90 | 91 | cpu_set_t native_cpuset; 92 | CPU_ZERO(&native_cpuset); 93 | for (int cpu = 0; cpu < cpu_num; cpu++) { 94 | if (cpuset[cpu]) 95 | CPU_SET(cpu, &native_cpuset); 96 | } 97 | 98 | int rc = pthread_setaffinity_np(handle, sizeof native_cpuset, &native_cpuset); 99 | if (rc != 0) 100 | throw_system_error(rc, "pthread_setaffinity_np"); 101 | } 102 | 103 | cpuset_type affinity() 104 | { 105 | auto handle = native_handle(); 106 | if (!joinable()) 107 | throw_system_error(EINVAL, "affinity"); 108 | 109 | int cpu_num = std::thread::hardware_concurrency(); 110 | // TODO: use CPU_ALLOC instead 111 | if (cpu_num > CPU_SETSIZE) 112 | cpu_num = CPU_SETSIZE; 113 | 114 | cpu_set_t native_cpuset; 115 | int rc = pthread_getaffinity_np(handle, sizeof native_cpuset, &native_cpuset); 116 | if (rc != 0) 117 | throw_system_error(rc, "pthread_getaffinity_np"); 118 | 119 | cpuset_type cpuset; 120 | for (int cpu = 0; cpu < cpu_num; cpu++) 121 | cpuset.push_back(CPU_ISSET(cpu, &native_cpuset)); 122 | 123 | return cpuset; 124 | } 125 | 126 | #else // HAVE_PTHREAD_SETAFFINITY_NP 127 | 128 | void affinity(const cpuset_type &) 129 | { 130 | if (!joinable()) 131 | 
throw_system_error(EINVAL, "affinity"); 132 | } 133 | 134 | cpuset_type affinity() 135 | { 136 | if (!joinable()) 137 | throw_system_error(EINVAL, "affinity"); 138 | return cpuset_type(); 139 | } 140 | 141 | #endif // !HAVE_PTHREAD_SETAFFINITY_NP 142 | }; 143 | 144 | } // namespace evenk 145 | 146 | #endif // !EVENK_THREAD_H_ 147 | -------------------------------------------------------------------------------- /evenk/thread_pool.h: -------------------------------------------------------------------------------- 1 | // 2 | // Thread Pool 3 | // 4 | // Copyright (c) 2017 Aleksey Demakov 5 | // 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | // 13 | // The above copyright notice and this permission notice shall be included in 14 | // all copies or substantial portions of the Software. 15 | // 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | // THE SOFTWARE. 
23 | // 24 | 25 | #ifndef EVENK_THREAD_POOL_H_ 26 | #define EVENK_THREAD_POOL_H_ 27 | 28 | #include 29 | 30 | #include "basic.h" 31 | #include "conqueue.h" 32 | #include "synch.h" 33 | #include "task.h" 34 | #include "thread.h" 35 | 36 | namespace evenk { 37 | 38 | class thread_pool_base : non_copyable 39 | { 40 | public: 41 | thread_pool_base() noexcept = default; 42 | 43 | virtual ~thread_pool_base() noexcept 44 | { 45 | stop(); 46 | wait(); 47 | } 48 | 49 | std::size_t size() const noexcept 50 | { 51 | return pool_.size(); 52 | } 53 | 54 | thread &operator[](std::size_t index) 55 | { 56 | return pool_[index]; 57 | } 58 | 59 | bool is_stopped() const noexcept 60 | { 61 | return (flags_.load(std::memory_order_relaxed) & stop_flag); 62 | } 63 | 64 | void stop() 65 | { 66 | close(stop_flag); 67 | } 68 | 69 | void wait() 70 | { 71 | close(wait_flag); 72 | 73 | default_synch::lock_owner_type guard(join_lock_); 74 | if (!join_done_) { 75 | for (std::size_t i = 0; i < pool_.size(); i++) 76 | pool_[i].join(); 77 | join_done_ = true; 78 | } 79 | } 80 | 81 | protected: 82 | virtual void work() = 0; 83 | virtual void shutdown() = 0; 84 | 85 | void activate(std::size_t size) 86 | { 87 | if (pool_.size()) 88 | throw std::logic_error("thread_pool is already active"); 89 | 90 | pool_.reserve(size); 91 | for (std::size_t i = 0; i < size; i++) 92 | pool_.emplace_back(&thread_pool_base::work, this); 93 | } 94 | 95 | private: 96 | static constexpr std::uint8_t stop_flag = 1; 97 | static constexpr std::uint8_t wait_flag = 2; 98 | 99 | std::vector pool_; 100 | 101 | std::atomic flags_ = ATOMIC_VAR_INIT(0); 102 | 103 | default_synch::lock_type join_lock_; 104 | bool join_done_ = false; 105 | 106 | void close(std::uint8_t flag) 107 | { 108 | if (flags_.fetch_or(flag, std::memory_order_relaxed) == 0) 109 | shutdown(); 110 | } 111 | }; 112 | 113 | template