└── stack
    ├── Makefile
    ├── README.md
    ├── atomic.h
    ├── benchmark
    ├── benchmark.cpp
    ├── boost
    │   ├── LICENSE_1_0.txt
    │   ├── atomic.hpp
    │   ├── atomic
    │   │   ├── detail
    │   │   │   ├── base.hpp
    │   │   │   ├── cas64strong.hpp
    │   │   │   ├── gcc-x86.hpp
    │   │   │   └── type-classifier.hpp
    │   │   └── platform.hpp
    │   └── memory_order.hpp
    ├── locked.h
    ├── lockfree.h
    └── spinlocked.h

/stack/Makefile:
--------------------------------------------------------------------------------
CXX= g++ -std=c++0x
CXXFLAGS= -W -Wall -O2 -pthread

# For OS X, uncomment the next line:
#CXX= clang++ -std=c++11 -stdlib=libc++

all: benchmark

benchmark: benchmark.cpp
	$(CXX) $(CXXFLAGS) benchmark.cpp -o benchmark
--------------------------------------------------------------------------------
/stack/README.md:
--------------------------------------------------------------------------------
lockfree-bench
==============

Lock-free stack benchmark. Use

    make benchmark

to compile.
--------------------------------------------------------------------------------
/stack/atomic.h:
--------------------------------------------------------------------------------
// A placeholder until we upgrade gcc to a version that
// is more sensible with regard to memory fences
//
#pragma once

#include "boost/atomic.hpp"
--------------------------------------------------------------------------------
/stack/benchmark:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/memsql/lockfree-bench/20d1c49ae1185ae9c85b15ad92705fefa7d98349/stack/benchmark
--------------------------------------------------------------------------------
/stack/benchmark.cpp:
--------------------------------------------------------------------------------
#define MAX_THREADS 32

#include <cstdio>
#include <cstdlib>
#include <cassert>
#include <unistd.h>
#include <thread>
#include <vector>
#include "locked.h"
#include "spinlocked.h"
#include "lockfree.h"

struct LockedElement
{
    int data;
};

struct LockFreeElement: public LockFreeStack::Node
{
    int data;
};

boost::atomic<bool> running;

template<class Stack, class Element>
void Worker(Stack& st, Element* elems, int numElements, int* numOps, int threadId)
{
    unsigned int seed = rand();
    std::vector<Element*> mine;
    int ops = 0;
    for(int i = 0; i < numElements; i++)
    {
        mine.push_back(&elems[i]);
    }
    // Spin until the main thread starts the measured interval, then
    // randomly push or pop until it clears `running` again.
    while(!running.load(boost::memory_order_acquire)) {}
    while(running.load(boost::memory_order_acquire))
    {
        Element* elem;
        switch(rand_r(&seed) % 2)
        {
        case 0:
            if(!mine.empty())
            {
                elem = mine.back();
                assert(elem->data == 0);
                elem->data = 1;
                mine.pop_back();
                st.Push(elem);
            }
            ops++;
            break;
        case 1:
            elem = static_cast<Element*>(st.Pop(threadId));
            if(elem != nullptr)
            {
                assert(elem->data == 1);
                elem->data = 0;
                mine.push_back(elem);
            }
            ops++;
            break;
        }
    }
    *numOps = ops;
}

template<class Stack, class Element>
double Test(int nthreads)
{
    const int num_elements = 20000;
    const int test_time = 5;
    const int test_iterations = 5;
    const int elem_per_thread = num_elements / nthreads;
    long long ops = 0;

    for(int it = 0; it < test_iterations; it++)
    {
        Stack st;
        // Value-initialize so every element starts with data == 0,
        // which the asserts in Worker rely on.
        Element* elements = new Element[num_elements]();

        std::thread threads[MAX_THREADS];
        int numOps[MAX_THREADS] = {};

        for(int i = 0; i < nthreads; i++)
        {
            threads[i] = std::thread(Worker<Stack, Element>, std::ref(st),
                                     elements + i*elem_per_thread, elem_per_thread,
                                     &numOps[i], i);
        }

        running.store(true, boost::memory_order_release);
        sleep(test_time);
        running.store(false, boost::memory_order_release);

        for(int i = 0; i < nthreads; i++)
        {
            threads[i].join();
            ops += numOps[i];
        }

        delete[] elements;
    }
    return (double)ops / (test_time*test_iterations);
}

int main()
{
    for(int i = 1; i <= MAX_THREADS; i++)
    {
        double lockFreeOps = Test<LockFreeStack, LockFreeElement>(i);
        double lockedOps = Test<LockedStack<LockedElement>, LockedElement>(i);
        double spinLockedOps = Test<SpinLockedStack<LockedElement>, LockedElement>(i);
        printf("%d threads, LockFree: %d/sec, Locked: %d/sec, SpinLocked: %d/sec\n",
               i, (int)lockFreeOps, (int)lockedOps, (int)spinLockedOps);
    }
    return 0;
}
--------------------------------------------------------------------------------
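The stack implementations themselves (locked.h, spinlocked.h, lockfree.h) are
not reproduced in this dump, so here is a minimal sketch of the interface that
benchmark.cpp drives. The names LockedStack and SpinLockedStack and the
internal layout are inferred from the call sites above (st.Push(elem),
st.Pop(threadId), LockFreeStack::Node); treat this as an illustration of the
expected shape, not the repository's actual code:

#include <mutex>
#include <vector>

// Coarse-grained baseline: one mutex around an ordinary stack of pointers.
template<class T>
class LockedStack
{
public:
    void Push(T* entry)
    {
        std::lock_guard<std::mutex> guard(lock_);
        stack_.push_back(entry);
    }

    // threadId is unused here; the lock-free variant needs it for safe
    // memory reclamation, so all three stacks share the same signature.
    T* Pop(int /* threadId */)
    {
        std::lock_guard<std::mutex> guard(lock_);
        if(stack_.empty())
            return nullptr;
        T* result = stack_.back();
        stack_.pop_back();
        return result;
    }

private:
    std::vector<T*> stack_;
    std::mutex lock_;
};

The spinlocked variant would be the same shape with a busy-wait lock (for
example an atomic_flag test-and-set loop), and the lock-free variant a Treiber
stack that links the intrusive LockFreeStack::Node objects together with CAS;
a sketch of that appears at the end of this dump.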
/stack/boost/LICENSE_1_0.txt:
--------------------------------------------------------------------------------
Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/stack/boost/atomic.hpp:
--------------------------------------------------------------------------------
#ifndef BOOST_ATOMIC_HPP
#define BOOST_ATOMIC_HPP

// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
7 | // See accompanying file LICENSE_1_0.txt or copy at 8 | // http://www.boost.org/LICENSE_1_0.txt) 9 | 10 | #include 11 | #include 12 | 13 | #include "atomic/detail/base.hpp" 14 | #if !defined(BOOST_ATOMIC_FORCE_FALLBACK) 15 | #include "atomic/platform.hpp" 16 | #endif 17 | #include "atomic/detail/type-classifier.hpp" 18 | 19 | namespace boost { 20 | 21 | #ifndef BOOST_ATOMIC_CHAR_LOCK_FREE 22 | #define BOOST_ATOMIC_CHAR_LOCK_FREE 0 23 | #endif 24 | 25 | #ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE 26 | #define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0 27 | #endif 28 | 29 | #ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE 30 | #define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0 31 | #endif 32 | 33 | #ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE 34 | #define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0 35 | #endif 36 | 37 | #ifndef BOOST_ATOMIC_SHORT_LOCK_FREE 38 | #define BOOST_ATOMIC_SHORT_LOCK_FREE 0 39 | #endif 40 | 41 | #ifndef BOOST_ATOMIC_INT_LOCK_FREE 42 | #define BOOST_ATOMIC_INT_LOCK_FREE 0 43 | #endif 44 | 45 | #ifndef BOOST_ATOMIC_LONG_LOCK_FREE 46 | #define BOOST_ATOMIC_LONG_LOCK_FREE 0 47 | #endif 48 | 49 | #ifndef BOOST_ATOMIC_LLONG_LOCK_FREE 50 | #define BOOST_ATOMIC_LLONG_LOCK_FREE 0 51 | #endif 52 | 53 | #ifndef BOOST_ATOMIC_ADDRESS_LOCK_FREE 54 | #define BOOST_ATOMIC_ADDRESS_LOCK_FREE 0 55 | #endif 56 | 57 | #ifndef BOOST_ATOMIC_BOOL_LOCK_FREE 58 | #define BOOST_ATOMIC_BOOL_LOCK_FREE 0 59 | #endif 60 | 61 | #ifndef BOOST_ATOMIC_THREAD_FENCE 62 | #define BOOST_ATOMIC_THREAD_FENCE 0 63 | void 64 | atomic_thread_fence(memory_order) 65 | { 66 | } 67 | #endif 68 | 69 | #ifndef BOOST_ATOMIC_SIGNAL_FENCE 70 | #define BOOST_ATOMIC_SIGNAL_FENCE 0 71 | void 72 | atomic_signal_fence(memory_order order) 73 | { 74 | atomic_thread_fence(order); 75 | } 76 | #endif 77 | 78 | template 79 | class atomic : public detail::atomic::base_atomic::test, sizeof(T)> { 80 | private: 81 | typedef T value_type; 82 | typedef detail::atomic::base_atomic::test, sizeof(T)> super; 83 | public: 84 | atomic(void) : super() {} 85 | explicit atomic(const value_type & v) : super(v) {} 86 | 87 | atomic & operator=(value_type v) volatile 88 | { 89 | super::operator=(v); 90 | return *const_cast(this); 91 | } 92 | private: 93 | atomic(const atomic &) /* =delete */ ; 94 | atomic & operator=(const atomic &) /* =delete */ ; 95 | }; 96 | 97 | typedef atomic atomic_char; 98 | typedef atomic atomic_uchar; 99 | typedef atomic atomic_schar; 100 | typedef atomic atomic_uint8_t; 101 | typedef atomic atomic_int8_t; 102 | typedef atomic atomic_ushort; 103 | typedef atomic atomic_short; 104 | typedef atomic atomic_uint16_t; 105 | typedef atomic atomic_int16_t; 106 | typedef atomic atomic_uint; 107 | typedef atomic atomic_int; 108 | typedef atomic atomic_uint32_t; 109 | typedef atomic atomic_int32_t; 110 | typedef atomic atomic_ulong; 111 | typedef atomic atomic_long; 112 | typedef atomic atomic_uint64_t; 113 | typedef atomic atomic_int64_t; 114 | #ifdef BOOST_HAS_LONG_LONG 115 | typedef atomic atomic_ullong; 116 | typedef atomic atomic_llong; 117 | #endif 118 | typedef atomic atomic_address; 119 | typedef atomic atomic_bool; 120 | 121 | class atomic_flag { 122 | public: 123 | atomic_flag(void) : v_(false) {} 124 | 125 | bool 126 | test_and_set(memory_order order = memory_order_seq_cst) 127 | { 128 | return v_.exchange(true, order); 129 | } 130 | 131 | void 132 | clear(memory_order order = memory_order_seq_cst) volatile 133 | { 134 | v_.store(false, order); 135 | } 136 | private: 137 | atomic_flag(const atomic_flag &) /* = delete */ ; 138 | atomic_flag & operator=(const 
atomic_flag &) /* = delete */ ; 139 | atomic v_; 140 | }; 141 | 142 | typedef atomic atomic_char; 143 | typedef atomic atomic_uchar; 144 | typedef atomic atomic_schar; 145 | typedef atomic atomic_uint8_t; 146 | typedef atomic atomic_int8_t; 147 | typedef atomic atomic_ushort; 148 | typedef atomic atomic_short; 149 | typedef atomic atomic_uint16_t; 150 | typedef atomic atomic_int16_t; 151 | typedef atomic atomic_uint; 152 | typedef atomic atomic_int; 153 | typedef atomic atomic_uint32_t; 154 | typedef atomic atomic_int32_t; 155 | typedef atomic atomic_ulong; 156 | typedef atomic atomic_long; 157 | typedef atomic atomic_uint64_t; 158 | typedef atomic atomic_int64_t; 159 | typedef atomic atomic_ullong; 160 | typedef atomic atomic_llong; 161 | typedef atomic atomic_address; 162 | typedef atomic atomic_bool; 163 | 164 | } 165 | 166 | #endif 167 | -------------------------------------------------------------------------------- /stack/boost/atomic/detail/base.hpp: -------------------------------------------------------------------------------- 1 | #ifndef BOOST_DETAIL_ATOMIC_BASE_HPP 2 | #define BOOST_DETAIL_ATOMIC_BASE_HPP 3 | 4 | // Copyright (c) 2009 Helge Bahmann 5 | // 6 | // Distributed under the Boost Software License, Version 1.0. 7 | // See accompanying file LICENSE_1_0.txt or copy at 8 | // http://www.boost.org/LICENSE_1_0.txt) 9 | 10 | // Base class definition and fallback implementation. 11 | // To be overridden (through partial specialization) by 12 | // platform implementations. 13 | 14 | #include 15 | 16 | #include "../../memory_order.hpp" 17 | 18 | #ifndef DISABLE_ATOMIC_OPERATORS 19 | 20 | #define BOOST_ATOMIC_DECLARE_ASSIGNMENT_OPERATORS \ 21 | operator value_type(void) volatile const \ 22 | { \ 23 | return load(memory_order_seq_cst); \ 24 | } \ 25 | \ 26 | this_type & \ 27 | operator=(value_type v) volatile \ 28 | { \ 29 | store(v, memory_order_seq_cst); \ 30 | return *const_cast(this); \ 31 | } 32 | 33 | #else 34 | 35 | // locked out 36 | // 37 | #define BOOST_ATOMIC_DECLARE_ASSIGNMENT_OPERATORS 38 | 39 | #endif 40 | 41 | #define BOOST_ATOMIC_DECLARE_BASE_OPERATORS \ 42 | BOOST_ATOMIC_DECLARE_ASSIGNMENT_OPERATORS \ 43 | \ 44 | bool \ 45 | compare_exchange_strong( \ 46 | value_type & expected, \ 47 | value_type desired, \ 48 | memory_order order = memory_order_seq_cst) volatile \ 49 | { \ 50 | return compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); \ 51 | } \ 52 | \ 53 | bool \ 54 | compare_exchange_weak( \ 55 | value_type & expected, \ 56 | value_type desired, \ 57 | memory_order order = memory_order_seq_cst) volatile \ 58 | { \ 59 | return compare_exchange_weak(expected, desired, order, calculate_failure_order(order)); \ 60 | } \ 61 | \ 62 | 63 | #ifndef DISABLE_ATOMIC_OPERATORS 64 | 65 | #define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \ 66 | value_type \ 67 | operator++(int) volatile \ 68 | { \ 69 | return fetch_add(1); \ 70 | } \ 71 | \ 72 | value_type \ 73 | operator++(void) volatile \ 74 | { \ 75 | return fetch_add(1) + 1; \ 76 | } \ 77 | \ 78 | value_type \ 79 | operator--(int) volatile \ 80 | { \ 81 | return fetch_sub(1); \ 82 | } \ 83 | \ 84 | value_type \ 85 | operator--(void) volatile \ 86 | { \ 87 | return fetch_sub(1) - 1; \ 88 | } \ 89 | \ 90 | value_type \ 91 | operator+=(difference_type v) volatile \ 92 | { \ 93 | return fetch_add(v) + v; \ 94 | } \ 95 | \ 96 | value_type \ 97 | operator-=(difference_type v) volatile \ 98 | { \ 99 | return fetch_sub(v) - v; \ 100 | } \ 101 | 102 | #define BOOST_ATOMIC_DECLARE_BIT_OPERATORS \ 103 
| value_type \ 104 | operator&=(difference_type v) volatile \ 105 | { \ 106 | return fetch_and(v) & v; \ 107 | } \ 108 | \ 109 | value_type \ 110 | operator|=(difference_type v) volatile \ 111 | { \ 112 | return fetch_or(v) | v; \ 113 | } \ 114 | \ 115 | value_type \ 116 | operator^=(difference_type v) volatile \ 117 | { \ 118 | return fetch_xor(v) ^ v; \ 119 | } \ 120 | 121 | #else 122 | 123 | // locked out 124 | // 125 | #define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS 126 | #define BOOST_ATOMIC_DECLARE_BIT_OPERATORS 127 | 128 | #endif 129 | 130 | #define BOOST_ATOMIC_DECLARE_POINTER_OPERATORS \ 131 | BOOST_ATOMIC_DECLARE_BASE_OPERATORS \ 132 | BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \ 133 | 134 | #define BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS \ 135 | BOOST_ATOMIC_DECLARE_BASE_OPERATORS \ 136 | BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \ 137 | BOOST_ATOMIC_DECLARE_BIT_OPERATORS \ 138 | 139 | namespace boost { 140 | namespace detail { 141 | namespace atomic { 142 | 143 | static inline memory_order 144 | calculate_failure_order(memory_order order) 145 | { 146 | switch(order) { 147 | case memory_order_acq_rel: 148 | return memory_order_acquire; 149 | case memory_order_release: 150 | return memory_order_relaxed; 151 | default: 152 | return order; 153 | } 154 | } 155 | 156 | template 157 | class base_atomic; 158 | 159 | } 160 | } 161 | } 162 | 163 | #endif 164 | -------------------------------------------------------------------------------- /stack/boost/atomic/detail/cas64strong.hpp: -------------------------------------------------------------------------------- 1 | #ifndef BOOST_DETAIL_ATOMIC_CAS64STRONG_HPP 2 | #define BOOST_DETAIL_ATOMIC_CAS64STRONG_HPP 3 | 4 | // Distributed under the Boost Software License, Version 1.0. 5 | // See accompanying file LICENSE_1_0.txt or copy at 6 | // http://www.boost.org/LICENSE_1_0.txt) 7 | // 8 | // Copyright (c) 2011 Helge Bahmann 9 | 10 | // Build 64-bit atomic operation from platform_cmpxchg64_strong 11 | // primitive. It is assumed that 64-bit loads/stores are not 12 | // atomic, so they are funnelled through cmpxchg as well. 
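// For illustration, every operation below reduces to the same retry
// pattern; fetch_add, for instance, is built as
//
//     value_type original = load(memory_order_relaxed);
//     do {
//     } while (!compare_exchange_weak(original, original + v, order,
//                                     memory_order_relaxed));
//     return original;
//
// compare_exchange_weak writes the freshly observed value back into
// `original` when it fails, so the loop simply retries until no other
// thread changed the variable between the load and the CAS.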
13 | 14 | #include 15 | #include 16 | 17 | namespace boost { 18 | namespace detail { 19 | namespace atomic { 20 | 21 | /* integral types */ 22 | 23 | template 24 | class base_atomic { 25 | typedef base_atomic this_type; 26 | typedef T value_type; 27 | typedef T difference_type; 28 | public: 29 | explicit base_atomic(value_type v) : v_(v) {} 30 | base_atomic(void) {} 31 | 32 | void 33 | store(value_type v, memory_order order = memory_order_seq_cst) volatile 34 | { 35 | value_type expected = v_; 36 | do { 37 | } while (!compare_exchange_strong(expected, v, order, memory_order_relaxed)); 38 | } 39 | 40 | value_type 41 | load(memory_order order = memory_order_seq_cst) const volatile 42 | { 43 | value_type v = const_cast(v_); 44 | do { 45 | } while (!const_cast(this)->compare_exchange_strong(v, v, order, memory_order_relaxed)); 46 | return v; 47 | } 48 | 49 | value_type 50 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 51 | { 52 | value_type original = load(memory_order_relaxed); 53 | do { 54 | } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); 55 | return original; 56 | } 57 | 58 | bool 59 | compare_exchange_weak( 60 | value_type & expected, 61 | value_type desired, 62 | memory_order success_order, 63 | memory_order failure_order) volatile 64 | { 65 | return compare_exchange_strong(expected, desired, success_order, failure_order); 66 | } 67 | 68 | bool 69 | compare_exchange_strong( 70 | value_type & expected, 71 | value_type desired, 72 | memory_order success_order, 73 | memory_order failure_order) volatile 74 | { 75 | platform_fence_before(success_order); 76 | 77 | bool success = platform_cmpxchg64_strong(expected, desired, &v_); 78 | 79 | if (success) { 80 | platform_fence_after(success_order); 81 | } else { 82 | platform_fence_after(failure_order); 83 | } 84 | 85 | return success; 86 | } 87 | 88 | value_type 89 | fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile 90 | { 91 | value_type original = load(memory_order_relaxed); 92 | do { 93 | } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); 94 | return original; 95 | } 96 | 97 | value_type 98 | fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile 99 | { 100 | value_type original = load(memory_order_relaxed); 101 | do { 102 | } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); 103 | return original; 104 | } 105 | 106 | value_type 107 | fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile 108 | { 109 | value_type original = load(memory_order_relaxed); 110 | do { 111 | } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed)); 112 | return original; 113 | } 114 | 115 | value_type 116 | fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile 117 | { 118 | value_type original = load(memory_order_relaxed); 119 | do { 120 | } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed)); 121 | return original; 122 | } 123 | 124 | value_type 125 | fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile 126 | { 127 | value_type original = load(memory_order_relaxed); 128 | do { 129 | } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed)); 130 | return original; 131 | } 132 | 133 | bool 134 | is_lock_free(void) const volatile 135 | { 136 | return true; 137 | } 138 | 139 | BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS 140 | private: 141 | 
base_atomic(const base_atomic &) /* = delete */ ; 142 | void operator=(const base_atomic &) /* = delete */ ; 143 | value_type v_; 144 | }; 145 | 146 | /* pointer types */ 147 | 148 | template 149 | class base_atomic { 150 | typedef base_atomic this_type; 151 | typedef void * value_type; 152 | typedef ptrdiff_t difference_type; 153 | public: 154 | explicit base_atomic(value_type v) : v_(v) {} 155 | base_atomic(void) {} 156 | 157 | void 158 | store(value_type v, memory_order order = memory_order_seq_cst) volatile 159 | { 160 | value_type expected = v_; 161 | do { 162 | } while (!compare_exchange_strong(expected, v, order, memory_order_relaxed)); 163 | } 164 | 165 | value_type 166 | load(memory_order order = memory_order_seq_cst) const volatile 167 | { 168 | value_type v = const_cast(v_); 169 | do { 170 | } while (!const_cast(this)->compare_exchange_strong(v, v, order, memory_order_relaxed)); 171 | return v; 172 | } 173 | 174 | value_type 175 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 176 | { 177 | value_type original = load(memory_order_relaxed); 178 | do { 179 | } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); 180 | return original; 181 | } 182 | 183 | bool 184 | compare_exchange_weak( 185 | value_type & expected, 186 | value_type desired, 187 | memory_order success_order, 188 | memory_order failure_order) volatile 189 | { 190 | return compare_exchange_strong(expected, desired, success_order, failure_order); 191 | } 192 | 193 | bool 194 | compare_exchange_strong( 195 | value_type & expected, 196 | value_type desired, 197 | memory_order success_order, 198 | memory_order failure_order) volatile 199 | { 200 | platform_fence_before(success_order); 201 | 202 | bool success = platform_cmpxchg64_strong(expected, desired, &v_); 203 | 204 | if (success) { 205 | platform_fence_after(success_order); 206 | } else { 207 | platform_fence_after(failure_order); 208 | } 209 | 210 | return success; 211 | } 212 | 213 | value_type 214 | fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile 215 | { 216 | value_type original = load(memory_order_relaxed); 217 | do { 218 | } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); 219 | return original; 220 | } 221 | 222 | value_type 223 | fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile 224 | { 225 | value_type original = load(memory_order_relaxed); 226 | do { 227 | } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); 228 | return original; 229 | } 230 | 231 | bool 232 | is_lock_free(void) const volatile 233 | { 234 | return true; 235 | } 236 | 237 | BOOST_ATOMIC_DECLARE_BASE_OPERATORS 238 | private: 239 | base_atomic(const base_atomic &) /* = delete */ ; 240 | void operator=(const base_atomic &) /* = delete */ ; 241 | value_type v_; 242 | }; 243 | 244 | template 245 | class base_atomic { 246 | typedef base_atomic this_type; 247 | typedef T * value_type; 248 | typedef ptrdiff_t difference_type; 249 | public: 250 | explicit base_atomic(value_type v) : v_(v) {} 251 | base_atomic(void) {} 252 | 253 | void 254 | store(value_type v, memory_order order = memory_order_seq_cst) volatile 255 | { 256 | value_type expected = v_; 257 | do { 258 | } while (!compare_exchange_strong(expected, v, order, memory_order_relaxed)); 259 | } 260 | 261 | value_type 262 | load(memory_order order = memory_order_seq_cst) const volatile 263 | { 264 | value_type v = const_cast(v_); 265 | do { 266 | } while 
(!const_cast(this)->compare_exchange_strong(v, v, order, memory_order_relaxed)); 267 | return v; 268 | } 269 | 270 | value_type 271 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 272 | { 273 | value_type original = load(memory_order_relaxed); 274 | do { 275 | } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); 276 | return original; 277 | } 278 | 279 | bool 280 | compare_exchange_weak( 281 | value_type & expected, 282 | value_type desired, 283 | memory_order success_order, 284 | memory_order failure_order) volatile 285 | { 286 | return compare_exchange_strong(expected, desired, success_order, failure_order); 287 | } 288 | 289 | bool 290 | compare_exchange_strong( 291 | value_type & expected, 292 | value_type desired, 293 | memory_order success_order, 294 | memory_order failure_order) volatile 295 | { 296 | platform_fence_before(success_order); 297 | 298 | bool success = platform_cmpxchg64_strong(expected, desired, &v_); 299 | 300 | if (success) { 301 | platform_fence_after(success_order); 302 | } else { 303 | platform_fence_after(failure_order); 304 | } 305 | 306 | return success; 307 | } 308 | 309 | value_type 310 | fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile 311 | { 312 | value_type original = load(memory_order_relaxed); 313 | do { 314 | } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed)); 315 | return original; 316 | } 317 | 318 | value_type 319 | fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile 320 | { 321 | value_type original = load(memory_order_relaxed); 322 | do { 323 | } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed)); 324 | return original; 325 | } 326 | 327 | bool 328 | is_lock_free(void) const volatile 329 | { 330 | return true; 331 | } 332 | 333 | BOOST_ATOMIC_DECLARE_POINTER_OPERATORS 334 | private: 335 | base_atomic(const base_atomic &) /* = delete */ ; 336 | void operator=(const base_atomic &) /* = delete */ ; 337 | value_type v_; 338 | }; 339 | 340 | /* generic types */ 341 | 342 | template 343 | class base_atomic { 344 | typedef base_atomic this_type; 345 | typedef T value_type; 346 | typedef uint64_t storage_type; 347 | public: 348 | explicit base_atomic(value_type v) : v_(0) 349 | { 350 | memcpy(&v_, &v, sizeof(value_type)); 351 | } 352 | base_atomic(void) : v_(0) {} 353 | 354 | void 355 | store(value_type v, memory_order order = memory_order_seq_cst) volatile 356 | { 357 | value_type expected; 358 | memcpy(&expected, const_cast(&v_), sizeof(value_type)); 359 | do { 360 | } while (!compare_exchange_strong(expected, v, order, memory_order_relaxed)); 361 | } 362 | 363 | value_type 364 | load(memory_order order = memory_order_seq_cst) const volatile 365 | { 366 | value_type v; 367 | memcpy(&v, const_cast(&v_), sizeof(value_type)); 368 | do { 369 | } while (!const_cast(this)->compare_exchange_strong(v, v, order, memory_order_relaxed)); 370 | return v; 371 | } 372 | 373 | value_type 374 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 375 | { 376 | value_type original = load(memory_order_relaxed); 377 | do { 378 | } while (!compare_exchange_weak(original, v, order, memory_order_relaxed)); 379 | return original; 380 | } 381 | 382 | bool 383 | compare_exchange_weak( 384 | value_type & expected, 385 | value_type desired, 386 | memory_order success_order, 387 | memory_order failure_order) volatile 388 | { 389 | return compare_exchange_strong(expected, desired, 
success_order, failure_order); 390 | } 391 | 392 | bool 393 | compare_exchange_strong( 394 | value_type & expected, 395 | value_type desired, 396 | memory_order success_order, 397 | memory_order failure_order) volatile 398 | { 399 | 400 | storage_type expected_s = 0, desired_s = 0; 401 | memcpy(&expected_s, &expected, sizeof(value_type)); 402 | memcpy(&desired_s, &desired, sizeof(value_type)); 403 | 404 | platform_fence_before(success_order); 405 | bool success = platform_cmpxchg64_strong(expected_s, desired_s, &v_); 406 | 407 | if (success) { 408 | platform_fence_after(success_order); 409 | } else { 410 | platform_fence_after(failure_order); 411 | memcpy(&expected, &expected_s, sizeof(value_type)); 412 | } 413 | 414 | return success; 415 | } 416 | 417 | bool 418 | is_lock_free(void) const volatile 419 | { 420 | return true; 421 | } 422 | 423 | BOOST_ATOMIC_DECLARE_BASE_OPERATORS 424 | private: 425 | base_atomic(const base_atomic &) /* = delete */ ; 426 | void operator=(const base_atomic &) /* = delete */ ; 427 | storage_type v_; 428 | }; 429 | 430 | } 431 | } 432 | } 433 | 434 | #endif 435 | -------------------------------------------------------------------------------- /stack/boost/atomic/detail/gcc-x86.hpp: -------------------------------------------------------------------------------- 1 | #ifndef BOOST_DETAIL_ATOMIC_GCC_X86_HPP 2 | #define BOOST_DETAIL_ATOMIC_GCC_X86_HPP 3 | 4 | // Copyright (c) 2009 Helge Bahmann 5 | // 6 | // Distributed under the Boost Software License, Version 1.0. 7 | // See accompanying file LICENSE_1_0.txt or copy at 8 | // http://www.boost.org/LICENSE_1_0.txt) 9 | 10 | #define BOOST_ATOMIC_CHAR_LOCK_FREE 2 11 | #define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2 12 | #define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2 13 | #define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2 14 | #define BOOST_ATOMIC_SHORT_LOCK_FREE 2 15 | #define BOOST_ATOMIC_INT_LOCK_FREE 2 16 | #define BOOST_ATOMIC_LONG_LOCK_FREE 2 17 | 18 | #if defined(__x86_64__) 19 | #define BOOST_ATOMIC_LLONG_LOCK_FREE 2 20 | #else 21 | #define BOOST_ATOMIC_LLONG_LOCK_FREE 1 22 | #endif 23 | 24 | #define BOOST_ATOMIC_ADDRESS_LOCK_FREE 2 25 | #define BOOST_ATOMIC_BOOL_LOCK_FREE 2 26 | 27 | namespace boost { 28 | 29 | #if defined(__x86_64__) 30 | # define BOOST_ATOMIC_X86_FENCE_INSTR "mfence\n" 31 | #else 32 | # define BOOST_ATOMIC_X86_FENCE_INSTR "lock ; addl $0, (%%esp)\n" 33 | #endif 34 | 35 | #define BOOST_ATOMIC_THREAD_FENCE 2 36 | static inline void 37 | atomic_thread_fence(memory_order order) 38 | { 39 | switch(order) { 40 | case memory_order_relaxed: 41 | break; 42 | case memory_order_release: 43 | __asm__ __volatile__ ("" ::: "memory"); 44 | break; 45 | case memory_order_acquire: 46 | __asm__ __volatile__ ("" ::: "memory"); 47 | break; 48 | case memory_order_acq_rel: 49 | __asm__ __volatile__ ("" ::: "memory"); 50 | break; 51 | case memory_order_consume: 52 | break; 53 | case memory_order_seq_cst: 54 | __asm__ __volatile__ (BOOST_ATOMIC_X86_FENCE_INSTR ::: "memory"); 55 | break; 56 | default:; 57 | } 58 | } 59 | 60 | #define BOOST_ATOMIC_SIGNAL_FENCE 2 61 | static inline void 62 | atomic_signal_fence(memory_order) 63 | { 64 | __asm__ __volatile__ ("" ::: "memory"); 65 | } 66 | 67 | namespace detail { 68 | namespace atomic { 69 | 70 | static inline void 71 | platform_fence_before(memory_order order) 72 | { 73 | switch(order) { 74 | case memory_order_relaxed: 75 | case memory_order_acquire: 76 | case memory_order_consume: 77 | break; 78 | case memory_order_release: 79 | case memory_order_acq_rel: 80 | __asm__ __volatile__ ("" 
::: "memory"); 81 | /* release */ 82 | break; 83 | case memory_order_seq_cst: 84 | __asm__ __volatile__ ("" ::: "memory"); 85 | /* seq */ 86 | break; 87 | } 88 | } 89 | 90 | static inline void 91 | platform_fence_after(memory_order order) 92 | { 93 | switch(order) { 94 | case memory_order_relaxed: 95 | case memory_order_release: 96 | break; 97 | case memory_order_acquire: 98 | case memory_order_acq_rel: 99 | __asm__ __volatile__ ("" ::: "memory"); 100 | /* acquire */ 101 | break; 102 | case memory_order_consume: 103 | /* consume */ 104 | break; 105 | case memory_order_seq_cst: 106 | __asm__ __volatile__ ("" ::: "memory"); 107 | /* seq */ 108 | break; 109 | default:; 110 | } 111 | } 112 | 113 | static inline void 114 | platform_fence_after_load(memory_order order) 115 | { 116 | switch(order) { 117 | case memory_order_relaxed: 118 | case memory_order_release: 119 | break; 120 | case memory_order_acquire: 121 | case memory_order_acq_rel: 122 | __asm__ __volatile__ ("" ::: "memory"); 123 | break; 124 | case memory_order_consume: 125 | break; 126 | case memory_order_seq_cst: 127 | __asm__ __volatile__ (BOOST_ATOMIC_X86_FENCE_INSTR ::: "memory"); 128 | break; 129 | default:; 130 | } 131 | } 132 | 133 | template 134 | class base_atomic { 135 | typedef base_atomic this_type; 136 | typedef T value_type; 137 | typedef T difference_type; 138 | public: 139 | explicit base_atomic(value_type v) : v_(v) {} 140 | base_atomic(void) {} 141 | 142 | void 143 | store(value_type v, memory_order order) volatile 144 | { 145 | if (order != memory_order_seq_cst) { 146 | platform_fence_before(order); 147 | const_cast(v_) = v; 148 | } else { 149 | exchange(v, order); 150 | } 151 | } 152 | 153 | value_type 154 | load(memory_order order) const volatile 155 | { 156 | value_type v = const_cast(v_); 157 | platform_fence_after_load(order); 158 | return v; 159 | } 160 | 161 | value_type 162 | fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile 163 | { 164 | platform_fence_before(order); 165 | __asm__ ( 166 | "lock ; xaddb %0, %1" 167 | : "+q" (v), "+m" (v_) 168 | ); 169 | platform_fence_after(order); 170 | return v; 171 | } 172 | 173 | value_type 174 | fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile 175 | { 176 | return fetch_add(-v, order); 177 | } 178 | 179 | value_type 180 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 181 | { 182 | platform_fence_before(order); 183 | __asm__ ( 184 | "xchgb %0, %1" 185 | : "+q" (v), "+m" (v_) 186 | ); 187 | platform_fence_after(order); 188 | return v; 189 | } 190 | 191 | bool 192 | compare_exchange_strong( 193 | value_type & expected, 194 | value_type desired, 195 | memory_order success_order, 196 | memory_order failure_order) volatile 197 | { 198 | value_type previous = expected; 199 | platform_fence_before(success_order); 200 | __asm__ ( 201 | "lock ; cmpxchgb %2, %1" 202 | : "+a" (previous), "+m" (v_) 203 | : "q" (desired) 204 | ); 205 | bool success = (previous == expected); 206 | if (success) 207 | platform_fence_after(success_order); 208 | else 209 | platform_fence_after(failure_order); 210 | expected = previous; 211 | return success; 212 | } 213 | 214 | bool 215 | compare_exchange_weak( 216 | value_type & expected, 217 | value_type desired, 218 | memory_order success_order, 219 | memory_order failure_order) volatile 220 | { 221 | return compare_exchange_strong(expected, desired, success_order, failure_order); 222 | } 223 | 224 | value_type 225 | fetch_and(value_type v, memory_order order = 
memory_order_seq_cst) volatile 226 | { 227 | value_type tmp = load(memory_order_relaxed); 228 | do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); 229 | return tmp; 230 | } 231 | 232 | value_type 233 | fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile 234 | { 235 | value_type tmp = load(memory_order_relaxed); 236 | do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); 237 | return tmp; 238 | } 239 | 240 | value_type 241 | fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile 242 | { 243 | value_type tmp = load(memory_order_relaxed); 244 | do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); 245 | return tmp; 246 | } 247 | 248 | bool 249 | is_lock_free(void) const volatile 250 | { 251 | return true; 252 | } 253 | 254 | BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS 255 | private: 256 | base_atomic(const base_atomic &) /* = delete */ ; 257 | void operator=(const base_atomic &) /* = delete */ ; 258 | value_type v_; 259 | }; 260 | 261 | template 262 | class base_atomic { 263 | typedef base_atomic this_type; 264 | typedef T value_type; 265 | typedef T difference_type; 266 | public: 267 | explicit base_atomic(value_type v) : v_(v) {} 268 | base_atomic(void) {} 269 | 270 | void 271 | store(value_type v, memory_order order) volatile 272 | { 273 | if (order != memory_order_seq_cst) { 274 | platform_fence_before(order); 275 | const_cast(v_) = v; 276 | } else { 277 | exchange(v, order); 278 | } 279 | } 280 | 281 | value_type 282 | load(memory_order order) const volatile 283 | { 284 | value_type v = const_cast(v_); 285 | platform_fence_after_load(order); 286 | return v; 287 | } 288 | 289 | value_type 290 | fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile 291 | { 292 | platform_fence_before(order); 293 | __asm__ ( 294 | "lock ; xaddw %0, %1" 295 | : "+q" (v), "+m" (v_) 296 | ); 297 | platform_fence_after(order); 298 | return v; 299 | } 300 | 301 | value_type 302 | fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile 303 | { 304 | return fetch_add(-v, order); 305 | } 306 | 307 | value_type 308 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 309 | { 310 | platform_fence_before(order); 311 | __asm__ ( 312 | "xchgw %0, %1" 313 | : "+q" (v), "+m" (v_) 314 | ); 315 | platform_fence_after(order); 316 | return v; 317 | } 318 | 319 | bool 320 | compare_exchange_strong( 321 | value_type & expected, 322 | value_type desired, 323 | memory_order success_order, 324 | memory_order failure_order) volatile 325 | { 326 | value_type previous = expected; 327 | platform_fence_before(success_order); 328 | __asm__ ( 329 | "lock ; cmpxchgw %2, %1" 330 | : "+a" (previous), "+m" (v_) 331 | : "q" (desired) 332 | ); 333 | bool success = (previous == expected); 334 | if (success) 335 | platform_fence_after(success_order); 336 | else 337 | platform_fence_after(failure_order); 338 | expected = previous; 339 | return success; 340 | } 341 | 342 | bool 343 | compare_exchange_weak( 344 | value_type & expected, 345 | value_type desired, 346 | memory_order success_order, 347 | memory_order failure_order) volatile 348 | { 349 | return compare_exchange_strong(expected, desired, success_order, failure_order); 350 | } 351 | 352 | value_type 353 | fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile 354 | { 355 | value_type tmp = load(memory_order_relaxed); 356 | do {} while(!compare_exchange_weak(tmp, tmp & v, order, 
memory_order_relaxed)); 357 | return tmp; 358 | } 359 | 360 | value_type 361 | fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile 362 | { 363 | value_type tmp = load(memory_order_relaxed); 364 | do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); 365 | return tmp; 366 | } 367 | 368 | value_type 369 | fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile 370 | { 371 | value_type tmp = load(memory_order_relaxed); 372 | do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); 373 | return tmp; 374 | } 375 | 376 | bool 377 | is_lock_free(void) const volatile 378 | { 379 | return true; 380 | } 381 | 382 | BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS 383 | private: 384 | base_atomic(const base_atomic &) /* = delete */ ; 385 | void operator=(const base_atomic &) /* = delete */ ; 386 | value_type v_; 387 | }; 388 | 389 | template 390 | class base_atomic { 391 | typedef base_atomic this_type; 392 | typedef T value_type; 393 | typedef T difference_type; 394 | public: 395 | explicit base_atomic(value_type v) : v_(v) {} 396 | base_atomic(void) {} 397 | 398 | void 399 | store(value_type v, memory_order order) volatile 400 | { 401 | if (order != memory_order_seq_cst) { 402 | platform_fence_before(order); 403 | const_cast(v_) = v; 404 | } else { 405 | exchange(v, order); 406 | } 407 | } 408 | 409 | value_type 410 | load(memory_order order) const volatile 411 | { 412 | value_type v = const_cast(v_); 413 | platform_fence_after_load(order); 414 | return v; 415 | } 416 | 417 | value_type 418 | fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile 419 | { 420 | platform_fence_before(order); 421 | __asm__ ( 422 | "lock ; xaddl %0, %1" 423 | : "+r" (v), "+m" (v_) 424 | ); 425 | platform_fence_after(order); 426 | return v; 427 | } 428 | 429 | value_type 430 | fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile 431 | { 432 | return fetch_add(-v, order); 433 | } 434 | 435 | value_type 436 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 437 | { 438 | platform_fence_before(order); 439 | __asm__ ( 440 | "xchgl %0, %1" 441 | : "+r" (v), "+m" (v_) 442 | ); 443 | platform_fence_after(order); 444 | return v; 445 | } 446 | 447 | bool 448 | compare_exchange_strong( 449 | value_type & expected, 450 | value_type desired, 451 | memory_order success_order, 452 | memory_order failure_order) volatile 453 | { 454 | value_type previous = expected; 455 | platform_fence_before(success_order); 456 | __asm__ ( 457 | "lock ; cmpxchgl %2, %1" 458 | : "+a" (previous), "+m" (v_) 459 | : "r" (desired) 460 | ); 461 | bool success = (previous == expected); 462 | if (success) 463 | platform_fence_after(success_order); 464 | else 465 | platform_fence_after(failure_order); 466 | expected = previous; 467 | return success; 468 | } 469 | 470 | bool 471 | compare_exchange_weak( 472 | value_type & expected, 473 | value_type desired, 474 | memory_order success_order, 475 | memory_order failure_order) volatile 476 | { 477 | return compare_exchange_strong(expected, desired, success_order, failure_order); 478 | } 479 | 480 | value_type 481 | fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile 482 | { 483 | value_type tmp = load(memory_order_relaxed); 484 | do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); 485 | return tmp; 486 | } 487 | 488 | value_type 489 | fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile 490 
| { 491 | value_type tmp = load(memory_order_relaxed); 492 | do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed)); 493 | return tmp; 494 | } 495 | 496 | value_type 497 | fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile 498 | { 499 | value_type tmp = load(memory_order_relaxed); 500 | do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); 501 | return tmp; 502 | } 503 | 504 | bool 505 | is_lock_free(void) const volatile 506 | { 507 | return true; 508 | } 509 | 510 | BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS 511 | private: 512 | base_atomic(const base_atomic &) /* = delete */ ; 513 | void operator=(const base_atomic &) /* = delete */ ; 514 | value_type v_; 515 | }; 516 | 517 | #if defined(__x86_64__) 518 | template 519 | class base_atomic { 520 | typedef base_atomic this_type; 521 | typedef T value_type; 522 | typedef T difference_type; 523 | public: 524 | explicit base_atomic(value_type v) : v_(v) {} 525 | base_atomic(void) {} 526 | 527 | void 528 | store(value_type v, memory_order order) volatile 529 | { 530 | if (order != memory_order_seq_cst) { 531 | platform_fence_before(order); 532 | const_cast(v_) = v; 533 | } else { 534 | exchange(v, order); 535 | } 536 | } 537 | 538 | value_type 539 | load(memory_order order) const volatile 540 | { 541 | value_type v = const_cast(v_); 542 | platform_fence_after_load(order); 543 | return v; 544 | } 545 | 546 | value_type 547 | fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile 548 | { 549 | platform_fence_before(order); 550 | __asm__ ( 551 | "lock ; xaddq %0, %1" 552 | : "+r" (v), "+m" (v_) 553 | ); 554 | platform_fence_after(order); 555 | return v; 556 | } 557 | 558 | value_type 559 | fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile 560 | { 561 | return fetch_add(-v, order); 562 | } 563 | 564 | value_type 565 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 566 | { 567 | platform_fence_before(order); 568 | __asm__ ( 569 | "xchgq %0, %1" 570 | : "+r" (v), "+m" (v_) 571 | ); 572 | platform_fence_after(order); 573 | return v; 574 | } 575 | 576 | bool 577 | compare_exchange_strong( 578 | value_type & expected, 579 | value_type desired, 580 | memory_order success_order, 581 | memory_order failure_order) volatile 582 | { 583 | value_type previous = expected; 584 | platform_fence_before(success_order); 585 | __asm__ ( 586 | "lock ; cmpxchgq %2, %1" 587 | : "+a" (previous), "+m" (v_) 588 | : "r" (desired) 589 | ); 590 | bool success = (previous == expected); 591 | if (success) 592 | platform_fence_after(success_order); 593 | else 594 | platform_fence_after(failure_order); 595 | expected = previous; 596 | return success; 597 | } 598 | 599 | bool 600 | compare_exchange_weak( 601 | value_type & expected, 602 | value_type desired, 603 | memory_order success_order, 604 | memory_order failure_order) volatile 605 | { 606 | return compare_exchange_strong(expected, desired, success_order, failure_order); 607 | } 608 | 609 | value_type 610 | fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile 611 | { 612 | value_type tmp = load(memory_order_relaxed); 613 | do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed)); 614 | return tmp; 615 | } 616 | 617 | value_type 618 | fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile 619 | { 620 | value_type tmp = load(memory_order_relaxed); 621 | do {} while(!compare_exchange_weak(tmp, tmp | v, order, 
memory_order_relaxed)); 622 | return tmp; 623 | } 624 | 625 | value_type 626 | fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile 627 | { 628 | value_type tmp = load(memory_order_relaxed); 629 | do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed)); 630 | return tmp; 631 | } 632 | 633 | bool 634 | is_lock_free(void) const volatile 635 | { 636 | return true; 637 | } 638 | 639 | BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS 640 | private: 641 | base_atomic(const base_atomic &) /* = delete */ ; 642 | void operator=(const base_atomic &) /* = delete */ ; 643 | value_type v_; 644 | }; 645 | 646 | #endif 647 | 648 | /* pointers */ 649 | 650 | #if !defined(__x86_64__) 651 | 652 | template<> 653 | class base_atomic { 654 | typedef base_atomic this_type; 655 | typedef void * value_type; 656 | public: 657 | explicit base_atomic(value_type v) : v_(v) {} 658 | base_atomic(void) {} 659 | 660 | void 661 | store(value_type v, memory_order order) volatile 662 | { 663 | if (order != memory_order_seq_cst) { 664 | platform_fence_before(order); 665 | const_cast(v_) = v; 666 | } else { 667 | exchange(v, order); 668 | } 669 | } 670 | 671 | value_type load(memory_order order) const volatile 672 | { 673 | value_type v = const_cast(v_); 674 | platform_fence_after_load(order); 675 | return v; 676 | } 677 | 678 | value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 679 | { 680 | platform_fence_before(order); 681 | __asm__ ( 682 | "xchgl %0, %1" 683 | : "+r" (v), "+m" (v_) 684 | ); 685 | platform_fence_after(order); 686 | return v; 687 | } 688 | 689 | bool compare_exchange_strong(value_type & expected, value_type desired, 690 | memory_order success_order, 691 | memory_order failure_order) volatile 692 | { 693 | value_type previous = expected; 694 | platform_fence_before(success_order); 695 | __asm__ ( 696 | "lock ; cmpxchgl %2, %1" 697 | : "+a" (previous), "+m" (v_) 698 | : "r" (desired) 699 | ); 700 | bool success = (previous == expected); 701 | if (success) 702 | platform_fence_after(success_order); 703 | else 704 | platform_fence_after(failure_order); 705 | expected = previous; 706 | return success; 707 | } 708 | 709 | bool compare_exchange_weak(value_type & expected, value_type desired, 710 | memory_order success_order, 711 | memory_order failure_order) volatile 712 | { 713 | return compare_exchange_strong(expected, desired, success_order, failure_order); 714 | } 715 | 716 | bool 717 | is_lock_free(void) const volatile 718 | { 719 | return true; 720 | } 721 | 722 | BOOST_ATOMIC_DECLARE_BASE_OPERATORS 723 | private: 724 | base_atomic(const base_atomic &) /* = delete */ ; 725 | void operator=(const base_atomic &) /* = delete */ ; 726 | value_type v_; 727 | }; 728 | 729 | template 730 | class base_atomic { 731 | typedef base_atomic this_type; 732 | typedef T * value_type; 733 | typedef ptrdiff_t difference_type; 734 | public: 735 | explicit base_atomic(value_type v) : v_(v) {} 736 | base_atomic(void) {} 737 | 738 | void 739 | store(value_type v, memory_order order) volatile 740 | { 741 | if (order != memory_order_seq_cst) { 742 | platform_fence_before(order); 743 | const_cast(v_) = v; 744 | } else { 745 | exchange(v, order); 746 | } 747 | } 748 | 749 | value_type 750 | load(memory_order order) const volatile 751 | { 752 | value_type v = const_cast(v_); 753 | platform_fence_after_load(order); 754 | return v; 755 | } 756 | 757 | value_type 758 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 759 | { 760 | 
platform_fence_before(order); 761 | __asm__ ( 762 | "xchgl %0, %1" 763 | : "+r" (v), "+m" (v_) 764 | ); 765 | platform_fence_after(order); 766 | return v; 767 | } 768 | 769 | bool 770 | compare_exchange_strong( 771 | value_type & expected, 772 | value_type desired, 773 | memory_order success_order, 774 | memory_order failure_order) volatile 775 | { 776 | value_type previous = expected; 777 | platform_fence_before(success_order); 778 | __asm__ ( 779 | "lock ; cmpxchgl %2, %1" 780 | : "+a" (previous), "+m" (v_) 781 | : "r" (desired) 782 | ); 783 | bool success = (previous == expected); 784 | if (success) 785 | platform_fence_after(success_order); 786 | else 787 | platform_fence_after(failure_order); 788 | expected = previous; 789 | return success; 790 | } 791 | 792 | bool 793 | compare_exchange_weak( 794 | value_type & expected, 795 | value_type desired, 796 | memory_order success_order, 797 | memory_order failure_order) volatile 798 | { 799 | return compare_exchange_strong(expected, desired, success_order, failure_order); 800 | } 801 | 802 | value_type 803 | fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile 804 | { 805 | v = v * sizeof(*v_); 806 | platform_fence_before(order); 807 | __asm__ ( 808 | "lock ; xaddl %0, %1" 809 | : "+r" (v), "+m" (v_) 810 | ); 811 | platform_fence_after(order); 812 | return reinterpret_cast(v); 813 | } 814 | 815 | value_type 816 | fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile 817 | { 818 | return fetch_add(-v, order); 819 | } 820 | 821 | bool 822 | is_lock_free(void) const volatile 823 | { 824 | return true; 825 | } 826 | 827 | BOOST_ATOMIC_DECLARE_POINTER_OPERATORS 828 | private: 829 | base_atomic(const base_atomic &) /* = delete */ ; 830 | void operator=(const base_atomic &) /* = delete */ ; 831 | value_type v_; 832 | }; 833 | 834 | #else 835 | 836 | template<> 837 | class base_atomic { 838 | typedef base_atomic this_type; 839 | typedef void * value_type; 840 | public: 841 | explicit base_atomic(value_type v) : v_(v) {} 842 | base_atomic(void) {} 843 | 844 | void 845 | store(value_type v, memory_order order) volatile 846 | { 847 | if (order != memory_order_seq_cst) { 848 | platform_fence_before(order); 849 | const_cast(v_) = v; 850 | } else { 851 | exchange(v, order); 852 | } 853 | } 854 | 855 | value_type load(memory_order order) const volatile 856 | { 857 | value_type v = const_cast(v_); 858 | platform_fence_after_load(order); 859 | return v; 860 | } 861 | 862 | value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 863 | { 864 | platform_fence_before(order); 865 | __asm__ ( 866 | "xchgq %0, %1" 867 | : "+r" (v), "+m" (v_) 868 | ); 869 | platform_fence_after(order); 870 | return v; 871 | } 872 | 873 | bool compare_exchange_strong(value_type & expected, value_type desired, 874 | memory_order success_order, 875 | memory_order failure_order) volatile 876 | { 877 | value_type previous = expected; 878 | platform_fence_before(success_order); 879 | __asm__ ( 880 | "lock ; cmpxchgq %2, %1" 881 | : "+a" (previous), "+m" (v_) 882 | : "r" (desired) 883 | ); 884 | bool success = (previous == expected); 885 | if (success) 886 | platform_fence_after(success_order); 887 | else 888 | platform_fence_after(failure_order); 889 | expected = previous; 890 | return success; 891 | } 892 | 893 | bool compare_exchange_weak(value_type & expected, value_type desired, 894 | memory_order success_order, 895 | memory_order failure_order) volatile 896 | { 897 | return 
compare_exchange_strong(expected, desired, success_order, failure_order); 898 | } 899 | 900 | bool 901 | is_lock_free(void) const volatile 902 | { 903 | return true; 904 | } 905 | 906 | BOOST_ATOMIC_DECLARE_BASE_OPERATORS 907 | private: 908 | base_atomic(const base_atomic &) /* = delete */ ; 909 | void operator=(const base_atomic &) /* = delete */ ; 910 | value_type v_; 911 | }; 912 | 913 | template 914 | class base_atomic { 915 | typedef base_atomic this_type; 916 | typedef T * value_type; 917 | typedef ptrdiff_t difference_type; 918 | public: 919 | explicit base_atomic(value_type v) : v_(v) {} 920 | base_atomic(void) {} 921 | 922 | void 923 | store(value_type v, memory_order order) volatile 924 | { 925 | if (order != memory_order_seq_cst) { 926 | platform_fence_before(order); 927 | const_cast(v_) = v; 928 | } else { 929 | exchange(v, order); 930 | } 931 | } 932 | 933 | value_type 934 | load(memory_order order) const volatile 935 | { 936 | value_type v = const_cast(v_); 937 | platform_fence_after_load(order); 938 | return v; 939 | } 940 | 941 | value_type 942 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 943 | { 944 | platform_fence_before(order); 945 | __asm__ ( 946 | "xchgq %0, %1" 947 | : "+r" (v), "+m" (v_) 948 | ); 949 | platform_fence_after(order); 950 | return v; 951 | } 952 | 953 | bool 954 | compare_exchange_strong( 955 | value_type & expected, 956 | value_type desired, 957 | memory_order success_order, 958 | memory_order failure_order) volatile 959 | { 960 | value_type previous = expected; 961 | platform_fence_before(success_order); 962 | __asm__ ( 963 | "lock ; cmpxchgq %2, %1" 964 | : "+a" (previous), "+m" (v_) 965 | : "r" (desired) 966 | ); 967 | bool success = (previous == expected); 968 | if (success) 969 | platform_fence_after(success_order); 970 | else 971 | platform_fence_after(failure_order); 972 | expected = previous; 973 | return success; 974 | } 975 | 976 | bool 977 | compare_exchange_weak( 978 | value_type & expected, 979 | value_type desired, 980 | memory_order success_order, 981 | memory_order failure_order) volatile 982 | { 983 | return compare_exchange_strong(expected, desired, success_order, failure_order); 984 | } 985 | 986 | value_type 987 | fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile 988 | { 989 | v = v * sizeof(*v_); 990 | platform_fence_before(order); 991 | __asm__ ( 992 | "lock ; xaddq %0, %1" 993 | : "+r" (v), "+m" (v_) 994 | ); 995 | platform_fence_after(order); 996 | return reinterpret_cast(v); 997 | } 998 | 999 | value_type 1000 | fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile 1001 | { 1002 | return fetch_add(-v, order); 1003 | } 1004 | 1005 | bool 1006 | is_lock_free(void) const volatile 1007 | { 1008 | return true; 1009 | } 1010 | 1011 | BOOST_ATOMIC_DECLARE_POINTER_OPERATORS 1012 | private: 1013 | base_atomic(const base_atomic &) /* = delete */ ; 1014 | void operator=(const base_atomic &) /* = delete */ ; 1015 | value_type v_; 1016 | }; 1017 | 1018 | #endif 1019 | 1020 | template 1021 | class base_atomic { 1022 | typedef base_atomic this_type; 1023 | typedef T value_type; 1024 | typedef uint8_t storage_type; 1025 | public: 1026 | explicit base_atomic(value_type v) 1027 | { 1028 | memcpy(&v_, &v, sizeof(value_type)); 1029 | } 1030 | base_atomic(void) {} 1031 | 1032 | void 1033 | store(value_type v, memory_order order) volatile 1034 | { 1035 | if (order != memory_order_seq_cst) { 1036 | storage_type tmp; 1037 | memcpy(&tmp, &v, sizeof(value_type)); 
1038 | platform_fence_before(order); 1039 | const_cast(v_) = tmp; 1040 | } else { 1041 | exchange(v, order); 1042 | } 1043 | } 1044 | 1045 | value_type 1046 | load(memory_order order) const volatile 1047 | { 1048 | storage_type tmp = const_cast(v_); 1049 | platform_fence_after_load(order); 1050 | value_type v; 1051 | memcpy(&v, &tmp, sizeof(value_type)); 1052 | return v; 1053 | } 1054 | 1055 | value_type 1056 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 1057 | { 1058 | storage_type tmp; 1059 | memcpy(&tmp, &v, sizeof(value_type)); 1060 | platform_fence_before(order); 1061 | __asm__ ( 1062 | "xchgb %0, %1" 1063 | : "+q" (tmp), "+m" (v_) 1064 | ); 1065 | platform_fence_after(order); 1066 | memcpy(&v, &tmp, sizeof(value_type)); 1067 | return v; 1068 | } 1069 | 1070 | bool 1071 | compare_exchange_strong( 1072 | value_type & expected, 1073 | value_type desired, 1074 | memory_order success_order, 1075 | memory_order failure_order) volatile 1076 | { 1077 | storage_type expected_s, desired_s; 1078 | memcpy(&expected_s, &expected, sizeof(value_type)); 1079 | memcpy(&desired_s, &desired, sizeof(value_type)); 1080 | storage_type previous_s = expected_s; 1081 | platform_fence_before(success_order); 1082 | __asm__ ( 1083 | "lock ; cmpxchgb %2, %1" 1084 | : "+a" (previous_s), "+m" (v_) 1085 | : "q" (desired_s) 1086 | ); 1087 | bool success = (previous_s == expected_s); 1088 | if (success) 1089 | platform_fence_after(success_order); 1090 | else 1091 | platform_fence_after(failure_order); 1092 | memcpy(&expected, &previous_s, sizeof(value_type)); 1093 | return success; 1094 | } 1095 | 1096 | bool 1097 | compare_exchange_weak( 1098 | value_type & expected, 1099 | value_type desired, 1100 | memory_order success_order, 1101 | memory_order failure_order) volatile 1102 | { 1103 | return compare_exchange_strong(expected, desired, success_order, failure_order); 1104 | } 1105 | 1106 | bool 1107 | is_lock_free(void) const volatile 1108 | { 1109 | return true; 1110 | } 1111 | 1112 | BOOST_ATOMIC_DECLARE_BASE_OPERATORS 1113 | private: 1114 | base_atomic(const base_atomic &) /* = delete */ ; 1115 | void operator=(const base_atomic &) /* = delete */ ; 1116 | storage_type v_; 1117 | }; 1118 | 1119 | template 1120 | class base_atomic { 1121 | typedef base_atomic this_type; 1122 | typedef T value_type; 1123 | typedef uint16_t storage_type; 1124 | public: 1125 | explicit base_atomic(value_type v) 1126 | { 1127 | memcpy(&v_, &v, sizeof(value_type)); 1128 | } 1129 | base_atomic(void) {} 1130 | 1131 | void 1132 | store(value_type v, memory_order order) volatile 1133 | { 1134 | if (order != memory_order_seq_cst) { 1135 | storage_type tmp; 1136 | memcpy(&tmp, &v, sizeof(value_type)); 1137 | platform_fence_before(order); 1138 | const_cast(v_) = tmp; 1139 | } else { 1140 | exchange(v, order); 1141 | } 1142 | } 1143 | 1144 | value_type 1145 | load(memory_order order) const volatile 1146 | { 1147 | storage_type tmp = const_cast(v_); 1148 | platform_fence_after_load(order); 1149 | value_type v; 1150 | memcpy(&v, &tmp, sizeof(value_type)); 1151 | return v; 1152 | } 1153 | 1154 | value_type 1155 | exchange(value_type v, memory_order order = memory_order_seq_cst) volatile 1156 | { 1157 | storage_type tmp; 1158 | memcpy(&tmp, &v, sizeof(value_type)); 1159 | platform_fence_before(order); 1160 | __asm__ ( 1161 | "xchgw %0, %1" 1162 | : "+q" (tmp), "+m" (v_) 1163 | ); 1164 | platform_fence_after(order); 1165 | memcpy(&v, &tmp, sizeof(value_type)); 1166 | return v; 1167 | } 1168 | 1169 | bool 1170 | 
template<typename T, bool Sign>
class base_atomic<T, void, 2, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint16_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgw %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgw %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint32_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgl %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgl %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};

#if defined(__x86_64__)
template<typename T, bool Sign>
class base_atomic<T, void, 8, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint64_t storage_type;
public:
    explicit base_atomic(value_type v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    base_atomic(void) {}

    void
    store(value_type v, memory_order order) volatile
    {
        if (order != memory_order_seq_cst) {
            storage_type tmp;
            memcpy(&tmp, &v, sizeof(value_type));
            platform_fence_before(order);
            const_cast<volatile storage_type &>(v_) = tmp;
        } else {
            exchange(v, order);
        }
    }

    value_type
    load(memory_order order) const volatile
    {
        storage_type tmp = const_cast<volatile storage_type &>(v_);
        platform_fence_after_load(order);
        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before(order);
        __asm__ (
            "xchgq %0, %1"
            : "+q" (tmp), "+m" (v_)
        );
        platform_fence_after(order);
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        storage_type expected_s, desired_s;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));
        storage_type previous_s = expected_s;
        platform_fence_before(success_order);
        __asm__ (
            "lock ; cmpxchgq %2, %1"
            : "+a" (previous_s), "+m" (v_)
            : "q" (desired_s)
        );
        bool success = (previous_s == expected_s);
        if (success)
            platform_fence_after(success_order);
        else
            platform_fence_after(failure_order);
        memcpy(&expected, &previous_s, sizeof(value_type));
        return success;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
#endif

#if defined(__i686__)

template<typename T>
bool
platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr)
{
    int scratch;
    T prev = expected;
    /* Make sure ebx is saved and restored properly in case
    this object is compiled as "position independent". Since
    programmers on x86 tend to forget specifying -DPIC or
    similar, always assume PIC.

    To make this work uniformly even in the non-PIC case,
    set up register constraints such that ebx cannot be
    used by accident, e.g. as the base address for the variable
    to be modified. Accessing "scratch" should always be okay,
    as it can only be placed on the stack (and therefore
    accessed through ebp or esp only).

    In theory, we could push/pop ebx onto/off the stack, but moves
    to a prepared stack slot turn out to be faster. */
    __asm__ __volatile__ (
        "movl %%ebx, %1\n"
        "movl %2, %%ebx\n"
        "lock; cmpxchg8b 0(%4)\n"
        "movl %1, %%ebx\n"
        : "=A" (prev), "=m" (scratch)
        : "D" ((int)desired), "c" ((int)(desired >> 32)), "S" (ptr), "0" (prev)
        : "memory");
    bool success = (prev == expected);
    expected = prev;
    return success;
}

#endif
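/* A hedged usage sketch (not from the original sources): the function above
   is the building block for a compare-and-swap loop on a 64-bit value when
   only cmpxchg8b is available (32-bit x86). Assuming a shared uint64_t
   counter:

       volatile uint64_t counter = 0;

       uint64_t expected = counter;
       for (;;) {
           uint64_t desired = expected + 1;
           if (platform_cmpxchg64_strong(expected, desired, &counter))
               break;   // desired was stored
           // otherwise expected now holds the value seen in memory; retry
       }
*/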
}
}
}

/* pull in 64-bit atomic type using cmpxchg8b above */
#if defined(__i686__)
#include "cas64strong.hpp"
#endif

#endif
--------------------------------------------------------------------------------
/stack/boost/atomic/detail/type-classifier.hpp:
--------------------------------------------------------------------------------
#ifndef BOOST_DETAIL_ATOMIC_TYPE_CLASSIFIER_HPP
#define BOOST_DETAIL_ATOMIC_TYPE_CLASSIFIER_HPP

// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

namespace boost { namespace detail { namespace atomic {

template<typename T>
struct type_classifier {
    typedef void test;
};

template<>
struct type_classifier<char> {typedef int test;};
template<>
struct type_classifier<unsigned char> {typedef int test;};
template<>
struct type_classifier<signed char> {typedef int test;};
template<>
struct type_classifier<unsigned short> {typedef int test;};
template<>
struct type_classifier<short> {typedef int test;};
template<>
struct type_classifier<unsigned int> {typedef int test;};
template<>
struct type_classifier<int> {typedef int test;};
template<>
struct type_classifier<unsigned long> {typedef int test;};
template<>
struct type_classifier<long> {typedef int test;};
#ifdef BOOST_HAS_LONG_LONG
template<> struct type_classifier<unsigned long long>
{typedef int test;};
template<> struct type_classifier<long long>
{typedef int test;};
#endif

template<typename T>
struct type_classifier<T *> {typedef void * test;};

template<typename T>
struct sign_trait {
    typedef void test;
};

template<>
struct sign_trait<char> {typedef int test;};
template<>
struct sign_trait<unsigned char> {typedef unsigned int test;};
template<>
struct sign_trait<signed char> {typedef int test;};
template<>
struct sign_trait<unsigned short> {typedef unsigned int test;};
template<>
struct sign_trait<short> {typedef int test;};
template<>
struct sign_trait<unsigned int> {typedef unsigned int test;};
template<>
struct sign_trait<int> {typedef int test;};
template<>
struct sign_trait<unsigned long> {typedef unsigned int test;};
template<>
struct sign_trait<long> {typedef int test;};
#ifdef BOOST_HAS_LONG_LONG
template<> struct sign_trait<unsigned long long>
{typedef unsigned int test;};
template<> struct sign_trait<long long>
{typedef int test;};
#endif

}}}

#endif
--------------------------------------------------------------------------------
/stack/boost/atomic/platform.hpp:
--------------------------------------------------------------------------------
// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Platform selection file

//#include <boost/config.hpp>

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

#include "detail/gcc-x86.hpp"

#elif defined(__GNUC__) && defined(__alpha__)

#include <boost/atomic/detail/gcc-alpha.hpp>

#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))

#include <boost/atomic/detail/gcc-ppc.hpp>

// This list of ARM architecture versions comes from Apple's arm/arch.h header.
// I don't know how complete it is.
#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
    || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
    || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_7A__))

#include <boost/atomic/detail/gcc-armv6plus.hpp>

#elif defined(__linux__) && defined(__arm__)

#include <boost/atomic/detail/linux-arm.hpp>

#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)

#include <boost/atomic/detail/interlocked.hpp>

#elif defined(__GNUC__)

#include <boost/atomic/detail/generic-cas.hpp>

#endif
--------------------------------------------------------------------------------
/stack/boost/memory_order.hpp:
--------------------------------------------------------------------------------
#ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED
#define BOOST_MEMORY_ORDER_HPP_INCLUDED

// MS compatible compilers support #pragma once

#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif

// boost/memory_order.hpp
//
// Defines enum boost::memory_order per the C++0x working draft
//
// Copyright (c) 2008, 2009 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)


namespace boost
{

//
// Enum values are chosen so that code that needs to insert
// a trailing fence for acquire semantics can use a single
// test such as:
//
// if( mo & memory_order_acquire ) { ...fence... }
//
// For leading fences one can use:
//
// if( mo & memory_order_release ) { ...fence... }
//
// Architectures such as Alpha that need a fence on consume
// can use:
//
// if( mo & ( memory_order_acquire | memory_order_consume ) ) { ...fence... }
//

enum memory_order
{
    memory_order_relaxed = 0,
    memory_order_acquire = 1,
    memory_order_release = 2,
    memory_order_acq_rel = 3, // acquire | release
    memory_order_seq_cst = 7, // acq_rel | 4
    memory_order_consume = 8
};
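// A hedged illustration of the encoding above: acq_rel (3) and seq_cst (7)
// each contain the acquire (1) and release (2) bits, so one mask test
// covers every ordering at least that strong. For example:
//
//     bool needs_trailing_fence(memory_order mo)
//     {
//         // true for acquire, acq_rel and seq_cst;
//         // false for relaxed, release and consume
//         return (mo & memory_order_acquire) != 0;
//     }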

} // namespace boost

#endif // #ifndef BOOST_MEMORY_ORDER_HPP_INCLUDED
--------------------------------------------------------------------------------
/stack/locked.h:
--------------------------------------------------------------------------------
#include <mutex>
#include <stack>

template<typename T>
class LockedStack
{
public:
    void Push(T* entry)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_stack.push(entry);
    }

    // For compatibility with the LockFreeStack interface,
    // add an unused int parameter.
    //
    T* Pop(int)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        if(m_stack.empty())
        {
            return nullptr;
        }
        T* ret = m_stack.top();
        m_stack.pop();
        return ret;
    }

private:
    std::stack<T*> m_stack;
    std::mutex m_mutex;
};

--------------------------------------------------------------------------------
/stack/lockfree.h:
--------------------------------------------------------------------------------

#include "atomic.h"
#include <unistd.h>

class LockFreeStack
{
public:
    // The elements we wish to store should inherit Node
    //
    struct Node
    {
        boost::atomic<Node*> next;
    };

    // Unfortunately, there is no platform-independent way to
    // define this class. The following definition works in
    // gcc on x86_64 architectures.
    //
    class TaggedPointer
    {
    public:
        TaggedPointer(): m_node(nullptr), m_counter(0) {}

        Node* GetNode()
        {
            return m_node.load(boost::memory_order_acquire);
        }

        uint64_t GetCounter()
        {
            return m_counter.load(boost::memory_order_acquire);
        }

        bool CompareAndSwap(Node* oldNode, uint64_t oldCounter, Node* newNode, uint64_t newCounter)
        {
            bool cas_result;
            __asm__ __volatile__
            (
                "lock cmpxchg16b %0;"  // cmpxchg16b sets ZF on success
                "setz       %3;"       // if ZF set, set cas_result to 1

                : "+m" (*this), "+a" (oldNode), "+d" (oldCounter), "=q" (cas_result)
                : "b" (newNode), "c" (newCounter)
                : "cc", "memory"
            );
            return cas_result;
        }
    private:
        boost::atomic<Node*> m_node;
        boost::atomic<uint64_t> m_counter;
    }
    // 16-byte alignment is required for double-width
    // compare and swap
    //
    __attribute__((aligned(16)));
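    /* A hedged note on the counter half of TaggedPointer: every successful
       CompareAndSwap publishes oldCounter + 1 alongside the new head, so a
       node that is popped and later pushed back reappears with a different
       tag, and any CAS armed with the stale tag fails. This is the classic
       counter-based defence against the ABA problem:

           Node* h = m_head.GetNode();
           uint64_t c = m_head.GetCounter();
           // ... another thread pops h and pushes it again: same pointer,
           // but the counter has moved on ...
           m_head.CompareAndSwap(h, c, newHead, c + 1);  // fails, as it should
    */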
    bool TryPushStack(Node* entry)
    {
        Node* oldHead;
        uint64_t oldCounter;

        oldHead = m_head.GetNode();
        oldCounter = m_head.GetCounter();
        entry->next.store(oldHead, boost::memory_order_relaxed);
        return m_head.CompareAndSwap(oldHead, oldCounter, entry, oldCounter + 1);
    }

    bool TryPopStack(Node*& oldHead, int threadId)
    {
        oldHead = m_head.GetNode();
        uint64_t oldCounter = m_head.GetCounter();
        if(oldHead == nullptr)
        {
            return true;
        }
        m_hazard[threadId*8].store(oldHead, boost::memory_order_seq_cst);
        if(m_head.GetNode() != oldHead)
        {
            return false;
        }
        return m_head.CompareAndSwap(oldHead, oldCounter, oldHead->next.load(boost::memory_order_acquire), oldCounter + 1);
    }

    void Push(Node* entry)
    {
        while(true)
        {
            if(TryPushStack(entry))
            {
                return;
            }
            usleep(250);
        }
    }

    Node* Pop(int threadId)
    {
        Node* res;
        while(true)
        {
            if(TryPopStack(res, threadId))
            {
                return res;
            }
            usleep(250);
        }
    }

private:
    TaggedPointer m_head;
    boost::atomic<Node*> m_hazard[MAX_THREADS*8];
};

--------------------------------------------------------------------------------
/stack/spinlocked.h:
--------------------------------------------------------------------------------
#include <stack>
#include "atomic.h"
#include <unistd.h>

class SpinLock
{
public:
    void Acquire()
    {
        while (true)
        {
            if (!m_locked.test_and_set(boost::memory_order_acquire))
            {
                return;
            }
            usleep(250);
        }
    }
    void Release()
    {
        m_locked.clear(boost::memory_order_release);
    }

private:
    boost::atomic_flag m_locked;
};

template<typename T>
class SpinLockedStack
{
public:
    void Push(T* entry)
    {
        m_lock.Acquire();
        m_stack.push(entry);
        m_lock.Release();
    }

    // For compatibility with the LockFreeStack interface,
    // add an unused int parameter.
    //
    T* Pop(int)
    {
        m_lock.Acquire();
        if(m_stack.empty())
        {
            m_lock.Release();
            return nullptr;
        }
        T* ret = m_stack.top();
        m_stack.pop();
        m_lock.Release();
        return ret;
    }

private:
    SpinLock m_lock;
    std::stack<T*> m_stack;
};

--------------------------------------------------------------------------------