├── .cproject
├── .gitignore
├── .project
├── Makefile
├── README.md
├── hazard.h
├── main.cc
├── pp.h
├── rcu.h
└── spin_lock.h
/.cproject:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Object files
2 | *.slo
3 | *.lo
4 | *.o
5 |
6 | # Compiled Dynamic libraries
7 | *.so
8 | *.dylib
9 |
10 | # Compiled Static libraries
11 | *.lai
12 | *.la
13 | *.a
14 |
--------------------------------------------------------------------------------
/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | RCU-Granary
4 |
5 |
6 |
7 |
8 |
9 | org.eclipse.cdt.managedbuilder.core.genmakebuilder
10 | clean,full,incremental,
11 |
12 |
13 |
14 |
15 | org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
16 | full,incremental,
17 |
18 |
19 |
20 |
21 |
22 | org.eclipse.cdt.core.cnature
23 | org.eclipse.cdt.core.ccnature
24 | org.eclipse.cdt.managedbuilder.core.managedBuildNature
25 | org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
26 |
27 |
28 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | clang++ -O0 -g3 -I./ -std=c++11 -stdlib=libc++ -c main.cc -o main.o
3 | clang++ -O0 -std=c++11 -stdlib=libc++ main.o -pthread -o rcu
4 |
5 | clean:
6 | -rm *.o
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Read-Copy-Update Implementation
2 | ===============================
3 |
4 | This is a simple and restricted implementation of the RCU protocol.
5 | The goals of this implementation are:
6 |
7 | * Being very explicit in how RCU-protected objects are meant to be
8 | used.
9 | * Remove the possibility of errors where `rcu_assign_pointer` or
10 | `rcu_dereference` are missing.
11 | * Implement RCU in a way that does not depend on any particular
12 | implementation of threads or schedulers. Thus, this implementation
13 | should work both in user and kernel space.
14 | * Implement sleepable RCU.
15 |
16 | How it Works
17 | ------------
18 | Each data structure has its own locking primitive (default is a spin lock)
19 | and a pointer to an atomic reference counter. Read critical sections are
20 | delimited by increments and decrements to this reference counter.
21 | Synchronizations operate by switching the reference counter pointed to by
22 | the data structure (and so later readers operate on a different counter),
23 | and then waiting for the previously active counter to hit zero.
24 |
25 | Nitty-Gritty Details
26 | --------------------
27 |
28 | ### Reference Counting
29 | The reference counters operate by incrementing and decrementing by `2`.
30 | This is so that the value of a reference counter encodes both the count
31 | and whether or not it is valid/stale.
32 |
33 | Because a pointer to a reference counter is used, it means that there is
34 | potential for a reader to read the address of the counter, but be descheduled
35 | or interrupted before having a chance to increment the counter. By the time
36 | that it might want to increment the counter, a writer could have already
37 | finished synchronizing using that counter.
38 |
39 | To mitigate this, the operation of waiting on a reference counter is to
40 | loop and compare-and-swap from `0` to `1`. Thus, a reference counter is
41 | valid iff its value is even, and it is stale iff its value is odd. The
42 | increment/decrement by `2` maintains this property.
43 |
44 | Thus, reader threads can unconditionally increment their counters, but can
45 | always detect if an increment was stale and recover by trying to get the
46 | newest reference counter.
47 |
48 | ### Stale Reference Counters
49 | Hazard pointers are used to reclaim stale reference counters. The hazard
50 | pointer implementation is simple: the main RCU-protected data structure
51 | maintains two lists: one of "hazardous" reference counters (currently
52 | being operated on by some reader thread), and another of stale reference
53 | counters. Writer threads periodically compare these lists to find reference
54 | counters that can be reclaimed.
--------------------------------------------------------------------------------
/hazard.h:
--------------------------------------------------------------------------------
1 | /*
2 | * hazard.h
3 | *
4 | * Created on: 2013-01-18
5 | * Author: pag
6 | */
7 |
8 | #ifndef GRANARY_HAZARD_H_
9 | #define GRANARY_HAZARD_H_
10 |
11 | namespace granary { namespace smp {
12 |
13 | /// Forward declarations.
14 | template struct hazard_pointer_list;
15 |
16 |
17 | /// Represents a shared list of hazard pointers.
18 | template
19 | struct hazard_pointer {
20 | private:
21 |
22 | friend struct hazard_pointer_list;
23 |
24 | /// Next hazard pointer in the list
25 | hazard_pointer *next;
26 |
27 | /// Is this hazard pointer active? I.e. should the pointer
28 | /// contained be considered as one that is held by another thread.
29 | std::atomic is_active;
30 |
31 | /// The pointer held by this hazard pointer structure.
32 | std::atomic hazardous_pointer;
33 |
34 | public:
35 |
36 | hazard_pointer(void) throw()
37 | : next(nullptr)
38 | , is_active(ATOMIC_VAR_INIT(true))
39 | , hazardous_pointer(ATOMIC_VAR_INIT(nullptr))
40 | { }
41 |
42 | /// Update the hazard pointer entry with a pointer value.
43 | inline void remember(T *ptr) throw() {
44 | hazardous_pointer.store(ptr);
45 | }
46 |
47 | /// Release a hazard pointer entry.
48 | void release(void) throw() {
49 | this->hazardous_pointer.store(nullptr);
50 | this->is_active.store(false);
51 | }
52 | };
53 |
54 |
55 | /// Represents a hazard pointer list.
56 | template
57 | struct hazard_pointer_list {
58 | private:
59 |
60 | std::atomic *> head;
61 |
62 | public:
63 |
64 | hazard_pointer_list(void) throw()
65 | : head(ATOMIC_VAR_INIT(nullptr))
66 | { }
67 |
68 | /// Destructor; delete the hazard pointer list, but NOT the hazard
69 | /// pointers themselves.
70 | ~hazard_pointer_list(void) throw() {
71 | hazard_pointer *ptr(head.exchange(nullptr));
72 | hazard_pointer *next_ptr(nullptr);
73 |
74 | for(; ptr; ptr = next_ptr) {
75 | next_ptr = ptr->next;
76 | delete ptr;
77 | }
78 | }
79 |
80 | /// Acquire a new hazard pointer entry.
81 | hazard_pointer &acquire(void) throw() {
82 |
83 | hazard_pointer *p(head.load());
84 | bool inactive(false);
85 |
86 | for(; p; p = p->next) {
87 | if(p->is_active.load()) {
88 | continue;
89 | }
90 |
91 | // only try once for each pointer
92 | if(!p->is_active.compare_exchange_weak(inactive, true)) {
93 | continue;
94 | }
95 |
96 | return *p;
97 | }
98 |
99 | // need to allocate a new hazard pointer
100 | p = new hazard_pointer;
101 | hazard_pointer *head_hp(nullptr);
102 |
103 | // link this hazard pointer in as the head of the list.
104 | do {
105 | head_hp = head.load();
106 | p->next = head_hp;
107 | } while(!head.compare_exchange_weak(head_hp, p));
108 |
109 | return *p;
110 | }
111 |
112 | /// Returns true if a particular pointer is contained in the hazard
113 | /// pointer list.
114 | bool contains(const T * const ptr) throw() {
115 | hazard_pointer *p(head.load());
116 | for(; p; p = p->next) {
117 | if(!p->is_active.load()) {
118 | continue;
119 | }
120 |
121 | if(p->hazardous_pointer.load() == ptr) {
122 | return true;
123 | }
124 | }
125 | return false;
126 | }
127 | };
128 |
129 | }}
130 |
131 | #endif /* GRANARY_HAZARD_H_ */
132 |
--------------------------------------------------------------------------------
/main.cc:
--------------------------------------------------------------------------------
1 |
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | #include "rcu.h"
10 |
11 | /// List of elements; will be used as a queue.
12 | template
13 | struct queue {
14 | queue *next;
15 | T value;
16 | };
17 |
18 |
19 | namespace granary { namespace smp {
20 |
21 | /// Specify the RCU protocol for lists.
22 | RCU_GENERIC_PROTOCOL((typename T), queue, (T),
23 | RCU_REFERENCE(next),
24 | RCU_VALUE(value))
25 |
26 | }}
27 |
28 |
29 | /// Number of items in the queue.
30 | static std::atomic QUEUE_LEN(ATOMIC_VAR_INIT(0U));
31 |
32 |
33 | /// How many operations each writer thread should perform.
34 | static int MAX_WRITE_PER_THREAD(5);
35 |
36 |
37 | /// The shared queue that readers and writers will operate on.
38 | static granary::smp::rcu_protected> QUEUE(
39 | granary::smp::RCU_INIT_NULL);
40 |
41 |
42 | /// The number of total threads currently running.
43 | static std::atomic NUM_ACTIVE_THREADS(ATOMIC_VAR_INIT(0U));
44 |
45 |
46 | /// The number of writer threads currently running.
47 | static std::atomic NUM_ACTIVE_WRITERS(ATOMIC_VAR_INIT(0U));
48 |
49 |
50 | /// Find and return the minimum value in the queue.
51 | int find_min(granary::smp::rcu_read_reference> item) throw() {
52 | int min_elem = -1;
53 | for(; item; ) {
54 | min_elem = std::min(min_elem, int(item.value));
55 | item = item.next;
56 | }
57 | return min_elem;
58 | }
59 |
60 |
61 | /// Add an element with a random value to the head of the queue.
62 | struct enqueue_random : public granary::smp::rcu_writer > {
63 |
64 | queue *new_item;
65 |
66 | /// Allocate a new list head.
67 | virtual void setup(void) throw() {
68 | new_item = new queue;
69 | new_item->value = rand();
70 | new_item->next = nullptr;
71 | }
72 |
73 | /// Change the list head
74 | virtual void while_readers_exist(
75 | write_ref_type head,
76 | publisher_type &publisher
77 | ) throw() {
78 | write_ref_type new_head(publisher.promote(new_item));
79 | new_head.next = head;
80 | publisher.publish(new_head);
81 | }
82 | };
83 |
84 |
85 | /// Remove an element from the end of the queue.
86 | struct dequeue : public granary::smp::rcu_writer > {
87 |
88 | write_ref_type removed_elem;
89 |
90 | /// Find and remove the last element from the queue.
91 | virtual void while_readers_exist(
92 | write_ref_type ref,
93 | publisher_type &publisher
94 | ) throw() {
95 |
96 | // nothing in the list
97 | if(!ref) {
98 | return;
99 | }
100 |
101 | write_ref_type prev_ref;
102 | for(; ; prev_ref = ref, ref = ref.next) {
103 | if(!ref.next) {
104 | break;
105 | }
106 | }
107 |
108 | removed_elem = ref;
109 |
110 | // only one element in the queue
111 | if(!prev_ref) {
112 | publisher.publish(publisher.promote(nullptr));
113 |
114 | // more than one element in the queue
115 | } else {
116 | prev_ref.next = publisher.promote(nullptr);
117 | }
118 | }
119 |
120 | /// Delete the removed element.
121 | virtual void teardown(collector_type &collector) throw() {
122 | if(removed_elem) {
123 | delete collector.demote(removed_elem);
124 | }
125 | }
126 | };
127 |
128 |
129 | /// Remove all elements from the queue.
130 | struct empty : public granary::smp::rcu_writer > {
131 |
132 | queue *head;
133 |
134 | /// Make all elements unreachable.
135 | virtual void while_readers_exist(
136 | write_ref_type ref,
137 | publisher_type &publisher
138 | ) throw() {
139 | head = publisher.publish(publisher.promote(nullptr));
140 | }
141 |
142 | /// Delete the elements.
143 | virtual void teardown(collector_type &collector) throw() {
144 | for(queue *next(nullptr); head; head = next) {
145 | next = head->next;
146 | delete head;
147 | }
148 | }
149 | };
150 |
151 |
152 | /// Reader thread implementation.
153 | void *reader_thread(void *p) {
154 | sleep(1);
155 | for(;;) {
156 | QUEUE.read(find_min);
157 | if(!NUM_ACTIVE_WRITERS.load()) {
158 | break;
159 | }
160 | }
161 |
162 | NUM_ACTIVE_THREADS.fetch_sub(1);
163 |
164 | return nullptr;
165 | }
166 |
167 |
168 | /// Writer thread implementation.
169 | void *writer_thread(void *p) {
170 | sleep(1);
171 | for(int i(0); i < MAX_WRITE_PER_THREAD; ++i) {
172 | if(rand() % 2) {
173 | enqueue_random adder;
174 | QUEUE.write(adder);
175 | } else {
176 | dequeue remover;
177 | QUEUE.write(remover);
178 | }
179 | }
180 |
181 | NUM_ACTIVE_THREADS.fetch_sub(1);
182 | NUM_ACTIVE_WRITERS.fetch_sub(1);
183 |
184 | return nullptr;
185 | }
186 |
187 |
188 | /// Main thread implementation.
189 | int main(int argc, char **argv) throw() {
190 | unsigned num_readers(0);
191 | unsigned num_writers(0);
192 |
193 | if(3 != argc) {
194 | printf("Format: %s \n", argv[0]);
195 | return 0;
196 | }
197 |
198 | sscanf(argv[1], "%u", &num_readers);
199 | sscanf(argv[2], "%u", &num_writers);
200 |
201 | NUM_ACTIVE_THREADS.store(num_writers + num_readers);
202 | NUM_ACTIVE_WRITERS.store(num_writers);
203 |
204 | pthread_t *readers = new pthread_t[num_readers];
205 | pthread_t *writers = new pthread_t[num_writers];
206 |
207 | pthread_attr_t writer_attr;
208 | pthread_attr_init(&writer_attr);
209 | pthread_attr_setschedpolicy(&writer_attr, SCHED_OTHER);
210 |
211 | pthread_attr_t reader_attr;
212 | pthread_attr_init(&reader_attr);
213 | pthread_attr_setschedpolicy(&reader_attr, SCHED_RR);
214 |
215 | // make writer threads
216 | for(unsigned i = 0; i < num_writers; ++i) {
217 | pthread_create(&(writers[i]), &writer_attr, writer_thread, &(writers[i]));
218 | }
219 |
220 | // make reader threads
221 | for(unsigned i = 0; i < num_readers; ++i) {
222 | pthread_create(&(readers[i]), &reader_attr, reader_thread, &(readers[i]));
223 | }
224 |
225 | for(;;) {
226 | sleep(1);
227 | if(!NUM_ACTIVE_THREADS.load()) {
228 | break;
229 | }
230 | }
231 |
232 | delete [] readers;
233 | delete [] writers;
234 |
235 | // free up all memory in the queue.
236 | empty element_remover;
237 | QUEUE.write(element_remover);
238 |
239 | return 0;
240 | }
241 |
--------------------------------------------------------------------------------
/pp.h:
--------------------------------------------------------------------------------
1 | /*
2 | * pp.h
3 | *
4 | * Created on: 2012-11-09
5 | * Author: pag
6 | * Version: $Id$
7 | */
8 |
9 | #ifndef Granary_PP_H_
10 | #define Granary_PP_H_
11 |
12 |
13 | /// Used to denote entrypoints into Granary.
14 | #define GRANARY_ENTRYPOINT
15 |
16 | #if defined(__GNUC__) && defined(__GNUC_MINOR__)
17 | # if __GNUC__ >= 4 && __GNUC_MINOR__ >= 7
18 | # define FORCE_INLINE inline
19 | # else
20 | # define FORCE_INLINE __attribute__((always_inline))
21 | # endif
22 | #elif defined(__clang__)
23 | # define FORCE_INLINE __attribute__((always_inline))
24 | #else
25 | # define FORCE_INLINE inline
26 | #endif
27 |
28 | #define GRANARY
29 |
30 | #define ALIGN_TO(lval, const_align) \
31 | (((lval) % (const_align)) ? ((const_align) - ((lval) % (const_align))) : 0)
32 |
33 | #if GRANARY_IN_KERNEL
34 | # define IF_KERNEL(...) __VA_ARGS__
35 | # define IF_KERNEL_(...) , __VA_ARGS__
36 | # define IF_KERNEL_ELSE(if_true, if_false) if_true
37 | # define IF_KERNEL_ELSE_(if_true, if_false) , if_true
38 | # define IF_USER(...)
39 | # define IF_USER_(...)
40 | #else
41 | # define IF_KERNEL(...)
42 | # define IF_KERNEL_(...)
43 | # define IF_KERNEL_ELSE(if_true, if_false) if_false
44 | # define IF_KERNEL_ELSE_(if_true, if_false) , if_false
45 | # define IF_USER(...) __VA_ARGS__
46 | # define IF_USER_(...) , __VA_ARGS__
47 | #endif
48 |
49 | #define FAULT (granary_break_on_fault(), granary_fault())
50 | #define BARRIER ASM("" : : : "memory")
51 |
52 | #define IF_DEBUG(cond, expr) {if(cond) { expr; }}
53 |
54 | /// Use to statically initialise some code.
55 | #define STATIC_INITIALISE___(id, ...) \
56 | static void init_func_ ## id(void) throw(); \
57 | struct init_class_ ## id : public granary::static_init_list { \
58 | public: \
59 | init_class_ ## id(void) throw() { \
60 | this->exec = init_func_ ## id; \
61 | granary::static_init_list::append(*this); \
62 | } \
63 | }; \
64 | static init_class_ ## id init_val_ ## id; \
65 | static __attribute__((noinline)) void init_func_ ## id(void) { \
66 | (void) init_val_ ## id; \
67 | { __VA_ARGS__ } \
68 | }
69 |
70 | #define STATIC_INITIALISE__(line, counter, ...) \
71 | STATIC_INITIALISE___(line ## _ ## counter, ##__VA_ARGS__)
72 | #define STATIC_INITIALISE_(line, counter, ...) \
73 | STATIC_INITIALISE__(line, counter, ##__VA_ARGS__)
74 | #define STATIC_INITIALISE(...) \
75 | STATIC_INITIALISE_(__LINE__, __COUNTER__, ##__VA_ARGS__)
76 |
77 | #if GRANARY_IN_KERNEL
78 | # define IF_TEST(...)
79 | # define ADD_TEST(func, desc)
80 | # define ASSERT(cond)
81 | #else
82 | # define IF_TEST(...) __VA_ARGS__
83 | # define ADD_TEST(test_func, test_desc) \
84 | STATIC_INITIALISE({ \
85 | static granary::static_test_list test__; \
86 | test__.func = test_func; \
87 | test__.desc = test_desc; \
88 | granary::static_test_list::append(test__); \
89 | })
90 | # define ASSERT(cond) {if(!(cond)) { FAULT; }}
91 | #endif
92 |
93 | #define ASM(...) __asm__ __volatile__ ( __VA_ARGS__ )
94 |
95 | /// unrolling macros for applying something to all general purpose registers
96 | #define ALL_REGS(R, R_last) \
97 | R(rdi, R(rsi, R(rdx, R(rbx, R(rcx, R(rax, R(r8, R(r9, R(r10, R(r11, R(r12, R(r13, R(r14, R_last(r15))))))))))))))
98 |
99 | /// unrolling macros for applying something to all argument registers
100 | #define ALL_CALL_REGS(R, R_last) \
101 | R(rdi, R(rsi, R(rdx, R(rcx, R(r8, R(r9, R(rbp, R_last(rax))))))))
102 |
103 |
104 | #define FOR_EACH_DIRECT_JUMP(macro, ...) \
105 | macro(jo, 3, ##__VA_ARGS__) \
106 | macro(jno, 4, ##__VA_ARGS__) \
107 | macro(jb, 3, ##__VA_ARGS__) \
108 | macro(jnb, 4, ##__VA_ARGS__) \
109 | macro(jz, 3, ##__VA_ARGS__) \
110 | macro(jnz, 4, ##__VA_ARGS__) \
111 | macro(jbe, 4, ##__VA_ARGS__) \
112 | macro(jnbe, 5, ##__VA_ARGS__) \
113 | macro(js, 3, ##__VA_ARGS__) \
114 | macro(jns, 4, ##__VA_ARGS__) \
115 | macro(jp, 3, ##__VA_ARGS__) \
116 | macro(jnp, 4, ##__VA_ARGS__) \
117 | macro(jl, 3, ##__VA_ARGS__) \
118 | macro(jnl, 4, ##__VA_ARGS__) \
119 | macro(jle, 4, ##__VA_ARGS__) \
120 | macro(jnle, 5, ##__VA_ARGS__) \
121 | macro(loop, 5, ##__VA_ARGS__) \
122 | macro(loopne, 7, ##__VA_ARGS__) \
123 | macro(loope, 6, ##__VA_ARGS__) \
124 | macro(jmp, 4, ##__VA_ARGS__) \
125 | macro(jmp_short, 10, ##__VA_ARGS__) \
126 | macro(jmp_ind, 8, ##__VA_ARGS__) \
127 | macro(jmp_far, 8, ##__VA_ARGS__) \
128 | macro(jmp_far_ind, 12, ##__VA_ARGS__) \
129 | macro(jecxz, 6, ##__VA_ARGS__)
130 |
131 | #define TO_STRING_(x) #x
132 | #define TO_STRING(x) TO_STRING_(x)
133 |
134 |
135 | #define CAT__(x, y) x ## y
136 | #define CAT_(x, y) CAT__(x, y)
137 | #define CAT(x, y) CAT_(x, y)
138 |
139 |
140 | #define NOTHING__
141 | #define NOTHING_ NOTHING__
142 | #define NOTHING NOTHING_
143 |
144 | #define EACH(pp, ap, sep, ...) \
145 | CAT(EACH_, NUM_PARAMS(__VA_ARGS__))(pp, ap, sep, ##__VA_ARGS__)
146 |
147 | #define EACH_0(pp, ap, sep,a0)
148 |
149 | #define EACH_1(pp, ap, sep, a0) \
150 | CAT(CAT(pp, a0), ap)
151 |
152 | #define EACH_2(pp, ap, sep, a0, a1) \
153 | EACH_1(pp, ap, sep, a0) sep CAT(CAT(pp, a1), ap)
154 |
155 | #define EACH_3(pp, ap, sep, a0, a1, a2) \
156 | EACH_2(pp, ap, sep, a0, a1) sep CAT(CAT(pp, a2), ap)
157 |
158 | #define EACH_4(pp, ap, sep, a0, a1, a2, a3) \
159 | EACH_3(pp, ap, sep, a0, a1, a2) sep CAT(CAT(pp, a3), ap)
160 |
161 | #define EACH_5(pp, ap, sep, a0, a1, a2, a3, a4) \
162 | EACH_4(pp, ap, sep, a0, a1, a2, a3) sep CAT(CAT(pp, a4), ap)
163 |
164 | #define EACH_6(pp, ap, sep, a0, a1, a2, a3, a4, a5) \
165 | EACH_5(pp, ap, sep, a0, a1, a2, a3, a4) sep CAT(CAT(pp, a5), ap)
166 |
167 |
168 | /// Determine the number of arguments in a variadic macro argument pack.
169 | /// Taken from: http://efesx.com/2010/07/17/variadic-macro-to-count-number-of-arguments/#comment-256
170 | #define NUM_PARAMS(...) NUM_PARAMS_IMPL(, ##__VA_ARGS__,7,6,5,4,3,2,1,0)
171 | #define NUM_PARAMS_IMPL(_0,_1,_2,_3,_4,_5,_6,_7,N,...) N
172 |
173 |
174 | #define PARAMS(...) __VA_ARGS__
175 |
176 |
177 | #define NOTHING__
178 | #define NOTHING_ NOTHING__
179 | #define NOTHING NOTHING_
180 |
181 | #define TEMPLATE_PARAMS(...) \
182 | CAT(TEMPLATE_PARAMS_, NUM_PARAMS(__VA_ARGS__))(__VA_ARGS__)
183 | #define TEMPLATE_PARAMS_0()
184 | #define TEMPLATE_PARAMS_1(...) < __VA_ARGS__ >
185 | #define TEMPLATE_PARAMS_2(...) < __VA_ARGS__ >
186 | #define TEMPLATE_PARAMS_3(...) < __VA_ARGS__ >
187 | #define TEMPLATE_PARAMS_4(...) < __VA_ARGS__ >
188 | #define TEMPLATE_PARAMS_5(...) < __VA_ARGS__ >
189 | #define TEMPLATE_PARAMS_6(...) < __VA_ARGS__ >
190 | #define TEMPLATE_PARAMS_7(...) < __VA_ARGS__ >
191 |
192 | #endif /* Granary_PP_H_ */
193 |
--------------------------------------------------------------------------------
/rcu.h:
--------------------------------------------------------------------------------
1 | /*
2 | * rcu.h
3 | *
4 | * Created on: 2013-01-16
5 | * Author: pag
6 | */
7 |
8 | #ifndef GRANARY_RCU_H_
9 | #define GRANARY_RCU_H_
10 |
11 | #include
12 | #include
13 | #include
14 |
15 | #include "pp.h"
16 | #include "hazard.h"
17 | #include "spin_lock.h"
18 |
19 |
20 | namespace granary { namespace smp {
21 |
22 | /// Forward declarations.
23 | template struct rcu_protected;
24 | template struct rcu_publisher;
25 | template struct rcu_collector;
26 | template union rcu_read_reference;
27 | template union rcu_write_reference;
28 |
29 |
30 | /// Defines the type protocol for an RCU-protected data structure.
31 | #define RCU_PROTOCOL(type_name, ...) \
32 | RCU_GENERIC_PROTOCOL((), type_name, (), #__VA_ARGS__)
33 |
34 |
35 | /// Defines the type protocol for a generic (i.e. one using templates)
36 | /// RCU-protected data structure.
37 | #define RCU_GENERIC_PROTOCOL(tpl_args, type_name, tpl_params, ...) \
38 | template \
39 | union rcu_read_reference { \
40 | private: \
41 | \
42 | typedef type_name TEMPLATE_PARAMS tpl_params base_type__; \
43 | friend struct rcu_protected; \
44 | typedef rcu_read_reference base_ref_type__; \
45 | typedef rcu_secret::read_secret_type secret_type__; \
46 | \
47 | base_type__ *internal_pointer__; \
48 | \
49 | inline rcu_read_reference(base_type__ *ptr__) throw() \
50 | : internal_pointer__(ptr__) \
51 | { } \
52 | \
53 | public: \
54 | enum { \
55 | IS_RCU_READ_REFERENCE = 1 \
56 | }; \
57 | \
58 | inline base_type__ *get_reference(secret_type__) throw() { \
59 | return this->internal_pointer__; \
60 | } \
61 | \
62 | template \
63 | inline rcu_read_reference(A__ a__) throw() \
64 | : internal_pointer__(a__.get_reference(secret_type__())) \
65 | { } \
66 | \
67 | EACH(READ_, NOTHING, NOTHING, __VA_ARGS__) \
68 | \
69 | template \
70 | inline base_ref_type__ &operator=(A__ a__) throw() { \
71 | static_assert(A__::IS_RCU_READ_REFERENCE, \
72 | "Argument to operator= must be an RCU read reference type."); \
73 | this->internal_pointer__ = a__.get_reference(secret_type__()); \
74 | return *this; \
75 | } \
76 | \
77 | inline operator bool (void) const throw() { \
78 | return nullptr != (this->internal_pointer__); \
79 | } \
80 | }; \
81 | \
82 | template \
83 | union rcu_write_reference { \
84 | private: \
85 | \
86 | typedef type_name TEMPLATE_PARAMS tpl_params base_type__; \
87 | friend struct rcu_protected; \
88 | template friend struct rcu_publisher; \
89 | template friend struct rcu_collector; \
90 | typedef rcu_write_reference base_ref_type__; \
91 | typedef rcu_secret::write_secret_type secret_type__; \
92 | \
93 | base_type__ *internal_pointer__; \
94 | \
95 | inline rcu_write_reference(base_type__ *ptr__) throw() \
96 | : internal_pointer__(ptr__) \
97 | { } \
98 | \
99 | inline rcu_write_reference(std::nullptr_t) throw() \
100 | : internal_pointer__(nullptr) \
101 | { } \
102 | \
103 | public: \
104 | enum { \
105 | IS_RCU_WRITE_REFERENCE = 1 \
106 | }; \
107 | \
108 | inline base_type__ *get_reference(secret_type__) throw() { \
109 | return this->internal_pointer__; \
110 | } \
111 | \
112 | template \
113 | inline rcu_write_reference(A__ a__) throw() \
114 | : internal_pointer__(a__.get_reference(secret_type__())) \
115 | { } \
116 | \
117 | rcu_write_reference(void) throw() \
118 | : internal_pointer__(nullptr) \
119 | { } \
120 | \
121 | template \
122 | inline base_ref_type__ &operator=(A__ a__) throw() { \
123 | static_assert(A__::IS_RCU_WRITE_REFERENCE, \
124 | "Argument to operator= must be an RCU write reference type."); \
125 | this->internal_pointer__ = a__.get_reference(secret_type__()); \
126 | return *this; \
127 | } \
128 | \
129 | EACH(WRITE_, NOTHING, NOTHING, __VA_ARGS__) \
130 | \
131 | inline operator bool (void) const throw() { \
132 | return nullptr != (this->internal_pointer__); \
133 | } \
134 | };
135 |
136 |
137 | /// Make a way of accessing the "skeleton" fields of an RCU-protected
138 | /// structure. We define the skeleton fields to be those that are used for
139 | /// traversing and making the structure of the RCU-protected data structure.
140 | ///
141 | /// Importantly, this automatically handles rcu_dereference.
142 | #define READ_RCU_REFERENCE(field_name) \
143 | struct field_name ## _field__ { \
144 | private: \
145 | friend union rcu_read_reference; \
146 | \
147 | base_type__ *internal_pointer__; \
148 | \
149 | typedef field_name ## _field__ self_type__; \
150 | typedef decltype((new base_type__)->field_name) field_type__; \
151 | \
152 | static_assert(std::is_pointer::value, \
153 | "The RCU_REFERENCE-defined field (" \
154 | #field_name \
155 | ") must have a pointer type."); \
156 | \
157 | typedef decltype(**(new field_type__)) value_type__; \
158 | typedef rcu_read_reference ref_type__; \
159 | \
160 | public: \
161 | enum { \
162 | IS_RCU_READ_REFERENCE = 1 \
163 | }; \
164 | \
165 | inline field_type__ get_reference(secret_type__) throw() { \
166 | return rcu::dereference(&(internal_pointer__->field_name)); \
167 | } \
168 | \
169 | inline operator ref_type__ (void) throw() { \
170 | return ref_type__( \
171 | rcu::dereference(&(internal_pointer__->field_name))); \
172 | }; \
173 | } field_name;
174 |
175 |
176 | /// Make a way of accessing a value field of an RCU protected structure. This
177 | /// can implicitly convert to an r-value of the field's type.
178 | #define READ_RCU_VALUE(field_name) \
179 | struct field_name ## _field__ { \
180 | private: \
181 | base_type__ *internal_pointer__; \
182 | \
183 | typedef field_name ## _field__ self_type__; \
184 | typedef decltype((new base_type__)->field_name) field_type__; \
185 | typedef typename std::decay::type decayed_type__; \
186 | public: \
187 | inline operator decayed_type__ (void) const throw() { \
188 | return internal_pointer__->field_name; \
189 | }; \
190 | } field_name;
191 |
192 |
193 | /// Defines the policy for accessing a skeleton field of a write reference.
194 | ///
195 | /// Importantly, this automatically handles rcu_assign_pointer.
196 | #define WRITE_RCU_REFERENCE(field_name) \
197 | struct field_name ## _field__; \
198 | friend struct field_name ## _field__; \
199 | struct field_name ## _field__ { \
200 | private: \
201 | friend union rcu_write_reference; \
202 | \
203 | base_type__ *internal_pointer__; \
204 | \
205 | typedef field_name ## _field__ self_type__; \
206 | typedef decltype((new base_type__)->field_name) field_type__; \
207 | \
208 | static_assert(std::is_pointer::value, \
209 | "The RCU_REFERENCE-defined field (" \
210 | #field_name \
211 | ") must have a pointer type."); \
212 | \
213 | typedef decltype(**(new field_type__)) value_type__; \
214 | typedef rcu_write_reference ref_type__; \
215 | \
216 | public: \
217 | enum { \
218 | IS_RCU_WRITE_REFERENCE = 1 \
219 | }; \
220 | \
221 | inline field_type__ get_reference(secret_type__) throw() { \
222 | return internal_pointer__->field_name; \
223 | } \
224 | \
225 | inline operator ref_type__ (void) throw() { \
226 | return ref_type__(internal_pointer__->field_name); \
227 | }; \
228 | \
229 | inline operator bool (void) const throw() { \
230 | return nullptr != (internal_pointer__->field_name); \
231 | } \
232 | \
233 | template \
234 | inline self_type__ &operator=(A__ val__) throw() { \
235 | static_assert(A__::IS_RCU_WRITE_REFERENCE, \
236 | "Argument to operator= must be an RCU write reference type."); \
237 | rcu::assign_pointer( \
238 | &(internal_pointer__->field_name), \
239 | val__.get_reference(secret_type__())); \
240 | return *this; \
241 | } \
242 | \
243 | } field_name;
244 |
245 |
246 | /// Returns an lvalue for a value field within a RCU-protected field for a
247 | /// write reference.
248 | #define WRITE_RCU_VALUE(field_name) \
249 | struct field_name ## _field__ { \
250 | private: \
251 | base_type__ *internal_pointer__; \
252 | typedef field_name ## _field__ self_type__; \
253 | typedef decltype((new base_type__)->field_name) field_type__; \
254 | \
255 | public: \
256 | inline operator field_type__ &(void) throw() { \
257 | return internal_pointer__->field_name; \
258 | }; \
259 | \
260 | template \
261 | inline self_type__ &operator=(A__ val__) throw() { \
262 | internal_pointer__->field_name = val__; \
263 | return *this; \
264 | } \
265 | \
266 | } field_name;
267 |
268 |
269 | namespace rcu {
270 |
271 | /// De-reference a pointer from within a read reference.
272 | template
273 | inline T *dereference(register T **ptr) throw() {
274 | std::atomic_thread_fence(std::memory_order_acquire);
275 | return *ptr;
276 | }
277 |
278 |
279 | /// Assign to a pointer from within a write reference.
280 | template
281 | inline void assign_pointer(register T **ptr, register T *new_val) throw() {
282 | *ptr = new_val;
283 | std::atomic_thread_fence(std::memory_order_release);
284 | }
285 |
286 |
287 | /// Represents a reference counter that is specific to RCU. RCU
288 | /// reference counters are organised into a data-structure-specific
289 | /// hazard pointer list after they are swapped out. The writer swaps
290 | /// reference counters, then queues up old reference counters into the
291 | /// list. This list is used by future writers to look for hazards
292 | /// (reader threads that have a reference to the counter) so that if
293 | /// the writer finds no hazards then the writer can re-use the counter
294 | /// for a later read generation.
295 | struct reference_counter {
296 | public:
297 |
298 | /// The count of this reference counter.
299 | std::atomic counter;
300 |
301 | /// Next reference counter in the hazard list / free list.
302 | reference_counter *next;
303 |
304 | reference_counter(void) throw()
305 | : counter(ATOMIC_VAR_INIT(0U))
306 | , next(nullptr)
307 | { }
308 |
309 | /// Check if the counter is valid.
310 | inline bool is_valid(void) throw() {
311 | return 0U == (counter.load() & 1U);
312 | }
313 |
314 | /// Increment the reference counter; this increments by two so that
315 | /// the reference counter is always even (when valid) and always
316 | /// odd (when stale). Returns true iff this reference counter is
317 | /// valid.
318 | inline bool increment(void) throw() {
319 | return 0U == (counter.fetch_add(2) & 1U);
320 | }
321 |
322 | /// Decrement the reference counter.
323 | inline void decrement(void) throw() {
324 | counter.fetch_sub(2);
325 | }
326 |
327 | /// Wait for the reference counter to hit zero, then swap to one,
328 | /// thus converting it from valid to stale.
329 | void wait(void) throw() {
330 | unsigned expected(0U);
331 |
332 | for(;;) {
333 | if(expected != counter.load()) {
334 | // TODO: yielding the thread might be good instead of
335 | // spinning needlessly
336 | continue;
337 | }
338 |
339 | if(counter.compare_exchange_weak(expected, 1U)) {
340 | break;
341 | }
342 | }
343 | }
344 |
345 | /// Reset the reference counter. Note: this sets the next pointer
346 | /// to null.
347 | inline void reset(void) throw() {
348 | counter.store(0U);
349 | next = nullptr;
350 | }
351 | };
352 |
353 | /// Represents the protocol that an RCU reader must follow. This is
354 | /// implicitly instantiated when calling write (with a function
355 | /// pointer argument).
356 | template
357 | struct reader {
358 | private:
359 | typedef rcu_read_reference read_ref_type;
360 |
361 | R (*func)(read_ref_type, Args&...);
362 | R ret_value;
363 |
364 | public:
365 |
366 | inline reader(R (*func_)(read_ref_type, Args&...)) throw()
367 | : func(func_)
368 | { }
369 |
370 | inline void operator()(read_ref_type ref, Args&... args) throw() {
371 | ret_value = func(ref, args...);
372 | }
373 |
374 | inline R yield(void) throw() {
375 | return ret_value;
376 | }
377 | };
378 |
379 | /// Read-critical section returning nothing.
380 | template
381 | struct reader {
382 | private:
383 | typedef rcu_read_reference read_ref_type;
384 |
385 | void (*func)(read_ref_type, Args&...);
386 |
387 | public:
388 |
389 | inline reader(void (*func_)(read_ref_type, Args&...)) throw()
390 | : func(func_)
391 | { }
392 |
393 | inline void operator()(read_ref_type ref, Args&... args) throw() {
394 | func(ref, args...);
395 | }
396 |
397 | inline void yield(void) throw() {
398 | return;
399 | }
400 | };
401 | }
402 |
403 |
404 | /// Used to signal that the internal data structure of an RCU-protected
405 | /// structure should not be initialised.
406 | static struct { } RCU_INIT_NULL;
407 |
408 |
409 | /// Represents a function that can only be used during the `while_readers_
410 | /// exist` method of an `rcu_writer` for publishing a new version of the
411 | /// data structure.
412 | template
413 | struct rcu_publisher {
414 | private:
415 |
416 | friend struct rcu_protected;
417 |
418 | T **data;
419 |
420 | rcu_publisher(T **data_) throw()
421 | : data(data_)
422 | { }
423 |
424 | rcu_publisher(const rcu_publisher &) throw() = delete;
425 | rcu_publisher &operator=(const rcu_publisher &) throw() = delete;
426 |
427 | public:
428 |
429 | typedef rcu_write_reference write_ref_type;
430 |
431 | ~rcu_publisher(void) throw() {
432 | data = nullptr;
433 | }
434 |
435 | /// Publish a new version of the data structure and return the old
436 | /// version.
437 | inline T *publish(write_ref_type new_version) throw() {
438 | std::atomic_thread_fence(std::memory_order_acquire);
439 | T *old_data(*data);
440 | *data = new_version.internal_pointer__;
441 | std::atomic_thread_fence(std::memory_order_release);
442 | return old_data;
443 | }
444 |
445 | /// Promote an untracked pointer into a write reference.
446 | template
447 | inline rcu_write_reference promote(R *ptr) throw() {
448 | return rcu_write_reference(ptr);
449 | }
450 |
451 | /// Promote a null pointer into a write reference.
452 | inline write_ref_type promote(std::nullptr_t) throw() {
453 | return write_ref_type(nullptr);
454 | }
455 | };
456 |
457 |
458 | /// Allows demoting of a write reference into a bare pointer. Thus, this
459 | /// allows us to garbage collect no longer visible write references.
460 | template
461 | struct rcu_collector {
462 | private:
463 |
464 | friend struct rcu_protected;
465 |
466 | rcu_collector(void) throw() { }
467 | rcu_collector(const rcu_collector &) throw() = delete;
468 | rcu_collector &operator=(const rcu_collector &) throw() = delete;
469 |
470 | public:
471 |
472 | typedef rcu_write_reference write_ref_type;
473 |
474 | /// Convert a write reference into a bare pointer for use in garbage
475 | /// collecting.
476 | ///
477 | /// Note: this modifies the reference in place, making it unusable after
478 | /// being demoted.
479 | template
480 | R *demote(rcu_write_reference &ref) throw() {
481 | R *ptr(ref.internal_pointer__);
482 | ref.internal_pointer__ = nullptr;
483 | return ptr;
484 | }
485 | };
486 |
487 |
488 | /// Represents the protocol that an RCU writer must follow.
489 | template
490 | struct rcu_writer {
491 | public:
492 |
493 | typedef rcu_write_reference write_ref_type;
494 | typedef rcu_publisher publisher_type;
495 | typedef rcu_collector collector_type;
496 |
497 | virtual ~rcu_writer(void) throw() { }
498 |
499 | /// This function is executed before mutual exclusion is acquired over
500 | /// the data structure.
501 | virtual void setup(void) throw() { }
502 |
503 | /// This function is called after mutual exclusion has been acquired by
504 | /// the writer (and so this the only writer writing to the data
505 | /// structure), but before we have waited for all readers to complete
506 | /// their read-critical sections. This is the only function in which
507 | /// a user of RCU can publish a new version of the entire data
508 | /// structure.
509 | virtual void while_readers_exist(
510 | write_ref_type, publisher_type &) throw() { }
511 |
512 | /// This function is called after all readers are done completing their
513 | /// read-critical sections, but before the writer releases its lock on
514 | /// the data structure.
515 | virtual void after_readers_done(write_ref_type) throw() { }
516 |
517 | /// This function is called after the writer releases its lock on the
518 | /// data structure.
519 | virtual void teardown(collector_type &) throw() { }
520 | };
521 |
522 |
523 | /// Represents a simple spinlock for an RCU-protected data structure.
524 | /// This can be partially specialised to allow for different data
525 | /// structures to use different mutex types.
526 | template
527 | struct rcu_writer_lock : public spin_lock { };
528 |
529 |
530 | /// Represents "secret" information used to ease passing pointers without
531 | /// leaking pointers to users of the RCU API.
532 | struct rcu_secret {
533 | private:
534 |
535 | /// Secret tag type used for extracting and communicating pointers.
536 | struct write_secret_type { };
537 | struct read_secret_type { };
538 |
539 | /// Make the secret type available to read and write references.
540 | template friend union rcu_read_reference;
541 | template friend union rcu_write_reference;
542 | };
543 |
544 |
545 | /// Represents an RCU-protected data structure.
546 | template
547 | struct rcu_protected {
548 | private:
549 |
550 | /// The protected data.
551 | mutable T *data;
552 |
553 | /// Lock used by writer threads to
554 | rcu_writer_lock writer_lock;
555 |
556 | /// Active reference counter.
557 | std::atomic reader_counter;
558 |
559 | /// List of free and hazard reference counters. Both of these are
560 | /// implicitly protected by the writer lock, so that we really have
561 | /// data-structure-specific hazard pointers, rather than thread-
562 | /// specific hazard pointers.
563 | rcu::reference_counter *free_counters;
564 | rcu::reference_counter *hazard_counters;
565 |
566 | /// List of hazardous read counters
567 | mutable hazard_pointer_list active_counters;
568 |
569 | /// Allocate a reference counter.
570 | rcu::reference_counter *allocate_counter(void) throw() {
571 | rcu::reference_counter *next_counter(free_counters);
572 | if(next_counter) {
573 | free_counters = next_counter->next;
574 | next_counter->reset();
575 | } else {
576 | next_counter = new rcu::reference_counter;
577 | }
578 | return next_counter;
579 | }
580 |
581 | /// Try to free a reference counter.
582 | void try_retire_reference_counter(void) throw() {
583 | rcu::reference_counter **prev_ptr(&hazard_counters);
584 | rcu::reference_counter *hazard(hazard_counters);
585 |
586 | for(;;) {
587 | for(; hazard; ) {
588 |
589 | // might not be any readers on this counter because it's at
590 | // one; we'll commit to trying this one.
591 | if(1 == hazard->counter.load()) {
592 | break;
593 | }
594 |
595 | prev_ptr = &(hazard->next);
596 | hazard = hazard->next;
597 | }
598 |
599 | if(!hazard) {
600 | return;
601 |
602 | // quick double check ;-)
603 | } else if(1 == hazard->counter.load()) {
604 | break;
605 | }
606 | }
607 |
608 | // it is dead because no reader threads have it as a hazardous
609 | // pointer; add it to the free list.
610 | if(!active_counters.contains(hazard)) {
611 | *prev_ptr = hazard->next;
612 | hazard->next = free_counters;
613 | free_counters = hazard;
614 | }
615 | }
616 |
617 | public:
618 |
619 | typedef rcu_read_reference read_ref_type;
620 | typedef rcu_write_reference write_ref_type;
621 | typedef rcu_publisher publisher_type;
622 | typedef rcu_collector collector_type;
623 |
624 | /// Constructors
625 |
626 | rcu_protected(void) throw()
627 | : data(new T)
628 | , reader_counter(ATOMIC_VAR_INIT(new rcu::reference_counter))
629 | , free_counters(nullptr)
630 | , hazard_counters(nullptr)
631 | , active_counters()
632 | {
633 | if(std::is_trivial::value) {
634 | memset(data, 0, sizeof *data);
635 | }
636 | }
637 |
638 | template
639 | rcu_protected(Args... args) throw()
640 | : data(new T(args...))
641 | , reader_counter(ATOMIC_VAR_INIT(new rcu::reference_counter))
642 | , free_counters(nullptr)
643 | , hazard_counters(nullptr)
644 | , active_counters()
645 | { }
646 |
647 | rcu_protected(decltype(RCU_INIT_NULL)) throw()
648 | : data(nullptr)
649 | , reader_counter(ATOMIC_VAR_INIT(new rcu::reference_counter))
650 | , free_counters(nullptr)
651 | , hazard_counters(nullptr)
652 | , active_counters()
653 | { }
654 |
655 | /// Destructor.
656 | ~rcu_protected(void) throw() {
657 | if(data) {
658 | delete data;
659 | data = nullptr;
660 | }
661 |
662 | // delete active counter
663 | rcu::reference_counter *c(reader_counter.exchange(nullptr));
664 | if(c) {
665 | delete c;
666 | }
667 |
668 | // delete free list
669 | c = free_counters;
670 | for(; free_counters; free_counters = c) {
671 | c = free_counters->next;
672 | delete free_counters;
673 | }
674 |
675 | // delete hazard list
676 | c = hazard_counters;
677 | for(; hazard_counters; hazard_counters = c) {
678 | c = hazard_counters->next;
679 | delete hazard_counters;
680 | }
681 | }
682 |
683 | /// Write to the RCU-protected data structure.
684 | void write(rcu_writer &writer) throw() {
685 | writer.setup();
686 | writer_lock.acquire();
687 |
688 | // get the next reference counter and swap it in to be the active
689 | // reference counters for new readers
690 | rcu::reference_counter *next_counter(allocate_counter());
691 |
692 | publisher_type publisher(&data);
693 | writer.while_readers_exist(write_ref_type(data), publisher);
694 |
695 | // exchange the reference counter; if the writer publishes something
696 | // then some readers will see it and others won't; just to be sure,
697 | // we conservatively wait for all that might.
698 | rcu::reference_counter *current_counter(
699 | reader_counter.exchange(next_counter));
700 |
701 | // before waiting on the current counter, try to release some old
702 | // hazardous counters to the free list.
703 | try_retire_reference_counter();
704 |
705 | // wait on the reference counter; hopefully by the time we're done
706 | // looking for potentially dead hazard pointers, we have "waited"
707 | // long enough.
708 | current_counter->wait();
709 |
710 | // pass in a new write ref because a new version of data might have
711 | // been published in `while_readers_exist`.
712 | writer.after_readers_done(write_ref_type(data));
713 |
714 | // add the old counter to the hazard list
715 | current_counter->next = hazard_counters;
716 | hazard_counters = current_counter;
717 |
718 | writer_lock.release();
719 |
720 | collector_type collector;
721 | writer.teardown(collector);
722 | }
723 |
724 | /// Read from the RCU-protected data structure. Invokes a function as
725 | /// the read-critical section.
726 | template
727 | inline R read(
728 | R (*reader_func)(read_ref_type, Args&...),
729 | Args&... args
730 | ) const throw() {
731 | rcu::reader reader(reader_func);
732 | return read(reader, args...);
733 | }
734 |
735 | private:
736 |
737 | /// Read from the RCU-protected data structure.
738 | template
739 | R read(rcu::reader reader, Args&... args) const throw() {
740 | rcu::reference_counter *read_counter(nullptr);
741 | hazard_pointer &hp(
742 | active_counters.acquire());
743 |
744 | // TODO: possible starvation if reader never gets a valid counter
745 | for(;;) {
746 | do {
747 | read_counter = reader_counter.load();
748 | hp.remember(read_counter);
749 | } while(read_counter != reader_counter.load());
750 |
751 | // do a read of the counter before we commit to a write of it,
752 | // because the counter might already be dead.
753 | if(!read_counter->is_valid()) {
754 | continue;
755 | }
756 |
757 | // it's now stored as a hazard pointer, so it's safe to use.
758 | if(read_counter->increment()) {
759 | break;
760 | }
761 |
762 | // if the counter is invalid, then decrement it (signalling to
763 | // writers that some counter is potentially okay to retire)
764 | // and try to get the counter again.
765 | read_counter->decrement();
766 | }
767 |
768 | // invoke the read-critical section
769 | std::atomic_thread_fence(std::memory_order_seq_cst);
770 | read_ref_type read_ref(data);
771 | reader(read_ref, args...);
772 |
773 | read_counter->decrement();
774 | hp.release();
775 |
776 | return reader.yield();
777 | }
778 | };
779 | }}
780 |
781 |
782 | #endif /* GRANARY_RCU_H_ */
783 |
--------------------------------------------------------------------------------
/spin_lock.h:
--------------------------------------------------------------------------------
1 | /*
2 | * spin_lock.h
3 | *
4 | * Created on: 2013-01-20
5 | * Author: pag
6 | */
7 |
8 | #ifndef SPIN_LOCK_H_
9 | #define SPIN_LOCK_H_
10 |
11 | #include
12 |
13 | namespace granary { namespace smp {
14 |
15 | /// Simple implementation of a spin lock.
16 | struct spin_lock {
17 | private:
18 |
19 | std::atomic is_locked;
20 |
21 | public:
22 |
23 | ~spin_lock(void) throw() = default;
24 |
25 | spin_lock(const spin_lock &) throw() = delete;
26 | spin_lock &operator=(const spin_lock &) throw() = delete;
27 |
28 | spin_lock(void) throw()
29 | : is_locked(ATOMIC_VAR_INIT(false))
30 | { }
31 |
32 | inline void acquire(void) throw() {
33 | for(;;) {
34 | if(is_locked.load(std::memory_order_acquire)) {
35 | continue;
36 | }
37 |
38 | if(!is_locked.exchange(true, std::memory_order_acquire)) {
39 | break;
40 | }
41 | }
42 | }
43 |
44 | inline void release(void) throw() {
45 | is_locked.store(false, std::memory_order_release);
46 | }
47 | };
48 |
49 | }}
50 |
51 | #endif /* SPIN_LOCK_H_ */
52 |
--------------------------------------------------------------------------------