├── .gitignore
├── Makefile
├── thread_pause.h
├── README.md
├── test.c
└── msgpool.h

/.gitignore:
--------------------------------------------------------------------------------
test_msgpool
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------

ifdef DEBUG
	CFLAGS = -g -ggdb -O0
else
	CFLAGS = -O3
endif

all: test_msgpool

clean:
	rm -rf test_msgpool

test_msgpool: test.c msgpool.h
	$(CC) -Wall -Wextra -pedantic -std=c99 $(CFLAGS) -o $@ $< -lpthread

test: test_msgpool
	./test_msgpool

# ptest: ptest.c pipe/pipe.o
# 	$(CC) -Wall -Wextra -std=c99 $(CFLAGS) -o ptest -I pipe/ ptest.c pipe/pipe.o

.PHONY: all clean test
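
# Example invocations (derived from the targets above):
#   make            # optimised build (-O3)
#   make DEBUG=1    # debug build (-g -ggdb -O0)
#   make test       # build and run the test binary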
--------------------------------------------------------------------------------
/thread_pause.h:
--------------------------------------------------------------------------------
#ifndef THREAD_PAUSE_H_
#define THREAD_PAUSE_H_

#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

// Methods to synchronise a group of threads.
// One thread gets control, the rest wait for it to release.
// Useful for synchronising threads at checkpoints to e.g. save state.
// Note: die(fmt, ...) is expected to be provided by the including project.

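// Usage sketch (do_work(), keep_working and run_checkpoint() are hypothetical
// application code, not provided by this header):
//
//   ThreadPause tp;
//   thread_pause_alloc(&tp);
//
//   // In each worker thread:
//   thread_pause_started(&tp);
//   while(keep_working) {
//     do_work();
//     thread_pause_trywait(&tp); // blocks here if another thread has paused us
//   }
//   thread_pause_finished(&tp);
//
//   // In a registered thread that wants exclusive control:
//   if(thread_pause_take_control(&tp)) {
//     run_checkpoint();          // every other registered thread is now waiting
//     thread_pause_release_control(&tp);
//   }
//
//   thread_pause_dealloc(&tp);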

typedef struct
{
  volatile bool paused;
  volatile size_t nthreads_running, nthreads_waiting;
  pthread_mutex_t pause_lock, resume_lock, control_lock;
  pthread_cond_t pause_cond, resume_cond;
} ThreadPause;

static inline void thread_pause_alloc(ThreadPause *thp)
{
  if(pthread_mutex_init(&thp->pause_lock, NULL) != 0 ||
     pthread_mutex_init(&thp->resume_lock, NULL) != 0 ||
     pthread_mutex_init(&thp->control_lock, NULL) != 0)
  {
    die("pthread_mutex init failed: %s\n", strerror(errno));
  }

  if(pthread_cond_init(&thp->pause_cond, NULL) != 0 ||
     pthread_cond_init(&thp->resume_cond, NULL) != 0)
  {
    die("pthread_cond init failed: %s\n", strerror(errno));
  }

  thp->paused = 0;
  thp->nthreads_running = 0;
}

static inline void thread_pause_dealloc(ThreadPause *thp)
{
  pthread_mutex_destroy(&thp->control_lock);
  pthread_mutex_destroy(&thp->pause_lock);
  pthread_mutex_destroy(&thp->resume_lock);
  pthread_cond_destroy(&thp->pause_cond);
  pthread_cond_destroy(&thp->resume_cond);
}

// Indicate that a thread has started
static inline void thread_pause_started(ThreadPause *thp)
{
  __sync_fetch_and_add(&thp->nthreads_running, 1);
}

// Indicate that a thread has finished
static inline void thread_pause_finished(ThreadPause *thp)
{
  __sync_fetch_and_sub(&thp->nthreads_running, 1);
}

// Returns 1 on success, 0 if someone has already called pause
static inline bool thread_pause_take_control(ThreadPause *thp)
{
  if(pthread_mutex_trylock(&thp->control_lock) != 0) return false;

  thp->paused = true;
  __sync_fetch_and_sub(&thp->nthreads_running, 1);

  pthread_mutex_lock(&thp->pause_lock);
  while(thp->nthreads_running)
    pthread_cond_wait(&thp->pause_cond, &thp->pause_lock);
  pthread_mutex_unlock(&thp->pause_lock);

  return true;
}

// Resume all waiting threads
static inline void thread_pause_release_control(ThreadPause *thp)
{
  thp->paused = false;

  // Wrapping the broadcast in lock / unlock is required here to avoid:
  //   1: while(thp->paused)
  //   2: paused = false; broadcast()
  //   1: wait();
  pthread_mutex_lock(&thp->resume_lock);
  pthread_cond_broadcast(&thp->resume_cond);
  pthread_mutex_unlock(&thp->resume_lock);

  thread_pause_started(thp);
  pthread_mutex_unlock(&thp->control_lock);
}

// Blocks if someone has paused, then returns 1; returns 0 if no one has paused
static inline bool thread_pause_trywait(ThreadPause *thp)
{
  if(!thp->paused) return false;

  __sync_fetch_and_sub(&thp->nthreads_running, 1);

  // Signal wrapped in lock / unlock for the same reason as the broadcast above
  pthread_mutex_lock(&thp->pause_lock);
  pthread_cond_signal(&thp->pause_cond);
  pthread_mutex_unlock(&thp->pause_lock);

  pthread_mutex_lock(&thp->resume_lock);
  while(thp->paused) pthread_cond_wait(&thp->resume_cond, &thp->resume_lock);
  pthread_mutex_unlock(&thp->resume_lock);

  thread_pause_started(thp);

  return true;
}

#endif /* THREAD_PAUSE_H_ */
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
msg-pool: Multiple Producers / Multiple Consumers Message Passing Pool
license: Public Domain
url: https://github.com/noporpoise/msg-pool
Isaac Turner
Jan 2014

Fast message passing
--------------------

A high-throughput message-passing pool that mixes lockless operations with
blocking mutexes / condition variables when we need to wait for input/output.
This is a *pool* not a *queue* - messages are delivered in no particular order.
The primary use is sharing tasks between multiple producer threads and
multiple consumer threads.
We do not worry about latency, only total throughput.

Features/limitations:
* fixed size pool
* messages passed without guarantees about ordering
* multiple producer / multiple consumer threads
* lockless except when the pool is full or empty

Example
-------

    MsgPool q;
    msgpool_alloc_spinlock(&q, qlen, sizeof(int));

    // Reader threads
    int r;
    while(msgpool_read(&q, &r, NULL)) printf("Got %i\n", r);

    // Writer threads
    int w = 12;
    msgpool_write(&q, &w, NULL);

    msgpool_dealloc(&q);

API
---

    void msgpool_alloc_spinlock(MsgPool *q, size_t nel, size_t elsize)

Create a new message pool that spins while waiting. This approach may be
fastest if you have more CPU cores than threads.

    void msgpool_alloc_yield(MsgPool *q, size_t nel, size_t elsize)

Create a new message pool that calls sched_yield() while waiting.

    void msgpool_alloc_mutex(MsgPool *q, size_t nel, size_t elsize)

Create a new message pool that blocks on mutexes / condition variables while
waiting. This approach is probably faster if you have multiple threads running
per CPU core.

    void msgpool_dealloc(MsgPool *q)

Release a message pool.

    void msgpool_iterate(MsgPool *q,
                         void (*func)(void *el, size_t idx, void *args),
                         void *args)

Iterate over elements in the pool. Example:

    void alloc_elements(void *ptr, size_t i, void *args)
    {
      (void)args; (void)i;
      char *tmp = malloc(100);
      memcpy(ptr, &tmp, sizeof(char*));
    }

    MsgPool q;
    msgpool_alloc_spinlock(&q, qlen, sizeof(char*));
    msgpool_iterate(&q, alloc_elements, NULL);

`msgpool_iterate()` is useful e.g. for initialising all elements to pointers to
allocated memory. Beware: the element pointers passed to `func()` by
`msgpool_iterate()` are not aligned in memory.

    int msgpool_read(MsgPool *q, void *restrict p, const void *restrict swap)

Read an element from the pool into `p`. `swap` is optional; if non-NULL the
data pointed to by `swap` is used to overwrite the element after it is read.
Returns the number of bytes read, or 0 if the pool is closed and empty.

    void msgpool_write(MsgPool *q, const void *restrict p, void *restrict swap)

Write an element to the pool. `swap` is optional; if non-NULL the memory it
points to is overwritten with the data that is about to be overwritten in the
pool.
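
The `swap` arguments make it easy to recycle heap buffers through the pool
rather than allocating for every message. A sketch, assuming a pool of `char*`
elements whose slots were pre-filled with `malloc()`ed buffers (as in the
`msgpool_iterate()` example above); `fill_buffer()` and `process()` stand in
for application code:

    // Producer: swap a filled buffer into the pool, taking the slot's old
    // (empty) buffer back to refill on the next iteration
    char *filled = fill_buffer(), *spare;
    msgpool_write(&q, &filled, &spare);

    // Consumer: take a filled buffer, leaving an empty one in its place
    char *msg, *empty = malloc(100);
    while(msgpool_read(&q, &msg, &empty)) {
      process(msg);
      empty = msg;   // recycle the consumed buffer on the next read
    }
    free(empty);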

    void msgpool_wait_til_empty(MsgPool *q)

Block until all elements have been read from the pool.

    void msgpool_close(MsgPool *q)

Close the pool: once it is empty, msgpool_read() returns 0 instead of blocking.

    void msgpool_reopen(MsgPool *q)

Reopen the pool for reading after calling msgpool_close().
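
A typical shutdown sequence (mirroring what `test.c` does): join the producer
threads, let the consumers drain the pool, then close it so blocked readers
return:

    // after all producer threads have been joined:
    msgpool_wait_til_empty(&q);  // block until consumers have drained the pool
    msgpool_close(&q);           // further reads return 0 once the pool is empty

    // consumer threads simply loop until a read fails:
    int r;
    while(msgpool_read(&q, &r, NULL)) { /* handle r */ }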

License
-------

Public Domain. No warranty. You may use this code as you wish without
restrictions. There are probably some bugs.

Please open an issue on github with ideas, bug reports or feature requests.
--------------------------------------------------------------------------------
/test.c:
--------------------------------------------------------------------------------
// request decent POSIX version
#define _XOPEN_SOURCE 700
#define _BSD_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h> // getopt(), sleep()
#include "msgpool.h"

#define NPROC_DEFAULT 5
#define NCONS_DEFAULT 5
#define NMESG_DEFAULT 1000000
#define QLEN_DEFAULT 1000

void print_usage() __attribute__((noreturn));

void print_usage()
{
  printf("usage: test [options]\n"
         "  -s        Use spinlock\n"
         "  -y        Use yield\n"
         "  -m        Use mutexes\n"
         "  -p <num>  Number of producer threads [default: %i]\n"
         "  -c <num>  Number of consumer threads [default: %i]\n"
         "  -n <num>  Number of messages to pass [default: %i]\n"
         "  -q <num>  Pool capacity [default: %i]\n",
         NPROC_DEFAULT, NCONS_DEFAULT, NMESG_DEFAULT, QLEN_DEFAULT);
  exit(EXIT_FAILURE);
}

struct TestThread {
  MsgPool *q;
  pthread_t th;
  size_t id, start, end;
  size_t result;
};

struct TestThread *producers, *consumers;

void* produce(void *ptr)
{
  const struct TestThread *prod = (const struct TestThread*)ptr;
  MsgPool *pool = prod->q;
  assert(pool->elsize == sizeof(size_t));
  size_t w;
  printf("Created producer %zu\n", prod->id);
  for(w = prod->start; w < prod->end; w++) msgpool_write(pool, &w, NULL);
  printf("Producer %zu finished!\n", prod->id);
  pthread_exit(NULL);
}

void* consume(void *ptr)
{
  struct TestThread *cons = (struct TestThread*)ptr;
  MsgPool *pool = cons->q;
  assert(pool->elsize == sizeof(size_t));
  size_t r, sum = 0; int pos;
  printf("Created consumer %zu\n", cons->id);
  while((pos = msgpool_claim_read(pool)) != -1) {
    memcpy(&r, msgpool_get_ptr(pool, pos), sizeof(size_t));
    msgpool_release(pool, pos, MPOOL_EMPTY);
    // printf("%zu\n", r);
    sum += r;
  }
  printf("Consumer %zu finished!\n", cons->id);
  cons->result = sum;
  pthread_exit(NULL);
}

// returns true on success
static bool run_threads(MsgPool *q, size_t nmesgs,
                        size_t nproducers, size_t nconsumers)
{
  size_t i;
  int rc;

  const char *lockstr[] = {"spinlocks [-s]", "yield [-y]", "mutexes [-m]"};

  printf("Using %s\n", lockstr[(int)q->locking]);

  printf("nproducers [-p]: %zu nconsumers [-c]: %zu "
         "messages [-n]: %zu qlen [-q]: %zu\n",
         nproducers, nconsumers, nmesgs, q->nel);

  // Thread attributes
  pthread_attr_t thread_attr;
  pthread_attr_init(&thread_attr);
  pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);

  producers = malloc(nproducers * sizeof(struct TestThread));
  consumers = malloc(nconsumers * sizeof(struct TestThread));

  // create consumers
  for(i = 0; i < nconsumers; i++) {
    consumers[i].id = i;
    consumers[i].q = q;
    rc = pthread_create(&consumers[i].th, &thread_attr, consume, &consumers[i]);
    if(rc != 0) { fprintf(stderr, "Creating thread failed\n"); exit(-1); }
  }

  size_t start, end = 0, msg_per_prod = nmesgs/nproducers;

  // create producers; the last producer takes any remainder of the messages
  for(i = 0; i < nproducers; i++) {
    start = end;
    end = (i+1 == nproducers ? nmesgs : end + msg_per_prod);
    producers[i].id = i;
    producers[i].q = q;
    producers[i].start = start;
    producers[i].end = end;
    rc = pthread_create(&producers[i].th, &thread_attr, produce, &producers[i]);
    if(rc != 0) { fprintf(stderr, "Creating thread failed\n"); exit(-1); }
  }

  printf("waiting for producers to finish...\n");

  // Wait for producers to finish
  for(i = 0; i < nproducers; i++) {
    rc = pthread_join(producers[i].th, NULL);
    if(rc != 0) { fprintf(stderr, "Join thread failed\n"); exit(-1); }
  }

  // Wait until empty
  msgpool_wait_til_empty(q);

  // Write an extra 100 messages (values 0..99) after the pool has drained
  for(i = 0; i < 100; i++)
    msgpool_write(q, &i, NULL);

  size_t extra_sum = (size_t)(100*(99/2.0));

  sleep(1);

  printf("waiting for consumers to finish...\n");
  msgpool_close(q);

  size_t sum = 0;

  // Wait until finished
  for(i = 0; i < nconsumers; i++) {
    rc = pthread_join(consumers[i].th, NULL);
    if(rc != 0) { fprintf(stderr, "Join thread failed\n"); exit(-1); }
    sum += consumers[i].result;
  }

  size_t corr_sum = (size_t)(nmesgs*((nmesgs-1)/2.0) + extra_sum);
  printf("messages: %zu result: %zu [%s %zu]\n",
         nmesgs, sum, sum == corr_sum ? "PASS" : "FAIL", corr_sum);

  pthread_attr_destroy(&thread_attr);
  free(producers);
  free(consumers);

  return sum == corr_sum;
}

// Despite the name, initialises element i to the value i
void set_zero(void *ptr, size_t i, void *args)
{
  (void)args;
  memcpy(ptr, &i, sizeof(size_t));
}

int main(int argc, char **argv)
{
  // Defaults
  size_t qlen = QLEN_DEFAULT;
  size_t nproducers = NPROC_DEFAULT, nconsumers = NCONS_DEFAULT;
  size_t nmesgs = NMESG_DEFAULT;

  // locking strategy flags (spinlock is the default)
  int use_spinlock = 0, use_mutexes = 0, use_yield = 0;

  // Read args
  int c;

  while ((c = getopt(argc, argv, "p:c:n:q:smy")) >= 0)
    if (c == 'p') nproducers = (size_t)atoi(optarg);
    else if (c == 'c') nconsumers = (size_t)atoi(optarg);
    else if (c == 'n') nmesgs = (size_t)atoi(optarg);
    else if (c == 'q') qlen = (size_t)atoi(optarg);
    else if (c == 's') use_spinlock = 1;
    else if (c == 'm') use_mutexes = 1;
    else if (c == 'y') use_yield = 1;
    else print_usage();

  if(optind < argc) print_usage();
  if(use_spinlock + use_mutexes + use_yield > 1) print_usage();
  if(use_spinlock + use_mutexes + use_yield == 0) use_spinlock = 1;

  // Create a pool of qlen size_t elements
  MsgPool q;
  if(use_spinlock)
    msgpool_alloc_spinlock(&q, qlen, sizeof(size_t));
  else if(use_mutexes)
    msgpool_alloc_mutex(&q, qlen, sizeof(size_t));
  else
    msgpool_alloc_yield(&q, qlen, sizeof(size_t));

  msgpool_iterate(&q, set_zero, NULL);

  bool pass = run_threads(&q, nmesgs, nproducers, nconsumers);

  msgpool_dealloc(&q);

  printf(pass ? "Done.\n" : "Fail.\n");
  return pass ? EXIT_SUCCESS : EXIT_FAILURE;
}
--------------------------------------------------------------------------------
/msgpool.h:
--------------------------------------------------------------------------------
#ifndef MSG_POOL_H_
#define MSG_POOL_H_

#include <stdio.h>
#include <stdlib.h> // needed for abort()
#include <string.h>
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>  // sched_yield()
#include <unistd.h> // need for getpid()

#define MPOOL_EMPTY   0
#define MPOOL_CLAIMED 1
#define MPOOL_FULL    2

#define MSGP_LOCK_SPIN  0
#define MSGP_LOCK_YIELD 1
#define MSGP_LOCK_MUTEX 2

// Possible improvements:
// 1. merge claim_read / claim_write (?)
// 2. Use rand odd numbers to iterate list to avoid repeatedly clashing (?)

typedef struct
{
  // qsize = elsize+1, qend = qsize*nel
  const size_t nel, elsize, qsize, qend;

  volatile size_t num_full, num_empty, num_waiting_readers, num_waiting_writers;
  volatile size_t last_read, last_write;
  char *const data; // nel slots, each laid out as: [state byte][elsize bytes]

  // reads block until success if open != 0,
  // otherwise they return 0
  volatile char open;

  // Blocking / locking mechanism (SPIN, YIELD, MUTEX)
  const char locking;

  // Mutexes
  pthread_mutex_t reader_wait_mutex, writer_wait_mutex;
  pthread_cond_t reader_wait_cond, writer_wait_cond;
} MsgPool;

// pos is the offset of a slot's state byte; the element data follows it
#define msgpool_get_ptr(pool,pos) ((void*)((pool)->data+(pos)+1))

#define msgpool_error(fmt,...) do { \
  fprintf(stderr, fmt, __VA_ARGS__); \
  abort(); \
} while(0)

static inline void msgpool_alloc(MsgPool *q, size_t nel, size_t elsize,
                                 char locking)
{
  if(locking != MSGP_LOCK_SPIN && locking != MSGP_LOCK_YIELD &&
     locking != MSGP_LOCK_MUTEX) {
    msgpool_error("[%s:%i] Invalid locking param\n", __FILE__, __LINE__);
  }

  // 1 extra byte per element for the slot state
  char *data = calloc(nel, elsize+1);
  if(data == NULL)
    msgpool_error("[%s:%i] Out of memory\n", __FILE__, __LINE__);

  MsgPool tmpq = {.nel = nel, .elsize = elsize, .qsize = elsize+1,
                  .qend = (elsize+1)*nel, .data = data,
                  .num_full = 0, .num_empty = nel,
                  .num_waiting_readers = 0, .num_waiting_writers = 0,
                  .open = true,
                  .last_read = 0, .last_write = 0,
                  .locking = locking};

  memcpy(q, &tmpq, sizeof(MsgPool));

  if(pthread_mutex_init(&q->reader_wait_mutex, NULL) != 0 ||
     pthread_mutex_init(&q->writer_wait_mutex, NULL) != 0)
  {
    msgpool_error("pthread_mutex init failed: %s\n", strerror(errno));
  }

  if(pthread_cond_init(&q->reader_wait_cond, NULL) != 0 ||
     pthread_cond_init(&q->writer_wait_cond, NULL) != 0)
  {
    msgpool_error("pthread_cond init failed: %s\n", strerror(errno));
  }
}

#define msgpool_alloc_spinlock(q,n,size) msgpool_alloc(q,n,size,MSGP_LOCK_SPIN)
#define msgpool_alloc_yield(q,n,size)    msgpool_alloc(q,n,size,MSGP_LOCK_YIELD)
#define msgpool_alloc_mutex(q,n,size)    msgpool_alloc(q,n,size,MSGP_LOCK_MUTEX)

// Deallocate a pool
static inline void msgpool_dealloc(MsgPool *q)
{
  pthread_cond_destroy(&q->reader_wait_cond);
  pthread_mutex_destroy(&q->reader_wait_mutex);
  pthread_cond_destroy(&q->writer_wait_cond);
  pthread_mutex_destroy(&q->writer_wait_mutex);
  free(q->data);
}

// Iterate over elements in the pool
// calls func(el,idx,args) with idx being 0,1,2... and el a pointer to the
// element (beware: not aligned in memory)
// Can be used to initialise elements at the beginning or clean up afterwards
static inline void msgpool_iterate(MsgPool *q,
                                   void (*func)(void *el, size_t idx, void *args),
                                   void *args)
{
  size_t i; char *ptr, *data = q->data;
  for(i = 0, ptr = data+1; i < q->nel; i++, ptr += q->qsize) {
    func((void*)ptr, i, args);
  }
}

// if til_empty, wait until the pool is empty,
// otherwise wait until there is space
static inline void _msgpool_wait_for_empty(MsgPool *q, bool til_empty)
{
  // printf("waiting until %s\n", til_empty ? "empty" : "space");
  size_t limit = til_empty ? q->nel : 1;
  if(q->num_empty < limit)
  {
    switch(q->locking) {
      case MSGP_LOCK_SPIN:
        while(q->num_empty < limit) {}
        break;
      case MSGP_LOCK_YIELD:
        while(q->num_empty < limit)
          if(sched_yield()) msgpool_error("yield failed: %s", strerror(errno));
        break;
      case MSGP_LOCK_MUTEX:
        if(sched_yield()) msgpool_error("yield failed: %s", strerror(errno));
        if(q->num_empty >= limit) break;
        pthread_mutex_lock(&q->writer_wait_mutex);
        q->num_waiting_writers++;
        while(q->num_empty < limit)
          pthread_cond_wait(&q->writer_wait_cond, &q->writer_wait_mutex);
        q->num_waiting_writers--;
        pthread_mutex_unlock(&q->writer_wait_mutex);
        break;
    }
  }
}

// Wait until there is at least one element in the pool or it is closed
static inline void _msgpool_wait_for_full(MsgPool *q)
{
  if(q->num_full == 0 && q->open)
  {
    // Wait on write
    switch(q->locking) {
      case MSGP_LOCK_SPIN:
        while(q->num_full == 0 && q->open) {}
        break;
      case MSGP_LOCK_YIELD:
        // sched_yield returns non-zero on error
        while(q->num_full == 0 && q->open)
          if(sched_yield()) msgpool_error("yield failed: %s", strerror(errno));
        break;
      case MSGP_LOCK_MUTEX:
        if(sched_yield()) msgpool_error("yield failed: %s", strerror(errno));
        if(q->num_full > 0 || !q->open) break;
        // num_waiting_readers is only changed while holding reader_wait_mutex,
        // so no __sync_fetch_and_add is needed
        pthread_mutex_lock(&q->reader_wait_mutex);
        q->num_waiting_readers++;
        while(q->num_full == 0 && q->open)
          pthread_cond_wait(&q->reader_wait_cond, &q->reader_wait_mutex);
        q->num_waiting_readers--;
        pthread_mutex_unlock(&q->reader_wait_mutex);
        break;
    }
  }
}

// Returns the index claimed, or -1 if the pool is closed and empty
static inline int msgpool_claim_read(MsgPool *q)
{
  size_t i, s = q->last_read;

  while(1)
  {
    _msgpool_wait_for_full(q);

    if(q->num_full == 0 && !q->open) return -1;

    for(i = s; i < q->qend; i += q->qsize)
    {
      if(q->data[i] == MPOOL_FULL &&
         __sync_bool_compare_and_swap((volatile char*)&q->data[i],
                                      MPOOL_FULL, MPOOL_CLAIMED))
      {
        q->last_read = i;
        __sync_sub_and_fetch(&q->num_full, 1); // q->num_full--;
        return (int)i;
      }
    }

    s = 0;
  }
}

// Returns the index claimed
static inline int msgpool_claim_write(MsgPool *q)
{
  size_t i, s = q->last_write;

  while(1)
  {
    // Wait until there is space to write
    _msgpool_wait_for_empty(q, false);

    for(i = s; i < q->qend; i += q->qsize)
    {
      if(q->data[i] == MPOOL_EMPTY &&
         __sync_bool_compare_and_swap((volatile char*)&q->data[i],
                                      MPOOL_EMPTY, MPOOL_CLAIMED))
      {
        q->last_write = i;
        __sync_sub_and_fetch(&q->num_empty, 1); // q->num_empty--;
        return (int)i;
      }
    }

    s = 0;
  }
}

// new_state must be MPOOL_EMPTY or MPOOL_FULL
static inline void msgpool_release(MsgPool *q, size_t pos, char new_state)
{
  assert(new_state == MPOOL_EMPTY || new_state == MPOOL_FULL);
  assert(q->data[pos] == MPOOL_CLAIMED);

  __sync_synchronize();
  q->data[pos] = new_state;

  if(new_state == MPOOL_EMPTY)
  {
    __sync_add_and_fetch(&q->num_empty, 1);

    if(q->locking == MSGP_LOCK_MUTEX && q->num_waiting_writers)
    {
      // Notify waiting writers that space has appeared in the pool
      pthread_mutex_lock(&q->writer_wait_mutex);
      if(q->num_waiting_writers) pthread_cond_signal(&q->writer_wait_cond);
      pthread_mutex_unlock(&q->writer_wait_mutex);
    }
  }
  else
  {
    // MPOOL_FULL
    __sync_add_and_fetch(&q->num_full, 1);

    if(q->locking == MSGP_LOCK_MUTEX && q->num_waiting_readers)
    {
      // Notify waiting readers that a new message is available
      pthread_mutex_lock(&q->reader_wait_mutex);
      if(q->num_waiting_readers) pthread_cond_signal(&q->reader_wait_cond);
      pthread_mutex_unlock(&q->reader_wait_mutex);
    }
  }
}
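
// In-place (zero-copy) access pattern, as used by the consumer in test.c:
// claim a slot, access it directly with msgpool_get_ptr(), then release it
// with its new state. A sketch of the read side:
//
//   int pos;
//   size_t msg;
//   while((pos = msgpool_claim_read(pool)) != -1) {
//     memcpy(&msg, msgpool_get_ptr(pool, pos), sizeof(msg));
//     msgpool_release(pool, pos, MPOOL_EMPTY); // or MPOOL_FULL to keep the message
//     // ... use msg ...
//   }
//
// The write side is symmetric: msgpool_claim_write(), fill the slot via
// msgpool_get_ptr(), then msgpool_release(pool, pos, MPOOL_FULL).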

// Read an element from the pool into ptr
// If swap is not NULL, the data it points to replaces the element after it
// has been copied out
// Returns elsize (bytes read), or 0 if the pool has been closed and is empty
static inline int msgpool_read(MsgPool *pool, void *restrict ptr,
                               const void *restrict swap)
{
  int pos = msgpool_claim_read(pool);
  if(pos < 0) return 0; // pool closed and empty
  memcpy(ptr, msgpool_get_ptr(pool, pos), pool->elsize);
  if(swap) memcpy(msgpool_get_ptr(pool, pos), swap, pool->elsize);
  msgpool_release(pool, pos, MPOOL_EMPTY);
  return (int)pool->elsize;
}

// Write an element to the pool
// If swap is not NULL, the memory it points to receives the data that is
// about to be overwritten in the pool
static inline void msgpool_write(MsgPool *pool, const void *restrict ptr,
                                 void *restrict swap)
{
  int pos = msgpool_claim_write(pool);
  if(swap) memcpy(swap, msgpool_get_ptr(pool, pos), pool->elsize);
  memcpy(msgpool_get_ptr(pool, pos), ptr, pool->elsize);
  msgpool_release(pool, pos, MPOOL_FULL);
}

// Close causes msgpool_read() to return 0 once the pool is empty
// Beware: this function doesn't block until the pool is empty;
// for that, also call msgpool_wait_til_empty(q)
static inline void msgpool_close(MsgPool *q)
{
  q->open = 0;
  if(q->locking == MSGP_LOCK_MUTEX && q->num_waiting_readers) {
    pthread_mutex_lock(&q->reader_wait_mutex);
    if(q->num_waiting_readers)
      pthread_cond_broadcast(&q->reader_wait_cond); // wake all sleeping readers
    pthread_mutex_unlock(&q->reader_wait_mutex);
  }
}

static inline void msgpool_reopen(MsgPool *q) {
  q->open = 1;
}

// Wait until the pool is empty; the pool stays open, so reads keep blocking
static inline void msgpool_wait_til_empty(MsgPool *q)
{
  _msgpool_wait_for_empty(q, true);
}

#endif /* MSG_POOL_H_ */
--------------------------------------------------------------------------------