├── .gitignore ├── Makefile ├── README.md ├── test.c ├── tpool.c └── tpool.h /.gitignore: -------------------------------------------------------------------------------- 1 | testtpool* 2 | debug-testtpool* 3 | LTFPool.udb 4 | *.o 5 | *.orig -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | testtpool:test.c tpool.c tpool.h 2 | gcc -o testtpool -g test.c tpool.c -lpthread 3 | debug-testtpool:test.c tpool.c tpool.h 4 | gcc -o debug-testtpool -g test.c tpool.c -lpthread -DDEBUG 5 | .PHONY:clean 6 | clean: 7 | -rm -f testtpool debug-testtpool 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # What is LFTPool? 2 | LFTPool is an abbreviation of Lock-Free Thread Pool. 3 | It is built without any lock and it can be compiled and used on Ubuntu (Linux kernel 3.11.3). It is as simple as: 4 | 5 | $ make 6 | 7 | Then you will get an executable file named testtpool. 8 | 9 | For more information, see http://blog.csdn.net/xhjcehust/article/details/45844901. 10 | # contact 11 | For any question, just contact me at any time. 12 | 13 | mailto: xhjcehust@qq.com 14 | 15 | Any suggestion is welcome!
16 | -------------------------------------------------------------------------------- /test.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "tpool.h" 5 | 6 | enum test_return { TEST_PASS, TEST_FAIL }; 7 | #define WORK_NUM 50 8 | 9 | static void heavy_work(void *args) 10 | { 11 | /* do some loops to simulate delay work */ 12 | int i; 13 | for(i = 0; i < 20000; i++) { 14 | int j; 15 | for(j = 0; j < 2000; j++) 16 | ; 17 | } 18 | return; 19 | } 20 | 21 | static enum test_return test_heavy_work(void) 22 | { 23 | int cpu_num = sysconf(_SC_NPROCESSORS_CONF); 24 | void *tpool = tpool_init(cpu_num);; 25 | int i; 26 | 27 | if (tpool == NULL) 28 | return TEST_FAIL; 29 | 30 | for(i = 0; i < WORK_NUM; i++) { 31 | if (tpool_add_work(tpool, heavy_work, NULL) < 0) { 32 | tpool_destroy(tpool, 0); 33 | return TEST_FAIL; 34 | } 35 | } 36 | tpool_destroy(tpool, 1); 37 | return TEST_PASS; 38 | } 39 | 40 | static void light_work(void *args) 41 | { 42 | /* return directly */ 43 | return; 44 | } 45 | 46 | static enum test_return test_light_work(void) 47 | { 48 | int cpu_num = sysconf(_SC_NPROCESSORS_CONF); 49 | void *tpool = tpool_init(cpu_num); 50 | int i; 51 | 52 | if (tpool == NULL) 53 | return TEST_FAIL; 54 | 55 | for(i = 0; i < WORK_NUM; i++) { 56 | if (tpool_add_work(tpool, light_work, NULL) < 0) { 57 | tpool_destroy(tpool, 0); 58 | return TEST_FAIL; 59 | } 60 | } 61 | tpool_destroy(tpool, 1); 62 | return TEST_PASS; 63 | } 64 | 65 | static enum test_return test_one_thread(void) 66 | { 67 | void *tpool = tpool_init(1); 68 | int i; 69 | 70 | if (tpool == NULL) 71 | return TEST_FAIL; 72 | 73 | for(i = 0; i < WORK_NUM; i++) { 74 | if (tpool_add_work(tpool, heavy_work, NULL) < 0) { 75 | tpool_destroy(tpool, 0); 76 | return TEST_FAIL; 77 | } 78 | } 79 | tpool_destroy(tpool, 1); 80 | return TEST_PASS; 81 | } 82 | 83 | static enum test_return test_tpool_destroy_directly(void) 84 | { 85 | int cpu_num = 
sysconf(_SC_NPROCESSORS_CONF); 86 | void *tpool = tpool_init(cpu_num); 87 | int i; 88 | 89 | if (tpool == NULL) 90 | return TEST_FAIL; 91 | 92 | for(i = 0; i < WORK_NUM; i++) { 93 | if (tpool_add_work(tpool, heavy_work, NULL) < 0) { 94 | tpool_destroy(tpool, 0); 95 | return TEST_FAIL; 96 | } 97 | } 98 | tpool_destroy(tpool, 0); 99 | return TEST_PASS; 100 | } 101 | 102 | static enum test_return test_inc_thread(void) 103 | { 104 | void *tpool = tpool_init(5); 105 | int i; 106 | 107 | if (tpool == NULL) 108 | return TEST_FAIL; 109 | 110 | for(i = 0; i < WORK_NUM << 13; i++) { 111 | if (tpool_add_work(tpool, light_work, NULL) < 0) { 112 | tpool_destroy(tpool, 0); 113 | return TEST_FAIL; 114 | } 115 | } 116 | if (tpool_inc_threads(tpool, 5) < 0) 117 | return TEST_FAIL; 118 | tpool_destroy(tpool, 1); 119 | return TEST_PASS; 120 | } 121 | 122 | static enum test_return test_dec_thread(void) 123 | { 124 | void *tpool = tpool_init(12); 125 | int i; 126 | 127 | if (tpool == NULL) 128 | return TEST_FAIL; 129 | 130 | for(i = 0; i < WORK_NUM; i++) { 131 | if (tpool_add_work(tpool, light_work, NULL) < 0) { 132 | tpool_destroy(tpool, 0); 133 | return TEST_FAIL; 134 | } 135 | } 136 | tpool_dec_threads(tpool, 6); 137 | for(i = 0; i < WORK_NUM; i++) { 138 | if (tpool_add_work(tpool, light_work, NULL) < 0) { 139 | tpool_destroy(tpool, 0); 140 | return TEST_FAIL; 141 | } 142 | } 143 | tpool_destroy(tpool, 1); 144 | return TEST_PASS; 145 | } 146 | 147 | enum test_return test_least_load(void) 148 | { 149 | int cpu_num = sysconf(_SC_NPROCESSORS_CONF); 150 | void *tpool = tpool_init(cpu_num);; 151 | int i; 152 | 153 | if (tpool == NULL) 154 | return TEST_FAIL; 155 | set_thread_schedule_algorithm(tpool, LEAST_LOAD); 156 | for(i = 0; i < WORK_NUM; i++) { 157 | if (tpool_add_work(tpool, heavy_work, NULL) < 0) { 158 | tpool_destroy(tpool, 0); 159 | return TEST_FAIL; 160 | } 161 | } 162 | tpool_destroy(tpool, 1); 163 | return TEST_PASS; 164 | } 165 | 166 | typedef enum test_return 
(*TEST_FUNC)(void); 167 | 168 | struct testcase { 169 | const char *description; 170 | TEST_FUNC function; 171 | }; 172 | 173 | struct testcase testcases[] = { 174 | {"one thread in thread pool", test_one_thread}, 175 | {"heavy work", test_heavy_work}, 176 | {"light work", test_light_work}, 177 | {"drop remaing works and exit directly", test_tpool_destroy_directly}, 178 | {"increase thread num", test_inc_thread}, 179 | {"decrease thread num", test_dec_thread}, 180 | {"set least load alogrithm", test_least_load}, 181 | { NULL, NULL } 182 | }; 183 | 184 | int main() 185 | { 186 | int exitcode = 0; 187 | int i = 0; 188 | struct timeval tstart,tend; 189 | enum test_return ret; 190 | unsigned long timeuse; 191 | 192 | printf("It may take you a few minutes to finish this test, please wait...\n"); 193 | for (i = 0; testcases[i].description != NULL; ++i) { 194 | gettimeofday(&tstart,NULL); 195 | ret = testcases[i].function(); 196 | gettimeofday(&tend,NULL); 197 | timeuse = 1000000 * (tend.tv_sec - tstart.tv_sec) + 198 | tend.tv_usec - tstart.tv_usec; 199 | if (ret == TEST_PASS) { 200 | printf("ok %d - %s time: %luus\n", i + 1, testcases[i].description, timeuse); 201 | } else { 202 | printf("not ok %d - %s\n", i + 1, testcases[i].description); 203 | exitcode = 1; 204 | } 205 | } 206 | 207 | return exitcode; 208 | } 209 | -------------------------------------------------------------------------------- /tpool.c: -------------------------------------------------------------------------------- 1 | /*************************************************************************** 2 | ** Name : tpool.c 3 | ** Author : xhjcehust 4 | ** Version : v1.0 5 | ** Date : 2015-05 6 | ** Description : Thread pool. 7 | ** 8 | ** CSDN Blog : http://blog.csdn.net/xhjcehust 9 | ** E-mail : hjxiaohust@gmail.com 10 | ** 11 | ** This file may be redistributed under the terms 12 | ** of the GNU Public License. 
13 | ***************************************************************************/ 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include "tpool.h" 24 | 25 | enum { 26 | TPOOL_ERROR, 27 | TPOOL_WARNING, 28 | TPOOL_INFO, 29 | TPOOL_DEBUG 30 | }; 31 | 32 | #define debug(level, ...) do { \ 33 | if (level < TPOOL_DEBUG) {\ 34 | flockfile(stdout); \ 35 | printf("###%p.%s: ", (void *)pthread_self(), __func__); \ 36 | printf(__VA_ARGS__); \ 37 | putchar('\n'); \ 38 | fflush(stdout); \ 39 | funlockfile(stdout);\ 40 | }\ 41 | } while (0) 42 | 43 | #define WORK_QUEUE_POWER 16 44 | #define WORK_QUEUE_SIZE (1 << WORK_QUEUE_POWER) 45 | #define WORK_QUEUE_MASK (WORK_QUEUE_SIZE - 1) 46 | /* 47 | * Just main thread can increase thread->in, we can make it safely. 48 | * However, thread->out may be increased in both main thread and 49 | * worker thread during balancing thread load when new threads are added 50 | * to our thread pool... 
51 | */ 52 | #define thread_out_val(thread) (__sync_val_compare_and_swap(&(thread)->out, 0, 0)) 53 | #define thread_queue_len(thread) ((thread)->in - thread_out_val(thread)) 54 | #define thread_queue_empty(thread) (thread_queue_len(thread) == 0) 55 | #define thread_queue_full(thread) (thread_queue_len(thread) == WORK_QUEUE_SIZE) 56 | #define queue_offset(val) ((val) & WORK_QUEUE_MASK) 57 | 58 | /* enough large for any system */ 59 | #define MAX_THREAD_NUM 512 60 | 61 | typedef struct tpool_work { 62 | void (*routine)(void *); 63 | void *arg; 64 | struct tpool_work *next; 65 | } tpool_work_t; 66 | 67 | typedef struct { 68 | pthread_t id; 69 | int shutdown; 70 | #ifdef DEBUG 71 | int num_works_done; 72 | #endif 73 | unsigned int in; /* offset from start of work_queue where to put work next */ 74 | unsigned int out; /* offset from start of work_queue where to get work next */ 75 | tpool_work_t work_queue[WORK_QUEUE_SIZE]; 76 | } thread_t; 77 | 78 | typedef struct tpool tpool_t; 79 | typedef thread_t* (*schedule_thread_func)(tpool_t *tpool); 80 | struct tpool { 81 | int num_threads; 82 | thread_t threads[MAX_THREAD_NUM]; 83 | schedule_thread_func schedule_thread; 84 | }; 85 | 86 | static pthread_t main_tid; 87 | static volatile int global_num_thread = 0; 88 | 89 | static int tpool_queue_empty(tpool_t *tpool) 90 | { 91 | int i; 92 | 93 | for (i = 0; i < tpool->num_threads; i++) 94 | if (!thread_queue_empty(&tpool->threads[i])) 95 | return 0; 96 | return 1; 97 | } 98 | 99 | static thread_t* round_robin_schedule(tpool_t *tpool) 100 | { 101 | static int cur_thread_index = -1; 102 | 103 | assert(tpool && tpool->num_threads > 0); 104 | cur_thread_index = (cur_thread_index + 1) % tpool->num_threads ; 105 | return &tpool->threads[cur_thread_index]; 106 | } 107 | 108 | static thread_t* least_load_schedule(tpool_t *tpool) 109 | { 110 | int i; 111 | int min_num_works_index = 0; 112 | 113 | assert(tpool && tpool->num_threads > 0); 114 | /* To avoid race, we adapt the simplest min 
value algorithm instead of min-heap */ 115 | for (i = 1; i < tpool->num_threads; i++) { 116 | if (thread_queue_len(&tpool->threads[i]) < 117 | thread_queue_len(&tpool->threads[min_num_works_index])) 118 | min_num_works_index = i; 119 | } 120 | return &tpool->threads[min_num_works_index]; 121 | } 122 | 123 | static const schedule_thread_func schedule_alogrithms[] = { 124 | [ROUND_ROBIN] = round_robin_schedule, 125 | [LEAST_LOAD] = least_load_schedule 126 | }; 127 | 128 | void set_thread_schedule_algorithm(void *pool, enum schedule_type type) 129 | { 130 | struct tpool *tpool = pool; 131 | 132 | assert(tpool); 133 | tpool->schedule_thread = schedule_alogrithms[type]; 134 | } 135 | 136 | static void sig_do_nothing(int signo) 137 | { 138 | return; 139 | } 140 | 141 | static tpool_work_t *get_work_concurrently(thread_t *thread) 142 | { 143 | tpool_work_t *work = NULL; 144 | unsigned int tmp; 145 | 146 | do { 147 | work = NULL; 148 | if (thread_queue_len(thread) <= 0) 149 | break; 150 | tmp = thread->out; 151 | //prefetch work 152 | work = &thread->work_queue[queue_offset(tmp)]; 153 | } while (!__sync_bool_compare_and_swap(&thread->out, tmp, tmp + 1)); 154 | return work; 155 | } 156 | 157 | static void *tpool_thread(void *arg) 158 | { 159 | thread_t *thread = arg; 160 | tpool_work_t *work = NULL; 161 | sigset_t signal_mask, oldmask; 162 | int rc, sig_caught; 163 | 164 | /* SIGUSR1 handler has been set in tpool_init */ 165 | __sync_fetch_and_add(&global_num_thread, 1); 166 | pthread_kill(main_tid, SIGUSR1); 167 | 168 | sigemptyset (&oldmask); 169 | sigemptyset (&signal_mask); 170 | sigaddset (&signal_mask, SIGUSR1); 171 | 172 | while (1) { 173 | rc = pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); 174 | if (rc != 0) { 175 | debug(TPOOL_ERROR, "SIG_BLOCK failed"); 176 | pthread_exit(NULL); 177 | } 178 | while (thread_queue_empty(thread) && !thread->shutdown) { 179 | debug(TPOOL_DEBUG, "I'm sleep"); 180 | rc = sigwait (&signal_mask, &sig_caught); 181 | if (rc != 0) { 182 | 
debug(TPOOL_ERROR, "sigwait failed"); 183 | pthread_exit(NULL); 184 | } 185 | } 186 | 187 | rc = pthread_sigmask(SIG_SETMASK, &oldmask, NULL); 188 | if (rc != 0) { 189 | debug(TPOOL_ERROR, "SIG_SETMASK failed"); 190 | pthread_exit(NULL); 191 | } 192 | debug(TPOOL_DEBUG, "I'm awake"); 193 | 194 | if (thread->shutdown) { 195 | debug(TPOOL_DEBUG, "exit"); 196 | #ifdef DEBUG 197 | debug(TPOOL_INFO, "%ld: %d\n", thread->id, thread->num_works_done); 198 | #endif 199 | pthread_exit(NULL); 200 | } 201 | work = get_work_concurrently(thread); 202 | if (work) { 203 | (*(work->routine))(work->arg); 204 | #ifdef DEBUG 205 | thread->num_works_done++; 206 | #endif 207 | } 208 | if (thread_queue_empty(thread)) 209 | pthread_kill(main_tid, SIGUSR1); 210 | } 211 | } 212 | 213 | static void spawn_new_thread(tpool_t *tpool, int index) 214 | { 215 | memset(&tpool->threads[index], 0, sizeof(thread_t)); 216 | if (pthread_create(&tpool->threads[index].id, NULL, tpool_thread, 217 | (void *)(&tpool->threads[index])) != 0) { 218 | debug(TPOOL_ERROR, "pthread_create failed"); 219 | exit(0); 220 | } 221 | } 222 | 223 | static int wait_for_thread_registration(int num_expected) 224 | { 225 | sigset_t signal_mask, oldmask; 226 | int rc, sig_caught; 227 | 228 | sigemptyset (&oldmask); 229 | sigemptyset (&signal_mask); 230 | sigaddset (&signal_mask, SIGUSR1); 231 | rc = pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); 232 | if (rc != 0) { 233 | debug(TPOOL_ERROR, "SIG_BLOCK failed"); 234 | return -1; 235 | } 236 | 237 | while (global_num_thread < num_expected) { 238 | rc = sigwait (&signal_mask, &sig_caught); 239 | if (rc != 0) { 240 | debug(TPOOL_ERROR, "sigwait failed"); 241 | return -1; 242 | } 243 | } 244 | rc = pthread_sigmask(SIG_SETMASK, &oldmask, NULL); 245 | if (rc != 0) { 246 | debug(TPOOL_ERROR, "SIG_SETMASK failed"); 247 | return -1; 248 | } 249 | return 0; 250 | } 251 | 252 | void *tpool_init(int num_threads) 253 | { 254 | int i; 255 | tpool_t *tpool; 256 | 257 | if (num_threads <= 0) { 
258 | return NULL; 259 | } else if (num_threads > MAX_THREAD_NUM) { 260 | debug(TPOOL_ERROR, "too many threads!!!"); 261 | return NULL; 262 | } 263 | tpool = malloc(sizeof(*tpool)); 264 | if (tpool == NULL) { 265 | debug(TPOOL_ERROR, "malloc failed"); 266 | return NULL; 267 | } 268 | 269 | memset(tpool, 0, sizeof(*tpool)); 270 | tpool->num_threads = num_threads; 271 | tpool->schedule_thread = round_robin_schedule; 272 | /* all threads are set SIGUSR1 with sig_do_nothing */ 273 | if (signal(SIGUSR1, sig_do_nothing) == SIG_ERR) { 274 | debug(TPOOL_ERROR, "signal failed"); 275 | return NULL; 276 | } 277 | main_tid = pthread_self(); 278 | for (i = 0; i < tpool->num_threads; i++) 279 | spawn_new_thread(tpool, i); 280 | if (wait_for_thread_registration(tpool->num_threads) < 0) 281 | pthread_exit(NULL); 282 | return (void *)tpool; 283 | } 284 | 285 | static int dispatch_work2thread(tpool_t *tpool, 286 | thread_t *thread, void(*routine)(void *), void *arg) 287 | { 288 | tpool_work_t *work = NULL; 289 | 290 | if (thread_queue_full(thread)) { 291 | debug(TPOOL_WARNING, "queue of thread selected is full!!!"); 292 | return -1; 293 | } 294 | work = &thread->work_queue[queue_offset(thread->in)]; 295 | work->routine = routine; 296 | work->arg = arg; 297 | work->next = NULL; 298 | thread->in++; 299 | if (thread_queue_len(thread) == 1) { 300 | debug(TPOOL_DEBUG, "signal has task"); 301 | pthread_kill(thread->id, SIGUSR1); 302 | } 303 | return 0; 304 | } 305 | 306 | /* 307 | * Here, worker threads died with work undone can not change from->out 308 | * and we can read it directly... 
309 | */ 310 | static int migrate_thread_work(tpool_t *tpool, thread_t *from) 311 | { 312 | unsigned int i; 313 | tpool_work_t *work; 314 | thread_t *to; 315 | 316 | for (i = from->out; i < from->in; i++) { 317 | work = &from->work_queue[queue_offset(i)]; 318 | to = tpool->schedule_thread(tpool); 319 | if (dispatch_work2thread(tpool, to, work->routine, work->arg) < 0) 320 | return -1; 321 | } 322 | #ifdef DEBUG 323 | printf("%ld migrate_thread_work: %u\n", from->id, thread_queue_len(from)); 324 | #endif 325 | return 0; 326 | } 327 | 328 | static int isnegtive(int val) 329 | { 330 | return val < 0; 331 | } 332 | 333 | static int ispositive(int val) 334 | { 335 | return val > 0; 336 | } 337 | 338 | static int get_first_id(int arr[], int len, int (*fun)(int)) 339 | { 340 | int i; 341 | 342 | for (i = 0; i < len; i++) 343 | if (fun(arr[i])) 344 | return i; 345 | return -1; 346 | } 347 | 348 | /* 349 | * The load balance algorithm may not work so balanced because worker threads 350 | * are consuming work at the same time, which resulting in work count is not 351 | * real-time 352 | */ 353 | static void balance_thread_load(tpool_t *tpool) 354 | { 355 | int count[MAX_THREAD_NUM]; 356 | int i, out, sum = 0, avg; 357 | int first_neg_id, first_pos_id, tmp, migrate_num; 358 | thread_t *from, *to; 359 | tpool_work_t *work; 360 | 361 | for (i = 0; i < tpool->num_threads; i++) { 362 | count[i] = thread_queue_len(&tpool->threads[i]); 363 | sum += count[i]; 364 | } 365 | avg = sum / tpool->num_threads; 366 | if (avg == 0) 367 | return; 368 | for (i = 0; i < tpool->num_threads; i++) 369 | count[i] -= avg; 370 | while (1) { 371 | first_neg_id = get_first_id(count, tpool->num_threads, isnegtive); 372 | first_pos_id = get_first_id(count, tpool->num_threads, ispositive); 373 | if (first_neg_id < 0) 374 | break; 375 | tmp = count[first_neg_id] + count[first_pos_id]; 376 | if (tmp > 0) { 377 | migrate_num = -count[first_neg_id]; 378 | count[first_neg_id] = 0; 379 | count[first_pos_id] = 
tmp; 380 | } else { 381 | migrate_num = count[first_pos_id]; 382 | count[first_pos_id] = 0; 383 | count[first_neg_id] = tmp; 384 | } 385 | from = &tpool->threads[first_pos_id]; 386 | to = &tpool->threads[first_neg_id]; 387 | for (i = 0; i < migrate_num; i++) { 388 | work = get_work_concurrently(from); 389 | if (work) { 390 | dispatch_work2thread(tpool, to, work->routine, work->arg); 391 | } 392 | } 393 | } 394 | from = &tpool->threads[first_pos_id]; 395 | /* Just migrate count[first_pos_id] - 1 works to other threads*/ 396 | for (i = 1; i < count[first_pos_id]; i++) { 397 | to = &tpool->threads[i - 1]; 398 | if (to == from) 399 | continue; 400 | work = get_work_concurrently(from); 401 | if (work) { 402 | dispatch_work2thread(tpool, to, work->routine, work->arg); 403 | } 404 | } 405 | } 406 | 407 | int tpool_inc_threads(void *pool, int num_inc) 408 | { 409 | tpool_t *tpool = pool; 410 | int i, num_threads; 411 | 412 | assert(tpool && num_inc > 0); 413 | num_threads = tpool->num_threads + num_inc; 414 | if (num_threads > MAX_THREAD_NUM) { 415 | debug(TPOOL_ERROR, "add too many threads!!!"); 416 | return -1; 417 | } 418 | for (i = tpool->num_threads; i < num_threads; i++) { 419 | spawn_new_thread(tpool, i); 420 | } 421 | if (wait_for_thread_registration(num_threads) < 0) { 422 | pthread_exit(NULL); 423 | } 424 | tpool->num_threads = num_threads; 425 | balance_thread_load(tpool); 426 | return 0; 427 | } 428 | 429 | void tpool_dec_threads(void *pool, int num_dec) 430 | { 431 | tpool_t *tpool = pool; 432 | int i, num_threads; 433 | 434 | assert(tpool && num_dec > 0); 435 | if (num_dec > tpool->num_threads) { 436 | num_dec = tpool->num_threads; 437 | } 438 | num_threads = tpool->num_threads; 439 | tpool->num_threads -= num_dec; 440 | for (i = tpool->num_threads; i < num_threads; i++) { 441 | tpool->threads[i].shutdown = 1; 442 | pthread_kill(tpool->threads[i].id, SIGUSR1); 443 | } 444 | for (i = tpool->num_threads; i < num_threads; i++) { 445 | 
pthread_join(tpool->threads[i].id, NULL); 446 | /* migrate remaining work to other threads */ 447 | if (migrate_thread_work(tpool, &tpool->threads[i]) < 0) 448 | debug(TPOOL_WARNING, "work lost during migration!!!"); 449 | } 450 | if (tpool->num_threads == 0 && !tpool_queue_empty(tpool)) 451 | debug(TPOOL_WARNING, "No thread in pool with work unfinished!!!"); 452 | } 453 | 454 | int tpool_add_work(void *pool, void(*routine)(void *), void *arg) 455 | { 456 | tpool_t *tpool = pool; 457 | thread_t *thread; 458 | 459 | assert(tpool); 460 | thread = tpool->schedule_thread(tpool); 461 | return dispatch_work2thread(tpool, thread, routine, arg); 462 | } 463 | 464 | 465 | void tpool_destroy(void *pool, int finish) 466 | { 467 | tpool_t *tpool = pool; 468 | int i; 469 | 470 | assert(tpool); 471 | if (finish == 1) { 472 | sigset_t signal_mask, oldmask; 473 | int rc, sig_caught; 474 | 475 | debug(TPOOL_DEBUG, "wait all work done"); 476 | 477 | sigemptyset (&oldmask); 478 | sigemptyset (&signal_mask); 479 | sigaddset (&signal_mask, SIGUSR1); 480 | rc = pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); 481 | if (rc != 0) { 482 | debug(TPOOL_ERROR, "SIG_BLOCK failed"); 483 | pthread_exit(NULL); 484 | } 485 | 486 | while (!tpool_queue_empty(tpool)) { 487 | rc = sigwait(&signal_mask, &sig_caught); 488 | if (rc != 0) { 489 | debug(TPOOL_ERROR, "sigwait failed"); 490 | pthread_exit(NULL); 491 | } 492 | } 493 | 494 | rc = pthread_sigmask(SIG_SETMASK, &oldmask, NULL); 495 | if (rc != 0) { 496 | debug(TPOOL_ERROR, "SIG_SETMASK failed"); 497 | pthread_exit(NULL); 498 | } 499 | } 500 | /* shutdown all threads */ 501 | for (i = 0; i < tpool->num_threads; i++) { 502 | tpool->threads[i].shutdown = 1; 503 | /* wake up thread */ 504 | pthread_kill(tpool->threads[i].id, SIGUSR1); 505 | } 506 | debug(TPOOL_DEBUG, "wait worker thread exit"); 507 | for (i = 0; i < tpool->num_threads; i++) { 508 | pthread_join(tpool->threads[i].id, NULL); 509 | } 510 | free(tpool); 511 | } 512 | 
-------------------------------------------------------------------------------- /tpool.h: -------------------------------------------------------------------------------- 1 | #ifndef __TPOOL_H__ 2 | #define __TPOOL_H__ 3 | 4 | enum schedule_type { 5 | ROUND_ROBIN, 6 | LEAST_LOAD 7 | }; 8 | 9 | void *tpool_init(int num_worker_threads); 10 | 11 | int tpool_inc_threads(void *pool, int num_inc); 12 | 13 | void tpool_dec_threads(void *pool, int num_dec); 14 | 15 | int tpool_add_work(void *pool, void(*routine)(void *), void *arg); 16 | /* 17 | @finish: 1, complete remaining works before return 18 | 0, drop remaining works and return directly 19 | */ 20 | void tpool_destroy(void *pool, int finish); 21 | 22 | /* set thread schedule algorithm, default is round-robin */ 23 | void set_thread_schedule_algorithm(void *pool, enum schedule_type type); 24 | 25 | #endif 26 | --------------------------------------------------------------------------------