├── .clang-format ├── .gitignore ├── CPPLINT.cfg ├── README.md ├── c_style_threadpool ├── Makefile ├── condition.cpp ├── condition.h ├── test.cpp ├── threadpool.cpp └── threadpool.h ├── cpp11_threadpool ├── Makefile ├── test.cpp └── threadpool.h └── cpp_threadpool ├── Makefile ├── sync_task_queue.h ├── sync_task_queue_test.cpp ├── threadpool.h └── threadpool_test.cpp /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | BasedOnStyle: Google 3 | --- 4 | Language: Cpp 5 | Cpp11BracedListStyle: true 6 | Standard: Cpp11 7 | AllowShortFunctionsOnASingleLine: None 8 | AllowShortLambdasOnASingleLine: None 9 | DerivePointerAlignment: false 10 | PointerAlignment: Left 11 | CommentPragmas: '^ NOLINT' 12 | ColumnLimit: 120 13 | --- 14 | Language: Proto 15 | ColumnLimit: 100 16 | --- 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | output 2 | -------------------------------------------------------------------------------- /CPPLINT.cfg: -------------------------------------------------------------------------------- 1 | set noparent 2 | 3 | linelength=120 4 | filter=-legal/copyright 5 | filter=-build/c++11 6 | filter=-build/include_subdir 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # C++线程池 2 | 3 | ## 线程池概念 4 | 5 | > 假设完成一项任务需要的时间=创建线程时间T1+线程执行任务时间T2+销毁线程时间T3,如果T1+T3的时间远大于T2,通常就可以考虑采取线程池来提高服务器的性能 6 | 7 | `thread pool`就是线程的一种使用模式,一个线程池中维护着多个线程等待接收管理者分配的可并发执行的任务。 8 | 9 | * 避免了处理短时间任务时创建与销毁线程的代价 10 | * 既保证内核的充分利用,又能防止过度调度 11 | * 可用线程数量应该取决于可用的并发处理器、处理器内核、内存、网络sockets的数量 12 | 13 | ## 线程池组成部分 14 | 15 | * 线程池管理器(thread pool):创建、销毁线程池 16 | * 工作线程(pool worker):在没有任务时处于等待状态,循环读取并执行任务队列中的任务 17 | * 任务(task):抽象一个任务,主要规定任务的入口、任务执行完后的收尾工作、任务的执行状态等 18 | * 任务队列(task 
queue):存放没有处理的任务,提供一种缓冲机制 19 | 20 | ## C风格ThreadPool 21 | 22 | #### 1. 抽象一个任务 23 | 24 | 将待处理的任务抽象成task结构: 25 | 26 | ```c++ 27 | typedef struct task { 28 | void* (*run)(void* args); // abstract a job function that need to run 29 | void* arg; // argument of the run function 30 | struct task* next; // point to the next task in task queue 31 | } task_t; 32 | ``` 33 | 34 | #### 2. 任务队列 35 | 36 | * `threadpool`中用`first`和`last`指针指向首尾两个任务 37 | * `task`结构体保证每个`task`都能指向任务队列中下一个`task` 38 | 39 | ```c++ 40 | typedef struct task { 41 | void* (*run)(void* args); // abstract a job function that need to run 42 | void* arg; // argument of the run function 43 | struct task* next; // point to the next task in task queue 44 | } task_t; 45 | 46 | typedef struct threadpool { 47 | condition_t ready; // condition & mutex 48 | task_t* first; // first task in task queue 49 | task_t* last; // last task in task queue 50 | int counter; // total thread number 51 | int idle; // idle thread number 52 | int max_threads; // max thread number 53 | int quit; // the quit flag 54 | } threadpool_t; 55 | 56 | ``` 57 | 58 | #### 3. 
线程安全的问题 59 | 60 | 设计了`condition_t`类来实现安全并发: 61 | 62 | ```c++ 63 | typedef struct condition { 64 | /** 65 | * 互斥锁 66 | */ 67 | pthread_mutex_t pmutex; 68 | /** 69 | * 条件变量 70 | */ 71 | pthread_cond_t pcond; 72 | } condition_t; 73 | ``` 74 | 75 | 提供对应的接口: 76 | 77 | ```c++ 78 | /** 79 | * 初始化 80 | */ 81 | int condition_init(condition_t* cond); 82 | 83 | /** 84 | * 加锁 85 | */ 86 | int condition_lock(condition_t* cond); 87 | /** 88 | * 解锁 89 | */ 90 | int condition_unlock(condition_t* cond); 91 | 92 | /** 93 | * 条件等待 94 | * 95 | * pthread_cond_wait(cond, mutex)的功能有3个: 96 | * 1) 调用者线程首先释放mutex 97 | * 2) 然后阻塞, 等待被别的线程唤醒 98 | * 3) 当调用者线程被唤醒后,调用者线程会再次获取mutex 99 | */ 100 | int condition_wait(condition_t* cond); 101 | 102 | /** 103 | * 计时等待 104 | */ 105 | int condition_timedwait(condition_t* cond, const timespec* abstime); 106 | 107 | /** 108 | * 激活一个等待该条件的线程 109 | * 110 | * 1) 作用: 发送一个信号给另外一个处于阻塞等待状态的线程, 使其脱离阻塞状态继续执行 111 | * 2) 如果没有线程处在阻塞状态, 那么pthread_cond_signal也会成功返回, 所以需要判断下idle thread的数量 112 | * 3) 最多只会给一个线程发信号,不会有「惊群现象」 113 | * 4) 首先根据线程优先级的高低确定发送给哪个线程信号, 如果优先级相同则优先发给等待最久的线程 114 | * 5) 重点: pthread_cond_wait必须放在lock和unlock之间, 因为他要根据共享变量的状态决定是否要等待; 但是pthread_cond_signal既可以放在lock和unlock之间,也可以放在lock和unlock之后 115 | */ 116 | int condition_signal(condition_t *cond); 117 | /** 118 | * 唤醒所有等待线程 119 | */ 120 | int condition_broadcast(condition_t *cond); 121 | 122 | /** 123 | * 销毁 124 | */ 125 | int condition_destroy(condition_t *cond); 126 | ``` 127 | 128 | #### 4. 
线程池的实现 129 | 130 | ###### 4.1 初始化一个线程池 131 | 132 | 仅仅是初始化了`condition`和`mutex`,还有一些线程池的属性。**但是任务队列是空的,而且此时也一个线程都没有**。 133 | 134 | ```c++ 135 | // initialize the thread pool 136 | void threadpool_init(threadpool_t* pool, int threads_num) { 137 | int n_status = condition_init(&pool ->ready); 138 | if (n_status == 0) { 139 | printf("Info: initialize the thread pool successfully!\n"); 140 | } else { 141 | printf("Error: initialize the thread pool failed, status:%d\n", n_status); 142 | } 143 | pool->first = NULL; 144 | pool->last = NULL; 145 | pool->counter = 0; 146 | pool->idle = 0; 147 | pool->max_threads = threads_num; 148 | pool->quit = 0; 149 | } 150 | ``` 151 | 152 | ###### 4.2 向线程池中添加任务,并分配给它一个线程 153 | 154 | 首先构建`task`结构体,然后将其加入任务队列。 155 | 156 | * 如果当前有空闲线程那么直接调用空闲线程执行函数 157 | * 如果无空闲线程且当前线程数未满时创建一个新的线程执行任务 158 | * 如果无空闲线程且当前线程数已满时,任务会呆在任务队列中等待线程池释放出空闲线程 159 | 160 | ```c++ 161 | // add a task to thread pool 162 | void threadpool_add_task(threadpool_t* pool, void* (*run)(void *arg), void* arg) { 163 | // create a task 164 | task_t* new_task = reinterpret_cast(malloc(sizeof(task_t))); 165 | new_task->run = run; 166 | new_task->arg = arg; 167 | new_task->next = NULL; 168 | 169 | // lock the condition 170 | condition_lock(&pool->ready); 171 | 172 | // add the task to task queue 173 | if (pool->first == NULL) { 174 | pool->first = new_task; 175 | } else { // else add to the last task 176 | pool->last->next = new_task; 177 | } 178 | pool->last = new_task; 179 | 180 | /* 181 | * after you add a task to task queue, you need to allocate it to a thread: 182 | * (1)if idle thread num > 0: awake a idle thread 183 | * (2)if idle thread num = 0 & thread num does not reach maximum: create a new thread to run the task 184 | */ 185 | if (pool->idle > 0) { 186 | // awake a thread that wait for longest time 187 | condition_signal(&pool->ready); 188 | } else if (pool->counter < pool->max_threads) { 189 | // define a tid to get the thread identifier that we are going to create 190 | 
pthread_t tid; 191 | /* 192 | * pthread_create(): 193 | * (1)thread identifier 194 | * (2)set the pthread attribute 195 | * (3)the function that thread is going to run 196 | * (4)the args of run func 197 | * 198 | * A realistic limit of thread num is 200 to 400 threads 199 | * https://www.ibm.com/support/knowledgecenter/en/SSLTBW_2.3.0/com.ibm.zos.v2r3.bpxbd00/ptcrea.htm 200 | */ 201 | pthread_create(&tid, NULL, thread_routine, pool); 202 | pool->counter++; 203 | } else { // when (idle == 0 & counter = max_threads), then wait 204 | printf("Warning: no idle thread, please wait...\n"); 205 | } 206 | 207 | condition_unlock(&pool->ready); 208 | } 209 | ``` 210 | 211 | #### 5. 线程的执行过程 212 | 213 | ###### 5.1 如果任务队列为空 214 | 215 | ```c++ 216 | // when task queue is empty, then block 2 second to get the new task 217 | // If timeout, then destroy the thread 218 | while (pool->first == NULL && !pool->quit) { 219 | printf("Info: thread %ld is waiting for a task\n", (u_int64_t)pthread_self()); 220 | // get the system time 221 | clock_gettime(CLOCK_REALTIME, &abs_name); 222 | abs_name.tv_sec += 2; 223 | int status; 224 | status = condition_timedwait(&pool->ready, &abs_name); // block for 2 second 225 | if (status == ETIMEDOUT) { 226 | printf("Info: thread %ld wait timed out\n", (u_int64_t)pthread_self()); 227 | timeout = true; 228 | break; 229 | } 230 | } 231 | 232 | ... 
233 | 234 | // if visit task queue timeout(means no task in queue), quit and destroy the thread 235 | if (timeout) { 236 | pool->counter--; 237 | condition_unlock(&pool->ready); 238 | break; // destroy the thread 239 | } 240 | ``` 241 | 242 | ###### 5.2 如果任务队列非空 243 | 244 | ```c++ 245 | // when the thread run the task, we should unlock the thread pool 246 | if (pool->first != NULL) { 247 | // get the task from task queue 248 | task_t* t = pool->first; 249 | pool->first = t->next; 250 | // unlock the thread pool to make other threads visit task queue 251 | condition_unlock(&pool->ready); 252 | 253 | // run the task run func 254 | t->run(t->arg); 255 | free(t); 256 | 257 | // lock 258 | condition_lock(&pool->ready); 259 | } 260 | ``` 261 | 262 | ###### 5.3 没有任务且收到退出信号 263 | 264 | ```c++ 265 | // when task queue is clean and quit flag is 1, then destroy the thread 266 | if (pool->quit && pool->first == NULL) { 267 | pool->counter--; 268 | // 若线程池中线程数为0,通知等待线程(主线程)全部任务已经完成 269 | if (pool->counter == 0) { 270 | condition_signal(&pool->ready); 271 | } 272 | condition_unlock(&pool->ready); 273 | break; // destroy the thread 274 | } 275 | ``` 276 | 277 | #### 6. 
代码 278 | 279 | condition.h: 280 | 281 | ```c++ 282 | #ifndef CONDITION_H_ 283 | #define CONDITION_H_ 284 | 285 | #include 286 | #include 287 | 288 | typedef struct condition { 289 | /** 290 | * 互斥锁 291 | */ 292 | pthread_mutex_t pmutex; 293 | /** 294 | * 条件变量 295 | */ 296 | pthread_cond_t pcond; 297 | } condition_t; 298 | 299 | /** 300 | * 初始化 301 | */ 302 | int condition_init(condition_t* cond); 303 | 304 | /** 305 | * 加锁 306 | */ 307 | int condition_lock(condition_t* cond); 308 | /** 309 | * 解锁 310 | */ 311 | int condition_unlock(condition_t* cond); 312 | 313 | /** 314 | * 条件等待 315 | * 316 | * pthread_cond_wait(cond, mutex)的功能有3个: 317 | * 1) 调用者线程首先释放mutex 318 | * 2) 然后阻塞, 等待被别的线程唤醒 319 | * 3) 当调用者线程被唤醒后,调用者线程会再次获取mutex 320 | */ 321 | int condition_wait(condition_t* cond); 322 | 323 | /** 324 | * 计时等待 325 | */ 326 | int condition_timedwait(condition_t* cond, const timespec* abstime); 327 | 328 | /** 329 | * 激活一个等待该条件的线程 330 | * 331 | * 1) 作用: 发送一个信号给另外一个处于阻塞等待状态的线程, 使其脱离阻塞状态继续执行 332 | * 2) 如果没有线程处在阻塞状态, 那么pthread_cond_signal也会成功返回, 所以需要判断下idle thread的数量 333 | * 3) 最多只会给一个线程发信号,不会有「惊群现象」 334 | * 4) 首先根据线程优先级的高低确定发送给哪个线程信号, 如果优先级相同则优先发给等待最久的线程 335 | * 5) 重点: pthread_cond_wait必须放在lock和unlock之间, 因为他要根据共享变量的状态决定是否要等待; 但是pthread_cond_signal既可以放在lock和unlock之间,也可以放在lock和unlock之后 336 | */ 337 | int condition_signal(condition_t *cond); 338 | /** 339 | * 唤醒所有等待线程 340 | */ 341 | int condition_broadcast(condition_t *cond); 342 | 343 | /** 344 | * 销毁 345 | */ 346 | int condition_destroy(condition_t *cond); 347 | 348 | #endif // CONDITION_H_ 349 | ``` 350 | 351 | condition.cpp: 352 | 353 | ```c++ 354 | #include "condition.h" 355 | 356 | // 初始化 357 | int condition_init(condition_t* cond) { 358 | int status; 359 | status = pthread_mutex_init(&cond->pmutex, NULL); 360 | if (status != 0) { 361 | printf("Error: pthread_mutex_init failed, return value:%d\n", status); 362 | return status; 363 | } 364 | status = pthread_cond_init(&cond->pcond, NULL); 365 | if (status != 0) { 366 | 
printf("Error: pthread_cond_init failed, return value:%d\n", status); 367 | return status; 368 | } 369 | return 0; 370 | } 371 | 372 | // 加锁 373 | int condition_lock(condition_t* cond) { 374 | return pthread_mutex_lock(&cond->pmutex); 375 | } 376 | 377 | // 解锁 378 | int condition_unlock(condition_t* cond) { 379 | return pthread_mutex_unlock(&cond->pmutex); 380 | } 381 | 382 | // 条件等待 383 | int condition_wait(condition_t* cond) { 384 | return pthread_cond_wait(&cond->pcond, &cond->pmutex); 385 | } 386 | 387 | // 计时等待 388 | int condition_timedwait(condition_t* cond, const timespec* abstime) { 389 | return pthread_cond_timedwait(&cond->pcond, &cond->pmutex, abstime); 390 | } 391 | 392 | // 激活一个等待该条件的线程 393 | int condition_signal(condition_t *cond) { 394 | return pthread_cond_signal(&cond->pcond); 395 | } 396 | 397 | // 唤醒所有等待线程 398 | int condition_broadcast(condition_t *cond) { 399 | return pthread_cond_broadcast(&cond->pcond); 400 | } 401 | 402 | // 销毁 403 | int condition_destroy(condition_t *cond) { 404 | int status; 405 | status = pthread_mutex_destroy(&cond->pmutex); 406 | if (status != 0) { 407 | return status; 408 | } 409 | 410 | status = pthread_cond_destroy(&cond->pcond); 411 | if (status != 0) { 412 | return status; 413 | } 414 | return 0; 415 | } 416 | ``` 417 | 418 | threadpool.h: 419 | 420 | ```c++ 421 | #ifndef THREAD_POLL_H_ 422 | #define THREAD_POLL_H_ 423 | 424 | #include "condition.h" 425 | 426 | typedef struct task { 427 | void* (*run)(void* args); // abstract a job function that need to run 428 | void* arg; // argument of the run function 429 | struct task* next; // point to the next task in task queue 430 | } task_t; 431 | 432 | typedef struct threadpool { 433 | condition_t ready; // condition & mutex 434 | task_t* first; // fist task in task queue 435 | task_t* last; // last task in task queue 436 | int counter; // total task number 437 | int idle; // idle task number 438 | int max_threads; // max task number 439 | int quit; // the quit flag 440 | 
} threadpool_t; 441 | 442 | /** 443 | * initialize threadpool 444 | */ 445 | void threadpool_init(threadpool_t* pool, int threads_num); 446 | 447 | /** 448 | * add a task to threadpool 449 | */ 450 | void threadpool_add_task(threadpool_t* pool, void* (*run)(void *args), void* arg); 451 | 452 | /** 453 | * destroy threadpool 454 | */ 455 | void threadpool_destroy(threadpool_t* pool); 456 | 457 | #endif // THREAD_POLL_H_ 458 | ``` 459 | 460 | Threadpool.cpp: 461 | 462 | ```c++ 463 | #include 464 | #include 465 | #include 466 | #include 467 | #include 468 | #include "threadpool.h" 469 | 470 | void *thread_routine(void *arg); 471 | 472 | // initialize the thread pool 473 | void threadpool_init(threadpool_t* pool, int threads_num) { 474 | int n_status = condition_init(&pool ->ready); 475 | if (n_status == 0) { 476 | printf("Info: initialize the thread pool successfully!\n"); 477 | } else { 478 | printf("Error: initialize the thread pool failed, status:%d\n", n_status); 479 | } 480 | pool->first = NULL; 481 | pool->last = NULL; 482 | pool->counter = 0; 483 | pool->idle = 0; 484 | pool->max_threads = threads_num; 485 | pool->quit = 0; 486 | } 487 | 488 | // add a task to thread pool 489 | void threadpool_add_task(threadpool_t* pool, void* (*run)(void *arg), void* arg) { 490 | // create a task 491 | task_t* new_task = reinterpret_cast(malloc(sizeof(task_t))); 492 | new_task->run = run; 493 | new_task->arg = arg; 494 | new_task->next = NULL; 495 | 496 | // lock the condition 497 | condition_lock(&pool->ready); 498 | 499 | // add the task to task queue 500 | if (pool->first == NULL) { 501 | pool->first = new_task; 502 | } else { // else add to the last task 503 | pool->last->next = new_task; 504 | } 505 | pool->last = new_task; 506 | 507 | /* 508 | * after you add a task to task queue, you need to allocate it to a thread: 509 | * (1)if idle thread num > 0: awake a idle thread 510 | * (2)if idle thread num = 0 & thread num does not reach maximum: create a new thread to run 
the task 511 | */ 512 | if (pool->idle > 0) { 513 | // awake a thread that wait for longest time 514 | condition_signal(&pool->ready); 515 | } else if (pool->counter < pool->max_threads) { 516 | // define a tid to get the thread identifier that we are going to create 517 | pthread_t tid; 518 | /* 519 | * pthread_create(): 520 | * (1)thread identifier 521 | * (2)set the pthread attribute 522 | * (3)the function that thread is going to run 523 | * (4)the args of run func 524 | * 525 | * A realistic limit of thread num is 200 to 400 threads 526 | * https://www.ibm.com/support/knowledgecenter/en/SSLTBW_2.3.0/com.ibm.zos.v2r3.bpxbd00/ptcrea.htm 527 | */ 528 | pthread_create(&tid, NULL, thread_routine, pool); 529 | pool->counter++; 530 | } else { // when (idle == 0 & counter = max_threads), then wait 531 | printf("Warning: no idle thread, please wait...\n"); 532 | } 533 | 534 | condition_unlock(&pool->ready); 535 | } 536 | 537 | // create a thread to run the task run func 538 | // and the void *arg means the arg passed by pthread_create: pool 539 | void *thread_routine(void *arg) { 540 | struct timespec abs_name; 541 | bool timeout; 542 | printf("Info: create thread, and the thread id is: %ld\n", (u_int64_t)pthread_self()); 543 | threadpool_t *pool = reinterpret_cast(arg); 544 | 545 | // keep visiting the task queue 546 | while (true) { 547 | timeout = false; 548 | condition_lock(&pool->ready); 549 | pool->idle++; 550 | 551 | // when task queue is empty, then block 2 second to get the new task 552 | // If timeout, then destroy the thread 553 | while (pool->first == NULL && !pool->quit) { 554 | printf("Info: thread %ld is waiting for a task\n", (u_int64_t)pthread_self()); 555 | // get the system time 556 | clock_gettime(CLOCK_REALTIME, &abs_name); 557 | abs_name.tv_sec += 2; 558 | int status; 559 | status = condition_timedwait(&pool->ready, &abs_name); // block for 2 second 560 | if (status == ETIMEDOUT) { 561 | printf("Info: thread %ld wait timed out\n", 
(u_int64_t)pthread_self()); 562 | timeout = true; 563 | break; 564 | } 565 | } 566 | 567 | pool->idle--; 568 | // when the thread run the task, we should unlock the thread pool 569 | if (pool->first != NULL) { 570 | // get the task from task queue 571 | task_t* t = pool->first; 572 | pool->first = t->next; 573 | // unlock the thread pool to make other threads visit task queue 574 | condition_unlock(&pool->ready); 575 | 576 | // run the task run func 577 | t->run(t->arg); 578 | free(t); 579 | 580 | // lock 581 | condition_lock(&pool->ready); 582 | } 583 | 584 | // when task queue is clean and quit flag is 1, then destroy the thread 585 | if (pool->quit && pool->first == NULL) { 586 | pool->counter--; 587 | // 若线程池中线程数为0,通知等待线程(主线程)全部任务已经完成 588 | if (pool->counter == 0) { 589 | condition_signal(&pool->ready); 590 | } 591 | condition_unlock(&pool->ready); 592 | break; // destroy the thread 593 | } 594 | 595 | // if visit task queue timeout(means no task in queue), quit destory the thread 596 | if (timeout) { 597 | pool->counter--; 598 | condition_unlock(&pool->ready); 599 | break; // destroy the thread 600 | } 601 | 602 | condition_unlock(&pool->ready); 603 | } 604 | 605 | // if break, destroy the thread 606 | printf("Info: thread %ld quit\n", (u_int64_t)pthread_self()); 607 | return NULL; 608 | } 609 | 610 | /* 611 | * destroy a thread pool: 612 | * 1) awake all the idle thread 613 | * 2) wait for the running thread to finish 614 | */ 615 | void threadpool_destroy(threadpool_t *pool) { 616 | if (pool->quit) { 617 | return; 618 | } 619 | 620 | condition_lock(&pool->ready); 621 | pool->quit = 1; 622 | if (pool->counter > 0) { 623 | if (pool->idle > 0) { 624 | condition_broadcast(&pool->ready); 625 | } 626 | while (pool->counter > 0) { 627 | condition_wait(&pool->ready); 628 | } 629 | } 630 | condition_unlock(&pool->ready); 631 | condition_destroy(&pool->ready); 632 | } 633 | ``` 634 | 635 | test.cpp: 636 | 637 | ```c++ 638 | #include 639 | #include 640 | #include 641 | 
#include "threadpool.h" 642 | 643 | #define THREADPOOL_MAX_NUM 30 644 | 645 | void* mytask(void *arg) { 646 | printf("Info: thread %ld is working on task %d\n", (u_int64_t)pthread_self(), *reinterpret_cast(arg)); 647 | sleep(1); 648 | free(arg); 649 | return NULL; 650 | } 651 | 652 | int main(int argc, char* argv[]) { 653 | threadpool_t pool; 654 | threadpool_init(&pool, THREADPOOL_MAX_NUM); 655 | 656 | // add task to task queue 657 | for (int i=0; i < 100; i++) { 658 | int *arg = reinterpret_cast(malloc(sizeof(int))); 659 | *arg = i; 660 | threadpool_add_task(&pool, mytask, arg); 661 | } 662 | threadpool_destroy(&pool); 663 | return 0; 664 | } 665 | ``` 666 | 667 | 编译运行: 668 | 669 | ```bash 670 | $g++ -g test.cpp threadpool.cpp condition.cpp -o test -std=c++11 -lpthread 671 | $./test 672 | Info: initialize the thread pool successfully! 673 | Info: create thread, and the thread id is: 139898193295104 674 | Info: create thread, and the thread id is: 139898176509696 675 | Info: thread 139898176509696 is working on task 0 676 | Info: create thread, and the thread id is: 139898168116992 677 | Info: create thread, and the thread id is: 139898184902400 678 | Info: create thread, and the thread id is: 139898134546176 679 | Info: create thread, and the thread id is: 139898126153472 680 | Info: create thread, and the thread id is: 139898117760768 681 | Info: thread 139898117760768 is working on task 1 682 | Info: create thread, and the thread id is: 139898100975360 683 | Info: create thread, and the thread id is: 139898092582656 684 | Info: create thread, and the thread id is: 139898084189952 685 | Info: create thread, and the thread id is: 139898159724288 686 | Info: create thread, and the thread id is: 139898109368064 687 | Info: create thread, and the thread id is: 139898067404544 688 | Info: create thread, and the thread id is: 139898059011840 689 | Info: create thread, and the thread id is: 139898050619136 690 | Info: create thread, and the thread id is: 
139898042226432 691 | Info: create thread, and the thread id is: 139898033833728 692 | Info: create thread, and the thread id is: 139898025441024 693 | Info: create thread, and the thread id is: 139898017048320 694 | Info: create thread, and the thread id is: 139898008655616 695 | Info: create thread, and the thread id is: 139898075797248 696 | Info: create thread, and the thread id is: 139898000262912 697 | Info: create thread, and the thread id is: 139898142938880 698 | Info: create thread, and the thread id is: 139898151331584 699 | Info: thread 139898159724288 is working on task 2 700 | Info: thread 139898151331584 is working on task 3 701 | Info: create thread, and the thread id is: 139897991870208 702 | Info: create thread, and the thread id is: 139897966692096 703 | Info: create thread, and the thread id is: 139897958299392 704 | Warning: no idle thread, please wait... 705 | Warning: no idle thread, please wait... 706 | Info: create thread, and the thread id is: 139897949906688 707 | Info: create thread, and the thread id is: 139897983477504 708 | Info: create thread, and the thread id is: 139897975084800 709 | Warning: no idle thread, please wait... 710 | Warning: no idle thread, please wait... 711 | Warning: no idle thread, please wait... 712 | Warning: no idle thread, please wait... 713 | Warning: no idle thread, please wait... 714 | Warning: no idle thread, please wait... 715 | Warning: no idle thread, please wait... 716 | Warning: no idle thread, please wait... 717 | Warning: no idle thread, please wait... 718 | Warning: no idle thread, please wait... 719 | Warning: no idle thread, please wait... 720 | Warning: no idle thread, please wait... 721 | Warning: no idle thread, please wait... 722 | Warning: no idle thread, please wait... 723 | Info: thread 139898067404544 is working on task 4 724 | Warning: no idle thread, please wait... 725 | Warning: no idle thread, please wait... 726 | Warning: no idle thread, please wait... 
727 | Warning: no idle thread, please wait... 728 | Warning: no idle thread, please wait... 729 | Warning: no idle thread, please wait... 730 | Warning: no idle thread, please wait... 731 | Warning: no idle thread, please wait... 732 | Warning: no idle thread, please wait... 733 | Warning: no idle thread, please wait... 734 | Warning: no idle thread, please wait... 735 | Warning: no idle thread, please wait... 736 | Warning: no idle thread, please wait... 737 | Warning: no idle thread, please wait... 738 | Warning: no idle thread, please wait... 739 | Info: thread 139898168116992 is working on task 5 740 | Warning: no idle thread, please wait... 741 | Warning: no idle thread, please wait... 742 | Warning: no idle thread, please wait... 743 | Warning: no idle thread, please wait... 744 | Warning: no idle thread, please wait... 745 | Warning: no idle thread, please wait... 746 | Warning: no idle thread, please wait... 747 | Warning: no idle thread, please wait... 748 | Warning: no idle thread, please wait... 749 | Warning: no idle thread, please wait... 750 | Warning: no idle thread, please wait... 751 | Warning: no idle thread, please wait... 752 | Warning: no idle thread, please wait... 753 | Warning: no idle thread, please wait... 754 | Warning: no idle thread, please wait... 755 | Warning: no idle thread, please wait... 756 | Warning: no idle thread, please wait... 757 | Warning: no idle thread, please wait... 758 | Warning: no idle thread, please wait... 759 | Warning: no idle thread, please wait... 760 | Warning: no idle thread, please wait... 761 | Warning: no idle thread, please wait... 762 | Warning: no idle thread, please wait... 763 | Warning: no idle thread, please wait... 764 | Warning: no idle thread, please wait... 765 | Warning: no idle thread, please wait... 766 | Warning: no idle thread, please wait... 767 | Warning: no idle thread, please wait... 768 | Warning: no idle thread, please wait... 769 | Warning: no idle thread, please wait... 
770 | Warning: no idle thread, please wait... 771 | Warning: no idle thread, please wait... 772 | Warning: no idle thread, please wait... 773 | Warning: no idle thread, please wait... 774 | Warning: no idle thread, please wait... 775 | Warning: no idle thread, please wait... 776 | Warning: no idle thread, please wait... 777 | Info: thread 139898142938880 is working on task 6 778 | Warning: no idle thread, please wait... 779 | Info: thread 139898042226432 is working on task 7 780 | Warning: no idle thread, please wait... 781 | Info: thread 139897949906688 is working on task 8 782 | Info: thread 139898184902400 is working on task 11 783 | Info: thread 139898134546176 is working on task 13 784 | Info: thread 139898017048320 is working on task 14 785 | Info: thread 139898008655616 is working on task 16 786 | Info: thread 139898193295104 is working on task 18 787 | Info: thread 139898000262912 is working on task 20 788 | Info: thread 139898100975360 is working on task 21 789 | Info: thread 139897983477504 is working on task 9 790 | Info: thread 139897975084800 is working on task 10 791 | Info: thread 139898092582656 is working on task 26 792 | Info: thread 139898050619136 is working on task 24 793 | Info: thread 139897991870208 is working on task 28 794 | Info: thread 139898025441024 is working on task 12 795 | Info: thread 139898084189952 is working on task 15 796 | Info: thread 139898109368064 is working on task 17 797 | Info: thread 139897966692096 is working on task 25 798 | Info: thread 139898075797248 is working on task 19 799 | Info: thread 139898059011840 is working on task 22 800 | Info: thread 139897958299392 is working on task 27 801 | Info: thread 139898033833728 is working on task 29 802 | Info: thread 139898126153472 is working on task 23 803 | Info: thread 139898176509696 is working on task 30 804 | Info: thread 139898117760768 is working on task 31 805 | Info: thread 139898159724288 is working on task 32 806 | Info: thread 139898151331584 is working on 
task 33 807 | Info: thread 139898067404544 is working on task 34 808 | Info: thread 139898168116992 is working on task 35 809 | Info: thread 139898142938880 is working on task 36 810 | Info: thread 139898042226432 is working on task 37 811 | Info: thread 139897949906688 is working on task 38 812 | Info: thread 139898184902400 is working on task 39 813 | Info: thread 139898000262912 is working on task 40 814 | Info: thread 139898017048320 is working on task 41 815 | Info: thread 139898008655616 is working on task 42 816 | Info: thread 139898134546176 is working on task 43 817 | Info: thread 139898193295104 is working on task 44 818 | Info: thread 139898050619136 is working on task 49 819 | Info: thread 139897991870208 is working on task 50 820 | Info: thread 139898025441024 is working on task 51 821 | Info: thread 139898084189952 is working on task 52 822 | Info: thread 139898109368064 is working on task 53 823 | Info: thread 139898075797248 is working on task 54 824 | Info: thread 139897975084800 is working on task 48 825 | Info: thread 139898100975360 is working on task 45 826 | Info: thread 139898033833728 is working on task 57 827 | Info: thread 139897983477504 is working on task 47 828 | Info: thread 139897966692096 is working on task 55 829 | Info: thread 139897958299392 is working on task 56 830 | Info: thread 139898092582656 is working on task 46 831 | Info: thread 139898126153472 is working on task 58 832 | Info: thread 139898059011840 is working on task 59 833 | Info: thread 139898176509696 is working on task 60 834 | Info: thread 139898117760768 is working on task 61 835 | Info: thread 139898159724288 is working on task 62 836 | Info: thread 139898151331584 is working on task 63 837 | Info: thread 139898067404544 is working on task 64 838 | Info: thread 139898168116992 is working on task 65 839 | Info: thread 139898142938880 is working on task 66 840 | Info: thread 139898042226432 is working on task 67 841 | Info: thread 139897949906688 is working on task 
69 842 | Info: thread 139898184902400 is working on task 68 843 | Info: thread 139898000262912 is working on task 70 844 | Info: thread 139898008655616 is working on task 71 845 | Info: thread 139898017048320 is working on task 72 846 | Info: thread 139898050619136 is working on task 73 847 | Info: thread 139898134546176 is working on task 74 848 | Info: thread 139898109368064 is working on task 78 849 | Info: thread 139898100975360 is working on task 80 850 | Info: thread 139897975084800 is working on task 82 851 | Info: thread 139898075797248 is working on task 81 852 | Info: thread 139897966692096 is working on task 85 853 | Info: thread 139897958299392 is working on task 86 854 | Info: thread 139898126153472 is working on task 88 855 | Info: thread 139898059011840 is working on task 89 856 | Info: thread 139898092582656 is working on task 87 857 | Info: thread 139898025441024 is working on task 76 858 | Info: thread 139898084189952 is working on task 77 859 | Info: thread 139898193295104 is working on task 75 860 | Info: thread 139897991870208 is working on task 79 861 | Info: thread 139898033833728 is working on task 83 862 | Info: thread 139897983477504 is working on task 84 863 | Info: thread 139898176509696 is working on task 90 864 | Info: thread 139898117760768 is working on task 91 865 | Info: thread 139898159724288 is working on task 92 866 | Info: thread 139898151331584 is working on task 93 867 | Info: thread 139898067404544 is working on task 94 868 | Info: thread 139898168116992 is working on task 95 869 | Info: thread 139898142938880 is working on task 96 870 | Info: thread 139898042226432 is working on task 97 871 | Info: thread 139897949906688 is working on task 98 872 | Info: thread 139898184902400 is working on task 99 873 | Info: thread 139898000262912 quit 874 | Info: thread 139898008655616 quit 875 | Info: thread 139898017048320 quit 876 | Info: thread 139898050619136 quit 877 | Info: thread 139898134546176 quit 878 | Info: thread 
139898109368064 quit 879 | Info: thread 139898100975360 quit 880 | Info: thread 139897966692096 quit 881 | Info: thread 139898126153472 quit 882 | Info: thread 139897975084800 quit 883 | Info: thread 139898075797248 quit 884 | Info: thread 139897958299392 quit 885 | Info: thread 139898059011840 quit 886 | Info: thread 139898092582656 quit 887 | Info: thread 139898025441024 quit 888 | Info: thread 139898084189952 quit 889 | Info: thread 139898193295104 quit 890 | Info: thread 139897991870208 quit 891 | Info: thread 139898033833728 quit 892 | Info: thread 139897983477504 quit 893 | Info: thread 139898176509696 quit 894 | Info: thread 139898117760768 quit 895 | Info: thread 139898159724288 quit 896 | Info: thread 139898151331584 quit 897 | Info: thread 139898067404544 quit 898 | Info: thread 139898168116992 quit 899 | Info: thread 139898142938880 quit 900 | Info: thread 139898042226432 quit 901 | Info: thread 139897949906688 quit 902 | Info: thread 139898184902400 quit 903 | ``` 904 | 905 | ## C++风格ThreadPool 906 | 907 | #### 1. 
基于条件变量的线程池 908 | 909 | threadpool.h: 910 | 911 | ```c++ 912 | #ifndef THREAD_POOL_H 913 | #define THREAD_POOL_H 914 | 915 | #include 916 | #include 917 | #include 918 | #include 919 | #include 920 | 921 | class ThreadPool { 922 | public: 923 | typedef void *(WrokerFunc)(void* arg); 924 | 925 | struct Task { 926 | WrokerFunc* run; 927 | void* arg; 928 | }; 929 | 930 | explicit ThreadPool(int thread_num); 931 | ~ThreadPool(); 932 | void addTask(WrokerFunc* func, void* arg); 933 | 934 | private: 935 | std::queue task_queue_; 936 | std::vector thread_list_; 937 | bool is_running_; // note: is_running_不用原子变量或者锁操作可能存在卡死问题 938 | pthread_mutex_t mutex_; 939 | pthread_cond_t condition_; 940 | 941 | static void* thread_routine(void* pool_ptr); 942 | void thread_worker(); 943 | }; 944 | 945 | 946 | // =========================implementation========================= 947 | inline ThreadPool::ThreadPool(int thread_num) : is_running_(true) { 948 | pthread_mutex_init(&mutex_, NULL); 949 | pthread_cond_init(&condition_, NULL); 950 | 951 | for (int i = 0; i < thread_num; i++) { 952 | pthread_t pid; 953 | pthread_create(&pid, NULL, thread_routine, this); 954 | thread_list_.push_back(pid); 955 | } 956 | } 957 | 958 | inline ThreadPool::~ThreadPool() { 959 | pthread_mutex_lock(&mutex_); 960 | is_running_ = false; 961 | pthread_mutex_unlock(&mutex_); 962 | 963 | pthread_cond_broadcast(&condition_); // wakeup all threads that block to get task 964 | for (int i = 0; i < thread_list_.size(); i++) { 965 | pthread_join(thread_list_[i], NULL); 966 | } 967 | 968 | pthread_cond_destroy(&condition_); 969 | pthread_mutex_destroy(&mutex_); 970 | } 971 | 972 | inline void ThreadPool::addTask(WrokerFunc* func, void* arg) { 973 | Task* task = new Task(); 974 | task->run = func; 975 | task->arg = arg; 976 | 977 | pthread_mutex_lock(&mutex_); 978 | task_queue_.push(task); 979 | pthread_mutex_unlock(&mutex_); 980 | pthread_cond_signal(&condition_); 981 | } 982 | 983 | inline void* 
ThreadPool::thread_routine(void* pool_ptr) { 984 | ThreadPool* pool = static_cast(pool_ptr); 985 | pool->thread_worker(); 986 | } 987 | 988 | inline void ThreadPool::thread_worker() { 989 | Task* task = NULL; 990 | 991 | while (true) { 992 | pthread_mutex_lock(&mutex_); 993 | if (!is_running_) { 994 | pthread_mutex_unlock(&mutex_); 995 | break; 996 | } 997 | 998 | if (task_queue_.empty()) { 999 | pthread_cond_wait(&condition_, &mutex_); // 获取不到任务时阻塞, 直到有新的任务入队 1000 | if (task_queue_.empty()) { 1001 | pthread_mutex_unlock(&mutex_); 1002 | continue; 1003 | } 1004 | } 1005 | task = task_queue_.front(); 1006 | task_queue_.pop(); 1007 | pthread_mutex_unlock(&mutex_); 1008 | 1009 | (*(task->run))(task->arg); 1010 | delete task; 1011 | task = NULL; 1012 | } 1013 | 1014 | // 线程池终止时(is_running_ = false)确保任务队列为空后退出 1015 | while (true) { 1016 | pthread_mutex_lock(&mutex_); 1017 | if (task_queue_.empty()) { 1018 | pthread_mutex_unlock(&mutex_); 1019 | break; 1020 | } 1021 | task = task_queue_.front(); 1022 | task_queue_.pop(); 1023 | pthread_mutex_unlock(&mutex_); 1024 | delete task; 1025 | task = NULL; 1026 | } 1027 | 1028 | printf("Info: thread[%lu] exit\n", pthread_self()); 1029 | } 1030 | 1031 | 1032 | #endif // THREAD_POOL_H 1033 | ``` 1034 | 1035 | 测试代码threadpool_test.cpp: 1036 | 1037 | ```c++ 1038 | #include 1039 | #include 1040 | #include 1041 | #include 1042 | #include 1043 | #include "threadpool.h" 1044 | 1045 | void* MyTaskFunc(void* arg) { 1046 | int* i = static_cast(arg); 1047 | printf("[MyTaskFunc]: thread[%lu] is working on %d\n", pthread_self(), *i); 1048 | return NULL; 1049 | } 1050 | 1051 | int main() { 1052 | ThreadPool pool(10); 1053 | 1054 | for (int i = 0; i < 100; i++) { 1055 | int* arg = new int(i); 1056 | pool.addTask(&MyTaskFunc, arg); 1057 | } 1058 | 1059 | return 0; 1060 | } 1061 | ``` 1062 | 1063 | 编译运行: 1064 | 1065 | ```bash 1066 | $g++ -g threadpool_test.cpp -o threadpool_test -lpthread 1067 | $./threadpool_test 1068 | [MyTaskFunc]: 
thread[140224777099008] is working on 0 1069 | [MyTaskFunc]: thread[140224793884416] is working on 8 1070 | [MyTaskFunc]: thread[140224844240640] is working on 2 1071 | Info: thread[140224844240640] exit 1072 | [MyTaskFunc]: thread[140224827455232] is working on 4 1073 | Info: thread[140224827455232] exit 1074 | [MyTaskFunc]: thread[140224810669824] is working on 6 1075 | Info: thread[140224810669824] exit 1076 | [MyTaskFunc]: thread[140224777099008] is working on 10 1077 | Info: thread[140224777099008] exit 1078 | [MyTaskFunc]: thread[140224852633344] is working on 1 1079 | Info: thread[140224852633344] exit 1080 | [MyTaskFunc]: thread[140224835847936] is working on 3 1081 | Info: thread[140224835847936] exit 1082 | [MyTaskFunc]: thread[140224802277120] is working on 7 1083 | Info: thread[140224802277120] exit 1084 | Info: thread[140224793884416] exit 1085 | [MyTaskFunc]: thread[140224819062528] is working on 5 1086 | Info: thread[140224819062528] exit 1087 | [MyTaskFunc]: thread[140224785491712] is working on 9 1088 | Info: thread[140224785491712] exit 1089 | ``` 1090 | 1091 | #### 2. 
基于信号量的同步任务队列 1092 | 1093 | 上述的线程池无法很好地支持同步任务,因此我们基于信号量实现了SyncTaskQueue。 1094 | 1095 | sync_task_queue.h: 1096 | 1097 | ```c++ 1098 | #ifndef SYNC_TASK_QUEUE_H 1099 | #define SYNC_TASK_QUEUE_H 1100 | 1101 | #include 1102 | #include 1103 | #include "threadpool.h" 1104 | 1105 | class SyncTaskQueue { 1106 | public: 1107 | struct SyncTask { 1108 | ThreadPool::Task task; 1109 | sem_t* sem; 1110 | }; 1111 | 1112 | explicit SyncTaskQueue(ThreadPool* pool_ptr); 1113 | ~SyncTaskQueue(); 1114 | void addTask(ThreadPool::WrokerFunc* func, void* sync_task_ptr); 1115 | void wait(); 1116 | 1117 | private: 1118 | ThreadPool* threadpool_; 1119 | sem_t sem_; 1120 | int sync_task_num_; // 务必保证单线程读写, 否则需要加锁 1121 | 1122 | static ThreadPool::WrokerFunc workerFuncWrapper; 1123 | }; 1124 | 1125 | inline SyncTaskQueue::SyncTaskQueue(ThreadPool* pool_ptr) : threadpool_(pool_ptr), sync_task_num_(0) { 1126 | sem_init(&sem_, 0, 0); 1127 | } 1128 | 1129 | inline SyncTaskQueue::~SyncTaskQueue() { 1130 | // make sure that all task has been finished before destory 1131 | if (sync_task_num_ > 0) { 1132 | wait(); 1133 | } 1134 | sem_destroy(&sem_); 1135 | } 1136 | 1137 | inline void SyncTaskQueue::addTask(ThreadPool::WrokerFunc* func, void* arg) { 1138 | sync_task_num_++; 1139 | 1140 | // wrapper the worker function with sem 1141 | SyncTask* sync_task = new SyncTask(); 1142 | sync_task->sem = &(this->sem_); 1143 | sync_task->task.run = func; 1144 | sync_task->task.arg = arg; 1145 | threadpool_->addTask(&workerFuncWrapper, sync_task); 1146 | } 1147 | 1148 | inline void SyncTaskQueue::wait() { 1149 | while (sync_task_num_) { 1150 | int sem_value = 0; 1151 | sem_wait(&sem_); 1152 | sync_task_num_--; 1153 | } 1154 | } 1155 | 1156 | inline void* SyncTaskQueue::workerFuncWrapper(void* sync_task_ptr) { 1157 | SyncTask* sync_task = static_cast(sync_task_ptr); 1158 | (*(sync_task->task.run))(sync_task->task.arg); 1159 | sem_post(sync_task->sem); 1160 | delete sync_task; 1161 | } 1162 | 1163 | 1164 | #endif // 
SYNC_TASK_QUEUE_H 1165 | ``` 1166 | 1167 | 测试文件sync_task_queue_test.cpp: 1168 | 1169 | ```c++ 1170 | #include 1171 | #include "sync_task_queue.h" 1172 | 1173 | void* MyTaskFunc(void* arg) { 1174 | int i = *static_cast(arg); 1175 | printf("[MyTaskFunc]: thread[%lu] is working on %d\n", pthread_self(), i); 1176 | sleep(2); 1177 | return NULL; 1178 | } 1179 | 1180 | int main() { 1181 | ThreadPool threadpool(20); 1182 | SyncTaskQueue sync_task_queue(&threadpool); 1183 | 1184 | for (int i = 0; i < 15; i++) { 1185 | int* arg = new int(i); 1186 | sync_task_queue.addTask(&MyTaskFunc, arg); 1187 | } 1188 | 1189 | printf("====================================wait for result===================================\n"); 1190 | sync_task_queue.wait(); 1191 | } 1192 | ``` 1193 | 1194 | 编译运行: 1195 | 1196 | ```bash 1197 | $g++ -g sync_task_queue_test.cpp -o sync_task_queue_test -lpthread 1198 | $./sync_task_queue_test 1199 | [MyTaskFunc]: thread[140349199148800] is working on 0 1200 | [MyTaskFunc]: thread[140349266290432] is working on 12 1201 | [MyTaskFunc]: thread[140349358610176] is working on 2 1202 | [MyTaskFunc]: thread[140349341824768] is working on 3 1203 | [MyTaskFunc]: thread[140349333432064] is working on 4 1204 | [MyTaskFunc]: thread[140349325039360] is working on 5 1205 | [MyTaskFunc]: thread[140349308253952] is working on 6 1206 | [MyTaskFunc]: thread[140349299861248] is working on 8 1207 | [MyTaskFunc]: thread[140349316646656] is working on 7 1208 | [MyTaskFunc]: thread[140349283075840] is working on 9 1209 | [MyTaskFunc]: thread[140349291468544] is working on 10 1210 | [MyTaskFunc]: thread[140349274683136] is working on 11 1211 | [MyTaskFunc]: thread[140349257897728] is working on 13 1212 | [MyTaskFunc]: thread[140349249505024] is working on 14 1213 | [MyTaskFunc]: thread[140349350217472] is working on 1 1214 | ====================================wait for result=================================== 1215 | Info: thread[140349232719616] exit 1216 | Info: 
thread[140349241112320] exit 1217 | Info: thread[140349266290432] exit 1218 | Info: thread[140349207541504] exit 1219 | Info: thread[140349299861248] exit 1220 | Info: thread[140349283075840] exit 1221 | Info: thread[140349291468544] exit 1222 | Info: thread[140349249505024] exit 1223 | Info: thread[140349274683136] exit 1224 | Info: thread[140349333432064] exit 1225 | Info: thread[140349325039360] exit 1226 | Info: thread[140349308253952] exit 1227 | Info: thread[140349224326912] exit 1228 | Info: thread[140349316646656] exit 1229 | Info: thread[140349199148800] exit 1230 | Info: thread[140349350217472] exit 1231 | Info: thread[140349257897728] exit 1232 | Info: thread[140349215934208] exit 1233 | Info: thread[140349358610176] exit 1234 | Info: thread[140349341824768] exit 1235 | ``` 1236 | 1237 | ## C++11特性的ThreadPool 1238 | 1239 | 传统C++线程池仅能接受特殊的Task(执行函数需要满足特殊的格式),使用C++11特性的线程池可以更好地支持任意类型参数的Task。 1240 | 1241 | #### 1. 代码 1242 | 1243 | threadpool.h: 1244 | 1245 | > 参考自: 1246 | > 1247 | > 另外一种实现: 1248 | 1249 | ```c++ 1250 | #ifndef THREAD_POOL_H 1251 | #define THREAD_POOL_H 1252 | 1253 | #include 1254 | #include 1255 | #include 1256 | #include 1257 | #include 1258 | #include 1259 | #include 1260 | #include 1261 | #include 1262 | #include 1263 | 1264 | class ThreadPool { 1265 | public: 1266 | explicit ThreadPool(size_t); 1267 | template 1268 | auto enqueue(F&& f, Args&&... 
args) 1269 | -> std::future::type>; 1270 | ~ThreadPool(); 1271 | 1272 | private: 1273 | // need to keep track of threads so we can join them 1274 | std::vector workers_; 1275 | // the task queue 1276 | std::queue> tasks_; 1277 | 1278 | // synchronization 1279 | std::mutex queue_mutex_; 1280 | std::condition_variable condition_; 1281 | bool stop_; 1282 | }; 1283 | 1284 | // the constructor just launches some amount of workers 1285 | inline ThreadPool::ThreadPool(size_t threads) : stop_(false) { 1286 | for (size_t i = 0; i < threads; ++i) { 1287 | workers_.emplace_back( 1288 | [this] { 1289 | for (;;) { 1290 | std::function task; 1291 | { 1292 | std::unique_lock lock(this->queue_mutex_); 1293 | this->condition_.wait(lock, 1294 | [this] { return this->stop_ || !this->tasks_.empty(); }); 1295 | if (this->stop_ && this->tasks_.empty()) { 1296 | return; 1297 | } 1298 | task = std::move(this->tasks_.front()); 1299 | this->tasks_.pop(); 1300 | } 1301 | task(); 1302 | } 1303 | }); 1304 | } 1305 | } 1306 | 1307 | // add new work item to the pool 1308 | template 1309 | auto ThreadPool::enqueue(F&& f, Args&&... 
args) 1310 | -> std::future::type> { 1311 | using return_type = typename std::result_of::type; 1312 | 1313 | auto task = std::make_shared< std::packaged_task> ( 1314 | std::bind(std::forward(f), std::forward(args)...)); 1315 | std::future res = task->get_future(); 1316 | { 1317 | std::unique_lock lock(queue_mutex_); 1318 | 1319 | // don't allow enqueueing after stopping the pool 1320 | if (stop_) { 1321 | throw std::runtime_error("enqueue on stopped ThreadPool"); 1322 | } 1323 | 1324 | tasks_.emplace([task](){ (*task)(); }); 1325 | } 1326 | condition_.notify_one(); 1327 | return res; 1328 | } 1329 | 1330 | // the destructor joins all threads 1331 | inline ThreadPool::~ThreadPool() { 1332 | { 1333 | std::unique_lock lock(queue_mutex_); 1334 | stop_ = true; 1335 | } 1336 | condition_.notify_all(); 1337 | for (std::thread &worker : workers_) { 1338 | worker.join(); 1339 | } 1340 | } 1341 | 1342 | #endif // THREAD_POOL_H 1343 | ``` 1344 | 1345 | test.cpp: 1346 | 1347 | ```c++ 1348 | #include 1349 | #include "threadpool.h" 1350 | 1351 | void mytask(int i) { 1352 | printf("Info: thread %ld is working on task %d\n", (u_int64_t)pthread_self(), i); 1353 | sleep(1); 1354 | return; 1355 | } 1356 | 1357 | int main() { 1358 | ThreadPool threadpool(20); 1359 | for (int i = 0; i < 100; ++i) { 1360 | threadpool.enqueue(mytask, i); 1361 | } 1362 | return 0; 1363 | } 1364 | ``` 1365 | 1366 | 编译运行: 1367 | 1368 | ```bash 1369 | $g++ -g test.cpp -o test -std=c++11 -lpthread 1370 | $./test 1371 | Info: thread 139679726323456 is working on task 1 1372 | Info: thread 139679709538048 is working on task 3 1373 | Info: thread 139679717930752 is working on task 2 1374 | Info: thread 139679734716160 is working on task 0 1375 | Info: thread 139679600432896 is working on task 15 1376 | Info: thread 139679684359936 is working on task 6 1377 | Info: thread 139679617218304 is working on task 14 1378 | Info: thread 139679634003712 is working on task 12 1379 | Info: thread 139679667574528 is working 
on task 8 1380 | Info: thread 139679625611008 is working on task 13 1381 | Info: thread 139679575254784 is working on task 19 1382 | Info: thread 139679659181824 is working on task 9 1383 | Info: thread 139679701145344 is working on task 4 1384 | Info: thread 139679692752640 is working on task 5 1385 | Info: thread 139679592040192 is working on task 17 1386 | Info: thread 139679583647488 is working on task 18 1387 | Info: thread 139679675967232 is working on task 7 1388 | Info: thread 139679642396416 is working on task 11 1389 | Info: thread 139679608825600 is working on task 16 1390 | Info: thread 139679650789120 is working on task 10 1391 | Info: thread 139679684359936 is working on task 21 1392 | Info: thread 139679617218304 is working on task 24 1393 | Info: thread 139679600432896 is working on task 28 1394 | Info: thread 139679709538048 is working on task 23 1395 | Info: thread 139679659181824 is working on task 30 1396 | Info: thread 139679692752640 is working on task 32 1397 | Info: thread 139679583647488 is working on task 34 1398 | Info: thread 139679608825600 is working on task 35 1399 | Info: thread 139679592040192 is working on task 33 1400 | Info: thread 139679634003712 is working on task 25 1401 | Info: thread 139679625611008 is working on task 29 1402 | Info: thread 139679726323456 is working on task 20 1403 | Info: thread 139679701145344 is working on task 31 1404 | Info: thread 139679650789120 is working on task 36 1405 | Info: thread 139679667574528 is working on task 27 1406 | Info: thread 139679575254784 is working on task 37 1407 | Info: thread 139679734716160 is working on task 26 1408 | Info: thread 139679675967232 is working on task 38 1409 | Info: thread 139679717930752 is working on task 22 1410 | Info: thread 139679642396416 is working on task 39 1411 | Info: thread 139679684359936 is working on task 40 1412 | Info: thread 139679692752640 is working on task 45 1413 | Info: thread 139679625611008 is working on task 51 1414 | Info: thread 
139679583647488 is working on task 43 1415 | Info: thread 139679659181824 is working on task 44 1416 | Info: thread 139679575254784 is working on task 55 1417 | Info: thread 139679592040192 is working on task 47 1418 | Info: thread 139679617218304 is working on task 41 1419 | Info: thread 139679717930752 is working on task 57 1420 | Info: thread 139679726323456 is working on task 49 1421 | Info: thread 139679634003712 is working on task 50 1422 | Info: thread 139679650789120 is working on task 52 1423 | Info: thread 139679675967232 is working on task 59 1424 | Info: thread 139679667574528 is working on task 54 1425 | Info: thread 139679608825600 is working on task 46 1426 | Info: thread 139679734716160 is working on task 56 1427 | Info: thread 139679600432896 is working on task 48 1428 | Info: thread 139679642396416 is working on task 58 1429 | Info: thread 139679709538048 is working on task 42 1430 | Info: thread 139679701145344 is working on task 53 1431 | Info: thread 139679684359936 is working on task 60 1432 | Info: thread 139679625611008 is working on task 62 1433 | Info: thread 139679692752640 is working on task 61 1434 | Info: thread 139679583647488 is working on task 63 1435 | Info: thread 139679659181824 is working on task 64 1436 | Info: thread 139679575254784 is working on task 65 1437 | Info: thread 139679592040192 is working on task 66 1438 | Info: thread 139679617218304 is working on task 67 1439 | Info: thread 139679717930752 is working on task 68 1440 | Info: thread 139679726323456 is working on task 69 1441 | Info: thread 139679650789120 is working on task 71 1442 | Info: thread 139679634003712 is working on task 70 1443 | Info: thread 139679675967232 is working on task 72 1444 | Info: thread 139679667574528 is working on task 73 1445 | Info: thread 139679608825600 is working on task 74 1446 | Info: thread 139679734716160 is working on task 75 1447 | Info: thread 139679642396416 is working on task 77 1448 | Info: thread 139679709538048 is working 
on task 78 1449 | Info: thread 139679701145344 is working on task 79 1450 | Info: thread 139679600432896 is working on task 76 1451 | Info: thread 139679684359936 is working on task 80 1452 | Info: thread 139679625611008 is working on task 81 1453 | Info: thread 139679583647488 is working on task 83 1454 | Info: thread 139679692752640 is working on task 82 1455 | Info: thread 139679659181824 is working on task 84 1456 | Info: thread 139679575254784 is working on task 85 1457 | Info: thread 139679717930752 is working on task 88 1458 | Info: thread 139679617218304 is working on task 87 1459 | Info: thread 139679592040192 is working on task 86 1460 | Info: thread 139679634003712 is working on task 91 1461 | Info: thread 139679650789120 is working on task 90 1462 | Info: thread 139679667574528 is working on task 93 1463 | Info: thread 139679675967232 is working on task 92 1464 | Info: thread 139679608825600 is working on task 94 1465 | Info: thread 139679726323456 is working on task 89 1466 | Info: thread 139679734716160 is working on task 95 1467 | Info: thread 139679709538048 is working on task 96 1468 | Info: thread 139679642396416 is working on task 97 1469 | Info: thread 139679701145344 is working on task 98 1470 | Info: thread 139679600432896 is working on task 99 1471 | ``` 1472 | 1473 | #### 2. 
使用方式 1474 | 1475 | ###### 2.1 全局线程池 + 异步任务 1476 | 1477 | 创建一个ThreadPool的全局变量,将所有需要异步执行的任务丢到该线程池中即可: 1478 | 1479 | ```c++ 1480 | #include 1481 | #include "threadpool.h" 1482 | 1483 | // 全局异步线程池 1484 | ThreadPool g_threadpool2(20); 1485 | 1486 | int main() { 1487 | // 执行异步任务 1488 | g_threadpool2.enqueue( 1489 | [] { 1490 | sleep(1); 1491 | printf("async task done\n"); 1492 | }); 1493 | return 0; 1494 | } 1495 | ``` 1496 | 1497 | 编译运行: 1498 | 1499 | ```bash 1500 | $g++ -g test.cpp -o test -std=c++11 -lpthread 1501 | $./test 1502 | async task done 1503 | ``` 1504 | 1505 | ###### 2.2 全局线程池 + 同步任务 1506 | 1507 | 创建一个ThreadPool的全局变量并添加同步任务,通过`std::future`的`wait()`方法阻塞等待同步结果,也可以使用`get()`方法获取到函数返回值。 1508 | 1509 | ```c++ 1510 | #include 1511 | #include 1512 | #include "threadpool.h" 1513 | 1514 | // 全局异步线程池 1515 | ThreadPool g_threadpool2(20); 1516 | 1517 | int main() { 1518 | // 创建同步任务 1519 | auto res = g_threadpool2.enqueue( 1520 | [] { 1521 | sleep(1); 1522 | printf("sync task done\n"); 1523 | }); 1524 | 1525 | // 阻塞等待同步结果 1526 | res.wait(); 1527 | 1528 | return 0; 1529 | } 1530 | ``` 1531 | 1532 | 编译运行: 1533 | 1534 | ```bash 1535 | $g++ -g test.cpp -o test -std=c++11 -lpthread 1536 | $./test 1537 | sync task done 1538 | ``` 1539 | 1540 | ###### 2.3 局部线程池实现并发同步 1541 | 1542 | 创建一个临时ThreadPool,利用其析构函数完成并发同步任务: 1543 | 1544 | > 需要注意的是,这种用法已经脱离了线程池的初衷(避免处理短时间任务时创建与销毁线程的代价),它的主要用途是实现「多线程并发」,常用于并发多个IO请求并等待同步结果。 1545 | 1546 | 考虑这个场景:代码中仅在某种特殊场景(极少触发)下需要并发请求多个http链接,一方面我们不希望这些请求影响到进程的业务线程池,另一方面我们又不想单独为这个场景创建一个全局线程池使其大部分时间都在空跑。 1547 | 1548 | 2.3这种用法解决了我们「临时创建线程+执行并行任务+销毁线程」的局部并发问题,避免我们直接在用户代码处直接创建线程。 1549 | 1550 | ```bash 1551 | #include 1552 | #include 1553 | #include "threadpool.h" 1554 | 1555 | 1556 | int main() { 1557 | // 创建并发度为5的局部线程池 1558 | std::shared_ptr threadpool = std::make_shared(5); 1559 | 1560 | // 创建30个异步任务 1561 | for (int i = 0; i < 30; i++) { 1562 | threadpool->enqueue( 1563 | [i] { 1564 | sleep(1); 1565 | printf("Info: thread %ld is working on task %d\n", 
(u_int64_t)pthread_self(), i); 1566 | }); 1567 | } 1568 | 1569 | // 阻塞直至获取同步结果 1570 | threadpool.reset(); 1571 | 1572 | return 0; 1573 | } 1574 | ``` 1575 | 1576 | 编译运行: 1577 | 1578 | ```bash 1579 | $g++ -g test.cpp -o test -std=c++11 -lpthread 1580 | $./test 1581 | Info: thread 139811129124608 is working on task 4 1582 | Info: thread 139811145910016 is working on task 2 1583 | Info: thread 139811137517312 is working on task 3 1584 | Info: thread 139811162695424 is working on task 0 1585 | Info: thread 139811154302720 is working on task 1 1586 | Info: thread 139811129124608 is working on task 5 1587 | Info: thread 139811137517312 is working on task 7 1588 | Info: thread 139811145910016 is working on task 6 1589 | Info: thread 139811162695424 is working on task 8 1590 | Info: thread 139811154302720 is working on task 9 1591 | Info: thread 139811129124608 is working on task 10 1592 | Info: thread 139811137517312 is working on task 11 1593 | Info: thread 139811162695424 is working on task 13 1594 | Info: thread 139811154302720 is working on task 14 1595 | Info: thread 139811145910016 is working on task 12 1596 | Info: thread 139811129124608 is working on task 15 1597 | Info: thread 139811137517312 is working on task 18 1598 | Info: thread 139811145910016 is working on task 19 1599 | Info: thread 139811162695424 is working on task 16 1600 | Info: thread 139811154302720 is working on task 17 1601 | Info: thread 139811129124608 is working on task 21 1602 | Info: thread 139811162695424 is working on task 23 1603 | Info: thread 139811154302720 is working on task 24 1604 | Info: thread 139811145910016 is working on task 22 1605 | Info: thread 139811137517312 is working on task 20 1606 | Info: thread 139811162695424 is working on task 25 1607 | Info: thread 139811154302720 is working on task 26 1608 | Info: thread 139811129124608 is working on task 27 1609 | Info: thread 139811137517312 is working on task 29 1610 | Info: thread 139811145910016 is working on task 28 1611 | 
``` 1612 | 1613 | ## Reference 1614 | 1615 | [1] 1616 | 1617 | [2] 1618 | 1619 | [3] 1620 | 1621 | [4] 1622 | -------------------------------------------------------------------------------- /c_style_threadpool/Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | mkdir -p output 3 | g++ -g test.cpp threadpool.cpp condition.cpp -o output/test -std=c++11 -lpthread 4 | 5 | run: 6 | ./output/test 7 | 8 | clean: 9 | rm -rf ./output 10 | -------------------------------------------------------------------------------- /c_style_threadpool/condition.cpp: -------------------------------------------------------------------------------- 1 | #include "condition.h" 2 | 3 | // 初始化 4 | int condition_init(condition_t* cond) { 5 | int status; 6 | status = pthread_mutex_init(&cond->pmutex, NULL); 7 | if (status != 0) { 8 | printf("Error: pthread_mutex_init failed, return value:%d\n", status); 9 | return status; 10 | } 11 | status = pthread_cond_init(&cond->pcond, NULL); 12 | if (status != 0) { 13 | printf("Error: pthread_cond_init failed, return value:%d\n", status); 14 | return status; 15 | } 16 | return 0; 17 | } 18 | 19 | // 加锁 20 | int condition_lock(condition_t* cond) { 21 | return pthread_mutex_lock(&cond->pmutex); 22 | } 23 | 24 | // 解锁 25 | int condition_unlock(condition_t* cond) { 26 | return pthread_mutex_unlock(&cond->pmutex); 27 | } 28 | 29 | // 条件等待 30 | int condition_wait(condition_t* cond) { 31 | return pthread_cond_wait(&cond->pcond, &cond->pmutex); 32 | } 33 | 34 | // 计时等待 35 | int condition_timedwait(condition_t* cond, const timespec* abstime) { 36 | return pthread_cond_timedwait(&cond->pcond, &cond->pmutex, abstime); 37 | } 38 | 39 | // 激活一个等待该条件的线程 40 | int condition_signal(condition_t* cond) { 41 | return pthread_cond_signal(&cond->pcond); 42 | } 43 | 44 | // 唤醒所有等待线程 45 | int condition_broadcast(condition_t* cond) { 46 | return pthread_cond_broadcast(&cond->pcond); 47 | } 48 | 49 | // 销毁 50 | int 
condition_destroy(condition_t* cond) { 51 | int status; 52 | status = pthread_mutex_destroy(&cond->pmutex); 53 | if (status != 0) { 54 | return status; 55 | } 56 | 57 | status = pthread_cond_destroy(&cond->pcond); 58 | if (status != 0) { 59 | return status; 60 | } 61 | return 0; 62 | } 63 | -------------------------------------------------------------------------------- /c_style_threadpool/condition.h: -------------------------------------------------------------------------------- 1 | #ifndef C_STYLE_THREADPOOL_CONDITION_H_ 2 | #define C_STYLE_THREADPOOL_CONDITION_H_ 3 | 4 | #include 5 | 6 | #include 7 | 8 | typedef struct condition { 9 | /** 10 | * 互斥锁 11 | */ 12 | pthread_mutex_t pmutex; 13 | /** 14 | * 条件变量 15 | */ 16 | pthread_cond_t pcond; 17 | } condition_t; 18 | 19 | /** 20 | * 初始化 21 | */ 22 | int condition_init(condition_t* cond); 23 | 24 | /** 25 | * 加锁 26 | */ 27 | int condition_lock(condition_t* cond); 28 | /** 29 | * 解锁 30 | */ 31 | int condition_unlock(condition_t* cond); 32 | 33 | /** 34 | * 条件等待 35 | * 36 | * pthread_cond_wait(cond, mutex) 的功能有3个: 37 | * 1) 调用者线程首先释放 mutex 38 | * 2) 然后阻塞, 等待被别的线程唤醒 39 | * 3) 当调用者线程被唤醒后,调用者线程会再次获取 mutex 40 | */ 41 | int condition_wait(condition_t* cond); 42 | 43 | /** 44 | * 计时等待 45 | */ 46 | int condition_timedwait(condition_t* cond, const timespec* abstime); 47 | 48 | /** 49 | * 激活一个等待该条件的线程 50 | * 51 | * 1) 作用: 发送一个信号给另外一个处于阻塞等待状态的线程, 使其脱离阻塞状态继续执行 52 | * 2) 如果没有线程处在阻塞状态, 那么 pthread_cond_signal 也会成功返回, 所以需要判断下 idle thread 的数量 53 | * 3) 最多只会给一个线程发信号,不会有「惊群现象」 54 | * 4) 首先根据线程优先级的高低确定发送给哪个线程信号, 如果优先级相同则优先发给等待最久的线程 55 | * 5) 重点: pthread_cond_wait 必须放在 lock 和 unlock 之间, 因为他要根据共享变量的状态决定是否要等待; 56 | * 但是 pthread_cond_signal 既可以放在 lock 和 unlock 之间,也可以放在 lock 和 unlock 之后 57 | */ 58 | int condition_signal(condition_t* cond); 59 | 60 | /** 61 | * 唤醒所有等待线程 62 | */ 63 | int condition_broadcast(condition_t* cond); 64 | 65 | /** 66 | * 销毁 67 | */ 68 | int condition_destroy(condition_t* cond); 69 | 70 | #endif // 
C_STYLE_THREADPOOL_CONDITION_H_ 71 | -------------------------------------------------------------------------------- /c_style_threadpool/test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "threadpool.h" 6 | 7 | #define THREADPOOL_MAX_NUM 30 8 | 9 | void* MyTaskFunc(void* arg) { 10 | printf("Info: thread %ld is working on task %d\n", (u_int64_t)pthread_self(), *reinterpret_cast(arg)); 11 | sleep(1); 12 | free(arg); 13 | return NULL; 14 | } 15 | 16 | int main(int argc, char* argv[]) { 17 | threadpool_t pool; 18 | threadpool_init(&pool, THREADPOOL_MAX_NUM); 19 | 20 | // add task to task queue 21 | for (int i = 0; i < 100; i++) { 22 | int* arg = reinterpret_cast(malloc(sizeof(int))); 23 | *arg = i; 24 | threadpool_add_task(&pool, MyTaskFunc, arg); 25 | } 26 | threadpool_destroy(&pool); 27 | return 0; 28 | } 29 | -------------------------------------------------------------------------------- /c_style_threadpool/threadpool.cpp: -------------------------------------------------------------------------------- 1 | #include "threadpool.h" 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | void* thread_routine(void* arg); 11 | 12 | // initialize the thread pool 13 | void threadpool_init(threadpool_t* pool, int threads_num) { 14 | int n_status = condition_init(&pool->ready); 15 | if (n_status == 0) { 16 | printf("Info: initialize the thread pool successfully!\n"); 17 | } else { 18 | printf("Error: initialize the thread pool failed, status:%d\n", n_status); 19 | } 20 | pool->first = NULL; 21 | pool->last = NULL; 22 | pool->counter = 0; 23 | pool->idle = 0; 24 | pool->max_threads = threads_num; 25 | pool->quit = 0; 26 | } 27 | 28 | // add a task to thread pool 29 | void threadpool_add_task(threadpool_t* pool, void* (*run)(void* arg), void* arg) { 30 | // create a task 31 | task_t* new_task = reinterpret_cast(malloc(sizeof(task_t))); 32 | new_task->run 
= run; 33 | new_task->arg = arg; 34 | new_task->next = NULL; 35 | 36 | // lock the condition 37 | condition_lock(&pool->ready); 38 | 39 | // add the task to task queue 40 | if (pool->first == NULL) { 41 | pool->first = new_task; 42 | } else { // else add to the last task 43 | pool->last->next = new_task; 44 | } 45 | pool->last = new_task; 46 | 47 | /* 48 | * after you add a task to task queue, you need to allocate it to a thread: 49 | * 1) if idle thread num > 0: awake a idle thread 50 | * 2) if idle thread num = 0 & thread num does not reach maximum: create a new thread to run the task 51 | */ 52 | if (pool->idle > 0) { 53 | // awake a thread that wait for longest time 54 | condition_signal(&pool->ready); 55 | } else if (pool->counter < pool->max_threads) { 56 | // define a tid to get the thread identifier that we are going to create 57 | pthread_t tid; 58 | /* 59 | * pthread_create(): 60 | * 1) thread identifier 61 | * 2) set the pthread attribute 62 | * 3) the function that thread is going to run 63 | * 4) the args of run func 64 | * 65 | * A realistic limit of thread num is 200 to 400 threads 66 | * https://www.ibm.com/support/knowledgecenter/en/SSLTBW_2.3.0/com.ibm.zos.v2r3.bpxbd00/ptcrea.htm 67 | */ 68 | pthread_create(&tid, NULL, thread_routine, pool); 69 | pool->counter++; 70 | } else { // when (idle == 0 & counter = max_threads), then wait 71 | printf("Warning: no idle thread, please wait...\n"); 72 | } 73 | 74 | condition_unlock(&pool->ready); 75 | } 76 | 77 | // create a thread to run the task run func 78 | // and the void *arg means the arg passed by pthread_create: pool 79 | void* thread_routine(void* arg) { 80 | struct timespec abs_name; 81 | bool timeout; 82 | printf("Info: create thread, and the thread id is: %ld\n", (u_int64_t)pthread_self()); 83 | threadpool_t* pool = reinterpret_cast(arg); 84 | 85 | // keep visiting the task queue 86 | while (true) { 87 | timeout = false; 88 | condition_lock(&pool->ready); 89 | pool->idle++; 90 | 91 | // when 
task queue is empty, then block 2 second to get the new task 92 | // If timeout, then destroy the thread 93 | while (pool->first == NULL && !pool->quit) { 94 | printf("Info: thread %ld is waiting for a task\n", (u_int64_t)pthread_self()); 95 | // get the system time 96 | clock_gettime(CLOCK_REALTIME, &abs_name); 97 | abs_name.tv_sec += 2; 98 | int status; 99 | status = condition_timedwait(&pool->ready, &abs_name); // block for 2 second 100 | if (status == ETIMEDOUT) { 101 | printf("Info: thread %ld wait timed out\n", (u_int64_t)pthread_self()); 102 | timeout = true; 103 | break; 104 | } 105 | } 106 | 107 | pool->idle--; 108 | // when the thread run the task, we should unlock the thread pool 109 | if (pool->first != NULL) { 110 | // get the task from task queue 111 | task_t* t = pool->first; 112 | pool->first = t->next; 113 | // unlock the thread pool to make other threads visit task queue 114 | condition_unlock(&pool->ready); 115 | 116 | // run the task run func 117 | t->run(t->arg); 118 | free(t); 119 | 120 | // lock 121 | condition_lock(&pool->ready); 122 | } 123 | 124 | // when task queue is clean and quit flag is 1, then destroy the thread 125 | if (pool->quit && pool->first == NULL) { 126 | pool->counter--; 127 | // 若线程池中线程数为 0,通知等待线程(主线程)全部任务已经完成 128 | if (pool->counter == 0) { 129 | condition_signal(&pool->ready); 130 | } 131 | condition_unlock(&pool->ready); 132 | break; // destroy the thread 133 | } 134 | 135 | // if visit task queue timeout(means no task in queue), quit destory the thread 136 | if (timeout) { 137 | pool->counter--; 138 | condition_unlock(&pool->ready); 139 | break; // destroy the thread 140 | } 141 | 142 | condition_unlock(&pool->ready); 143 | } 144 | 145 | // if break, destroy the thread 146 | printf("Info: thread %ld quit\n", (u_int64_t)pthread_self()); 147 | return NULL; 148 | } 149 | 150 | /* 151 | * destroy a thread pool: 152 | * 1) awake all the idle thread 153 | * 2) wait for the running thread to finish 154 | */ 155 | void 
threadpool_destroy(threadpool_t* pool) { 156 | if (pool->quit) { 157 | return; 158 | } 159 | 160 | condition_lock(&pool->ready); 161 | pool->quit = 1; 162 | if (pool->counter > 0) { 163 | if (pool->idle > 0) { 164 | condition_broadcast(&pool->ready); 165 | } 166 | while (pool->counter > 0) { 167 | condition_wait(&pool->ready); 168 | } 169 | } 170 | condition_unlock(&pool->ready); 171 | condition_destroy(&pool->ready); 172 | } 173 | -------------------------------------------------------------------------------- /c_style_threadpool/threadpool.h: -------------------------------------------------------------------------------- 1 | #ifndef C_STYLE_THREADPOOL_THREADPOOL_H_ 2 | #define C_STYLE_THREADPOOL_THREADPOOL_H_ 3 | 4 | #include "condition.h" 5 | 6 | typedef struct task { 7 | void* (*run)(void* args); // abstract a job function that need to run 8 | void* arg; // argument of the run function 9 | struct task* next; // point to the next task in task queue 10 | } task_t; 11 | 12 | typedef struct threadpool { 13 | condition_t ready; // condition & mutex 14 | task_t* first; // first task in task queue 15 | task_t* last; // last task in task queue 16 | int counter; // current number of threads in the pool (not tasks) 17 | int idle; // number of idle threads 18 | int max_threads; // maximum number of threads 19 | int quit; // the quit flag 20 | } threadpool_t; 21 | 22 | /** 23 | * initialize threadpool 24 | */ 25 | void threadpool_init(threadpool_t* pool, int threads_num); 26 | 27 | /** 28 | * add a task to threadpool 29 | */ 30 | void threadpool_add_task(threadpool_t* pool, void* (*run)(void* args), void* arg); 31 | 32 | /** 33 | * destroy threadpool 34 | */ 35 | void threadpool_destroy(threadpool_t* pool); 36 | 37 | #endif  // C_STYLE_THREADPOOL_THREADPOOL_H_ 38 | -------------------------------------------------------------------------------- /cpp11_threadpool/Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | mkdir -p output 3 | g++ -g test.cpp -o output/test
-std=c++11 -lpthread 4 | 5 | run: 6 | ./output/test 7 | 8 | clean: 9 | rm -rf ./output 10 | -------------------------------------------------------------------------------- /cpp11_threadpool/test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "threadpool.h" 4 | 5 | void MyTaskFunc(int i) { 6 | printf("Info: thread %ld is working on task %d\n", (u_int64_t)pthread_self(), i); 7 | sleep(1); 8 | return; 9 | } 10 | 11 | int main() { 12 | ThreadPool threadpool(20); 13 | for (int i = 0; i < 100; ++i) { 14 | threadpool.enqueue(MyTaskFunc, i); 15 | } 16 | return 0; 17 | } 18 | -------------------------------------------------------------------------------- /cpp11_threadpool/threadpool.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | class ThreadPool { 15 | public: 16 | explicit ThreadPool(size_t); 17 | template 18 | auto enqueue(F&& f, Args&&... 
args) -> std::future<typename std::result_of<F(Args...)>::type>; 19 | ~ThreadPool(); 20 | 21 | private: 22 | // need to keep track of threads so we can join them 23 | std::vector<std::thread> workers_; 24 | // the task queue 25 | std::queue<std::function<void()>> tasks_; 26 | 27 | // synchronization 28 | std::mutex queue_mutex_; 29 | std::condition_variable condition_; 30 | bool stop_; 31 | }; 32 | 33 | // the constructor just launches some amount of workers 34 | inline ThreadPool::ThreadPool(size_t threads) : stop_(false) { 35 | for (size_t i = 0; i < threads; ++i) { 36 | workers_.emplace_back([this] { 37 | for (;;) { 38 | std::function<void()> task; 39 | { 40 | std::unique_lock<std::mutex> lock(this->queue_mutex_); 41 | this->condition_.wait(lock, [this] { 42 | return this->stop_ || !this->tasks_.empty(); 43 | }); 44 | if (this->stop_ && this->tasks_.empty()) { 45 | return; 46 | } 47 | task = std::move(this->tasks_.front()); 48 | this->tasks_.pop(); 49 | } 50 | task(); 51 | } 52 | }); 53 | } 54 | } 55 | 56 | // add new work item to the pool 57 | template <class F, class... Args> 58 | auto ThreadPool::enqueue(F&& f, Args&&...
args) -> std::future<typename std::result_of<F(Args...)>::type> { 59 | using return_type = typename std::result_of<F(Args...)>::type; 60 | 61 | auto task = 62 | std::make_shared<std::packaged_task<return_type()>>(std::bind(std::forward<F>(f), std::forward<Args>(args)...)); 63 | std::future<return_type> res = task->get_future(); 64 | { 65 | std::unique_lock<std::mutex> lock(queue_mutex_); 66 | 67 | // don't allow enqueueing after stopping the pool 68 | if (stop_) { 69 | throw std::runtime_error("enqueue on stopped ThreadPool"); 70 | } 71 | 72 | tasks_.emplace([task]() { 73 | (*task)(); 74 | }); 75 | } 76 | condition_.notify_one(); 77 | return res; 78 | } 79 | 80 | // the destructor joins all threads 81 | inline ThreadPool::~ThreadPool() { 82 | { 83 | std::unique_lock<std::mutex> lock(queue_mutex_); 84 | stop_ = true; 85 | } 86 | condition_.notify_all(); 87 | for (std::thread& worker : workers_) { 88 | if (worker.joinable()) { 89 | worker.join(); 90 | } 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /cpp_threadpool/Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | mkdir -p output 3 | g++ -g threadpool_test.cpp -o output/threadpool_test -lpthread 4 | g++ -g sync_task_queue_test.cpp -o output/sync_task_queue_test -lpthread 5 | 6 | run1: 7 | ./output/threadpool_test 8 | 9 | run2: 10 | ./output/sync_task_queue_test 11 | 12 | clean: 13 | rm -rf ./output 14 | -------------------------------------------------------------------------------- /cpp_threadpool/sync_task_queue.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <semaphore.h> 4 | 5 | #include <functional>  // NOTE(review): include targets lost in extraction; <semaphore.h> restored from sem_t usage, second header is a guess 6 | 7 | #include "threadpool.h" 8 | 9 | class SyncTaskQueue { 10 | public: 11 | struct SyncTask { 12 | ThreadPool::Task task; 13 | sem_t* sem; 14 | }; 15 | 16 | explicit SyncTaskQueue(ThreadPool* pool_ptr); 17 | ~SyncTaskQueue(); 18 | void addTask(ThreadPool::WorkerFunc* func, void* sync_task_ptr); 19 | void wait(); 20 | 21 | private: 22 | ThreadPool* threadpool_; 23 | sem_t sem_; 24 | int
sync_task_num_;  // must only be read/written from a single thread; otherwise it needs a lock 25 | 26 | static ThreadPool::WorkerFunc workerFuncWrapper; 27 | }; 28 | 29 | inline SyncTaskQueue::SyncTaskQueue(ThreadPool* pool_ptr) : threadpool_(pool_ptr), sync_task_num_(0) { 30 | sem_init(&sem_, 0, 0); 31 | } 32 | 33 | inline SyncTaskQueue::~SyncTaskQueue() { 34 | // make sure that all tasks have been finished before destroy 35 | if (sync_task_num_ > 0) { 36 | wait(); 37 | } 38 | sem_destroy(&sem_); 39 | } 40 | 41 | inline void SyncTaskQueue::addTask(ThreadPool::WorkerFunc* func, void* arg) { 42 | sync_task_num_++; 43 | 44 | // wrap the worker function with the semaphore 45 | SyncTask* sync_task = new SyncTask(); 46 | sync_task->sem = &(this->sem_); 47 | sync_task->task.run = func; 48 | sync_task->task.arg = arg; 49 | threadpool_->addTask(&workerFuncWrapper, sync_task); 50 | } 51 | 52 | inline void SyncTaskQueue::wait() { 53 | while (sync_task_num_) { 54 | 55 | sem_wait(&sem_);  // FIX: removed unused local "sem_value" 56 | sync_task_num_--; 57 | } 58 | } 59 | 60 | inline void* SyncTaskQueue::workerFuncWrapper(void* sync_task_ptr) { 61 | SyncTask* sync_task = static_cast<SyncTask*>(sync_task_ptr);  // FIX: template argument was lost in extraction 62 | (*(sync_task->task.run))(sync_task->task.arg); 63 | sem_post(sync_task->sem); 64 | delete sync_task; 65 | return 0; 66 | } 67 | -------------------------------------------------------------------------------- /cpp_threadpool/sync_task_queue_test.cpp: -------------------------------------------------------------------------------- 1 | #include "sync_task_queue.h" 2 | 3 | #include <unistd.h>  // NOTE(review): header name lost in extraction; restored from usage of sleep() 4 | 5 | void* MyTaskFunc(void* arg) { 6 | int i = *static_cast<int*>(arg);  // NOTE(review): arg ("new int" in main) is never freed — leaks 7 | printf("[MyTaskFunc]: thread[%lu] is working on %d\n", pthread_self(), i); 8 | sleep(2); 9 | return NULL; 10 | } 11 | 12 | int main() { 13 | ThreadPool threadpool(20); 14 | SyncTaskQueue sync_task_queue(&threadpool); 15 | 16 | for (int i = 0; i < 15; i++) { 17 | int* arg = new int(i); 18 | sync_task_queue.addTask(&MyTaskFunc, arg); 19 | } 20 | 21 | printf("====================================wait for
result===================================\n"); 22 | sync_task_queue.wait(); 23 | } 24 | -------------------------------------------------------------------------------- /cpp_threadpool/threadpool.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <pthread.h> 4 | #include <stdio.h> 5 | 6 | #include <queue> 7 | #include <string> 8 | #include <vector>  // NOTE(review): include targets lost in extraction; restored from usage (pthread_*, printf, std::queue, std::vector) 9 | 10 | class ThreadPool { 11 | public: 12 | typedef void*(WorkerFunc)(void* arg); 13 | 14 | struct Task { 15 | WorkerFunc* run; 16 | void* arg; 17 | }; 18 | 19 | explicit ThreadPool(int thread_num); 20 | ~ThreadPool(); 21 | void addTask(WorkerFunc* func, void* arg); 22 | 23 | private: 24 | std::queue<Task*> task_queue_; 25 | std::vector<pthread_t> thread_list_; 26 | bool is_running_; // note: without atomics or lock-protected access to is_running_, shutdown may hang 27 | pthread_mutex_t mutex_; 28 | pthread_cond_t condition_; 29 | 30 | static void* thread_routine(void* pool_ptr); 31 | void thread_worker(); 32 | }; 33 | 34 | // =========================implementation========================= 35 | inline ThreadPool::ThreadPool(int thread_num) : is_running_(true) { 36 | pthread_mutex_init(&mutex_, NULL); 37 | pthread_cond_init(&condition_, NULL); 38 | 39 | for (int i = 0; i < thread_num; i++) { 40 | pthread_t pid; 41 | pthread_create(&pid, NULL, thread_routine, this); 42 | thread_list_.push_back(pid); 43 | } 44 | } 45 | 46 | inline ThreadPool::~ThreadPool() { 47 | pthread_mutex_lock(&mutex_); 48 | is_running_ = false; 49 | pthread_mutex_unlock(&mutex_); 50 | 51 | pthread_cond_broadcast(&condition_); // wakeup all threads that block to get task 52 | for (size_t i = 0; i < thread_list_.size(); i++) {  // FIX: size_t avoids signed/unsigned comparison 53 | pthread_join(thread_list_[i], NULL); 54 | } 55 | 56 | pthread_cond_destroy(&condition_); 57 | pthread_mutex_destroy(&mutex_); 58 | } 59 | 60 | inline void ThreadPool::addTask(WorkerFunc* func, void* arg) { 61 | Task* task = new Task(); 62 | task->run = func; 63 | task->arg = arg; 64 | 65 | pthread_mutex_lock(&mutex_); 66 | task_queue_.push(task); 67 |
pthread_mutex_unlock(&mutex_); 68 | pthread_cond_signal(&condition_); 69 | } 70 | 71 | inline void* ThreadPool::thread_routine(void* pool_ptr) { 72 | ThreadPool* pool = static_cast<ThreadPool*>(pool_ptr);  // FIX: template argument was lost in extraction 73 | pool->thread_worker(); 74 | return 0; 75 | } 76 | 77 | inline void ThreadPool::thread_worker() { 78 | Task* task = NULL; 79 | 80 | while (true) { 81 | pthread_mutex_lock(&mutex_); 82 | if (!is_running_) { 83 | pthread_mutex_unlock(&mutex_); 84 | break; 85 | } 86 | 87 | if (task_queue_.empty()) { 88 | pthread_cond_wait(&condition_, &mutex_); // block until a new task is enqueued 89 | if (task_queue_.empty()) { 90 | pthread_mutex_unlock(&mutex_); 91 | continue; 92 | } 93 | } 94 | task = task_queue_.front(); 95 | task_queue_.pop(); 96 | pthread_mutex_unlock(&mutex_); 97 | 98 | (*(task->run))(task->arg); 99 | delete task; 100 | task = NULL; 101 | } 102 | 103 | // once the pool stops (is_running_ == false), drain the remaining queued tasks before exiting 104 | while (true) { 105 | pthread_mutex_lock(&mutex_); 106 | if (task_queue_.empty()) { 107 | pthread_mutex_unlock(&mutex_); 108 | break; 109 | } 110 | task = task_queue_.front(); 111 | task_queue_.pop(); 112 | pthread_mutex_unlock(&mutex_); 113 | delete task; 114 | task = NULL; 115 | } 116 | 117 | printf("Info: thread[%lu] exit\n", pthread_self()); 118 | } 119 | -------------------------------------------------------------------------------- /cpp_threadpool/threadpool_test.cpp: -------------------------------------------------------------------------------- 1 | #include "threadpool.h" 2 | 3 | #include <pthread.h> 4 | #include <stdio.h> 5 | #include <stdlib.h> 6 | #include <unistd.h> 7 | 8 | #include <iostream>  // NOTE(review): include targets lost in extraction; restored as best guesses from usage 9 | 10 | void* MyTaskFunc(void* arg) { 11 | int* i = static_cast<int*>(arg);  // FIX: template argument was lost in extraction. NOTE(review): arg ("new int" in main) is never freed — leaks 12 | printf("[MyTaskFunc]: thread[%lu] is working on %d\n", pthread_self(), *i); 13 | return NULL; 14 | } 15 | 16 | int main() { 17 | ThreadPool pool(10); 18 | 19 | for (int i = 0; i < 100; i++) { 20 | int* arg = new int(i); 21 | pool.addTask(&MyTaskFunc, arg); 22 | } 23 | 24 | return 0; 25 | } 26 |
--------------------------------------------------------------------------------