├── images
│   ├── test.png
│   └── test2.png
├── main.cpp
├── .gitignore
├── example.cpp
├── Makefile
├── LICENSE.txt
├── .travis.yml
├── example.hpp
├── test.hpp
├── README.md
└── threadpool.hpp

/images/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alice-viola/ThreadPool/HEAD/images/test.png
--------------------------------------------------------------------------------
/images/test2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alice-viola/ThreadPool/HEAD/images/test2.png
--------------------------------------------------------------------------------
/main.cpp:
--------------------------------------------------------------------------------
1 | #include "threadpool.hpp"
2 | #include "test.hpp"
3 | 
4 | int
5 | main() {
6 |     test_threadpool();
7 |     return 0;
8 | }
9 | 
10 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Object files
2 | *.slo
3 | *.lo
4 | *.o
5 | *.out
6 | 
7 | # Compiled Dynamic libraries
8 | *.so
9 | *.dylib
10 | 
11 | # Compiled Static libraries
12 | *.lai
13 | *.la
14 | *.a
15 | 
16 | /build/
17 | /images/
18 | /testoutput/
--------------------------------------------------------------------------------
/example.cpp:
--------------------------------------------------------------------------------
1 | #include "threadpool.hpp"
2 | #include <iostream>
3 | 
4 | using namespace astp;
5 | 
6 | int main() {
7 |     ThreadPool tp;
8 |     for (int i = 0; i < 2; i++) {
9 |         tp.push([i]() {
10 |             std::cout << "ThreadPool " << i << std::endl;
11 |         });
12 |     }
13 |     tp.wait();
14 |     return 0;
15 | }
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | CC=g++ -std=c++11 -Wall
2 | OPT=-O3
3 | OPTL= -pthread
4 | DEPS=threadpool.hpp test.hpp
5 | OBJ=main.o
6 | INCLUDE=-I/usr/local/include/
7 | LIBS_PATH=-L/usr/local/lib/ -L/opt/homebrew/Cellar
8 | LIBS= -lcppunit-1.14.0
9 | CPP_UNIT=`pkg-config --libs cppunit`
10 | 
11 | %.o: %.cpp $(DEPS)
12 | 	$(CC) $(OPT) -c -o $@ $< $(CFLAGS) $(INCLUDE)
13 | 
14 | ThreadPool: $(OBJ)
15 | 	$(CC) $(OPT) -o $@ $^ $(CFLAGS) $(INCLUDE) $(LIBS_PATH) $(LIBS)
16 | 
17 | ThreadPoolTest: $(OBJ)
18 | 	$(CC) $(OPT) $(OPTL) -o $@ $^ $(CFLAGS) $(INCLUDE) $(CPP_UNIT)
19 | 
20 | .PHONY: clean
21 | 
22 | clean:
23 | 	rm -f *.o
24 | 
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | 
3 | Copyright (c) 2016 Amedeo Setti
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: cpp 2 | compiler: 3 | - g++ 4 | 5 | env: 6 | matrix: 7 | - CRASH_PLEASE=boooooooom 8 | - HAPPY_NO_CRASH=all-good 9 | 10 | before_install: 11 | # What is the current file size max for core files? 12 | # It is usually 0, which means no core file will be dumped if there is a crash 13 | - ulimit -c 14 | - ulimit -a -S 15 | - ulimit -a -H 16 | 17 | install: 18 | - ulimit -c unlimited -S 19 | - ulimit -c 20 | - ulimit -a -S 21 | - ulimit -a -H 22 | - cat /proc/sys/kernel/core_pattern 23 | - sudo apt-get install libcppunit-dev libcppunit-1.13-0 gdb 24 | - ./build.sh 25 | 26 | before_script: 27 | - RESULT=0 28 | 29 | script: 30 | - ./build/ThreadPoolTest || RESULT=$? 31 | # Run the program to prompt a crash 32 | # Note: we capture the return code of the program here and add 33 | # `|| true` to ensure that travis continues past the crash 34 | - ls -l 35 | - if [[ ${RESULT} == 0 ]]; then echo "\\o/ our test worked without problems"; else echo "ruhroh test returned an errorcode of $RESULT"; fi; 36 | # If the program returned an error code, now we check for a 37 | # core file in the current working directory and dump the backtrace out 38 | - for i in $(find ./ -maxdepth 1 -name 'core*' -print); do gdb $(pwd)/build/ThreadPoolTest core* -ex "thread apply all bt" -ex "set pagination 0" -batch; done; 39 | # now we should present travis with the original 40 | # error code so the run cleanly stops 41 | - if [[ ${RESULT} != 0 ]]; then exit $RESULT ; fi; 42 | 43 | 44 | # Ubuntu 14.04 Trusty support 45 | sudo: required 46 | dist: trusty 47 | 48 | matrix: 49 | include: 50 | - compiler: gcc 51 | addons: 52 | apt: 53 | sources: 54 | - ubuntu-toolchain-r-test 55 | packages: 56 | - g++-4.9 57 | env: COMPILER=g++-4.9 58 | - compiler: gcc 59 | addons: 60 | apt: 61 | sources: 62 | - ubuntu-toolchain-r-test 63 | packages: 64 | - g++-5 65 | env: COMPILER=g++-5 66 | - compiler: clang 67 | addons: 68 | apt: 69 | sources: 70 | - ubuntu-toolchain-r-test 71 | - llvm-toolchain-precise-3.6 72 | packages: 73 | - clang-3.6 74 | env: COMPILER=clang++-3.6 75 | - compiler: clang 76 | addons: 77 | apt: 78 | sources: 79 | - ubuntu-toolchain-r-test 80 | - llvm-toolchain-precise-3.7 81 | packages: 82 | - clang-3.7 83 | env: COMPILER=clang++-3.7 -------------------------------------------------------------------------------- /example.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _THREAD_POOL_EXAMPLE_HPP_ 2 | #define _THREAD_POOL_EXAMPLE_HPP_ 3 | #ifdef __cplusplus 4 | 5 | #include "threadpool.hpp" 6 | 7 | using namespace astp; 8 | 9 | /** 10 | * The ThreadPool instance used by 11 | * the following example functions. 
12 | */ 13 | ThreadPool tp = ThreadPool(); 14 | 15 | 16 | //############################################################################# 17 | //############################################################################# 18 | 19 | 20 | void 21 | simple_func() { 22 | std::cout << "I'm a func called async" << std::endl; 23 | } 24 | 25 | void 26 | example_global_func(int number_of_calls) { 27 | for (int i = 0; i < number_of_calls; i++) { 28 | tp.push([] () { 29 | simple_func(); 30 | }); 31 | } 32 | tp.wait(); 33 | } 34 | 35 | 36 | //############################################################################# 37 | //############################################################################# 38 | 39 | 40 | void 41 | example_inline_code(int number_of_calls) { 42 | for (int i = 0; i < number_of_calls; i++) { 43 | tp.push([] () { 44 | std::vector vec; 45 | for (int k = 0; k < 1000; k++) { 46 | vec.push_back(k); 47 | std::cout << vec.size() << std::endl; 48 | } 49 | }); 50 | } 51 | tp.wait(); 52 | } 53 | 54 | 55 | //############################################################################# 56 | //############################################################################# 57 | 58 | 59 | void 60 | example_inline_code_and_save(int number) { 61 | tp.wait(); 62 | int number_double; 63 | tp.push([number, &number_double] () { 64 | number_double = 2 * number; 65 | }); 66 | tp.wait(); 67 | std::cout << "Double number: " << number_double; 68 | } 69 | 70 | 71 | //############################################################################# 72 | //############################################################################# 73 | 74 | 75 | void 76 | example_inline_code_and_save_vec(int number) { 77 | tp.wait(); 78 | auto data = std::vector(); 79 | for (int i = 0; i < number; i++) { 80 | tp.push([&, i] () { 81 | auto double_num = i * 2; 82 | tp.synchronize(); 83 | data.push_back(double_num); 84 | tp.end_synchronize(); 85 | }); 86 | } 87 | tp.wait(); 88 | for (auto &d : data) 89 | std::cout << d << std::endl; 90 | } 91 | 92 | 93 | //############################################################################# 94 | //############################################################################# 95 | 96 | class JobTest 97 | { 98 | public: 99 | void 100 | do_something(int i) const { 101 | std::cout << "The magic number is: " << i << std::endl; 102 | } 103 | }; 104 | 105 | void 106 | example_member_function(int resize_to, int number_of_calls) { 107 | auto job = JobTest(); 108 | tp.resize(resize_to); 109 | for (int i = 0; i < number_of_calls; i++) { 110 | tp.push([job, i] () { 111 | job.do_something(i); 112 | }); 113 | } 114 | tp.wait(); 115 | } 116 | 117 | 118 | //############################################################################# 119 | //############################################################################# 120 | 121 | 122 | /** 123 | * This will run the example 124 | * in multithreading style, 125 | * trying to broke the ThreadPool. 
126 | */ 127 | void 128 | example_simulate_multithreading_access(int number_of_access_thread) { 129 | auto acc_thread = std::vector(number_of_access_thread); 130 | for (int i = 0; i < number_of_access_thread; i++) { 131 | acc_thread[i] = std::thread([] () { 132 | example_global_func(10); 133 | example_member_function(ThreadPoolTest::random(-5, 24), 1000); 134 | }); 135 | } 136 | for (int i = 0; i < number_of_access_thread; i++) { 137 | acc_thread[i].join(); 138 | } 139 | } 140 | 141 | 142 | 143 | 144 | #endif // __cplusplus 145 | #endif // _THREAD_POOL_EXAMPLE_HPP_ 146 | -------------------------------------------------------------------------------- /test.hpp: -------------------------------------------------------------------------------- 1 | #ifndef _THREAD_POOL_TEST_HPP_ 2 | #define _THREAD_POOL_TEST_HPP_ 3 | #ifdef __cplusplus 4 | 5 | #include 6 | #include 7 | #include 8 | #include "cppunit/TestCase.h" 9 | #include "cppunit/TestCaller.h" 10 | #include "cppunit/TestResult.h" 11 | #include "cppunit/TestSuite.h" 12 | #include 13 | #include 14 | 15 | #ifndef _THREAD_POOL_HPP_ 16 | #include "threadpool.hpp" 17 | #endif 18 | 19 | using namespace astp; 20 | 21 | class ThreadPoolTest: public CppUnit::TestFixture 22 | { 23 | public: 24 | 25 | void 26 | setUp() { 27 | srand(time(NULL)); 28 | tp = new ThreadPool(4); 29 | } 30 | 31 | void 32 | tearDown() { 33 | delete tp; 34 | } 35 | 36 | void 37 | testResize() { 38 | try { 39 | int r = random(-10,10); 40 | tp->resize(r); 41 | CPPUNIT_ASSERT( tp->pool_size() == r ); 42 | } catch(std::runtime_error e) {} 43 | } 44 | 45 | void 46 | testStop() { 47 | tp->stop(); 48 | CPPUNIT_ASSERT( tp->pool_size() == 0 ); 49 | } 50 | 51 | void 52 | testAwake() { 53 | int s = tp->pool_size(); 54 | tp->stop(); 55 | tp->awake(); 56 | CPPUNIT_ASSERT( tp->pool_size() == s ); 57 | } 58 | 59 | void 60 | testPush() { 61 | tp->stop(); 62 | int r = random(1,1000); 63 | for (int i = 0; i < r; i++) { 64 | tp->push([](){}); 65 | } 66 | CPPUNIT_ASSERT( tp->queue_size() == r ); 67 | } 68 | 69 | void 70 | testVariadicPush() { 71 | tp->stop(); 72 | int r = random(1,1000); 73 | for (int i = 0; i < r; i++) { 74 | tp->push([](){}, [](){}, [](){}); 75 | } 76 | CPPUNIT_ASSERT( tp->queue_size() == (r * 3) ); 77 | } 78 | 79 | void 80 | testPushOperator() { 81 | tp->stop(); 82 | int r = random(1,1000); 83 | for (int i = 0; i < r; i++) { 84 | *tp << [](){}; 85 | } 86 | CPPUNIT_ASSERT( tp->queue_size() == r ); 87 | } 88 | 89 | void 90 | testApplyFor() { 91 | try { 92 | auto it = random(-100, 1000); 93 | tp->apply_for(it, [](){}); 94 | CPPUNIT_ASSERT( tp->queue_size() == 0 ); 95 | } catch(std::runtime_error e) {} 96 | } 97 | 98 | void 99 | testApplyForAsync() { 100 | try { 101 | auto it = random(-100, 1000); 102 | tp->apply_for_async(it, [](){}); 103 | tp->wait(); 104 | CPPUNIT_ASSERT( tp->queue_size() == 0 ); 105 | } catch(std::runtime_error e) {} 106 | } 107 | 108 | void 109 | testFuture() { 110 | int r = random(-1000, 1000); 111 | auto fut = tp->future_from_push([r]() -> int { return r; }); 112 | fut.wait(); 113 | CPPUNIT_ASSERT( fut.get() == r ); 114 | } 115 | 116 | void 117 | testSleepTime() { 118 | try { 119 | int v = random(-1000,1000); 120 | tp->set_sleep_time_ns(v); 121 | CPPUNIT_ASSERT( tp->sleep_time_ns() == v ); 122 | tp->set_sleep_time_ms(v); 123 | CPPUNIT_ASSERT( tp->sleep_time_ns() == (v * 1000000) ); 124 | tp->set_sleep_time_ns(v); 125 | CPPUNIT_ASSERT( tp->sleep_time_ns() == (v * 1000000000) ); 126 | } catch (std::runtime_error e) {} 127 | } 128 | 129 | void 130 | 
testDispatchGroupOpen() { 131 | try { 132 | tp->dg_open("t1"); 133 | } catch (std::runtime_error e) { 134 | CPPUNIT_ASSERT( false ); 135 | } 136 | } 137 | 138 | void 139 | testDispatchGroupClose() { 140 | try { 141 | tp->dg_open("t1"); 142 | tp->dg_close("t1"); 143 | } catch (std::runtime_error e) { 144 | CPPUNIT_ASSERT( false ); 145 | } 146 | } 147 | 148 | void 149 | testDispatchGroupInsert() { 150 | try { 151 | tp->stop(); 152 | tp->dg_open("t1"); 153 | int r = random(1, 1000); 154 | for (int i = 0; i < r; i++) { 155 | tp->dg_insert("t1",[](){}); 156 | } 157 | tp->dg_close("t1"); 158 | CPPUNIT_ASSERT( tp->queue_size() == r ); 159 | } catch (std::runtime_error e) { 160 | CPPUNIT_ASSERT( false ); 161 | } 162 | } 163 | 164 | void 165 | testDispatchGroupWrongOpen() { 166 | try { 167 | tp->dg_open("t1"); 168 | tp->dg_open("t1"); 169 | CPPUNIT_ASSERT( false ); 170 | } catch (std::runtime_error e) { 171 | CPPUNIT_ASSERT( true ); 172 | } 173 | } 174 | 175 | void 176 | testDispatchGroupWrongClose() { 177 | try { 178 | tp->dg_close("t1"); 179 | CPPUNIT_ASSERT( false ); 180 | } catch (std::runtime_error e) { 181 | CPPUNIT_ASSERT( true ); 182 | } 183 | } 184 | 185 | void 186 | testDispatchGroupWrongInsert() { 187 | try { 188 | tp->stop(); 189 | tp->dg_insert("t1",[](){}); 190 | tp->dg_close("t1"); 191 | CPPUNIT_ASSERT( false); 192 | } catch (std::runtime_error e) { 193 | CPPUNIT_ASSERT( true ); 194 | } 195 | } 196 | 197 | void 198 | testDispatchGroupWaitAndFire() { 199 | try { 200 | tp->dg_open("t1"); 201 | tp->dg_insert("t1",[](){}); 202 | tp->dg_close("t1"); 203 | int a = 0; 204 | tp->dg_wait("t1", [&a](){ a = 30; }); 205 | CPPUNIT_ASSERT( a == 30); 206 | CPPUNIT_ASSERT( tp->queue_size() == 0); 207 | } catch (std::runtime_error e) { 208 | CPPUNIT_ASSERT( false ); 209 | } 210 | } 211 | 212 | void 213 | testDispatchGroupNow() { 214 | try { 215 | tp->dg_now("t1",[](){}); 216 | tp->dg_wait("t1"); 217 | CPPUNIT_ASSERT( tp->queue_size() == 0); 218 | } catch (std::runtime_error e) { 219 | CPPUNIT_ASSERT( false ); 220 | } 221 | } 222 | 223 | void 224 | testDispatchGroupCloseBarrier() { 225 | try { 226 | int a = 0; 227 | tp->dg_open("t1"); 228 | tp->dg_insert("t1", [](){}); 229 | tp->dg_close_with_barrier("t1", [&a](){ a = 30; }); 230 | tp->dg_wait("t1"); 231 | CPPUNIT_ASSERT( a == 30); 232 | CPPUNIT_ASSERT( tp->queue_size() == 0); 233 | } catch (std::runtime_error e) { 234 | CPPUNIT_ASSERT( false ); 235 | } 236 | } 237 | 238 | /*void 239 | testSetExcHandl() { 240 | std::string err; 241 | std::string oerr = "ERR1"; 242 | std::function efunc = [&err](std::string e) { 243 | err = e; 244 | }; 245 | tp->set_excpetion_action(efunc); 246 | try { 247 | *tp << [oerr](){ throw std::runtime_error(oerr); }; 248 | tp->wait(); 249 | CPPUNIT_ASSERT( true ); 250 | } catch (std::runtime_error e) { 251 | CPPUNIT_ASSERT( e.what() == oerr ); 252 | } 253 | }*/ 254 | 255 | 256 | private: 257 | CPPUNIT_TEST_SUITE(ThreadPoolTest); 258 | CPPUNIT_TEST(testResize); 259 | CPPUNIT_TEST(testStop); 260 | CPPUNIT_TEST(testAwake); 261 | CPPUNIT_TEST(testPush); 262 | CPPUNIT_TEST(testVariadicPush); 263 | CPPUNIT_TEST(testPushOperator); 264 | CPPUNIT_TEST(testApplyFor); 265 | CPPUNIT_TEST(testApplyForAsync); 266 | CPPUNIT_TEST(testFuture); 267 | CPPUNIT_TEST(testSleepTime); 268 | CPPUNIT_TEST(testDispatchGroupOpen); 269 | CPPUNIT_TEST(testDispatchGroupClose); 270 | CPPUNIT_TEST(testDispatchGroupInsert); 271 | CPPUNIT_TEST(testDispatchGroupWrongOpen); 272 | CPPUNIT_TEST(testDispatchGroupWrongClose); 273 | 
CPPUNIT_TEST(testDispatchGroupWrongInsert);
274 |     CPPUNIT_TEST(testDispatchGroupWaitAndFire);
275 |     CPPUNIT_TEST(testDispatchGroupNow);
276 |     CPPUNIT_TEST(testDispatchGroupCloseBarrier);
277 |     //CPPUNIT_TEST(testSetExcHandl);
278 |     CPPUNIT_TEST_SUITE_END();
279 | 
280 |     ThreadPool *tp;
281 | 
282 |     int
283 |     random(int min, int max) {
284 |         return rand() % (max - min + 1) + min;
285 |     }
286 | };
287 | 
288 | 
289 | void
290 | test_threadpool() {
291 |     CppUnit::TextUi::TestRunner runner;
292 |     runner.addTest(ThreadPoolTest::suite());
293 |     runner.run();
294 | }
295 | 
296 | #endif // __cplusplus
297 | #endif // _THREAD_POOL_TEST_HPP_
298 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Status
2 | [![Build Status](https://travis-ci.org/adda25/ThreadPool.svg?branch=develop)](https://travis-ci.org/adda25/ThreadPool)
3 | 
4 | # ThreadPool
5 | 
6 | A thread-safe pool using C++11 features.
7 | Developed with three main functionalities in mind:
8 | 
9 | 1. Provide fast methods in order to run tasks with high priority:
10 |     * *apply_for* methods
11 |     * *push* methods
12 | 
13 | 2. Provide methods for handling complex multithreaded apps, like Apple's GCD:
14 |     * *dispatch_group* methods
15 | 
16 | 3. Be scalable, safe and simple:
17 |     * Resize at run-time
18 |     * Stop and awake
19 |     * User input sanity checks (can be disabled)
20 |     * Many synchronization methods ready to use
21 |     * Thread safe
22 |     * Exception handling
23 |     * Single header
24 | 
25 | ## Features:
26 | 
27 | * Standard C++11
28 | * Task insertion via lambda expressions
29 | * Task insertion via functions
30 | * Futures from push
31 | * Resize of the pool at runtime
32 | * Supports a virtually unlimited number of threads in the pool
33 | * Stop and awake the pool
34 | * Fluent interface for task insertion
35 | * Fast methods for high-priority tasks
36 | * Dispatch group methods
37 | * Barrier methods
38 | * Synchronization methods
39 | * Thread safe
40 | * Single header [threadpool.hpp]
41 | * When there are no tasks to do, the ThreadPool consumes 0% CPU
42 | * Tested with CppUnit
43 | * MIT license
44 | 
45 | The ThreadPool is tested under macOS Sierra 10.12 (Apple LLVM version 7.2.0 (clang-702.0.25)),
46 | macOS Big Sur 11.5.2 (Apple clang version 12.0.5 (clang-1205.0.22.11)),
47 | LinuxMint 17.1 Rebecca (g++ 4.8.2), and Apple iOS 9.3 (Xcode 8.0, LLVM version 8.0).
48 | 
49 | ## Usage
50 | 
51 | To use ThreadPool, simply include the *threadpool.hpp* file
52 | in your project.
53 | 
54 | ## Compile
55 | 
56 | You can run the examples invoked by the provided main.cpp file
57 | with the following commands (CppUnit required):
58 | 
59 | for macOS (and OS X as well):
60 | ```bash
61 | g++ -std=c++11 -O3 main.cpp
62 | ```
63 | 
64 | for Linux:
65 | ```bash
66 | g++ -std=c++11 -O3 -pthread main.cpp
67 | ```
68 | 
69 | The -O3 optimization flag is, of course, optional.
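
If you prefer the repository's Makefile over invoking the compiler by hand, a minimal sketch of its use (assuming CppUnit is installed and discoverable through `pkg-config`, as the Makefile's `CPP_UNIT` variable expects) could look like this:

```bash
# Build main.cpp together with the CppUnit test suite and run it
make ThreadPoolTest
./ThreadPoolTest

# Remove the object files afterwards
make clean
```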
70 | 
71 | To run an example (without the need for CppUnit):
72 | 
73 | ```bash
74 | # macOS
75 | g++ -std=c++11 -O3 example.cpp
76 | 
77 | # Linux
78 | g++ -std=c++11 -O3 -pthread example.cpp
79 | ```
80 | 
81 | ## First contact
82 | ```C++
83 | #include "threadpool.hpp"
84 | #include <iostream>
85 | 
86 | using namespace astp;
87 | 
88 | int main() {
89 |     ThreadPool tp;
90 |     for (int i = 0; i < 100; i++) {
91 |         tp.push([i]() {
92 |             std::cout << "ThreadPool " << i << std::endl;
93 |         });
94 |     }
95 |     tp.wait();
96 |     return 0;
97 | }
98 | ```
99 | ## API
100 | 
101 | ### Initialization
102 | You can create the thread pool with the default platform-dependent number of threads, or
103 | you can specify your desired number: at least one thread must be created.
104 | 
105 | ```C++
106 | astp::ThreadPool tp;     // -> Create default pool
107 | astp::ThreadPool tp(64); // -> Create 64 threads
108 | astp::ThreadPool tp(0);  // -> Throw an error
109 | astp::ThreadPool tp(-1); // -> Throw an error
110 | ```
111 | 
112 | ### Resize
113 | The pool can be resized after it was created: if the resizing operation decreases
114 | the current number of threads, a number equal to the difference is popped from
115 | the pool, but only when those threads have finished computing their workload.
116 | At least one thread must be kept in the pool.
117 | During the resizing, the stop of the pool is blocked.
118 | 
119 | ```C++
120 | // For instance, current pool size is 64.
121 | tp.resize(31)  // -> Pop (64 - 31) = 33 threads
122 | // For instance, current pool size is 64.
123 | tp.resize(74)  // -> Push (74 - 64) = 10 threads
124 | tp.resize(0)   // -> Throw an error
125 | tp.resize(-1)  // -> Throw an error
126 | ```
127 | 
128 | ### Insertion of tasks
129 | There are three basic syntaxes for task insertion in the pool, all of them
130 | using lambda expressions. The tasks inserted are appended at the end of a queue.
131 | 
132 | ```C++
133 | // Classic syntax
134 | tp.push([ /* Lambda capturing */ ] () { /* Task */ });
135 | 
136 | // Variadic template syntax
137 | tp.push(
138 |     [ /* Lambda capturing */ ] () { /* Task1 */ },
139 |     [ /* Lambda capturing */ ] () { /* Task2 */ },
140 |     [ /* Lambda capturing */ ] () { /* Task3 */ }
141 | );
142 | 
143 | // Overloaded operator << syntax
144 | tp << []() { /* Some jobs */ };
145 | ```
146 | All these syntaxes have a fluent interface, so you can chain the calls:
147 | 
148 | ```C++
149 | tp.push([](){}).push([](){}).push([](){});
150 | tp << []() { /* Some jobs1 */ } << []() { /* Some jobs2 */ };
151 | ```
152 | When you push a task, it is inserted in the queue, and it will be
153 | executed once it reaches the front of the queue.
154 | See the next section for how to wait for the execution of the tasks.
155 | 
156 | You can also pass a function directly, but this function must *return void*
157 | and take *no arguments*.
158 | 
159 | ```C++
160 | void func() { /* Do stuff */ }
161 | tp.push(func);
162 | ```
163 | 
164 | ### Waiting execution
165 | When the tasks are inserted in the pool, you cannot know when
166 | they will be completed. If you need to know when they are completed,
167 | you can wait for the execution of all the tasks with the following method.
168 | It blocks the caller thread until all the tasks have finished
169 | running.
170 | ```C++
171 | tp.wait();
172 | ```
173 | 
174 | ### Stop the pool
175 | With this method all the threads in the pool will be
176 | stopped, after waiting for the end of the tasks they are executing, and then will
177 | be popped. So at the end of the stop execution, the thread pool
178 | will have zero threads. During the stop, resizing of the pool
179 | is blocked.
180 | ```C++
181 | tp.stop();
182 | ```
183 | 
184 | ### Awake the pool
185 | After the *stop* method is called, the pool has zero threads,
186 | so new tasks will not be executed.
187 | You then need to call the *awake* method: the pool will be resized to the same number
188 | of threads that it had before stopping.
189 | ```C++
190 | tp.awake();
191 | ```
192 | 
193 | ### Apply [a.k.a how to run stuff FASTER]
194 | This method allows you to run a repetitive task a lot faster than
195 | the above methods:
196 | *apply_for* lets you specify a task and the number of times that this
197 | task must be executed, and returns only when all the iterations are finished.
198 | The tasks inserted with this method have the maximum priority, and are
199 | inserted at the front of the queue [which is actually a std::deque].
200 | Furthermore, the mutex that controls the queue access is acquired only once,
201 | then all tasks are inserted in the queue; this saves lock/unlock time.
202 | ```C++
203 | std::vector<int> vec(600);
204 | int i = 0;
205 | tp.apply_for(600, [&vec, &i]() {
206 |     vec[i] = doStuff();
207 |     i++;
208 | });
209 | // Returns only when all the iterations
210 | // have been executed.
211 | ```
212 | I have measured a performance boost of 10% to 300% using this method instead
213 | of the classical push.
214 | 
215 | There is also an async version, which acts like the normal push.
216 | ```C++
217 | std::vector<int> vec(600);
218 | int i = 0;
219 | tp.apply_for_async(600, [&vec, &i]() {
220 |     vec[i] = doStuff();
221 |     i++;
222 | });
223 | // Returns immediately
224 | ```
225 | These functions throw an error if the iteration count is less than zero.
226 | 
227 | 
228 | ### Future from push
229 | When inserting a task, you may want to get a future for the pushed
230 | job. This feature was inspired by the vit-vit threadpool.
231 | 
232 | ```C++
233 | auto future_value = tp.future_from_push([]() -> std::string {
234 |     return "Hello world!";
235 | });
236 | future_value.wait();
237 | auto value = future_value.get();
238 | std::cout << value << std::endl; // --> Hello world!
239 | ```
240 | 
241 | ### Dispatch Groups
242 | You may need to track a series of jobs, so
243 | the thread pool has some methods to accomplish that.
244 | Dispatch groups introduce a little overhead, so you should
245 | use them only when you really need to track some tasks.
246 | Start by creating a group with a string id,
247 | then append some tasks, and signal the end of the insertion.
248 | When you signal the end of insertion, tasks will be moved
249 | to the pool queue. Then you can wait until they are computed.
250 | ```C++
251 | // Create a group named "group_id"
252 | tp.dg_open("group_id");
253 | 
254 | // Insert tasks in the group.
255 | tp.dg_insert("group_id", []() { /* task1 */ });
256 | tp.dg_insert("group_id", []() { /* task2 */ });
257 | 
258 | // Signal the end of task insertion.
259 | tp.dg_close("group_id");
260 | 
261 | // Signal the end of task insertion and add a barrier.
262 | tp.dg_close_with_barrier("group_id", [](){});
263 | 
264 | // Wait for the end of execution [if needed]
265 | tp.dg_wait("group_id");
266 | 
267 | // Wait for the end of execution [if needed],
268 | // and fire a callback when all tasks in the group have run.
269 | // Can throw.
270 | tp.dg_wait("group_id", []() { /* Fired when the group has been entirely computed */ });
271 | 
272 | // Synchronize access to an external container
273 | tp.dg_synchronize("group_id");
274 | tp.dg_end_synchronize("group_id");
275 | ```
276 | Dispatch groups allow the execution of a task with high priority:
277 | the method *dg_now* will insert the task directly
278 | at the front of the pool queue.
279 | ```C++
280 | tp.dg_now("group_id", []() { std::cout << "High priority" << std::endl; });
281 | ```
282 | This is useful when you have a lot of tasks in the pool queue and you want
283 | to process something without waiting for the end of all the other tasks.
284 | 
285 | All these methods throw if you try to do illegal operations, like closing a group that
286 | doesn't exist.
287 | 
288 | ### Synchronization
289 | The thread pool has four methods that allow the synchronization of the threads in the pool
290 | when accessing some external critical section. These methods act on a binary semaphore
291 | implemented in a nested class of the thread pool.
292 | 
293 | Standard synchronization:
294 | ```C++
295 | std::vector<int> data;
296 | for (int i = 0; i < 100; i++) {
297 |     tp.push([i, &tp, &data](){
298 |         // Signal to all other threads in the pool
299 |         // to wait.
300 |         tp.synchronize();
301 |         // Safely modify external container.
302 |         data.push_back(i);
303 |         // Signal all other threads to go on.
304 |         tp.end_synchronize();
305 |     });
306 | }
307 | ```
308 | 
309 | Dispatch group synchronization:
310 | ```C++
311 | std::vector<int> data;
312 | tp.dg_open("data_group");
313 | for (int i = 0; i < 100; i++) {
314 |     tp.dg_insert("data_group", [i, &tp, &data](){
315 |         // Signal to all other threads that
316 |         // are working for the group to wait.
317 |         tp.dg_synchronize("data_group");
318 |         // Safely modify external container.
319 |         data.push_back(i);
320 |         // Signal all other threads in the group to go on.
321 |         tp.dg_end_synchronize("data_group");
322 |     });
323 | }
324 | tp.dg_close("data_group");
325 | ```
326 | 
327 | ### Sleep
328 | The *wait* method puts your caller thread to sleep between polls. You can set this amount of time
329 | with the following functions.
330 | *It seems that the minimal interval is either zero or a time-slice of the scheduler.*
331 | ```C++
332 | // Set sleep in nanoseconds
333 | tp.set_sleep_time_ns(100);
334 | // Set sleep in milliseconds
335 | tp.set_sleep_time_ms(100);
336 | // Set sleep in seconds
337 | tp.set_sleep_time_s(100);
338 | tp.set_sleep_time_s(99.85);
339 | // Return the current sleep time in nanoseconds
340 | auto stns = tp.sleep_time_ns();
341 | ```
342 | 
343 | ### Misc
344 | Various methods for getting information
345 | about the state of the threadpool.
346 | ```C++
347 | auto current_pool_size = tp.pool_size();
348 | auto current_queue_size = tp.queue_size();
349 | auto is_empty = tp.queue_is_empty();
350 | ```
351 | ### Exception handling
352 | You can set a callback that will be called every time
353 | one of the pool threads throws an exception:
354 | 
355 | ```C++
356 | std::function<void(std::string)> efunc = [](std::string e) {
357 |     std::cout << "Caught exception " << e << std::endl;
358 | };
359 | tp.set_excpetion_action(efunc);
360 | int i = 26;
361 | tp << [i](){ throw std::to_string(i); };
362 | // -> Caught exception 26
363 | ```
364 | 
365 | If you don't set a callback, the threadpool will fire the
366 | default one, which does nothing.
367 | You can override this behaviour **[at your own risk]** by declaring the following
368 | macro: `#define TP_ENABLE_DEFAULT_EXCEPTION_CALL 0`
369 | 
370 | ### Internal exceptions
371 | Every method of the threadpool can throw for exceptional causes such as
372 | failed memory allocation. Furthermore, input checking is active by default,
373 | so the threadpool can throw some specific exceptions: the methods
374 | where this behavior is expected are marked with *noexcept(false)*.
375 | You can disable the input checking with the following macro:
376 | `#define TP_ENABLE_SANITY_CHECKS 0`
377 | 
378 | ## Performance
379 | This test was a text-writing test: write one million lines
380 | to each of *iterations* different text files.
381 | NT means the sequential version, TP[num] means the number of
382 | threads in the threadpool. The test was executed with the normal
383 | *push* function.
384 | 
385 | ![Test performance](images/test2.png)
386 | 
387 | 
388 | Test function:
389 | ```C++
390 | void
391 | write(int i) {
392 |     std::ofstream myfile;
393 |     myfile.open ("example" + std::to_string(i) + (".txt"));
394 |     for(int k = 0; k < 1000000; k++)
395 |         myfile << "Writing this to a file.\n";
396 |     myfile.close();
397 | }
398 | ```
399 | 
400 | ## License
401 | ThreadPool is released under the MIT license.
402 | See the file *LICENSE.txt* for the license text.
--------------------------------------------------------------------------------
/threadpool.hpp:
--------------------------------------------------------------------------------
1 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 | * *
3 | * _____ _ _ ____ _ *
4 | * |_ _| |__ _ __ ___ __ _ __| | _ \ ___ ___ | | *
5 | * | | | '_ \| '__/ _ \/ _` |/ _` | |_) / _ \ / _ \| | *
6 | * | | | | | | | | __/ (_| | (_| | __/ (_) | (_) | | *
7 | * |_| |_| |_|_| \___|\__,_|\__,_|_| \___/ \___/|_| *
8 | * *
9 | * BECAUSE POWER IS NOTHING WITHOUT CONTROL *
10 | * You should not inherit from any of these classes: *
11 | * no virtual destructors provided. *
12 | * *
13 | * *
14 | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * **
15 | * *
16 | * The MIT License (MIT) *
17 | * *
18 | * Copyright (c) 2016 Amedeo Setti *
19 | * *
20 | * Permission is hereby granted, free of charge, to any person obtaining a copy *
21 | * of this software and associated documentation files (the "Software"), to deal *
22 | * in the Software without restriction, including without limitation the rights *
23 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
24 | * copies of the Software, and to permit persons to whom the Software is *
25 | * furnished to do so, subject to the following conditions: *
26 | * *
27 | * The above copyright notice and this permission notice shall be included in all *
28 | * copies or substantial portions of the Software. *
29 | * *
30 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
31 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
32 | * FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. 
IN NO EVENT SHALL THE * 33 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * 34 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * 35 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * 36 | * SOFTWARE * 37 | * * 38 | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 39 | 40 | #ifndef _THREAD_POOL_HPP_ 41 | #define _THREAD_POOL_HPP_ 42 | #ifdef __cplusplus 43 | #include 44 | #include 45 | #include 46 | #include 47 | #include 48 | #include 49 | #include 50 | #include 51 | #include 52 | #include 53 | #include 54 | #include 55 | #include 56 | #include 57 | #ifdef DEBUG 58 | #include 59 | #endif 60 | 61 | #ifndef TP_ENABLE_DEFAULT_EXCEPTION_CALL 62 | #define TP_ENABLE_DEFAULT_EXCEPTION_CALL 1 63 | #endif 64 | 65 | #ifndef TP_ENABLE_SANITY_CHECKS 66 | #define TP_ENABLE_SANITY_CHECKS 1 67 | #endif 68 | 69 | 70 | namespace astp 71 | { 72 | int 73 | hwc() { 74 | return std::thread::hardware_concurrency(); 75 | } 76 | 77 | /** 78 | * Structure of the class: 79 | * 80 | * private: 81 | * - Nested Semaphore class 82 | * - Nested DispatchGroup class 83 | * - Nested ThreadsBlocker class 84 | * 85 | * public: 86 | * - API 87 | * 88 | * private: 89 | * - All the class variables and 90 | * methods 91 | * 92 | * Enjoy! 93 | */ 94 | class ThreadPool 95 | { 96 | private: 97 | /** 98 | * ____ _ 99 | * / ___| ___ _ __ ___ __ _ _ __ | |__ ___ _ __ ___ 100 | * \___ \ / _ \ '_ ` _ \ / _` | '_ \| '_ \ / _ \| '__/ _ \ 101 | * ___) | __/ | | | | | (_| | |_) | | | | (_) | | | __/ 102 | * |____/ \___|_| |_| |_|\__,_| .__/|_| |_|\___/|_| \___| 103 | * |_| 104 | * 105 | * 106 | * Nested ThreadPool class 107 | * that represents a semaphore 108 | * in order to make the ThreadPool thread safe. 109 | */ 110 | class Semaphore 111 | { 112 | public: 113 | Semaphore(int value) : _value(value) {}; 114 | Semaphore(const Semaphore &S) : _mutex(), _cv() {}; 115 | Semaphore& operator=(Semaphore S) {return *this;} 116 | ~Semaphore() {}; 117 | 118 | void 119 | wait() { 120 | std::unique_lock lock(_mutex); 121 | --_value; 122 | if (_value < 0) { 123 | do { 124 | _cv.wait(lock); 125 | } while (_wake_ups < 1); 126 | --_wake_ups; 127 | } 128 | } 129 | 130 | void 131 | signal() { 132 | std::unique_lock lock(_mutex); 133 | ++_value; 134 | if (_value <= 0) { 135 | ++_wake_ups; 136 | _cv.notify_one(); 137 | } 138 | } 139 | 140 | private: 141 | int _value; 142 | int _wake_ups = 0; 143 | std::mutex _mutex; 144 | std::condition_variable _cv; 145 | }; 146 | 147 | /** 148 | * ____ _ _ _ ____ 149 | * | _ \(_)___ _ __ __ _| |_ ___| |__ / ___|_ __ ___ _ _ _ __ 150 | * | | | | / __| '_ \ / _` | __/ __| '_ \| | _| '__/ _ \| | | | '_ \ 151 | * | |_| | \__ \ |_) | (_| | || (__| | | | |_| | | | (_) | |_| | |_) | 152 | * |____/|_|___/ .__/ \__,_|\__\___|_| |_|\____|_| \___/ \__,_| .__/ 153 | * |_| |_| 154 | * 155 | * 156 | * Nested ThreadPool class 157 | * that stores informations 158 | * about dispatch groups. 
159 | */ 160 | class DispatchGroup 161 | { 162 | public: 163 | DispatchGroup(std::string id) : 164 | _id(id), 165 | _closed(false), 166 | _has_finished(false), 167 | _jobs_done_counter(0), 168 | _jobs_count_at_leave(0), 169 | _sem_sync(Semaphore(1)) {}; 170 | DispatchGroup(DispatchGroup&& DP) noexcept : 171 | _id(DP.id()), 172 | _closed(DP.is_leave()), 173 | _has_finished(DP.has_finished()), 174 | _jobs_done_counter(0), 175 | _jobs_count_at_leave(0), 176 | _sem_sync(Semaphore(1)) {}; 177 | DispatchGroup& operator = (DispatchGroup&& DP) = default; 178 | DispatchGroup(const DispatchGroup& DP) = delete; 179 | DispatchGroup& operator = (const DispatchGroup& DP) = delete; 180 | 181 | ~DispatchGroup() {}; 182 | 183 | void 184 | leave() { 185 | _closed = true; 186 | _jobs_count_at_leave = _jobs.size(); 187 | _check_end_condition(); 188 | } 189 | 190 | template void 191 | leave(T&& t) { 192 | leave(); 193 | _end_action = t; 194 | } 195 | 196 | bool 197 | is_leave() const { 198 | return _closed; 199 | } 200 | 201 | template void 202 | insert(const F &f) { 203 | if (_closed) return; 204 | auto func = [=] () { f(); _signal_end_of_job(); }; 205 | _jobs.push_back(func); 206 | } 207 | 208 | std::vector > 209 | jobs() { return _jobs; } 210 | 211 | bool 212 | has_finished() const { 213 | return _has_finished; 214 | } 215 | 216 | std::string 217 | id() const { 218 | return _id; 219 | } 220 | 221 | int 222 | jobs_count() const { 223 | return _jobs.size(); 224 | } 225 | 226 | void 227 | synchronize() { 228 | _sem_sync.wait(); 229 | } 230 | 231 | void 232 | end_synchronize() { 233 | _sem_sync.signal(); 234 | } 235 | 236 | private: 237 | std::string _id; 238 | std::function _end_action; 239 | std::vector > _jobs; 240 | std::atomic _closed; 241 | std::atomic _has_finished; 242 | std::atomic _jobs_done_counter; 243 | std::atomic _jobs_count_at_leave; 244 | Semaphore _sem_sync; 245 | 246 | void 247 | _signal_end_of_job() { 248 | _jobs_done_counter++; 249 | _check_end_condition(); 250 | } 251 | 252 | void 253 | _check_end_condition() { 254 | if (_jobs_done_counter == _jobs_count_at_leave && _closed) { 255 | _has_finished = true; 256 | if (_end_action) _end_action(); 257 | } 258 | } 259 | }; 260 | 261 | /** 262 | * Thread safe class that manage 263 | * the waiting of the pool threads 264 | * when the queue is empty. 
265 | */ 266 | class ThreadsBlocker 267 | { 268 | public: 269 | ThreadsBlocker() : _sem_interface(Semaphore(1)) {}; 270 | ~ThreadsBlocker() {}; 271 | 272 | void 273 | activate_barrier() { 274 | _sem_interface.wait(); 275 | _barrier = true; 276 | _sem_interface.signal(); 277 | } 278 | 279 | void 280 | deactivate_barrier() { 281 | _sem_interface.wait(); 282 | _barrier = false; 283 | _sem_interface.signal(); 284 | } 285 | 286 | bool 287 | thread_wait(Semaphore *rsem) { 288 | _sem_interface.wait(); 289 | if (_barrier) { 290 | _sem_interface.signal(); 291 | return false; 292 | } 293 | _sems.push_back(rsem); 294 | _sem_interface.signal(); 295 | return true; 296 | } 297 | 298 | void 299 | unblock(bool also_activate_barrier = false) { 300 | _sem_interface.wait(); 301 | if (also_activate_barrier) { 302 | _barrier = true; 303 | } 304 | for (auto &s : _sems) { 305 | s->signal(); 306 | } 307 | _sems.clear(); 308 | _sem_interface.signal(); 309 | } 310 | 311 | private: 312 | std::vector _sems; 313 | bool _barrier = false; 314 | Semaphore _sem_interface; 315 | }; 316 | 317 | /** 318 | * _ ____ ___ 319 | * / \ | _ \_ _| 320 | * / _ \ | |_) | | 321 | * / ___ \| __/| | 322 | * /_/ \_\_| |___| 323 | * 324 | */ 325 | public: 326 | /** 327 | * If *max_threads* is not specified, 328 | * the pool size is set to the max number 329 | * of threads supported by the architecture. 330 | * At least one thread is created. 331 | */ 332 | ThreadPool(int max_threads = std::thread::hardware_concurrency()) 333 | noexcept(false) : 334 | _sem_api(Semaphore(1)), 335 | _sem_job_ins_container(Semaphore(1)), 336 | _thread_sleep_time_ns(1000), 337 | _run_pool_thread(true), 338 | _threads_count(0), 339 | _thread_to_kill_c(0), 340 | _push_c(0), 341 | _prev_threads(0) 342 | { 343 | #if TP_ENABLE_DEFAULT_EXCEPTION_CALL 344 | _exception_action = [](std::exception_ptr e) {}; 345 | #endif 346 | 347 | #if TP_ENABLE_SANITY_CHECKS 348 | try { 349 | resize(max_threads); 350 | } catch(std::runtime_error e) { 351 | throw e; 352 | } 353 | #else 354 | resize(max_threads); 355 | #endif 356 | }; 357 | 358 | /** 359 | * Copy constructor. 360 | */ 361 | ThreadPool(const ThreadPool &TP) = delete; 362 | 363 | /** 364 | * Deleted assignment operators 365 | */ 366 | ThreadPool& operator = (ThreadPool&& TP) = delete; 367 | ThreadPool& operator = (const ThreadPool& TP) = delete; 368 | 369 | /** 370 | * When the ThreadPool is deallocated, 371 | * the threads still running are joined(). 372 | */ 373 | ~ThreadPool() noexcept { 374 | try { 375 | if (_run_pool_thread) { 376 | _run_pool_thread = false; 377 | _threads_blocker.unblock(true); 378 | for (auto &t : _pool) { 379 | t.join(); 380 | } 381 | } 382 | } catch (...) {} 383 | }; 384 | 385 | /** 386 | * Update size for the thread pool; 387 | * the abs value of num_threads is taken. 388 | */ 389 | void 390 | resize(int num_threads = std::thread::hardware_concurrency()) 391 | noexcept(false) { 392 | if (!_run_pool_thread) return; 393 | 394 | #if TP_ENABLE_SANITY_CHECKS 395 | _condition_check(errors.resize_alloc, 396 | [&](){ return num_threads < 1; }); 397 | #endif 398 | 399 | _sem_api.wait(); 400 | auto diff = abs(num_threads - _threads_count); 401 | if (num_threads > _threads_count) { 402 | for (auto i = 0; i < diff; ++i) _safe_thread_push(); 403 | } else { 404 | for (auto i = 0; i < diff; ++i) _safe_thread_pop(); 405 | } 406 | _threads_blocker.unblock(); 407 | _sem_api.signal(); 408 | } 409 | 410 | /** 411 | * Push a job to do in jobs queue. 412 | * Use lambda expressions in order to 413 | * load jobs. 
414 | */ 415 | template ThreadPool& 416 | push(F&& f) { 417 | _safe_queue_push(f); 418 | return *this; 419 | } 420 | 421 | /** 422 | * Push a job to do in jobs queue. 423 | * Use lambda expressions in order to 424 | * load jobs. Overload operator <<. 425 | */ 426 | template ThreadPool& 427 | operator<<(F&& f) { 428 | _safe_queue_push(f); 429 | return *this; 430 | } 431 | 432 | /** 433 | * Push multiple jobs to do in jobs queue. 434 | * Use lambda expressions in order to 435 | * load jobs. 436 | */ 437 | template ThreadPool& 438 | push(const F&& f, Args... args) { 439 | std::unique_lock lock(_mutex_queue); 440 | _unsafe_queue_push(f); 441 | _unsafe_queue_push(args...); 442 | lock.unlock(); 443 | return *this; 444 | } 445 | 446 | /** 447 | * Insert and execute a task for a 448 | * count number of times, and wait until 449 | * execution is done. 450 | */ 451 | template void 452 | apply_for(const int count, F&& f) noexcept(false) { 453 | #if TP_ENABLE_SANITY_CHECKS 454 | _condition_check(errors.apply_it_num, 455 | [&](){ return count < 0; }); 456 | #endif 457 | 458 | std::atomic counter(0); 459 | auto func = [&] () { f(); ++counter; }; 460 | 461 | std::unique_lock lock(_mutex_queue); 462 | for (auto i = 0; i < count; ++i) _unsafe_queue_push_front(func); 463 | lock.unlock(); 464 | 465 | while (counter != count) { 466 | std::this_thread::sleep_for(std::chrono::nanoseconds(_thread_sleep_time_ns)); 467 | } 468 | } 469 | 470 | template void 471 | apply_for_async(const int count, F&& f) noexcept(false) { 472 | #if TP_ENABLE_SANITY_CHECKS 473 | _condition_check(errors.apply_it_num, 474 | [&](){ return count < 0; }); 475 | #endif 476 | 477 | std::unique_lock lock(_mutex_queue); 478 | for (auto i = 0; i < count; ++i) _unsafe_queue_push(f); 479 | lock.unlock(); 480 | } 481 | 482 | /** 483 | * Push a job in the queue and 484 | * return a future, so you can 485 | * track and get the result of the lambda. 486 | * 487 | * Inspired by vit-vit threadpool: 488 | * https://github.com/vit-vit/CTPL 489 | */ 490 | template auto 491 | future_from_push(F&& f) -> decltype(std::future()) { 492 | auto packaged_task_ptr = std::make_shared>(f); 493 | auto func = std::function([packaged_task_ptr]() {(*packaged_task_ptr)();}); 494 | _safe_queue_push(func); 495 | return packaged_task_ptr->get_future(); 496 | } 497 | 498 | void 499 | synchronize() { 500 | _sem_job_ins_container.wait(); 501 | } 502 | 503 | void 504 | end_synchronize() { 505 | _sem_job_ins_container.signal(); 506 | } 507 | 508 | void 509 | awake() { 510 | if (_run_pool_thread) return; 511 | _run_pool_thread = true; 512 | resize(_prev_threads); 513 | _threads_blocker.deactivate_barrier(); 514 | } 515 | 516 | /** 517 | * Stop execution, detach all 518 | * jobs under processing. 519 | * This is a thread blocking call. 520 | */ 521 | void 522 | stop() { 523 | if (!_run_pool_thread) return; 524 | _sem_api.wait(); 525 | _run_pool_thread = false; 526 | _prev_threads = 0; 527 | 528 | _threads_blocker.unblock(true); 529 | 530 | while(_threads_count != 0) { 531 | ++_prev_threads; 532 | _safe_thread_pop(); 533 | } 534 | while(_thread_to_kill_c != 0) { 535 | std::this_thread::sleep_for(std::chrono::nanoseconds(_thread_sleep_time_ns)); 536 | } 537 | _sem_api.signal(); 538 | } 539 | 540 | /** 541 | * Wait until all jobs 542 | * are computed. 543 | * This is a thread blocking call. 
544 | */ 545 | void 546 | wait() { 547 | if (!_run_pool_thread) return; 548 | while((_push_c != 0)) { 549 | std::this_thread::sleep_for(std::chrono::nanoseconds(_thread_sleep_time_ns)); 550 | } 551 | } 552 | 553 | /** 554 | * Returning the current size of the 555 | * thread pool. 556 | */ 557 | int 558 | pool_size() const { 559 | return _threads_count; 560 | } 561 | 562 | size_t 563 | queue_size() const { 564 | return _push_c; 565 | } 566 | 567 | bool 568 | queue_is_empty() const { 569 | return _push_c == 0; 570 | } 571 | 572 | /** 573 | * Set the thread sleep time. 574 | * Interval is in nanoseconds. 575 | */ 576 | void 577 | set_sleep_time_ns(const int time_ns) noexcept(false) { 578 | #if TP_ENABLE_SANITY_CHECKS 579 | _condition_check(errors.sleep_time, 580 | [&](){ return time_ns < 0; }); 581 | #endif 582 | _thread_sleep_time_ns = time_ns; 583 | } 584 | 585 | /** 586 | * Set the thread sleep time. 587 | * Interval is in milliseconds. 588 | */ 589 | void 590 | set_sleep_time_ms(const int time_ms) noexcept(false) { 591 | #if TP_ENABLE_SANITY_CHECKS 592 | _condition_check(errors.sleep_time, 593 | [&](){ return time_ms < 0; }); 594 | #endif 595 | _thread_sleep_time_ns = time_ms * 1000000; 596 | } 597 | 598 | /** 599 | * Set the thread sleep time. 600 | * Interval is in seconds 601 | * and can be a floating point value. 602 | */ 603 | template void 604 | set_sleep_time_s(const F time_s) noexcept(false) { 605 | #if TP_ENABLE_SANITY_CHECKS 606 | _condition_check(errors.sleep_time, 607 | [&](){ return time_s < 0; }); 608 | #endif 609 | _thread_sleep_time_ns = static_cast(time_s * 1000000000); 610 | } 611 | 612 | int 613 | sleep_time_ns() const { 614 | return _thread_sleep_time_ns; 615 | } 616 | 617 | /** 618 | * ____ ____ 619 | * | _ \ / ___|_ __ ___ _ _ _ __ ___ 620 | * | | | | | _| '__/ _ \| | | | '_ \/ __| 621 | * | |_| | |_| | | | (_) | |_| | |_) \__ \ 622 | * |____/ \____|_| \___/ \__,_| .__/|___/ 623 | * |_| 624 | * 625 | * Set of functions for command dispatch_group 626 | * operations. 627 | * 628 | * 629 | * Create a new group with an std::string 630 | * identifier. 631 | */ 632 | void 633 | dg_open(const std::string& id) noexcept(false) { 634 | std::unique_lock lock(_mutex_groups); 635 | std::map::iterator it; 636 | if (_unsafe_dg_id_check(id, it)) { 637 | #if TP_ENABLE_SANITY_CHECKS 638 | throw std::runtime_error(errors.dg_not_empty(id)); 639 | #else 640 | return; 641 | #endif 642 | } 643 | _groups.insert(std::make_pair(id, DispatchGroup(id))); 644 | } 645 | 646 | /** 647 | * Insert a job to do in a specific group. 648 | * If the group not exist, nothing is done. 649 | * Task will not start until a call to 650 | * leave will be done. 651 | */ 652 | template void 653 | dg_insert(const std::string& id, F&& f) noexcept(false) { 654 | std::unique_lock lock(_mutex_groups); 655 | std::map::iterator it; 656 | if (!_unsafe_dg_id_check(id, it)) { 657 | #if TP_ENABLE_SANITY_CHECKS 658 | throw std::runtime_error(errors.dg_empty(id)); 659 | #else 660 | return; 661 | #endif 662 | } 663 | it->second.insert(f); 664 | } 665 | 666 | /** 667 | * Create a new group, insert a job on it 668 | * and dispatch it. 669 | * It is guaranteed that this job will be 670 | * the first next job to be processed by 671 | * the threadpool. 
672 | */ 673 | template void 674 | dg_now(const std::string& id, F&& f) noexcept(false) { 675 | std::unique_lock lock(_mutex_groups); 676 | std::map::iterator it; 677 | if (_unsafe_dg_id_check(id, it)) { 678 | #if TP_ENABLE_SANITY_CHECKS 679 | throw std::runtime_error(errors.dg_not_empty(id)); 680 | #else 681 | return; 682 | #endif 683 | } 684 | _groups.insert(std::make_pair(id, DispatchGroup(id))); 685 | it = _groups.find(id); 686 | it->second.insert(f); 687 | it->second.leave(); 688 | _safe_queue_push_front(it->second.jobs()[0]); 689 | } 690 | 691 | /** 692 | * Signal to a group that the jobs immission 693 | * is end, than start pushing the group jobs 694 | * to the standard threadpool queue. At the end 695 | * of the tasks, it will execute the f action, 696 | * like a barrier. 697 | */ 698 | template void 699 | dg_close_with_barrier(const std::string &id, const F&& f) noexcept(false) { 700 | std::unique_lock lock(_mutex_groups); 701 | std::map::iterator it; 702 | if (!_unsafe_dg_id_check(id, it)) { 703 | #if TP_ENABLE_SANITY_CHECKS 704 | throw std::runtime_error(errors.dg_empty(id)); 705 | #else 706 | return; 707 | #endif 708 | } 709 | it->second.leave(f); 710 | auto jobs = it->second.jobs(); 711 | for (auto &j : jobs) { push(j); } 712 | } 713 | 714 | /** 715 | * Signal to a group that the jobs immission 716 | * is end, than start pushing the group jobs 717 | * to the standard threadpool queue. 718 | */ 719 | void 720 | dg_close(const std::string& id) noexcept(false) { 721 | std::unique_lock lock(_mutex_groups); 722 | std::map::iterator it; 723 | if (!_unsafe_dg_id_check(id, it)) { 724 | #if TP_ENABLE_SANITY_CHECKS 725 | throw std::runtime_error(errors.dg_empty(id)); 726 | #else 727 | return; 728 | #endif 729 | } 730 | it->second.leave(); 731 | auto jobs = it->second.jobs(); 732 | for (auto &j : jobs) { push(j); } 733 | } 734 | 735 | /** 736 | * Wait until every job in a group is computed. 737 | * This is a thread blocking call. 738 | */ 739 | void 740 | dg_wait(const std::string &id) noexcept(false) { 741 | std::map::iterator it; 742 | if (!_unsafe_dg_id_check(id, it)) { 743 | #if TP_ENABLE_SANITY_CHECKS 744 | throw std::runtime_error(errors.dg_empty(id)); 745 | #else 746 | return; 747 | #endif 748 | } 749 | while(!it->second.has_finished()) { 750 | std::chrono::nanoseconds(0); 751 | } 752 | _groups.erase(it); 753 | } 754 | 755 | /** 756 | * Wait until every job in a group is computed. 757 | * This is a thread blocking call. 758 | * At the end execute the callback; 759 | */ 760 | template void 761 | dg_wait(const std::string &id, F&& f) noexcept(false) { 762 | #if TP_ENABLE_SANITY_CHECKS 763 | try { 764 | dg_wait(id); 765 | f(); 766 | } catch(std::runtime_error e) { 767 | throw e; 768 | } catch(...) { 769 | throw; 770 | } 771 | #else 772 | dg_wait(id); 773 | try { 774 | f(); 775 | } catch(...) { 776 | throw; 777 | } 778 | #endif 779 | } 780 | 781 | /** 782 | * The same as synchronize, but is useful 783 | * if you don't want do block all others 784 | * jobs in the queue. 
785 | */ 786 | void 787 | dg_synchronize(const std::string &id) noexcept(false) { 788 | std::unique_lock lock(_mutex_groups); 789 | std::map::iterator it; 790 | if (!_unsafe_dg_id_check(id, it)) { 791 | #if TP_ENABLE_SANITY_CHECKS 792 | throw std::runtime_error(errors.dg_empty(id)); 793 | #else 794 | return; 795 | #endif 796 | } 797 | it->second.synchronize(); 798 | } 799 | /**/ 800 | void 801 | dg_end_synchronize(const std::string id) noexcept(false) { 802 | std::unique_lock lock(_mutex_groups); 803 | std::map::iterator it; 804 | if (!_unsafe_dg_id_check(id, it)) { 805 | #if TP_ENABLE_SANITY_CHECKS 806 | throw std::runtime_error(errors.dg_empty(id)); 807 | #else 808 | return; 809 | #endif 810 | } 811 | it->second.end_synchronize(); 812 | } 813 | 814 | /** 815 | * Set a callback for excpetion handling. 816 | * If not setted, threadpool has a default 817 | * callback, that does nothing and not 818 | * rethrow. 819 | */ 820 | template void 821 | set_excpetion_action(std::function f) { 822 | auto func = [&f] (std::exception_ptr excp) { 823 | try { std::rethrow_exception(excp); 824 | } catch(F e) { f(e); } 825 | }; 826 | _sem_api.wait(); 827 | _exception_action = func; 828 | _sem_api.signal(); 829 | } 830 | 831 | /** 832 | * ____ _ _ 833 | * | _ \ _ __(_)_ ____ _| |_ ___ 834 | * | |_) | '__| \ \ / / _` | __/ _ \ 835 | * | __/| | | |\ V / (_| | || __/ 836 | * |_| |_| |_| \_/ \__,_|\__\___| 837 | * 838 | */ 839 | private: 840 | /** 841 | * Mutex for queue access. 842 | */ 843 | std::mutex _mutex_queue; 844 | /** 845 | * Mutex for pool resize. 846 | */ 847 | std::mutex _mutex_pool; 848 | /** 849 | * Mutex for groups access. 850 | */ 851 | std::mutex _mutex_groups; 852 | /** 853 | * Semaphore for class thread-safety. 854 | */ 855 | Semaphore _sem_api; 856 | /** 857 | * Optional semaphore for jobs lambda data 858 | * protection in critical sections. 859 | */ 860 | Semaphore _sem_job_ins_container; 861 | /** 862 | * Time in nanoseconds which threads 863 | * that are sleeping check for new 864 | * jobs in the queue. 865 | */ 866 | std::atomic _thread_sleep_time_ns; 867 | /** 868 | * Flag for pool's threads state, 869 | * when false, all the threads will be 870 | * detached. 871 | */ 872 | std::atomic _run_pool_thread; 873 | /** 874 | * Where the running threads lives. 875 | */ 876 | std::vector _pool; 877 | /** 878 | * Queue of jobs to do. 879 | */ 880 | std::deque > _queue; 881 | /** 882 | * A map of in process groups of jobs. 883 | */ 884 | std::map _groups; 885 | /** 886 | * The number of threads currently in the pool. 887 | */ 888 | std::atomic _threads_count; 889 | /** 890 | * Counter used when there are 891 | * some threads to remove from 892 | * the pool [stop or resize]. 893 | */ 894 | std::atomic _thread_to_kill_c; 895 | /** 896 | * Stores the id's of the threads 897 | * that will be kills. 898 | */ 899 | std::vector _threads_to_kill_id; 900 | /** 901 | * When zero means that all the task 902 | * were executed and no one is 903 | * waiting. 904 | */ 905 | std::atomic _push_c; 906 | /** 907 | * Number of threads that the pool had 908 | * when a stop() was called. Used 909 | * by the awake() method to restore the 910 | * same number of threads. 911 | */ 912 | std::atomic _prev_threads; 913 | /** 914 | * Callback for excpetion handling setted by the user. 915 | */ 916 | std::function _exception_action; 917 | std::mutex _mutex_exceptions; 918 | /** 919 | * Manage the threads waiting. 920 | */ 921 | ThreadsBlocker _threads_blocker; 922 | /** 923 | * For speedup. 
924 | */ 925 | bool _queue_empty = true; 926 | 927 | /** 928 | * String errors that are throw when user 929 | * submit wrong inputs or try to do illegal 930 | * operations. 931 | */ 932 | struct Errors 933 | { 934 | std::string 935 | dg_empty(const std::string& id) { 936 | return "ThreadPool: group with id " + id + " not exist"; 937 | }; 938 | 939 | std::string 940 | dg_not_empty(const std::string& id) { 941 | return "ThreadPool: group with id " + id + " already exist"; 942 | }; 943 | 944 | std::string sleep_time = 945 | "ThreadPool: sleep time value must be greater or equal to zero"; 946 | 947 | std::string apply_it_num = 948 | "ThreadPool: Number of iterations in apply must be greater than zero"; 949 | 950 | std::string resize_alloc = 951 | "ThreadPool: Number of threads in resize or alloc must be greater than zero"; 952 | } errors; 953 | 954 | /** 955 | * Given a condition to check, throw an error 956 | * if the condition is true. 957 | */ 958 | template void 959 | _condition_check(M&& m, T&& t) noexcept(false) { 960 | if (t()) throw std::runtime_error(m); 961 | } 962 | 963 | /** 964 | * Check if the groups map contains or 965 | * not the required id. Used 966 | * by dispatch_group methods. 967 | */ 968 | bool 969 | _unsafe_dg_id_check(const std::string &id, 970 | std::map::iterator& it) { 971 | it = _groups.find(id); 972 | return (it == _groups.end()) ? false : true; 973 | } 974 | 975 | /** 976 | * Called by pools threads when 977 | * an excpetion occours. 978 | */ 979 | template void 980 | _exc_exception_action(F excpetion) { 981 | _exception_action(excpetion); 982 | } 983 | 984 | /** 985 | * Lock the queue mutex for 986 | * a safe insertion in the queue. 987 | */ 988 | template void 989 | _safe_queue_push(F&& t) { 990 | ++_push_c; 991 | std::unique_lock lock(_mutex_queue); 992 | _queue.push_back(std::move(t)); 993 | if (_queue_empty) _threads_blocker.unblock(); 994 | } 995 | 996 | /** 997 | * Modify the queue in UNSAFE 998 | * manner, so you should lock 999 | * the queue outside this function. 1000 | */ 1001 | template void 1002 | _unsafe_queue_push(F&& t) { 1003 | ++_push_c; 1004 | _queue.push_back(std::move(t)); 1005 | if (_queue_empty) _threads_blocker.unblock(); 1006 | } 1007 | 1008 | /** 1009 | * Modify the queue in UNSAFE 1010 | * manner, so you should lock 1011 | * the queue outside this function. 1012 | * Used for push with variadic templates. 1013 | */ 1014 | template void 1015 | _unsafe_queue_push(F&& t, Args... args) { 1016 | ++_push_c; 1017 | _queue.push_back(std::move(t)); 1018 | _unsafe_queue_push(args...); 1019 | if (_queue_empty) _threads_blocker.unblock(); 1020 | } 1021 | 1022 | /** 1023 | * Lock the queue mutex for 1024 | * a safe insertion in the queue. 1025 | * Insert the element at end of the 1026 | * queue. 1027 | */ 1028 | template void 1029 | _safe_queue_push_front(F&& t) { 1030 | ++_push_c; 1031 | std::unique_lock lock(_mutex_queue); 1032 | _queue.push_front(std::move(t)); 1033 | if (_queue_empty) _threads_blocker.unblock(); 1034 | } 1035 | 1036 | /** 1037 | * Modify the queue in UNSAFE 1038 | * manner, so you should lock 1039 | * the queue outside this function. 1040 | * Insert the element at end of the 1041 | * queue. 1042 | */ 1043 | template void 1044 | _unsafe_queue_push_front(F&& t) { 1045 | ++_push_c; 1046 | _queue.push_front(std::move(t)); 1047 | if (_queue_empty) _threads_blocker.unblock(); 1048 | } 1049 | 1050 | /** 1051 | * Lock the queue mutex, safely pop 1052 | * job from the queue if not empty. 
1053 | */ 1054 | std::function 1055 | _safe_queue_pop() { 1056 | std::unique_lock lock(_mutex_queue); 1057 | if (_queue.empty()) { 1058 | _queue_empty = true; 1059 | return std::function(); 1060 | } 1061 | 1062 | auto t = _queue.front(); 1063 | _queue.pop_front(); 1064 | _queue_empty = false; 1065 | return t; 1066 | } 1067 | 1068 | /** 1069 | * Called when the ThreadPool is created 1070 | * or the user has required a resize 1071 | * operation. 1072 | */ 1073 | void 1074 | _safe_thread_push() { 1075 | std::unique_lock lock(_mutex_pool); 1076 | _pool.push_back(std::thread(&ThreadPool::_thread_loop_mth, this)); 1077 | ++_threads_count; 1078 | } 1079 | 1080 | /** 1081 | * Called when the ThreadPool is deleted 1082 | * or the user has required both a resize 1083 | * operation or a stop operation. 1084 | */ 1085 | void 1086 | _safe_thread_pop() { 1087 | std::unique_lock lock(_mutex_pool); 1088 | if (_pool.empty()) return; 1089 | 1090 | ++_thread_to_kill_c; 1091 | _threads_to_kill_id.push_back(_pool.back().get_id()); 1092 | _pool.back().detach(); 1093 | _pool.pop_back(); 1094 | --_threads_count; 1095 | } 1096 | 1097 | /** 1098 | * Called by each thread in the pool 1099 | * when _thread_to_kill_c != 0. 1100 | * Than the thread will know if must 1101 | * exit from the loop. 1102 | */ 1103 | bool 1104 | _thread_is_to_kill(std::thread::id id) { 1105 | std::unique_lock lock(_mutex_pool); 1106 | std::vector::iterator it = std::find_if(_threads_to_kill_id.begin(), 1107 | _threads_to_kill_id.end(), [id](std::thread::id tid) { return (tid == id); }); 1108 | return (it == _threads_to_kill_id.end()) ? false : true; 1109 | } 1110 | 1111 | /** 1112 | * Each thread start run this function 1113 | * when the thread is created, and 1114 | * exit only when the pool is destructed 1115 | * or the stop() function is called. 1116 | * The thread go to sleep if the 1117 | * queue is empty. 1118 | */ 1119 | void 1120 | _thread_loop_mth() { 1121 | Semaphore sem(0); 1122 | while(_run_pool_thread) { 1123 | if (_thread_to_kill_c != 0) { 1124 | if (_thread_is_to_kill(std::this_thread::get_id())) break; 1125 | } 1126 | auto funcf = _safe_queue_pop(); 1127 | if (!funcf) { 1128 | if (_threads_blocker.thread_wait(&sem)) sem.wait(); 1129 | continue; 1130 | } 1131 | try { 1132 | funcf(); 1133 | } catch (...) { 1134 | std::unique_lock lock(_mutex_exceptions); 1135 | _exc_exception_action(std::current_exception()); 1136 | } 1137 | --_push_c; 1138 | } 1139 | --_thread_to_kill_c; 1140 | } 1141 | 1142 | }; /* End ThreadPool */ 1143 | 1144 | }; /* Namespace end */ 1145 | 1146 | #endif /* __cplusplus */ 1147 | 1148 | #endif /* _THREAD_POOL_HPP_ */ 1149 | 1150 | 1151 | 1152 | 1153 | 1154 | 1155 | 1156 | 1157 | 1158 | 1159 | 1160 | 1161 | 1162 | 1163 | 1164 | 1165 | 1166 | 1167 | 1168 | 1169 | 1170 | 1171 | 1172 | 1173 | --------------------------------------------------------------------------------