├── .DS_Store
├── chapter04
│   ├── .DS_Store
│   ├── example4_08.cpp
│   ├── example4_14.cpp
│   ├── example4_11.cpp
│   ├── example4_06.cpp
│   ├── example4_03.cpp
│   ├── example4_12.cpp
│   ├── example4_01.cpp
│   ├── example4_02.cpp
│   ├── example4_16.cpp
│   ├── example4_13.cpp
│   ├── example4_10.cpp
│   ├── example4_15.cpp
│   ├── example4_07.cpp
│   ├── example4_09.cpp
│   ├── example4_04.cpp
│   └── example4_05.cpp
├── chapter05
│   ├── .DS_Store
│   ├── example5_03.cpp
│   ├── example5_01.cpp
│   ├── example5_02.cpp
│   ├── example5_05.cpp
│   ├── example5_10.cpp
│   ├── example5_13.cpp
│   ├── example5_12.cpp
│   ├── example5_08.cpp
│   ├── example5_09.cpp
│   ├── example5_04.cpp
│   ├── example5_07.cpp
│   ├── example5_11.cpp
│   └── example5_06.cpp
├── chapter09
│   ├── .DS_Store
│   ├── example9_04.cpp
│   ├── example9_10.cpp
│   ├── example9_09.cpp
│   ├── example9_03.cpp
│   ├── example9_13.cpp
│   ├── example9_01.cpp
│   ├── example9_07.cpp
│   ├── example9_05.cpp
│   ├── example9_06.cpp
│   ├── example9_12.cpp
│   ├── example9_11.cpp
│   ├── example9_02.cpp
│   └── example9_08.cpp
├── chapter01
│   └── example1_1.cpp
├── chapter03
│   ├── example3_11.cpp
│   ├── example3_1.cpp
│   ├── example3_4.cpp
│   ├── example3_10.cpp
│   ├── example3_9.cpp
│   ├── example3_6.cpp
│   ├── example3_3.cpp
│   ├── example3_2.cpp
│   ├── example3_12.cpp
│   ├── example3_13.cpp
│   ├── example3_7.cpp
│   ├── example3_8.cpp
│   └── example3_5.cpp
├── chapter02
│   ├── example2_7.cpp
│   ├── example2_1.cpp
│   ├── example2_4.cpp
│   ├── example2_5.cpp
│   ├── example2_2.cpp
│   ├── example2_6.cpp
│   ├── example2_3.cpp
│   └── example2_8.cpp
├── chapter07
│   ├── example7_01.cpp
│   ├── example7_14.cpp
│   ├── example7_18.cpp
│   ├── example7_02.cpp
│   ├── example7_04.cpp
│   ├── example7_17.cpp
│   ├── example7_16.cpp
│   ├── example7_03.cpp
│   ├── example7_19.cpp
│   ├── example7_09.cpp
│   ├── example7_20.cpp
│   ├── example7_10.cpp
│   ├── example7_06.cpp
│   ├── example7_11.cpp
│   ├── example7_07.cpp
│   ├── example7_05.cpp
│   ├── example7_13.cpp
│   ├── example7_08.cpp
│   ├── example7_21.cpp
│   ├── example7_15.cpp
│   └── example7_12.cpp
├── .gitignore
├── chapter06
│   ├── example6_08.cpp
│   ├── example6_12.cpp
│   ├── example6_07.cpp
│   ├── example6_01.cpp
│   ├── example6_05.cpp
│   ├── example6_04.cpp
│   ├── example6_10.cpp
│   ├── example6_09.cpp
│   ├── example6_06.cpp
│   ├── example6_02.cpp
│   ├── example6_03.cpp
│   ├── example6_13.cpp
│   └── example6_11.cpp
├── chapter08
│   ├── example8_12.cpp
│   ├── example8_08.cpp
│   ├── example8_05.cpp
│   ├── example8_06.cpp
│   ├── example8_10.cpp
│   ├── example8_07.cpp
│   ├── example8_03.cpp
│   ├── example8_04.cpp
│   ├── example8_02.cpp
│   ├── example8_09.cpp
│   ├── example8_13.cpp
│   ├── example8_01.cpp
│   └── example8_11.cpp
├── LICENSE
├── chapter10
│   └── example10_01.cpp
└── README.md

/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xuyicpp/multi_threading/HEAD/.DS_Store
--------------------------------------------------------------------------------
/chapter04/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xuyicpp/multi_threading/HEAD/chapter04/.DS_Store
--------------------------------------------------------------------------------
/chapter05/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xuyicpp/multi_threading/HEAD/chapter05/.DS_Store
--------------------------------------------------------------------------------
/chapter09/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xuyicpp/multi_threading/HEAD/chapter09/.DS_Store
--------------------------------------------------------------------------------
/chapter09/example9_04.cpp:
-------------------------------------------------------------------------------- 1 | //run_pending_task()的实现 2 | void thread_pool::run_pending_task() 3 | { 4 | function_weapper task; 5 | if(work_queue.try_pop(task)) 6 | { 7 | task(); 8 | } 9 | else 10 | { 11 | std::this_thread::yield(); 12 | } 13 | } -------------------------------------------------------------------------------- /chapter01/example1_1.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | //join的作用是让主线程等待直到该子线程执行结束,示例: 4 | //需要注意的是线程对象执行了join后就不再joinable了,所以只能调用join一次。 5 | 6 | void hello() 7 | { 8 | std::cout<<"Hello Concurrent World\n"; 9 | } 10 | 11 | int main() 12 | { 13 | std::thread t(hello); 14 | t.join(); 15 | } -------------------------------------------------------------------------------- /chapter05/example5_03.cpp: -------------------------------------------------------------------------------- 1 | //一个函数调用的参数的估计顺序是未指定的 2 | #include 3 | 4 | void foo(int a,int b) 5 | { 6 | std::cout<特化的部分类定义 2 | template<> 3 | class packaged_task*,int)> 4 | { 5 | public: 6 | template 7 | explicit packaged_task(Callable&& f); 8 | std::future get_future(); 9 | void operator()(std::vector*,int); 10 | }; -------------------------------------------------------------------------------- /chapter03/example3_11.cpp: -------------------------------------------------------------------------------- 1 | //使用互斥元进行线程安全的延迟初始化 2 | std:shared_ptr resource_ptr; 3 | std::mutex resource_mutex; 4 | void foo() 5 | { 6 | std::unique_lock lk(resource_mutex); 7 | if(!resource_ptr) 8 | { 9 | resource_ptr.reset(new some_resource); 10 | } 11 | lk.unlock(); 12 | resource_ptr->do_something(); 13 | } -------------------------------------------------------------------------------- /chapter02/example2_7.cpp: -------------------------------------------------------------------------------- 1 | //生成一批线程并等待它们完成 2 | void do_work(unsigned id); 3 | 4 | void f() 5 | { 6 | std::vector threads; 7 | for(unsigned i = 0; i < 20; ++i) 8 | { 9 | threads.push_back(std::thread(do_work,i)); //生成线程 10 | } 11 | std::for_each(threads.begin(),threads.end(),std::mem_fn(&std::thread::join)); //轮流在每个线程上调用join() 12 | } -------------------------------------------------------------------------------- /chapter07/example7_01.cpp: -------------------------------------------------------------------------------- 1 | //使用std::atomic_flag的自旋锁互斥元的实现 2 | class spinlock_mutex 3 | { 4 | std::atomic_flag flag; 5 | public: 6 | spinlock_mutex(): 7 | flag(ATOMIC_FLAG_INIT) 8 | {} 9 | void lock() 10 | { 11 | while(flag.test_and_set(std::memory_order_acquire)); 12 | } 13 | void unlock() 14 | { 15 | flag.clear(std::memory_order_release); 16 | } 17 | }; -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | *.a 27 | *.lib 28 | 29 | # Executables 30 | *.exe 31 | *.out 32 | *.app 33 | -------------------------------------------------------------------------------- /chapter02/example2_1.cpp: -------------------------------------------------------------------------------- 1 | struct func 2 | { 3 | 
int& i; 4 | func(int& i_):i(i_) {} 5 | void operator() () 6 | { 7 | for(unsigned j = 0; j < 1000000; ++j) 8 | { 9 | do_something(i); //对悬空引用可能的访问 10 | } 11 | } 12 | }; 13 | 14 | void oops() 15 | { 16 | int some_local_state = 0; 17 | func my_function(some_local_state); 18 | std::thread my_thread(my_func); 19 | my_thread.detach(); //不等待线程完成 20 | } //新的线程可能仍在运行 -------------------------------------------------------------------------------- /chapter05/example5_01.cpp: -------------------------------------------------------------------------------- 1 | //使用std::atomic_flag的自旋锁互斥实现 2 | class spinlock_mutex 3 | { 4 | std::atomic_flag flag; 5 | public: 6 | spinlock_mutex(): 7 | flag(ATOMIC_FLAG_INIT) 8 | {} 9 | void lock() 10 | { 11 | while(flag.test_and_set(std::memory_order_acquire)); 12 | } 13 | void unlock() 14 | { 15 | flag.clear(std::memory_order_release); 16 | } 17 | }; 18 | //为了锁定互斥元,循环执行test_and_set()知道旧值为false,指示这个线程将值设为true。解锁互斥元就是简单地清除标志。 -------------------------------------------------------------------------------- /chapter04/example4_14.cpp: -------------------------------------------------------------------------------- 1 | //一个简单的spawn_task的实现 2 | //相比于使用std::async,只有在你确实知道将要做什么,并且希望想要通过线程池建立的方式进行完全掌控和执行任务的时候, 3 | //才值得首选这种方法 4 | template 5 | std::future::type> spawn_task(F&& f,A&& a) 6 | { 7 | typedef std::result_of::type result_type; 8 | std::packaged_task res(task.get_future()); 9 | std::thread t(std::move(task),std::move(a)); 10 | t.detach(); 11 | return res; 12 | } -------------------------------------------------------------------------------- /chapter05/example5_02.cpp: -------------------------------------------------------------------------------- 1 | //从不同的线程中读取和写入变量 2 | #include 3 | #include 4 | #include 5 | 6 | std::vector data; 7 | std::atomic data_ready(false); 8 | 9 | void reader_thread() 10 | { 11 | while(!data_ready.load()) 12 | { 13 | std::this_thread::sleep(std::milliseconds(1)); 14 | } 15 | std::cout<<"The answer="< 3 | void threadsafe_queue::push(T new_value) 4 | { 5 | std::shared_ptr new_data(std::make_shared(std::move(new_value))); 6 | std::unique_ptr p(new node); 7 | { 8 | std::lock_guard tail_lock(tail_mutex); 9 | tail->data=new_data; 10 | node* const new_tail=p.get(); 11 | tail->next=std::move(p); 12 | tail=new_tail; 13 | } 14 | data_cond.notify_one(); 15 | } -------------------------------------------------------------------------------- /chapter02/example2_4.cpp: -------------------------------------------------------------------------------- 1 | //分离线程以处理其他文档 2 | void edit_document(std::string const& filename) 3 | { 4 | open_document_and_display_gui(filename); 5 | while(!done_editing()) 6 | { 7 | user_command cmd = get_user_input(); 8 | if(cmd.type == open_new_document) 9 | { 10 | std::string const new_name=get_filename_from_user(); 11 | std::thread t(edit_document,new_name); 12 | t.detach(); 13 | } 14 | else 15 | { 16 | process_user_input(cmd); 17 | } 18 | } 19 | } -------------------------------------------------------------------------------- /chapter09/example9_10.cpp: -------------------------------------------------------------------------------- 1 | //因std::condition_variable而遭到破坏的interruptible_wait函数实现 2 | void interruptible_wait(std::condition_variable& cv,std::unique_lock& lk) 3 | { 4 | //检测中断 5 | interruptible_point(); 6 | //关联一个带interrupt_flag的条件变量 7 | this_thread_interrupt_flag.set_condition_variable(cv); 8 | //等待条件变量,这里被唤醒然后中断 9 | cv.wait(lk); 10 | //清除关联的条件变量 11 | this_thread_interrupt_flag.clear_condition_variable(); 12 | //再一次检测中断 13 
| interruptible_point(); 14 | } -------------------------------------------------------------------------------- /chapter02/example2_5.cpp: -------------------------------------------------------------------------------- 1 | //从函数中返回std::thread,控制权从函数中转移出 2 | std::thread f() 3 | { 4 | void some_function(); 5 | return std::thread(some_function); 6 | } 7 | std::thread g() 8 | { 9 | void some_other_function(int); 10 | std::thread t(some_other_function,42); 11 | return t; 12 | } 13 | 14 | //控制权从函数中转移进 15 | void f(std::thread t); 16 | void g() 17 | { 18 | void some_function(); 19 | f(std::thread(some_function)); 20 | std::thread t(some_function); 21 | f(std::move(t)); 22 | } -------------------------------------------------------------------------------- /chapter04/example4_11.cpp: -------------------------------------------------------------------------------- 1 | //等待一个具有超时的条件变量 2 | #include 3 | #include 4 | #include 5 | std::condition_variable cv; 6 | bool done; 7 | std::mutex m; 8 | 9 | bool wait_loop() 10 | { 11 | auto const timeout=std::chrono::steady_clock::now()+std::chrono::milliseconds(500); 12 | std::unique_lock lk(m); 13 | while(!done) 14 | { 15 | if(cv.wait_until(lk,timeout)==std::cv_status::timeout) 16 | break; 17 | } 18 | return done; 19 | } -------------------------------------------------------------------------------- /chapter03/example3_1.cpp: -------------------------------------------------------------------------------- 1 | //用互斥元保护列表 2 | #include 3 | #include 4 | #include 5 | 6 | std::list some_list; 7 | std::mutex some_mutex; 8 | 9 | void add_to_list(int new_value) 10 | { 11 | std::lock_guard guard(some_mutex); 12 | some_list.push_back(new_value); 13 | } 14 | 15 | bool list_contains(int value_to_find) 16 | { 17 | std::lock_guard guard(some_mutex); 18 | return std::find(some_list.begin(),some_list.end(),value_to_find) != some_list.end(); 19 | } -------------------------------------------------------------------------------- /chapter04/example4_06.cpp: -------------------------------------------------------------------------------- 1 | //使用std::future获取异步任务的返回值 2 | #include 3 | #include 4 | 5 | int find_the_answer_to_ltuae(); 6 | void do_other_stuff(); 7 | int main() 8 | { 9 | std::future the_anwer=std::async(find_the_answer_to_ltuae); 10 | do_other_stuff(); 11 | std::cout<<"The answer is "< //为了std::shared_ptr 3 | 4 | template 5 | class threadsafe_queue 6 | { 7 | public: 8 | threadsafe_queue(); 9 | threadsafe_queue(const threadsafe_queue&); 10 | threadsafe_queue& operator=(const threadsafe_queue&) = delete; //为了简单起见不允许复制 11 | 12 | void push(T new_value); 13 | 14 | bool try_pop(T& value); 15 | std::shared_ptr try_pop(); 16 | 17 | void wait_and_pop(T& value); 18 | std::shared_ptr wait_and_pop(); 19 | 20 | bool empty() const; 21 | }; -------------------------------------------------------------------------------- /chapter07/example7_14.cpp: -------------------------------------------------------------------------------- 1 | //首次(很逊的)尝试修订push() 2 | void push(T new_value) 3 | { 4 | std::unique_ptr new_data(new T(new_value)); 5 | counted_node_ptr new_next; 6 | new_next.ptr=new node; 7 | new_next.external_count=1; 8 | for(;;) 9 | { 10 | node* const old_tail=tail.load(); //加载一个原子指针 11 | T* old_data=nullptr; 12 | if(old_tail->data.compare_exchange_strong(old_data,new_data.get())) //解引用那个指针 13 | { 14 | old_tail->next=new_next; 15 | tail.store(new_next.ptr); //更新那个指针 16 | new_data.release(); 17 | break; 18 | } 19 | } 20 | } 
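// Why the listing's own first comment calls this push() revision a poor first attempt:
// only the thread whose compare_exchange_strong() on old_tail->data succeeds goes on to
// set next and store the new tail. Every other thread pushing at the same time keeps
// re-loading the same tail, sees data already non-null, and spins in the for(;;) loop
// until tail is finally updated. If the winning thread stalls between setting data and
// storing tail, all other producers are blocked, so the queue is not lock-free in that
// window and the waiting threads burn CPU while they spin.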
-------------------------------------------------------------------------------- /chapter03/example3_4.cpp: -------------------------------------------------------------------------------- 1 | //一个线程安全栈的概要类定义 2 | #include 3 | #include //For std::shared_ptr<> 4 | 5 | struct empty_stack: std::exception 6 | { 7 | const char* waht() const throw(); 8 | }; 9 | 10 | template 11 | class threadsafe_stack 12 | { 13 | public: 14 | threadsafe_stack(); 15 | threadsafe_stack(const threadsafe_stack&); 16 | threadsafe_stack& operator=(const threadsafe_stack&) = delete; //赋值运算符被删除了 17 | 18 | void push(T new_value); 19 | std::shared_ptr pop(); 20 | void pop(T& value); 21 | bool empty() const; 22 | }; -------------------------------------------------------------------------------- /chapter06/example6_12.cpp: -------------------------------------------------------------------------------- 1 | //获取threadsafe_lookup_table的内容作为一个std::map<> 2 | std::map threadsafe_lookup_table::get_map() const 3 | { 4 | std::vector > locks; 5 | for(unsigned i=0;i(buckets[i].mutex)); 8 | } 9 | std::map res; 10 | for (unsigned i = 0; i < buckets.size(); ++i) 11 | { 12 | for(bucket_iterator it=buckets[i].data.begin(); it!=buckets[i].data.end(); ++it) 13 | { 14 | res.insert(*it); 15 | } 16 | } 17 | return res; 18 | } -------------------------------------------------------------------------------- /chapter07/example7_18.cpp: -------------------------------------------------------------------------------- 1 | //在无锁队列中获得结点的新引用 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | static void increase_external_count(std::atomic& counter, counted_node_ptr& old_counter) 7 | { 8 | counted_node_ptr& new_counter; 9 | 10 | do 11 | { 12 | new_counter = old_counter; 13 | ++new_counter.external_count; 14 | } 15 | while(!counter.compare_exchange_strong( 16 | old_counter,new_counter, 17 | std::memory_order_acquire,std::memory_order_relaxed)); 18 | 19 | old_counter.external_count=new_counter.external_count; 20 | } 21 | }; -------------------------------------------------------------------------------- /chapter03/example3_10.cpp: -------------------------------------------------------------------------------- 1 | //在比较运算符中每次锁定一个互斥元 2 | class Y 3 | { 4 | private: 5 | int some_detail; 6 | //被mutable修饰的变量(mutable只能由于修饰类的非静态数据成员),将永远处于可变的状态,即使在一个const函数中。 7 | mutable std::mutex m; 8 | 9 | int get_detail() const 10 | { 11 | std::lock_guard lock_a(m); 12 | return some_detail; 13 | } 14 | public: 15 | Y(int sd):some_detail(sd){} 16 | 17 | freind bool operator==(Y const& lhs, Y const& rhs) 18 | { 19 | if(&lhs==&rhs) 20 | return true; 21 | int const lhs_value=lhs.get_detail(); 22 | int const rhs_value=rhs.get_detail(); 23 | return lhs_value==rhs_value; 24 | } 25 | }; -------------------------------------------------------------------------------- /chapter07/example7_02.cpp: -------------------------------------------------------------------------------- 1 | //实现不使用锁的线程安全push() 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | struct node 7 | { 8 | T data; 9 | node* next; 10 | 11 | node(T const& data_): 12 | data(data_) 13 | {} 14 | }; 15 | public: 16 | void push(T const& data) 17 | { 18 | node* const new_node=new node(data); 19 | new_node->next=head.load(); 20 | while(!head.compare_exchange_weak(new_node->next,new_node)); 21 | //如果这两个值是一样的,那么将head指向new_node。这段代码中使用了比较/交换函数的一部分, 22 | //如果它返回false则表明此次比较没有成功(例如,因为另一个线程修改了head)。此时,第一个参数(new_node->next)的值 23 | //将被更新为head当前的值。 24 | } 25 | }; 26 | 
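// A minimal usage sketch (a hypothetical helper, not part of the original file), assuming
// the class also declares a member std::atomic<node*> head and includes <atomic>; the
// flattened dump strips angle-bracket text, so neither appears in the listing above.
void example_usage()
{
    lock_free_stack<int> s;
    s.push(42);   // safe to call concurrently from several threads:
                  // compare_exchange_weak() retries until the new node is linked in.
}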
-------------------------------------------------------------------------------- /chapter03/example3_9.cpp: -------------------------------------------------------------------------------- 1 | //在交换操作中使用std::lock()和std::unique_lock 2 | class some_big_object; 3 | void swap(some_big_object& lhs,some_big_object& rhs); 4 | 5 | class X 6 | { 7 | private: 8 | some_big_object some_detail; 9 | std::mutex m; 10 | public: 11 | X(some_big_object const& sd):some_big_object(sd){}; 12 | 13 | friend void swap(X& lhs, X& rhs) 14 | { 15 | if(& lhs==& rhs) 16 | return; 17 | std::unique_lock lock_a(lhs.m,std::defer_lock); 18 | std::unique_lock lock_b(rhs.m,std::defer_lock); 19 | std::lock(lock_a,lock_b); 20 | swap(lhs.some_detail,rhs.some_detail); 21 | } 22 | }; -------------------------------------------------------------------------------- /chapter07/example7_04.cpp: -------------------------------------------------------------------------------- 1 | //当pop()中没有线程时回收结点 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | std::atomic threads_in_pop; //原子变量 7 | void try_reclaim(node* old_head); //试着回收内存 7.5有详细的实现 8 | public: 9 | std::shared_ptr pop() 10 | { 11 | ++threads_in_pop; //在做任何其他事情前增加计数 12 | node* old_head=head.load(); 13 | while(old_head && !head.compare_exchange_weak(old_head,old_head->next)); 14 | std::shared_ptr res; 15 | if(old_head) 16 | { 17 | res.swap(old_head->data); //如果可能,回收删除的结点 18 | } 19 | try_reclaim(old_head); //从结点中提取数据,而不是复制指针 20 | return res; 21 | } 22 | }; -------------------------------------------------------------------------------- /chapter07/example7_17.cpp: -------------------------------------------------------------------------------- 1 | //释放无锁队列的结点引用 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | struct node 7 | { 8 | void release_ref() 9 | { 10 | node_counter old_counter=count.load(std::memory_order_relaxed); 11 | node_counter new_counter; 12 | do 13 | { 14 | new_counter=old_counter; 15 | --new_counter.internal_count; 16 | } 17 | while(!count.compare_exchange_strong(old_counter,new_counter, 18 | std::memory_order_acquire,std::memory_order_relaxed)); 19 | 20 | if(!new_counter.internal_count && !new_counter.external_counters) 21 | { 22 | delete this; 23 | } 24 | } 25 | }; 26 | }; -------------------------------------------------------------------------------- /chapter02/example2_6.cpp: -------------------------------------------------------------------------------- 1 | //scoped_thread和示例用法,一旦所有权转移到该对象其他线程就不就可以动它了,保证退出一个作用域线程完成 2 | class scoped_thread 3 | { 4 | std::thread t; 5 | public: 6 | explicit scoped_thread(std::thread t_): 7 | t(std::move(t_)) 8 | { 9 | if(!t.joinable()) 10 | throw std::logic_error("No thread"); 11 | } 12 | ~scoped_thread() 13 | { 14 | t.join(); 15 | } 16 | scoped_thread(scoped_thread const&)=delete; 17 | scoped_thread& operator=(scoped_thread const&)=delete; 18 | }; 19 | 20 | struct func; 21 | 22 | void f() 23 | { 24 | int some_local_state; 25 | scoped_thread t(std::thread(func(some_local_state))); 26 | 27 | do_something_in_current_thread(); 28 | } -------------------------------------------------------------------------------- /chapter05/example5_05.cpp: -------------------------------------------------------------------------------- 1 | //放松操作有极少数的排序要求 2 | #include 3 | #include 4 | #include 5 | 6 | std::atomic x,y; 7 | std::atomic z; 8 | 9 | void write_x_then_y() 10 | { 11 | x.store(true,std::memory_order_relaxed); 12 | y.store(true,std::memory_order_relaxed); 13 | } 14 | 15 | void read_y_then_x() 16 | { 17 | 
while(!y.load(std::memory_order_relaxed)); 18 | if(x.load(std::memory_order_relaxed)) 19 | ++z; 20 | } 21 | 22 | int main() 23 | { 24 | x=false; 25 | y=false; 26 | z=0; 27 | std::thread a(write_x_then_y); 28 | std::thread b(read_y_then_x); 29 | a.join(); 30 | b.join(); 31 | assert(z.load()!=0); 32 | } -------------------------------------------------------------------------------- /chapter08/example8_12.cpp: -------------------------------------------------------------------------------- 1 | //屏障(barrier),一种同步方法使得线程等待直到要求的线程已经到达了屏障。 2 | //一个简单的屏障类 3 | class barrier 4 | { 5 | unsigned const count; 6 | std::atomic spaces; 7 | std::atomic generation; 8 | public: 9 | explicit barrier(unsigned count_): 10 | count(count_),spaces(count),generation(0) 11 | {} 12 | void wait() 13 | { 14 | unsigned const my_generation=generation; 15 | if(!--spaces) //到达0时,通过共享的generation变量通知其它线程行动 16 | { 17 | spaces=count; 18 | ++generation; 19 | } 20 | else 21 | { 22 | while(generation==my_generation) //如果空闲的spaces的数量没有到达零,你就必须等待,简单的自旋锁 23 | std::this_thread::yield(); 24 | } 25 | } 26 | }; -------------------------------------------------------------------------------- /chapter02/example2_3.cpp: -------------------------------------------------------------------------------- 1 | //使用RAII等待线程完成 2 | class thread_guard 3 | { 4 | std::thread& t; 5 | public: 6 | explicit thread_guard(std::thread& t_): 7 | t(t_) 8 | {} 9 | ~thread_guard() 10 | { 11 | if(t.joinable()) 12 | { 13 | t.join(); 14 | } 15 | } 16 | thread_guard(thread_guard const&)=delete; 17 | thread_guard& operator=(thread_guard const&)=delete; 18 | }; 19 | 20 | struct func; //2_1 21 | 22 | void f() 23 | { 24 | int some_local_state = 0; 25 | func my_func(some_local_state); 26 | std::thread t(my_func); 27 | thread_guard g(t); 28 | 29 | do_something_in_current_thread(); 30 | } //在当前线程的执行到达f末尾时,局部对象会按照构造函数的逆序被销毁,因此,thread_guard对象g首先被销毁。 31 | -------------------------------------------------------------------------------- /chapter09/example9_09.cpp: -------------------------------------------------------------------------------- 1 | //interruptible_thread的基本实现 2 | class interrupt_flag 3 | { 4 | public: 5 | void set(); 6 | bool is_set() const; 7 | }; 8 | thread_local interrupt_flag this_thread_interrupt_flag; 9 | 10 | class interruptible_thread 11 | { 12 | std::thread internal_thread; 13 | interrupt_flag* flag; 14 | public: 15 | template 16 | interrupt_thread(FunctionType f) 17 | { 18 | std::promise p; 19 | internal_thread=std::thread([f,&p]{ 20 | p.set_value(&this_thread_interrupt_flag); 21 | f(); 22 | }); 23 | } 24 | void interrupt() 25 | { 26 | if(flag) 27 | { 28 | flag->set(); 29 | } 30 | } 31 | } -------------------------------------------------------------------------------- /chapter03/example3_6.cpp: -------------------------------------------------------------------------------- 1 | //在交换操作中使用std::lock()和std::lock_guard 2 | class some_big_object; 3 | void swap(some_big_object& lhs,some_big_object& rhs); 4 | 5 | class X 6 | { 7 | private: 8 | some_big_object some_detail; 9 | std::mutex m; 10 | public: 11 | X(some_big_object const& sd):some_detail(sd){} 12 | 13 | friend void swap(X& lhs, X& rhs) 14 | { 15 | if(&lhs == &rhs) 16 | return; 17 | std::lock(lhs.m,rhs.m); //std::lock函数可以同时锁定两个或更多的互斥元,而没有死锁的风险。 18 | std::lock_guard lock_a(lhs.m,std::adopt_lock); 19 | //额外提供一个adopt_lock给互斥元,沿用互斥元上已有锁的所有权 20 | std::lock_guard lock_b(rhs.m,std::adopt_lock); 21 | swap(lhs.some_detail,rhs.some_detail); 22 | } 23 | } 
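// The dump strips the <std::mutex> template arguments in the listing above, and the class
// definition is missing its closing ';'. A minimal sketch of the same locking pattern with
// the brackets restored (lock_both is a hypothetical helper, and <mutex> must be included;
// a and b must be two different mutexes):
void lock_both(std::mutex& a, std::mutex& b)
{
    std::lock(a, b);                                          // lock both mutexes without risk of deadlock
    std::lock_guard<std::mutex> guard_a(a, std::adopt_lock);  // adopt ownership of the already-held locks
    std::lock_guard<std::mutex> guard_b(b, std::adopt_lock);
}   // the guards release both mutexes here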
-------------------------------------------------------------------------------- /chapter03/example3_3.cpp: -------------------------------------------------------------------------------- 1 | //std::stack 容器适配器的接口 2 | template > 3 | class stack 4 | { 5 | public: 6 | explicit stack(const Container&); 7 | explicit stack(const Container&& = Container()); 8 | template explicit stack(const Alloc&); 9 | template stack(const Container&, const Alloc&); 10 | template stack(Container&&, const Alloc&); 11 | template stack(stack&&, const Alloc&); //这里应该移动构造函数 12 | bool empty() const; 13 | size_t size() const; 14 | T& top(); 15 | T const& top() const; 16 | void push(T const&); 17 | void push(T&&); 18 | void pop(); 19 | void swap(stack&&); 20 | } 21 | //对于共享的stack对象,这个调用序列不再安全 -------------------------------------------------------------------------------- /chapter08/example8_08.cpp: -------------------------------------------------------------------------------- 1 | //使用std::async的std::for_each的并行版本 2 | template 3 | void parallel_for_each(Iterator first,Iterator last,Func f) 4 | { 5 | unsigned long const length=std::distance(first,last); 6 | 7 | if(!length) 8 | return; 9 | 10 | unsigned long const min_per_thread=25; 11 | 12 | if(length < (2*min_per_thread)) 13 | { 14 | std::for_each(first,last,f); 15 | } 16 | else 17 | { 18 | Iterator const mid_point=first+length/2; 19 | //异步运行前半部分 20 | std::future first_half=std::async(¶llel_for_each,first,mid_point,f); 21 | parallel_for_each(mid_point,last,f); 22 | //使用std::async和get()成员函数std::future提供了异常传播语义 23 | first_half.get(); 24 | } 25 | } -------------------------------------------------------------------------------- /chapter03/example3_2.cpp: -------------------------------------------------------------------------------- 1 | //意外的传出对受保护数据的引用 2 | class some_data 3 | { 4 | int a; 5 | std::string b; 6 | public: 7 | void do_something(); 8 | }; 9 | 10 | class data_warpper 11 | { 12 | private: 13 | some_data data; 14 | std::mutex m; 15 | public: 16 | template 17 | void process_data(Function func) 18 | { 19 | std::lock_guard l(m); 20 | func(data); //传递“受保护的”数据到用户提供的函数 21 | } 22 | }; 23 | some_data* unprotected; 24 | 25 | void malicious_function(some_data& protected_data) 26 | { 27 | unprotected = &protected_data; 28 | } 29 | 30 | data_warpper x; 31 | 32 | void foo() 33 | { 34 | x.protected_data(malicious_function); //传入一个恶意函数 35 | unprotected->do_something(); //对受保护的数据进行未受保护的访问 36 | } -------------------------------------------------------------------------------- /chapter03/example3_12.cpp: -------------------------------------------------------------------------------- 1 | //使用std::call_once的线程安全的类成员延迟初始化 2 | class X 3 | { 4 | private: 5 | connection_info connection_details; 6 | connection_handle connection; 7 | std::once_flag connection_init_flag; 8 | 9 | void open_connection() 10 | { 11 | connection=connection_manager.open(connection_details); 12 | } 13 | public: 14 | X(connection_info const& connection_details_): 15 | connection_details(connection_details_) 16 | {} 17 | void send_data(data_packet const& data) 18 | { 19 | std::call_once(connection_init_flag,&X::open_connection,this); 20 | connection_send_data(data); 21 | } 22 | data_packet receive_data() 23 | { 24 | std::call_once(connection_init_flag,&X::open_connection,this); 25 | return connection.receive_data(); 26 | } 27 | }; -------------------------------------------------------------------------------- /chapter04/example4_12.cpp: 
-------------------------------------------------------------------------------- 1 | //快速排序的顺序实现 2 | template 3 | std::list sequential_quick_sort(std::list input) 4 | { 5 | if(input.empty()) 6 | { 7 | return input; 8 | } 9 | std::list result; 10 | result.splice(result.begin(),input,input.begin()); 11 | T const& pivot=*result.begin(); 12 | 13 | auto divide_point=std::partition(input.begin(),input.end(),[&](T const& t){return t lower_part; 16 | lower_part.splice(lower_part.end(),input,input.begin(),divide_point); 17 | 18 | auto new_lower(sequential_quick_sort(std::move(lower_part))); 19 | auto new_higher(sequential_quick_sort(std::move(input))); 20 | 21 | result.splice(result.end(),new_higher); 22 | result.splice(result.begin(),new_lower); 23 | return result; 24 | } -------------------------------------------------------------------------------- /chapter05/example5_10.cpp: -------------------------------------------------------------------------------- 1 | //使用std::memory_order_consume同步数据,用于在原子操作载入指向某数据的指针的场合 2 | struct X 3 | { 4 | int i; 5 | std::string s; 6 | }; 7 | 8 | std::atomic p; 9 | std::atomic a; 10 | 11 | void create_x() 12 | { 13 | X* x=new X; 14 | x->i=42; 15 | x->s="hello"; 16 | a.store(99,std::memory_order_relaxed); 17 | p.store(x,std::memory_order_release); 18 | } 19 | 20 | void use_x() 21 | { 22 | X* x; 23 | while(!(x=p.load(std::memory_order_consume))) //对p的存储只发生在依赖p的载入值得表达式之前 24 | std::this_thread::sleep(std::chrono::microseconds(1)); 25 | assert(x->i==42); 26 | assert(x->s=="hello"); 27 | assert(a.load(std::memory_order_relaxed)==99); 28 | } 29 | 30 | int main() 31 | { 32 | std::thread t1(create_x); 33 | std::thread t2(use_x); 34 | t1.join(); 35 | t2.join(); 36 | } -------------------------------------------------------------------------------- /chapter06/example6_07.cpp: -------------------------------------------------------------------------------- 1 | //使用锁和等待的线程安全队列:内部与接口 2 | template 3 | class threadsafe_queue 4 | { 5 | private: 6 | struct node 7 | { 8 | std::shared_ptr data; 9 | std::unique_ptr next; 10 | }; 11 | 12 | std::mutex head_mutex; 13 | std::unique_ptr head; 14 | std::mutex tail_mutex; 15 | node* tail; 16 | std::condition_variable data_cond; 17 | public: 18 | threadsafe_queue(): 19 | head(new node),tail(head.get()) 20 | {} 21 | threadsafe_queue(const threadsafe_queue& other)=delete; 22 | threadsafe_queue& operator=(const threadsafe_queue& other)=delete; 23 | 24 | std::shared_ptr try_pop(); 25 | bool try_pop(T& value); 26 | std::shared_ptr wait_and_pop(); 27 | void wait_and_pop(T& value); 28 | void push(T new_value); 29 | void empty(); 30 | }; -------------------------------------------------------------------------------- /chapter07/example7_16.cpp: -------------------------------------------------------------------------------- 1 | //从使用引用计数tail的无锁队列中将结点出队列 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | struct node 7 | { 8 | void release_ref(); 9 | }; 10 | public: 11 | std::unique_ptr pop() 12 | { 13 | counted_node_ptr old_head=head.load(std::memory_order_relaxed); 14 | for(;;) 15 | { 16 | increase_external_count(head,old_head); 17 | node* const ptr=old_head.ptr; 18 | if(ptr==tail.load().ptr) 19 | { 20 | ptr->release_ref(); 21 | return std::unique_ptr(); 22 | } 23 | if(head.compare_exchange_strong(old_head,ptr->next)) 24 | { 25 | T* const res=ptr->data.exchange(nullptr); 26 | free_external_counter(old_head); 27 | return std::unique_ptr(res); 28 | } 29 | ptr->release_ref(); 30 | } 31 | } 32 | }; 
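// Reading this listing from the flattened dump: angle-bracket text is stripped, so pop()
// presumably returns std::unique_ptr<T> and head/tail are presumably
// std::atomic<counted_node_ptr>. The protocol the code shows: increase_external_count()
// bumps head's external count before the node is dereferenced; if head and tail refer to
// the same node the queue is empty and the reference is dropped via release_ref();
// otherwise a successful compare_exchange_strong() on head claims the node, its data
// pointer is exchanged out, and free_external_counter() folds the external count back into
// the node's internal count. Threads that lose the race call release_ref() and retry.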
-------------------------------------------------------------------------------- /chapter04/example4_01.cpp: -------------------------------------------------------------------------------- 1 | //清单4.1 使用std::condition_variable等待数据 2 | std::mutex mut; 3 | std::queue data_queue; 4 | std::condition_variable data_cond; 5 | 6 | void data_preparation_thread() 7 | { 8 | while(more_data_to_prepare()) 9 | { 10 | data_chunk const data=prepare_data(); 11 | std::lock_guard lk(mut); 12 | data_queue.push(data); 13 | data_cond.notify_one(); 14 | } 15 | } 16 | 17 | void data_processing_thread() 18 | { 19 | while(true) 20 | { 21 | std::unique_lock lk(mut); 22 | //lambda函数编写一个匿名函数作为表达式的一部分,[]作为其引导符 23 | data_cond.wait(lk,[]{return !data_queue.empty();}); 24 | data_chunk data=data_queue.front(); 25 | data_queue.pop(); 26 | lk.unlock(); 27 | process(data); 28 | if(is_last_chunk(data)) 29 | break; 30 | } 31 | } -------------------------------------------------------------------------------- /chapter08/example8_05.cpp: -------------------------------------------------------------------------------- 1 | //使用std::async的std::accumulate的异常安全并行版本 2 | template 3 | T parallel_accumulate(Iterator first,Iterator last,T init) 4 | { 5 | unsigned long const length=std::distance(first,last); 6 | unsigned long const max_chunk_size=25; 7 | if(length<=max_chunk_size) 8 | { 9 | return std::accumulate(first,last,init); 10 | } 11 | else 12 | { 13 | Iterator mid_point=first; 14 | std::advance(mid_point,length/2); 15 | std::future first_half_result=std::async(parallel_accumulate,first,mid_point,init); 16 | 17 | T second_half_result=parallel_accumulate(mid_point,last,T()); 18 | return first_half_result.get()+second_half_result; //出异常时,future会销毁,get()会再次抛出异常 19 | } 20 | } 21 | //这个版本使用递归将数据划分为块而不是重新计算将数据划分为块,但是它比之前的版本要简单一些,并且是异常安全的 -------------------------------------------------------------------------------- /chapter03/example3_13.cpp: -------------------------------------------------------------------------------- 1 | //使用boost::share_mutex保护数据结构 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | class dns_entry; 8 | 9 | class dns_cache 10 | { 11 | std::map entries; 12 | mutable boost::shared_mutex entry_mutex; 13 | public: 14 | dns_entry find_entry(std::string const& domain) const 15 | { 16 | boost::shared_lock lk(entry_mutex); 17 | std::map::const_iterator const it = entries.find(domain); 18 | return (it==entries.end())?dns_entry():it->second; 19 | } 20 | void update_or_add_entry(std::string const& domain,dns_entry const& dns_details) 21 | { 22 | std::lock_guard lk(entry_mutex); 23 | entries[domain]=dns_details; 24 | } 25 | }; -------------------------------------------------------------------------------- /chapter05/example5_13.cpp: -------------------------------------------------------------------------------- 1 | //在非原子操作上强制顺序 2 | #include 3 | #include 4 | #include 5 | 6 | bool x=false; //x现在是一个普通的非原子变量 7 | std::atomic y; 8 | std::atomic z; 9 | 10 | void write_x_then_y() 11 | { 12 | x=true; //1.在屏障前存储x 13 | std::atomic_thread_fence(std::memory_order_release); 14 | y.store(true,std::memory_order_relaxed); //2.在屏障后存储y 15 | } 16 | 17 | void read_y_then_x() 18 | { 19 | while(!y.load(std::memory_order_relaxed)); //等待到你看见来自2的写入 20 | std::atomic_thread_fence(std::memory_order_acquire); 21 | if(x) //将读取1写入的值 22 | ++z; 23 | } 24 | 25 | int main() 26 | { 27 | x=false; 28 | y=false; 29 | z=0; 30 | std::thread a(write_x_then_y); 31 | std::thread b(read_y_then_x); 32 | a.join(); 33 | b.join(); 34 | assert(z.load()!=0); 
//此断言不会触发 35 | } -------------------------------------------------------------------------------- /chapter04/example4_02.cpp: -------------------------------------------------------------------------------- 1 | //清单4.2 std::queue接口 2 | template > 3 | class queue 4 | { 5 | public: 6 | explicit queue(const Container&); 7 | explicit queue(Container&& = Container()); 8 | 9 | template explicit queue(const Alloc&); 10 | template queue(const Container&, const Alloc&); 11 | template queue(Container&&, const Alloc&); 12 | template queue(queue&&, const Alloc&); 13 | 14 | void swap(queue& q); 15 | 16 | bool empty() const; 17 | size_type size() const; 18 | 19 | T& front(); 20 | const T& front() const; 21 | T& back(); 22 | const T& back() const; 23 | 24 | void push(const T& x); 25 | void push(T&& x); 26 | 27 | void pop(); 28 | template void emplace(Args&&... args); 29 | }; -------------------------------------------------------------------------------- /chapter07/example7_03.cpp: -------------------------------------------------------------------------------- 1 | //缺少结点的无锁栈 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | struct node 7 | { 8 | std::shared_ptr data; //data现在由指针持有 9 | node* next; 10 | 11 | node(T const& data_): 12 | data(std::make_shared(data_)) //为新分配的T创建std::shared_ptr 13 | {} 14 | }; 15 | 16 | std::atomic head; 17 | public: 18 | void push(T const& data) 19 | { 20 | node* const new_node=new node(data); 21 | new_node->next=head.load(); 22 | while(!head.compare_exchange_weak(new_node->next,new_node)); 23 | } 24 | std::shared_ptr pop() 25 | { 26 | node* old_head=head.load(); 27 | while(old_head && !head.compare_exchange_weak(old_head,old_head->next)); //在解引用之前检查old_head不是一个空指针 28 | return old_head ? old_head->data : std::shared_ptr(); 29 | } 30 | }; -------------------------------------------------------------------------------- /chapter04/example4_16.cpp: -------------------------------------------------------------------------------- 1 | //简单ATM实现的getting_pin状态函数 2 | void atm::getting_pin() 3 | { 4 | incoming.wait() 5 | //每一次对handle()的调用将消息类型指定为模板参数,然后 6 | //接受特定消息类型作为参数的Lambda函数。 7 | .handle( 8 | [&](digit_pressed const& msg) 9 | { 10 | unsigned const pin_length=4; 11 | pin+=msg.digit; 12 | if(pin.length()==pin_length) 13 | { 14 | bank.send(verify_pin(account,pin,incoming)); 15 | state=&atm::verifying_pin; 16 | } 17 | } 18 | ) 19 | 20 | .handle( 21 | [&](clear_last_prossed const& msg) 22 | { 23 | if(!pin.empty()) 24 | { 25 | pin.resize(pin.length()-1); 26 | } 27 | } 28 | ) 29 | 30 | .handle( 31 | [&](cancel_pressed const& msg) 32 | { 33 | state=&atm::done_processing; 34 | } 35 | ); 36 | } -------------------------------------------------------------------------------- /chapter05/example5_12.cpp: -------------------------------------------------------------------------------- 1 | //松散操作可以使用屏障来排序 2 | #include 3 | #include 4 | #include 5 | 6 | std::atomic x,y; 7 | std::atomic z; 8 | 9 | void write_x_then_y() 10 | { 11 | x.store(true,std::memory_order_relaxed); 12 | std::atomic_thread_fence(std::memory_order_release); //释放屏障 13 | y.store(true,std::memory_order_relaxed); 14 | } 15 | 16 | void read_y_then_x() 17 | { 18 | while(!y.load(std::memory_order_relaxed)); 19 | std::atomic_thread_fence(std::memory_order_acquire); //获取屏障 20 | //对x的存储发生在从x的载入之前,所以读取的值必然是true 21 | if(x.load(std::memory_order_relaxed)) 22 | ++z; 23 | } 24 | 25 | int main() 26 | { 27 | x=false; 28 | y=false; 29 | z=0; 30 | std::thread a(write_x_then_y); 31 | std::thread b(read_y_then_x); 32 | a.join(); 33 | 
b.join(); 34 | assert(z.load()!=0); 35 | } -------------------------------------------------------------------------------- /chapter07/example7_19.cpp: -------------------------------------------------------------------------------- 1 | //在无锁队列中释放结点的外部计数 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | static void free_external_counter(counted_node_ptr& old_node_ptr) 7 | { 8 | node* const ptr=old_node_ptr.ptr; 9 | int const count_increase=old_node_ptr.external_count-2; 10 | 11 | node_counter old_counter=ptr->count.load(std::memory_order_relaxed); 12 | 13 | node_counter new_counter; 14 | do 15 | { 16 | new_counter=old_counter; 17 | --new_counter.external_counters; 18 | new_counter.internal_count+=count_increase; 19 | } 20 | while(!ptr->count.compare_exchange_strong( 21 | old_counter,new_counter, 22 | std::memory_order_acquire,std::memory_order_relaxed)); 23 | 24 | if(!new_counter.internal_count && !new_counter.external_counters) 25 | { 26 | delete ptr; 27 | } 28 | } 29 | }; -------------------------------------------------------------------------------- /chapter04/example4_13.cpp: -------------------------------------------------------------------------------- 1 | //使用future的并行快速排序 2 | template 3 | std::list parallel_quick_sort(std::list input) 4 | { 5 | if(input.empty()) 6 | { 7 | return input; 8 | } 9 | std::list result; 10 | result.splice(result.begin(),input,input.begin()); 11 | T const& pivot=*result.begin(); 12 | 13 | auto divide_point=std::partition(input.begin(),input.end(),[&](T const& t){return t lower_part; 16 | lower_part.splice(lower_part.end(),input,input.begin(),divide_point); 17 | 18 | std::future > new_lower(std::async(¶llel_quick_sort,std::move(lower_part))); 19 | 20 | auto new_higher(parallel_quick_sort(std::move(input))); 21 | //splice剪切加粘贴 22 | result.splice(result.end(),new_higher); 23 | result.splice(result.begin(),new_lower.get()); 24 | return result; 25 | } -------------------------------------------------------------------------------- /chapter05/example5_08.cpp: -------------------------------------------------------------------------------- 1 | //获取-释放操作可以在松散操作中施加顺序 2 | #include 3 | #include 4 | #include 5 | 6 | std::atomic x,y; 7 | std::atomic z; 8 | 9 | void write_x_then_y() 10 | { 11 | //对x的存储发生在对y的存储之前,因为他们在同一个线程 12 | x.store(true,std::memory_order_relaxed); //旋转,等待y被设为true 13 | //relaxed松散顺序,release获得-释放顺序 14 | y.store(true,std::memory_order_release); 15 | } 16 | 17 | void read_y_then_x() 18 | { 19 | //对y的加载将会看到由存储写下的true。因为存储使用memory_order_release并且载入 20 | //使用memory_order_acquire,存储与载入同步。 21 | while(!y.load(std::memory_order_aquire)); 22 | if(x.load(std::memory_order_relaxed)) 23 | ++z; 24 | } 25 | 26 | int main() 27 | { 28 | x=false; 29 | y=false; 30 | z=0; 31 | std::thread a(write_x_then_y); 32 | std::thread b(read_y_then_x); 33 | a.join(); 34 | b.join(); 35 | assert(z.load()!=0); 36 | } -------------------------------------------------------------------------------- /chapter05/example5_09.cpp: -------------------------------------------------------------------------------- 1 | //使用获取和释放顺序的传递性同步(利用了线程间happen-before的定义) 2 | std::atomic data[5]; 3 | std::atomic sync1(false),sync2(false); 4 | 5 | void thread_1() 6 | { 7 | data[0].store(42,std::memory_order_relaxed); 8 | data[1].store(97,std::memory_order_relaxed); 9 | data[2].store(17,std::memory_order_relaxed); 10 | sync1.store(true,std::memory_order_release); //设置sync1 11 | } 12 | 13 | void thread_2() 14 | { 15 | while(!sync1.load(std::memory_order_acquire)); //循环直到sync1被设置 16 | 
sync2.store(true,std::memory_order_release); //设置sync2 17 | } 18 | 19 | void thread_3() 20 | { 21 | while(!sync2.load(std::memory_order_acquire)); //循环直到sync2被设置 22 | assert(data[0].load(std::memory_order_relaxed)==42); 23 | assert(data[1].load(std::memory_order_relaxed)==97); 24 | assert(data[2].load(std::memory_order_relaxed)==17); 25 | } -------------------------------------------------------------------------------- /chapter07/example7_09.cpp: -------------------------------------------------------------------------------- 1 | //使用无锁的std::shared_ptr<>的无锁栈实现 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | struct node 7 | { 8 | std::shared_ptr data; 9 | std::shared_ptr next; 10 | 11 | node(T const& data_): 12 | data(std::make_shared(data_)) 13 | {} 14 | }; 15 | 16 | std::shared_ptr head; 17 | public: 18 | void push(T const& data) 19 | { 20 | std::shared_ptr const new_node=std::make_shared(data); 21 | new_node->next=head.load(); 22 | while(!std::atomic_compare_exchange_weak(&head,&new_node->next,new_node)) 23 | } 24 | 25 | std::shared_ptr pop() 26 | { 27 | std::shared_ptr old_head=std::atomic_load(&head); 28 | while(old_head && !std::atomic_compare_exchange_weak(&head,&old_head,old_head->next)); 29 | return old_head ? old_head->data : std::shared_ptr(); 30 | } 31 | }; -------------------------------------------------------------------------------- /chapter07/example7_20.cpp: -------------------------------------------------------------------------------- 1 | //修改pop()来允许帮助push() 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | struct node 7 | { 8 | std::atomic data; 9 | std::atomic count; 10 | std::atomic next; 11 | }; 12 | public: 13 | std::unique pop() 14 | { 15 | counted_node_ptr old_head=head.load(std::memory_order_relaxed); 16 | for(;;) 17 | { 18 | increase_external_count(head,old_head); 19 | node* const ptr=old_head.ptr; 20 | if(ptr==tail.load().ptr) 21 | { 22 | return std::unique_ptr(); 23 | } 24 | counted_node_ptr next=ptr->next.load(); 25 | if(head.compare_exchange_strong(old_head,next)) 26 | { 27 | T* const res=ptr->data.exchange(nullptr); 28 | free_external_counter(old_head); 29 | return std::unique_ptr(res); 30 | } 31 | ptr->release_ref(); 32 | } 33 | } 34 | }; -------------------------------------------------------------------------------- /chapter03/example3_7.cpp: -------------------------------------------------------------------------------- 1 | //使用锁层次来避免死锁 2 | hierarchical_mutex high_level_mutex(10000); 3 | hierarchical_mutex low_level_mutex(5000); 4 | 5 | int do_low_level_stuff(); 6 | 7 | int low_level_func() 8 | { 9 | std::lock_guard lk(low_level_mutex); 10 | return do_low_level_stuff(); 11 | } 12 | 13 | void high_level_stuff(int some_param); 14 | 15 | void high_level_func() 16 | { 17 | std::lock_guard lk(high_level_mutex); 18 | high_level_stuff(low_level_func()); 19 | } 20 | 21 | void thread_a() //遵守规则,运行良好 22 | { 23 | high_level_func(); 24 | } 25 | 26 | hierarchical_mutex other_mutex(100); 27 | void do_other_stuff(); 28 | 29 | void other_stuff() 30 | { 31 | high_level_func(); 32 | do_other_stuff(); 33 | } 34 | 35 | //违反层次,100<1000 36 | void thread_b() 37 | { 38 | std::lock_guard lk(other_mutex); 39 | other_stuff(); 40 | } -------------------------------------------------------------------------------- /chapter07/example7_10.cpp: -------------------------------------------------------------------------------- 1 | //在使用两个引用计数的无锁栈中入栈结点 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | struct node; 7 | 8 | struct counted_node_ptr 9 
| { 10 | int external_count; 11 | node* ptr; 12 | }; 13 | 14 | struct node 15 | { 16 | std::shared_ptr data; 17 | std::atomic internal_count; 18 | counted_node_ptr next; 19 | 20 | node(T const& data_): 21 | data(std::make_shared(data_)), 22 | internal_count(0) 23 | {} 24 | }; 25 | 26 | std::atomic head; 27 | 28 | public: 29 | 30 | ~lock_free_stack() 31 | { 32 | while(pop()); 33 | } 34 | 35 | void push(T const& data) 36 | { 37 | counted_node_ptr new_node; 38 | new_node.ptr=new node(data); 39 | new_node.external_count=1; 40 | new_node.ptr->next.ptr->next=head.load(); 41 | while(!head.compare_exchange_weak(new_node.ptr->next,new_node)); 42 | } 43 | }; -------------------------------------------------------------------------------- /chapter05/example5_04.cpp: -------------------------------------------------------------------------------- 1 | //顺序一直隐含着总体顺序 2 | #include 3 | #include 4 | #include 5 | 6 | std::atomic x,y; 7 | std::atomic z; 8 | 9 | void write_x() 10 | { 11 | x.store(true,std::memory_order_seq_cst); 12 | } 13 | 14 | void write_y() 15 | { 16 | y.store(true,std::memory_order_seq_cst); 17 | } 18 | 19 | void read_x_then_y() 20 | { 21 | while(!x.load(std::memory_order_seq_cst)); 22 | if(y.load(std::memory_order_seq_cst)) 23 | ++z; 24 | } 25 | 26 | void read_y_then_x() 27 | { 28 | while(!y.load(std::memory_order_seq_cst)); 29 | if(x.load(std::memory_order_seq_cst)) 30 | ++z; 31 | } 32 | 33 | int main() 34 | { 35 | x=false; 36 | y=false; 37 | z=0; 38 | std::thread a(write_x); 39 | std::thread b(write_y); 40 | std::thread c(read_x_then_y); 41 | std::thread d(read_y_then_x); 42 | a.join(); 43 | b.join(); 44 | c.join(); 45 | d.join(); 46 | assert(z.load()!=0); 47 | } -------------------------------------------------------------------------------- /chapter05/example5_07.cpp: -------------------------------------------------------------------------------- 1 | //获取-释放并不意味着总体排序 2 | #include 3 | #include 4 | #include 5 | 6 | std::atomic x,y; 7 | std::atomic z; 8 | 9 | void write_x() 10 | { 11 | x.store(true,std::memory_order_release); 12 | } 13 | 14 | void write_y() 15 | { 16 | y.store(true,std::memory_order_release); 17 | } 18 | 19 | void read_x_then_y() 20 | { 21 | while(!x.load(std::memory_order_acquire)); 22 | if(y.load(std::memory_order_acquire)) 23 | ++z; 24 | } 25 | 26 | void read_y_then_x() 27 | { 28 | while(!y.load(std::memory_order_acquire)); 29 | if(x.load(std::memory_order_acquire)) 30 | ++z; 31 | } 32 | 33 | int main() 34 | { 35 | x=false; 36 | y=false; 37 | z=0; 38 | std::thread a(write_x); 39 | std::thread b(write_y); 40 | std::thread c(read_x_then_y); 41 | std::thread d(read_y_then_x); 42 | a.join(); 43 | b.join(); 44 | c.join(); 45 | d.join(); 46 | assert(z.load()!=0); 47 | } -------------------------------------------------------------------------------- /chapter04/example4_10.cpp: -------------------------------------------------------------------------------- 1 | //使用promise在单个线程中处理多个连接 2 | #include 3 | 4 | void process_connections(connection_set& connections) 5 | { 6 | while(!done(connections)) 7 | { 8 | for(connection_iterator connection=connections.begin(),end=connections.end(); 9 | connection!=end; 10 | ++connection) 11 | { 12 | if(connection->has_incoming_data()) 13 | { 14 | data_packet data=connection->incoming(); 15 | std::promise& p=connection->get_promise(data.id); 16 | p.set_value(data.payload); 17 | } 18 | if(connection->has_outcoming_data()) 19 | { 20 | outgoing_packet data=connection->top_of_outgoing_queue(); 21 | connection->send(data.payload); 22 | 
data.promise.set_value(true); 23 | } 24 | } 25 | } 26 | } -------------------------------------------------------------------------------- /chapter07/example7_06.cpp: -------------------------------------------------------------------------------- 1 | //使用风险指针的pop()实现 2 | std::shared_ptr pop() 3 | { 4 | std::atomic& hp=get_hazard_pointer_for_current_thread(); 5 | node* old_head=head.load(); 6 | do 7 | { 8 | node* temp; 9 | //一直循环到你将风险指针设置到head上 10 | do 11 | { 12 | temp=old_head; 13 | hp.store(old_head); 14 | old_head=head.load(); 15 | }while(old_head!=temp); 16 | //设置风险指针放到外部循环,如果比较/交换失败,则重载old_head。 17 | } 18 | while(old_head && !head.compare_exchange_strong(old_head,old_head->next)); 19 | //因为在这个while循环中确实有效,使用weak()会导致不必要地重置风险指针 20 | hp.store(nullptr); //当你完成时清除风险指针 21 | std::shared_ptr res; 22 | if(old_head) 23 | { 24 | res.swap(old_head->data); 25 | //在你删除一个结点前检查风险指针是否引用它 26 | if(outstanding_hazard_pointers_for(old_head)) 27 | { 28 | reclaim_later(old_head); //放在稍后回收的列表中 29 | } 30 | else 31 | { 32 | delete old_head; //立刻删除 33 | } 34 | delete_nodes_with_no_hazards(); 35 | } 36 | return res; 37 | } -------------------------------------------------------------------------------- /chapter04/example4_15.cpp: -------------------------------------------------------------------------------- 1 | //ATM逻辑类的简单实现 2 | struct card_inserted 3 | { 4 | std::string account; 5 | }; 6 | class atm 7 | { 8 | messaging::receiver incoming; 9 | messaging::sender bank; 10 | messaging::sender interface_hardware; 11 | void (atm::*state)(); 12 | 13 | std::string account; 14 | std::string pin; 15 | 16 | void waiting_for_card() 17 | { 18 | interface_hardware.send(display_enter_card()); 19 | //wait如果接收到消息不匹配指定的类型,他将被丢弃。 20 | incoming.wait() 21 | .handle([&](card_inserted const& msg) 22 | { 23 | account=msg.account; 24 | pin=""; 25 | interface_hardware.send(display_enter_pin()); 26 | state=&atm::getting_pin; 27 | } 28 | ); 29 | } 30 | void getting_pin(); 31 | public: 32 | void run() 33 | { 34 | state=&atm::waiting_for_card; 35 | try 36 | { 37 | for(;;) 38 | { 39 | (this->*state)(); 40 | } 41 | } 42 | catch(messaging::close_queue const&) 43 | { 44 | } 45 | } 46 | }; -------------------------------------------------------------------------------- /chapter09/example9_03.cpp: -------------------------------------------------------------------------------- 1 | //使用可等待任务线程池的parallel_accumulate 2 | template 3 | T parallel_accumulate(Iterator first,Iterator last,T init) 4 | { 5 | unsigned long const length=std::distance(first,last); 6 | if(!length) 7 | return init; 8 | 9 | unsigned long const block_size=25; 10 | unsigned long const num_blocks=(length+block_size-1)/block_size; 11 | 12 | std::vector > futures(num_blocks-1); 13 | thread_pool pool; 14 | 15 | Iterator block_start=first; 16 | for(unsigned long i=0;i<(num_blocks-1)++i) 17 | { 18 | Iterator block_end=block_start; 19 | std::advance(block_end,block_size); 20 | futures[i]=pool.submit(accumulate_block()); 21 | block_start=block_end; 22 | } 23 | T last_result=accumulate_block()(block_start,last); 24 | T result=init; 25 | for(unsigned long i=0;i<(num_blocks-1);++i) 26 | { 27 | result+=futures[i].get(); 28 | } 29 | result+=last_result; 30 | return result; 31 | } -------------------------------------------------------------------------------- /chapter04/example4_07.cpp: -------------------------------------------------------------------------------- 1 | //使用std::async来将参数传递给函数 2 | #include 3 | #include 4 | 5 | struct X 6 | { 7 | void foo(int,std::string const&); 8 | 
std::string bar(std::string const&); 9 | }; 10 | X x; 11 | auto f1=std::async(&X::foo,&x,42,"hello"); //调用p->foo(42,"hello"),其中p是&x 12 | auto f2=std::async(&X::bar,x,"goodbye"); //调用tmpx.bar("goodbye"),其中tmpx是x的副本 13 | struct Y 14 | { 15 | double operator()(double); 16 | }; 17 | Y y; 18 | auto f3=std::async(Y(),3.141); //调用tmpy(3.131),其中tm 19 | auto f4=std::async(std::ref(y),2.718); //调用y(2.718), 20 | X baz(X&); 21 | std::async(baz,std::ref(x)); //调用baz(x) 22 | class move_only 23 | { 24 | public: 25 | move_only(); 26 | move_only(move_only&&); 27 | move_only(move_only const&) = delete; 28 | move_only& operator=(move_only&&); 29 | move_only& operator=(move_only const&) = delete; 30 | void operator()(); 31 | };//这个类将拷贝构造函数和赋值构造函数都取消了,只留下移动构造函数 32 | auto f5=std::async(move_only()); //调用tmp(),其中tmp是从std::move(move_only())构造的 -------------------------------------------------------------------------------- /chapter06/example6_01.cpp: -------------------------------------------------------------------------------- 1 | //线程安全栈的类定义 2 | #include 3 | 4 | struct empty_stack: std::exception 5 | { 6 | const char* what() const throw(); 7 | }; 8 | 9 | template 10 | class threadsafe_stack 11 | { 12 | private: 13 | std::stack data; 14 | mutable std::mutex m; 15 | public: 16 | threadsafe_stack(){} 17 | threadsafe_stack(const threadsafe_stack& other) 18 | { 19 | std::lock_guard lock(other.m); 20 | data=other.data; 21 | } 22 | threadsafe_stack& operator=(const threadsafe_stack&) = delete; 23 | 24 | void push(T new_value) 25 | { 26 | std::lock_guard lock(m); 27 | data.push(std::move(new_value)); 28 | } 29 | std::shared_ptr pop() 30 | void pop(T& value) 31 | { 32 | std::lock_guard lock(m); 33 | if(data.empty()) throw empty_stack(); 34 | value=std::move(data.top()); 35 | data.pop(); 36 | } 37 | bool empty() const 38 | { 39 | std::lock_guard lock(m); 40 | return data.empty(); 41 | } 42 | }; -------------------------------------------------------------------------------- /chapter08/example8_06.cpp: -------------------------------------------------------------------------------- 1 | //从任务线程中分离GUI线程 2 | std::thread task_thread; 3 | std::atomic task_cancelled(false); 4 | 5 | void gui_thread() 6 | { 7 | while(true) 8 | { 9 | event_data event=get_event(); 10 | if(event.type == quit) 11 | break; 12 | process(event); 13 | } 14 | 15 | void task() 16 | { 17 | while(!task_complete() && !task_cancelled) 18 | { 19 | do_next_operation(); 20 | } 21 | if(task_cancelled) 22 | { 23 | perform_cleanup(); 24 | } 25 | else 26 | { 27 | post_gui_event(task_complete); 28 | } 29 | } 30 | 31 | void process(event_data const& event) 32 | { 33 | switch(event.type) 34 | { 35 | case start_task: 36 | task_cancelled=false; 37 | task_thread=std::thread(task); 38 | break; 39 | case stop_task: 40 | task_cancelled=true; 41 | task_thread.join(); 42 | break; 43 | case task_complete: 44 | task_thread.join(); 45 | display_results(); 46 | break; 47 | default: 48 | //... 
49 | } 50 | } 51 | } -------------------------------------------------------------------------------- /chapter06/example6_05.cpp: -------------------------------------------------------------------------------- 1 | //使用傀儡结点的简单队列 2 | template 3 | class queue 4 | { 5 | private: 6 | struct node 7 | { 8 | std::shared_ptr data; 9 | std::unique_ptr next; 10 | }; 11 | 12 | std::unique_ptr head; 13 | node* tail; 14 | 15 | public: 16 | queue(): 17 | head(new node),tail(gead.get()) 18 | {} 19 | 20 | queue(const queue& other)=delete; 21 | queue& operator=(const queue& other)=delete; 22 | 23 | std::shared_ptr try_pop() 24 | { 25 | if(head.get()==tail) 26 | { 27 | return std::shared_ptr(); 28 | } 29 | std::shared_ptr const res(head->data); 30 | std::unique_ptr old_head=std::move(head); 31 | head=std::move(old_head->next); 32 | return res; 33 | } 34 | 35 | void push(T new_value) 36 | { 37 | std::shared_ptr new_data(std::make_shared(std::move(new_value))); 38 | std::unique_ptr p(new node); 39 | tail->data=new_data; 40 | node* const new_tail=p.get(); 41 | tail->next=std::move(p); 42 | tail=new_tail; 43 | } 44 | }; -------------------------------------------------------------------------------- /chapter09/example9_13.cpp: -------------------------------------------------------------------------------- 1 | //在后台监视文件系统 2 | std::mutex config_mutex; 3 | std::vector background_threads; 4 | 5 | void background_thread(int disk_id) 6 | { 7 | while(true) 8 | { 9 | interruption_point(); 10 | fs_change fsc=get_fs_changes(disk_id); //检查磁盘变化并且更新索引 11 | if(fsc.has_changes()) 12 | { 13 | update_index(fsc); 14 | } 15 | } 16 | } 17 | 18 | void start_background_processing() 19 | { 20 | background_threads.push_back(interruptible_thread(background_thread.disk_1)); 21 | background_threads.push_back(interruptible_thread(background_thread.disk_2)); 22 | } 23 | 24 | int main() 25 | { 26 | start_background_processing(); //启动的时候,开始运行基础线程 27 | process_gui_until_exit(); //主线程将基础线程与处理GUI一起处理 28 | std::unique_lock lk(config_mutex); 29 | for(unsigned i=0;i 3 | class queue 4 | { 5 | private: 6 | struct node 7 | { 8 | T data; 9 | std::unique_ptr next; 10 | 11 | node(T data_); 12 | data(std::move(data_)) 13 | {} 14 | }; 15 | 16 | std::unique_ptr head; 17 | node* tail; 18 | public: 19 | queue() 20 | {} 21 | queue(const queue& other)=delete; 22 | queue& operator=(const queue& other)=delete; 23 | 24 | std::share_ptr try_pop() 25 | { 26 | if(!head) 27 | { 28 | return std::share_ptr(); 29 | } 30 | std::share_ptr const res(std::make_shared(std::move(head->data))); 31 | std::unique_ptr const old_head=std::move(head); 32 | head=std::move(old_head->next); 33 | return res; 34 | } 35 | 36 | void push(T new_value) 37 | { 38 | std::unique_ptr p(new node(std::move(new_value))); 39 | node* const new_tail=p.get(); 40 | if(tail) 41 | { 42 | tail->next=std::move(p); 43 | } 44 | else 45 | { 46 | head=std::move(p); 47 | } 48 | tail=new_tail; 49 | } 50 | }; -------------------------------------------------------------------------------- /chapter05/example5_11.cpp: -------------------------------------------------------------------------------- 1 | //使用原子操作从队列中读取值 2 | #include 3 | #include 4 | 5 | std::vector queue_data; 6 | std::atomic count; 7 | 8 | void populate_queue() 9 | { 10 | unsigned const number_of_items=20; 11 | queue_data.clear(); 12 | for (unsigned i = 0; i < number_of_items; ++i) 13 | { 14 | queue_data.push_back(i); 15 | } 16 | 17 | count.store(number_of_items,std::memory_order_release); //最初的存储 18 | } 19 | 20 | void consume_queue_items() 21 | { 
22 | while(true) 23 | { 24 | int item_index; 25 | //fetch_sub()是一个具有memory_order_acquire语义的读取,并且存储具有memory_order_release语义,所以存储与载入同步 26 | if((item_index=count.fetch_sub(1,std::memory_order_acquire))<=0) //一个读-修改-写操作 27 | { 28 | wait_for_more_items(); //等待更多的项目 29 | continue; 30 | } 31 | process(queue_data[item_index-1]); //读取queue_data是安全的 32 | } 33 | } 34 | 35 | int main() 36 | { 37 | std::thread a(populate_queue); 38 | std::thread b(consume_queue_items); 39 | std::thread c(consume_queue_items); 40 | a.join(); 41 | b.join(); 42 | c.join(); 43 | } -------------------------------------------------------------------------------- /chapter04/example4_09.cpp: -------------------------------------------------------------------------------- 1 | //使用std::packaged_task在GUI线程上运行代码 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include //这里有move函数 7 | 8 | std::mutex m; 9 | std::deque > tasks; 10 | 11 | bool gui_shutdown_message_received(); 12 | void get_and_process_gui_message(); 13 | 14 | void gui_thread() 15 | { 16 | while(!gui_shutdown_message_received()) 17 | { 18 | get_and_process_gui_message(); 19 | std::packaged_task task; 20 | { 21 | std::lock_guard lk(m); 22 | if(tasks.empty()) 23 | continue; 24 | task=std::move(tasks.front()); //move将对象的状态或对象转移到另一个对象,原来那个对象就为空了。 25 | tasks.pop_front(); 26 | } 27 | task(); 28 | } 29 | } 30 | 31 | std::thread gui_bg_thread(gui_thread); 32 | 33 | template 34 | std::future post_task_for_gui_thread(Func f) 35 | { 36 | std::packaged_task task(f); 37 | std::future res=task.get_future(); 38 | std::lock_guard lk(m); 39 | tasks.push_back(std::move(task)); 40 | return res; 41 | } -------------------------------------------------------------------------------- /chapter06/example6_10.cpp: -------------------------------------------------------------------------------- 1 | //使用锁和等待的线程安全队列:try_pop()和empty() 2 | template 3 | class threadsafe_queue 4 | { 5 | private: 6 | std::unique_ptr try_pop_head() 7 | { 8 | std::lock_guard head_lock(head_mutex); 9 | if(head.get()==get_tail()) 10 | { 11 | return std::unique_ptr(); 12 | } 13 | return pop_head(); 14 | } 15 | 16 | std::unique_ptr try_pop_head(T& value) 17 | { 18 | std::lock_guard head_lock(head_mutex); 19 | if(head.get()==get_tail()) 20 | { 21 | return std::unique_ptr(); 22 | } 23 | value=std::move(*head->data); 24 | return pop_head(); 25 | } 26 | 27 | public: 28 | std::shared_ptr try_pop() 29 | { 30 | std::unique_ptr old_head=try_pop_head(); 31 | return old_head?old_head->data:std::shared_ptr(); 32 | } 33 | 34 | bool try_pop(T& value) 35 | { 36 | std::unique_ptr const old_head=try_pop_head(value); 37 | return old_head; //不知道这里是不是隐式转换 38 | } 39 | 40 | bool empty() 41 | { 42 | std::lock_guard head_lock(head_mutex); 43 | return (head.get()==get_tail()); 44 | } 45 | }; -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 小徐 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included 
in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /chapter09/example9_01.cpp: -------------------------------------------------------------------------------- 1 | //简单的线程池 2 | class thread_pool 3 | { 4 | std::atomic_bool done; 5 | thread_safe_queue > work_queue; 6 | std::vector threads; 7 | join_threads joiner; //join_threads实例会保证在线程池被销毁前所有的线程已经完成 8 | 9 | void work_thread() 10 | { 11 | while(!done) 12 | { 13 | std::function task; 14 | if(work_queue.try_pop(task)) 15 | { 16 | task(); 17 | } 18 | else 19 | { 20 | std::this_thread::yield(); 21 | } 22 | } 23 | } 24 | public: 25 | thread_pool(): 26 | done(false),joiner(threads) 27 | { 28 | unsigned const thread_count=std::thread::hardware_concurrency(); 29 | 30 | try 31 | { 32 | for(unsigned i=0;i 51 | void submit(FunctionType f) 52 | { 53 | work_queue.push(std::function(f)); 54 | } 55 | }; -------------------------------------------------------------------------------- /chapter04/example4_04.cpp: -------------------------------------------------------------------------------- 1 | //从清单4.1中提取push()和wait_and_pop() 2 | #include 3 | #include 4 | #include 5 | 6 | template 7 | class threadsafe_queue 8 | { 9 | private: 10 | std::mutex mut; 11 | std::queue data_queue; 12 | std::condition_variable data_cond; 13 | public: 14 | void push(T new_value) 15 | { 16 | std::lock_guard lk(mut); 17 | data_queue.push(new_value); 18 | data_cond.notify_one(); 19 | } 20 | 21 | void wait_and_pop(T& value) 22 | { 23 | std::unique_lock lk(mut); 24 | //这里在[]加入this,可以从lambda中访问类成员 25 | data_cond.wait(lk, [this] {return !data_queue.empty();}); 26 | value=data_queue.front(); 27 | data_queue.pop(); 28 | } 29 | }; 30 | 31 | threadsafe_queue data_queue; 32 | 33 | void data_preparation_thread() 34 | { 35 | while(more_data_to_prepare()) 36 | { 37 | data_chunk const data = prepare_data(); 38 | data_queue.push(data); 39 | } 40 | } 41 | 42 | void data_processing_thread() 43 | { 44 | while(true) 45 | { 46 | data_chunk data; 47 | data_queue.wait_and_pop(data); 48 | process(data); 49 | if(is_last_chunk(data)) 50 | break; 51 | } 52 | } -------------------------------------------------------------------------------- /chapter10/example10_01.cpp: -------------------------------------------------------------------------------- 1 | //队列上当前调用的push()和pop()的测试例子 2 | void test_concurrent_push_and_pop_on_empty_queue() 3 | { 4 | threadsafe_queue q; //首先,创建空队列,这部分作为通用启动代码 5 | 6 | std::promise go,push_ready,pop_ready; //然后,为所有“就绪”信号创建各自的Promise 7 | std::shared_future ready(go.get_future()); //并为go信号获取一个std::shared_future 8 | 9 | std::future push_done; //创建future来表示线程已经运行结束 10 | std::future pop_done; 11 | //为一场设置go信号二无需等待测试结束,(这是为了将死锁限制在测试代码内部) 12 | try 13 | { 14 | push_done=std::async(std::launch::async, 15 | [&q,ready,&push_ready]() 16 | { 17 | push_ready.set_value(); 18 | ready.wait(); 19 | q.push(42); 20 | }); 21 | pop_done=std::async(std::launch::async, 22 | [&q,ready,&pop_ready]() 23 | { 24 | 
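            // Descriptive note (added): each lambda first signals that its thread is running
            // (push_ready / pop_ready) and then blocks on the shared "go" future, so both
            // operations are released as close to simultaneously as possible and the push/pop
            // race is exercised for real. This test assumes a queue whose pop() returns the
            // popped value itself; with the chapter 6 interface that returns a shared_ptr,
            // the later assertion would need to dereference the result instead (an assumption
            // noted here, not stated in the listing).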
pop_ready.set_value(); 25 | ready.wait(); 26 | return q.pop(); 27 | }); 28 | //上面启动线程, 29 | push_ready.get_future().wait(); 30 | pop_ready.get_future().wait(); 31 | go.set_value(); 32 | 33 | push_done.get(); 34 | assert(pop_done.get()==42); 35 | assert(q.empty()); 36 | } 37 | catch(...) 38 | { 39 | go.set_value(); //你设置go信号来避免任何产生悬挂线程和再次抛出异常的机会 40 | throw; 41 | } 42 | } -------------------------------------------------------------------------------- /chapter09/example9_07.cpp: -------------------------------------------------------------------------------- 1 | //允许任务窃取的基于锁的队列 2 | class work_stealing_queue 3 | { 4 | private: 5 | typedef function_wrapper data_type; 6 | std::deque the_queue; 7 | mutable std::mutex the_mutex; 8 | public: 9 | work_stealing_queue() 10 | {} 11 | 12 | work_stealing_queue(const work_stealing_queue& other)=delete; 13 | work_stealing_queue& operator=(const work_stealing_queue& other)=delete; 14 | 15 | void push(data_type data) 16 | { 17 | std::lock_guard lock(the_mutex); 18 | the_queue.push_front(std::move(data)); 19 | } 20 | 21 | bool empty() const 22 | { 23 | std::lock_guard lock(the_mutex); 24 | return the_queue.empty(); 25 | } 26 | 27 | bool try_pop(data_type& res) 28 | { 29 | std::lock_guard lock(the_mutex); 30 | if(the_queue.empty()) 31 | { 32 | return false; 33 | } 34 | res=std::move(the_queue.front()); 35 | the_queue.pop_front(); 36 | return true; 37 | } 38 | 39 | bool try_steal(data_type& res) 40 | { 41 | std::lock_guard lock(the_mutex); 42 | if(the_queue.empty()) 43 | { 44 | return false; 45 | } 46 | 47 | res=std::move(the_queue.back()); 48 | the_queue.pop_back(); 49 | return true; 50 | } 51 | }; -------------------------------------------------------------------------------- /chapter06/example6_09.cpp: -------------------------------------------------------------------------------- 1 | //使用锁和等待的线程安全队列:wait_and_pop() 2 | template 3 | class threadsafe_queue 4 | { 5 | private: 6 | node* get_tail() 7 | { 8 | std::lock_guard tail_lock(tail_mutex); 9 | return tail; 10 | } 11 | 12 | std::unique_ptr pop_head() 13 | { 14 | std::unique_ptr old_head=std::move(head); 15 | head=std::move(old_head->next); 16 | return old_head; 17 | } 18 | 19 | std::unique_ptr wait_for_data() 20 | { 21 | std::unique_lock head_lock(head_mutex); 22 | data_cond.wait(head_lock,[&]{return head.get()!=get_tail();}); 23 | return std::move(head_lock); 24 | } 25 | 26 | std::unique_ptr wait_pop_head() 27 | { 28 | std::unique_lock head_lock(wait_for_data()); 29 | return pop_head(); 30 | } 31 | 32 | std::unique_ptr wait_pop_head(T& value) 33 | { 34 | std::unique_lock head_lock(wait_for_data()); 35 | value=std::move(*head->data); 36 | return pop_head(); 37 | } 38 | public: 39 | std::shared_ptr wait_and_pop() 40 | { 41 | std::unique_ptr const old_head=wait_pop_head(); 42 | retun old_head->data; 43 | } 44 | 45 | void wait_and_pop(T& value) 46 | { 47 | std::unique_ptr const old_head=wait_pop_head(value); 48 | } 49 | }; -------------------------------------------------------------------------------- /chapter09/example9_05.cpp: -------------------------------------------------------------------------------- 1 | //基于线程池的快速排序的实现 2 | template 3 | struct sorter 4 | { 5 | thread_pool pool; 6 | 7 | std::list do_sort(std::list& chunk_data) 8 | { 9 | if(chunk_data.empty()) 10 | { 11 | return chunk_data; 12 | } 13 | 14 | std::list result; 15 | result.splice(result.begin(),chunk_data,chunk_data.begin()); 16 | T const& partition_val=*result.begin(); 17 | 18 | typename std::list::iterator divide_point= 19 | 
std::partition(chunk_data.begin(),chunk_data.end(),[&](T const& val){return val > new_lower=pool.submit(std::bind(&sorter::do_sort,this,std::move(new_lower_chunk))); 23 | 24 | std::list new_higher(do_sort(chunk_data)); 25 | 26 | result.splice(result.end(),new_higher); 27 | while(!new_lower.wait_for(std::chrono::seconds(0)) == std::future_state::timeout) 28 | { 29 | //执行正在等待的任务 30 | pool.run_pending_task(); 31 | } 32 | 33 | result.splice(result.begin(),new_lower.get()); 34 | return result; 35 | } 36 | }; 37 | 38 | template 39 | std::list parallel_quick_sort(std::list input) 40 | { 41 | if(input.empty()) 42 | { 43 | return input; 44 | } 45 | sorter s; 46 | 47 | return s.do_sort(input); 48 | } 49 | -------------------------------------------------------------------------------- /chapter07/example7_11.cpp: -------------------------------------------------------------------------------- 1 | //使用两个引用计数从无锁栈中出栈一个结点 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | void increase_head_count(counted_node_ptr& old_counter) 7 | { 8 | counted_node_ptr new_counter; 9 | 10 | do 11 | { 12 | new_counter=old_counter; 13 | ++new_counter.external_count; 14 | } 15 | while(!head.compare_exchange_strong(old_counter,new_counter)); 16 | 17 | old_counter.external_count=new_counter.external_count; 18 | } 19 | public: 20 | std::shared_ptr pop() 21 | { 22 | counted_node_ptr old_head=head.load(); 23 | for(;;) 24 | { 25 | increase_head_count(old_head); 26 | node* const ptr=old_head.ptr; 27 | if(!ptr) 28 | { 29 | return std::shared_ptr(); 30 | } 31 | if(head.compare_exchange_strong(old_head,ptr->next)) 32 | { 33 | std::shared_ptr res; 34 | res.swap(ptr->data); 35 | //你增加的值比外部计数的值减少2 36 | int const count_increase=old_head.external_count-2; 37 | // 如果当前引用计数的值为0,那么先前你增加的值(即fetch_add的返回值)就是负数,此时可以删除这个结点 38 | if(ptr->internal_count.fetch_add(count_increase)==(-count_increase)) 39 | { 40 | delete ptr; 41 | } 42 | 43 | return res; 44 | } 45 | else if(ptr->internal_count.fetch_sub(1)==1) 46 | { 47 | delete ptr; 48 | } 49 | } 50 | } 51 | }; -------------------------------------------------------------------------------- /chapter07/example7_07.cpp: -------------------------------------------------------------------------------- 1 | //get_hazard_pointer_for_current_thread()的简单实现 2 | unsigned const max_hazard_pointers=100; 3 | struct hazard_pointer 4 | { 5 | std::atomic id; 6 | std::atomic pointer; 7 | }; 8 | hazard_pointer hazard_pointers[max_hazard_pointers]; 9 | 10 | class hp_owner 11 | { 12 | hazard_pointer* hp; 13 | 14 | public: 15 | hp_owner(hp_owner const&)=delete; 16 | hp_owner& operator=(hp_owner const&)=delete; 17 | 18 | hp_owner(): 19 | hp(nullptr) 20 | { 21 | for (unsigned i = 0; i < max_hazard_pointers; ++i) 22 | { 23 | std::thread::id old_id; 24 | //试着获取风险指针的所有权 25 | if(hazard_pointers[i].id.compare_exchange_strong(old_id,std::this_thread::get_id())) 26 | { 27 | hp=&hazard_pointers[i]; 28 | break; 29 | } 30 | } 31 | if(!hp) 32 | { 33 | throw std::runtime_error("No hazard pointers available"); 34 | } 35 | } 36 | 37 | std::atomic& get_pointer() 38 | { 39 | return hp->pointer; 40 | } 41 | //线程退出,hp_owner实例就被销毁了 42 | ~hp_owner() 43 | { 44 | hp->pointer.store(nullptr); 45 | hp->id.store(std::thread::id()); 46 | } 47 | }; 48 | 49 | std::atomic& get_hazard_pointer_for_current_thread() 50 | { 51 | thread_local static hp_owner hazard; //每个线程有自己的风险指针 52 | return hazard.get_pointer(); 53 | } -------------------------------------------------------------------------------- /chapter08/example8_10.cpp: 
-------------------------------------------------------------------------------- 1 | //使用std::async的并行查找算法的实现 2 | template 3 | Iterator parallel_find_impl(Iterator first,Iterator last,MatchType match,std::atomic& done) 4 | { 5 | try 6 | { 7 | unsigned long const length=std::distance(first,second); 8 | unsigned long const min_per_thread=25; 9 | if(length<(2*min_per_thread)) 10 | { 11 | for(;(first!=last) && !done.load();++first) 12 | { 13 | if(*first==match) 14 | { 15 | done=true; 16 | return first; 17 | } 18 | } 19 | return last; 20 | } 21 | else 22 | { 23 | Iterator const mid_point=first+(length/2); 24 | std::future async_result=std::async(¶llel_find_impl, 25 | mid_point,last,match,std::ref(done)); 26 | Iterator const direct_result=parallel_find_impl(first,mid_point,match,done); 27 | return (direct_result==mid_point)?async_result.get():direct_result; 28 | } 29 | } 30 | catch(...) 31 | { 32 | done=true; 33 | throw; 34 | } 35 | } 36 | 37 | template 38 | Iterator parallel_find(Iterator first,Iterator last,MatchType match) 39 | { 40 | std::atomic done(false); 41 | //线程间共享的标志,需要传递给所有递归调用。从主入口点传递进来的。 42 | return parallel_find_impl(first,last,match,done); 43 | } -------------------------------------------------------------------------------- /chapter08/example8_07.cpp: -------------------------------------------------------------------------------- 1 | //std::for_each的并行版本 2 | template 3 | void parallel_for_each(Iterator first,Iterator last,Func f) 4 | { 5 | unsigned long const length=std::distance(first,last); 6 | 7 | if(!length) 8 | return; 9 | 10 | unsigned long const min_per_thread=25; 11 | unsigned long const max_threads=(length+min_per_thread-1)/min_per_thread; 12 | 13 | unsigned long const hardware_threads=std::thread::hardware_concurrency(); 14 | 15 | unsigned long const num_threads=std::min(hardware_threads!=0?hardware_threads:2,max_threads); 16 | 17 | unsigned long const block_size=length/num_threads; 18 | 19 | std::vector > futures(num_threads-1); 20 | std::vector threads(num_threads-1); 21 | join_threads joiner(threads); 22 | 23 | Iterator block_start=first; 24 | for(unsigned long i=0; i<(num_threads-1);++i) 25 | { 26 | Iterator block_end=block_start; 27 | std::advance(block_end,block_size); 28 | std::packaged_task task([=](){ 29 | std::for_each(block_start,block_end,f); 30 | }); 31 | futures[i]=task.get_future(); 32 | threads[i]=std::thread(std::move(task)); 33 | block_start=block_end; 34 | } 35 | std::for_each(block_start,last,f); 36 | for (unsigned long i = 0; i < (num_threads-1); ++i) 37 | { 38 | //只提供取回工作线程抛出的异常的方法,如果你不希望传递异常,那么你就可以省略它。 39 | futures[i].get(); 40 | } 41 | } -------------------------------------------------------------------------------- /chapter07/example7_05.cpp: -------------------------------------------------------------------------------- 1 | //引用计数的回收机制try_reclaim 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | std::atomic to_be_deleted; 7 | 8 | static void delete_nodes(node* nodes) 9 | { 10 | while(nodes) 11 | { 12 | node* next=nodes->next; 13 | delete nodes; 14 | nodes=next; 15 | } 16 | } 17 | 18 | void try_reclaim(node* old_head) 19 | { 20 | if(threads_in_pop==1) 21 | { 22 | node* nodes_to_delete=to_be_deleted.exchange(nullptr); //列出将要被删除的结点清单 23 | if(!--threads_in_pop) //是pop()中唯一的线程吗? 
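        // Descriptive note (added): the claim-then-decrement order matters here. The
        // exchange(nullptr) above takes ownership of the whole pending list while this thread
        // is still counted in threads_in_pop; if the decrement then reaches zero, no other
        // thread has entered pop() since the list was claimed, so none of the claimed nodes
        // can still be referenced and delete_nodes() below is safe. Otherwise the claimed
        // nodes are simply re-chained onto to_be_deleted.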
24 | { 25 | delete_nodes(nodes_to_delete); 26 | } 27 | else if(nodes_to_delete) //有等待的节点 28 | { 29 | chain_pending_nodes(nodes_to_delete); //将此结点插入到等待删除结点列表的尾部 30 | } 31 | delete old_head; //安全删除刚移动出来的结点 32 | } 33 | else 34 | { 35 | chain_pending_node(old_head); 36 | --threads_in_pop; 37 | } 38 | } 39 | void chain_pending_nodes(node* nodes) 40 | { 41 | node* last=nodes; 42 | while(node* const next=last->next) //跟随下一个指针,链至末尾 43 | { 44 | last=next; 45 | } 46 | chain_pending_nodes(nodes,last); 47 | } 48 | 49 | void chain_pending_nodes(nodes* first,node* last) 50 | { 51 | last->next=to_be_deleted; 52 | while(!to_be_deleted.compare_exchange_weak(last->next,first)); //循环以保证last->next正确 53 | } 54 | 55 | void chain_pending_node(node* n) 56 | { 57 | chain_pending_nodes(n,n); 58 | } 59 | }; 60 | -------------------------------------------------------------------------------- /chapter03/example3_8.cpp: -------------------------------------------------------------------------------- 1 | //简单的分层次互斥元 2 | class hierarchical_mutex 3 | { 4 | std::mutex internal_mutex; 5 | unsigned long const hierarchical_value; 6 | unsigned long previous_hierarchical_value; 7 | //线程局部变量可以在程序中让你为每个线程拥有独立的变量实例,用thread_local关键字标记 8 | static thread_local unsigned long this_thread_hierarchical_value; 9 | 10 | void check_for_hierarchy_violation() 11 | { 12 | if(this_thread_hierarchical_value <= hierarchical_value) 13 | { 14 | throw std::logic_error("mutex hierarchy violated"); 15 | } 16 | } 17 | void update_hierarchy_value() 18 | { 19 | previous_hierarchical_value = this_thread_hierarchical_value; 20 | this_thread_hierarchical_value = hierarchical_value; 21 | } 22 | public: 23 | explicit hierarchical_mutex(unsigned long value): 24 | hierarchical_value(value), 25 | previous_hierarchical_value(0) 26 | {} 27 | void lock() 28 | { 29 | check_for_hierarchy_violation(); 30 | internal_mutex.lock(); 31 | update_hierarchy_value(); 32 | } 33 | void unlock() 34 | { 35 | this_thread_hierarchical_value = previous_hierarchical_value; 36 | internal_mutex.unlock(); 37 | } 38 | bool try_lock() 39 | { 40 | check_for_hierarchy_violation(); 41 | if(!internal_mutex.try_lock()) 42 | return false; 43 | update_hierarchy_value(); 44 | return true; 45 | } 46 | }; 47 | thread_local unsigned long 48 | hierarchical_mutex::this_thread_hierarchical_value(ULONG_MAX); -------------------------------------------------------------------------------- /chapter07/example7_13.cpp: -------------------------------------------------------------------------------- 1 | //单生产者单消费者的无锁队列 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | struct node 7 | { 8 | std::shared_ptr data; 9 | node* next; 10 | 11 | node(): 12 | next(nullptr) 13 | {} 14 | }; 15 | 16 | std::atomic head; 17 | std::atomic tail; 18 | 19 | node* pop_head() 20 | { 21 | node* const old_head=head.load(); 22 | if(old_head==tail.load()) 23 | { 24 | return nullptr; 25 | } 26 | head.store(old_head->next); 27 | return old_head; 28 | } 29 | public: 30 | lock_free_queue(): 31 | head(new node),tail(head.load()) 32 | {} 33 | 34 | lock_free_queue(const lock_free_queue& other)=delete; 35 | lock_free_queue& operator=(const lock_free_queue& other)=delete; 36 | 37 | ~lock_free_queue() 38 | { 39 | while(node* const old_head=head.load()) 40 | { 41 | head.store(old_head->next); 42 | delete old_head; 43 | } 44 | } 45 | std::shared_ptr pop() 46 | { 47 | node* old_head=pop_head(); 48 | if(!old_head) 49 | { 50 | return std::shared_ptr(); 51 | } 52 | 53 | std::shared_ptr const res(old_head->data); 54 | delete old_head; 
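        // Descriptive note (added): deleting old_head directly is only safe because this queue
        // is restricted to one producer and one consumer. pop() is called from a single thread,
        // so once head has been advanced in pop_head() no other thread can still be dereferencing
        // the old node; the producer only ever touches the dummy node pointed to by tail. With
        // multiple consumers this delete would need one of the deferred-reclamation schemes
        // shown earlier in the chapter.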
55 | return res; 56 | } 57 | 58 | void push(T new_value) 59 | { 60 | std::shared_ptr new_data(std::make_shared(new_value)); 61 | node* p=new node; 62 | node* const old_tail=tail.load(); 63 | old_tail->data.swap(new_data); 64 | old_tail->next=p; 65 | tail.store(p); 66 | } 67 | }; -------------------------------------------------------------------------------- /chapter03/example3_5.cpp: -------------------------------------------------------------------------------- 1 | //一个线程安全栈的详细类定义 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | struct empty_stack: std::exception 8 | { 9 | const char* what() const throw(); 10 | }; 11 | 12 | template 13 | class threadsafe_stack 14 | { 15 | private: 16 | std::stack data; 17 | //mutalbe的中文意思是“可变的,易变的”,跟constant(既C++中的const)是反义词。 18 | //在C++中,mutable也是为了突破const的限制而设置的。被mutable修饰的变量(mutable只能由于修饰类的非静态数据成员), 19 | //将永远处于可变的状态,即使在一个const函数中。 20 | mutable std::mutex m; 21 | public: 22 | threadsafe_stack(){} 23 | threadsafe_stack(const threadsafe_stack& other) 24 | { 25 | std::lock_guard lock(other.m); 26 | data=other.data; 27 | } 28 | 29 | threadsafe_stack& operator=(const threadsafe_stack&) = delete; 30 | 31 | void push(T new_value) 32 | { 33 | std::lock_guard lock(m); 34 | data.push(new_value); 35 | } 36 | std::share_ptr pop() 37 | { 38 | std::lock_guard lock(m); 39 | if(data.empty()) throw empty_stack(); //在试着出栈值的时候检查是否为空 40 | std::share_ptr const res(std::make_shared(data.top())); //在修改栈之前分配返回值 41 | data.pop(); 42 | return res; 43 | } 44 | void pop(T& value) 45 | { 46 | std::lock_guard lock(m); 47 | if(data.empty()) throw empty_stack(); 48 | value = data.top(); 49 | data.pop(); 50 | } 51 | bool empty() const 52 | { 53 | std::lock_guard lock(m); 54 | return data.empty(); 55 | } 56 | }; -------------------------------------------------------------------------------- /chapter07/example7_08.cpp: -------------------------------------------------------------------------------- 1 | //回收函数的简单实现 2 | template 3 | void do_delete(void* p) 4 | { 5 | delete static_cast(p); 6 | } 7 | 8 | struct data_to_reclaim 9 | { 10 | void* data; 11 | std::function deleter; 12 | data_to_reclaim* next; 13 | 14 | template 15 | data_to_reclaim(T* p): 16 | data(p), 17 | deleter(&do_delete), 18 | next(0) 19 | {} 20 | 21 | ~data_to_reclaim() 22 | { 23 | deleter(data); 24 | } 25 | }; 26 | 27 | std::atomic nodes_to_reclaim; 28 | void add_to_reclaim_list(data_to_reclaim* node) 29 | { 30 | node->next=nodes_to_reclaim.load(); 31 | while(!nodes_to_reclaim.compare_exchange_weak(node->next,node)); 32 | } 33 | 34 | template 35 | void reclaim_later(T* data) 36 | { 37 | add_to_reclaim_list(new data_to_reclaim(data)); 38 | } 39 | 40 | bool outstanding_hazard_pointers_for(void* p) 41 | { 42 | for(unsigned i=0;inext; 58 | if(!outstanding_hazard_pointers_for(current->data)) 59 | { 60 | delete current; 61 | } 62 | else 63 | { 64 | add_to_reclaim_list(current); 65 | } 66 | current=next; 67 | } 68 | } -------------------------------------------------------------------------------- /chapter02/example2_8.cpp: -------------------------------------------------------------------------------- 1 | //使得每个线程具有最小数目的元素以避免过多的线程开销 2 | template 3 | struct accumulate_block 4 | { 5 | void operator()(Iterator first,Iterator last,T& result) 6 | { 7 | result=std::accumlate(first,last,result); 8 | } 9 | }; 10 | 11 | template 12 | T parallel_accumlate(Iterator first,Iterator last,T init) 13 | { 14 | unsigned long const length=std::distance(first,last); 15 | 16 | if(!length) 17 | return init; 18 | 19 | unsigned long 
const min_per_thread=25; 20 | unsigned long const max_threads=(length+min_per_thread-1)/min_per_thread; 21 | unsigned long const hardware_threads=std::thread::hardware_concurrency(); 22 | unsigned long const num_threads=std::min(hardware_threads!=0?hardware_threads:2,max_threads); 23 | unsigned long const block_size=length/num_threads; 24 | 25 | std::vector results(num_threads); 26 | std::vector thread(num_threads-1); 27 | 28 | Iterator block_start=first; 29 | for(unsigned long i = 0; i < (num_threads-1);++i) 30 | { 31 | Iterator block_end = block_start; 32 | std::advance(block_end,block_size); 33 | threads[i]=std::thread(accumulate_block(),block_start,block_end,std::ref(results[i])); 34 | block_start=block_end; 35 | } 36 | accumulate_block()(block_start,last,results[num_threads-1]); 37 | std::for_each(threads.begin(),threads.end(),std::mem_fn(&std::thread::join)); 38 | 39 | return std::accumulate(results.begin(),results.end(),init); 40 | } -------------------------------------------------------------------------------- /chapter06/example6_06.cpp: -------------------------------------------------------------------------------- 1 | //使用细粒度锁的线程安全队列 2 | template 3 | class threadsafe_queue 4 | { 5 | private: 6 | struct node 7 | { 8 | std::shared_ptr data; 9 | std::unique_ptr next; 10 | }; 11 | 12 | std::mutex head_mutex; 13 | std::unique_ptr head; 14 | std::mutex tail_mutex; 15 | node* tail; 16 | 17 | node* get_tail() 18 | { 19 | std::lock_guard tail_lock(tail_mutex); 20 | return tail; 21 | } 22 | 23 | std::unique_ptr pop_head() 24 | { 25 | std::lock_guard head_lock(head_mutex); 26 | 27 | if(head.get()==get_tail()) 28 | { 29 | return nullptr; 30 | } 31 | std::unique_ptr old_head=std::move(head); 32 | head=std::move(old_head->next); 33 | return old_head; 34 | } 35 | 36 | public: 37 | threadsafe_queue(): 38 | head(new node),tail(head.get()) 39 | {} 40 | 41 | threadsafe_queue(const threadsafe_queue& other)=delete; 42 | threadsafe_queue& operator=(const threadsafe_queue& other)=delete; 43 | 44 | std::shared_ptr try_pop() 45 | { 46 | std::unique_ptr old_head=pop_head(); 47 | return old_head?old_head->data:std::shared_ptr(); 48 | } 49 | 50 | void push(T new_value) 51 | { 52 | std::shared_ptr new_data(std::make_shared(std::move(new_value))); 53 | std::unique_ptr p(new node); 54 | node* const new_tail=p.get(); 55 | std::lock_guard tail_lock(tail_mutex); 56 | tail->data=new_data; 57 | tail->next=std::move(p); 58 | tail=new_tail; 59 | } 60 | }; -------------------------------------------------------------------------------- /chapter07/example7_21.cpp: -------------------------------------------------------------------------------- 1 | //无锁队列中使用帮助的push() 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | void set_new_tail(counted_node_ptr& old_tail,counted_node_ptr const& new_tail) 7 | { 8 | node* const current_tail_ptr=old_tail.ptr; 9 | while(!tail.compare_exchange_weak(old_tail,new_tail) && old_tail.ptr==current_tail_ptr); 10 | if(old_tail.ptr==current_tail_ptr) 11 | free_external_counter(old_tail); 12 | else 13 | current_tail_ptr->release_ref(); 14 | } 15 | public: 16 | void push(T new_value) 17 | { 18 | std::unique_ptr new_data(new T(new_value)); 19 | counted_node_ptr new_next; 20 | new_next.ptr = new node; 21 | new_next.external_count=1; 22 | counted_node_ptr old_tail=tail.load(); 23 | 24 | for(;;) 25 | { 26 | increase_external_count(tail,old_tail); 27 | 28 | T* old_data=nullptr; 29 | if(old_tail.ptr->data.compare_exchange_strong(old_data,new_data.get())) 30 | { 31 | counted_node_ptr 
old_next={0}; 32 | if(!old_tail.ptr->next.compare_exchange_strong(old_next,new_next)) 33 | { 34 | delete new_next.ptr; 35 | new_next=old_next; 36 | } 37 | set_new_tail(old_tail, new_next); 38 | new_data.release(); 39 | break; 40 | } 41 | else 42 | { 43 | counted_node_ptr old_next={0}; 44 | if(old_tail.ptr->next.compare_exchange_strong(old_next,new_next)) 45 | { 46 | old_next=new_next; 47 | new_next.ptr=new_next 48 | } 49 | set_new_tail(old_tail, old_next); 50 | } 51 | } 52 | } 53 | }; -------------------------------------------------------------------------------- /chapter09/example9_06.cpp: -------------------------------------------------------------------------------- 1 | //使用本地线程工作队列的线程池来避免工作队列上的竞争 2 | class thread_pool 3 | { 4 | thread_safe_queue pool_work_queue; 5 | typedef std::queue local_queue_type; 6 | //使用thread_local变量来保证每个线程有一个自己的工作队列再加上一个全局的工作队列。 7 | //使用unique_ptr<>来保存线程私有的工作队列因为我们不想让非线程池中的线程也持有一个 8 | static thread_local std::unique_ptr local_work_queue; 9 | 10 | void worker_thread() 11 | { 12 | local_work_queue.reset(new local_queue_type) 13 | 14 | while(!done) 15 | { 16 | run_pending_task(); 17 | } 18 | } 19 | 20 | public: 21 | template 22 | std::future::type> 23 | submit(FunctionType f) 24 | { 25 | typedef typename std::result_of::type result_type; 26 | 27 | std::packaged_task task(f); 28 | std::future res(task.get_future()); 29 | if(local_work_queue) //submit()函数会检查当前线程是否有一个工作队列。 30 | { 31 | local_work_queue->push(std::move(task)); //如果有当前线程时一个线程池中线程,添加私有工作队列, 32 | } 33 | else 34 | { 35 | pool_work_queue.push(std::move(task)); //将任务添加到全局工作队列中 36 | } 37 | return res; 38 | } 39 | 40 | void run_pending_task() 41 | { 42 | function_wrapper task; 43 | if(local_work_queue && !local_queue_type->empty()) 44 | { 45 | task=std::move(local_work_queue->front()); 46 | local_work_queue->pop(); 47 | task(); 48 | } 49 | else if(pool_work_queue.try_pop(task)) 50 | { 51 | task(); 52 | } 53 | else 54 | { 55 | std::this_thread::yield(); 56 | } 57 | } 58 | //rest as before 59 | }; -------------------------------------------------------------------------------- /chapter06/example6_02.cpp: -------------------------------------------------------------------------------- 1 | //使用条件变量的线程安全队列的完整定义 2 | template 3 | class threadsafe_queue 4 | { 5 | private: 6 | mutable std::mutex mut; 7 | std::queue data_queue; 8 | std::condition_variable data_cond; 9 | public: 10 | threadsafe_queue() 11 | {} 12 | 13 | void push(T new_value) 14 | { 15 | std::lock_guard lk(mut); 16 | data_queue.push(std::move(data)); 17 | data_cond.notify_one(); 18 | } 19 | 20 | void wait_and_pop(T& value) 21 | { 22 | std::unique_lock lk(mut); 23 | data_cond.wait(lk,[this]{return !data_queue.empty();}); 24 | value=std::move(data_queue.front()); 25 | data_queue.pop(); 26 | } 27 | 28 | std::shared_ptr wait_and_pop() 29 | { 30 | std::unique_lock lk(mut); 31 | data_cond.wait(lk,[this]{return !data_queue.empty();}); 32 | std::shared_ptr res(std::make_shared(std::move(data_queue.front()))); 33 | data_queue.pop(); 34 | return res; 35 | } 36 | 37 | bool try_pop(T& value) 38 | { 39 | std::lock_guard lk(mut); 40 | if(data_queue.empty()) 41 | return false; 42 | value=std::move(data_queue.front()); 43 | data_queue.pop(); 44 | return true; 45 | } 46 | 47 | std::shared_ptr try_pop() 48 | { 49 | std::lock_guard lk(mut); 50 | if(data_queue.empty()) 51 | return std::shared_ptr(); 52 | std::shared_ptr res(std::make_shared(std::move(data_queue.front()))); 53 | data_queue.pop(); 54 | return res; 55 | } 56 | 57 | bool empty() const 58 | { 59 | 
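        // Descriptive note (added): mut is declared mutable precisely so that this const member
        // function can lock it. The answer can be stale as soon as it is returned while other
        // threads push and pop, which is why callers that need to act on the state use
        // try_pop()/wait_and_pop(), where the check and the pop happen under a single lock.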
std::lock_guard lk(mut); 60 | retrun data_queue.empty(); 61 | } 62 | }; -------------------------------------------------------------------------------- /chapter07/example7_15.cpp: -------------------------------------------------------------------------------- 1 | //在无锁队列中引用计数tail来实现push() 2 | template 3 | class lock_free_queue 4 | { 5 | private: 6 | private: 7 | struct node; 8 | 9 | struct counted_node_ptr 10 | { 11 | int external_count; 12 | node* ptr; 13 | }; 14 | 15 | std::atomic head; 16 | std::atomic tail; 17 | 18 | //将此结构体保存在一个机器字中在许多平台中使原子操作更容易是无锁的 19 | struct node_counter 20 | { 21 | unsigned internal_count:30; 22 | unsigned external_counters:2; //这里的external_counters只包含两个比特,因为最多只有两个计数器 23 | }; 24 | 25 | struct node 26 | { 27 | std::atomic data; 28 | std::atomic count; 29 | counted_node_ptr next; 30 | 31 | node() 32 | { 33 | node_counter new_count; 34 | new_count.internal_count=0; 35 | new_count.external_counters=2; 36 | count.store(new_count); 37 | 38 | next.ptr=nullptr; 39 | next.external_count=0; 40 | } 41 | }; 42 | 43 | public: 44 | void push(T new_value) 45 | { 46 | std::unique_ptr new_data(new T(new_value)); 47 | counted_node_ptr new_next; 48 | new_next.ptr=new node; 49 | new_next.external_count=1; 50 | counted_node_ptr old_tail=tail.load(); 51 | 52 | for(;;) 53 | { 54 | increase_external_count(tail,old_tail); //增加计数 55 | 56 | T* old_data=nullptr; 57 | if(old_tail.ptr->data.compare_exchange_strong(old_data,new_data.get())) //解引用 58 | { 59 | old_tail.ptr->next=new_next; 60 | old_tail=tail.exchange(new_next); 61 | free_external_counter(old_tail); 62 | new_data.release(); 63 | break; 64 | } 65 | old_tail.ptr->release_ref(); 66 | } 67 | } 68 | }; -------------------------------------------------------------------------------- /chapter06/example6_03.cpp: -------------------------------------------------------------------------------- 1 | //包含std::shared_ptr<>实例的线程安全队列 2 | template 3 | class threadsafe_queue 4 | { 5 | private: 6 | mutable std::mutex mut; 7 | std::queue > data_queue; 8 | std::condition_variable data_cond; 9 | public: 10 | threadsafe_queue(); 11 | {} 12 | 13 | void wait_and_pop(T& value) 14 | { 15 | std::unique_lock lk(mut); 16 | data_cond.wait(lk,[this]{return !data_queue.empty();}); 17 | value=std::move(*data_queue.front()); 18 | data_queue.pop(); 19 | } 20 | 21 | bool try_pop(T& value) 22 | { 23 | std::lock_guard lk(mut); 24 | if(data_queue.empty()) 25 | return false; 26 | value=std::move(*data_queue.front()); 27 | data_queue.pop(); 28 | return true; 29 | } 30 | 31 | std::shared_ptr wait_and_pop() 32 | { 33 | std::unique_lock lk(mut); 34 | data_cond.wait(lk,[this]{return !data_queue.empty();}); 35 | std::shared_ptr res=data_queue.front(); 36 | data_queue.pop(); 37 | return res; 38 | } 39 | 40 | std::shared_ptr try_pop() 41 | { 42 | std::lock_guard lk(mut); 43 | if(data_queue.empty()) 44 | return std::shared_ptr(); 45 | std::shared_ptr res=data_queue.front(); 46 | data_queue.pop(); 47 | return res; 48 | } 49 | 50 | void push(T new_value) 51 | { 52 | std::shared_ptr data(std::make_shared(std::move(new_value))); 53 | std::lock_guard lk(mut); 54 | data_queue.push(data); 55 | data_cond.notify_one(); 56 | } 57 | 58 | bool empty() const 59 | { 60 | std::lock_guard lk(mut); 61 | return data_queue.empty(); 62 | } 63 | }; -------------------------------------------------------------------------------- /chapter09/example9_12.cpp: -------------------------------------------------------------------------------- 1 | //为std::condition_variable_any而设的interruptible_wait 2 | 
//可以与任何锁类型配合工作 3 | class interrupt_flag 4 | { 5 | std::atomic flag; 6 | std::condition_variable* thread_cond; 7 | std::condition_variable_any* thread_cond_any; 8 | std::mutex set_clear_mutex; 9 | 10 | public: 11 | interrupt_flag(): 12 | thread_cond(0),thread_cond_any(0) 13 | {} 14 | 15 | void set() 16 | { 17 | flag.store(true,std::memory_order_relaxed); 18 | std::lock_guard lk(set_clear_mutex); 19 | if(thread_cond) 20 | { 21 | thread_cond->notify_all(); 22 | } 23 | else if(thread_cond_any) 24 | { 25 | thread_cond_any->notify_all(); 26 | } 27 | } 28 | 29 | template 30 | void wait(std::condition_variable_any& cv,Lockable& lk) 31 | { 32 | struct custom_lock 33 | { 34 | interrupt_flag* self; 35 | Lockable lk; 36 | 37 | custom_lock(interrupt_flag* self_,std::condition_variable_any& cond,Lockable& lk_): 38 | self(self_),lk(lk_) 39 | { 40 | self->set_clear_mutex.lock(); 41 | self->thread_cond_any=&cond; 42 | } 43 | 44 | void unlock() 45 | { 46 | lk.unlock(); 47 | self->set_clear_mutex.unlock(); 48 | } 49 | 50 | void lock() 51 | { 52 | std::lock(self->set_clear_mutex,lk); 53 | } 54 | 55 | ~custom_lock() 56 | { 57 | self->thread_cond_any=0; 58 | self->set_clear_mutex.unlock(); 59 | } 60 | }; 61 | custom_lock cl(this,cv,lk); 62 | interruption_point(); 63 | cv.wait(ck); 64 | interruption_point(); 65 | } 66 | //rest as before 67 | }; 68 | 69 | template 70 | void interruption_wait(std::condition_variable_any& cv,Lockable& lk) 71 | { 72 | this_thread_interrupt_flag.wait(cv,lk); 73 | } -------------------------------------------------------------------------------- /chapter08/example8_03.cpp: -------------------------------------------------------------------------------- 1 | //使用std::packaged_task的std::accumulate的并行版本,来解决新线程上抛出异常的问题 2 | template 3 | struct accumulate_block 4 | { 5 | //直接返回结果 6 | T operator()(Iterator first,Iterator last) 7 | { 8 | return std::accumulate(first,last,T()); 9 | } 10 | }; 11 | 12 | template 13 | T parallel_accumulate(Iterator first,Iterator last,T init) 14 | { 15 | unsigned long const length=std::distance(first,last); 16 | 17 | if(!length) 18 | return init; 19 | 20 | //2.8有讲 21 | unsigned long const min_per_thread=25; 22 | unsigned long const max_threads=(length+min_per_thread-1)/min_per_thread; 23 | 24 | unsigned long const hardware_threads=std::thread::hardware_concurrency(); 25 | 26 | unsigned long const num_threads=std::min(hardware_threads!=0?hardware_threads:2,max_threads); 27 | 28 | unsigned long const block_size=length/num_threads; 29 | 30 | std::vector > futures(num_threads-1); //与8.2不同使用future变量 31 | std::vector threads(num_threads-1); 32 | 33 | Iterator block_start=first; 34 | for(unsigned long i = 0; i < (num_threads-1); ++i) 35 | { 36 | Iterator block_end=block_start; 37 | std::advance(block_end,block_size); 38 | //为accumulate_block创造一个任务 39 | std::packaged_task task(accumulate_block()); 40 | futures[i]=task.get_future(); 41 | threads[i]=std::thread(std::move(task),block_start,block_end); //允许任务的时候,将在future中捕捉结果,也会捕捉任何抛出的异常 42 | block_start=block_end; 43 | } 44 | 45 | T last_result=accumulate_block()(block_start,last); 46 | std::for_each(threads.begin(),threads.end(),std::mem_fn(&std::thread::join)); 47 | 48 | T result=init; 49 | for(unsigned long i=0;i<(num_threads-1);++i) 50 | { 51 | result+=futures[i].get(); 52 | } 53 | result += last_result; 54 | return result; 55 | } -------------------------------------------------------------------------------- /chapter08/example8_04.cpp: -------------------------------------------------------------------------------- 1 | 
//std::accumulate的异常安全并行版本 2 | //使用future,最简单的方法就是捕获所有异常,并且将它们融合到调用joinable()的线程中,然后再次抛出异常。 3 | //try-catch块令人讨厌,我们在一个对象的析构函数中检查它 4 | class join_threads 5 | { 6 | std::vector& threads; 7 | public: 8 | explicit join_threads(std::vector& threads_): 9 | threads(threads_) 10 | {} 11 | ~join_threads(); 12 | { 13 | for(unsigned long i=0;i 22 | T parallel_accumulate(Iterator first,Iterator last,T init) 23 | { 24 | unsigned long const length=std::distance(first,last); 25 | 26 | if(!length) 27 | return init; 28 | 29 | unsigned long const min_per_thread=25; 30 | unsigned long const max_threads=(length+min_per_thread-1)/min_per_thread; 31 | 32 | unsigned long const hardware_threads=std::thread::hardware_concurrency(); 33 | 34 | unsigned long const num_threads=std::min(hardware_threads!=0?hardware_threads:2,max_threads); 35 | 36 | unsigned long const block_size=length/num_threads; 37 | 38 | std::vector > futures(num_threads-1); 39 | std::vector threads(num_threads-1); 40 | join_threads joiner(threads); 41 | 42 | Iterator block_start=first; 43 | for(unsigned long i=0;i<(num_threads-1);++i) 44 | { 45 | Iterator block_end=block_start; 46 | std::advance(block_end,block_size); 47 | std::packaged_task task(accumulate_block()); 48 | futures[i]=task.get_future(); 49 | 50 | threads[i]=std::thread(std::move(task),block_start,block_end); 51 | block_start=block_end; 52 | } 53 | T last_result=accumulate_block()(block_start,last); 54 | T result=init; 55 | for(unsigned long i=0;i<(num_threads-1);++i) 56 | { 57 | result+=futures[i].get(); //将被阻塞直到结果出来 58 | } 59 | result += last_result; 60 | return result; 61 | } -------------------------------------------------------------------------------- /chapter08/example8_02.cpp: -------------------------------------------------------------------------------- 1 | //std::accumulate的并行版本(来自清单2.8) 2 | template 3 | struct accumulate_block 4 | { 5 | void operator()(Iterator first,Iterator last,T& result) 6 | { 7 | result=std::accumulate(first,last,result); 8 | } 9 | }; 10 | 11 | template 12 | T parallel_accumulate(Iterator first,Iterator last,T init) 13 | { 14 | unsigned long const length=std::distance(first,last); 15 | 16 | if(!length) //如果输入的范围为空,只返回初始值init 17 | return init; 18 | 19 | unsigned long const min_per_thread=25; //最小块的大小 20 | unsigned long const max_threads=(length+min_per_thread-1)/min_per_thread; //处理的元素数量除以最小块的大小,获取线程的最大数量 21 | 22 | unsigned long const hardware_threads=std::thread::hardware_concurrency(); //真正并发运行的线程数量的指示 23 | //要运行的线程数是你计算出的最大值的硬件线程数量的较小值。 24 | unsigned long const num_threads=std::min(hardware_threads!=0?hardware_threads:2,max_threads); 25 | //如果hardware_concurrency返回0,我们就替换成2,运行过多的线程,会在单核机器上变慢,过少会错过可用的并发 26 | 27 | unsigned long const block_size=length/num_threads; //待处理的线程的条目数量是范围的长度除以线程的数量 28 | 29 | std::vector results(num_threads); //保存中间结果 30 | std::vector threads(num_threads-1); //因为有一个线程(本线程)了所以少创建一个文档 31 | 32 | //循环:1.递进block_end到当前块的结尾,2.并启动一个新的线程来累计此块的结果。3.下一个块的开始是这一个的结束 33 | Iterator block_start=first; 34 | for(unsigned long i = 0; i < (num_threads-1);++i) 35 | { 36 | Iterator block_end=block_start; 37 | std::advance(block_end,block_size); ...1 38 | threads[i]=std::thread(accumulate_block(),block_start,block_end,std::ref(results[i])); ...2 39 | block_start=block_end; ...3 40 | } 41 | 42 | //这里是处理上面没有整除的掉block_size的剩下的部分 43 | accumulate_block()(block_start,last,results[num_threads-1]); 44 | //通过join等待所有计算的线程 45 | std::for_each(threads.begin(),threads.end(),std::mem_fn(&std::thread::join)); 46 | //一旦累计计算出最后一个块的结果,调用accumulate将结果计算出来 47 | 
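    // Worked example (added, assuming a machine where hardware_concurrency() returns 4):
    // for 1000 elements, max_threads = (1000+24)/25 = 40, num_threads = min(4,40) = 4 and
    // block_size = 250, so three extra threads each accumulate 250 elements, this thread
    // handles the final block, and the partial results are combined below.
    //
    // Hedged usage sketch (not part of the original listing):
    //   std::vector<int> v(1000);
    //   std::iota(v.begin(), v.end(), 1);
    //   int sum = parallel_accumulate(v.begin(), v.end(), 0);  // 500500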
return std::accumulate(results.begin(),results.end(),init); 48 | } 49 | -------------------------------------------------------------------------------- /chapter09/example9_11.cpp: -------------------------------------------------------------------------------- 1 | //在为std::condition_variable的interruptible_wait中使用超时 2 | class interrupt_flag 3 | { 4 | std::atomic flag; 5 | std::condition_variable* thread_cond; 6 | std::mutex set_clear_mutex; 7 | 8 | public: 9 | interrupt_flag(): 10 | thread_cond(0) 11 | {} 12 | 13 | void set() 14 | { 15 | flag.store(true,std::memory_order_relaxed); 16 | std::lock_guard lk(set_clear_mutex); 17 | if(thread_cond) 18 | { 19 | thread_cond->notify_all(); 20 | } 21 | } 22 | 23 | bool is_set() const 24 | { 25 | return flag.load(std::memory_order_relaxed); 26 | } 27 | 28 | void set_condition_variable(std::condition_variable& cv) 29 | { 30 | std::lock_guard lk(set_clear_mutex); 31 | thread_cond=&cv; 32 | } 33 | 34 | void clear_condition_variable() 35 | { 36 | std::lock_guard lk(set_clear_mutex); 37 | thread_cond=0; 38 | } 39 | 40 | struct clear_cv_on_destruct 41 | { 42 | ~clear_cv_on_destruct() 43 | { 44 | this_thread_interrupt_flag.clear_condition_variable(); 45 | } 46 | }; 47 | }; 48 | 49 | void interruptible_wait(std::condition_variable& cv,std::unique_lock& lk) 50 | { 51 | interruption_point(); 52 | this_thread_interrupt_flag.set_condition_variable(cv); 53 | interrupt_flag::clear_cv_on_destruct guard; 54 | interruption_point(); 55 | //给wait_for()传递一个很小的时间间隔(如1毫秒)而不是使用wait() 56 | cv.wait_for(lk,std::chrono::milliseconds(1)); 57 | interruption_point(); 58 | } 59 | 60 | //如果你有一个要等待的断言,那么1毫秒的超时会被完全隐藏在断言的循环中 61 | template 62 | void interruptible_wait(std::condition_variable& cv,std::unique_lock& lk,Predicate pred) 63 | { 64 | interruption_point(); 65 | this_thread_interrupt_flag.set_condition_variable(cv); 66 | interrupt_flag::clear_cv_on_destruct guard; 67 | while(!this_thread_interrupt_flag.is_set() && !pred()) 68 | { 69 | cv.wait_for(lk,std::chrono::milliseconds(1)); 70 | } 71 | interruption_point(); 72 | } -------------------------------------------------------------------------------- /chapter04/example4_05.cpp: -------------------------------------------------------------------------------- 1 | //使用条件变量的线程安全队列的完整类定义 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | template 8 | class threadsafe_queue 9 | { 10 | private: 11 | mutable std::mutex mut; //互斥元必须是可变的 12 | std::queue data_queue; 13 | std::condition_variable data_cond; 14 | public: 15 | threadsafe_queue() 16 | {} 17 | threadsafe_queue(threadsafe_queue const& other) 18 | { 19 | std::lock_guard lk(other.mut); 20 | data_queue=other.data_queue; 21 | } 22 | 23 | void push(T new_value) 24 | { 25 | std::lock_guard lk(mut); 26 | data_queue.push(new_value); 27 | data_cond.notify_one(); 28 | } 29 | 30 | void wait_and_pop(T& value) 31 | { 32 | std::unique_lock lk(mut); 33 | //这里在[]加入this,可以从lambda中访问类成员 34 | data_cond.wait(lk, [this] {return !data_queue.empty();}); 35 | value=data_queue.front(); 36 | data_queue.pop(); 37 | } 38 | 39 | std::shared_ptr wait_and_pop() 40 | { 41 | std::unique_lock lk(mut); 42 | data_cond.wait(lk,[this]{return !data_queue.empty();}); 43 | //模板函数 std::make_shared 可以返回一个指定类型的 std::shared_ptr 44 | std::shared_ptr res(std::make_shared(data_queue.front())); 45 | data_queue.pop(); 46 | return res; 47 | } 48 | bool try_pop(T& value) 49 | { 50 | std::lock_guard lk(mut); 51 | if(data_queue.empty()) 52 | return false; 53 | value=data_queue.front(); 54 | data_queue.pop(); 55 | return 
true; 56 | } 57 | std::shared_ptr try_pop() 58 | { 59 | std::lock_guard lk(mut); 60 | if(data_queue.empty()) 61 | return std::shared_ptr(); 62 | //模板函数 std::make_shared 可以返回一个指定类型的 std::shared_ptr 63 | std::shared_ptr res(std::make_shared(data_queue.front())); 64 | data_queue.pop(); 65 | return res; 66 | } 67 | 68 | bool empty() const 69 | { 70 | std::lock_guard lk(mut); 71 | return data_queue.empty(); 72 | } 73 | }; 74 | -------------------------------------------------------------------------------- /chapter09/example9_02.cpp: -------------------------------------------------------------------------------- 1 | //有等待任务的线程池 2 | //因为std::packaged_task<>的实例只是可移动的,不是可复制的,不能够用std::function<>来作为队列中的元素, 3 | //因为std::function<>要求存储的函数对象是可以拷贝和构造的。所以需要function_wrapper 4 | class function_wrapper 5 | { 6 | struct impl_base 7 | { 8 | virtual void call()=0; 9 | virtual ~impl_base() {} 10 | }; 11 | std::unique_ptr impl; 12 | template 13 | struct impl_type: impl_base 14 | { 15 | F f; 16 | impl_type(F&& f_): f(std::move(f_)) {} 17 | void call() { f(); } 18 | }; 19 | public: 20 | template 21 | function_wrapper(F&& f): 22 | impl(new impl_type(std::move(f))) 23 | {} 24 | 25 | void operator()(){ impl->call(); } 26 | 27 | function_wrapper()=default; 28 | 29 | function_wrapper(function_wrapper&& other): 30 | impl(std::move(other.impl)) 31 | {} 32 | function_wrapper& operator=(function_wrapper&& other) 33 | { 34 | impl=std::move(other.impl); 35 | return* this; 36 | } 37 | 38 | function_wrapper(const function_wrapper&)=delete; 39 | function_wrapper(function_wrapper&)=delete; 40 | function_wrapper& operator=(const function_wrapper&)=delete; 41 | }; 42 | 43 | class thread_pool 44 | { 45 | thrad_safe_queue work_queue; //使用函数包装器而非std::function 46 | 47 | void worker_thread() 48 | { 49 | while(!done) 50 | { 51 | function_wrapper task; 52 | if(work_queue.try_pop(task)) 53 | { 54 | task(); 55 | } 56 | else 57 | { 58 | std::this_thread::yield(); 59 | } 60 | } 61 | } 62 | public: 63 | template 64 | //返回一个std::future<>对象来保存任务的返回值和允许调用者等待任务结束 65 | std::future::type> 66 | submit(FunctionType f) 67 | { 68 | typedef typename std::result_of::type result_type; 69 | 70 | std::packaged_task task(std::move(f)); 71 | std::future res(task.get_future()); 72 | work_queue.push(std::move(task)); 73 | retrun res; 74 | } 75 | //rest as before 76 | }; -------------------------------------------------------------------------------- /chapter08/example8_09.cpp: -------------------------------------------------------------------------------- 1 | //并行find算法的一种实现 2 | template 3 | Iterator parallel_find(Iterator first,Iterator last,MatchType match) 4 | { 5 | struct find_element 6 | { 7 | void operator()(Iterator begin,Iterator end,MatchType match, 8 | std::promise* result, 9 | std::atomic* done_flag) 10 | { 11 | try 12 | { 13 | for(;(begin!=end) && !done_flag->load();++begin) 14 | { 15 | if(*begin == match) 16 | { 17 | result->set_value(begin); 18 | done_flag->store(true); 19 | return; 20 | } 21 | } 22 | } 23 | catch(...) 24 | { 25 | try 26 | { 27 | result->set_exception(std::current_exception()); 28 | done_flag->store(true); 29 | } 30 | catch(...) 
31 | {} 32 | } 33 | } 34 | }; 35 | 36 | unsigned long const length=std::distance(first,last); 37 | 38 | if(!length) 39 | return last; 40 | 41 | unsigned long const min_per_thread=25; 42 | unsigned long const max_threads=(length+min_per_thread-1)/min_per_thread; 43 | 44 | unsigned long const hardware_threads=std::thread::hardware_concurrency(); 45 | 46 | unsigned long const num_threads=std::min(hardware_threads!=0?hardware_threads:2,max_threads); 47 | 48 | unsigned long const block_size=length/num_threads; 49 | 50 | std::promise result; 51 | std::atomic done_flag(false); 52 | std::vector threads(num_threads-1); 53 | 54 | //你通过在块中附入线程链接的代码,使得检查结构之前需要等待所有线程结束。 55 | { 56 | join_threads joiner(threads); 57 | Iterator block_start=first; 58 | for(unsigned long i=0;i<(num_threads-1);++i) 59 | { 60 | Iterator block_end=block_start; 61 | std::advance(block_end,block_size); 62 | 63 | threads[i]=std::thread(find_element(),block_start,block_end,match,&result,&done_flag); 64 | block_start=block_end; 65 | } 66 | find_element()(block_start,last,match,&result,&done_flag); 67 | } 68 | //检查是否有匹配项的时候,所有线程都被联合起来了 69 | if(!done_flag.load()) 70 | { 71 | return last; 72 | } 73 | return result.get_future().get(); 74 | } -------------------------------------------------------------------------------- /chapter05/example5_06.cpp: -------------------------------------------------------------------------------- 1 | //多线程的松散操作 2 | #include 3 | #include 4 | #include 5 | 6 | std::atomic x(0),y(0),z(0); 7 | std::atomic go(false); 8 | 9 | unsigned const loop_count=10; 10 | 11 | struct read_values 12 | { 13 | int x,y,z; 14 | }; 15 | 16 | read_values values1[loop_count]; 17 | read_values values2[loop_count]; 18 | read_values values3[loop_count]; 19 | read_values values4[loop_count]; 20 | read_values values5[loop_count]; 21 | 22 | void increment(std::atomic* var_to_inc,read_values* values) 23 | { 24 | while(!go) //旋转等待信号 25 | std::this_thread::yield(); 26 | for (unsigned i = 0; i < loop_count; ++i) 27 | { 28 | values[i].x=x.load(std::memory_order_relaxed); 29 | values[i].y=y.load(std::memory_order_relaxed); 30 | values[i].z=z.load(std::memory_order_relaxed); 31 | var_to_inc->store(i+1,std::memory_order_relaxed); 32 | std::this_thread::yield(); 33 | //std::this_thread::yield() 是让当前线程让渡出自己的CPU时间片(给其他线程使用) 34 | //std::this_thread::sleep_for() 是让当前休眠”指定的一段”时间. 
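        // Descriptive note (added): with memory_order_relaxed each of x, y and z still has a
        // single modification order that every thread agrees on, but there is no ordering
        // between the three variables. The (x,y,z) snapshots recorded by different threads can
        // therefore look mutually inconsistent, e.g. one reader may see x advance before y
        // while another sees y advance before x; that is exactly what this experiment is meant
        // to show when the recorded values are printed.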
35 | } 36 | } 37 | 38 | void read_vals(read_values* values) 39 | { 40 | while(!go) //旋转等待信号 41 | std::this_thread::yield(); 42 | for (unsigned i = 0; i < loop_count; ++i) 43 | { 44 | values[i].x=x.load(std::memory_order_relaxed); 45 | values[i].y=y.load(std::memory_order_relaxed); 46 | values[i].z=z.load(std::memory_order_relaxed); 47 | std::this_thread::yield(); 48 | } 49 | } 50 | 51 | void print(read_values* v) 52 | { 53 | for (unsigned i = 0; i < loop_count; ++i) 54 | { 55 | if(i) 56 | std::cout<<","; 57 | std::cout<<"("< count; 5 | std::atomic spaces; 6 | std::atomic generation; 7 | 8 | barrier(unsigned count_): 9 | count(count_),spaces(count_),generation(0) 10 | {} 11 | 12 | void wait() 13 | { 14 | unsigned const gen=generation.load(); 15 | if(!--spaces) 16 | { 17 | spaces=count.load(); 18 | ++generation; 19 | } 20 | else 21 | { 22 | while(generation.load()==gen) 23 | { 24 | std::this_thread::yield(); //是将当前线程所抢到的CPU”时间片A”让渡给其他线程 25 | } 26 | } 27 | } 28 | 29 | void done_waiting() 30 | { 31 | --count; 32 | if(!--spaces) 33 | { 34 | spaces=count.load(); 35 | ++generation; 36 | } 37 | } 38 | }; 39 | 40 | template 41 | void parallel_partial_sum(Iterator first,Iterator last) 42 | { 43 | typedef typename Iterator::value_type value_type; 44 | 45 | struct process_element 46 | { 47 | void operator()(Iterator first,Iterator last, 48 | std::vector& buffer, 49 | unsigned i,barrier& b) 50 | { 51 | value_type& ith_element=*(first+i); 52 | bool update_source=false; 53 | 54 | for(unsigned step=0,stride=1;stride<=i;++step,stride*=2) 55 | { 56 | value_type const& source=(step%2)?buffer[i]:ith_element; 57 | value_type& dest=(step%2)?ith_element:buffer[i]; 58 | value_type const& addend=(step%2)?buffer[i-stride]:*(first+i-stride); 59 | 60 | dest=source+addend; 61 | update_source=!(step%2); 62 | b.wait(); //开始下一步之前在屏障上等待 63 | } 64 | //如果你的最终结果存储在缓冲器中的话,就更新原先范围里的元素 65 | if(update_source) 66 | { 67 | ith_element=buffer[i]; 68 | } 69 | b.done_waiting(); 70 | } 71 | }; 72 | 73 | unsigned long const length=std::distance(first,last); 74 | 75 | if(length<=1) 76 | return; 77 | 78 | std::vector buffer(length); 79 | barrier b(length); 80 | 81 | std::vector threads(length-1); 82 | join_threads joiner(threads); 83 | 84 | Iterator block_start=first; 85 | for(unsigned long i=0;i<(length-1);++i) 86 | { 87 | threads[i]=std::thread(process_element(),first,last,std::ref(buffer),i,std::ref(b)); 88 | } 89 | process_element()(first,last,buffer,length-1,b); 90 | } -------------------------------------------------------------------------------- /chapter07/example7_12.cpp: -------------------------------------------------------------------------------- 1 | //使用引用计数和放松原子操作的无锁栈 2 | template 3 | class lock_free_stack 4 | { 5 | private: 6 | struct node; 7 | 8 | struct counted_node_ptr 9 | { 10 | int external_count; 11 | node* ptr; 12 | }; 13 | 14 | struct node 15 | { 16 | std::share_ptr data; 17 | std::atomic internal_count; 18 | counted_node_ptr next; 19 | 20 | node(T const& data_): 21 | data(std::make_shared(data_)), 22 | internal_count(0) 23 | {} 24 | }; 25 | 26 | std::atomic head; 27 | 28 | void increase_head_count(counted_node_ptr& old_counter) 29 | { 30 | counted_node_ptr new_counter; 31 | 32 | do 33 | { 34 | new_counter=old_counter; 35 | ++new_counter.external_count; 36 | } 37 | while(!head.compare_exchange_strong(old_counter,new_counter, 38 | std::memory_order_acquire, 39 | std::memory_order_relaxed)); 40 | old_counter.external_count=new_counter.external_count; 41 | } 42 | public: 43 | ~lock_free_stack() 44 | { 45 | 
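        // Descriptive note (added): the destructor just pops until the stack is empty; by the
        // time an object is destroyed no other thread may still be using it, so the
        // relaxed/acquire/release orderings inside pop() are already stronger than needed here.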
while(pop()); 46 | } 47 | 48 | void push(T const& data) 49 | { 50 | counted_node_ptr new_node; 51 | new_node.ptr=new node(data); 52 | new_node.external_count=1; 53 | new_node.ptr->next=head.load(std::memory_order_relaxed); 54 | while(!head.compare_exchange_weak(new_node.ptr->next,new_node, 55 | std::memory_order_release, 56 | std::memory_order_relaxed)); 57 | } 58 | std::share_ptr pop() 59 | { 60 | counted_node_ptr old_head=head.load(std::memory_order_relaxed); 61 | for(;;) 62 | { 63 | increase_head_count(old_head); 64 | node* const ptr=old_head.ptr; 65 | if(!ptr) 66 | { 67 | return std::share_ptr(); 68 | } 69 | if(head.compare_exchange_strong(old_head,ptr->next,std::memory_order_relaxed)) 70 | { 71 | std::share_ptr res; 72 | res.swap(ptr->data); 73 | 74 | int const count_increase=old_head.external_count-2; 75 | 76 | if(ptr->internal_count.fetch_add(count_increase,std::memory_order_release)==(-count_increase)) 77 | { 78 | delete ptr; 79 | } 80 | 81 | return res; 82 | } 83 | else if(ptr->internal_count.fetch_add(-1,std::memory_order_relaxed)==1) 84 | { 85 | ptr->internal_count.load(std::memory_order_acquire); 86 | delete ptr; 87 | } 88 | } 89 | } 90 | }; -------------------------------------------------------------------------------- /chapter06/example6_13.cpp: -------------------------------------------------------------------------------- 1 | //支持迭代的线程安全链表 2 | template 3 | class threadsafe_list 4 | { 5 | struct node 6 | { 7 | std::mutex m; 8 | std::shared_ptr data; 9 | std::unique_ptr next; 10 | 11 | node(): 12 | next() 13 | {} 14 | 15 | node(T const& value): 16 | data(std::make_shared(value)) 17 | {} 18 | }; 19 | 20 | node head; 21 | public: 22 | threadsafe_list() 23 | {} 24 | 25 | ~threadsafe_list() 26 | { 27 | remove_if([](node const&){return true;}); // remove node 28 | } 29 | 30 | threadsafe_list(threadsafe_list const& other)=delete; 31 | threadsafe_list& operator=(threadsafe_list const& other)=delete; 32 | 33 | void push_front(T const& value) 34 | { 35 | std::unique_ptr new_node(new node(value)); 36 | std::lock_guard lk(head.m); 37 | new_node->next=std::move(head.next); 38 | head.next=std::move(new_node); 39 | } 40 | 41 | template 42 | void for_each(Function f) 43 | { 44 | node* current=&head; 45 | std::unique_lock lk(head.m); 46 | while(node* const next=current->next.get()) 47 | { 48 | std::unique_lock next_lk(next->m); 49 | lk.unlock(); 50 | f(*next->data); 51 | current=next; 52 | lk=std::move(next_lk); 53 | } 54 | } 55 | 56 | template 57 | std::shared_ptr find_first_if(Predicate p) 58 | { 59 | node* current=&head; 60 | std::unique_lock lk(head.m); 61 | while(node* const next=current->next.get()) 62 | { 63 | std::unique_lock next_lk(next->m); 64 | lk.unlock(); 65 | if(p(*next->data)) 66 | { 67 | return next->data; 68 | } 69 | current=next; 70 | lk=std::move(next_lk); 71 | } 72 | return std::shared_ptr(); 73 | } 74 | 75 | template 76 | void remove_if(Predicate p) 77 | { 78 | node* current=&head; 79 | std::unoque_lock lk(head.m); 80 | while(node* const next=current->next.get()) 81 | { 82 | std::unique_lock next_lk(next->m); 83 | if(p(*next->data)) 84 | { 85 | std::unique_ptr old_next=std::move(current->next); 86 | current->next=std::move(next->next); 87 | next_lk.unlock(); 88 | } 89 | else 90 | { 91 | lk.unlock(); 92 | current=next; 93 | lk=std::move(next_lk); 94 | } 95 | } 96 | } 97 | }; -------------------------------------------------------------------------------- /chapter08/example8_01.cpp: -------------------------------------------------------------------------------- 1 | 
//使用待排序块栈的并行快速排序 2 | template 3 | struct sorter 4 | { 5 | struct chunk_to_sort 6 | { 7 | std::list data; 8 | std::promise > promise; 9 | }; 10 | 11 | thread_safe_stack chunks; //未排序块 12 | std::vector threads; //线程集 13 | unsigned const max_thread_count; 14 | std::atomic end_of_data; 15 | 16 | sorter(): 17 | max_thread_count(std::thread::hardware_concurrency()-1), 18 | end_of_data(false) 19 | {} 20 | 21 | ~sorter() 22 | { 23 | end_of_data=true; 24 | 25 | for(unsigned i=0;i chunk=chunks.pop(); 34 | if(chunk) 35 | { 36 | sort_chunk(chunk); 37 | } 38 | } 39 | 40 | //完成排序并压入栈 41 | std::list do_sort(std::List& chunk_data) 42 | { 43 | if(chunk_data.empty()) 44 | { 45 | return chunk_data; 46 | } 47 | 48 | std::list result; 49 | result.splice(result.begin(),chunk_data,chunk_data.begin()); 50 | T const& partition_val=*result.begin(); 51 | 52 | typename std::list::iterator divide_point= 53 | std::partition(chunk_data.begin(),chunk_data.end(),[&](T const& val){ 54 | return val < partition_val; 55 | }); 56 | 57 | chunk_to_sort new_lower_chunk; 58 | new_lower_chunk.data.splice(new_lower_chunk.data.end(), 59 | chunk_data,chunk_data.begin(),divide_point); 60 | 61 | std::future > new_lower=new_lower_chunk.promise.get_future(); 62 | chunks.push(std::move(new_lower_chunk)); 63 | if(threads.size()::sort_thread,this)); 66 | } 67 | 68 | std::list new_higher(do_sort(chunk_data)); 69 | 70 | result.splice(result.end(),new_higher); 71 | while(new_lower.wait_for(std::chrono::second(0)) != std::future_status::ready) 72 | { 73 | try_sort_chunk(); 74 | } 75 | 76 | result.splice(result.begin(),new_lower.get()); 77 | return result; 78 | } 79 | 80 | void sort_chunk(boost::shared_ptr const& chunk) 81 | { 82 | chunk->promise.set_value(do_sort(chunk->data)); 83 | } 84 | 85 | void sort_thread() 86 | { 87 | while(!end_of_data) 88 | { 89 | try_sort_chunk(); 90 | std::this_threads::yield(); 91 | } 92 | } 93 | }; 94 | 95 | template 96 | std::list parallel_quick_sort(std::list input) 97 | { 98 | if(input.empty()) 99 | { 100 | return input; 101 | } 102 | sorter s; 103 | 104 | return s.do_sort(input); 105 | } -------------------------------------------------------------------------------- /chapter08/example8_11.cpp: -------------------------------------------------------------------------------- 1 | //通过划分问题来并行计算分段的和 2 | //简单来说还是分块累加 3 | template 4 | void parallel_partial_sum(Iterator first,Iterator last) 5 | { 6 | typedef typename Iterator::value_type value_type; 7 | 8 | struct process_chunk 9 | { 10 | void operator()(Iterator begin,Iterator last, 11 | std::future* previous_end_value, 12 | std::promise* end_value) 13 | { 14 | try 15 | { 16 | Iterator end=last; 17 | ++end; 18 | std::partial_sum(begin,end,begin); 19 | if(previous_end_value) //这是否为第一个块 20 | { 21 | value_type& addend=previous_end_value->get(); 22 | *last+=addend; 23 | if(end_value) 24 | { 25 | end_value->set_value(*last); 26 | } 27 | std::for_each(begin,last,[addend](value_type& item) 28 | { 29 | item+=addend; 30 | }); 31 | } 32 | else if(end_value) 33 | { 34 | end_value->set_value(*last); 35 | } 36 | } 37 | catch(...) 
38 | { 39 | if(end_value) 40 | { 41 | end_value->set_exception(std::current_exception()); 42 | } 43 | else 44 | { 45 | throw; 46 | } 47 | } 48 | } 49 | }; 50 | 51 | unsigned long const length=std::distance(first,last); 52 | 53 | if(!length) 54 | return last; 55 | 56 | unsigned long const min_per_thread=25; 57 | unsigned long const max_threads=(length+min_per_thread-1)/min_per_thread; 58 | 59 | unsigned long const hardware_threads=std::thread::hardware_concurrency(); 60 | 61 | unsigned long const num_threads=std::min(hardware_threads!=0?hardware_threads:2,max_threads); 62 | 63 | unsigned long const block_size=length/num_threads; 64 | 65 | typedef typename Iterator::value_type value_type; 66 | 67 | std::vector threads(num_threads-1); 68 | std::vector > end_values(num_threads-1); 69 | std::vector > previous_end_values; 70 | previous_end_values.reserve(num_threads-1); 71 | join_threads joiner(threads); 72 | 73 | Iterator block_start=first; 74 | for(unsigned long i=0;i<(num_threads-1);++i) 75 | { 76 | Iterator block_last=block_start; 77 | std::advance(block_last,block_size-1); 78 | threads[i]=std::thread(process_chunk(), 79 | block_start,block_last, 80 | (i!=0)?&previous_end_values[i-1]:0, 81 | &end_values[i]); 82 | block_start=block_last; 83 | ++block_start; 84 | previous_end_values.push_back(end_values[i].get_future()); 85 | } 86 | Iterator final_element=block_start; 87 | std::advance(final_element,std::distance(block_start,last)-1); 88 | process_chunk()(block_start,final_element, 89 | (num_threads-1)?&previous_end_values.back():0, 90 | 0); 91 | } -------------------------------------------------------------------------------- /chapter09/example9_08.cpp: -------------------------------------------------------------------------------- 1 | //使用工作窃取的线程池 2 | class thread_pool 3 | { 4 | typedef function_wrapper task_type; 5 | 6 | std::atomic_bool done; 7 | thread_safe_queue pool_work_queue; 8 | //每个线程都拥有一个work_stealing_queue,这个队列存储在一个工作队列的表中 9 | std::vector > queues; 10 | std::vector threads; 11 | join_threads joiner; 12 | 13 | static thread_local work_stealing_queue* local_work_queue; 14 | static thread_local unsigned my_index; 15 | 16 | void worker_thread(unsigned my_index_) 17 | { 18 | my_index=my_index_; 19 | local_work_queue=queues[my_index].get(); 20 | while(!done) 21 | { 22 | run_pending_task(); 23 | } 24 | } 25 | 26 | bool pop_task_from_local_queue(task_type& task) 27 | { 28 | return local_work_queue && local_work_queue->try_pop(task); 29 | } 30 | 31 | bool pop_task_from_pool_queue(task_type& task) 32 | { 33 | return pool_work_queue.try_pop(task); 34 | } 35 | 36 | bool pop_task_from_other_thread_queue(task_type& task) 37 | { 38 | for(unsigned i=0;itry_steal(task)) 42 | { 43 | return true; 44 | } 45 | } 46 | return false; 47 | } 48 | 49 | public: 50 | thread_pool(): 51 | done(false),joiner(threads) 52 | { 53 | unsigned const thread_count=std::thread::hardware_concurrency(); 54 | 55 | try 56 | { 57 | for(unsigned i=0;i(new work_stealing_queue)); 61 | threads.push_back(std::thread(&thread_pool::worker_thread,this,i)); 62 | } 63 | } 64 | catch(...) 
65 | { 66 | done=true; 67 | throw; 68 | } 69 | } 70 | 71 | ~thread_pool() 72 | { 73 | done=true; 74 | } 75 | 76 | template 77 | std::future::type> submit(FunctionType f) 78 | { 79 | typedef typename std::result_of::type result_type; 80 | 81 | std::packaged_task task(f); 82 | std::future res(task.get_future()); 83 | if(local_work_queue) 84 | { 85 | local_work_queue->push(std::move(task)); 86 | } 87 | else 88 | { 89 | pool_work_queue.push(std::move(task)); 90 | } 91 | return res; 92 | } 93 | 94 | void run_pending_task() 95 | { 96 | task_type task; 97 | if(pop_task_from_local_queue(task) || pop_task_from_pool_queue(task) || pop_task_from_other_thread_queue(task)) 98 | { 99 | task(); 100 | } 101 | else 102 | { 103 | std::this_thread::yield(); 104 | } 105 | } 106 | }; -------------------------------------------------------------------------------- /chapter06/example6_11.cpp: -------------------------------------------------------------------------------- 1 | //线程安全查找表 2 | template > 3 | class threadsafe_lookup_table 4 | { 5 | private: 6 | class bucket_type 7 | { 8 | private: 9 | typedef std::pair bucket_value; 10 | typedef std::list bucket_data; 11 | typedef typename bucket_data::iterator bucket_iterator; 12 | 13 | bucket_data data; 14 | mutable boost::shared_mutex mutex; 15 | 16 | bucket_iterator find_entry_for(Key const& key) const 17 | { 18 | return std::find_if(data.begin(),data.end() 19 | ,[&](bucket_value const& item){return item.first==key;}); 20 | } 21 | public: 22 | Value value_for(Key const& key,Value const& default_value) const 23 | { 24 | boost::shared_lock lock(mutex); 25 | bucket_iterator const found_entry=find_entry_for(key); 26 | return (found_entry==data.end())?default_value:found_entry->second; 27 | } 28 | 29 | void add_or_update_mapping(Key const& key,Value const& value) 30 | { 31 | std::unique_lock lock(mutex); 32 | bucket_iterator const found_entry=find_entry_for(key); 33 | if(found_entry==data.end()) 34 | { 35 | data.push_back(bucket_value(key,value)); 36 | } 37 | else 38 | { 39 | found_entry->second=value; 40 | } 41 | } 42 | 43 | void remove_mapping(Key const& key) 44 | { 45 | std::unique_lock lock(mutex); 46 | bucket_iterator const found_entry=find_entry_for(key); 47 | if(found_entry!=data.end()) 48 | { 49 | data.erase(found_entry); 50 | } 51 | } 52 | }; 53 | 54 | std::vector > buckets; 55 | Hash hasher; 56 | 57 | bucket_type& get_bucket(Key const& key) const 58 | { 59 | std::size_t const bucket_index=hasher(key)%buckets.size(); 60 | return *buckets[bucket_index]; 61 | } 62 | public: 63 | typedef Key key_type; 64 | typedef Value mapped_type; 65 | typedef Hash hash_type; 66 | 67 | threadsafe_lookup_table(unsigned num_buckets=19,Hash const& hasher_=Hash()): 68 | buckets(num_buckets),hasher(hasher_) 69 | { 70 | for(unsigned i=0;i的使用意味着这些函数中的访问是互斥的list_contains()将无法再add_to_list()进行修改的半途看到该表。 50 | 51 | 注意:一个迷路的指针或引用,所有的保护都将白费。在[清单3.2 意外地传出对受保护数据的引用](https://github.com/xuyicpp/multi_threading/blob/master/chapter03/example3_2.cpp)展示了这一个错误的做法。 52 | 53 | 发现接口中固有的竞争条件,这是一个粒度锁定的问题,就是说锁定从语句上升到接口了,书中用一个stack类做了一个扩展,详见[清单3.5 一个线程安全栈的详细类定义](https://github.com/xuyicpp/multi_threading/blob/master/chapter03/example3_5.cpp) 54 | 55 | 死锁:问题和解决方案:为了避免死锁,常见的建议是始终使用相同的顺序锁定者两个互斥元。 56 | std::lock函数可以同时锁定两个或更多的互斥元,而没有死锁的风险。 57 | 常见的思路: 58 | - 避免嵌套锁 59 | - 在持有锁时,避免调用用户提供的代码 60 | - 以固定顺序获取锁 61 | 这里有几个简单的事例:[清单3.7 使用锁层次来避免死锁](https://github.com/xuyicpp/multi_threading/blob/master/chapter03/example3_7.cpp)、[清单3.9 
用std::unique_lock灵活锁定](https://github.com/xuyicpp/multi_threading/blob/master/chapter03/example3_9.cpp)
62 | 
63 | Locking at an appropriate granularity
64 | In particular, don't do anything time-consuming, such as file I/O, while holding a lock.
65 | In general, a lock should be held only for the minimum time needed to perform the required operation. That also means time-consuming operations, such as acquiring another lock (even if you know it won't deadlock) or waiting for I/O to complete, shouldn't be done while holding a lock unless absolutely necessary.
66 | [Listing 3.10 Locking one mutex at a time in a comparison operator](https://github.com/xuyicpp/multi_threading/blob/master/chapter03/example3_10.cpp) reduces the time each lock is held, but in doing so it exposes the code to a race condition.
67 | 
68 | - Alternative facilities for protecting shared data
69 | The double-checked locking pattern. Note that this is not quite the lazy-initialized singleton pattern: here the data is actually used after the check.
70 | ```
71 | void undefined_behaviour_with_double_checked_locking()
72 | {
73 |     if(!resource_ptr)
74 |     {
75 |         std::lock_guard<std::mutex> lk(resource_mutex);
76 |         if(!resource_ptr)
77 |         {
78 |             resource_ptr.reset(new some_resource);
79 |         }
80 |     }
81 |     resource_ptr->do_something();
82 | }
83 | ```
84 | This can produce a nasty race condition, because the read outside the lock isn't synchronized with the write done inside the lock by another thread. The race therefore covers not just the pointer itself but also the object it points to.
85 | 
86 | The C++ Standard Library provides std::once_flag and std::call_once to handle this situation. Using std::call_once typically has lower overhead than using a mutex explicitly, especially once initialization has already completed, so it should be preferred. [Listing 3.12 Thread-safe lazy initialization of a class member using std::call_once](https://github.com/xuyicpp/multi_threading/blob/master/chapter03/example3_12.cpp)
87 | 
88 | Protecting rarely updated data structures (a DNS cache, for example): use a reader-writer mutex, which gives a single "writer" thread exclusive access while allowing multiple "reader" threads concurrent shared access.
89 | [Listing 3.13 Protecting a data structure with boost::shared_mutex](https://github.com/xuyicpp/multi_threading/blob/master/chapter03/example3_13.cpp)
90 | 
91 | ## Chapter 4 Synchronizing concurrent operations
92 | - Waiting for an event
93 | 
94 | Use the facilities the C++ Standard Library provides to wait for the event itself. Besides std::condition_variable there is std::condition_variable_any, which can work with anything mutex-like, potentially at some extra cost.
95 | std::condition_variable offers notify_one() and notify_all(). You can also wait with a predicate, wait(lk,[this]{return !data_queue.empty();}), where lk is a std::unique_lock so that the mutex can be unlocked while the condition isn't satisfied and re-locked when the thread is woken.
96 | [Listing 4.1 Waiting for data with std::condition_variable](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_01.cpp)
97 | 
98 | Building a thread-safe queue with condition variables: [Listing 4.2 The std::queue interface](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_02.cpp), [Listing 4.4 Extracting push() and wait_and_pop() from listing 4.1](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_04.cpp). A small end-to-end sketch follows at the end of this chapter's notes.
99 | 
100 | - Waiting for one-off events with futures
101 | 
102 | When a thread doesn't need the result straight away, you can use std::async to start an asynchronous task. Rather than giving you a std::thread object to wait on, std::async returns a std::future object that will eventually hold the function's return value. When you need the value, you just call get() on the future; the calling thread blocks until the future is ready and then receives the value.
103 | [Listing 4.6 Getting the return value of an asynchronous task with std::future](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_06.cpp)
104 | 
105 | std::async lets you pass additional arguments to the function by adding extra arguments to the call, in the same way as std::thread.
106 | [Listing 4.7 Passing arguments to a function with std::async](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_07.cpp)
107 | 
108 | std::packaged_task<> ties a future to a function or callable object. When the std::packaged_task<> object is invoked, it calls the associated function or callable object, makes the future ready, and stores the return value as the associated data.
109 | [Listing 4.9 Running code on a GUI thread using std::packaged_task](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_09.cpp)
110 | 
111 | std::promise provides a way to set a value (of type T) that can later be read through an associated std::future object.
112 | [Listing 4.10 Handling multiple connections from a single thread using promises](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_10.cpp); this is rather like select or poll.
113 | 
114 | Futures can also store exceptions, and std::shared_future allows several threads to wait for the same result.
115 | 
116 | - Waiting with a time limit
117 | 
118 | 1. Duration-based timeouts. 2. Absolute, time-point-based timeouts.
119 | [Listing 4.11 Waiting for a condition variable with a timeout](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_11.cpp)
120 | 
121 | - Using synchronization of operations to simplify code
122 | 
123 | Paradigms for solving synchronization problems: functional programming, where the result of each task depends entirely on its inputs rather than on the external environment, and message passing, as in the ATM state machine, where threads communicate by sending asynchronous messages that drive the state.
124 | [Listing 4.13 Parallel Quicksort using futures](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_13.cpp),
125 | [Listing 4.15 A simple implementation of an ATM logic class](https://github.com/xuyicpp/multi_threading/blob/master/chapter04/example4_15.cpp).
126 | 
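To tie the chapter 4 pieces together, here is a minimal, self-contained sketch (not one of the repository's numbered listings; the name simple_blocking_queue is made up for illustration) of the condition-variable pattern described above: push() notifies after a brief lock, and wait_and_pop() waits on a predicate through a std::unique_lock, so spurious wakeups are handled automatically.

```
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Minimal condition-variable-based queue in the spirit of listings 4.1/4.4.
template<typename T>
class simple_blocking_queue
{
    mutable std::mutex mut;
    std::queue<T> data_queue;
    std::condition_variable data_cond;
public:
    void push(T value)
    {
        {
            std::lock_guard<std::mutex> lk(mut);
            data_queue.push(std::move(value));
        }                       // release the lock before notifying
        data_cond.notify_one(); // wake one waiting consumer
    }

    T wait_and_pop()
    {
        std::unique_lock<std::mutex> lk(mut); // unique_lock: wait() must be able to unlock/relock
        data_cond.wait(lk, [this]{ return !data_queue.empty(); }); // predicate guards against spurious wakeups
        T value = std::move(data_queue.front());
        data_queue.pop();
        return value;
    }
};

int main()
{
    simple_blocking_queue<int> q;
    std::thread producer([&]{
        for(int i = 0; i < 5; ++i)
            q.push(i);
    });
    for(int i = 0; i < 5; ++i)
        std::cout << q.wait_and_pop() << '\n'; // blocks until data is available
    producer.join();
}
```

The same structure extends naturally to try_pop() and a std::shared_ptr-returning overload, as in the fuller listings referenced above.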
127 | ## Chapter 5 The C++ memory model and operations on atomic types
128 | 
129 | This chapter covered the low-level details of the C++11 memory model and the atomic operations that provide the basis for synchronization between threads. That includes the basic atomic types provided by specializations of the std::atomic<> class template, the generic atomic interface provided by the primary std::atomic<> template, the operations on these types, and the intricate details of the various memory-ordering options.
130 | We also looked at fences and how they pair with operations on atomic types to enforce an ordering. Finally, we came back to the beginning and looked at how atomic operations can be used to enforce an ordering between non-atomic operations on separate threads.
131 | 
132 | Every operation on an atomic type takes an optional memory-ordering argument that specifies the required memory-ordering semantics:
133 | - Store operations can have memory_order_relaxed, memory_order_release, or memory_order_seq_cst ordering.
134 | - Load operations can have memory_order_relaxed, memory_order_consume, memory_order_acquire, or memory_order_seq_cst ordering.
135 | - Read-modify-write operations can have memory_order_relaxed, memory_order_consume, memory_order_acquire, memory_order_release, memory_order_acq_rel, or memory_order_seq_cst ordering.
136 | 
137 | The default ordering for all operations is memory_order_seq_cst.
138 | 
139 | The three memory-ordering models for atomic operations:
140 | - Sequentially consistent ordering (memory_order_seq_cst): [Listing 5.4 Sequential consistency implies a total ordering](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_04.cpp).
141 | - Relaxed ordering (memory_order_relaxed): [Listing 5.6 Relaxed operations on multiple threads](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_06.cpp).
142 | - Acquire-release ordering (memory_order_consume, memory_order_acquire, memory_order_release, and memory_order_acq_rel): [Listing 5.9 Transitive synchronization using acquire and release ordering](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_09.cpp), [Listing 5.10 Using std::memory_order_consume to synchronize data (an atomic load of a pointer to some data)](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_10.cpp). A tiny acquire-release sketch follows at the end of this chapter's notes.
143 | 
144 | synchronizes-with:
145 | - A store to an atomic variable and a load of that variable from another thread establish a synchronizes-with relationship: [Listing 5.11 Reading values from a queue with atomic operations](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_11.cpp)
146 | - A release fence in one thread paired with an acquire fence in another can likewise establish a synchronizes-with relationship: [Listing 5.12 Relaxed operations can be ordered with fences](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_12.cpp)
147 | 
148 | happens-before: this relation is transitive: if A happens before B, and B happens before C, then A happens before C, including across threads.
149 | - [Listing 5.8 Acquire-release operations can impose an ordering on relaxed operations](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_08.cpp)
150 | - [Listing 5.13 Enforcing an ordering on non-atomic operations](https://github.com/xuyicpp/multi_threading/blob/master/chapter05/example5_13.cpp)
151 | 
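As a concrete, deliberately tiny illustration of acquire-release ordering, separate from the numbered listings above: the writer publishes a plain int and then sets an atomic flag with a release store; the reader spins with acquire loads, so once it sees the flag it is guaranteed to see the published value as well.

```
#include <atomic>
#include <cassert>
#include <thread>

std::atomic<bool> ready(false);
int payload = 0;                                    // ordinary (non-atomic) data published via the flag

void writer()
{
    payload = 42;                                   // A: plain write
    ready.store(true, std::memory_order_release);   // B: release store "publishes" A
}

void reader()
{
    while(!ready.load(std::memory_order_acquire))   // C: acquire load synchronizes-with B
        std::this_thread::yield();
    assert(payload == 42);                          // guaranteed: A happens-before this read
}

int main()
{
    std::thread t1(writer), t2(reader);
    t1.join();
    t2.join();
}
```

If both the store and the load used memory_order_relaxed, the assert would be allowed to fire; that is the kind of behavior the relaxed-ordering listings above demonstrate.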
152 | ## Chapter 6 Designing lock-based concurrent data structures
153 | 
154 | When designing a data structure for concurrent access, there are two aspects to consider:
155 | 1. Making accesses safe
156 | - Ensure that no thread can see a state in which the data structure's invariants have been broken by another thread.
157 | - Avoid the race conditions inherent in the interface of the data structure by providing functions for complete operations rather than for individual operation steps.
158 | - Pay attention to how the data structure preserves its invariants when exceptions are thrown.
159 | - Minimize the opportunities for deadlock when using the data structure, by restricting the scope of locks and avoiding nested locks.
160 | 2. Enabling genuine concurrent access
161 | - Can the scope of locks be restricted so that parts of an operation are performed outside the lock?
162 | - Can different parts of the data structure be protected by different mutexes?
163 | - Do all operations require the same level of protection?
164 | - Can a small change to the data structure improve the opportunities for concurrency without affecting the operational semantics?
165 | 
166 | For some common data structures (a stack, a queue, a hash map, and a linked list) we looked at how to apply these guidelines when designing for concurrent access, using locks to protect the data and prevent data races.
167 | 
168 | 
169 | - A thread-safe stack using locks
170 | [Listing 6.1 A class definition for a thread-safe stack](https://github.com/xuyicpp/multi_threading/blob/master/chapter06/example6_01.cpp)
171 | - A thread-safe queue using fine-grained locks and condition variables
172 | [Listing 6.7 A thread-safe queue with locking and waiting: internals and interface](https://github.com/xuyicpp/multi_threading/blob/master/chapter06/example6_07.cpp)
173 | - A thread-safe lookup table using locks
174 | [Listing 6.11 A thread-safe lookup table](https://github.com/xuyicpp/multi_threading/blob/master/chapter06/example6_11.cpp)
175 | - A thread-safe linked list using locks
176 | [Listing 6.13 A thread-safe list with iteration support](https://github.com/xuyicpp/multi_threading/blob/master/chapter06/example6_13.cpp)
177 | 
178 | ## Chapter 7 Designing lock-free concurrent data structures
179 | 
180 | - Implementations of data structures designed for concurrency without using locks
181 | - Techniques for managing memory in lock-free data structures
182 | - Simple guidelines to help with writing lock-free data structures
183 | 
184 | ### Definitions
185 | 
186 | Algorithms and data structures that use mutexes, condition variables, and futures to synchronize data are called blocking. Data structures and algorithms that don't use blocking library functions are called nonblocking. Not all nonblocking data structures are lock-free, though.
187 | 
188 | The code in [Listing 7.1 Implementation of a spin-lock mutex using std::atomic_flag](https://github.com/xuyicpp/multi_threading/blob/master/chapter07/example7_01.cpp) contains no blocking calls, yet it is not lock-free: it is still a mutex, and it can still be locked by only one thread at a time.
189 | 
190 | For a data structure to qualify as lock-free, more than one thread must be able to access it concurrently.
191 | 
192 | A wait-free data structure is a lock-free data structure with the additional property that every thread accessing it can complete its operation in a bounded number of steps, regardless of the behavior of other threads.
193 | 
194 | ### Pros and cons of lock-free data structures
195 | 
196 | Advantages:
197 | - 1. They enable the maximum degree of concurrency.
198 | - 2. Robustness: if a thread dies while holding a lock, the data structure is broken for good. If a thread dies partway through an operation on a lock-free data structure, nothing is lost except that thread's own data; the other threads can carry on normally.
199 | 
200 | Disadvantages:
201 | - 1. Lock-free data structures cannot deadlock, although livelock is possible. Livelock degrades performance rather than causing long-term problems, but it is still something to watch for. By definition, wait-free code cannot suffer from livelock, because there is an upper bound on the number of steps an operation takes; on the other hand, such algorithms are more complex than the alternatives and may need more steps even when no other thread is touching the data structure.
202 | - 2. They may reduce overall performance: atomic operations can be much slower than their non-atomic counterparts; a lock-free design typically needs more atomic operations than the mutex-locking code of a lock-based one; and the cache ping-pong caused by several threads accessing the same atomic variables can become a significant performance drain.
203 | 
204 | Summary:
205 | - Before choosing between a lock-based and a lock-free data structure, it is important to decide what you are comparing: worst-case wait time, average wait time, total execution time, and so on.
206 | 
207 | ### Examples of lock-free data structures
208 | 
209 | Listings 7.2-7.12 develop a thread-safe stack without locks, and listings 7.13-7.21 a lock-free thread-safe queue.
210 | (I don't fully understand this part yet and will come back to it later.) The dummy node, by the way, is a very interesting concept.
211 | 
212 | ### Guidelines for writing lock-free data structures
213 | 
214 | - Use std::memory_order_seq_cst for prototyping (get it working with sequentially consistent ordering first, then try the cleverer orderings)
215 | - Use a lock-free memory-reclamation scheme (1. wait until no thread is accessing the data structure and then delete all objects pending deletion; 2. use hazard pointers to identify that a thread is accessing a particular object; 3. reference-count the objects and only delete them when there are no outstanding references)
216 | - Watch out for the ABA problem: thread 1 reads value A from atomic variable x for a compare/exchange and then stalls; thread 2 changes x to B; thread 3 changes it back to A (perhaps reusing the very same address); thread 1's compare/exchange then succeeds and corrupts the data structure.
217 | - A common fix for the ABA problem is to pair the variable x with an ABA counter. Using free lists or otherwise recycling nodes, rather than returning them to the allocator, makes ABA problems more likely.
218 | - Identify busy-wait loops and help the other thread (make the pending data an atomic member and set it with compare/exchange, so a stalled thread's work can be completed by others)
219 | 
220 | ## Chapter 8 Designing concurrent code
221 | 
222 | ### Techniques for dividing work between threads
223 | - Dividing data between threads before processing begins
224 | - Dividing data recursively
225 | - Dividing work by task type
226 | 
227 | ### Factors affecting the performance of concurrent code
228 | - How many processors there are
229 | - Data contention and cache ping-pong: when processors frequently have to wait for one another, that is high contention. In the loop below, the data for counter is passed back and forth between the processors' caches; this is called cache ping-pong, and it can seriously hurt performance.
230 | 
231 | ```
232 | std::atomic<unsigned long> counter(0);
233 | void processing_loop()
234 | {
235 |     while(counter.fetch_add(1,std::memory_order_relaxed)<100000000)
236 |     {
237 |         do_something();
238 |     }
239 | }
240 | ```
241 | - False sharing: the smallest unit a processor cache works with is not an individual memory address but a block of memory called a cache line, typically 32-64 bytes depending on the processor. When such a line is shared between threads but the data inside it is not, that is called false sharing (see the padded-counter sketch after this list).
242 | - How close together the data is
243 | - Oversubscription and excessive task switching
244 | 
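As a sketch of how to sidestep both the cache ping-pong above and false sharing (not repository code; it assumes a 64-byte cache line, which is typical but not universal): give each thread its own counter, padded out to a full cache line, and only combine the results once at the end.

```
#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

// One counter per thread, each padded to an assumed 64-byte cache line,
// so incrementing one counter doesn't keep invalidating the others' lines.
struct padded_counter
{
    std::atomic<unsigned long> value{0};
    char pad[64 - sizeof(std::atomic<unsigned long>)]; // manual padding keeps this valid pre-C++17
};

int main()
{
    unsigned const hw = std::thread::hardware_concurrency();
    unsigned const num_threads = hw ? hw : 2;

    std::vector<padded_counter> counters(num_threads);
    std::vector<std::thread> threads;

    for(unsigned i = 0; i < num_threads; ++i)
    {
        threads.emplace_back([&counters, i]{
            for(unsigned long n = 0; n < 1000000; ++n)
                counters[i].value.fetch_add(1, std::memory_order_relaxed); // touches only this thread's line
        });
    }
    for(auto& t : threads)
        t.join();

    unsigned long total = 0;
    for(auto const& c : counters)
        total += c.value.load();      // combine once, after the parallel work is done
    std::cout << "total=" << total << '\n';
}
```

With C++17 you could use alignas(64) on the struct instead of the explicit pad array; the idea is the same: keep each thread's hot data on its own cache line.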
245 | ### Designing data structures for multithreaded performance
246 | When designing your data structures for multithreaded performance, think about contention, false sharing, and data proximity.
247 | - Dividing array elements for complex operations
248 | - Data access patterns in other data structures
249 | 
250 | ### Additional considerations when designing for concurrency
251 | - Exception safety in parallel algorithms: 1. check in the destructor of a guard object; 2. rely on the exception safety of std::async()
252 | - Scalability and Amdahl's law: in short, design to maximize concurrency
253 | - Hiding latency with multiple threads
254 | - Improving responsiveness with concurrency
255 | 
256 | ### Designing concurrent code in practice
257 | - A parallel implementation of std::for_each: [Listing 8.7 A parallel version of std::for_each](https://github.com/xuyicpp/multi_threading/blob/master/chapter08/example8_07.cpp), [Listing 8.8 A parallel version of std::for_each using std::async](https://github.com/xuyicpp/multi_threading/blob/master/chapter08/example8_08.cpp)
258 | - A parallel implementation of std::find: [Listing 8.9 An implementation of a parallel find algorithm](https://github.com/xuyicpp/multi_threading/blob/master/chapter08/example8_09.cpp), [Listing 8.10 An implementation of a parallel find algorithm using std::async](https://github.com/xuyicpp/multi_threading/blob/master/chapter08/example8_10.cpp)
259 | - A parallel implementation of std::partial_sum: [Listing 8.11 Calculating partial sums in parallel by dividing the problem](https://github.com/xuyicpp/multi_threading/blob/master/chapter08/example8_11.cpp), [Listing 8.13 A parallel implementation of partial_sum by pairwise updates](https://github.com/xuyicpp/multi_threading/blob/master/chapter08/example8_13.cpp)
260 | - Barriers: a synchronization mechanism that makes threads wait until the required number of threads has reached the barrier. [Listing 8.12 A simple barrier class](https://github.com/xuyicpp/multi_threading/blob/master/chapter08/example8_12.cpp)
261 | 
262 | ## Chapter 9 Advanced thread management
263 | 
264 | In this chapter we looked at a number of "advanced" thread-management techniques: thread pools and interrupting threads.
265 | 
266 | - [Listing 9.1 A simple thread pool](https://github.com/xuyicpp/multi_threading/blob/master/chapter09/example9_01.cpp)
267 | - [Listing 9.9 A basic implementation of interruptible_thread](https://github.com/xuyicpp/multi_threading/blob/master/chapter09/example9_09.cpp)
268 | 
269 | You have seen how thread-local work queues can reduce synchronization overhead and potentially improve the throughput of a thread pool,
270 | 
271 | - [Listing 9.6 A thread pool with thread-local work queues](https://github.com/xuyicpp/multi_threading/blob/master/chapter09/example9_06.cpp)
272 | 
273 | and how running other queued tasks while waiting for subtasks to complete can reduce the chance of deadlock.
274 | 
275 | - [Listing 9.8 A thread pool that uses work stealing](https://github.com/xuyicpp/multi_threading/blob/master/chapter09/example9_08.cpp)
276 | 
277 | We also looked at various ways of allowing one thread to interrupt the processing of another, such as dedicated interruption points
278 | ```
279 | void interruption_point()
280 | {
281 |     if(this_thread_interrupt_flag.is_set())
282 |     {
283 |         throw thread_interrupted();
284 |     }
285 | }
286 | ```
287 | and how to make functions that would otherwise block interruptible.
288 | ```
289 | template<typename T>
290 | void interruptible_wait(std::future<T>& uf)
291 | {
292 |     // Wait until either the interrupt flag is set or the future is ready,
293 |     // but block on the future for no more than 1ms at a time.
294 |     while(!this_thread_interrupt_flag.is_set())
295 |     {
296 |         if(uf.wait_for(std::chrono::milliseconds(1))==std::future_status::ready)
297 |             break;
298 |     }
299 |     interruption_point();
300 | }
301 | ```
302 | 
303 | ## Chapter 10 Testing and debugging multithreaded applications
304 | 
305 | ### Types of concurrency-related bugs
306 | 
307 | Unwanted blocking
308 | - Deadlock
309 | - Livelock
310 | - Blocking on I/O or other external input
311 | 
312 | Race conditions
313 | - Data races
314 | - Broken invariants
315 | - Lifetime issues
316 | 
317 | ### Techniques for locating concurrency-related bugs
318 | 
319 | #### Reviewing code to locate potential bugs
320 | - Is the data loaded by this thread still valid? Could it have been modified by other threads?
321 | - If you assume that another thread could be modifying the data, what would the consequences be, and how do you ensure that this never happens?
322 | 
323 | #### Locating concurrency-related bugs by testing
324 | 
325 | #### Designing for testability
326 | - The responsibilities of each function and class are clearly divided
327 | - Functions are short and to the point
328 | - Your tests can take complete control of the environment surrounding the code under test
329 | - The code that performs the particular operation being tested is kept close together rather than spread throughout the system
330 | - Think about how to test the code before you write it
331 | 
332 | #### Multithreaded testing techniques
333 | - Brute-force (exhaustive stress) testing (see the small stress-test sketch at the end of this file)
334 | - Combination simulation testing
335 | - Detecting the problems exposed by tests with a special library
336 | 
--------------------------------------------------------------------------------
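To make the brute-force testing idea from chapter 10 concrete, here is a small, self-contained stress-test sketch (not part of the repository; the names and numbers are made up for illustration): all workers line up behind a std::shared_future "gate" so they start as close to simultaneously as possible, hammer the code under test (here just a mutex-protected counter), and the test then checks an invariant that must hold if that code is thread-safe.

```
#include <cassert>
#include <future>
#include <iostream>
#include <mutex>
#include <vector>

int main()
{
    unsigned const threads_count = 8;
    unsigned long const iterations = 100000;

    unsigned long counter = 0;        // shared state under test
    std::mutex counter_mutex;         // the protection being tested

    std::promise<void> go;
    std::shared_future<void> ready(go.get_future()); // gate all workers wait on

    std::vector<std::future<void>> workers;
    for(unsigned i = 0; i < threads_count; ++i)
    {
        workers.push_back(std::async(std::launch::async,
            [&counter, &counter_mutex, ready]{
                ready.wait();         // line up behind the gate to maximize contention
                for(unsigned long n = 0; n < iterations; ++n)
                {
                    std::lock_guard<std::mutex> lk(counter_mutex);
                    ++counter;
                }
            }));
    }

    go.set_value();                   // open the gate: everyone races at once
    for(auto& w : workers)
        w.get();                      // also rethrows any exception from a worker

    assert(counter == threads_count * iterations); // invariant: no lost updates
    std::cout << "counter=" << counter << '\n';
}
```

Even run many times on a machine with more cores than threads_count, a test like this is only probabilistic: it can reveal races, but it can never prove their absence.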