├── .github └── ISSUE_TEMPLATE │ ├── accessibility.yml │ ├── config.yml │ ├── errata.yml │ └── technical-question.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── examples ├── ch1-01-hello.rs ├── ch1-02-hello-join.rs ├── ch1-03-spawn-closure.rs ├── ch1-04-scoped-threads.rs ├── ch1-05-rc.rs ├── ch1-06-cell.rs ├── ch1-07-refcell.rs ├── ch1-08-mutex.rs ├── ch1-09-sleep-before-unlock.rs ├── ch1-10-unlock-before-sleep.rs ├── ch1-11-thread-parking.rs ├── ch1-12-condvar.rs ├── ch2-01-stop-flag.rs ├── ch2-02-progress-reporting.rs ├── ch2-03-progress-reporting-unpark.rs ├── ch2-04-lazy-init.rs ├── ch2-05-fetch-add.rs ├── ch2-06-progress-reporting-multiple-threads.rs ├── ch2-07-statistics.rs ├── ch2-08-id-allocation.rs ├── ch2-09-id-allocation-panic.rs ├── ch2-10-id-allocation-subtract-before-panic.rs ├── ch2-11-increment-with-compare-exchange.rs ├── ch2-12-id-allocation-without-overflow.rs ├── ch2-13-lazy-one-time-init.rs ├── ch3-01-relaxed.rs ├── ch3-02-spawn-join.rs ├── ch3-03-total-modification-order.rs ├── ch3-04-total-modification-order-2.rs ├── ch3-05-out-of-thin-air.rs ├── ch3-06-release-acquire.rs ├── ch3-07-release-acquire-unsafe.rs ├── ch3-08-lock.rs ├── ch3-09-lazy-init-box.rs ├── ch3-10-seqcst.rs ├── ch3-11-fence.rs └── ch8-01-futex.rs └── src ├── ch4_spin_lock ├── mod.rs ├── s1_minimal.rs ├── s2_unsafe.rs └── s3_guard.rs ├── ch5_channels ├── mod.rs ├── s1_simple.rs ├── s2_unsafe.rs ├── s3_checks.rs ├── s3_single_atomic.rs ├── s4_types.rs ├── s5_borrowing.rs └── s6_blocking.rs ├── ch6_arc ├── mod.rs ├── s1_basic.rs ├── s2_weak.rs └── s3_optimized.rs ├── ch9_locks ├── condvar_1.rs ├── condvar_2.rs ├── mod.rs ├── mutex_1.rs ├── mutex_2.rs ├── mutex_3.rs ├── rwlock_1.rs ├── rwlock_2.rs └── rwlock_3.rs └── lib.rs /.github/ISSUE_TEMPLATE/accessibility.yml: -------------------------------------------------------------------------------- 1 | name: Accessibility issue 2 | description: Report an issue about the accessibility of the online book 3 | labels: 
4 | - accessibility 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | If you encounter any issues with the accessibility of the book on https://marabos.nl/atomics/, please let me know so I can improve it. 10 | 11 | If you encounter any issues with any version published by O'Reilly, you can contact them at bookquestions@oreilly.com. 12 | - type: textarea 13 | id: where 14 | attributes: 15 | label: Where the issue appears 16 | description: If this issue appears in a specific place, please include a link to the relevant section on https://marabos.nl/atomics/. 17 | - type: textarea 18 | id: issue 19 | attributes: 20 | label: Description of the issue 21 | validations: 22 | required: true 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | contact_links: 2 | - name: O'Reilly errata page 3 | about: Errata for the print, pdf, and epub versions published by O'Reilly 4 | url: https://www.oreilly.com/catalog/errata.csp?isbn=9781098119447 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/errata.yml: -------------------------------------------------------------------------------- 1 | name: Errata 2 | description: Report a typo or other error in the book. 3 | labels: 4 | - errata 5 | - unconfirmed 6 | body: 7 | - type: markdown 8 | attributes: 9 | value: | 10 | Thanks for taking the time to report an error! 11 | 12 | Feel free to report content issues with any (print, ebook, or online) version of the book. Please first check if the issue still appears in the online version at https://marabos.nl/atomics/, as issues that you encounter in other versions might have already been fixed. 
13 | 14 | Errata in the print, pdf, and epub versions of the book and are tracked by O'Reilly on [their errata page](https://www.oreilly.com/catalog/errata.csp?isbn=9781098119447). Please report any formatting or other issues that only affect the versions published by O'Reilly there. 15 | 16 | Fixes are immediately visible on https://marabos.nl/atomics/. Fixes will propagate to new printed, pdf, and epub copies with a delay. 17 | - type: dropdown 18 | id: version 19 | attributes: 20 | label: Type of error 21 | options: 22 | - Typo 23 | - Language or grammar issue 24 | - Formatting error 25 | - Minor technical mistake 26 | - Serious technical mistake 27 | validations: 28 | required: true 29 | - type: textarea 30 | id: where 31 | attributes: 32 | label: Location of the error 33 | description: Please include a link to the relevant section https://marabos.nl/atomics/ 34 | validations: 35 | required: true 36 | - type: textarea 37 | id: what 38 | attributes: 39 | label: Description of the error 40 | validations: 41 | required: true 42 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/technical-question.yml: -------------------------------------------------------------------------------- 1 | name: Technical question 2 | description: Ask a technical question about the content of the book. 3 | labels: 4 | - question 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Feel free to use this to ask any technical questions about the book, but please note that I might not have time to answer all questions. 10 | - type: textarea 11 | id: about 12 | attributes: 13 | label: The content that the question is about 14 | description: If possible, please include a link to the relevant section on https://marabos.nl/atomics/ or the relevant example in this repository. 
15 | - type: textarea 16 | id: question 17 | attributes: 18 | label: The question 19 | validations: 20 | required: true 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-atomics-and-locks" 3 | version = "1.0.0" 4 | edition = "2021" 5 | rust-version = "1.66.0" 6 | 7 | [dependencies] 8 | atomic-wait = "1.0.1" 9 | 10 | [target.'cfg(target_os = "linux")'.dependencies] 11 | libc = "0.2.138" 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | You may use all code in this repository for any purpose. 2 | 3 | Attribution is appreciated, but not required. 4 | An attribution usually includes the book title, author, 5 | publisher, and ISBN. For example: "Rust Atomics and 6 | Locks by Mara Bos (O’Reilly). Copyright 2023 Mara Bos, 7 | 978-1-098-11944-7." 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This repository contains the code examples, data structures, and links from 2 | [Rust Atomics and Locks](https://marabos.nl/atomics/). 3 | 4 | The examples from chapters 1, 2, 3, and 8 can be found in [examples/](examples/). 5 | The data structures from chapters 4, 5, 6, and 9 can be found in [src/](src/). 
6 | 7 | ### Chapter 1 — Basics of Rust Concurrency 8 | 9 | - [examples/ch1-01-hello.rs](examples/ch1-01-hello.rs) 10 | - [examples/ch1-02-hello-join.rs](examples/ch1-02-hello-join.rs) 11 | - [examples/ch1-03-spawn-closure.rs](examples/ch1-03-spawn-closure.rs) 12 | - [examples/ch1-04-scoped-threads.rs](examples/ch1-04-scoped-threads.rs) 13 | - [examples/ch1-05-rc.rs](examples/ch1-05-rc.rs) 14 | - [examples/ch1-06-cell.rs](examples/ch1-06-cell.rs) 15 | - [examples/ch1-07-refcell.rs](examples/ch1-07-refcell.rs) 16 | - [examples/ch1-08-mutex.rs](examples/ch1-08-mutex.rs) 17 | - [examples/ch1-09-sleep-before-unlock.rs](examples/ch1-09-sleep-before-unlock.rs) 18 | - [examples/ch1-10-unlock-before-sleep.rs](examples/ch1-10-unlock-before-sleep.rs) 19 | - [examples/ch1-11-thread-parking.rs](examples/ch1-11-thread-parking.rs) 20 | - [examples/ch1-12-condvar.rs](examples/ch1-12-condvar.rs) 21 | 22 | ### Chapter 2 — Atomics 23 | 24 | - [examples/ch2-01-stop-flag.rs](examples/ch2-01-stop-flag.rs) 25 | - [examples/ch2-02-progress-reporting.rs](examples/ch2-02-progress-reporting.rs) 26 | - [examples/ch2-03-progress-reporting-unpark.rs](examples/ch2-03-progress-reporting-unpark.rs) 27 | - [examples/ch2-04-lazy-init.rs](examples/ch2-04-lazy-init.rs) 28 | - [examples/ch2-05-fetch-add.rs](examples/ch2-05-fetch-add.rs) 29 | - [examples/ch2-06-progress-reporting-multiple-threads.rs](examples/ch2-06-progress-reporting-multiple-threads.rs) 30 | - [examples/ch2-07-statistics.rs](examples/ch2-07-statistics.rs) 31 | - [examples/ch2-08-id-allocation.rs](examples/ch2-08-id-allocation.rs) 32 | - [examples/ch2-09-id-allocation-panic.rs](examples/ch2-09-id-allocation-panic.rs) 33 | - [examples/ch2-10-id-allocation-subtract-before-panic.rs](examples/ch2-10-id-allocation-subtract-before-panic.rs) 34 | - [examples/ch2-11-increment-with-compare-exchange.rs](examples/ch2-11-increment-with-compare-exchange.rs) 35 | - 
[examples/ch2-12-id-allocation-without-overflow.rs](examples/ch2-12-id-allocation-without-overflow.rs) 36 | - [examples/ch2-13-lazy-one-time-init.rs](examples/ch2-13-lazy-one-time-init.rs) 37 | 38 | ### Chapter 3 — Memory Ordering 39 | 40 | - [examples/ch3-01-relaxed.rs](examples/ch3-01-relaxed.rs) 41 | - [examples/ch3-02-spawn-join.rs](examples/ch3-02-spawn-join.rs) 42 | - [examples/ch3-03-total-modification-order.rs](examples/ch3-03-total-modification-order.rs) 43 | - [examples/ch3-04-total-modification-order-2.rs](examples/ch3-04-total-modification-order-2.rs) 44 | - [examples/ch3-05-out-of-thin-air.rs](examples/ch3-05-out-of-thin-air.rs) 45 | - [examples/ch3-06-release-acquire.rs](examples/ch3-06-release-acquire.rs) 46 | - [examples/ch3-07-release-acquire-unsafe.rs](examples/ch3-07-release-acquire-unsafe.rs) 47 | - [examples/ch3-08-lock.rs](examples/ch3-08-lock.rs) 48 | - [examples/ch3-09-lazy-init-box.rs](examples/ch3-09-lazy-init-box.rs) 49 | - [examples/ch3-10-seqcst.rs](examples/ch3-10-seqcst.rs) 50 | - [examples/ch3-11-fence.rs](examples/ch3-11-fence.rs) 51 | 52 | ### Chapter 4 — Building Our Own Spin Lock 53 | 54 | - [src/ch4_spin_lock/s1_minimal.rs](src/ch4_spin_lock/s1_minimal.rs) 55 | - [src/ch4_spin_lock/s2_unsafe.rs](src/ch4_spin_lock/s2_unsafe.rs) 56 | - [src/ch4_spin_lock/s3_guard.rs](src/ch4_spin_lock/s3_guard.rs) 57 | 58 | ### Chapter 5 — Building Our Own Channels 59 | 60 | - [src/ch5_channels/s1_simple.rs](src/ch5_channels/s1_simple.rs) 61 | - [src/ch5_channels/s2_unsafe.rs](src/ch5_channels/s2_unsafe.rs) 62 | - [src/ch5_channels/s3_checks.rs](src/ch5_channels/s3_checks.rs) 63 | - [src/ch5_channels/s3_single_atomic.rs](src/ch5_channels/s3_single_atomic.rs) 64 | - [src/ch5_channels/s4_types.rs](src/ch5_channels/s4_types.rs) 65 | - [src/ch5_channels/s5_borrowing.rs](src/ch5_channels/s5_borrowing.rs) 66 | - [src/ch5_channels/s6_blocking.rs](src/ch5_channels/s6_blocking.rs) 67 | 68 | ### Chapter 6 — Building Our Own “Arc” 69 | 70 | - 
[src/ch6_arc/s1_basic.rs](src/ch6_arc/s1_basic.rs) 71 | - [src/ch6_arc/s2_weak.rs](src/ch6_arc/s2_weak.rs) 72 | - [src/ch6_arc/s3_optimized.rs](src/ch6_arc/s3_optimized.rs) 73 | 74 | ### Chapter 7 — Understanding the Processor 75 | 76 | - https://godbolt.org/ 77 | 78 | ### Chapter 8 — Operating System Primitives 79 | 80 | - [examples/ch8-01-futex.rs](examples/ch8-01-futex.rs) 81 | 82 | ### Chapter 9 — Building Our Own Locks 83 | 84 | - [src/ch9_locks/mutex_1.rs](src/ch9_locks/mutex_1.rs) 85 | - [src/ch9_locks/mutex_2.rs](src/ch9_locks/mutex_2.rs) 86 | - [src/ch9_locks/mutex_3.rs](src/ch9_locks/mutex_3.rs) 87 | - [src/ch9_locks/condvar_1.rs](src/ch9_locks/condvar_1.rs) 88 | - [src/ch9_locks/condvar_2.rs](src/ch9_locks/condvar_2.rs) 89 | - [src/ch9_locks/rwlock_1.rs](src/ch9_locks/rwlock_1.rs) 90 | - [src/ch9_locks/rwlock_2.rs](src/ch9_locks/rwlock_2.rs) 91 | - [src/ch9_locks/rwlock_3.rs](src/ch9_locks/rwlock_3.rs) 92 | 93 | ### Chapter 10 — Ideas and Inspiration 94 | 95 | - [Wikipedia article on semaphores](https://en.wikipedia.org/wiki/Semaphore_(programming)) 96 | - [Stanford University course notes on semaphores](https://see.stanford.edu/materials/icsppcs107/23-Concurrency-Examples.pdf) 97 | - [Wikipedia article on the read-copy-update pattern](https://en.wikipedia.org/wiki/Read-copy-update) 98 | - [LWN article "What is RCU, Fundamentally?"](https://lwn.net/Articles/262464/) 99 | - [Wikipedia article on non-blocking linked lists](https://en.wikipedia.org/wiki/Non-blocking_linked_list) 100 | - [LWN article "Using RCU for Linked Lists—A Case Study"](https://lwn.net/Articles/610972/) 101 | - [Notes on the implementation of Windows SRW locks](https://github.com/rust-lang/rust/issues/93740#issuecomment-1064139337) 102 | - [A Rust implementation of queue-based locks](https://github.com/kprotty/usync) 103 | - [WebKit blog post, "Locking in WebKit"](https://webkit.org/blog/6161/locking-in-webkit/) 104 | - [Documentation of the `parking_lot` 
crate](https://docs.rs/parking_lot) 105 | - [Wikipedia article on Linux's Seqlock](https://en.wikipedia.org/wiki/Seqlock) 106 | - [Rust RFC 3301, `AtomicPerByte`](https://rust.tf/rfc3301) 107 | - [Documentation of the `seqlock` crate](https://docs.rs/seqlock) 108 | 109 | ### License 110 | 111 | You may use all code in this repository for any purpose. 112 | 113 | Attribution is appreciated, but not required. 114 | An attribution usually includes the book title, author, publisher, and ISBN. 115 | For example: "_Rust Atomics and Locks_ by Mara Bos (O’Reilly). Copyright 2023 Mara Bos, 978-1-098-11944-7." 116 | -------------------------------------------------------------------------------- /examples/ch1-01-hello.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | fn main() { 4 | thread::spawn(f); 5 | thread::spawn(f); 6 | 7 | println!("Hello from the main thread."); 8 | } 9 | 10 | fn f() { 11 | println!("Hello from another thread!"); 12 | 13 | let id = thread::current().id(); 14 | println!("This is my thread id: {id:?}"); 15 | } 16 | -------------------------------------------------------------------------------- /examples/ch1-02-hello-join.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | fn main() { 4 | let t1 = thread::spawn(f); 5 | let t2 = thread::spawn(f); 6 | 7 | println!("Hello from the main thread."); 8 | 9 | t1.join().unwrap(); 10 | t2.join().unwrap(); 11 | } 12 | 13 | fn f() { 14 | println!("Hello from another thread!"); 15 | 16 | let id = thread::current().id(); 17 | println!("This is my thread id: {id:?}"); 18 | } 19 | -------------------------------------------------------------------------------- /examples/ch1-03-spawn-closure.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | fn main() { 4 | let numbers = vec![1, 2, 3]; 5 | 6 | thread::spawn(move || { 7 | for n in 
&numbers { 8 | println!("{n}"); 9 | } 10 | }).join().unwrap(); 11 | } 12 | -------------------------------------------------------------------------------- /examples/ch1-04-scoped-threads.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | fn main() { 4 | let numbers = vec![1, 2, 3]; 5 | 6 | thread::scope(|s| { 7 | s.spawn(|| { 8 | println!("length: {}", numbers.len()); 9 | }); 10 | s.spawn(|| { 11 | for n in &numbers { 12 | println!("{n}"); 13 | } 14 | }); 15 | }); 16 | } 17 | -------------------------------------------------------------------------------- /examples/ch1-05-rc.rs: -------------------------------------------------------------------------------- 1 | use std::rc::Rc; 2 | 3 | fn main() { 4 | let a = Rc::new([1, 2, 3]); 5 | let b = a.clone(); 6 | 7 | assert_eq!(a.as_ptr(), b.as_ptr()); // Same allocation! 8 | } 9 | -------------------------------------------------------------------------------- /examples/ch1-06-cell.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | 3 | fn f(v: &Cell>) { 4 | let mut v2 = v.take(); // Replaces the contents of the Cell with an empty Vec 5 | v2.push(1); 6 | v.set(v2); // Put the modified Vec back 7 | } 8 | 9 | fn main() { 10 | let v = Cell::new(vec![1, 2, 3]); 11 | f(&v); 12 | assert_eq!(v.into_inner(), vec![1, 2, 3, 1]); 13 | } 14 | -------------------------------------------------------------------------------- /examples/ch1-07-refcell.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | 3 | fn f(v: &RefCell>) { 4 | v.borrow_mut().push(1); // We can modify the `Vec` directly. 
5 | } 6 | 7 | fn main() { 8 | let v = RefCell::new(vec![1, 2, 3]); 9 | f(&v); 10 | assert_eq!(v.into_inner(), vec![1, 2, 3, 1]); 11 | } 12 | -------------------------------------------------------------------------------- /examples/ch1-08-mutex.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Mutex; 2 | use std::thread; 3 | 4 | fn main() { 5 | let n = Mutex::new(0); 6 | thread::scope(|s| { 7 | for _ in 0..10 { 8 | s.spawn(|| { 9 | let mut guard = n.lock().unwrap(); 10 | for _ in 0..100 { 11 | *guard += 1; 12 | } 13 | }); 14 | } 15 | }); 16 | assert_eq!(n.into_inner().unwrap(), 1000); 17 | } 18 | -------------------------------------------------------------------------------- /examples/ch1-09-sleep-before-unlock.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Mutex; 2 | use std::thread; 3 | use std::time::Duration; 4 | 5 | fn main() { 6 | let n = Mutex::new(0); 7 | thread::scope(|s| { 8 | for _ in 0..10 { 9 | s.spawn(|| { 10 | let mut guard = n.lock().unwrap(); 11 | for _ in 0..100 { 12 | *guard += 1; 13 | } 14 | thread::sleep(Duration::from_secs(1)); // New! 15 | }); 16 | } 17 | }); 18 | assert_eq!(n.into_inner().unwrap(), 1000); 19 | } 20 | -------------------------------------------------------------------------------- /examples/ch1-10-unlock-before-sleep.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Mutex; 2 | use std::thread; 3 | use std::time::Duration; 4 | 5 | fn main() { 6 | let n = Mutex::new(0); 7 | thread::scope(|s| { 8 | for _ in 0..10 { 9 | s.spawn(|| { 10 | let mut guard = n.lock().unwrap(); 11 | for _ in 0..100 { 12 | *guard += 1; 13 | } 14 | drop(guard); // New: drop the guard before sleeping! 
15 | thread::sleep(Duration::from_secs(1)); 16 | }); 17 | } 18 | }); 19 | assert_eq!(n.into_inner().unwrap(), 1000); 20 | } 21 | -------------------------------------------------------------------------------- /examples/ch1-11-thread-parking.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | use std::sync::Mutex; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | fn main() { 7 | let queue = Mutex::new(VecDeque::new()); 8 | 9 | thread::scope(|s| { 10 | // Consuming thread 11 | let t = s.spawn(|| loop { 12 | let item = queue.lock().unwrap().pop_front(); 13 | if let Some(item) = item { 14 | dbg!(item); 15 | } else { 16 | thread::park(); 17 | } 18 | }); 19 | 20 | // Producing thread 21 | for i in 0.. { 22 | queue.lock().unwrap().push_back(i); 23 | t.thread().unpark(); 24 | thread::sleep(Duration::from_secs(1)); 25 | } 26 | }); 27 | } 28 | -------------------------------------------------------------------------------- /examples/ch1-12-condvar.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | use std::sync::Condvar; 3 | use std::sync::Mutex; 4 | use std::thread; 5 | use std::time::Duration; 6 | 7 | fn main() { 8 | let queue = Mutex::new(VecDeque::new()); 9 | let not_empty = Condvar::new(); 10 | 11 | thread::scope(|s| { 12 | s.spawn(|| { 13 | loop { 14 | let mut q = queue.lock().unwrap(); 15 | let item = loop { 16 | if let Some(item) = q.pop_front() { 17 | break item; 18 | } else { 19 | q = not_empty.wait(q).unwrap(); 20 | } 21 | }; 22 | drop(q); 23 | dbg!(item); 24 | } 25 | }); 26 | 27 | for i in 0.. 
{ 28 | queue.lock().unwrap().push_back(i); 29 | not_empty.notify_one(); 30 | thread::sleep(Duration::from_secs(1)); 31 | } 32 | }); 33 | } 34 | -------------------------------------------------------------------------------- /examples/ch2-01-stop-flag.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicBool; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | fn main() { 7 | static STOP: AtomicBool = AtomicBool::new(false); 8 | 9 | // Spawn a thread to do the work. 10 | let background_thread = thread::spawn(|| { 11 | while !STOP.load(Relaxed) { 12 | some_work(); 13 | } 14 | }); 15 | 16 | // Use the main thread to listen for user input. 17 | for line in std::io::stdin().lines() { 18 | match line.unwrap().as_str() { 19 | "help" => println!("commands: help, stop"), 20 | "stop" => break, 21 | cmd => println!("unknown command: {cmd:?}"), 22 | } 23 | } 24 | 25 | // Inform the background thread it needs to stop. 26 | STOP.store(true, Relaxed); 27 | 28 | // Wait until the background thread finishes. 29 | background_thread.join().unwrap(); 30 | } 31 | 32 | fn some_work() { 33 | thread::sleep(Duration::from_millis(100)); 34 | } 35 | -------------------------------------------------------------------------------- /examples/ch2-02-progress-reporting.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | fn main() { 7 | let num_done = AtomicUsize::new(0); 8 | 9 | thread::scope(|s| { 10 | // A background thread to process all 100 items. 11 | s.spawn(|| { 12 | for i in 0..100 { 13 | process_item(i); // Assuming this takes some time. 14 | num_done.store(i + 1, Relaxed); 15 | } 16 | }); 17 | 18 | // The main thread shows status updates, every second. 
19 | loop { 20 | let n = num_done.load(Relaxed); 21 | if n == 100 { break; } 22 | println!("Working.. {n}/100 done"); 23 | thread::sleep(Duration::from_secs(1)); 24 | } 25 | }); 26 | 27 | println!("Done!"); 28 | } 29 | 30 | fn process_item(_: usize) { 31 | thread::sleep(Duration::from_millis(37)); 32 | } 33 | -------------------------------------------------------------------------------- /examples/ch2-03-progress-reporting-unpark.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | fn main() { 7 | let num_done = AtomicUsize::new(0); 8 | 9 | let main_thread = thread::current(); 10 | 11 | thread::scope(|s| { 12 | // A background thread to process all 100 items. 13 | s.spawn(|| { 14 | for i in 0..100 { 15 | process_item(i); // Assuming this takes some time. 16 | num_done.store(i + 1, Relaxed); 17 | main_thread.unpark(); // Wake up the main thread. 18 | } 19 | }); 20 | 21 | // The main thread shows status updates. 22 | loop { 23 | let n = num_done.load(Relaxed); 24 | if n == 100 { break; } 25 | println!("Working.. 
{n}/100 done"); 26 | thread::park_timeout(Duration::from_secs(1)); 27 | } 28 | }); 29 | 30 | println!("Done!"); 31 | } 32 | 33 | fn process_item(_: usize) { 34 | thread::sleep(Duration::from_millis(37)); 35 | } 36 | -------------------------------------------------------------------------------- /examples/ch2-04-lazy-init.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicU64; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | fn get_x() -> u64 { 7 | static X: AtomicU64 = AtomicU64::new(0); 8 | let mut x = X.load(Relaxed); 9 | if x == 0 { 10 | x = calculate_x(); 11 | X.store(x, Relaxed); 12 | } 13 | x 14 | } 15 | 16 | fn calculate_x() -> u64 { 17 | thread::sleep(Duration::from_secs(1)); 18 | 123 19 | } 20 | 21 | fn main() { 22 | dbg!(get_x()); 23 | dbg!(get_x()); 24 | } 25 | -------------------------------------------------------------------------------- /examples/ch2-05-fetch-add.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicI32; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | 4 | fn main() { 5 | let a = AtomicI32::new(100); 6 | let b = a.fetch_add(23, Relaxed); 7 | let c = a.load(Relaxed); 8 | 9 | assert_eq!(b, 100); 10 | assert_eq!(c, 123); 11 | } 12 | -------------------------------------------------------------------------------- /examples/ch2-06-progress-reporting-multiple-threads.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | fn main() { 7 | let num_done = &AtomicUsize::new(0); 8 | 9 | thread::scope(|s| { 10 | // Four background threads to process all 100 items, 25 each. 
11 | for t in 0..4 { 12 | s.spawn(move || { 13 | for i in 0..25 { 14 | process_item(t * 25 + i); // Assuming this takes some time. 15 | num_done.fetch_add(1, Relaxed); 16 | } 17 | }); 18 | } 19 | 20 | // The main thread shows status updates, every second. 21 | loop { 22 | let n = num_done.load(Relaxed); 23 | if n == 100 { break; } 24 | println!("Working.. {n}/100 done"); 25 | thread::sleep(Duration::from_secs(1)); 26 | } 27 | }); 28 | 29 | println!("Done!"); 30 | } 31 | 32 | fn process_item(_: usize) { 33 | thread::sleep(Duration::from_millis(123)); 34 | } 35 | -------------------------------------------------------------------------------- /examples/ch2-07-statistics.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | use std::sync::atomic::AtomicU64; 3 | use std::sync::atomic::Ordering::Relaxed; 4 | use std::thread; 5 | use std::time::Duration; 6 | use std::time::Instant; 7 | 8 | fn main() { 9 | let num_done = &AtomicUsize::new(0); 10 | let total_time = &AtomicU64::new(0); 11 | let max_time = &AtomicU64::new(0); 12 | 13 | thread::scope(|s| { 14 | // Four background threads to process all 100 items, 25 each. 15 | for t in 0..4 { 16 | s.spawn(move || { 17 | for i in 0..25 { 18 | let start = Instant::now(); 19 | process_item(t * 25 + i); // Assuming this takes some time. 20 | let time_taken = start.elapsed().as_micros() as u64; 21 | num_done.fetch_add(1, Relaxed); 22 | total_time.fetch_add(time_taken, Relaxed); 23 | max_time.fetch_max(time_taken, Relaxed); 24 | } 25 | }); 26 | } 27 | 28 | // The main thread shows status updates, every second. 29 | loop { 30 | let total_time = Duration::from_micros(total_time.load(Relaxed)); 31 | let max_time = Duration::from_micros(max_time.load(Relaxed)); 32 | let n = num_done.load(Relaxed); 33 | if n == 100 { break; } 34 | if n == 0 { 35 | println!("Working.. nothing done yet."); 36 | } else { 37 | println!( 38 | "Working.. 
{n}/100 done, {:?} average, {:?} peak", 39 | total_time / n as u32, 40 | max_time, 41 | ); 42 | } 43 | thread::sleep(Duration::from_secs(1)); 44 | } 45 | }); 46 | 47 | println!("Done!"); 48 | } 49 | 50 | fn process_item(_: usize) { 51 | thread::sleep(Duration::from_millis(123)); 52 | } 53 | -------------------------------------------------------------------------------- /examples/ch2-08-id-allocation.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicU32; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | 4 | // This version is problematic. 5 | fn allocate_new_id() -> u32 { 6 | static NEXT_ID: AtomicU32 = AtomicU32::new(0); 7 | NEXT_ID.fetch_add(1, Relaxed) 8 | } 9 | 10 | fn main() { 11 | dbg!(allocate_new_id()); // 0 12 | dbg!(allocate_new_id()); // 1 13 | dbg!(allocate_new_id()); // 2 14 | 15 | println!("overflowing the counter... (this might take a minute)"); 16 | 17 | for _ in 3..=u32::MAX { 18 | allocate_new_id(); 19 | } 20 | 21 | println!("overflowed!"); 22 | 23 | dbg!(allocate_new_id()); // ⚠️ This will produce zero again. ⚠️ 24 | } 25 | -------------------------------------------------------------------------------- /examples/ch2-09-id-allocation-panic.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicU32; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | 4 | // This version is problematic. 5 | fn allocate_new_id() -> u32 { 6 | static NEXT_ID: AtomicU32 = AtomicU32::new(0); 7 | let id = NEXT_ID.fetch_add(1, Relaxed); 8 | assert!(id < 1000, "too many IDs!"); 9 | id 10 | } 11 | 12 | fn main() { 13 | dbg!(allocate_new_id()); // This will produce a zero. 14 | 15 | for _ in 1..1000 { 16 | allocate_new_id(); // 1 through 999. 17 | } 18 | 19 | println!("overflowing the counter... 
(this might take a few hours)"); 20 | 21 | std::panic::set_hook(Box::new(|_| {})); 22 | 23 | for _ in 1000..=u32::MAX { 24 | let _ = std::panic::catch_unwind(|| allocate_new_id()); 25 | } 26 | 27 | println!("overflowed!"); 28 | 29 | dbg!(allocate_new_id()); // ⚠️ This will produce zero again. ⚠️ 30 | } 31 | -------------------------------------------------------------------------------- /examples/ch2-10-id-allocation-subtract-before-panic.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicU32; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | 4 | fn allocate_new_id() -> u32 { 5 | static NEXT_ID: AtomicU32 = AtomicU32::new(0); 6 | let id = NEXT_ID.fetch_add(1, Relaxed); 7 | if id >= 1000 { 8 | NEXT_ID.fetch_sub(1, Relaxed); 9 | panic!("too many IDs!"); 10 | } 11 | id 12 | } 13 | 14 | fn main() { 15 | dbg!(allocate_new_id()); 16 | dbg!(allocate_new_id()); 17 | dbg!(allocate_new_id()); 18 | // TODO 19 | } 20 | -------------------------------------------------------------------------------- /examples/ch2-11-increment-with-compare-exchange.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicU32; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | 4 | fn increment(a: &AtomicU32) { 5 | let mut current = a.load(Relaxed); 6 | loop { 7 | let new = current + 1; 8 | match a.compare_exchange(current, new, Relaxed, Relaxed) { 9 | Ok(_) => return, 10 | Err(v) => current = v, 11 | } 12 | } 13 | } 14 | 15 | fn main() { 16 | let a = AtomicU32::new(0); 17 | increment(&a); 18 | increment(&a); 19 | assert_eq!(a.into_inner(), 2); 20 | } 21 | -------------------------------------------------------------------------------- /examples/ch2-12-id-allocation-without-overflow.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicU32; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | 4 | fn 
allocate_new_id() -> u32 { 5 | static NEXT_ID: AtomicU32 = AtomicU32::new(0); 6 | let mut id = NEXT_ID.load(Relaxed); 7 | loop { 8 | assert!(id < 1000, "too many IDs!"); 9 | match NEXT_ID.compare_exchange_weak(id, id + 1, Relaxed, Relaxed) { 10 | Ok(_) => return id, 11 | Err(v) => id = v, 12 | } 13 | } 14 | } 15 | 16 | fn main() { 17 | dbg!(allocate_new_id()); 18 | dbg!(allocate_new_id()); 19 | dbg!(allocate_new_id()); 20 | // TODO 21 | } 22 | -------------------------------------------------------------------------------- /examples/ch2-13-lazy-one-time-init.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicU64; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | 4 | fn get_key() -> u64 { 5 | static KEY: AtomicU64 = AtomicU64::new(0); 6 | let key = KEY.load(Relaxed); 7 | if key == 0 { 8 | let new_key = generate_random_key(); 9 | match KEY.compare_exchange(0, new_key, Relaxed, Relaxed) { 10 | Ok(_) => new_key, 11 | Err(k) => k, 12 | } 13 | } else { 14 | key 15 | } 16 | } 17 | 18 | fn generate_random_key() -> u64 { 19 | 123 20 | // TODO 21 | } 22 | 23 | fn main() { 24 | dbg!(get_key()); 25 | dbg!(get_key()); 26 | dbg!(get_key()); 27 | } 28 | -------------------------------------------------------------------------------- /examples/ch3-01-relaxed.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicI32; 2 | use std::sync::atomic::Ordering::Relaxed; 3 | use std::thread; 4 | 5 | static X: AtomicI32 = AtomicI32::new(0); 6 | static Y: AtomicI32 = AtomicI32::new(0); 7 | 8 | fn a() { 9 | X.store(10, Relaxed); 10 | Y.store(20, Relaxed); 11 | } 12 | 13 | fn b() { 14 | let y = Y.load(Relaxed); 15 | let x = X.load(Relaxed); 16 | println!("{x} {y}"); 17 | } 18 | 19 | fn main() { 20 | thread::scope(|s| { 21 | s.spawn(a); 22 | s.spawn(b); 23 | }); 24 | } 25 | -------------------------------------------------------------------------------- 
use std::sync::atomic::AtomicI32;
use std::sync::atomic::Ordering::Relaxed;
use std::thread;

static X: AtomicI32 = AtomicI32::new(0);

/// Spawning and joining a thread establish happens-before relationships:
/// the store of 1 happens before the spawned thread runs, and the store
/// of 3 happens after it finished, so `f` can only observe 1 or 2.
fn main() {
    X.store(1, Relaxed);
    let handle = thread::spawn(f);
    X.store(2, Relaxed);
    handle.join().unwrap();
    X.store(3, Relaxed);
}

fn f() {
    let observed = X.load(Relaxed);
    // Never 0 (spawn ordering) and never 3 (join ordering).
    assert!(matches!(observed, 1 | 2));
}
use std::sync::atomic::AtomicI32;
use std::sync::atomic::Ordering::Relaxed;
use std::thread;

static X: AtomicI32 = AtomicI32::new(0);
static Y: AtomicI32 = AtomicI32::new(0);

/// "Out-of-thin-air" example: one thread copies X into Y while another
/// copies Y into X. The formal memory model technically permits arbitrary
/// values to appear, but no real implementation produces them, so both
/// atomics remain zero in practice.
fn main() {
    let copy_x_into_y = thread::spawn(|| {
        let x = X.load(Relaxed);
        Y.store(x, Relaxed);
    });
    let copy_y_into_x = thread::spawn(|| {
        let y = Y.load(Relaxed);
        X.store(y, Relaxed);
    });
    copy_x_into_y.join().unwrap();
    copy_y_into_x.join().unwrap();
    assert_eq!(X.load(Relaxed), 0); // Might fail? (Not on real hardware.)
    assert_eq!(Y.load(Relaxed), 0); // Might fail?
}
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::{Acquire, Release};
use std::thread;
use std::time::Duration;

static mut DATA: u64 = 0;
static READY: AtomicBool = AtomicBool::new(false);

/// Release/acquire hand-off of *non-atomic* data: the writer publishes
/// DATA and then raises READY with Release ordering; once the reader's
/// Acquire load observes `true`, the write to DATA is guaranteed visible.
fn main() {
    thread::spawn(|| {
        // Safety: Nothing else is accessing DATA,
        // because we haven't set the READY flag yet.
        unsafe { DATA = 123 };
        READY.store(true, Release); // Everything from before this store ..
    });
    loop {
        if READY.load(Acquire) { // .. is visible after this loads `true`.
            break;
        }
        thread::sleep(Duration::from_millis(100));
        println!("waiting...");
    }
    // Safety: Nothing is mutating DATA, because READY is set.
    println!("{}", unsafe { DATA });
}
/// Lazily allocates a single static `Data` value and returns a
/// `'static` reference to it.
///
/// Concurrent first callers may race to allocate; `compare_exchange`
/// lets exactly one pointer win. Losers free their own allocation and
/// adopt the winner's, so every caller gets the same address.
fn get_data() -> &'static Data {
    // NOTE: restored the element type lost in transcription —
    // `AtomicPtr` requires its generic parameter to compile.
    static PTR: AtomicPtr<Data> = AtomicPtr::new(std::ptr::null_mut());

    let mut p = PTR.load(Acquire);

    if p.is_null() {
        p = Box::into_raw(Box::new(generate_data()));
        if let Err(e) = PTR.compare_exchange(
            std::ptr::null_mut(), p, Release, Acquire
        ) {
            // Safety: p comes from Box::into_raw right above,
            // and wasn't shared with any other thread.
            drop(unsafe { Box::from_raw(p) });
            p = e;
        }
    }

    // Safety: p is not null and points to a properly initialized value.
    unsafe { &*p }
}

/// 100 bytes of payload, used to demonstrate pointer-based lazy init.
struct Data([u8; 100]);

fn generate_data() -> Data {
    Data([123; 100])
}
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::SeqCst;
use std::thread;

static A: AtomicBool = AtomicBool::new(false);
static B: AtomicBool = AtomicBool::new(false);

static mut S: String = String::new();

/// Classic SeqCst example: each thread raises its own flag and then
/// checks the other's. Because all SeqCst operations share one total
/// order, at least one of the two loads must observe `true`, so at
/// most one thread ever touches `S`.
fn main() {
    let first = thread::spawn(|| {
        A.store(true, SeqCst);
        if !B.load(SeqCst) {
            // At most one thread can reach a push (see above).
            unsafe { S.push('!') };
        }
    });

    let second = thread::spawn(|| {
        B.store(true, SeqCst);
        if !A.load(SeqCst) {
            unsafe { S.push('!') };
        }
    });

    first.join().unwrap();
    second.join().unwrap();
}
-------------------------------------------------------------------------------- /examples/ch8-01-futex.rs: -------------------------------------------------------------------------------- 1 | #![cfg(target_os = "linux")] 2 | 3 | use std::sync::atomic::AtomicU32; 4 | use std::sync::atomic::Ordering::Relaxed; 5 | use std::thread; 6 | use std::time::Duration; 7 | 8 | pub fn wait(a: &AtomicU32, expected: u32) { 9 | // Refer to the futex (2) man page for the syscall signature. 10 | unsafe { 11 | libc::syscall( 12 | libc::SYS_futex, // The futex syscall. 13 | a as *const AtomicU32, // The atomic to operate on. 14 | libc::FUTEX_WAIT, // The futex operation. 15 | expected, // The expected value. 16 | std::ptr::null::(), // No timeout. 17 | ); 18 | } 19 | } 20 | 21 | pub fn wake_one(a: &AtomicU32) { 22 | // Refer to the futex (2) man page for the syscall signature. 23 | unsafe { 24 | libc::syscall( 25 | libc::SYS_futex, // The futex syscall. 26 | a as *const AtomicU32, // The atomic to operate on. 27 | libc::FUTEX_WAKE, // The futex operation. 28 | 1, // The number of threads to wake up. 
/// A minimal test-and-set spin lock built on a single `AtomicBool`.
pub struct SpinLock {
    locked: AtomicBool,
}

impl SpinLock {
    /// Creates a new, unlocked spin lock.
    pub const fn new() -> Self {
        SpinLock {
            locked: AtomicBool::new(false),
        }
    }

    /// Spins until the lock is acquired. `swap` returning `false` means
    /// we were the thread that flipped it to locked; the Acquire ordering
    /// pairs with the Release in `unlock`.
    pub fn lock(&self) {
        loop {
            if !self.locked.swap(true, Acquire) {
                return;
            }
            std::hint::spin_loop();
        }
    }

    /// Releases the lock, publishing everything done while holding it.
    pub fn unlock(&self) {
        self.locked.store(false, Release);
    }
}
/// A spin lock protecting a value of type `T`, handing out access
/// through an RAII `Guard`.
///
/// NOTE: restored all `<T>` generic parameters that were stripped in
/// transcription; without them none of these items compile.
pub struct SpinLock<T> {
    locked: AtomicBool,
    value: UnsafeCell<T>,
}

// Safety: the lock hands out access to one thread at a time, so
// sharing the lock across threads only requires T: Send.
unsafe impl<T> Sync for SpinLock<T> where T: Send {}

/// RAII guard: while it exists the lock is held; dropping it unlocks.
pub struct Guard<'a, T> {
    lock: &'a SpinLock<T>,
}

unsafe impl<T> Sync for Guard<'_, T> where T: Sync {}

impl<T> SpinLock<T> {
    /// Creates a new, unlocked spin lock wrapping `value`.
    pub const fn new(value: T) -> Self {
        Self {
            locked: AtomicBool::new(false),
            value: UnsafeCell::new(value),
        }
    }

    /// Spins until the lock is acquired, then returns a guard that
    /// releases it on drop. Acquire pairs with the Release in `Drop`.
    pub fn lock(&self) -> Guard<T> {
        while self.locked.swap(true, Acquire) {
            std::hint::spin_loop();
        }
        Guard { lock: self }
    }
}

impl<T> Deref for Guard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // Safety: The very existence of this Guard
        // guarantees we've exclusively locked the lock.
        unsafe { &*self.lock.value.get() }
    }
}

impl<T> DerefMut for Guard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // Safety: The very existence of this Guard
        // guarantees we've exclusively locked the lock.
        unsafe { &mut *self.lock.value.get() }
    }
}

impl<T> Drop for Guard<'_, T> {
    fn drop(&mut self) {
        self.lock.locked.store(false, Release);
    }
}
/// A one-shot channel whose safety obligations fall entirely on the
/// caller (the later chapters add runtime checks).
///
/// NOTE: restored the `<T>` generic parameters stripped in transcription;
/// `Channel`, `MaybeUninit`, and the `Sync` impl all require them.
pub struct Channel<T> {
    message: UnsafeCell<MaybeUninit<T>>,
    ready: AtomicBool,
}

// Safety: only one thread ever accesses the message slot at a time
// (enforced by the callers of the unsafe methods below).
unsafe impl<T> Sync for Channel<T> where T: Send {}

impl<T> Channel<T> {
    /// Creates an empty channel.
    pub const fn new() -> Self {
        Self {
            message: UnsafeCell::new(MaybeUninit::uninit()),
            ready: AtomicBool::new(false),
        }
    }

    /// Safety: Only call this once!
    pub unsafe fn send(&self, message: T) {
        (*self.message.get()).write(message);
        // Release pairs with the Acquire in is_ready(), publishing
        // the message write.
        self.ready.store(true, Release);
    }

    pub fn is_ready(&self) -> bool {
        self.ready.load(Acquire)
    }

    /// Safety: Only call this once,
    /// and only after is_ready() returns true!
    pub unsafe fn receive(&self) -> T {
        (*self.message.get()).assume_init_read()
    }
}
// The four states of the one-shot channel, packed into one atomic byte.
const EMPTY: u8 = 0;
const WRITING: u8 = 1;
const READY: u8 = 2;
const READING: u8 = 3;

/// A safe one-shot channel that tracks its whole lifecycle in a single
/// `AtomicU8` state machine (EMPTY → WRITING → READY → READING).
///
/// NOTE: restored the `<T>` generic parameters stripped in transcription.
pub struct Channel<T> {
    message: UnsafeCell<MaybeUninit<T>>,
    state: AtomicU8,
}

// Safety: the state machine guarantees exclusive access to the message
// slot, so sharing the channel only requires T: Send.
unsafe impl<T: Send> Sync for Channel<T> {}

impl<T> Channel<T> {
    /// Creates an empty channel.
    pub const fn new() -> Self {
        Self {
            message: UnsafeCell::new(MaybeUninit::uninit()),
            state: AtomicU8::new(EMPTY),
        }
    }

    /// Panics when trying to send more than one message.
    pub fn send(&self, message: T) {
        // Claim the slot: only the EMPTY→WRITING transition may write.
        if self.state.compare_exchange(
            EMPTY, WRITING, Relaxed, Relaxed
        ).is_err() {
            panic!("can't send more than one message!");
        }
        unsafe { (*self.message.get()).write(message) };
        // Release pairs with the Acquire in receive().
        self.state.store(READY, Release);
    }

    pub fn is_ready(&self) -> bool {
        self.state.load(Relaxed) == READY
    }

    /// Panics if no message is available yet,
    /// or if the message was already consumed.
    pub fn receive(&self) -> T {
        // Only the READY→READING transition may read the message.
        if self.state.compare_exchange(
            READY, READING, Acquire, Relaxed
        ).is_err() {
            panic!("no message available!");
        }
        // Safety: we just moved the state machine out of READY,
        // so we're the only ones reading an initialized message.
        unsafe { (*self.message.get()).assume_init_read() }
    }
}

impl<T> Drop for Channel<T> {
    fn drop(&mut self) {
        // A message that was sent but never received must be dropped.
        if *self.state.get_mut() == READY {
            unsafe { self.message.get_mut().assume_init_drop() }
        }
    }
}
/// A one-shot channel that borrows itself out as a `Sender`/`Receiver`
/// pair via `split`, so the borrow checker (not `Arc`) guarantees the
/// endpoints don't outlive the channel.
///
/// NOTE: restored the `<T>` generics stripped in transcription.
pub struct Channel<T> {
    message: UnsafeCell<MaybeUninit<T>>,
    ready: AtomicBool,
}

// Safety: send/receive each consume their endpoint, so the message slot
// is only ever touched by one thread at a time.
unsafe impl<T> Sync for Channel<T> where T: Send {}

pub struct Sender<'a, T> {
    channel: &'a Channel<T>,
}

pub struct Receiver<'a, T> {
    channel: &'a Channel<T>,
}

impl<T> Channel<T> {
    /// Creates an empty channel.
    pub const fn new() -> Self {
        Self {
            message: UnsafeCell::new(MaybeUninit::uninit()),
            ready: AtomicBool::new(false),
        }
    }

    /// Resets the channel and hands out a fresh sender/receiver pair.
    /// Taking `&mut self` proves no older endpoints still exist.
    pub fn split<'a>(&'a mut self) -> (Sender<'a, T>, Receiver<'a, T>) {
        *self = Self::new();
        (Sender { channel: self }, Receiver { channel: self })
    }
}

impl<T> Sender<'_, T> {
    /// Sends the one message; consumes the sender so it can't be reused.
    pub fn send(self, message: T) {
        unsafe { (*self.channel.message.get()).write(message) };
        // Release pairs with the Acquire swap in receive().
        self.channel.ready.store(true, Release);
    }
}

impl<T> Receiver<'_, T> {
    pub fn is_ready(&self) -> bool {
        self.channel.ready.load(Relaxed)
    }

    /// Panics if no message is available; consumes the receiver.
    pub fn receive(self) -> T {
        if !self.channel.ready.swap(false, Acquire) {
            panic!("no message available!");
        }
        // Safety: we just checked (and reset) the ready flag.
        unsafe { (*self.channel.message.get()).assume_init_read() }
    }
}

impl<T> Drop for Channel<T> {
    fn drop(&mut self) {
        // A message that was sent but never received must be dropped.
        if *self.ready.get_mut() {
            unsafe { self.message.get_mut().assume_init_drop() }
        }
    }
}
19 | } 20 | 21 | pub struct Receiver<'a, T> { 22 | channel: &'a Channel, 23 | _no_send: PhantomData<*const ()>, // New! 24 | } 25 | 26 | impl Channel { 27 | pub const fn new() -> Self { 28 | Self { 29 | message: UnsafeCell::new(MaybeUninit::uninit()), 30 | ready: AtomicBool::new(false), 31 | } 32 | } 33 | 34 | pub fn split<'a>(&'a mut self) -> (Sender<'a, T>, Receiver<'a, T>) { 35 | *self = Self::new(); 36 | ( 37 | Sender { 38 | channel: self, 39 | receiving_thread: thread::current(), // New! 40 | }, 41 | Receiver { 42 | channel: self, 43 | _no_send: PhantomData, // New! 44 | } 45 | ) 46 | } 47 | } 48 | 49 | impl Sender<'_, T> { 50 | pub fn send(self, message: T) { 51 | unsafe { (*self.channel.message.get()).write(message) }; 52 | self.channel.ready.store(true, Release); 53 | self.receiving_thread.unpark(); // New! 54 | } 55 | } 56 | 57 | impl Receiver<'_, T> { 58 | pub fn receive(self) -> T { 59 | while !self.channel.ready.swap(false, Acquire) { 60 | thread::park(); 61 | } 62 | unsafe { (*self.channel.message.get()).assume_init_read() } 63 | } 64 | } 65 | 66 | impl Drop for Channel { 67 | fn drop(&mut self) { 68 | if *self.ready.get_mut() { 69 | unsafe { self.message.get_mut().assume_init_drop() } 70 | } 71 | } 72 | } 73 | 74 | #[test] 75 | fn main() { 76 | let mut channel = Channel::new(); 77 | thread::scope(|s| { 78 | let (sender, receiver) = channel.split(); 79 | s.spawn(move || { 80 | sender.send("hello world!"); 81 | }); 82 | assert_eq!(receiver.receive(), "hello world!"); 83 | }); 84 | } 85 | -------------------------------------------------------------------------------- /src/ch6_arc/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod s1_basic; 2 | pub mod s2_weak; 3 | pub mod s3_optimized; 4 | -------------------------------------------------------------------------------- /src/ch6_arc/s1_basic.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Deref; 2 | 
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::ptr::NonNull;

/// Heap allocation shared by all clones of one `Arc`.
struct ArcData<T> {
    ref_count: AtomicUsize,
    data: T,
}

/// Minimal atomically reference-counted pointer (no weak pointers).
pub struct Arc<T> {
    ptr: NonNull<ArcData<T>>,
}

unsafe impl<T: Send + Sync> Send for Arc<T> {}
unsafe impl<T: Send + Sync> Sync for Arc<T> {}

impl<T> Arc<T> {
    pub fn new(data: T) -> Arc<T> {
        Arc {
            ptr: NonNull::from(Box::leak(Box::new(ArcData {
                ref_count: AtomicUsize::new(1),
                data,
            }))),
        }
    }

    fn data(&self) -> &ArcData<T> {
        unsafe { self.ptr.as_ref() }
    }

    /// Returns exclusive access to the data if this is the only `Arc`.
    pub fn get_mut(arc: &mut Self) -> Option<&mut T> {
        if arc.data().ref_count.load(Relaxed) == 1 {
            fence(Acquire);
            // Safety: Nothing else can access the data, since
            // there's only one Arc, to which we have exclusive access.
            unsafe { Some(&mut arc.ptr.as_mut().data) }
        } else {
            None
        }
    }
}

impl<T> Deref for Arc<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.data().data
    }
}

impl<T> Clone for Arc<T> {
    fn clone(&self) -> Self {
        // Abort well before the counter can overflow back to zero.
        if self.data().ref_count.fetch_add(1, Relaxed) > usize::MAX / 2 {
            std::process::abort();
        }
        Arc {
            ptr: self.ptr,
        }
    }
}

impl<T> Drop for Arc<T> {
    fn drop(&mut self) {
        if self.data().ref_count.fetch_sub(1, Release) == 1 {
            fence(Acquire);
            unsafe {
                drop(Box::from_raw(self.ptr.as_ptr()));
            }
        }
    }
}

#[test]
fn test() {
    static NUM_DROPS: AtomicUsize = AtomicUsize::new(0);

    struct DetectDrop;

    impl Drop for DetectDrop {
        fn drop(&mut self) {
            NUM_DROPS.fetch_add(1, Relaxed);
        }
    }

    // Create two Arcs sharing an object containing a string
    // and a DetectDrop, to detect when it's dropped.
    let x = Arc::new(("hello", DetectDrop));
    let y = x.clone();

    // Send x to another thread, and use it there.
    let t = std::thread::spawn(move || {
        assert_eq!(x.0, "hello");
    });

    // In parallel, y should still be usable here.
    assert_eq!(y.0, "hello");

    // Wait for the thread to finish.
    t.join().unwrap();

    // One Arc, x, should be dropped by now.
    // We still have y, so the object shouldn't have been dropped yet.
    assert_eq!(NUM_DROPS.load(Relaxed), 0);

    // Drop the remaining `Arc`.
    drop(y);

    // Now that `y` is dropped too,
    // the object should've been dropped.
    assert_eq!(NUM_DROPS.load(Relaxed), 1);
}
--------------------------------------------------------------------------------
/src/ch6_arc/s2_weak.rs:
--------------------------------------------------------------------------------
use std::cell::UnsafeCell;
use std::ops::Deref;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::ptr::NonNull;

struct ArcData<T> {
    /// Number of `Arc`s.
    data_ref_count: AtomicUsize,
    /// Number of `Arc`s and `Weak`s combined.
    alloc_ref_count: AtomicUsize,
    /// The data. `None` if there's only weak pointers left.
14 | data: UnsafeCell>, 15 | } 16 | 17 | pub struct Arc { 18 | weak: Weak, 19 | } 20 | 21 | pub struct Weak { 22 | ptr: NonNull>, 23 | } 24 | 25 | unsafe impl Send for Weak {} 26 | unsafe impl Sync for Weak {} 27 | 28 | impl Arc { 29 | pub fn new(data: T) -> Arc { 30 | Arc { 31 | weak: Weak { 32 | ptr: NonNull::from(Box::leak(Box::new(ArcData { 33 | alloc_ref_count: AtomicUsize::new(1), 34 | data_ref_count: AtomicUsize::new(1), 35 | data: UnsafeCell::new(Some(data)), 36 | }))), 37 | }, 38 | } 39 | } 40 | 41 | pub fn get_mut(arc: &mut Self) -> Option<&mut T> { 42 | if arc.weak.data().alloc_ref_count.load(Relaxed) == 1 { 43 | fence(Acquire); 44 | // Safety: Nothing else can access the data, since 45 | // there's only one Arc, to which we have exclusive access, 46 | // and no Weak pointers. 47 | let arcdata = unsafe { arc.weak.ptr.as_mut() }; 48 | let option = arcdata.data.get_mut(); 49 | // We know the data is still available since we 50 | // have an Arc to it, so this won't panic. 51 | let data = option.as_mut().unwrap(); 52 | Some(data) 53 | } else { 54 | None 55 | } 56 | } 57 | 58 | pub fn downgrade(arc: &Self) -> Weak { 59 | arc.weak.clone() 60 | } 61 | } 62 | 63 | impl Weak { 64 | fn data(&self) -> &ArcData { 65 | unsafe { self.ptr.as_ref() } 66 | } 67 | 68 | pub fn upgrade(&self) -> Option> { 69 | let mut n = self.data().data_ref_count.load(Relaxed); 70 | loop { 71 | if n == 0 { 72 | return None; 73 | } 74 | assert!(n <= usize::MAX / 2); 75 | if let Err(e) = 76 | self.data() 77 | .data_ref_count 78 | .compare_exchange_weak(n, n + 1, Relaxed, Relaxed) 79 | { 80 | n = e; 81 | continue; 82 | } 83 | return Some(Arc { weak: self.clone() }); 84 | } 85 | } 86 | } 87 | 88 | impl Deref for Arc { 89 | type Target = T; 90 | 91 | fn deref(&self) -> &T { 92 | let ptr = self.weak.data().data.get(); 93 | // Safety: Since there's an Arc to the data, 94 | // the data exists and may be shared. 
95 | unsafe { (*ptr).as_ref().unwrap() } 96 | } 97 | } 98 | 99 | impl Clone for Weak { 100 | fn clone(&self) -> Self { 101 | if self.data().alloc_ref_count.fetch_add(1, Relaxed) > usize::MAX / 2 { 102 | std::process::abort(); 103 | } 104 | Weak { ptr: self.ptr } 105 | } 106 | } 107 | 108 | impl Clone for Arc { 109 | fn clone(&self) -> Self { 110 | let weak = self.weak.clone(); 111 | if weak.data().data_ref_count.fetch_add(1, Relaxed) > usize::MAX / 2 { 112 | std::process::abort(); 113 | } 114 | Arc { weak } 115 | } 116 | } 117 | 118 | impl Drop for Weak { 119 | fn drop(&mut self) { 120 | if self.data().alloc_ref_count.fetch_sub(1, Release) == 1 { 121 | fence(Acquire); 122 | unsafe { 123 | drop(Box::from_raw(self.ptr.as_ptr())); 124 | } 125 | } 126 | } 127 | } 128 | 129 | impl Drop for Arc { 130 | fn drop(&mut self) { 131 | if self.weak.data().data_ref_count.fetch_sub(1, Release) == 1 { 132 | fence(Acquire); 133 | let ptr = self.weak.data().data.get(); 134 | // Safety: The data reference counter is zero, 135 | // so nothing will access it. 136 | unsafe { 137 | (*ptr) = None; 138 | } 139 | } 140 | } 141 | } 142 | 143 | #[test] 144 | fn test() { 145 | static NUM_DROPS: AtomicUsize = AtomicUsize::new(0); 146 | 147 | struct DetectDrop; 148 | 149 | impl Drop for DetectDrop { 150 | fn drop(&mut self) { 151 | NUM_DROPS.fetch_add(1, Relaxed); 152 | } 153 | } 154 | 155 | // Create an Arc with two weak pointers. 156 | let x = Arc::new(("hello", DetectDrop)); 157 | let y = Arc::downgrade(&x); 158 | let z = Arc::downgrade(&x); 159 | 160 | let t = std::thread::spawn(move || { 161 | // Weak pointer should be upgradable at this point. 162 | let y = y.upgrade().unwrap(); 163 | assert_eq!(y.0, "hello"); 164 | }); 165 | assert_eq!(x.0, "hello"); 166 | t.join().unwrap(); 167 | 168 | // The data shouldn't be dropped yet, 169 | // and the weak pointer should be upgradable. 
170 | assert_eq!(NUM_DROPS.load(Relaxed), 0); 171 | assert!(z.upgrade().is_some()); 172 | 173 | drop(x); 174 | 175 | // Now, the data should be dropped, and the 176 | // weak pointer should no longer be upgradable. 177 | assert_eq!(NUM_DROPS.load(Relaxed), 1); 178 | assert!(z.upgrade().is_none()); 179 | } 180 | -------------------------------------------------------------------------------- /src/ch6_arc/s3_optimized.rs: -------------------------------------------------------------------------------- 1 | use std::mem::ManuallyDrop; 2 | use std::cell::UnsafeCell; 3 | use std::ops::Deref; 4 | use std::sync::atomic::AtomicUsize; 5 | use std::sync::atomic::fence; 6 | use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; 7 | use std::ptr::NonNull; 8 | 9 | pub struct Arc { 10 | ptr: NonNull>, 11 | } 12 | 13 | unsafe impl Send for Arc {} 14 | unsafe impl Sync for Arc {} 15 | 16 | pub struct Weak { 17 | ptr: NonNull>, 18 | } 19 | 20 | unsafe impl Send for Weak {} 21 | unsafe impl Sync for Weak {} 22 | 23 | struct ArcData { 24 | /// Number of `Arc`s. 25 | data_ref_count: AtomicUsize, 26 | /// Number of `Weak`s, plus one if there are any `Arc`s. 27 | alloc_ref_count: AtomicUsize, 28 | /// The data. Dropped if there are only weak pointers left. 29 | data: UnsafeCell>, 30 | } 31 | 32 | impl Arc { 33 | pub fn new(data: T) -> Arc { 34 | Arc { 35 | ptr: NonNull::from(Box::leak(Box::new(ArcData { 36 | alloc_ref_count: AtomicUsize::new(1), 37 | data_ref_count: AtomicUsize::new(1), 38 | data: UnsafeCell::new(ManuallyDrop::new(data)), 39 | }))), 40 | } 41 | } 42 | 43 | fn data(&self) -> &ArcData { 44 | unsafe { self.ptr.as_ref() } 45 | } 46 | 47 | pub fn get_mut(arc: &mut Self) -> Option<&mut T> { 48 | // Acquire matches Weak::drop's Release decrement, to make sure any 49 | // upgraded pointers are visible in the next data_ref_count.load. 
50 | if arc.data().alloc_ref_count.compare_exchange( 51 | 1, usize::MAX, Acquire, Relaxed 52 | ).is_err() { 53 | return None; 54 | } 55 | let is_unique = arc.data().data_ref_count.load(Relaxed) == 1; 56 | // Release matches Acquire increment in `downgrade`, to make sure any 57 | // changes to the data_ref_count that come after `downgrade` don't 58 | // change the is_unique result above. 59 | arc.data().alloc_ref_count.store(1, Release); 60 | if !is_unique { 61 | return None; 62 | } 63 | // Acquire to match Arc::drop's Release decrement, to make sure nothing 64 | // else is accessing the data. 65 | fence(Acquire); 66 | unsafe { Some(&mut *arc.data().data.get()) } 67 | } 68 | 69 | pub fn downgrade(arc: &Self) -> Weak { 70 | let mut n = arc.data().alloc_ref_count.load(Relaxed); 71 | loop { 72 | if n == usize::MAX { 73 | std::hint::spin_loop(); 74 | n = arc.data().alloc_ref_count.load(Relaxed); 75 | continue; 76 | } 77 | assert!(n <= usize::MAX / 2); 78 | // Acquire synchronises with get_mut's release-store. 79 | if let Err(e) = 80 | arc.data() 81 | .alloc_ref_count 82 | .compare_exchange_weak(n, n + 1, Acquire, Relaxed) 83 | { 84 | n = e; 85 | continue; 86 | } 87 | return Weak { ptr: arc.ptr }; 88 | } 89 | } 90 | } 91 | 92 | impl Deref for Arc { 93 | type Target = T; 94 | 95 | fn deref(&self) -> &T { 96 | // Safety: Since there's an Arc to the data, 97 | // the data exists and may be shared. 
98 | unsafe { &*self.data().data.get() } 99 | } 100 | } 101 | 102 | impl Weak { 103 | fn data(&self) -> &ArcData { 104 | unsafe { self.ptr.as_ref() } 105 | } 106 | 107 | pub fn upgrade(&self) -> Option> { 108 | let mut n = self.data().data_ref_count.load(Relaxed); 109 | loop { 110 | if n == 0 { 111 | return None; 112 | } 113 | assert!(n <= usize::MAX / 2); 114 | if let Err(e) = 115 | self.data() 116 | .data_ref_count 117 | .compare_exchange_weak(n, n + 1, Relaxed, Relaxed) 118 | { 119 | n = e; 120 | continue; 121 | } 122 | return Some(Arc { ptr: self.ptr }); 123 | } 124 | } 125 | } 126 | 127 | impl Clone for Weak { 128 | fn clone(&self) -> Self { 129 | if self.data().alloc_ref_count.fetch_add(1, Relaxed) > usize::MAX / 2 { 130 | std::process::abort(); 131 | } 132 | Weak { ptr: self.ptr } 133 | } 134 | } 135 | 136 | impl Drop for Weak { 137 | fn drop(&mut self) { 138 | if self.data().alloc_ref_count.fetch_sub(1, Release) == 1 { 139 | fence(Acquire); 140 | unsafe { 141 | drop(Box::from_raw(self.ptr.as_ptr())); 142 | } 143 | } 144 | } 145 | } 146 | 147 | impl Clone for Arc { 148 | fn clone(&self) -> Self { 149 | if self.data().data_ref_count.fetch_add(1, Relaxed) > usize::MAX / 2 { 150 | std::process::abort(); 151 | } 152 | Arc { ptr: self.ptr } 153 | } 154 | } 155 | 156 | impl Drop for Arc { 157 | fn drop(&mut self) { 158 | if self.data().data_ref_count.fetch_sub(1, Release) == 1 { 159 | fence(Acquire); 160 | // Safety: The data reference counter is zero, 161 | // so nothing will access the data anymore. 162 | unsafe { 163 | ManuallyDrop::drop(&mut *self.data().data.get()); 164 | } 165 | // Now that there's no `Arc`s left, 166 | // drop the implicit weak pointer that represented all `Arc`s. 
167 | drop(Weak { ptr: self.ptr }); 168 | } 169 | } 170 | } 171 | 172 | #[test] 173 | fn test() { 174 | static NUM_DROPS: AtomicUsize = AtomicUsize::new(0); 175 | 176 | struct DetectDrop; 177 | 178 | impl Drop for DetectDrop { 179 | fn drop(&mut self) { 180 | NUM_DROPS.fetch_add(1, Relaxed); 181 | } 182 | } 183 | 184 | // Create an Arc with two weak pointers. 185 | let x = Arc::new(("hello", DetectDrop)); 186 | let y = Arc::downgrade(&x); 187 | let z = Arc::downgrade(&x); 188 | 189 | let t = std::thread::spawn(move || { 190 | // Weak pointer should be upgradable at this point. 191 | let y = y.upgrade().unwrap(); 192 | assert_eq!(y.0, "hello"); 193 | }); 194 | assert_eq!(x.0, "hello"); 195 | t.join().unwrap(); 196 | 197 | // The data shouldn't be dropped yet, 198 | // and the weak pointer should be upgradable. 199 | assert_eq!(NUM_DROPS.load(Relaxed), 0); 200 | assert!(z.upgrade().is_some()); 201 | 202 | drop(x); 203 | 204 | // Now, the data should be dropped, and the 205 | // weak pointer should no longer be upgradable. 
206 | assert_eq!(NUM_DROPS.load(Relaxed), 1); 207 | assert!(z.upgrade().is_none()); 208 | } 209 | -------------------------------------------------------------------------------- /src/ch9_locks/condvar_1.rs: -------------------------------------------------------------------------------- 1 | use atomic_wait::{wait, wake_all, wake_one}; 2 | use std::sync::atomic::AtomicU32; 3 | use std::sync::atomic::Ordering::Relaxed; 4 | use super::mutex_3::MutexGuard; 5 | 6 | pub struct Condvar { 7 | counter: AtomicU32, 8 | } 9 | 10 | impl Condvar { 11 | pub const fn new() -> Self { 12 | Self { counter: AtomicU32::new(0) } 13 | } 14 | 15 | pub fn notify_one(&self) { 16 | self.counter.fetch_add(1, Relaxed); 17 | wake_one(&self.counter); 18 | } 19 | 20 | pub fn notify_all(&self) { 21 | self.counter.fetch_add(1, Relaxed); 22 | wake_all(&self.counter); 23 | } 24 | 25 | pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> { 26 | let counter_value = self.counter.load(Relaxed); 27 | 28 | // Unlock the mutex by dropping the guard, 29 | // but remember the mutex so we can lock it again later. 30 | let mutex = guard.mutex; 31 | drop(guard); 32 | 33 | // Wait, but only if the counter hasn't changed since unlocking. 34 | wait(&self.counter, counter_value); 35 | 36 | mutex.lock() 37 | } 38 | } 39 | 40 | #[test] 41 | fn test_condvar() { 42 | use super::mutex_3::Mutex; 43 | use std::thread; 44 | use std::time::Duration; 45 | 46 | let mutex = Mutex::new(0); 47 | let condvar = Condvar::new(); 48 | 49 | let mut wakeups = 0; 50 | 51 | thread::scope(|s| { 52 | s.spawn(|| { 53 | thread::sleep(Duration::from_secs(1)); 54 | *mutex.lock() = 123; 55 | condvar.notify_one(); 56 | }); 57 | 58 | let mut m = mutex.lock(); 59 | while *m < 100 { 60 | m = condvar.wait(m); 61 | wakeups += 1; 62 | } 63 | 64 | assert_eq!(*m, 123); 65 | }); 66 | 67 | // Check that the main thread actually did wait (not busy-loop), 68 | // while still allowing for a few spurious wake ups. 
69 | assert!(wakeups < 10); 70 | } 71 | -------------------------------------------------------------------------------- /src/ch9_locks/condvar_2.rs: -------------------------------------------------------------------------------- 1 | use atomic_wait::{wait, wake_all, wake_one}; 2 | use std::sync::atomic::AtomicU32; 3 | use std::sync::atomic::AtomicUsize; 4 | use std::sync::atomic::Ordering::Relaxed; 5 | use super::mutex_3::MutexGuard; 6 | 7 | pub struct Condvar { 8 | counter: AtomicU32, 9 | num_waiters: AtomicUsize, 10 | } 11 | 12 | impl Condvar { 13 | pub const fn new() -> Self { 14 | Self { 15 | counter: AtomicU32::new(0), 16 | num_waiters: AtomicUsize::new(0), 17 | } 18 | } 19 | 20 | pub fn notify_one(&self) { 21 | if self.num_waiters.load(Relaxed) > 0 { 22 | self.counter.fetch_add(1, Relaxed); 23 | wake_one(&self.counter); 24 | } 25 | } 26 | 27 | pub fn notify_all(&self) { 28 | if self.num_waiters.load(Relaxed) > 0 { 29 | self.counter.fetch_add(1, Relaxed); 30 | wake_all(&self.counter); 31 | } 32 | } 33 | 34 | pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> { 35 | self.num_waiters.fetch_add(1, Relaxed); 36 | 37 | let counter_value = self.counter.load(Relaxed); 38 | 39 | let mutex = guard.mutex; 40 | drop(guard); 41 | 42 | wait(&self.counter, counter_value); 43 | 44 | self.num_waiters.fetch_sub(1, Relaxed); 45 | 46 | mutex.lock() 47 | } 48 | } 49 | 50 | #[test] 51 | fn test_condvar() { 52 | use super::mutex_3::Mutex; 53 | use std::thread; 54 | use std::time::Duration; 55 | 56 | let mutex = Mutex::new(0); 57 | let condvar = Condvar::new(); 58 | 59 | let mut wakeups = 0; 60 | 61 | thread::scope(|s| { 62 | s.spawn(|| { 63 | thread::sleep(Duration::from_secs(1)); 64 | *mutex.lock() = 123; 65 | condvar.notify_one(); 66 | }); 67 | 68 | let mut m = mutex.lock(); 69 | while *m < 100 { 70 | m = condvar.wait(m); 71 | wakeups += 1; 72 | } 73 | 74 | assert_eq!(*m, 123); 75 | }); 76 | 77 | // Check that the main thread actually did wait (not 
busy-loop),
    // while still allowing for a few spurious wake ups.
    assert!(wakeups < 10);
}
--------------------------------------------------------------------------------
/src/ch9_locks/mod.rs:
--------------------------------------------------------------------------------
pub mod mutex_1;
pub mod mutex_2;
pub mod mutex_3;
pub mod condvar_1;
pub mod condvar_2;
pub mod rwlock_1;
pub mod rwlock_2;
pub mod rwlock_3;
--------------------------------------------------------------------------------
/src/ch9_locks/mutex_1.rs:
--------------------------------------------------------------------------------
use atomic_wait::{wait, wake_one};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Acquire, Release};

pub struct Mutex<T> {
    /// 0: unlocked
    /// 1: locked
    state: AtomicU32,
    value: UnsafeCell<T>,
}

unsafe impl<T> Sync for Mutex<T> where T: Send {}

pub struct MutexGuard<'a, T> {
    mutex: &'a Mutex<T>,
}

unsafe impl<T> Sync for MutexGuard<'_, T> where T: Sync {}

impl<T> Deref for MutexGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.mutex.value.get() }
    }
}

impl<T> DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.mutex.value.get() }
    }
}

impl<T> Mutex<T> {
    pub const fn new(value: T) -> Self {
        Self {
            state: AtomicU32::new(0), // unlocked state
            value: UnsafeCell::new(value),
        }
    }

    pub fn lock(&self) -> MutexGuard<T> {
        // Set the state to 1: locked.
        while self.state.swap(1, Acquire) == 1 {
            // If it was already locked..
            // .. wait, unless the state is no longer 1.
            wait(&self.state, 1);
        }
        MutexGuard { mutex: self }
    }
}

impl<T> Drop for MutexGuard<'_, T> {
    fn drop(&mut self) {
        // Set the state back to 0: unlocked.
        self.mutex.state.store(0, Release);
        // Wake up one of the waiting threads, if any.
        wake_one(&self.mutex.state);
    }
}
--------------------------------------------------------------------------------
/src/ch9_locks/mutex_2.rs:
--------------------------------------------------------------------------------
use atomic_wait::{wait, wake_one};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};

pub struct Mutex<T> {
    /// 0: unlocked
    /// 1: locked, no other threads waiting
    /// 2: locked, other threads waiting
    state: AtomicU32,
    value: UnsafeCell<T>,
}

unsafe impl<T> Sync for Mutex<T> where T: Send {}

pub struct MutexGuard<'a, T> {
    mutex: &'a Mutex<T>,
}

unsafe impl<T> Sync for MutexGuard<'_, T> where T: Sync {}

impl<T> Deref for MutexGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.mutex.value.get() }
    }
}

impl<T> DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.mutex.value.get() }
    }
}

impl<T> Mutex<T> {
    pub const fn new(value: T) -> Self {
        Self {
            state: AtomicU32::new(0), // unlocked state
            value: UnsafeCell::new(value),
        }
    }

    pub fn lock(&self) -> MutexGuard<T> {
        if self.state.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
            // Contended: mark state 2 so the unlocker knows to wake us.
            while self.state.swap(2, Acquire) != 0 {
                wait(&self.state, 2);
            }
        }
        MutexGuard { mutex: self }
    }
}

impl<T> Drop for MutexGuard<'_, T> {
    fn drop(&mut self) {
        // Only issue a wake syscall if someone was actually waiting (state 2).
        if self.mutex.state.swap(0, Release) == 2 {
            wake_one(&self.mutex.state);
        }
    }
}
--------------------------------------------------------------------------------
/src/ch9_locks/mutex_3.rs:
--------------------------------------------------------------------------------
use atomic_wait::{wait, wake_one};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};

pub struct Mutex<T> {
    /// 0: unlocked
    /// 1: locked, no other threads waiting
    /// 2: locked, other threads waiting
    state: AtomicU32,
    value: UnsafeCell<T>,
}

unsafe impl<T> Sync for Mutex<T> where T: Send {}

pub struct MutexGuard<'a, T> {
    pub(crate) mutex: &'a Mutex<T>,
}

unsafe impl<T> Sync for MutexGuard<'_, T> where T: Sync {}

impl<T> Deref for MutexGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.mutex.value.get() }
    }
}

impl<T> DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.mutex.value.get() }
    }
}

impl<T> Mutex<T> {
    pub const fn new(value: T) -> Self {
        Self {
            state: AtomicU32::new(0), // unlocked state
            value: UnsafeCell::new(value),
        }
    }

    pub fn lock(&self) -> MutexGuard<T> {
        if self.state.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
            // The lock was already locked.
:(
            lock_contended(&self.state);
        }
        MutexGuard { mutex: self }
    }
}

/// Slow path: spin briefly, then fall back to futex-style waiting.
fn lock_contended(state: &AtomicU32) {
    let mut spin_count = 0;

    while state.load(Relaxed) == 1 && spin_count < 100 {
        spin_count += 1;
        std::hint::spin_loop();
    }

    if state.compare_exchange(0, 1, Acquire, Relaxed).is_ok() {
        return;
    }

    while state.swap(2, Acquire) != 0 {
        wait(state, 2);
    }
}

impl<T> Drop for MutexGuard<'_, T> {
    fn drop(&mut self) {
        if self.mutex.state.swap(0, Release) == 2 {
            wake_one(&self.mutex.state);
        }
    }
}

// TODO (bench)
#[test]
fn main() {
    use std::time::Instant;
    let m = Mutex::new(0);
    std::hint::black_box(&m);
    let start = Instant::now();
    for _ in 0..5_000_000 {
        *m.lock() += 1;
    }
    let duration = start.elapsed();
    println!("locked {} times in {:?}", *m.lock(), duration);
}

// TODO (bench)
#[test]
fn main2() {
    use std::thread;
    use std::time::Instant;
    let m = Mutex::new(0);
    std::hint::black_box(&m);
    let start = Instant::now();
    thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                for _ in 0..5_000_000 {
                    *m.lock() += 1;
                }
            });
        }
    });
    let duration = start.elapsed();
    println!("locked {} times in {:?}", *m.lock(), duration);
}
--------------------------------------------------------------------------------
/src/ch9_locks/rwlock_1.rs:
--------------------------------------------------------------------------------
use atomic_wait::{wait, wake_all, wake_one};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};

pub struct RwLock<T> {
    /// The number of readers, or u32::MAX if write-locked.
    state: AtomicU32,
    value: UnsafeCell<T>,
}

unsafe impl<T> Sync for RwLock<T> where T: Send + Sync {}

impl<T> RwLock<T> {
    pub const fn new(value: T) -> Self {
        Self {
            state: AtomicU32::new(0), // Unlocked.
            value: UnsafeCell::new(value),
        }
    }

    pub fn read(&self) -> ReadGuard<T> {
        let mut s = self.state.load(Relaxed);
        loop {
            if s < u32::MAX {
                assert!(s < u32::MAX - 1, "too many readers");
                match self.state.compare_exchange_weak(
                    s, s + 1, Acquire, Relaxed
                ) {
                    Ok(_) => return ReadGuard { rwlock: self },
                    Err(e) => s = e,
                }
            }
            if s == u32::MAX {
                wait(&self.state, u32::MAX);
                s = self.state.load(Relaxed);
            }
        }
    }

    pub fn write(&self) -> WriteGuard<T> {
        while let Err(s) = self.state.compare_exchange(
            0, u32::MAX, Acquire, Relaxed
        ) {
            // Wait while already locked.
            wait(&self.state, s);
        }
        WriteGuard { rwlock: self }
    }
}

pub struct ReadGuard<'a, T> {
    rwlock: &'a RwLock<T>,
}

pub struct WriteGuard<'a, T> {
    rwlock: &'a RwLock<T>,
}

impl<T> Deref for WriteGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.value.get() }
    }
}

impl<T> DerefMut for WriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.rwlock.value.get() }
    }
}

impl<T> Deref for ReadGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.value.get() }
    }
}

impl<T> Drop for ReadGuard<'_, T> {
    fn drop(&mut self) {
        if self.rwlock.state.fetch_sub(1, Release) == 1 {
            // Wake up a waiting writer, if any.
            wake_one(&self.rwlock.state);
        }
    }
}

impl<T> Drop for WriteGuard<'_, T> {
    fn drop(&mut self) {
        self.rwlock.state.store(0, Release);
        // Wake up all waiting readers and writers.
94 | wake_all(&self.rwlock.state); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/ch9_locks/rwlock_2.rs: -------------------------------------------------------------------------------- 1 | use atomic_wait::{wait, wake_all, wake_one}; 2 | use std::cell::UnsafeCell; 3 | use std::ops::{Deref, DerefMut}; 4 | use std::sync::atomic::AtomicU32; 5 | use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; 6 | 7 | pub struct RwLock { 8 | /// The number of readers, or u32::MAX if write-locked. 9 | state: AtomicU32, 10 | /// Incremented to wake up writers. 11 | writer_wake_counter: AtomicU32, 12 | value: UnsafeCell, 13 | } 14 | 15 | unsafe impl Sync for RwLock where T: Send + Sync {} 16 | 17 | impl RwLock { 18 | pub const fn new(value: T) -> Self { 19 | Self { 20 | state: AtomicU32::new(0), 21 | writer_wake_counter: AtomicU32::new(0), 22 | value: UnsafeCell::new(value), 23 | } 24 | } 25 | 26 | pub fn read(&self) -> ReadGuard { 27 | let mut s = self.state.load(Relaxed); 28 | loop { 29 | if s < u32::MAX { 30 | assert!(s < u32::MAX - 1, "too many readers"); 31 | match self.state.compare_exchange_weak( 32 | s, s + 1, Acquire, Relaxed 33 | ) { 34 | Ok(_) => return ReadGuard { rwlock: self }, 35 | Err(e) => s = e, 36 | } 37 | } 38 | if s == u32::MAX { 39 | wait(&self.state, u32::MAX); 40 | s = self.state.load(Relaxed); 41 | } 42 | } 43 | } 44 | 45 | pub fn write(&self) -> WriteGuard { 46 | while self.state.compare_exchange( 47 | 0, u32::MAX, Acquire, Relaxed 48 | ).is_err() { 49 | let w = self.writer_wake_counter.load(Acquire); 50 | if self.state.load(Relaxed) != 0 { 51 | // Wait if the RwLock is still locked, but only if 52 | // there have been no wake signals since we checked. 
53 | wait(&self.writer_wake_counter, w); 54 | } 55 | } 56 | WriteGuard { rwlock: self } 57 | } 58 | } 59 | 60 | pub struct ReadGuard<'a, T> { 61 | rwlock: &'a RwLock, 62 | } 63 | 64 | pub struct WriteGuard<'a, T> { 65 | rwlock: &'a RwLock, 66 | } 67 | 68 | impl Deref for WriteGuard<'_, T> { 69 | type Target = T; 70 | fn deref(&self) -> &T { 71 | unsafe { &*self.rwlock.value.get() } 72 | } 73 | } 74 | 75 | impl DerefMut for WriteGuard<'_, T> { 76 | fn deref_mut(&mut self) -> &mut T { 77 | unsafe { &mut *self.rwlock.value.get() } 78 | } 79 | } 80 | 81 | impl Deref for ReadGuard<'_, T> { 82 | type Target = T; 83 | fn deref(&self) -> &T { 84 | unsafe { &*self.rwlock.value.get() } 85 | } 86 | } 87 | 88 | impl Drop for ReadGuard<'_, T> { 89 | fn drop(&mut self) { 90 | if self.rwlock.state.fetch_sub(1, Release) == 1 { 91 | self.rwlock.writer_wake_counter.fetch_add(1, Release); // New! 92 | wake_one(&self.rwlock.writer_wake_counter); // Changed! 93 | } 94 | } 95 | } 96 | 97 | impl Drop for WriteGuard<'_, T> { 98 | fn drop(&mut self) { 99 | self.rwlock.state.store(0, Release); 100 | self.rwlock.writer_wake_counter.fetch_add(1, Release); // New! 101 | wake_one(&self.rwlock.writer_wake_counter); // New! 102 | wake_all(&self.rwlock.state); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/ch9_locks/rwlock_3.rs: -------------------------------------------------------------------------------- 1 | use atomic_wait::{wait, wake_all, wake_one}; 2 | use std::cell::UnsafeCell; 3 | use std::ops::{Deref, DerefMut}; 4 | use std::sync::atomic::AtomicU32; 5 | use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; 6 | 7 | pub struct RwLock { 8 | /// The number of read locks times two, plus one if there's a writer waiting. 9 | /// u32::MAX if write locked. 10 | /// 11 | /// This means that readers may acquire the lock when 12 | /// the state is even, but need to block when odd. 
13 | state: AtomicU32, 14 | /// Incremented to wake up writers. 15 | writer_wake_counter: AtomicU32, 16 | value: UnsafeCell, 17 | } 18 | 19 | unsafe impl Sync for RwLock where T: Send + Sync {} 20 | 21 | impl RwLock { 22 | pub const fn new(value: T) -> Self { 23 | Self { 24 | state: AtomicU32::new(0), 25 | writer_wake_counter: AtomicU32::new(0), 26 | value: UnsafeCell::new(value), 27 | } 28 | } 29 | 30 | pub fn read(&self) -> ReadGuard { 31 | let mut s = self.state.load(Relaxed); 32 | loop { 33 | if s % 2 == 0 { // Even. 34 | assert!(s < u32::MAX - 2, "too many readers"); 35 | match self.state.compare_exchange_weak( 36 | s, s + 2, Acquire, Relaxed 37 | ) { 38 | Ok(_) => return ReadGuard { rwlock: self }, 39 | Err(e) => s = e, 40 | } 41 | } 42 | if s % 2 == 1 { // Odd. 43 | wait(&self.state, s); 44 | s = self.state.load(Relaxed); 45 | } 46 | } 47 | } 48 | 49 | pub fn write(&self) -> WriteGuard { 50 | let mut s = self.state.load(Relaxed); 51 | loop { 52 | // Try to lock if unlocked. 53 | if s <= 1 { 54 | match self.state.compare_exchange( 55 | s, u32::MAX, Acquire, Relaxed 56 | ) { 57 | Ok(_) => return WriteGuard { rwlock: self }, 58 | Err(e) => { s = e; continue; } 59 | } 60 | } 61 | // Block new readers, by making sure the state is odd. 
62 | if s % 2 == 0 { 63 | match self.state.compare_exchange( 64 | s, s + 1, Relaxed, Relaxed 65 | ) { 66 | Ok(_) => {} 67 | Err(e) => { s = e; continue; } 68 | } 69 | } 70 | // Wait, if it's still locked 71 | let w = self.writer_wake_counter.load(Acquire); 72 | s = self.state.load(Relaxed); 73 | if s >= 2 { 74 | wait(&self.writer_wake_counter, w); 75 | s = self.state.load(Relaxed); 76 | } 77 | } 78 | } 79 | } 80 | 81 | pub struct ReadGuard<'a, T> { 82 | rwlock: &'a RwLock, 83 | } 84 | 85 | pub struct WriteGuard<'a, T> { 86 | rwlock: &'a RwLock, 87 | } 88 | 89 | impl Deref for WriteGuard<'_, T> { 90 | type Target = T; 91 | fn deref(&self) -> &T { 92 | unsafe { &*self.rwlock.value.get() } 93 | } 94 | } 95 | 96 | impl DerefMut for WriteGuard<'_, T> { 97 | fn deref_mut(&mut self) -> &mut T { 98 | unsafe { &mut *self.rwlock.value.get() } 99 | } 100 | } 101 | 102 | impl Deref for ReadGuard<'_, T> { 103 | type Target = T; 104 | fn deref(&self) -> &T { 105 | unsafe { &*self.rwlock.value.get() } 106 | } 107 | } 108 | 109 | impl Drop for ReadGuard<'_, T> { 110 | fn drop(&mut self) { 111 | // Decrement the state by 2 to remove one read-lock. 112 | if self.rwlock.state.fetch_sub(2, Release) == 3 { 113 | // If we decremented from 3 to 1, that means 114 | // the RwLock is now unlocked _and_ there is 115 | // a waiting writer, which we wake up. 
116 | self.rwlock.writer_wake_counter.fetch_add(1, Release); 117 | wake_one(&self.rwlock.writer_wake_counter); 118 | } 119 | } 120 | } 121 | 122 | impl Drop for WriteGuard<'_, T> { 123 | fn drop(&mut self) { 124 | self.rwlock.state.store(0, Release); 125 | self.rwlock.writer_wake_counter.fetch_add(1, Release); 126 | wake_one(&self.rwlock.writer_wake_counter); 127 | wake_all(&self.rwlock.state); 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod ch4_spin_lock; 2 | pub mod ch5_channels; 3 | pub mod ch6_arc; 4 | pub mod ch9_locks; 5 | --------------------------------------------------------------------------------