├── package.json ├── rustfmt.toml ├── .gitignore ├── src ├── thread.rs ├── handle_unwind.rs ├── poisonable │ ├── flag.rs │ ├── guard.rs │ ├── error.rs │ └── poisonable.rs ├── thread │ └── scope.rst ├── collection │ ├── guard.rs │ └── utils.rs ├── rwlock │ ├── read_guard.rs │ ├── write_guard.rs │ └── rwlock.rs ├── key.rs ├── mutex │ ├── guard.rs │ └── mutex.rs ├── collection.rs ├── lib.rs ├── mutex.rs ├── rwlock.rs └── poisonable.rs ├── examples ├── basic.rs ├── double_mutex.rs ├── fibonacci.rs ├── list.rs ├── dining_philosophers.rs └── dining_philosophers_retry.rs ├── tests ├── forget.rs ├── retry_rw.rs ├── retry.rs ├── evil_mutex.rs ├── evil_try_mutex.rs ├── evil_try_rwlock.rs ├── evil_unlock_mutex.rs ├── evil_rwlock.rs └── evil_unlock_rwlock.rs ├── Cargo.toml ├── LICENSE ├── .vscode └── launch.json ├── README.md └── happylock.md /package.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | hard_tabs = true 3 | newline_style = "Unix" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | 3 | /target 4 | /Cargo.lock 5 | 6 | /mutants.out* 7 | /.cargo -------------------------------------------------------------------------------- /src/thread.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | mod scope; 4 | 5 | #[derive(Debug)] 6 | pub struct Scope<'scope, 'env: 'scope>(PhantomData<(&'env (), &'scope ())>); 7 | 8 | #[derive(Debug)] 9 | pub struct ScopedJoinHandle<'scope, T> { 10 | handle: std::thread::JoinHandle, 11 | _phantom: PhantomData<&'scope ()>, 12 | } 13 | 14 | pub struct JoinHandle { 15 | handle: 
std::thread::JoinHandle, 16 | key: crate::ThreadKey, 17 | } 18 | 19 | pub struct ThreadBuilder(std::thread::Builder); 20 | -------------------------------------------------------------------------------- /examples/basic.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | use happylock::{Mutex, ThreadKey}; 4 | 5 | const N: usize = 10; 6 | 7 | static DATA: Mutex = Mutex::new(0); 8 | 9 | fn main() { 10 | let mut threads = Vec::new(); 11 | for _ in 0..N { 12 | let th = thread::spawn(move || { 13 | let key = ThreadKey::get().unwrap(); 14 | let mut data = DATA.lock(key); 15 | *data += 1; 16 | }); 17 | threads.push(th); 18 | } 19 | 20 | for th in threads { 21 | _ = th.join(); 22 | } 23 | 24 | let key = ThreadKey::get().unwrap(); 25 | let data = DATA.lock(key); 26 | println!("{data}"); 27 | } 28 | -------------------------------------------------------------------------------- /src/handle_unwind.rs: -------------------------------------------------------------------------------- 1 | use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; 2 | 3 | /// Runs `try_fn`. If it unwinds, it will run `catch` and then continue 4 | /// unwinding. This is used instead of `scopeguard` to ensure the `catch` 5 | /// function doesn't run if the thread is already panicking. 
The unwind 6 | /// must specifically be caused by the `try_fn` 7 | pub fn handle_unwind R, G: FnOnce()>(try_fn: F, catch: G) -> R { 8 | let try_fn = AssertUnwindSafe(try_fn); 9 | catch_unwind(try_fn).unwrap_or_else(|e| { 10 | catch(); 11 | resume_unwind(e) 12 | }) 13 | } 14 | -------------------------------------------------------------------------------- /tests/forget.rs: -------------------------------------------------------------------------------- 1 | use happylock::{Mutex, ThreadKey}; 2 | 3 | #[test] 4 | fn no_new_threadkey_when_forgetting_lock() { 5 | let key = ThreadKey::get().unwrap(); 6 | let mutex = Mutex::new("foo".to_string()); 7 | 8 | let guard = mutex.lock(key); 9 | std::mem::forget(guard); 10 | 11 | assert!(ThreadKey::get().is_none()); 12 | } 13 | 14 | #[test] 15 | fn no_new_threadkey_in_scoped_lock() { 16 | let mut key = ThreadKey::get().unwrap(); 17 | let mutex = Mutex::new("foo".to_string()); 18 | 19 | mutex.scoped_lock(&mut key, |_| { 20 | assert!(ThreadKey::get().is_none()); 21 | }); 22 | 23 | mutex.lock(key); 24 | } 25 | -------------------------------------------------------------------------------- /examples/double_mutex.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; 4 | 5 | const N: usize = 10; 6 | 7 | static DATA: (Mutex, Mutex) = (Mutex::new(0), Mutex::new(String::new())); 8 | 9 | fn main() { 10 | let mut threads = Vec::new(); 11 | for _ in 0..N { 12 | let th = thread::spawn(move || { 13 | let key = ThreadKey::get().unwrap(); 14 | let lock = RefLockCollection::new(&DATA); 15 | let mut guard = lock.lock(key); 16 | *guard.1 = (100 - *guard.0).to_string(); 17 | *guard.0 += 1; 18 | }); 19 | threads.push(th); 20 | } 21 | 22 | for th in threads { 23 | _ = th.join(); 24 | } 25 | 26 | let key = ThreadKey::get().unwrap(); 27 | let data = RefLockCollection::new(&DATA); 28 | let data = data.lock(key); 29 | 
println!("{}", data.0); 30 | println!("{}", data.1); 31 | } 32 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "happylock" 3 | version = "0.5.0" 4 | authors = ["Mica White "] 5 | edition = "2021" 6 | rust-version = "1.82" 7 | description = "Free deadlock prevention" 8 | documentation = "https://docs.rs/happylock" 9 | readme = "README.md" 10 | repository = "https://github.com/botahamec/happylock/" 11 | license = "CC0-1.0" 12 | keywords = ["deadlock", "mutex", "rwlock"] 13 | categories = ["concurrency"] 14 | include = ["src/**/*", "LICENSE", "README.md"] 15 | 16 | [dependencies] 17 | lock_api = "0.4" 18 | parking_lot = { version = "0.12", optional = true } 19 | spin = { version = "0.9", optional = true } 20 | 21 | mutants = "0.0.3" # used to skip functions that can't run 22 | 23 | [dev-dependencies] 24 | parking_lot = "0.12" 25 | 26 | [features] 27 | default = ["parking_lot"] 28 | 29 | [lints.rust] 30 | unexpected_cfgs = { level = "warn", check-cfg = ["cfg(tarpaulin_include)"] } 31 | -------------------------------------------------------------------------------- /src/poisonable/flag.rs: -------------------------------------------------------------------------------- 1 | #[cfg(panic = "unwind")] 2 | use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; 3 | 4 | use super::PoisonFlag; 5 | 6 | #[cfg(panic = "unwind")] 7 | impl PoisonFlag { 8 | pub const fn new() -> Self { 9 | Self(AtomicBool::new(false)) 10 | } 11 | 12 | pub fn is_poisoned(&self) -> bool { 13 | self.0.load(Relaxed) 14 | } 15 | 16 | pub fn clear_poison(&self) { 17 | self.0.store(false, Relaxed) 18 | } 19 | 20 | pub fn poison(&self) { 21 | self.0.store(true, Relaxed); 22 | } 23 | } 24 | 25 | #[cfg(not(panic = "unwind"))] 26 | impl PoisonFlag { 27 | pub const fn new() -> Self { 28 | Self() 29 | } 30 | 31 | #[mutants::skip] // None of the tests have 
panic = "abort", so this can't be tested 32 | #[cfg(not(tarpaulin_include))] 33 | pub fn is_poisoned(&self) -> bool { 34 | false 35 | } 36 | 37 | pub fn clear_poison(&self) { 38 | () 39 | } 40 | 41 | pub fn poison(&self) { 42 | () 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /examples/fibonacci.rs: -------------------------------------------------------------------------------- 1 | use happylock::{collection, LockCollection, Mutex, ThreadKey}; 2 | use std::thread; 3 | 4 | const N: usize = 36; 5 | 6 | static DATA: [Mutex; 2] = [Mutex::new(0), Mutex::new(1)]; 7 | 8 | fn main() { 9 | let mut threads = Vec::new(); 10 | for _ in 0..N { 11 | let th = thread::spawn(move || { 12 | let key = ThreadKey::get().unwrap(); 13 | 14 | // a reference to a type that implements `OwnedLockable` will never 15 | // contain duplicates, so no duplicate checking is needed. 16 | let collection = collection::RetryingLockCollection::new_ref(&DATA); 17 | let mut guard = collection.lock(key); 18 | 19 | let x = *guard[1]; 20 | *guard[1] += *guard[0]; 21 | *guard[0] = x; 22 | }); 23 | threads.push(th); 24 | } 25 | 26 | for thread in threads { 27 | _ = thread.join(); 28 | } 29 | 30 | let key = ThreadKey::get().unwrap(); 31 | let data = LockCollection::new_ref(&DATA); 32 | let data = data.lock(key); 33 | println!("{}", data[0]); 34 | println!("{}", data[1]); 35 | } 36 | -------------------------------------------------------------------------------- /src/thread/scope.rst: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use crate::{Keyable, ThreadKey}; 4 | 5 | use super::{Scope, ScopedJoinHandle}; 6 | 7 | pub fn scope<'env, F, T>(key: impl Keyable, f: F) -> T 8 | where 9 | F: for<'scope> FnOnce(&'scope Scope<'scope, 'env>) -> T, 10 | { 11 | let scope = Scope(PhantomData); 12 | let t = f(&scope); 13 | drop(key); 14 | t 15 | } 16 | 17 | impl<'scope> Scope<'scope, '_> { 18 | 
#[allow(clippy::unused_self)] 19 | pub fn spawn( 20 | &self, 21 | f: impl FnOnce(ThreadKey) -> T + Send + 'scope, 22 | ) -> std::io::Result> { 23 | unsafe { 24 | // safety: the lifetimes ensure that the data lives long enough 25 | let handle = std::thread::Builder::new().spawn_unchecked(|| { 26 | // safety: the thread just started, so the key cannot be acquired yet 27 | let key = ThreadKey::get().unwrap_unchecked(); 28 | f(key) 29 | })?; 30 | 31 | Ok(ScopedJoinHandle { 32 | handle, 33 | _phantom: PhantomData, 34 | }) 35 | } 36 | } 37 | } 38 | 39 | impl ScopedJoinHandle<'_, T> { 40 | pub fn is_finished(&self) -> bool { 41 | self.handle.is_finished() 42 | } 43 | 44 | pub fn join(self) -> std::thread::Result { 45 | self.handle.join() 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /tests/retry_rw.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use happylock::{collection::RetryingLockCollection, RwLock, ThreadKey}; 4 | 5 | static RWLOCK_1: RwLock = RwLock::new(1); 6 | static RWLOCK_2: RwLock = RwLock::new(2); 7 | static RWLOCK_3: RwLock = RwLock::new(3); 8 | 9 | fn thread_1() { 10 | let key = ThreadKey::get().unwrap(); 11 | let mut guard = RWLOCK_2.write(key); 12 | std::thread::sleep(Duration::from_millis(75)); 13 | assert_eq!(*guard, 2); 14 | *guard = 5; 15 | } 16 | 17 | fn thread_2() { 18 | let key = ThreadKey::get().unwrap(); 19 | let collection = RetryingLockCollection::try_new([&RWLOCK_1, &RWLOCK_2, &RWLOCK_3]).unwrap(); 20 | std::thread::sleep(Duration::from_millis(25)); 21 | let guard = collection.read(key); 22 | assert_eq!(*guard[0], 1); 23 | assert_eq!(*guard[1], 5); 24 | assert_eq!(*guard[2], 3); 25 | } 26 | 27 | fn thread_3() { 28 | let key = ThreadKey::get().unwrap(); 29 | std::thread::sleep(Duration::from_millis(50)); 30 | let guard = RWLOCK_1.write(key); 31 | std::thread::sleep(Duration::from_millis(50)); 32 | assert_eq!(*guard, 
1); 33 | } 34 | 35 | #[test] 36 | fn retries() { 37 | let t1 = std::thread::spawn(thread_1); 38 | let t2 = std::thread::spawn(thread_2); 39 | let t3 = std::thread::spawn(thread_3); 40 | 41 | t1.join().unwrap(); 42 | t2.join().unwrap(); 43 | t3.join().unwrap(); 44 | } 45 | -------------------------------------------------------------------------------- /examples/list.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; 4 | 5 | const N: usize = 10; 6 | 7 | static DATA: [Mutex; 6] = [ 8 | Mutex::new(0), 9 | Mutex::new(1), 10 | Mutex::new(2), 11 | Mutex::new(3), 12 | Mutex::new(4), 13 | Mutex::new(5), 14 | ]; 15 | 16 | static SEED: Mutex = Mutex::new(42); 17 | 18 | fn random(key: &mut ThreadKey) -> usize { 19 | SEED.scoped_lock(key, |seed| { 20 | let x = *seed; 21 | let x = x ^ (x << 13); 22 | let x = x ^ (x >> 17); 23 | let x = x ^ (x << 5); 24 | *seed = x; 25 | x as usize 26 | }) 27 | } 28 | 29 | fn main() { 30 | let mut threads = Vec::new(); 31 | for _ in 0..N { 32 | let th = thread::spawn(move || { 33 | let mut key = ThreadKey::get().unwrap(); 34 | loop { 35 | let mut data = Vec::new(); 36 | for _ in 0..3 { 37 | let rand = random(&mut key); 38 | data.push(&DATA[rand % 6]); 39 | } 40 | 41 | let Some(lock) = RefLockCollection::try_new(&data) else { 42 | continue; 43 | }; 44 | let mut guard = lock.lock(key); 45 | *guard[0] += *guard[1]; 46 | *guard[1] += *guard[2]; 47 | *guard[2] += *guard[0]; 48 | 49 | return; 50 | } 51 | }); 52 | threads.push(th); 53 | } 54 | 55 | for th in threads { 56 | _ = th.join(); 57 | } 58 | 59 | let key = ThreadKey::get().unwrap(); 60 | let data = RefLockCollection::new(&DATA); 61 | let data = data.lock(key); 62 | for val in &*data { 63 | println!("{val}"); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /tests/retry.rs: 
-------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use happylock::{collection::RetryingLockCollection, Mutex, ThreadKey}; 4 | 5 | static MUTEX_1: Mutex = Mutex::new(1); 6 | static MUTEX_2: Mutex = Mutex::new(2); 7 | static MUTEX_3: Mutex = Mutex::new(3); 8 | 9 | fn thread_1() { 10 | let key = ThreadKey::get().unwrap(); 11 | let mut guard = MUTEX_2.lock(key); 12 | std::thread::sleep(Duration::from_millis(100)); 13 | *guard = 5; 14 | } 15 | 16 | fn thread_2() { 17 | let mut key = ThreadKey::get().unwrap(); 18 | std::thread::sleep(Duration::from_millis(50)); 19 | let collection = RetryingLockCollection::try_new([&MUTEX_1, &MUTEX_2, &MUTEX_3]).unwrap(); 20 | collection.scoped_lock(&mut key, |guard| { 21 | assert_eq!(*guard[0], 4); 22 | assert_eq!(*guard[1], 5); 23 | assert_eq!(*guard[2], 3); 24 | }); 25 | } 26 | 27 | fn thread_3() { 28 | let key = ThreadKey::get().unwrap(); 29 | std::thread::sleep(Duration::from_millis(75)); 30 | let mut guard = MUTEX_1.lock(key); 31 | std::thread::sleep(Duration::from_millis(100)); 32 | *guard = 4; 33 | } 34 | 35 | fn thread_4() { 36 | let mut key = ThreadKey::get().unwrap(); 37 | std::thread::sleep(Duration::from_millis(25)); 38 | let collection = RetryingLockCollection::try_new([&MUTEX_1, &MUTEX_2]).unwrap(); 39 | assert!(collection.scoped_try_lock(&mut key, |_| {}).is_err()); 40 | } 41 | 42 | #[test] 43 | fn retries() { 44 | let t1 = std::thread::spawn(thread_1); 45 | let t2 = std::thread::spawn(thread_2); 46 | let t3 = std::thread::spawn(thread_3); 47 | let t4 = std::thread::spawn(thread_4); 48 | 49 | t1.join().unwrap(); 50 | t2.join().unwrap(); 51 | t3.join().unwrap(); 52 | t4.join().unwrap(); 53 | } 54 | -------------------------------------------------------------------------------- /examples/dining_philosophers.rs: -------------------------------------------------------------------------------- 1 | use std::{thread, time::Duration}; 2 | 3 | use 
happylock::{collection, Mutex, ThreadKey}; 4 | 5 | static PHILOSOPHERS: [Philosopher; 5] = [ 6 | Philosopher { 7 | name: "Socrates", 8 | left: 0, 9 | right: 1, 10 | }, 11 | Philosopher { 12 | name: "John Rawls", 13 | left: 1, 14 | right: 2, 15 | }, 16 | Philosopher { 17 | name: "Jeremy Bentham", 18 | left: 2, 19 | right: 3, 20 | }, 21 | Philosopher { 22 | name: "John Stuart Mill", 23 | left: 3, 24 | right: 4, 25 | }, 26 | Philosopher { 27 | name: "Judith Butler", 28 | left: 4, 29 | right: 0, 30 | }, 31 | ]; 32 | 33 | static FORKS: [Mutex<()>; 5] = [ 34 | Mutex::new(()), 35 | Mutex::new(()), 36 | Mutex::new(()), 37 | Mutex::new(()), 38 | Mutex::new(()), 39 | ]; 40 | 41 | struct Philosopher { 42 | name: &'static str, 43 | left: usize, 44 | right: usize, 45 | } 46 | 47 | impl Philosopher { 48 | fn cycle(&self) { 49 | let key = ThreadKey::get().unwrap(); 50 | thread::sleep(Duration::from_secs(1)); 51 | 52 | // safety: no philosopher asks for the same fork twice 53 | let forks = [&FORKS[self.left], &FORKS[self.right]]; 54 | let forks = unsafe { collection::RefLockCollection::new_unchecked(&forks) }; 55 | let forks = forks.lock(key); 56 | println!("{} is eating...", self.name); 57 | thread::sleep(Duration::from_secs(1)); 58 | println!("{} is done eating", self.name); 59 | drop(forks); 60 | } 61 | } 62 | 63 | fn main() { 64 | let handles: Vec<_> = PHILOSOPHERS 65 | .iter() 66 | .map(|philosopher| thread::spawn(move || philosopher.cycle())) 67 | // The `collect` is absolutely necessary, because we're using lazy 68 | // iterators. If `collect` isn't used, then the thread won't spawn 69 | // until we try to join on it. 
70 | .collect(); 71 | 72 | for handle in handles { 73 | _ = handle.join(); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /examples/dining_philosophers_retry.rs: -------------------------------------------------------------------------------- 1 | use std::{thread, time::Duration}; 2 | 3 | use happylock::{collection, Mutex, ThreadKey}; 4 | 5 | static PHILOSOPHERS: [Philosopher; 5] = [ 6 | Philosopher { 7 | name: "Socrates", 8 | left: 0, 9 | right: 1, 10 | }, 11 | Philosopher { 12 | name: "John Rawls", 13 | left: 1, 14 | right: 2, 15 | }, 16 | Philosopher { 17 | name: "Jeremy Bentham", 18 | left: 2, 19 | right: 3, 20 | }, 21 | Philosopher { 22 | name: "John Stuart Mill", 23 | left: 3, 24 | right: 4, 25 | }, 26 | Philosopher { 27 | name: "Judith Butler", 28 | left: 4, 29 | right: 0, 30 | }, 31 | ]; 32 | 33 | static FORKS: [Mutex<()>; 5] = [ 34 | Mutex::new(()), 35 | Mutex::new(()), 36 | Mutex::new(()), 37 | Mutex::new(()), 38 | Mutex::new(()), 39 | ]; 40 | 41 | struct Philosopher { 42 | name: &'static str, 43 | left: usize, 44 | right: usize, 45 | } 46 | 47 | impl Philosopher { 48 | fn cycle(&self) { 49 | let key = ThreadKey::get().unwrap(); 50 | thread::sleep(Duration::from_secs(1)); 51 | 52 | // safety: no philosopher asks for the same fork twice 53 | let forks = [&FORKS[self.left], &FORKS[self.right]]; 54 | let forks = unsafe { collection::RetryingLockCollection::new_unchecked(&forks) }; 55 | let forks = forks.lock(key); 56 | println!("{} is eating...", self.name); 57 | thread::sleep(Duration::from_secs(1)); 58 | println!("{} is done eating", self.name); 59 | drop(forks); 60 | } 61 | } 62 | 63 | fn main() { 64 | let handles: Vec<_> = PHILOSOPHERS 65 | .iter() 66 | .map(|philosopher| thread::spawn(move || philosopher.cycle())) 67 | // The `collect` is absolutely necessary, because we're using lazy 68 | // iterators. If `collect` isn't used, then the thread won't spawn 69 | // until we try to join on it. 
70 | .collect(); 71 | 72 | for handle in handles { 73 | _ = handle.join(); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /tests/evil_mutex.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use happylock::collection::{BoxedLockCollection, RetryingLockCollection}; 4 | use happylock::mutex::Mutex; 5 | use happylock::ThreadKey; 6 | use lock_api::{GuardNoSend, RawMutex}; 7 | 8 | struct EvilMutex { 9 | inner: parking_lot::RawMutex, 10 | } 11 | 12 | unsafe impl RawMutex for EvilMutex { 13 | #[allow(clippy::declare_interior_mutable_const)] 14 | const INIT: Self = Self { 15 | inner: parking_lot::RawMutex::INIT, 16 | }; 17 | 18 | type GuardMarker = GuardNoSend; 19 | 20 | fn lock(&self) { 21 | panic!("mwahahahaha"); 22 | } 23 | 24 | fn try_lock(&self) -> bool { 25 | self.inner.try_lock() 26 | } 27 | 28 | unsafe fn unlock(&self) { 29 | panic!("mwahahahaha"); 30 | } 31 | } 32 | 33 | #[test] 34 | fn boxed_mutexes() { 35 | let mut key = ThreadKey::get().unwrap(); 36 | let good_mutex: Arc> = Arc::new(Mutex::new(5)); 37 | let evil_mutex: Arc> = Arc::new(Mutex::new(7)); 38 | let useless_mutex: Arc> = Arc::new(Mutex::new(10)); 39 | let c_good = Arc::clone(&good_mutex); 40 | let c_evil = Arc::clone(&evil_mutex); 41 | let c_useless = Arc::clone(&useless_mutex); 42 | 43 | let r = std::thread::spawn(move || { 44 | let key = ThreadKey::get().unwrap(); 45 | let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 46 | _ = collection.lock(key); 47 | }) 48 | .join(); 49 | 50 | assert!(r.is_err()); 51 | assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 52 | assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 53 | assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 54 | } 55 | 56 | #[test] 57 | fn retrying_mutexes() { 58 | let mut key = ThreadKey::get().unwrap(); 59 | let good_mutex: Arc> = 
Arc::new(Mutex::new(5)); 60 | let evil_mutex: Arc> = Arc::new(Mutex::new(7)); 61 | let useless_mutex: Arc> = Arc::new(Mutex::new(10)); 62 | let c_good = Arc::clone(&good_mutex); 63 | let c_evil = Arc::clone(&evil_mutex); 64 | let c_useless = Arc::clone(&useless_mutex); 65 | 66 | let r = std::thread::spawn(move || { 67 | let key = ThreadKey::get().unwrap(); 68 | let collection = 69 | RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 70 | collection.lock(key); 71 | }) 72 | .join(); 73 | 74 | assert!(r.is_err()); 75 | assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 76 | assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 77 | assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 78 | } 79 | -------------------------------------------------------------------------------- /tests/evil_try_mutex.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use happylock::{ 4 | collection::{BoxedLockCollection, RetryingLockCollection}, 5 | mutex::Mutex, 6 | ThreadKey, 7 | }; 8 | use lock_api::{GuardNoSend, RawMutex}; 9 | 10 | struct EvilMutex { 11 | inner: parking_lot::RawMutex, 12 | } 13 | 14 | unsafe impl RawMutex for EvilMutex { 15 | #[allow(clippy::declare_interior_mutable_const)] 16 | const INIT: Self = Self { 17 | inner: parking_lot::RawMutex::INIT, 18 | }; 19 | 20 | type GuardMarker = GuardNoSend; 21 | 22 | fn lock(&self) { 23 | self.inner.lock() 24 | } 25 | 26 | fn try_lock(&self) -> bool { 27 | panic!("mwahahahaha"); 28 | } 29 | 30 | unsafe fn unlock(&self) { 31 | self.inner.unlock() 32 | } 33 | } 34 | 35 | #[test] 36 | fn boxed_mutexes() { 37 | let mut key = ThreadKey::get().unwrap(); 38 | let good_mutex: Arc> = Arc::new(Mutex::new(5)); 39 | let evil_mutex: Arc> = Arc::new(Mutex::new(7)); 40 | let useless_mutex: Arc> = Arc::new(Mutex::new(10)); 41 | let c_good = Arc::clone(&good_mutex); 42 | let c_evil = Arc::clone(&evil_mutex); 43 | let 
c_useless = Arc::clone(&useless_mutex); 44 | 45 | let r = std::thread::spawn(move || { 46 | let key = ThreadKey::get().unwrap(); 47 | let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 48 | let g = collection.try_lock(key); 49 | println!("{}", g.unwrap().1); 50 | }) 51 | .join(); 52 | 53 | assert!(r.is_err()); 54 | assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 55 | assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 56 | assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 57 | } 58 | 59 | #[test] 60 | fn retrying_mutexes() { 61 | let mut key = ThreadKey::get().unwrap(); 62 | let good_mutex: Arc> = Arc::new(Mutex::new(5)); 63 | let evil_mutex: Arc> = Arc::new(Mutex::new(7)); 64 | let useless_mutex: Arc> = Arc::new(Mutex::new(10)); 65 | let c_good = Arc::clone(&good_mutex); 66 | let c_evil = Arc::clone(&evil_mutex); 67 | let c_useless = Arc::clone(&useless_mutex); 68 | 69 | let r = std::thread::spawn(move || { 70 | let key = ThreadKey::get().unwrap(); 71 | let collection = 72 | RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 73 | let _ = collection.try_lock(key); 74 | }) 75 | .join(); 76 | 77 | assert!(r.is_err()); 78 | assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 79 | assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 80 | assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 81 | } 82 | -------------------------------------------------------------------------------- /tests/evil_try_rwlock.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use happylock::collection::{BoxedLockCollection, RetryingLockCollection}; 4 | use happylock::rwlock::RwLock; 5 | use happylock::ThreadKey; 6 | use lock_api::{GuardNoSend, RawRwLock}; 7 | 8 | struct EvilRwLock { 9 | inner: parking_lot::RawRwLock, 10 | } 11 | 12 | unsafe impl RawRwLock for EvilRwLock { 13 | 
#[allow(clippy::declare_interior_mutable_const)] 14 | const INIT: Self = Self { 15 | inner: parking_lot::RawRwLock::INIT, 16 | }; 17 | 18 | type GuardMarker = GuardNoSend; 19 | 20 | fn lock_shared(&self) { 21 | self.inner.lock_shared() 22 | } 23 | 24 | fn try_lock_shared(&self) -> bool { 25 | panic!("mwahahahaha") 26 | } 27 | 28 | unsafe fn unlock_shared(&self) { 29 | self.inner.unlock_shared() 30 | } 31 | 32 | fn lock_exclusive(&self) { 33 | self.inner.lock_exclusive() 34 | } 35 | 36 | fn try_lock_exclusive(&self) -> bool { 37 | panic!("mwahahahaha") 38 | } 39 | 40 | unsafe fn unlock_exclusive(&self) { 41 | self.inner.unlock_exclusive() 42 | } 43 | } 44 | 45 | #[test] 46 | fn boxed_rwlocks() { 47 | let mut key = ThreadKey::get().unwrap(); 48 | let good_mutex: Arc> = Arc::new(RwLock::new(5)); 49 | let evil_mutex: Arc> = Arc::new(RwLock::new(7)); 50 | let useless_mutex: Arc> = Arc::new(RwLock::new(10)); 51 | let c_good = Arc::clone(&good_mutex); 52 | let c_evil = Arc::clone(&evil_mutex); 53 | let c_useless = Arc::clone(&useless_mutex); 54 | 55 | let r = std::thread::spawn(move || { 56 | let key = ThreadKey::get().unwrap(); 57 | let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 58 | let _ = collection.try_read(key); 59 | }) 60 | .join(); 61 | 62 | assert!(r.is_err()); 63 | assert!(good_mutex.scoped_try_read(&mut key, |_| {}).is_ok()); 64 | assert!(evil_mutex.scoped_try_read(&mut key, |_| {}).is_err()); 65 | assert!(useless_mutex.scoped_try_read(&mut key, |_| {}).is_ok()); 66 | } 67 | 68 | #[test] 69 | fn retrying_rwlocks() { 70 | let mut key = ThreadKey::get().unwrap(); 71 | let good_mutex: Arc> = Arc::new(RwLock::new(5)); 72 | let evil_mutex: Arc> = Arc::new(RwLock::new(7)); 73 | let useless_mutex: Arc> = Arc::new(RwLock::new(10)); 74 | let c_good = Arc::clone(&good_mutex); 75 | let c_evil = Arc::clone(&evil_mutex); 76 | let c_useless = Arc::clone(&useless_mutex); 77 | 78 | let r = std::thread::spawn(move || { 79 | let key = 
ThreadKey::get().unwrap(); 80 | let collection = 81 | RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 82 | _ = collection.try_read(key); 83 | }) 84 | .join(); 85 | 86 | assert!(r.is_err()); 87 | assert!(good_mutex.scoped_try_read(&mut key, |_| {}).is_ok()); 88 | assert!(evil_mutex.scoped_try_read(&mut key, |_| {}).is_err()); 89 | assert!(useless_mutex.scoped_try_read(&mut key, |_| {}).is_ok()); 90 | } 91 | -------------------------------------------------------------------------------- /tests/evil_unlock_mutex.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use happylock::collection::{BoxedLockCollection, RetryingLockCollection}; 4 | use happylock::mutex::Mutex; 5 | use happylock::ThreadKey; 6 | use lock_api::{GuardNoSend, RawMutex}; 7 | 8 | struct KindaEvilMutex { 9 | inner: parking_lot::RawMutex, 10 | } 11 | 12 | struct EvilMutex {} 13 | 14 | unsafe impl RawMutex for KindaEvilMutex { 15 | #[allow(clippy::declare_interior_mutable_const)] 16 | const INIT: Self = Self { 17 | inner: parking_lot::RawMutex::INIT, 18 | }; 19 | 20 | type GuardMarker = GuardNoSend; 21 | 22 | fn lock(&self) { 23 | self.inner.lock() 24 | } 25 | 26 | fn try_lock(&self) -> bool { 27 | self.inner.try_lock() 28 | } 29 | 30 | unsafe fn unlock(&self) { 31 | panic!("mwahahahaha"); 32 | } 33 | } 34 | 35 | unsafe impl RawMutex for EvilMutex { 36 | #[allow(clippy::declare_interior_mutable_const)] 37 | const INIT: Self = Self {}; 38 | 39 | type GuardMarker = GuardNoSend; 40 | 41 | fn lock(&self) { 42 | panic!("mwahahahaha"); 43 | } 44 | 45 | fn try_lock(&self) -> bool { 46 | panic!("mwahahahaha") 47 | } 48 | 49 | unsafe fn unlock(&self) { 50 | panic!("mwahahahaha"); 51 | } 52 | } 53 | 54 | #[test] 55 | fn boxed_mutexes() { 56 | let mut key = ThreadKey::get().unwrap(); 57 | let kinda_evil_mutex: Arc> = Arc::new(Mutex::new(5)); 58 | let evil_mutex: Arc> = Arc::new(Mutex::new(7)); 59 | let useless_mutex: 
Arc> = Arc::new(Mutex::new(10)); 60 | let c_good = Arc::clone(&kinda_evil_mutex); 61 | let c_evil = Arc::clone(&evil_mutex); 62 | let c_useless = Arc::clone(&useless_mutex); 63 | 64 | let r = std::thread::spawn(move || { 65 | let key = ThreadKey::get().unwrap(); 66 | let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 67 | _ = collection.lock(key); 68 | }) 69 | .join(); 70 | 71 | assert!(r.is_err()); 72 | assert!(kinda_evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 73 | assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 74 | assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 75 | } 76 | 77 | #[test] 78 | fn retrying_mutexes() { 79 | let mut key = ThreadKey::get().unwrap(); 80 | let kinda_evil_mutex: Arc> = Arc::new(Mutex::new(5)); 81 | let evil_mutex: Arc> = Arc::new(Mutex::new(7)); 82 | let useless_mutex: Arc> = Arc::new(Mutex::new(10)); 83 | let c_good = Arc::clone(&kinda_evil_mutex); 84 | let c_evil = Arc::clone(&evil_mutex); 85 | let c_useless = Arc::clone(&useless_mutex); 86 | 87 | let r = std::thread::spawn(move || { 88 | let key = ThreadKey::get().unwrap(); 89 | let collection = 90 | RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 91 | collection.lock(key); 92 | }) 93 | .join(); 94 | 95 | assert!(r.is_err()); 96 | assert!(kinda_evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 97 | assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err()); 98 | assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok()); 99 | } 100 | -------------------------------------------------------------------------------- /tests/evil_rwlock.rs: -------------------------------------------------------------------------------- 1 | use std::panic::AssertUnwindSafe; 2 | use std::sync::Arc; 3 | 4 | use happylock::collection::{BoxedLockCollection, RetryingLockCollection}; 5 | use happylock::rwlock::RwLock; 6 | use happylock::ThreadKey; 7 | use lock_api::{GuardNoSend, 
RawRwLock}; 8 | 9 | struct EvilRwLock { 10 | inner: parking_lot::RawRwLock, 11 | } 12 | 13 | unsafe impl RawRwLock for EvilRwLock { 14 | #[allow(clippy::declare_interior_mutable_const)] 15 | const INIT: Self = Self { 16 | inner: parking_lot::RawRwLock::INIT, 17 | }; 18 | 19 | type GuardMarker = GuardNoSend; 20 | 21 | fn lock_shared(&self) { 22 | panic!("mwahahahaha"); 23 | } 24 | 25 | fn try_lock_shared(&self) -> bool { 26 | self.inner.try_lock_shared() 27 | } 28 | 29 | unsafe fn unlock_shared(&self) { 30 | panic!("mwahahahaha"); 31 | } 32 | 33 | fn lock_exclusive(&self) { 34 | panic!("mwahahahaha"); 35 | } 36 | 37 | fn try_lock_exclusive(&self) -> bool { 38 | self.inner.try_lock_exclusive() 39 | } 40 | 41 | unsafe fn unlock_exclusive(&self) { 42 | panic!("mwahahahaha"); 43 | } 44 | } 45 | 46 | #[test] 47 | fn boxed_rwlocks() { 48 | let mut key = ThreadKey::get().unwrap(); 49 | let good_mutex: Arc> = Arc::new(RwLock::new(5)); 50 | let evil_mutex: Arc> = Arc::new(RwLock::new(7)); 51 | let useless_mutex: Arc> = Arc::new(RwLock::new(10)); 52 | let c_good = Arc::clone(&good_mutex); 53 | let c_evil = Arc::clone(&evil_mutex); 54 | let c_useless = Arc::clone(&useless_mutex); 55 | 56 | let r = std::thread::spawn(move || { 57 | let key = ThreadKey::get().unwrap(); 58 | let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 59 | _ = collection.lock(key); 60 | }) 61 | .join(); 62 | 63 | assert!(r.is_err()); 64 | assert!(good_mutex.scoped_try_write(&mut key, |_| {}).is_ok()); 65 | assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err()); 66 | assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok()); 67 | 68 | std::thread::scope(|s| { 69 | s.spawn(|| { 70 | let evil_mutex = AssertUnwindSafe(evil_mutex); 71 | let r = std::panic::catch_unwind(|| { 72 | let key = ThreadKey::get().unwrap(); 73 | evil_mutex.write(key); 74 | }); 75 | 76 | assert!(r.is_err()); 77 | }); 78 | 79 | s.spawn(|| { 80 | let key = ThreadKey::get().unwrap(); 
81 | good_mutex.write(key); 82 | }); 83 | }); 84 | } 85 | 86 | #[test] 87 | fn retrying_rwlocks() { 88 | let mut key = ThreadKey::get().unwrap(); 89 | let good_mutex: Arc> = Arc::new(RwLock::new(5)); 90 | let evil_mutex: Arc> = Arc::new(RwLock::new(7)); 91 | let useless_mutex: Arc> = Arc::new(RwLock::new(10)); 92 | let c_good = Arc::clone(&good_mutex); 93 | let c_evil = Arc::clone(&evil_mutex); 94 | let c_useless = Arc::clone(&useless_mutex); 95 | 96 | let r = std::thread::spawn(move || { 97 | let key = ThreadKey::get().unwrap(); 98 | let collection = 99 | RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 100 | collection.lock(key); 101 | }) 102 | .join(); 103 | 104 | assert!(r.is_err()); 105 | assert!(good_mutex.scoped_try_write(&mut key, |_| {}).is_ok()); 106 | assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err()); 107 | assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok()); 108 | } 109 | -------------------------------------------------------------------------------- /src/poisonable/guard.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Debug, Display}; 2 | use std::hash::Hash; 3 | use std::marker::PhantomData; 4 | use std::ops::{Deref, DerefMut}; 5 | 6 | use super::{PoisonFlag, PoisonGuard, PoisonRef}; 7 | 8 | impl<'a, Guard> PoisonRef<'a, Guard> { 9 | // This is used so that we don't keep accidentally adding the flag reference 10 | pub(super) const fn new(flag: &'a PoisonFlag, guard: Guard) -> Self { 11 | Self { 12 | guard, 13 | #[cfg(panic = "unwind")] 14 | flag, 15 | _phantom: PhantomData, 16 | } 17 | } 18 | } 19 | 20 | impl Drop for PoisonRef<'_, Guard> { 21 | fn drop(&mut self) { 22 | #[cfg(panic = "unwind")] 23 | if std::thread::panicking() { 24 | self.flag.poison(); 25 | } 26 | } 27 | } 28 | 29 | #[mutants::skip] // hashing involves RNG and is hard to test 30 | #[cfg(not(tarpaulin_include))] 31 | impl Hash for PoisonRef<'_, Guard> { 32 | fn hash(&self, 
state: &mut H) { 33 | self.guard.hash(state) 34 | } 35 | } 36 | 37 | #[mutants::skip] 38 | #[cfg(not(tarpaulin_include))] 39 | impl Debug for PoisonRef<'_, Guard> { 40 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 41 | Debug::fmt(&**self, f) 42 | } 43 | } 44 | 45 | impl Display for PoisonRef<'_, Guard> { 46 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 47 | Display::fmt(&**self, f) 48 | } 49 | } 50 | 51 | impl Deref for PoisonRef<'_, Guard> { 52 | type Target = Guard; 53 | 54 | fn deref(&self) -> &Self::Target { 55 | &self.guard 56 | } 57 | } 58 | 59 | impl DerefMut for PoisonRef<'_, Guard> { 60 | fn deref_mut(&mut self) -> &mut Self::Target { 61 | &mut self.guard 62 | } 63 | } 64 | 65 | impl AsRef for PoisonRef<'_, Guard> { 66 | fn as_ref(&self) -> &Guard { 67 | &self.guard 68 | } 69 | } 70 | 71 | impl AsMut for PoisonRef<'_, Guard> { 72 | fn as_mut(&mut self) -> &mut Guard { 73 | &mut self.guard 74 | } 75 | } 76 | 77 | #[mutants::skip] // hashing involves RNG and is hard to test 78 | #[cfg(not(tarpaulin_include))] 79 | impl Hash for PoisonGuard<'_, Guard> { 80 | fn hash(&self, state: &mut H) { 81 | self.guard.hash(state) 82 | } 83 | } 84 | 85 | #[mutants::skip] 86 | #[cfg(not(tarpaulin_include))] 87 | impl Debug for PoisonGuard<'_, Guard> { 88 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 89 | Debug::fmt(&self.guard, f) 90 | } 91 | } 92 | 93 | impl Display for PoisonGuard<'_, Guard> { 94 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 95 | Display::fmt(&self.guard, f) 96 | } 97 | } 98 | 99 | impl> Deref for PoisonGuard<'_, Guard> { 100 | type Target = T; 101 | 102 | fn deref(&self) -> &Self::Target { 103 | #[allow(clippy::explicit_auto_deref)] // fixing this results in a compiler error 104 | &*self.guard.guard 105 | } 106 | } 107 | 108 | impl> DerefMut for PoisonGuard<'_, Guard> { 109 | fn deref_mut(&mut self) -> &mut Self::Target { 110 | 
#[allow(clippy::explicit_auto_deref)] // fixing this results in a compiler error 111 | &mut *self.guard.guard 112 | } 113 | } 114 | 115 | impl AsRef for PoisonGuard<'_, Guard> { 116 | fn as_ref(&self) -> &Guard { 117 | &self.guard.guard 118 | } 119 | } 120 | 121 | impl AsMut for PoisonGuard<'_, Guard> { 122 | fn as_mut(&mut self) -> &mut Guard { 123 | &mut self.guard.guard 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /tests/evil_unlock_rwlock.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use happylock::collection::{BoxedLockCollection, RetryingLockCollection}; 4 | use happylock::rwlock::RwLock; 5 | use happylock::ThreadKey; 6 | use lock_api::{GuardNoSend, RawRwLock}; 7 | 8 | struct KindaEvilRwLock { 9 | inner: parking_lot::RawRwLock, 10 | } 11 | 12 | struct EvilRwLock {} 13 | 14 | unsafe impl RawRwLock for KindaEvilRwLock { 15 | #[allow(clippy::declare_interior_mutable_const)] 16 | const INIT: Self = Self { 17 | inner: parking_lot::RawRwLock::INIT, 18 | }; 19 | 20 | type GuardMarker = GuardNoSend; 21 | 22 | fn lock_shared(&self) { 23 | self.inner.lock_shared() 24 | } 25 | 26 | fn try_lock_shared(&self) -> bool { 27 | self.inner.try_lock_shared() 28 | } 29 | 30 | unsafe fn unlock_shared(&self) { 31 | panic!("mwahahahaha"); 32 | } 33 | 34 | fn lock_exclusive(&self) { 35 | self.inner.lock_exclusive() 36 | } 37 | 38 | fn try_lock_exclusive(&self) -> bool { 39 | self.inner.try_lock_exclusive() 40 | } 41 | 42 | unsafe fn unlock_exclusive(&self) { 43 | panic!("mwahahahaha"); 44 | } 45 | } 46 | 47 | unsafe impl RawRwLock for EvilRwLock { 48 | #[allow(clippy::declare_interior_mutable_const)] 49 | const INIT: Self = Self {}; 50 | 51 | type GuardMarker = GuardNoSend; 52 | 53 | fn lock_shared(&self) { 54 | panic!("mwahahahaha"); 55 | } 56 | 57 | fn try_lock_shared(&self) -> bool { 58 | panic!("mwahahahaha"); 59 | } 60 | 61 | unsafe fn 
unlock_shared(&self) { 62 | panic!("mwahahahaha"); 63 | } 64 | 65 | fn lock_exclusive(&self) { 66 | panic!("mwahahahaha"); 67 | } 68 | 69 | fn try_lock_exclusive(&self) -> bool { 70 | panic!("mwahahahaha") 71 | } 72 | 73 | unsafe fn unlock_exclusive(&self) { 74 | panic!("mwahahahaha"); 75 | } 76 | } 77 | 78 | #[test] 79 | fn boxed_rwlocks() { 80 | let mut key = ThreadKey::get().unwrap(); 81 | let kinda_evil_mutex: RwLock = RwLock::new(5); 82 | let evil_mutex: RwLock = RwLock::new(7); 83 | let useless_mutex: RwLock = RwLock::new(10); 84 | 85 | let r = std::thread::scope(|s| { 86 | let r = s 87 | .spawn(|| { 88 | let key = ThreadKey::get().unwrap(); 89 | let collection = 90 | BoxedLockCollection::try_new((&kinda_evil_mutex, &evil_mutex, &useless_mutex)) 91 | .unwrap(); 92 | _ = collection.read(key); 93 | }) 94 | .join(); 95 | 96 | r 97 | }); 98 | 99 | assert!(r.is_err()); 100 | assert!(kinda_evil_mutex.scoped_try_write(&mut key, |_| {}).is_err()); 101 | assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err()); 102 | assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok()); 103 | } 104 | 105 | #[test] 106 | fn retrying_rwlocks() { 107 | let mut key = ThreadKey::get().unwrap(); 108 | let kinda_evil_mutex: Arc> = Arc::new(RwLock::new(5)); 109 | let evil_mutex: Arc> = Arc::new(RwLock::new(7)); 110 | let useless_mutex: Arc> = Arc::new(RwLock::new(10)); 111 | let c_good = Arc::clone(&kinda_evil_mutex); 112 | let c_evil = Arc::clone(&evil_mutex); 113 | let c_useless = Arc::clone(&useless_mutex); 114 | 115 | let r = std::thread::spawn(move || { 116 | let key = ThreadKey::get().unwrap(); 117 | let collection = 118 | RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap(); 119 | collection.read(key); 120 | }) 121 | .join(); 122 | 123 | assert!(r.is_err()); 124 | assert!(kinda_evil_mutex.scoped_try_write(&mut key, |_| {}).is_err()); 125 | assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err()); 126 | 
assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok()); 127 | } 128 | -------------------------------------------------------------------------------- /src/collection/guard.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Debug, Display}; 2 | use std::hash::Hash; 3 | use std::ops::{Deref, DerefMut}; 4 | 5 | use super::LockGuard; 6 | 7 | #[mutants::skip] // hashing involves RNG and is hard to test 8 | #[cfg(not(tarpaulin_include))] 9 | impl Hash for LockGuard { 10 | fn hash(&self, state: &mut H) { 11 | self.guard.hash(state) 12 | } 13 | } 14 | 15 | // No implementations of Eq, PartialEq, PartialOrd, or Ord 16 | // You can't implement both PartialEq and PartialEq 17 | // It's easier to just implement neither and ask users to dereference 18 | // This is less of a problem when using the scoped lock API 19 | 20 | #[mutants::skip] 21 | #[cfg(not(tarpaulin_include))] 22 | impl Debug for LockGuard { 23 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 24 | Debug::fmt(&**self, f) 25 | } 26 | } 27 | 28 | impl Display for LockGuard { 29 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 30 | Display::fmt(&**self, f) 31 | } 32 | } 33 | 34 | impl Deref for LockGuard { 35 | type Target = Guard; 36 | 37 | fn deref(&self) -> &Self::Target { 38 | &self.guard 39 | } 40 | } 41 | 42 | impl DerefMut for LockGuard { 43 | fn deref_mut(&mut self) -> &mut Self::Target { 44 | &mut self.guard 45 | } 46 | } 47 | 48 | impl AsRef for LockGuard { 49 | fn as_ref(&self) -> &Guard { 50 | &self.guard 51 | } 52 | } 53 | 54 | impl AsMut for LockGuard { 55 | fn as_mut(&mut self) -> &mut Guard { 56 | &mut self.guard 57 | } 58 | } 59 | 60 | #[cfg(test)] 61 | mod tests { 62 | use crate::collection::OwnedLockCollection; 63 | use crate::{LockCollection, Mutex, RwLock, ThreadKey}; 64 | 65 | #[test] 66 | fn guard_display_works() { 67 | let key = ThreadKey::get().unwrap(); 68 | let lock = 
OwnedLockCollection::new(RwLock::new("Hello, world!")); 69 | let guard = lock.read(key); 70 | assert_eq!(guard.to_string(), "Hello, world!".to_string()); 71 | } 72 | 73 | #[test] 74 | fn deref_mut_works() { 75 | let key = ThreadKey::get().unwrap(); 76 | let locks = (Mutex::new(1), Mutex::new(2)); 77 | let lock = LockCollection::new_ref(&locks); 78 | let mut guard = lock.lock(key); 79 | *guard.0 = 3; 80 | let key = LockCollection::<(Mutex<_>, Mutex<_>)>::unlock(guard); 81 | 82 | let guard = locks.0.lock(key); 83 | assert_eq!(*guard, 3); 84 | let key = Mutex::unlock(guard); 85 | 86 | let guard = locks.1.lock(key); 87 | assert_eq!(*guard, 2); 88 | } 89 | 90 | #[test] 91 | fn as_ref_works() { 92 | let key = ThreadKey::get().unwrap(); 93 | let locks = (Mutex::new(1), Mutex::new(2)); 94 | let lock = LockCollection::new_ref(&locks); 95 | let mut guard = lock.lock(key); 96 | *guard.0 = 3; 97 | let key = LockCollection::<(Mutex<_>, Mutex<_>)>::unlock(guard); 98 | 99 | let guard = locks.0.lock(key); 100 | assert_eq!(guard.as_ref(), &3); 101 | let key = Mutex::unlock(guard); 102 | 103 | let guard = locks.1.lock(key); 104 | assert_eq!(guard.as_ref(), &2); 105 | } 106 | 107 | #[test] 108 | fn as_mut_works() { 109 | let key = ThreadKey::get().unwrap(); 110 | let locks = (Mutex::new(1), Mutex::new(2)); 111 | let lock = LockCollection::new_ref(&locks); 112 | let mut guard = lock.lock(key); 113 | let guard_mut = guard.as_mut(); 114 | *guard_mut.0 = 3; 115 | let key = LockCollection::<(Mutex<_>, Mutex<_>)>::unlock(guard); 116 | 117 | let guard = locks.0.lock(key); 118 | assert_eq!(guard.as_ref(), &3); 119 | let key = Mutex::unlock(guard); 120 | 121 | let guard = locks.1.lock(key); 122 | assert_eq!(guard.as_ref(), &2); 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/rwlock/read_guard.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Debug, Display}; 2 | use 
std::hash::Hash; 3 | use std::marker::PhantomData; 4 | use std::ops::Deref; 5 | 6 | use lock_api::RawRwLock; 7 | 8 | use crate::lockable::RawLock; 9 | use crate::ThreadKey; 10 | 11 | use super::{RwLock, RwLockReadGuard, RwLockReadRef}; 12 | 13 | // These impls make things slightly easier because now you can use 14 | // `println!("{guard}")` instead of `println!("{}", *guard)` 15 | 16 | #[mutants::skip] // hashing involves PRNG and is hard to test 17 | #[cfg(not(tarpaulin_include))] 18 | impl Hash for RwLockReadRef<'_, T, R> { 19 | fn hash(&self, state: &mut H) { 20 | self.deref().hash(state) 21 | } 22 | } 23 | 24 | #[mutants::skip] 25 | #[cfg(not(tarpaulin_include))] 26 | impl Debug for RwLockReadRef<'_, T, R> { 27 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 28 | Debug::fmt(&**self, f) 29 | } 30 | } 31 | 32 | impl Display for RwLockReadRef<'_, T, R> { 33 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 34 | Display::fmt(&**self, f) 35 | } 36 | } 37 | 38 | impl Deref for RwLockReadRef<'_, T, R> { 39 | type Target = T; 40 | 41 | fn deref(&self) -> &Self::Target { 42 | // safety: this is the only type that can use `value`, and there's 43 | // a reference to this type, so there cannot be any mutable 44 | // references to this value. 45 | unsafe { &*self.0.data.get() } 46 | } 47 | } 48 | 49 | impl AsRef for RwLockReadRef<'_, T, R> { 50 | fn as_ref(&self) -> &T { 51 | self 52 | } 53 | } 54 | 55 | impl Drop for RwLockReadRef<'_, T, R> { 56 | fn drop(&mut self) { 57 | // safety: this guard is being destroyed, so the data cannot be 58 | // accessed without locking again 59 | unsafe { self.0.raw_unlock_read() } 60 | } 61 | } 62 | 63 | impl<'a, T: ?Sized, R: RawRwLock> RwLockReadRef<'a, T, R> { 64 | /// Creates an immutable reference for the underlying data of an [`RwLock`] 65 | /// without locking it or taking ownership of the key. 
66 | #[must_use] 67 | pub(crate) const unsafe fn new(mutex: &'a RwLock) -> Self { 68 | Self(mutex, PhantomData) 69 | } 70 | } 71 | 72 | #[mutants::skip] // hashing involves PRNG and is hard to test 73 | #[cfg(not(tarpaulin_include))] 74 | impl Hash for RwLockReadGuard<'_, T, R> { 75 | fn hash(&self, state: &mut H) { 76 | self.deref().hash(state) 77 | } 78 | } 79 | 80 | #[mutants::skip] 81 | #[cfg(not(tarpaulin_include))] 82 | impl Debug for RwLockReadGuard<'_, T, R> { 83 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 84 | Debug::fmt(&**self, f) 85 | } 86 | } 87 | 88 | impl Display for RwLockReadGuard<'_, T, R> { 89 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 90 | Display::fmt(&**self, f) 91 | } 92 | } 93 | 94 | impl Deref for RwLockReadGuard<'_, T, R> { 95 | type Target = T; 96 | 97 | fn deref(&self) -> &Self::Target { 98 | &self.rwlock 99 | } 100 | } 101 | 102 | impl AsRef for RwLockReadGuard<'_, T, R> { 103 | fn as_ref(&self) -> &T { 104 | self 105 | } 106 | } 107 | 108 | impl<'a, T: ?Sized, R: RawRwLock> RwLockReadGuard<'a, T, R> { 109 | /// Create a guard to the given mutex. Undefined if multiple guards to the 110 | /// same mutex exist at once. 111 | #[must_use] 112 | pub(super) const unsafe fn new(rwlock: &'a RwLock, thread_key: ThreadKey) -> Self { 113 | Self { 114 | rwlock: RwLockReadRef(rwlock, PhantomData), 115 | thread_key, 116 | } 117 | } 118 | } 119 | 120 | unsafe impl Sync for RwLockReadRef<'_, T, R> {} 121 | -------------------------------------------------------------------------------- /src/key.rs: -------------------------------------------------------------------------------- 1 | use std::cell::{Cell, LazyCell}; 2 | use std::fmt::{self, Debug}; 3 | use std::marker::PhantomData; 4 | 5 | use sealed::Sealed; 6 | 7 | // Sealed to prevent other key types from being implemented. Otherwise, this 8 | // would almost instant undefined behavior. 
9 | mod sealed { 10 | use super::ThreadKey; 11 | 12 | pub trait Sealed {} 13 | impl Sealed for ThreadKey {} 14 | impl Sealed for &mut ThreadKey {} 15 | } 16 | 17 | thread_local! { 18 | static KEY: LazyCell = LazyCell::new(KeyCell::default); 19 | } 20 | 21 | /// The key for the current thread. 22 | /// 23 | /// Only one of these exist per thread. To get the current thread's key, call 24 | /// [`ThreadKey::get`]. If the `ThreadKey` is dropped, it can be re-obtained. 25 | pub struct ThreadKey { 26 | phantom: PhantomData<*const ()>, // implement !Send and !Sync 27 | } 28 | 29 | /// Allows the type to be used as a key for a scoped lock 30 | /// 31 | /// # Safety 32 | /// 33 | /// Only one value which implements this trait may be allowed to exist at a 34 | /// time. Creating a new `Keyable` value requires making any other `Keyable` 35 | /// values invalid. 36 | pub unsafe trait Keyable: Sealed {} 37 | unsafe impl Keyable for ThreadKey {} 38 | // the ThreadKey can't be moved while a mutable reference to it exists 39 | unsafe impl Keyable for &mut ThreadKey {} 40 | 41 | // Implementing this means we can allow `MutexGuard` to be Sync 42 | // Safety: a &ThreadKey is useless by design. 43 | unsafe impl Sync for ThreadKey {} 44 | 45 | #[mutants::skip] 46 | #[cfg(not(tarpaulin_include))] 47 | impl Debug for ThreadKey { 48 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 49 | write!(f, "ThreadKey") 50 | } 51 | } 52 | 53 | // If you lose the thread key, you can get it back by calling ThreadKey::get 54 | impl Drop for ThreadKey { 55 | fn drop(&mut self) { 56 | // safety: a thread key cannot be acquired without creating the lock 57 | // safety: the key is lost, so it's safe to unlock the cell 58 | unsafe { KEY.with(|key| key.force_unlock()) } 59 | } 60 | } 61 | 62 | impl ThreadKey { 63 | /// Get the current thread's `ThreadKey`, if it's not already taken. 64 | /// 65 | /// The first time this is called, it will successfully return a 66 | /// `ThreadKey`. 
However, future calls to this function on the same thread 67 | /// will return [`None`], unless the key is dropped or unlocked first. 68 | /// 69 | /// # Examples 70 | /// 71 | /// ``` 72 | /// use happylock::ThreadKey; 73 | /// 74 | /// let key = ThreadKey::get().unwrap(); 75 | /// ``` 76 | #[must_use] 77 | pub fn get() -> Option { 78 | // safety: we just acquired the lock 79 | // safety: if this code changes, check to ensure the requirement for 80 | // the Drop implementation is still true 81 | KEY.with(|key| { 82 | key.try_lock().then_some(Self { 83 | phantom: PhantomData, 84 | }) 85 | }) 86 | } 87 | } 88 | 89 | /// A dumb lock that's just a wrapper for an [`AtomicBool`]. 90 | #[derive(Default)] 91 | struct KeyCell { 92 | is_locked: Cell, 93 | } 94 | 95 | impl KeyCell { 96 | /// Attempt to lock the `KeyCell`. This is not a fair lock. 97 | #[must_use] 98 | pub fn try_lock(&self) -> bool { 99 | !self.is_locked.replace(true) 100 | } 101 | 102 | /// Forcibly unlocks the `KeyCell`. This should only be called if the key 103 | /// from this `KeyCell` has been "lost". 
104 | pub unsafe fn force_unlock(&self) { 105 | self.is_locked.set(false); 106 | } 107 | } 108 | 109 | #[cfg(test)] 110 | mod tests { 111 | use super::*; 112 | 113 | #[test] 114 | fn thread_key_returns_some_on_first_call() { 115 | assert!(ThreadKey::get().is_some()); 116 | } 117 | 118 | #[test] 119 | fn thread_key_returns_none_on_second_call() { 120 | let key = ThreadKey::get(); 121 | assert!(ThreadKey::get().is_none()); 122 | drop(key); 123 | } 124 | 125 | #[test] 126 | fn dropping_thread_key_allows_reobtaining() { 127 | drop(ThreadKey::get()); 128 | assert!(ThreadKey::get().is_some()) 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/mutex/guard.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Debug, Display}; 2 | use std::hash::Hash; 3 | use std::marker::PhantomData; 4 | use std::ops::{Deref, DerefMut}; 5 | 6 | use lock_api::RawMutex; 7 | 8 | use crate::lockable::RawLock; 9 | use crate::ThreadKey; 10 | 11 | use super::{Mutex, MutexGuard, MutexRef}; 12 | 13 | // These impls make things slightly easier because now you can use 14 | // `println!("{guard}")` instead of `println!("{}", *guard)` 15 | 16 | #[mutants::skip] // hashing involves RNG and is hard to test 17 | #[cfg(not(tarpaulin_include))] 18 | impl Hash for MutexRef<'_, T, R> { 19 | fn hash(&self, state: &mut H) { 20 | self.deref().hash(state) 21 | } 22 | } 23 | 24 | #[mutants::skip] 25 | #[cfg(not(tarpaulin_include))] 26 | impl Debug for MutexRef<'_, T, R> { 27 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 28 | Debug::fmt(&**self, f) 29 | } 30 | } 31 | 32 | impl Display for MutexRef<'_, T, R> { 33 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 34 | Display::fmt(&**self, f) 35 | } 36 | } 37 | 38 | impl Drop for MutexRef<'_, T, R> { 39 | fn drop(&mut self) { 40 | // safety: this guard is being destroyed, so the data cannot be 41 | // accessed 
without locking again 42 | unsafe { self.0.raw_unlock_write() } 43 | } 44 | } 45 | 46 | impl Deref for MutexRef<'_, T, R> { 47 | type Target = T; 48 | 49 | fn deref(&self) -> &Self::Target { 50 | // safety: this is the only type that can use `value`, and there's 51 | // a reference to this type, so there cannot be any mutable 52 | // references to this value. 53 | unsafe { &*self.0.data.get() } 54 | } 55 | } 56 | 57 | impl DerefMut for MutexRef<'_, T, R> { 58 | fn deref_mut(&mut self) -> &mut Self::Target { 59 | // safety: this is the only type that can use `value`, and we have a 60 | // mutable reference to this type, so there cannot be any other 61 | // references to this value. 62 | unsafe { &mut *self.0.data.get() } 63 | } 64 | } 65 | 66 | impl AsRef for MutexRef<'_, T, R> { 67 | fn as_ref(&self) -> &T { 68 | self 69 | } 70 | } 71 | 72 | impl AsMut for MutexRef<'_, T, R> { 73 | fn as_mut(&mut self) -> &mut T { 74 | self 75 | } 76 | } 77 | 78 | impl<'a, T: ?Sized, R: RawMutex> MutexRef<'a, T, R> { 79 | /// Creates a reference to the underlying data of a mutex without 80 | /// attempting to lock it or take ownership of the key. But it's also quite 81 | /// dangerous to drop. 
82 | pub(crate) const unsafe fn new(mutex: &'a Mutex) -> Self { 83 | Self(mutex, PhantomData) 84 | } 85 | } 86 | 87 | // it's kinda annoying to re-implement some of this stuff on guards 88 | // there's nothing i can do about that 89 | 90 | #[mutants::skip] // hashing involves RNG and is hard to test 91 | #[cfg(not(tarpaulin_include))] 92 | impl Hash for MutexGuard<'_, T, R> { 93 | fn hash(&self, state: &mut H) { 94 | self.deref().hash(state) 95 | } 96 | } 97 | 98 | #[mutants::skip] 99 | #[cfg(not(tarpaulin_include))] 100 | impl Debug for MutexGuard<'_, T, R> { 101 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 102 | Debug::fmt(&**self, f) 103 | } 104 | } 105 | 106 | impl Display for MutexGuard<'_, T, R> { 107 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 108 | Display::fmt(&**self, f) 109 | } 110 | } 111 | 112 | impl Deref for MutexGuard<'_, T, R> { 113 | type Target = T; 114 | 115 | fn deref(&self) -> &Self::Target { 116 | &self.mutex 117 | } 118 | } 119 | 120 | impl DerefMut for MutexGuard<'_, T, R> { 121 | fn deref_mut(&mut self) -> &mut Self::Target { 122 | &mut self.mutex 123 | } 124 | } 125 | 126 | impl AsRef for MutexGuard<'_, T, R> { 127 | fn as_ref(&self) -> &T { 128 | self 129 | } 130 | } 131 | 132 | impl AsMut for MutexGuard<'_, T, R> { 133 | fn as_mut(&mut self) -> &mut T { 134 | self 135 | } 136 | } 137 | 138 | impl<'a, T: ?Sized, R: RawMutex> MutexGuard<'a, T, R> { 139 | /// Create a guard to the given mutex. Undefined if multiple guards to the 140 | /// same mutex exist at once. 
141 | #[must_use] 142 | pub(super) const unsafe fn new(mutex: &'a Mutex, thread_key: ThreadKey) -> Self { 143 | Self { 144 | mutex: MutexRef(mutex, PhantomData), 145 | thread_key, 146 | } 147 | } 148 | } 149 | 150 | unsafe impl Sync for MutexRef<'_, T, R> {} 151 | -------------------------------------------------------------------------------- /src/rwlock/write_guard.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Debug, Display}; 2 | use std::hash::Hash; 3 | use std::marker::PhantomData; 4 | use std::ops::{Deref, DerefMut}; 5 | 6 | use lock_api::RawRwLock; 7 | 8 | use crate::lockable::RawLock; 9 | use crate::ThreadKey; 10 | 11 | use super::{RwLock, RwLockWriteGuard, RwLockWriteRef}; 12 | 13 | // These impls make things slightly easier because now you can use 14 | // `println!("{guard}")` instead of `println!("{}", *guard)` 15 | 16 | #[mutants::skip] // hashing involves PRNG and is difficult to test 17 | #[cfg(not(tarpaulin_include))] 18 | impl Hash for RwLockWriteRef<'_, T, R> { 19 | fn hash(&self, state: &mut H) { 20 | self.deref().hash(state) 21 | } 22 | } 23 | 24 | #[mutants::skip] 25 | #[cfg(not(tarpaulin_include))] 26 | impl Debug for RwLockWriteRef<'_, T, R> { 27 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 28 | Debug::fmt(&**self, f) 29 | } 30 | } 31 | 32 | impl Display for RwLockWriteRef<'_, T, R> { 33 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 34 | Display::fmt(&**self, f) 35 | } 36 | } 37 | 38 | impl Deref for RwLockWriteRef<'_, T, R> { 39 | type Target = T; 40 | 41 | fn deref(&self) -> &Self::Target { 42 | // safety: this is the only type that can use `value`, and there's 43 | // a reference to this type, so there cannot be any mutable 44 | // references to this value. 
45 | unsafe { &*self.0.data.get() } 46 | } 47 | } 48 | 49 | impl DerefMut for RwLockWriteRef<'_, T, R> { 50 | fn deref_mut(&mut self) -> &mut Self::Target { 51 | // safety: this is the only type that can use `value`, and we have a 52 | // mutable reference to this type, so there cannot be any other 53 | // references to this value. 54 | unsafe { &mut *self.0.data.get() } 55 | } 56 | } 57 | 58 | impl AsRef for RwLockWriteRef<'_, T, R> { 59 | fn as_ref(&self) -> &T { 60 | self 61 | } 62 | } 63 | 64 | impl AsMut for RwLockWriteRef<'_, T, R> { 65 | fn as_mut(&mut self) -> &mut T { 66 | self 67 | } 68 | } 69 | 70 | impl Drop for RwLockWriteRef<'_, T, R> { 71 | fn drop(&mut self) { 72 | // safety: this guard is being destroyed, so the data cannot be 73 | // accessed without locking again 74 | unsafe { self.0.raw_unlock_write() } 75 | } 76 | } 77 | 78 | impl<'a, T: ?Sized + 'a, R: RawRwLock> RwLockWriteRef<'a, T, R> { 79 | /// Creates a reference to the underlying data of an [`RwLock`] without 80 | /// locking or taking ownership of the key. 
81 | #[must_use] 82 | pub(crate) const unsafe fn new(mutex: &'a RwLock) -> Self { 83 | Self(mutex, PhantomData) 84 | } 85 | } 86 | 87 | #[mutants::skip] // hashing involves PRNG and is difficult to test 88 | #[cfg(not(tarpaulin_include))] 89 | impl Hash for RwLockWriteGuard<'_, T, R> { 90 | fn hash(&self, state: &mut H) { 91 | self.deref().hash(state) 92 | } 93 | } 94 | 95 | #[mutants::skip] 96 | #[cfg(not(tarpaulin_include))] 97 | impl Debug for RwLockWriteGuard<'_, T, R> { 98 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 99 | Debug::fmt(&**self, f) 100 | } 101 | } 102 | 103 | impl Display for RwLockWriteGuard<'_, T, R> { 104 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 105 | Display::fmt(&**self, f) 106 | } 107 | } 108 | 109 | impl Deref for RwLockWriteGuard<'_, T, R> { 110 | type Target = T; 111 | 112 | fn deref(&self) -> &Self::Target { 113 | &self.rwlock 114 | } 115 | } 116 | 117 | impl DerefMut for RwLockWriteGuard<'_, T, R> { 118 | fn deref_mut(&mut self) -> &mut Self::Target { 119 | &mut self.rwlock 120 | } 121 | } 122 | 123 | impl AsRef for RwLockWriteGuard<'_, T, R> { 124 | fn as_ref(&self) -> &T { 125 | self 126 | } 127 | } 128 | 129 | impl AsMut for RwLockWriteGuard<'_, T, R> { 130 | fn as_mut(&mut self) -> &mut T { 131 | self 132 | } 133 | } 134 | 135 | impl<'a, T: ?Sized + 'a, R: RawRwLock> RwLockWriteGuard<'a, T, R> { 136 | /// Create a guard to the given mutex. Undefined if multiple guards to the 137 | /// same mutex exist at once. 
138 | #[must_use] 139 | pub(super) const unsafe fn new(rwlock: &'a RwLock, thread_key: ThreadKey) -> Self { 140 | Self { 141 | rwlock: RwLockWriteRef(rwlock, PhantomData), 142 | thread_key, 143 | } 144 | } 145 | } 146 | 147 | unsafe impl Sync for RwLockWriteRef<'_, T, R> {} 148 | -------------------------------------------------------------------------------- /src/poisonable/error.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use std::error::Error; 3 | 4 | use super::{PoisonError, PoisonGuard, TryLockPoisonableError}; 5 | 6 | #[mutants::skip] 7 | #[cfg(not(tarpaulin_include))] 8 | impl fmt::Debug for PoisonError { 9 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 10 | f.debug_struct("PoisonError").finish_non_exhaustive() 11 | } 12 | } 13 | 14 | impl fmt::Display for PoisonError { 15 | #[cfg_attr(test, mutants::skip)] 16 | #[cfg(not(tarpaulin_include))] 17 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 18 | "poisoned lock: another task failed inside".fmt(f) 19 | } 20 | } 21 | 22 | impl AsRef for PoisonError { 23 | fn as_ref(&self) -> &Guard { 24 | self.get_ref() 25 | } 26 | } 27 | 28 | impl AsMut for PoisonError { 29 | fn as_mut(&mut self) -> &mut Guard { 30 | self.get_mut() 31 | } 32 | } 33 | 34 | impl Error for PoisonError {} 35 | 36 | impl PoisonError { 37 | /// Creates a `PoisonError` 38 | /// 39 | /// This is generally created by methods like [`Poisonable::lock`]. 40 | /// 41 | /// [`Poisonable::lock`]: `crate::poisonable::Poisonable::lock` 42 | #[must_use] 43 | pub const fn new(guard: Guard) -> Self { 44 | Self { guard } 45 | } 46 | 47 | /// Consumes the error indicating that a lock is poisonmed, returning the 48 | /// underlying guard to allow access regardless. 
49 | /// 50 | /// # Examples 51 | /// 52 | /// ``` 53 | /// use std::collections::HashSet; 54 | /// use std::thread; 55 | /// 56 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 57 | /// 58 | /// let mutex = Poisonable::new(Mutex::new(HashSet::new())); 59 | /// 60 | /// // poison the mutex 61 | /// thread::scope(|s| { 62 | /// let r = s.spawn(|| { 63 | /// let key = ThreadKey::get().unwrap(); 64 | /// let mut data = mutex.lock(key).unwrap(); 65 | /// data.insert(10); 66 | /// panic!(); 67 | /// }).join(); 68 | /// }); 69 | /// 70 | /// let key = ThreadKey::get().unwrap(); 71 | /// let p_err = mutex.lock(key).unwrap_err(); 72 | /// let data = p_err.into_inner(); 73 | /// println!("recovered {} items", data.len()); 74 | /// ``` 75 | #[must_use] 76 | pub fn into_inner(self) -> Guard { 77 | self.guard 78 | } 79 | 80 | /// Reaches into this error indicating that a lock is poisoned, returning a 81 | /// reference to the underlying guard to allow access regardless. 82 | /// 83 | /// # Examples 84 | /// 85 | /// ``` 86 | /// use std::collections::HashSet; 87 | /// use std::thread; 88 | /// 89 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 90 | /// use happylock::poisonable::PoisonGuard; 91 | /// 92 | /// let mutex = Poisonable::new(Mutex::new(HashSet::new())); 93 | /// 94 | /// // poison the mutex 95 | /// thread::scope(|s| { 96 | /// let r = s.spawn(|| { 97 | /// let key = ThreadKey::get().unwrap(); 98 | /// let mut data = mutex.lock(key).unwrap(); 99 | /// data.insert(10); 100 | /// panic!(); 101 | /// }).join(); 102 | /// }); 103 | /// 104 | /// let key = ThreadKey::get().unwrap(); 105 | /// let p_err = mutex.lock(key).unwrap_err(); 106 | /// let data: &PoisonGuard<_> = p_err.get_ref(); 107 | /// println!("recovered {} items", data.len()); 108 | /// ``` 109 | #[must_use] 110 | pub const fn get_ref(&self) -> &Guard { 111 | &self.guard 112 | } 113 | 114 | /// Reaches into this error indicating that a lock is poisoned, returning a 115 | /// mutable reference to 
the underlying guard to allow access regardless. 116 | /// 117 | /// # Examples 118 | /// 119 | /// ``` 120 | /// use std::collections::HashSet; 121 | /// use std::thread; 122 | /// 123 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 124 | /// 125 | /// let mutex =Poisonable::new(Mutex::new(HashSet::new())); 126 | /// 127 | /// // poison the mutex 128 | /// thread::scope(|s| { 129 | /// let r = s.spawn(|| { 130 | /// let key = ThreadKey::get().unwrap(); 131 | /// let mut data = mutex.lock(key).unwrap(); 132 | /// data.insert(10); 133 | /// panic!(); 134 | /// }).join(); 135 | /// }); 136 | /// 137 | /// let key = ThreadKey::get().unwrap(); 138 | /// let mut p_err = mutex.lock(key).unwrap_err(); 139 | /// let data = p_err.get_mut(); 140 | /// data.insert(20); 141 | /// println!("recovered {} items", data.len()); 142 | /// ``` 143 | #[must_use] 144 | pub fn get_mut(&mut self) -> &mut Guard { 145 | &mut self.guard 146 | } 147 | } 148 | 149 | #[mutants::skip] 150 | #[cfg(not(tarpaulin_include))] 151 | impl fmt::Debug for TryLockPoisonableError<'_, G> { 152 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 153 | match *self { 154 | Self::Poisoned(..) => "Poisoned(..)".fmt(f), 155 | Self::WouldBlock(_) => "WouldBlock".fmt(f), 156 | } 157 | } 158 | } 159 | 160 | impl fmt::Display for TryLockPoisonableError<'_, G> { 161 | #[cfg_attr(test, mutants::skip)] 162 | #[cfg(not(tarpaulin_include))] 163 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 164 | match *self { 165 | Self::Poisoned(..) 
=> "poisoned lock: another task failed inside", 166 | Self::WouldBlock(_) => "try_lock failed because the operation would block", 167 | } 168 | .fmt(f) 169 | } 170 | } 171 | 172 | impl Error for TryLockPoisonableError<'_, G> {} 173 | 174 | impl<'flag, G> From>> for TryLockPoisonableError<'flag, G> { 175 | fn from(value: PoisonError>) -> Self { 176 | Self::Poisoned(value) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /src/collection.rs: -------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | 3 | use crate::{lockable::RawLock, ThreadKey}; 4 | 5 | mod boxed; 6 | mod guard; 7 | mod owned; 8 | mod r#ref; 9 | mod retry; 10 | pub(crate) mod utils; 11 | 12 | /// Locks a collection of locks, which cannot be shared immutably. 13 | /// 14 | /// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. But it 15 | /// can be safely locked without causing a deadlock. 16 | /// 17 | /// The data in this collection is guaranteed to not contain duplicates because 18 | /// `L` must always implement [`OwnedLockable`]. The underlying data may not be 19 | /// immutably referenced. Because of this, there is no need for sorting the 20 | /// locks in the collection, or checking for duplicates, because it can be 21 | /// guaranteed that until the underlying collection is mutated (which requires 22 | /// releasing all acquired locks in the collection to do), then the locks will 23 | /// stay in the same order and be locked in that order, preventing cyclic wait. 
24 | /// 25 | /// [`Lockable`]: `crate::lockable::Lockable` 26 | /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` 27 | 28 | // this type caches the idea that no immutable references to the underlying 29 | // collection exist 30 | #[derive(Debug)] 31 | pub struct OwnedLockCollection { 32 | // TODO: rename to child 33 | data: L, 34 | } 35 | 36 | /// Locks a reference to a collection of locks, by sorting them by memory 37 | /// address. 38 | /// 39 | /// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. But it 40 | /// can be safely locked without causing a deadlock. 41 | /// 42 | /// Upon construction, it must be confirmed that the collection contains no 43 | /// duplicate locks. This can be done by either using [`OwnedLockable`] or by 44 | /// checking. Regardless of how this is done, the locks will be sorted by their 45 | /// memory address before locking them. The sorted order of the locks is cached 46 | /// within this collection. 47 | /// 48 | /// Unlike [`BoxedLockCollection`], this type does not allocate memory for the 49 | /// data, although it does allocate memory for the sorted list of lock 50 | /// references. This makes it slightly faster, but lifetimes must be handled. 51 | /// 52 | /// [`Lockable`]: `crate::lockable::Lockable` 53 | /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` 54 | // 55 | // This type was born when I eventually realized that I needed a self 56 | // referential structure. That used boxing, so I elected to make a more 57 | // efficient implementation (polonius please save us) 58 | // 59 | // This type caches the sorting order of the locks and the fact that it doesn't 60 | // contain any duplicates. 61 | pub struct RefLockCollection<'a, L> { 62 | data: &'a L, 63 | locks: Vec<&'a dyn RawLock>, 64 | } 65 | 66 | /// Locks a collection of locks, stored in the heap, by sorting them by memory 67 | /// address. 68 | /// 69 | /// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. 
But it 70 | /// can be safely locked without causing a deadlock. 71 | /// 72 | /// Upon construction, it must be confirmed that the collection contains no 73 | /// duplicate locks. This can be done by either using [`OwnedLockable`] or by 74 | /// checking. Regardless of how this is done, the locks will be sorted by their 75 | /// memory address before locking them. The sorted order of the locks is cached 76 | /// within this collection. 77 | /// 78 | /// Unlike [`RefLockCollection`], this is a self-referential type which boxes 79 | /// the data that is given to it. This means no lifetimes are necessary on the 80 | /// type itself, but it is slightly slower because of the memory allocation. 81 | /// 82 | /// [`Lockable`]: `crate::lockable::Lockable` 83 | /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` 84 | // 85 | // This type caches the sorting order of the locks and the fact that it doesn't 86 | // contain any duplicates. 87 | pub struct BoxedLockCollection { 88 | data: *const UnsafeCell, 89 | locks: Vec<&'static dyn RawLock>, 90 | } 91 | 92 | /// Locks a collection of locks using a retrying algorithm. 93 | /// 94 | /// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. But it 95 | /// can be safely locked without causing a deadlock. 96 | /// 97 | /// The data in this collection is guaranteed to not contain duplicates, but it 98 | /// also is not sorted. In some cases the lack of sorting can increase 99 | /// performance. However, in most cases, this collection will be slower. Cyclic 100 | /// wait is not guaranteed here, so the locking algorithm must release all its 101 | /// locks if one of the lock attempts blocks. This results in wasted time and 102 | /// potential [livelocking]. 103 | /// 104 | /// However, one case where this might be faster than [`RefLockCollection`] is 105 | /// when cyclic wait is ensured manually. This will prevent the need for 106 | /// subsequent unlocking and re-locking. 
107 | /// 108 | /// [`Lockable`]: `crate::lockable::Lockable` 109 | /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` 110 | /// [livelocking]: https://en.wikipedia.org/wiki/Deadlock#Livelock 111 | // 112 | // This type caches the fact that there are no duplicates 113 | #[derive(Debug)] 114 | pub struct RetryingLockCollection { 115 | data: L, 116 | } 117 | 118 | /// A RAII guard for a generic [`Lockable`] type. When this structure is 119 | /// dropped (falls out of scope), the locks will be unlocked. 120 | /// 121 | /// The data protected by the mutex can be accessed through this guard via its 122 | /// [`Deref`] and [`DerefMut`] implementations. 123 | /// 124 | /// Several lock collections can be used to create this type. Specifically, 125 | /// [`BoxedLockCollection`], [`RefLockCollection`], [`OwnedLockCollection`], and 126 | /// [`RetryingLockCollection`]. It is created using the methods, `lock`, 127 | /// `try_lock`, `read`, and `try_read`. 128 | /// 129 | /// [`Deref`]: `std::ops::Deref` 130 | /// [`DerefMut`]: `std::ops::DerefMut` 131 | /// [`Lockable`]: `crate::lockable::Lockable` 132 | pub struct LockGuard { 133 | guard: Guard, 134 | key: ThreadKey, 135 | } 136 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Legal Code 2 | 3 | CC0 1.0 Universal 4 | 5 | CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE 6 | LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN 7 | ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS 8 | INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES 9 | REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS 10 | PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM 11 | THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED 12 | HEREUNDER. 
13 | 14 | Statement of Purpose 15 | 16 | The laws of most jurisdictions throughout the world automatically confer 17 | exclusive Copyright and Related Rights (defined below) upon the creator 18 | and subsequent owner(s) (each and all, an "owner") of an original work of 19 | authorship and/or a database (each, a "Work"). 20 | 21 | Certain owners wish to permanently relinquish those rights to a Work for 22 | the purpose of contributing to a commons of creative, cultural and 23 | scientific works ("Commons") that the public can reliably and without fear 24 | of later claims of infringement build upon, modify, incorporate in other 25 | works, reuse and redistribute as freely as possible in any form whatsoever 26 | and for any purposes, including without limitation commercial purposes. 27 | These owners may contribute to the Commons to promote the ideal of a free 28 | culture and the further production of creative, cultural and scientific 29 | works, or to gain reputation or greater distribution for their Work in 30 | part through the use and efforts of others. 31 | 32 | For these and/or other purposes and motivations, and without any 33 | expectation of additional consideration or compensation, the person 34 | associating CC0 with a Work (the "Affirmer"), to the extent that he or she 35 | is an owner of Copyright and Related Rights in the Work, voluntarily 36 | elects to apply CC0 to the Work and publicly distribute the Work under its 37 | terms, with knowledge of his or her Copyright and Related Rights in the 38 | Work and the meaning and intended legal effect of CC0 on those rights. 39 | 40 | 1. Copyright and Related Rights. A Work made available under CC0 may be 41 | protected by copyright and related or neighboring rights ("Copyright and 42 | Related Rights"). Copyright and Related Rights include, but are not 43 | limited to, the following: 44 | 45 | i. the right to reproduce, adapt, distribute, perform, display, 46 | communicate, and translate a Work; 47 | ii. 
moral rights retained by the original author(s) and/or performer(s); 48 | iii. publicity and privacy rights pertaining to a person's image or 49 | likeness depicted in a Work; 50 | iv. rights protecting against unfair competition in regards to a Work, 51 | subject to the limitations in paragraph 4(a), below; 52 | v. rights protecting the extraction, dissemination, use and reuse of data 53 | in a Work; 54 | vi. database rights (such as those arising under Directive 96/9/EC of the 55 | European Parliament and of the Council of 11 March 1996 on the legal 56 | protection of databases, and under any national implementation 57 | thereof, including any amended or successor version of such 58 | directive); and 59 | vii. other similar, equivalent or corresponding rights throughout the 60 | world based on applicable law or treaty, and any national 61 | implementations thereof. 62 | 63 | 2. Waiver. To the greatest extent permitted by, but not in contravention 64 | of, applicable law, Affirmer hereby overtly, fully, permanently, 65 | irrevocably and unconditionally waives, abandons, and surrenders all of 66 | Affirmer's Copyright and Related Rights and associated claims and causes 67 | of action, whether now known or unknown (including existing as well as 68 | future claims and causes of action), in the Work (i) in all territories 69 | worldwide, (ii) for the maximum duration provided by applicable law or 70 | treaty (including future time extensions), (iii) in any current or future 71 | medium and for any number of copies, and (iv) for any purpose whatsoever, 72 | including without limitation commercial, advertising or promotional 73 | purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each 74 | member of the public at large and to the detriment of Affirmer's heirs and 75 | successors, fully intending that such Waiver shall not be subject to 76 | revocation, rescission, cancellation, termination, or any other legal or 77 | equitable action to disrupt the quiet enjoyment of the Work by the public 78 | as contemplated by Affirmer's express Statement of Purpose. 79 | 80 | 3. Public License Fallback. Should any part of the Waiver for any reason 81 | be judged legally invalid or ineffective under applicable law, then the 82 | Waiver shall be preserved to the maximum extent permitted taking into 83 | account Affirmer's express Statement of Purpose. In addition, to the 84 | extent the Waiver is so judged Affirmer hereby grants to each affected 85 | person a royalty-free, non transferable, non sublicensable, non exclusive, 86 | irrevocable and unconditional license to exercise Affirmer's Copyright and 87 | Related Rights in the Work (i) in all territories worldwide, (ii) for the 88 | maximum duration provided by applicable law or treaty (including future 89 | time extensions), (iii) in any current or future medium and for any number 90 | of copies, and (iv) for any purpose whatsoever, including without 91 | limitation commercial, advertising or promotional purposes (the 92 | "License"). The License shall be deemed effective as of the date CC0 was 93 | applied by Affirmer to the Work. 
Should any part of the License for any 94 | reason be judged legally invalid or ineffective under applicable law, such 95 | partial invalidity or ineffectiveness shall not invalidate the remainder 96 | of the License, and in such case Affirmer hereby affirms that he or she 97 | will not (i) exercise any of his or her remaining Copyright and Related 98 | Rights in the Work or (ii) assert any associated claims and causes of 99 | action with respect to the Work, in either case contrary to Affirmer's 100 | express Statement of Purpose. 101 | 102 | 4. Limitations and Disclaimers. 103 | 104 | a. No trademark or patent rights held by Affirmer are waived, abandoned, 105 | surrendered, licensed or otherwise affected by this document. 106 | b. Affirmer offers the Work as-is and makes no representations or 107 | warranties of any kind concerning the Work, express, implied, 108 | statutory or otherwise, including without limitation warranties of 109 | title, merchantability, fitness for a particular purpose, non 110 | infringement, or the absence of latent or other defects, accuracy, or 111 | the present or absence of errors, whether or not discoverable, all to 112 | the greatest extent permissible under applicable law. 113 | c. Affirmer disclaims responsibility for clearing rights of other persons 114 | that may apply to the Work or any use thereof, including without 115 | limitation any person's Copyright and Related Rights in the Work. 116 | Further, Affirmer disclaims responsibility for obtaining any necessary 117 | consents, permissions or other rights required for any use of the 118 | Work. 119 | d. Affirmer understands and acknowledges that Creative Commons is not a 120 | party to this document and has no duty or obligation with respect to 121 | this CC0 or use of the Work. 
122 | -------------------------------------------------------------------------------- /src/collection/utils.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | 3 | use crate::handle_unwind::handle_unwind; 4 | use crate::lockable::{Lockable, RawLock, Sharable}; 5 | use crate::Keyable; 6 | 7 | /// Returns a list of locks in the given collection and sorts them by their 8 | /// memory address 9 | #[must_use] 10 | pub fn get_locks(data: &L) -> Vec<&dyn RawLock> { 11 | let mut locks = get_locks_unsorted(data); 12 | locks.sort_by_key(|lock| &raw const **lock); 13 | locks 14 | } 15 | 16 | /// Returns a list of locks from the data. Unlike the above function, this does 17 | /// not do any sorting of the locks. 18 | #[must_use] 19 | pub fn get_locks_unsorted(data: &L) -> Vec<&dyn RawLock> { 20 | let mut locks = Vec::new(); 21 | data.get_ptrs(&mut locks); 22 | locks 23 | } 24 | 25 | /// returns `true` if the sorted list contains a duplicate 26 | #[must_use] 27 | pub fn ordered_contains_duplicates(l: &[&dyn RawLock]) -> bool { 28 | if l.is_empty() { 29 | // Return early to prevent panic in the below call to `windows` 30 | return false; 31 | } 32 | 33 | l.windows(2) 34 | // NOTE: addr_eq is necessary because eq would also compare the v-table pointers 35 | .any(|window| std::ptr::addr_eq(window[0], window[1])) 36 | } 37 | 38 | /// Lock a set of locks in the given order. It's UB to call this without a `ThreadKey` 39 | pub unsafe fn ordered_write(locks: &[&dyn RawLock]) { 40 | // these will be unlocked in case of a panic 41 | let locked = Cell::new(0); 42 | 43 | handle_unwind( 44 | || { 45 | for lock in locks { 46 | lock.raw_write(); 47 | locked.set(locked.get() + 1); 48 | } 49 | }, 50 | || attempt_to_recover_writes_from_panic(&locks[0..locked.get()]), 51 | ) 52 | } 53 | 54 | /// Lock a set of locks in the given order. 
It's UB to call this without a `ThreadKey` 55 | pub unsafe fn ordered_read(locks: &[&dyn RawLock]) { 56 | let locked = Cell::new(0); 57 | 58 | handle_unwind( 59 | || { 60 | for lock in locks { 61 | lock.raw_read(); 62 | locked.set(locked.get() + 1); 63 | } 64 | }, 65 | || attempt_to_recover_reads_from_panic(&locks[0..locked.get()]), 66 | ) 67 | } 68 | 69 | /// Locks the locks in the order they are given. This causes deadlock if the 70 | /// locks contain duplicates, or if this is called by multiple threads with the 71 | /// locks in different orders. 72 | pub unsafe fn ordered_try_write(locks: &[&dyn RawLock]) -> bool { 73 | let locked = Cell::new(0); 74 | 75 | handle_unwind( 76 | || unsafe { 77 | for (i, lock) in locks.iter().enumerate() { 78 | // safety: we have the thread key 79 | if lock.raw_try_write() { 80 | locked.set(locked.get() + 1); 81 | } else { 82 | for lock in &locks[0..i] { 83 | // safety: this lock was already acquired 84 | lock.raw_unlock_write(); 85 | } 86 | return false; 87 | } 88 | } 89 | 90 | true 91 | }, 92 | || 93 | // safety: everything in locked is locked 94 | attempt_to_recover_writes_from_panic(&locks[0..locked.get()]), 95 | ) 96 | } 97 | 98 | /// Locks the locks in the order they are given. This causes deadlock if this 99 | /// is called by multiple threads with the locks in different orders. 
100 | pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool { 101 | // these will be unlocked in case of a panic 102 | let locked = Cell::new(0); 103 | 104 | handle_unwind( 105 | || unsafe { 106 | for (i, lock) in locks.iter().enumerate() { 107 | // safety: we have the thread key 108 | if lock.raw_try_read() { 109 | locked.set(locked.get() + 1); 110 | } else { 111 | for lock in &locks[0..i] { 112 | // safety: this lock was already acquired 113 | lock.raw_unlock_read(); 114 | } 115 | return false; 116 | } 117 | } 118 | 119 | true 120 | }, 121 | || 122 | // safety: everything in locked is locked 123 | attempt_to_recover_reads_from_panic(&locks[0..locked.get()]), 124 | ) 125 | } 126 | 127 | pub fn scoped_write<'a, L: RawLock + Lockable + ?Sized, R>( 128 | collection: &'a L, 129 | key: impl Keyable, 130 | f: impl FnOnce(L::DataMut<'a>) -> R, 131 | ) -> R { 132 | unsafe { 133 | // safety: we have the key 134 | collection.raw_write(); 135 | 136 | // safety: we just locked this 137 | let r = handle_unwind( 138 | || f(collection.data_mut()), 139 | || collection.raw_unlock_write(), 140 | ); 141 | 142 | // this ensures the key is held long enough 143 | drop(key); 144 | 145 | // safety: we've locked already, and aren't using the data again 146 | collection.raw_unlock_write(); 147 | 148 | r 149 | } 150 | } 151 | 152 | pub fn scoped_try_write<'a, L: RawLock + Lockable + ?Sized, Key: Keyable, R>( 153 | collection: &'a L, 154 | key: Key, 155 | f: impl FnOnce(L::DataMut<'a>) -> R, 156 | ) -> Result { 157 | unsafe { 158 | // safety: we have the key 159 | if !collection.raw_try_write() { 160 | return Err(key); 161 | } 162 | 163 | // safety: we just locked this 164 | let r = handle_unwind( 165 | || f(collection.data_mut()), 166 | || collection.raw_unlock_write(), 167 | ); 168 | 169 | // this ensures the key is held long enough 170 | drop(key); 171 | 172 | // safety: we've locked already, and aren't using the data again 173 | collection.raw_unlock_write(); 174 | 175 | Ok(r) 
176 | } 177 | } 178 | 179 | pub fn scoped_read<'a, L: RawLock + Sharable + ?Sized, R>( 180 | collection: &'a L, 181 | key: impl Keyable, 182 | f: impl FnOnce(L::DataRef<'a>) -> R, 183 | ) -> R { 184 | unsafe { 185 | // safety: we have the key 186 | collection.raw_read(); 187 | 188 | // safety: we just locked this 189 | let r = handle_unwind(|| f(collection.data_ref()), || collection.raw_unlock_read()); 190 | 191 | // this ensures the key is held long enough 192 | drop(key); 193 | 194 | // safety: we've locked already, and aren't using the data again 195 | collection.raw_unlock_read(); 196 | 197 | r 198 | } 199 | } 200 | 201 | pub fn scoped_try_read<'a, L: RawLock + Sharable + ?Sized, Key: Keyable, R>( 202 | collection: &'a L, 203 | key: Key, 204 | f: impl FnOnce(L::DataRef<'a>) -> R, 205 | ) -> Result { 206 | unsafe { 207 | // safety: we have the key 208 | if !collection.raw_try_read() { 209 | return Err(key); 210 | } 211 | 212 | // safety: we just locked this 213 | let r = handle_unwind(|| f(collection.data_ref()), || collection.raw_unlock_read()); 214 | 215 | // this ensures the key is held long enough 216 | drop(key); 217 | 218 | // safety: we've locked already, and aren't using the data again 219 | collection.raw_unlock_read(); 220 | 221 | Ok(r) 222 | } 223 | } 224 | 225 | /// Unlocks the already locked locks in order to recover from a panic 226 | pub unsafe fn attempt_to_recover_writes_from_panic(locks: &[&dyn RawLock]) { 227 | handle_unwind( 228 | || { 229 | // safety: the caller assumes that these are already locked 230 | locks.iter().for_each(|lock| lock.raw_unlock_write()); 231 | }, 232 | // if we get another panic in here, we'll just have to poison what remains 233 | || locks.iter().for_each(|l| l.poison()), 234 | ) 235 | } 236 | 237 | /// Unlocks the already locked locks in order to recover from a panic 238 | pub unsafe fn attempt_to_recover_reads_from_panic(locked: &[&dyn RawLock]) { 239 | handle_unwind( 240 | || { 241 | // safety: the caller assumes 
these are already locked 242 | locked.iter().for_each(|lock| lock.raw_unlock_read()); 243 | }, 244 | // if we get another panic in here, we'll just have to poison what remains 245 | || locked.iter().for_each(|l| l.poison()), 246 | ) 247 | } 248 | 249 | #[cfg(test)] 250 | mod tests { 251 | use crate::collection::utils::ordered_contains_duplicates; 252 | 253 | #[test] 254 | fn empty_array_does_not_contain_duplicates() { 255 | assert!(!ordered_contains_duplicates(&[])) 256 | } 257 | } 258 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(clippy::pedantic)] 2 | #![warn(clippy::nursery)] 3 | #![allow(clippy::module_name_repetitions)] 4 | #![allow(clippy::declare_interior_mutable_const)] 5 | #![allow(clippy::semicolon_if_nothing_returned)] 6 | #![allow(clippy::module_inception)] 7 | #![allow(clippy::single_match_else)] 8 | 9 | //! As it turns out, the Rust borrow checker is powerful enough that, if the 10 | //! standard library supported it, we could've made deadlocks undefined 11 | //! behavior. This library currently serves as a proof of concept for how that 12 | //! would work. 13 | //! 14 | //! # Theory 15 | //! 16 | //! There are four conditions necessary for a deadlock to occur. In order to 17 | //! prevent deadlocks, we just need to prevent one of the following: 18 | //! 19 | //! 1. mutual exclusion 20 | //! 2. non-preemptive allocation 21 | //! 3. circular wait 22 | //! 4. **partial allocation** 23 | //! 24 | //! This library seeks to solve **partial allocation** by requiring total 25 | //! allocation. All the resources a thread needs must be allocated at the same 26 | //! time. In order to request new resources, the old resources must be dropped 27 | //! first. Requesting multiple resources at once is atomic. You either get all 28 | //! the requested resources or none at all. 29 | //! 30 | //! 
As an optimization, this library also often prevents **circular wait**. 31 | //! Many collections sort the locks in order of their memory address. As long 32 | //! as the locks are always acquired in that order, then time doesn't need to 33 | //! be wasted on releasing locks after a failure and re-acquiring them later. 34 | //! 35 | //! # Examples 36 | //! 37 | //! Simple example: 38 | //! ``` 39 | //! use std::thread; 40 | //! use happylock::{Mutex, ThreadKey}; 41 | //! 42 | //! const N: usize = 10; 43 | //! 44 | //! static DATA: Mutex = Mutex::new(0); 45 | //! 46 | //! for _ in 0..N { 47 | //! thread::spawn(move || { 48 | //! // each thread gets one thread key 49 | //! let key = ThreadKey::get().unwrap(); 50 | //! 51 | //! // unlocking a mutex requires a ThreadKey 52 | //! let mut data = DATA.lock(key); 53 | //! *data += 1; 54 | //! 55 | //! // the key is unlocked at the end of the scope 56 | //! }); 57 | //! } 58 | //! 59 | //! let key = ThreadKey::get().unwrap(); 60 | //! let data = DATA.lock(key); 61 | //! println!("{}", *data); 62 | //! ``` 63 | //! 64 | //! To lock multiple mutexes at a time, create a [`LockCollection`]: 65 | //! 66 | //! ``` 67 | //! use std::thread; 68 | //! use happylock::{LockCollection, Mutex, ThreadKey}; 69 | //! 70 | //! const N: usize = 10; 71 | //! 72 | //! static DATA_1: Mutex = Mutex::new(0); 73 | //! static DATA_2: Mutex = Mutex::new(String::new()); 74 | //! 75 | //! for _ in 0..N { 76 | //! thread::spawn(move || { 77 | //! let key = ThreadKey::get().unwrap(); 78 | //! 79 | //! // happylock ensures at runtime there are no duplicate locks 80 | //! let collection = LockCollection::try_new((&DATA_1, &DATA_2)).unwrap(); 81 | //! let mut guard = collection.lock(key); 82 | //! 83 | //! *guard.1 = (100 - *guard.0).to_string(); 84 | //! *guard.0 += 1; 85 | //! }); 86 | //! } 87 | //! 88 | //! let key = ThreadKey::get().unwrap(); 89 | //! let data = LockCollection::try_new((&DATA_1, &DATA_2)).unwrap(); 90 | //! 
let data = data.lock(key); 91 | //! println!("{}", *data.0); 92 | //! println!("{}", *data.1); 93 | //! ``` 94 | //! 95 | //! In many cases, the [`LockCollection::new`] or [`LockCollection::new_ref`] 96 | //! method can be used, improving performance. 97 | //! 98 | //! ```rust 99 | //! use std::thread; 100 | //! use happylock::{LockCollection, Mutex, ThreadKey}; 101 | //! 102 | //! const N: usize = 32; 103 | //! 104 | //! static DATA: [Mutex; 2] = [Mutex::new(0), Mutex::new(1)]; 105 | //! 106 | //! for _ in 0..N { 107 | //! thread::spawn(move || { 108 | //! let key = ThreadKey::get().unwrap(); 109 | //! 110 | //! // a reference to a type that implements `OwnedLockable` will never 111 | //! // contain duplicates, so no duplicate checking is needed. 112 | //! let collection = LockCollection::new_ref(&DATA); 113 | //! let mut guard = collection.lock(key); 114 | //! 115 | //! let x = *guard[1]; 116 | //! *guard[1] += *guard[0]; 117 | //! *guard[0] = x; 118 | //! }); 119 | //! } 120 | //! 121 | //! let key = ThreadKey::get().unwrap(); 122 | //! let data = LockCollection::new_ref(&DATA); 123 | //! let data = data.lock(key); 124 | //! println!("{}", data[0]); 125 | //! println!("{}", data[1]); 126 | //! ``` 127 | //! 128 | //! # Performance 129 | //! 130 | //! **The `ThreadKey` is a mostly-zero cost abstraction.** It doesn't use any 131 | //! memory, and it doesn't really exist at run-time. The only cost comes from 132 | //! calling `ThreadKey::get()`, because the function has to ensure at runtime 133 | //! that the key hasn't already been taken. Dropping the key will also have a 134 | //! small cost. 135 | //! 136 | //! **Consider [`OwnedLockCollection`].** This will almost always be the 137 | //! fastest lock collection. It doesn't expose the underlying collection 138 | //! immutably, which means that it will always be locked in the same order, and 139 | //! doesn't need any sorting. 140 | //! 141 | //! 
**Avoid [`LockCollection::try_new`].** This constructor will check to make 142 | //! sure that the collection contains no duplicate locks. In most cases, this 143 | //! is O(nlogn), where n is the number of locks in the collections but in the 144 | //! case of [`RetryingLockCollection`], it's close to O(n). 145 | //! [`LockCollection::new`] and [`LockCollection::new_ref`] don't need these 146 | //! checks because they use [`OwnedLockable`], which is guaranteed to be unique 147 | //! as long as it is accessible. As a last resort, 148 | //! [`LockCollection::new_unchecked`] doesn't do this check, but is unsafe to 149 | //! call. 150 | //! 151 | //! **Know how to use [`RetryingLockCollection`].** This collection doesn't do 152 | //! any sorting, but uses a wasteful lock algorithm. It can't rely on the order 153 | //! of the locks to be the same across threads, so if it finds a lock that it 154 | //! can't acquire without blocking, it'll first release all of the locks it 155 | //! already acquired to avoid blocking other threads. This is wasteful because 156 | //! this algorithm may end up re-acquiring the same lock multiple times. To 157 | //! avoid this, ensure that (1) the first lock in the collection is always the 158 | //! first lock in any collection it appears in, and (2) the other locks in the 159 | //! collection are always preceded by that first lock. This will prevent any 160 | //! wasted time from re-acquiring locks. If you're unsure, [`LockCollection`] 161 | //! is a sensible default. 162 | //! 163 | //! [`OwnedLockable`]: `lockable::OwnedLockable` 164 | //! [`OwnedLockCollection`]: `collection::OwnedLockCollection` 165 | //! 
[`RetryingLockCollection`]: `collection::RetryingLockCollection` 166 | 167 | mod handle_unwind; 168 | mod key; 169 | 170 | pub mod collection; 171 | pub mod lockable; 172 | pub mod mutex; 173 | pub mod poisonable; 174 | pub mod rwlock; 175 | 176 | pub use key::{Keyable, ThreadKey}; 177 | 178 | #[cfg(feature = "spin")] 179 | pub use mutex::SpinLock; 180 | 181 | // Personally, I think re-exports look ugly in the rust documentation, so I 182 | // went with type aliases instead. 183 | 184 | /// A collection of locks that can be acquired simultaneously. 185 | /// 186 | /// This re-exports [`BoxedLockCollection`] as a sensible default. 187 | /// 188 | /// [`BoxedLockCollection`]: collection::BoxedLockCollection 189 | pub type LockCollection = collection::BoxedLockCollection; 190 | 191 | /// A re-export for [`poisonable::Poisonable`] 192 | pub type Poisonable = poisonable::Poisonable; 193 | 194 | /// A mutual exclusion primitive useful for protecting shared data, which cannot deadlock. 195 | /// 196 | /// By default, this uses `parking_lot` as a backend. 197 | #[cfg(feature = "parking_lot")] 198 | pub type Mutex = mutex::Mutex; 199 | 200 | /// A reader-writer lock 201 | /// 202 | /// By default, this uses `parking_lot` as a backend. 203 | #[cfg(feature = "parking_lot")] 204 | pub type RwLock = rwlock::RwLock; 205 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "lldb", 9 | "request": "launch", 10 | "name": "Debug unit tests in library 'happylock'", 11 | "cargo": { 12 | "args": [ 13 | "test", 14 | "--no-run", 15 | "--lib", 16 | "--package=happylock" 17 | ], 18 | "filter": { 19 | "name": "happylock", 20 | "kind": "lib" 21 | } 22 | }, 23 | "args": [], 24 | "cwd": "${workspaceFolder}" 25 | }, 26 | { 27 | "type": "lldb", 28 | "request": "launch", 29 | "name": "Debug example 'basic'", 30 | "cargo": { 31 | "args": [ 32 | "build", 33 | "--example=basic", 34 | "--package=happylock" 35 | ], 36 | "filter": { 37 | "name": "basic", 38 | "kind": "example" 39 | } 40 | }, 41 | "args": [], 42 | "cwd": "${workspaceFolder}" 43 | }, 44 | { 45 | "type": "lldb", 46 | "request": "launch", 47 | "name": "Debug unit tests in example 'basic'", 48 | "cargo": { 49 | "args": [ 50 | "test", 51 | "--no-run", 52 | "--example=basic", 53 | "--package=happylock" 54 | ], 55 | "filter": { 56 | "name": "basic", 57 | "kind": "example" 58 | } 59 | }, 60 | "args": [], 61 | "cwd": "${workspaceFolder}" 62 | }, 63 | { 64 | "type": "lldb", 65 | "request": "launch", 66 | "name": "Debug example 'dining_philosophers'", 67 | "cargo": { 68 | "args": [ 69 | "build", 70 | "--example=dining_philosophers", 71 | "--package=happylock" 72 | ], 73 | "filter": { 74 | "name": "dining_philosophers", 75 | "kind": "example" 76 | } 77 | }, 78 | "args": [], 79 | "cwd": "${workspaceFolder}" 80 | }, 81 | { 82 | "type": "lldb", 83 | "request": "launch", 84 | "name": "Debug unit tests in example 'dining_philosophers'", 85 | "cargo": { 86 | "args": [ 87 | "test", 88 | "--no-run", 89 | "--example=dining_philosophers", 90 | "--package=happylock" 91 | ], 92 | "filter": { 93 | "name": "dining_philosophers", 94 | "kind": "example" 95 | } 96 | }, 97 | "args": [], 98 | "cwd": "${workspaceFolder}" 99 | }, 100 | { 101 | "type": "lldb", 102 | "request": 
"launch", 103 | "name": "Debug example 'dining_philosophers_retry'", 104 | "cargo": { 105 | "args": [ 106 | "build", 107 | "--example=dining_philosophers_retry", 108 | "--package=happylock" 109 | ], 110 | "filter": { 111 | "name": "dining_philosophers_retry", 112 | "kind": "example" 113 | } 114 | }, 115 | "args": [], 116 | "cwd": "${workspaceFolder}" 117 | }, 118 | { 119 | "type": "lldb", 120 | "request": "launch", 121 | "name": "Debug unit tests in example 'dining_philosophers_retry'", 122 | "cargo": { 123 | "args": [ 124 | "test", 125 | "--no-run", 126 | "--example=dining_philosophers_retry", 127 | "--package=happylock" 128 | ], 129 | "filter": { 130 | "name": "dining_philosophers_retry", 131 | "kind": "example" 132 | } 133 | }, 134 | "args": [], 135 | "cwd": "${workspaceFolder}" 136 | }, 137 | { 138 | "type": "lldb", 139 | "request": "launch", 140 | "name": "Debug example 'double_mutex'", 141 | "cargo": { 142 | "args": [ 143 | "build", 144 | "--example=double_mutex", 145 | "--package=happylock" 146 | ], 147 | "filter": { 148 | "name": "double_mutex", 149 | "kind": "example" 150 | } 151 | }, 152 | "args": [], 153 | "cwd": "${workspaceFolder}" 154 | }, 155 | { 156 | "type": "lldb", 157 | "request": "launch", 158 | "name": "Debug unit tests in example 'double_mutex'", 159 | "cargo": { 160 | "args": [ 161 | "test", 162 | "--no-run", 163 | "--example=double_mutex", 164 | "--package=happylock" 165 | ], 166 | "filter": { 167 | "name": "double_mutex", 168 | "kind": "example" 169 | } 170 | }, 171 | "args": [], 172 | "cwd": "${workspaceFolder}" 173 | }, 174 | { 175 | "type": "lldb", 176 | "request": "launch", 177 | "name": "Debug example 'fibonacci'", 178 | "cargo": { 179 | "args": [ 180 | "build", 181 | "--example=fibonacci", 182 | "--package=happylock" 183 | ], 184 | "filter": { 185 | "name": "fibonacci", 186 | "kind": "example" 187 | } 188 | }, 189 | "args": [], 190 | "cwd": "${workspaceFolder}" 191 | }, 192 | { 193 | "type": "lldb", 194 | "request": "launch", 195 | 
"name": "Debug unit tests in example 'fibonacci'", 196 | "cargo": { 197 | "args": [ 198 | "test", 199 | "--no-run", 200 | "--example=fibonacci", 201 | "--package=happylock" 202 | ], 203 | "filter": { 204 | "name": "fibonacci", 205 | "kind": "example" 206 | } 207 | }, 208 | "args": [], 209 | "cwd": "${workspaceFolder}" 210 | }, 211 | { 212 | "type": "lldb", 213 | "request": "launch", 214 | "name": "Debug example 'list'", 215 | "cargo": { 216 | "args": [ 217 | "build", 218 | "--example=list", 219 | "--package=happylock" 220 | ], 221 | "filter": { 222 | "name": "list", 223 | "kind": "example" 224 | } 225 | }, 226 | "args": [], 227 | "cwd": "${workspaceFolder}" 228 | }, 229 | { 230 | "type": "lldb", 231 | "request": "launch", 232 | "name": "Debug unit tests in example 'list'", 233 | "cargo": { 234 | "args": [ 235 | "test", 236 | "--no-run", 237 | "--example=list", 238 | "--package=happylock" 239 | ], 240 | "filter": { 241 | "name": "list", 242 | "kind": "example" 243 | } 244 | }, 245 | "args": [], 246 | "cwd": "${workspaceFolder}" 247 | }, 248 | { 249 | "type": "lldb", 250 | "request": "launch", 251 | "name": "Debug integration test 'evil_mutex'", 252 | "cargo": { 253 | "args": [ 254 | "test", 255 | "--no-run", 256 | "--test=evil_mutex", 257 | "--package=happylock" 258 | ], 259 | "filter": { 260 | "name": "evil_mutex", 261 | "kind": "test" 262 | } 263 | }, 264 | "args": [], 265 | "cwd": "${workspaceFolder}" 266 | }, 267 | { 268 | "type": "lldb", 269 | "request": "launch", 270 | "name": "Debug integration test 'evil_rwlock'", 271 | "cargo": { 272 | "args": [ 273 | "test", 274 | "--no-run", 275 | "--test=evil_rwlock", 276 | "--package=happylock" 277 | ], 278 | "filter": { 279 | "name": "evil_rwlock", 280 | "kind": "test" 281 | } 282 | }, 283 | "args": [], 284 | "cwd": "${workspaceFolder}" 285 | }, 286 | { 287 | "type": "lldb", 288 | "request": "launch", 289 | "name": "Debug integration test 'evil_try_mutex'", 290 | "cargo": { 291 | "args": [ 292 | "test", 293 | 
"--no-run", 294 | "--test=evil_try_mutex", 295 | "--package=happylock" 296 | ], 297 | "filter": { 298 | "name": "evil_try_mutex", 299 | "kind": "test" 300 | } 301 | }, 302 | "args": [], 303 | "cwd": "${workspaceFolder}" 304 | }, 305 | { 306 | "type": "lldb", 307 | "request": "launch", 308 | "name": "Debug integration test 'evil_try_rwlock'", 309 | "cargo": { 310 | "args": [ 311 | "test", 312 | "--no-run", 313 | "--test=evil_try_rwlock", 314 | "--package=happylock" 315 | ], 316 | "filter": { 317 | "name": "evil_try_rwlock", 318 | "kind": "test" 319 | } 320 | }, 321 | "args": [], 322 | "cwd": "${workspaceFolder}" 323 | }, 324 | { 325 | "type": "lldb", 326 | "request": "launch", 327 | "name": "Debug integration test 'evil_unlock_mutex'", 328 | "cargo": { 329 | "args": [ 330 | "test", 331 | "--no-run", 332 | "--test=evil_unlock_mutex", 333 | "--package=happylock" 334 | ], 335 | "filter": { 336 | "name": "evil_unlock_mutex", 337 | "kind": "test" 338 | } 339 | }, 340 | "args": [], 341 | "cwd": "${workspaceFolder}" 342 | }, 343 | { 344 | "type": "lldb", 345 | "request": "launch", 346 | "name": "Debug integration test 'evil_unlock_rwlock'", 347 | "cargo": { 348 | "args": [ 349 | "test", 350 | "--no-run", 351 | "--test=evil_unlock_rwlock", 352 | "--package=happylock" 353 | ], 354 | "filter": { 355 | "name": "evil_unlock_rwlock", 356 | "kind": "test" 357 | } 358 | }, 359 | "args": [], 360 | "cwd": "${workspaceFolder}" 361 | }, 362 | { 363 | "type": "lldb", 364 | "request": "launch", 365 | "name": "Debug integration test 'retry'", 366 | "cargo": { 367 | "args": [ 368 | "test", 369 | "--no-run", 370 | "--test=retry", 371 | "--package=happylock" 372 | ], 373 | "filter": { 374 | "name": "retry", 375 | "kind": "test" 376 | } 377 | }, 378 | "args": [], 379 | "cwd": "${workspaceFolder}" 380 | } 381 | ] 382 | } -------------------------------------------------------------------------------- /src/mutex.rs: 
-------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | use std::marker::PhantomData; 3 | 4 | use lock_api::RawMutex; 5 | 6 | use crate::poisonable::PoisonFlag; 7 | use crate::ThreadKey; 8 | 9 | mod guard; 10 | mod mutex; 11 | 12 | /// A spinning mutex 13 | #[cfg(feature = "spin")] 14 | pub type SpinLock = Mutex>; 15 | 16 | /// A parking lot mutex 17 | #[cfg(feature = "parking_lot")] 18 | pub type ParkingMutex = Mutex; 19 | 20 | /// A mutual exclusion primitive useful for protecting shared data, which 21 | /// cannot deadlock. 22 | /// 23 | /// This mutex will block threads waiting for the lock to become available. The 24 | /// mutex can be created via a `new` constructor. Each mutex has a type 25 | /// parameter which represents the data that it is protecting. The data can 26 | /// only be accessed through the [`MutexGuard`]s returned from [`lock`] and 27 | /// [`try_lock`], which guarantees that the data is only ever accessed when 28 | /// the mutex is locked. 29 | /// 30 | /// Locking the mutex on a thread that already locked it is impossible, due to 31 | /// the requirement of the [`ThreadKey`]. Therefore, this will never deadlock. 32 | /// 33 | /// # Examples 34 | /// 35 | /// ``` 36 | /// use std::sync::Arc; 37 | /// use std::thread; 38 | /// use std::sync::mpsc; 39 | /// 40 | /// use happylock::{Mutex, ThreadKey}; 41 | /// 42 | /// // Spawn a few threads to increment a shared variable (non-atomically), 43 | /// // and let the main thread know once all increments are done. 44 | /// // 45 | /// // Here we're using an Arc to share memory among threads, and the data 46 | /// // inside the Arc is protected with a mutex. 
47 | /// const N: usize = 10; 48 | /// 49 | /// let data = Arc::new(Mutex::new(0)); 50 | /// 51 | /// let (tx, rx) = mpsc::channel(); 52 | /// for _ in 0..N { 53 | /// let (data, tx) = (Arc::clone(&data), tx.clone()); 54 | /// thread::spawn(move || { 55 | /// let key = ThreadKey::get().unwrap(); 56 | /// let mut data = data.lock(key); 57 | /// *data += 1; 58 | /// if *data == N { 59 | /// tx.send(()).unwrap(); 60 | /// } 61 | /// // the lock is unlocked 62 | /// }); 63 | /// } 64 | /// 65 | /// rx.recv().unwrap(); 66 | /// ``` 67 | /// 68 | /// To unlock a mutex guard sooner than the end of the enclosing scope, either 69 | /// create an inner scope, drop the guard manually, or call [`Mutex::unlock`]. 70 | /// 71 | /// ``` 72 | /// use std::sync::Arc; 73 | /// use std::thread; 74 | /// 75 | /// use happylock::{Mutex, ThreadKey}; 76 | /// 77 | /// const N: usize = 3; 78 | /// 79 | /// let data_mutex = Arc::new(Mutex::new(vec![1, 2, 3, 4])); 80 | /// let res_mutex = Arc::new(Mutex::new(0)); 81 | /// 82 | /// let mut threads = Vec::with_capacity(N); 83 | /// (0..N).for_each(|_| { 84 | /// let data_mutex_clone = Arc::clone(&data_mutex); 85 | /// let res_mutex_clone = Arc::clone(&res_mutex); 86 | /// 87 | /// threads.push(thread::spawn(move || { 88 | /// let mut key = ThreadKey::get().unwrap(); 89 | /// 90 | /// // Here we use a block to limit the lifetime of the lock guard. 
91 | /// let result = data_mutex_clone.scoped_lock(&mut key, |data| { 92 | /// let result = data.iter().fold(0, |acc, x| acc + x * 2); 93 | /// data.push(result); 94 | /// result 95 | /// // The mutex guard gets dropped here, so the lock is released 96 | /// }); 97 | /// // The thread key is available again 98 | /// *res_mutex_clone.lock(key) += result; 99 | /// })); 100 | /// }); 101 | /// 102 | /// let key = ThreadKey::get().unwrap(); 103 | /// let mut data = data_mutex.lock(key); 104 | /// let result = data.iter().fold(0, |acc, x| acc + x * 2); 105 | /// data.push(result); 106 | /// 107 | /// // We drop the `data` explicitly because it's not necessary anymore. This 108 | /// // allows other threads to start working on the data immediately. Dropping 109 | /// // the data also gives us access to the thread key, so we can lock 110 | /// // another mutex. 111 | /// let key = Mutex::unlock(data); 112 | /// 113 | /// // Here the mutex guard is not assigned to a variable and so, even if the 114 | /// // scope does not end after this line, the mutex is still released: there is 115 | /// // no deadlock. 116 | /// *res_mutex.lock(key) += result; 117 | /// 118 | /// threads.into_iter().for_each(|thread| { 119 | /// thread 120 | /// .join() 121 | /// .expect("The thread creating or execution failed !") 122 | /// }); 123 | /// 124 | /// let key = ThreadKey::get().unwrap(); 125 | /// assert_eq!(*res_mutex.lock(key), 800); 126 | /// ``` 127 | /// 128 | /// [`lock`]: `Mutex::lock` 129 | /// [`try_lock`]: `Mutex::try_lock` 130 | /// [`ThreadKey`]: `crate::ThreadKey` 131 | pub struct Mutex { 132 | raw: R, 133 | poison: PoisonFlag, 134 | data: UnsafeCell, 135 | } 136 | 137 | /// An RAII implementation of a “scoped lock” of a mutex. When this structure 138 | /// is dropped (falls out of scope), the lock will be unlocked. 139 | /// 140 | /// The data protected by the mutex can be accessed through this guard via its 141 | /// [`Deref`] and [`DerefMut`] implementations. 
142 | /// 143 | /// This is created by calling the [`lock`] and [`try_lock`] methods on [`Mutex`] 144 | /// 145 | /// This is similar to the [`MutexGuard`] type, except it does not hold a 146 | /// [`ThreadKey`]. 147 | /// 148 | /// [`lock`]: `Mutex::lock` 149 | /// [`try_lock`]: `Mutex::try_lock` 150 | /// [`Deref`]: `std::ops::Deref` 151 | /// [`DerefMut`]: `std::ops::DerefMut` 152 | pub struct MutexRef<'a, T: ?Sized + 'a, R: RawMutex>(&'a Mutex, PhantomData); 153 | 154 | /// An RAII implementation of a “scoped lock” of a mutex. When this structure 155 | /// is dropped (falls out of scope), the lock will be unlocked. 156 | /// 157 | /// The data protected by the mutex can be accessed through this guard via its 158 | /// [`Deref`] and [`DerefMut`] implementations. 159 | /// 160 | /// This is created by calling the [`lock`] and [`try_lock`] methods on [`Mutex`] 161 | /// 162 | /// This guard holds on to a [`ThreadKey`], which ensures that nothing else is 163 | /// locked until this guard is dropped. The [`ThreadKey`] can be reacquired 164 | /// using [`Mutex::unlock`]. 165 | /// 166 | /// [`Deref`]: `std::ops::Deref` 167 | /// [`DerefMut`]: `std::ops::DerefMut` 168 | /// [`lock`]: `Mutex::lock` 169 | /// [`try_lock`]: `Mutex::try_lock` 170 | // 171 | // This is the most lifetime-intensive thing I've ever written. Can I graduate 172 | // from borrow checker university now? 
173 | pub struct MutexGuard<'a, T: ?Sized + 'a, R: RawMutex> { 174 | mutex: MutexRef<'a, T, R>, // this way we don't need to re-implement Drop 175 | thread_key: ThreadKey, 176 | } 177 | 178 | #[cfg(test)] 179 | mod tests { 180 | use crate::{LockCollection, ThreadKey}; 181 | 182 | use super::*; 183 | 184 | #[test] 185 | fn unlocked_when_initialized() { 186 | let lock: crate::Mutex<_> = Mutex::new("Hello, world!"); 187 | 188 | assert!(!lock.is_locked()); 189 | } 190 | 191 | #[test] 192 | fn locked_after_read() { 193 | let key = ThreadKey::get().unwrap(); 194 | let lock: crate::Mutex<_> = Mutex::new("Hello, world!"); 195 | 196 | let guard = lock.lock(key); 197 | 198 | assert!(lock.is_locked()); 199 | drop(guard) 200 | } 201 | 202 | #[test] 203 | fn from_works() { 204 | let key = ThreadKey::get().unwrap(); 205 | let mutex: crate::Mutex<_> = Mutex::from("Hello, world!"); 206 | 207 | let guard = mutex.lock(key); 208 | assert_eq!(*guard, "Hello, world!"); 209 | } 210 | 211 | #[test] 212 | fn as_mut_works() { 213 | let key = ThreadKey::get().unwrap(); 214 | let mut mutex = crate::Mutex::from(42); 215 | 216 | let mut_ref = mutex.as_mut(); 217 | *mut_ref = 24; 218 | 219 | mutex.scoped_lock(key, |guard| assert_eq!(*guard, 24)) 220 | } 221 | 222 | #[test] 223 | fn display_works_for_guard() { 224 | let key = ThreadKey::get().unwrap(); 225 | let mutex: crate::Mutex<_> = Mutex::new("Hello, world!"); 226 | let guard = mutex.lock(key); 227 | assert_eq!(guard.to_string(), "Hello, world!".to_string()); 228 | } 229 | 230 | #[test] 231 | fn display_works_for_ref() { 232 | let mutex: crate::Mutex<_> = Mutex::new("Hello, world!"); 233 | let guard = unsafe { mutex.try_lock_no_key().unwrap() }; 234 | assert_eq!(guard.to_string(), "Hello, world!".to_string()); 235 | } 236 | 237 | #[test] 238 | fn ref_as_mut() { 239 | let key = ThreadKey::get().unwrap(); 240 | let collection = LockCollection::new(crate::Mutex::new(0)); 241 | let mut guard = collection.lock(key); 242 | let guard_mut = 
guard.as_mut().as_mut(); 243 | 244 | *guard_mut = 3; 245 | let key = LockCollection::>::unlock(guard); 246 | 247 | let guard = collection.lock(key); 248 | 249 | assert_eq!(guard.as_ref().as_ref(), &3); 250 | } 251 | 252 | #[test] 253 | fn guard_as_mut() { 254 | let key = ThreadKey::get().unwrap(); 255 | let mutex = crate::Mutex::new(0); 256 | let mut guard = mutex.lock(key); 257 | let guard_mut = guard.as_mut(); 258 | 259 | *guard_mut = 3; 260 | let key = Mutex::unlock(guard); 261 | 262 | let guard = mutex.lock(key); 263 | 264 | assert_eq!(guard.as_ref(), &3); 265 | } 266 | 267 | #[test] 268 | fn dropping_guard_releases_mutex() { 269 | let key = ThreadKey::get().unwrap(); 270 | let mutex: crate::Mutex<_> = Mutex::new("Hello, world!"); 271 | 272 | let guard = mutex.lock(key); 273 | drop(guard); 274 | 275 | assert!(!mutex.is_locked()); 276 | } 277 | 278 | #[test] 279 | fn dropping_ref_releases_mutex() { 280 | let mutex: crate::Mutex<_> = Mutex::new("Hello, world!"); 281 | 282 | let guard = unsafe { mutex.try_lock_no_key().unwrap() }; 283 | drop(guard); 284 | 285 | assert!(!mutex.is_locked()); 286 | } 287 | } 288 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # HappyLock: Deadlock Free Mutexes 2 | 3 | As it turns out, the Rust borrow checker is powerful enough that, if the 4 | standard library supported it, we could've made deadlocks undefined behavior. 5 | This library currently serves as a proof of concept for how that would work. 6 | 7 | ## Theory 8 | 9 | There are four conditions necessary for a deadlock to occur. In order to 10 | prevent deadlocks, we need to prevent one of the following: 11 | 12 | 1. mutual exclusion (This is the entire point of a mutex, so we can't prevent that) 13 | 2. non-preemptive allocation (The language must be able to take away a mutex from a thread at any time. This would be very annoying.) 14 | 3. 
circular wait (The language must enforce that every thread locks mutexes in the exact same order) 15 | 4. partial allocation (The language must enforce total allocation) 16 | 17 | This library seeks to solve **partial allocation** by requiring total 18 | allocation. All the resources a thread needs must be allocated at the same 19 | time. In order to request new resources, the old resources must be dropped 20 | first. Requesting multiple resources at once is atomic. You either get all the 21 | requested resources or none at all. 22 | 23 | As an optimization, this library also often prevents **circular wait**. Many 24 | collections sort the locks in order of their memory address. As long as the 25 | locks are always acquired in that order, then time doesn't need to be wasted 26 | on releasing locks after a failure and re-acquiring them later. 27 | 28 | ## Example 29 | 30 | ```rust 31 | let data: Mutex = Mutex::new(0); 32 | 33 | for _ in 0..N { 34 | thread::spawn(move || { 35 | // each thread gets one thread key 36 | let key = ThreadKey::get().unwrap(); 37 | 38 | // unlocking a mutex requires a ThreadKey 39 | let mut data = data.lock(key); 40 | *data += 1; 41 | 42 | // the key is unlocked at the end of the scope 43 | }); 44 | } 45 | 46 | let key = ThreadKey::get().unwrap(); 47 | let data = data.lock(&mut key); 48 | println!("{}", *data); 49 | ``` 50 | 51 | Unlocking a mutex requires a `ThreadKey` or a mutable reference to `ThreadKey`. 52 | Each thread will be allowed to have one key at a time, but no more than that. 53 | The `ThreadKey` type is not cloneable or copyable. This means that only one 54 | thing can be locked at a time. 55 | 56 | To lock multiple mutexes at a time, create a `LockCollection`. 
57 | 58 | ```rust 59 | static DATA_1: Mutex = Mutex::new(0); 60 | static DATA_2: Mutex = Mutex::new(String::new()); 61 | 62 | for _ in 0..N { 63 | thread::spawn(move || { 64 | let key = ThreadKey::get().unwrap(); 65 | 66 | // happylock ensures at runtime there are no duplicate locks 67 | let collection = LockCollection::try_new((&DATA_1, &DATA_2)).unwrap(); 68 | let mut guard = collection.lock(key); 69 | 70 | *guard.1 = (100 - *guard.0).to_string(); 71 | *guard.0 += 1; 72 | }); 73 | } 74 | 75 | let key = ThreadKey::get().unwrap(); 76 | let data = (&DATA_1, &DATA_2); 77 | let data = LockGuard::lock(&data, &mut key); 78 | println!("{}", *data.0); 79 | println!("{}", *data.1); 80 | ``` 81 | 82 | In many cases, the [`LockCollection::new`] or [`LockCollection::new_ref`] 83 | method can be used, improving performance. 84 | 85 | ```rust 86 | use std::thread; 87 | use happylock::{LockCollection, Mutex, ThreadKey}; 88 | 89 | const N: usize = 100; 90 | 91 | static DATA: [Mutex; 2] = [Mutex::new(0), Mutex::new(1)]; 92 | 93 | for _ in 0..N { 94 | thread::spawn(move || { 95 | let key = ThreadKey::get().unwrap(); 96 | // a reference to a type that implements `OwnedLockable` will never 97 | // contain duplicates, so no duplicate checking is needed. 98 | let collection = LockCollection::new_ref(&DATA); 99 | let mut guard = collection.lock(key); 100 | let x = *guard[1]; 101 | *guard[1] += *guard[0]; 102 | *guard[0] = x; 103 | }); 104 | } 105 | 106 | let key = ThreadKey::get().unwrap(); 107 | let data = LockCollection::new_ref(&DATA); 108 | let data = data.lock(key); 109 | 110 | println!("{}", data[0]); 111 | println!("{}", data[1]); 112 | ``` 113 | 114 | ## Performance 115 | 116 | **The `ThreadKey` is a mostly-zero cost abstraction.** It doesn't use any memory, and it doesn't really exist at run-time. The only cost comes from calling `ThreadKey::get()`, because the function has to ensure at runtime that the key hasn't already been taken. 
Dropping the key will also have a small cost. 117 | 118 | **Consider [`OwnedLockCollection`].** This will almost always be the fastest lock collection. It doesn't expose the underlying collection immutably, which means that it will always be locked in the same order, and doesn't need any sorting. 119 | 120 | **Avoid [`LockCollection::try_new`].** This constructor will check to make sure that the collection contains no duplicate locks. In most cases, this is O(nlogn), where n is the number of locks in the collections but in the case of [`RetryingLockCollection`], it's close to O(n). [`LockCollection::new`] and [`LockCollection::new_ref`] don't need these checks because they use [`OwnedLockable`], which is guaranteed to be unique as long as it is accessible. As a last resort [`LockCollection::new_unchecked`] doesn't do this check, but is unsafe to call. 121 | 122 | **Know how to use [`RetryingLockCollection`].** This collection doesn't do any sorting, but uses a wasteful lock algorithm. It can't rely on the order of the locks to be the same across threads, so if it finds a lock that it can't acquire without blocking, it'll first release all of the locks it already acquired to avoid blocking other threads. This is wasteful because this algorithm may end up re-acquiring the same lock multiple times. To avoid this, ensure that (1) the first lock in the collection is always the first lock in any collection it appears in, and (2) the other locks in the collection are always preceded by that first lock. This will prevent any wasted time from re-acquiring locks. If you're unsure, [`LockCollection`] is a sensible default. 123 | 124 | ## Future Work 125 | 126 | I want to have another go at `RefLockCollection` and `BoxedLockCollection`. I understand pinning better now than I did when I first wrote this, so I might be able to coalesce them now. `Pin` is not a very good API, so I'd need to implement a workaround for `Unpin` types. 
127 | 128 | I'd like some way to mutate the contents of a `BoxedLockCollection`. Currently this can be done by taking the child, mutating it, and creating a new `BoxedLockCollection`. The reason I haven't done this yet is because the set of sorted locks would need to be recalculated afterwards. 129 | 130 | It'd be nice to be able to use the mutexes built into the operating system, saving on binary size. Using `std::sync::Mutex` sounds promising, but it doesn't implement `RawMutex`, and implementing that is very difficult, if not impossible. Maybe I could implement my own abstraction over the OS mutexes. I could also simply implement `Lockable` for the standard library mutex. 131 | 132 | I've been thinking about adding types like `Condvar` and `Barrier`, but I've been stopped by two things. I don't use either of those very often, so I'm probably not the right person to try to implement either of them. They're also weird, and harder to prevent deadlocking for. They're sort of the opposite of a mutex, since a mutex guarantees that at least one thread can always access each resource. I think I can at least implement a deadlock-free `Once`, but it doesn't fit well into the existing lock collection API. There are other types that can deadlock too, like `JoinHandle` and `Stdio`, but I'm hesitant to try those. 133 | 134 | It's becoming clearer to me that the main blocker for people adopting this is async-support. `ThreadKey` doesn't work well in async contexts because multiple tasks can run on a single thread, and they can move between threads over time. I think the future might hold an `async-happylock` trait which uses a `TaskKey`. Special care will need to be taken to make sure that blocking calls to `lock` don't cause a deadlock. 135 | 136 | It'd be interesting to add some methods such as `lock_clone` or `lock_swap`. This would still require a thread key, in case the mutex is already locked. 
The only way this could be done without a thread key is with a `&mut Mutex`, but we already have `as_mut`. A `try_lock_clone` or `try_lock_swap` might not need a `ThreadKey` though. A special lock that looks like `Cell` but implements `Sync` could be shared without a thread key, because the lock would be dropped immediately (preventing non-preemptive allocation). It might make some common operations easier. 137 | 138 | Maybe `lock_api` or `spin` implements some useful methods that I kept out. Maybe there are some lock-specific methods that could be added to `LockCollection`. More types might be lockable using a lock collection. Is upgrading an `RwLock` even possible here? I don't know, but I'll probably look into it at some point. Downgrading is definitely possible in at least some cases. 139 | 140 | We could implement a `Readonly` wrapper around the collections that don't allow access to `lock` and `try_lock`. The idea would be that if you're not exclusively locking the collection, then you don't need to check for duplicates in the collection. Calling `.read()` twice on a recursive `RwLock` does not cause a deadlock. This would also require a `Recursive` trait. 141 | 142 | I want to try to get this working without the standard library. There are a few problems with this though. For instance, this crate uses `thread_local` to allow other threads to have their own keys. Also, the only practical type of mutex that would work is a spinlock. Although, more could be implemented using the `RawMutex` trait. The `Lockable` trait requires memory allocation at this time in order to check for duplicate locks. 143 | 144 | We could implement special methods for something like a `LockCollection>` where we only lock the first three items. 
145 | -------------------------------------------------------------------------------- /src/rwlock.rs: -------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | use std::marker::PhantomData; 3 | 4 | use lock_api::RawRwLock; 5 | 6 | use crate::poisonable::PoisonFlag; 7 | use crate::ThreadKey; 8 | 9 | mod rwlock; 10 | 11 | mod read_guard; 12 | mod write_guard; 13 | 14 | #[cfg(feature = "spin")] 15 | pub type SpinRwLock = RwLock>; 16 | 17 | #[cfg(feature = "parking_lot")] 18 | pub type ParkingRwLock = RwLock; 19 | 20 | /// A reader-writer lock 21 | /// 22 | /// This type of lock allows a number of readers or at most one writer at any 23 | /// point in time. The write portion of this lock typically allows modification 24 | /// of the underlying data (exclusive access) and the read portion of this lock 25 | /// typically allows for read-only access (shared access). 26 | /// 27 | /// In comparison, a [`Mutex`] does not distinguish between readers or writers 28 | /// that acquire the lock, therefore blocking any threads waiting for the lock 29 | /// to become available. An `RwLock` will allow any number of readers to 30 | /// acquire the lock as long as a writer is not holding the lock. 31 | /// 32 | /// The type parameter T represents the data that this lock protects. It is 33 | /// required that T satisfies [`Send`] to be shared across threads and [`Sync`] 34 | /// to allow concurrent access through readers. The RAII guard returned from 35 | /// the locking methods implement [`Deref`] (and [`DerefMut`] for the `write` 36 | /// methods) to allow access to the content of the lock. 37 | /// 38 | /// Locking the mutex on a thread that already locked it is impossible, due to 39 | /// the requirement of the [`ThreadKey`]. This will never deadlock. 
40 | /// 41 | /// [`ThreadKey`]: `crate::ThreadKey` 42 | /// [`Mutex`]: `crate::mutex::Mutex` 43 | /// [`Deref`]: `std::ops::Deref` 44 | /// [`DerefMut`]: `std::ops::DerefMut` 45 | pub struct RwLock { 46 | raw: R, 47 | poison: PoisonFlag, 48 | data: UnsafeCell, 49 | } 50 | 51 | /// RAII structure that unlocks the shared read access to a [`RwLock`] when 52 | /// dropped. 53 | /// 54 | /// This structure is created when the [`RwLock`] is put in a wrapper type, 55 | /// such as [`LockCollection`], and a read-only guard is obtained through the 56 | /// wrapper. 57 | /// 58 | /// This is similar to [`RwLockReadGuard`], except it does not hold a 59 | /// [`ThreadKey`]. 60 | /// 61 | /// [`Deref`]: `std::ops::Deref` 62 | /// [`DerefMut`]: `std::ops::DerefMut` 63 | /// [`LockCollection`]: `crate::LockCollection` 64 | pub struct RwLockReadRef<'a, T: ?Sized, R: RawRwLock>( 65 | &'a RwLock, 66 | PhantomData, 67 | ); 68 | 69 | /// RAII structure that unlocks the exclusive write access to a [`RwLock`] when 70 | /// dropped. 71 | /// 72 | /// This structure is created when the [`RwLock`] is put in a wrapper type, 73 | /// such as [`LockCollection`], and a mutable guard is obtained through the 74 | /// wrapper. 75 | /// 76 | /// This is similar to [`RwLockWriteGuard`], except it does not hold a 77 | /// [`ThreadKey`]. 78 | /// 79 | /// [`Deref`]: `std::ops::Deref` 80 | /// [`DerefMut`]: `std::ops::DerefMut` 81 | /// [`LockCollection`]: `crate::LockCollection` 82 | pub struct RwLockWriteRef<'a, T: ?Sized, R: RawRwLock>( 83 | &'a RwLock, 84 | PhantomData, 85 | ); 86 | 87 | /// RAII structure used to release the shared read access of a lock when 88 | /// dropped. 89 | /// 90 | /// This structure is created by the [`read`] and [`try_read`] methods on 91 | /// [`RwLock`]. 92 | /// 93 | /// This guard holds a [`ThreadKey`] for its entire lifetime. Therefore, a new 94 | /// lock cannot be acquired until this one is dropped. 
The [`ThreadKey`] can be 95 | /// reacquired using [`RwLock::unlock_read`]. 96 | /// 97 | /// [`Deref`]: `std::ops::Deref` 98 | /// [`DerefMut`]: `std::ops::DerefMut` 99 | /// [`read`]: `RwLock::read` 100 | /// [`try_read`]: `RwLock::try_read` 101 | pub struct RwLockReadGuard<'a, T: ?Sized, R: RawRwLock> { 102 | rwlock: RwLockReadRef<'a, T, R>, 103 | thread_key: ThreadKey, 104 | } 105 | 106 | /// RAII structure used to release the exclusive write access of a lock when 107 | /// dropped. 108 | /// 109 | /// This structure is created by the [`write`] and [`try_write`] methods on 110 | /// [`RwLock`] 111 | /// 112 | /// This guard holds a [`ThreadKey`] for its entire lifetime. Therefore, a new 113 | /// lock cannot be acquired until this one is dropped. The [`ThreadKey`] can be 114 | /// reacquired using [`RwLock::unlock_write`]. 115 | /// 116 | /// [`Deref`]: `std::ops::Deref` 117 | /// [`DerefMut`]: `std::ops::DerefMut` 118 | /// [`try_write`]: `RwLock::try_write` 119 | pub struct RwLockWriteGuard<'a, T: ?Sized, R: RawRwLock> { 120 | rwlock: RwLockWriteRef<'a, T, R>, 121 | thread_key: ThreadKey, 122 | } 123 | 124 | #[cfg(test)] 125 | mod tests { 126 | use crate::LockCollection; 127 | use crate::RwLock; 128 | use crate::ThreadKey; 129 | 130 | #[test] 131 | fn unlocked_when_initialized() { 132 | let key = ThreadKey::get().unwrap(); 133 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 134 | 135 | assert!(!lock.is_locked()); 136 | assert!(lock.try_write(key).is_ok()); 137 | } 138 | 139 | #[test] 140 | fn locked_after_read() { 141 | let key = ThreadKey::get().unwrap(); 142 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 143 | 144 | let guard = lock.read(key); 145 | 146 | assert!(lock.is_locked()); 147 | drop(guard) 148 | } 149 | 150 | #[test] 151 | fn locked_after_write() { 152 | let key = ThreadKey::get().unwrap(); 153 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 154 | 155 | let guard = lock.write(key); 156 | 157 | 
assert!(lock.is_locked()); 158 | drop(guard) 159 | } 160 | 161 | #[test] 162 | fn locked_after_scoped_write() { 163 | let mut key = ThreadKey::get().unwrap(); 164 | let lock = crate::RwLock::new("Hello, world!"); 165 | 166 | lock.scoped_write(&mut key, |guard| { 167 | assert!(lock.is_locked()); 168 | assert_eq!(*guard, "Hello, world!"); 169 | 170 | std::thread::scope(|s| { 171 | s.spawn(|| { 172 | let key = ThreadKey::get().unwrap(); 173 | assert!(lock.try_read(key).is_err()); 174 | }); 175 | }) 176 | }) 177 | } 178 | 179 | #[test] 180 | fn get_mut_works() { 181 | let key = ThreadKey::get().unwrap(); 182 | let mut lock = crate::RwLock::from(42); 183 | 184 | let mut_ref = lock.get_mut(); 185 | *mut_ref = 24; 186 | 187 | lock.scoped_read(key, |guard| assert_eq!(*guard, 24)) 188 | } 189 | 190 | #[test] 191 | fn try_write_can_fail() { 192 | let key = ThreadKey::get().unwrap(); 193 | let lock = crate::RwLock::new("Hello"); 194 | let guard = lock.write(key); 195 | 196 | std::thread::scope(|s| { 197 | s.spawn(|| { 198 | let key = ThreadKey::get().unwrap(); 199 | let r = lock.try_write(key); 200 | assert!(r.is_err()); 201 | }); 202 | }); 203 | 204 | drop(guard); 205 | } 206 | 207 | #[test] 208 | fn try_read_can_fail() { 209 | let key = ThreadKey::get().unwrap(); 210 | let lock = crate::RwLock::new("Hello"); 211 | let guard = lock.write(key); 212 | 213 | std::thread::scope(|s| { 214 | s.spawn(|| { 215 | let key = ThreadKey::get().unwrap(); 216 | let r = lock.try_read(key); 217 | assert!(r.is_err()); 218 | }); 219 | }); 220 | 221 | drop(guard); 222 | } 223 | 224 | #[test] 225 | fn read_display_works() { 226 | let key = ThreadKey::get().unwrap(); 227 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 228 | let guard = lock.read(key); 229 | assert_eq!(guard.to_string(), "Hello, world!".to_string()); 230 | } 231 | 232 | #[test] 233 | fn write_display_works() { 234 | let key = ThreadKey::get().unwrap(); 235 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 
236 | let guard = lock.write(key); 237 | assert_eq!(guard.to_string(), "Hello, world!".to_string()); 238 | } 239 | 240 | #[test] 241 | fn read_ref_display_works() { 242 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 243 | let guard = unsafe { lock.try_read_no_key().unwrap() }; 244 | assert_eq!(guard.to_string(), "Hello, world!".to_string()); 245 | } 246 | 247 | #[test] 248 | fn write_ref_display_works() { 249 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 250 | let guard = unsafe { lock.try_write_no_key().unwrap() }; 251 | assert_eq!(guard.to_string(), "Hello, world!".to_string()); 252 | } 253 | 254 | #[test] 255 | fn dropping_read_ref_releases_rwlock() { 256 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 257 | 258 | let guard = unsafe { lock.try_read_no_key().unwrap() }; 259 | drop(guard); 260 | 261 | assert!(!lock.is_locked()); 262 | } 263 | 264 | #[test] 265 | fn dropping_write_guard_releases_rwlock() { 266 | let key = ThreadKey::get().unwrap(); 267 | let lock: crate::RwLock<_> = RwLock::new("Hello, world!"); 268 | 269 | let guard = lock.write(key); 270 | drop(guard); 271 | 272 | assert!(!lock.is_locked()); 273 | } 274 | 275 | #[test] 276 | fn unlock_write() { 277 | let key = ThreadKey::get().unwrap(); 278 | let lock = crate::RwLock::new("Hello, world"); 279 | 280 | let mut guard = lock.write(key); 281 | *guard = "Goodbye, world!"; 282 | let key = RwLock::unlock_write(guard); 283 | 284 | let guard = lock.read(key); 285 | assert_eq!(*guard, "Goodbye, world!"); 286 | } 287 | 288 | #[test] 289 | fn unlock_read() { 290 | let key = ThreadKey::get().unwrap(); 291 | let lock = crate::RwLock::new("Hello, world"); 292 | 293 | let guard = lock.read(key); 294 | assert_eq!(*guard, "Hello, world"); 295 | let key = RwLock::unlock_read(guard); 296 | 297 | let guard = lock.write(key); 298 | assert_eq!(*guard, "Hello, world"); 299 | } 300 | 301 | #[test] 302 | fn read_ref_as_ref() { 303 | let key = ThreadKey::get().unwrap(); 304 | let 
lock = LockCollection::new(crate::RwLock::new("hi")); 305 | let guard = lock.read(key); 306 | 307 | assert_eq!(*(*guard).as_ref(), "hi"); 308 | } 309 | 310 | #[test] 311 | fn read_guard_as_ref() { 312 | let key = ThreadKey::get().unwrap(); 313 | let lock = crate::RwLock::new("hi"); 314 | let guard = lock.read(key); 315 | 316 | assert_eq!(*guard.as_ref(), "hi"); 317 | } 318 | 319 | #[test] 320 | fn write_ref_as_ref() { 321 | let key = ThreadKey::get().unwrap(); 322 | let lock = LockCollection::new(crate::RwLock::new("hi")); 323 | let guard = lock.lock(key); 324 | 325 | assert_eq!(*(*guard).as_ref(), "hi"); 326 | } 327 | 328 | #[test] 329 | fn write_guard_as_ref() { 330 | let key = ThreadKey::get().unwrap(); 331 | let lock = crate::RwLock::new("hi"); 332 | let guard = lock.write(key); 333 | 334 | assert_eq!(*guard.as_ref(), "hi"); 335 | } 336 | 337 | #[test] 338 | fn write_guard_as_mut() { 339 | let key = ThreadKey::get().unwrap(); 340 | let lock = crate::RwLock::new("hi"); 341 | let mut guard = lock.write(key); 342 | 343 | assert_eq!(*guard.as_mut(), "hi"); 344 | *guard.as_mut() = "foo"; 345 | assert_eq!(*guard.as_mut(), "foo"); 346 | } 347 | } 348 | -------------------------------------------------------------------------------- /src/mutex/mutex.rs: -------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | use std::fmt::Debug; 3 | use std::marker::PhantomData; 4 | use std::panic::AssertUnwindSafe; 5 | 6 | use lock_api::RawMutex; 7 | 8 | use crate::handle_unwind::handle_unwind; 9 | use crate::lockable::{Lockable, LockableGetMut, LockableIntoInner, OwnedLockable, RawLock}; 10 | use crate::poisonable::PoisonFlag; 11 | use crate::{Keyable, ThreadKey}; 12 | 13 | use super::{Mutex, MutexGuard, MutexRef}; 14 | 15 | unsafe impl RawLock for Mutex { 16 | fn poison(&self) { 17 | self.poison.poison(); 18 | } 19 | 20 | unsafe fn raw_write(&self) { 21 | assert!(!self.poison.is_poisoned(), "The mutex has been killed"); 22 
| 23 | // if the closure unwraps, then the mutex will be killed 24 | let this = AssertUnwindSafe(self); 25 | handle_unwind(|| this.raw.lock(), || self.poison()) 26 | } 27 | 28 | unsafe fn raw_try_write(&self) -> bool { 29 | if self.poison.is_poisoned() { 30 | return false; 31 | } 32 | 33 | // if the closure unwraps, then the mutex will be killed 34 | let this = AssertUnwindSafe(self); 35 | handle_unwind(|| this.raw.try_lock(), || self.poison()) 36 | } 37 | 38 | unsafe fn raw_unlock_write(&self) { 39 | // if the closure unwraps, then the mutex will be killed 40 | let this = AssertUnwindSafe(self); 41 | handle_unwind(|| this.raw.unlock(), || self.poison()) 42 | } 43 | 44 | // this is the closest thing to a read we can get, but Sharable isn't 45 | // implemented for this 46 | #[mutants::skip] 47 | #[cfg(not(tarpaulin_include))] 48 | unsafe fn raw_read(&self) { 49 | self.raw_write() 50 | } 51 | 52 | #[mutants::skip] 53 | #[cfg(not(tarpaulin_include))] 54 | unsafe fn raw_try_read(&self) -> bool { 55 | self.raw_try_write() 56 | } 57 | 58 | #[mutants::skip] 59 | #[cfg(not(tarpaulin_include))] 60 | unsafe fn raw_unlock_read(&self) { 61 | self.raw_unlock_write() 62 | } 63 | } 64 | 65 | unsafe impl Lockable for Mutex { 66 | type Guard<'g> 67 | = MutexRef<'g, T, R> 68 | where 69 | Self: 'g; 70 | 71 | type DataMut<'a> 72 | = &'a mut T 73 | where 74 | Self: 'a; 75 | 76 | fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { 77 | ptrs.push(self); 78 | } 79 | 80 | unsafe fn guard(&self) -> Self::Guard<'_> { 81 | MutexRef::new(self) 82 | } 83 | 84 | unsafe fn data_mut(&self) -> Self::DataMut<'_> { 85 | self.data.get().as_mut().unwrap_unchecked() 86 | } 87 | } 88 | 89 | impl LockableIntoInner for Mutex { 90 | type Inner = T; 91 | 92 | fn into_inner(self) -> Self::Inner { 93 | self.into_inner() 94 | } 95 | } 96 | 97 | impl LockableGetMut for Mutex { 98 | type Inner<'a> 99 | = &'a mut T 100 | where 101 | Self: 'a; 102 | 103 | fn get_mut(&mut self) -> Self::Inner<'_> { 104 | 
self.get_mut() 105 | } 106 | } 107 | 108 | unsafe impl OwnedLockable for Mutex {} 109 | 110 | impl Mutex { 111 | /// Creates a `Mutex` in an unlocked state ready for use. 112 | /// 113 | /// # Examples 114 | /// 115 | /// ``` 116 | /// use happylock::Mutex; 117 | /// 118 | /// let mutex = Mutex::new(0); 119 | /// ``` 120 | #[must_use] 121 | pub const fn new(data: T) -> Self { 122 | Self { 123 | raw: R::INIT, 124 | poison: PoisonFlag::new(), 125 | data: UnsafeCell::new(data), 126 | } 127 | } 128 | 129 | /// Returns the raw underlying mutex. 130 | /// 131 | /// Note that you will most likely need to import the [`RawMutex`] trait 132 | /// from `lock_api` to be able to call functions on the raw mutex. 133 | /// 134 | /// # Safety 135 | /// 136 | /// This method is unsafe because it allows unlocking a mutex while still 137 | /// holding a reference to a [`MutexGuard`], and locking a mutex without 138 | /// holding the [`ThreadKey`]. 139 | /// 140 | /// [`ThreadKey`]: `crate::ThreadKey` 141 | #[must_use] 142 | pub const unsafe fn raw(&self) -> &R { 143 | &self.raw 144 | } 145 | } 146 | 147 | #[mutants::skip] 148 | #[cfg(not(tarpaulin_include))] 149 | impl Debug for Mutex { 150 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 151 | // safety: this is just a try lock, and the value is dropped 152 | // immediately after, so there's no risk of blocking ourselves 153 | // or any other threads 154 | // when i implement try_clone this code will become less unsafe 155 | if let Some(value) = unsafe { self.try_lock_no_key() } { 156 | f.debug_struct("Mutex").field("data", &&*value).finish() 157 | } else { 158 | struct LockedPlaceholder; 159 | impl Debug for LockedPlaceholder { 160 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 161 | f.write_str("") 162 | } 163 | } 164 | 165 | f.debug_struct("Mutex") 166 | .field("data", &LockedPlaceholder) 167 | .finish() 168 | } 169 | } 170 | } 171 | 172 | impl Default for Mutex { 173 | fn default() 
-> Self { 174 | Self::new(T::default()) 175 | } 176 | } 177 | 178 | impl From for Mutex { 179 | fn from(value: T) -> Self { 180 | Self::new(value) 181 | } 182 | } 183 | 184 | // We don't need a `get_mut` because we don't have mutex poisoning. Hurray! 185 | // We have it anyway for documentation 186 | impl AsMut for Mutex { 187 | fn as_mut(&mut self) -> &mut T { 188 | self.get_mut() 189 | } 190 | } 191 | 192 | impl Mutex { 193 | /// Consumes this mutex, returning the underlying data. 194 | /// 195 | /// # Examples 196 | /// 197 | /// ``` 198 | /// use happylock::Mutex; 199 | /// 200 | /// let mutex = Mutex::new(0); 201 | /// assert_eq!(mutex.into_inner(), 0); 202 | /// ``` 203 | #[must_use] 204 | pub fn into_inner(self) -> T { 205 | self.data.into_inner() 206 | } 207 | } 208 | 209 | impl Mutex { 210 | /// Returns a mutable reference to the underlying data. 211 | /// 212 | /// Since this call borrows `Mutex` mutably, no actual locking is taking 213 | /// place. The mutable borrow statically guarantees that no locks exist. 
214 | /// 215 | /// # Examples 216 | /// 217 | /// ``` 218 | /// use happylock::{ThreadKey, Mutex}; 219 | /// 220 | /// let key = ThreadKey::get().unwrap(); 221 | /// let mut mutex = Mutex::new(0); 222 | /// *mutex.get_mut() = 10; 223 | /// assert_eq!(*mutex.lock(key), 10); 224 | /// ``` 225 | #[must_use] 226 | pub fn get_mut(&mut self) -> &mut T { 227 | self.data.get_mut() 228 | } 229 | } 230 | 231 | impl Mutex { 232 | pub fn scoped_lock<'a, Ret>( 233 | &'a self, 234 | key: impl Keyable, 235 | f: impl FnOnce(&'a mut T) -> Ret, 236 | ) -> Ret { 237 | unsafe { 238 | // safety: we have the key 239 | self.raw_write(); 240 | 241 | // safety: the data has been locked 242 | let r = handle_unwind( 243 | || f(self.data.get().as_mut().unwrap_unchecked()), 244 | || self.raw_unlock_write(), 245 | ); 246 | 247 | // ensures the key is held long enough 248 | drop(key); 249 | 250 | // safety: the mutex is still locked 251 | self.raw_unlock_write(); 252 | 253 | r 254 | } 255 | } 256 | 257 | pub fn scoped_try_lock<'a, Key: Keyable, Ret>( 258 | &'a self, 259 | key: Key, 260 | f: impl FnOnce(&'a mut T) -> Ret, 261 | ) -> Result { 262 | unsafe { 263 | // safety: we have the key 264 | if !self.raw_try_write() { 265 | return Err(key); 266 | } 267 | 268 | // safety: the data has been locked 269 | let r = handle_unwind( 270 | || f(self.data.get().as_mut().unwrap_unchecked()), 271 | || self.raw_unlock_write(), 272 | ); 273 | 274 | // ensures the key is held long enough 275 | drop(key); 276 | 277 | // safety: the mutex is still locked 278 | self.raw_unlock_write(); 279 | 280 | Ok(r) 281 | } 282 | } 283 | } 284 | 285 | impl Mutex { 286 | /// Acquires a mutex, blocking the current thread until it is able to do so. 287 | /// 288 | /// This function will block the local thread until it is available to acquire 289 | /// the mutex. Upon returning, the thread is the only thread with the lock 290 | /// held. A [`MutexGuard`] is returned to allow a scoped unlock of this 291 | /// `Mutex`. 
When the guard goes out of scope, this `Mutex` will unlock. 292 | /// 293 | /// Due to the requirement of a [`ThreadKey`] to call this function, it is not 294 | /// possible for this function to deadlock. 295 | /// 296 | /// # Examples 297 | /// 298 | /// ``` 299 | /// use std::thread; 300 | /// use happylock::{Mutex, ThreadKey}; 301 | /// 302 | /// let mutex = Mutex::new(0); 303 | /// 304 | /// thread::scope(|s| { 305 | /// s.spawn(|| { 306 | /// let key = ThreadKey::get().unwrap(); 307 | /// *mutex.lock(key) = 10; 308 | /// }); 309 | /// }); 310 | /// 311 | /// let key = ThreadKey::get().unwrap(); 312 | /// assert_eq!(*mutex.lock(key), 10); 313 | /// ``` 314 | pub fn lock(&self, key: ThreadKey) -> MutexGuard<'_, T, R> { 315 | unsafe { 316 | // safety: we have the thread key 317 | self.raw_write(); 318 | 319 | // safety: we just locked the mutex 320 | MutexGuard::new(self, key) 321 | } 322 | } 323 | 324 | /// Attempts to lock this `Mutex` without blocking. 325 | /// 326 | /// If the lock could not be acquired at this time, then `Err` is returned. 327 | /// Otherwise, an RAII guard is returned. The lock will be unlocked when the 328 | /// guard is dropped. 329 | /// 330 | /// # Errors 331 | /// 332 | /// If the mutex could not be acquired because it is already locked, then 333 | /// this call will return an error containing the [`ThreadKey`]. 
334 | /// 335 | /// # Examples 336 | /// 337 | /// ``` 338 | /// use std::thread; 339 | /// use happylock::{Mutex, ThreadKey}; 340 | /// 341 | /// let mutex = Mutex::new(0); 342 | /// 343 | /// thread::scope(|s| { 344 | /// s.spawn(|| { 345 | /// let key = ThreadKey::get().unwrap(); 346 | /// let mut lock = mutex.try_lock(key); 347 | /// if let Ok(mut lock) = lock { 348 | /// *lock = 10; 349 | /// } else { 350 | /// println!("try_lock failed"); 351 | /// } 352 | /// }); 353 | /// }); 354 | /// 355 | /// let key = ThreadKey::get().unwrap(); 356 | /// assert_eq!(*mutex.lock(key), 10); 357 | /// ``` 358 | pub fn try_lock(&self, key: ThreadKey) -> Result, ThreadKey> { 359 | unsafe { 360 | // safety: we have the key to the mutex 361 | if self.raw_try_write() { 362 | // safety: we just locked the mutex 363 | Ok(MutexGuard::new(self, key)) 364 | } else { 365 | Err(key) 366 | } 367 | } 368 | } 369 | 370 | /// Returns `true` if the mutex is currently locked 371 | #[cfg(test)] 372 | pub(crate) fn is_locked(&self) -> bool { 373 | self.raw.is_locked() 374 | } 375 | 376 | /// Lock without a [`ThreadKey`]. It is undefined behavior to do this without 377 | /// owning the [`ThreadKey`]. 378 | pub(crate) unsafe fn try_lock_no_key(&self) -> Option> { 379 | self.raw_try_write().then_some(MutexRef(self, PhantomData)) 380 | } 381 | 382 | /// Consumes the [`MutexGuard`], and consequently unlocks its `Mutex`. 383 | /// 384 | /// This function is equivalent to calling [`drop`] on the guard, except that 385 | /// it returns the key that was used to create it. Alernatively, the guard 386 | /// will be automatically dropped when it goes out of scope. 
387 | /// 388 | /// # Examples 389 | /// 390 | /// ``` 391 | /// use happylock::{ThreadKey, Mutex}; 392 | /// 393 | /// let key = ThreadKey::get().unwrap(); 394 | /// let mutex = Mutex::new(0); 395 | /// 396 | /// let mut guard = mutex.lock(key); 397 | /// *guard += 20; 398 | /// 399 | /// let key = Mutex::unlock(guard); 400 | /// 401 | /// let guard = mutex.lock(key); 402 | /// assert_eq!(*guard, 20); 403 | /// ``` 404 | #[must_use] 405 | pub fn unlock(guard: MutexGuard<'_, T, R>) -> ThreadKey { 406 | drop(guard.mutex); 407 | guard.thread_key 408 | } 409 | } 410 | 411 | unsafe impl Send for Mutex {} 412 | unsafe impl Sync for Mutex {} 413 | -------------------------------------------------------------------------------- /src/rwlock/rwlock.rs: -------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | use std::fmt::Debug; 3 | use std::marker::PhantomData; 4 | use std::panic::AssertUnwindSafe; 5 | 6 | use lock_api::RawRwLock; 7 | 8 | use crate::handle_unwind::handle_unwind; 9 | use crate::lockable::{ 10 | Lockable, LockableGetMut, LockableIntoInner, OwnedLockable, RawLock, Sharable, 11 | }; 12 | use crate::{Keyable, ThreadKey}; 13 | 14 | use super::{PoisonFlag, RwLock, RwLockReadGuard, RwLockReadRef, RwLockWriteGuard, RwLockWriteRef}; 15 | 16 | unsafe impl RawLock for RwLock { 17 | fn poison(&self) { 18 | self.poison.poison(); 19 | } 20 | 21 | unsafe fn raw_write(&self) { 22 | assert!( 23 | !self.poison.is_poisoned(), 24 | "The read-write lock has been killed" 25 | ); 26 | 27 | // if the closure unwraps, then the mutex will be killed 28 | let this = AssertUnwindSafe(self); 29 | handle_unwind(|| this.raw.lock_exclusive(), || self.poison()) 30 | } 31 | 32 | unsafe fn raw_try_write(&self) -> bool { 33 | if self.poison.is_poisoned() { 34 | return false; 35 | } 36 | 37 | // if the closure unwraps, then the mutex will be killed 38 | let this = AssertUnwindSafe(self); 39 | handle_unwind(|| 
this.raw.try_lock_exclusive(), || self.poison()) 40 | } 41 | 42 | unsafe fn raw_unlock_write(&self) { 43 | // if the closure unwraps, then the mutex will be killed 44 | let this = AssertUnwindSafe(self); 45 | handle_unwind(|| this.raw.unlock_exclusive(), || self.poison()) 46 | } 47 | 48 | unsafe fn raw_read(&self) { 49 | assert!( 50 | !self.poison.is_poisoned(), 51 | "The read-write lock has been killed" 52 | ); 53 | 54 | // if the closure unwraps, then the mutex will be killed 55 | let this = AssertUnwindSafe(self); 56 | handle_unwind(|| this.raw.lock_shared(), || self.poison()) 57 | } 58 | 59 | unsafe fn raw_try_read(&self) -> bool { 60 | if self.poison.is_poisoned() { 61 | return false; 62 | } 63 | 64 | // if the closure unwraps, then the mutex will be killed 65 | let this = AssertUnwindSafe(self); 66 | handle_unwind(|| this.raw.try_lock_shared(), || self.poison()) 67 | } 68 | 69 | unsafe fn raw_unlock_read(&self) { 70 | // if the closure unwraps, then the mutex will be killed 71 | let this = AssertUnwindSafe(self); 72 | handle_unwind(|| this.raw.unlock_shared(), || self.poison()) 73 | } 74 | } 75 | 76 | unsafe impl Lockable for RwLock { 77 | type Guard<'g> 78 | = RwLockWriteRef<'g, T, R> 79 | where 80 | Self: 'g; 81 | 82 | type DataMut<'a> 83 | = &'a mut T 84 | where 85 | Self: 'a; 86 | 87 | fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { 88 | ptrs.push(self); 89 | } 90 | 91 | unsafe fn guard(&self) -> Self::Guard<'_> { 92 | RwLockWriteRef::new(self) 93 | } 94 | 95 | unsafe fn data_mut(&self) -> Self::DataMut<'_> { 96 | self.data.get().as_mut().unwrap_unchecked() 97 | } 98 | } 99 | 100 | unsafe impl Sharable for RwLock { 101 | type ReadGuard<'g> 102 | = RwLockReadRef<'g, T, R> 103 | where 104 | Self: 'g; 105 | 106 | type DataRef<'a> 107 | = &'a T 108 | where 109 | Self: 'a; 110 | 111 | unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { 112 | RwLockReadRef::new(self) 113 | } 114 | 115 | unsafe fn data_ref(&self) -> Self::DataRef<'_> { 116 | 
self.data.get().as_ref().unwrap_unchecked() 117 | } 118 | } 119 | 120 | unsafe impl OwnedLockable for RwLock {} 121 | 122 | impl LockableIntoInner for RwLock { 123 | type Inner = T; 124 | 125 | fn into_inner(self) -> Self::Inner { 126 | self.into_inner() 127 | } 128 | } 129 | 130 | impl LockableGetMut for RwLock { 131 | type Inner<'a> 132 | = &'a mut T 133 | where 134 | Self: 'a; 135 | 136 | fn get_mut(&mut self) -> Self::Inner<'_> { 137 | AsMut::as_mut(self) 138 | } 139 | } 140 | 141 | impl RwLock { 142 | /// Creates a new instance of an `RwLock` which is unlocked. 143 | /// 144 | /// # Examples 145 | /// 146 | /// ``` 147 | /// use happylock::RwLock; 148 | /// 149 | /// let lock = RwLock::new(5); 150 | /// 151 | /// 152 | /// ``` 153 | #[must_use] 154 | pub const fn new(data: T) -> Self { 155 | Self { 156 | data: UnsafeCell::new(data), 157 | poison: PoisonFlag::new(), 158 | raw: R::INIT, 159 | } 160 | } 161 | } 162 | 163 | #[mutants::skip] 164 | #[cfg(not(tarpaulin_include))] 165 | impl Debug for RwLock { 166 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 167 | // safety: this is just a try lock, and the value is dropped 168 | // immediately after, so there's no risk of blocking ourselves 169 | // or any other threads 170 | if let Some(value) = unsafe { self.try_read_no_key() } { 171 | f.debug_struct("RwLock").field("data", &&*value).finish() 172 | } else { 173 | struct LockedPlaceholder; 174 | impl Debug for LockedPlaceholder { 175 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 176 | f.write_str("") 177 | } 178 | } 179 | 180 | f.debug_struct("RwLock") 181 | .field("data", &LockedPlaceholder) 182 | .finish() 183 | } 184 | } 185 | } 186 | 187 | impl Default for RwLock { 188 | fn default() -> Self { 189 | Self::new(T::default()) 190 | } 191 | } 192 | 193 | impl From for RwLock { 194 | fn from(value: T) -> Self { 195 | Self::new(value) 196 | } 197 | } 198 | 199 | // We don't need a `get_mut` because we don't have mutex 
poisoning. Hurray! 200 | // This is safe because you can't have a mutable reference to the lock if it's 201 | // locked. Being locked requires an immutable reference because of the guard. 202 | impl AsMut for RwLock { 203 | fn as_mut(&mut self) -> &mut T { 204 | self.data.get_mut() 205 | } 206 | } 207 | 208 | impl RwLock { 209 | /// Consumes this `RwLock`, returning the underlying data. 210 | /// 211 | /// # Examples 212 | /// 213 | /// ``` 214 | /// use happylock::{RwLock, ThreadKey}; 215 | /// 216 | /// let key = ThreadKey::get().unwrap(); 217 | /// let lock = RwLock::new(String::new()); 218 | /// { 219 | /// let mut s = lock.write(key); 220 | /// *s = "modified".to_owned(); 221 | /// } 222 | /// assert_eq!(lock.into_inner(), "modified"); 223 | /// ``` 224 | #[must_use] 225 | pub fn into_inner(self) -> T { 226 | self.data.into_inner() 227 | } 228 | } 229 | 230 | impl RwLock { 231 | /// Returns a mutable reference to the underlying data. 232 | /// 233 | /// Since this call borrows `RwLock` mutably, no actual locking needs to take 234 | /// place. The mutable borrow statically guarantees that no locks exist. 
235 | /// 236 | /// # Examples 237 | /// 238 | /// ``` 239 | /// use happylock::{ThreadKey, RwLock}; 240 | /// 241 | /// let key = ThreadKey::get().unwrap(); 242 | /// let mut lock = RwLock::new(0); 243 | /// *lock.get_mut() = 10; 244 | /// assert_eq!(*lock.read(key), 10); 245 | /// ``` 246 | #[must_use] 247 | pub fn get_mut(&mut self) -> &mut T { 248 | self.data.get_mut() 249 | } 250 | } 251 | 252 | impl RwLock { 253 | pub fn scoped_read<'a, Ret>(&'a self, key: impl Keyable, f: impl Fn(&'a T) -> Ret) -> Ret { 254 | unsafe { 255 | // safety: we have the key 256 | self.raw_read(); 257 | 258 | // safety: the data has been locked 259 | let r = handle_unwind( 260 | || f(self.data.get().as_ref().unwrap_unchecked()), 261 | || self.raw_unlock_read(), 262 | ); 263 | 264 | // ensures the key is held long enough 265 | drop(key); 266 | 267 | // safety: the mutex is still locked 268 | self.raw_unlock_read(); 269 | 270 | r 271 | } 272 | } 273 | 274 | pub fn scoped_try_read<'a, Key: Keyable, Ret>( 275 | &'a self, 276 | key: Key, 277 | f: impl Fn(&'a T) -> Ret, 278 | ) -> Result { 279 | unsafe { 280 | // safety: we have the key 281 | if !self.raw_try_read() { 282 | return Err(key); 283 | } 284 | 285 | // safety: the data has been locked 286 | let r = handle_unwind( 287 | || f(self.data.get().as_ref().unwrap_unchecked()), 288 | || self.raw_unlock_read(), 289 | ); 290 | 291 | // ensures the key is held long enough 292 | drop(key); 293 | 294 | // safety: the mutex is still locked 295 | self.raw_unlock_read(); 296 | 297 | Ok(r) 298 | } 299 | } 300 | 301 | pub fn scoped_write<'a, Ret>(&'a self, key: impl Keyable, f: impl Fn(&'a mut T) -> Ret) -> Ret { 302 | unsafe { 303 | // safety: we have the key 304 | self.raw_write(); 305 | 306 | // safety: the data has been locked 307 | let r = handle_unwind( 308 | || f(self.data.get().as_mut().unwrap_unchecked()), 309 | || self.raw_unlock_write(), 310 | ); 311 | 312 | // ensures the key is held long enough 313 | drop(key); 314 | 315 | // safety: 
the mutex is still locked 316 | self.raw_unlock_write(); 317 | 318 | r 319 | } 320 | } 321 | 322 | pub fn scoped_try_write<'a, Key: Keyable, Ret>( 323 | &'a self, 324 | key: Key, 325 | f: impl Fn(&'a mut T) -> Ret, 326 | ) -> Result { 327 | unsafe { 328 | // safety: we have the key 329 | if !self.raw_try_write() { 330 | return Err(key); 331 | } 332 | 333 | // safety: the data has been locked 334 | let r = handle_unwind( 335 | || f(self.data.get().as_mut().unwrap_unchecked()), 336 | || self.raw_unlock_write(), 337 | ); 338 | 339 | // ensures the key is held long enough 340 | drop(key); 341 | 342 | // safety: the mutex is still locked 343 | self.raw_unlock_write(); 344 | 345 | Ok(r) 346 | } 347 | } 348 | 349 | /// Locks this `RwLock` with shared read access, blocking the current 350 | /// thread until it can be acquired. 351 | /// 352 | /// The calling thread will be blocked until there are no more writers 353 | /// which hold the lock. There may be other readers currently inside the 354 | /// lock when this method returns. This method does not provide any guarantees 355 | /// with respect to the ordering of whether contentious readers or writers 356 | /// will acquire the lock first. 357 | /// 358 | /// Returns an RAII guard which will release this thread's shared access 359 | /// once it is dropped. 360 | /// 361 | /// Because this method takes a [`ThreadKey`], it's not possible for this 362 | /// method to cause a deadlock. 
363 | /// 364 | /// # Examples 365 | /// 366 | /// ``` 367 | /// use std::thread; 368 | /// use happylock::{RwLock, ThreadKey}; 369 | /// 370 | /// let key = ThreadKey::get().unwrap(); 371 | /// let lock = RwLock::new(1); 372 | /// 373 | /// let n = lock.read(key); 374 | /// assert_eq!(*n, 1); 375 | /// 376 | /// thread::scope(|s| { 377 | /// s.spawn(|| { 378 | /// let key = ThreadKey::get().unwrap(); 379 | /// let r = lock.read(key); 380 | /// }); 381 | /// }); 382 | /// ``` 383 | /// 384 | /// [`ThreadKey`]: `crate::ThreadKey` 385 | pub fn read(&self, key: ThreadKey) -> RwLockReadGuard<'_, T, R> { 386 | unsafe { 387 | self.raw_read(); 388 | 389 | // safety: the lock is locked first 390 | RwLockReadGuard::new(self, key) 391 | } 392 | } 393 | 394 | /// Attempts to acquire this `RwLock` with shared read access without 395 | /// blocking. 396 | /// 397 | /// If the access could not be granted at this time, then `Err` is 398 | /// returned. Otherwise, an RAII guard is returned which will release the 399 | /// shared access when it is dropped. 400 | /// 401 | /// This function does not provide any guarantees with respect to the 402 | /// ordering of whether contentious readers or writers will acquire the 403 | /// lock first. 404 | /// 405 | /// # Errors 406 | /// 407 | /// This function will return an error containing the [`ThreadKey`] if the 408 | /// `RwLock` could not be acquired because it was already locked exclusively. 
409 | /// 410 | /// # Examples 411 | /// 412 | /// ``` 413 | /// use happylock::{RwLock, ThreadKey}; 414 | /// 415 | /// let key = ThreadKey::get().unwrap(); 416 | /// let lock = RwLock::new(1); 417 | /// 418 | /// match lock.try_read(key) { 419 | /// Ok(n) => assert_eq!(*n, 1), 420 | /// Err(_) => unreachable!(), 421 | /// }; 422 | /// ``` 423 | pub fn try_read(&self, key: ThreadKey) -> Result, ThreadKey> { 424 | unsafe { 425 | if self.raw_try_read() { 426 | // safety: the lock is locked first 427 | Ok(RwLockReadGuard::new(self, key)) 428 | } else { 429 | Err(key) 430 | } 431 | } 432 | } 433 | 434 | /// Attempts to create a shared lock without a key. Locking this without 435 | /// exclusive access to the key is undefined behavior. 436 | pub(crate) unsafe fn try_read_no_key(&self) -> Option> { 437 | if self.raw_try_read() { 438 | // safety: the lock is locked first 439 | Some(RwLockReadRef(self, PhantomData)) 440 | } else { 441 | None 442 | } 443 | } 444 | 445 | /// Attempts to create an exclusive lock without a key. Locking this 446 | /// without exclusive access to the key is undefined behavior. 447 | #[cfg(test)] 448 | pub(crate) unsafe fn try_write_no_key(&self) -> Option> { 449 | if self.raw_try_write() { 450 | // safety: the lock is locked first 451 | Some(RwLockWriteRef(self, PhantomData)) 452 | } else { 453 | None 454 | } 455 | } 456 | 457 | /// Locks this `RwLock` with exclusive write access, blocking the current 458 | /// until it can be acquired. 459 | /// 460 | /// This function will not return while other writers or readers currently 461 | /// have access to the lock. 462 | /// 463 | /// Returns an RAII guard which will drop the write access of this `RwLock` 464 | /// when dropped. 465 | /// 466 | /// Because this method takes a [`ThreadKey`], it's not possible for this 467 | /// method to cause a deadlock. 
468 | /// 469 | /// # Examples 470 | /// 471 | /// ``` 472 | /// use happylock::{ThreadKey, RwLock}; 473 | /// 474 | /// let key = ThreadKey::get().unwrap(); 475 | /// let lock = RwLock::new(1); 476 | /// 477 | /// let mut n = lock.write(key); 478 | /// *n = 2; 479 | /// 480 | /// let key = RwLock::unlock_write(n); 481 | /// assert_eq!(*lock.read(key), 2); 482 | /// ``` 483 | /// 484 | /// [`ThreadKey`]: `crate::ThreadKey` 485 | pub fn write(&self, key: ThreadKey) -> RwLockWriteGuard<'_, T, R> { 486 | unsafe { 487 | self.raw_write(); 488 | 489 | // safety: the lock is locked first 490 | RwLockWriteGuard::new(self, key) 491 | } 492 | } 493 | 494 | /// Attempts to lock this `RwLock` with exclusive write access, without 495 | /// blocking. 496 | /// 497 | /// This function does not block. If the lock could not be acquired at this 498 | /// time, then `Err` is returned. Otherwise, an RAII guard is returned 499 | /// which will release the lock when it is dropped. 500 | /// 501 | /// This function does not provide any guarantees with respect to the 502 | /// ordering of whether contentious readers or writers will acquire the 503 | /// lock first. 504 | /// 505 | /// # Errors 506 | /// 507 | /// This function will return an error containing the [`ThreadKey`] if the 508 | /// `RwLock` could not be acquired because it was already locked exclusively. 
509 | /// 510 | /// # Examples 511 | /// 512 | /// ``` 513 | /// use happylock::{RwLock, ThreadKey}; 514 | /// 515 | /// let key = ThreadKey::get().unwrap(); 516 | /// let lock = RwLock::new(1); 517 | /// 518 | /// let key = match lock.try_write(key) { 519 | /// Ok(mut n) => { 520 | /// assert_eq!(*n, 1); 521 | /// *n = 2; 522 | /// RwLock::unlock_write(n) 523 | /// } 524 | /// Err(_) => unreachable!(), 525 | /// }; 526 | /// 527 | /// let n = lock.read(key); 528 | /// assert_eq!(*n, 2); 529 | /// ``` 530 | pub fn try_write(&self, key: ThreadKey) -> Result, ThreadKey> { 531 | unsafe { 532 | if self.raw_try_write() { 533 | // safety: the lock is locked first 534 | Ok(RwLockWriteGuard::new(self, key)) 535 | } else { 536 | Err(key) 537 | } 538 | } 539 | } 540 | 541 | /// Returns `true` if the rwlock is currently locked in any way 542 | #[cfg(test)] 543 | pub(crate) fn is_locked(&self) -> bool { 544 | self.raw.is_locked() 545 | } 546 | 547 | /// Immediately drops the guard, and consequently releases the shared lock. 548 | /// 549 | /// This function is equivalent to calling [`drop`] on the guard, except 550 | /// that it returns the key that was used to create it. Alternatively, the 551 | /// guard will be automatically dropped when it goes out of scope. 552 | /// 553 | /// # Examples 554 | /// 555 | /// ``` 556 | /// use happylock::{RwLock, ThreadKey}; 557 | /// 558 | /// let key = ThreadKey::get().unwrap(); 559 | /// let lock = RwLock::new(0); 560 | /// 561 | /// let mut guard = lock.read(key); 562 | /// assert_eq!(*guard, 0); 563 | /// let key = RwLock::unlock_read(guard); 564 | /// ``` 565 | #[must_use] 566 | pub fn unlock_read(guard: RwLockReadGuard<'_, T, R>) -> ThreadKey { 567 | drop(guard.rwlock); 568 | guard.thread_key 569 | } 570 | 571 | /// Immediately drops the guard, and consequently releases the exclusive 572 | /// lock. 
573 | /// 574 | /// This function is equivalent to calling [`drop`] on the guard, except that 575 | /// it returns the key that was used to create it. Alternatively, the guard 576 | /// will be automatically dropped when it goes out of scope. 577 | /// 578 | /// # Examples 579 | /// 580 | /// ``` 581 | /// use happylock::{RwLock, ThreadKey}; 582 | /// 583 | /// let key = ThreadKey::get().unwrap(); 584 | /// let lock = RwLock::new(0); 585 | /// 586 | /// let mut guard = lock.write(key); 587 | /// *guard += 20; 588 | /// let key = RwLock::unlock_write(guard); 589 | /// 590 | /// let guard = lock.read(key); 591 | /// assert_eq!(*guard, 20); 592 | /// ``` 593 | #[must_use] 594 | pub fn unlock_write(guard: RwLockWriteGuard<'_, T, R>) -> ThreadKey { 595 | drop(guard.rwlock); 596 | guard.thread_key 597 | } 598 | } 599 | 600 | unsafe impl Send for RwLock {} 601 | unsafe impl Sync for RwLock {} 602 | -------------------------------------------------------------------------------- /src/poisonable.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | use std::sync::atomic::AtomicBool; 3 | 4 | use crate::ThreadKey; 5 | 6 | mod error; 7 | mod flag; 8 | mod guard; 9 | mod poisonable; 10 | 11 | // TODO add helper types for poisonable mutex and so on 12 | 13 | /// A flag indicating if a lock is poisoned or not. The implementation differs 14 | /// depending on whether panics are set to unwind or abort. 15 | #[derive(Debug, Default)] 16 | pub(crate) struct PoisonFlag(#[cfg(panic = "unwind")] AtomicBool); 17 | 18 | /// A wrapper around [`Lockable`] types which will enable poisoning. 19 | /// 20 | /// A lock is "poisoned" when the thread panics while holding the lock. Once a 21 | /// lock is poisoned, all other threads are unable to access the data by 22 | /// default, because the data may be tainted (some invariant of the data might 23 | /// not be upheld). 
24 | /// 25 | /// The [`lock`], [`try_lock`], [`read`], and [`try_read`] methods return a 26 | /// [`Result`] which indicates whether the lock has been poisoned or not. The 27 | /// [`PoisonError`] type has an [`into_inner`] method which will return the 28 | /// guard that normally would have been returned for a successful lock. This 29 | /// allows access to the data, despite the lock being poisoned. The scoped 30 | /// locking methods (such as [`scoped_lock`]) will pass the [`Result`] into the 31 | /// given closure. Poisoning will occur if the closure panics. 32 | /// 33 | /// Alternatively, there is also a [`clear_poison`] method, which should 34 | /// indicate that all invariants of the underlying data are upheld, so that 35 | /// subsequent calls may still return [`Ok`]. 36 | /// 37 | /// 38 | /// [`Lockable`]: `crate::lockable::Lockable` 39 | /// [`lock`]: `Poisonable::lock` 40 | /// [`try_lock`]: `Poisonable::try_lock` 41 | /// [`read`]: `Poisonable::read` 42 | /// [`try_read`]: `Poisonable::try_read` 43 | /// [`scoped_lock`]: `Poisonable::scoped_lock` 44 | /// [`into_inner`]: `PoisonError::into_inner` 45 | /// [`clear_poison`]: `Poisonable::clear_poison` 46 | #[derive(Debug, Default)] 47 | pub struct Poisonable { 48 | inner: L, 49 | poisoned: PoisonFlag, 50 | } 51 | 52 | /// An RAII guard for a [`Poisonable`]. When this structure is dropped (falls 53 | /// out of scope), the lock will be unlocked. 54 | /// 55 | /// This is similar to a [`PoisonGuard`], except that it does not hold a 56 | /// [`ThreadKey`]. 57 | /// 58 | /// The data protected by the underlying lock can be accessed through this 59 | /// guard via its [`Deref`] and [`DerefMut`] implementations. 60 | /// 61 | /// This structure is created when passing a `Poisonable` into another lock 62 | /// wrapper, such as [`LockCollection`], and obtaining a guard through the 63 | /// wrapper type. 
64 | /// 65 | /// [`Deref`]: `std::ops::Deref` 66 | /// [`DerefMut`]: `std::ops::DerefMut` 67 | /// [`ThreadKey`]: `crate::ThreadKey` 68 | /// [`LockCollection`]: `crate::LockCollection` 69 | pub struct PoisonRef<'a, G> { 70 | guard: G, 71 | #[cfg(panic = "unwind")] 72 | flag: &'a PoisonFlag, 73 | _phantom: PhantomData<&'a ()>, 74 | } 75 | 76 | /// An RAII guard for a [`Poisonable`]. When this structure is dropped (falls 77 | /// out of scope), the lock will be unlocked. 78 | /// 79 | /// The data protected by the underlying lock can be accessed through this 80 | /// guard via its [`Deref`] and [`DerefMut`] implementations. 81 | /// 82 | /// This method is created by calling the [`lock`], [`try_lock`], [`read`], and 83 | /// [`try_read`] methods on [`Poisonable`] 84 | /// 85 | /// This guard holds a [`ThreadKey`], so it is not possible to lock anything 86 | /// else until this guard is dropped. The [`ThreadKey`] can be reacquired by 87 | /// calling [`Poisonable::unlock`], or [`Poisonable::unlock_read`]. 88 | /// 89 | /// [`lock`]: `Poisonable::lock` 90 | /// [`try_lock`]: `Poisonable::try_lock` 91 | /// [`read`]: `Poisonable::read` 92 | /// [`try_read`]: `Poisonable::try_read` 93 | /// [`Deref`]: `std::ops::Deref` 94 | /// [`DerefMut`]: `std::ops::DerefMut` 95 | /// [`ThreadKey`]: `crate::ThreadKey` 96 | /// [`LockCollection`]: `crate::LockCollection` 97 | pub struct PoisonGuard<'a, G> { 98 | guard: PoisonRef<'a, G>, 99 | key: ThreadKey, 100 | } 101 | 102 | /// A type of error which can be returned when acquiring a [`Poisonable`] lock. 103 | /// 104 | /// A [`Poisonable`] is poisoned whenever a thread fails while the lock is 105 | /// held. For a lock in the poisoned state, unless the state is cleared 106 | /// manually, all future acquisitions will return this error. 
107 | pub struct PoisonError { 108 | guard: Guard, 109 | } 110 | 111 | /// An enumeration of possible errors associated with 112 | /// [`TryLockPoisonableResult`] which can occur while trying to acquire a lock 113 | /// (i.e.: [`Poisonable::try_lock`]). 114 | pub enum TryLockPoisonableError<'flag, G> { 115 | Poisoned(PoisonError>), 116 | WouldBlock(ThreadKey), 117 | } 118 | 119 | /// A type alias for the result of a lock method which can poisoned. 120 | /// 121 | /// The [`Ok`] variant of this result indicates that the primitive was not 122 | /// poisoned, and the operation result is contained within. The [`Err`] variant 123 | /// indicates that the primitive was poisoned. Note that the [`Err`] variant 124 | /// *also* carries the associated guard, and it can be acquired through the 125 | /// [`into_inner`] method. 126 | /// 127 | /// [`into_inner`]: `PoisonError::into_inner` 128 | pub type PoisonResult = Result>; 129 | 130 | /// A type alias for the result of a nonblocking locking method. 131 | /// 132 | /// For more information, see [`PoisonResult`]. A `TryLockPoisonableResult` 133 | /// doesn't necessarily hold the associated guard in the [`Err`] type as the 134 | /// lock might not have been acquired for other reasons. 
135 | pub type TryLockPoisonableResult<'flag, G> = 136 | Result, TryLockPoisonableError<'flag, G>>; 137 | 138 | #[cfg(test)] 139 | mod tests { 140 | use std::sync::Arc; 141 | 142 | use super::*; 143 | use crate::lockable::Lockable; 144 | use crate::{LockCollection, Mutex, RwLock, ThreadKey}; 145 | 146 | #[test] 147 | fn locking_poisoned_mutex_returns_error_in_collection() { 148 | let key = ThreadKey::get().unwrap(); 149 | let mutex = LockCollection::new(Poisonable::new(Mutex::new(42))); 150 | 151 | std::thread::scope(|s| { 152 | s.spawn(|| { 153 | let key = ThreadKey::get().unwrap(); 154 | let mut guard1 = mutex.lock(key); 155 | let guard = guard1.as_deref_mut().unwrap(); 156 | assert_eq!(**guard, 42); 157 | panic!(); 158 | 159 | #[allow(unreachable_code)] 160 | drop(guard1); 161 | }) 162 | .join() 163 | .unwrap_err(); 164 | }); 165 | 166 | let error = mutex.lock(key); 167 | let error = error.as_deref().unwrap_err(); 168 | assert_eq!(***error.get_ref(), 42); 169 | } 170 | 171 | #[test] 172 | fn locking_poisoned_rwlock_returns_error_in_collection() { 173 | let key = ThreadKey::get().unwrap(); 174 | let mutex = LockCollection::new(Poisonable::new(RwLock::new(42))); 175 | 176 | std::thread::scope(|s| { 177 | s.spawn(|| { 178 | let key = ThreadKey::get().unwrap(); 179 | let mut guard1 = mutex.read(key); 180 | let guard = guard1.as_deref_mut().unwrap(); 181 | assert_eq!(**guard, 42); 182 | panic!(); 183 | 184 | #[allow(unreachable_code)] 185 | drop(guard1); 186 | }) 187 | .join() 188 | .unwrap_err(); 189 | }); 190 | 191 | let error = mutex.read(key); 192 | let error = error.as_deref().unwrap_err(); 193 | assert_eq!(***error.get_ref(), 42); 194 | } 195 | 196 | #[test] 197 | fn non_poisoned_get_mut_is_ok() { 198 | let mut mutex = Poisonable::new(Mutex::new(42)); 199 | let guard = mutex.get_mut(); 200 | assert!(guard.is_ok()); 201 | assert_eq!(*guard.unwrap(), 42); 202 | } 203 | 204 | #[test] 205 | fn non_poisoned_get_mut_is_err() { 206 | let mut mutex = 
Poisonable::new(Mutex::new(42)); 207 | 208 | let _ = std::panic::catch_unwind(|| { 209 | let key = ThreadKey::get().unwrap(); 210 | #[allow(unused_variables)] 211 | let guard = mutex.lock(key); 212 | panic!(); 213 | #[allow(unreachable_code)] 214 | drop(guard); 215 | }); 216 | 217 | let guard = mutex.get_mut(); 218 | assert!(guard.is_err()); 219 | assert_eq!(**guard.unwrap_err().get_ref(), 42); 220 | } 221 | 222 | #[test] 223 | fn unpoisoned_into_inner() { 224 | let mutex = Poisonable::new(Mutex::new("foo")); 225 | assert_eq!(mutex.into_inner().unwrap(), "foo"); 226 | } 227 | 228 | #[test] 229 | fn poisoned_into_inner() { 230 | let mutex = Poisonable::from(Mutex::new("foo")); 231 | 232 | std::panic::catch_unwind(|| { 233 | let key = ThreadKey::get().unwrap(); 234 | #[allow(unused_variables)] 235 | let guard = mutex.lock(key); 236 | panic!(); 237 | #[allow(unreachable_code)] 238 | drop(guard); 239 | }) 240 | .unwrap_err(); 241 | 242 | let error = mutex.into_inner().unwrap_err(); 243 | assert_eq!(error.into_inner(), "foo"); 244 | } 245 | 246 | #[test] 247 | fn unpoisoned_into_child() { 248 | let mutex = Poisonable::new(Mutex::new("foo")); 249 | assert_eq!(mutex.into_child().unwrap().into_inner(), "foo"); 250 | } 251 | 252 | #[test] 253 | fn poisoned_into_child() { 254 | let mutex = Poisonable::from(Mutex::new("foo")); 255 | 256 | std::panic::catch_unwind(|| { 257 | let key = ThreadKey::get().unwrap(); 258 | #[allow(unused_variables)] 259 | let guard = mutex.lock(key); 260 | panic!(); 261 | #[allow(unreachable_code)] 262 | drop(guard); 263 | }) 264 | .unwrap_err(); 265 | 266 | let error = mutex.into_child().unwrap_err(); 267 | assert_eq!(error.into_inner().into_inner(), "foo"); 268 | } 269 | 270 | #[test] 271 | fn scoped_lock_can_poison() { 272 | let key = ThreadKey::get().unwrap(); 273 | let mutex = Poisonable::new(Mutex::new(42)); 274 | 275 | let r = std::panic::catch_unwind(|| { 276 | mutex.scoped_lock(key, |num| { 277 | *num.unwrap() = 56; 278 | panic!(); 279 | }) 
280 | }); 281 | assert!(r.is_err()); 282 | 283 | let key = ThreadKey::get().unwrap(); 284 | assert!(mutex.is_poisoned()); 285 | mutex.scoped_lock(key, |num| { 286 | let Err(error) = num else { panic!() }; 287 | mutex.clear_poison(); 288 | let guard = error.into_inner(); 289 | assert_eq!(*guard, 56); 290 | }); 291 | assert!(!mutex.is_poisoned()); 292 | } 293 | 294 | #[test] 295 | fn scoped_try_lock_can_fail() { 296 | let key = ThreadKey::get().unwrap(); 297 | let mutex = Poisonable::new(Mutex::new(42)); 298 | let guard = mutex.lock(key); 299 | 300 | std::thread::scope(|s| { 301 | s.spawn(|| { 302 | let key = ThreadKey::get().unwrap(); 303 | let r = mutex.scoped_try_lock(key, |_| {}); 304 | assert!(r.is_err()); 305 | }); 306 | }); 307 | 308 | drop(guard); 309 | } 310 | 311 | #[test] 312 | fn scoped_try_lock_can_succeed() { 313 | let rwlock = Poisonable::new(RwLock::new(42)); 314 | 315 | std::thread::scope(|s| { 316 | s.spawn(|| { 317 | let key = ThreadKey::get().unwrap(); 318 | let r = rwlock.scoped_try_lock(key, |guard| { 319 | assert_eq!(*guard.unwrap(), 42); 320 | }); 321 | assert!(r.is_ok()); 322 | }); 323 | }); 324 | } 325 | 326 | #[test] 327 | fn scoped_read_can_poison() { 328 | let key = ThreadKey::get().unwrap(); 329 | let mutex = Poisonable::new(RwLock::new(42)); 330 | 331 | let r = std::panic::catch_unwind(|| { 332 | mutex.scoped_read(key, |num| { 333 | assert_eq!(*num.unwrap(), 42); 334 | panic!(); 335 | }) 336 | }); 337 | assert!(r.is_err()); 338 | 339 | let key = ThreadKey::get().unwrap(); 340 | assert!(mutex.is_poisoned()); 341 | mutex.scoped_read(key, |num| { 342 | let Err(error) = num else { panic!() }; 343 | mutex.clear_poison(); 344 | let guard = error.into_inner(); 345 | assert_eq!(*guard, 42); 346 | }); 347 | assert!(!mutex.is_poisoned()); 348 | } 349 | 350 | #[test] 351 | fn scoped_try_read_can_fail() { 352 | let key = ThreadKey::get().unwrap(); 353 | let rwlock = Poisonable::new(RwLock::new(42)); 354 | let guard = rwlock.lock(key); 355 | 356 | 
std::thread::scope(|s| { 357 | s.spawn(|| { 358 | let key = ThreadKey::get().unwrap(); 359 | let r = rwlock.scoped_try_read(key, |_| {}); 360 | assert!(r.is_err()); 361 | }); 362 | }); 363 | 364 | drop(guard); 365 | } 366 | 367 | #[test] 368 | fn scoped_try_read_can_succeed() { 369 | let rwlock = Poisonable::new(RwLock::new(42)); 370 | 371 | std::thread::scope(|s| { 372 | s.spawn(|| { 373 | let key = ThreadKey::get().unwrap(); 374 | let r = rwlock.scoped_try_read(key, |guard| { 375 | assert_eq!(*guard.unwrap(), 42); 376 | }); 377 | assert!(r.is_ok()); 378 | }); 379 | }); 380 | } 381 | 382 | #[test] 383 | fn display_works() { 384 | let key = ThreadKey::get().unwrap(); 385 | let mutex = Poisonable::new(Mutex::new("Hello, world!")); 386 | 387 | let guard = mutex.lock(key).unwrap(); 388 | 389 | assert_eq!(guard.to_string(), "Hello, world!"); 390 | } 391 | 392 | #[test] 393 | fn ref_as_ref() { 394 | let key = ThreadKey::get().unwrap(); 395 | let collection = LockCollection::new(Poisonable::new(Mutex::new("foo"))); 396 | let guard = collection.lock(key); 397 | let Ok(ref guard) = guard.as_ref() else { 398 | panic!() 399 | }; 400 | assert_eq!(**guard.as_ref(), "foo"); 401 | } 402 | 403 | #[test] 404 | fn ref_as_mut() { 405 | let key = ThreadKey::get().unwrap(); 406 | let collection = LockCollection::new(Poisonable::new(Mutex::new("foo"))); 407 | let mut guard1 = collection.lock(key); 408 | let Ok(ref mut guard) = guard1.as_mut() else { 409 | panic!() 410 | }; 411 | let guard = guard.as_mut(); 412 | **guard = "bar"; 413 | 414 | let key = LockCollection::>>::unlock(guard1); 415 | let guard = collection.lock(key); 416 | let guard = guard.as_deref().unwrap(); 417 | assert_eq!(*guard.as_ref(), "bar"); 418 | } 419 | 420 | #[test] 421 | fn guard_as_ref() { 422 | let key = ThreadKey::get().unwrap(); 423 | let collection = Poisonable::new(Mutex::new("foo")); 424 | let guard = collection.lock(key); 425 | let Ok(ref guard) = guard.as_ref() else { 426 | panic!() 427 | }; 428 | 
assert_eq!(**guard.as_ref(), "foo"); 429 | } 430 | 431 | #[test] 432 | fn guard_as_mut() { 433 | let key = ThreadKey::get().unwrap(); 434 | let mutex = Poisonable::new(Mutex::new("foo")); 435 | let mut guard1 = mutex.lock(key); 436 | let Ok(ref mut guard) = guard1.as_mut() else { 437 | panic!() 438 | }; 439 | let guard = guard.as_mut(); 440 | **guard = "bar"; 441 | 442 | let key = Poisonable::>::unlock(guard1.unwrap()); 443 | let guard = mutex.lock(key); 444 | let guard = guard.as_deref().unwrap(); 445 | assert_eq!(*guard, "bar"); 446 | } 447 | 448 | #[test] 449 | fn deref_mut_in_collection() { 450 | let key = ThreadKey::get().unwrap(); 451 | let collection = LockCollection::new(Poisonable::new(Mutex::new(42))); 452 | let mut guard1 = collection.lock(key); 453 | let Ok(ref mut guard) = guard1.as_mut() else { 454 | panic!() 455 | }; 456 | // TODO make this more convenient 457 | assert_eq!(***guard, 42); 458 | ***guard = 24; 459 | 460 | let key = LockCollection::>>::unlock(guard1); 461 | _ = collection.lock(key); 462 | } 463 | 464 | #[test] 465 | fn get_ptrs() { 466 | let mutex = Mutex::new(5); 467 | let poisonable = Poisonable::new(mutex); 468 | let mut lock_ptrs = Vec::new(); 469 | poisonable.get_ptrs(&mut lock_ptrs); 470 | 471 | assert_eq!(lock_ptrs.len(), 1); 472 | assert!(std::ptr::addr_eq(lock_ptrs[0], &poisonable.inner)); 473 | } 474 | 475 | #[test] 476 | fn clear_poison_for_poisoned_mutex() { 477 | let mutex = Arc::new(Poisonable::new(Mutex::new(0))); 478 | let c_mutex = Arc::clone(&mutex); 479 | 480 | let _ = std::thread::spawn(move || { 481 | let key = ThreadKey::get().unwrap(); 482 | let _lock = c_mutex.lock(key).unwrap(); 483 | panic!(); // the mutex gets poisoned 484 | }) 485 | .join(); 486 | 487 | assert!(mutex.is_poisoned()); 488 | 489 | let key = ThreadKey::get().unwrap(); 490 | let _ = mutex.lock(key).unwrap_or_else(|mut e| { 491 | **e.get_mut() = 1; 492 | mutex.clear_poison(); 493 | e.into_inner() 494 | }); 495 | 496 | assert!(!mutex.is_poisoned()); 
497 | } 498 | 499 | #[test] 500 | fn clear_poison_for_poisoned_rwlock() { 501 | let lock = Arc::new(Poisonable::new(RwLock::new(0))); 502 | let c_mutex = Arc::clone(&lock); 503 | 504 | let _ = std::thread::spawn(move || { 505 | let key = ThreadKey::get().unwrap(); 506 | let lock = c_mutex.read(key).unwrap(); 507 | assert_eq!(*lock, 42); 508 | panic!(); // the mutex gets poisoned 509 | }) 510 | .join(); 511 | 512 | assert!(lock.is_poisoned()); 513 | 514 | let key = ThreadKey::get().unwrap(); 515 | let _ = lock.lock(key).unwrap_or_else(|mut e| { 516 | **e.get_mut() = 1; 517 | lock.clear_poison(); 518 | e.into_inner() 519 | }); 520 | 521 | assert!(!lock.is_poisoned()); 522 | } 523 | 524 | #[test] 525 | fn error_as_ref() { 526 | let mutex = Poisonable::new(Mutex::new("foo")); 527 | 528 | let _ = std::panic::catch_unwind(|| { 529 | let key = ThreadKey::get().unwrap(); 530 | #[allow(unused_variables)] 531 | let guard = mutex.lock(key); 532 | panic!(); 533 | 534 | #[allow(unknown_lints)] 535 | #[allow(unreachable_code)] 536 | drop(guard); 537 | }); 538 | 539 | assert!(mutex.is_poisoned()); 540 | 541 | let key = ThreadKey::get().unwrap(); 542 | let error = mutex.lock(key).unwrap_err(); 543 | assert_eq!(&***error.as_ref(), "foo"); 544 | } 545 | 546 | #[test] 547 | fn error_as_mut() { 548 | let mutex = Poisonable::new(Mutex::new("foo")); 549 | 550 | let _ = std::panic::catch_unwind(|| { 551 | let key = ThreadKey::get().unwrap(); 552 | #[allow(unused_variables)] 553 | let guard = mutex.lock(key); 554 | panic!(); 555 | 556 | #[allow(unknown_lints)] 557 | #[allow(unreachable_code)] 558 | drop(guard); 559 | }); 560 | 561 | assert!(mutex.is_poisoned()); 562 | 563 | let key: ThreadKey = ThreadKey::get().unwrap(); 564 | let mut error = mutex.lock(key).unwrap_err(); 565 | let error1 = error.as_mut(); 566 | **error1 = "bar"; 567 | let key = Poisonable::>::unlock(error.into_inner()); 568 | 569 | mutex.clear_poison(); 570 | let guard = mutex.lock(key).unwrap(); 571 | 
assert_eq!(&**guard, "bar"); 572 | } 573 | 574 | #[test] 575 | fn try_error_from_lock_error() { 576 | let mutex = Poisonable::new(Mutex::new("foo")); 577 | 578 | let _ = std::panic::catch_unwind(|| { 579 | let key = ThreadKey::get().unwrap(); 580 | #[allow(unused_variables)] 581 | let guard = mutex.lock(key); 582 | panic!(); 583 | 584 | #[allow(unknown_lints)] 585 | #[allow(unreachable_code)] 586 | drop(guard); 587 | }); 588 | 589 | assert!(mutex.is_poisoned()); 590 | 591 | let key = ThreadKey::get().unwrap(); 592 | let error = mutex.lock(key).unwrap_err(); 593 | let error = TryLockPoisonableError::from(error); 594 | 595 | let TryLockPoisonableError::Poisoned(error) = error else { 596 | panic!() 597 | }; 598 | assert_eq!(&**error.into_inner(), "foo"); 599 | } 600 | 601 | #[test] 602 | fn new_poisonable_is_not_poisoned() { 603 | let mutex = Poisonable::new(Mutex::new(42)); 604 | assert!(!mutex.is_poisoned()); 605 | } 606 | } 607 | -------------------------------------------------------------------------------- /src/poisonable/poisonable.rs: -------------------------------------------------------------------------------- 1 | use std::panic::{RefUnwindSafe, UnwindSafe}; 2 | 3 | use crate::handle_unwind::handle_unwind; 4 | use crate::lockable::{ 5 | Lockable, LockableGetMut, LockableIntoInner, OwnedLockable, RawLock, Sharable, 6 | }; 7 | use crate::{Keyable, ThreadKey}; 8 | 9 | use super::{ 10 | PoisonError, PoisonFlag, PoisonGuard, PoisonRef, PoisonResult, Poisonable, 11 | TryLockPoisonableError, TryLockPoisonableResult, 12 | }; 13 | 14 | unsafe impl RawLock for Poisonable { 15 | #[mutants::skip] // this should never run 16 | #[cfg(not(tarpaulin_include))] 17 | fn poison(&self) { 18 | self.inner.poison() 19 | } 20 | 21 | unsafe fn raw_write(&self) { 22 | self.inner.raw_write() 23 | } 24 | 25 | unsafe fn raw_try_write(&self) -> bool { 26 | self.inner.raw_try_write() 27 | } 28 | 29 | unsafe fn raw_unlock_write(&self) { 30 | self.inner.raw_unlock_write() 31 | } 32 | 33 | 
unsafe fn raw_read(&self) { 34 | self.inner.raw_read() 35 | } 36 | 37 | unsafe fn raw_try_read(&self) -> bool { 38 | self.inner.raw_try_read() 39 | } 40 | 41 | unsafe fn raw_unlock_read(&self) { 42 | self.inner.raw_unlock_read() 43 | } 44 | } 45 | 46 | unsafe impl Lockable for Poisonable { 47 | type Guard<'g> 48 | = PoisonResult>> 49 | where 50 | Self: 'g; 51 | 52 | type DataMut<'a> 53 | = PoisonResult> 54 | where 55 | Self: 'a; 56 | 57 | fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { 58 | self.inner.get_ptrs(ptrs) 59 | } 60 | 61 | unsafe fn guard(&self) -> Self::Guard<'_> { 62 | let ref_guard = PoisonRef::new(&self.poisoned, self.inner.guard()); 63 | 64 | if self.is_poisoned() { 65 | Err(PoisonError::new(ref_guard)) 66 | } else { 67 | Ok(ref_guard) 68 | } 69 | } 70 | 71 | unsafe fn data_mut(&self) -> Self::DataMut<'_> { 72 | if self.is_poisoned() { 73 | Err(PoisonError::new(self.inner.data_mut())) 74 | } else { 75 | Ok(self.inner.data_mut()) 76 | } 77 | } 78 | } 79 | 80 | unsafe impl Sharable for Poisonable { 81 | type ReadGuard<'g> 82 | = PoisonResult>> 83 | where 84 | Self: 'g; 85 | 86 | type DataRef<'a> 87 | = PoisonResult> 88 | where 89 | Self: 'a; 90 | 91 | unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { 92 | let ref_guard = PoisonRef::new(&self.poisoned, self.inner.read_guard()); 93 | 94 | if self.is_poisoned() { 95 | Err(PoisonError::new(ref_guard)) 96 | } else { 97 | Ok(ref_guard) 98 | } 99 | } 100 | 101 | unsafe fn data_ref(&self) -> Self::DataRef<'_> { 102 | if self.is_poisoned() { 103 | Err(PoisonError::new(self.inner.data_ref())) 104 | } else { 105 | Ok(self.inner.data_ref()) 106 | } 107 | } 108 | } 109 | 110 | unsafe impl OwnedLockable for Poisonable {} 111 | 112 | // AsMut won't work here because we don't strictly return a &mut T 113 | // LockableGetMut is the next best thing 114 | impl LockableGetMut for Poisonable { 115 | type Inner<'a> 116 | = PoisonResult> 117 | where 118 | Self: 'a; 119 | 120 | fn get_mut(&mut self) -> 
Self::Inner<'_> { 121 | if self.is_poisoned() { 122 | Err(PoisonError::new(self.inner.get_mut())) 123 | } else { 124 | Ok(self.inner.get_mut()) 125 | } 126 | } 127 | } 128 | 129 | impl LockableIntoInner for Poisonable { 130 | type Inner = PoisonResult; 131 | 132 | fn into_inner(self) -> Self::Inner { 133 | if self.is_poisoned() { 134 | Err(PoisonError::new(self.inner.into_inner())) 135 | } else { 136 | Ok(self.inner.into_inner()) 137 | } 138 | } 139 | } 140 | 141 | impl From for Poisonable { 142 | fn from(value: L) -> Self { 143 | Self::new(value) 144 | } 145 | } 146 | 147 | impl Poisonable { 148 | /// Creates a new `Poisonable` 149 | /// 150 | /// # Examples 151 | /// 152 | /// ``` 153 | /// use happylock::{Mutex, Poisonable}; 154 | /// 155 | /// let mutex = Poisonable::new(Mutex::new(0)); 156 | /// ``` 157 | pub const fn new(value: L) -> Self { 158 | Self { 159 | inner: value, 160 | poisoned: PoisonFlag::new(), 161 | } 162 | } 163 | 164 | /// Determines whether the `Poisonable` is poisoned. 165 | /// 166 | /// If another thread is active, the `Poisonable` can still become poisoned at 167 | /// any time. You should not trust a `false` value for program correctness 168 | /// without additional synchronization. 169 | /// 170 | /// # Examples 171 | /// 172 | /// ``` 173 | /// use std::thread; 174 | /// 175 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 176 | /// 177 | /// let mutex = Poisonable::new(Mutex::new(0)); 178 | /// 179 | /// thread::scope(|s| { 180 | /// let r = s.spawn(|| { 181 | /// let key = ThreadKey::get().unwrap(); 182 | /// let _lock = mutex.lock(key).unwrap(); 183 | /// panic!(); // the mutex gets poisoned 184 | /// }).join(); 185 | /// }); 186 | /// 187 | /// assert_eq!(mutex.is_poisoned(), true); 188 | /// ``` 189 | pub fn is_poisoned(&self) -> bool { 190 | self.poisoned.is_poisoned() 191 | } 192 | 193 | /// Clear the poisoned state from a lock. 
194 | /// 195 | /// If the lock is poisoned, it will remain poisoned until this function 196 | /// is called. This allows recovering from a poisoned state and marking 197 | /// that it has recovered. For example, if the value is overwritten by a 198 | /// known-good value, then the lock can be marked as un-poisoned. Or 199 | /// possibly, the value could by inspected to determine if it is in a 200 | /// consistent state, and if so the poison is removed. 201 | /// 202 | /// # Examples 203 | /// 204 | /// ``` 205 | /// use std::thread; 206 | /// 207 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 208 | /// 209 | /// let mutex = Poisonable::new(Mutex::new(0)); 210 | /// 211 | /// thread::scope(|s| { 212 | /// let r = s.spawn(|| { 213 | /// let key = ThreadKey::get().unwrap(); 214 | /// let _lock = mutex.lock(key).unwrap(); 215 | /// panic!(); // the mutex gets poisoned 216 | /// }).join(); 217 | /// }); 218 | /// 219 | /// assert_eq!(mutex.is_poisoned(), true); 220 | /// 221 | /// let key = ThreadKey::get().unwrap(); 222 | /// let x = mutex.lock(key).unwrap_or_else(|mut e| { 223 | /// **e.get_mut() = 1; 224 | /// mutex.clear_poison(); 225 | /// e.into_inner() 226 | /// }); 227 | /// 228 | /// assert_eq!(mutex.is_poisoned(), false); 229 | /// assert_eq!(*x, 1); 230 | /// ``` 231 | pub fn clear_poison(&self) { 232 | self.poisoned.clear_poison() 233 | } 234 | 235 | /// Consumes this `Poisonable`, returning the underlying lock. 236 | /// 237 | /// # Errors 238 | /// 239 | /// If another user of this lock panicked while holding the lock, then this 240 | /// call will return an error instead. 
241 | /// 242 | /// # Examples 243 | /// 244 | /// ``` 245 | /// use happylock::{Mutex, Poisonable}; 246 | /// 247 | /// let mutex = Poisonable::new(Mutex::new(0)); 248 | /// assert_eq!(mutex.into_child().unwrap().into_inner(), 0); 249 | /// ``` 250 | pub fn into_child(self) -> PoisonResult { 251 | if self.is_poisoned() { 252 | Err(PoisonError::new(self.inner)) 253 | } else { 254 | Ok(self.inner) 255 | } 256 | } 257 | 258 | /// Returns a mutable reference to the underlying lock. 259 | /// 260 | /// # Errors 261 | /// 262 | /// If another user of this lock panicked while holding the lock, then 263 | /// this call will return an error instead. 264 | /// 265 | /// # Examples 266 | /// 267 | /// ``` 268 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 269 | /// 270 | /// let key = ThreadKey::get().unwrap(); 271 | /// let mut mutex = Poisonable::new(Mutex::new(0)); 272 | /// *mutex.child_mut().unwrap().as_mut() = 10; 273 | /// assert_eq!(*mutex.lock(key).unwrap(), 10); 274 | /// ``` 275 | pub fn child_mut(&mut self) -> PoisonResult<&mut L> { 276 | if self.is_poisoned() { 277 | Err(PoisonError::new(&mut self.inner)) 278 | } else { 279 | Ok(&mut self.inner) 280 | } 281 | } 282 | 283 | // NOTE: `child_ref` isn't implemented because it would make this not `RefUnwindSafe` 284 | } 285 | 286 | impl Poisonable { 287 | /// Creates a guard for the poisonable, without locking it 288 | unsafe fn guard(&self, key: ThreadKey) -> PoisonResult>> { 289 | let guard = PoisonGuard { 290 | guard: PoisonRef::new(&self.poisoned, self.inner.guard()), 291 | key, 292 | }; 293 | 294 | if self.is_poisoned() { 295 | return Err(PoisonError::new(guard)); 296 | } 297 | 298 | Ok(guard) 299 | } 300 | } 301 | 302 | impl Poisonable { 303 | pub fn scoped_lock<'a, R>( 304 | &'a self, 305 | key: impl Keyable, 306 | f: impl Fn(::DataMut<'a>) -> R, 307 | ) -> R { 308 | unsafe { 309 | // safety: we have the thread key 310 | self.raw_write(); 311 | 312 | // safety: the data was just locked 313 | let r = 
handle_unwind( 314 | || f(self.data_mut()), 315 | || { 316 | self.poisoned.poison(); 317 | self.raw_unlock_write(); 318 | }, 319 | ); 320 | 321 | // safety: the collection is still locked 322 | self.raw_unlock_write(); 323 | 324 | drop(key); // ensure the key stays alive long enough 325 | 326 | r 327 | } 328 | } 329 | 330 | pub fn scoped_try_lock<'a, Key: Keyable, R>( 331 | &'a self, 332 | key: Key, 333 | f: impl Fn(::DataMut<'a>) -> R, 334 | ) -> Result { 335 | unsafe { 336 | // safety: we have the thread key 337 | if !self.raw_try_write() { 338 | return Err(key); 339 | } 340 | 341 | // safety: we just locked the collection 342 | let r = handle_unwind( 343 | || f(self.data_mut()), 344 | || { 345 | self.poisoned.poison(); 346 | self.raw_unlock_write(); 347 | }, 348 | ); 349 | 350 | // safety: the collection is still locked 351 | self.raw_unlock_write(); 352 | 353 | drop(key); // ensures the key stays valid long enough 354 | 355 | Ok(r) 356 | } 357 | } 358 | 359 | /// Acquires the lock, blocking the current thread until it is ok to do so. 360 | /// 361 | /// This function will block the current thread until it is available to 362 | /// acquire the lock. Upon returning, the thread is the only thread with 363 | /// the lock held. An RAII guard is returned to allow scoped unlock of the 364 | /// lock. When the guard goes out of scope, the mutex will be unlocked. 365 | /// 366 | /// # Errors 367 | /// 368 | /// If another use of this lock panicked while holding the mutex, then 369 | /// this call will return an error once the mutex is acquired. 
370 | /// 371 | /// # Examples 372 | /// 373 | /// ``` 374 | /// use std::thread; 375 | /// 376 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 377 | /// 378 | /// let mutex = Poisonable::new(Mutex::new(0)); 379 | /// 380 | /// thread::scope(|s| { 381 | /// let r = s.spawn(|| { 382 | /// let key = ThreadKey::get().unwrap(); 383 | /// *mutex.lock(key).unwrap() = 10; 384 | /// }).join(); 385 | /// }); 386 | /// 387 | /// let key = ThreadKey::get().unwrap(); 388 | /// assert_eq!(*mutex.lock(key).unwrap(), 10); 389 | /// ``` 390 | pub fn lock(&self, key: ThreadKey) -> PoisonResult>> { 391 | unsafe { 392 | self.inner.raw_write(); 393 | self.guard(key) 394 | } 395 | } 396 | 397 | /// Attempts to acquire this lock. 398 | /// 399 | /// If the lock could not be acquired at this time, then [`Err`] is 400 | /// returned. Otherwise, an RAII guard is returned. The lock will be 401 | /// unlocked when the guard is dropped. 402 | /// 403 | /// This function does not block. 404 | /// 405 | /// # Errors 406 | /// 407 | /// If another user of this lock panicked while holding the lock, then this 408 | /// call will return the [`Poisoned`] error if the lock would otherwise be 409 | /// acquired. 410 | /// 411 | /// If the lock could not be acquired because it is already locked, then 412 | /// this call will return the [`WouldBlock`] error. 
413 | /// 414 | /// # Examples 415 | /// 416 | /// ``` 417 | /// use std::thread; 418 | /// 419 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 420 | /// 421 | /// let mutex = Poisonable::new(Mutex::new(0)); 422 | /// 423 | /// thread::scope(|s| { 424 | /// s.spawn(|| { 425 | /// let key = ThreadKey::get().unwrap(); 426 | /// let mut lock = mutex.try_lock(key); 427 | /// if let Ok(mut mutex) = lock { 428 | /// *mutex = 10; 429 | /// } else { 430 | /// println!("try_lock failed"); 431 | /// } 432 | /// }); 433 | /// }); 434 | /// 435 | /// let key = ThreadKey::get().unwrap(); 436 | /// assert_eq!(*mutex.lock(key).unwrap(), 10); 437 | /// ``` 438 | /// 439 | /// [`Poisoned`]: `TryLockPoisonableError::Poisoned` 440 | /// [`WouldBlock`]: `TryLockPoisonableError::WouldBlock` 441 | pub fn try_lock(&self, key: ThreadKey) -> TryLockPoisonableResult<'_, L::Guard<'_>> { 442 | unsafe { 443 | if self.inner.raw_try_write() { 444 | Ok(self.guard(key)?) 445 | } else { 446 | Err(TryLockPoisonableError::WouldBlock(key)) 447 | } 448 | } 449 | } 450 | 451 | /// Consumes the [`PoisonGuard`], and consequently unlocks its `Poisonable`. 452 | /// 453 | /// This function is equivalent to calling [`drop`] on the guard, except that 454 | /// it returns the key that was used to create it. Alternatively, the guard 455 | /// will be automatically dropped when it goes out of scope. 
456 | /// 457 | /// # Examples 458 | /// 459 | /// ``` 460 | /// use happylock::{ThreadKey, Mutex, Poisonable}; 461 | /// 462 | /// let key = ThreadKey::get().unwrap(); 463 | /// let mutex = Poisonable::new(Mutex::new(0)); 464 | /// 465 | /// let mut guard = mutex.lock(key).unwrap(); 466 | /// *guard += 20; 467 | /// 468 | /// let key = Poisonable::>::unlock(guard); 469 | /// ``` 470 | pub fn unlock<'flag>(guard: PoisonGuard<'flag, L::Guard<'flag>>) -> ThreadKey { 471 | drop(guard.guard); 472 | guard.key 473 | } 474 | } 475 | 476 | impl Poisonable { 477 | unsafe fn read_guard(&self, key: ThreadKey) -> PoisonResult>> { 478 | let guard = PoisonGuard { 479 | guard: PoisonRef::new(&self.poisoned, self.inner.read_guard()), 480 | key, 481 | }; 482 | 483 | if self.is_poisoned() { 484 | return Err(PoisonError::new(guard)); 485 | } 486 | 487 | Ok(guard) 488 | } 489 | 490 | pub fn scoped_read<'a, R>( 491 | &'a self, 492 | key: impl Keyable, 493 | f: impl Fn(::DataRef<'a>) -> R, 494 | ) -> R { 495 | unsafe { 496 | // safety: we have the thread key 497 | self.raw_read(); 498 | 499 | // safety: the data was just locked 500 | let r = handle_unwind( 501 | || f(self.data_ref()), 502 | || { 503 | self.poisoned.poison(); 504 | self.raw_unlock_read(); 505 | }, 506 | ); 507 | 508 | // safety: the collection is still locked 509 | self.raw_unlock_read(); 510 | 511 | drop(key); // ensure the key stays alive long enough 512 | 513 | r 514 | } 515 | } 516 | 517 | pub fn scoped_try_read<'a, Key: Keyable, R>( 518 | &'a self, 519 | key: Key, 520 | f: impl Fn(::DataRef<'a>) -> R, 521 | ) -> Result { 522 | unsafe { 523 | // safety: we have the thread key 524 | if !self.raw_try_read() { 525 | return Err(key); 526 | } 527 | 528 | // safety: we just locked the collection 529 | let r = handle_unwind( 530 | || f(self.data_ref()), 531 | || { 532 | self.poisoned.poison(); 533 | self.raw_unlock_read(); 534 | }, 535 | ); 536 | 537 | // safety: the collection is still locked 538 | self.raw_unlock_read(); 
539 | 540 | drop(key); // ensures the key stays valid long enough 541 | 542 | Ok(r) 543 | } 544 | } 545 | 546 | /// Locks with shared read access, blocking the current thread until it can 547 | /// be acquired. 548 | /// 549 | /// This function will block the current thread until there are no writers 550 | /// which hold the lock. This method does not provide any guarantee with 551 | /// respect to the ordering of contentious readers or writers will acquire 552 | /// the lock. 553 | /// 554 | /// # Errors 555 | /// 556 | /// This function will return an error if the `Poisonable` is poisoned. A 557 | /// `Poisonable` is poisoned whenever a thread panics while holding a lock. 558 | /// The failure will occur immediately after the lock has been acquired. The 559 | /// acquired lock guard will be contained in the returned error. 560 | /// 561 | /// # Examples 562 | /// 563 | /// ``` 564 | /// use std::thread; 565 | /// 566 | /// use happylock::{RwLock, Poisonable, ThreadKey}; 567 | /// 568 | /// let key = ThreadKey::get().unwrap(); 569 | /// let lock = Poisonable::new(RwLock::new(0)); 570 | /// 571 | /// let n = lock.read(key).unwrap(); 572 | /// assert_eq!(*n, 0); 573 | /// 574 | /// thread::scope(|s| { 575 | /// s.spawn(|| { 576 | /// let key = ThreadKey::get().unwrap(); 577 | /// let r = lock.read(key); 578 | /// assert!(r.is_ok()); 579 | /// }); 580 | /// }); 581 | /// ``` 582 | pub fn read(&self, key: ThreadKey) -> PoisonResult>> { 583 | unsafe { 584 | self.inner.raw_read(); 585 | self.read_guard(key) 586 | } 587 | } 588 | 589 | /// Attempts to acquire the lock with shared read access, without blocking the 590 | /// thread. 591 | /// 592 | /// If the access could not be granted at this time, then `Err` is returned. 593 | /// Otherwise, an RAII guard is returned which will release the shared access 594 | /// when it is dropped. 
595 | /// 596 | /// This function does not provide any guarantees with respect to the ordering 597 | /// of whether contentious readers or writers will acquire the lock first. 598 | /// 599 | /// # Errors 600 | /// 601 | /// This function will return the [`Poisoned`] error if the lock is 602 | /// poisoned. A [`Poisonable`] is poisoned whenever a thread panics while 603 | /// holding a lock. `Poisoned` will only be returned if the lock would have 604 | /// otherwise been acquired. 605 | /// 606 | /// This function will return the [`WouldBlock`] error if the lock could 607 | /// not be acquired because it was already locked exclusively. 608 | /// 609 | /// # Examples 610 | /// 611 | /// ``` 612 | /// use happylock::{Poisonable, RwLock, ThreadKey}; 613 | /// 614 | /// let key = ThreadKey::get().unwrap(); 615 | /// let lock = Poisonable::new(RwLock::new(1)); 616 | /// 617 | /// match lock.try_read(key) { 618 | /// Ok(n) => assert_eq!(*n, 1), 619 | /// Err(_) => unreachable!(), 620 | /// }; 621 | /// ``` 622 | /// 623 | /// [`Poisoned`]: `TryLockPoisonableError::Poisoned` 624 | /// [`WouldBlock`]: `TryLockPoisonableError::WouldBlock` 625 | // TODO don't poison when holding shared lock 626 | pub fn try_read(&self, key: ThreadKey) -> TryLockPoisonableResult<'_, L::ReadGuard<'_>> { 627 | unsafe { 628 | if self.inner.raw_try_read() { 629 | Ok(self.read_guard(key)?) 630 | } else { 631 | Err(TryLockPoisonableError::WouldBlock(key)) 632 | } 633 | } 634 | } 635 | 636 | /// Consumes the [`PoisonGuard`], and consequently unlocks its `Poisonable`. 637 | /// 638 | /// This function is equivalent to calling [`drop`] on the guard, except that 639 | /// it returns the key that was used to create it. Alternatively, the guard 640 | /// will be automatically dropped when it goes out of scope. 
641 | /// 642 | /// # Examples 643 | /// 644 | /// ``` 645 | /// use happylock::{ThreadKey, RwLock, Poisonable}; 646 | /// 647 | /// let key = ThreadKey::get().unwrap(); 648 | /// let lock = Poisonable::new(RwLock::new(20)); 649 | /// 650 | /// let mut guard = lock.read(key).unwrap(); 651 | /// assert_eq!(*guard, 20); 652 | /// 653 | /// let key = Poisonable::>::unlock_read(guard); 654 | /// ``` 655 | pub fn unlock_read<'flag>(guard: PoisonGuard<'flag, L::ReadGuard<'flag>>) -> ThreadKey { 656 | drop(guard.guard); 657 | guard.key 658 | } 659 | } 660 | 661 | impl Poisonable { 662 | /// Consumes this `Poisonable`, returning the underlying data. 663 | /// 664 | /// # Errors 665 | /// 666 | /// If another user of this lock panicked while holding the lock, then this 667 | /// call will return an error instead. A `Poisonable` is poisoned whenever a 668 | /// thread panics while holding a lock. 669 | /// 670 | /// # Examples 671 | /// 672 | /// ``` 673 | /// use happylock::{Mutex, Poisonable}; 674 | /// 675 | /// let mutex = Poisonable::new(Mutex::new(0)); 676 | /// assert_eq!(mutex.into_inner().unwrap(), 0); 677 | /// ``` 678 | pub fn into_inner(self) -> PoisonResult { 679 | LockableIntoInner::into_inner(self) 680 | } 681 | } 682 | 683 | impl Poisonable { 684 | /// Returns a mutable reference to the underlying data. 685 | /// 686 | /// Since this call borrows the `Poisonable` mutably, no actual locking 687 | /// needs to take place - the mutable borrow statically guarantees no locks 688 | /// exist. 689 | /// 690 | /// # Errors 691 | /// 692 | /// If another user of this lock panicked while holding the lock, then 693 | /// this call will return an error instead. A `Poisonable` is poisoned 694 | /// whenever a thread panics while holding a lock. 
695 | /// 696 | /// # Examples 697 | /// 698 | /// ``` 699 | /// use happylock::{Mutex, Poisonable, ThreadKey}; 700 | /// 701 | /// let key = ThreadKey::get().unwrap(); 702 | /// let mut mutex = Poisonable::new(Mutex::new(0)); 703 | /// *mutex.get_mut().unwrap() = 10; 704 | /// assert_eq!(*mutex.lock(key).unwrap(), 10); 705 | /// ``` 706 | pub fn get_mut(&mut self) -> PoisonResult> { 707 | LockableGetMut::get_mut(self) 708 | } 709 | } 710 | 711 | impl RefUnwindSafe for Poisonable {} 712 | impl UnwindSafe for Poisonable {} 713 | -------------------------------------------------------------------------------- /happylock.md: -------------------------------------------------------------------------------- 1 | --- 2 | marp: true 3 | theme: gaia 4 | class: invert 5 | author: Mica White 6 | --- 7 | 8 | 9 | 10 | # HappyLock 11 | 12 | deadlock-free mutexes at compile-time 13 | 14 | --- 15 | 16 | 17 | # Background 18 | 19 | --- 20 | 21 | ## Goals: Background 22 | 23 | - Quick refresh on the borrow checker 24 | - What is a Mutex? 25 | - How Rust does Mutexes 26 | - What is a deadlock? 27 | - How can deadlocks be prevented? 28 | 29 | --- 30 | 31 | ## Quick Refresh on Borrow Checker Rules 32 | 33 | 1. You may have multiple immutable references to a value at a time 34 | 2. If there is a mutable reference to a value, then it is the only reference 35 | 3. Values cannot be moved while they are being referenced 36 | 37 | ```rust 38 | let s = String::new("Hello, world!"); 39 | let r1 = &s; 40 | let r2 = &s; // this is allowed because of #1 41 | let mr = &mut s; // illegal: rule #2 42 | drop(s); // also illegal: rule #3 43 | println!("{r1} {r2}"); 44 | ``` 45 | 46 | --- 47 | 48 | ## What is a Mutex? 49 | 50 | It gives mutually-exclusive access for one thread, to prevent races. 
51 | 52 | ```c 53 | static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; 54 | static int number = 1; 55 | 56 | void thread_1() { 57 | pthread_mutex_lock(&mutex); 58 | number = 6; 59 | pthread_mutex_unlock(&mutex); 60 | } 61 | void thread_2() { 62 | pthread_mutex_lock(&mutex); 63 | printf("%d", number); 64 | pthread_mutex_unlock(&mutex); 65 | } 66 | ``` 67 | 68 | This prevents the number from being modified while it is being printed, which would cause undefined behavior. 69 | 70 | --- 71 | 72 | ## Rust Mutexes 73 | 74 | In Rust, the mutex is safer than in C. The mutex protects the data itself rather than sections of code. 75 | 76 | ```rust 77 | static NUMBER: Mutex = Mutex::new(1); 78 | 79 | fn thread_1() { 80 | // MutexGuard grants access to the data inside of the mutex 81 | // We cannot access this data without locking first 82 | let mut number: MutexGuard<'_, i32> = NUMBER.lock().unwrap(); 83 | // MutexGuard is a smart pointer that we can modify directly 84 | *number += 5; 85 | 86 | // when the MutexGuard goes out of scope, it unlocks the mutex for you 87 | } 88 | ``` 89 | 90 | --- 91 | 92 | ## What is a deadlock? 93 | 94 | Locking a mutex in a way that makes it impossible to be unlocked. 95 | 96 | A simple way to cause deadlock is to lock twice on the same thread. 97 | 98 | ```rust 99 | let number = Mutex::new(1); 100 | let guard1 = number.lock().unwrap(); 101 | // now everybody has to wait until guard1 is dropped 102 | 103 | let guard2 = number.lock().unwrap(); // but wait, guard1 still exists 104 | // and we can't drop guard1, because we have to wait for this to finish 105 | // THIS IS A DEADLOCK!
(in C, this causes undefined behavior) 106 | 107 | // we'll never get to do this 108 | println!("{guard1} {guard2}"); 109 | ``` 110 | 111 | --- 112 | 113 | ## The Dining Philosophers Problem 114 | 115 | This is another example of deadlock, which is in the category of "deadly embrace" 116 | 117 | 118 | 119 | --- 120 | 121 | ## Four Conditions for Deadlock 122 | 123 | 1. Mutual Exclusion 124 | 2. Non-preemptive Allocation 125 | 3. Cyclic Wait 126 | 4. Partial Allocation 127 | 128 | --- 129 | 130 | ## Preventing Mutual Exclusion 131 | 132 | Mutual exclusion is the entire point of a mutex. 133 | 134 | Do you want a `ReadOnly` type? 135 | 136 | Just use `Arc` or `&T`! 137 | 138 | --- 139 | 140 | ## Prevent Non-Preemptive Allocation 141 | 142 | ```rust 143 | let mutex = Mutex::new(10); 144 | let mut number = mutex.lock(); 145 | 146 | let th = thread::spawn(|| { 147 | let number = mutex.lock(); // preempts the other lock on number 148 | }); 149 | th.join(); 150 | 151 | println!("Thread 1: {}", *number); // oops, we don't have access to number anymore! 152 | ``` 153 | 154 | --- 155 | 156 | ## Preventing Cyclic Wait 157 | 158 | The language needs to enforce that all locks are acquired in the same order. 159 | 160 | Rust doesn't have a built-in mechanism which can provide this. 161 | 162 | Even if it could keep the locks in a certain order, using an `OrderedLock` type, we wouldn't be able to force you to use the mechanism. 163 | 164 | And you could create two `OrderedLock` types and get deadlock using that. 165 | 166 | --- 167 | 168 | ## Preventing Partial Allocation 169 | 170 | The language needs to enforce *total allocation*. 171 | 172 | Acquiring a new lock requires releasing all currently-held locks.
173 | 174 | **This will be our approach for now.** 175 | 176 | --- 177 | 178 | ## Summary: Background 179 | 180 | - Mutexes allow mutually exclusive access to memory 181 | - Rust mutexes use the borrow checker to protect data, rather than sections of code 182 | - The borrow checker ensures the mutex is used somewhat correctly 183 | - Deadlocks prevent threads from making progress because a lock prevents unlocking 184 | - We can prevent deadlocks using total allocation 185 | 186 | --- 187 | 188 | # Preventing Deadlock 189 | 190 | --- 191 | 192 | ## Goals: Preventing Deadlock 193 | 194 | - Show how the borrow checker can enforce total allocation 195 | - Lock multiple mutexes at the same time 196 | - Make sure that when we lock multiple mutexes, we don't lock the same one twice 197 | 198 | --- 199 | 200 | ## We have technology! (the borrow checker) 201 | 202 | ```rust 203 | use happylock::{ThreadKey, Mutex}; 204 | 205 | fn main() { 206 | // each thread can only have one thread key (that's why we unwrap) 207 | // ThreadKey is not Send, Copy, or Clone 208 | let key = ThreadKey::get().unwrap(); 209 | 210 | let mutex = Mutex::new(10); 211 | 212 | // locking a mutex requires the ThreadKey 213 | let mut guard = mutex.lock(key); 214 | // this means that a thread cannot lock more than one thing at a time 215 | 216 | println!("{}", *guard); 217 | 218 | // you can get the ThreadKey back by unlocking 219 | let key = Mutex::unlock(guard); 220 | } 221 | ``` 222 | 223 | --- 224 | 225 | ## Wait, I need two mutexes 226 | 227 | ```rust 228 | use happylock::{ThreadKey, Mutex, LockCollection}; 229 | 230 | fn main() { 231 | let key = ThreadKey::get().unwrap(); 232 | let mutex1 = Mutex::new(5); 233 | let mutex2 = Mutex::new(String::new()); 234 | 235 | let collection = LockCollection::new((mutex1, mutex2)); 236 | let guard = collection.lock(key); 237 | 238 | *guard.1 = format!("{}{}", *guard.1, guard.0); 239 | *guard.0 += 1; 240 | } 241 | ``` 242 | 243 | --- 244 | 245 | ## The Lockable API 246
| 247 | ```rust 248 | unsafe trait Lockable { 249 | type Guard; 250 | 251 | unsafe fn lock(&self) -> Self::Guard; 252 | 253 | unsafe fn try_lock(&self) -> Option; 254 | } 255 | ``` 256 | 257 | --- 258 | 259 | ## That's cool! Lemme try something 260 | 261 | ```rust 262 | use happylock::{ThreadKey, Mutex, LockCollection}; 263 | 264 | fn main() { 265 | let key = ThreadKey::get().unwrap(); 266 | let mutex1 = Mutex::new(5); 267 | 268 | // oh no. this will deadlock us 269 | let collection = LockCollection::new((&mutex1, &mutex1)); 270 | let guard = collection.lock(key); 271 | 272 | // the good news is: this doesn't compile 273 | } 274 | ``` 275 | 276 | --- 277 | 278 | ## LockCollection's stub 279 | 280 | ```rust 281 | impl LockCollection { 282 | pub fn new(data: L) -> Self { /***/ } 283 | } 284 | 285 | impl LockCollection<&L> { 286 | pub fn new_ref(data: &L) -> Self { /***/ } 287 | } 288 | 289 | impl LockCollection { 290 | // checks for duplicates 291 | pub fn try_new(data: L) -> Option { /***/ } 292 | 293 | pub unsafe fn new_unchecked(data: L) -> Self { /***/ } 294 | } 295 | ``` 296 | 297 | --- 298 | 299 | ## Changes to Lockable 300 | 301 | ```rust 302 | unsafe trait Lockable { 303 | // ... 
304 | 305 | fn get_ptrs(&self) -> Vec; 306 | } 307 | 308 | 309 | 310 | // not implemented for &L 311 | // ergo: the values within are guaranteed to be unique 312 | unsafe trait OwnedLockable: Lockable {} 313 | 314 | 315 | ``` 316 | 317 | --- 318 | 319 | ## Summary: Preventing Deadlock 320 | 321 | - Using an owned `ThreadKey` type, we can ensure total allocation 322 | - Using a `LockCollection`, we can lock multiple mutexes at the same time 323 | - The marker trait, `OwnedLockable`, can be guaranteed to not contain duplicates at compile-time 324 | - We can check for duplicates at runtime by checking the memory addresses of the locks 325 | 326 | --- 327 | 328 | 329 | # Optimizations to HappyLock 330 | 331 | --- 332 | 333 | ## Goals: Optimizations 334 | 335 | - Explain how exactly we check for duplicates 336 | - Naively implement `LockCollection` 337 | - Understand how live-locking hurts total allocation 338 | - Rewrite our collections to prevent cyclic wait, which will improve performance 339 | - Provide a way to create a user-defined locking order 340 | 341 | --- 342 | 343 | ## `contains_duplicates` (1st attempt) 344 | 345 | ```rust 346 | fn contains_duplicates(data: L) -> bool { 347 | let pointers = data.get_ptrs(); 348 | for (i, ptr1) in pointers.iter().enumerate() { 349 | for ptr2 in pointers.iter().take(i) { 350 | if ptr1 == ptr2 { 351 | return true; 352 | } 353 | } 354 | } 355 | 356 | false 357 | } 358 | ``` 359 | 360 | Time Complexity: O(n²) 361 | 362 | --- 363 | 364 | ## 2nd attempt: sorting the pointers 365 | 366 | ```rust 367 | fn contains_duplicates(data: L) -> bool { 368 | let mut pointers = data.get_ptrs(); 369 | pointers.sort_unstable(); 370 | pointers.windows(2).any(|w| w[0] == w[1]) 371 | } 372 | ``` 373 | 374 | Time Complexity: O(nlogn) 375 | 376 | --- 377 | 378 | ## RetryingLockCollection 379 | 380 | ```rust 381 | struct RetryingLockCollection { 382 | data: L, 383 | } 384 | ``` 385 | 386 | This will try to lock everything in the collection, and 
release everything if it fails. 387 | 388 | We keep trying until we've locked everything in a row. 389 | 390 | --- 391 | 392 | ## Problem: Live-locking 393 | 394 | Although this library is able to successfully prevent deadlocks, livelocks may still be an issue. 395 | 396 | 1. Thread 1 locks mutex 1 397 | 2. Thread 2 locks mutex 2 398 | 3. Thread 1 tries to lock mutex 2 and fails 399 | 4. Thread 2 tries to lock mutex 1 and fails 400 | 5. Thread 1 releases mutex 1 401 | 6. Thread 2 releases mutex 2 402 | 7. Repeat 403 | 404 | This pattern will probably end eventually, but we should really avoid it, for performance reasons. 405 | 406 | --- 407 | 408 | ## Solution: Switch to preventing cyclic wait 409 | 410 | - We're already sorting the pointers by memory address. 411 | - So let's keep that order! 412 | 413 | --- 414 | 415 | # New traits 416 | 417 | ```rust 418 | unsafe trait RawLock { 419 | unsafe fn lock(&self); 420 | unsafe fn try_lock(&self) -> bool; 421 | unsafe fn unlock(&self); 422 | } 423 | 424 | unsafe trait Lockable { // this is a bad name (LockGroup?) 425 | type Guard<'g> where Self: 'g; 426 | fn get_locks<'a>(&'a self, &mut Vec<&'a dyn RawLock>); 427 | unsafe fn guard<'g>(&'g self) -> Self::Guard<'g>; 428 | } 429 | ``` 430 | 431 | --- 432 | 433 | ## Solving self-referential data structures with heap allocation 434 | 435 | ```rust 436 | struct BoxedLockCollection { 437 | data: *const UnsafeCell, 438 | locks: Vec<&'static dyn RawLock> 439 | } 440 | ``` 441 | 442 | This is the default lock collection in HappyLock 443 | 444 | --- 445 | 446 | ## Providing our own lock ordering 447 | 448 | - What if we don't want to do any sorting? 
449 | - We could make a collection that allows us to define our own order 450 | - This requires that no other ordering is used for the locks in that collection 451 | - `OwnedLockable` can be used for this 452 | 453 | --- 454 | 455 | ## Solving self-referential data structures with owned lockables 456 | 457 | ```rust 458 | struct OwnedLockCollection { 459 | data: L, 460 | } 461 | ``` 462 | 463 | --- 464 | 465 | ## Summary: Optimizations 466 | 467 | - Livelocking causes perpetual locking and unlocking and retries 468 | - Locks are sorted by memory address before looking for duplicates 469 | - `BoxedLockCollection` sorts in the order of the memory address 470 | - `OwnedLockCollection` allows us to define our own lock order 471 | - `RetryingLockCollection` has the original retry behavior 472 | 473 | --- 474 | 475 | 476 | # Quality of Life Enhancements 477 | 478 | --- 479 | 480 | ## Goals: Quality of Life 481 | 482 | - Try to lock a mutex using a `&mut ThreadKey` 483 | - Take inspiration from scoped threads to ensure unlocking 484 | - Use marker traits to allow read-only locks 485 | - Use traits to implement `get_mut` and `into_inner` 486 | 487 | --- 488 | 489 | ## Keyable 490 | 491 | ```rust 492 | unsafe trait Keyable: Sealed {} 493 | unsafe impl Keyable for ThreadKey {} 494 | unsafe impl Keyable for &mut ThreadKey {} 495 | ``` 496 | 497 | This is helpful because you can get the thread key back immediately. 498 | 499 | ```rust 500 | impl Mutex { 501 | pub fn lock<'a, 'key, Key: Keyable + 'key>( 502 | &'a self, 503 | key: Key 504 | ) -> MutexGuard<'a, 'key, T, R, Key>; 505 | } 506 | ``` 507 | 508 | --- 509 | 510 | ## Keyable 511 | 512 | So conveniently, this compiles. 
513 | 514 | ```rust 515 | let mut key = ThreadKey::get().unwrap(); 516 | let guard = MUTEX1.lock(&mut key); 517 | 518 | // the first guard can no longer be used here 519 | let guard = MUTEX1.lock(&mut key); 520 | ``` 521 | 522 | --- 523 | 524 | ## Keyable 525 | 526 | The problem is that this also compiles 527 | 528 | ```rust 529 | let guard = MUTEX1.lock(&mut key); 530 | std::mem::forget(guard); 531 | 532 | // wait, the mutex is still locked! 533 | let guard = MUTEX1.lock(&mut key); 534 | // deadlocked now 535 | ``` 536 | 537 | --- 538 | 539 | ## Scoped Threads 540 | 541 | Let's take inspiration from scoped threads: 542 | 543 | ```rust 544 | fn scope<'env, F, T>(f: F) -> T 545 | where 546 | F: for<'scope> FnOnce(&'scope Scope<'scope, 'env>) -> T; 547 | ``` 548 | 549 | The `Drop` implementation of the `Scope` type will join all of the spawned threads. And because we only have a reference to the `Scope`, we'll never be able to `mem::forget` it. 550 | 551 | --- 552 | 553 | ## Scoped Threads 554 | 555 | ```rust 556 | let mut a = vec![1, 2, 3]; 557 | let mut x = 0; 558 | 559 | scope(|scope| { 560 | scope.spawn(|| { 561 | println!("we can borrow `a` here"); 562 | dbg!(a) 563 | }); 564 | scope.spawn(|| { 565 | println!("we can even borrow mutably"); 566 | println!("no other threads will use it"); 567 | x += a[0] + a[2]; 568 | }); 569 | println!("hello from the main thread"); 570 | }); 571 | ``` 572 | 573 | --- 574 | 575 | ## Scoped Locks 576 | 577 | Let's try the same thing for locks 578 | 579 | ```rust 580 | let mut key = ThreadKey::get().unwrap(); 581 | let mutex_plus_one = MUTEX.scoped_lock(&mut key, |guard: &mut i32| *guard + 1); 582 | ``` 583 | 584 | If you use scoped locks, then you can guarantee that locks will always be unlocked (assuming you never immediately abort the thread).
585 | 586 | --- 587 | 588 | ## Allowing reads on `LockCollection` 589 | 590 | ```rust 591 | // update RawLock 592 | unsafe trait RawLock { 593 | // * snip * 594 | unsafe fn read(&self); 595 | unsafe fn try_read(&self); 596 | unsafe fn unlock_read(&self); 597 | } 598 | 599 | // This trait is used to indicate that reading is actually useful 600 | unsafe trait Sharable: Lockable { 601 | type ReadGuard<'g> where Self: 'g; 602 | 603 | unsafe fn read_guard<'g>(&'g self) -> Self::ReadGuard<'g>; 604 | } 605 | ``` 606 | 607 | --- 608 | 609 | ## Not every lock can be read tho 610 | 611 | ```rust 612 | impl LockCollection { 613 | pub fn read<..>(&'g self, key: Key) -> LockGuard<..> { /* ... */ } 614 | 615 | pub fn try_read<..>(&'g self, key: Key) -> Option> { /* ... */ } 616 | 617 | pub fn unlock_read<..>(guard: LockGuard<..>) { /* ... */ } 618 | } 619 | ``` 620 | 621 | --- 622 | 623 | ## `LockableGetMut` 624 | 625 | ```rust 626 | fn Mutex::::get_mut(&mut self) -> &mut T // already exists in std 627 | // this is safe because a mutable reference means nobody else can access the lock 628 | 629 | trait LockableGetMut: Lockable { 630 | type Inner<'a>; 631 | 632 | fn get_mut(&mut self) -> Self::Inner<'_> 633 | } 634 | 635 | impl LockableGetMut for (A, B) { 636 | type Inner<'a> = (A::Inner<'a>, B::Inner<'b>); 637 | 638 | fn get_mut(&mut self) -> Self::Inner<'_> { 639 | (self.0.get_mut(), self.1.get_mut()) 640 | } 641 | } 642 | ``` 643 | --- 644 | 645 | ## Summary: QoL Enhancements 646 | 647 | - Using `&mut ThreadKey` won't work because someone could `mem::forget` a lock guard 648 | - To guarantee unlocking, we can use a scoped API 649 | - Marker traits can be used to indicate that a lock can be shared 650 | - Traits can also provide other functionality, like `get_mut` 651 | 652 | --- 653 | 654 | 655 | # The Future: Expanding Cyclic Wait 656 | 657 | --- 658 | 659 | ## Goals: Expanding Cyclic Wait 660 | 661 | - Show that we sometimes need partial allocation 662 | - Idealize how 
cyclic wait could be used in these scenarios 663 | - Design an API that could use typestate to support cyclic wait and partial allocation 664 | - Ensure that the `ThreadKey` is not used while multiple partially allocated guards are active 665 | 666 | --- 667 | 668 | ## Expanding Cyclic Wait 669 | 670 | > ... sometimes you need to lock an object to read its value and determine what should be locked next... is there a way to address it? 671 | 672 | ```rust 673 | let guard = m1.lock(key); 674 | if *guard == true { 675 | let key = Mutex::unlock(guard); 676 | let data = [&m1, &m2]; 677 | let collection = LockCollection::try_new(data).unwrap(); 678 | let guard = collection.lock(key); 679 | 680 | // m1 might no longer be true here... 681 | } 682 | ``` 683 | 684 | --- 685 | 686 | ## What I Really Want 687 | 688 | ```txt 689 | ordered locks: m1, m2, m3 690 | 691 | if m1 is true 692 | lock m2 and keep m1 locked 693 | else 694 | skip m2 and lock m3 695 | ``` 696 | 697 | We can specify lock orders using `OwnedLockCollection` 698 | 699 | Then we need an iterator over the collection to keep that ordering 700 | 701 | This will be hard to do with tuples (but is not impossible) 702 | 703 | --- 704 | 705 | ## Something like this 706 | 707 | ```rust 708 | let key = ThreadKey::get().unwrap(); 709 | let collection: OwnedLockCollection<(Vec, Vec); 710 | let iterator: LockIterator<(Vec, Vec)> = collection.locking_iter(key); 711 | let (guard, next: LockIterator>) = iterator.next(); 712 | 713 | unsafe trait IntoLockIterator: Lockable { 714 | type Next: Lockable; 715 | type Rest; 716 | 717 | unsafe fn next(&self) -> Self::Next; // must be called before `rest` 718 | fn rest(&self) -> Self::Rest; 719 | } 720 | 721 | unsafe impl IntoLockIterator for (A, B) { 722 | type Next = A; 723 | type Rest = B; 724 | 725 | unsafe fn next(&self) -> Self::Next { self.0 } 726 | 727 | unsafe fn rest(&self) -> Self::Rest { self.1 } 728 | } 729 | ``` 730 | 731 | --- 732 | 733 | ## Here are the helper
functions we'll need 734 | 735 | ```rust 736 | struct LockIterator; 737 | 738 | impl LockIterator { 739 | // locks the next item and moves on 740 | fn next(self) -> (Current::Next::Guard, LockIterator); 741 | 742 | // moves on without locking anything 743 | fn skip(self) -> LockIterator; 744 | 745 | // steps into the next item, allowing parts of it to be locked 746 | // For example, if i have LockIterator<(Vec, Vec)>, but only 747 | // want to lock parts of the first Vec, then I can step into it, 748 | // locking what i need to, and then exit. 749 | // This is the first use of LockIterator's second generic parameter 750 | fn step_into(self) -> LockIterator; 751 | 752 | // Once I'm done with my step_into, I can leave and move on 753 | fn exit(self) -> LockIterator; 754 | } 755 | ``` 756 | 757 | --- 758 | 759 | ## A Quick Problem with this Approach 760 | 761 | We're going to be returning a lot of guards. 762 | 763 | The `ThreadKey` needs to be held somewhere while the guards are active. 764 | 765 | **How do we ensure that the `ThreadKey` is not used again until all of the guards are dropped?** 766 | 767 | --- 768 | 769 | ## The Solution 770 | 771 | First, every guard needs to have an immutable reference to the `ThreadKey`. 772 | 773 | ```rust 774 | // this is the MutexGuard that doesn't hold a ThreadKey 775 | // We'll modify it to hold an immutable reference to the ThreadKey 776 | // ThreadKey cannot be moved or mutably referenced during this lifetime 777 | struct MutexRef<'a, 'key, T, Key: Keyable + 'key> 778 | struct RwLockReadRef<'a, 'key, T, Key: Keyable + 'key> 779 | struct RwLockWriteRef<'a, 'key, T, Key: Keyable + 'key> 780 | ``` 781 | 782 | --- 783 | 784 | ## The Solution 785 | 786 | But where do we store the `ThreadKey`? 
787 | 788 | ```rust 789 | // This type will hold the ThreadKey 790 | struct LockIteratorGuard<'a, L> { 791 | collection: &'a OwnedLockCollection, 792 | thread_key: ThreadKey, 793 | } 794 | ``` 795 | 796 | --- 797 | 798 | ## The Solution 799 | 800 | Then `LockIterator` must hold a reference to the guard. 801 | 802 | ```rust 803 | struct LockIterator<'a, Current, Rest = ()> 804 | ``` 805 | 806 | --- 807 | 808 | ## The Solution 809 | 810 | And we can get the first LockIterator by taking a mutable reference to the guard. 811 | 812 | ```rust 813 | LockIteratorGuard::next<'a>(&'a mut self) -> LockIterator<'a, L::Next> 814 | ``` 815 | 816 | --- 817 | 818 | ## Summary: Expanding Cyclic Wait 819 | 820 | - Partial allocation is needed in situations like skip lists 821 | - Cyclic wait can be used as a backup in these situations 822 | - Typestate allows us to iterate over the elements of a tuple 823 | - A `ThreadKey` must be stored somewhere while the partially allocated guards are active 824 | - Immutable references to the `ThreadKey` can prove that the `ThreadKey` is not used until *all* of the guards are dropped 825 | 826 | --- 827 | 828 | 829 | ## The End 830 | --------------------------------------------------------------------------------