├── .github └── workflows │ └── rust.yml ├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── SECURITY.md ├── benches └── mutex.rs ├── examples └── debug.rs ├── script └── doc-upload.cfg └── src ├── barrier.rs ├── lazy.rs ├── lib.rs ├── mutex.rs ├── mutex ├── fair.rs ├── spin.rs └── ticket.rs ├── once.rs ├── relax.rs └── rwlock.rs /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | permissions: read-all 13 | 14 | jobs: 15 | test: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | rust: [stable, beta, nightly] 21 | 22 | steps: 23 | - uses: actions/checkout@v3 24 | - name: Install Rust 25 | run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} 26 | - name: Run Tests 27 | run: cargo test --verbose 28 | - name: Build crate 29 | run: cargo build --all --all-features --all-targets 30 | - name: Catch missing feature flags 31 | if: startsWith(matrix.rust, 'nightly') 32 | run: cargo check -Z features=dev_dep 33 | - name: Install cargo-hack 34 | uses: taiki-e/install-action@cargo-hack 35 | - run: rustup target add thumbv7m-none-eabi 36 | - name: Ensure we don't depend on libstd 37 | run: cargo hack build --target thumbv7m-none-eabi --no-dev-deps --no-default-features 38 | 39 | semver: 40 | runs-on: ubuntu-latest 41 | steps: 42 | - name: Checkout 43 | uses: actions/checkout@v4 44 | - name: Check semver 45 | uses: obi1kenobi/cargo-semver-checks-action@v2 46 | 47 | msrv: 48 | runs-on: ubuntu-latest 49 | strategy: 50 | matrix: 51 | version: [1.60.0] 52 | steps: 53 | - uses: actions/checkout@v3 54 | - name: Install Rust 55 | run: rustup update ${{ matrix.version }} && rustup default ${{ matrix.version }} 56 | - name: Check MSRV 57 | run: cargo check --all --all-features 58 | 59 | miri: 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v3 63 | - name: Install Rust 64 | run: rustup toolchain install nightly --component miri && rustup default nightly 65 | - run: cargo miri test 66 | env: 67 | MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation -Zmiri-ignore-leaks 68 | RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | **/target/ 3 | **/Cargo.lock 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | 3 | rust: 4 | - stable 5 | - beta 6 | - nightly 7 | 8 | sudo: false 9 | 10 | notifications: 11 | email: 12 | on_success: never 13 | on_failure: always 14 | 15 | before_script: 16 | - | 17 | pip install 'travis-cargo<0.2' --user && 18 | export PATH=$HOME/.local/bin:$PATH 19 | 20 | script: 21 | - travis-cargo build 22 | - travis-cargo test 23 | - travis-cargo doc -- --no-deps 24 | # TODO: Reenable later 25 | #- rustdoc --test README.md -L target/debug 26 | 27 | after_success: 28 | - curl https://mvdnes.github.io/rust-docs/travis-doc-upload.sh | bash 29 | 30 | env: 31 | global: 32 | # override the default `--features unstable` used by travis-cargo 33 | # since unstable is activated by default 34 | - TRAVIS_CARGO_NIGHTLY_FEATURE="" 35 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | # Unreleased 9 | 10 | ### Added 11 | 12 | ### Changed 13 | 14 | ### Fixed 15 | 16 | # [0.10.0] - 2025-03-26 17 | 18 | ### Added 19 | 20 | - `Mutex::try_lock_weak` 21 | - `RwLock::try_write_weak` 22 | - `RwLock::try_upgrade_weak` 23 | 24 | ### Changed 25 | 26 | - Updated MSRV to 1.60 27 | - Use `dep:` syntax in Cargo.toml 28 | - `portable_atomic` feature has been renamed to `portable-atomic`, for consistency. 29 | 30 | ### Fixed 31 | 32 | # [0.9.8] - 2023-04-03 33 | 34 | ### Fixed 35 | 36 | - Unsoundness in `Once::try_call_once` caused by an `Err(_)` result 37 | 38 | # [0.9.7] - 2023-03-27 39 | 40 | ### Fixed 41 | 42 | - Relaxed accidentally restricted `Send`/`Sync` bounds for `Mutex` guards 43 | 44 | # [0.9.6] - 2023-03-13 45 | 46 | ### Fixed 47 | 48 | - Relaxed accidentally restricted `Send`/`Sync` bounds for `RwLock` guards 49 | 50 | # [0.9.5] - 2023-02-07 51 | 52 | ### Added 53 | 54 | - `FairMutex`, a new mutex implementation that reduces writer starvation. 55 | - A MSRV policy: Rust 1.38 is currently required 56 | 57 | ### Changed 58 | 59 | - The crate's CI now has full MIRI integration, further improving the confidence you can have in the implementation. 60 | 61 | ### Fixed 62 | 63 | - Ensured that the crate's abstractions comply with stacked borrows rules. 64 | - Unsoundness in the `RwLock` that could be triggered via a reader overflow 65 | - Relaxed various `Send`/`Sync` bound requirements to make the crate more flexible 66 | 67 | # [0.9.4] - 2022-07-14 68 | 69 | ### Fixed 70 | 71 | - Fixed unsoundness in `RwLock` on reader overflow 72 | - Relaxed `Send`/`Sync` bounds for `SpinMutex` and `TicketMutex` (doesn't affect `Mutex` itself) 73 | 74 | # [0.9.3] - 2022-04-17 75 | 76 | ### Added 77 | 78 | - Implemented `Default` for `Once` 79 | - `Once::try_call_once` 80 | 81 | ### Fixed 82 | 83 | - Fixed bug that caused `Once::call_once` to incorrectly fail 84 | 85 | # [0.9.2] - 2021-07-09 86 | 87 | ### Changed 88 | 89 | - Improved `Once` performance by reducing the memory footprint of internal state to one byte 90 | 91 | ### Fixed 92 | 93 | - Improved performance of `Once` by relaxing ordering guarantees and removing redundant checks 94 | 95 | # [0.9.1] - 2021-06-21 96 | 97 | ### Added 98 | 99 | - Default type parameter on `Once` for better ergonomics 100 | 101 | # [0.9.0] - 2021-03-18 102 | 103 | ### Changed 104 | 105 | - Placed all major API features behind feature flags 106 | 107 | ### Fixed 108 | 109 | - A compilation bug with the `lock_api` feature 110 | 111 | # [0.8.0] - 2021-03-15 112 | 113 | ### Added 114 | 115 | - `Once::get_unchecked` 116 | - `RelaxStrategy` trait with type parameter on all locks to support switching between relax strategies 117 | 118 | ### Changed 119 | 120 | - `lock_api1` feature is now named `lock_api` 121 | 122 | # [0.7.1] - 2021-01-12 123 | 124 | ### Fixed 125 | 126 | - Prevented `Once` leaking the inner value upon drop 127 | 128 | # [0.7.0] - 2020-10-18 129 | 130 | ### Added 131 | 132 | - `Once::initialized` 133 | - `Once::get_mut` 134 | - `Once::try_into_inner` 135 | - `Once::poll` 136 | - `RwLock`, `Mutex` and `Once` now implement `From` 137 | - `Lazy` type for lazy initialization 138 | - `TicketMutex`, an alternative mutex implementation 139 | - `std` feature flag to enable thread yielding instead of spinning 140 | - `Mutex::is_locked`/`SpinMutex::is_locked`/`TicketMutex::is_locked` 141 | - `Barrier` 142 | 143 | ### Changed 144 | 145 | - `Once::wait` now spins even if initialization has not yet started 146 | - `Guard::leak` is now an associated function instead of a method 147 | - Improved the performance of `SpinMutex` by relaxing unnecessarily conservative 148 | ordering requirements 149 | 150 | # [0.6.0] - 2020-10-08 151 | 152 | ### Added 153 | 154 | - More dynamic `Send`/`Sync` bounds for lock guards 155 | - `lock_api` compatibility 156 | - `Guard::leak` methods 157 | - `RwLock::reader_count` and `RwLock::writer_count` 158 | - `Display` implementation for guard types 159 | 160 | ### Changed 161 | 162 | - Made `Debug` impls of lock guards just show the inner type like `std` 163 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "spin" 3 | version = "0.10.0" 4 | authors = [ 5 | "Mathijs van de Nes ", 6 | "John Ericson ", 7 | "Joshua Barretto ", 8 | ] 9 | license = "MIT" 10 | repository = "https://github.com/zesterer/spin-rs.git" 11 | keywords = ["spinlock", "mutex", "rwlock"] 12 | description = "Spin-based synchronization primitives" 13 | rust-version = "1.60" 14 | 15 | [dependencies] 16 | lock_api_crate = { package = "lock_api", version = "0.4", optional = true } 17 | # Enable require-cas feature to provide a better error message if the end user forgets to use the cfg or feature. 18 | portable-atomic = { version = "1.3", optional = true, default-features = false, features = ["require-cas"] } 19 | 20 | [features] 21 | default = ["lock_api", "mutex", "spin_mutex", "rwlock", "once", "lazy", "barrier"] 22 | 23 | # Enables `Mutex`. Must be used with either `spin_mutex` or `use_ticket_mutex`. 24 | mutex = [] 25 | 26 | # Enables `SpinMutex` and the default spin mutex implementation for `Mutex`. 27 | spin_mutex = ["mutex"] 28 | 29 | # Enables `TicketMutex`. 30 | ticket_mutex = ["mutex"] 31 | 32 | # Enables `FairMutex`. 33 | fair_mutex = ["mutex"] 34 | 35 | # Enables the non-default ticket mutex implementation for `Mutex`. 36 | use_ticket_mutex = ["mutex", "ticket_mutex"] 37 | 38 | # Enables `RwLock`. 39 | rwlock = [] 40 | 41 | # Enables `Once`. 42 | once = [] 43 | 44 | # Enables `Lazy`. 45 | lazy = ["once"] 46 | 47 | # Enables `Barrier`. Because this feature uses `mutex`, either `spin_mutex` or `use_ticket_mutex` must be enabled. 48 | barrier = ["mutex"] 49 | 50 | # Enables `lock_api`-compatible types that use the primitives in this crate internally. 51 | lock_api = ["dep:lock_api_crate"] 52 | 53 | # Enables std-only features such as yield-relaxing. 54 | std = [] 55 | 56 | # Use the `portable-atomic` crate to support platforms without native atomic operations. 57 | # The `portable_atomic_unsafe_assume_single_core` cfg or `critical-section` feature 58 | # of `portable-atomic` crate must also be set by the final binary crate. 59 | # See the documentation for the `portable-atomic` crate for more information 60 | # with some requirements for no-std build: 61 | # https://github.com/taiki-e/portable-atomic#optional-features 62 | portable-atomic = ["dep:portable-atomic"] 63 | 64 | # Deprecated alias: 65 | portable_atomic = ["portable-atomic"] 66 | 67 | [package.metadata.docs.rs] 68 | all-features = true 69 | rustdoc-args = ["--cfg", "docsrs"] 70 | 71 | [dev-dependencies] 72 | criterion = "0.4" 73 | 74 | [[bench]] 75 | name = "mutex" 76 | harness = false 77 | required-features = ["ticket_mutex"] 78 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Mathijs van de Nes 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # spin-rs 2 | 3 | [![Crates.io version](https://img.shields.io/crates/v/spin.svg)](https://crates.io/crates/spin) 4 | [![docs.rs](https://docs.rs/spin/badge.svg)](https://docs.rs/spin/) 5 | [![Build Status](https://travis-ci.org/mvdnes/spin-rs.svg)](https://travis-ci.org/mvdnes/spin-rs) 6 | 7 | Spin-based synchronization primitives. 8 | 9 | This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock) 10 | versions of the primitives in `std::sync`. Because synchronization is done 11 | through spinning, the primitives are suitable for use in `no_std` environments. 12 | 13 | Before deciding to use `spin`, we recommend reading 14 | [this superb blog post](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html) 15 | by [@matklad](https://github.com/matklad/) that discusses the pros and cons of 16 | spinlocks. If you have access to `std`, it's likely that the primitives in 17 | `std::sync` will serve you better except in very specific circumstances. 18 | 19 | ## Features 20 | 21 | - `Mutex`, `RwLock`, `Once`, `Lazy` and `Barrier` equivalents 22 | - Support for `no_std` environments 23 | - [`lock_api`](https://crates.io/crates/lock_api) compatibility 24 | - Upgradeable `RwLock` guards 25 | - Guards can be sent and shared between threads 26 | - Guard leaking 27 | - Ticket locks 28 | - Different strategies for dealing with contention 29 | 30 | ## Usage 31 | 32 | Include the following under the `[dependencies]` section in your `Cargo.toml` file. 33 | 34 | ```toml 35 | spin = "x.y" 36 | ``` 37 | 38 | ## Example 39 | 40 | When calling `lock` on a `Mutex` you will get a guard value that provides access 41 | to the data. When this guard is dropped, the mutex will become available again. 42 | 43 | ```rust 44 | extern crate spin; 45 | use std::{sync::Arc, thread}; 46 | 47 | fn main() { 48 | let counter = Arc::new(spin::Mutex::new(0)); 49 | 50 | let thread = thread::spawn({ 51 | let counter = counter.clone(); 52 | move || { 53 | for _ in 0..100 { 54 | *counter.lock() += 1; 55 | } 56 | } 57 | }); 58 | 59 | for _ in 0..100 { 60 | *counter.lock() += 1; 61 | } 62 | 63 | thread.join().unwrap(); 64 | 65 | assert_eq!(*counter.lock(), 200); 66 | } 67 | ``` 68 | 69 | ## Feature flags 70 | 71 | The crate comes with a few feature flags that you may wish to use. 72 | 73 | - `mutex` enables the `Mutex` type. 74 | 75 | - `spin_mutex` enables the `SpinMutex` type. 76 | 77 | - `ticket_mutex` enables the `TicketMutex` type. 78 | 79 | - `use_ticket_mutex` switches to a ticket lock for the implementation of `Mutex`. This 80 | is recommended only on targets for which ordinary spinning locks perform very badly 81 | because it will change the implementation used by other crates that depend on `spin`. 82 | 83 | - `rwlock` enables the `RwLock` type. 84 | 85 | - `once` enables the `Once` type. 86 | 87 | - `lazy` enables the `Lazy` type. 88 | 89 | - `barrier` enables the `Barrier` type. 90 | 91 | - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api) 92 | 93 | - `std` enables support for thread yielding instead of spinning. 94 | 95 | - `portable-atomic` enables usage of the `portable-atomic` crate 96 | to support platforms without native atomic operations (Cortex-M0, etc.). 97 | The `portable_atomic_unsafe_assume_single_core` or `critical-section` feature 98 | of `portable-atomic` crate must also be set by the final binary crate. 99 | See the documentation for the `portable-atomic` crate for more information 100 | with some requirements for no-std build: 101 | https://github.com/taiki-e/portable-atomic#optional-features 102 | 103 | ## Remarks 104 | 105 | It is often desirable to have a lock shared between threads. Wrapping the lock in an 106 | `std::sync::Arc` is route through which this might be achieved. 107 | 108 | Locks provide zero-overhead access to their data when accessed through a mutable 109 | reference by using their `get_mut` methods. 110 | 111 | The behaviour of these lock is similar to their namesakes in `std::sync`. they 112 | differ on the following: 113 | 114 | - Locks will not be poisoned in case of failure. 115 | - Threads will not yield to the OS scheduler when encounter a lock that cannot be 116 | accessed. Instead, they will 'spin' in a busy loop until the lock becomes available. 117 | 118 | Many of the feature flags listed above are enabled by default. If you're writing a 119 | library, we recommend disabling those that you don't use to avoid increasing compilation 120 | time for your crate's users. You can do this like so: 121 | 122 | ``` 123 | [dependencies] 124 | spin = { version = "x.y", default-features = false, features = [...] } 125 | ``` 126 | 127 | ## Minimum Safe Rust Version (MSRV) 128 | 129 | This crate is guaranteed to compile on a Minimum Safe Rust Version (MSRV) of 1.60.0 and above. 130 | This version will not be changed without a minor version bump. 131 | 132 | ## License 133 | 134 | `spin` is distributed under the MIT License, (See `LICENSE`). 135 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Security updates are applied only to the latest release. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. 10 | 11 | Please disclose it at our [security advisory](https://github.com/mvdnes/spin-rs/security/advisories/new). 12 | 13 | This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. 14 | -------------------------------------------------------------------------------- /benches/mutex.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate criterion; 3 | 4 | use criterion::{black_box, Bencher, Criterion}; 5 | use std::{ops::DerefMut, sync::Arc}; 6 | 7 | trait Mutex: Send + Sync + 'static { 8 | type Guard<'a>: DerefMut 9 | where 10 | Self: 'a; 11 | fn new(x: T) -> Self; 12 | fn lock(&self) -> Self::Guard<'_>; 13 | } 14 | 15 | impl Mutex for spin::mutex::SpinMutex { 16 | type Guard<'a> = spin::mutex::SpinMutexGuard<'a, T> where Self: 'a; 17 | fn new(x: T) -> Self { 18 | spin::mutex::SpinMutex::new(x) 19 | } 20 | fn lock(&self) -> Self::Guard<'_> { 21 | self.lock() 22 | } 23 | } 24 | 25 | impl Mutex for spin::mutex::TicketMutex { 26 | type Guard<'a> = spin::mutex::TicketMutexGuard<'a, T> where Self: 'a; 27 | fn new(x: T) -> Self { 28 | spin::mutex::TicketMutex::new(x) 29 | } 30 | fn lock(&self) -> Self::Guard<'_> { 31 | self.lock() 32 | } 33 | } 34 | 35 | impl Mutex for std::sync::Mutex { 36 | type Guard<'a> = std::sync::MutexGuard<'a, T> where Self: 'a; 37 | fn new(x: T) -> Self { 38 | std::sync::Mutex::new(x) 39 | } 40 | fn lock(&self) -> Self::Guard<'_> { 41 | self.lock().unwrap() 42 | } 43 | } 44 | 45 | fn gen_create>(b: &mut Bencher) { 46 | b.iter(|| { 47 | let n = black_box(42); 48 | M::new(n) 49 | }); 50 | } 51 | 52 | fn gen_lock_unlock>(b: &mut Bencher) { 53 | let m = M::new(0); 54 | b.iter(|| { 55 | let mut m = m.lock(); 56 | *m = m.wrapping_add(1); 57 | drop(m); 58 | }); 59 | } 60 | 61 | fn gen_lock_unlock_read_contention>(b: &mut Bencher) { 62 | let m = Arc::new(M::new(0)); 63 | let thread = std::thread::spawn({ 64 | let m = m.clone(); 65 | move || { 66 | while Arc::strong_count(&m) > 1 { 67 | for _ in 0..1000 { 68 | black_box(*m.lock()); 69 | } 70 | } 71 | } 72 | }); 73 | b.iter(|| { 74 | let mut m = m.lock(); 75 | *m = m.wrapping_add(1); 76 | drop(m); 77 | }); 78 | drop(m); 79 | thread.join().unwrap(); 80 | } 81 | 82 | fn gen_lock_unlock_write_contention>(b: &mut Bencher) { 83 | let m = Arc::new(M::new(0)); 84 | let thread = std::thread::spawn({ 85 | let m = m.clone(); 86 | move || { 87 | while Arc::strong_count(&m) > 1 { 88 | for _ in 0..1000 { 89 | let mut m = m.lock(); 90 | *m = m.wrapping_add(1); 91 | drop(m); 92 | } 93 | } 94 | } 95 | }); 96 | b.iter(|| { 97 | let mut m = m.lock(); 98 | *m = m.wrapping_add(1); 99 | drop(m); 100 | }); 101 | drop(m); 102 | thread.join().unwrap(); 103 | } 104 | 105 | fn create(b: &mut Criterion) { 106 | b.bench_function("create-spin-spinmutex", |b| { 107 | gen_create::>(b) 108 | }); 109 | b.bench_function("create-spin-ticketmutex", |b| { 110 | gen_create::>(b) 111 | }); 112 | b.bench_function("create-std", |b| gen_create::>(b)); 113 | } 114 | 115 | fn lock_unlock(b: &mut Criterion) { 116 | b.bench_function("lock_unlock-spin-spinmutex", |b| { 117 | gen_lock_unlock::>(b) 118 | }); 119 | b.bench_function("lock_unlock-spin-ticketmutex", |b| { 120 | gen_lock_unlock::>(b) 121 | }); 122 | b.bench_function("lock_unlock-std", |b| { 123 | gen_lock_unlock::>(b) 124 | }); 125 | } 126 | 127 | fn lock_unlock_read_contention(b: &mut Criterion) { 128 | b.bench_function("lock_unlock_read_contention-spin-spinmutex", |b| { 129 | gen_lock_unlock_read_contention::>(b) 130 | }); 131 | b.bench_function("lock_unlock_read_contention-spin-ticketmutex", |b| { 132 | gen_lock_unlock_read_contention::>(b) 133 | }); 134 | b.bench_function("lock_unlock_read_contention-std", |b| { 135 | gen_lock_unlock_read_contention::>(b) 136 | }); 137 | } 138 | 139 | fn lock_unlock_write_contention(b: &mut Criterion) { 140 | b.bench_function("lock_unlock_write_contention-spin-spinmutex", |b| { 141 | gen_lock_unlock_write_contention::>(b) 142 | }); 143 | b.bench_function("lock_unlock_write_contention-spin-ticketmutex", |b| { 144 | gen_lock_unlock_write_contention::>(b) 145 | }); 146 | b.bench_function("lock_unlock_write_contention-std", |b| { 147 | gen_lock_unlock_write_contention::>(b) 148 | }); 149 | } 150 | 151 | criterion_group!( 152 | mutex, 153 | create, 154 | lock_unlock, 155 | lock_unlock_read_contention, 156 | lock_unlock_write_contention, 157 | ); 158 | 159 | criterion_main!(mutex); 160 | -------------------------------------------------------------------------------- /examples/debug.rs: -------------------------------------------------------------------------------- 1 | extern crate spin; 2 | 3 | fn main() { 4 | let mutex = spin::Mutex::new(42); 5 | println!("{:?}", mutex); 6 | { 7 | let x = mutex.lock(); 8 | println!("{:?}, {:?}", mutex, *x); 9 | } 10 | 11 | let rwlock = spin::RwLock::new(42); 12 | println!("{:?}", rwlock); 13 | { 14 | let x = rwlock.read(); 15 | println!("{:?}, {:?}", rwlock, *x); 16 | } 17 | { 18 | let x = rwlock.write(); 19 | println!("{:?}, {:?}", rwlock, *x); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /script/doc-upload.cfg: -------------------------------------------------------------------------------- 1 | PROJECT_NAME=spin-rs 2 | DOCS_REPO=mvdnes/rust-docs.git 3 | DOC_RUST_VERSION=stable 4 | -------------------------------------------------------------------------------- /src/barrier.rs: -------------------------------------------------------------------------------- 1 | //! Synchronization primitive allowing multiple threads to synchronize the 2 | //! beginning of some computation. 3 | //! 4 | //! Implementation adapted from the 'Barrier' type of the standard library. See: 5 | //! 6 | //! 7 | //! Copyright 2014 The Rust Project Developers. See the COPYRIGHT 8 | //! file at the top-level directory of this distribution and at 9 | //! . 10 | //! 11 | //! Licensed under the Apache License, Version 2.0 > or the MIT license 13 | //! >, at your 14 | //! option. This file may not be copied, modified, or distributed 15 | //! except according to those terms. 16 | 17 | use crate::{mutex::Mutex, RelaxStrategy, Spin}; 18 | 19 | /// A primitive that synchronizes the execution of multiple threads. 20 | /// 21 | /// # Example 22 | /// 23 | /// ``` 24 | /// use spin; 25 | /// use std::sync::Arc; 26 | /// use std::thread; 27 | /// 28 | /// let mut handles = Vec::with_capacity(10); 29 | /// let barrier = Arc::new(spin::Barrier::new(10)); 30 | /// for _ in 0..10 { 31 | /// let c = barrier.clone(); 32 | /// // The same messages will be printed together. 33 | /// // You will NOT see any interleaving. 34 | /// handles.push(thread::spawn(move|| { 35 | /// println!("before wait"); 36 | /// c.wait(); 37 | /// println!("after wait"); 38 | /// })); 39 | /// } 40 | /// // Wait for other threads to finish. 41 | /// for handle in handles { 42 | /// handle.join().unwrap(); 43 | /// } 44 | /// ``` 45 | pub struct Barrier { 46 | lock: Mutex, 47 | num_threads: usize, 48 | } 49 | 50 | // The inner state of a double barrier 51 | struct BarrierState { 52 | count: usize, 53 | generation_id: usize, 54 | } 55 | 56 | /// A `BarrierWaitResult` is returned by [`wait`] when all threads in the [`Barrier`] 57 | /// have rendezvoused. 58 | /// 59 | /// [`wait`]: struct.Barrier.html#method.wait 60 | /// [`Barrier`]: struct.Barrier.html 61 | /// 62 | /// # Examples 63 | /// 64 | /// ``` 65 | /// use spin; 66 | /// 67 | /// let barrier = spin::Barrier::new(1); 68 | /// let barrier_wait_result = barrier.wait(); 69 | /// ``` 70 | pub struct BarrierWaitResult(bool); 71 | 72 | impl Barrier { 73 | /// Blocks the current thread until all threads have rendezvoused here. 74 | /// 75 | /// Barriers are re-usable after all threads have rendezvoused once, and can 76 | /// be used continuously. 77 | /// 78 | /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that 79 | /// returns `true` from [`is_leader`] when returning from this function, and 80 | /// all other threads will receive a result that will return `false` from 81 | /// [`is_leader`]. 82 | /// 83 | /// [`BarrierWaitResult`]: struct.BarrierWaitResult.html 84 | /// [`is_leader`]: struct.BarrierWaitResult.html#method.is_leader 85 | /// 86 | /// # Examples 87 | /// 88 | /// ``` 89 | /// use spin; 90 | /// use std::sync::Arc; 91 | /// use std::thread; 92 | /// 93 | /// let mut handles = Vec::with_capacity(10); 94 | /// let barrier = Arc::new(spin::Barrier::new(10)); 95 | /// for _ in 0..10 { 96 | /// let c = barrier.clone(); 97 | /// // The same messages will be printed together. 98 | /// // You will NOT see any interleaving. 99 | /// handles.push(thread::spawn(move|| { 100 | /// println!("before wait"); 101 | /// c.wait(); 102 | /// println!("after wait"); 103 | /// })); 104 | /// } 105 | /// // Wait for other threads to finish. 106 | /// for handle in handles { 107 | /// handle.join().unwrap(); 108 | /// } 109 | /// ``` 110 | pub fn wait(&self) -> BarrierWaitResult { 111 | let mut lock = self.lock.lock(); 112 | lock.count += 1; 113 | 114 | if lock.count < self.num_threads { 115 | // not the leader 116 | let local_gen = lock.generation_id; 117 | 118 | while local_gen == lock.generation_id && lock.count < self.num_threads { 119 | drop(lock); 120 | R::relax(); 121 | lock = self.lock.lock(); 122 | } 123 | BarrierWaitResult(false) 124 | } else { 125 | // this thread is the leader, 126 | // and is responsible for incrementing the generation 127 | lock.count = 0; 128 | lock.generation_id = lock.generation_id.wrapping_add(1); 129 | BarrierWaitResult(true) 130 | } 131 | } 132 | } 133 | 134 | impl Barrier { 135 | /// Creates a new barrier that can block a given number of threads. 136 | /// 137 | /// A barrier will block `n`-1 threads which call [`wait`] and then wake up 138 | /// all threads at once when the `n`th thread calls [`wait`]. A Barrier created 139 | /// with n = 0 will behave identically to one created with n = 1. 140 | /// 141 | /// [`wait`]: #method.wait 142 | /// 143 | /// # Examples 144 | /// 145 | /// ``` 146 | /// use spin; 147 | /// 148 | /// let barrier = spin::Barrier::new(10); 149 | /// ``` 150 | pub const fn new(n: usize) -> Self { 151 | Self { 152 | lock: Mutex::new(BarrierState { 153 | count: 0, 154 | generation_id: 0, 155 | }), 156 | num_threads: n, 157 | } 158 | } 159 | } 160 | 161 | impl BarrierWaitResult { 162 | /// Returns whether this thread from [`wait`] is the "leader thread". 163 | /// 164 | /// Only one thread will have `true` returned from their result, all other 165 | /// threads will have `false` returned. 166 | /// 167 | /// [`wait`]: struct.Barrier.html#method.wait 168 | /// 169 | /// # Examples 170 | /// 171 | /// ``` 172 | /// use spin; 173 | /// 174 | /// let barrier = spin::Barrier::new(1); 175 | /// let barrier_wait_result = barrier.wait(); 176 | /// println!("{:?}", barrier_wait_result.is_leader()); 177 | /// ``` 178 | pub fn is_leader(&self) -> bool { 179 | self.0 180 | } 181 | } 182 | 183 | #[cfg(test)] 184 | mod tests { 185 | use std::prelude::v1::*; 186 | 187 | use std::sync::mpsc::{channel, TryRecvError}; 188 | use std::sync::Arc; 189 | use std::thread; 190 | 191 | type Barrier = super::Barrier; 192 | 193 | fn use_barrier(n: usize, barrier: Arc) { 194 | let (tx, rx) = channel(); 195 | 196 | let mut ts = Vec::new(); 197 | for _ in 0..n - 1 { 198 | let c = barrier.clone(); 199 | let tx = tx.clone(); 200 | ts.push(thread::spawn(move || { 201 | tx.send(c.wait().is_leader()).unwrap(); 202 | })); 203 | } 204 | 205 | // At this point, all spawned threads should be blocked, 206 | // so we shouldn't get anything from the port 207 | assert!(match rx.try_recv() { 208 | Err(TryRecvError::Empty) => true, 209 | _ => false, 210 | }); 211 | 212 | let mut leader_found = barrier.wait().is_leader(); 213 | 214 | // Now, the barrier is cleared and we should get data. 215 | for _ in 0..n - 1 { 216 | if rx.recv().unwrap() { 217 | assert!(!leader_found); 218 | leader_found = true; 219 | } 220 | } 221 | assert!(leader_found); 222 | 223 | for t in ts { 224 | t.join().unwrap(); 225 | } 226 | } 227 | 228 | #[test] 229 | fn test_barrier() { 230 | const N: usize = 10; 231 | 232 | let barrier = Arc::new(Barrier::new(N)); 233 | 234 | use_barrier(N, barrier.clone()); 235 | 236 | // use barrier twice to ensure it is reusable 237 | use_barrier(N, barrier.clone()); 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /src/lazy.rs: -------------------------------------------------------------------------------- 1 | //! Synchronization primitives for lazy evaluation. 2 | //! 3 | //! Implementation adapted from the `SyncLazy` type of the standard library. See: 4 | //! 5 | 6 | use crate::{once::Once, RelaxStrategy, Spin}; 7 | use core::{cell::Cell, fmt, ops::Deref}; 8 | 9 | /// A value which is initialized on the first access. 10 | /// 11 | /// This type is a thread-safe `Lazy`, and can be used in statics. 12 | /// 13 | /// # Examples 14 | /// 15 | /// ``` 16 | /// use std::collections::HashMap; 17 | /// use spin::Lazy; 18 | /// 19 | /// static HASHMAP: Lazy> = Lazy::new(|| { 20 | /// println!("initializing"); 21 | /// let mut m = HashMap::new(); 22 | /// m.insert(13, "Spica".to_string()); 23 | /// m.insert(74, "Hoyten".to_string()); 24 | /// m 25 | /// }); 26 | /// 27 | /// fn main() { 28 | /// println!("ready"); 29 | /// std::thread::spawn(|| { 30 | /// println!("{:?}", HASHMAP.get(&13)); 31 | /// }).join().unwrap(); 32 | /// println!("{:?}", HASHMAP.get(&74)); 33 | /// 34 | /// // Prints: 35 | /// // ready 36 | /// // initializing 37 | /// // Some("Spica") 38 | /// // Some("Hoyten") 39 | /// } 40 | /// ``` 41 | pub struct Lazy T, R = Spin> { 42 | cell: Once, 43 | init: Cell>, 44 | } 45 | 46 | impl fmt::Debug for Lazy { 47 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 48 | let mut d = f.debug_tuple("Lazy"); 49 | let d = if let Some(x) = self.cell.get() { 50 | d.field(&x) 51 | } else { 52 | d.field(&format_args!("")) 53 | }; 54 | d.finish() 55 | } 56 | } 57 | 58 | // We never create a `&F` from a `&Lazy` so it is fine 59 | // to not impl `Sync` for `F` 60 | // we do create a `&mut Option` in `force`, but this is 61 | // properly synchronized, so it only happens once 62 | // so it also does not contribute to this impl. 63 | unsafe impl Sync for Lazy where Once: Sync {} 64 | // auto-derived `Send` impl is OK. 65 | 66 | impl Lazy { 67 | /// Creates a new lazy value with the given initializing 68 | /// function. 69 | pub const fn new(f: F) -> Self { 70 | Self { 71 | cell: Once::new(), 72 | init: Cell::new(Some(f)), 73 | } 74 | } 75 | /// Retrieves a mutable pointer to the inner data. 76 | /// 77 | /// This is especially useful when interfacing with low level code or FFI where the caller 78 | /// explicitly knows that it has exclusive access to the inner data. Note that reading from 79 | /// this pointer is UB until initialized or directly written to. 80 | pub fn as_mut_ptr(&self) -> *mut T { 81 | self.cell.as_mut_ptr() 82 | } 83 | } 84 | 85 | impl T, R: RelaxStrategy> Lazy { 86 | /// Forces the evaluation of this lazy value and 87 | /// returns a reference to result. This is equivalent 88 | /// to the `Deref` impl, but is explicit. 89 | /// 90 | /// # Examples 91 | /// 92 | /// ``` 93 | /// use spin::Lazy; 94 | /// 95 | /// let lazy = Lazy::new(|| 92); 96 | /// 97 | /// assert_eq!(Lazy::force(&lazy), &92); 98 | /// assert_eq!(&*lazy, &92); 99 | /// ``` 100 | pub fn force(this: &Self) -> &T { 101 | this.cell.call_once(|| match this.init.take() { 102 | Some(f) => f(), 103 | None => panic!("Lazy instance has previously been poisoned"), 104 | }) 105 | } 106 | } 107 | 108 | impl T, R: RelaxStrategy> Deref for Lazy { 109 | type Target = T; 110 | 111 | fn deref(&self) -> &T { 112 | Self::force(self) 113 | } 114 | } 115 | 116 | impl Default for Lazy T, R> { 117 | /// Creates a new lazy value using `Default` as the initializing function. 118 | fn default() -> Self { 119 | Self::new(T::default) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(all(not(feature = "std"), not(test)), no_std)] 2 | #![cfg_attr(docsrs, feature(doc_cfg))] 3 | #![deny(missing_docs)] 4 | 5 | //! This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock) versions of the 6 | //! primitives in `std::sync` and `std::lazy`. Because synchronization is done through spinning, 7 | //! the primitives are suitable for use in `no_std` environments. 8 | //! 9 | //! # Features 10 | //! 11 | //! - `Mutex`, `RwLock`, `Once`/`SyncOnceCell`, and `SyncLazy` equivalents 12 | //! 13 | //! - Support for `no_std` environments 14 | //! 15 | //! - [`lock_api`](https://crates.io/crates/lock_api) compatibility 16 | //! 17 | //! - Upgradeable `RwLock` guards 18 | //! 19 | //! - Guards can be sent and shared between threads 20 | //! 21 | //! - Guard leaking 22 | //! 23 | //! - Ticket locks 24 | //! 25 | //! - Different strategies for dealing with contention 26 | //! 27 | //! # Relationship with `std::sync` 28 | //! 29 | //! While `spin` is not a drop-in replacement for `std::sync` (and 30 | //! [should not be considered as such](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html)) 31 | //! an effort is made to keep this crate reasonably consistent with `std::sync`. 32 | //! 33 | //! Many of the types defined in this crate have 'additional capabilities' when compared to `std::sync`: 34 | //! 35 | //! - Because spinning does not depend on the thread-driven model of `std::sync`, guards ([`MutexGuard`], 36 | //! [`RwLockReadGuard`], [`RwLockWriteGuard`], etc.) may be sent and shared between threads. 37 | //! 38 | //! - [`RwLockUpgradableGuard`] supports being upgraded into a [`RwLockWriteGuard`]. 39 | //! 40 | //! - Guards support [leaking](https://doc.rust-lang.org/nomicon/leaking.html). 41 | //! 42 | //! - [`Once`] owns the value returned by its `call_once` initializer. 43 | //! 44 | //! - [`RwLock`] supports counting readers and writers. 45 | //! 46 | //! Conversely, the types in this crate do not have some of the features `std::sync` has: 47 | //! 48 | //! - Locks do not track [panic poisoning](https://doc.rust-lang.org/nomicon/poisoning.html). 49 | //! 50 | //! ## Feature flags 51 | //! 52 | //! The crate comes with a few feature flags that you may wish to use. 53 | //! 54 | //! - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api) 55 | //! 56 | //! - `ticket_mutex` uses a ticket lock for the implementation of `Mutex` 57 | //! 58 | //! - `fair_mutex` enables a fairer implementation of `Mutex` that uses eventual fairness to avoid 59 | //! starvation 60 | //! 61 | //! - `std` enables support for thread yielding instead of spinning 62 | //! 63 | //! - `portable-atomic` enables usage of the `portable-atomic` crate 64 | //! to support platforms without native atomic operations (Cortex-M0, etc.). 65 | //! See the documentation for the `portable-atomic` crate for more information 66 | //! with some requirements for no-std build: 67 | //! https://github.com/taiki-e/portable-atomic#optional-features 68 | 69 | 70 | #[cfg(any(test, feature = "std"))] 71 | extern crate core; 72 | 73 | #[cfg(feature = "portable-atomic")] 74 | extern crate portable_atomic; 75 | 76 | #[cfg(not(feature = "portable-atomic"))] 77 | use core::sync::atomic; 78 | #[cfg(feature = "portable-atomic")] 79 | use portable_atomic as atomic; 80 | 81 | #[cfg(feature = "barrier")] 82 | #[cfg_attr(docsrs, doc(cfg(feature = "barrier")))] 83 | pub mod barrier; 84 | #[cfg(feature = "lazy")] 85 | #[cfg_attr(docsrs, doc(cfg(feature = "lazy")))] 86 | pub mod lazy; 87 | #[cfg(feature = "mutex")] 88 | #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))] 89 | pub mod mutex; 90 | #[cfg(feature = "once")] 91 | #[cfg_attr(docsrs, doc(cfg(feature = "once")))] 92 | pub mod once; 93 | pub mod relax; 94 | #[cfg(feature = "rwlock")] 95 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 96 | pub mod rwlock; 97 | 98 | #[cfg(feature = "mutex")] 99 | #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))] 100 | pub use mutex::MutexGuard; 101 | #[cfg(feature = "std")] 102 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 103 | pub use relax::Yield; 104 | pub use relax::{RelaxStrategy, Spin}; 105 | #[cfg(feature = "rwlock")] 106 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 107 | pub use rwlock::RwLockReadGuard; 108 | 109 | // Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different 110 | // relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible 111 | // but sadly adding a default type parameter is *still* a breaking change in Rust (for understandable reasons). 112 | 113 | /// A primitive that synchronizes the execution of multiple threads. See [`barrier::Barrier`] for documentation. 114 | /// 115 | /// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax 116 | /// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path. 117 | #[cfg(feature = "barrier")] 118 | #[cfg_attr(docsrs, doc(cfg(feature = "barrier")))] 119 | pub type Barrier = crate::barrier::Barrier; 120 | 121 | /// A value which is initialized on the first access. See [`lazy::Lazy`] for documentation. 122 | /// 123 | /// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax 124 | /// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path. 125 | #[cfg(feature = "lazy")] 126 | #[cfg_attr(docsrs, doc(cfg(feature = "lazy")))] 127 | pub type Lazy T> = crate::lazy::Lazy; 128 | 129 | /// A primitive that synchronizes the execution of multiple threads. See [`mutex::Mutex`] for documentation. 130 | /// 131 | /// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax 132 | /// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path. 133 | #[cfg(feature = "mutex")] 134 | #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))] 135 | pub type Mutex = crate::mutex::Mutex; 136 | 137 | /// A primitive that provides lazy one-time initialization. See [`once::Once`] for documentation. 138 | /// 139 | /// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax 140 | /// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path. 141 | #[cfg(feature = "once")] 142 | #[cfg_attr(docsrs, doc(cfg(feature = "once")))] 143 | pub type Once = crate::once::Once; 144 | 145 | /// A lock that provides data access to either one writer or many readers. See [`rwlock::RwLock`] for documentation. 146 | /// 147 | /// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax 148 | /// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path. 149 | #[cfg(feature = "rwlock")] 150 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 151 | pub type RwLock = crate::rwlock::RwLock; 152 | 153 | /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. See 154 | /// [`rwlock::RwLockUpgradableGuard`] for documentation. 155 | /// 156 | /// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax 157 | /// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path. 158 | #[cfg(feature = "rwlock")] 159 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 160 | pub type RwLockUpgradableGuard<'a, T> = crate::rwlock::RwLockUpgradableGuard<'a, T>; 161 | 162 | /// A guard that provides mutable data access. See [`rwlock::RwLockWriteGuard`] for documentation. 163 | /// 164 | /// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax 165 | /// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path. 166 | #[cfg(feature = "rwlock")] 167 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 168 | pub type RwLockWriteGuard<'a, T> = crate::rwlock::RwLockWriteGuard<'a, T>; 169 | 170 | /// Spin synchronisation primitives, but compatible with [`lock_api`](https://crates.io/crates/lock_api). 171 | #[cfg(feature = "lock_api")] 172 | #[cfg_attr(docsrs, doc(cfg(feature = "lock_api")))] 173 | pub mod lock_api { 174 | /// A lock that provides mutually exclusive data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 175 | #[cfg(feature = "mutex")] 176 | #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))] 177 | pub type Mutex = lock_api_crate::Mutex, T>; 178 | 179 | /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 180 | #[cfg(feature = "mutex")] 181 | #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))] 182 | pub type MutexGuard<'a, T> = lock_api_crate::MutexGuard<'a, crate::Mutex<()>, T>; 183 | 184 | /// A lock that provides data access to either one writer or many readers (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 185 | #[cfg(feature = "rwlock")] 186 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 187 | pub type RwLock = lock_api_crate::RwLock, T>; 188 | 189 | /// A guard that provides immutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 190 | #[cfg(feature = "rwlock")] 191 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 192 | pub type RwLockReadGuard<'a, T> = lock_api_crate::RwLockReadGuard<'a, crate::RwLock<()>, T>; 193 | 194 | /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 195 | #[cfg(feature = "rwlock")] 196 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 197 | pub type RwLockWriteGuard<'a, T> = lock_api_crate::RwLockWriteGuard<'a, crate::RwLock<()>, T>; 198 | 199 | /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`] (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 200 | #[cfg(feature = "rwlock")] 201 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 202 | pub type RwLockUpgradableReadGuard<'a, T> = 203 | lock_api_crate::RwLockUpgradableReadGuard<'a, crate::RwLock<()>, T>; 204 | 205 | /// A guard returned by [RwLockReadGuard::map] that provides immutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 206 | #[cfg(feature = "rwlock")] 207 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 208 | pub type MappedRwLockReadGuard<'a, T> = 209 | lock_api_crate::MappedRwLockReadGuard<'a, crate::RwLock<()>, T>; 210 | 211 | /// A guard returned by [RwLockWriteGuard::map] that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)). 212 | #[cfg(feature = "rwlock")] 213 | #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))] 214 | pub type MappedRwLockWriteGuard<'a, T> = 215 | lock_api_crate::MappedRwLockWriteGuard<'a, crate::RwLock<()>, T>; 216 | } 217 | 218 | /// In the event of an invalid operation, it's best to abort the current process. 219 | #[cfg(feature = "fair_mutex")] 220 | fn abort() -> ! { 221 | #[cfg(not(feature = "std"))] 222 | { 223 | // Panicking while panicking is defined by Rust to result in an abort. 224 | struct Panic; 225 | 226 | impl Drop for Panic { 227 | fn drop(&mut self) { 228 | panic!("aborting due to invalid operation"); 229 | } 230 | } 231 | 232 | let _panic = Panic; 233 | panic!("aborting due to invalid operation"); 234 | } 235 | 236 | #[cfg(feature = "std")] 237 | { 238 | std::process::abort(); 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /src/mutex.rs: -------------------------------------------------------------------------------- 1 | //! Locks that have the same behaviour as a mutex. 2 | //! 3 | //! The [`Mutex`] in the root of the crate, can be configured using the `ticket_mutex` feature. 4 | //! If it's enabled, [`TicketMutex`] and [`TicketMutexGuard`] will be re-exported as [`Mutex`] 5 | //! and [`MutexGuard`], otherwise the [`SpinMutex`] and guard will be re-exported. 6 | //! 7 | //! `ticket_mutex` is disabled by default. 8 | //! 9 | //! [`Mutex`]: ./struct.Mutex.html 10 | //! [`MutexGuard`]: ./struct.MutexGuard.html 11 | //! [`TicketMutex`]: ./ticket/struct.TicketMutex.html 12 | //! [`TicketMutexGuard`]: ./ticket/struct.TicketMutexGuard.html 13 | //! [`SpinMutex`]: ./spin/struct.SpinMutex.html 14 | //! [`SpinMutexGuard`]: ./spin/struct.SpinMutexGuard.html 15 | 16 | #[cfg(feature = "spin_mutex")] 17 | #[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))] 18 | pub mod spin; 19 | #[cfg(feature = "spin_mutex")] 20 | #[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))] 21 | pub use self::spin::{SpinMutex, SpinMutexGuard}; 22 | 23 | #[cfg(feature = "ticket_mutex")] 24 | #[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))] 25 | pub mod ticket; 26 | #[cfg(feature = "ticket_mutex")] 27 | #[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))] 28 | pub use self::ticket::{TicketMutex, TicketMutexGuard}; 29 | 30 | #[cfg(feature = "fair_mutex")] 31 | #[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))] 32 | pub mod fair; 33 | #[cfg(feature = "fair_mutex")] 34 | #[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))] 35 | pub use self::fair::{FairMutex, FairMutexGuard, Starvation}; 36 | 37 | use crate::{RelaxStrategy, Spin}; 38 | use core::{ 39 | fmt, 40 | ops::{Deref, DerefMut}, 41 | }; 42 | 43 | #[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))] 44 | compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required."); 45 | 46 | #[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))] 47 | type InnerMutex = self::spin::SpinMutex; 48 | #[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))] 49 | type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>; 50 | 51 | #[cfg(feature = "use_ticket_mutex")] 52 | type InnerMutex = self::ticket::TicketMutex; 53 | #[cfg(feature = "use_ticket_mutex")] 54 | type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>; 55 | 56 | /// A spin-based lock providing mutually exclusive access to data. 57 | /// 58 | /// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or 59 | /// `ticket_mutex` feature flag is enabled. 60 | /// 61 | /// # Example 62 | /// 63 | /// ``` 64 | /// use spin; 65 | /// 66 | /// let lock = spin::Mutex::new(0); 67 | /// 68 | /// // Modify the data 69 | /// *lock.lock() = 2; 70 | /// 71 | /// // Read the data 72 | /// let answer = *lock.lock(); 73 | /// assert_eq!(answer, 2); 74 | /// ``` 75 | /// 76 | /// # Thread safety example 77 | /// 78 | /// ``` 79 | /// use spin; 80 | /// use std::sync::{Arc, Barrier}; 81 | /// 82 | /// let thread_count = 1000; 83 | /// let spin_mutex = Arc::new(spin::Mutex::new(0)); 84 | /// 85 | /// // We use a barrier to ensure the readout happens after all writing 86 | /// let barrier = Arc::new(Barrier::new(thread_count + 1)); 87 | /// 88 | /// # let mut ts = Vec::new(); 89 | /// for _ in 0..thread_count { 90 | /// let my_barrier = barrier.clone(); 91 | /// let my_lock = spin_mutex.clone(); 92 | /// # let t = 93 | /// std::thread::spawn(move || { 94 | /// let mut guard = my_lock.lock(); 95 | /// *guard += 1; 96 | /// 97 | /// // Release the lock to prevent a deadlock 98 | /// drop(guard); 99 | /// my_barrier.wait(); 100 | /// }); 101 | /// # ts.push(t); 102 | /// } 103 | /// 104 | /// barrier.wait(); 105 | /// 106 | /// let answer = { *spin_mutex.lock() }; 107 | /// assert_eq!(answer, thread_count); 108 | /// 109 | /// # for t in ts { 110 | /// # t.join().unwrap(); 111 | /// # } 112 | /// ``` 113 | pub struct Mutex { 114 | inner: InnerMutex, 115 | } 116 | 117 | unsafe impl Sync for Mutex {} 118 | unsafe impl Send for Mutex {} 119 | 120 | /// A generic guard that will protect some data access and 121 | /// uses either a ticket lock or a normal spin mutex. 122 | /// 123 | /// For more info see [`TicketMutexGuard`] or [`SpinMutexGuard`]. 124 | /// 125 | /// [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html 126 | /// [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html 127 | pub struct MutexGuard<'a, T: 'a + ?Sized> { 128 | inner: InnerMutexGuard<'a, T>, 129 | } 130 | 131 | impl Mutex { 132 | /// Creates a new [`Mutex`] wrapping the supplied data. 133 | /// 134 | /// # Example 135 | /// 136 | /// ``` 137 | /// use spin::Mutex; 138 | /// 139 | /// static MUTEX: Mutex<()> = Mutex::new(()); 140 | /// 141 | /// fn demo() { 142 | /// let lock = MUTEX.lock(); 143 | /// // do something with lock 144 | /// drop(lock); 145 | /// } 146 | /// ``` 147 | #[inline(always)] 148 | pub const fn new(value: T) -> Self { 149 | Self { 150 | inner: InnerMutex::new(value), 151 | } 152 | } 153 | 154 | /// Consumes this [`Mutex`] and unwraps the underlying data. 155 | /// 156 | /// # Example 157 | /// 158 | /// ``` 159 | /// let lock = spin::Mutex::new(42); 160 | /// assert_eq!(42, lock.into_inner()); 161 | /// ``` 162 | #[inline(always)] 163 | pub fn into_inner(self) -> T { 164 | self.inner.into_inner() 165 | } 166 | } 167 | 168 | impl Mutex { 169 | /// Locks the [`Mutex`] and returns a guard that permits access to the inner data. 170 | /// 171 | /// The returned value may be dereferenced for data access 172 | /// and the lock will be dropped when the guard falls out of scope. 173 | /// 174 | /// ``` 175 | /// let lock = spin::Mutex::new(0); 176 | /// { 177 | /// let mut data = lock.lock(); 178 | /// // The lock is now locked and the data can be accessed 179 | /// *data += 1; 180 | /// // The lock is implicitly dropped at the end of the scope 181 | /// } 182 | /// ``` 183 | #[inline(always)] 184 | pub fn lock(&self) -> MutexGuard { 185 | MutexGuard { 186 | inner: self.inner.lock(), 187 | } 188 | } 189 | } 190 | 191 | impl Mutex { 192 | /// Returns `true` if the lock is currently held. 193 | /// 194 | /// # Safety 195 | /// 196 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' 197 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. 198 | #[inline(always)] 199 | pub fn is_locked(&self) -> bool { 200 | self.inner.is_locked() 201 | } 202 | 203 | /// Force unlock this [`Mutex`]. 204 | /// 205 | /// # Safety 206 | /// 207 | /// This is *extremely* unsafe if the lock is not held by the current 208 | /// thread. However, this can be useful in some instances for exposing the 209 | /// lock to FFI that doesn't know how to deal with RAII. 210 | #[inline(always)] 211 | pub unsafe fn force_unlock(&self) { 212 | self.inner.force_unlock() 213 | } 214 | 215 | /// Try to lock this [`Mutex`], returning a lock guard if successful. 216 | /// 217 | /// # Example 218 | /// 219 | /// ``` 220 | /// let lock = spin::Mutex::new(42); 221 | /// 222 | /// let maybe_guard = lock.try_lock(); 223 | /// assert!(maybe_guard.is_some()); 224 | /// 225 | /// // `maybe_guard` is still held, so the second call fails 226 | /// let maybe_guard2 = lock.try_lock(); 227 | /// assert!(maybe_guard2.is_none()); 228 | /// ``` 229 | #[inline(always)] 230 | pub fn try_lock(&self) -> Option> { 231 | self.inner 232 | .try_lock() 233 | .map(|guard| MutexGuard { inner: guard }) 234 | } 235 | 236 | /// Returns a mutable reference to the underlying data. 237 | /// 238 | /// Since this call borrows the [`Mutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust, 239 | /// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such, 240 | /// this is a 'zero-cost' operation. 241 | /// 242 | /// # Example 243 | /// 244 | /// ``` 245 | /// let mut lock = spin::Mutex::new(0); 246 | /// *lock.get_mut() = 10; 247 | /// assert_eq!(*lock.lock(), 10); 248 | /// ``` 249 | #[inline(always)] 250 | pub fn get_mut(&mut self) -> &mut T { 251 | self.inner.get_mut() 252 | } 253 | } 254 | 255 | impl fmt::Debug for Mutex { 256 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 257 | fmt::Debug::fmt(&self.inner, f) 258 | } 259 | } 260 | 261 | impl Default for Mutex { 262 | fn default() -> Self { 263 | Self::new(Default::default()) 264 | } 265 | } 266 | 267 | impl From for Mutex { 268 | fn from(data: T) -> Self { 269 | Self::new(data) 270 | } 271 | } 272 | 273 | impl<'a, T: ?Sized> MutexGuard<'a, T> { 274 | /// Leak the lock guard, yielding a mutable reference to the underlying data. 275 | /// 276 | /// Note that this function will permanently lock the original [`Mutex`]. 277 | /// 278 | /// ``` 279 | /// let mylock = spin::Mutex::new(0); 280 | /// 281 | /// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock()); 282 | /// 283 | /// *data = 1; 284 | /// assert_eq!(*data, 1); 285 | /// ``` 286 | #[inline(always)] 287 | pub fn leak(this: Self) -> &'a mut T { 288 | InnerMutexGuard::leak(this.inner) 289 | } 290 | } 291 | 292 | impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> { 293 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 294 | fmt::Debug::fmt(&**self, f) 295 | } 296 | } 297 | 298 | impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> { 299 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 300 | fmt::Display::fmt(&**self, f) 301 | } 302 | } 303 | 304 | impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { 305 | type Target = T; 306 | fn deref(&self) -> &T { 307 | &*self.inner 308 | } 309 | } 310 | 311 | impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { 312 | fn deref_mut(&mut self) -> &mut T { 313 | &mut *self.inner 314 | } 315 | } 316 | 317 | #[cfg(feature = "lock_api")] 318 | unsafe impl lock_api_crate::RawMutex for Mutex<(), R> { 319 | type GuardMarker = lock_api_crate::GuardSend; 320 | 321 | const INIT: Self = Self::new(()); 322 | 323 | fn lock(&self) { 324 | // Prevent guard destructor running 325 | core::mem::forget(Self::lock(self)); 326 | } 327 | 328 | fn try_lock(&self) -> bool { 329 | // Prevent guard destructor running 330 | Self::try_lock(self).map(core::mem::forget).is_some() 331 | } 332 | 333 | unsafe fn unlock(&self) { 334 | self.force_unlock(); 335 | } 336 | 337 | fn is_locked(&self) -> bool { 338 | self.inner.is_locked() 339 | } 340 | } 341 | -------------------------------------------------------------------------------- /src/mutex/fair.rs: -------------------------------------------------------------------------------- 1 | //! A spinning mutex with a fairer unlock algorithm. 2 | //! 3 | //! This mutex is similar to the `SpinMutex` in that it uses spinning to avoid 4 | //! context switches. However, it uses a fairer unlock algorithm that avoids 5 | //! starvation of threads that are waiting for the lock. 6 | 7 | use crate::{ 8 | atomic::{AtomicUsize, Ordering}, 9 | RelaxStrategy, Spin, 10 | }; 11 | use core::{ 12 | cell::UnsafeCell, 13 | fmt, 14 | marker::PhantomData, 15 | mem::ManuallyDrop, 16 | ops::{Deref, DerefMut}, 17 | }; 18 | 19 | // The lowest bit of `lock` is used to indicate whether the mutex is locked or not. The rest of the bits are used to 20 | // store the number of starving threads. 21 | const LOCKED: usize = 1; 22 | const STARVED: usize = 2; 23 | 24 | /// Number chosen by fair roll of the dice, adjust as needed. 25 | const STARVATION_SPINS: usize = 1024; 26 | 27 | /// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data, but with a fairer 28 | /// algorithm. 29 | /// 30 | /// # Example 31 | /// 32 | /// ``` 33 | /// use spin; 34 | /// 35 | /// let lock = spin::mutex::FairMutex::<_>::new(0); 36 | /// 37 | /// // Modify the data 38 | /// *lock.lock() = 2; 39 | /// 40 | /// // Read the data 41 | /// let answer = *lock.lock(); 42 | /// assert_eq!(answer, 2); 43 | /// ``` 44 | /// 45 | /// # Thread safety example 46 | /// 47 | /// ``` 48 | /// use spin; 49 | /// use std::sync::{Arc, Barrier}; 50 | /// 51 | /// let thread_count = 1000; 52 | /// let spin_mutex = Arc::new(spin::mutex::FairMutex::<_>::new(0)); 53 | /// 54 | /// // We use a barrier to ensure the readout happens after all writing 55 | /// let barrier = Arc::new(Barrier::new(thread_count + 1)); 56 | /// 57 | /// for _ in (0..thread_count) { 58 | /// let my_barrier = barrier.clone(); 59 | /// let my_lock = spin_mutex.clone(); 60 | /// std::thread::spawn(move || { 61 | /// let mut guard = my_lock.lock(); 62 | /// *guard += 1; 63 | /// 64 | /// // Release the lock to prevent a deadlock 65 | /// drop(guard); 66 | /// my_barrier.wait(); 67 | /// }); 68 | /// } 69 | /// 70 | /// barrier.wait(); 71 | /// 72 | /// let answer = { *spin_mutex.lock() }; 73 | /// assert_eq!(answer, thread_count); 74 | /// ``` 75 | pub struct FairMutex { 76 | phantom: PhantomData, 77 | pub(crate) lock: AtomicUsize, 78 | data: UnsafeCell, 79 | } 80 | 81 | /// A guard that provides mutable data access. 82 | /// 83 | /// When the guard falls out of scope it will release the lock. 84 | pub struct FairMutexGuard<'a, T: ?Sized + 'a> { 85 | lock: &'a AtomicUsize, 86 | data: *mut T, 87 | } 88 | 89 | /// A handle that indicates that we have been trying to acquire the lock for a while. 90 | /// 91 | /// This handle is used to prevent starvation. 92 | pub struct Starvation<'a, T: ?Sized + 'a, R> { 93 | lock: &'a FairMutex, 94 | } 95 | 96 | /// Indicates whether a lock was rejected due to the lock being held by another thread or due to starvation. 97 | #[derive(Debug)] 98 | pub enum LockRejectReason { 99 | /// The lock was rejected due to the lock being held by another thread. 100 | Locked, 101 | 102 | /// The lock was rejected due to starvation. 103 | Starved, 104 | } 105 | 106 | // Same unsafe impls as `std::sync::Mutex` 107 | unsafe impl Sync for FairMutex {} 108 | unsafe impl Send for FairMutex {} 109 | 110 | unsafe impl Sync for FairMutexGuard<'_, T> {} 111 | unsafe impl Send for FairMutexGuard<'_, T> {} 112 | 113 | impl FairMutex { 114 | /// Creates a new [`FairMutex`] wrapping the supplied data. 115 | /// 116 | /// # Example 117 | /// 118 | /// ``` 119 | /// use spin::mutex::FairMutex; 120 | /// 121 | /// static MUTEX: FairMutex<()> = FairMutex::<_>::new(()); 122 | /// 123 | /// fn demo() { 124 | /// let lock = MUTEX.lock(); 125 | /// // do something with lock 126 | /// drop(lock); 127 | /// } 128 | /// ``` 129 | #[inline(always)] 130 | pub const fn new(data: T) -> Self { 131 | FairMutex { 132 | lock: AtomicUsize::new(0), 133 | data: UnsafeCell::new(data), 134 | phantom: PhantomData, 135 | } 136 | } 137 | 138 | /// Consumes this [`FairMutex`] and unwraps the underlying data. 139 | /// 140 | /// # Example 141 | /// 142 | /// ``` 143 | /// let lock = spin::mutex::FairMutex::<_>::new(42); 144 | /// assert_eq!(42, lock.into_inner()); 145 | /// ``` 146 | #[inline(always)] 147 | pub fn into_inner(self) -> T { 148 | // We know statically that there are no outstanding references to 149 | // `self` so there's no need to lock. 150 | let FairMutex { data, .. } = self; 151 | data.into_inner() 152 | } 153 | 154 | /// Returns a mutable pointer to the underlying data. 155 | /// 156 | /// This is mostly meant to be used for applications which require manual unlocking, but where 157 | /// storing both the lock and the pointer to the inner data gets inefficient. 158 | /// 159 | /// # Example 160 | /// ``` 161 | /// let lock = spin::mutex::FairMutex::<_>::new(42); 162 | /// 163 | /// unsafe { 164 | /// core::mem::forget(lock.lock()); 165 | /// 166 | /// assert_eq!(lock.as_mut_ptr().read(), 42); 167 | /// lock.as_mut_ptr().write(58); 168 | /// 169 | /// lock.force_unlock(); 170 | /// } 171 | /// 172 | /// assert_eq!(*lock.lock(), 58); 173 | /// 174 | /// ``` 175 | #[inline(always)] 176 | pub fn as_mut_ptr(&self) -> *mut T { 177 | self.data.get() 178 | } 179 | } 180 | 181 | impl FairMutex { 182 | /// Locks the [`FairMutex`] and returns a guard that permits access to the inner data. 183 | /// 184 | /// The returned value may be dereferenced for data access 185 | /// and the lock will be dropped when the guard falls out of scope. 186 | /// 187 | /// ``` 188 | /// let lock = spin::mutex::FairMutex::<_>::new(0); 189 | /// { 190 | /// let mut data = lock.lock(); 191 | /// // The lock is now locked and the data can be accessed 192 | /// *data += 1; 193 | /// // The lock is implicitly dropped at the end of the scope 194 | /// } 195 | /// ``` 196 | #[inline(always)] 197 | pub fn lock(&self) -> FairMutexGuard { 198 | // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock` 199 | // when called in a loop. 200 | let mut spins = 0; 201 | while self 202 | .lock 203 | .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed) 204 | .is_err() 205 | { 206 | // Wait until the lock looks unlocked before retrying 207 | while self.is_locked() { 208 | R::relax(); 209 | 210 | // If we've been spinning for a while, switch to a fairer strategy that will prevent 211 | // newer users from stealing our lock from us. 212 | if spins > STARVATION_SPINS { 213 | return self.starve().lock(); 214 | } 215 | spins += 1; 216 | } 217 | } 218 | 219 | FairMutexGuard { 220 | lock: &self.lock, 221 | data: unsafe { &mut *self.data.get() }, 222 | } 223 | } 224 | } 225 | 226 | impl FairMutex { 227 | /// Returns `true` if the lock is currently held. 228 | /// 229 | /// # Safety 230 | /// 231 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' 232 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. 233 | #[inline(always)] 234 | pub fn is_locked(&self) -> bool { 235 | self.lock.load(Ordering::Relaxed) & LOCKED != 0 236 | } 237 | 238 | /// Force unlock this [`FairMutex`]. 239 | /// 240 | /// # Safety 241 | /// 242 | /// This is *extremely* unsafe if the lock is not held by the current 243 | /// thread. However, this can be useful in some instances for exposing the 244 | /// lock to FFI that doesn't know how to deal with RAII. 245 | #[inline(always)] 246 | pub unsafe fn force_unlock(&self) { 247 | self.lock.fetch_and(!LOCKED, Ordering::Release); 248 | } 249 | 250 | /// Try to lock this [`FairMutex`], returning a lock guard if successful. 251 | /// 252 | /// # Example 253 | /// 254 | /// ``` 255 | /// let lock = spin::mutex::FairMutex::<_>::new(42); 256 | /// 257 | /// let maybe_guard = lock.try_lock(); 258 | /// assert!(maybe_guard.is_some()); 259 | /// 260 | /// // `maybe_guard` is still held, so the second call fails 261 | /// let maybe_guard2 = lock.try_lock(); 262 | /// assert!(maybe_guard2.is_none()); 263 | /// ``` 264 | #[inline(always)] 265 | pub fn try_lock(&self) -> Option> { 266 | self.try_lock_starver().ok() 267 | } 268 | 269 | /// Tries to lock this [`FairMutex`] and returns a result that indicates whether the lock was 270 | /// rejected due to a starver or not. 271 | #[inline(always)] 272 | pub fn try_lock_starver(&self) -> Result, LockRejectReason> { 273 | match self 274 | .lock 275 | .compare_exchange(0, LOCKED, Ordering::Acquire, Ordering::Relaxed) 276 | .unwrap_or_else(|x| x) 277 | { 278 | 0 => Ok(FairMutexGuard { 279 | lock: &self.lock, 280 | data: unsafe { &mut *self.data.get() }, 281 | }), 282 | LOCKED => Err(LockRejectReason::Locked), 283 | _ => Err(LockRejectReason::Starved), 284 | } 285 | } 286 | 287 | /// Indicates that the current user has been waiting for the lock for a while 288 | /// and that the lock should yield to this thread over a newly arriving thread. 289 | /// 290 | /// # Example 291 | /// 292 | /// ``` 293 | /// let lock = spin::mutex::FairMutex::<_>::new(42); 294 | /// 295 | /// // Lock the mutex to simulate it being used by another user. 296 | /// let guard1 = lock.lock(); 297 | /// 298 | /// // Try to lock the mutex. 299 | /// let guard2 = lock.try_lock(); 300 | /// assert!(guard2.is_none()); 301 | /// 302 | /// // Wait for a while. 303 | /// wait_for_a_while(); 304 | /// 305 | /// // We are now starved, indicate as such. 306 | /// let starve = lock.starve(); 307 | /// 308 | /// // Once the lock is released, another user trying to lock it will 309 | /// // fail. 310 | /// drop(guard1); 311 | /// let guard3 = lock.try_lock(); 312 | /// assert!(guard3.is_none()); 313 | /// 314 | /// // However, we will be able to lock it. 315 | /// let guard4 = starve.try_lock(); 316 | /// assert!(guard4.is_ok()); 317 | /// 318 | /// # fn wait_for_a_while() {} 319 | /// ``` 320 | pub fn starve(&self) -> Starvation<'_, T, R> { 321 | // Add a new starver to the state. 322 | if self.lock.fetch_add(STARVED, Ordering::Relaxed) > (core::isize::MAX - 1) as usize { 323 | // In the event of a potential lock overflow, abort. 324 | crate::abort(); 325 | } 326 | 327 | Starvation { lock: self } 328 | } 329 | 330 | /// Returns a mutable reference to the underlying data. 331 | /// 332 | /// Since this call borrows the [`FairMutex`] mutably, and a mutable reference is guaranteed to be exclusive in 333 | /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As 334 | /// such, this is a 'zero-cost' operation. 335 | /// 336 | /// # Example 337 | /// 338 | /// ``` 339 | /// let mut lock = spin::mutex::FairMutex::<_>::new(0); 340 | /// *lock.get_mut() = 10; 341 | /// assert_eq!(*lock.lock(), 10); 342 | /// ``` 343 | #[inline(always)] 344 | pub fn get_mut(&mut self) -> &mut T { 345 | // We know statically that there are no other references to `self`, so 346 | // there's no need to lock the inner mutex. 347 | unsafe { &mut *self.data.get() } 348 | } 349 | } 350 | 351 | impl fmt::Debug for FairMutex { 352 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 353 | struct LockWrapper<'a, T: ?Sized + fmt::Debug>(Option>); 354 | 355 | impl fmt::Debug for LockWrapper<'_, T> { 356 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 357 | match &self.0 { 358 | Some(guard) => fmt::Debug::fmt(guard, f), 359 | None => f.write_str(""), 360 | } 361 | } 362 | } 363 | 364 | f.debug_struct("FairMutex") 365 | .field("data", &LockWrapper(self.try_lock())) 366 | .finish() 367 | } 368 | } 369 | 370 | impl Default for FairMutex { 371 | fn default() -> Self { 372 | Self::new(Default::default()) 373 | } 374 | } 375 | 376 | impl From for FairMutex { 377 | fn from(data: T) -> Self { 378 | Self::new(data) 379 | } 380 | } 381 | 382 | impl<'a, T: ?Sized> FairMutexGuard<'a, T> { 383 | /// Leak the lock guard, yielding a mutable reference to the underlying data. 384 | /// 385 | /// Note that this function will permanently lock the original [`FairMutex`]. 386 | /// 387 | /// ``` 388 | /// let mylock = spin::mutex::FairMutex::<_>::new(0); 389 | /// 390 | /// let data: &mut i32 = spin::mutex::FairMutexGuard::leak(mylock.lock()); 391 | /// 392 | /// *data = 1; 393 | /// assert_eq!(*data, 1); 394 | /// ``` 395 | #[inline(always)] 396 | pub fn leak(this: Self) -> &'a mut T { 397 | // Use ManuallyDrop to avoid stacked-borrow invalidation 398 | let mut this = ManuallyDrop::new(this); 399 | // We know statically that only we are referencing data 400 | unsafe { &mut *this.data } 401 | } 402 | } 403 | 404 | impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for FairMutexGuard<'a, T> { 405 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 406 | fmt::Debug::fmt(&**self, f) 407 | } 408 | } 409 | 410 | impl<'a, T: ?Sized + fmt::Display> fmt::Display for FairMutexGuard<'a, T> { 411 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 412 | fmt::Display::fmt(&**self, f) 413 | } 414 | } 415 | 416 | impl<'a, T: ?Sized> Deref for FairMutexGuard<'a, T> { 417 | type Target = T; 418 | fn deref(&self) -> &T { 419 | // We know statically that only we are referencing data 420 | unsafe { &*self.data } 421 | } 422 | } 423 | 424 | impl<'a, T: ?Sized> DerefMut for FairMutexGuard<'a, T> { 425 | fn deref_mut(&mut self) -> &mut T { 426 | // We know statically that only we are referencing data 427 | unsafe { &mut *self.data } 428 | } 429 | } 430 | 431 | impl<'a, T: ?Sized> Drop for FairMutexGuard<'a, T> { 432 | /// The dropping of the MutexGuard will release the lock it was created from. 433 | fn drop(&mut self) { 434 | self.lock.fetch_and(!LOCKED, Ordering::Release); 435 | } 436 | } 437 | 438 | impl<'a, T: ?Sized, R> Starvation<'a, T, R> { 439 | /// Attempts the lock the mutex if we are the only starving user. 440 | /// 441 | /// This allows another user to lock the mutex if they are starving as well. 442 | pub fn try_lock_fair(self) -> Result, Self> { 443 | // Try to lock the mutex. 444 | if self 445 | .lock 446 | .lock 447 | .compare_exchange( 448 | STARVED, 449 | STARVED | LOCKED, 450 | Ordering::Acquire, 451 | Ordering::Relaxed, 452 | ) 453 | .is_ok() 454 | { 455 | // We are the only starving user, lock the mutex. 456 | Ok(FairMutexGuard { 457 | lock: &self.lock.lock, 458 | data: self.lock.data.get(), 459 | }) 460 | } else { 461 | // Another user is starving, fail. 462 | Err(self) 463 | } 464 | } 465 | 466 | /// Attempts to lock the mutex. 467 | /// 468 | /// If the lock is currently held by another thread, this will return `None`. 469 | /// 470 | /// # Example 471 | /// 472 | /// ``` 473 | /// let lock = spin::mutex::FairMutex::<_>::new(42); 474 | /// 475 | /// // Lock the mutex to simulate it being used by another user. 476 | /// let guard1 = lock.lock(); 477 | /// 478 | /// // Try to lock the mutex. 479 | /// let guard2 = lock.try_lock(); 480 | /// assert!(guard2.is_none()); 481 | /// 482 | /// // Wait for a while. 483 | /// wait_for_a_while(); 484 | /// 485 | /// // We are now starved, indicate as such. 486 | /// let starve = lock.starve(); 487 | /// 488 | /// // Once the lock is released, another user trying to lock it will 489 | /// // fail. 490 | /// drop(guard1); 491 | /// let guard3 = lock.try_lock(); 492 | /// assert!(guard3.is_none()); 493 | /// 494 | /// // However, we will be able to lock it. 495 | /// let guard4 = starve.try_lock(); 496 | /// assert!(guard4.is_ok()); 497 | /// 498 | /// # fn wait_for_a_while() {} 499 | /// ``` 500 | pub fn try_lock(self) -> Result, Self> { 501 | // Try to lock the mutex. 502 | if self.lock.lock.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 { 503 | // We have successfully locked the mutex. 504 | // By dropping `self` here, we decrement the starvation count. 505 | Ok(FairMutexGuard { 506 | lock: &self.lock.lock, 507 | data: self.lock.data.get(), 508 | }) 509 | } else { 510 | Err(self) 511 | } 512 | } 513 | } 514 | 515 | impl<'a, T: ?Sized, R: RelaxStrategy> Starvation<'a, T, R> { 516 | /// Locks the mutex. 517 | pub fn lock(mut self) -> FairMutexGuard<'a, T> { 518 | // Try to lock the mutex. 519 | loop { 520 | match self.try_lock() { 521 | Ok(lock) => return lock, 522 | Err(starve) => self = starve, 523 | } 524 | 525 | // Relax until the lock is released. 526 | while self.lock.is_locked() { 527 | R::relax(); 528 | } 529 | } 530 | } 531 | } 532 | 533 | impl<'a, T: ?Sized, R> Drop for Starvation<'a, T, R> { 534 | fn drop(&mut self) { 535 | // As there is no longer a user being starved, we decrement the starver count. 536 | self.lock.lock.fetch_sub(STARVED, Ordering::Release); 537 | } 538 | } 539 | 540 | impl fmt::Display for LockRejectReason { 541 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 542 | match self { 543 | LockRejectReason::Locked => write!(f, "locked"), 544 | LockRejectReason::Starved => write!(f, "starved"), 545 | } 546 | } 547 | } 548 | 549 | #[cfg(feature = "std")] 550 | impl std::error::Error for LockRejectReason {} 551 | 552 | #[cfg(feature = "lock_api")] 553 | unsafe impl lock_api_crate::RawMutex for FairMutex<(), R> { 554 | type GuardMarker = lock_api_crate::GuardSend; 555 | 556 | const INIT: Self = Self::new(()); 557 | 558 | fn lock(&self) { 559 | // Prevent guard destructor running 560 | core::mem::forget(Self::lock(self)); 561 | } 562 | 563 | fn try_lock(&self) -> bool { 564 | // Prevent guard destructor running 565 | Self::try_lock(self).map(core::mem::forget).is_some() 566 | } 567 | 568 | unsafe fn unlock(&self) { 569 | self.force_unlock(); 570 | } 571 | 572 | fn is_locked(&self) -> bool { 573 | Self::is_locked(self) 574 | } 575 | } 576 | 577 | #[cfg(test)] 578 | mod tests { 579 | use std::prelude::v1::*; 580 | 581 | use std::sync::atomic::{AtomicUsize, Ordering}; 582 | use std::sync::mpsc::channel; 583 | use std::sync::Arc; 584 | use std::thread; 585 | 586 | type FairMutex = super::FairMutex; 587 | 588 | #[derive(Eq, PartialEq, Debug)] 589 | struct NonCopy(i32); 590 | 591 | #[test] 592 | fn smoke() { 593 | let m = FairMutex::<_>::new(()); 594 | drop(m.lock()); 595 | drop(m.lock()); 596 | } 597 | 598 | #[test] 599 | fn lots_and_lots() { 600 | static M: FairMutex<()> = FairMutex::<_>::new(()); 601 | static mut CNT: u32 = 0; 602 | const J: u32 = 1000; 603 | const K: u32 = 3; 604 | 605 | fn inc() { 606 | for _ in 0..J { 607 | unsafe { 608 | let _g = M.lock(); 609 | CNT += 1; 610 | } 611 | } 612 | } 613 | 614 | let (tx, rx) = channel(); 615 | for _ in 0..K { 616 | let tx2 = tx.clone(); 617 | thread::spawn(move || { 618 | inc(); 619 | tx2.send(()).unwrap(); 620 | }); 621 | let tx2 = tx.clone(); 622 | thread::spawn(move || { 623 | inc(); 624 | tx2.send(()).unwrap(); 625 | }); 626 | } 627 | 628 | drop(tx); 629 | for _ in 0..2 * K { 630 | rx.recv().unwrap(); 631 | } 632 | assert_eq!(unsafe { CNT }, J * K * 2); 633 | } 634 | 635 | #[test] 636 | fn try_lock() { 637 | let mutex = FairMutex::<_>::new(42); 638 | 639 | // First lock succeeds 640 | let a = mutex.try_lock(); 641 | assert_eq!(a.as_ref().map(|r| **r), Some(42)); 642 | 643 | // Additional lock fails 644 | let b = mutex.try_lock(); 645 | assert!(b.is_none()); 646 | 647 | // After dropping lock, it succeeds again 648 | ::core::mem::drop(a); 649 | let c = mutex.try_lock(); 650 | assert_eq!(c.as_ref().map(|r| **r), Some(42)); 651 | } 652 | 653 | #[test] 654 | fn test_into_inner() { 655 | let m = FairMutex::<_>::new(NonCopy(10)); 656 | assert_eq!(m.into_inner(), NonCopy(10)); 657 | } 658 | 659 | #[test] 660 | fn test_into_inner_drop() { 661 | struct Foo(Arc); 662 | impl Drop for Foo { 663 | fn drop(&mut self) { 664 | self.0.fetch_add(1, Ordering::SeqCst); 665 | } 666 | } 667 | let num_drops = Arc::new(AtomicUsize::new(0)); 668 | let m = FairMutex::<_>::new(Foo(num_drops.clone())); 669 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 670 | { 671 | let _inner = m.into_inner(); 672 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 673 | } 674 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); 675 | } 676 | 677 | #[test] 678 | fn test_mutex_arc_nested() { 679 | // Tests nested mutexes and access 680 | // to underlying data. 681 | let arc = Arc::new(FairMutex::<_>::new(1)); 682 | let arc2 = Arc::new(FairMutex::<_>::new(arc)); 683 | let (tx, rx) = channel(); 684 | let _t = thread::spawn(move || { 685 | let lock = arc2.lock(); 686 | let lock2 = lock.lock(); 687 | assert_eq!(*lock2, 1); 688 | tx.send(()).unwrap(); 689 | }); 690 | rx.recv().unwrap(); 691 | } 692 | 693 | #[test] 694 | fn test_mutex_arc_access_in_unwind() { 695 | let arc = Arc::new(FairMutex::<_>::new(1)); 696 | let arc2 = arc.clone(); 697 | let _ = thread::spawn(move || -> () { 698 | struct Unwinder { 699 | i: Arc>, 700 | } 701 | impl Drop for Unwinder { 702 | fn drop(&mut self) { 703 | *self.i.lock() += 1; 704 | } 705 | } 706 | let _u = Unwinder { i: arc2 }; 707 | panic!(); 708 | }) 709 | .join(); 710 | let lock = arc.lock(); 711 | assert_eq!(*lock, 2); 712 | } 713 | 714 | #[test] 715 | fn test_mutex_unsized() { 716 | let mutex: &FairMutex<[i32]> = &FairMutex::<_>::new([1, 2, 3]); 717 | { 718 | let b = &mut *mutex.lock(); 719 | b[0] = 4; 720 | b[2] = 5; 721 | } 722 | let comp: &[i32] = &[4, 2, 5]; 723 | assert_eq!(&*mutex.lock(), comp); 724 | } 725 | 726 | #[test] 727 | fn test_mutex_force_lock() { 728 | let lock = FairMutex::<_>::new(()); 729 | ::std::mem::forget(lock.lock()); 730 | unsafe { 731 | lock.force_unlock(); 732 | } 733 | assert!(lock.try_lock().is_some()); 734 | } 735 | } 736 | -------------------------------------------------------------------------------- /src/mutex/spin.rs: -------------------------------------------------------------------------------- 1 | //! A naïve spinning mutex. 2 | //! 3 | //! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case 4 | //! latency is theoretically infinite. 5 | 6 | use crate::{ 7 | atomic::{AtomicBool, Ordering}, 8 | RelaxStrategy, Spin, 9 | }; 10 | use core::{ 11 | cell::UnsafeCell, 12 | fmt, 13 | marker::PhantomData, 14 | mem::ManuallyDrop, 15 | ops::{Deref, DerefMut}, 16 | }; 17 | 18 | /// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data. 19 | /// 20 | /// # Example 21 | /// 22 | /// ``` 23 | /// use spin; 24 | /// 25 | /// let lock = spin::mutex::SpinMutex::<_>::new(0); 26 | /// 27 | /// // Modify the data 28 | /// *lock.lock() = 2; 29 | /// 30 | /// // Read the data 31 | /// let answer = *lock.lock(); 32 | /// assert_eq!(answer, 2); 33 | /// ``` 34 | /// 35 | /// # Thread safety example 36 | /// 37 | /// ``` 38 | /// use spin; 39 | /// use std::sync::{Arc, Barrier}; 40 | /// 41 | /// let thread_count = 1000; 42 | /// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0)); 43 | /// 44 | /// // We use a barrier to ensure the readout happens after all writing 45 | /// let barrier = Arc::new(Barrier::new(thread_count + 1)); 46 | /// 47 | /// # let mut ts = Vec::new(); 48 | /// for _ in (0..thread_count) { 49 | /// let my_barrier = barrier.clone(); 50 | /// let my_lock = spin_mutex.clone(); 51 | /// # let t = 52 | /// std::thread::spawn(move || { 53 | /// let mut guard = my_lock.lock(); 54 | /// *guard += 1; 55 | /// 56 | /// // Release the lock to prevent a deadlock 57 | /// drop(guard); 58 | /// my_barrier.wait(); 59 | /// }); 60 | /// # ts.push(t); 61 | /// } 62 | /// 63 | /// barrier.wait(); 64 | /// 65 | /// let answer = { *spin_mutex.lock() }; 66 | /// assert_eq!(answer, thread_count); 67 | /// 68 | /// # for t in ts { 69 | /// # t.join().unwrap(); 70 | /// # } 71 | /// ``` 72 | pub struct SpinMutex { 73 | phantom: PhantomData, 74 | pub(crate) lock: AtomicBool, 75 | data: UnsafeCell, 76 | } 77 | 78 | /// A guard that provides mutable data access. 79 | /// 80 | /// When the guard falls out of scope it will release the lock. 81 | pub struct SpinMutexGuard<'a, T: ?Sized + 'a> { 82 | lock: &'a AtomicBool, 83 | data: *mut T, 84 | } 85 | 86 | // Same unsafe impls as `std::sync::Mutex` 87 | unsafe impl Sync for SpinMutex {} 88 | unsafe impl Send for SpinMutex {} 89 | 90 | unsafe impl Sync for SpinMutexGuard<'_, T> {} 91 | unsafe impl Send for SpinMutexGuard<'_, T> {} 92 | 93 | impl SpinMutex { 94 | /// Creates a new [`SpinMutex`] wrapping the supplied data. 95 | /// 96 | /// # Example 97 | /// 98 | /// ``` 99 | /// use spin::mutex::SpinMutex; 100 | /// 101 | /// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(()); 102 | /// 103 | /// fn demo() { 104 | /// let lock = MUTEX.lock(); 105 | /// // do something with lock 106 | /// drop(lock); 107 | /// } 108 | /// ``` 109 | #[inline(always)] 110 | pub const fn new(data: T) -> Self { 111 | SpinMutex { 112 | lock: AtomicBool::new(false), 113 | data: UnsafeCell::new(data), 114 | phantom: PhantomData, 115 | } 116 | } 117 | 118 | /// Consumes this [`SpinMutex`] and unwraps the underlying data. 119 | /// 120 | /// # Example 121 | /// 122 | /// ``` 123 | /// let lock = spin::mutex::SpinMutex::<_>::new(42); 124 | /// assert_eq!(42, lock.into_inner()); 125 | /// ``` 126 | #[inline(always)] 127 | pub fn into_inner(self) -> T { 128 | // We know statically that there are no outstanding references to 129 | // `self` so there's no need to lock. 130 | let SpinMutex { data, .. } = self; 131 | data.into_inner() 132 | } 133 | 134 | /// Returns a mutable pointer to the underlying data. 135 | /// 136 | /// This is mostly meant to be used for applications which require manual unlocking, but where 137 | /// storing both the lock and the pointer to the inner data gets inefficient. 138 | /// 139 | /// # Example 140 | /// ``` 141 | /// let lock = spin::mutex::SpinMutex::<_>::new(42); 142 | /// 143 | /// unsafe { 144 | /// core::mem::forget(lock.lock()); 145 | /// 146 | /// assert_eq!(lock.as_mut_ptr().read(), 42); 147 | /// lock.as_mut_ptr().write(58); 148 | /// 149 | /// lock.force_unlock(); 150 | /// } 151 | /// 152 | /// assert_eq!(*lock.lock(), 58); 153 | /// 154 | /// ``` 155 | #[inline(always)] 156 | pub fn as_mut_ptr(&self) -> *mut T { 157 | self.data.get() 158 | } 159 | } 160 | 161 | impl SpinMutex { 162 | /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data. 163 | /// 164 | /// The returned value may be dereferenced for data access 165 | /// and the lock will be dropped when the guard falls out of scope. 166 | /// 167 | /// ``` 168 | /// let lock = spin::mutex::SpinMutex::<_>::new(0); 169 | /// { 170 | /// let mut data = lock.lock(); 171 | /// // The lock is now locked and the data can be accessed 172 | /// *data += 1; 173 | /// // The lock is implicitly dropped at the end of the scope 174 | /// } 175 | /// ``` 176 | #[inline(always)] 177 | pub fn lock(&self) -> SpinMutexGuard { 178 | // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock` 179 | // when called in a loop. 180 | loop { 181 | if let Some(guard) = self.try_lock_weak() { 182 | break guard; 183 | } 184 | 185 | while self.is_locked() { 186 | R::relax(); 187 | } 188 | } 189 | } 190 | } 191 | 192 | impl SpinMutex { 193 | /// Returns `true` if the lock is currently held. 194 | /// 195 | /// # Safety 196 | /// 197 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' 198 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. 199 | #[inline(always)] 200 | pub fn is_locked(&self) -> bool { 201 | self.lock.load(Ordering::Relaxed) 202 | } 203 | 204 | /// Force unlock this [`SpinMutex`]. 205 | /// 206 | /// # Safety 207 | /// 208 | /// This is *extremely* unsafe if the lock is not held by the current 209 | /// thread. However, this can be useful in some instances for exposing the 210 | /// lock to FFI that doesn't know how to deal with RAII. 211 | #[inline(always)] 212 | pub unsafe fn force_unlock(&self) { 213 | self.lock.store(false, Ordering::Release); 214 | } 215 | 216 | /// Try to lock this [`SpinMutex`], returning a lock guard if successful. 217 | /// 218 | /// # Example 219 | /// 220 | /// ``` 221 | /// let lock = spin::mutex::SpinMutex::<_>::new(42); 222 | /// 223 | /// let maybe_guard = lock.try_lock(); 224 | /// assert!(maybe_guard.is_some()); 225 | /// 226 | /// // `maybe_guard` is still held, so the second call fails 227 | /// let maybe_guard2 = lock.try_lock(); 228 | /// assert!(maybe_guard2.is_none()); 229 | /// ``` 230 | #[inline(always)] 231 | pub fn try_lock(&self) -> Option> { 232 | // The reason for using a strong compare_exchange is explained here: 233 | // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107 234 | if self 235 | .lock 236 | .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) 237 | .is_ok() 238 | { 239 | Some(SpinMutexGuard { 240 | lock: &self.lock, 241 | data: unsafe { &mut *self.data.get() }, 242 | }) 243 | } else { 244 | None 245 | } 246 | } 247 | 248 | /// Try to lock this [`SpinMutex`], returning a lock guard if succesful. 249 | /// 250 | /// Unlike [`SpinMutex::try_lock`], this function is allowed to spuriously fail even when the mutex is unlocked, 251 | /// which can result in more efficient code on some platforms. 252 | #[inline(always)] 253 | pub fn try_lock_weak(&self) -> Option> { 254 | if self 255 | .lock 256 | .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) 257 | .is_ok() 258 | { 259 | Some(SpinMutexGuard { 260 | lock: &self.lock, 261 | data: unsafe { &mut *self.data.get() }, 262 | }) 263 | } else { 264 | None 265 | } 266 | } 267 | 268 | /// Returns a mutable reference to the underlying data. 269 | /// 270 | /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in 271 | /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As 272 | /// such, this is a 'zero-cost' operation. 273 | /// 274 | /// # Example 275 | /// 276 | /// ``` 277 | /// let mut lock = spin::mutex::SpinMutex::<_>::new(0); 278 | /// *lock.get_mut() = 10; 279 | /// assert_eq!(*lock.lock(), 10); 280 | /// ``` 281 | #[inline(always)] 282 | pub fn get_mut(&mut self) -> &mut T { 283 | // We know statically that there are no other references to `self`, so 284 | // there's no need to lock the inner mutex. 285 | unsafe { &mut *self.data.get() } 286 | } 287 | } 288 | 289 | impl fmt::Debug for SpinMutex { 290 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 291 | match self.try_lock() { 292 | Some(guard) => write!(f, "Mutex {{ data: ") 293 | .and_then(|()| (&*guard).fmt(f)) 294 | .and_then(|()| write!(f, " }}")), 295 | None => write!(f, "Mutex {{ }}"), 296 | } 297 | } 298 | } 299 | 300 | impl Default for SpinMutex { 301 | fn default() -> Self { 302 | Self::new(Default::default()) 303 | } 304 | } 305 | 306 | impl From for SpinMutex { 307 | fn from(data: T) -> Self { 308 | Self::new(data) 309 | } 310 | } 311 | 312 | impl<'a, T: ?Sized> SpinMutexGuard<'a, T> { 313 | /// Leak the lock guard, yielding a mutable reference to the underlying data. 314 | /// 315 | /// Note that this function will permanently lock the original [`SpinMutex`]. 316 | /// 317 | /// ``` 318 | /// let mylock = spin::mutex::SpinMutex::<_>::new(0); 319 | /// 320 | /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock()); 321 | /// 322 | /// *data = 1; 323 | /// assert_eq!(*data, 1); 324 | /// ``` 325 | #[inline(always)] 326 | pub fn leak(this: Self) -> &'a mut T { 327 | // Use ManuallyDrop to avoid stacked-borrow invalidation 328 | let mut this = ManuallyDrop::new(this); 329 | // We know statically that only we are referencing data 330 | unsafe { &mut *this.data } 331 | } 332 | } 333 | 334 | impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> { 335 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 336 | fmt::Debug::fmt(&**self, f) 337 | } 338 | } 339 | 340 | impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> { 341 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 342 | fmt::Display::fmt(&**self, f) 343 | } 344 | } 345 | 346 | impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> { 347 | type Target = T; 348 | fn deref(&self) -> &T { 349 | // We know statically that only we are referencing data 350 | unsafe { &*self.data } 351 | } 352 | } 353 | 354 | impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> { 355 | fn deref_mut(&mut self) -> &mut T { 356 | // We know statically that only we are referencing data 357 | unsafe { &mut *self.data } 358 | } 359 | } 360 | 361 | impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> { 362 | /// The dropping of the MutexGuard will release the lock it was created from. 363 | fn drop(&mut self) { 364 | self.lock.store(false, Ordering::Release); 365 | } 366 | } 367 | 368 | #[cfg(feature = "lock_api")] 369 | unsafe impl lock_api_crate::RawMutex for SpinMutex<(), R> { 370 | type GuardMarker = lock_api_crate::GuardSend; 371 | 372 | const INIT: Self = Self::new(()); 373 | 374 | fn lock(&self) { 375 | // Prevent guard destructor running 376 | core::mem::forget(Self::lock(self)); 377 | } 378 | 379 | fn try_lock(&self) -> bool { 380 | // Prevent guard destructor running 381 | Self::try_lock(self).map(core::mem::forget).is_some() 382 | } 383 | 384 | unsafe fn unlock(&self) { 385 | self.force_unlock(); 386 | } 387 | 388 | fn is_locked(&self) -> bool { 389 | Self::is_locked(self) 390 | } 391 | } 392 | 393 | #[cfg(test)] 394 | mod tests { 395 | use std::prelude::v1::*; 396 | 397 | use std::sync::atomic::{AtomicUsize, Ordering}; 398 | use std::sync::mpsc::channel; 399 | use std::sync::Arc; 400 | use std::thread; 401 | 402 | type SpinMutex = super::SpinMutex; 403 | 404 | #[derive(Eq, PartialEq, Debug)] 405 | struct NonCopy(i32); 406 | 407 | #[test] 408 | fn smoke() { 409 | let m = SpinMutex::<_>::new(()); 410 | drop(m.lock()); 411 | drop(m.lock()); 412 | } 413 | 414 | #[test] 415 | fn lots_and_lots() { 416 | static M: SpinMutex<()> = SpinMutex::<_>::new(()); 417 | static mut CNT: u32 = 0; 418 | const J: u32 = 1000; 419 | const K: u32 = 3; 420 | 421 | fn inc() { 422 | for _ in 0..J { 423 | unsafe { 424 | let _g = M.lock(); 425 | CNT += 1; 426 | } 427 | } 428 | } 429 | 430 | let (tx, rx) = channel(); 431 | let mut ts = Vec::new(); 432 | for _ in 0..K { 433 | let tx2 = tx.clone(); 434 | ts.push(thread::spawn(move || { 435 | inc(); 436 | tx2.send(()).unwrap(); 437 | })); 438 | let tx2 = tx.clone(); 439 | ts.push(thread::spawn(move || { 440 | inc(); 441 | tx2.send(()).unwrap(); 442 | })); 443 | } 444 | 445 | drop(tx); 446 | for _ in 0..2 * K { 447 | rx.recv().unwrap(); 448 | } 449 | assert_eq!(unsafe { CNT }, J * K * 2); 450 | 451 | for t in ts { 452 | t.join().unwrap(); 453 | } 454 | } 455 | 456 | #[test] 457 | fn try_lock() { 458 | let mutex = SpinMutex::<_>::new(42); 459 | 460 | // First lock succeeds 461 | let a = mutex.try_lock(); 462 | assert_eq!(a.as_ref().map(|r| **r), Some(42)); 463 | 464 | // Additional lock fails 465 | let b = mutex.try_lock(); 466 | assert!(b.is_none()); 467 | 468 | // After dropping lock, it succeeds again 469 | ::core::mem::drop(a); 470 | let c = mutex.try_lock(); 471 | assert_eq!(c.as_ref().map(|r| **r), Some(42)); 472 | } 473 | 474 | #[test] 475 | fn test_into_inner() { 476 | let m = SpinMutex::<_>::new(NonCopy(10)); 477 | assert_eq!(m.into_inner(), NonCopy(10)); 478 | } 479 | 480 | #[test] 481 | fn test_into_inner_drop() { 482 | struct Foo(Arc); 483 | impl Drop for Foo { 484 | fn drop(&mut self) { 485 | self.0.fetch_add(1, Ordering::SeqCst); 486 | } 487 | } 488 | let num_drops = Arc::new(AtomicUsize::new(0)); 489 | let m = SpinMutex::<_>::new(Foo(num_drops.clone())); 490 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 491 | { 492 | let _inner = m.into_inner(); 493 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 494 | } 495 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); 496 | } 497 | 498 | #[test] 499 | fn test_mutex_arc_nested() { 500 | // Tests nested mutexes and access 501 | // to underlying data. 502 | let arc = Arc::new(SpinMutex::<_>::new(1)); 503 | let arc2 = Arc::new(SpinMutex::<_>::new(arc)); 504 | let (tx, rx) = channel(); 505 | let t = thread::spawn(move || { 506 | let lock = arc2.lock(); 507 | let lock2 = lock.lock(); 508 | assert_eq!(*lock2, 1); 509 | tx.send(()).unwrap(); 510 | }); 511 | rx.recv().unwrap(); 512 | t.join().unwrap(); 513 | } 514 | 515 | #[test] 516 | fn test_mutex_arc_access_in_unwind() { 517 | let arc = Arc::new(SpinMutex::<_>::new(1)); 518 | let arc2 = arc.clone(); 519 | let _ = thread::spawn(move || -> () { 520 | struct Unwinder { 521 | i: Arc>, 522 | } 523 | impl Drop for Unwinder { 524 | fn drop(&mut self) { 525 | *self.i.lock() += 1; 526 | } 527 | } 528 | let _u = Unwinder { i: arc2 }; 529 | panic!(); 530 | }) 531 | .join(); 532 | let lock = arc.lock(); 533 | assert_eq!(*lock, 2); 534 | } 535 | 536 | #[test] 537 | fn test_mutex_unsized() { 538 | let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]); 539 | { 540 | let b = &mut *mutex.lock(); 541 | b[0] = 4; 542 | b[2] = 5; 543 | } 544 | let comp: &[i32] = &[4, 2, 5]; 545 | assert_eq!(&*mutex.lock(), comp); 546 | } 547 | 548 | #[test] 549 | fn test_mutex_force_lock() { 550 | let lock = SpinMutex::<_>::new(()); 551 | ::std::mem::forget(lock.lock()); 552 | unsafe { 553 | lock.force_unlock(); 554 | } 555 | assert!(lock.try_lock().is_some()); 556 | } 557 | } 558 | -------------------------------------------------------------------------------- /src/mutex/ticket.rs: -------------------------------------------------------------------------------- 1 | //! A ticket-based mutex. 2 | //! 3 | //! Waiting threads take a 'ticket' from the lock in the order they arrive and gain access to the lock when their 4 | //! ticket is next in the queue. Best-case latency is slightly worse than a regular spinning mutex, but worse-case 5 | //! latency is infinitely better. Waiting threads simply need to wait for all threads that come before them in the 6 | //! queue to finish. 7 | 8 | use crate::{ 9 | atomic::{AtomicUsize, Ordering}, 10 | RelaxStrategy, Spin, 11 | }; 12 | use core::{ 13 | cell::UnsafeCell, 14 | fmt, 15 | marker::PhantomData, 16 | ops::{Deref, DerefMut}, 17 | }; 18 | 19 | /// A spin-based [ticket lock](https://en.wikipedia.org/wiki/Ticket_lock) providing mutually exclusive access to data. 20 | /// 21 | /// A ticket lock is analogous to a queue management system for lock requests. When a thread tries to take a lock, it 22 | /// is assigned a 'ticket'. It then spins until its ticket becomes next in line. When the lock guard is released, the 23 | /// next ticket will be processed. 24 | /// 25 | /// Ticket locks significantly reduce the worse-case performance of locking at the cost of slightly higher average-time 26 | /// overhead. 27 | /// 28 | /// # Example 29 | /// 30 | /// ``` 31 | /// use spin; 32 | /// 33 | /// let lock = spin::mutex::TicketMutex::<_>::new(0); 34 | /// 35 | /// // Modify the data 36 | /// *lock.lock() = 2; 37 | /// 38 | /// // Read the data 39 | /// let answer = *lock.lock(); 40 | /// assert_eq!(answer, 2); 41 | /// ``` 42 | /// 43 | /// # Thread safety example 44 | /// 45 | /// ``` 46 | /// use spin; 47 | /// use std::sync::{Arc, Barrier}; 48 | /// 49 | /// let thread_count = 1000; 50 | /// let spin_mutex = Arc::new(spin::mutex::TicketMutex::<_>::new(0)); 51 | /// 52 | /// // We use a barrier to ensure the readout happens after all writing 53 | /// let barrier = Arc::new(Barrier::new(thread_count + 1)); 54 | /// 55 | /// for _ in (0..thread_count) { 56 | /// let my_barrier = barrier.clone(); 57 | /// let my_lock = spin_mutex.clone(); 58 | /// std::thread::spawn(move || { 59 | /// let mut guard = my_lock.lock(); 60 | /// *guard += 1; 61 | /// 62 | /// // Release the lock to prevent a deadlock 63 | /// drop(guard); 64 | /// my_barrier.wait(); 65 | /// }); 66 | /// } 67 | /// 68 | /// barrier.wait(); 69 | /// 70 | /// let answer = { *spin_mutex.lock() }; 71 | /// assert_eq!(answer, thread_count); 72 | /// ``` 73 | pub struct TicketMutex { 74 | phantom: PhantomData, 75 | next_ticket: AtomicUsize, 76 | next_serving: AtomicUsize, 77 | data: UnsafeCell, 78 | } 79 | 80 | /// A guard that protects some data. 81 | /// 82 | /// When the guard is dropped, the next ticket will be processed. 83 | pub struct TicketMutexGuard<'a, T: ?Sized + 'a> { 84 | next_serving: &'a AtomicUsize, 85 | ticket: usize, 86 | data: &'a mut T, 87 | } 88 | 89 | unsafe impl Sync for TicketMutex {} 90 | unsafe impl Send for TicketMutex {} 91 | 92 | impl TicketMutex { 93 | /// Creates a new [`TicketMutex`] wrapping the supplied data. 94 | /// 95 | /// # Example 96 | /// 97 | /// ``` 98 | /// use spin::mutex::TicketMutex; 99 | /// 100 | /// static MUTEX: TicketMutex<()> = TicketMutex::<_>::new(()); 101 | /// 102 | /// fn demo() { 103 | /// let lock = MUTEX.lock(); 104 | /// // do something with lock 105 | /// drop(lock); 106 | /// } 107 | /// ``` 108 | #[inline(always)] 109 | pub const fn new(data: T) -> Self { 110 | Self { 111 | phantom: PhantomData, 112 | next_ticket: AtomicUsize::new(0), 113 | next_serving: AtomicUsize::new(0), 114 | data: UnsafeCell::new(data), 115 | } 116 | } 117 | 118 | /// Consumes this [`TicketMutex`] and unwraps the underlying data. 119 | /// 120 | /// # Example 121 | /// 122 | /// ``` 123 | /// let lock = spin::mutex::TicketMutex::<_>::new(42); 124 | /// assert_eq!(42, lock.into_inner()); 125 | /// ``` 126 | #[inline(always)] 127 | pub fn into_inner(self) -> T { 128 | self.data.into_inner() 129 | } 130 | /// Returns a mutable pointer to the underying data. 131 | /// 132 | /// This is mostly meant to be used for applications which require manual unlocking, but where 133 | /// storing both the lock and the pointer to the inner data gets inefficient. 134 | /// 135 | /// # Example 136 | /// ``` 137 | /// let lock = spin::mutex::SpinMutex::<_>::new(42); 138 | /// 139 | /// unsafe { 140 | /// core::mem::forget(lock.lock()); 141 | /// 142 | /// assert_eq!(lock.as_mut_ptr().read(), 42); 143 | /// lock.as_mut_ptr().write(58); 144 | /// 145 | /// lock.force_unlock(); 146 | /// } 147 | /// 148 | /// assert_eq!(*lock.lock(), 58); 149 | /// 150 | /// ``` 151 | #[inline(always)] 152 | pub fn as_mut_ptr(&self) -> *mut T { 153 | self.data.get() 154 | } 155 | } 156 | 157 | impl fmt::Debug for TicketMutex { 158 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 159 | match self.try_lock() { 160 | Some(guard) => write!(f, "Mutex {{ data: ") 161 | .and_then(|()| (&*guard).fmt(f)) 162 | .and_then(|()| write!(f, " }}")), 163 | None => write!(f, "Mutex {{ }}"), 164 | } 165 | } 166 | } 167 | 168 | impl TicketMutex { 169 | /// Locks the [`TicketMutex`] and returns a guard that permits access to the inner data. 170 | /// 171 | /// The returned data may be dereferenced for data access 172 | /// and the lock will be dropped when the guard falls out of scope. 173 | /// 174 | /// ``` 175 | /// let lock = spin::mutex::TicketMutex::<_>::new(0); 176 | /// { 177 | /// let mut data = lock.lock(); 178 | /// // The lock is now locked and the data can be accessed 179 | /// *data += 1; 180 | /// // The lock is implicitly dropped at the end of the scope 181 | /// } 182 | /// ``` 183 | #[inline(always)] 184 | pub fn lock(&self) -> TicketMutexGuard { 185 | let ticket = self.next_ticket.fetch_add(1, Ordering::Relaxed); 186 | 187 | while self.next_serving.load(Ordering::Acquire) != ticket { 188 | R::relax(); 189 | } 190 | 191 | TicketMutexGuard { 192 | next_serving: &self.next_serving, 193 | ticket, 194 | // Safety 195 | // We know that we are the next ticket to be served, 196 | // so there's no other thread accessing the data. 197 | // 198 | // Every other thread has another ticket number so it's 199 | // definitely stuck in the spin loop above. 200 | data: unsafe { &mut *self.data.get() }, 201 | } 202 | } 203 | } 204 | 205 | impl TicketMutex { 206 | /// Returns `true` if the lock is currently held. 207 | /// 208 | /// # Safety 209 | /// 210 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' 211 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. 212 | #[inline(always)] 213 | pub fn is_locked(&self) -> bool { 214 | let ticket = self.next_ticket.load(Ordering::Relaxed); 215 | self.next_serving.load(Ordering::Relaxed) != ticket 216 | } 217 | 218 | /// Force unlock this [`TicketMutex`], by serving the next ticket. 219 | /// 220 | /// # Safety 221 | /// 222 | /// This is *extremely* unsafe if the lock is not held by the current 223 | /// thread. However, this can be useful in some instances for exposing the 224 | /// lock to FFI that doesn't know how to deal with RAII. 225 | #[inline(always)] 226 | pub unsafe fn force_unlock(&self) { 227 | self.next_serving.fetch_add(1, Ordering::Release); 228 | } 229 | 230 | /// Try to lock this [`TicketMutex`], returning a lock guard if successful. 231 | /// 232 | /// # Example 233 | /// 234 | /// ``` 235 | /// let lock = spin::mutex::TicketMutex::<_>::new(42); 236 | /// 237 | /// let maybe_guard = lock.try_lock(); 238 | /// assert!(maybe_guard.is_some()); 239 | /// 240 | /// // `maybe_guard` is still held, so the second call fails 241 | /// let maybe_guard2 = lock.try_lock(); 242 | /// assert!(maybe_guard2.is_none()); 243 | /// ``` 244 | #[inline(always)] 245 | pub fn try_lock(&self) -> Option> { 246 | // TODO: Replace with `fetch_update` to avoid manual CAS when upgrading MSRV 247 | let ticket = { 248 | let mut prev = self.next_ticket.load(Ordering::SeqCst); 249 | loop { 250 | if self.next_serving.load(Ordering::Acquire) == prev { 251 | match self.next_ticket.compare_exchange_weak( 252 | prev, 253 | prev + 1, 254 | Ordering::SeqCst, 255 | Ordering::SeqCst, 256 | ) { 257 | Ok(x) => break Some(x), 258 | Err(next_prev) => prev = next_prev, 259 | } 260 | } else { 261 | break None; 262 | } 263 | } 264 | }; 265 | 266 | ticket.map(|ticket| TicketMutexGuard { 267 | next_serving: &self.next_serving, 268 | ticket, 269 | // Safety 270 | // We have a ticket that is equal to the next_serving ticket, so we know: 271 | // - that no other thread can have the same ticket id as this thread 272 | // - that we are the next one to be served so we have exclusive access to the data 273 | data: unsafe { &mut *self.data.get() }, 274 | }) 275 | } 276 | 277 | /// Returns a mutable reference to the underlying data. 278 | /// 279 | /// Since this call borrows the [`TicketMutex`] mutably, and a mutable reference is guaranteed to be exclusive in 280 | /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As 281 | /// such, this is a 'zero-cost' operation. 282 | /// 283 | /// # Example 284 | /// 285 | /// ``` 286 | /// let mut lock = spin::mutex::TicketMutex::<_>::new(0); 287 | /// *lock.get_mut() = 10; 288 | /// assert_eq!(*lock.lock(), 10); 289 | /// ``` 290 | #[inline(always)] 291 | pub fn get_mut(&mut self) -> &mut T { 292 | // Safety: 293 | // We know that there are no other references to `self`, 294 | // so it's safe to return a exclusive reference to the data. 295 | unsafe { &mut *self.data.get() } 296 | } 297 | } 298 | 299 | impl Default for TicketMutex { 300 | fn default() -> Self { 301 | Self::new(Default::default()) 302 | } 303 | } 304 | 305 | impl From for TicketMutex { 306 | fn from(data: T) -> Self { 307 | Self::new(data) 308 | } 309 | } 310 | 311 | impl<'a, T: ?Sized> TicketMutexGuard<'a, T> { 312 | /// Leak the lock guard, yielding a mutable reference to the underlying data. 313 | /// 314 | /// Note that this function will permanently lock the original [`TicketMutex`]. 315 | /// 316 | /// ``` 317 | /// let mylock = spin::mutex::TicketMutex::<_>::new(0); 318 | /// 319 | /// let data: &mut i32 = spin::mutex::TicketMutexGuard::leak(mylock.lock()); 320 | /// 321 | /// *data = 1; 322 | /// assert_eq!(*data, 1); 323 | /// ``` 324 | #[inline(always)] 325 | pub fn leak(this: Self) -> &'a mut T { 326 | let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing 327 | core::mem::forget(this); 328 | unsafe { &mut *data } 329 | } 330 | } 331 | 332 | impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for TicketMutexGuard<'a, T> { 333 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 334 | fmt::Debug::fmt(&**self, f) 335 | } 336 | } 337 | 338 | impl<'a, T: ?Sized + fmt::Display> fmt::Display for TicketMutexGuard<'a, T> { 339 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 340 | fmt::Display::fmt(&**self, f) 341 | } 342 | } 343 | 344 | impl<'a, T: ?Sized> Deref for TicketMutexGuard<'a, T> { 345 | type Target = T; 346 | fn deref(&self) -> &T { 347 | self.data 348 | } 349 | } 350 | 351 | impl<'a, T: ?Sized> DerefMut for TicketMutexGuard<'a, T> { 352 | fn deref_mut(&mut self) -> &mut T { 353 | self.data 354 | } 355 | } 356 | 357 | impl<'a, T: ?Sized> Drop for TicketMutexGuard<'a, T> { 358 | fn drop(&mut self) { 359 | let new_ticket = self.ticket + 1; 360 | self.next_serving.store(new_ticket, Ordering::Release); 361 | } 362 | } 363 | 364 | #[cfg(feature = "lock_api")] 365 | unsafe impl lock_api_crate::RawMutex for TicketMutex<(), R> { 366 | type GuardMarker = lock_api_crate::GuardSend; 367 | 368 | const INIT: Self = Self::new(()); 369 | 370 | fn lock(&self) { 371 | // Prevent guard destructor running 372 | core::mem::forget(Self::lock(self)); 373 | } 374 | 375 | fn try_lock(&self) -> bool { 376 | // Prevent guard destructor running 377 | Self::try_lock(self).map(core::mem::forget).is_some() 378 | } 379 | 380 | unsafe fn unlock(&self) { 381 | self.force_unlock(); 382 | } 383 | 384 | fn is_locked(&self) -> bool { 385 | Self::is_locked(self) 386 | } 387 | } 388 | 389 | #[cfg(test)] 390 | mod tests { 391 | use std::prelude::v1::*; 392 | 393 | use std::sync::atomic::{AtomicUsize, Ordering}; 394 | use std::sync::mpsc::channel; 395 | use std::sync::Arc; 396 | use std::thread; 397 | 398 | type TicketMutex = super::TicketMutex; 399 | 400 | #[derive(Eq, PartialEq, Debug)] 401 | struct NonCopy(i32); 402 | 403 | #[test] 404 | fn smoke() { 405 | let m = TicketMutex::<_>::new(()); 406 | drop(m.lock()); 407 | drop(m.lock()); 408 | } 409 | 410 | #[test] 411 | fn lots_and_lots() { 412 | static M: TicketMutex<()> = TicketMutex::<_>::new(()); 413 | static mut CNT: u32 = 0; 414 | const J: u32 = 1000; 415 | const K: u32 = 3; 416 | 417 | fn inc() { 418 | for _ in 0..J { 419 | unsafe { 420 | let _g = M.lock(); 421 | CNT += 1; 422 | } 423 | } 424 | } 425 | 426 | let (tx, rx) = channel(); 427 | for _ in 0..K { 428 | let tx2 = tx.clone(); 429 | thread::spawn(move || { 430 | inc(); 431 | tx2.send(()).unwrap(); 432 | }); 433 | let tx2 = tx.clone(); 434 | thread::spawn(move || { 435 | inc(); 436 | tx2.send(()).unwrap(); 437 | }); 438 | } 439 | 440 | drop(tx); 441 | for _ in 0..2 * K { 442 | rx.recv().unwrap(); 443 | } 444 | assert_eq!(unsafe { CNT }, J * K * 2); 445 | } 446 | 447 | #[test] 448 | fn try_lock() { 449 | let mutex = TicketMutex::<_>::new(42); 450 | 451 | // First lock succeeds 452 | let a = mutex.try_lock(); 453 | assert_eq!(a.as_ref().map(|r| **r), Some(42)); 454 | 455 | // Additional lock fails 456 | let b = mutex.try_lock(); 457 | assert!(b.is_none()); 458 | 459 | // After dropping lock, it succeeds again 460 | ::core::mem::drop(a); 461 | let c = mutex.try_lock(); 462 | assert_eq!(c.as_ref().map(|r| **r), Some(42)); 463 | } 464 | 465 | #[test] 466 | fn test_into_inner() { 467 | let m = TicketMutex::<_>::new(NonCopy(10)); 468 | assert_eq!(m.into_inner(), NonCopy(10)); 469 | } 470 | 471 | #[test] 472 | fn test_into_inner_drop() { 473 | struct Foo(Arc); 474 | impl Drop for Foo { 475 | fn drop(&mut self) { 476 | self.0.fetch_add(1, Ordering::SeqCst); 477 | } 478 | } 479 | let num_drops = Arc::new(AtomicUsize::new(0)); 480 | let m = TicketMutex::<_>::new(Foo(num_drops.clone())); 481 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 482 | { 483 | let _inner = m.into_inner(); 484 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 485 | } 486 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); 487 | } 488 | 489 | #[test] 490 | fn test_mutex_arc_nested() { 491 | // Tests nested mutexes and access 492 | // to underlying data. 493 | let arc = Arc::new(TicketMutex::<_>::new(1)); 494 | let arc2 = Arc::new(TicketMutex::<_>::new(arc)); 495 | let (tx, rx) = channel(); 496 | let _t = thread::spawn(move || { 497 | let lock = arc2.lock(); 498 | let lock2 = lock.lock(); 499 | assert_eq!(*lock2, 1); 500 | tx.send(()).unwrap(); 501 | }); 502 | rx.recv().unwrap(); 503 | } 504 | 505 | #[test] 506 | fn test_mutex_arc_access_in_unwind() { 507 | let arc = Arc::new(TicketMutex::<_>::new(1)); 508 | let arc2 = arc.clone(); 509 | let _ = thread::spawn(move || -> () { 510 | struct Unwinder { 511 | i: Arc>, 512 | } 513 | impl Drop for Unwinder { 514 | fn drop(&mut self) { 515 | *self.i.lock() += 1; 516 | } 517 | } 518 | let _u = Unwinder { i: arc2 }; 519 | panic!(); 520 | }) 521 | .join(); 522 | let lock = arc.lock(); 523 | assert_eq!(*lock, 2); 524 | } 525 | 526 | #[test] 527 | fn test_mutex_unsized() { 528 | let mutex: &TicketMutex<[i32]> = &TicketMutex::<_>::new([1, 2, 3]); 529 | { 530 | let b = &mut *mutex.lock(); 531 | b[0] = 4; 532 | b[2] = 5; 533 | } 534 | let comp: &[i32] = &[4, 2, 5]; 535 | assert_eq!(&*mutex.lock(), comp); 536 | } 537 | 538 | #[test] 539 | fn is_locked() { 540 | let mutex = TicketMutex::<_>::new(()); 541 | assert!(!mutex.is_locked()); 542 | let lock = mutex.lock(); 543 | assert!(mutex.is_locked()); 544 | drop(lock); 545 | assert!(!mutex.is_locked()); 546 | } 547 | } 548 | -------------------------------------------------------------------------------- /src/once.rs: -------------------------------------------------------------------------------- 1 | //! Synchronization primitives for one-time evaluation. 2 | 3 | use crate::{ 4 | atomic::{AtomicU8, Ordering}, 5 | RelaxStrategy, Spin, 6 | }; 7 | use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit}; 8 | 9 | /// A primitive that provides lazy one-time initialization. 10 | /// 11 | /// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a 12 | /// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with 13 | /// `Once`). 14 | /// 15 | /// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics. 16 | /// 17 | /// # Examples 18 | /// 19 | /// ``` 20 | /// use spin; 21 | /// 22 | /// static START: spin::Once = spin::Once::new(); 23 | /// 24 | /// START.call_once(|| { 25 | /// // run initialization here 26 | /// }); 27 | /// ``` 28 | pub struct Once { 29 | phantom: PhantomData, 30 | status: AtomicStatus, 31 | data: UnsafeCell>, 32 | } 33 | 34 | impl Default for Once { 35 | fn default() -> Self { 36 | Self::new() 37 | } 38 | } 39 | 40 | impl fmt::Debug for Once { 41 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 42 | let mut d = f.debug_tuple("Once"); 43 | let d = if let Some(x) = self.get() { 44 | d.field(&x) 45 | } else { 46 | d.field(&format_args!("")) 47 | }; 48 | d.finish() 49 | } 50 | } 51 | 52 | // Same unsafe impls as `std::sync::RwLock`, because this also allows for 53 | // concurrent reads. 54 | unsafe impl Sync for Once {} 55 | unsafe impl Send for Once {} 56 | 57 | mod status { 58 | use super::*; 59 | 60 | // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have 61 | // a value for which there exists a valid Status. This means that users of this API must only 62 | // be allowed to load and store `Status`es. 63 | #[repr(transparent)] 64 | pub struct AtomicStatus(AtomicU8); 65 | 66 | // Four states that a Once can be in, encoded into the lower bits of `status` in 67 | // the Once structure. 68 | #[repr(u8)] 69 | #[derive(Clone, Copy, Debug, PartialEq)] 70 | pub enum Status { 71 | Incomplete = 0x00, 72 | Running = 0x01, 73 | Complete = 0x02, 74 | Panicked = 0x03, 75 | } 76 | impl Status { 77 | // Construct a status from an inner u8 integer. 78 | // 79 | // # Safety 80 | // 81 | // For this to be safe, the inner number must have a valid corresponding enum variant. 82 | unsafe fn new_unchecked(inner: u8) -> Self { 83 | core::mem::transmute(inner) 84 | } 85 | } 86 | 87 | impl AtomicStatus { 88 | #[inline(always)] 89 | pub const fn new(status: Status) -> Self { 90 | // SAFETY: We got the value directly from status, so transmuting back is fine. 91 | Self(AtomicU8::new(status as u8)) 92 | } 93 | #[inline(always)] 94 | pub fn load(&self, ordering: Ordering) -> Status { 95 | // SAFETY: We know that the inner integer must have been constructed from a Status in 96 | // the first place. 97 | unsafe { Status::new_unchecked(self.0.load(ordering)) } 98 | } 99 | #[inline(always)] 100 | pub fn store(&self, status: Status, ordering: Ordering) { 101 | // SAFETY: While not directly unsafe, this is safe because the value was retrieved from 102 | // a status, thus making transmutation safe. 103 | self.0.store(status as u8, ordering); 104 | } 105 | #[inline(always)] 106 | pub fn compare_exchange( 107 | &self, 108 | old: Status, 109 | new: Status, 110 | success: Ordering, 111 | failure: Ordering, 112 | ) -> Result { 113 | match self 114 | .0 115 | .compare_exchange(old as u8, new as u8, success, failure) 116 | { 117 | // SAFETY: A compare exchange will always return a value that was later stored into 118 | // the atomic u8, but due to the invariant that it must be a valid Status, we know 119 | // that both Ok(_) and Err(_) will be safely transmutable. 120 | Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }), 121 | Err(err) => Err(unsafe { Status::new_unchecked(err) }), 122 | } 123 | } 124 | #[inline(always)] 125 | pub fn get_mut(&mut self) -> &mut Status { 126 | // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast 127 | // it to a &mut Status. 128 | unsafe { &mut *((self.0.get_mut() as *mut u8).cast::()) } 129 | } 130 | } 131 | } 132 | use self::status::{AtomicStatus, Status}; 133 | 134 | impl Once { 135 | /// Performs an initialization routine once and only once. The given closure 136 | /// will be executed if this is the first time `call_once` has been called, 137 | /// and otherwise the routine will *not* be invoked. 138 | /// 139 | /// This method will block the calling thread if another initialization 140 | /// routine is currently running. 141 | /// 142 | /// When this function returns, it is guaranteed that some initialization 143 | /// has run and completed (it may not be the closure specified). The 144 | /// returned pointer will point to the result from the closure that was 145 | /// run. 146 | /// 147 | /// # Panics 148 | /// 149 | /// This function will panic if the [`Once`] previously panicked while attempting 150 | /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s 151 | /// primitives. 152 | /// 153 | /// # Examples 154 | /// 155 | /// ``` 156 | /// use spin; 157 | /// 158 | /// static INIT: spin::Once = spin::Once::new(); 159 | /// 160 | /// fn get_cached_val() -> usize { 161 | /// *INIT.call_once(expensive_computation) 162 | /// } 163 | /// 164 | /// fn expensive_computation() -> usize { 165 | /// // ... 166 | /// # 2 167 | /// } 168 | /// ``` 169 | pub fn call_once T>(&self, f: F) -> &T { 170 | match self.try_call_once(|| Ok::(f())) { 171 | Ok(x) => x, 172 | Err(void) => match void {}, 173 | } 174 | } 175 | 176 | /// This method is similar to `call_once`, but allows the given closure to 177 | /// fail, and lets the `Once` in a uninitialized state if it does. 178 | /// 179 | /// This method will block the calling thread if another initialization 180 | /// routine is currently running. 181 | /// 182 | /// When this function returns without error, it is guaranteed that some 183 | /// initialization has run and completed (it may not be the closure 184 | /// specified). The returned reference will point to the result from the 185 | /// closure that was run. 186 | /// 187 | /// # Panics 188 | /// 189 | /// This function will panic if the [`Once`] previously panicked while attempting 190 | /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s 191 | /// primitives. 192 | /// 193 | /// # Examples 194 | /// 195 | /// ``` 196 | /// use spin; 197 | /// 198 | /// static INIT: spin::Once = spin::Once::new(); 199 | /// 200 | /// fn get_cached_val() -> Result { 201 | /// INIT.try_call_once(expensive_fallible_computation).map(|x| *x) 202 | /// } 203 | /// 204 | /// fn expensive_fallible_computation() -> Result { 205 | /// // ... 206 | /// # Ok(2) 207 | /// } 208 | /// ``` 209 | pub fn try_call_once Result, E>(&self, f: F) -> Result<&T, E> { 210 | if let Some(value) = self.get() { 211 | Ok(value) 212 | } else { 213 | self.try_call_once_slow(f) 214 | } 215 | } 216 | 217 | #[cold] 218 | fn try_call_once_slow Result, E>(&self, f: F) -> Result<&T, E> { 219 | loop { 220 | let xchg = self.status.compare_exchange( 221 | Status::Incomplete, 222 | Status::Running, 223 | Ordering::Acquire, 224 | Ordering::Acquire, 225 | ); 226 | 227 | match xchg { 228 | Ok(_must_be_state_incomplete) => { 229 | // Impl is defined after the match for readability 230 | } 231 | Err(Status::Panicked) => panic!("Once panicked"), 232 | Err(Status::Running) => match self.poll() { 233 | Some(v) => return Ok(v), 234 | None => continue, 235 | }, 236 | Err(Status::Complete) => { 237 | return Ok(unsafe { 238 | // SAFETY: The status is Complete 239 | self.force_get() 240 | }); 241 | } 242 | Err(Status::Incomplete) => { 243 | // The compare_exchange failed, so this shouldn't ever be reached, 244 | // however if we decide to switch to compare_exchange_weak it will 245 | // be safer to leave this here than hit an unreachable 246 | continue; 247 | } 248 | } 249 | 250 | // The compare-exchange succeeded, so we shall initialize it. 251 | 252 | // We use a guard (Finish) to catch panics caused by builder 253 | let finish = Finish { 254 | status: &self.status, 255 | }; 256 | let val = match f() { 257 | Ok(val) => val, 258 | Err(err) => { 259 | // If an error occurs, clean up everything and leave. 260 | core::mem::forget(finish); 261 | self.status.store(Status::Incomplete, Ordering::Release); 262 | return Err(err); 263 | } 264 | }; 265 | unsafe { 266 | // SAFETY: 267 | // `UnsafeCell`/deref: currently the only accessor, mutably 268 | // and immutably by cas exclusion. 269 | // `write`: pointer comes from `MaybeUninit`. 270 | (*self.data.get()).as_mut_ptr().write(val); 271 | }; 272 | // If there were to be a panic with unwind enabled, the code would 273 | // short-circuit and never reach the point where it writes the inner data. 274 | // The destructor for Finish will run, and poison the Once to ensure that other 275 | // threads accessing it do not exhibit unwanted behavior, if there were to be 276 | // any inconsistency in data structures caused by the panicking thread. 277 | // 278 | // However, f() is expected in the general case not to panic. In that case, we 279 | // simply forget the guard, bypassing its destructor. We could theoretically 280 | // clear a flag instead, but this eliminates the call to the destructor at 281 | // compile time, and unconditionally poisons during an eventual panic, if 282 | // unwinding is enabled. 283 | core::mem::forget(finish); 284 | 285 | // SAFETY: Release is required here, so that all memory accesses done in the 286 | // closure when initializing, become visible to other threads that perform Acquire 287 | // loads. 288 | // 289 | // And, we also know that the changes this thread has done will not magically 290 | // disappear from our cache, so it does not need to be AcqRel. 291 | self.status.store(Status::Complete, Ordering::Release); 292 | 293 | // This next line is mainly an optimization. 294 | return unsafe { Ok(self.force_get()) }; 295 | } 296 | } 297 | 298 | /// Spins until the [`Once`] contains a value. 299 | /// 300 | /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`]. 301 | /// 302 | /// # Panics 303 | /// 304 | /// This function will panic if the [`Once`] previously panicked while attempting 305 | /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s 306 | /// primitives. 307 | pub fn wait(&self) -> &T { 308 | loop { 309 | match self.poll() { 310 | Some(x) => break x, 311 | None => R::relax(), 312 | } 313 | } 314 | } 315 | 316 | /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being 317 | /// initialized. If initialization has not even begun, `None` will be returned. 318 | /// 319 | /// Note that in releases prior to `0.7`, this function was named `wait`. 320 | /// 321 | /// # Panics 322 | /// 323 | /// This function will panic if the [`Once`] previously panicked while attempting 324 | /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s 325 | /// primitives. 326 | pub fn poll(&self) -> Option<&T> { 327 | loop { 328 | // SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to make 329 | // sure that all memory accessed done while initializing that value, are visible when 330 | // we return a reference to the inner data after this load. 331 | match self.status.load(Ordering::Acquire) { 332 | Status::Incomplete => return None, 333 | Status::Running => R::relax(), // We spin 334 | Status::Complete => return Some(unsafe { self.force_get() }), 335 | Status::Panicked => panic!("Once previously poisoned by a panicked"), 336 | } 337 | } 338 | } 339 | } 340 | 341 | impl Once { 342 | /// Initialization constant of [`Once`]. 343 | #[allow(clippy::declare_interior_mutable_const)] 344 | pub const INIT: Self = Self { 345 | phantom: PhantomData, 346 | status: AtomicStatus::new(Status::Incomplete), 347 | data: UnsafeCell::new(MaybeUninit::uninit()), 348 | }; 349 | 350 | /// Creates a new [`Once`]. 351 | pub const fn new() -> Self { 352 | Self::INIT 353 | } 354 | 355 | /// Creates a new initialized [`Once`]. 356 | pub const fn initialized(data: T) -> Self { 357 | Self { 358 | phantom: PhantomData, 359 | status: AtomicStatus::new(Status::Complete), 360 | data: UnsafeCell::new(MaybeUninit::new(data)), 361 | } 362 | } 363 | 364 | /// Retrieve a pointer to the inner data. 365 | /// 366 | /// While this method itself is safe, accessing the pointer before the [`Once`] has been 367 | /// initialized is UB, unless this method has already been written to from a pointer coming 368 | /// from this method. 369 | pub fn as_mut_ptr(&self) -> *mut T { 370 | // SAFETY: 371 | // * MaybeUninit always has exactly the same layout as T 372 | self.data.get().cast::() 373 | } 374 | 375 | /// Get a reference to the initialized instance. Must only be called once COMPLETE. 376 | unsafe fn force_get(&self) -> &T { 377 | // SAFETY: 378 | // * `UnsafeCell`/inner deref: data never changes again 379 | // * `MaybeUninit`/outer deref: data was initialized 380 | &*(*self.data.get()).as_ptr() 381 | } 382 | 383 | /// Get a reference to the initialized instance. Must only be called once COMPLETE. 384 | unsafe fn force_get_mut(&mut self) -> &mut T { 385 | // SAFETY: 386 | // * `UnsafeCell`/inner deref: data never changes again 387 | // * `MaybeUninit`/outer deref: data was initialized 388 | &mut *(*self.data.get()).as_mut_ptr() 389 | } 390 | 391 | /// Get a reference to the initialized instance. Must only be called once COMPLETE. 392 | unsafe fn force_into_inner(self) -> T { 393 | // SAFETY: 394 | // * `UnsafeCell`/inner deref: data never changes again 395 | // * `MaybeUninit`/outer deref: data was initialized 396 | (*self.data.get()).as_ptr().read() 397 | } 398 | 399 | /// Returns a reference to the inner value if the [`Once`] has been initialized. 400 | pub fn get(&self) -> Option<&T> { 401 | // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the 402 | // nonatomic stores done when initializing, once we have loaded and checked the status. 403 | match self.status.load(Ordering::Acquire) { 404 | Status::Complete => Some(unsafe { self.force_get() }), 405 | _ => None, 406 | } 407 | } 408 | 409 | /// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized. 410 | /// 411 | /// # Safety 412 | /// 413 | /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized 414 | /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused). 415 | /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically 416 | /// checking initialization is unacceptable and the `Once` has already been initialized. 417 | pub unsafe fn get_unchecked(&self) -> &T { 418 | debug_assert_eq!( 419 | self.status.load(Ordering::SeqCst), 420 | Status::Complete, 421 | "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.", 422 | ); 423 | self.force_get() 424 | } 425 | 426 | /// Returns a mutable reference to the inner value if the [`Once`] has been initialized. 427 | /// 428 | /// Because this method requires a mutable reference to the [`Once`], no synchronization 429 | /// overhead is required to access the inner value. In effect, it is zero-cost. 430 | pub fn get_mut(&mut self) -> Option<&mut T> { 431 | match *self.status.get_mut() { 432 | Status::Complete => Some(unsafe { self.force_get_mut() }), 433 | _ => None, 434 | } 435 | } 436 | 437 | /// Returns a mutable reference to the inner value 438 | /// 439 | /// # Safety 440 | /// 441 | /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized 442 | /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused). 443 | /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically 444 | /// checking initialization is unacceptable and the `Once` has already been initialized. 445 | pub unsafe fn get_mut_unchecked(&mut self) -> &mut T { 446 | debug_assert_eq!( 447 | self.status.load(Ordering::SeqCst), 448 | Status::Complete, 449 | "Attempted to access an unintialized Once. If this was to run without debug checks, this would be undefined behavior. This is a serious bug and you must fix it.", 450 | ); 451 | self.force_get_mut() 452 | } 453 | 454 | /// Returns a the inner value if the [`Once`] has been initialized. 455 | /// 456 | /// Because this method requires ownership of the [`Once`], no synchronization overhead 457 | /// is required to access the inner value. In effect, it is zero-cost. 458 | pub fn try_into_inner(mut self) -> Option { 459 | match *self.status.get_mut() { 460 | Status::Complete => Some(unsafe { self.force_into_inner() }), 461 | _ => None, 462 | } 463 | } 464 | 465 | /// Returns a the inner value if the [`Once`] has been initialized. 466 | /// # Safety 467 | /// 468 | /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized 469 | /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused) 470 | /// This can be useful, if `Once` has already been initialized, and you want to bypass an 471 | /// option check. 472 | pub unsafe fn into_inner_unchecked(self) -> T { 473 | debug_assert_eq!( 474 | self.status.load(Ordering::SeqCst), 475 | Status::Complete, 476 | "Attempted to access an unintialized Once. If this was to run without debug checks, this would be undefined behavior. This is a serious bug and you must fix it.", 477 | ); 478 | self.force_into_inner() 479 | } 480 | 481 | /// Checks whether the value has been initialized. 482 | /// 483 | /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and 484 | /// therefore it is safe to access the value directly via 485 | /// [`get_unchecked`](Self::get_unchecked) if this returns true. 486 | pub fn is_completed(&self) -> bool { 487 | // TODO: Add a similar variant for Relaxed? 488 | self.status.load(Ordering::Acquire) == Status::Complete 489 | } 490 | } 491 | 492 | impl From for Once { 493 | fn from(data: T) -> Self { 494 | Self::initialized(data) 495 | } 496 | } 497 | 498 | impl Drop for Once { 499 | fn drop(&mut self) { 500 | // No need to do any atomic access here, we have &mut! 501 | if *self.status.get_mut() == Status::Complete { 502 | unsafe { 503 | //TODO: Use MaybeUninit::assume_init_drop once stabilised 504 | core::ptr::drop_in_place((*self.data.get()).as_mut_ptr()); 505 | } 506 | } 507 | } 508 | } 509 | 510 | struct Finish<'a> { 511 | status: &'a AtomicStatus, 512 | } 513 | 514 | impl<'a> Drop for Finish<'a> { 515 | fn drop(&mut self) { 516 | // While using Relaxed here would most likely not be an issue, we use SeqCst anyway. 517 | // This is mainly because panics are not meant to be fast at all, but also because if 518 | // there were to be a compiler bug which reorders accesses within the same thread, 519 | // where it should not, we want to be sure that the panic really is handled, and does 520 | // not cause additional problems. SeqCst will therefore help guarding against such 521 | // bugs. 522 | self.status.store(Status::Panicked, Ordering::SeqCst); 523 | } 524 | } 525 | 526 | #[cfg(test)] 527 | mod tests { 528 | use std::prelude::v1::*; 529 | 530 | use std::sync::atomic::AtomicU32; 531 | use std::sync::mpsc::channel; 532 | use std::sync::Arc; 533 | use std::thread; 534 | 535 | use super::*; 536 | 537 | #[test] 538 | fn smoke_once() { 539 | static O: Once = Once::new(); 540 | let mut a = 0; 541 | O.call_once(|| a += 1); 542 | assert_eq!(a, 1); 543 | O.call_once(|| a += 1); 544 | assert_eq!(a, 1); 545 | } 546 | 547 | #[test] 548 | fn smoke_once_value() { 549 | static O: Once = Once::new(); 550 | let a = O.call_once(|| 1); 551 | assert_eq!(*a, 1); 552 | let b = O.call_once(|| 2); 553 | assert_eq!(*b, 1); 554 | } 555 | 556 | #[test] 557 | fn stampede_once() { 558 | static O: Once = Once::new(); 559 | static mut RUN: bool = false; 560 | 561 | let (tx, rx) = channel(); 562 | let mut ts = Vec::new(); 563 | for _ in 0..10 { 564 | let tx = tx.clone(); 565 | ts.push(thread::spawn(move || { 566 | for _ in 0..4 { 567 | thread::yield_now() 568 | } 569 | unsafe { 570 | O.call_once(|| { 571 | assert!(!RUN); 572 | RUN = true; 573 | }); 574 | assert!(RUN); 575 | } 576 | tx.send(()).unwrap(); 577 | })); 578 | } 579 | 580 | unsafe { 581 | O.call_once(|| { 582 | assert!(!RUN); 583 | RUN = true; 584 | }); 585 | assert!(RUN); 586 | } 587 | 588 | for _ in 0..10 { 589 | rx.recv().unwrap(); 590 | } 591 | 592 | for t in ts { 593 | t.join().unwrap(); 594 | } 595 | } 596 | 597 | #[test] 598 | fn get() { 599 | static INIT: Once = Once::new(); 600 | 601 | assert!(INIT.get().is_none()); 602 | INIT.call_once(|| 2); 603 | assert_eq!(INIT.get().map(|r| *r), Some(2)); 604 | } 605 | 606 | #[test] 607 | fn get_no_wait() { 608 | static INIT: Once = Once::new(); 609 | 610 | assert!(INIT.get().is_none()); 611 | let t = thread::spawn(move || { 612 | INIT.call_once(|| { 613 | thread::sleep(std::time::Duration::from_secs(3)); 614 | 42 615 | }); 616 | }); 617 | assert!(INIT.get().is_none()); 618 | 619 | t.join().unwrap(); 620 | } 621 | 622 | #[test] 623 | fn poll() { 624 | static INIT: Once = Once::new(); 625 | 626 | assert!(INIT.poll().is_none()); 627 | INIT.call_once(|| 3); 628 | assert_eq!(INIT.poll().map(|r| *r), Some(3)); 629 | } 630 | 631 | #[test] 632 | fn wait() { 633 | static INIT: Once = Once::new(); 634 | 635 | let t = std::thread::spawn(|| { 636 | assert_eq!(*INIT.wait(), 3); 637 | assert!(INIT.is_completed()); 638 | }); 639 | 640 | for _ in 0..4 { 641 | thread::yield_now() 642 | } 643 | 644 | assert!(INIT.poll().is_none()); 645 | INIT.call_once(|| 3); 646 | 647 | t.join().unwrap(); 648 | } 649 | 650 | #[test] 651 | fn panic() { 652 | use std::panic; 653 | 654 | static INIT: Once = Once::new(); 655 | 656 | // poison the once 657 | let t = panic::catch_unwind(|| { 658 | INIT.call_once(|| panic!()); 659 | }); 660 | assert!(t.is_err()); 661 | 662 | // poisoning propagates 663 | let t = panic::catch_unwind(|| { 664 | INIT.call_once(|| {}); 665 | }); 666 | assert!(t.is_err()); 667 | } 668 | 669 | #[test] 670 | fn init_constant() { 671 | static O: Once = Once::INIT; 672 | let mut a = 0; 673 | O.call_once(|| a += 1); 674 | assert_eq!(a, 1); 675 | O.call_once(|| a += 1); 676 | assert_eq!(a, 1); 677 | } 678 | 679 | static mut CALLED: bool = false; 680 | 681 | struct DropTest {} 682 | 683 | impl Drop for DropTest { 684 | fn drop(&mut self) { 685 | unsafe { 686 | CALLED = true; 687 | } 688 | } 689 | } 690 | 691 | #[test] 692 | fn try_call_once_err() { 693 | let once = Once::<_, Spin>::new(); 694 | let shared = Arc::new((once, AtomicU32::new(0))); 695 | 696 | let (tx, rx) = channel(); 697 | 698 | let t0 = { 699 | let shared = shared.clone(); 700 | thread::spawn(move || { 701 | let (once, called) = &*shared; 702 | 703 | once.try_call_once(|| { 704 | called.fetch_add(1, Ordering::AcqRel); 705 | tx.send(()).unwrap(); 706 | thread::sleep(std::time::Duration::from_millis(50)); 707 | Err(()) 708 | }) 709 | .ok(); 710 | }) 711 | }; 712 | 713 | let t1 = { 714 | let shared = shared.clone(); 715 | thread::spawn(move || { 716 | rx.recv().unwrap(); 717 | let (once, called) = &*shared; 718 | assert_eq!( 719 | called.load(Ordering::Acquire), 720 | 1, 721 | "leader thread did not run first" 722 | ); 723 | 724 | once.call_once(|| { 725 | called.fetch_add(1, Ordering::AcqRel); 726 | }); 727 | }) 728 | }; 729 | 730 | t0.join().unwrap(); 731 | t1.join().unwrap(); 732 | 733 | assert_eq!(shared.1.load(Ordering::Acquire), 2); 734 | } 735 | 736 | // This is sort of two test cases, but if we write them as separate test methods 737 | // they can be executed concurrently and then fail some small fraction of the 738 | // time. 739 | #[test] 740 | fn drop_occurs_and_skip_uninit_drop() { 741 | unsafe { 742 | CALLED = false; 743 | } 744 | 745 | { 746 | let once = Once::<_>::new(); 747 | once.call_once(|| DropTest {}); 748 | } 749 | 750 | assert!(unsafe { CALLED }); 751 | // Now test that we skip drops for the uninitialized case. 752 | unsafe { 753 | CALLED = false; 754 | } 755 | 756 | let once = Once::::new(); 757 | drop(once); 758 | 759 | assert!(unsafe { !CALLED }); 760 | } 761 | 762 | #[test] 763 | fn call_once_test() { 764 | for _ in 0..20 { 765 | use std::sync::atomic::AtomicUsize; 766 | use std::sync::Arc; 767 | use std::time::Duration; 768 | let share = Arc::new(AtomicUsize::new(0)); 769 | let once = Arc::new(Once::<_, Spin>::new()); 770 | let mut hs = Vec::new(); 771 | for _ in 0..8 { 772 | let h = thread::spawn({ 773 | let share = share.clone(); 774 | let once = once.clone(); 775 | move || { 776 | thread::sleep(Duration::from_millis(10)); 777 | once.call_once(|| { 778 | share.fetch_add(1, Ordering::SeqCst); 779 | }); 780 | } 781 | }); 782 | hs.push(h); 783 | } 784 | for h in hs { 785 | h.join().unwrap(); 786 | } 787 | assert_eq!(1, share.load(Ordering::SeqCst)); 788 | } 789 | } 790 | } 791 | -------------------------------------------------------------------------------- /src/relax.rs: -------------------------------------------------------------------------------- 1 | //! Strategies that determine the behaviour of locks when encountering contention. 2 | 3 | /// A trait implemented by spinning relax strategies. 4 | pub trait RelaxStrategy { 5 | /// Perform the relaxing operation during a period of contention. 6 | fn relax(); 7 | } 8 | 9 | /// A strategy that rapidly spins while informing the CPU that it should power down non-essential components via 10 | /// [`core::hint::spin_loop`]. 11 | /// 12 | /// Note that spinning is a 'dumb' strategy and most schedulers cannot correctly differentiate it from useful work, 13 | /// thereby misallocating even more CPU time to the spinning process. This is known as 14 | /// ['priority inversion'](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html). 15 | /// 16 | /// If you see signs that priority inversion is occurring, consider switching to [`Yield`] or, even better, not using a 17 | /// spinlock at all and opting for a proper scheduler-aware lock. Remember also that different targets, operating 18 | /// systems, schedulers, and even the same scheduler with different workloads will exhibit different behaviour. Just 19 | /// because priority inversion isn't occurring in your tests does not mean that it will not occur. Use a scheduler- 20 | /// aware lock if at all possible. 21 | pub struct Spin; 22 | 23 | impl RelaxStrategy for Spin { 24 | #[inline(always)] 25 | fn relax() { 26 | // Use the deprecated spin_loop_hint() to ensure that we don't get 27 | // a higher MSRV than we need to. 28 | #[allow(deprecated)] 29 | core::sync::atomic::spin_loop_hint(); 30 | } 31 | } 32 | 33 | /// A strategy that yields the current time slice to the scheduler in favour of other threads or processes. 34 | /// 35 | /// This is generally used as a strategy for minimising power consumption and priority inversion on targets that have a 36 | /// standard library available. Note that such targets have scheduler-integrated concurrency primitives available, and 37 | /// you should generally use these instead, except in rare circumstances. 38 | #[cfg(feature = "std")] 39 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 40 | pub struct Yield; 41 | 42 | #[cfg(feature = "std")] 43 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 44 | impl RelaxStrategy for Yield { 45 | #[inline(always)] 46 | fn relax() { 47 | std::thread::yield_now(); 48 | } 49 | } 50 | 51 | /// A strategy that rapidly spins, without telling the CPU to do any powering down. 52 | /// 53 | /// You almost certainly do not want to use this. Use [`Spin`] instead. It exists for completeness and for targets 54 | /// that, for some reason, miscompile or do not support spin hint intrinsics despite attempting to generate code for 55 | /// them (i.e: this is a workaround for possible compiler bugs). 56 | pub struct Loop; 57 | 58 | impl RelaxStrategy for Loop { 59 | #[inline(always)] 60 | fn relax() {} 61 | } 62 | -------------------------------------------------------------------------------- /src/rwlock.rs: -------------------------------------------------------------------------------- 1 | //! A lock that provides data access to either one writer or many readers. 2 | 3 | use crate::{ 4 | atomic::{AtomicUsize, Ordering}, 5 | RelaxStrategy, Spin, 6 | }; 7 | use core::{ 8 | cell::UnsafeCell, 9 | fmt, 10 | marker::PhantomData, 11 | mem, 12 | mem::ManuallyDrop, 13 | ops::{Deref, DerefMut}, 14 | }; 15 | 16 | /// A lock that provides data access to either one writer or many readers. 17 | /// 18 | /// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses 19 | /// spinning for synchronisation instead. Unlike its namespace, this lock does not 20 | /// track lock poisoning. 21 | /// 22 | /// This type of lock allows a number of readers or at most one writer at any 23 | /// point in time. The write portion of this lock typically allows modification 24 | /// of the underlying data (exclusive access) and the read portion of this lock 25 | /// typically allows for read-only access (shared access). 26 | /// 27 | /// The type parameter `T` represents the data that this lock protects. It is 28 | /// required that `T` satisfies `Send` to be shared across tasks and `Sync` to 29 | /// allow concurrent access through readers. The RAII guards returned from the 30 | /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) 31 | /// to allow access to the contained of the lock. 32 | /// 33 | /// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a 34 | /// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) 35 | /// [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions. 36 | /// Writable or upgradeable guards can be downgraded through their respective `downgrade` 37 | /// functions. 38 | /// 39 | /// Based on Facebook's 40 | /// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h). 41 | /// This implementation is unfair to writers - if the lock always has readers, then no writers will 42 | /// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no 43 | /// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken 44 | /// when there are existing readers. However if the lock is that highly contended and writes are 45 | /// crucial then this implementation may be a poor choice. 46 | /// 47 | /// # Examples 48 | /// 49 | /// ``` 50 | /// use spin; 51 | /// 52 | /// let lock = spin::RwLock::new(5); 53 | /// 54 | /// // many reader locks can be held at once 55 | /// { 56 | /// let r1 = lock.read(); 57 | /// let r2 = lock.read(); 58 | /// assert_eq!(*r1, 5); 59 | /// assert_eq!(*r2, 5); 60 | /// } // read locks are dropped at this point 61 | /// 62 | /// // only one write lock may be held, however 63 | /// { 64 | /// let mut w = lock.write(); 65 | /// *w += 1; 66 | /// assert_eq!(*w, 6); 67 | /// } // write lock is dropped here 68 | /// ``` 69 | pub struct RwLock { 70 | phantom: PhantomData, 71 | lock: AtomicUsize, 72 | data: UnsafeCell, 73 | } 74 | 75 | const READER: usize = 1 << 2; 76 | const UPGRADED: usize = 1 << 1; 77 | const WRITER: usize = 1; 78 | 79 | /// A guard that provides immutable data access. 80 | /// 81 | /// When the guard falls out of scope it will decrement the read count, 82 | /// potentially releasing the lock. 83 | pub struct RwLockReadGuard<'a, T: 'a + ?Sized> { 84 | lock: &'a AtomicUsize, 85 | data: *const T, 86 | } 87 | 88 | /// A guard that provides mutable data access. 89 | /// 90 | /// When the guard falls out of scope it will release the lock. 91 | pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> { 92 | phantom: PhantomData, 93 | inner: &'a RwLock, 94 | data: *mut T, 95 | } 96 | 97 | /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. 98 | /// 99 | /// No writers or other upgradeable guards can exist while this is in scope. New reader 100 | /// creation is prevented (to alleviate writer starvation) but there may be existing readers 101 | /// when the lock is acquired. 102 | /// 103 | /// When the guard falls out of scope it will release the lock. 104 | pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> { 105 | phantom: PhantomData, 106 | inner: &'a RwLock, 107 | data: *const T, 108 | } 109 | 110 | // Same unsafe impls as `std::sync::RwLock` 111 | unsafe impl Send for RwLock {} 112 | unsafe impl Sync for RwLock {} 113 | 114 | unsafe impl Send for RwLockWriteGuard<'_, T, R> {} 115 | unsafe impl Sync for RwLockWriteGuard<'_, T, R> {} 116 | 117 | unsafe impl Send for RwLockReadGuard<'_, T> {} 118 | unsafe impl Sync for RwLockReadGuard<'_, T> {} 119 | 120 | unsafe impl Send for RwLockUpgradableGuard<'_, T, R> {} 121 | unsafe impl Sync for RwLockUpgradableGuard<'_, T, R> {} 122 | 123 | impl RwLock { 124 | /// Creates a new spinlock wrapping the supplied data. 125 | /// 126 | /// May be used statically: 127 | /// 128 | /// ``` 129 | /// use spin; 130 | /// 131 | /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(()); 132 | /// 133 | /// fn demo() { 134 | /// let lock = RW_LOCK.read(); 135 | /// // do something with lock 136 | /// drop(lock); 137 | /// } 138 | /// ``` 139 | #[inline] 140 | pub const fn new(data: T) -> Self { 141 | RwLock { 142 | phantom: PhantomData, 143 | lock: AtomicUsize::new(0), 144 | data: UnsafeCell::new(data), 145 | } 146 | } 147 | 148 | /// Consumes this `RwLock`, returning the underlying data. 149 | #[inline] 150 | pub fn into_inner(self) -> T { 151 | // We know statically that there are no outstanding references to 152 | // `self` so there's no need to lock. 153 | let RwLock { data, .. } = self; 154 | data.into_inner() 155 | } 156 | /// Returns a mutable pointer to the underying data. 157 | /// 158 | /// This is mostly meant to be used for applications which require manual unlocking, but where 159 | /// storing both the lock and the pointer to the inner data gets inefficient. 160 | /// 161 | /// While this is safe, writing to the data is undefined behavior unless the current thread has 162 | /// acquired a write lock, and reading requires either a read or write lock. 163 | /// 164 | /// # Example 165 | /// ``` 166 | /// let lock = spin::RwLock::new(42); 167 | /// 168 | /// unsafe { 169 | /// core::mem::forget(lock.write()); 170 | /// 171 | /// assert_eq!(lock.as_mut_ptr().read(), 42); 172 | /// lock.as_mut_ptr().write(58); 173 | /// 174 | /// lock.force_write_unlock(); 175 | /// } 176 | /// 177 | /// assert_eq!(*lock.read(), 58); 178 | /// 179 | /// ``` 180 | #[inline(always)] 181 | pub fn as_mut_ptr(&self) -> *mut T { 182 | self.data.get() 183 | } 184 | } 185 | 186 | impl RwLock { 187 | /// Locks this rwlock with shared read access, blocking the current thread 188 | /// until it can be acquired. 189 | /// 190 | /// The calling thread will be blocked until there are no more writers which 191 | /// hold the lock. There may be other readers currently inside the lock when 192 | /// this method returns. This method does not provide any guarantees with 193 | /// respect to the ordering of whether contentious readers or writers will 194 | /// acquire the lock first. 195 | /// 196 | /// Returns an RAII guard which will release this thread's shared access 197 | /// once it is dropped. 198 | /// 199 | /// ``` 200 | /// let mylock = spin::RwLock::new(0); 201 | /// { 202 | /// let mut data = mylock.read(); 203 | /// // The lock is now locked and the data can be read 204 | /// println!("{}", *data); 205 | /// // The lock is dropped 206 | /// } 207 | /// ``` 208 | #[inline] 209 | pub fn read(&self) -> RwLockReadGuard { 210 | loop { 211 | match self.try_read() { 212 | Some(guard) => return guard, 213 | None => R::relax(), 214 | } 215 | } 216 | } 217 | 218 | /// Lock this rwlock with exclusive write access, blocking the current 219 | /// thread until it can be acquired. 220 | /// 221 | /// This function will not return while other writers or other readers 222 | /// currently have access to the lock. 223 | /// 224 | /// Returns an RAII guard which will drop the write access of this rwlock 225 | /// when dropped. 226 | /// 227 | /// ``` 228 | /// let mylock = spin::RwLock::new(0); 229 | /// { 230 | /// let mut data = mylock.write(); 231 | /// // The lock is now locked and the data can be written 232 | /// *data += 1; 233 | /// // The lock is dropped 234 | /// } 235 | /// ``` 236 | #[inline] 237 | pub fn write(&self) -> RwLockWriteGuard { 238 | loop { 239 | match self.try_write_internal(false) { 240 | Some(guard) => return guard, 241 | None => R::relax(), 242 | } 243 | } 244 | } 245 | 246 | /// Obtain a readable lock guard that can later be upgraded to a writable lock guard. 247 | /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method. 248 | #[inline] 249 | pub fn upgradeable_read(&self) -> RwLockUpgradableGuard { 250 | loop { 251 | match self.try_upgradeable_read() { 252 | Some(guard) => return guard, 253 | None => R::relax(), 254 | } 255 | } 256 | } 257 | } 258 | 259 | impl RwLock { 260 | // Acquire a read lock, returning the new lock value. 261 | fn acquire_reader(&self) -> usize { 262 | // An arbitrary cap that allows us to catch overflows long before they happen 263 | const MAX_READERS: usize = core::usize::MAX / READER / 2; 264 | 265 | let value = self.lock.fetch_add(READER, Ordering::Acquire); 266 | 267 | if value > MAX_READERS * READER { 268 | self.lock.fetch_sub(READER, Ordering::Relaxed); 269 | panic!("Too many lock readers, cannot safely proceed"); 270 | } else { 271 | value 272 | } 273 | } 274 | 275 | /// Attempt to acquire this lock with shared read access. 276 | /// 277 | /// This function will never block and will return immediately if `read` 278 | /// would otherwise succeed. Returns `Some` of an RAII guard which will 279 | /// release the shared access of this thread when dropped, or `None` if the 280 | /// access could not be granted. This method does not provide any 281 | /// guarantees with respect to the ordering of whether contentious readers 282 | /// or writers will acquire the lock first. 283 | /// 284 | /// ``` 285 | /// let mylock = spin::RwLock::new(0); 286 | /// { 287 | /// match mylock.try_read() { 288 | /// Some(data) => { 289 | /// // The lock is now locked and the data can be read 290 | /// println!("{}", *data); 291 | /// // The lock is dropped 292 | /// }, 293 | /// None => (), // no cigar 294 | /// }; 295 | /// } 296 | /// ``` 297 | #[inline] 298 | pub fn try_read(&self) -> Option> { 299 | let value = self.acquire_reader(); 300 | 301 | // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held. 302 | // This helps reduce writer starvation. 303 | if value & (WRITER | UPGRADED) != 0 { 304 | // Lock is taken, undo. 305 | self.lock.fetch_sub(READER, Ordering::Release); 306 | None 307 | } else { 308 | Some(RwLockReadGuard { 309 | lock: &self.lock, 310 | data: unsafe { &*self.data.get() }, 311 | }) 312 | } 313 | } 314 | 315 | /// Return the number of readers that currently hold the lock (including upgradable readers). 316 | /// 317 | /// # Safety 318 | /// 319 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' 320 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. 321 | pub fn reader_count(&self) -> usize { 322 | let state = self.lock.load(Ordering::Relaxed); 323 | state / READER + (state & UPGRADED) / UPGRADED 324 | } 325 | 326 | /// Return the number of writers that currently hold the lock. 327 | /// 328 | /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`. 329 | /// 330 | /// # Safety 331 | /// 332 | /// This function provides no synchronization guarantees and so its result should be considered 'out of date' 333 | /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic. 334 | pub fn writer_count(&self) -> usize { 335 | (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER 336 | } 337 | 338 | /// Force decrement the reader count. 339 | /// 340 | /// # Safety 341 | /// 342 | /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s 343 | /// live, or if called more times than `read` has been called, but can be 344 | /// useful in FFI contexts where the caller doesn't know how to deal with 345 | /// RAII. The underlying atomic operation uses `Ordering::Release`. 346 | #[inline] 347 | pub unsafe fn force_read_decrement(&self) { 348 | debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0); 349 | self.lock.fetch_sub(READER, Ordering::Release); 350 | } 351 | 352 | /// Force unlock exclusive write access. 353 | /// 354 | /// # Safety 355 | /// 356 | /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s 357 | /// live, or if called when there are current readers, but can be useful in 358 | /// FFI contexts where the caller doesn't know how to deal with RAII. The 359 | /// underlying atomic operation uses `Ordering::Release`. 360 | #[inline] 361 | pub unsafe fn force_write_unlock(&self) { 362 | debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0); 363 | self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release); 364 | } 365 | 366 | #[inline(always)] 367 | fn try_write_internal(&self, strong: bool) -> Option> { 368 | if compare_exchange( 369 | &self.lock, 370 | 0, 371 | WRITER, 372 | Ordering::Acquire, 373 | Ordering::Relaxed, 374 | strong, 375 | ) 376 | .is_ok() 377 | { 378 | Some(RwLockWriteGuard { 379 | phantom: PhantomData, 380 | inner: self, 381 | data: unsafe { &mut *self.data.get() }, 382 | }) 383 | } else { 384 | None 385 | } 386 | } 387 | 388 | /// Attempt to lock this rwlock with exclusive write access. 389 | /// 390 | /// This function does not ever block, and it will return `None` if a call 391 | /// to `write` would otherwise block. If successful, an RAII guard is 392 | /// returned. 393 | /// 394 | /// ``` 395 | /// let mylock = spin::RwLock::new(0); 396 | /// { 397 | /// match mylock.try_write() { 398 | /// Some(mut data) => { 399 | /// // The lock is now locked and the data can be written 400 | /// *data += 1; 401 | /// // The lock is implicitly dropped 402 | /// }, 403 | /// None => (), // no cigar 404 | /// }; 405 | /// } 406 | /// ``` 407 | #[inline] 408 | pub fn try_write(&self) -> Option> { 409 | self.try_write_internal(true) 410 | } 411 | 412 | /// Attempt to lock this rwlock with exclusive write access. 413 | /// 414 | /// Unlike [`RwLock::try_write`], this function is allowed to spuriously fail even when acquiring exclusive write access 415 | /// would otherwise succeed, which can result in more efficient code on some platforms. 416 | #[inline] 417 | pub fn try_write_weak(&self) -> Option> { 418 | self.try_write_internal(false) 419 | } 420 | 421 | /// Tries to obtain an upgradeable lock guard. 422 | #[inline] 423 | pub fn try_upgradeable_read(&self) -> Option> { 424 | if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 { 425 | Some(RwLockUpgradableGuard { 426 | phantom: PhantomData, 427 | inner: self, 428 | data: unsafe { &*self.data.get() }, 429 | }) 430 | } else { 431 | // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock. 432 | // When they unlock, they will clear the bit. 433 | None 434 | } 435 | } 436 | 437 | /// Returns a mutable reference to the underlying data. 438 | /// 439 | /// Since this call borrows the `RwLock` mutably, no actual locking needs to 440 | /// take place -- the mutable borrow statically guarantees no locks exist. 441 | /// 442 | /// # Examples 443 | /// 444 | /// ``` 445 | /// let mut lock = spin::RwLock::new(0); 446 | /// *lock.get_mut() = 10; 447 | /// assert_eq!(*lock.read(), 10); 448 | /// ``` 449 | pub fn get_mut(&mut self) -> &mut T { 450 | // We know statically that there are no other references to `self`, so 451 | // there's no need to lock the inner lock. 452 | unsafe { &mut *self.data.get() } 453 | } 454 | } 455 | 456 | impl fmt::Debug for RwLock { 457 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 458 | match self.try_read() { 459 | Some(guard) => write!(f, "RwLock {{ data: ") 460 | .and_then(|()| (&*guard).fmt(f)) 461 | .and_then(|()| write!(f, " }}")), 462 | None => write!(f, "RwLock {{ }}"), 463 | } 464 | } 465 | } 466 | 467 | impl Default for RwLock { 468 | fn default() -> Self { 469 | Self::new(Default::default()) 470 | } 471 | } 472 | 473 | impl From for RwLock { 474 | fn from(data: T) -> Self { 475 | Self::new(data) 476 | } 477 | } 478 | 479 | impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> { 480 | /// Leak the lock guard, yielding a reference to the underlying data. 481 | /// 482 | /// Note that this function will permanently lock the original lock for all but reading locks. 483 | /// 484 | /// ``` 485 | /// let mylock = spin::RwLock::new(0); 486 | /// 487 | /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read()); 488 | /// 489 | /// assert_eq!(*data, 0); 490 | /// ``` 491 | #[inline] 492 | pub fn leak(this: Self) -> &'rwlock T { 493 | let this = ManuallyDrop::new(this); 494 | // Safety: We know statically that only we are referencing data 495 | unsafe { &*this.data } 496 | } 497 | } 498 | 499 | impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> { 500 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 501 | fmt::Debug::fmt(&**self, f) 502 | } 503 | } 504 | 505 | impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> { 506 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 507 | fmt::Display::fmt(&**self, f) 508 | } 509 | } 510 | 511 | impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> { 512 | /// Upgrades an upgradeable lock guard to a writable lock guard. 513 | /// 514 | /// ``` 515 | /// let mylock = spin::RwLock::new(0); 516 | /// 517 | /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable 518 | /// let writable = upgradeable.upgrade(); 519 | /// ``` 520 | #[inline] 521 | pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> { 522 | loop { 523 | self = match self.try_upgrade_internal(false) { 524 | Ok(guard) => return guard, 525 | Err(e) => e, 526 | }; 527 | 528 | R::relax(); 529 | } 530 | } 531 | } 532 | 533 | impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> { 534 | #[inline(always)] 535 | fn try_upgrade_internal(self, strong: bool) -> Result, Self> { 536 | if compare_exchange( 537 | &self.inner.lock, 538 | UPGRADED, 539 | WRITER, 540 | Ordering::Acquire, 541 | Ordering::Relaxed, 542 | strong, 543 | ) 544 | .is_ok() 545 | { 546 | let inner = self.inner; 547 | 548 | // Forget the old guard so its destructor doesn't run (before mutably aliasing data below) 549 | mem::forget(self); 550 | 551 | // Upgrade successful 552 | Ok(RwLockWriteGuard { 553 | phantom: PhantomData, 554 | inner, 555 | data: unsafe { &mut *inner.data.get() }, 556 | }) 557 | } else { 558 | Err(self) 559 | } 560 | } 561 | 562 | /// Tries to upgrade an upgradeable lock guard to a writable lock guard. 563 | /// 564 | /// ``` 565 | /// let mylock = spin::RwLock::new(0); 566 | /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable 567 | /// 568 | /// match upgradeable.try_upgrade() { 569 | /// Ok(writable) => /* upgrade successful - use writable lock guard */ (), 570 | /// Err(upgradeable) => /* upgrade unsuccessful */ (), 571 | /// }; 572 | /// ``` 573 | #[inline] 574 | pub fn try_upgrade(self) -> Result, Self> { 575 | self.try_upgrade_internal(true) 576 | } 577 | 578 | /// Tries to upgrade an upgradeable lock guard to a writable lock guard. 579 | /// 580 | /// Unlike [`RwLockUpgradableGuard::try_upgrade`], this function is allowed to spuriously fail even when upgrading 581 | /// would otherwise succeed, which can result in more efficient code on some platforms. 582 | #[inline] 583 | pub fn try_upgrade_weak(self) -> Result, Self> { 584 | self.try_upgrade_internal(false) 585 | } 586 | 587 | #[inline] 588 | /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin. 589 | /// 590 | /// ``` 591 | /// let mylock = spin::RwLock::new(1); 592 | /// 593 | /// let upgradeable = mylock.upgradeable_read(); 594 | /// assert!(mylock.try_read().is_none()); 595 | /// assert_eq!(*upgradeable, 1); 596 | /// 597 | /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin 598 | /// assert!(mylock.try_read().is_some()); 599 | /// assert_eq!(*readable, 1); 600 | /// ``` 601 | pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> { 602 | // Reserve the read guard for ourselves 603 | self.inner.acquire_reader(); 604 | 605 | let inner = self.inner; 606 | 607 | // Dropping self removes the UPGRADED bit 608 | mem::drop(self); 609 | 610 | RwLockReadGuard { 611 | lock: &inner.lock, 612 | data: unsafe { &*inner.data.get() }, 613 | } 614 | } 615 | 616 | /// Leak the lock guard, yielding a reference to the underlying data. 617 | /// 618 | /// Note that this function will permanently lock the original lock. 619 | /// 620 | /// ``` 621 | /// let mylock = spin::RwLock::new(0); 622 | /// 623 | /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read()); 624 | /// 625 | /// assert_eq!(*data, 0); 626 | /// ``` 627 | #[inline] 628 | pub fn leak(this: Self) -> &'rwlock T { 629 | let this = ManuallyDrop::new(this); 630 | // Safety: We know statically that only we are referencing data 631 | unsafe { &*this.data } 632 | } 633 | } 634 | 635 | impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> { 636 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 637 | fmt::Debug::fmt(&**self, f) 638 | } 639 | } 640 | 641 | impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> { 642 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 643 | fmt::Display::fmt(&**self, f) 644 | } 645 | } 646 | 647 | impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> { 648 | /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin. 649 | /// 650 | /// ``` 651 | /// let mylock = spin::RwLock::new(0); 652 | /// 653 | /// let mut writable = mylock.write(); 654 | /// *writable = 1; 655 | /// 656 | /// let readable = writable.downgrade(); // This is guaranteed not to spin 657 | /// # let readable_2 = mylock.try_read().unwrap(); 658 | /// assert_eq!(*readable, 1); 659 | /// ``` 660 | #[inline] 661 | pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> { 662 | // Reserve the read guard for ourselves 663 | self.inner.acquire_reader(); 664 | 665 | let inner = self.inner; 666 | 667 | // Dropping self removes the UPGRADED bit 668 | mem::drop(self); 669 | 670 | RwLockReadGuard { 671 | lock: &inner.lock, 672 | data: unsafe { &*inner.data.get() }, 673 | } 674 | } 675 | 676 | /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin. 677 | /// 678 | /// ``` 679 | /// let mylock = spin::RwLock::new(0); 680 | /// 681 | /// let mut writable = mylock.write(); 682 | /// *writable = 1; 683 | /// 684 | /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin 685 | /// assert_eq!(*readable, 1); 686 | /// ``` 687 | #[inline] 688 | pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> { 689 | debug_assert_eq!( 690 | self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED), 691 | WRITER 692 | ); 693 | 694 | // Reserve the read guard for ourselves 695 | self.inner.lock.store(UPGRADED, Ordering::Release); 696 | 697 | let inner = self.inner; 698 | 699 | // Dropping self removes the UPGRADED bit 700 | mem::forget(self); 701 | 702 | RwLockUpgradableGuard { 703 | phantom: PhantomData, 704 | inner, 705 | data: unsafe { &*inner.data.get() }, 706 | } 707 | } 708 | 709 | /// Leak the lock guard, yielding a mutable reference to the underlying data. 710 | /// 711 | /// Note that this function will permanently lock the original lock. 712 | /// 713 | /// ``` 714 | /// let mylock = spin::RwLock::new(0); 715 | /// 716 | /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write()); 717 | /// 718 | /// *data = 1; 719 | /// assert_eq!(*data, 1); 720 | /// ``` 721 | #[inline] 722 | pub fn leak(this: Self) -> &'rwlock mut T { 723 | let mut this = ManuallyDrop::new(this); 724 | // Safety: We know statically that only we are referencing data 725 | unsafe { &mut *this.data } 726 | } 727 | } 728 | 729 | impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> { 730 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 731 | fmt::Debug::fmt(&**self, f) 732 | } 733 | } 734 | 735 | impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> { 736 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 737 | fmt::Display::fmt(&**self, f) 738 | } 739 | } 740 | 741 | impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> { 742 | type Target = T; 743 | 744 | fn deref(&self) -> &T { 745 | // Safety: We know statically that only we are referencing data 746 | unsafe { &*self.data } 747 | } 748 | } 749 | 750 | impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> { 751 | type Target = T; 752 | 753 | fn deref(&self) -> &T { 754 | // Safety: We know statically that only we are referencing data 755 | unsafe { &*self.data } 756 | } 757 | } 758 | 759 | impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> { 760 | type Target = T; 761 | 762 | fn deref(&self) -> &T { 763 | // Safety: We know statically that only we are referencing data 764 | unsafe { &*self.data } 765 | } 766 | } 767 | 768 | impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> { 769 | fn deref_mut(&mut self) -> &mut T { 770 | // Safety: We know statically that only we are referencing data 771 | unsafe { &mut *self.data } 772 | } 773 | } 774 | 775 | impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> { 776 | fn drop(&mut self) { 777 | debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0); 778 | self.lock.fetch_sub(READER, Ordering::Release); 779 | } 780 | } 781 | 782 | impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> { 783 | fn drop(&mut self) { 784 | debug_assert_eq!( 785 | self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED), 786 | UPGRADED 787 | ); 788 | self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel); 789 | } 790 | } 791 | 792 | impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> { 793 | fn drop(&mut self) { 794 | debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER); 795 | 796 | // Writer is responsible for clearing both WRITER and UPGRADED bits. 797 | // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held. 798 | self.inner 799 | .lock 800 | .fetch_and(!(WRITER | UPGRADED), Ordering::Release); 801 | } 802 | } 803 | 804 | #[inline(always)] 805 | fn compare_exchange( 806 | atomic: &AtomicUsize, 807 | current: usize, 808 | new: usize, 809 | success: Ordering, 810 | failure: Ordering, 811 | strong: bool, 812 | ) -> Result { 813 | if strong { 814 | atomic.compare_exchange(current, new, success, failure) 815 | } else { 816 | atomic.compare_exchange_weak(current, new, success, failure) 817 | } 818 | } 819 | 820 | #[cfg(feature = "lock_api")] 821 | unsafe impl lock_api_crate::RawRwLock for RwLock<(), R> { 822 | type GuardMarker = lock_api_crate::GuardSend; 823 | 824 | const INIT: Self = Self::new(()); 825 | 826 | #[inline(always)] 827 | fn lock_exclusive(&self) { 828 | // Prevent guard destructor running 829 | core::mem::forget(self.write()); 830 | } 831 | 832 | #[inline(always)] 833 | fn try_lock_exclusive(&self) -> bool { 834 | // Prevent guard destructor running 835 | self.try_write().map(|g| core::mem::forget(g)).is_some() 836 | } 837 | 838 | #[inline(always)] 839 | unsafe fn unlock_exclusive(&self) { 840 | drop(RwLockWriteGuard { 841 | inner: self, 842 | data: &mut (), 843 | phantom: PhantomData, 844 | }); 845 | } 846 | 847 | #[inline(always)] 848 | fn lock_shared(&self) { 849 | // Prevent guard destructor running 850 | core::mem::forget(self.read()); 851 | } 852 | 853 | #[inline(always)] 854 | fn try_lock_shared(&self) -> bool { 855 | // Prevent guard destructor running 856 | self.try_read().map(|g| core::mem::forget(g)).is_some() 857 | } 858 | 859 | #[inline(always)] 860 | unsafe fn unlock_shared(&self) { 861 | drop(RwLockReadGuard { 862 | lock: &self.lock, 863 | data: &(), 864 | }); 865 | } 866 | 867 | #[inline(always)] 868 | fn is_locked(&self) -> bool { 869 | self.lock.load(Ordering::Relaxed) != 0 870 | } 871 | } 872 | 873 | #[cfg(feature = "lock_api")] 874 | unsafe impl lock_api_crate::RawRwLockUpgrade for RwLock<(), R> { 875 | #[inline(always)] 876 | fn lock_upgradable(&self) { 877 | // Prevent guard destructor running 878 | core::mem::forget(self.upgradeable_read()); 879 | } 880 | 881 | #[inline(always)] 882 | fn try_lock_upgradable(&self) -> bool { 883 | // Prevent guard destructor running 884 | self.try_upgradeable_read() 885 | .map(|g| core::mem::forget(g)) 886 | .is_some() 887 | } 888 | 889 | #[inline(always)] 890 | unsafe fn unlock_upgradable(&self) { 891 | drop(RwLockUpgradableGuard { 892 | inner: self, 893 | data: &(), 894 | phantom: PhantomData, 895 | }); 896 | } 897 | 898 | #[inline(always)] 899 | unsafe fn upgrade(&self) { 900 | let tmp_guard = RwLockUpgradableGuard { 901 | inner: self, 902 | data: &(), 903 | phantom: PhantomData, 904 | }; 905 | core::mem::forget(tmp_guard.upgrade()); 906 | } 907 | 908 | #[inline(always)] 909 | unsafe fn try_upgrade(&self) -> bool { 910 | let tmp_guard = RwLockUpgradableGuard { 911 | inner: self, 912 | data: &(), 913 | phantom: PhantomData, 914 | }; 915 | tmp_guard 916 | .try_upgrade() 917 | .map(|g| core::mem::forget(g)) 918 | .is_ok() 919 | } 920 | } 921 | 922 | #[cfg(feature = "lock_api")] 923 | unsafe impl lock_api_crate::RawRwLockDowngrade for RwLock<(), R> { 924 | unsafe fn downgrade(&self) { 925 | let tmp_guard = RwLockWriteGuard { 926 | inner: self, 927 | data: &mut (), 928 | phantom: PhantomData, 929 | }; 930 | core::mem::forget(tmp_guard.downgrade()); 931 | } 932 | } 933 | 934 | #[cfg(feature = "lock_api")] 935 | unsafe impl lock_api_crate::RawRwLockUpgradeDowngrade for RwLock<(), R> { 936 | unsafe fn downgrade_upgradable(&self) { 937 | let tmp_guard = RwLockUpgradableGuard { 938 | inner: self, 939 | data: &(), 940 | phantom: PhantomData, 941 | }; 942 | core::mem::forget(tmp_guard.downgrade()); 943 | } 944 | 945 | unsafe fn downgrade_to_upgradable(&self) { 946 | let tmp_guard = RwLockWriteGuard { 947 | inner: self, 948 | data: &mut (), 949 | phantom: PhantomData, 950 | }; 951 | core::mem::forget(tmp_guard.downgrade_to_upgradeable()); 952 | } 953 | } 954 | 955 | #[cfg(test)] 956 | mod tests { 957 | use std::prelude::v1::*; 958 | 959 | use std::sync::atomic::{AtomicUsize, Ordering}; 960 | use std::sync::mpsc::channel; 961 | use std::sync::Arc; 962 | use std::thread; 963 | 964 | type RwLock = super::RwLock; 965 | 966 | #[derive(Eq, PartialEq, Debug)] 967 | struct NonCopy(i32); 968 | 969 | #[test] 970 | fn smoke() { 971 | let l = RwLock::new(()); 972 | drop(l.read()); 973 | drop(l.write()); 974 | drop((l.read(), l.read())); 975 | drop(l.write()); 976 | } 977 | 978 | // TODO: needs RNG 979 | //#[test] 980 | //fn frob() { 981 | // static R: RwLock = RwLock::new(); 982 | // const N: usize = 10; 983 | // const M: usize = 1000; 984 | // 985 | // let (tx, rx) = channel::<()>(); 986 | // for _ in 0..N { 987 | // let tx = tx.clone(); 988 | // thread::spawn(move|| { 989 | // let mut rng = rand::thread_rng(); 990 | // for _ in 0..M { 991 | // if rng.gen_weighted_bool(N) { 992 | // drop(R.write()); 993 | // } else { 994 | // drop(R.read()); 995 | // } 996 | // } 997 | // drop(tx); 998 | // }); 999 | // } 1000 | // drop(tx); 1001 | // let _ = rx.recv(); 1002 | // unsafe { R.destroy(); } 1003 | //} 1004 | 1005 | #[test] 1006 | fn test_rw_arc() { 1007 | let arc = Arc::new(RwLock::new(0)); 1008 | let arc2 = arc.clone(); 1009 | let (tx, rx) = channel(); 1010 | 1011 | let t = thread::spawn(move || { 1012 | let mut lock = arc2.write(); 1013 | for _ in 0..10 { 1014 | let tmp = *lock; 1015 | *lock = -1; 1016 | thread::yield_now(); 1017 | *lock = tmp + 1; 1018 | } 1019 | tx.send(()).unwrap(); 1020 | }); 1021 | 1022 | // Readers try to catch the writer in the act 1023 | let mut children = Vec::new(); 1024 | for _ in 0..5 { 1025 | let arc3 = arc.clone(); 1026 | children.push(thread::spawn(move || { 1027 | let lock = arc3.read(); 1028 | assert!(*lock >= 0); 1029 | })); 1030 | } 1031 | 1032 | // Wait for children to pass their asserts 1033 | for r in children { 1034 | assert!(r.join().is_ok()); 1035 | } 1036 | 1037 | // Wait for writer to finish 1038 | rx.recv().unwrap(); 1039 | let lock = arc.read(); 1040 | assert_eq!(*lock, 10); 1041 | 1042 | assert!(t.join().is_ok()); 1043 | } 1044 | 1045 | #[test] 1046 | fn test_rw_access_in_unwind() { 1047 | let arc = Arc::new(RwLock::new(1)); 1048 | let arc2 = arc.clone(); 1049 | let _ = thread::spawn(move || -> () { 1050 | struct Unwinder { 1051 | i: Arc>, 1052 | } 1053 | impl Drop for Unwinder { 1054 | fn drop(&mut self) { 1055 | let mut lock = self.i.write(); 1056 | *lock += 1; 1057 | } 1058 | } 1059 | let _u = Unwinder { i: arc2 }; 1060 | panic!(); 1061 | }) 1062 | .join(); 1063 | let lock = arc.read(); 1064 | assert_eq!(*lock, 2); 1065 | } 1066 | 1067 | #[test] 1068 | fn test_rwlock_unsized() { 1069 | let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); 1070 | { 1071 | let b = &mut *rw.write(); 1072 | b[0] = 4; 1073 | b[2] = 5; 1074 | } 1075 | let comp: &[i32] = &[4, 2, 5]; 1076 | assert_eq!(&*rw.read(), comp); 1077 | } 1078 | 1079 | #[test] 1080 | fn test_rwlock_try_write() { 1081 | use std::mem::drop; 1082 | 1083 | let lock = RwLock::new(0isize); 1084 | let read_guard = lock.read(); 1085 | 1086 | let write_result = lock.try_write(); 1087 | match write_result { 1088 | None => (), 1089 | Some(_) => assert!( 1090 | false, 1091 | "try_write should not succeed while read_guard is in scope" 1092 | ), 1093 | } 1094 | 1095 | drop(read_guard); 1096 | } 1097 | 1098 | #[test] 1099 | fn test_rw_try_read() { 1100 | let m = RwLock::new(0); 1101 | ::std::mem::forget(m.write()); 1102 | assert!(m.try_read().is_none()); 1103 | } 1104 | 1105 | #[test] 1106 | fn test_into_inner() { 1107 | let m = RwLock::new(NonCopy(10)); 1108 | assert_eq!(m.into_inner(), NonCopy(10)); 1109 | } 1110 | 1111 | #[test] 1112 | fn test_into_inner_drop() { 1113 | struct Foo(Arc); 1114 | impl Drop for Foo { 1115 | fn drop(&mut self) { 1116 | self.0.fetch_add(1, Ordering::SeqCst); 1117 | } 1118 | } 1119 | let num_drops = Arc::new(AtomicUsize::new(0)); 1120 | let m = RwLock::new(Foo(num_drops.clone())); 1121 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 1122 | { 1123 | let _inner = m.into_inner(); 1124 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 1125 | } 1126 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); 1127 | } 1128 | 1129 | #[test] 1130 | fn test_force_read_decrement() { 1131 | let m = RwLock::new(()); 1132 | ::std::mem::forget(m.read()); 1133 | ::std::mem::forget(m.read()); 1134 | ::std::mem::forget(m.read()); 1135 | assert!(m.try_write().is_none()); 1136 | unsafe { 1137 | m.force_read_decrement(); 1138 | m.force_read_decrement(); 1139 | } 1140 | assert!(m.try_write().is_none()); 1141 | unsafe { 1142 | m.force_read_decrement(); 1143 | } 1144 | assert!(m.try_write().is_some()); 1145 | } 1146 | 1147 | #[test] 1148 | fn test_force_write_unlock() { 1149 | let m = RwLock::new(()); 1150 | ::std::mem::forget(m.write()); 1151 | assert!(m.try_read().is_none()); 1152 | unsafe { 1153 | m.force_write_unlock(); 1154 | } 1155 | assert!(m.try_read().is_some()); 1156 | } 1157 | 1158 | #[test] 1159 | fn test_upgrade_downgrade() { 1160 | let m = RwLock::new(()); 1161 | { 1162 | let _r = m.read(); 1163 | let upg = m.try_upgradeable_read().unwrap(); 1164 | assert!(m.try_read().is_none()); 1165 | assert!(m.try_write().is_none()); 1166 | assert!(upg.try_upgrade().is_err()); 1167 | } 1168 | { 1169 | let w = m.write(); 1170 | assert!(m.try_upgradeable_read().is_none()); 1171 | let _r = w.downgrade(); 1172 | assert!(m.try_upgradeable_read().is_some()); 1173 | assert!(m.try_read().is_some()); 1174 | assert!(m.try_write().is_none()); 1175 | } 1176 | { 1177 | let _u = m.upgradeable_read(); 1178 | assert!(m.try_upgradeable_read().is_none()); 1179 | } 1180 | 1181 | assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok()); 1182 | } 1183 | } 1184 | --------------------------------------------------------------------------------