├── .github └── workflows │ ├── release-plz.yml │ └── rust.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benchmark ├── Cargo.toml └── src │ ├── args.rs │ ├── mutex.rs │ └── rwlock.rs ├── bors.toml ├── core ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── build.rs └── src │ ├── lib.rs │ ├── parking_lot.rs │ ├── spinwait.rs │ ├── thread_parker │ ├── generic.rs │ ├── linux.rs │ ├── mod.rs │ ├── redox.rs │ ├── sgx.rs │ ├── unix.rs │ ├── wasm.rs │ ├── wasm_atomic.rs │ └── windows │ │ ├── bindings.rs │ │ ├── keyed_event.rs │ │ ├── mod.rs │ │ └── waitaddress.rs │ ├── util.rs │ └── word_lock.rs ├── lock_api ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── build.rs └── src │ ├── lib.rs │ ├── mutex.rs │ ├── remutex.rs │ └── rwlock.rs ├── release-plz.toml ├── src ├── condvar.rs ├── deadlock.rs ├── elision.rs ├── fair_mutex.rs ├── lib.rs ├── mutex.rs ├── once.rs ├── raw_fair_mutex.rs ├── raw_mutex.rs ├── raw_rwlock.rs ├── remutex.rs ├── rwlock.rs └── util.rs └── tests ├── issue_203.rs └── issue_392.rs /.github/workflows/release-plz.yml: -------------------------------------------------------------------------------- 1 | name: Release-plz 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | release-plz-release: 10 | name: Release-plz release 11 | runs-on: ubuntu-latest 12 | if: ${{ github.repository_owner == 'Amanieu' }} 13 | permissions: 14 | contents: write 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | - name: Install Rust toolchain 21 | uses: dtolnay/rust-toolchain@stable 22 | - name: Run release-plz 23 | uses: release-plz/action@v0.5 24 | with: 25 | command: release 26 | env: 27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 28 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 29 | 30 | release-plz-pr: 31 | name: Release-plz PR 32 | runs-on: ubuntu-latest 33 | if: ${{ github.repository_owner == 'Amanieu' }} 34 | 
permissions: 35 | pull-requests: write 36 | contents: write 37 | concurrency: 38 | group: release-plz-${{ github.ref }} 39 | cancel-in-progress: false 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v4 43 | with: 44 | fetch-depth: 0 45 | - name: Install Rust toolchain 46 | uses: dtolnay/rust-toolchain@stable 47 | - name: Run release-plz 48 | uses: release-plz/action@v0.5 49 | with: 50 | command: release-pr 51 | env: 52 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 53 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 54 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: 6 | - trying 7 | - staging 8 | pull_request: 9 | 10 | env: 11 | CARGO_INCREMENTAL: 0 12 | RUST_TEST_THREADS: 1 13 | 14 | jobs: 15 | build_tier_one: 16 | runs-on: ${{ matrix.os }}-latest 17 | strategy: 18 | matrix: 19 | os: [ubuntu, macos, windows] 20 | channel: [stable, beta, nightly] 21 | feature: [arc_lock, serde, deadlock_detection] 22 | include: 23 | - channel: nightly 24 | feature: nightly 25 | os: ubuntu 26 | - channel: nightly 27 | feature: hardware-lock-elision 28 | os: ubuntu 29 | 30 | steps: 31 | - uses: actions/checkout@v4 32 | - run: rustup default ${{ matrix.channel }} 33 | - run: cargo build --all 34 | - run: cargo test --all 35 | - run: cargo build --all --features ${{ matrix.feature }} 36 | - run: cargo test --all --features ${{ matrix.feature }} 37 | if: matrix.feature == 'nightly' 38 | - run: cargo install cargo-msrv 39 | - run: cargo msrv --workspace verify 40 | build_other_platforms: 41 | runs-on: ubuntu-latest 42 | strategy: 43 | matrix: 44 | target: 45 | - wasm32-unknown-unknown 46 | - x86_64-fortanix-unknown-sgx 47 | - x86_64-unknown-redox 48 | - x86_64-linux-android 49 | steps: 50 | - uses: actions/checkout@v4 51 | - run: rustup default nightly 52 | - 
run: rustup target add ${{ matrix.target }} 53 | - run: cargo build --workspace --target ${{ matrix.target }} --features nightly 54 | build_docs: 55 | runs-on: ubuntu-latest 56 | steps: 57 | - uses: actions/checkout@v4 58 | - run: rustup default nightly 59 | - run: cargo doc --workspace --features arc_lock,serde,deadlock_detection --no-deps -p parking_lot -p parking_lot_core -p lock_api 60 | benchmark: 61 | runs-on: ubuntu-latest 62 | steps: 63 | - uses: actions/checkout@v4 64 | - run: rustup default nightly 65 | - run: | 66 | cd benchmark 67 | cargo run --release --bin mutex -- 2 1 0 1 2 68 | cargo run --release --bin rwlock -- 1 1 1 0 1 2 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
7 | 8 | ## [Unreleased] 9 | 10 | ## `parking_lot` - [0.12.4](https://github.com/Amanieu/parking_lot/compare/parking_lot-v0.12.3...parking_lot-v0.12.4) - 2025-05-29 11 | 12 | - Fix parked upgraders potentially not being woken up after a write lock 13 | - Fix clearing `PARKED_WRITER_BIT` after a timeout 14 | 15 | ## `parking_lot_core` - [0.9.11](https://github.com/Amanieu/parking_lot/compare/parking_lot_core-v0.9.10...parking_lot_core-v0.9.11) - 2025-05-29 16 | 17 | - Use Release/Acquire ordering in thread_parker::windows::Backend::create 18 | - Remove warnings due to new lint on unknown cfgs 19 | 20 | ## `lock_api` - [0.4.13](https://github.com/Amanieu/parking_lot/compare/lock_api-v0.4.12...lock_api-v0.4.13) - 2025-05-29 21 | 22 | - Remove warnings due to new lint on unknown cfgs 23 | 24 | ## parking_lot 0.12.3 (2024-05-24) 25 | 26 | - Export types provided by arc_lock feature (#442) 27 | 28 | ## parking_lot 0.12.2, parking_lot_core 0.9.10, lock_api 0.4.12 (2024-04-15) 29 | 30 | - Fixed panic when calling `with_upgraded` twice on an `ArcRwLockUpgradableReadGuard` (#431) 31 | - Fixed `RwLockUpgradableReadGuard::with_upgraded` 32 | - Added lock_api::{Mutex, ReentrantMutex, RwLock}::from_raw methods (#429) 33 | - Added Apple visionOS support (#433) 34 | 35 | ## parking_lot_core 0.9.9, lock_api 0.4.11 (2023-10-18) 36 | 37 | - Fixed `RwLockUpgradableReadGuard::with_upgraded`. (#393) 38 | - Fixed `ReentrantMutex::bump` lock count. (#390) 39 | - Added methods to unsafely create a lock guard out of thin air. (#403) 40 | - Added support for Apple tvOS. (#405) 41 | 42 | ## parking_lot_core 0.9.8, lock_api 0.4.10 (2023-06-05) 43 | 44 | - Mark guards with `#[clippy::has_significant_drop]` (#369, #371) 45 | - Removed windows-sys dependency (#374, #378) 46 | - Add `atomic_usize` default feature to support platforms without atomics. 
(#380) 47 | - Add with_upgraded API to upgradable read locks (#386) 48 | - Make RwLock guards Sync again (#370) 49 | 50 | ## parking_lot_core 0.9.7 (2023-02-01) 51 | 52 | - Update windows-sys dependency to 0.45. (#368) 53 | 54 | ## parking_lot_core 0.9.6 (2023-01-11) 55 | 56 | - Add support for watchOS. (#367) 57 | 58 | ## parking_lot_core 0.9.5 (2022-11-29) 59 | 60 | - Update use of `libc::timespec` to prepare for future libc version (#363) 61 | 62 | ## parking_lot_core 0.9.4 (2022-10-18) 63 | 64 | - Bump windows-sys dependency to 0.42. (#356) 65 | 66 | ## lock_api 0.4.9 (2022-09-20) 67 | 68 | - Fixed `ReentrantMutexGuard::try_map` signature (#355) 69 | 70 | ## lock_api 0.4.8 (2022-08-28) 71 | 72 | - Fixed unsound `Sync`/`Send` impls for `ArcMutexGuard`. (#349) 73 | - Added `ArcMutexGuard::into_arc`. (#350) 74 | 75 | ## parking_lot 0.12.1 (2022-05-31) 76 | 77 | - Fixed incorrect memory ordering in `RwLock`. (#344) 78 | - Added `Condvar::wait_while` convenience methods (#343) 79 | 80 | ## parking_lot_core 0.9.3 (2022-04-30) 81 | 82 | - Bump windows-sys dependency to 0.36. (#339) 83 | 84 | ## parking_lot_core 0.9.2, lock_api 0.4.7 (2022-03-25) 85 | 86 | - Enable const new() on lock types on stable. (#325) 87 | - Added `MutexGuard::leak` function. (#333) 88 | - Bump windows-sys dependency to 0.34. (#331) 89 | - Bump petgraph dependency to 0.6. (#326) 90 | - Don't use pthread attributes on the espidf platform. (#319) 91 | 92 | ## parking_lot_core 0.9.1 (2022-02-06) 93 | 94 | - Bump windows-sys dependency to 0.32. (#316) 95 | 96 | ## parking_lot 0.12.0, parking_lot_core 0.9.0, lock_api 0.4.6 (2022-01-28) 97 | 98 | - The MSRV is bumped to 1.49.0. 99 | - Disabled eventual fairness on wasm32-unknown-unknown. (#302) 100 | - Added a rwlock method to report if lock is held exclusively. (#303) 101 | - Use new `asm!` macro. (#304) 102 | - Use windows-rs instead of winapi for faster builds. (#311) 103 | - Moved hardware lock elision support to a separate Cargo feature. 
(#313) 104 | - Removed use of deprecated `spin_loop_hint`. (#314) 105 | 106 | ## parking_lot 0.11.2, parking_lot_core 0.8.4, lock_api 0.4.5 (2021-08-28) 107 | 108 | - Fixed incorrect memory orderings on `RwLock` and `WordLock`. (#294, #292) 109 | - Added `Arc`-based lock guards. (#291) 110 | - Added workaround for TSan's lack of support for `fence`. (#292) 111 | 112 | ## lock_api 0.4.4 (2021-05-01) 113 | 114 | - Update for latest nightly. (#281) 115 | 116 | ## lock_api 0.4.3 (2021-04-03) 117 | 118 | - Added `[Raw]ReentrantMutex::is_owned`. (#280) 119 | 120 | ## parking_lot_core 0.8.3 (2021-02-12) 121 | 122 | - Updated smallvec to 1.6. (#276) 123 | 124 | ## parking_lot_core 0.8.2 (2020-12-21) 125 | 126 | - Fixed assertion failure on OpenBSD. (#270) 127 | 128 | ## parking_lot_core 0.8.1 (2020-12-04) 129 | 130 | - Removed deprecated CloudABI support. (#263) 131 | - Fixed build on wasm32-unknown-unknown. (#265) 132 | - Relaxed dependency on `smallvec`. (#266) 133 | 134 | ## parking_lot 0.11.1, lock_api 0.4.2 (2020-11-18) 135 | 136 | - Fix bounds on Send and Sync impls for lock guards. (#262) 137 | - Fix incorrect memory ordering in `RwLock`. (#260) 138 | 139 | ## lock_api 0.4.1 (2020-07-06) 140 | 141 | - Add `data_ptr` method to lock types to allow unsafely accessing the inner data 142 | without a guard. (#247) 143 | 144 | ## parking_lot 0.11.0, parking_lot_core 0.8.0, lock_api 0.4.0 (2020-06-23) 145 | 146 | - Add `is_locked` method to mutex types. (#235) 147 | - Make `RawReentrantMutex` public. (#233) 148 | - Allow lock guard to be sent to another thread with the `send_guard` feature. (#240) 149 | - Use `Instant` type from the `instant` crate on wasm32-unknown-unknown. (#231) 150 | - Remove deprecated and unsound `MappedRwLockWriteGuard::downgrade`. (#244) 151 | - Most methods on the `Raw*` traits have been made unsafe since they assume 152 | the current thread holds the lock. 
(#243) 153 | 154 | ## parking_lot_core 0.7.2 (2020-04-21) 155 | 156 | - Add support for `wasm32-unknown-unknown` under the "nightly" feature. (#226) 157 | 158 | ## parking_lot 0.10.2 (2020-04-10) 159 | 160 | - Update minimum version of `lock_api`. 161 | 162 | ## parking_lot 0.10.1, parking_lot_core 0.7.1, lock_api 0.3.4 (2020-04-10) 163 | 164 | - Add methods to construct `Mutex`, `RwLock`, etc in a `const` context. (#217) 165 | - Add `FairMutex` which always uses fair unlocking. (#204) 166 | - Fixed panic with deadlock detection on macOS. (#203) 167 | - Fixed incorrect synchronization in `create_hashtable`. (#210) 168 | - Use `llvm_asm!` instead of the deprecated `asm!`. (#223) 169 | 170 | ## lock_api 0.3.3 (2020-01-04) 171 | 172 | - Deprecate unsound `MappedRwLockWriteGuard::downgrade` (#198) 173 | 174 | ## parking_lot 0.10.0, parking_lot_core 0.7.0, lock_api 0.3.2 (2019-11-25) 175 | 176 | - Upgrade smallvec dependency to 1.0 in parking_lot_core. 177 | - Replace all usage of `mem::uninitialized` with `mem::MaybeUninit`. 178 | - The minimum required Rust version is bumped to 1.36. Because of the above two changes. 179 | - Make methods on `WaitTimeoutResult` and `OnceState` take `self` by value instead of reference. 180 | 181 | ## parking_lot_core 0.6.2 (2019-07-22) 182 | 183 | - Fixed compile error on Windows with old cfg_if version. (#164) 184 | 185 | ## parking_lot_core 0.6.1 (2019-07-17) 186 | 187 | - Fixed Android build. (#163) 188 | 189 | ## parking_lot 0.9.0, parking_lot_core 0.6.0, lock_api 0.3.1 (2019-07-14) 190 | 191 | - Re-export lock_api (0.3.1) from parking_lot (#150) 192 | - Removed (non-dev) dependency on rand crate for fairness mechanism, by 193 | including a simple xorshift PRNG in core (#144) 194 | - Android now uses the futex-based ThreadParker. (#140) 195 | - Fixed CloudABI ThreadParker. 
(#140) 196 | - Fix race condition in lock_api::ReentrantMutex (da16c2c7) 197 | 198 | ## lock_api 0.3.0 (2019-07-03, _yanked_) 199 | 200 | - Use NonZeroUsize in GetThreadId::nonzero_thread_id (#148) 201 | - Debug assert lock_count in ReentrantMutex (#148) 202 | - Tag as `unsafe` and document some internal methods (#148) 203 | - This release was _yanked_ due to a regression in ReentrantMutex (da16c2c7) 204 | 205 | ## parking_lot 0.8.1 (2019-07-03, _yanked_) 206 | 207 | - Re-export lock_api (0.3.0) from parking_lot (#150) 208 | - This release was _yanked_ from crates.io due to unexpected breakage (#156) 209 | 210 | ## parking_lot 0.8.0, parking_lot_core 0.5.0, lock_api 0.2.0 (2019-05-04) 211 | 212 | - Fix race conditions in deadlock detection. 213 | - Support for more platforms by adding ThreadParker implementations for 214 | Wasm, Redox, SGX and CloudABI. 215 | - Drop support for older Rust. parking_lot now requires 1.31 and is a 216 | Rust 2018 edition crate (#122). 217 | - Disable the owning_ref feature by default. 218 | - Fix was_last_thread value in the timeout callback of park() (#129). 219 | - Support single byte Mutex/Once on stable Rust when compiler is at least 220 | version 1.34. 221 | - Make Condvar::new and Once::new const fns on stable Rust and remove 222 | ONCE_INIT (#134). 223 | - Add optional Serde support (#135). 224 | 225 | ## parking_lot 0.7.1 (2019-01-01) 226 | 227 | - Fixed potential deadlock when upgrading a RwLock. 228 | - Fixed overflow panic on very long timeouts (#111). 229 | 230 | ## parking_lot 0.7.0, parking_lot_core 0.4.0 (2018-11-26) 231 | 232 | - Return if or how many threads were notified from `Condvar::notify_*` 233 | 234 | ## parking_lot 0.6.3 (2018-07-18) 235 | 236 | - Export `RawMutex`, `RawRwLock` and `RawThreadId`. 
237 | 238 | ## parking_lot 0.6.2 (2018-06-18) 239 | 240 | - Enable `lock_api/nightly` feature from `parking_lot/nightly` (#79) 241 | 242 | ## parking_lot 0.6.1 (2018-06-08) 243 | 244 | Added missing typedefs for mapped lock guards: 245 | 246 | - `MappedMutexGuard` 247 | - `MappedReentrantMutexGuard` 248 | - `MappedRwLockReadGuard` 249 | - `MappedRwLockWriteGuard` 250 | 251 | ## parking_lot 0.6.0 (2018-06-08) 252 | 253 | This release moves most of the code for type-safe `Mutex` and `RwLock` types 254 | into a separate crate called `lock_api`. This new crate is compatible with 255 | `no_std` and provides `Mutex` and `RwLock` type-safe wrapper types from a raw 256 | mutex type which implements the `RawMutex` or `RawRwLock` trait. The API 257 | provided by the wrapper types can be extended by implementing more traits on 258 | the raw mutex type which provide more functionality (e.g. `RawMutexTimed`). See 259 | the crate documentation for more details. 260 | 261 | There are also several major changes: 262 | 263 | - The minimum required Rust version is bumped to 1.26. 264 | - All methods on `MutexGuard` (and other guard types) are no longer inherent 265 | methods and must be called as `MutexGuard::method(self)`. This avoids 266 | conflicts with methods from the inner type. 267 | - `MutexGuard` (and other guard types) add the `unlocked` method which 268 | temporarily unlocks a mutex, runs the given closure, and then re-locks the 269 | mutex. 270 | - `MutexGuard` (and other guard types) add the `bump` method which gives a 271 | chance for other threads to acquire the mutex by temporarily unlocking it and 272 | re-locking it. However this is optimized for the common case where there are 273 | no threads waiting on the lock, in which case no unlocking is performed. 274 | - `MutexGuard` (and other guard types) add the `map` method which returns a 275 | `MappedMutexGuard` which holds only a subset of the original locked type. 
The 276 | `MappedMutexGuard` type is identical to `MutexGuard` except that it does not 277 | support the `unlocked` and `bump` methods, and can't be used with `CondVar`. 278 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parking_lot" 3 | version = "0.12.4" 4 | authors = ["Amanieu d'Antras "] 5 | description = "More compact and efficient implementations of the standard synchronization primitives." 6 | license = "MIT OR Apache-2.0" 7 | repository = "https://github.com/Amanieu/parking_lot" 8 | readme = "README.md" 9 | keywords = ["mutex", "condvar", "rwlock", "once", "thread"] 10 | categories = ["concurrency"] 11 | edition = "2021" 12 | rust-version = "1.64" 13 | 14 | [package.metadata.docs.rs] 15 | features = ["arc_lock", "serde", "deadlock_detection"] 16 | rustdoc-args = ["--generate-link-to-definition"] 17 | 18 | [package.metadata.playground] 19 | features = ["arc_lock", "serde", "deadlock_detection"] 20 | 21 | [dependencies] 22 | parking_lot_core = { path = "core", version = "0.9.11" } 23 | lock_api = { path = "lock_api", version = "0.4.13" } 24 | 25 | [dev-dependencies] 26 | rand = "0.8.3" 27 | 28 | # Used when testing out serde support. 
29 | bincode = "1.3.3" 30 | 31 | [features] 32 | default = [] 33 | arc_lock = ["lock_api/arc_lock"] 34 | owning_ref = ["lock_api/owning_ref"] 35 | nightly = ["parking_lot_core/nightly", "lock_api/nightly"] 36 | deadlock_detection = ["parking_lot_core/deadlock_detection"] 37 | serde = ["lock_api/serde"] 38 | send_guard = [] 39 | hardware-lock-elision = [] 40 | 41 | [workspace] 42 | exclude = ["benchmark"] 43 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | parking_lot 2 | ============ 3 | 4 | [![Rust](https://github.com/Amanieu/parking_lot/workflows/Rust/badge.svg)](https://github.com/Amanieu/parking_lot/actions) 5 | [![Crates.io](https://img.shields.io/crates/v/parking_lot.svg)](https://crates.io/crates/parking_lot) 6 | 7 | [Documentation (synchronization primitives)](https://docs.rs/parking_lot/) 8 | 9 | [Documentation (core parking lot API)](https://docs.rs/parking_lot_core/) 10 | 11 | [Documentation (type-safe lock API)](https://docs.rs/lock_api/) 12 | 13 | This library provides implementations of `Mutex`, `RwLock`, `Condvar` and 14 | `Once` that are smaller, faster and more flexible than those in the Rust 15 | standard library, as well as a `ReentrantMutex` type which supports recursive 16 | locking. It also exposes a low-level API for creating your own efficient 17 | synchronization primitives. 18 | 19 | When tested on x86_64 Linux, `parking_lot::Mutex` was found to be 1.5x 20 | faster than `std::sync::Mutex` when uncontended, and up to 5x faster when 21 | contended from multiple threads. The numbers for `RwLock` vary depending on 22 | the number of reader and writer threads, but are almost always faster than 23 | the standard library `RwLock`, and even up to 50x faster in some cases. 24 | 25 | ## Features 26 | 27 | The primitives provided by this library have several advantages over those 28 | in the Rust standard library: 29 | 30 | 1. 
`Mutex` and `Once` only require 1 byte of storage space, while `Condvar` 31 | and `RwLock` only require 1 word of storage space. On the other hand on 32 | some platforms (macOS and a few others) the standard library primitives 33 | require a dynamically allocated `Box` to hold OS-specific synchronization 34 | primitives. The small size of `Mutex` in particular encourages the use 35 | of fine-grained locks to increase parallelism. 36 | 2. Uncontended lock acquisition and release is done through fast inline 37 | paths which only require a single atomic operation. 38 | 3. Microcontention (a contended lock with a short critical section) is 39 | efficiently handled by spinning a few times while trying to acquire a 40 | lock. 41 | 4. The locks are adaptive and will suspend a thread after a few failed spin 42 | attempts. This makes the locks suitable for both long and short critical 43 | sections. 44 | 5. `Condvar`, `RwLock` and `Once` work on Windows XP, unlike the standard 45 | library versions of those types. 46 | 6. `RwLock` takes advantage of hardware lock elision on processors that 47 | support it, which can lead to huge performance wins with many readers. 48 | This must be enabled with the `hardware-lock-elision` feature. 49 | 7. `RwLock` uses a task-fair locking policy, which avoids reader and writer 50 | starvation, whereas the standard library version makes no guarantees. 51 | 8. `Condvar` is guaranteed not to produce spurious wakeups. A thread will 52 | only be woken up if it timed out or it was woken up by a notification. 53 | 9. `Condvar::notify_all` will only wake up a single thread and requeue the 54 | rest to wait on the associated `Mutex`. This avoids a thundering herd 55 | problem where all threads try to acquire the lock at the same time. 56 | 10. `RwLock` supports atomically downgrading a write lock into a read lock. 57 | 11. `Mutex` and `RwLock` allow raw unlocking without a RAII guard object. 58 | 12. 
`Mutex<()>` and `RwLock<()>` allow raw locking without a RAII guard 59 | object. 60 | 13. `Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350) 61 | which allows them to be fair on average without sacrificing performance. 62 | 14. A `ReentrantMutex` type which supports recursive locking. 63 | 15. An *experimental* deadlock detector that works for `Mutex`, 64 | `RwLock` and `ReentrantMutex`. This feature is disabled by default and 65 | can be enabled via the `deadlock_detection` feature. 66 | 16. `RwLock` supports atomically upgrading an "upgradable" read lock into a 67 | write lock. 68 | 17. Optional support for [serde](https://docs.serde.rs/serde/). Enable via the 69 | feature `serde`. **NOTE!** this support is for `Mutex`, `ReentrantMutex`, 70 | and `RwLock` only; `Condvar` and `Once` are not currently supported. 71 | 18. Lock guards can be sent to other threads when the `send_guard` feature is 72 | enabled. 73 | 74 | ## The parking lot 75 | 76 | To keep these primitives small, all thread queuing and suspending 77 | functionality is offloaded to the *parking lot*. The idea behind this is 78 | based on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/) 79 | class, which essentially consists of a hash table mapping of lock addresses 80 | to queues of parked (sleeping) threads. The Webkit parking lot was itself 81 | inspired by Linux [futexes](https://man7.org/linux/man-pages/man2/futex.2.html), 82 | but it is more powerful since it allows invoking callbacks while holding a queue 83 | lock. 84 | 85 | ## Nightly vs stable 86 | 87 | There are a few restrictions when using this library on stable Rust: 88 | 89 | - The `wasm32-unknown-unknown` target is only fully supported on nightly with 90 | `-C target-feature=+atomics` in `RUSTFLAGS` and `-Zbuild-std=panic_abort,std` 91 | passed to cargo. 
parking_lot will work mostly fine on stable, the only 92 | difference is it will panic instead of block forever if you hit a deadlock. 93 | Just make sure not to enable `-C target-feature=+atomics` on stable as that 94 | will allow wasm to run with multiple threads which will completely break 95 | parking_lot's concurrency guarantees. 96 | 97 | To enable nightly-only functionality, you need to enable the `nightly` feature 98 | in Cargo (see below). 99 | 100 | ## Usage 101 | 102 | Add this to your `Cargo.toml`: 103 | 104 | ```toml 105 | [dependencies] 106 | parking_lot = "0.12" 107 | ``` 108 | 109 | To enable nightly-only features, add this to your `Cargo.toml` instead: 110 | 111 | ```toml 112 | [dependencies] 113 | parking_lot = { version = "0.12", features = ["nightly"] } 114 | ``` 115 | 116 | The experimental deadlock detector can be enabled with the 117 | `deadlock_detection` Cargo feature. 118 | 119 | To allow sending `MutexGuard`s and `RwLock*Guard`s to other threads, enable the 120 | `send_guard` option. 121 | 122 | Note that the `deadlock_detection` and `send_guard` features are incompatible 123 | and cannot be used together. 124 | 125 | Hardware lock elision support for x86 can be enabled with the 126 | `hardware-lock-elision` feature. This requires Rust 1.59 due to the use of 127 | inline assembly. 128 | 129 | The core parking lot API is provided by the `parking_lot_core` crate. It is 130 | separate from the synchronization primitives in the `parking_lot` crate so that 131 | changes to the core API do not cause breaking changes for users of `parking_lot`. 132 | 133 | ## Minimum Rust version 134 | 135 | The current minimum required Rust version is 1.64. Any change to this is 136 | considered a breaking change and will require a major version bump. 
137 | 138 | ## License 139 | 140 | Licensed under either of 141 | 142 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) 143 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) 144 | 145 | at your option. 146 | 147 | ### Contribution 148 | 149 | Unless you explicitly state otherwise, any contribution intentionally submitted 150 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any 151 | additional terms or conditions. 152 | -------------------------------------------------------------------------------- /benchmark/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parking_lot-benchmark" 3 | version = "0.0.0" 4 | authors = ["Amanieu d'Antras "] 5 | edition = "2021" 6 | 7 | [dependencies] 8 | parking_lot = {path = ".."} 9 | seqlock = "0.2" 10 | libc = "0.2" 11 | 12 | [[bin]] 13 | name = "mutex" 14 | path = "src/mutex.rs" 15 | 16 | [[bin]] 17 | name = "rwlock" 18 | path = "src/rwlock.rs" 19 | 20 | [features] 21 | nightly = ["parking_lot/nightly"] 22 | deadlock_detection = ["parking_lot/deadlock_detection"] 23 | 24 | [target.'cfg(windows)'.dependencies] 25 | winapi = { version = "0.3", features = ["synchapi"] } 26 | -------------------------------------------------------------------------------- /benchmark/src/args.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
7 | 8 | use std::{env, process}; 9 | 10 | #[derive(Copy, Clone)] 11 | pub struct ArgRange { 12 | current: usize, 13 | limit: usize, 14 | step: usize, 15 | } 16 | 17 | impl ArgRange { 18 | pub fn is_single(&self) -> bool { 19 | self.current.saturating_add(self.step) > self.limit 20 | } 21 | } 22 | 23 | impl Iterator for ArgRange { 24 | type Item = usize; 25 | fn next(&mut self) -> Option { 26 | if self.current <= self.limit { 27 | let result = self.current; 28 | self.current = self.current.saturating_add(self.step); 29 | Some(result) 30 | } else { 31 | None 32 | } 33 | } 34 | } 35 | 36 | fn print_usage(names: &[&str], error_msg: Option) -> ! { 37 | if let Some(error) = error_msg { 38 | println!("{}", error); 39 | } 40 | println!("Usage: {} {}", env::args().next().unwrap(), names.join(" ")); 41 | println!( 42 | "Each argument can be a single value or a range in the form start:end or \ 43 | start:end:step" 44 | ); 45 | process::exit(1); 46 | } 47 | 48 | fn parse_num(names: &[&str], name: &str, value: &str) -> usize { 49 | value.parse().unwrap_or_else(|_| { 50 | print_usage( 51 | names, 52 | Some(format!("Invalid value for {}: {}", name, value)), 53 | ) 54 | }) 55 | } 56 | 57 | fn parse_one(names: &[&str], name: &str, value: &str) -> ArgRange { 58 | let components = value.split(':').collect::>(); 59 | match components.len() { 60 | 1 => { 61 | let val = parse_num(names, name, components[0]); 62 | ArgRange { 63 | current: val, 64 | limit: val, 65 | step: 1, 66 | } 67 | } 68 | 2 => { 69 | let start = parse_num(names, name, components[0]); 70 | let end = parse_num(names, name, components[1]); 71 | if start > end { 72 | print_usage( 73 | names, 74 | Some(format!("Invalid range for {}: {}", name, value)), 75 | ); 76 | } 77 | ArgRange { 78 | current: start, 79 | limit: end, 80 | step: 1, 81 | } 82 | } 83 | 3 => { 84 | let start = parse_num(names, name, components[0]); 85 | let end = parse_num(names, name, components[1]); 86 | let step = parse_num(names, name, components[2]); 
87 | if start > end { 88 | print_usage( 89 | names, 90 | Some(format!("Invalid range for {}: {}", name, value)), 91 | ); 92 | } 93 | ArgRange { 94 | current: start, 95 | limit: end, 96 | step: step, 97 | } 98 | } 99 | _ => print_usage( 100 | names, 101 | Some(format!("Invalid value for {}: {}", name, value)), 102 | ), 103 | } 104 | } 105 | 106 | pub fn parse(names: &[&str]) -> Vec { 107 | let args = env::args().skip(1).collect::>(); 108 | if args.is_empty() { 109 | print_usage(names, None); 110 | } 111 | if args.len() != names.len() { 112 | print_usage( 113 | names, 114 | Some(format!( 115 | "Invalid number of arguments (expected {}, got {})", 116 | names.len(), 117 | args.len() 118 | )), 119 | ); 120 | } 121 | 122 | let mut result = vec![]; 123 | for (name, value) in names.iter().zip(args) { 124 | result.push(parse_one(names, name, &value)); 125 | } 126 | result 127 | } 128 | -------------------------------------------------------------------------------- /benchmark/src/mutex.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
7 | 8 | mod args; 9 | use crate::args::ArgRange; 10 | 11 | #[cfg(any(windows, unix))] 12 | use std::cell::UnsafeCell; 13 | use std::{ 14 | sync::{ 15 | atomic::{AtomicBool, Ordering}, 16 | Arc, Barrier, 17 | }, 18 | thread, 19 | time::Duration, 20 | }; 21 | 22 | trait Mutex { 23 | fn new(v: T) -> Self; 24 | fn lock(&self, f: F) -> R 25 | where 26 | F: FnOnce(&mut T) -> R; 27 | fn name() -> &'static str; 28 | } 29 | 30 | impl Mutex for std::sync::Mutex { 31 | fn new(v: T) -> Self { 32 | Self::new(v) 33 | } 34 | fn lock(&self, f: F) -> R 35 | where 36 | F: FnOnce(&mut T) -> R, 37 | { 38 | f(&mut *self.lock().unwrap()) 39 | } 40 | fn name() -> &'static str { 41 | "std::sync::Mutex" 42 | } 43 | } 44 | 45 | impl Mutex for parking_lot::Mutex { 46 | fn new(v: T) -> Self { 47 | Self::new(v) 48 | } 49 | fn lock(&self, f: F) -> R 50 | where 51 | F: FnOnce(&mut T) -> R, 52 | { 53 | f(&mut *self.lock()) 54 | } 55 | fn name() -> &'static str { 56 | "parking_lot::Mutex" 57 | } 58 | } 59 | 60 | #[cfg(not(windows))] 61 | type SrwLock = std::sync::Mutex; 62 | 63 | #[cfg(windows)] 64 | use winapi::um::synchapi; 65 | #[cfg(windows)] 66 | struct SrwLock(UnsafeCell, UnsafeCell); 67 | #[cfg(windows)] 68 | unsafe impl Sync for SrwLock {} 69 | #[cfg(windows)] 70 | unsafe impl Send for SrwLock {} 71 | #[cfg(windows)] 72 | impl Mutex for SrwLock { 73 | fn new(v: T) -> Self { 74 | let mut h: synchapi::SRWLOCK = synchapi::SRWLOCK { Ptr: std::ptr::null_mut() }; 75 | 76 | unsafe { 77 | synchapi::InitializeSRWLock(&mut h); 78 | } 79 | SrwLock( 80 | UnsafeCell::new(v), 81 | UnsafeCell::new(h), 82 | ) 83 | } 84 | fn lock(&self, f: F) -> R 85 | where 86 | F: FnOnce(&mut T) -> R, 87 | { 88 | unsafe { 89 | synchapi::AcquireSRWLockExclusive(self.1.get()); 90 | let res = f(&mut *self.0.get()); 91 | synchapi::ReleaseSRWLockExclusive(self.1.get()); 92 | res 93 | } 94 | } 95 | fn name() -> &'static str { 96 | "winapi_srwlock" 97 | } 98 | } 99 | 100 | #[cfg(not(unix))] 101 | type PthreadMutex = 
std::sync::Mutex; 102 | 103 | #[cfg(unix)] 104 | struct PthreadMutex(UnsafeCell, UnsafeCell); 105 | #[cfg(unix)] 106 | unsafe impl Sync for PthreadMutex {} 107 | #[cfg(unix)] 108 | impl Mutex for PthreadMutex { 109 | fn new(v: T) -> Self { 110 | PthreadMutex( 111 | UnsafeCell::new(v), 112 | UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER), 113 | ) 114 | } 115 | fn lock(&self, f: F) -> R 116 | where 117 | F: FnOnce(&mut T) -> R, 118 | { 119 | unsafe { 120 | libc::pthread_mutex_lock(self.1.get()); 121 | let res = f(&mut *self.0.get()); 122 | libc::pthread_mutex_unlock(self.1.get()); 123 | res 124 | } 125 | } 126 | fn name() -> &'static str { 127 | "pthread_mutex_t" 128 | } 129 | } 130 | #[cfg(unix)] 131 | impl Drop for PthreadMutex { 132 | fn drop(&mut self) { 133 | unsafe { 134 | libc::pthread_mutex_destroy(self.1.get()); 135 | } 136 | } 137 | } 138 | 139 | fn run_benchmark + Send + Sync + 'static>( 140 | num_threads: usize, 141 | work_per_critical_section: usize, 142 | work_between_critical_sections: usize, 143 | seconds_per_test: usize, 144 | ) -> Vec { 145 | let lock = Arc::new(([0u8; 300], M::new(0.0), [0u8; 300])); 146 | let keep_going = Arc::new(AtomicBool::new(true)); 147 | let barrier = Arc::new(Barrier::new(num_threads)); 148 | let mut threads = vec![]; 149 | for _ in 0..num_threads { 150 | let barrier = barrier.clone(); 151 | let lock = lock.clone(); 152 | let keep_going = keep_going.clone(); 153 | threads.push(thread::spawn(move || { 154 | let mut local_value = 0.0; 155 | let mut value = 0.0; 156 | let mut iterations = 0usize; 157 | barrier.wait(); 158 | while keep_going.load(Ordering::Relaxed) { 159 | lock.1.lock(|shared_value| { 160 | for _ in 0..work_per_critical_section { 161 | *shared_value += value; 162 | *shared_value *= 1.01; 163 | value = *shared_value; 164 | } 165 | }); 166 | for _ in 0..work_between_critical_sections { 167 | local_value += value; 168 | local_value *= 1.01; 169 | value = local_value; 170 | } 171 | iterations += 1; 172 | } 173 | 
(iterations, value) 174 | })); 175 | } 176 | 177 | thread::sleep(Duration::from_secs(seconds_per_test as u64)); 178 | keep_going.store(false, Ordering::Relaxed); 179 | threads.into_iter().map(|x| x.join().unwrap().0).collect() 180 | } 181 | 182 | fn run_benchmark_iterations + Send + Sync + 'static>( 183 | num_threads: usize, 184 | work_per_critical_section: usize, 185 | work_between_critical_sections: usize, 186 | seconds_per_test: usize, 187 | test_iterations: usize, 188 | ) { 189 | let mut data = vec![]; 190 | for _ in 0..test_iterations { 191 | let run_data = run_benchmark::( 192 | num_threads, 193 | work_per_critical_section, 194 | work_between_critical_sections, 195 | seconds_per_test, 196 | ); 197 | data.extend_from_slice(&run_data); 198 | } 199 | 200 | let average = data.iter().fold(0f64, |a, b| a + *b as f64) / data.len() as f64; 201 | let variance = data 202 | .iter() 203 | .fold(0f64, |a, b| a + ((*b as f64 - average).powi(2))) 204 | / data.len() as f64; 205 | data.sort(); 206 | 207 | let k_hz = 1.0 / seconds_per_test as f64 / 1000.0; 208 | println!( 209 | "{:20} | {:10.3} kHz | {:10.3} kHz | {:10.3} kHz", 210 | M::name(), 211 | average * k_hz, 212 | data[data.len() / 2] as f64 * k_hz, 213 | variance.sqrt() * k_hz 214 | ); 215 | } 216 | 217 | fn run_all( 218 | args: &[ArgRange], 219 | first: &mut bool, 220 | num_threads: usize, 221 | work_per_critical_section: usize, 222 | work_between_critical_sections: usize, 223 | seconds_per_test: usize, 224 | test_iterations: usize, 225 | ) { 226 | if num_threads == 0 { 227 | return; 228 | } 229 | if *first || !args[0].is_single() { 230 | println!("- Running with {} threads", num_threads); 231 | } 232 | if *first || !args[1].is_single() || !args[2].is_single() { 233 | println!( 234 | "- {} iterations inside lock, {} iterations outside lock", 235 | work_per_critical_section, work_between_critical_sections 236 | ); 237 | } 238 | if *first || !args[3].is_single() { 239 | println!("- {} seconds per test", 
seconds_per_test); 240 | } 241 | *first = false; 242 | 243 | println!( 244 | "{:^20} | {:^14} | {:^14} | {:^14}", 245 | "name", "average", "median", "std.dev." 246 | ); 247 | 248 | run_benchmark_iterations::>( 249 | num_threads, 250 | work_per_critical_section, 251 | work_between_critical_sections, 252 | seconds_per_test, 253 | test_iterations, 254 | ); 255 | 256 | run_benchmark_iterations::>( 257 | num_threads, 258 | work_per_critical_section, 259 | work_between_critical_sections, 260 | seconds_per_test, 261 | test_iterations, 262 | ); 263 | if cfg!(windows) { 264 | run_benchmark_iterations::>( 265 | num_threads, 266 | work_per_critical_section, 267 | work_between_critical_sections, 268 | seconds_per_test, 269 | test_iterations, 270 | ); 271 | } 272 | if cfg!(unix) { 273 | run_benchmark_iterations::>( 274 | num_threads, 275 | work_per_critical_section, 276 | work_between_critical_sections, 277 | seconds_per_test, 278 | test_iterations, 279 | ); 280 | } 281 | } 282 | 283 | fn main() { 284 | let args = args::parse(&[ 285 | "numThreads", 286 | "workPerCriticalSection", 287 | "workBetweenCriticalSections", 288 | "secondsPerTest", 289 | "testIterations", 290 | ]); 291 | let mut first = true; 292 | for num_threads in args[0] { 293 | for work_per_critical_section in args[1] { 294 | for work_between_critical_sections in args[2] { 295 | for seconds_per_test in args[3] { 296 | for test_iterations in args[4] { 297 | run_all( 298 | &args, 299 | &mut first, 300 | num_threads, 301 | work_per_critical_section, 302 | work_between_critical_sections, 303 | seconds_per_test, 304 | test_iterations, 305 | ); 306 | } 307 | } 308 | } 309 | } 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /benchmark/src/rwlock.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | mod args; 9 | use crate::args::ArgRange; 10 | 11 | #[cfg(any(windows, unix))] 12 | use std::cell::UnsafeCell; 13 | use std::{ 14 | sync::{ 15 | atomic::{AtomicBool, Ordering}, 16 | Arc, Barrier, 17 | }, 18 | thread, 19 | time::Duration, 20 | }; 21 | 22 | trait RwLock { 23 | fn new(v: T) -> Self; 24 | fn read(&self, f: F) -> R 25 | where 26 | F: FnOnce(&T) -> R; 27 | fn write(&self, f: F) -> R 28 | where 29 | F: FnOnce(&mut T) -> R; 30 | fn name() -> &'static str; 31 | } 32 | 33 | impl RwLock for std::sync::RwLock { 34 | fn new(v: T) -> Self { 35 | Self::new(v) 36 | } 37 | fn read(&self, f: F) -> R 38 | where 39 | F: FnOnce(&T) -> R, 40 | { 41 | f(&*self.read().unwrap()) 42 | } 43 | fn write(&self, f: F) -> R 44 | where 45 | F: FnOnce(&mut T) -> R, 46 | { 47 | f(&mut *self.write().unwrap()) 48 | } 49 | fn name() -> &'static str { 50 | "std::sync::RwLock" 51 | } 52 | } 53 | 54 | impl RwLock for parking_lot::RwLock { 55 | fn new(v: T) -> Self { 56 | Self::new(v) 57 | } 58 | fn read(&self, f: F) -> R 59 | where 60 | F: FnOnce(&T) -> R, 61 | { 62 | f(&*self.read()) 63 | } 64 | fn write(&self, f: F) -> R 65 | where 66 | F: FnOnce(&mut T) -> R, 67 | { 68 | f(&mut *self.write()) 69 | } 70 | fn name() -> &'static str { 71 | "parking_lot::RwLock" 72 | } 73 | } 74 | 75 | impl RwLock for seqlock::SeqLock { 76 | fn new(v: T) -> Self { 77 | Self::new(v) 78 | } 79 | fn read(&self, f: F) -> R 80 | where 81 | F: FnOnce(&T) -> R, 82 | { 83 | f(&self.read()) 84 | } 85 | fn write(&self, f: F) -> R 86 | where 87 | F: FnOnce(&mut T) -> R, 88 | { 89 | f(&mut *self.lock_write()) 90 | } 91 | fn name() -> &'static str { 92 | "seqlock::SeqLock" 93 | } 94 | } 95 | 96 | #[cfg(not(windows))] 97 | type SrwLock = std::sync::RwLock; 98 | 99 | #[cfg(windows)] 100 | use winapi::um::synchapi; 101 | #[cfg(windows)] 102 | struct SrwLock(UnsafeCell, UnsafeCell); 103 | #[cfg(windows)] 104 | unsafe impl 
Sync for SrwLock {} 105 | #[cfg(windows)] 106 | unsafe impl Send for SrwLock {} 107 | #[cfg(windows)] 108 | impl RwLock for SrwLock { 109 | fn new(v: T) -> Self { 110 | let mut h: synchapi::SRWLOCK = synchapi::SRWLOCK { Ptr: std::ptr::null_mut() }; 111 | 112 | unsafe { 113 | synchapi::InitializeSRWLock(&mut h); 114 | } 115 | SrwLock( 116 | UnsafeCell::new(v), 117 | UnsafeCell::new(h), 118 | ) 119 | } 120 | fn read(&self, f: F) -> R 121 | where 122 | F: FnOnce(&T) -> R, 123 | { 124 | unsafe { 125 | synchapi::AcquireSRWLockShared(self.1.get()); 126 | let res = f(&*self.0.get()); 127 | synchapi::ReleaseSRWLockShared(self.1.get()); 128 | res 129 | } 130 | } 131 | fn write(&self, f: F) -> R 132 | where 133 | F: FnOnce(&mut T) -> R, 134 | { 135 | unsafe { 136 | synchapi::AcquireSRWLockExclusive(self.1.get()); 137 | let res = f(&mut *self.0.get()); 138 | synchapi::ReleaseSRWLockExclusive(self.1.get()); 139 | res 140 | } 141 | } 142 | fn name() -> &'static str { 143 | "winapi_srwlock" 144 | } 145 | } 146 | 147 | #[cfg(not(unix))] 148 | type PthreadRwLock = std::sync::RwLock; 149 | 150 | #[cfg(unix)] 151 | struct PthreadRwLock(UnsafeCell, UnsafeCell); 152 | #[cfg(unix)] 153 | unsafe impl Sync for PthreadRwLock {} 154 | #[cfg(unix)] 155 | impl RwLock for PthreadRwLock { 156 | fn new(v: T) -> Self { 157 | PthreadRwLock( 158 | UnsafeCell::new(v), 159 | UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), 160 | ) 161 | } 162 | fn read(&self, f: F) -> R 163 | where 164 | F: FnOnce(&T) -> R, 165 | { 166 | unsafe { 167 | libc::pthread_rwlock_wrlock(self.1.get()); 168 | let res = f(&*self.0.get()); 169 | libc::pthread_rwlock_unlock(self.1.get()); 170 | res 171 | } 172 | } 173 | fn write(&self, f: F) -> R 174 | where 175 | F: FnOnce(&mut T) -> R, 176 | { 177 | unsafe { 178 | libc::pthread_rwlock_wrlock(self.1.get()); 179 | let res = f(&mut *self.0.get()); 180 | libc::pthread_rwlock_unlock(self.1.get()); 181 | res 182 | } 183 | } 184 | fn name() -> &'static str { 185 | 
"pthread_rwlock_t" 186 | } 187 | } 188 | #[cfg(unix)] 189 | impl Drop for PthreadRwLock { 190 | fn drop(&mut self) { 191 | unsafe { 192 | libc::pthread_rwlock_destroy(self.1.get()); 193 | } 194 | } 195 | } 196 | 197 | fn run_benchmark + Send + Sync + 'static>( 198 | num_writer_threads: usize, 199 | num_reader_threads: usize, 200 | work_per_critical_section: usize, 201 | work_between_critical_sections: usize, 202 | seconds_per_test: usize, 203 | ) -> (Vec, Vec) { 204 | let lock = Arc::new(([0u8; 300], M::new(0.0), [0u8; 300])); 205 | let keep_going = Arc::new(AtomicBool::new(true)); 206 | let barrier = Arc::new(Barrier::new(num_reader_threads + num_writer_threads)); 207 | let mut writers = vec![]; 208 | let mut readers = vec![]; 209 | for _ in 0..num_writer_threads { 210 | let barrier = barrier.clone(); 211 | let lock = lock.clone(); 212 | let keep_going = keep_going.clone(); 213 | writers.push(thread::spawn(move || { 214 | let mut local_value = 0.0; 215 | let mut value = 0.0; 216 | let mut iterations = 0usize; 217 | barrier.wait(); 218 | while keep_going.load(Ordering::Relaxed) { 219 | lock.1.write(|shared_value| { 220 | for _ in 0..work_per_critical_section { 221 | *shared_value += value; 222 | *shared_value *= 1.01; 223 | value = *shared_value; 224 | } 225 | }); 226 | for _ in 0..work_between_critical_sections { 227 | local_value += value; 228 | local_value *= 1.01; 229 | value = local_value; 230 | } 231 | iterations += 1; 232 | } 233 | (iterations, value) 234 | })); 235 | } 236 | for _ in 0..num_reader_threads { 237 | let barrier = barrier.clone(); 238 | let lock = lock.clone(); 239 | let keep_going = keep_going.clone(); 240 | readers.push(thread::spawn(move || { 241 | let mut local_value = 0.0; 242 | let mut value = 0.0; 243 | let mut iterations = 0usize; 244 | barrier.wait(); 245 | while keep_going.load(Ordering::Relaxed) { 246 | lock.1.read(|shared_value| { 247 | for _ in 0..work_per_critical_section { 248 | local_value += value; 249 | local_value *= 
*shared_value; 250 | value = local_value; 251 | } 252 | }); 253 | for _ in 0..work_between_critical_sections { 254 | local_value += value; 255 | local_value *= 1.01; 256 | value = local_value; 257 | } 258 | iterations += 1; 259 | } 260 | (iterations, value) 261 | })); 262 | } 263 | 264 | thread::sleep(Duration::new(seconds_per_test as u64, 0)); 265 | keep_going.store(false, Ordering::Relaxed); 266 | 267 | let run_writers = writers 268 | .into_iter() 269 | .map(|x| x.join().unwrap().0) 270 | .collect::>(); 271 | let run_readers = readers 272 | .into_iter() 273 | .map(|x| x.join().unwrap().0) 274 | .collect::>(); 275 | 276 | (run_writers, run_readers) 277 | } 278 | 279 | fn run_benchmark_iterations + Send + Sync + 'static>( 280 | num_writer_threads: usize, 281 | num_reader_threads: usize, 282 | work_per_critical_section: usize, 283 | work_between_critical_sections: usize, 284 | seconds_per_test: usize, 285 | test_iterations: usize, 286 | ) { 287 | let mut writers = vec![]; 288 | let mut readers = vec![]; 289 | 290 | for _ in 0..test_iterations { 291 | let (run_writers, run_readers) = run_benchmark::( 292 | num_writer_threads, 293 | num_reader_threads, 294 | work_per_critical_section, 295 | work_between_critical_sections, 296 | seconds_per_test, 297 | ); 298 | writers.extend_from_slice(&run_writers); 299 | readers.extend_from_slice(&run_readers); 300 | } 301 | 302 | let total_writers = writers.iter().fold(0f64, |a, b| a + *b as f64) / test_iterations as f64; 303 | let total_readers = readers.iter().fold(0f64, |a, b| a + *b as f64) / test_iterations as f64; 304 | println!( 305 | "{:20} - [write] {:10.3} kHz [read] {:10.3} kHz", 306 | M::name(), 307 | total_writers as f64 / seconds_per_test as f64 / 1000.0, 308 | total_readers as f64 / seconds_per_test as f64 / 1000.0 309 | ); 310 | } 311 | 312 | fn run_all( 313 | args: &[ArgRange], 314 | first: &mut bool, 315 | num_writer_threads: usize, 316 | num_reader_threads: usize, 317 | work_per_critical_section: usize, 318 | 
work_between_critical_sections: usize, 319 | seconds_per_test: usize, 320 | test_iterations: usize, 321 | ) { 322 | if num_writer_threads == 0 && num_reader_threads == 0 { 323 | return; 324 | } 325 | if *first || !args[0].is_single() || !args[1].is_single() { 326 | println!( 327 | "- Running with {} writer threads and {} reader threads", 328 | num_writer_threads, num_reader_threads 329 | ); 330 | } 331 | if *first || !args[2].is_single() || !args[3].is_single() { 332 | println!( 333 | "- {} iterations inside lock, {} iterations outside lock", 334 | work_per_critical_section, work_between_critical_sections 335 | ); 336 | } 337 | if *first || !args[4].is_single() { 338 | println!("- {} seconds per test", seconds_per_test); 339 | } 340 | *first = false; 341 | 342 | run_benchmark_iterations::>( 343 | num_writer_threads, 344 | num_reader_threads, 345 | work_per_critical_section, 346 | work_between_critical_sections, 347 | seconds_per_test, 348 | test_iterations, 349 | ); 350 | run_benchmark_iterations::>( 351 | num_writer_threads, 352 | num_reader_threads, 353 | work_per_critical_section, 354 | work_between_critical_sections, 355 | seconds_per_test, 356 | test_iterations, 357 | ); 358 | if cfg!(windows) { 359 | run_benchmark_iterations::>( 360 | num_writer_threads, 361 | num_reader_threads, 362 | work_per_critical_section, 363 | work_between_critical_sections, 364 | seconds_per_test, 365 | test_iterations, 366 | ); 367 | } 368 | if cfg!(unix) { 369 | run_benchmark_iterations::>( 370 | num_writer_threads, 371 | num_reader_threads, 372 | work_per_critical_section, 373 | work_between_critical_sections, 374 | seconds_per_test, 375 | test_iterations, 376 | ); 377 | } 378 | } 379 | fn main() { 380 | let args = args::parse(&[ 381 | "numWriterThreads", 382 | "numReaderThreads", 383 | "workPerCriticalSection", 384 | "workBetweenCriticalSections", 385 | "secondsPerTest", 386 | "testIterations", 387 | ]); 388 | let mut first = true; 389 | for num_writer_threads in args[0] { 390 | 
for num_reader_threads in args[1] { 391 | for work_per_critical_section in args[2] { 392 | for work_between_critical_sections in args[3] { 393 | for seconds_per_test in args[4] { 394 | for test_iterations in args[5] { 395 | run_all( 396 | &args, 397 | &mut first, 398 | num_writer_threads, 399 | num_reader_threads, 400 | work_per_critical_section, 401 | work_between_critical_sections, 402 | seconds_per_test, 403 | test_iterations, 404 | ); 405 | } 406 | } 407 | } 408 | } 409 | } 410 | } 411 | } 412 | -------------------------------------------------------------------------------- /bors.toml: -------------------------------------------------------------------------------- 1 | status = [ 2 | "build_tier_one", 3 | "build_other_platforms", 4 | ] 5 | -------------------------------------------------------------------------------- /core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parking_lot_core" 3 | version = "0.9.11" 4 | authors = ["Amanieu d'Antras "] 5 | description = "An advanced API for creating custom synchronization primitives." 
6 | license = "MIT OR Apache-2.0" 7 | repository = "https://github.com/Amanieu/parking_lot" 8 | keywords = ["mutex", "condvar", "rwlock", "once", "thread"] 9 | categories = ["concurrency"] 10 | edition = "2021" 11 | rust-version = "1.64.0" 12 | 13 | [package.metadata.docs.rs] 14 | rustdoc-args = ["--generate-link-to-definition"] 15 | 16 | [dependencies] 17 | cfg-if = "1.0.0" 18 | smallvec = "1.6.1" 19 | petgraph = { version = "0.6.0", optional = true } 20 | thread-id = { version = "4.0.0", optional = true } 21 | backtrace = { version = "0.3.60", optional = true } 22 | 23 | [target.'cfg(unix)'.dependencies] 24 | libc = "0.2.95" 25 | 26 | [target.'cfg(target_os = "redox")'.dependencies] 27 | redox_syscall = "0.5" 28 | 29 | [target.'cfg(windows)'.dependencies] 30 | windows-targets = "0.52.0" 31 | 32 | [features] 33 | nightly = [] 34 | deadlock_detection = ["petgraph", "thread-id", "backtrace"] 35 | -------------------------------------------------------------------------------- /core/LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /core/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
// Automatically detect ThreadSanitizer in a way that's compatible with both
// stable (which doesn't support sanitizers) and nightly (which does). Works
// because build scripts get `cfg` info, even if the cfg itself is unstable.

/// Returns true if ThreadSanitizer is among the active sanitizers.
///
/// `sanitizer_list` is the comma-separated value of the `CARGO_CFG_SANITIZE`
/// environment variable (empty when no sanitizer is enabled). Matching is done
/// per element rather than by substring so that a hypothetical sanitizer whose
/// name merely *contains* "thread" cannot produce a false positive.
fn tsan_enabled(sanitizer_list: &str) -> bool {
    sanitizer_list
        .split(',')
        .any(|sanitizer| sanitizer.trim() == "thread")
}

fn main() {
    // Re-run only when this script itself changes, not on every source change.
    println!("cargo:rerun-if-changed=build.rs");
    // Declare the custom cfg so `--check-cfg` builds don't warn about it.
    println!("cargo:rustc-check-cfg=cfg(tsan_enabled)");
    let sanitizer_list = std::env::var("CARGO_CFG_SANITIZE").unwrap_or_default();
    if tsan_enabled(&sanitizer_list) {
        println!("cargo:rustc-cfg=tsan_enabled");
    }
}
The Webkit parking lot was itself 18 | //! inspired by Linux [futexes](http://man7.org/linux/man-pages/man2/futex.2.html), 19 | //! but it is more powerful since it allows invoking callbacks while holding a 20 | //! queue lock. 21 | //! 22 | //! There are two main operations that can be performed on the parking lot: 23 | //! 24 | //! - *Parking* refers to suspending the thread while simultaneously enqueuing it 25 | //! on a queue keyed by some address. 26 | //! - *Unparking* refers to dequeuing a thread from a queue keyed by some address 27 | //! and resuming it. 28 | //! 29 | //! See the documentation of the individual functions for more details. 30 | //! 31 | //! # Building custom synchronization primitives 32 | //! 33 | //! Building custom synchronization primitives is very simple since the parking 34 | //! lot takes care of all the hard parts for you. A simple example for a 35 | //! custom primitive would be to integrate a `Mutex` inside another data type. 36 | //! Since a mutex only requires 2 bits, it can share space with other data. 37 | //! For example, one could create an `ArcMutex` type that combines the atomic 38 | //! reference count and the two mutex bits in the same atomic word. 
39 | 40 | #![warn(missing_docs)] 41 | #![warn(rust_2018_idioms)] 42 | #![cfg_attr( 43 | all(target_env = "sgx", target_vendor = "fortanix"), 44 | feature(sgx_platform) 45 | )] 46 | #![cfg_attr( 47 | all( 48 | feature = "nightly", 49 | target_family = "wasm", 50 | target_feature = "atomics" 51 | ), 52 | feature(stdarch_wasm_atomic_wait) 53 | )] 54 | 55 | mod parking_lot; 56 | mod spinwait; 57 | mod thread_parker; 58 | mod util; 59 | mod word_lock; 60 | 61 | pub use self::parking_lot::deadlock; 62 | pub use self::parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue}; 63 | pub use self::parking_lot::{ 64 | FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken, 65 | }; 66 | pub use self::parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; 67 | pub use self::spinwait::SpinWait; 68 | -------------------------------------------------------------------------------- /core/src/spinwait.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use crate::thread_parker; 9 | use core::hint::spin_loop; 10 | 11 | // Wastes some CPU time for the given number of iterations, 12 | // using a hint to indicate to the CPU that we are spinning. 13 | #[inline] 14 | fn cpu_relax(iterations: u32) { 15 | for _ in 0..iterations { 16 | spin_loop() 17 | } 18 | } 19 | 20 | /// A counter used to perform exponential backoff in spin loops. 21 | #[derive(Default)] 22 | pub struct SpinWait { 23 | counter: u32, 24 | } 25 | 26 | impl SpinWait { 27 | /// Creates a new `SpinWait`. 28 | #[inline] 29 | pub fn new() -> Self { 30 | Self::default() 31 | } 32 | 33 | /// Resets a `SpinWait` to its initial state. 
34 | #[inline] 35 | pub fn reset(&mut self) { 36 | self.counter = 0; 37 | } 38 | 39 | /// Spins until the sleep threshold has been reached. 40 | /// 41 | /// This function returns whether the sleep threshold has been reached, at 42 | /// which point further spinning has diminishing returns and the thread 43 | /// should be parked instead. 44 | /// 45 | /// The spin strategy will initially use a CPU-bound loop but will fall back 46 | /// to yielding the CPU to the OS after a few iterations. 47 | #[inline] 48 | pub fn spin(&mut self) -> bool { 49 | if self.counter >= 10 { 50 | return false; 51 | } 52 | self.counter += 1; 53 | if self.counter <= 3 { 54 | cpu_relax(1 << self.counter); 55 | } else { 56 | thread_parker::thread_yield(); 57 | } 58 | true 59 | } 60 | 61 | /// Spins without yielding the thread to the OS. 62 | /// 63 | /// Instead, the backoff is simply capped at a maximum value. This can be 64 | /// used to improve throughput in `compare_exchange` loops that have high 65 | /// contention. 66 | #[inline] 67 | pub fn spin_no_yield(&mut self) { 68 | self.counter += 1; 69 | if self.counter > 10 { 70 | self.counter = 10; 71 | } 72 | cpu_relax(1 << self.counter); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /core/src/thread_parker/generic.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! A simple spin lock based thread parker. Used on platforms without better 9 | //! parking facilities available. 
10 | 11 | use core::hint::spin_loop; 12 | use core::sync::atomic::{AtomicBool, Ordering}; 13 | use std::thread; 14 | use std::time::Instant; 15 | 16 | // Helper type for putting a thread to sleep until some other thread wakes it up 17 | pub struct ThreadParker { 18 | parked: AtomicBool, 19 | } 20 | 21 | impl super::ThreadParkerT for ThreadParker { 22 | type UnparkHandle = UnparkHandle; 23 | 24 | const IS_CHEAP_TO_CONSTRUCT: bool = true; 25 | 26 | #[inline] 27 | fn new() -> ThreadParker { 28 | ThreadParker { 29 | parked: AtomicBool::new(false), 30 | } 31 | } 32 | 33 | #[inline] 34 | unsafe fn prepare_park(&self) { 35 | self.parked.store(true, Ordering::Relaxed); 36 | } 37 | 38 | #[inline] 39 | unsafe fn timed_out(&self) -> bool { 40 | self.parked.load(Ordering::Relaxed) != false 41 | } 42 | 43 | #[inline] 44 | unsafe fn park(&self) { 45 | while self.parked.load(Ordering::Acquire) != false { 46 | spin_loop(); 47 | } 48 | } 49 | 50 | #[inline] 51 | unsafe fn park_until(&self, timeout: Instant) -> bool { 52 | while self.parked.load(Ordering::Acquire) != false { 53 | if Instant::now() >= timeout { 54 | return false; 55 | } 56 | spin_loop(); 57 | } 58 | true 59 | } 60 | 61 | #[inline] 62 | unsafe fn unpark_lock(&self) -> UnparkHandle { 63 | // We don't need to lock anything, just clear the state 64 | self.parked.store(false, Ordering::Release); 65 | UnparkHandle(()) 66 | } 67 | } 68 | 69 | pub struct UnparkHandle(()); 70 | 71 | impl super::UnparkHandleT for UnparkHandle { 72 | #[inline] 73 | unsafe fn unpark(self) {} 74 | } 75 | 76 | #[inline] 77 | pub fn thread_yield() { 78 | thread::yield_now(); 79 | } 80 | -------------------------------------------------------------------------------- /core/src/thread_parker/linux.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use core::{ 9 | ptr, 10 | sync::atomic::{AtomicI32, Ordering}, 11 | }; 12 | use libc; 13 | use std::thread; 14 | use std::time::Instant; 15 | 16 | // x32 Linux uses a non-standard type for tv_nsec in timespec. 17 | // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 18 | #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] 19 | #[allow(non_camel_case_types)] 20 | type tv_nsec_t = i64; 21 | #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] 22 | #[allow(non_camel_case_types)] 23 | type tv_nsec_t = libc::c_long; 24 | 25 | fn errno() -> libc::c_int { 26 | #[cfg(target_os = "linux")] 27 | unsafe { 28 | *libc::__errno_location() 29 | } 30 | #[cfg(target_os = "android")] 31 | unsafe { 32 | *libc::__errno() 33 | } 34 | } 35 | 36 | // Helper type for putting a thread to sleep until some other thread wakes it up 37 | pub struct ThreadParker { 38 | futex: AtomicI32, 39 | } 40 | 41 | impl super::ThreadParkerT for ThreadParker { 42 | type UnparkHandle = UnparkHandle; 43 | 44 | const IS_CHEAP_TO_CONSTRUCT: bool = true; 45 | 46 | #[inline] 47 | fn new() -> ThreadParker { 48 | ThreadParker { 49 | futex: AtomicI32::new(0), 50 | } 51 | } 52 | 53 | #[inline] 54 | unsafe fn prepare_park(&self) { 55 | self.futex.store(1, Ordering::Relaxed); 56 | } 57 | 58 | #[inline] 59 | unsafe fn timed_out(&self) -> bool { 60 | self.futex.load(Ordering::Relaxed) != 0 61 | } 62 | 63 | #[inline] 64 | unsafe fn park(&self) { 65 | while self.futex.load(Ordering::Acquire) != 0 { 66 | self.futex_wait(None); 67 | } 68 | } 69 | 70 | #[inline] 71 | unsafe fn park_until(&self, timeout: Instant) -> bool { 72 | while self.futex.load(Ordering::Acquire) != 0 { 73 | let now = Instant::now(); 74 | if timeout <= now { 75 | return false; 76 | } 77 | let diff = timeout - now; 78 | if diff.as_secs() as libc::time_t as u64 != diff.as_secs() { 79 | // Timeout overflowed, just sleep 
indefinitely 80 | self.park(); 81 | return true; 82 | } 83 | // SAFETY: libc::timespec is zero initializable. 84 | let mut ts: libc::timespec = std::mem::zeroed(); 85 | ts.tv_sec = diff.as_secs() as libc::time_t; 86 | ts.tv_nsec = diff.subsec_nanos() as tv_nsec_t; 87 | self.futex_wait(Some(ts)); 88 | } 89 | true 90 | } 91 | 92 | // Locks the parker to prevent the target thread from exiting. This is 93 | // necessary to ensure that thread-local ThreadData objects remain valid. 94 | // This should be called while holding the queue lock. 95 | #[inline] 96 | unsafe fn unpark_lock(&self) -> UnparkHandle { 97 | // We don't need to lock anything, just clear the state 98 | self.futex.store(0, Ordering::Release); 99 | 100 | UnparkHandle { futex: &self.futex } 101 | } 102 | } 103 | 104 | impl ThreadParker { 105 | #[inline] 106 | fn futex_wait(&self, ts: Option) { 107 | let ts_ptr = ts 108 | .as_ref() 109 | .map(|ts_ref| ts_ref as *const _) 110 | .unwrap_or(ptr::null()); 111 | let r = unsafe { 112 | libc::syscall( 113 | libc::SYS_futex, 114 | &self.futex, 115 | libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG, 116 | 1, 117 | ts_ptr, 118 | ) 119 | }; 120 | debug_assert!(r == 0 || r == -1); 121 | if r == -1 { 122 | debug_assert!( 123 | errno() == libc::EINTR 124 | || errno() == libc::EAGAIN 125 | || (ts.is_some() && errno() == libc::ETIMEDOUT) 126 | ); 127 | } 128 | } 129 | } 130 | 131 | pub struct UnparkHandle { 132 | futex: *const AtomicI32, 133 | } 134 | 135 | impl super::UnparkHandleT for UnparkHandle { 136 | #[inline] 137 | unsafe fn unpark(self) { 138 | // The thread data may have been freed at this point, but it doesn't 139 | // matter since the syscall will just return EFAULT in that case. 
140 | let r = libc::syscall( 141 | libc::SYS_futex, 142 | self.futex, 143 | libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG, 144 | 1, 145 | ); 146 | debug_assert!(r == 0 || r == 1 || r == -1); 147 | if r == -1 { 148 | debug_assert_eq!(errno(), libc::EFAULT); 149 | } 150 | } 151 | } 152 | 153 | #[inline] 154 | pub fn thread_yield() { 155 | thread::yield_now(); 156 | } 157 | -------------------------------------------------------------------------------- /core/src/thread_parker/mod.rs: -------------------------------------------------------------------------------- 1 | use cfg_if::cfg_if; 2 | use std::time::Instant; 3 | 4 | /// Trait for the platform thread parker implementation. 5 | /// 6 | /// All unsafe methods are unsafe because the Unix thread parker is based on 7 | /// pthread mutexes and condvars. Those primitives must not be moved and used 8 | /// from any other memory address than the one they were located at when they 9 | /// were initialized. As such, it's UB to call any unsafe method on 10 | /// `ThreadParkerT` if the implementing instance has moved since the last 11 | /// call to any of the unsafe methods. 12 | pub trait ThreadParkerT { 13 | type UnparkHandle: UnparkHandleT; 14 | 15 | const IS_CHEAP_TO_CONSTRUCT: bool; 16 | 17 | fn new() -> Self; 18 | 19 | /// Prepares the parker. This should be called before adding it to the queue. 20 | unsafe fn prepare_park(&self); 21 | 22 | /// Checks if the park timed out. This should be called while holding the 23 | /// queue lock after `park_until` has returned false. 24 | unsafe fn timed_out(&self) -> bool; 25 | 26 | /// Parks the thread until it is unparked. This should be called after it has 27 | /// been added to the queue, after unlocking the queue. 28 | unsafe fn park(&self); 29 | 30 | /// Parks the thread until it is unparked or the timeout is reached. This 31 | /// should be called after it has been added to the queue, after unlocking 32 | /// the queue. 
Returns true if we were unparked and false if we timed out. 33 | unsafe fn park_until(&self, timeout: Instant) -> bool; 34 | 35 | /// Locks the parker to prevent the target thread from exiting. This is 36 | /// necessary to ensure that thread-local `ThreadData` objects remain valid. 37 | /// This should be called while holding the queue lock. 38 | unsafe fn unpark_lock(&self) -> Self::UnparkHandle; 39 | } 40 | 41 | /// Handle for a thread that is about to be unparked. We need to mark the thread 42 | /// as unparked while holding the queue lock, but we delay the actual unparking 43 | /// until after the queue lock is released. 44 | pub trait UnparkHandleT { 45 | /// Wakes up the parked thread. This should be called after the queue lock is 46 | /// released to avoid blocking the queue for too long. 47 | /// 48 | /// This method is unsafe for the same reason as the unsafe methods in 49 | /// `ThreadParkerT`. 50 | unsafe fn unpark(self); 51 | } 52 | 53 | cfg_if! { 54 | if #[cfg(any(target_os = "linux", target_os = "android"))] { 55 | #[path = "linux.rs"] 56 | mod imp; 57 | } else if #[cfg(unix)] { 58 | #[path = "unix.rs"] 59 | mod imp; 60 | } else if #[cfg(windows)] { 61 | #[path = "windows/mod.rs"] 62 | mod imp; 63 | } else if #[cfg(target_os = "redox")] { 64 | #[path = "redox.rs"] 65 | mod imp; 66 | } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] { 67 | #[path = "sgx.rs"] 68 | mod imp; 69 | } else if #[cfg(all( 70 | feature = "nightly", 71 | target_family = "wasm", 72 | target_feature = "atomics" 73 | ))] { 74 | #[path = "wasm_atomic.rs"] 75 | mod imp; 76 | } else if #[cfg(target_family = "wasm")] { 77 | #[path = "wasm.rs"] 78 | mod imp; 79 | } else { 80 | #[path = "generic.rs"] 81 | mod imp; 82 | } 83 | } 84 | 85 | pub use self::imp::{thread_yield, ThreadParker}; 86 | -------------------------------------------------------------------------------- /core/src/thread_parker/redox.rs: 
-------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use core::{ 9 | ptr, 10 | sync::atomic::{AtomicI32, Ordering}, 11 | }; 12 | use std::thread; 13 | use std::time::Instant; 14 | use syscall::{ 15 | call::futex, 16 | data::TimeSpec, 17 | error::{Error, EAGAIN, EFAULT, EINTR, ETIMEDOUT}, 18 | flag::{FUTEX_WAIT, FUTEX_WAKE}, 19 | }; 20 | 21 | const UNPARKED: i32 = 0; 22 | const PARKED: i32 = 1; 23 | 24 | // Helper type for putting a thread to sleep until some other thread wakes it up 25 | pub struct ThreadParker { 26 | futex: AtomicI32, 27 | } 28 | 29 | impl super::ThreadParkerT for ThreadParker { 30 | type UnparkHandle = UnparkHandle; 31 | 32 | const IS_CHEAP_TO_CONSTRUCT: bool = true; 33 | 34 | #[inline] 35 | fn new() -> ThreadParker { 36 | ThreadParker { 37 | futex: AtomicI32::new(UNPARKED), 38 | } 39 | } 40 | 41 | #[inline] 42 | unsafe fn prepare_park(&self) { 43 | self.futex.store(PARKED, Ordering::Relaxed); 44 | } 45 | 46 | #[inline] 47 | unsafe fn timed_out(&self) -> bool { 48 | self.futex.load(Ordering::Relaxed) != UNPARKED 49 | } 50 | 51 | #[inline] 52 | unsafe fn park(&self) { 53 | while self.futex.load(Ordering::Acquire) != UNPARKED { 54 | self.futex_wait(None); 55 | } 56 | } 57 | 58 | #[inline] 59 | unsafe fn park_until(&self, timeout: Instant) -> bool { 60 | while self.futex.load(Ordering::Acquire) != UNPARKED { 61 | let now = Instant::now(); 62 | if timeout <= now { 63 | return false; 64 | } 65 | let diff = timeout - now; 66 | if diff.as_secs() > i64::max_value() as u64 { 67 | // Timeout overflowed, just sleep indefinitely 68 | self.park(); 69 | return true; 70 | } 71 | let ts = TimeSpec { 72 | tv_sec: diff.as_secs() as i64, 73 | tv_nsec: diff.subsec_nanos() as i32, 74 | }; 75 | 
self.futex_wait(Some(ts)); 76 | } 77 | true 78 | } 79 | 80 | #[inline] 81 | unsafe fn unpark_lock(&self) -> UnparkHandle { 82 | // We don't need to lock anything, just clear the state 83 | self.futex.store(UNPARKED, Ordering::Release); 84 | 85 | UnparkHandle { futex: self.ptr() } 86 | } 87 | } 88 | 89 | impl ThreadParker { 90 | #[inline] 91 | fn futex_wait(&self, ts: Option) { 92 | let ts_ptr = ts 93 | .as_ref() 94 | .map(|ts_ref| ts_ref as *const _) 95 | .unwrap_or(ptr::null()); 96 | let r = unsafe { 97 | futex( 98 | self.ptr(), 99 | FUTEX_WAIT, 100 | PARKED, 101 | ts_ptr as usize, 102 | ptr::null_mut(), 103 | ) 104 | }; 105 | match r { 106 | Ok(r) => debug_assert_eq!(r, 0), 107 | Err(Error { errno }) => { 108 | debug_assert!(errno == EINTR || errno == EAGAIN || errno == ETIMEDOUT); 109 | } 110 | } 111 | } 112 | 113 | #[inline] 114 | fn ptr(&self) -> *mut i32 { 115 | &self.futex as *const AtomicI32 as *mut i32 116 | } 117 | } 118 | 119 | pub struct UnparkHandle { 120 | futex: *mut i32, 121 | } 122 | 123 | impl super::UnparkHandleT for UnparkHandle { 124 | #[inline] 125 | unsafe fn unpark(self) { 126 | // The thread data may have been freed at this point, but it doesn't 127 | // matter since the syscall will just return EFAULT in that case. 128 | let r = futex(self.futex, FUTEX_WAKE, PARKED, 0, ptr::null_mut()); 129 | match r { 130 | Ok(num_woken) => debug_assert!(num_woken == 0 || num_woken == 1), 131 | Err(Error { errno }) => debug_assert_eq!(errno, EFAULT), 132 | } 133 | } 134 | } 135 | 136 | #[inline] 137 | pub fn thread_yield() { 138 | thread::yield_now(); 139 | } 140 | -------------------------------------------------------------------------------- /core/src/thread_parker/sgx.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use core::sync::atomic::{AtomicBool, Ordering}; 9 | use std::time::Instant; 10 | use std::{ 11 | io, 12 | os::fortanix_sgx::{ 13 | thread::current as current_tcs, 14 | usercalls::{ 15 | self, 16 | raw::{Tcs, EV_UNPARK, WAIT_INDEFINITE}, 17 | }, 18 | }, 19 | thread, 20 | }; 21 | 22 | // Helper type for putting a thread to sleep until some other thread wakes it up 23 | pub struct ThreadParker { 24 | parked: AtomicBool, 25 | tcs: Tcs, 26 | } 27 | 28 | impl super::ThreadParkerT for ThreadParker { 29 | type UnparkHandle = UnparkHandle; 30 | 31 | const IS_CHEAP_TO_CONSTRUCT: bool = true; 32 | 33 | #[inline] 34 | fn new() -> ThreadParker { 35 | ThreadParker { 36 | parked: AtomicBool::new(false), 37 | tcs: current_tcs(), 38 | } 39 | } 40 | 41 | #[inline] 42 | unsafe fn prepare_park(&self) { 43 | self.parked.store(true, Ordering::Relaxed); 44 | } 45 | 46 | #[inline] 47 | unsafe fn timed_out(&self) -> bool { 48 | self.parked.load(Ordering::Relaxed) 49 | } 50 | 51 | #[inline] 52 | unsafe fn park(&self) { 53 | while self.parked.load(Ordering::Acquire) { 54 | let result = usercalls::wait(EV_UNPARK, WAIT_INDEFINITE); 55 | debug_assert_eq!(result.expect("wait returned error") & EV_UNPARK, EV_UNPARK); 56 | } 57 | } 58 | 59 | #[inline] 60 | unsafe fn park_until(&self, _timeout: Instant) -> bool { 61 | // FIXME: https://github.com/fortanix/rust-sgx/issues/31 62 | panic!("timeout not supported in SGX"); 63 | } 64 | 65 | #[inline] 66 | unsafe fn unpark_lock(&self) -> UnparkHandle { 67 | // We don't need to lock anything, just clear the state 68 | self.parked.store(false, Ordering::Release); 69 | UnparkHandle(self.tcs) 70 | } 71 | } 72 | 73 | pub struct UnparkHandle(Tcs); 74 | 75 | impl super::UnparkHandleT for UnparkHandle { 76 | #[inline] 77 | unsafe fn unpark(self) { 78 | let result = usercalls::send(EV_UNPARK, Some(self.0)); 79 | if cfg!(debug_assertions) { 80 | if let Err(error) = 
result { 81 | // `InvalidInput` may be returned if the thread we send to has 82 | // already been unparked and exited. 83 | if error.kind() != io::ErrorKind::InvalidInput { 84 | panic!("send returned an unexpected error: {:?}", error); 85 | } 86 | } 87 | } 88 | } 89 | } 90 | 91 | #[inline] 92 | pub fn thread_yield() { 93 | thread::yield_now(); 94 | } 95 | -------------------------------------------------------------------------------- /core/src/thread_parker/unix.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | #[cfg(target_vendor = "apple")] 9 | use core::ptr; 10 | use core::{ 11 | cell::{Cell, UnsafeCell}, 12 | mem::MaybeUninit, 13 | }; 14 | use libc; 15 | use std::time::Instant; 16 | use std::{thread, time::Duration}; 17 | 18 | // x32 Linux uses a non-standard type for tv_nsec in timespec. 
19 | // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 20 | #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] 21 | #[allow(non_camel_case_types)] 22 | type tv_nsec_t = i64; 23 | #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] 24 | #[allow(non_camel_case_types)] 25 | type tv_nsec_t = libc::c_long; 26 | 27 | // Helper type for putting a thread to sleep until some other thread wakes it up 28 | pub struct ThreadParker { 29 | should_park: Cell, 30 | mutex: UnsafeCell, 31 | condvar: UnsafeCell, 32 | initialized: Cell, 33 | } 34 | 35 | impl super::ThreadParkerT for ThreadParker { 36 | type UnparkHandle = UnparkHandle; 37 | 38 | const IS_CHEAP_TO_CONSTRUCT: bool = false; 39 | 40 | #[inline] 41 | fn new() -> ThreadParker { 42 | ThreadParker { 43 | should_park: Cell::new(false), 44 | mutex: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER), 45 | condvar: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER), 46 | initialized: Cell::new(false), 47 | } 48 | } 49 | 50 | #[inline] 51 | unsafe fn prepare_park(&self) { 52 | self.should_park.set(true); 53 | if !self.initialized.get() { 54 | self.init(); 55 | self.initialized.set(true); 56 | } 57 | } 58 | 59 | #[inline] 60 | unsafe fn timed_out(&self) -> bool { 61 | // We need to grab the mutex here because another thread may be 62 | // concurrently executing UnparkHandle::unpark, which is done without 63 | // holding the queue lock. 
64 | let r = libc::pthread_mutex_lock(self.mutex.get()); 65 | debug_assert_eq!(r, 0); 66 | let should_park = self.should_park.get(); 67 | let r = libc::pthread_mutex_unlock(self.mutex.get()); 68 | debug_assert_eq!(r, 0); 69 | should_park 70 | } 71 | 72 | #[inline] 73 | unsafe fn park(&self) { 74 | let r = libc::pthread_mutex_lock(self.mutex.get()); 75 | debug_assert_eq!(r, 0); 76 | while self.should_park.get() { 77 | let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get()); 78 | debug_assert_eq!(r, 0); 79 | } 80 | let r = libc::pthread_mutex_unlock(self.mutex.get()); 81 | debug_assert_eq!(r, 0); 82 | } 83 | 84 | #[inline] 85 | unsafe fn park_until(&self, timeout: Instant) -> bool { 86 | let r = libc::pthread_mutex_lock(self.mutex.get()); 87 | debug_assert_eq!(r, 0); 88 | while self.should_park.get() { 89 | let now = Instant::now(); 90 | if timeout <= now { 91 | let r = libc::pthread_mutex_unlock(self.mutex.get()); 92 | debug_assert_eq!(r, 0); 93 | return false; 94 | } 95 | 96 | if let Some(ts) = timeout_to_timespec(timeout - now) { 97 | let r = libc::pthread_cond_timedwait(self.condvar.get(), self.mutex.get(), &ts); 98 | if ts.tv_sec < 0 { 99 | // On some systems, negative timeouts will return EINVAL. In 100 | // that case we won't sleep and will just busy loop instead, 101 | // which is the best we can do. 
102 | debug_assert!(r == 0 || r == libc::ETIMEDOUT || r == libc::EINVAL); 103 | } else { 104 | debug_assert!(r == 0 || r == libc::ETIMEDOUT); 105 | } 106 | } else { 107 | // Timeout calculation overflowed, just sleep indefinitely 108 | let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get()); 109 | debug_assert_eq!(r, 0); 110 | } 111 | } 112 | let r = libc::pthread_mutex_unlock(self.mutex.get()); 113 | debug_assert_eq!(r, 0); 114 | true 115 | } 116 | 117 | #[inline] 118 | unsafe fn unpark_lock(&self) -> UnparkHandle { 119 | let r = libc::pthread_mutex_lock(self.mutex.get()); 120 | debug_assert_eq!(r, 0); 121 | 122 | UnparkHandle { 123 | thread_parker: self, 124 | } 125 | } 126 | } 127 | 128 | impl ThreadParker { 129 | /// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME. 130 | #[cfg(any(target_vendor = "apple", target_os = "android", target_os = "espidf"))] 131 | #[inline] 132 | unsafe fn init(&self) {} 133 | 134 | /// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME. 135 | #[cfg(not(any(target_vendor = "apple", target_os = "android", target_os = "espidf")))] 136 | #[inline] 137 | unsafe fn init(&self) { 138 | let mut attr = MaybeUninit::::uninit(); 139 | let r = libc::pthread_condattr_init(attr.as_mut_ptr()); 140 | debug_assert_eq!(r, 0); 141 | let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); 142 | debug_assert_eq!(r, 0); 143 | let r = libc::pthread_cond_init(self.condvar.get(), attr.as_ptr()); 144 | debug_assert_eq!(r, 0); 145 | let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); 146 | debug_assert_eq!(r, 0); 147 | } 148 | } 149 | 150 | impl Drop for ThreadParker { 151 | #[inline] 152 | fn drop(&mut self) { 153 | // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a 154 | // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. 
155 | // Once it is used (locked/unlocked) or pthread_mutex_init() is called, 156 | // this behaviour no longer occurs. The same applies to condvars. 157 | unsafe { 158 | let r = libc::pthread_mutex_destroy(self.mutex.get()); 159 | debug_assert!(r == 0 || r == libc::EINVAL); 160 | let r = libc::pthread_cond_destroy(self.condvar.get()); 161 | debug_assert!(r == 0 || r == libc::EINVAL); 162 | } 163 | } 164 | } 165 | 166 | pub struct UnparkHandle { 167 | thread_parker: *const ThreadParker, 168 | } 169 | 170 | impl super::UnparkHandleT for UnparkHandle { 171 | #[inline] 172 | unsafe fn unpark(self) { 173 | (*self.thread_parker).should_park.set(false); 174 | 175 | // We notify while holding the lock here to avoid races with the target 176 | // thread. In particular, the thread could exit after we unlock the 177 | // mutex, which would make the condvar access invalid memory. 178 | let r = libc::pthread_cond_signal((*self.thread_parker).condvar.get()); 179 | debug_assert_eq!(r, 0); 180 | let r = libc::pthread_mutex_unlock((*self.thread_parker).mutex.get()); 181 | debug_assert_eq!(r, 0); 182 | } 183 | } 184 | 185 | // Returns the current time on the clock used by pthread_cond_t as a timespec. 186 | #[cfg(target_vendor = "apple")] 187 | #[inline] 188 | fn timespec_now() -> libc::timespec { 189 | let mut now = MaybeUninit::::uninit(); 190 | let r = unsafe { libc::gettimeofday(now.as_mut_ptr(), ptr::null_mut()) }; 191 | debug_assert_eq!(r, 0); 192 | // SAFETY: We know `libc::gettimeofday` has initialized the value. 
193 | let now = unsafe { now.assume_init() }; 194 | libc::timespec { 195 | tv_sec: now.tv_sec, 196 | tv_nsec: now.tv_usec as tv_nsec_t * 1000, 197 | } 198 | } 199 | #[cfg(not(target_vendor = "apple"))] 200 | #[inline] 201 | fn timespec_now() -> libc::timespec { 202 | let mut now = MaybeUninit::::uninit(); 203 | let clock = if cfg!(target_os = "android") { 204 | // Android doesn't support pthread_condattr_setclock, so we need to 205 | // specify the timeout in CLOCK_REALTIME. 206 | libc::CLOCK_REALTIME 207 | } else { 208 | libc::CLOCK_MONOTONIC 209 | }; 210 | let r = unsafe { libc::clock_gettime(clock, now.as_mut_ptr()) }; 211 | debug_assert_eq!(r, 0); 212 | // SAFETY: We know `libc::clock_gettime` has initialized the value. 213 | unsafe { now.assume_init() } 214 | } 215 | 216 | // Converts a relative timeout into an absolute timeout in the clock used by 217 | // pthread_cond_t. 218 | #[inline] 219 | fn timeout_to_timespec(timeout: Duration) -> Option { 220 | // Handle overflows early on 221 | if timeout.as_secs() > libc::time_t::max_value() as u64 { 222 | return None; 223 | } 224 | 225 | let now = timespec_now(); 226 | let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t; 227 | let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t); 228 | if nsec >= 1_000_000_000 { 229 | nsec -= 1_000_000_000; 230 | sec = sec.and_then(|sec| sec.checked_add(1)); 231 | } 232 | 233 | sec.map(|sec| libc::timespec { 234 | tv_nsec: nsec, 235 | tv_sec: sec, 236 | }) 237 | } 238 | 239 | #[inline] 240 | pub fn thread_yield() { 241 | thread::yield_now(); 242 | } 243 | -------------------------------------------------------------------------------- /core/src/thread_parker/wasm.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! The wasm platform can't park when atomic support is not available. 9 | //! So this ThreadParker just panics on any attempt to park. 10 | 11 | use std::thread; 12 | use std::time::Instant; 13 | 14 | pub struct ThreadParker(()); 15 | 16 | impl super::ThreadParkerT for ThreadParker { 17 | type UnparkHandle = UnparkHandle; 18 | 19 | const IS_CHEAP_TO_CONSTRUCT: bool = true; 20 | 21 | fn new() -> ThreadParker { 22 | ThreadParker(()) 23 | } 24 | 25 | unsafe fn prepare_park(&self) { 26 | panic!("Parking not supported on this platform"); 27 | } 28 | 29 | unsafe fn timed_out(&self) -> bool { 30 | panic!("Parking not supported on this platform"); 31 | } 32 | 33 | unsafe fn park(&self) { 34 | panic!("Parking not supported on this platform"); 35 | } 36 | 37 | unsafe fn park_until(&self, _timeout: Instant) -> bool { 38 | panic!("Parking not supported on this platform"); 39 | } 40 | 41 | unsafe fn unpark_lock(&self) -> UnparkHandle { 42 | panic!("Parking not supported on this platform"); 43 | } 44 | } 45 | 46 | pub struct UnparkHandle(()); 47 | 48 | impl super::UnparkHandleT for UnparkHandle { 49 | unsafe fn unpark(self) {} 50 | } 51 | 52 | pub fn thread_yield() { 53 | thread::yield_now(); 54 | } 55 | -------------------------------------------------------------------------------- /core/src/thread_parker/wasm_atomic.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
7 | 8 | use core::{ 9 | arch::wasm32, 10 | sync::atomic::{AtomicI32, Ordering}, 11 | }; 12 | use std::time::{Duration, Instant}; 13 | use std::{convert::TryFrom, thread}; 14 | 15 | // Helper type for putting a thread to sleep until some other thread wakes it up 16 | pub struct ThreadParker { 17 | parked: AtomicI32, 18 | } 19 | 20 | const UNPARKED: i32 = 0; 21 | const PARKED: i32 = 1; 22 | 23 | impl super::ThreadParkerT for ThreadParker { 24 | type UnparkHandle = UnparkHandle; 25 | 26 | const IS_CHEAP_TO_CONSTRUCT: bool = true; 27 | 28 | #[inline] 29 | fn new() -> ThreadParker { 30 | ThreadParker { 31 | parked: AtomicI32::new(UNPARKED), 32 | } 33 | } 34 | 35 | #[inline] 36 | unsafe fn prepare_park(&self) { 37 | self.parked.store(PARKED, Ordering::Relaxed); 38 | } 39 | 40 | #[inline] 41 | unsafe fn timed_out(&self) -> bool { 42 | self.parked.load(Ordering::Relaxed) == PARKED 43 | } 44 | 45 | #[inline] 46 | unsafe fn park(&self) { 47 | while self.parked.load(Ordering::Acquire) == PARKED { 48 | let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, -1); 49 | // we should have either woken up (0) or got a not-equal due to a 50 | // race (1). 
We should never time out (2) 51 | debug_assert!(r == 0 || r == 1); 52 | } 53 | } 54 | 55 | #[inline] 56 | unsafe fn park_until(&self, timeout: Instant) -> bool { 57 | while self.parked.load(Ordering::Acquire) == PARKED { 58 | if let Some(left) = timeout.checked_duration_since(Instant::now()) { 59 | let nanos_left = i64::try_from(left.as_nanos()).unwrap_or(i64::max_value()); 60 | let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, nanos_left); 61 | debug_assert!(r == 0 || r == 1 || r == 2); 62 | } else { 63 | return false; 64 | } 65 | } 66 | true 67 | } 68 | 69 | #[inline] 70 | unsafe fn unpark_lock(&self) -> UnparkHandle { 71 | // We don't need to lock anything, just clear the state 72 | self.parked.store(UNPARKED, Ordering::Release); 73 | UnparkHandle(self.ptr()) 74 | } 75 | } 76 | 77 | impl ThreadParker { 78 | #[inline] 79 | fn ptr(&self) -> *mut i32 { 80 | &self.parked as *const AtomicI32 as *mut i32 81 | } 82 | } 83 | 84 | pub struct UnparkHandle(*mut i32); 85 | 86 | impl super::UnparkHandleT for UnparkHandle { 87 | #[inline] 88 | unsafe fn unpark(self) { 89 | let num_notified = wasm32::memory_atomic_notify(self.0 as *mut i32, 1); 90 | debug_assert!(num_notified == 0 || num_notified == 1); 91 | } 92 | } 93 | 94 | #[inline] 95 | pub fn thread_yield() { 96 | thread::yield_now(); 97 | } 98 | -------------------------------------------------------------------------------- /core/src/thread_parker/windows/bindings.rs: -------------------------------------------------------------------------------- 1 | //! Manual bindings to the win32 API to avoid dependencies on windows-sys or winapi 2 | //! as these bindings will **never** change and `parking_lot_core` is a foundational 3 | //! dependency for the Rust ecosystem, so the dependencies used by it have an 4 | //! 
outsize affect 5 | 6 | pub const INFINITE: u32 = 4294967295; 7 | pub const ERROR_TIMEOUT: u32 = 1460; 8 | pub const GENERIC_READ: u32 = 2147483648; 9 | pub const GENERIC_WRITE: u32 = 1073741824; 10 | pub const STATUS_SUCCESS: i32 = 0; 11 | pub const STATUS_TIMEOUT: i32 = 258; 12 | 13 | pub type HANDLE = isize; 14 | pub type HINSTANCE = isize; 15 | pub type BOOL = i32; 16 | pub type BOOLEAN = u8; 17 | pub type NTSTATUS = i32; 18 | pub type FARPROC = Option isize>; 19 | pub type WaitOnAddress = unsafe extern "system" fn( 20 | Address: *const std::ffi::c_void, 21 | CompareAddress: *const std::ffi::c_void, 22 | AddressSize: usize, 23 | dwMilliseconds: u32, 24 | ) -> BOOL; 25 | pub type WakeByAddressSingle = unsafe extern "system" fn(Address: *const std::ffi::c_void); 26 | 27 | windows_targets::link!("kernel32.dll" "system" fn GetLastError() -> u32); 28 | windows_targets::link!("kernel32.dll" "system" fn CloseHandle(hObject: HANDLE) -> BOOL); 29 | windows_targets::link!("kernel32.dll" "system" fn GetModuleHandleA(lpModuleName: *const u8) -> HINSTANCE); 30 | windows_targets::link!("kernel32.dll" "system" fn GetProcAddress(hModule: HINSTANCE, lpProcName: *const u8) -> FARPROC); 31 | windows_targets::link!("kernel32.dll" "system" fn Sleep(dwMilliseconds: u32) -> ()); 32 | -------------------------------------------------------------------------------- /core/src/thread_parker/windows/keyed_event.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
7 | 8 | use core::{ 9 | ffi, 10 | mem::{self, MaybeUninit}, 11 | ptr, 12 | }; 13 | use std::sync::atomic::{AtomicUsize, Ordering}; 14 | use std::time::Instant; 15 | 16 | const STATE_UNPARKED: usize = 0; 17 | const STATE_PARKED: usize = 1; 18 | const STATE_TIMED_OUT: usize = 2; 19 | 20 | use super::bindings::*; 21 | 22 | #[allow(non_snake_case)] 23 | pub struct KeyedEvent { 24 | handle: HANDLE, 25 | NtReleaseKeyedEvent: extern "system" fn( 26 | EventHandle: HANDLE, 27 | Key: *mut ffi::c_void, 28 | Alertable: BOOLEAN, 29 | Timeout: *mut i64, 30 | ) -> NTSTATUS, 31 | NtWaitForKeyedEvent: extern "system" fn( 32 | EventHandle: HANDLE, 33 | Key: *mut ffi::c_void, 34 | Alertable: BOOLEAN, 35 | Timeout: *mut i64, 36 | ) -> NTSTATUS, 37 | } 38 | 39 | impl KeyedEvent { 40 | #[inline] 41 | unsafe fn wait_for(&self, key: *mut ffi::c_void, timeout: *mut i64) -> NTSTATUS { 42 | (self.NtWaitForKeyedEvent)(self.handle, key, false.into(), timeout) 43 | } 44 | 45 | #[inline] 46 | unsafe fn release(&self, key: *mut ffi::c_void) -> NTSTATUS { 47 | (self.NtReleaseKeyedEvent)(self.handle, key, false.into(), ptr::null_mut()) 48 | } 49 | 50 | #[allow(non_snake_case)] 51 | pub fn create() -> Option { 52 | let ntdll = unsafe { GetModuleHandleA(b"ntdll.dll\0".as_ptr()) }; 53 | if ntdll == 0 { 54 | return None; 55 | } 56 | 57 | let NtCreateKeyedEvent = 58 | unsafe { GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr())? }; 59 | let NtReleaseKeyedEvent = 60 | unsafe { GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr())? }; 61 | let NtWaitForKeyedEvent = 62 | unsafe { GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr())? 
}; 63 | 64 | let NtCreateKeyedEvent: extern "system" fn( 65 | KeyedEventHandle: *mut HANDLE, 66 | DesiredAccess: u32, 67 | ObjectAttributes: *mut ffi::c_void, 68 | Flags: u32, 69 | ) -> NTSTATUS = unsafe { mem::transmute(NtCreateKeyedEvent) }; 70 | let mut handle = MaybeUninit::uninit(); 71 | let status = NtCreateKeyedEvent( 72 | handle.as_mut_ptr(), 73 | GENERIC_READ | GENERIC_WRITE, 74 | ptr::null_mut(), 75 | 0, 76 | ); 77 | if status != STATUS_SUCCESS { 78 | return None; 79 | } 80 | 81 | Some(KeyedEvent { 82 | handle: unsafe { handle.assume_init() }, 83 | NtReleaseKeyedEvent: unsafe { mem::transmute(NtReleaseKeyedEvent) }, 84 | NtWaitForKeyedEvent: unsafe { mem::transmute(NtWaitForKeyedEvent) }, 85 | }) 86 | } 87 | 88 | #[inline] 89 | pub fn prepare_park(&'static self, key: &AtomicUsize) { 90 | key.store(STATE_PARKED, Ordering::Relaxed); 91 | } 92 | 93 | #[inline] 94 | pub fn timed_out(&'static self, key: &AtomicUsize) -> bool { 95 | key.load(Ordering::Relaxed) == STATE_TIMED_OUT 96 | } 97 | 98 | #[inline] 99 | pub unsafe fn park(&'static self, key: &AtomicUsize) { 100 | let status = self.wait_for(key as *const _ as *mut ffi::c_void, ptr::null_mut()); 101 | debug_assert_eq!(status, STATUS_SUCCESS); 102 | } 103 | 104 | #[inline] 105 | pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool { 106 | let now = Instant::now(); 107 | if timeout <= now { 108 | // If another thread unparked us, we need to call 109 | // NtWaitForKeyedEvent otherwise that thread will stay stuck at 110 | // NtReleaseKeyedEvent. 111 | if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED { 112 | self.park(key); 113 | return true; 114 | } 115 | return false; 116 | } 117 | 118 | // NT uses a timeout in units of 100ns. We use a negative value to 119 | // indicate a relative timeout based on a monotonic clock. 
120 | let diff = timeout - now; 121 | let value = (diff.as_secs() as i64) 122 | .checked_mul(-10000000) 123 | .and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100)); 124 | 125 | let mut nt_timeout = match value { 126 | Some(x) => x, 127 | None => { 128 | // Timeout overflowed, just sleep indefinitely 129 | self.park(key); 130 | return true; 131 | } 132 | }; 133 | 134 | let status = self.wait_for(key as *const _ as *mut ffi::c_void, &mut nt_timeout); 135 | if status == STATUS_SUCCESS { 136 | return true; 137 | } 138 | debug_assert_eq!(status, STATUS_TIMEOUT); 139 | 140 | // If another thread unparked us, we need to call NtWaitForKeyedEvent 141 | // otherwise that thread will stay stuck at NtReleaseKeyedEvent. 142 | if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED { 143 | self.park(key); 144 | return true; 145 | } 146 | false 147 | } 148 | 149 | #[inline] 150 | pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle { 151 | // If the state was STATE_PARKED then we need to wake up the thread 152 | if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED { 153 | UnparkHandle { 154 | key: key, 155 | keyed_event: self, 156 | } 157 | } else { 158 | UnparkHandle { 159 | key: ptr::null(), 160 | keyed_event: self, 161 | } 162 | } 163 | } 164 | } 165 | 166 | impl Drop for KeyedEvent { 167 | #[inline] 168 | fn drop(&mut self) { 169 | unsafe { 170 | let ok = CloseHandle(self.handle); 171 | debug_assert_eq!(ok, true.into()); 172 | } 173 | } 174 | } 175 | 176 | // Handle for a thread that is about to be unparked. We need to mark the thread 177 | // as unparked while holding the queue lock, but we delay the actual unparking 178 | // until after the queue lock is released. 179 | pub struct UnparkHandle { 180 | key: *const AtomicUsize, 181 | keyed_event: &'static KeyedEvent, 182 | } 183 | 184 | impl UnparkHandle { 185 | // Wakes up the parked thread. 
This should be called after the queue lock is 186 | // released to avoid blocking the queue for too long. 187 | #[inline] 188 | pub unsafe fn unpark(self) { 189 | if !self.key.is_null() { 190 | let status = self.keyed_event.release(self.key as *mut ffi::c_void); 191 | debug_assert_eq!(status, STATUS_SUCCESS); 192 | } 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /core/src/thread_parker/windows/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use core::{ 9 | ptr, 10 | sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, 11 | }; 12 | use std::time::Instant; 13 | 14 | mod bindings; 15 | mod keyed_event; 16 | mod waitaddress; 17 | 18 | enum Backend { 19 | KeyedEvent(keyed_event::KeyedEvent), 20 | WaitAddress(waitaddress::WaitAddress), 21 | } 22 | 23 | static BACKEND: AtomicPtr = AtomicPtr::new(ptr::null_mut()); 24 | 25 | impl Backend { 26 | #[inline] 27 | fn get() -> &'static Backend { 28 | // Fast path: use the existing object 29 | let backend_ptr = BACKEND.load(Ordering::Acquire); 30 | if !backend_ptr.is_null() { 31 | return unsafe { &*backend_ptr }; 32 | }; 33 | 34 | Backend::create() 35 | } 36 | 37 | #[cold] 38 | fn create() -> &'static Backend { 39 | // Try to create a new Backend 40 | let backend; 41 | if let Some(waitaddress) = waitaddress::WaitAddress::create() { 42 | backend = Backend::WaitAddress(waitaddress); 43 | } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() { 44 | backend = Backend::KeyedEvent(keyed_event); 45 | } else { 46 | panic!( 47 | "parking_lot requires either NT Keyed Events (WinXP+) or \ 48 | WaitOnAddress/WakeByAddress (Win8+)" 49 | ); 50 | } 51 | 52 | // Try to set our new Backend as the global 
one 53 | let backend_ptr = Box::into_raw(Box::new(backend)); 54 | match BACKEND.compare_exchange( 55 | ptr::null_mut(), 56 | backend_ptr, 57 | Ordering::Release, 58 | Ordering::Acquire, 59 | ) { 60 | Ok(_) => unsafe { &*backend_ptr }, 61 | Err(global_backend_ptr) => { 62 | unsafe { 63 | // We lost the race, free our object and return the global one 64 | let _ = Box::from_raw(backend_ptr); 65 | &*global_backend_ptr 66 | } 67 | } 68 | } 69 | } 70 | } 71 | 72 | // Helper type for putting a thread to sleep until some other thread wakes it up 73 | pub struct ThreadParker { 74 | key: AtomicUsize, 75 | backend: &'static Backend, 76 | } 77 | 78 | impl super::ThreadParkerT for ThreadParker { 79 | type UnparkHandle = UnparkHandle; 80 | 81 | const IS_CHEAP_TO_CONSTRUCT: bool = true; 82 | 83 | #[inline] 84 | fn new() -> ThreadParker { 85 | // Initialize the backend here to ensure we don't get any panics 86 | // later on, which could leave synchronization primitives in a broken 87 | // state. 88 | ThreadParker { 89 | key: AtomicUsize::new(0), 90 | backend: Backend::get(), 91 | } 92 | } 93 | 94 | // Prepares the parker. This should be called before adding it to the queue. 95 | #[inline] 96 | unsafe fn prepare_park(&self) { 97 | match *self.backend { 98 | Backend::KeyedEvent(ref x) => x.prepare_park(&self.key), 99 | Backend::WaitAddress(ref x) => x.prepare_park(&self.key), 100 | } 101 | } 102 | 103 | // Checks if the park timed out. This should be called while holding the 104 | // queue lock after park_until has returned false. 105 | #[inline] 106 | unsafe fn timed_out(&self) -> bool { 107 | match *self.backend { 108 | Backend::KeyedEvent(ref x) => x.timed_out(&self.key), 109 | Backend::WaitAddress(ref x) => x.timed_out(&self.key), 110 | } 111 | } 112 | 113 | // Parks the thread until it is unparked. This should be called after it has 114 | // been added to the queue, after unlocking the queue. 
115 | #[inline] 116 | unsafe fn park(&self) { 117 | match *self.backend { 118 | Backend::KeyedEvent(ref x) => x.park(&self.key), 119 | Backend::WaitAddress(ref x) => x.park(&self.key), 120 | } 121 | } 122 | 123 | // Parks the thread until it is unparked or the timeout is reached. This 124 | // should be called after it has been added to the queue, after unlocking 125 | // the queue. Returns true if we were unparked and false if we timed out. 126 | #[inline] 127 | unsafe fn park_until(&self, timeout: Instant) -> bool { 128 | match *self.backend { 129 | Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout), 130 | Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout), 131 | } 132 | } 133 | 134 | // Locks the parker to prevent the target thread from exiting. This is 135 | // necessary to ensure that thread-local ThreadData objects remain valid. 136 | // This should be called while holding the queue lock. 137 | #[inline] 138 | unsafe fn unpark_lock(&self) -> UnparkHandle { 139 | match *self.backend { 140 | Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)), 141 | Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)), 142 | } 143 | } 144 | } 145 | 146 | // Handle for a thread that is about to be unparked. We need to mark the thread 147 | // as unparked while holding the queue lock, but we delay the actual unparking 148 | // until after the queue lock is released. 149 | pub enum UnparkHandle { 150 | KeyedEvent(keyed_event::UnparkHandle), 151 | WaitAddress(waitaddress::UnparkHandle), 152 | } 153 | 154 | impl super::UnparkHandleT for UnparkHandle { 155 | // Wakes up the parked thread. This should be called after the queue lock is 156 | // released to avoid blocking the queue for too long. 
157 | #[inline] 158 | unsafe fn unpark(self) { 159 | match self { 160 | UnparkHandle::KeyedEvent(x) => x.unpark(), 161 | UnparkHandle::WaitAddress(x) => x.unpark(), 162 | } 163 | } 164 | } 165 | 166 | // Yields the rest of the current timeslice to the OS 167 | #[inline] 168 | pub fn thread_yield() { 169 | unsafe { 170 | // We don't use SwitchToThread here because it doesn't consider all 171 | // threads in the system and the thread we are waiting for may not get 172 | // selected. 173 | bindings::Sleep(0); 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /core/src/thread_parker/windows/waitaddress.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use core::{ 9 | mem, 10 | sync::atomic::{AtomicUsize, Ordering}, 11 | }; 12 | use std::{ffi, time::Instant}; 13 | 14 | use super::bindings::*; 15 | 16 | #[allow(non_snake_case)] 17 | pub struct WaitAddress { 18 | WaitOnAddress: WaitOnAddress, 19 | WakeByAddressSingle: WakeByAddressSingle, 20 | } 21 | 22 | impl WaitAddress { 23 | #[allow(non_snake_case)] 24 | pub fn create() -> Option { 25 | let synch_dll = unsafe { GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr()) }; 26 | if synch_dll == 0 { 27 | return None; 28 | } 29 | 30 | let WaitOnAddress = unsafe { GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr())? }; 31 | let WakeByAddressSingle = 32 | unsafe { GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr())? 
};

        Some(WaitAddress {
            WaitOnAddress: unsafe { mem::transmute(WaitOnAddress) },
            WakeByAddressSingle: unsafe { mem::transmute(WakeByAddressSingle) },
        })
    }

    #[inline]
    pub fn prepare_park(&'static self, key: &AtomicUsize) {
        key.store(1, Ordering::Relaxed);
    }

    #[inline]
    pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
        key.load(Ordering::Relaxed) != 0
    }

    #[inline]
    pub fn park(&'static self, key: &AtomicUsize) {
        while key.load(Ordering::Acquire) != 0 {
            let r = self.wait_on_address(key, INFINITE);
            debug_assert!(r == true.into());
        }
    }

    #[inline]
    pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
        while key.load(Ordering::Acquire) != 0 {
            let now = Instant::now();
            if timeout <= now {
                return false;
            }
            let diff = timeout - now;
            // Convert the remaining time to whole milliseconds, rounding up so
            // we never wake before the deadline; saturate to INFINITE on
            // overflow.
            let timeout = diff
                .as_secs()
                .checked_mul(1000)
                .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
                .map(|ms| {
                    if ms > std::u32::MAX as u64 {
                        INFINITE
                    } else {
                        ms as u32
                    }
                })
                .unwrap_or(INFINITE);
            if self.wait_on_address(key, timeout) == false.into() {
                debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT);
            }
        }
        true
    }

    #[inline]
    pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
        // We don't need to lock anything, just clear the state
        key.store(0, Ordering::Release);

        UnparkHandle {
            key: key,
            waitaddress: self,
        }
    }

    #[inline]
    fn wait_on_address(&'static self, key: &AtomicUsize, timeout: u32) -> BOOL {
        let cmp = 1usize;
        unsafe {
            (self.WaitOnAddress)(
                key as *const _ as *mut ffi::c_void,
                &cmp as *const _ as *mut ffi::c_void,
                mem::size_of::<usize>(),
                timeout,
            )
        }
    }
}

// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle {
    key: *const AtomicUsize,
    waitaddress: &'static WaitAddress,
}

impl UnparkHandle {
    // Wakes up the parked thread. This should be called after the queue lock is
    // released to avoid blocking the queue for too long.
    #[inline]
    pub fn unpark(self) {
        unsafe { (self.waitaddress.WakeByAddressSingle)(self.key as *mut ffi::c_void) };
    }
}

// ============================================================================
// core/src/util.rs
// ============================================================================
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

// Option::unchecked_unwrap
pub trait UncheckedOptionExt<T> {
    unsafe fn unchecked_unwrap(self) -> T;
}

impl<T> UncheckedOptionExt<T> for Option<T> {
    #[inline]
    unsafe fn unchecked_unwrap(self) -> T {
        match self {
            Some(x) => x,
            None => unreachable(),
        }
    }
}

// hint::unreachable_unchecked() in release mode
#[inline]
unsafe fn unreachable() -> ! {
    if cfg!(debug_assertions) {
        unreachable!();
    } else {
        core::hint::unreachable_unchecked()
    }
}

// ============================================================================
// core/src/word_lock.rs
// ============================================================================
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::spinwait::SpinWait;
use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT};
use core::{
    cell::Cell,
    mem, ptr,
    sync::atomic::{fence, AtomicUsize, Ordering},
};

struct ThreadData {
    parker: ThreadParker,

    // Linked list of threads in the queue. The queue is split into two parts:
    // the processed part and the unprocessed part. When new nodes are added to
    // the list, they only have the next pointer set, and queue_tail is null.
    //
    // Nodes are processed with the queue lock held, which consists of setting
    // the prev pointer for each node and setting the queue_tail pointer on the
    // first processed node of the list.
    //
    // This setup allows nodes to be added to the queue without a lock, while
    // still allowing O(1) removal of nodes from the processed part of the list.
    // The only cost is the O(n) processing, but this only needs to be done
    // once for each node, and therefore isn't too expensive.
    queue_tail: Cell<*const ThreadData>,
    prev: Cell<*const ThreadData>,
    next: Cell<*const ThreadData>,
}

impl ThreadData {
    #[inline]
    fn new() -> ThreadData {
        // Pointers to ThreadData are packed into the state word alongside the
        // lock bits, so the alignment must leave the low bits free.
        assert!(mem::align_of::<ThreadData>() > !QUEUE_MASK);
        ThreadData {
            parker: ThreadParker::new(),
            queue_tail: Cell::new(ptr::null()),
            prev: Cell::new(ptr::null()),
            next: Cell::new(ptr::null()),
        }
    }
}

// Invokes the given closure with a reference to the current thread `ThreadData`.
#[inline]
fn with_thread_data<T>(f: impl FnOnce(&ThreadData) -> T) -> T {
    let mut thread_data_ptr = ptr::null();
    // If ThreadData is expensive to construct, then we want to use a cached
    // version in thread-local storage if possible.
    if !ThreadParker::IS_CHEAP_TO_CONSTRUCT {
        thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
        if let Ok(tls_thread_data) = THREAD_DATA.try_with(|x| x as *const ThreadData) {
            thread_data_ptr = tls_thread_data;
        }
    }
    // Otherwise just create a ThreadData on the stack
    let mut thread_data_storage = None;
    if thread_data_ptr.is_null() {
        thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new);
    }

    f(unsafe { &*thread_data_ptr })
}

const LOCKED_BIT: usize = 1;
const QUEUE_LOCKED_BIT: usize = 2;
const QUEUE_MASK: usize = !3;

// Word-sized lock that is used to implement the parking_lot API. Since this
// can't use parking_lot, it instead manages its own queue of waiting threads.
pub struct WordLock {
    state: AtomicUsize,
}

impl WordLock {
    /// Returns a new, unlocked, `WordLock`.
    pub const fn new() -> Self {
        WordLock {
            state: AtomicUsize::new(0),
        }
    }

    #[inline]
    pub fn lock(&self) {
        if self
            .state
            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
        self.lock_slow();
    }

    /// Must not be called on an already unlocked `WordLock`!
101 | #[inline] 102 | pub unsafe fn unlock(&self) { 103 | let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release); 104 | if state.is_queue_locked() || state.queue_head().is_null() { 105 | return; 106 | } 107 | self.unlock_slow(); 108 | } 109 | 110 | #[cold] 111 | fn lock_slow(&self) { 112 | let mut spinwait = SpinWait::new(); 113 | let mut state = self.state.load(Ordering::Relaxed); 114 | loop { 115 | // Grab the lock if it isn't locked, even if there is a queue on it 116 | if !state.is_locked() { 117 | match self.state.compare_exchange_weak( 118 | state, 119 | state | LOCKED_BIT, 120 | Ordering::Acquire, 121 | Ordering::Relaxed, 122 | ) { 123 | Ok(_) => return, 124 | Err(x) => state = x, 125 | } 126 | continue; 127 | } 128 | 129 | // If there is no queue, try spinning a few times 130 | if state.queue_head().is_null() && spinwait.spin() { 131 | state = self.state.load(Ordering::Relaxed); 132 | continue; 133 | } 134 | 135 | // Get our thread data and prepare it for parking 136 | state = with_thread_data(|thread_data| { 137 | // The pthread implementation is still unsafe, so we need to surround `prepare_park` 138 | // with `unsafe {}`. 139 | #[allow(unused_unsafe)] 140 | unsafe { 141 | thread_data.parker.prepare_park(); 142 | } 143 | 144 | // Add our thread to the front of the queue 145 | let queue_head = state.queue_head(); 146 | if queue_head.is_null() { 147 | thread_data.queue_tail.set(thread_data); 148 | thread_data.prev.set(ptr::null()); 149 | } else { 150 | thread_data.queue_tail.set(ptr::null()); 151 | thread_data.prev.set(ptr::null()); 152 | thread_data.next.set(queue_head); 153 | } 154 | if let Err(x) = self.state.compare_exchange_weak( 155 | state, 156 | state.with_queue_head(thread_data), 157 | Ordering::AcqRel, 158 | Ordering::Relaxed, 159 | ) { 160 | return x; 161 | } 162 | 163 | // Sleep until we are woken up by an unlock 164 | // Ignoring unused unsafe, since it's only a few platforms where this is unsafe. 
165 | #[allow(unused_unsafe)] 166 | unsafe { 167 | thread_data.parker.park(); 168 | } 169 | 170 | // Loop back and try locking again 171 | spinwait.reset(); 172 | self.state.load(Ordering::Relaxed) 173 | }); 174 | } 175 | } 176 | 177 | #[cold] 178 | fn unlock_slow(&self) { 179 | let mut state = self.state.load(Ordering::Relaxed); 180 | loop { 181 | // We just unlocked the WordLock. Just check if there is a thread 182 | // to wake up. If the queue is locked then another thread is already 183 | // taking care of waking up a thread. 184 | if state.is_queue_locked() || state.queue_head().is_null() { 185 | return; 186 | } 187 | 188 | // Try to grab the queue lock 189 | match self.state.compare_exchange_weak( 190 | state, 191 | state | QUEUE_LOCKED_BIT, 192 | Ordering::Acquire, 193 | Ordering::Relaxed, 194 | ) { 195 | Ok(_) => break, 196 | Err(x) => state = x, 197 | } 198 | } 199 | 200 | // Now we have the queue lock and the queue is non-empty 201 | 'outer: loop { 202 | // First, we need to fill in the prev pointers for any newly added 203 | // threads. We do this until we reach a node that we previously 204 | // processed, which has a non-null queue_tail pointer. 205 | let queue_head = state.queue_head(); 206 | let mut queue_tail; 207 | let mut current = queue_head; 208 | loop { 209 | queue_tail = unsafe { (*current).queue_tail.get() }; 210 | if !queue_tail.is_null() { 211 | break; 212 | } 213 | unsafe { 214 | let next = (*current).next.get(); 215 | (*next).prev.set(current); 216 | current = next; 217 | } 218 | } 219 | 220 | // Set queue_tail on the queue head to indicate that the whole list 221 | // has prev pointers set correctly. 222 | unsafe { 223 | (*queue_head).queue_tail.set(queue_tail); 224 | } 225 | 226 | // If the WordLock is locked, then there is no point waking up a 227 | // thread now. Instead we let the next unlocker take care of waking 228 | // up a thread. 
229 | if state.is_locked() { 230 | match self.state.compare_exchange_weak( 231 | state, 232 | state & !QUEUE_LOCKED_BIT, 233 | Ordering::Release, 234 | Ordering::Relaxed, 235 | ) { 236 | Ok(_) => return, 237 | Err(x) => state = x, 238 | } 239 | 240 | // Need an acquire fence before reading the new queue 241 | fence_acquire(&self.state); 242 | continue; 243 | } 244 | 245 | // Remove the last thread from the queue and unlock the queue 246 | let new_tail = unsafe { (*queue_tail).prev.get() }; 247 | if new_tail.is_null() { 248 | loop { 249 | match self.state.compare_exchange_weak( 250 | state, 251 | state & LOCKED_BIT, 252 | Ordering::Release, 253 | Ordering::Relaxed, 254 | ) { 255 | Ok(_) => break, 256 | Err(x) => state = x, 257 | } 258 | 259 | // If the compare_exchange failed because a new thread was 260 | // added to the queue then we need to re-scan the queue to 261 | // find the previous element. 262 | if state.queue_head().is_null() { 263 | continue; 264 | } else { 265 | // Need an acquire fence before reading the new queue 266 | fence_acquire(&self.state); 267 | continue 'outer; 268 | } 269 | } 270 | } else { 271 | unsafe { 272 | (*queue_head).queue_tail.set(new_tail); 273 | } 274 | self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release); 275 | } 276 | 277 | // Finally, wake up the thread we removed from the queue. Note that 278 | // we don't need to worry about any races here since the thread is 279 | // guaranteed to be sleeping right now and we are the only one who 280 | // can wake it up. 281 | unsafe { 282 | (*queue_tail).parker.unpark_lock().unpark(); 283 | } 284 | break; 285 | } 286 | } 287 | } 288 | 289 | // Thread-Sanitizer only has partial fence support, so when running under it, we 290 | // try and avoid false positives by using a discarded acquire load instead. 
291 | #[inline] 292 | fn fence_acquire(a: &AtomicUsize) { 293 | if cfg!(tsan_enabled) { 294 | let _ = a.load(Ordering::Acquire); 295 | } else { 296 | fence(Ordering::Acquire); 297 | } 298 | } 299 | 300 | trait LockState { 301 | fn is_locked(self) -> bool; 302 | fn is_queue_locked(self) -> bool; 303 | fn queue_head(self) -> *const ThreadData; 304 | fn with_queue_head(self, thread_data: *const ThreadData) -> Self; 305 | } 306 | 307 | impl LockState for usize { 308 | #[inline] 309 | fn is_locked(self) -> bool { 310 | self & LOCKED_BIT != 0 311 | } 312 | 313 | #[inline] 314 | fn is_queue_locked(self) -> bool { 315 | self & QUEUE_LOCKED_BIT != 0 316 | } 317 | 318 | #[inline] 319 | fn queue_head(self) -> *const ThreadData { 320 | (self & QUEUE_MASK) as *const ThreadData 321 | } 322 | 323 | #[inline] 324 | fn with_queue_head(self, thread_data: *const ThreadData) -> Self { 325 | (self & !QUEUE_MASK) | thread_data as *const _ as usize 326 | } 327 | } 328 | -------------------------------------------------------------------------------- /lock_api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lock_api" 3 | version = "0.4.13" 4 | authors = ["Amanieu d'Antras "] 5 | description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std." 6 | license = "MIT OR Apache-2.0" 7 | repository = "https://github.com/Amanieu/parking_lot" 8 | keywords = ["mutex", "rwlock", "lock", "no_std"] 9 | categories = ["concurrency", "no-std"] 10 | edition = "2021" 11 | rust-version = "1.64.0" 12 | 13 | [package.metadata.docs.rs] 14 | all-features = true 15 | rustdoc-args = ["--cfg", "docsrs", "--generate-link-to-definition"] 16 | 17 | [dependencies] 18 | scopeguard = { version = "1.1.0", default-features = false } 19 | owning_ref = { version = "0.4.1", optional = true } 20 | 21 | # Optional dependency for supporting serde. 
Optional crates automatically 22 | # create a feature with the same name as the crate, so if you need serde 23 | # support, just pass "--features serde" when building this crate. 24 | serde = { version = "1.0.126", default-features = false, optional = true } 25 | 26 | [build-dependencies] 27 | autocfg = "1.1.0" 28 | 29 | [features] 30 | default = ["atomic_usize"] 31 | nightly = [] 32 | arc_lock = [] 33 | atomic_usize = [] 34 | -------------------------------------------------------------------------------- /lock_api/LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /lock_api/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /lock_api/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | let cfg = autocfg::new(); 3 | 4 | println!("cargo:rerun-if-changed=build.rs"); 5 | println!("cargo:rustc-check-cfg=cfg(has_const_fn_trait_bound)"); 6 | if cfg.probe_rustc_version(1, 61) { 7 | println!("cargo:rustc-cfg=has_const_fn_trait_bound"); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /lock_api/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! This library provides type-safe and fully-featured [`Mutex`] and [`RwLock`] 9 | //! types which wrap a simple raw mutex or rwlock type. This has several 10 | //! benefits: not only does it eliminate a large portion of the work in 11 | //! implementing custom lock types, it also allows users to write code which is 12 | //! generic with regards to different lock implementations. 13 | //! 14 | //! Basic usage of this crate is very straightforward: 15 | //! 16 | //! 1. Create a raw lock type. This should only contain the lock state, not any 17 | //! data protected by the lock. 18 | //! 2. Implement the `RawMutex` trait for your custom lock type. 19 | //! 3. Export your mutex as a type alias for `lock_api::Mutex`, and 20 | //! your mutex guard as a type alias for `lock_api::MutexGuard`. 21 | //! 
See the [example](#example) below for details. 22 | //! 23 | //! This process is similar for [`RwLock`]s, except that two guards need to be 24 | //! exported instead of one. (Or 3 guards if your type supports upgradable read 25 | //! locks, see [extension traits](#extension-traits) below for details) 26 | //! 27 | //! # Example 28 | //! 29 | //! ``` 30 | //! use lock_api::{RawMutex, Mutex, GuardSend}; 31 | //! use std::sync::atomic::{AtomicBool, Ordering}; 32 | //! 33 | //! // 1. Define our raw lock type 34 | //! pub struct RawSpinlock(AtomicBool); 35 | //! 36 | //! // 2. Implement RawMutex for this type 37 | //! unsafe impl RawMutex for RawSpinlock { 38 | //! const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false)); 39 | //! 40 | //! // A spinlock guard can be sent to another thread and unlocked there 41 | //! type GuardMarker = GuardSend; 42 | //! 43 | //! fn lock(&self) { 44 | //! // Note: This isn't the best way of implementing a spinlock, but it 45 | //! // suffices for the sake of this example. 46 | //! while !self.try_lock() {} 47 | //! } 48 | //! 49 | //! fn try_lock(&self) -> bool { 50 | //! self.0 51 | //! .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) 52 | //! .is_ok() 53 | //! } 54 | //! 55 | //! unsafe fn unlock(&self) { 56 | //! self.0.store(false, Ordering::Release); 57 | //! } 58 | //! } 59 | //! 60 | //! // 3. Export the wrappers. This are the types that your users will actually use. 61 | //! pub type Spinlock = lock_api::Mutex; 62 | //! pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>; 63 | //! ``` 64 | //! 65 | //! # Extension traits 66 | //! 67 | //! In addition to basic locking & unlocking functionality, you have the option 68 | //! of exposing additional functionality in your lock types by implementing 69 | //! additional traits for it. Examples of extension features include: 70 | //! 71 | //! - Fair unlocking (`RawMutexFair`, `RawRwLockFair`) 72 | //! 
- Lock timeouts (`RawMutexTimed`, `RawRwLockTimed`) 73 | //! - Downgradable write locks (`RawRwLockDowngradable`) 74 | //! - Recursive read locks (`RawRwLockRecursive`) 75 | //! - Upgradable read locks (`RawRwLockUpgrade`) 76 | //! 77 | //! The `Mutex` and `RwLock` wrappers will automatically expose this additional 78 | //! functionality if the raw lock type implements these extension traits. 79 | //! 80 | //! # Cargo features 81 | //! 82 | //! This crate supports three cargo features: 83 | //! 84 | //! - `owning_ref`: Allows your lock types to be used with the `owning_ref` crate. 85 | //! - `arc_lock`: Enables locking from an `Arc`. This enables types such as `ArcMutexGuard`. Note that this 86 | //! requires the `alloc` crate to be present. 87 | 88 | #![no_std] 89 | #![cfg_attr(docsrs, feature(doc_auto_cfg))] 90 | #![warn(missing_docs)] 91 | #![warn(rust_2018_idioms)] 92 | 93 | #[macro_use] 94 | extern crate scopeguard; 95 | 96 | #[cfg(feature = "arc_lock")] 97 | extern crate alloc; 98 | 99 | /// Marker type which indicates that the Guard type for a lock is `Send`. 100 | pub struct GuardSend(()); 101 | 102 | /// Marker type which indicates that the Guard type for a lock is not `Send`. 
103 | #[allow(dead_code)] 104 | pub struct GuardNoSend(*mut ()); 105 | 106 | unsafe impl Sync for GuardNoSend {} 107 | 108 | mod mutex; 109 | pub use crate::mutex::*; 110 | 111 | #[cfg(feature = "atomic_usize")] 112 | mod remutex; 113 | #[cfg(feature = "atomic_usize")] 114 | pub use crate::remutex::*; 115 | 116 | mod rwlock; 117 | pub use crate::rwlock::*; 118 | -------------------------------------------------------------------------------- /release-plz.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | # set the path of all the crates to the changelog to the root of the repository 3 | changelog_path = "./CHANGELOG.md" 4 | 5 | [changelog] 6 | body = """ 7 | 8 | ## `{{ package }}` - [{{ version }}]{%- if release_link -%}({{ release_link }}){% endif %} - {{ timestamp | date(format="%Y-%m-%d") }} 9 | {% for group, commits in commits | group_by(attribute="group") %} 10 | ### {{ group | upper_first }} 11 | {% for commit in commits %} 12 | {%- if commit.scope -%} 13 | - *({{commit.scope}})* {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message }}{%- if commit.links %} ({% for link in commit.links %}[{{link.text}}]({{link.href}}) {% endfor -%}){% endif %} 14 | {% else -%} 15 | - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message }} 16 | {% endif -%} 17 | {% endfor -%} 18 | {% endfor -%} 19 | """ 20 | -------------------------------------------------------------------------------- /src/deadlock.rs: -------------------------------------------------------------------------------- 1 | //! \[Experimental\] Deadlock detection 2 | //! 3 | //! This feature is optional and can be enabled via the `deadlock_detection` feature flag. 4 | //! 5 | //! # Example 6 | //! 7 | //! ``` 8 | //! #[cfg(feature = "deadlock_detection")] 9 | //! { // only for #[cfg] 10 | //! use std::thread; 11 | //! use std::time::Duration; 12 | //! use parking_lot::deadlock; 13 | //! 14 | //! 
// Create a background thread which checks for deadlocks every 10s 15 | //! thread::spawn(move || { 16 | //! loop { 17 | //! thread::sleep(Duration::from_secs(10)); 18 | //! let deadlocks = deadlock::check_deadlock(); 19 | //! if deadlocks.is_empty() { 20 | //! continue; 21 | //! } 22 | //! 23 | //! println!("{} deadlocks detected", deadlocks.len()); 24 | //! for (i, threads) in deadlocks.iter().enumerate() { 25 | //! println!("Deadlock #{}", i); 26 | //! for t in threads { 27 | //! println!("Thread Id {:#?}", t.thread_id()); 28 | //! println!("{:#?}", t.backtrace()); 29 | //! } 30 | //! } 31 | //! } 32 | //! }); 33 | //! } // only for #[cfg] 34 | //! ``` 35 | 36 | #[cfg(feature = "deadlock_detection")] 37 | pub use parking_lot_core::deadlock::check_deadlock; 38 | pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource}; 39 | 40 | #[cfg(test)] 41 | #[cfg(feature = "deadlock_detection")] 42 | mod tests { 43 | use crate::{Mutex, ReentrantMutex, RwLock}; 44 | use std::sync::{Arc, Barrier}; 45 | use std::thread::{self, sleep}; 46 | use std::time::Duration; 47 | 48 | // We need to serialize these tests since deadlock detection uses global state 49 | static DEADLOCK_DETECTION_LOCK: Mutex<()> = crate::const_mutex(()); 50 | 51 | fn check_deadlock() -> bool { 52 | use parking_lot_core::deadlock::check_deadlock; 53 | !check_deadlock().is_empty() 54 | } 55 | 56 | #[test] 57 | fn test_mutex_deadlock() { 58 | let _guard = DEADLOCK_DETECTION_LOCK.lock(); 59 | 60 | let m1: Arc> = Default::default(); 61 | let m2: Arc> = Default::default(); 62 | let m3: Arc> = Default::default(); 63 | let b = Arc::new(Barrier::new(4)); 64 | 65 | let m1_ = m1.clone(); 66 | let m2_ = m2.clone(); 67 | let m3_ = m3.clone(); 68 | let b1 = b.clone(); 69 | let b2 = b.clone(); 70 | let b3 = b.clone(); 71 | 72 | assert!(!check_deadlock()); 73 | 74 | let _t1 = thread::spawn(move || { 75 | let _g = m1.lock(); 76 | b1.wait(); 77 | let _ = m2_.lock(); 78 | }); 79 | 80 | let _t2 = 
thread::spawn(move || { 81 | let _g = m2.lock(); 82 | b2.wait(); 83 | let _ = m3_.lock(); 84 | }); 85 | 86 | let _t3 = thread::spawn(move || { 87 | let _g = m3.lock(); 88 | b3.wait(); 89 | let _ = m1_.lock(); 90 | }); 91 | 92 | assert!(!check_deadlock()); 93 | 94 | b.wait(); 95 | sleep(Duration::from_millis(50)); 96 | assert!(check_deadlock()); 97 | 98 | assert!(!check_deadlock()); 99 | } 100 | 101 | #[test] 102 | fn test_mutex_deadlock_reentrant() { 103 | let _guard = DEADLOCK_DETECTION_LOCK.lock(); 104 | 105 | let m1: Arc> = Default::default(); 106 | 107 | assert!(!check_deadlock()); 108 | 109 | let _t1 = thread::spawn(move || { 110 | let _g = m1.lock(); 111 | let _ = m1.lock(); 112 | }); 113 | 114 | sleep(Duration::from_millis(50)); 115 | assert!(check_deadlock()); 116 | 117 | assert!(!check_deadlock()); 118 | } 119 | 120 | #[test] 121 | fn test_remutex_deadlock() { 122 | let _guard = DEADLOCK_DETECTION_LOCK.lock(); 123 | 124 | let m1: Arc> = Default::default(); 125 | let m2: Arc> = Default::default(); 126 | let m3: Arc> = Default::default(); 127 | let b = Arc::new(Barrier::new(4)); 128 | 129 | let m1_ = m1.clone(); 130 | let m2_ = m2.clone(); 131 | let m3_ = m3.clone(); 132 | let b1 = b.clone(); 133 | let b2 = b.clone(); 134 | let b3 = b.clone(); 135 | 136 | assert!(!check_deadlock()); 137 | 138 | let _t1 = thread::spawn(move || { 139 | let _g = m1.lock(); 140 | let _g = m1.lock(); 141 | b1.wait(); 142 | let _ = m2_.lock(); 143 | }); 144 | 145 | let _t2 = thread::spawn(move || { 146 | let _g = m2.lock(); 147 | let _g = m2.lock(); 148 | b2.wait(); 149 | let _ = m3_.lock(); 150 | }); 151 | 152 | let _t3 = thread::spawn(move || { 153 | let _g = m3.lock(); 154 | let _g = m3.lock(); 155 | b3.wait(); 156 | let _ = m1_.lock(); 157 | }); 158 | 159 | assert!(!check_deadlock()); 160 | 161 | b.wait(); 162 | sleep(Duration::from_millis(50)); 163 | assert!(check_deadlock()); 164 | 165 | assert!(!check_deadlock()); 166 | } 167 | 168 | #[test] 169 | fn test_rwlock_deadlock() 
{ 170 | let _guard = DEADLOCK_DETECTION_LOCK.lock(); 171 | 172 | let m1: Arc> = Default::default(); 173 | let m2: Arc> = Default::default(); 174 | let m3: Arc> = Default::default(); 175 | let b = Arc::new(Barrier::new(4)); 176 | 177 | let m1_ = m1.clone(); 178 | let m2_ = m2.clone(); 179 | let m3_ = m3.clone(); 180 | let b1 = b.clone(); 181 | let b2 = b.clone(); 182 | let b3 = b.clone(); 183 | 184 | assert!(!check_deadlock()); 185 | 186 | let _t1 = thread::spawn(move || { 187 | let _g = m1.read(); 188 | b1.wait(); 189 | let _g = m2_.write(); 190 | }); 191 | 192 | let _t2 = thread::spawn(move || { 193 | let _g = m2.read(); 194 | b2.wait(); 195 | let _g = m3_.write(); 196 | }); 197 | 198 | let _t3 = thread::spawn(move || { 199 | let _g = m3.read(); 200 | b3.wait(); 201 | let _ = m1_.write(); 202 | }); 203 | 204 | assert!(!check_deadlock()); 205 | 206 | b.wait(); 207 | sleep(Duration::from_millis(50)); 208 | assert!(check_deadlock()); 209 | 210 | assert!(!check_deadlock()); 211 | } 212 | 213 | #[cfg(rwlock_deadlock_detection_not_supported)] 214 | #[test] 215 | fn test_rwlock_deadlock_reentrant() { 216 | let _guard = DEADLOCK_DETECTION_LOCK.lock(); 217 | 218 | let m1: Arc> = Default::default(); 219 | 220 | assert!(!check_deadlock()); 221 | 222 | let _t1 = thread::spawn(move || { 223 | let _g = m1.read(); 224 | let _ = m1.write(); 225 | }); 226 | 227 | sleep(Duration::from_millis(50)); 228 | assert!(check_deadlock()); 229 | 230 | assert!(!check_deadlock()); 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /src/elision.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
// Indicates whether the target architecture supports lock elision.
//
// This is a compile-time constant in disguise: it is `true` only when the
// `hardware-lock-elision` feature is enabled and the target is x86/x86_64.
#[inline]
pub fn have_elision() -> bool {
    let feature_enabled = cfg!(feature = "hardware-lock-elision");
    let arch_supported = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
    feature_enabled && arch_supported
}
    /// Atomically subtracts `val` and returns the previous value, ending a
    /// hardware lock-elision transaction.
    ///
    /// The `xrelease` prefix marks this store as the end of an elided
    /// critical section on CPUs supporting Hardware Lock Elision; on CPUs
    /// without HLE the prefix is ignored and this is a plain `lock xadd`.
    #[inline]
    fn elision_fetch_sub_release(&self, val: usize) -> usize {
        unsafe {
            use core::arch::asm;
            let prev: usize;
            // There is no hardware fetch-sub, so subtraction is performed by
            // adding the two's-complement negation via `xadd`.
            #[cfg(target_pointer_width = "32")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{:e}], {:e}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            #[cfg(target_pointer_width = "64")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{}], {}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            prev
        }
    }
/// A fair mutex can be a poor fit when threads run at different priorities:
/// a high-priority thread must still wait its turn in the queue behind
/// lower-priority ones, a problem known as priority inversion.
66 | /// let mut data = data.lock(); 67 | /// *data += 1; 68 | /// if *data == N { 69 | /// tx.send(()).unwrap(); 70 | /// } 71 | /// // the lock is unlocked here when `data` goes out of scope. 72 | /// }); 73 | /// } 74 | /// 75 | /// rx.recv().unwrap(); 76 | /// ``` 77 | pub type FairMutex = lock_api::Mutex; 78 | 79 | /// Creates a new fair mutex in an unlocked state ready for use. 80 | /// 81 | /// This allows creating a fair mutex in a constant context on stable Rust. 82 | pub const fn const_fair_mutex(val: T) -> FairMutex { 83 | FairMutex::const_new(::INIT, val) 84 | } 85 | 86 | /// An RAII implementation of a "scoped lock" of a mutex. When this structure is 87 | /// dropped (falls out of scope), the lock will be unlocked. 88 | /// 89 | /// The data protected by the mutex can be accessed through this guard via its 90 | /// `Deref` and `DerefMut` implementations. 91 | pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>; 92 | 93 | /// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a 94 | /// subfield of the protected data. 95 | /// 96 | /// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the 97 | /// former doesn't support temporarily unlocking and re-locking, since that 98 | /// could introduce soundness issues if the locked object is modified by another 99 | /// thread. 
100 | pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>; 101 | 102 | #[cfg(test)] 103 | mod tests { 104 | use crate::FairMutex; 105 | use std::sync::atomic::{AtomicUsize, Ordering}; 106 | use std::sync::mpsc::channel; 107 | use std::sync::Arc; 108 | use std::thread; 109 | 110 | #[cfg(feature = "serde")] 111 | use bincode::{deserialize, serialize}; 112 | 113 | #[derive(Eq, PartialEq, Debug)] 114 | struct NonCopy(i32); 115 | 116 | #[test] 117 | fn smoke() { 118 | let m = FairMutex::new(()); 119 | drop(m.lock()); 120 | drop(m.lock()); 121 | } 122 | 123 | #[test] 124 | fn lots_and_lots() { 125 | const J: u32 = 1000; 126 | const K: u32 = 3; 127 | 128 | let m = Arc::new(FairMutex::new(0)); 129 | 130 | fn inc(m: &FairMutex) { 131 | for _ in 0..J { 132 | *m.lock() += 1; 133 | } 134 | } 135 | 136 | let (tx, rx) = channel(); 137 | for _ in 0..K { 138 | let tx2 = tx.clone(); 139 | let m2 = m.clone(); 140 | thread::spawn(move || { 141 | inc(&m2); 142 | tx2.send(()).unwrap(); 143 | }); 144 | let tx2 = tx.clone(); 145 | let m2 = m.clone(); 146 | thread::spawn(move || { 147 | inc(&m2); 148 | tx2.send(()).unwrap(); 149 | }); 150 | } 151 | 152 | drop(tx); 153 | for _ in 0..2 * K { 154 | rx.recv().unwrap(); 155 | } 156 | assert_eq!(*m.lock(), J * K * 2); 157 | } 158 | 159 | #[test] 160 | fn try_lock() { 161 | let m = FairMutex::new(()); 162 | *m.try_lock().unwrap() = (); 163 | } 164 | 165 | #[test] 166 | fn test_into_inner() { 167 | let m = FairMutex::new(NonCopy(10)); 168 | assert_eq!(m.into_inner(), NonCopy(10)); 169 | } 170 | 171 | #[test] 172 | fn test_into_inner_drop() { 173 | struct Foo(Arc); 174 | impl Drop for Foo { 175 | fn drop(&mut self) { 176 | self.0.fetch_add(1, Ordering::SeqCst); 177 | } 178 | } 179 | let num_drops = Arc::new(AtomicUsize::new(0)); 180 | let m = FairMutex::new(Foo(num_drops.clone())); 181 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 182 | { 183 | let _inner = m.into_inner(); 184 | 
assert_eq!(num_drops.load(Ordering::SeqCst), 0); 185 | } 186 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); 187 | } 188 | 189 | #[test] 190 | fn test_get_mut() { 191 | let mut m = FairMutex::new(NonCopy(10)); 192 | *m.get_mut() = NonCopy(20); 193 | assert_eq!(m.into_inner(), NonCopy(20)); 194 | } 195 | 196 | #[test] 197 | fn test_mutex_arc_nested() { 198 | // Tests nested mutexes and access 199 | // to underlying data. 200 | let arc = Arc::new(FairMutex::new(1)); 201 | let arc2 = Arc::new(FairMutex::new(arc)); 202 | let (tx, rx) = channel(); 203 | let _t = thread::spawn(move || { 204 | let lock = arc2.lock(); 205 | let lock2 = lock.lock(); 206 | assert_eq!(*lock2, 1); 207 | tx.send(()).unwrap(); 208 | }); 209 | rx.recv().unwrap(); 210 | } 211 | 212 | #[test] 213 | fn test_mutex_arc_access_in_unwind() { 214 | let arc = Arc::new(FairMutex::new(1)); 215 | let arc2 = arc.clone(); 216 | let _ = thread::spawn(move || { 217 | struct Unwinder { 218 | i: Arc>, 219 | } 220 | impl Drop for Unwinder { 221 | fn drop(&mut self) { 222 | *self.i.lock() += 1; 223 | } 224 | } 225 | let _u = Unwinder { i: arc2 }; 226 | panic!(); 227 | }) 228 | .join(); 229 | let lock = arc.lock(); 230 | assert_eq!(*lock, 2); 231 | } 232 | 233 | #[test] 234 | fn test_mutex_unsized() { 235 | let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]); 236 | { 237 | let b = &mut *mutex.lock(); 238 | b[0] = 4; 239 | b[2] = 5; 240 | } 241 | let comp: &[i32] = &[4, 2, 5]; 242 | assert_eq!(&*mutex.lock(), comp); 243 | } 244 | 245 | #[test] 246 | fn test_mutexguard_sync() { 247 | fn sync(_: T) {} 248 | 249 | let mutex = FairMutex::new(()); 250 | sync(mutex.lock()); 251 | } 252 | 253 | #[test] 254 | fn test_mutex_debug() { 255 | let mutex = FairMutex::new(vec![0u8, 10]); 256 | 257 | assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }"); 258 | let _lock = mutex.lock(); 259 | assert_eq!(format!("{:?}", mutex), "Mutex { data: }"); 260 | } 261 | 262 | #[cfg(feature = "serde")] 263 | #[test] 264 | fn 
test_serde() { 265 | let contents: Vec = vec![0, 1, 2]; 266 | let mutex = FairMutex::new(contents.clone()); 267 | 268 | let serialized = serialize(&mutex).unwrap(); 269 | let deserialized: FairMutex> = deserialize(&serialized).unwrap(); 270 | 271 | assert_eq!(*(mutex.lock()), *(deserialized.lock())); 272 | assert_eq!(contents, *(deserialized.lock())); 273 | } 274 | } 275 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! This library provides implementations of `Mutex`, `RwLock`, `Condvar` and 9 | //! `Once` that are smaller, faster and more flexible than those in the Rust 10 | //! standard library. It also provides a `ReentrantMutex` type. 11 | 12 | #![warn(missing_docs)] 13 | #![warn(rust_2018_idioms)] 14 | 15 | mod condvar; 16 | mod elision; 17 | mod fair_mutex; 18 | mod mutex; 19 | mod once; 20 | mod raw_fair_mutex; 21 | mod raw_mutex; 22 | mod raw_rwlock; 23 | mod remutex; 24 | mod rwlock; 25 | mod util; 26 | 27 | #[cfg(feature = "deadlock_detection")] 28 | pub mod deadlock; 29 | #[cfg(not(feature = "deadlock_detection"))] 30 | mod deadlock; 31 | 32 | // If deadlock detection is enabled, we cannot allow lock guards to be sent to 33 | // other threads. 
// If deadlock detection is enabled, we cannot allow lock guards to be sent to
// other threads. Deadlock detection tracks which thread acquired each lock
// (via the acquire_resource/release_resource calls in the raw lock types),
// and that per-thread bookkeeping would be invalidated if a guard were
// released on a different thread than the one that acquired it.
#[cfg(all(feature = "send_guard", feature = "deadlock_detection"))]
compile_error!("the `send_guard` and `deadlock_detection` features cannot be used together");
// Guard marker shared by every lock type in this crate: `Send`-able guards
// only when the `send_guard` feature is enabled.
#[cfg(feature = "send_guard")]
type GuardMarker = lock_api::GuardSend;
#[cfg(not(feature = "send_guard"))]
type GuardMarker = lock_api::GuardNoSend;
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same mutex in succession, which can starve
/// other threads waiting to acquire the mutex. While this improves throughput
/// because it doesn't force a context switch when a thread tries to re-acquire
/// a mutex it has just released, it comes at the expense of fairness for the
/// waiting threads.
47 | /// - Efficient handling of micro-contention using adaptive spinning. 48 | /// - Allows raw locking & unlocking without a guard. 49 | /// - Supports eventual fairness so that the mutex is fair on average. 50 | /// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`. 51 | /// 52 | /// # Examples 53 | /// 54 | /// ``` 55 | /// use parking_lot::Mutex; 56 | /// use std::sync::{Arc, mpsc::channel}; 57 | /// use std::thread; 58 | /// 59 | /// const N: usize = 10; 60 | /// 61 | /// // Spawn a few threads to increment a shared variable (non-atomically), and 62 | /// // let the main thread know once all increments are done. 63 | /// // 64 | /// // Here we're using an Arc to share memory among threads, and the data inside 65 | /// // the Arc is protected with a mutex. 66 | /// let data = Arc::new(Mutex::new(0)); 67 | /// 68 | /// let (tx, rx) = channel(); 69 | /// for _ in 0..10 { 70 | /// let (data, tx) = (Arc::clone(&data), tx.clone()); 71 | /// thread::spawn(move || { 72 | /// // The shared state can only be accessed once the lock is held. 73 | /// // Our non-atomic increment is safe because we're the only thread 74 | /// // which can access the shared state when the lock is held. 75 | /// let mut data = data.lock(); 76 | /// *data += 1; 77 | /// if *data == N { 78 | /// tx.send(()).unwrap(); 79 | /// } 80 | /// // the lock is unlocked here when `data` goes out of scope. 81 | /// }); 82 | /// } 83 | /// 84 | /// rx.recv().unwrap(); 85 | /// ``` 86 | pub type Mutex = lock_api::Mutex; 87 | 88 | /// Creates a new mutex in an unlocked state ready for use. 89 | /// 90 | /// This allows creating a mutex in a constant context on stable Rust. 91 | pub const fn const_mutex(val: T) -> Mutex { 92 | Mutex::const_new(::INIT, val) 93 | } 94 | 95 | /// An RAII implementation of a "scoped lock" of a mutex. When this structure is 96 | /// dropped (falls out of scope), the lock will be unlocked. 
97 | /// 98 | /// The data protected by the mutex can be accessed through this guard via its 99 | /// `Deref` and `DerefMut` implementations. 100 | pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>; 101 | 102 | /// An RAII mutex guard returned by `MutexGuard::map`, which can point to a 103 | /// subfield of the protected data. 104 | /// 105 | /// The main difference between `MappedMutexGuard` and `MutexGuard` is that the 106 | /// former doesn't support temporarily unlocking and re-locking, since that 107 | /// could introduce soundness issues if the locked object is modified by another 108 | /// thread. 109 | pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>; 110 | 111 | #[cfg(test)] 112 | mod tests { 113 | use crate::{Condvar, Mutex}; 114 | use std::sync::atomic::{AtomicUsize, Ordering}; 115 | use std::sync::mpsc::channel; 116 | use std::sync::Arc; 117 | use std::thread; 118 | 119 | #[cfg(feature = "serde")] 120 | use bincode::{deserialize, serialize}; 121 | 122 | struct Packet(Arc<(Mutex, Condvar)>); 123 | 124 | #[derive(Eq, PartialEq, Debug)] 125 | struct NonCopy(i32); 126 | 127 | unsafe impl Send for Packet {} 128 | unsafe impl Sync for Packet {} 129 | 130 | #[test] 131 | fn smoke() { 132 | let m = Mutex::new(()); 133 | drop(m.lock()); 134 | drop(m.lock()); 135 | } 136 | 137 | #[test] 138 | fn lots_and_lots() { 139 | const J: u32 = 1000; 140 | const K: u32 = 3; 141 | 142 | let m = Arc::new(Mutex::new(0)); 143 | 144 | fn inc(m: &Mutex) { 145 | for _ in 0..J { 146 | *m.lock() += 1; 147 | } 148 | } 149 | 150 | let (tx, rx) = channel(); 151 | for _ in 0..K { 152 | let tx2 = tx.clone(); 153 | let m2 = m.clone(); 154 | thread::spawn(move || { 155 | inc(&m2); 156 | tx2.send(()).unwrap(); 157 | }); 158 | let tx2 = tx.clone(); 159 | let m2 = m.clone(); 160 | thread::spawn(move || { 161 | inc(&m2); 162 | tx2.send(()).unwrap(); 163 | }); 164 | } 165 | 166 | drop(tx); 167 | for _ in 0..2 * K { 168 | rx.recv().unwrap(); 169 | } 
170 | assert_eq!(*m.lock(), J * K * 2); 171 | } 172 | 173 | #[test] 174 | fn try_lock() { 175 | let m = Mutex::new(()); 176 | *m.try_lock().unwrap() = (); 177 | } 178 | 179 | #[test] 180 | fn test_into_inner() { 181 | let m = Mutex::new(NonCopy(10)); 182 | assert_eq!(m.into_inner(), NonCopy(10)); 183 | } 184 | 185 | #[test] 186 | fn test_into_inner_drop() { 187 | struct Foo(Arc); 188 | impl Drop for Foo { 189 | fn drop(&mut self) { 190 | self.0.fetch_add(1, Ordering::SeqCst); 191 | } 192 | } 193 | let num_drops = Arc::new(AtomicUsize::new(0)); 194 | let m = Mutex::new(Foo(num_drops.clone())); 195 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 196 | { 197 | let _inner = m.into_inner(); 198 | assert_eq!(num_drops.load(Ordering::SeqCst), 0); 199 | } 200 | assert_eq!(num_drops.load(Ordering::SeqCst), 1); 201 | } 202 | 203 | #[test] 204 | fn test_get_mut() { 205 | let mut m = Mutex::new(NonCopy(10)); 206 | *m.get_mut() = NonCopy(20); 207 | assert_eq!(m.into_inner(), NonCopy(20)); 208 | } 209 | 210 | #[test] 211 | fn test_mutex_arc_condvar() { 212 | let packet = Packet(Arc::new((Mutex::new(false), Condvar::new()))); 213 | let packet2 = Packet(packet.0.clone()); 214 | let (tx, rx) = channel(); 215 | let _t = thread::spawn(move || { 216 | // wait until parent gets in 217 | rx.recv().unwrap(); 218 | let (lock, cvar) = &*packet2.0; 219 | let mut lock = lock.lock(); 220 | *lock = true; 221 | cvar.notify_one(); 222 | }); 223 | 224 | let (lock, cvar) = &*packet.0; 225 | let mut lock = lock.lock(); 226 | tx.send(()).unwrap(); 227 | assert!(!*lock); 228 | while !*lock { 229 | cvar.wait(&mut lock); 230 | } 231 | } 232 | 233 | #[test] 234 | fn test_mutex_arc_nested() { 235 | // Tests nested mutexes and access 236 | // to underlying data. 
237 | let arc = Arc::new(Mutex::new(1)); 238 | let arc2 = Arc::new(Mutex::new(arc)); 239 | let (tx, rx) = channel(); 240 | let _t = thread::spawn(move || { 241 | let lock = arc2.lock(); 242 | let lock2 = lock.lock(); 243 | assert_eq!(*lock2, 1); 244 | tx.send(()).unwrap(); 245 | }); 246 | rx.recv().unwrap(); 247 | } 248 | 249 | #[test] 250 | fn test_mutex_arc_access_in_unwind() { 251 | let arc = Arc::new(Mutex::new(1)); 252 | let arc2 = arc.clone(); 253 | let _ = thread::spawn(move || { 254 | struct Unwinder { 255 | i: Arc>, 256 | } 257 | impl Drop for Unwinder { 258 | fn drop(&mut self) { 259 | *self.i.lock() += 1; 260 | } 261 | } 262 | let _u = Unwinder { i: arc2 }; 263 | panic!(); 264 | }) 265 | .join(); 266 | let lock = arc.lock(); 267 | assert_eq!(*lock, 2); 268 | } 269 | 270 | #[test] 271 | fn test_mutex_unsized() { 272 | let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); 273 | { 274 | let b = &mut *mutex.lock(); 275 | b[0] = 4; 276 | b[2] = 5; 277 | } 278 | let comp: &[i32] = &[4, 2, 5]; 279 | assert_eq!(&*mutex.lock(), comp); 280 | } 281 | 282 | #[test] 283 | fn test_mutexguard_sync() { 284 | fn sync(_: T) {} 285 | 286 | let mutex = Mutex::new(()); 287 | sync(mutex.lock()); 288 | } 289 | 290 | #[test] 291 | fn test_mutex_debug() { 292 | let mutex = Mutex::new(vec![0u8, 10]); 293 | 294 | assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }"); 295 | let _lock = mutex.lock(); 296 | assert_eq!(format!("{:?}", mutex), "Mutex { data: }"); 297 | } 298 | 299 | #[cfg(feature = "serde")] 300 | #[test] 301 | fn test_serde() { 302 | let contents: Vec = vec![0, 1, 2]; 303 | let mutex = Mutex::new(contents.clone()); 304 | 305 | let serialized = serialize(&mutex).unwrap(); 306 | let deserialized: Mutex> = deserialize(&serialized).unwrap(); 307 | 308 | assert_eq!(*(mutex.lock()), *(deserialized.lock())); 309 | assert_eq!(contents, *(deserialized.lock())); 310 | } 311 | } 312 | -------------------------------------------------------------------------------- 
/src/raw_fair_mutex.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use crate::raw_mutex::RawMutex; 9 | use lock_api::RawMutexFair; 10 | 11 | /// Raw fair mutex type backed by the parking lot. 12 | pub struct RawFairMutex(RawMutex); 13 | 14 | unsafe impl lock_api::RawMutex for RawFairMutex { 15 | const INIT: Self = RawFairMutex(::INIT); 16 | 17 | type GuardMarker = ::GuardMarker; 18 | 19 | #[inline] 20 | fn lock(&self) { 21 | self.0.lock() 22 | } 23 | 24 | #[inline] 25 | fn try_lock(&self) -> bool { 26 | self.0.try_lock() 27 | } 28 | 29 | #[inline] 30 | unsafe fn unlock(&self) { 31 | self.unlock_fair() 32 | } 33 | 34 | #[inline] 35 | fn is_locked(&self) -> bool { 36 | self.0.is_locked() 37 | } 38 | } 39 | 40 | unsafe impl lock_api::RawMutexFair for RawFairMutex { 41 | #[inline] 42 | unsafe fn unlock_fair(&self) { 43 | self.0.unlock_fair() 44 | } 45 | 46 | #[inline] 47 | unsafe fn bump(&self) { 48 | self.0.bump() 49 | } 50 | } 51 | 52 | unsafe impl lock_api::RawMutexTimed for RawFairMutex { 53 | type Duration = ::Duration; 54 | type Instant = ::Instant; 55 | 56 | #[inline] 57 | fn try_lock_until(&self, timeout: Self::Instant) -> bool { 58 | self.0.try_lock_until(timeout) 59 | } 60 | 61 | #[inline] 62 | fn try_lock_for(&self, timeout: Self::Duration) -> bool { 63 | self.0.try_lock_for(timeout) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/raw_mutex.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. 
This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use crate::{deadlock, util}; 9 | use core::{ 10 | sync::atomic::{AtomicU8, Ordering}, 11 | time::Duration, 12 | }; 13 | use lock_api::RawMutex as RawMutex_; 14 | use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN}; 15 | use std::time::Instant; 16 | 17 | // UnparkToken used to indicate that that the target thread should attempt to 18 | // lock the mutex again as soon as it is unparked. 19 | pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0); 20 | 21 | // UnparkToken used to indicate that the mutex is being handed off to the target 22 | // thread directly without unlocking it. 23 | pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1); 24 | 25 | /// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread. 26 | const LOCKED_BIT: u8 = 0b01; 27 | /// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being 28 | /// parked if it wants to lock the mutex, but it is currently being held by some other thread. 29 | const PARKED_BIT: u8 = 0b10; 30 | 31 | /// Raw mutex type backed by the parking lot. 32 | pub struct RawMutex { 33 | /// This atomic integer holds the current state of the mutex instance. Only the two lowest bits 34 | /// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits. 35 | /// 36 | /// # State table: 37 | /// 38 | /// PARKED_BIT | LOCKED_BIT | Description 39 | /// 0 | 0 | The mutex is not locked, nor is anyone waiting for it. 40 | /// -----------+------------+------------------------------------------------------------------ 41 | /// 0 | 1 | The mutex is locked by exactly one thread. No other thread is 42 | /// | | waiting for it. 43 | /// -----------+------------+------------------------------------------------------------------ 44 | /// 1 | 0 | The mutex is not locked. 
One or more thread is parked or about to 45 | /// | | park. At least one of the parked threads are just about to be 46 | /// | | unparked, or a thread heading for parking might abort the park. 47 | /// -----------+------------+------------------------------------------------------------------ 48 | /// 1 | 1 | The mutex is locked by exactly one thread. One or more thread is 49 | /// | | parked or about to park, waiting for the lock to become available. 50 | /// | | In this state, PARKED_BIT is only ever cleared when a bucket lock 51 | /// | | is held (i.e. in a parking_lot_core callback). This ensures that 52 | /// | | we never end up in a situation where there are parked threads but 53 | /// | | PARKED_BIT is not set (which would result in those threads 54 | /// | | potentially never getting woken up). 55 | state: AtomicU8, 56 | } 57 | 58 | unsafe impl lock_api::RawMutex for RawMutex { 59 | const INIT: RawMutex = RawMutex { 60 | state: AtomicU8::new(0), 61 | }; 62 | 63 | type GuardMarker = crate::GuardMarker; 64 | 65 | #[inline] 66 | fn lock(&self) { 67 | if self 68 | .state 69 | .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) 70 | .is_err() 71 | { 72 | self.lock_slow(None); 73 | } 74 | unsafe { deadlock::acquire_resource(self as *const _ as usize) }; 75 | } 76 | 77 | #[inline] 78 | fn try_lock(&self) -> bool { 79 | let mut state = self.state.load(Ordering::Relaxed); 80 | loop { 81 | if state & LOCKED_BIT != 0 { 82 | return false; 83 | } 84 | match self.state.compare_exchange_weak( 85 | state, 86 | state | LOCKED_BIT, 87 | Ordering::Acquire, 88 | Ordering::Relaxed, 89 | ) { 90 | Ok(_) => { 91 | unsafe { deadlock::acquire_resource(self as *const _ as usize) }; 92 | return true; 93 | } 94 | Err(x) => state = x, 95 | } 96 | } 97 | } 98 | 99 | #[inline] 100 | unsafe fn unlock(&self) { 101 | deadlock::release_resource(self as *const _ as usize); 102 | if self 103 | .state 104 | .compare_exchange(LOCKED_BIT, 0, Ordering::Release, 
Ordering::Relaxed) 105 | .is_ok() 106 | { 107 | return; 108 | } 109 | self.unlock_slow(false); 110 | } 111 | 112 | #[inline] 113 | fn is_locked(&self) -> bool { 114 | let state = self.state.load(Ordering::Relaxed); 115 | state & LOCKED_BIT != 0 116 | } 117 | } 118 | 119 | unsafe impl lock_api::RawMutexFair for RawMutex { 120 | #[inline] 121 | unsafe fn unlock_fair(&self) { 122 | deadlock::release_resource(self as *const _ as usize); 123 | if self 124 | .state 125 | .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) 126 | .is_ok() 127 | { 128 | return; 129 | } 130 | self.unlock_slow(true); 131 | } 132 | 133 | #[inline] 134 | unsafe fn bump(&self) { 135 | if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { 136 | self.bump_slow(); 137 | } 138 | } 139 | } 140 | 141 | unsafe impl lock_api::RawMutexTimed for RawMutex { 142 | type Duration = Duration; 143 | type Instant = Instant; 144 | 145 | #[inline] 146 | fn try_lock_until(&self, timeout: Instant) -> bool { 147 | let result = if self 148 | .state 149 | .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) 150 | .is_ok() 151 | { 152 | true 153 | } else { 154 | self.lock_slow(Some(timeout)) 155 | }; 156 | if result { 157 | unsafe { deadlock::acquire_resource(self as *const _ as usize) }; 158 | } 159 | result 160 | } 161 | 162 | #[inline] 163 | fn try_lock_for(&self, timeout: Duration) -> bool { 164 | let result = if self 165 | .state 166 | .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) 167 | .is_ok() 168 | { 169 | true 170 | } else { 171 | self.lock_slow(util::to_deadline(timeout)) 172 | }; 173 | if result { 174 | unsafe { deadlock::acquire_resource(self as *const _ as usize) }; 175 | } 176 | result 177 | } 178 | } 179 | 180 | impl RawMutex { 181 | // Used by Condvar when requeuing threads to us, must be called while 182 | // holding the queue lock. 
183 | #[inline] 184 | pub(crate) fn mark_parked_if_locked(&self) -> bool { 185 | let mut state = self.state.load(Ordering::Relaxed); 186 | loop { 187 | if state & LOCKED_BIT == 0 { 188 | return false; 189 | } 190 | match self.state.compare_exchange_weak( 191 | state, 192 | state | PARKED_BIT, 193 | Ordering::Relaxed, 194 | Ordering::Relaxed, 195 | ) { 196 | Ok(_) => return true, 197 | Err(x) => state = x, 198 | } 199 | } 200 | } 201 | 202 | // Used by Condvar when requeuing threads to us, must be called while 203 | // holding the queue lock. 204 | #[inline] 205 | pub(crate) fn mark_parked(&self) { 206 | self.state.fetch_or(PARKED_BIT, Ordering::Relaxed); 207 | } 208 | 209 | #[cold] 210 | fn lock_slow(&self, timeout: Option) -> bool { 211 | let mut spinwait = SpinWait::new(); 212 | let mut state = self.state.load(Ordering::Relaxed); 213 | loop { 214 | // Grab the lock if it isn't locked, even if there is a queue on it 215 | if state & LOCKED_BIT == 0 { 216 | match self.state.compare_exchange_weak( 217 | state, 218 | state | LOCKED_BIT, 219 | Ordering::Acquire, 220 | Ordering::Relaxed, 221 | ) { 222 | Ok(_) => return true, 223 | Err(x) => state = x, 224 | } 225 | continue; 226 | } 227 | 228 | // If there is no queue, try spinning a few times 229 | if state & PARKED_BIT == 0 && spinwait.spin() { 230 | state = self.state.load(Ordering::Relaxed); 231 | continue; 232 | } 233 | 234 | // Set the parked bit 235 | if state & PARKED_BIT == 0 { 236 | if let Err(x) = self.state.compare_exchange_weak( 237 | state, 238 | state | PARKED_BIT, 239 | Ordering::Relaxed, 240 | Ordering::Relaxed, 241 | ) { 242 | state = x; 243 | continue; 244 | } 245 | } 246 | 247 | // Park our thread until we are woken up by an unlock 248 | let addr = self as *const _ as usize; 249 | let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; 250 | let before_sleep = || {}; 251 | let timed_out = |_, was_last_thread| { 252 | // Clear the parked bit if we were the last parked thread 
253 | if was_last_thread { 254 | self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); 255 | } 256 | }; 257 | // SAFETY: 258 | // * `addr` is an address we control. 259 | // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. 260 | // * `before_sleep` does not call `park`, nor does it panic. 261 | match unsafe { 262 | parking_lot_core::park( 263 | addr, 264 | validate, 265 | before_sleep, 266 | timed_out, 267 | DEFAULT_PARK_TOKEN, 268 | timeout, 269 | ) 270 | } { 271 | // The thread that unparked us passed the lock on to us 272 | // directly without unlocking it. 273 | ParkResult::Unparked(TOKEN_HANDOFF) => return true, 274 | 275 | // We were unparked normally, try acquiring the lock again 276 | ParkResult::Unparked(_) => (), 277 | 278 | // The validation function failed, try locking again 279 | ParkResult::Invalid => (), 280 | 281 | // Timeout expired 282 | ParkResult::TimedOut => return false, 283 | } 284 | 285 | // Loop back and try locking again 286 | spinwait.reset(); 287 | state = self.state.load(Ordering::Relaxed); 288 | } 289 | } 290 | 291 | #[cold] 292 | fn unlock_slow(&self, force_fair: bool) { 293 | // Unpark one thread and leave the parked bit set if there might 294 | // still be parked threads on this address. 295 | let addr = self as *const _ as usize; 296 | let callback = |result: UnparkResult| { 297 | // If we are using a fair unlock then we should keep the 298 | // mutex locked and hand it off to the unparked thread. 299 | if result.unparked_threads != 0 && (force_fair || result.be_fair) { 300 | // Clear the parked bit if there are no more parked 301 | // threads. 302 | if !result.have_more_threads { 303 | self.state.store(LOCKED_BIT, Ordering::Relaxed); 304 | } 305 | return TOKEN_HANDOFF; 306 | } 307 | 308 | // Clear the locked bit, and the parked bit as well if there 309 | // are no more parked threads. 
310 | if result.have_more_threads { 311 | self.state.store(PARKED_BIT, Ordering::Release); 312 | } else { 313 | self.state.store(0, Ordering::Release); 314 | } 315 | TOKEN_NORMAL 316 | }; 317 | // SAFETY: 318 | // * `addr` is an address we control. 319 | // * `callback` does not panic or call into any function of `parking_lot`. 320 | unsafe { 321 | parking_lot_core::unpark_one(addr, callback); 322 | } 323 | } 324 | 325 | #[cold] 326 | fn bump_slow(&self) { 327 | unsafe { deadlock::release_resource(self as *const _ as usize) }; 328 | self.unlock_slow(true); 329 | self.lock(); 330 | } 331 | } 332 | -------------------------------------------------------------------------------- /src/remutex.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use crate::raw_mutex::RawMutex; 9 | use core::num::NonZeroUsize; 10 | use lock_api::{self, GetThreadId}; 11 | 12 | /// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`. 13 | pub struct RawThreadId; 14 | 15 | unsafe impl GetThreadId for RawThreadId { 16 | const INIT: RawThreadId = RawThreadId; 17 | 18 | fn nonzero_thread_id(&self) -> NonZeroUsize { 19 | // The address of a thread-local variable is guaranteed to be unique to the 20 | // current thread, and is also guaranteed to be non-zero. The variable has to have a 21 | // non-zero size to guarantee it has a unique address for each thread. 22 | thread_local!(static KEY: u8 = 0); 23 | KEY.with(|x| { 24 | NonZeroUsize::new(x as *const _ as usize) 25 | .expect("thread-local variable address is null") 26 | }) 27 | } 28 | } 29 | 30 | /// A mutex which can be recursively locked by a single thread. 
31 | /// 32 | /// This type is identical to `Mutex` except for the following points: 33 | /// 34 | /// - Locking multiple times from the same thread will work correctly instead of 35 | /// deadlocking. 36 | /// - `ReentrantMutexGuard` does not give mutable references to the locked data. 37 | /// Use a `RefCell` if you need this. 38 | /// 39 | /// See [`Mutex`](crate::Mutex) for more details about the underlying mutex 40 | /// primitive. 41 | pub type ReentrantMutex = lock_api::ReentrantMutex; 42 | 43 | /// Creates a new reentrant mutex in an unlocked state ready for use. 44 | /// 45 | /// This allows creating a reentrant mutex in a constant context on stable Rust. 46 | pub const fn const_reentrant_mutex(val: T) -> ReentrantMutex { 47 | ReentrantMutex::const_new( 48 | ::INIT, 49 | ::INIT, 50 | val, 51 | ) 52 | } 53 | 54 | /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure 55 | /// is dropped (falls out of scope), the lock will be unlocked. 56 | /// 57 | /// The data protected by the mutex can be accessed through this guard via its 58 | /// `Deref` implementation. 59 | pub type ReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; 60 | 61 | /// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a 62 | /// subfield of the protected data. 63 | /// 64 | /// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the 65 | /// former doesn't support temporarily unlocking and re-locking, since that 66 | /// could introduce soundness issues if the locked object is modified by another 67 | /// thread. 
68 | pub type MappedReentrantMutexGuard<'a, T> = 69 | lock_api::MappedReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; 70 | 71 | #[cfg(test)] 72 | mod tests { 73 | use crate::ReentrantMutex; 74 | use crate::ReentrantMutexGuard; 75 | use std::cell::RefCell; 76 | use std::sync::mpsc::channel; 77 | use std::sync::Arc; 78 | use std::thread; 79 | 80 | #[cfg(feature = "serde")] 81 | use bincode::{deserialize, serialize}; 82 | 83 | #[test] 84 | fn smoke() { 85 | let m = ReentrantMutex::new(2); 86 | { 87 | let a = m.lock(); 88 | { 89 | let b = m.lock(); 90 | { 91 | let c = m.lock(); 92 | assert_eq!(*c, 2); 93 | } 94 | assert_eq!(*b, 2); 95 | } 96 | assert_eq!(*a, 2); 97 | } 98 | } 99 | 100 | #[test] 101 | fn is_mutex() { 102 | let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); 103 | let m2 = m.clone(); 104 | let lock = m.lock(); 105 | let child = thread::spawn(move || { 106 | let lock = m2.lock(); 107 | assert_eq!(*lock.borrow(), 4950); 108 | }); 109 | for i in 0..100 { 110 | let lock = m.lock(); 111 | *lock.borrow_mut() += i; 112 | } 113 | drop(lock); 114 | child.join().unwrap(); 115 | } 116 | 117 | #[test] 118 | fn trylock_works() { 119 | let m = Arc::new(ReentrantMutex::new(())); 120 | let m2 = m.clone(); 121 | let _lock = m.try_lock(); 122 | let _lock2 = m.try_lock(); 123 | thread::spawn(move || { 124 | let lock = m2.try_lock(); 125 | assert!(lock.is_none()); 126 | }) 127 | .join() 128 | .unwrap(); 129 | let _lock3 = m.try_lock(); 130 | } 131 | 132 | #[test] 133 | fn test_reentrant_mutex_debug() { 134 | let mutex = ReentrantMutex::new(vec![0u8, 10]); 135 | 136 | assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }"); 137 | } 138 | 139 | #[test] 140 | fn test_reentrant_mutex_bump() { 141 | let mutex = Arc::new(ReentrantMutex::new(())); 142 | let mutex2 = mutex.clone(); 143 | 144 | let mut guard = mutex.lock(); 145 | 146 | let (tx, rx) = channel(); 147 | 148 | thread::spawn(move || { 149 | let _guard = mutex2.lock(); 150 | tx.send(()).unwrap(); 151 
| }); 152 | 153 | // `bump()` repeatedly until the thread starts up and requests the lock 154 | while rx.try_recv().is_err() { 155 | ReentrantMutexGuard::bump(&mut guard); 156 | } 157 | } 158 | 159 | #[cfg(feature = "serde")] 160 | #[test] 161 | fn test_serde() { 162 | let contents: Vec = vec![0, 1, 2]; 163 | let mutex = ReentrantMutex::new(contents.clone()); 164 | 165 | let serialized = serialize(&mutex).unwrap(); 166 | let deserialized: ReentrantMutex> = deserialize(&serialized).unwrap(); 167 | 168 | assert_eq!(*(mutex.lock()), *(deserialized.lock())); 169 | assert_eq!(contents, *(deserialized.lock())); 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use std::time::{Duration, Instant}; 9 | 10 | // Option::unchecked_unwrap 11 | pub trait UncheckedOptionExt { 12 | unsafe fn unchecked_unwrap(self) -> T; 13 | } 14 | 15 | impl UncheckedOptionExt for Option { 16 | #[inline] 17 | unsafe fn unchecked_unwrap(self) -> T { 18 | match self { 19 | Some(x) => x, 20 | None => unreachable(), 21 | } 22 | } 23 | } 24 | 25 | // hint::unreachable_unchecked() in release mode 26 | #[inline] 27 | unsafe fn unreachable() -> ! 
{
    // Debug builds keep the checked panic so invariant violations are caught;
    // release builds compile down to an unchecked unreachable hint.
    if cfg!(debug_assertions) {
        unreachable!();
    } else {
        core::hint::unreachable_unchecked()
    }
}

/// Converts a relative timeout into an absolute deadline, saturating to
/// `None` if the addition would overflow `Instant`.
#[inline]
pub fn to_deadline(timeout: Duration) -> Option<Instant> {
    Instant::now().checked_add(timeout)
}

// ---- /tests/issue_203.rs ----

use parking_lot::RwLock;
use std::thread;

struct Bar(RwLock<()>);

impl Drop for Bar {
    fn drop(&mut self) {
        // Acquiring a lock during thread-local destruction used to crash
        // (issue #203); this Drop impl reproduces that pattern.
        let _n = self.0.write();
    }
}

thread_local! {
    static B: Bar = Bar(RwLock::new(()));
}

#[test]
fn main() {
    thread::spawn(|| {
        B.with(|_| ());

        let a = RwLock::new(());
        let _a = a.read();
    })
    .join()
    .unwrap();
}

// ---- /tests/issue_392.rs ----

use parking_lot::RwLock;

struct Lock(RwLock<i32>);

#[test]
fn issue_392() {
    // Regression test: calling `with_upgraded` twice on the same upgradable
    // read guard must work (issue #392).
    let lock = Lock(RwLock::new(0));
    let mut rl = lock.0.upgradable_read();
    rl.with_upgraded(|_| {
        println!("lock upgrade");
    });
    rl.with_upgraded(|_| {
        println!("lock upgrade");
    });
}