├── .github ├── DOCS.md └── workflows │ ├── check.yml │ ├── safety.yml │ └── test.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── benches ├── single_thread.rs └── stack.rs ├── docs └── GUIDE.md ├── rustfmt.toml ├── src ├── collector.rs ├── guard.rs ├── guide.rs ├── lib.rs ├── raw │ ├── collector.rs │ ├── membarrier.rs │ ├── mod.rs │ ├── tls │ │ ├── mod.rs │ │ └── thread_id.rs │ └── utils.rs └── reclaim.rs └── tests └── lib.rs /.github/DOCS.md: -------------------------------------------------------------------------------- 1 | Workflows adapted from https://github.com/jonhoo/rust-ci-conf. 2 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs whenever a PR is opened or updated, or a commit is pushed to main. It runs 2 | # several checks: 3 | # - fmt: checks that the code is formatted according to rustfmt 4 | # - clippy: checks that the code does not contain any clippy warnings 5 | # - doc: checks that the code can be documented without errors 6 | # - hack: check combinations of feature flags 7 | # - msrv: check that the msrv specified in the crate is correct 8 | permissions: 9 | contents: read 10 | # This configuration allows maintainers of this repo to create a branch and pull request based on 11 | # the new branch. Restricting the push trigger to the main branch ensures that the PR only gets 12 | # built once. 13 | on: 14 | push: 15 | branches: [master] 16 | pull_request: 17 | # If new code is pushed to a PR branch, then cancel in progress workflows for that PR. Ensures that 18 | # we don't waste CI time, and returns results quicker https://github.com/jonhoo/rust-ci-conf/pull/5 19 | concurrency: 20 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 21 | cancel-in-progress: true 22 | name: check 23 | jobs: 24 | fmt: 25 | runs-on: ubuntu-latest 26 | name: stable / fmt 27 | steps: 28 | - uses: actions/checkout@v4 29 | with: 30 | submodules: true 31 | - name: Install stable 32 | uses: dtolnay/rust-toolchain@stable 33 | with: 34 | components: rustfmt 35 | - name: cargo fmt --check 36 | run: cargo fmt --check 37 | clippy: 38 | runs-on: ubuntu-latest 39 | name: ${{ matrix.toolchain }} / clippy 40 | permissions: 41 | contents: read 42 | checks: write 43 | strategy: 44 | fail-fast: false 45 | matrix: 46 | # Get early warning of new lints which are regularly introduced in beta channels. 47 | toolchain: [stable, beta] 48 | steps: 49 | - uses: actions/checkout@v4 50 | with: 51 | submodules: true 52 | - name: Install ${{ matrix.toolchain }} 53 | uses: dtolnay/rust-toolchain@master 54 | with: 55 | toolchain: ${{ matrix.toolchain }} 56 | components: clippy 57 | - name: cargo clippy 58 | uses: giraffate/clippy-action@v1 59 | with: 60 | reporter: 'github-pr-check' 61 | github_token: ${{ secrets.GITHUB_TOKEN }} 62 | semver: 63 | runs-on: ubuntu-latest 64 | name: semver 65 | steps: 66 | - uses: actions/checkout@v4 67 | with: 68 | submodules: true 69 | - name: Install stable 70 | uses: dtolnay/rust-toolchain@stable 71 | with: 72 | components: rustfmt 73 | - name: cargo-semver-checks 74 | uses: obi1kenobi/cargo-semver-checks-action@v2 75 | doc: 76 | # run docs generation on nightly rather than stable. This enables features like 77 | # https://doc.rust-lang.org/beta/unstable-book/language-features/doc-cfg.html which allows an 78 | # API be documented as only available in some specific platforms. 
79 | runs-on: ubuntu-latest 80 | name: nightly / doc 81 | steps: 82 | - uses: actions/checkout@v4 83 | with: 84 | submodules: true 85 | - name: Install nightly 86 | uses: dtolnay/rust-toolchain@nightly 87 | - name: cargo doc 88 | run: cargo doc --no-deps --all-features 89 | env: 90 | RUSTDOCFLAGS: --cfg docsrs 91 | hack: 92 | # cargo-hack checks combinations of feature flags to ensure that features are all additive 93 | # which is required for feature unification 94 | runs-on: ubuntu-latest 95 | name: ubuntu / stable / features 96 | steps: 97 | - uses: actions/checkout@v4 98 | with: 99 | submodules: true 100 | - name: Install stable 101 | uses: dtolnay/rust-toolchain@stable 102 | - name: cargo install cargo-hack 103 | uses: taiki-e/install-action@cargo-hack 104 | # intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4 105 | # --feature-powerset runs for every combination of features 106 | - name: cargo hack 107 | run: cargo hack --feature-powerset check 108 | msrv: 109 | # check that we can build using the minimal rust version that is specified by this crate 110 | runs-on: ubuntu-latest 111 | # we use a matrix here just because env can't be used in job names 112 | # https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability 113 | strategy: 114 | matrix: 115 | msrv: ["1.72.0"] 116 | name: ubuntu / ${{ matrix.msrv }} 117 | steps: 118 | - uses: actions/checkout@v4 119 | with: 120 | submodules: true 121 | - name: Install ${{ matrix.msrv }} 122 | uses: dtolnay/rust-toolchain@master 123 | with: 124 | toolchain: ${{ matrix.msrv }} 125 | - name: cargo +${{ matrix.msrv }} check 126 | run: cargo check 127 | -------------------------------------------------------------------------------- /.github/workflows/safety.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs checks for unsafe code. In crates that don't have any unsafe code, this can be 2 | # removed. Runs: 3 | # - miri - detects undefined behavior and memory leaks 4 | # - address sanitizer - detects memory errors 5 | # - leak sanitizer - detects memory leaks 6 | # See check.yml for information about how the concurrency cancellation and workflow triggering works 7 | permissions: 8 | contents: read 9 | on: 10 | push: 11 | branches: [master] 12 | pull_request: 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 15 | cancel-in-progress: true 16 | name: safety 17 | jobs: 18 | sanitizers: 19 | runs-on: ubuntu-latest 20 | timeout-minutes: 15 21 | steps: 22 | - uses: actions/checkout@v4 23 | with: 24 | submodules: true 25 | - name: Install nightly 26 | uses: dtolnay/rust-toolchain@nightly 27 | - run: | 28 | # to get the symbolizer for debug symbol resolution 29 | sudo apt install llvm 30 | # to fix buggy leak analyzer: 31 | # https://github.com/japaric/rust-san#unrealiable-leaksanitizer 32 | # ensure there's a profile.dev section 33 | if ! 
grep -qE '^[ \t]*[profile.dev]' Cargo.toml; then 34 | echo >> Cargo.toml 35 | echo '[profile.dev]' >> Cargo.toml 36 | fi 37 | # remove pre-existing opt-levels in profile.dev 38 | sed -i '/^\s*\[profile.dev\]/,/^\s*\[/ {/^\s*opt-level/d}' Cargo.toml 39 | # now set opt-level to 1 40 | sed -i '/^\s*\[profile.dev\]/a opt-level = 1' Cargo.toml 41 | cat Cargo.toml 42 | name: Enable debug symbols 43 | - name: cargo test -Zsanitizer=address 44 | # only --lib --tests b/c of https://github.com/rust-lang/rust/issues/53945 45 | run: cargo test --lib --tests --all-features --target x86_64-unknown-linux-gnu 46 | env: 47 | ASAN_OPTIONS: "detect_odr_violation=0:detect_leaks=0" 48 | RUSTFLAGS: "-Z sanitizer=address --cfg seize_asan" 49 | - name: cargo test -Zsanitizer=leak 50 | if: always() 51 | run: cargo test --all-features --target x86_64-unknown-linux-gnu 52 | env: 53 | LSAN_OPTIONS: "suppressions=lsan-suppressions.txt" 54 | RUSTFLAGS: "-Z sanitizer=leak" 55 | miri: 56 | runs-on: ubuntu-latest 57 | timeout-minutes: 15 58 | steps: 59 | - uses: actions/checkout@v4 60 | with: 61 | submodules: true 62 | - run: | 63 | echo "NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)" >> $GITHUB_ENV 64 | - name: Install ${{ env.NIGHTLY }} 65 | uses: dtolnay/rust-toolchain@master 66 | with: 67 | toolchain: ${{ env.NIGHTLY }} 68 | components: miri 69 | - name: cargo miri test 70 | run: cargo miri test 71 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # This is the main CI workflow that runs the test suite on all pushes to main and all pull requests. 2 | # It runs the following jobs: 3 | # - required: runs the test suite on ubuntu with stable and beta rust toolchains 4 | # requirements of this crate, and its dependencies 5 | # - os-check: runs the test suite on mac and windows 6 | # See check.yml for information about how the concurrency cancellation and workflow triggering works 7 | permissions: 8 | contents: read 9 | on: 10 | push: 11 | branches: [master] 12 | pull_request: 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 15 | cancel-in-progress: true 16 | name: test 17 | jobs: 18 | required: 19 | runs-on: ubuntu-latest 20 | timeout-minutes: 15 21 | name: ubuntu / ${{ matrix.toolchain }} 22 | strategy: 23 | matrix: 24 | # run on stable and beta to ensure that tests won't break on the next version of the rust 25 | # toolchain 26 | toolchain: [stable, beta] 27 | steps: 28 | - uses: actions/checkout@v4 29 | with: 30 | submodules: true 31 | - name: Install ${{ matrix.toolchain }} 32 | uses: dtolnay/rust-toolchain@master 33 | with: 34 | toolchain: ${{ matrix.toolchain }} 35 | - name: cargo generate-lockfile 36 | # enable this ci template to run regardless of whether the lockfile is checked in or not 37 | if: hashFiles('Cargo.lock') == '' 38 | run: cargo generate-lockfile 39 | # https://twitter.com/jonhoo/status/1571290371124260865 40 | - name: cargo test --locked 41 | run: cargo test --locked --all-features --all-targets 42 | # https://github.com/rust-lang/cargo/issues/6669 43 | - name: cargo test --doc 44 | run: cargo test --locked --all-features --doc 45 | os-check: 46 | # run cargo test on mac and windows 47 | runs-on: ${{ matrix.os }} 48 | timeout-minutes: 15 49 | name: ${{ matrix.os }} / stable 50 | strategy: 51 | fail-fast: false 52 | matrix: 53 | os: [macos-latest, 
windows-latest] 54 | steps: 55 | - uses: actions/checkout@v4 56 | with: 57 | submodules: true 58 | - name: Install stable 59 | uses: dtolnay/rust-toolchain@stable 60 | - name: cargo generate-lockfile 61 | if: hashFiles('Cargo.lock') == '' 62 | run: cargo generate-lockfile 63 | - name: cargo test 64 | run: cargo test --locked --all-features --all-targets 65 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "seize" 3 | version = "0.5.0" 4 | edition = "2021" 5 | license = "MIT" 6 | authors = ["Ibraheem Ahmed "] 7 | description = "Fast, efficient, and predictable memory reclamation for concurrent data structures." 8 | repository = "https://github.com/ibraheemdev/seize" 9 | keywords = ["lock-free", "rcu", "atomic", "garbage", "concurrency"] 10 | categories = ["concurrency", "memory-management"] 11 | rust-version = "1.72.0" 12 | 13 | [dependencies] 14 | libc = { version = "0.2", optional = true } 15 | 16 | [target.'cfg(windows)'.dependencies] 17 | windows-sys = { version = "0.59", features = ["Win32_System_Threading"], optional = true } 18 | 19 | [features] 20 | default = ["fast-barrier"] 21 | 22 | # Enables runtime detection of fast memory barriers on Linux and Windows. 23 | fast-barrier = ["windows-sys", "libc"] 24 | 25 | [dev-dependencies] 26 | criterion = "0.3.5" 27 | crossbeam-epoch = "0.9.8" 28 | haphazard = { git = "https://github.com/jonhoo/haphazard", rev = "e0e18f60f78652a63aba235be854f87d106c1a1b" } 29 | 30 | [lints.rust] 31 | unexpected_cfgs = { level = "warn", check-cfg = ['cfg(seize_asan)'] } 32 | 33 | [[bench]] 34 | name = "stack" 35 | harness = false 36 | 37 | [[bench]] 38 | name = "single_thread" 39 | harness = false 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Ibraheem Ahmed 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `seize` 2 | 3 | [crates.io](https://crates.io/crates/seize) 4 | [github](https://github.com/ibraheemdev/seize) 5 | [docs.rs](https://docs.rs/seize) 6 | 7 | Fast, efficient, and predictable memory reclamation for concurrent data 8 | structures. 9 | 10 | Refer to the [quick-start guide] to get started. 11 | 12 | ## Background 13 | 14 | Concurrent data structures are faced with the problem of deciding when it is 15 | safe to free memory. Despite an object being logically removed, it may still be 16 | accessible by other threads that are holding references to it, and thus it is 17 | not safe to free immediately. Over the years, many algorithms have been devised 18 | to solve this problem. However, most traditional memory reclamation schemes make 19 | a tradeoff between performance and efficiency. 20 | 21 | For example, [hazard pointers] track individual pointers, making them very 22 | memory efficient but also relatively slow. On the other hand, [epoch based 23 | reclamation] is fast and lightweight, but lacks predictability, requiring 24 | periodic checks to determine when it is safe to free memory. This can cause 25 | reclamation to trigger unpredictably, leading to poor latency distributions. 26 | 27 | Alternative epoch-based schemes forgo workload balancing, relying on the thread 28 | that retires an object always being the one that frees it. While this can avoid 29 | synchronization costs, it also leads to unbalanced reclamation in read-dominated 30 | workloads; parallelism is reduced when only a fraction of threads are writing, 31 | degrading memory efficiency as well as performance. 32 | 33 | ## Implementation 34 | 35 | `seize` is based on the [hyaline reclamation scheme], which uses reference 36 | counting to determine when it is safe to free memory. However, unlike 37 | traditional reference counting schemes where every memory access requires 38 | modifying shared memory, reference counters are only used for retired objects. 39 | When a batch of objects is retired, a reference counter is initialized and 40 | propagated to all active threads. Threads cooperate to decrement the reference 41 | counter as they exit, eventually freeing the batch. Reclamation is naturally 42 | balanced as the thread with the last reference to an object is the one that 43 | frees it. This also removes the need to check whether other threads have made 44 | progress, leading to predictable latency without sacrificing performance. 45 | 46 | `seize` provides performance competitive with that of epoch based schemes, while 47 | memory efficiency is similar to that of hazard pointers. `seize` is compatible 48 | with all modern hardware that supports single-word atomic operations such as FAA 49 | and CAS. 
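If you just want a feel for the API, the following is a minimal, single-threaded
sketch adapted from the examples in the crate documentation. The [quick-start
guide] walks through using `seize` in a real concurrent data structure.

```rust
use std::sync::atomic::{AtomicPtr, Ordering};

use seize::{reclaim, Collector, Guard};

// A collector, which would normally be owned by the data structure itself.
let collector = Collector::new();

// An atomic pointer to a heap allocation.
let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize)));

{
    // Mark this thread as active.
    let guard = collector.enter();

    // Loads of objects that may be retired must go through the guard.
    let value = guard.protect(&ptr, Ordering::Acquire);
    unsafe { assert_eq!(*value, 1) }

    // `value` remains valid until the guard is dropped.
}

// Swap in a new value and retire the old allocation. It is freed once all
// threads that could have protected it are done with it.
let old = ptr.swap(Box::into_raw(Box::new(2_usize)), Ordering::Release);
unsafe { collector.retire(old, reclaim::boxed) };

// Eventually, retire the last value as well.
unsafe { collector.retire(ptr.load(Ordering::Relaxed), reclaim::boxed) };
```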
50 | 51 | [quick-start guide]: https://docs.rs/seize/latest/seize/guide/index.html 52 | [hazard pointers]: 53 | https://www.cs.otago.ac.nz/cosc440/readings/hazard-pointers.pdf 54 | [hyaline reclamation scheme]: https://arxiv.org/pdf/1905.07903.pdf 55 | [epoch based reclamation]: 56 | https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf 57 | -------------------------------------------------------------------------------- /benches/single_thread.rs: -------------------------------------------------------------------------------- 1 | use std::hint::black_box; 2 | 3 | use criterion::{criterion_group, criterion_main, Criterion}; 4 | 5 | fn enter_leave(c: &mut Criterion) { 6 | let mut group = c.benchmark_group("enter_leave"); 7 | group.bench_function("seize", |b| { 8 | let collector = seize::Collector::new(); 9 | b.iter(|| { 10 | black_box(collector.enter()); 11 | }); 12 | }); 13 | 14 | group.bench_function("crossbeam", |b| { 15 | b.iter(|| { 16 | black_box(crossbeam_epoch::pin()); 17 | }); 18 | }); 19 | } 20 | 21 | criterion_group!(benches, enter_leave); 22 | criterion_main!(benches); 23 | -------------------------------------------------------------------------------- /benches/stack.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{Arc, Barrier}; 2 | use std::thread; 3 | 4 | use criterion::{criterion_group, criterion_main, Criterion}; 5 | 6 | const THREADS: usize = 16; 7 | const ITEMS: usize = 1000; 8 | 9 | fn treiber_stack(c: &mut Criterion) { 10 | c.bench_function("trieber_stack-haphazard", |b| { 11 | b.iter(run::>) 12 | }); 13 | 14 | c.bench_function("trieber_stack-crossbeam", |b| { 15 | b.iter(run::>) 16 | }); 17 | 18 | c.bench_function("trieber_stack-seize", |b| { 19 | b.iter(run::>) 20 | }); 21 | } 22 | 23 | trait Stack { 24 | fn new() -> Self; 25 | fn push(&self, value: T); 26 | fn pop(&self) -> Option; 27 | fn is_empty(&self) -> bool; 28 | } 29 | 30 | fn run() 31 | where 32 | T: Stack + Send + Sync + 'static, 33 | { 34 | let stack = Arc::new(T::new()); 35 | let barrier = Arc::new(Barrier::new(THREADS)); 36 | 37 | let handles = (0..THREADS - 1) 38 | .map(|_| { 39 | let stack = stack.clone(); 40 | let barrier = barrier.clone(); 41 | 42 | thread::spawn(move || { 43 | barrier.wait(); 44 | for i in 0..ITEMS { 45 | stack.push(i); 46 | assert!(stack.pop().is_some()); 47 | } 48 | }) 49 | }) 50 | .collect::>(); 51 | 52 | barrier.wait(); 53 | for i in 0..ITEMS { 54 | stack.push(i); 55 | assert!(stack.pop().is_some()); 56 | } 57 | 58 | for handle in handles { 59 | handle.join().unwrap(); 60 | } 61 | 62 | assert!(stack.pop().is_none()); 63 | assert!(stack.is_empty()); 64 | } 65 | 66 | criterion_group!(benches, treiber_stack); 67 | criterion_main!(benches); 68 | 69 | mod seize_stack { 70 | use super::Stack; 71 | use seize::{reclaim, Collector, Guard}; 72 | use std::mem::ManuallyDrop; 73 | use std::ptr::{self, NonNull}; 74 | use std::sync::atomic::{AtomicPtr, Ordering}; 75 | 76 | #[derive(Debug)] 77 | pub struct TreiberStack { 78 | head: AtomicPtr>, 79 | collector: Collector, 80 | } 81 | 82 | #[derive(Debug)] 83 | struct Node { 84 | data: ManuallyDrop, 85 | next: *mut Node, 86 | } 87 | 88 | impl Stack for TreiberStack { 89 | fn new() -> TreiberStack { 90 | TreiberStack { 91 | head: AtomicPtr::new(ptr::null_mut()), 92 | collector: Collector::new().batch_size(32), 93 | } 94 | } 95 | 96 | fn push(&self, value: T) { 97 | let node = Box::into_raw(Box::new(Node { 98 | data: ManuallyDrop::new(value), 99 | next: ptr::null_mut(), 100 | })); 101 | 102 | 
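// Mark the thread as active: pointers loaded with `guard.protect` below are protected until the guard is dropped.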
let guard = self.collector.enter(); 103 | 104 | loop { 105 | let head = guard.protect(&self.head, Ordering::Relaxed); 106 | unsafe { (*node).next = head } 107 | 108 | if self 109 | .head 110 | .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed) 111 | .is_ok() 112 | { 113 | break; 114 | } 115 | } 116 | } 117 | 118 | fn pop(&self) -> Option { 119 | let guard = self.collector.enter(); 120 | 121 | loop { 122 | let head = NonNull::new(guard.protect(&self.head, Ordering::Acquire))?.as_ptr(); 123 | 124 | let next = unsafe { (*head).next }; 125 | 126 | if self 127 | .head 128 | .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed) 129 | .is_ok() 130 | { 131 | unsafe { 132 | let data = ptr::read(&(*head).data); 133 | guard.defer_retire(head, reclaim::boxed); 134 | return Some(ManuallyDrop::into_inner(data)); 135 | } 136 | } 137 | } 138 | } 139 | 140 | fn is_empty(&self) -> bool { 141 | self.head.load(Ordering::Relaxed).is_null() 142 | } 143 | } 144 | 145 | impl Drop for TreiberStack { 146 | fn drop(&mut self) { 147 | while self.pop().is_some() {} 148 | } 149 | } 150 | } 151 | 152 | mod haphazard_stack { 153 | use super::Stack; 154 | use haphazard::{Domain, HazardPointer}; 155 | use std::mem::ManuallyDrop; 156 | use std::ptr; 157 | use std::sync::atomic::{AtomicPtr, Ordering}; 158 | 159 | #[derive(Debug)] 160 | pub struct TreiberStack { 161 | head: AtomicPtr>, 162 | } 163 | 164 | #[derive(Debug)] 165 | struct Node { 166 | data: ManuallyDrop, 167 | next: *mut Node, 168 | } 169 | 170 | unsafe impl Send for Node {} 171 | unsafe impl Sync for Node {} 172 | 173 | impl Stack for TreiberStack { 174 | fn new() -> TreiberStack { 175 | TreiberStack { 176 | head: AtomicPtr::default(), 177 | } 178 | } 179 | 180 | fn push(&self, value: T) { 181 | let node = Box::into_raw(Box::new(Node { 182 | data: ManuallyDrop::new(value), 183 | next: ptr::null_mut(), 184 | })); 185 | 186 | let mut h = HazardPointer::new(); 187 | 188 | loop { 189 | let head = match h.protect_ptr(&self.head) { 190 | Some((ptr, _)) => ptr.as_ptr(), 191 | None => ptr::null_mut(), 192 | }; 193 | 194 | unsafe { (*node).next = head } 195 | 196 | if self 197 | .head 198 | .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed) 199 | .is_ok() 200 | { 201 | break; 202 | } 203 | } 204 | } 205 | 206 | fn pop(&self) -> Option { 207 | let mut h = HazardPointer::new(); 208 | 209 | loop { 210 | let (head, _) = h.protect_ptr(&self.head)?; 211 | let next = unsafe { head.as_ref().next }; 212 | 213 | if self 214 | .head 215 | .compare_exchange(head.as_ptr(), next, Ordering::Relaxed, Ordering::Relaxed) 216 | .is_ok() 217 | { 218 | unsafe { 219 | let data = ptr::read(&head.as_ref().data); 220 | Domain::global().retire_ptr::<_, Box>>(head.as_ptr()); 221 | return Some(ManuallyDrop::into_inner(data)); 222 | } 223 | } 224 | } 225 | } 226 | 227 | fn is_empty(&self) -> bool { 228 | let mut h = HazardPointer::new(); 229 | unsafe { h.protect(&self.head) }.is_none() 230 | } 231 | } 232 | 233 | impl Drop for TreiberStack { 234 | fn drop(&mut self) { 235 | while self.pop().is_some() {} 236 | } 237 | } 238 | } 239 | 240 | mod crossbeam_stack { 241 | use super::Stack; 242 | use crossbeam_epoch::{Atomic, Owned, Shared}; 243 | use std::mem::ManuallyDrop; 244 | use std::ptr; 245 | use std::sync::atomic::Ordering; 246 | 247 | #[derive(Debug)] 248 | pub struct TreiberStack { 249 | head: Atomic>, 250 | } 251 | 252 | unsafe impl Send for TreiberStack {} 253 | unsafe impl Sync for TreiberStack {} 254 | 255 | #[derive(Debug)] 256 | struct Node { 
257 | data: ManuallyDrop, 258 | next: *const Node, 259 | } 260 | 261 | impl Stack for TreiberStack { 262 | fn new() -> TreiberStack { 263 | TreiberStack { 264 | head: Atomic::null(), 265 | } 266 | } 267 | 268 | fn push(&self, value: T) { 269 | let guard = crossbeam_epoch::pin(); 270 | 271 | let mut node = Owned::new(Node { 272 | data: ManuallyDrop::new(value), 273 | next: ptr::null_mut(), 274 | }); 275 | 276 | loop { 277 | let head = self.head.load(Ordering::Relaxed, &guard); 278 | node.next = head.as_raw(); 279 | 280 | match self.head.compare_exchange( 281 | head, 282 | node, 283 | Ordering::Release, 284 | Ordering::Relaxed, 285 | &guard, 286 | ) { 287 | Ok(_) => break, 288 | Err(err) => node = err.new, 289 | } 290 | } 291 | } 292 | 293 | fn pop(&self) -> Option { 294 | let guard = crossbeam_epoch::pin(); 295 | 296 | loop { 297 | let head = self.head.load(Ordering::Acquire, &guard); 298 | 299 | if head.is_null() { 300 | return None; 301 | } 302 | 303 | let next = unsafe { head.deref().next }; 304 | 305 | if self 306 | .head 307 | .compare_exchange( 308 | head, 309 | Shared::from(next), 310 | Ordering::Relaxed, 311 | Ordering::Relaxed, 312 | &guard, 313 | ) 314 | .is_ok() 315 | { 316 | unsafe { 317 | let data = ptr::read(&head.deref().data); 318 | guard.defer_destroy(head); 319 | return Some(ManuallyDrop::into_inner(data)); 320 | } 321 | } 322 | } 323 | } 324 | 325 | fn is_empty(&self) -> bool { 326 | let guard = crossbeam_epoch::pin(); 327 | self.head.load(Ordering::Relaxed, &guard).is_null() 328 | } 329 | } 330 | 331 | impl Drop for TreiberStack { 332 | fn drop(&mut self) { 333 | while self.pop().is_some() {} 334 | } 335 | } 336 | } 337 | -------------------------------------------------------------------------------- /docs/GUIDE.md: -------------------------------------------------------------------------------- 1 | A quick-start guide for working with `seize`. 2 | 3 | # Introduction 4 | 5 | `seize` tries to stay out of your way as much as possible. It works with raw 6 | pointers directly instead of creating safe wrapper types that end up being a 7 | hassle to work with in practice. Below is a step-by-step guide on how to get 8 | started. We'll be writing a stack that implements concurrent `push` and `pop` 9 | operations. The details of how the stack works are not directly relevant, the 10 | guide will instead focus on how `seize` works generally. 11 | 12 | # Collectors 13 | 14 | `seize` avoids the use of global state and encourages creating a designated 15 | _collector_ per data structure. Collectors allow you to safely read and reclaim 16 | objects. For our concurrent stack, the collector will sit alongside the head 17 | node. 18 | 19 | ```rust,ignore 20 | use seize::{reclaim, Collector, Linked}; 21 | use std::mem::ManuallyDrop; 22 | use std::sync::atomic::{AtomicPtr, Ordering}; 23 | 24 | pub struct Stack { 25 | // The collector for memory reclamation. 26 | collector: Collector, 27 | 28 | // The head of the stack. 29 | head: AtomicPtr>, 30 | } 31 | 32 | struct Node { 33 | // The node's value. 34 | value: ManuallyDrop, 35 | 36 | // The next node in the stack. 37 | next: *mut Linked>, 38 | } 39 | ``` 40 | 41 | # Performing Operations 42 | 43 | Before starting an operation that involves loading objects that may be 44 | reclaimed, you must mark the thread as _active_ by calling the `enter` method. 
45 | 
46 | ```rust,ignore
47 | impl<T> Stack<T> {
48 |     pub fn push(&self, value: T) {
49 |         let node = Box::into_raw(Box::new(Node {
50 |             next: std::ptr::null_mut(),
51 |             value: ManuallyDrop::new(value),
52 |         }));
53 | 
54 |         let guard = self.collector.enter(); // <===
55 | 
56 |         // ...
57 |     }
58 | }
59 | ```
60 | 
61 | # Protecting Loads
62 | 
63 | `enter` returns a guard that allows you to safely load atomic pointers. Guards
64 | are the core of safe memory reclamation, letting other threads know that the
65 | current thread may be accessing shared memory.
66 | 
67 | Using a guard, you can perform a _protected_ load of an atomic pointer using
68 | the [`Guard::protect`] method. Any valid pointer that is protected is guaranteed
69 | to stay valid until the guard is dropped, or the pointer is retired by the
70 | current thread. Importantly, if another thread retires an object that you
71 | protected, the collector knows not to reclaim the object until your guard is
72 | dropped.
73 | 
74 | ```rust,ignore
75 | impl<T> Stack<T> {
76 |     pub fn push(&self, value: T) {
77 |         // ...
78 | 
79 |         let guard = self.collector.enter();
80 | 
81 |         loop {
82 |             let head = guard.protect(&self.head, Ordering::Relaxed); // <===
83 |             unsafe { (*node).next = head; }
84 | 
85 |             if self
86 |                 .head
87 |                 .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed)
88 |                 .is_ok()
89 |             {
90 |                 break;
91 |             }
92 |         }
93 | 
94 |         drop(guard);
95 |     }
96 | }
97 | ```
98 | 
99 | Notice that the lifetime of a guarded pointer is logically tied to that of the
100 | guard — when the guard is dropped the pointer is invalidated — but we work with
101 | raw pointers for convenience. Data structures that return shared references to
102 | values should ensure that the lifetime of the reference is tied to the lifetime
103 | of a guard.
104 | 
105 | # Retiring Objects
106 | 
107 | Objects that have been removed from a data structure can be safely _retired_
108 | through the collector. It will be _reclaimed_, or freed, when no thread holds a
109 | reference to it any longer.
110 | 
111 | ```rust,ignore
112 | impl<T> Stack<T> {
113 |     pub fn pop(&self) -> Option<T> {
114 |         // Mark the thread as active.
115 |         let guard = self.collector.enter();
116 | 
117 |         loop {
118 |             // Perform a protected load of the head.
119 |             let head = guard.protect(&self.head, Ordering::Acquire);
120 | 
121 |             if head.is_null() {
122 |                 return None;
123 |             }
124 | 
125 |             let next = unsafe { (*head).next };
126 | 
127 |             // Pop the head from the stack.
128 |             if self
129 |                 .head
130 |                 .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed)
131 |                 .is_ok()
132 |             {
133 |                 unsafe {
134 |                     // Read the value of the previous head.
135 |                     let data = ptr::read(&(*head).value);
136 | 
137 |                     // Retire the previous head now that it has been popped.
138 |                     self.collector.retire(head, reclaim::boxed); // <===
139 | 
140 |                     // Return the value.
141 |                     return Some(ManuallyDrop::into_inner(data));
142 |                 }
143 |             }
144 |         }
145 |     }
146 | }
147 | ```
148 | 
149 | There are a couple of important things to note about retiring an object.
150 | 
151 | ### 1. Retired objects must be logically removed
152 | 
153 | An object can only be retired if it is _no longer accessible_ to any thread that
154 | comes after. In the above code example this was ensured by swapping out the node
155 | before retiring it. Threads that loaded a value _before_ it was retired are
156 | safe, but threads that come after are not.
157 | 
158 | Note that concurrent stacks typically suffer from the [ABA problem].
Using
159 | `retire` after popping a node ensures that the node is only freed _after_ all
160 | active threads that could have loaded it exit, avoiding any potential ABA.
161 | 
162 | ### 2. Retired objects cannot be accessed by the current thread
163 | 
164 | A guard does not protect objects retired by the current thread. If no other
165 | thread holds a reference to an object, it may be reclaimed _immediately_. This
166 | makes the following code unsound.
167 | 
168 | ```rust,ignore
169 | let ptr = guard.protect(&node, Ordering::Acquire);
170 | collector.retire(ptr, reclaim::boxed);
171 | 
172 | // **Unsound**, the pointer has been retired.
173 | println!("{}", (*ptr).value);
174 | ```
175 | 
176 | Retirement can be delayed until the guard is dropped by calling [`defer_retire`]
177 | on the guard, instead of on the collector directly.
178 | 
179 | ```rust,ignore
180 | let ptr = guard.protect(&node, Ordering::Acquire);
181 | guard.defer_retire(ptr, reclaim::boxed);
182 | 
183 | // This read is fine.
184 | println!("{}", (*ptr).value);
185 | // However, once the guard is dropped, the pointer is invalidated.
186 | drop(guard);
187 | ```
188 | 
189 | ### 3. Custom Reclaimers
190 | 
191 | You probably noticed that `retire` takes a function as a second parameter. This
192 | function is known as a _reclaimer_, and is run when the collector decides it is
193 | safe to free the retired object. Typically you will pass in a function from the
194 | [`seize::reclaim`] module. For example, values allocated with `Box` can use
195 | [`reclaim::boxed`], as we used in our stack.
196 | 
197 | ```rust,ignore
198 | use seize::reclaim;
199 | 
200 | impl<T> Stack<T> {
201 |     pub fn pop(&self) -> Option<T> {
202 |         // ...
203 |         self.collector.retire(head, reclaim::boxed);
204 |         // ...
205 |     }
206 | }
207 | ```
208 | 
209 | If you need to run custom reclamation code, you can write a custom reclaimer.
210 | 
211 | ```rust,ignore
212 | collector.retire(value, |ptr: *mut Node<T>, _collector: &Collector| unsafe {
213 |     // Safety: The value was allocated with `Box::new`.
214 |     let value = Box::from_raw(ptr);
215 |     println!("Dropping {value}");
216 |     drop(value);
217 | });
218 | ```
219 | 
220 | Note that the reclaimer receives a reference to the collector as its second
221 | argument, allowing for recursive reclamation.
222 | 
223 | [`defer_retire`]:
224 | https://docs.rs/seize/latest/seize/trait.Guard.html#tymethod.defer_retire
225 | [`Guard::protect`]:
226 | https://docs.rs/seize/latest/seize/trait.Guard.html#tymethod.protect
227 | [`seize::reclaim`]: https://docs.rs/seize/latest/seize/reclaim/index.html
228 | [`reclaim::boxed`]: https://docs.rs/seize/latest/seize/reclaim/fn.boxed.html
229 | [ABA problem]: https://en.wikipedia.org/wiki/ABA_problem
230 | 
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | wrap_comments = true
2 | comment_width = 80
3 | 
--------------------------------------------------------------------------------
/src/collector.rs:
--------------------------------------------------------------------------------
1 | use crate::raw::{self, membarrier, Thread};
2 | use crate::{LocalGuard, OwnedGuard};
3 | 
4 | use std::fmt;
5 | use std::sync::OnceLock;
6 | 
7 | /// A concurrent garbage collector.
8 | /// 9 | /// A `Collector` manages the access and retirement of concurrent objects 10 | /// Objects can be safely loaded through *guards*, which can be created using 11 | /// the [`enter`](Collector::enter) or [`enter_owned`](Collector::enter_owned) 12 | /// methods. 13 | /// 14 | /// Every instance of a concurrent data structure should typically own its 15 | /// `Collector`. This allows the garbage collection of non-`'static` values, as 16 | /// memory reclamation is guaranteed to run when the `Collector` is dropped. 17 | #[repr(transparent)] 18 | pub struct Collector { 19 | /// The underlying raw collector instance. 20 | pub(crate) raw: raw::Collector, 21 | } 22 | 23 | impl Default for Collector { 24 | fn default() -> Self { 25 | Self::new() 26 | } 27 | } 28 | 29 | impl Collector { 30 | /// The default batch size for a new collector. 31 | const DEFAULT_BATCH_SIZE: usize = 32; 32 | 33 | /// Creates a new collector. 34 | pub fn new() -> Self { 35 | // Initialize the `membarrier` module, detecting the presence of 36 | // operating-system strong barrier APIs. 37 | membarrier::detect(); 38 | 39 | // available_parallelism is quite slow (microseconds). 40 | static CPUS: OnceLock = OnceLock::new(); 41 | let cpus = *CPUS.get_or_init(|| { 42 | std::thread::available_parallelism() 43 | .map(Into::into) 44 | .unwrap_or(1) 45 | }); 46 | 47 | // Ensure every batch accumulates at least as many entries 48 | // as there are threads on the system. 49 | let batch_size = cpus.max(Self::DEFAULT_BATCH_SIZE); 50 | 51 | Self { 52 | raw: raw::Collector::new(cpus, batch_size), 53 | } 54 | } 55 | 56 | /// Sets the number of objects that must be in a batch before reclamation is 57 | /// attempted. 58 | /// 59 | /// Retired objects are added to thread-local *batches* before starting the 60 | /// reclamation process. After `batch_size` is hit, the objects are moved to 61 | /// separate *retirement lists*, where reference counting kicks in and 62 | /// batches are eventually reclaimed. 63 | /// 64 | /// A larger batch size amortizes the cost of retirement. However, 65 | /// reclamation latency can also grow due to the large number of objects 66 | /// needed to be freed. Note that reclamation can not be attempted 67 | /// unless the batch contains at least as many objects as the number of 68 | /// active threads. 69 | /// 70 | /// The default batch size is `32`. 71 | pub fn batch_size(mut self, batch_size: usize) -> Self { 72 | self.raw.batch_size = batch_size; 73 | self 74 | } 75 | 76 | /// Marks the current thread as active, returning a guard that protects 77 | /// loads of concurrent objects for its lifetime. The thread will be 78 | /// marked as inactive when the guard is dropped. 79 | /// 80 | /// Note that loads of objects that may be retired must be protected with 81 | /// the [`Guard::protect`]. See [the 82 | /// guide](crate::guide#starting-operations) for an introduction to 83 | /// using guards, or the documentation of [`LocalGuard`] for 84 | /// more details. 85 | /// 86 | /// Note that `enter` is reentrant, and it is legal to create multiple 87 | /// guards on the same thread. The thread will stay marked as active 88 | /// until the last guard is dropped. 89 | /// 90 | /// [`Guard::protect`]: crate::Guard::protect 91 | /// 92 | /// # Performance 93 | /// 94 | /// Performance-wise, creating and destroying a `LocalGuard` is about the 95 | /// same as locking and unlocking an uncontended `Mutex`. Because of 96 | /// this, guards should be reused across multiple operations if 97 | /// possible. 
However, holding a guard prevents the reclamation of any 98 | /// concurrent objects retired during its lifetime, so there is 99 | /// a tradeoff between performance and memory usage. 100 | /// 101 | /// # Examples 102 | /// 103 | /// ```rust 104 | /// # use std::sync::atomic::{AtomicPtr, Ordering}; 105 | /// use seize::Guard; 106 | /// # let collector = seize::Collector::new(); 107 | /// 108 | /// // An atomic object. 109 | /// let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize))); 110 | /// 111 | /// { 112 | /// // Create a guard that is active for this scope. 113 | /// let guard = collector.enter(); 114 | /// 115 | /// // Read the object using a protected load. 116 | /// let value = guard.protect(&ptr, Ordering::Acquire); 117 | /// unsafe { assert_eq!(*value, 1) } 118 | /// 119 | /// // If there are other thread that may retire the object, 120 | /// // the pointer is no longer valid after the guard is dropped. 121 | /// drop(guard); 122 | /// } 123 | /// # unsafe { drop(Box::from_raw(ptr.load(Ordering::Relaxed))) }; 124 | /// ``` 125 | #[inline] 126 | pub fn enter(&self) -> LocalGuard<'_> { 127 | LocalGuard::enter(self) 128 | } 129 | 130 | /// Create an owned guard that protects objects for its lifetime. 131 | /// 132 | /// Unlike local guards created with [`enter`](Collector::enter), owned 133 | /// guards are independent of the current thread, allowing them to 134 | /// implement `Send` and `Sync`. See the documentation of [`OwnedGuard`] 135 | /// for more details. 136 | #[inline] 137 | pub fn enter_owned(&self) -> OwnedGuard<'_> { 138 | OwnedGuard::enter(self) 139 | } 140 | 141 | /// Retires a value, running `reclaim` when no threads hold a reference to 142 | /// it. 143 | /// 144 | /// Note that this method is disconnected from any guards on the current 145 | /// thread, so the pointer may be reclaimed immediately. Use 146 | /// [`Guard::defer_retire`](crate::Guard::defer_retire) if the pointer may 147 | /// still be accessed by the current thread while the guard is active. 148 | /// 149 | /// # Safety 150 | /// 151 | /// The retired pointer must no longer be accessible to any thread that 152 | /// enters after it is removed. It also cannot be accessed by the 153 | /// current thread after `retire` is called. 154 | /// 155 | /// Additionally, the pointer must be valid to pass to the provided 156 | /// reclaimer, once it is safe to reclaim. 157 | /// 158 | /// # Examples 159 | /// 160 | /// Common reclaimers are provided by the [`reclaim`](crate::reclaim) 161 | /// module. 162 | /// 163 | /// ``` 164 | /// # use std::sync::atomic::{AtomicPtr, Ordering}; 165 | /// # let collector = seize::Collector::new(); 166 | /// use seize::reclaim; 167 | /// 168 | /// // An atomic object. 169 | /// let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize))); 170 | /// 171 | /// // Create a guard. 172 | /// let guard = collector.enter(); 173 | /// 174 | /// // Store a new value. 175 | /// let old = ptr.swap(Box::into_raw(Box::new(2_usize)), Ordering::Release); 176 | /// 177 | /// // Reclaim the old value. 178 | /// // 179 | /// // Safety: The `swap` above made the old value unreachable for any new threads. 180 | /// // Additionally, the old value was allocated with a `Box`, so `reclaim::boxed` 181 | /// // is valid. 182 | /// unsafe { collector.retire(old, reclaim::boxed) }; 183 | /// # unsafe { collector.retire(ptr.load(Ordering::Relaxed), reclaim::boxed) }; 184 | /// ``` 185 | /// 186 | /// Alternative, a custom reclaimer function can be used. 
187 | /// 188 | /// ``` 189 | /// use seize::Collector; 190 | /// 191 | /// let collector = Collector::new(); 192 | /// 193 | /// // Allocate a value and immediately retire it. 194 | /// let value: *mut usize = Box::into_raw(Box::new(1_usize)); 195 | /// 196 | /// // Safety: The value was never shared. 197 | /// unsafe { 198 | /// collector.retire(value, |ptr: *mut usize, _collector: &Collector| unsafe { 199 | /// // Safety: The value was allocated with `Box::new`. 200 | /// let value = Box::from_raw(ptr); 201 | /// println!("Dropping {value}"); 202 | /// drop(value); 203 | /// }); 204 | /// } 205 | /// ``` 206 | #[inline] 207 | pub unsafe fn retire(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) { 208 | debug_assert!(!ptr.is_null(), "attempted to retire a null pointer"); 209 | 210 | // Note that `add` doesn't ever actually reclaim the pointer immediately if 211 | // the current thread is active. Instead, it adds it to the current thread's 212 | // reclamation list, but we don't guarantee that publicly. 213 | unsafe { self.raw.add(ptr, reclaim, Thread::current()) } 214 | } 215 | 216 | /// Reclaim any values that have been retired. 217 | /// 218 | /// This method reclaims any objects that have been retired across *all* 219 | /// threads. After calling this method, any values that were previous 220 | /// retired, or retired recursively on the current thread during this 221 | /// call, will have been reclaimed. 222 | /// 223 | /// # Safety 224 | /// 225 | /// This function is **extremely unsafe** to call. It is only sound when no 226 | /// threads are currently active, whether accessing values that have 227 | /// been retired or accessing the collector through any type of guard. 228 | /// This is akin to having a unique reference to the collector. However, 229 | /// this method takes a shared reference, as reclaimers to 230 | /// be run by this thread are allowed to access the collector recursively. 231 | /// 232 | /// # Notes 233 | /// 234 | /// Note that if reclaimers initialize guards across threads, or initialize 235 | /// owned guards, objects retired through those guards may not be 236 | /// reclaimed. 237 | pub unsafe fn reclaim_all(&self) { 238 | unsafe { self.raw.reclaim_all() }; 239 | } 240 | 241 | // Create a reference to `Collector` from an underlying `raw::Collector`. 242 | pub(crate) fn from_raw(raw: &raw::Collector) -> &Collector { 243 | unsafe { &*(raw as *const raw::Collector as *const Collector) } 244 | } 245 | } 246 | 247 | impl Eq for Collector {} 248 | 249 | impl PartialEq for Collector { 250 | /// Checks if both references point to the same collector. 251 | #[inline] 252 | fn eq(&self, other: &Self) -> bool { 253 | self.raw.id == other.raw.id 254 | } 255 | } 256 | 257 | impl fmt::Debug for Collector { 258 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 259 | f.debug_struct("Collector") 260 | .field("batch_size", &self.raw.batch_size) 261 | .finish() 262 | } 263 | } 264 | -------------------------------------------------------------------------------- /src/guard.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::marker::PhantomData; 3 | use std::sync::atomic::{AtomicPtr, Ordering}; 4 | 5 | use crate::raw::{self, Reservation, Thread}; 6 | use crate::Collector; 7 | 8 | /// A guard that enables protected loads of concurrent objects. 9 | /// 10 | /// This trait provides common functionality implemented by [`LocalGuard`] and 11 | /// [`OwnedGuard`]. 
See [the guide](crate::guide#starting-operations) for an 12 | /// introduction to using guards. 13 | pub trait Guard { 14 | /// Refreshes the guard. 15 | /// 16 | /// Calling this method is similar to dropping and immediately creating a 17 | /// new guard. The current thread remains active, but any pointers that 18 | /// were previously protected may be reclaimed. 19 | /// 20 | /// # Safety 21 | /// 22 | /// This method is not marked as `unsafe`, but will affect the validity of 23 | /// pointers loaded using [`Guard::protect`], similar to dropping a guard. 24 | /// It is intended to be used safely by users of concurrent data structures, 25 | /// as references will be tied to the guard and this method takes `&mut 26 | /// self`. 27 | fn refresh(&mut self); 28 | 29 | /// Flush any retired values in the local batch. 30 | /// 31 | /// This method flushes any values from the current thread's local batch, 32 | /// starting the reclamation process. Note that no memory can be 33 | /// reclaimed while this guard is active, but calling `flush` may allow 34 | /// memory to be reclaimed more quickly after the guard is dropped. 35 | /// 36 | /// Note that the batch must contain at least as many objects as the number 37 | /// of currently active threads for a flush to be performed. See 38 | /// [`Collector::batch_size`] for details about batch sizes. 39 | fn flush(&self); 40 | 41 | /// Returns the collector this guard was created from. 42 | fn collector(&self) -> &Collector; 43 | 44 | /// Returns a numeric identifier for the current thread. 45 | /// 46 | /// Guards rely on thread-local state, including thread IDs. This method is 47 | /// a cheap way to get an identifier for the current thread without TLS 48 | /// overhead. Note that thread IDs may be reused, so the value returned 49 | /// is only unique for the lifetime of this thread. 50 | fn thread_id(&self) -> usize; 51 | 52 | /// Protects the load of an atomic pointer. 53 | /// 54 | /// Any valid pointer loaded through a guard using the `protect` method is 55 | /// guaranteed to stay valid until the guard is dropped, or the object 56 | /// is retired by the current thread. Importantly, if another thread 57 | /// retires this object, it will not be reclaimed for the lifetime of 58 | /// this guard. 59 | /// 60 | /// Note that the lifetime of a guarded pointer is logically tied to that of 61 | /// the guard — when the guard is dropped the pointer is invalidated. Data 62 | /// structures that return shared references to values should ensure that 63 | /// the lifetime of the reference is tied to the lifetime of a guard. 64 | fn protect(&self, ptr: &AtomicPtr, order: Ordering) -> *mut T { 65 | ptr.load(raw::Collector::protect(order)) 66 | } 67 | 68 | /// Stores a value into the pointer, returning the protected previous value. 69 | /// 70 | /// This method is equivalent to [`AtomicPtr::swap`], except the returned 71 | /// value is guaranteed to be protected with the same guarantees as 72 | /// [`Guard::protect`]. 73 | fn swap(&self, ptr: &AtomicPtr, value: *mut T, order: Ordering) -> *mut T { 74 | ptr.swap(value, raw::Collector::protect(order)) 75 | } 76 | 77 | /// Stores a value into the pointer if the current value is the same as the 78 | /// `current` value, returning the protected previous value. 79 | /// 80 | /// This method is equivalent to [`AtomicPtr::compare_exchange`], except the 81 | /// returned value is guaranteed to be protected with the same 82 | /// guarantees as [`Guard::protect`]. 
83 | fn compare_exchange( 84 | &self, 85 | ptr: &AtomicPtr, 86 | current: *mut T, 87 | new: *mut T, 88 | success: Ordering, 89 | failure: Ordering, 90 | ) -> Result<*mut T, *mut T> { 91 | ptr.compare_exchange( 92 | current, 93 | new, 94 | raw::Collector::protect(success), 95 | raw::Collector::protect(failure), 96 | ) 97 | } 98 | 99 | /// Stores a value into the pointer if the current value is the same as the 100 | /// `current` value, returning the protected previous value. 101 | /// 102 | /// This method is equivalent to [`AtomicPtr::compare_exchange_weak`], 103 | /// except the returned value is guaranteed to be protected with the 104 | /// same guarantees as [`Guard::protect`]. 105 | fn compare_exchange_weak( 106 | &self, 107 | ptr: &AtomicPtr, 108 | current: *mut T, 109 | new: *mut T, 110 | success: Ordering, 111 | failure: Ordering, 112 | ) -> Result<*mut T, *mut T> { 113 | ptr.compare_exchange_weak( 114 | current, 115 | new, 116 | raw::Collector::protect(success), 117 | raw::Collector::protect(failure), 118 | ) 119 | } 120 | 121 | /// Retires a value, running `reclaim` when no threads hold a reference to 122 | /// it. 123 | /// 124 | /// This method delays reclamation until the guard is dropped, as opposed to 125 | /// [`Collector::retire`], which may reclaim objects immediately. 126 | /// 127 | /// 128 | /// # Safety 129 | /// 130 | /// The retired pointer must no longer be accessible to any thread that 131 | /// enters after it is removed. Additionally, the pointer must be valid 132 | /// to pass to the provided reclaimer, once it is safe to reclaim. 133 | unsafe fn defer_retire(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)); 134 | } 135 | 136 | /// A guard that keeps the current thread marked as active. 137 | /// 138 | /// Local guards are created by calling [`Collector::enter`]. Unlike 139 | /// [`OwnedGuard`], a local guard is tied to the current thread and does not 140 | /// implement `Send`. This makes local guards relatively cheap to create and 141 | /// destroy. 142 | /// 143 | /// Most of the functionality provided by this type is through the [`Guard`] 144 | /// trait. 145 | pub struct LocalGuard<'a> { 146 | /// The collector that this guard is associated with. 147 | collector: &'a Collector, 148 | 149 | // The current thread. 150 | thread: Thread, 151 | 152 | // The reservation for the current thread. 153 | reservation: *const Reservation, 154 | 155 | // `LocalGuard` not be `Send or Sync` as we are tied to the state of the 156 | // current thread in the collector. 157 | _unsend: PhantomData<*mut ()>, 158 | } 159 | 160 | impl LocalGuard<'_> { 161 | #[inline] 162 | pub(crate) fn enter(collector: &Collector) -> LocalGuard<'_> { 163 | let thread = Thread::current(); 164 | 165 | // Safety: `thread` is the current thread. 166 | let reservation = unsafe { collector.raw.reservation(thread) }; 167 | 168 | // Calls to `enter` may be reentrant, so we need to keep track of the number of 169 | // active guards for the current thread. 170 | let guards = reservation.guards.get(); 171 | reservation.guards.set(guards + 1); 172 | 173 | if guards == 0 { 174 | // Safety: Only called on the current thread, which is currently inactive. 175 | unsafe { collector.raw.enter(reservation) }; 176 | } 177 | 178 | LocalGuard { 179 | thread, 180 | reservation, 181 | collector, 182 | _unsend: PhantomData, 183 | } 184 | } 185 | } 186 | 187 | impl Guard for LocalGuard<'_> { 188 | /// Refreshes the guard. 
189 | #[inline] 190 | fn refresh(&mut self) { 191 | // Safety: `self.reservation` is owned by the current thread. 192 | let reservation = unsafe { &*self.reservation }; 193 | let guards = reservation.guards.get(); 194 | 195 | if guards == 1 { 196 | // Safety: We have a unique reference to the last active guard. 197 | unsafe { self.collector.raw.refresh(reservation) } 198 | } 199 | } 200 | 201 | /// Flush any retired values in the local batch. 202 | #[inline] 203 | fn flush(&self) { 204 | // Note that this does not actually retire any values, it just attempts to add 205 | // the batch to any active reservations lists, including ours. 206 | // 207 | // Safety: `self.thread` is the current thread. 208 | unsafe { self.collector.raw.try_retire_batch(self.thread) } 209 | } 210 | 211 | /// Returns the collector this guard was created from. 212 | #[inline] 213 | fn collector(&self) -> &Collector { 214 | self.collector 215 | } 216 | 217 | /// Returns a numeric identifier for the current thread. 218 | #[inline] 219 | fn thread_id(&self) -> usize { 220 | self.thread.id 221 | } 222 | 223 | /// Retires a value, running `reclaim` when no threads hold a reference to 224 | /// it. 225 | #[inline] 226 | unsafe fn defer_retire(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) { 227 | // Safety: 228 | // - `self.thread` is the current thread. 229 | // - The validity of the pointer is guaranteed by the caller. 230 | unsafe { self.collector.raw.add(ptr, reclaim, self.thread) } 231 | } 232 | } 233 | 234 | impl Drop for LocalGuard<'_> { 235 | #[inline] 236 | fn drop(&mut self) { 237 | // Safety: `self.reservation` is owned by the current thread. 238 | let reservation = unsafe { &*self.reservation }; 239 | 240 | // Decrement the active guard count. 241 | let guards = reservation.guards.get(); 242 | reservation.guards.set(guards - 1); 243 | 244 | if guards == 1 { 245 | // Safety: We have a unique reference to the last active guard. 246 | unsafe { self.collector.raw.leave(reservation) }; 247 | } 248 | } 249 | } 250 | 251 | impl fmt::Debug for LocalGuard<'_> { 252 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 253 | f.debug_tuple("LocalGuard").finish() 254 | } 255 | } 256 | 257 | /// A guard that protects objects for it's lifetime, independent of the current 258 | /// thread. 259 | /// 260 | /// Unlike [`LocalGuard`], an owned guard is independent of the current thread, 261 | /// allowing it to implement `Send` and `Sync`. This is useful for holding 262 | /// guards across `.await` points in work-stealing schedulers, where execution 263 | /// may be resumed on a different thread than started on. However, owned guards 264 | /// are more expensive to create and destroy, so should be avoided if 265 | /// cross-thread usage is not required. 266 | /// 267 | /// Most of the functionality provided by this type is through the [`Guard`] 268 | /// trait. 269 | pub struct OwnedGuard<'a> { 270 | /// The collector that this guard is associated with. 271 | collector: &'a Collector, 272 | 273 | // An owned thread, unique to this guard. 274 | thread: Thread, 275 | 276 | // The reservation for this guard. 277 | reservation: *const Reservation, 278 | } 279 | 280 | // Safety: All shared methods on `OwnedGuard` that access shared memory are 281 | // synchronized with locks. 282 | unsafe impl Sync for OwnedGuard<'_> {} 283 | 284 | // Safety: `OwnedGuard` owns its thread slot and is not tied to any 285 | // thread-locals. 
286 | unsafe impl Send for OwnedGuard<'_> {} 287 | 288 | impl OwnedGuard<'_> { 289 | #[inline] 290 | pub(crate) fn enter(collector: &Collector) -> OwnedGuard<'_> { 291 | // Create a thread slot that will last for the lifetime of this guard. 292 | let thread = Thread::create(); 293 | 294 | // Safety: We have ownership of `thread` and have not shared it. 295 | let reservation = unsafe { collector.raw.reservation(thread) }; 296 | 297 | // Safety: We have ownership of `reservation`. 298 | unsafe { collector.raw.enter(reservation) }; 299 | 300 | OwnedGuard { 301 | collector, 302 | thread, 303 | reservation, 304 | } 305 | } 306 | } 307 | 308 | impl Guard for OwnedGuard<'_> { 309 | /// Refreshes the guard. 310 | #[inline] 311 | fn refresh(&mut self) { 312 | // Safety: `self.reservation` is owned by the current thread. 313 | let reservation = unsafe { &*self.reservation }; 314 | unsafe { self.collector.raw.refresh(reservation) } 315 | } 316 | 317 | /// Flush any retired values in the local batch. 318 | #[inline] 319 | fn flush(&self) { 320 | // Safety: `self.reservation` is owned by the current thread. 321 | let reservation = unsafe { &*self.reservation }; 322 | let _lock = reservation.lock.lock().unwrap(); 323 | // Note that this does not actually retire any values, it just attempts to add 324 | // the batch to any active reservations lists, including ours. 325 | // 326 | // Safety: We hold the lock and so have unique access to the batch. 327 | unsafe { self.collector.raw.try_retire_batch(self.thread) } 328 | } 329 | 330 | /// Returns the collector this guard was created from. 331 | #[inline] 332 | fn collector(&self) -> &Collector { 333 | self.collector 334 | } 335 | 336 | /// Returns a numeric identifier for the current thread. 337 | #[inline] 338 | fn thread_id(&self) -> usize { 339 | // We can't return the ID of our thread slot because `OwnedGuard` is `Send` so 340 | // the ID is not uniquely tied to the current thread. We also can't 341 | // return the OS thread ID because it might conflict with our thread 342 | // IDs, so we have to get/create the current thread. 343 | Thread::current().id 344 | } 345 | 346 | /// Retires a value, running `reclaim` when no threads hold a reference to 347 | /// it. 348 | #[inline] 349 | unsafe fn defer_retire(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) { 350 | // Safety: `self.reservation` is owned by the current thread. 351 | let reservation = unsafe { &*self.reservation }; 352 | let _lock = reservation.lock.lock().unwrap(); 353 | 354 | // Safety: 355 | // - We hold the lock and so have unique access to the batch. 356 | // - The validity of the pointer is guaranteed by the caller. 357 | unsafe { self.collector.raw.add(ptr, reclaim, self.thread) } 358 | } 359 | } 360 | 361 | impl Drop for OwnedGuard<'_> { 362 | #[inline] 363 | fn drop(&mut self) { 364 | // Safety: `self.reservation` is owned by the current thread. 365 | let reservation = unsafe { &*self.reservation }; 366 | 367 | // Safety: `self.thread` is an owned thread. 368 | unsafe { self.collector.raw.leave(reservation) }; 369 | 370 | // Safety: We are in `drop` and never share `self.thread`. 
371 | unsafe { Thread::free(self.thread.id) }; 372 | } 373 | } 374 | -------------------------------------------------------------------------------- /src/guide.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../docs/GUIDE.md")] 2 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::missing_transmute_annotations)] 2 | #![deny(unsafe_op_in_unsafe_fn)] 3 | #![doc = include_str!("../README.md")] 4 | 5 | mod collector; 6 | mod guard; 7 | mod raw; 8 | 9 | pub mod guide; 10 | pub mod reclaim; 11 | 12 | pub use collector::Collector; 13 | pub use guard::{Guard, LocalGuard, OwnedGuard}; 14 | -------------------------------------------------------------------------------- /src/raw/collector.rs: -------------------------------------------------------------------------------- 1 | use super::membarrier; 2 | use super::tls::{Thread, ThreadLocal}; 3 | use super::utils::CachePadded; 4 | 5 | use std::cell::{Cell, UnsafeCell}; 6 | use std::ptr; 7 | use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; 8 | use std::sync::Mutex; 9 | 10 | /// Fast and efficient concurrent memory reclamation. 11 | /// 12 | /// The core memory reclamation algorithm used by seize is described 13 | /// [in this paper](https://arxiv.org/pdf/2108.02763.pdf). Specifically, 14 | /// this module implements the Hyaline-1 variant of the algorithm. 15 | pub struct Collector { 16 | /// Per-thread batches of retired nodes. 17 | /// 18 | /// Retired values are added to thread-local batches before starting the 19 | /// reclamation process, amortizing the cost of retirement. 20 | batches: ThreadLocal>>, 21 | 22 | /// Per-thread reservations lists. 23 | /// 24 | /// A reservation list is a list of batches that were retired while the 25 | /// current thread was active. The thread must decrement the reference 26 | /// count and potentially free the batch of any reservations before 27 | /// exiting. 28 | reservations: ThreadLocal>, 29 | 30 | /// A unique identifier for a collector. 31 | pub(crate) id: usize, 32 | 33 | /// The minimum number of nodes required in a batch before attempting 34 | /// retirement. 35 | pub(crate) batch_size: usize, 36 | } 37 | 38 | impl Collector { 39 | /// Create a collector with the provided batch size and initial thread 40 | /// count. 41 | pub fn new(threads: usize, batch_size: usize) -> Self { 42 | // A counter for collector IDs. 43 | static ID: AtomicUsize = AtomicUsize::new(0); 44 | 45 | Self { 46 | id: ID.fetch_add(1, Ordering::Relaxed), 47 | reservations: ThreadLocal::with_capacity(threads), 48 | batches: ThreadLocal::with_capacity(threads), 49 | batch_size: batch_size.next_power_of_two(), 50 | } 51 | } 52 | 53 | /// Return the reservation for the given thread. 54 | /// 55 | /// # Safety 56 | /// 57 | /// The current thread must have unique access to the reservation for the 58 | /// provided `thread`. 59 | #[inline] 60 | pub unsafe fn reservation(&self, thread: Thread) -> &Reservation { 61 | // Safety: Guaranteed by caller. 62 | unsafe { self.reservations.load(thread) } 63 | } 64 | 65 | /// Mark the current thread as active. 66 | /// 67 | /// `enter` and `leave` calls maintain a local reference count to allow 68 | /// reentrancy. If the current thread is already marked as active, this 69 | /// method simply increases the reference count. 
70 | /// 71 | /// # Safety 72 | /// 73 | /// This method is not safe to call concurrently on the same thread, and 74 | /// must only be called if the current thread is inactive. 75 | #[inline] 76 | pub unsafe fn enter(&self, reservation: &Reservation) { 77 | // Mark the current thread as active. 78 | reservation 79 | .head 80 | .store(ptr::null_mut(), membarrier::light_store()); 81 | 82 | // This barrier, combined with the light store above, synchronizes with the 83 | // heavy barrier in `retire`: 84 | // - If our store comes first, the thread retiring will see that we are active. 85 | // - If the fence comes first, we will see the new values of any objects being 86 | // retired by that thread 87 | // 88 | // Note that all pointer loads perform a light barrier to participate in the 89 | // total order. 90 | membarrier::light_barrier(); 91 | } 92 | 93 | /// Strengthens an ordering to that necessary to protect the load of a 94 | /// pointer. 95 | #[inline] 96 | pub fn protect(_order: Ordering) -> Ordering { 97 | // We have to respect both the user provided ordering and the ordering required 98 | // by the membarrier strategy. `SeqCst` is equivalent to `Acquire` on 99 | // most platforms, so we just use it unconditionally. 100 | // 101 | // Loads performed with this ordering, paired with the light barrier in `enter`, 102 | // will participate in the total order established by `enter`, and thus see the 103 | // new values of any pointers that were retired when the thread was inactive. 104 | Ordering::SeqCst 105 | } 106 | 107 | /// Mark the current thread as inactive. 108 | /// 109 | /// # Safety 110 | /// 111 | /// Any previously protected pointers may be invalidated after calling 112 | /// `leave`. Additionally, this method is not safe to call concurrently 113 | /// with the same reservation. 114 | #[inline] 115 | pub unsafe fn leave(&self, reservation: &Reservation) { 116 | // Release: Exit the critical section, ensuring that any pointer accesses 117 | // happen-before we are marked as inactive. 118 | let head = reservation.head.swap(Entry::INACTIVE, Ordering::Release); 119 | 120 | if head != Entry::INACTIVE { 121 | // Acquire any new entries in the reservation list, as well as the new values of 122 | // any objects that were retired while we were active. 123 | atomic::fence(Ordering::Acquire); 124 | 125 | // Decrement the reference counts of any batches that were retired. 126 | unsafe { self.traverse(head) } 127 | } 128 | } 129 | 130 | /// Clear the reservation list, keeping the thread marked as active. 131 | /// 132 | /// # Safety 133 | /// 134 | /// Any previously protected pointers may be invalidated after calling 135 | /// `leave`. Additionally, this method is not safe to call concurrently 136 | /// with the same reservation. 137 | #[inline] 138 | pub unsafe fn refresh(&self, reservation: &Reservation) { 139 | // SeqCst: Establish the ordering of a combined call to `leave` and `enter`. 140 | let head = reservation.head.swap(ptr::null_mut(), Ordering::SeqCst); 141 | 142 | if head != Entry::INACTIVE { 143 | // Decrement the reference counts of any batches that were retired. 144 | unsafe { self.traverse(head) } 145 | } 146 | } 147 | 148 | /// Add a node to the retirement batch, retiring the batch if `batch_size` 149 | /// nodes are reached. 150 | /// 151 | /// # Safety 152 | /// 153 | /// The given pointer must no longer be accessible to any thread that enters 154 | /// after it is removed. It also cannot be accessed by the current 155 | /// thread after `add` is called. 
156 | /// 157 | /// The pointer must also be valid to pass to the provided reclaimer once it is 158 | /// safe to reclaim. 159 | /// 160 | /// Additionally, the current thread must have unique access to the batch for 161 | /// the provided `thread`. 162 | #[inline] 163 | pub unsafe fn add<T>( 164 | &self, 165 | ptr: *mut T, 166 | reclaim: unsafe fn(*mut T, &crate::Collector), 167 | thread: Thread, 168 | ) { 169 | // Safety: The caller guarantees we have unique access to the batch. 170 | let local_batch = unsafe { self.batches.load(thread).get() }; 171 | 172 | // Safety: The caller guarantees we have unique access to the batch. 173 | let batch = unsafe { (*local_batch).get_or_init(self.batch_size) }; 174 | 175 | // If we are in a recursive call during `drop` or `reclaim_all`, reclaim the 176 | // object immediately. 177 | if batch == LocalBatch::DROP { 178 | // Safety: `LocalBatch::DROP` means we have unique access to the collector. 179 | // Additionally, the caller guarantees that the pointer is valid for the 180 | // provided reclaimer. 181 | unsafe { reclaim(ptr, crate::Collector::from_raw(self)) } 182 | return; 183 | } 184 | 185 | // Safety: `fn(*mut T)` and `fn(*mut U)` are ABI compatible if `T, U: Sized`. 186 | let reclaim: unsafe fn(*mut (), &crate::Collector) = 187 | unsafe { std::mem::transmute(reclaim) }; 188 | 189 | // Safety: The caller guarantees we have unique access to the batch. 190 | let len = unsafe { 191 | // Create an entry for this node. 192 | (*batch).entries.push(Entry { 193 | batch, 194 | reclaim, 195 | ptr: ptr.cast::<()>(), 196 | state: EntryState { 197 | head: ptr::null_mut(), 198 | }, 199 | }); 200 | 201 | (*batch).entries.len() 202 | }; 203 | 204 | // Attempt to retire the batch if we have enough entries. 205 | if len >= self.batch_size { 206 | // Safety: The caller guarantees that we have unique access to the batch, and we 207 | // are not holding on to any mutable references. 208 | unsafe { self.try_retire(local_batch) } 209 | } 210 | } 211 | 212 | /// Attempt to retire objects in the current thread's batch. 213 | /// 214 | /// # Safety 215 | /// 216 | /// The current thread must have unique access to the batch for the given 217 | /// `thread`. 218 | #[inline] 219 | pub unsafe fn try_retire_batch(&self, thread: Thread) { 220 | // Safety: Guaranteed by caller. 221 | unsafe { self.try_retire(self.batches.load(thread).get()) } 222 | } 223 | 224 | /// Attempt to retire objects in this batch. 225 | /// 226 | /// Note that if a guard on the current thread is active, the batch will 227 | /// also be added to the current reservation list for deferred 228 | /// reclamation. 229 | /// 230 | /// # Safety 231 | /// 232 | /// The current thread must have unique access to the provided batch. 233 | /// 234 | /// Additionally, the caller should not be holding on to any mutable 235 | /// references to the local batch, as they may be invalidated by 236 | /// recursive calls to `try_retire`. 237 | #[inline] 238 | pub unsafe fn try_retire(&self, local_batch: *mut LocalBatch) { 239 | // Establish a total order between the retirement of nodes in this batch and 240 | // light stores marking a thread as active: 241 | // - If the store comes first, we will see that the thread is active. 242 | // - If this barrier comes first, the thread will see the new values of any 243 | // objects in this batch.
244 | // 245 | // This barrier also synchronizes with the light store executed when 246 | // a thread is created: 247 | // - If our barrier comes first, they will see the new values of any objects in 248 | // this batch. 249 | // - If their store comes first, we will see the new thread. 250 | membarrier::heavy(); 251 | 252 | // Safety: The caller guarantees we have unique access to the batch. 253 | let batch = unsafe { (*local_batch).batch }; 254 | 255 | // There is nothing to retire. 256 | if batch.is_null() || batch == LocalBatch::DROP { 257 | return; 258 | } 259 | 260 | // Safety: The caller guarantees we have unique access to the batch. 261 | let batch_entries = unsafe { (*batch).entries.as_mut_ptr() }; 262 | 263 | let mut marked = 0; 264 | 265 | // Record all active threads, including the current thread. 266 | // 267 | // We need to do this in a separate step before actually retiring the batch to 268 | // ensure we have enough entries for reservation lists, as the number of 269 | // threads can grow dynamically. 270 | // 271 | // Safety: We only access `reservation.head`, which is an atomic pointer that is 272 | // sound to access from multiple threads. 273 | for reservation in unsafe { self.reservations.iter() } { 274 | // If this thread is inactive, we can skip it. The heavy barrier above ensures 275 | // that the next time it becomes active, it will see the new values 276 | // of any objects in this batch. 277 | // 278 | // Relaxed: See the Acquire fence below. 279 | if reservation.head.load(Ordering::Relaxed) == Entry::INACTIVE { 280 | continue; 281 | } 282 | 283 | // If we don't have enough entries to insert into the reservation lists of all 284 | // active threads, try again later. 285 | // 286 | // Safety: The caller guarantees we have unique access to the batch. 287 | let Some(entry) = unsafe { &mut (*batch).entries }.get_mut(marked) else { 288 | return; 289 | }; 290 | 291 | // Temporarily store this reservation list in the batch. 292 | // 293 | // Safety: All nodes in a batch are valid and this batch has not yet been shared 294 | // to other threads. 295 | entry.state.head = &reservation.head; 296 | marked += 1; 297 | } 298 | 299 | // We have enough entries to perform reclamation. At this point, we can reset 300 | // the local batch. 301 | unsafe { *local_batch = LocalBatch::default() }; 302 | 303 | // For any inactive threads we skipped above, synchronize with `leave` to ensure 304 | // any accesses happen-before we retire. We ensured with the heavy 305 | // barrier above that the thread will see the new values of any objects 306 | // in this batch the next time it becomes active. 307 | atomic::fence(Ordering::Acquire); 308 | 309 | let mut active = 0; 310 | 311 | // Add the batch to the reservation lists of any active threads. 312 | 'retire: for i in 0..marked { 313 | // Safety: The caller guarantees we have unique access to the batch, and we 314 | // ensure we have at least `marked` entries in the batch. 315 | let curr = unsafe { batch_entries.add(i) }; 316 | 317 | // Safety: `curr` is a valid node in the batch, and we just initialized `head` 318 | // for all `marked` nodes in the previous loop. 319 | let head = unsafe { &*(*curr).state.head }; 320 | 321 | // Relaxed: All writes to the `head` use RMW instructions, so the previous node 322 | // in the list is synchronized through the release sequence on 323 | // `head`. 324 | let mut prev = head.load(Ordering::Relaxed); 325 | 326 | loop { 327 | // The thread became inactive, skip it.
328 | // 329 | // As long as the thread became inactive at some point after the heavy barrier, 330 | // it can no longer access any objects in this batch. The next 331 | // time it becomes active it will load the new object values. 332 | if prev == Entry::INACTIVE { 333 | // Acquire: Synchronize with `leave` to ensure any accesses happen-before we 334 | // retire. 335 | atomic::fence(Ordering::Acquire); 336 | continue 'retire; 337 | } 338 | 339 | // Link this node to the reservation list. 340 | unsafe { (*curr).state.next = prev } 341 | 342 | // Release: Ensure our access of the node, as well as the stores of new values 343 | // for any objects in the batch, are synchronized when this 344 | // thread calls `leave` and attempts to reclaim this batch. 345 | match head.compare_exchange_weak(prev, curr, Ordering::Release, Ordering::Relaxed) { 346 | Ok(_) => break, 347 | // Lost the race to another thread, retry. 348 | Err(found) => prev = found, 349 | } 350 | } 351 | 352 | active += 1; 353 | } 354 | 355 | // Release: If we don't free the list, ensure our access of the batch is 356 | // synchronized with the thread that eventually will. 357 | // 358 | // Safety: The caller guarantees we have unique access to the batch. 359 | if unsafe { &*batch } 360 | .active 361 | .fetch_add(active, Ordering::Release) 362 | .wrapping_add(active) 363 | == 0 364 | { 365 | // Acquire: Ensure any access of objects in the batch, by threads that were 366 | // active and decremented the reference count, happen-before we free 367 | // it. 368 | atomic::fence(Ordering::Acquire); 369 | 370 | // Safety: The reference count is zero, meaning that either no threads were 371 | // active, or they have all already decremented the reference count. 372 | // 373 | // Additionally, the local batch has been reset and we are not holding on to any 374 | // mutable references, so any recursive calls to `retire` during 375 | // reclamation are valid. 376 | unsafe { self.free_batch(batch) } 377 | } 378 | } 379 | 380 | /// Traverse the reservation list, decrementing the reference count of each 381 | /// batch. 382 | /// 383 | /// # Safety 384 | /// 385 | /// `list` must be a valid reservation list. 386 | #[cold] 387 | #[inline(never)] 388 | unsafe fn traverse(&self, mut list: *mut Entry) { 389 | while !list.is_null() { 390 | let curr = list; 391 | 392 | // Advance the cursor. 393 | // Safety: `curr` is a valid, non-null node in the list. 394 | list = unsafe { (*curr).state.next }; 395 | let batch = unsafe { (*curr).batch }; 396 | 397 | // Safety: Batch pointers are valid for reads until they are reclaimed. 398 | unsafe { 399 | // Release: If we don't free the list, ensure our access of the batch is 400 | // synchronized with the thread that eventually will. 401 | if (*batch).active.fetch_sub(1, Ordering::Release) == 1 { 402 | // Ensure any access of objects in the batch by other active threads 403 | // happen-before we free it. 404 | atomic::fence(Ordering::Acquire); 405 | 406 | // Safety: We have the last reference to the batch and it has been removed from 407 | // our reservation list. 408 | self.free_batch(batch) 409 | } 410 | } 411 | } 412 | } 413 | 414 | /// Reclaim all values in the collector, including recursive calls to 415 | /// retire. 416 | /// 417 | /// # Safety 418 | /// 419 | /// No threads may be accessing the collector or any values that have been 420 | /// retired. This is equivalent to having a unique reference to the data 421 | /// structure containing the collector. 
422 | #[inline] 423 | pub unsafe fn reclaim_all(&self) { 424 | // Safety: Guaranteed by caller. 425 | for local_batch in unsafe { self.batches.iter() } { 426 | let local_batch = local_batch.value.get(); 427 | 428 | // Safety: The caller guarantees we have unique access to the batch. 429 | let batch = unsafe { (*local_batch).batch }; 430 | 431 | // There is nothing to reclaim. 432 | if batch.is_null() { 433 | continue; 434 | } 435 | 436 | // Tell any recursive calls to `retire` to reclaim immediately. 437 | // 438 | // Safety: The caller guarantees we have unique access to the batch. 439 | unsafe { (*local_batch).batch = LocalBatch::DROP }; 440 | 441 | // Safety: The caller guarantees we have unique access to the batch, and we 442 | // ensured it is non-null. Additionally, the local batch was reset 443 | // above, so the batch is inaccessible through recursive calls to 444 | // `retire`. 445 | unsafe { self.free_batch(batch) }; 446 | 447 | // Reset the batch. 448 | // 449 | // Safety: The caller guarantees we have unique access to the batch. 450 | unsafe { (*local_batch).batch = ptr::null_mut() }; 451 | } 452 | } 453 | 454 | /// Free a batch of objects. 455 | /// 456 | /// # Safety 457 | /// 458 | /// The batch reference count must be zero. 459 | /// 460 | /// Additionally, the current thread must not be holding on to any mutable 461 | /// references to thread-locals as recursive calls to `retire` may still 462 | /// access the local batch; the batch being retired must be unreachable 463 | /// through any recursive calls. 464 | #[inline] 465 | unsafe fn free_batch(&self, batch: *mut Batch) { 466 | // Safety: We have a unique reference to the batch. 467 | for entry in unsafe { (*batch).entries.iter_mut() } { 468 | unsafe { (entry.reclaim)(entry.ptr.cast(), crate::Collector::from_raw(self)) }; 469 | } 470 | 471 | unsafe { LocalBatch::free(batch) }; 472 | } 473 | } 474 | 475 | impl Drop for Collector { 476 | fn drop(&mut self) { 477 | // Safety: Values are only retired after being made inaccessible to any 478 | // inactive threads. Additionally, we have `&mut self`, meaning that any 479 | // active threads are no longer accessing retired values. 480 | unsafe { self.reclaim_all() }; 481 | } 482 | } 483 | 484 | /// A per-thread reservation list. 485 | /// 486 | /// Reservation lists are lists of retired entries, where each entry represents 487 | /// a batch. 488 | #[repr(C)] 489 | pub struct Reservation { 490 | /// The head of the list 491 | head: AtomicPtr, 492 | 493 | /// The number of active guards for this thread. 494 | pub guards: Cell, 495 | 496 | /// A lock used for owned guards to prevent concurrent operations. 497 | pub lock: Mutex<()>, 498 | } 499 | 500 | // Safety: Reservations are only accessed by the current thread, or synchronized 501 | // through a lock. 502 | unsafe impl Sync for Reservation {} 503 | 504 | impl Default for Reservation { 505 | fn default() -> Self { 506 | Reservation { 507 | head: AtomicPtr::new(Entry::INACTIVE), 508 | guards: Cell::new(0), 509 | lock: Mutex::new(()), 510 | } 511 | } 512 | } 513 | 514 | /// A batch of nodes waiting to be retired. 515 | struct Batch { 516 | /// Nodes in this batch. 517 | /// 518 | /// TODO: This allocation could be flattened. 519 | entries: Vec, 520 | 521 | /// The reference count for any active threads. 522 | active: AtomicUsize, 523 | } 524 | 525 | impl Batch { 526 | /// Create a new batch with the specified capacity. 
527 | #[inline] 528 | fn new(capacity: usize) -> Batch { 529 | Batch { 530 | entries: Vec::with_capacity(capacity), 531 | active: AtomicUsize::new(0), 532 | } 533 | } 534 | } 535 | 536 | /// A retired object. 537 | struct Entry { 538 | /// The pointer to the retired object. 539 | ptr: *mut (), 540 | 541 | /// The function used to reclaim the object. 542 | reclaim: unsafe fn(*mut (), &crate::Collector), 543 | 544 | /// The state of the retired object. 545 | state: EntryState, 546 | 547 | /// The batch that this node is a part of. 548 | batch: *mut Batch, 549 | } 550 | 551 | /// The state of a retired object. 552 | #[repr(C)] 553 | pub union EntryState { 554 | // While retiring: A temporary location for an active reservation list. 555 | head: *const AtomicPtr, 556 | 557 | // After retiring: The next node in the thread's reservation list. 558 | next: *mut Entry, 559 | } 560 | 561 | impl Entry { 562 | /// Represents an inactive thread. 563 | /// 564 | /// While null indicates an empty list, `INACTIVE` indicates the thread has 565 | /// no active guards and is not currently accessing any objects. 566 | pub const INACTIVE: *mut Entry = usize::MAX as _; 567 | } 568 | 569 | /// A pointer to a batch, unique to the current thread. 570 | pub struct LocalBatch { 571 | batch: *mut Batch, 572 | } 573 | 574 | impl Default for LocalBatch { 575 | fn default() -> Self { 576 | LocalBatch { 577 | batch: ptr::null_mut(), 578 | } 579 | } 580 | } 581 | 582 | impl LocalBatch { 583 | /// This is set during a call to `reclaim_all`, signalling recursive calls 584 | /// to retire to reclaim immediately. 585 | const DROP: *mut Batch = usize::MAX as _; 586 | 587 | /// Returns a pointer to the batch, initializing the batch if it was null. 588 | #[inline] 589 | fn get_or_init(&mut self, capacity: usize) -> *mut Batch { 590 | if self.batch.is_null() { 591 | self.batch = Box::into_raw(Box::new(Batch::new(capacity))); 592 | } 593 | 594 | self.batch 595 | } 596 | 597 | /// Free the batch. 598 | /// 599 | /// # Safety 600 | /// 601 | /// The safety requirements of `Box::from_raw` apply. 602 | #[inline] 603 | unsafe fn free(batch: *mut Batch) { 604 | // Safety: Guaranteed by caller. 605 | unsafe { drop(Box::from_raw(batch)) } 606 | } 607 | } 608 | 609 | // Safety: Any access to the batch owned by `LocalBatch` is unsafe. 610 | unsafe impl Send for LocalBatch {} 611 | -------------------------------------------------------------------------------- /src/raw/membarrier.rs: -------------------------------------------------------------------------------- 1 | //! Memory barriers optimized for RCU, inspired by . 2 | //! 3 | //! # Semantics 4 | //! 5 | //! There is a total order over all memory barriers provided by this module: 6 | //! - Light store barriers, created by a pair of [`light_store`] and 7 | //! [`light_barrier`]. 8 | //! - Light load barriers, created by a pair of [`light_barrier`] and 9 | //! [`light_load`]. 10 | //! - Sequentially consistent barriers, or cumulative light barriers. 11 | //! - Heavy barriers, created by [`heavy`]. 12 | //! 13 | //! If thread A issues barrier X and thread B issues barrier Y and X occurs 14 | //! before Y in the total order, X is ordered before Y with respect to coherence 15 | //! only if either X or Y is a heavy barrier. In other words, there is no way to 16 | //! establish an ordering between light barriers without the presence of a heavy 17 | //! barrier. 
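//!
//! # Example
//!
//! A minimal sketch (not part of the crate's API) of how these barriers are
//! intended to pair up. The `ACTIVE` flag below is a hypothetical stand-in for
//! a reservation's `head` pointer:
//!
//! ```ignore
//! use std::sync::atomic::AtomicBool;
//!
//! static ACTIVE: AtomicBool = AtomicBool::new(false);
//!
//! // Reader: publish "I am active" with a light store followed by a light barrier.
//! fn reader_enter() {
//!     ACTIVE.store(true, light_store());
//!     light_barrier();
//! }
//!
//! // Reclaimer: issue a heavy barrier, then scan for active readers. The total
//! // order guarantees that either the reader's store is visible here, or the
//! // reader observes the new values of any retired objects.
//! fn reclaimer_scan() -> bool {
//!     heavy();
//!     ACTIVE.load(light_load())
//! }
//! ```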
18 | #![allow(dead_code)] 19 | 20 | #[cfg(all(target_os = "linux", feature = "fast-barrier", not(miri)))] 21 | pub use linux::*; 22 | 23 | #[cfg(all(target_os = "windows", feature = "fast-barrier", not(miri)))] 24 | pub use windows::*; 25 | 26 | #[cfg(any( 27 | not(feature = "fast-barrier"), 28 | not(any(target_os = "windows", target_os = "linux")), 29 | miri 30 | ))] 31 | pub use default::*; 32 | 33 | #[cfg(any( 34 | not(feature = "fast-barrier"), 35 | not(any(target_os = "windows", target_os = "linux")), 36 | miri 37 | ))] 38 | mod default { 39 | use core::sync::atomic::{fence, Ordering}; 40 | 41 | pub fn detect() {} 42 | 43 | /// The ordering for a store operation that synchronizes with heavy 44 | /// barriers. 45 | /// 46 | /// Must be followed by a light barrier. 47 | #[inline] 48 | pub fn light_store() -> Ordering { 49 | // Synchronize with `SeqCst` heavy barriers. 50 | Ordering::SeqCst 51 | } 52 | 53 | /// Issues a light memory barrier for a preceding store or subsequent load 54 | /// operation. 55 | #[inline] 56 | pub fn light_barrier() { 57 | // This is a no-op due to strong loads and stores. 58 | } 59 | 60 | /// The ordering for a load operation that synchronizes with heavy barriers. 61 | #[inline] 62 | pub fn light_load() -> Ordering { 63 | // Participate in the total order established by light and heavy `SeqCst` 64 | // barriers. 65 | Ordering::SeqCst 66 | } 67 | 68 | /// Issues a heavy memory barrier for slow path that synchronizes with light 69 | /// stores. 70 | #[inline] 71 | pub fn heavy() { 72 | // Synchronize with `SeqCst` light stores. 73 | fence(Ordering::SeqCst); 74 | } 75 | } 76 | 77 | #[cfg(all(target_os = "linux", feature = "fast-barrier", not(miri)))] 78 | mod linux { 79 | use std::sync::atomic::{self, AtomicU8, Ordering}; 80 | 81 | /// The ordering for a store operation that synchronizes with heavy 82 | /// barriers. 83 | /// 84 | /// Must be followed by a light barrier. 85 | #[inline] 86 | pub fn light_store() -> Ordering { 87 | match STRATEGY.load(Ordering::Relaxed) { 88 | FALLBACK => Ordering::SeqCst, 89 | _ => Ordering::Relaxed, 90 | } 91 | } 92 | 93 | /// Issues a light memory barrier for a preceding store or subsequent load 94 | /// operation. 95 | #[inline] 96 | pub fn light_barrier() { 97 | atomic::compiler_fence(atomic::Ordering::SeqCst) 98 | } 99 | 100 | /// The ordering for a load operation that synchronizes with heavy barriers. 101 | #[inline] 102 | pub fn light_load() -> Ordering { 103 | // There is no difference between `Acquire` and `SeqCst` loads on most 104 | // platforms, so checking the strategy is not worth it. 105 | Ordering::SeqCst 106 | } 107 | 108 | /// Issues a heavy memory barrier for slow path. 109 | #[inline] 110 | pub fn heavy() { 111 | // Issue a private expedited membarrier using the `sys_membarrier()` system 112 | // call, if supported; otherwise, fall back to `mprotect()`-based 113 | // process-wide memory barrier. 114 | match STRATEGY.load(Ordering::Relaxed) { 115 | MEMBARRIER => membarrier::barrier(), 116 | MPROTECT => mprotect::barrier(), 117 | _ => atomic::fence(atomic::Ordering::SeqCst), 118 | } 119 | } 120 | 121 | /// Use the `membarrier` system call. 122 | const MEMBARRIER: u8 = 0; 123 | 124 | /// Use the `mprotect`-based trick. 125 | const MPROTECT: u8 = 1; 126 | 127 | /// Use `SeqCst` fences. 128 | const FALLBACK: u8 = 2; 129 | 130 | /// The right strategy to use on the current machine. 131 | static STRATEGY: AtomicU8 = AtomicU8::new(FALLBACK); 132 | 133 | /// Perform runtime detection for a membarrier strategy. 
134 | pub fn detect() { 135 | if membarrier::is_supported() { 136 | STRATEGY.store(MEMBARRIER, Ordering::Relaxed); 137 | } else if mprotect::is_supported() { 138 | STRATEGY.store(MPROTECT, Ordering::Relaxed); 139 | } 140 | } 141 | 142 | macro_rules! fatal_assert { 143 | ($cond:expr) => { 144 | if !$cond { 145 | #[allow(unused_unsafe)] 146 | unsafe { 147 | libc::abort(); 148 | } 149 | } 150 | }; 151 | } 152 | 153 | mod membarrier { 154 | /// Commands for the membarrier system call. 155 | /// 156 | /// # Caveat 157 | /// 158 | /// We're defining it here because, unfortunately, the `libc` crate 159 | /// currently doesn't expose `membarrier_cmd` for us. You can 160 | /// find the numbers in the [Linux source code](https://github.com/torvalds/linux/blob/master/include/uapi/linux/membarrier.h). 161 | /// 162 | /// This enum should really be `#[repr(libc::c_int)]`, but Rust 163 | /// currently doesn't allow it. 164 | #[repr(i32)] 165 | #[allow(dead_code, non_camel_case_types)] 166 | enum membarrier_cmd { 167 | MEMBARRIER_CMD_QUERY = 0, 168 | MEMBARRIER_CMD_GLOBAL = (1 << 0), 169 | MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1), 170 | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2), 171 | MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), 172 | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4), 173 | MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5), 174 | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6), 175 | } 176 | 177 | /// Call the `sys_membarrier` system call. 178 | #[inline] 179 | fn sys_membarrier(cmd: membarrier_cmd) -> libc::c_long { 180 | unsafe { libc::syscall(libc::SYS_membarrier, cmd as libc::c_int, 0 as libc::c_int) } 181 | } 182 | 183 | /// Returns `true` if the `sys_membarrier` call is available. 184 | pub fn is_supported() -> bool { 185 | // Queries which membarrier commands are supported. Checks if private expedited 186 | // membarrier is supported. 187 | let ret = sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_QUERY); 188 | if ret < 0 189 | || ret & membarrier_cmd::MEMBARRIER_CMD_PRIVATE_EXPEDITED as libc::c_long == 0 190 | || ret & membarrier_cmd::MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED as libc::c_long 191 | == 0 192 | { 193 | return false; 194 | } 195 | 196 | // Registers the current process as a user of private expedited membarrier. 197 | if sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED) < 0 { 198 | return false; 199 | } 200 | 201 | true 202 | } 203 | 204 | /// Executes a heavy `sys_membarrier`-based barrier. 205 | #[inline] 206 | pub fn barrier() { 207 | fatal_assert!(sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_PRIVATE_EXPEDITED) >= 0); 208 | } 209 | } 210 | 211 | mod mprotect { 212 | use std::cell::UnsafeCell; 213 | use std::mem::MaybeUninit; 214 | use std::ptr; 215 | use std::sync::{atomic, OnceLock}; 216 | 217 | struct Barrier { 218 | lock: UnsafeCell, 219 | page: u64, 220 | page_size: libc::size_t, 221 | } 222 | 223 | unsafe impl Sync for Barrier {} 224 | 225 | impl Barrier { 226 | /// Issues a process-wide barrier by changing access protections of 227 | /// a single mmap-ed page. This method is not as fast as 228 | /// the `sys_membarrier()` call, but works very 229 | /// similarly. 230 | #[inline] 231 | fn barrier(&self) { 232 | let page = self.page as *mut libc::c_void; 233 | 234 | unsafe { 235 | // Lock the mutex. 236 | fatal_assert!(libc::pthread_mutex_lock(self.lock.get()) == 0); 237 | 238 | // Set the page access protections to read + write. 
239 | fatal_assert!( 240 | libc::mprotect(page, self.page_size, libc::PROT_READ | libc::PROT_WRITE,) 241 | == 0 242 | ); 243 | 244 | // Ensure that the page is dirty before we change the protection so that we 245 | // prevent the OS from skipping the global TLB flush. 246 | let atomic_usize = &*(page as *const atomic::AtomicUsize); 247 | atomic_usize.fetch_add(1, atomic::Ordering::SeqCst); 248 | 249 | // Set the page access protections to none. 250 | // 251 | // Changing a page protection from read + write to none causes the OS to issue 252 | // an interrupt to flush TLBs on all processors. This also results in flushing 253 | // the processor buffers. 254 | fatal_assert!(libc::mprotect(page, self.page_size, libc::PROT_NONE) == 0); 255 | 256 | // Unlock the mutex. 257 | fatal_assert!(libc::pthread_mutex_unlock(self.lock.get()) == 0); 258 | } 259 | } 260 | } 261 | 262 | /// An alternative solution to `sys_membarrier` that works on older 263 | /// Linux kernels and x86/x86-64 systems. 264 | static BARRIER: OnceLock = OnceLock::new(); 265 | 266 | /// Returns `true` if the `mprotect`-based trick is supported. 267 | pub fn is_supported() -> bool { 268 | cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") 269 | } 270 | 271 | /// Executes a heavy `mprotect`-based barrier. 272 | #[inline] 273 | pub fn barrier() { 274 | let barrier = BARRIER.get_or_init(|| { 275 | unsafe { 276 | // Find out the page size on the current system. 277 | let page_size = libc::sysconf(libc::_SC_PAGESIZE); 278 | fatal_assert!(page_size > 0); 279 | let page_size = page_size as libc::size_t; 280 | 281 | // Create a dummy page. 282 | let page = libc::mmap( 283 | ptr::null_mut(), 284 | page_size, 285 | libc::PROT_NONE, 286 | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, 287 | -1 as libc::c_int, 288 | 0 as libc::off_t, 289 | ); 290 | fatal_assert!(page != libc::MAP_FAILED); 291 | fatal_assert!(page as libc::size_t % page_size == 0); 292 | 293 | // Locking the page ensures that it stays in memory during the two mprotect 294 | // calls in `Barrier::barrier()`. If the page was unmapped between those calls, 295 | // they would not have the expected effect of generating IPI. 296 | libc::mlock(page, page_size as libc::size_t); 297 | 298 | // Initialize the mutex. 299 | let lock = UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER); 300 | let mut attr = MaybeUninit::::uninit(); 301 | fatal_assert!(libc::pthread_mutexattr_init(attr.as_mut_ptr()) == 0); 302 | let mut attr = attr.assume_init(); 303 | fatal_assert!( 304 | libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL) == 0 305 | ); 306 | fatal_assert!(libc::pthread_mutex_init(lock.get(), &attr) == 0); 307 | fatal_assert!(libc::pthread_mutexattr_destroy(&mut attr) == 0); 308 | 309 | let page = page as u64; 310 | 311 | Barrier { 312 | lock, 313 | page, 314 | page_size, 315 | } 316 | } 317 | }); 318 | 319 | barrier.barrier(); 320 | } 321 | } 322 | } 323 | 324 | #[cfg(all(target_os = "windows", feature = "fast-barrier", not(miri)))] 325 | mod windows { 326 | use core::sync::atomic::{self, Ordering}; 327 | use windows_sys; 328 | 329 | pub fn detect() {} 330 | 331 | /// The ordering for a store operation that synchronizes with heavy 332 | /// barriers. 333 | /// 334 | /// Must be followed by a light barrier. 335 | #[inline] 336 | pub fn light_store() -> Ordering { 337 | Ordering::Relaxed 338 | } 339 | 340 | /// Issues a light memory barrier for a preceding store or subsequent load 341 | /// operation. 
342 | #[inline] 343 | pub fn light_barrier() { 344 | atomic::compiler_fence(atomic::Ordering::SeqCst) 345 | } 346 | 347 | /// The ordering for a load operation that synchronizes with heavy barriers. 348 | #[inline] 349 | pub fn light_load() -> Ordering { 350 | Ordering::Relaxed 351 | } 352 | 353 | /// Issues a heavy memory barrier for slow path that synchronizes with light 354 | /// stores. 355 | #[inline] 356 | pub fn heavy() { 357 | // Invoke the `FlushProcessWriteBuffers()` system call. 358 | unsafe { windows_sys::Win32::System::Threading::FlushProcessWriteBuffers() } 359 | } 360 | } 361 | -------------------------------------------------------------------------------- /src/raw/mod.rs: -------------------------------------------------------------------------------- 1 | mod collector; 2 | mod tls; 3 | mod utils; 4 | 5 | pub mod membarrier; 6 | 7 | pub use collector::{Collector, Reservation}; 8 | pub use tls::Thread; 9 | -------------------------------------------------------------------------------- /src/raw/tls/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | mod thread_id; 9 | 10 | use std::cell::UnsafeCell; 11 | use std::mem::MaybeUninit; 12 | use std::sync::atomic::{self, AtomicBool, AtomicPtr, Ordering}; 13 | use std::{mem, ptr}; 14 | 15 | pub use thread_id::Thread; 16 | 17 | /// Per-object thread local storage. 18 | pub struct ThreadLocal { 19 | /// Buckets with increasing power-of-two sizes. 20 | buckets: [AtomicPtr>; thread_id::BUCKETS], 21 | } 22 | 23 | /// An entry in a `ThreadLocal`. 24 | struct Entry { 25 | /// A flag for initialization. 26 | present: AtomicBool, 27 | 28 | /// The value for this entry. 29 | value: UnsafeCell>, 30 | } 31 | 32 | /// Safety: 33 | /// 34 | /// - We expose mutable references to values when the `ThreadLocal` is dropped, 35 | /// hence `T: Send`. 36 | /// - However, it is impossible to obtain shared references to `T`s except by 37 | /// sharing the `ThreadLocal`, so `T: Sync` is not required. 38 | unsafe impl Send for ThreadLocal {} 39 | 40 | /// Safety: 41 | /// 42 | /// - Values can be inserted through a shared reference and thus dropped on 43 | /// another thread than they were created on, hence `T: Send`. 44 | /// - However, there is no way to access a `T` inserted by another thread except 45 | /// through iteration, which is unsafe, so `T: Sync` is not required. 46 | unsafe impl Sync for ThreadLocal {} 47 | 48 | impl ThreadLocal { 49 | /// Create a `ThreadLocal` container with the given initial capacity. 50 | pub fn with_capacity(capacity: usize) -> ThreadLocal { 51 | let init = match capacity { 52 | 0 => 0, 53 | // Initialize enough buckets for `capacity` elements. 54 | n => Thread::new(n).bucket, 55 | }; 56 | 57 | let mut buckets = [ptr::null_mut(); thread_id::BUCKETS]; 58 | 59 | // Initialize the initial buckets. 60 | for (i, bucket) in buckets[..=init].iter_mut().enumerate() { 61 | let bucket_size = Thread::bucket_capacity(i); 62 | *bucket = allocate_bucket::(bucket_size); 63 | } 64 | 65 | ThreadLocal { 66 | // Safety: `AtomicPtr` has the same representation as `*mut T`. 67 | buckets: unsafe { mem::transmute(buckets) }, 68 | } 69 | } 70 | 71 | /// Load the slot for the given `thread`, initializing it with a default 72 | /// value if necessary. 
73 | /// 74 | /// # Safety 75 | /// 76 | /// The current thread must have unique access to the slot for the provided 77 | /// `thread`. 78 | #[inline] 79 | pub unsafe fn load(&self, thread: Thread) -> &T 80 | where 81 | T: Default, 82 | { 83 | // Safety: Guaranteed by caller. 84 | unsafe { self.load_or(T::default, thread) } 85 | } 86 | 87 | /// Load the entry for the given `thread`, initializing it using the 88 | /// provided function if necessary. 89 | /// 90 | /// # Safety 91 | /// 92 | /// The current thread must have unique access to the slot for the given 93 | /// `thread`. 94 | #[inline] 95 | pub unsafe fn load_or(&self, create: impl Fn() -> T, thread: Thread) -> &T { 96 | // Safety: `thread.bucket` is always in bounds. 97 | let bucket = unsafe { self.buckets.get_unchecked(thread.bucket) }; 98 | let mut bucket_ptr = bucket.load(Ordering::Acquire); 99 | 100 | if bucket_ptr.is_null() { 101 | bucket_ptr = self.initialize(bucket, thread); 102 | } 103 | 104 | // Safety: `thread.entry` is always in bounds, and we ensured the bucket was 105 | // initialized above. 106 | let entry = unsafe { &*bucket_ptr.add(thread.entry) }; 107 | 108 | // Relaxed: Only the current thread can set the value. 109 | if !entry.present.load(Ordering::Relaxed) { 110 | // Safety: Guaranteed by caller. 111 | unsafe { self.write(entry, create) } 112 | } 113 | 114 | // Safety: The entry was initialized above. 115 | unsafe { (*entry.value.get()).assume_init_ref() } 116 | } 117 | 118 | /// Load the entry for the current thread, returning `None` if it has not 119 | /// been initialized. 120 | #[cfg(test)] 121 | fn try_load(&self) -> Option<&T> { 122 | let thread = Thread::current(); 123 | 124 | // Safety: `thread.bucket` is always in bounds. 125 | let bucket_ptr = 126 | unsafe { self.buckets.get_unchecked(thread.bucket) }.load(Ordering::Acquire); 127 | 128 | if bucket_ptr.is_null() { 129 | return None; 130 | } 131 | 132 | // Safety: `thread.entry` is always in bounds, and we ensured the bucket was 133 | // initialized above. 134 | let entry = unsafe { &*bucket_ptr.add(thread.entry) }; 135 | 136 | // Relaxed: Only the current thread can set the value. 137 | if !entry.present.load(Ordering::Relaxed) { 138 | return None; 139 | } 140 | 141 | // Safety: The entry was initialized above. 142 | unsafe { Some((*entry.value.get()).assume_init_ref()) } 143 | } 144 | 145 | /// Initialize the entry for the given thread. 146 | /// 147 | /// # Safety 148 | /// 149 | /// The current thread must have unique access to the uninitialized `entry`. 150 | #[cold] 151 | #[inline(never)] 152 | unsafe fn write(&self, entry: &Entry, create: impl Fn() -> T) { 153 | // Insert the new element into the bucket. 154 | // 155 | // Safety: Guaranteed by caller. 156 | unsafe { entry.value.get().write(MaybeUninit::new(create())) }; 157 | 158 | // Release: Necessary for synchronization with iterators. 159 | entry.present.store(true, Ordering::Release); 160 | 161 | // Synchronize with the heavy barrier in `retire`: 162 | // - If this fence comes first, the thread retiring will see our entry. 163 | // - If their barrier comes first, we will see the new values of any pointers 164 | // being retired by that thread. 165 | // 166 | // Note that we do not use a light barrier here because the initialization of 167 | // the bucket is not performed with the light-store ordering. We 168 | // probably could avoid a full fence here, but there are no serious 169 | // performance implications. 
170 | atomic::fence(Ordering::SeqCst); 171 | } 172 | 173 | // Initialize the bucket for the given thread's entry. 174 | #[cold] 175 | #[inline(never)] 176 | fn initialize(&self, bucket: &AtomicPtr>, thread: Thread) -> *mut Entry { 177 | let new_bucket = allocate_bucket(Thread::bucket_capacity(thread.bucket)); 178 | 179 | match bucket.compare_exchange( 180 | ptr::null_mut(), 181 | new_bucket, 182 | // Release: If we win the race, synchronize with Acquire loads of the bucket from other 183 | // threads. 184 | Ordering::Release, 185 | // Acquire: If we lose the race, synchronize with the initialization of the bucket that 186 | // won. 187 | Ordering::Acquire, 188 | ) { 189 | // We won the race and initialized the bucket. 190 | Ok(_) => new_bucket, 191 | 192 | // We lost the race and can use the bucket that was stored instead. 193 | Err(other) => unsafe { 194 | // Safety: The pointer has not been shared. 195 | let _ = Box::from_raw(ptr::slice_from_raw_parts_mut( 196 | new_bucket, 197 | Thread::bucket_capacity(thread.bucket), 198 | )); 199 | 200 | other 201 | }, 202 | } 203 | } 204 | 205 | /// Returns an iterator over all active thread slots. 206 | /// 207 | /// # Safety 208 | /// 209 | /// The values stored in the `ThreadLocal` by threads other than the current 210 | /// one must be sound to access. 211 | #[inline] 212 | pub unsafe fn iter(&self) -> Iter<'_, T> { 213 | Iter { 214 | index: 0, 215 | bucket: 0, 216 | thread_local: self, 217 | bucket_size: Thread::bucket_capacity(0), 218 | } 219 | } 220 | } 221 | 222 | impl Drop for ThreadLocal { 223 | fn drop(&mut self) { 224 | // Drop any buckets that were allocatec. 225 | for (i, bucket) in self.buckets.iter_mut().enumerate() { 226 | let bucket_ptr = *bucket.get_mut(); 227 | 228 | if bucket_ptr.is_null() { 229 | continue; 230 | } 231 | 232 | let bucket_size = Thread::bucket_capacity(i); 233 | 234 | // Safety: We have `&mut self` and ensured the bucket was initialized. 235 | let _ = 236 | unsafe { Box::from_raw(std::slice::from_raw_parts_mut(bucket_ptr, bucket_size)) }; 237 | } 238 | } 239 | } 240 | 241 | impl Drop for Entry { 242 | fn drop(&mut self) { 243 | if *self.present.get_mut() { 244 | // Safety: We have `&mut self` and ensured the entry was initialized. 245 | unsafe { 246 | ptr::drop_in_place((*self.value.get()).as_mut_ptr()); 247 | } 248 | } 249 | } 250 | } 251 | 252 | /// An iterator over a `ThreadLocal`. 253 | pub struct Iter<'a, T> { 254 | bucket: usize, 255 | index: usize, 256 | bucket_size: usize, 257 | thread_local: &'a ThreadLocal, 258 | } 259 | 260 | impl<'a, T> Iterator for Iter<'a, T> { 261 | type Item = &'a T; 262 | 263 | #[inline] 264 | fn next(&mut self) -> Option { 265 | // Because we reuse thread IDs, a new thread could join and be inserted into the 266 | // middle of the vector, meaning we have to check all the buckets here. 267 | // Yielding extra values is fine, but not yielding all originally active 268 | // threads is not. 269 | while self.bucket < thread_id::BUCKETS { 270 | // Safety: We ensured `self.bucket` was in-bounds above. 271 | let bucket = unsafe { 272 | self.thread_local 273 | .buckets 274 | .get_unchecked(self.bucket) 275 | .load(Ordering::Acquire) 276 | }; 277 | 278 | if !bucket.is_null() { 279 | while self.index < self.bucket_size { 280 | // Safety: We ensured `self.index` was in-bounds above. 281 | let entry = unsafe { &*bucket.add(self.index) }; 282 | 283 | // Advance to the next entry. 
284 | self.index += 1; 285 | 286 | if entry.present.load(Ordering::Acquire) { 287 | // Safety: We ensured the entry was initialized above, and the Acquire load 288 | // ensures we synchronized with its initialization. 289 | return Some(unsafe { (*entry.value.get()).assume_init_ref() }); 290 | } 291 | } 292 | } 293 | 294 | // Advance to the next bucket. 295 | self.index = 0; 296 | self.bucket += 1; 297 | self.bucket_size <<= 1; 298 | } 299 | 300 | None 301 | } 302 | } 303 | 304 | /// Allocate a bucket with the given capacity. 305 | fn allocate_bucket(capacity: usize) -> *mut Entry { 306 | let entries = (0..capacity) 307 | .map(|_| Entry:: { 308 | present: AtomicBool::new(false), 309 | value: UnsafeCell::new(MaybeUninit::uninit()), 310 | }) 311 | .collect::]>>(); 312 | 313 | Box::into_raw(entries) as *mut _ 314 | } 315 | 316 | #[cfg(test)] 317 | #[allow(clippy::redundant_closure)] 318 | mod tests { 319 | use super::*; 320 | 321 | use std::sync::atomic::AtomicUsize; 322 | use std::sync::atomic::Ordering::Relaxed; 323 | use std::sync::{Arc, Barrier}; 324 | use std::thread; 325 | 326 | fn make_create() -> Arc usize + Send + Sync> { 327 | let count = AtomicUsize::new(0); 328 | Arc::new(move || count.fetch_add(1, Relaxed)) 329 | } 330 | 331 | #[test] 332 | fn same_thread() { 333 | // Safety: Loading with `Thread::current` is always sound. 334 | unsafe { 335 | let create = make_create(); 336 | let tls = ThreadLocal::with_capacity(1); 337 | assert_eq!(None, tls.try_load()); 338 | assert_eq!(0, *tls.load_or(|| create(), Thread::current())); 339 | assert_eq!(Some(&0), tls.try_load()); 340 | assert_eq!(0, *tls.load_or(|| create(), Thread::current())); 341 | assert_eq!(Some(&0), tls.try_load()); 342 | assert_eq!(0, *tls.load_or(|| create(), Thread::current())); 343 | assert_eq!(Some(&0), tls.try_load()); 344 | } 345 | } 346 | 347 | #[test] 348 | fn different_thread() { 349 | // Safety: Loading with `Thread::current` is always sound. 350 | unsafe { 351 | let create = make_create(); 352 | let tls = Arc::new(ThreadLocal::with_capacity(1)); 353 | assert_eq!(None, tls.try_load()); 354 | assert_eq!(0, *tls.load_or(|| create(), Thread::current())); 355 | assert_eq!(Some(&0), tls.try_load()); 356 | 357 | let tls2 = tls.clone(); 358 | let create2 = create.clone(); 359 | thread::spawn(move || { 360 | assert_eq!(None, tls2.try_load()); 361 | assert_eq!(1, *tls2.load_or(|| create2(), Thread::current())); 362 | assert_eq!(Some(&1), tls2.try_load()); 363 | }) 364 | .join() 365 | .unwrap(); 366 | 367 | assert_eq!(Some(&0), tls.try_load()); 368 | assert_eq!(0, *tls.load_or(|| create(), Thread::current())); 369 | } 370 | } 371 | 372 | #[test] 373 | fn iter() { 374 | // Safety: Loading with `Thread::current` is always sound. 375 | unsafe { 376 | let tls = Arc::new(ThreadLocal::with_capacity(1)); 377 | tls.load_or(|| Box::new(1), Thread::current()); 378 | 379 | let tls2 = tls.clone(); 380 | thread::spawn(move || { 381 | tls2.load_or(|| Box::new(2), Thread::current()); 382 | let tls3 = tls2.clone(); 383 | thread::spawn(move || { 384 | tls3.load_or(|| Box::new(3), Thread::current()); 385 | }) 386 | .join() 387 | .unwrap(); 388 | drop(tls2); 389 | }) 390 | .join() 391 | .unwrap(); 392 | 393 | let tls = Arc::try_unwrap(tls).unwrap_or_else(|_| panic!(".")); 394 | 395 | let mut v = tls.iter().map(|x| **x).collect::>(); 396 | v.sort_unstable(); 397 | assert_eq!(vec![1, 2, 3], v); 398 | } 399 | } 400 | 401 | #[test] 402 | fn iter_snapshot() { 403 | // Safety: Loading with `Thread::current` is always sound. 
404 | unsafe { 405 | let tls = Arc::new(ThreadLocal::with_capacity(1)); 406 | tls.load_or(|| Box::new(1), Thread::current()); 407 | 408 | let iterator = tls.iter(); 409 | tls.load_or(|| Box::new(2), Thread::current()); 410 | 411 | let v = iterator.map(|x| **x).collect::>(); 412 | assert_eq!(vec![1], v); 413 | } 414 | } 415 | 416 | #[test] 417 | fn test_drop() { 418 | let local = ThreadLocal::with_capacity(1); 419 | struct Dropped(Arc); 420 | impl Drop for Dropped { 421 | fn drop(&mut self) { 422 | self.0.fetch_add(1, Relaxed); 423 | } 424 | } 425 | 426 | let dropped = Arc::new(AtomicUsize::new(0)); 427 | // Safety: Loading with `Thread::current` is always sound. 428 | unsafe { 429 | local.load_or(|| Dropped(dropped.clone()), Thread::current()); 430 | } 431 | assert_eq!(dropped.load(Relaxed), 0); 432 | drop(local); 433 | assert_eq!(dropped.load(Relaxed), 1); 434 | } 435 | 436 | #[test] 437 | fn iter_many() { 438 | let tls = Arc::new(ThreadLocal::with_capacity(0)); 439 | let barrier = Arc::new(Barrier::new(65)); 440 | 441 | for i in 0..64 { 442 | let tls = tls.clone(); 443 | let barrier = barrier.clone(); 444 | thread::spawn(move || { 445 | dbg!(i); 446 | // Safety: Loading with `Thread::current` is always sound. 447 | unsafe { 448 | tls.load_or(|| 1, Thread::current()); 449 | } 450 | barrier.wait(); 451 | }); 452 | } 453 | 454 | barrier.wait(); 455 | unsafe { assert_eq!(tls.iter().count(), 64) } 456 | } 457 | } 458 | -------------------------------------------------------------------------------- /src/raw/tls/thread_id.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Amanieu d'Antras 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | use std::cell::Cell; 9 | use std::cmp::Reverse; 10 | use std::collections::BinaryHeap; 11 | use std::sync::{Mutex, OnceLock}; 12 | 13 | /// An allocator for thread IDs. 14 | /// 15 | /// The allocator attempts to aggressively reuse thread IDs where possible to 16 | /// avoid cases where a `ThreadLocal` grows indefinitely when it is used by many 17 | /// short-lived threads. 18 | #[derive(Default)] 19 | struct ThreadIdManager { 20 | free_from: usize, 21 | free_list: BinaryHeap>, 22 | } 23 | 24 | impl ThreadIdManager { 25 | /// Allocate a new thread ID. 26 | fn alloc(&mut self) -> usize { 27 | if let Some(id) = self.free_list.pop() { 28 | id.0 29 | } else { 30 | let id = self.free_from; 31 | self.free_from = self 32 | .free_from 33 | .checked_add(1) 34 | .expect("Ran out of thread IDs"); 35 | id 36 | } 37 | } 38 | 39 | /// Free a thread ID for reuse. 40 | fn free(&mut self, id: usize) { 41 | self.free_list.push(Reverse(id)); 42 | } 43 | } 44 | 45 | /// Returns a reference to the global thread ID manager. 46 | fn thread_id_manager() -> &'static Mutex { 47 | static THREAD_ID_MANAGER: OnceLock> = OnceLock::new(); 48 | THREAD_ID_MANAGER.get_or_init(Default::default) 49 | } 50 | 51 | /// A unique identifier for a slot in a triangular vector, such as 52 | /// `ThreadLocal`. 53 | /// 54 | /// A thread ID may be reused after the corresponding thread exits. 55 | #[derive(Clone, Copy)] 56 | pub struct Thread { 57 | /// A unique identifier for the thread. 58 | pub id: usize, 59 | 60 | /// The index of the entry in the bucket. 61 | pub entry: usize, 62 | 63 | /// The index of the bucket. 
64 | pub bucket: usize, 65 | } 66 | 67 | /// The number of entries that are skipped from the start of a vector. 68 | /// 69 | /// Index calculations assume that buckets are of sizes `[2^0, 2^1, ..., 2^63]`. 70 | /// To skip shorter buckets and avoid unnecessary allocations, the zeroeth entry 71 | /// index is remapped to a larger index (`2^0 + ... + 2^4 = 31`). 72 | const ZERO_ENTRY: usize = 31; 73 | 74 | /// The number of buckets that are skipped from the start of a vector. 75 | /// 76 | /// This is the index that the zeroeth bucket index is remapped to (currently 77 | /// `5`). 78 | const ZERO_BUCKET: usize = (usize::BITS - ZERO_ENTRY.leading_zeros()) as usize; 79 | 80 | /// The number of buckets in a vector. 81 | pub const BUCKETS: usize = (usize::BITS as usize) - ZERO_BUCKET; 82 | 83 | /// The maximum index of an element in the vector. 84 | /// 85 | /// Note that capacity of the vector is: 86 | /// `2^ZERO_BUCKET + ... + 2^63 = usize::MAX - ZERO_INDEX`. 87 | const MAX_INDEX: usize = usize::MAX - ZERO_ENTRY - 1; 88 | 89 | impl Thread { 90 | /// Returns a `ThreadId` identifier from a generic unique thread ID. 91 | /// 92 | /// The ID provided must not exceed `MAX_INDEX`. 93 | #[inline] 94 | pub fn new(id: usize) -> Thread { 95 | if id > MAX_INDEX { 96 | panic!("exceeded maximum thread count") 97 | } 98 | 99 | // Offset the ID based on the number of entries we skip at the start of the 100 | // buckets array. 101 | let index = id + ZERO_ENTRY; 102 | 103 | // Calculate the bucket index based on ⌊log2(index)⌋. 104 | let bucket = BUCKETS - ((index + 1).leading_zeros() as usize) - 1; 105 | 106 | // Offset the absolute index by the capacity of the preceding buckets. 107 | let entry = index - (Thread::bucket_capacity(bucket) - 1); 108 | 109 | Thread { id, bucket, entry } 110 | } 111 | 112 | /// Returns the capacity of the bucket at the given index. 113 | #[inline] 114 | pub fn bucket_capacity(bucket: usize) -> usize { 115 | 1 << (bucket + ZERO_BUCKET) 116 | } 117 | 118 | /// Get the current thread. 119 | #[inline] 120 | pub fn current() -> Thread { 121 | THREAD.with(|thread| { 122 | if let Some(thread) = thread.get() { 123 | thread 124 | } else { 125 | Thread::init_slow(thread) 126 | } 127 | }) 128 | } 129 | 130 | /// Slow path for allocating a thread ID. 131 | #[cold] 132 | #[inline(never)] 133 | fn init_slow(thread: &Cell>) -> Thread { 134 | let new = Thread::create(); 135 | thread.set(Some(new)); 136 | THREAD_GUARD.with(|guard| guard.id.set(new.id)); 137 | new 138 | } 139 | 140 | /// Create a new thread. 141 | pub fn create() -> Thread { 142 | Thread::new(thread_id_manager().lock().unwrap().alloc()) 143 | } 144 | 145 | /// Free the given thread. 146 | /// 147 | /// # Safety 148 | /// 149 | /// This function must only be called once on a given thread. 150 | pub unsafe fn free(id: usize) { 151 | thread_id_manager().lock().unwrap().free(id); 152 | } 153 | } 154 | 155 | // This is split into 2 thread-local variables so that we can check whether the 156 | // thread is initialized without having to register a thread-local destructor. 157 | // 158 | // This makes the fast path smaller. 159 | thread_local! { static THREAD: Cell> = const { Cell::new(None) }; } 160 | thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; } 161 | 162 | // Guard to ensure the thread ID is released on thread exit. 
163 | struct ThreadGuard { 164 | // We keep a copy of the thread ID in the `ThreadGuard`: we can't reliably access 165 | // `THREAD` in our `Drop` impl due to the unpredictable order of TLS destructors. 166 | id: Cell, 167 | } 168 | 169 | impl Drop for ThreadGuard { 170 | fn drop(&mut self) { 171 | // Release the thread ID. Any further accesses to the thread ID will go through 172 | // get_slow which will either panic or initialize a new ThreadGuard. 173 | let _ = THREAD.try_with(|thread| thread.set(None)); 174 | 175 | // Safety: We are in `drop` and the current thread uniquely owns this ID. 176 | unsafe { Thread::free(self.id.get()) }; 177 | } 178 | } 179 | 180 | #[cfg(test)] 181 | mod tests { 182 | use super::*; 183 | 184 | #[test] 185 | fn thread() { 186 | assert_eq!(Thread::bucket_capacity(0), 32); 187 | for i in 0..32 { 188 | let thread = Thread::new(i); 189 | assert_eq!(thread.id, i); 190 | assert_eq!(thread.bucket, 0); 191 | assert_eq!(thread.entry, i); 192 | } 193 | 194 | assert_eq!(Thread::bucket_capacity(1), 64); 195 | for i in 33..96 { 196 | let thread = Thread::new(i); 197 | assert_eq!(thread.id, i); 198 | assert_eq!(thread.bucket, 1); 199 | assert_eq!(thread.entry, i - 32); 200 | } 201 | 202 | assert_eq!(Thread::bucket_capacity(2), 128); 203 | for i in 96..224 { 204 | let thread = Thread::new(i); 205 | assert_eq!(thread.id, i); 206 | assert_eq!(thread.bucket, 2); 207 | assert_eq!(thread.entry, i - 96); 208 | } 209 | } 210 | 211 | #[test] 212 | fn max_entries() { 213 | let mut entries = 0; 214 | for i in 0..BUCKETS { 215 | entries += Thread::bucket_capacity(i); 216 | } 217 | assert_eq!(entries, MAX_INDEX + 1); 218 | 219 | let max = Thread::new(MAX_INDEX); 220 | assert_eq!(max.id, MAX_INDEX); 221 | assert_eq!(max.bucket, BUCKETS - 1); 222 | assert_eq!(Thread::bucket_capacity(BUCKETS - 1), 1 << (usize::BITS - 1)); 223 | assert_eq!(max.entry, (1 << (usize::BITS - 1)) - 1); 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /src/raw/utils.rs: -------------------------------------------------------------------------------- 1 | /// Pads and aligns a value to the length of a cache line. 2 | #[cfg_attr( 3 | any( 4 | target_arch = "x86_64", 5 | target_arch = "aarch64", 6 | target_arch = "powerpc64", 7 | ), 8 | repr(align(128)) 9 | )] 10 | #[cfg_attr( 11 | any( 12 | target_arch = "arm", 13 | target_arch = "mips", 14 | target_arch = "mips64", 15 | target_arch = "riscv64", 16 | ), 17 | repr(align(32)) 18 | )] 19 | #[cfg_attr(target_arch = "s390x", repr(align(256)))] 20 | #[cfg_attr( 21 | not(any( 22 | target_arch = "x86_64", 23 | target_arch = "aarch64", 24 | target_arch = "powerpc64", 25 | target_arch = "arm", 26 | target_arch = "mips", 27 | target_arch = "mips64", 28 | target_arch = "riscv64", 29 | target_arch = "s390x", 30 | )), 31 | repr(align(64)) 32 | )] 33 | #[derive(Default)] 34 | pub struct CachePadded { 35 | pub value: T, 36 | } 37 | 38 | impl std::ops::Deref for CachePadded { 39 | type Target = T; 40 | 41 | fn deref(&self) -> &T { 42 | &self.value 43 | } 44 | } 45 | 46 | impl std::ops::DerefMut for CachePadded { 47 | fn deref_mut(&mut self) -> &mut T { 48 | &mut self.value 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/reclaim.rs: -------------------------------------------------------------------------------- 1 | //! Common memory reclaimers. 2 | //! 3 | //! The functions in this module can be passed to 4 | //! 
--------------------------------------------------------------------------------
/src/reclaim.rs:
--------------------------------------------------------------------------------
1 | //! Common memory reclaimers.
2 | //!
3 | //! The functions in this module can be passed to
4 | //! [`retire`](crate::Collector::retire) to free allocated memory or run drop
5 | //! glue. See [the guide](crate#custom-reclaimers) for details about memory
6 | //! reclamation and writing custom reclaimers.
7 | 
8 | use std::ptr;
9 | 
10 | use crate::Collector;
11 | 
12 | /// Reclaims memory allocated with [`Box`].
13 | ///
14 | /// # Safety
15 | ///
16 | /// The safety requirements of [`Box::from_raw`] apply.
17 | pub unsafe fn boxed<T>(ptr: *mut T, _collector: &Collector) {
18 |     unsafe { drop(Box::from_raw(ptr)) }
19 | }
20 | 
21 | /// Reclaims memory by dropping the value in place.
22 | ///
23 | /// # Safety
24 | ///
25 | /// The safety requirements of [`ptr::drop_in_place`] apply.
26 | pub unsafe fn in_place<T>(ptr: *mut T, _collector: &Collector) {
27 |     unsafe { ptr::drop_in_place::<T>(ptr) }
28 | }
29 | 
--------------------------------------------------------------------------------
/tests/lib.rs:
--------------------------------------------------------------------------------
1 | use seize::{reclaim, Collector, Guard};
2 | 
3 | use std::mem::ManuallyDrop;
4 | use std::ptr;
5 | use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
6 | use std::sync::{mpsc, Arc, Barrier};
7 | use std::thread;
8 | 
9 | #[test]
10 | fn is_sync() {
11 |     fn assert_send_sync<T: Send + Sync>() {}
12 |     assert_send_sync::();
13 |     assert_send_sync::();
14 | }
15 | 
16 | struct DropTrack(Arc<AtomicUsize>);
17 | 
18 | impl Drop for DropTrack {
19 |     fn drop(&mut self) {
20 |         self.0.fetch_add(1, Ordering::Relaxed);
21 |     }
22 | }
23 | 
24 | fn boxed<T>(value: T) -> *mut T {
25 |     Box::into_raw(Box::new(value))
26 | }
27 | 
28 | struct UnsafeSend<T>(T);
29 | unsafe impl<T> Send for UnsafeSend<T> {}
30 | 
31 | #[test]
32 | fn single_thread() {
33 |     let collector = Arc::new(Collector::new().batch_size(2));
34 |     let dropped = Arc::new(AtomicUsize::new(0));
35 | 
36 |     // multiple of 2
37 |     let items = cfg::ITEMS & !1;
38 | 
39 |     for _ in 0..items {
40 |         let zero = AtomicPtr::new(boxed(DropTrack(dropped.clone())));
41 | 
42 |         {
43 |             let guard = collector.enter();
44 |             let _ = guard.protect(&zero, Ordering::Relaxed);
45 |         }
46 | 
47 |         {
48 |             let guard = collector.enter();
49 |             let value = guard.protect(&zero, Ordering::Acquire);
50 |             unsafe { collector.retire(value, reclaim::boxed) }
51 |         }
52 |     }
53 | 
54 |     assert_eq!(dropped.load(Ordering::Relaxed), items);
55 | }
56 | 
57 | #[test]
58 | fn two_threads() {
59 |     let collector = Arc::new(Collector::new().batch_size(3));
60 | 
61 |     let a_dropped = Arc::new(AtomicUsize::new(0));
62 |     let b_dropped = Arc::new(AtomicUsize::new(0));
63 | 
64 |     let (tx, rx) = mpsc::channel();
65 | 
66 |     let one = Arc::new(AtomicPtr::new(boxed(DropTrack(a_dropped.clone()))));
67 | 
68 |     let h = thread::spawn({
69 |         let one = one.clone();
70 |         let collector = collector.clone();
71 | 
72 |         move || {
73 |             let guard = collector.enter();
74 |             let _value = guard.protect(&one, Ordering::Acquire);
75 |             tx.send(()).unwrap();
76 |             drop(guard);
77 |             tx.send(()).unwrap();
78 |         }
79 |     });
80 | 
81 |     for _ in 0..2 {
82 |         let zero = AtomicPtr::new(boxed(DropTrack(b_dropped.clone())));
83 |         let guard = collector.enter();
84 |         let value = guard.protect(&zero, Ordering::Acquire);
85 |         unsafe { collector.retire(value, reclaim::boxed) }
86 |     }
87 | 
88 |     rx.recv().unwrap(); // wait for thread to access value
89 |     let guard = collector.enter();
90 |     let value = guard.protect(&one, Ordering::Acquire);
91 |     unsafe { collector.retire(value, reclaim::boxed) }
92 | 
93 |     rx.recv().unwrap(); // wait for thread to drop guard
94 |     h.join().unwrap();
95 | 
96 |     drop(guard);
97 | 
98 |     assert_eq!(
99 |         (
100 |             b_dropped.load(Ordering::Acquire),
101 |             a_dropped.load(Ordering::Acquire)
102 |         ),
103 |         (2, 1)
104 |     );
105 | }
106 | 
107 | #[test]
108 | fn refresh() {
109 |     let collector = Arc::new(Collector::new().batch_size(3));
110 | 
111 |     let items = (0..cfg::ITEMS)
112 |         .map(|i| AtomicPtr::new(boxed(i)))
113 |         .collect::<Arc<[_]>>();
114 | 
115 |     let handles = (0..cfg::THREADS)
116 |         .map(|_| {
117 |             thread::spawn({
118 |                 let items = items.clone();
119 |                 let collector = collector.clone();
120 | 
121 |                 move || {
122 |                     let mut guard = collector.enter();
123 | 
124 |                     for _ in 0..cfg::ITER {
125 |                         for item in items.iter() {
126 |                             let item = guard.protect(item, Ordering::Acquire);
127 |                             unsafe { assert!(*item < cfg::ITEMS) }
128 |                         }
129 | 
130 |                         guard.refresh();
131 |                     }
132 |                 }
133 |             })
134 |         })
135 |         .collect::<Vec<_>>();
136 | 
137 |     for i in 0..cfg::ITER {
138 |         for item in items.iter() {
139 |             let old = item.swap(Box::into_raw(Box::new(i)), Ordering::AcqRel);
140 |             unsafe { collector.retire(old, reclaim::boxed) }
141 |         }
142 |     }
143 | 
144 |     for handle in handles {
145 |         handle.join().unwrap()
146 |     }
147 | 
148 |     // cleanup
149 |     for item in items.iter() {
150 |         let old = item.swap(ptr::null_mut(), Ordering::Acquire);
151 |         unsafe { collector.retire(old, reclaim::boxed) }
152 |     }
153 | }
154 | 
155 | #[test]
156 | fn recursive_retire() {
157 |     struct Recursive {
158 |         _value: usize,
159 |         pointers: Vec<*mut usize>,
160 |     }
161 | 
162 |     let collector = Collector::new().batch_size(1);
163 | 
164 |     let ptr = boxed(Recursive {
165 |         _value: 0,
166 |         pointers: (0..cfg::ITEMS).map(boxed).collect(),
167 |     });
168 | 
169 |     unsafe {
170 |         collector.retire(ptr, |ptr: *mut Recursive, collector| {
171 |             let value = Box::from_raw(ptr);
172 | 
173 |             for pointer in value.pointers {
174 |                 collector.retire(pointer, reclaim::boxed);
175 | 
176 |                 let mut guard = collector.enter();
177 |                 guard.flush();
178 |                 guard.refresh();
179 |                 drop(guard);
180 |             }
181 |         });
182 | 
183 |         collector.enter().flush();
184 |     }
185 | }
186 | 
187 | #[test]
188 | fn reclaim_all() {
189 |     let collector = Collector::new().batch_size(2);
190 | 
191 |     for _ in 0..cfg::ITER {
192 |         let dropped = Arc::new(AtomicUsize::new(0));
193 | 
194 |         let items = (0..cfg::ITEMS)
195 |             .map(|_| AtomicPtr::new(boxed(DropTrack(dropped.clone()))))
196 |             .collect::<Vec<_>>();
197 | 
198 |         for item in items {
199 |             unsafe { collector.retire(item.load(Ordering::Relaxed), reclaim::boxed) };
200 |         }
201 | 
202 |         unsafe { collector.reclaim_all() };
203 |         assert_eq!(dropped.load(Ordering::Relaxed), cfg::ITEMS);
204 |     }
205 | }
206 | 
207 | #[test]
208 | fn recursive_retire_reclaim_all() {
209 |     struct Recursive {
210 |         _value: usize,
211 |         pointers: Vec<*mut DropTrack>,
212 |     }
213 | 
214 |     unsafe {
215 |         let collector = Collector::new().batch_size(cfg::ITEMS * 2);
216 |         let dropped = Arc::new(AtomicUsize::new(0));
217 | 
218 |         let ptr = boxed(Recursive {
219 |             _value: 0,
220 |             pointers: (0..cfg::ITEMS)
221 |                 .map(|_| boxed(DropTrack(dropped.clone())))
222 |                 .collect(),
223 |         });
224 | 
225 |         collector.retire(ptr, |ptr: *mut Recursive, collector| {
226 |             let value = Box::from_raw(ptr);
227 |             for pointer in value.pointers {
228 |                 (*collector).retire(pointer, reclaim::boxed);
229 |             }
230 |         });
231 | 
232 |         collector.reclaim_all();
233 |         assert_eq!(dropped.load(Ordering::Relaxed), cfg::ITEMS);
234 |     }
235 | }
236 | 
237 | #[test]
238 | fn defer_retire() {
239 |     let collector = Collector::new().batch_size(5);
240 |     let dropped = Arc::new(AtomicUsize::new(0));
241 | 
242 |     let objects: Vec<_> = (0..30).map(|_| boxed(DropTrack(dropped.clone()))).collect();
243 | 
244 |     let guard = collector.enter();
245 | 
246 |     for object in objects {
247 |         unsafe { guard.defer_retire(object, reclaim::boxed) }
248 |         guard.flush();
249 |     }
250 | 
251 |     // guard is still active
252 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
253 |     drop(guard);
254 |     // now the objects should have been dropped
255 |     assert_eq!(dropped.load(Ordering::Relaxed), 30);
256 | }
257 | 
258 | #[test]
259 | fn reentrant() {
260 |     let collector = Arc::new(Collector::new().batch_size(5));
261 |     let dropped = Arc::new(AtomicUsize::new(0));
262 | 
263 |     let objects: UnsafeSend<Vec<*mut DropTrack>> =
264 |         UnsafeSend((0..5).map(|_| boxed(DropTrack(dropped.clone()))).collect());
265 | 
266 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
267 | 
268 |     let guard1 = collector.enter();
269 |     let guard2 = collector.enter();
270 |     let guard3 = collector.enter();
271 | 
272 |     thread::spawn({
273 |         let collector = collector.clone();
274 | 
275 |         move || {
276 |             let guard = collector.enter();
277 |             for object in { objects }.0 {
278 |                 unsafe { guard.defer_retire(object, reclaim::boxed) }
279 |             }
280 |         }
281 |     })
282 |     .join()
283 |     .unwrap();
284 | 
285 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
286 |     drop(guard1);
287 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
288 |     drop(guard2);
289 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
290 |     drop(guard3);
291 |     assert_eq!(dropped.load(Ordering::Relaxed), 5);
292 | 
293 |     let dropped = Arc::new(AtomicUsize::new(0));
294 | 
295 |     let objects: UnsafeSend<Vec<*mut DropTrack>> =
296 |         UnsafeSend((0..5).map(|_| boxed(DropTrack(dropped.clone()))).collect());
297 | 
298 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
299 | 
300 |     let mut guard1 = collector.enter();
301 |     let mut guard2 = collector.enter();
302 |     let mut guard3 = collector.enter();
303 | 
304 |     thread::spawn({
305 |         let collector = collector.clone();
306 | 
307 |         move || {
308 |             let guard = collector.enter();
309 |             for object in { objects }.0 {
310 |                 unsafe { guard.defer_retire(object, reclaim::boxed) }
311 |             }
312 |         }
313 |     })
314 |     .join()
315 |     .unwrap();
316 | 
317 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
318 |     guard1.refresh();
319 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
320 |     drop(guard1);
321 |     guard2.refresh();
322 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
323 |     drop(guard2);
324 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
325 |     guard3.refresh();
326 |     assert_eq!(dropped.load(Ordering::Relaxed), 5);
327 | }
328 | 
329 | #[test]
330 | fn swap_stress() {
331 |     for _ in 0..cfg::ITER {
332 |         let collector = Collector::new();
333 |         let entries = [const { AtomicPtr::new(ptr::null_mut()) }; cfg::ITEMS];
334 | 
335 |         thread::scope(|s| {
336 |             for _ in 0..cfg::THREADS {
337 |                 s.spawn(|| {
338 |                     for i in 0..cfg::ITEMS {
339 |                         let guard = collector.enter();
340 |                         let new = Box::into_raw(Box::new(i));
341 |                         let old = guard.swap(&entries[i], new, Ordering::AcqRel);
342 |                         if !old.is_null() {
343 |                             unsafe { assert_eq!(*old, i) }
344 |                             unsafe { guard.defer_retire(old, reclaim::boxed) }
345 |                         }
346 |                     }
347 |                 });
348 |             }
349 |         });
350 | 
351 |         for i in 0..cfg::ITEMS {
352 |             let val = entries[i].load(Ordering::Relaxed);
353 |             let _ = unsafe { Box::from_raw(val) };
354 |         }
355 |     }
356 | }
357 | 
358 | #[test]
359 | fn cas_stress() {
360 |     for _ in 0..cfg::ITER {
361 |         let collector = Collector::new();
362 |         let entries = [const { AtomicPtr::new(ptr::null_mut()) }; cfg::ITEMS];
363 | 
364 |         thread::scope(|s| {
365 |             for _ in 0..cfg::THREADS {
366 |                 s.spawn(|| {
367 |                     for i in 0..cfg::ITEMS {
368 |                         let guard = collector.enter();
369 |                         let new = Box::into_raw(Box::new(i));
370 | 
371 |                         loop {
372 |                             let old = entries[i].load(Ordering::Relaxed);
373 | 
374 |                             let result = guard.compare_exchange(
375 |                                 &entries[i],
376 |                                 old,
377 |                                 new,
378 |                                 Ordering::AcqRel,
379 |                                 Ordering::Relaxed,
380 |                             );
381 | 
382 |                             let Ok(old) = result else {
383 |                                 continue;
384 |                             };
385 | 
386 |                             if !old.is_null() {
387 |                                 unsafe { assert_eq!(*old, i) }
388 |                                 unsafe { guard.defer_retire(old, reclaim::boxed) }
389 |                             }
390 | 
391 |                             break;
392 |                         }
393 |                     }
394 |                 });
395 |             }
396 |         });
397 | 
398 |         for i in 0..cfg::ITEMS {
399 |             let val = entries[i].load(Ordering::Relaxed);
400 |             let _ = unsafe { Box::from_raw(val) };
401 |         }
402 |     }
403 | }
404 | 
405 | #[test]
406 | fn owned_guard() {
407 |     let collector = Collector::new().batch_size(5);
408 |     let dropped = Arc::new(AtomicUsize::new(0));
409 | 
410 |     let objects = UnsafeSend(
411 |         (0..5)
412 |             .map(|_| AtomicPtr::new(boxed(DropTrack(dropped.clone()))))
413 |             .collect::<Vec<_>>(),
414 |     );
415 | 
416 |     assert_eq!(dropped.load(Ordering::Relaxed), 0);
417 | 
418 |     thread::scope(|s| {
419 |         let guard1 = collector.enter_owned();
420 | 
421 |         let guard2 = collector.enter();
422 |         for object in objects.0.iter() {
423 |             unsafe { guard2.defer_retire(object.load(Ordering::Acquire), reclaim::boxed) }
424 |         }
425 | 
426 |         drop(guard2);
427 | 
428 |         // guard1 is still active
429 |         assert_eq!(dropped.load(Ordering::Relaxed), 0);
430 | 
431 |         s.spawn(move || {
432 |             for object in objects.0.iter() {
433 |                 let _ = unsafe { &*guard1.protect(object, Ordering::Relaxed) };
434 |             }
435 | 
436 |             // guard1 is still active
437 |             assert_eq!(dropped.load(Ordering::Relaxed), 0);
438 | 
439 |             drop(guard1);
440 | 
441 |             assert_eq!(dropped.load(Ordering::Relaxed), 5);
442 |         });
443 |     });
444 | }
445 | 
446 | #[test]
447 | fn owned_guard_concurrent() {
448 |     let collector = Collector::new().batch_size(1);
449 |     let dropped = Arc::new(AtomicUsize::new(0));
450 | 
451 |     let objects = UnsafeSend(
452 |         (0..cfg::THREADS)
453 |             .map(|_| AtomicPtr::new(boxed(DropTrack(dropped.clone()))))
454 |             .collect::<Vec<_>>(),
455 |     );
456 | 
457 |     let guard = collector.enter_owned();
458 |     let barrier = Barrier::new(cfg::THREADS);
459 | 
460 |     thread::scope(|s| {
461 |         for i in 0..cfg::THREADS {
462 |             let guard = &guard;
463 |             let objects = &objects;
464 |             let dropped = &dropped;
465 |             let barrier = &barrier;
466 | 
467 |             s.spawn(move || {
468 |                 barrier.wait();
469 | 
470 |                 unsafe { guard.defer_retire(objects.0[i].load(Ordering::Acquire), reclaim::boxed) };
471 | 
472 |                 guard.flush();
473 | 
474 |                 for object in objects.0.iter() {
475 |                     let _ = unsafe { &*guard.protect(object, Ordering::Relaxed) };
476 |                 }
477 | 
478 |                 assert_eq!(dropped.load(Ordering::Relaxed), 0);
479 |             });
480 |         }
481 |     });
482 | 
483 |     drop(guard);
484 |     assert_eq!(dropped.load(Ordering::Relaxed), cfg::THREADS);
485 | }
486 | 
487 | #[test]
488 | fn collector_equality() {
489 |     let a = Collector::new();
490 |     let b = Collector::new();
491 | 
492 |     assert_eq!(a, a);
493 |     assert_eq!(b, b);
494 |     assert_ne!(a, b);
495 | 
496 |     assert_eq!(*a.enter().collector(), a);
497 |     assert_ne!(*a.enter().collector(), b);
498 | 
499 |     assert_eq!(*b.enter().collector(), b);
500 |     assert_ne!(*b.enter().collector(), a);
501 | }
502 | 
503 | #[test]
504 | fn stress() {
505 |     // stress test with operations on a shared stack
506 |     for _ in 0..cfg::ITER {
507 |         let stack = Arc::new(Stack::new(1));
508 | 
509 |         thread::scope(|s| {
510 |             for i in 0..cfg::ITEMS {
511 |                 stack.push(i, &stack.collector.enter());
512 |                 stack.pop(&stack.collector.enter());
513 |             }
514 | 
515 |             for _ in 0..cfg::THREADS {
516 |                 s.spawn(|| {
517 |                     for i in 0..cfg::ITEMS {
518 |                         stack.push(i, &stack.collector.enter());
519 |                         stack.pop(&stack.collector.enter());
520 |                     }
521 |                 });
522 |             }
523 |         });
524 | 
525 |         assert!(stack.pop(&stack.collector.enter()).is_none());
526 |         assert!(stack.is_empty());
527 |     }
528 | }
529 | 
530 | #[test]
531 | fn shared_owned_stress() {
532 |     // all threads sharing an owned guard
533 |     for _ in 0..cfg::ITER {
534 |         let stack = Arc::new(Stack::new(1));
535 |         let guard = &stack.collector.enter_owned();
536 | 
537 |         thread::scope(|s| {
538 |             for i in 0..cfg::ITEMS {
539 |                 stack.push(i, guard);
540 |                 stack.pop(guard);
541 |             }
542 | 
543 |             for _ in 0..cfg::THREADS {
544 |                 s.spawn(|| {
545 |                     for i in 0..cfg::ITEMS {
546 |                         stack.push(i, guard);
547 |                         stack.pop(guard);
548 |                     }
549 |                 });
550 |             }
551 |         });
552 | 
553 |         assert!(stack.pop(guard).is_none());
554 |         assert!(stack.is_empty());
555 |     }
556 | }
557 | 
558 | #[test]
559 | fn owned_stress() {
560 |     // all threads creating an owned guard (this is very unrealistic and stresses
561 |     // TLS synchronization)
562 |     for _ in 0..cfg::ITER {
563 |         let stack = Arc::new(Stack::new(1));
564 | 
565 |         thread::scope(|s| {
566 |             for i in 0..cfg::ITEMS {
567 |                 let guard = &stack.collector.enter_owned();
568 |                 stack.push(i, guard);
569 |                 stack.pop(guard);
570 |             }
571 | 
572 |             for _ in 0..cfg::THREADS {
573 |                 s.spawn(|| {
574 |                     for i in 0..cfg::ITEMS {
575 |                         let guard = &stack.collector.enter_owned();
576 |                         stack.push(i, guard);
577 |                         stack.pop(guard);
578 |                     }
579 |                 });
580 |             }
581 |         });
582 | 
583 |         assert!(stack.pop(&stack.collector.enter_owned()).is_none());
584 |         assert!(stack.is_empty());
585 |     }
586 | }
587 | 
588 | #[derive(Debug)]
589 | pub struct Stack<T> {
590 |     head: AtomicPtr<Node<T>>,
591 |     collector: Collector,
592 | }
593 | 
594 | #[derive(Debug)]
595 | struct Node<T> {
596 |     data: ManuallyDrop<T>,
597 |     next: *mut Node<T>,
598 | }
599 | 
600 | impl<T> Stack<T> {
601 |     pub fn new(batch_size: usize) -> Stack<T> {
602 |         Stack {
603 |             head: AtomicPtr::new(ptr::null_mut()),
604 |             collector: Collector::new().batch_size(batch_size),
605 |         }
606 |     }
607 | 
608 |     pub fn push(&self, value: T, guard: &impl Guard) {
609 |         let new = boxed(Node {
610 |             data: ManuallyDrop::new(value),
611 |             next: ptr::null_mut(),
612 |         });
613 | 
614 |         loop {
615 |             let head = guard.protect(&self.head, Ordering::Relaxed);
616 |             unsafe { (*new).next = head }
617 | 
618 |             if self
619 |                 .head
620 |                 .compare_exchange(head, new, Ordering::Release, Ordering::Relaxed)
621 |                 .is_ok()
622 |             {
623 |                 break;
624 |             }
625 |         }
626 |     }
627 | 
628 |     pub fn pop(&self, guard: &impl Guard) -> Option<T> {
629 |         loop {
630 |             let head = guard.protect(&self.head, Ordering::Acquire);
631 | 
632 |             if head.is_null() {
633 |                 return None;
634 |             }
635 | 
636 |             let next = unsafe { (*head).next };
637 | 
638 |             if self
639 |                 .head
640 |                 .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed)
641 |                 .is_ok()
642 |             {
643 |                 unsafe {
644 |                     let data = ptr::read(&(*head).data);
645 |                     self.collector.retire(head, reclaim::boxed);
646 |                     return Some(ManuallyDrop::into_inner(data));
647 |                 }
648 |             }
649 |         }
650 |     }
651 | 
652 |     pub fn is_empty(&self) -> bool {
653 |         self.head.load(Ordering::Relaxed).is_null()
654 |     }
655 | }
656 | 
657 | impl<T> Drop for Stack<T> {
658 |     fn drop(&mut self) {
659 |         let guard = self.collector.enter();
660 |         while self.pop(&guard).is_some() {}
661 |     }
662 | }
663 | 
664 | #[cfg(any(miri, seize_asan))]
665 | mod cfg {
666 |     pub const THREADS: usize = 4;
667 |     pub const ITEMS: usize = 100;
668 |     pub const ITER: usize = 4;
669 | }
670 | 
671 | #[cfg(not(any(miri, seize_asan)))]
672 | mod cfg {
673 |     pub const THREADS: usize = 32;
674 |     pub const ITEMS: usize = 10_000;
675 |     pub const ITER: usize = 50;
676 | }
677 | 
--------------------------------------------------------------------------------