├── .gitignore ├── .travis.yml ├── Cargo.toml ├── DEBUG.md ├── DESIGN.md ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches ├── guard.rs └── retire.rs ├── ci └── default.sh ├── examples ├── hash_set │ ├── main.rs │ └── ordered.rs └── treiber │ ├── main.rs │ └── stack.rs ├── hazptr_rewrite ├── Cargo.toml └── src │ ├── config.rs │ ├── default.rs │ ├── global.rs │ ├── guard.rs │ ├── hazard │ ├── list.rs │ └── mod.rs │ ├── lib.rs │ ├── local │ ├── inner.rs │ └── mod.rs │ ├── queue.rs │ └── retire │ ├── global_retire.rs │ ├── local_retire.rs │ └── mod.rs ├── rustfmt.toml ├── src ├── config.rs ├── default.rs ├── global.rs ├── guard.rs ├── hazard │ ├── list.rs │ └── mod.rs ├── lib.rs ├── local.rs ├── main.rs └── retired.rs └── tests └── integration.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | /target 3 | **/*.rs.bk 4 | Cargo.lock -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | 3 | rust: 4 | - stable 5 | - 1.36.0 6 | - nightly 7 | - beta 8 | 9 | script: ./ci/default.sh 10 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hazptr" 3 | description = "hazard pointer based concurrent memory reclamation" 4 | version = "0.2.0" 5 | authors = ["Oliver Giersch"] 6 | license = "MIT/Apache-2.0" 7 | readme = "README.md" 8 | repository = "https://github.com/oliver-giersch/hazptr.git" 9 | documentation = "https://docs.rs/hazptr" 10 | keywords = ["hazard", "pointer", "concurrent", "reclamation", "lock-free"] 11 | edition = "2018" 12 | exclude = [".travis.yml", "ci/*", "DEBUG.md"] 13 | 14 | [features] 15 | default = ["std"] 16 | 17 | # disable for use in no_std crates (for limitations see README.md) 18 | std = ["arrayvec/std", "conquer-once/std", "reclaim/std"] 19 | 20 | # counting towards the scan threshold caused by dropping hazard pointer guards 21 | count-release = [] 22 | 23 | # for instrumentation with TSAN 24 | sanitize-threads = [] 25 | 26 | [dependencies] 27 | cfg-if = "0.1.7" 28 | 29 | [dependencies.arrayvec] 30 | version = "0.4.11" 31 | default-features = false 32 | 33 | [dependencies.conquer-once] 34 | version = "0.1.2" 35 | default-features = false 36 | 37 | [dependencies.reclaim] 38 | version = "0.2.2" 39 | default-features = false 40 | 41 | [dev-dependencies] 42 | matches = "0.1.8" 43 | rand = "0.6.5" 44 | 45 | [[example]] 46 | name = "hash_set" 47 | required-features = ["std"] 48 | 49 | [[example]] 50 | name = "treiber" 51 | required-features = ["std"] 52 | 53 | [[test]] 54 | name = "integration" 55 | required-features = ["std", "count-release"] 56 | -------------------------------------------------------------------------------- /DEBUG.md: -------------------------------------------------------------------------------- 1 | # Useful Debugging Notes 2 | 3 | ## ASAN 4 | 5 | gdb: `break __sanitizer::Die 6 | 7 | ## TSAN 8 | 9 | Cargo.toml feature: `sanitize-thread` 10 | RUSTFLAGS=-Z sanitizer=thread 11 | -------------------------------------------------------------------------------- /DESIGN.md: -------------------------------------------------------------------------------- 1 | # Ideas 2 | 3 | - store `Hazard`s in segmented (array-based) list: Each `HazardNode` contains 31 128-byte aligned `Hazards` and one 128-byte 4 | aligned 
`next` pointer -> might improve iteration 5 | 6 | # API Redesign 7 | 8 | Currently, only global (as in `static`) data structures can be used. Allowing data-structure specific sets of hazard pointers 9 | and garbage heaps has some advantages, such as more focused iteration (i.e. only iterating HPs that actually belong to the 10 | same data-structure). Likewise, this may give more flexibility for later adding custom allocator support. Adding support for 11 | **policies** is also advantageous. 12 | 13 | ## Policies 14 | 15 | Different policies for choosing garbage collection strategies: 16 | 17 | Global: 18 | 19 | ```rust 20 | enum GlobalPolicy { 21 | LocalGarbage(AbandonList), 22 | GlobalGarbage(GarbageList), 23 | } 24 | ``` 25 | 26 | Local: 27 | 28 | ```rust 29 | enum LocalPolicy { 30 | LocalGarbage(Vec, ...), 31 | GlobalGarbage, 32 | } 33 | ``` 34 | 35 | (use runtime checks to assert matching policies of associated globals and locals) 36 | 37 | ## Global 38 | 39 | Globals must no longer be static, but can have any lifetime: 40 | 41 | ```rust 42 | pub struct Global<A: Alloc> { 43 | hazards: HazardList, 44 | policy: GlobalPolicy, 45 | alloc: A, 46 | } 47 | ``` 48 | 49 | ## Local 50 | 51 | Locals contain an explicit reference to their associated `Global`, with which they must have matching policies. 52 | 53 | ```rust 54 | struct LocalInner<'a, A: Alloc> { 55 | config: Config, 56 | global: &'a Global<A>, 57 | policy: LocalPolicy, 58 | guard_count: u32, 59 | ops_count: u32, 60 | scan_cache: Vec, 61 | } 62 | ``` 63 | 64 | ## Guards 65 | 66 | Guards must necessarily be restricted by the lifetime of their associated `Global`; the `guard_count` field in `LocalInner` 67 | ensures that the `Local` lives long enough: 68 | 69 | ```rust 70 | pub struct Guard<'a, A: Alloc> { 71 | local: *const Local<'a, A>, // this must be a pointer, since references into std TLS are not allowed 72 | hazard: &'a Hazard, 73 | } 74 | ``` 75 | 76 | ### Alternative 77 | 78 | Instead of storing a raw pointer to the associated `Local`, the `LocalAccess` trait could see continued usage 79 | as a means for abstracting over access through TLS or through a safe `&'a Local`; this would change the signature of `Guard` 80 | to potentially express two lifetimes, `'global` and `'local`. 81 | 82 | ## Retiring Records 83 | 84 | It would now be possible to protect pointers with hazard pointers belonging to one `Global` while retiring records in a cache 85 | that is checked against the hazard pointers of **another** `Global`. There is no obvious way to prevent this at the type level, 86 | and additional runtime checks would likely have to be extensive. 87 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright 2019 Oliver Giersch 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Oliver Giersch 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hazptr 2 | 3 | Hazard pointer based concurrent lock-free memory reclamation. 4 | 5 | [![Build Status](https://travis-ci.org/oliver-giersch/hazptr.svg?branch=master)]( 6 | https://travis-ci.org/oliver-giersch/hazptr) 7 | [![Latest version](https://img.shields.io/crates/v/hazptr.svg)](https://crates.io/crates/hazptr) 8 | [![Documentation](https://docs.rs/hazptr/badge.svg)](https://docs.rs/hazptr) 9 | [![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( 10 | https://github.com/oliver-giersch/hazptr) 11 | [![Rust 1.36+](https://img.shields.io/badge/rust-1.36+-lightgray.svg)]( 12 | https://www.rust-lang.org) 13 | 14 | Whenever a thread reads a value from shared memory it also protects the loaded 15 | value with a globally visible *hazard pointer*. 16 | All threads can *retire* shared values that are no longer needed and no longer 17 | accessible and cache them locally. 18 | Retired records are reclaimed (dropped and de-allocated) in bulk, but only when 19 | they are not, or are no longer, protected by any hazard pointers. 20 | 21 | ## Usage 22 | 23 | Add this to your `Cargo.toml`: 24 | 25 | ```toml 26 | [dependencies] 27 | hazptr = "0.2.0" 28 | ``` 29 | 30 | ## Minimum Supported Rust Version (MSRV) 31 | 32 | The minimum supported Rust version for this crate is 1.36.0. 33 | 34 | ## Comparison with [crossbeam-epoch](https://crates.io/crates/crossbeam-epoch) 35 | 36 | The hazard pointer reclamation scheme is generally less efficient than 37 | epoch-based reclamation schemes (or any other type of reclamation scheme, for 38 | that matter). 39 | This is mainly because acquisition of hazard pointers requires an expensive 40 | memory fence to be issued after every load from shared memory. 41 | It is, however, usually the best scheme in terms of reclamation reliability. 42 | Retired records are generally reclaimed in a timely manner and reclamation is 43 | not affected by contention.
44 | These properties can lead to a better memory footprint of applications using 45 | hazard pointers instead of other reclamation schemes. 46 | Also, since hazard pointers only protect individual pointers from reclamation, 47 | they can be better suited for protecting individual records for long periods of 48 | time. 49 | Epoch-based schemes, on the other hand, completely prevent reclamation by all 50 | threads whenever records need to be protected. 51 | 52 | ## Examples 53 | 54 | See [examples/treiber/stack.rs](examples/treiber/stack.rs) for an implementation 55 | of Treiber's stack with hazard pointers or 56 | [examples/hash_set/ordered.rs](examples/hash_set/ordered.rs) for an 57 | implementation of a concurrent hash set. 58 | 59 | ## Crate Features & Runtime Configuration 60 | 61 | The following features are defined for this crate: 62 | 63 | - `std` (default) 64 | - `count-release` 65 | 66 | By default, a thread initiates a GC scan and attempts to flush its cache of 67 | retired records once it has retired a certain threshold count of records. 68 | By compiling the crate with the `count-release` feature, this can be changed to 69 | count the instances of successfully acquired hazard pointer `Guard`s going 70 | out of scope (i.e. being released) instead. 71 | This can be beneficial, e.g. when there are only a few records overall and 72 | their retirement is rare. 73 | 74 | ### Scan Threshold 75 | 76 | The scan threshold value is used internally for determining the frequency of 77 | GC scans. 78 | These scans traverse the thread-local list of retired records and reclaim all 79 | records which are no longer protected by any hazard pointers. 80 | This threshold variable can be any positive non-zero 32-bit integer value and 81 | is set to 100 by default. 82 | It can be set to a different value exactly once during the runtime of the 83 | program. 84 | Note that only threads that are spawned **after** setting this variable will 85 | be able to use the adjusted value. 86 | The following code gives an example for how to adjust this value: 87 | 88 | ```rust 89 | use hazptr::{ConfigBuilder, CONFIG}; 90 | 91 | fn main() { 92 | // preferably this should be called before spawning any threads 93 | CONFIG.init_once(|| ConfigBuilder::new().scan_threshold(512).build()); 94 | } 95 | ``` 96 | 97 | A scan threshold of 1 means, for example, that a GC scan is initiated 98 | after **every** operation counting towards the threshold, meaning either 99 | operations for retiring records or, if the `count-release` feature is 100 | enabled, releases of `Guard`s. 101 | The scan threshold can also be specified at compile time via the `HAZPTR_SCAN_THRESHOLD` environment variable, e.g. by invoking `cargo` with: 102 | 103 | ``` 104 | env HAZPTR_SCAN_THRESHOLD=1 cargo build 105 | ``` 106 | 107 | Alternatively, this variable could also be set as part of a build script: 108 | 109 | ```rust 110 | // build.rs 111 | 112 | fn main() { 113 | // alternative: std::env::set_var("HAZPTR_SCAN_THRESHOLD", "1") 114 | println!("cargo:rustc-env=HAZPTR_SCAN_THRESHOLD=1"); 115 | } 116 | ``` 117 | 118 | Once set, it is necessary to call `cargo clean` before changing this variable 119 | again, in order to force a rebuild with the new value. 120 | 121 | As a general rule, a higher scan threshold is better performance-wise, since 122 | threads have to attempt to reclaim records less frequently, but it can also lead to 123 | the accumulation of large amounts of garbage, which increases the memory footprint and may itself degrade performance.
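The interplay between the scan threshold and the `count-release` feature can be illustrated with a minimal sketch (illustrative only, modeled on the crate's own benchmarks; the tag-size parameter of `Atomic` is an assumption here):

```rust
use std::sync::atomic::Ordering::Relaxed;

use hazptr::{ConfigBuilder, Guard, CONFIG};

// the second (tag size) parameter is assumed, analogous to the bundled examples
type Atomic<T> = hazptr::Atomic<T, hazptr::typenum::U0>;

fn main() {
    // a deliberately tiny threshold, set once before any other threads are spawned
    CONFIG.init_once(|| ConfigBuilder::new().scan_threshold(2).build());

    let atomic = Atomic::new(1);

    for _ in 0..10 {
        // each iteration acquires a hazard pointer `Guard` and drops (releases) it;
        // with `count-release` enabled, every release counts towards the threshold,
        // so a GC scan is attempted roughly every two iterations, even though no
        // records are ever retired here
        let guard = &mut Guard::new();
        assert_eq!(*atomic.load(Relaxed, guard).unwrap(), 1);
    }
}
```

Without the `count-release` feature, the loop above would never trigger a scan, since only retire operations count towards the threshold by default.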
124 | 125 | ### Usage in `#[no_std]` environments 126 | 127 | When building the crate without the (default) `std` feature, it becomes 128 | possible to use its functionality in an `#[no_std]` + `alloc` environment, albeit 129 | with arguably worse ergonomics. 130 | In this configuration, the crate's public API additionally exposes the `Local` 131 | type. 132 | Also, instead of exporting the `Guard` type, a different `LocalGuard` type is 133 | exported, which contains an explicit reference to the thread local state. 134 | 135 | In order to use `hazptr` in such an environment, one has to manually to do the 136 | following steps: 137 | 138 | - for every thread, create a separate `Local` instance 139 | - hazard pointers can only be created by explicitly passing a reference to the 140 | current thread's `Local` instance 141 | 142 | ## License 143 | 144 | Hazptr is distributed under the terms of both the MIT license and the 145 | Apache License (Version 2.0). 146 | 147 | See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT) for details. 148 | -------------------------------------------------------------------------------- /benches/guard.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use std::sync::atomic::Ordering::Relaxed; 6 | 7 | use test::Bencher; 8 | 9 | use hazptr::{ConfigBuilder, Guard, CONFIG}; 10 | 11 | type Atomic = hazptr::Atomic; 12 | 13 | #[bench] 14 | fn pin_and_load(b: &mut Bencher) { 15 | CONFIG.init_once(|| ConfigBuilder::new().scan_threshold(128).build()); 16 | 17 | let atomic = Atomic::new(1); 18 | 19 | b.iter(|| { 20 | let guard = &mut Guard::new(); 21 | assert_eq!(*atomic.load(Relaxed, guard).unwrap(), 1); 22 | }) 23 | } 24 | -------------------------------------------------------------------------------- /benches/retire.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use std::sync::atomic::Ordering::{AcqRel, Relaxed}; 6 | use std::thread; 7 | 8 | use test::Bencher; 9 | 10 | use conquer_once::Lazy; 11 | use hazptr::{ConfigBuilder, CONFIG}; 12 | 13 | type Atomic = hazptr::Atomic; 14 | type Owned = hazptr::Owned; 15 | 16 | #[bench] 17 | fn single_retire(b: &mut Bencher) { 18 | CONFIG.init_once(|| ConfigBuilder::new().scan_threshold(128).build()); 19 | 20 | let global = Atomic::new(1); 21 | 22 | b.iter(|| { 23 | let unlinked = global.swap(Owned::new(1), Relaxed).unwrap(); 24 | unsafe { unlinked.retire() }; 25 | }); 26 | } 27 | 28 | #[bench] 29 | fn multi_retire(b: &mut Bencher) { 30 | const STEPS: u32 = 100_000; 31 | CONFIG.init_once(|| ConfigBuilder::new().scan_threshold(128).build()); 32 | 33 | let global = Atomic::new(1); 34 | 35 | b.iter(|| { 36 | for _ in 0..STEPS { 37 | let unlinked = global.swap(Owned::new(1), Relaxed).unwrap(); 38 | unsafe { unlinked.retire() }; 39 | } 40 | }); 41 | } 42 | 43 | #[bench] 44 | fn multi_retire_varied(b: &mut Bencher) { 45 | const STEPS: u32 = 100_000; 46 | CONFIG.init_once(|| ConfigBuilder::new().scan_threshold(128).build()); 47 | 48 | let int = Atomic::new(1); 49 | let string = Atomic::new(String::from("string")); 50 | let arr = Atomic::new([0usize; 16]); 51 | 52 | b.iter(|| unsafe { 53 | for _ in 0..STEPS { 54 | int.swap(Owned::new(1), Relaxed).unwrap().retire(); 55 | string.swap(Owned::new(String::from("string")), Relaxed).unwrap().retire(); 56 | arr.swap(Owned::new([0usize; 16]), Relaxed).unwrap().retire(); 57 | } 58 | }); 59 | } 60 | 61 
| #[bench] 62 | fn parallel_retire(b: &mut Bencher) { 63 | const THREADS: u32 = 8; 64 | const STEPS: u32 = 100_000; 65 | 66 | static GLOBAL: Lazy> = Lazy::new(|| Atomic::new(0)); 67 | 68 | let handles: Vec<_> = (0..THREADS) 69 | .map(|id| { 70 | thread::spawn(|| { 71 | for _ in 0..STEPS { 72 | let unlinked = GLOBAL.swap(Owned::new(id), AcqRel).unwrap(); 73 | unsafe { unlinked.retire() }; 74 | } 75 | }) 76 | }) 77 | .collect(); 78 | } 79 | -------------------------------------------------------------------------------- /ci/default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cargo build --verbose 4 | cargo test --verbose 5 | cargo test --verbose --features "count-release" --verbose 6 | cargo build --no-default-features --verbose 7 | cargo test --no-default-features --verbose 8 | cargo test --no-default-features --features "count-release" --verbose 9 | 10 | cargo clean 11 | cargo test --test integration --features "count-release" --verbose 12 | -------------------------------------------------------------------------------- /examples/hash_set/main.rs: -------------------------------------------------------------------------------- 1 | // implementation is currently defunct 2 | 3 | mod ordered; 4 | 5 | use std::borrow::Borrow; 6 | use std::collections::hash_map::RandomState; 7 | use std::hash::{BuildHasher, Hash, Hasher}; 8 | use std::mem; 9 | use std::slice; 10 | use std::sync::atomic::{AtomicUsize, Ordering}; 11 | use std::sync::Arc; 12 | use std::thread; 13 | 14 | use hazptr::Guard; 15 | use reclaim::prelude::*; 16 | 17 | use crate::ordered::OrderedSet; 18 | 19 | const DEFAULT_BUCKETS: usize = 64; 20 | 21 | //////////////////////////////////////////////////////////////////////////////////////////////////// 22 | // HashSet 23 | //////////////////////////////////////////////////////////////////////////////////////////////////// 24 | 25 | pub struct HashSet { 26 | inner: Arc>, 27 | } 28 | 29 | impl Default for HashSet { 30 | #[inline] 31 | fn default() -> Self { 32 | Self::new() 33 | } 34 | } 35 | 36 | impl HashSet { 37 | /// Creates a new hash set. 38 | #[inline] 39 | pub fn new() -> Self { 40 | Self::with_hasher(RandomState::new()) 41 | } 42 | 43 | /// Creates a new hash set with the specified number of buckets. 44 | /// 45 | /// # Panics 46 | /// 47 | /// This function will panic, if `buckets` is 0. 48 | #[inline] 49 | pub fn with_buckets(buckets: usize) -> Self { 50 | Self::with_hasher_and_buckets(RandomState::new(), buckets) 51 | } 52 | } 53 | 54 | impl HashSet 55 | where 56 | T: Hash + Ord, 57 | S: BuildHasher, 58 | { 59 | /// Creates a new hash set with the default number of buckets and the given `hash_builder`. 60 | #[inline] 61 | pub fn with_hasher(hash_builder: S) -> Self { 62 | Self { 63 | inner: Arc::new(RawHashSet { 64 | size: DEFAULT_BUCKETS, 65 | buckets: Self::allocate_buckets(DEFAULT_BUCKETS), 66 | hash_builder, 67 | }), 68 | } 69 | } 70 | 71 | /// Creates a new hash set with the specified number of buckets and the given `hash_builder`. 72 | /// 73 | /// # Panics 74 | /// 75 | /// This function will panic, if `buckets` is 0. 76 | #[inline] 77 | pub fn with_hasher_and_buckets(hash_builder: S, buckets: usize) -> Self { 78 | assert!(buckets > 0, "hash set needs at least one bucket"); 79 | Self { 80 | inner: Arc::new(RawHashSet { 81 | size: buckets, 82 | buckets: Self::allocate_buckets(buckets), 83 | hash_builder, 84 | }), 85 | } 86 | } 87 | 88 | /// Returns the number of buckets in this hash set. 
89 | #[inline] 90 | pub fn buckets(&self) -> usize { 91 | self.inner.size 92 | } 93 | 94 | /// Returns a reference to the set's `BuildHasher`. 95 | #[inline] 96 | pub fn hasher(&self) -> &S { 97 | &self.inner.hash_builder 98 | } 99 | 100 | /// Returns a new handle to the [`HashSet`]. 101 | #[inline] 102 | pub fn handle(&self) -> Handle { 103 | Handle { inner: Arc::clone(&self.inner), guards: Guards::new() } 104 | } 105 | 106 | /// Allocates a boxed slice of ordered sets. 107 | #[inline] 108 | fn allocate_buckets(buckets: usize) -> Box<[OrderedSet]> { 109 | assert_eq!(mem::size_of::>(), mem::size_of::()); 110 | 111 | let slice: &mut [usize] = Box::leak(vec![0usize; buckets].into_boxed_slice()); 112 | let (ptr, len) = (slice.as_mut_ptr(), slice.len()); 113 | 114 | // this is safe because `Atomic::null()` and `0usize` have the same in-memory representation 115 | unsafe { 116 | let slice: &mut [OrderedSet] = slice::from_raw_parts_mut(ptr as *mut _, len); 117 | Box::from_raw(slice) 118 | } 119 | } 120 | } 121 | 122 | //////////////////////////////////////////////////////////////////////////////////////////////////// 123 | // Handle 124 | //////////////////////////////////////////////////////////////////////////////////////////////////// 125 | 126 | pub struct Handle { 127 | inner: Arc>, 128 | guards: Guards, 129 | } 130 | 131 | impl Handle 132 | where 133 | T: Hash + Ord + 'static, 134 | S: BuildHasher, 135 | { 136 | /// Returns `true` if the set contains the given `value`. 137 | /// 138 | /// This method requires a mutable `self` reference, because the internally use hazard pointers 139 | /// must be adapted during iteration of the set. 140 | #[inline] 141 | pub fn contains(&mut self, value: &Q) -> bool 142 | where 143 | T: Borrow, 144 | Q: Hash + Ord, 145 | { 146 | self.inner.contains(value, &mut self.guards) 147 | } 148 | 149 | /// Returns a reference to the value in the set, if any, that is equal to the given value. 150 | /// 151 | /// The value may be any borrowed form of the set's value type, but [`Hash`][Hash] and 152 | /// [`Eq`][Eq] on the borrowed form *must* match those for the value type. 153 | /// 154 | /// This method requires a mutable `self` reference, because the internally use hazard pointers 155 | /// must be adapted during iteration of the set. 156 | /// The returned reference is likewise protected by one of these hazard pointers, so it can not 157 | /// be used after calling another method that mutates these. 158 | /// 159 | /// [Hash]: std::hash::Hash 160 | /// [Eq]: std::cmp::Eq 161 | #[inline] 162 | pub fn get(&mut self, value: &Q) -> Option<&T> 163 | where 164 | T: Borrow, 165 | Q: Hash + Ord, 166 | { 167 | self.inner.get(value, &mut self.guards) 168 | } 169 | 170 | /// Adds a value to the set. 171 | /// 172 | /// If the set did not have this value present, `true` is returned. 173 | /// If the set did have this value present, `false` is returned. 174 | #[inline] 175 | pub fn insert(&mut self, value: T) -> bool { 176 | self.inner.insert(value, &mut self.guards) 177 | } 178 | 179 | /// Removes a value from the set. Returns whether the value was 180 | /// present in the set. 181 | /// 182 | /// The value may be any borrowed form of the set's value type, but 183 | /// [`Hash`][Hash] and [`Eq`][Eq] on the borrowed form *must* match those for 184 | /// the value type. 
185 | /// 186 | /// [Hash]: std::hash::Hash 187 | /// [Eq]: std::cmp::Eq 188 | #[inline] 189 | pub fn remove(&mut self, value: &Q) -> bool 190 | where 191 | T: Borrow, 192 | Q: Ord + Hash, 193 | { 194 | self.inner.remove(value, &mut self.guards) 195 | } 196 | } 197 | 198 | //////////////////////////////////////////////////////////////////////////////////////////////////// 199 | // Guards 200 | //////////////////////////////////////////////////////////////////////////////////////////////////// 201 | 202 | /// A container for the three hazard pointers required to safely traverse a hash 203 | /// set. 204 | #[derive(Debug, Default)] 205 | struct Guards { 206 | prev: Guard, 207 | curr: Guard, 208 | next: Guard, 209 | } 210 | 211 | impl Guards { 212 | /// Creates a new set of [`Guards`]. 213 | #[inline] 214 | fn new() -> Self { 215 | Self { prev: Guard::new(), curr: Guard::new(), next: Guard::new() } 216 | } 217 | 218 | /// Releases all contained guards. 219 | #[inline] 220 | fn release_all(&mut self) { 221 | self.prev.release(); 222 | self.curr.release(); 223 | self.next.release(); 224 | } 225 | } 226 | 227 | //////////////////////////////////////////////////////////////////////////////////////////////////// 228 | // RawHashSet 229 | //////////////////////////////////////////////////////////////////////////////////////////////////// 230 | 231 | /// A concurrent hash set. 232 | struct RawHashSet { 233 | size: usize, 234 | buckets: Box<[OrderedSet]>, 235 | hash_builder: S, 236 | } 237 | 238 | impl RawHashSet 239 | where 240 | T: Hash + Ord + 'static, 241 | S: BuildHasher, 242 | { 243 | /// Returns `true` if the set contains the given `value`. 244 | #[inline] 245 | pub fn contains(&self, value: &Q, guards: &mut Guards) -> bool 246 | where 247 | T: Borrow, 248 | Q: Hash + Ord, 249 | { 250 | let set = &self.buckets[Self::make_hash(&self.hash_builder, value, self.size)]; 251 | let res = set.get(value, guards).is_some(); 252 | guards.release_all(); 253 | 254 | res 255 | } 256 | 257 | /// Returns a reference to the value in the set, if any, that is equal to the given value. 258 | /// 259 | /// The value may be any borrowed form of the set's value type, but [`Hash`][Hash] and 260 | /// [`Eq`][Eq] on the borrowed form *must* match those for the value type. 261 | /// 262 | /// [Hash]: std::hash::Hash 263 | /// [Eq]: std::cmp::Eq 264 | #[inline] 265 | pub fn get<'g, Q>(&self, value: &Q, guards: &'g mut Guards) -> Option<&'g T> 266 | where 267 | T: Borrow, 268 | Q: Hash + Ord, 269 | { 270 | let set = &self.buckets[Self::make_hash(&self.hash_builder, value, self.size)]; 271 | set.get(value, guards) 272 | } 273 | 274 | /// Adds a value to the set. 275 | /// 276 | /// If the set did not have this value present, `true` is returned. 277 | /// If the set did have this value present, `false` is returned. 278 | #[inline] 279 | pub fn insert(&self, value: T, guards: &mut Guards) -> bool { 280 | let set = &self.buckets[Self::make_hash(&self.hash_builder, &value, self.size)]; 281 | set.insert_node(value, guards) 282 | } 283 | 284 | /// Removes a value from the set. Returns whether the value was 285 | /// present in the set. 286 | /// 287 | /// The value may be any borrowed form of the set's value type, but 288 | /// [`Hash`][Hash] and [`Eq`][Eq] on the borrowed form *must* match those for 289 | /// the value type. 
290 | /// 291 | /// [Hash]: std::hash::Hash 292 | /// [Eq]: std::cmp::Eq 293 | #[inline] 294 | pub fn remove(&self, value: &Q, guards: &mut Guards) -> bool 295 | where 296 | T: Borrow, 297 | Q: Ord + Hash, 298 | { 299 | let set = &self.buckets[Self::make_hash(&self.hash_builder, value, self.size)]; 300 | set.remove_node(value, guards) 301 | } 302 | } 303 | 304 | impl RawHashSet 305 | where 306 | T: Hash + Ord, 307 | S: BuildHasher, 308 | { 309 | /// Generates a hash for `value` and transforms it into a slice index for the given number of 310 | /// buckets. 311 | #[inline] 312 | fn make_hash(builder: &S, value: &Q, buckets: usize) -> usize 313 | where 314 | T: Borrow, 315 | Q: Hash + Ord, 316 | { 317 | let mut state = builder.build_hasher(); 318 | value.hash(&mut state); 319 | (state.finish() % buckets as u64) as usize 320 | } 321 | } 322 | 323 | //////////////////////////////////////////////////////////////////////////////////////////////////// 324 | // Example 325 | //////////////////////////////////////////////////////////////////////////////////////////////////// 326 | 327 | #[derive(Debug, Default)] 328 | #[repr(align(64))] 329 | struct ThreadCount(AtomicUsize); 330 | 331 | #[derive(Debug)] 332 | struct DropI8<'a>(i8, &'a ThreadCount); 333 | 334 | impl Borrow for DropI8<'_> { 335 | #[inline] 336 | fn borrow(&self) -> &i8 { 337 | &self.0 338 | } 339 | } 340 | 341 | impl Hash for DropI8<'_> { 342 | fn hash(&self, state: &mut H) { 343 | self.0.hash(state); 344 | } 345 | } 346 | 347 | impl Drop for DropI8<'_> { 348 | #[inline] 349 | fn drop(&mut self) { 350 | (self.1).0.fetch_add(1, Ordering::Relaxed); 351 | } 352 | } 353 | 354 | impl PartialEq for DropI8<'_> { 355 | fn eq(&self, other: &Self) -> bool { 356 | self.0.eq(&other.0) 357 | } 358 | } 359 | 360 | impl PartialOrd for DropI8<'_> { 361 | fn partial_cmp(&self, other: &Self) -> Option { 362 | self.0.partial_cmp(&other.0) 363 | } 364 | } 365 | 366 | impl Eq for DropI8<'_> {} 367 | 368 | impl Ord for DropI8<'_> { 369 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 370 | self.0.cmp(&other.0) 371 | } 372 | } 373 | 374 | fn test_insert_remove() { 375 | let set = HashSet::with_buckets(1); 376 | let mut handle = set.handle(); 377 | 378 | // insert 379 | assert!(handle.insert(0)); 380 | assert!(handle.insert(1)); 381 | assert!(handle.insert(-10)); 382 | assert!(handle.insert(10)); 383 | assert!(handle.insert(5)); 384 | assert!(handle.insert(-5)); 385 | assert!(handle.insert(7)); 386 | assert!(handle.insert(-2)); 387 | 388 | // remove 389 | assert!(handle.remove(&-10)); 390 | assert!(handle.remove(&-5)); 391 | assert!(handle.remove(&-2)); 392 | assert!(handle.remove(&0)); 393 | assert!(handle.remove(&5)); 394 | assert!(handle.remove(&7)); 395 | assert!(handle.remove(&10)); 396 | 397 | assert!(!handle.contains(&-10)); 398 | assert!(!handle.contains(&-5)); 399 | assert!(!handle.contains(&-2)); 400 | assert!(!handle.contains(&0)); 401 | assert!(!handle.contains(&5)); 402 | assert!(!handle.contains(&7)); 403 | assert!(!handle.contains(&10)); 404 | 405 | println!("test_insert_remove: success"); 406 | } 407 | 408 | fn test_random() { 409 | use rand::prelude::*; 410 | 411 | let set = HashSet::with_buckets(1); 412 | let mut handle = set.handle(); 413 | 414 | let mut conflicts = 0; 415 | for _ in 0..10_000 { 416 | let value: i8 = rand::thread_rng().gen(); 417 | if handle.contains(&value) { 418 | conflicts += 1; 419 | handle.remove(&value); 420 | } else { 421 | handle.insert(value); 422 | } 423 | } 424 | 425 | println!("test_random: success, 
detected {} insertion conflicts", conflicts); 426 | } 427 | 428 | fn main() { 429 | use rand::prelude::*; 430 | 431 | const THREADS: usize = 8; 432 | const OPS_COUNT: usize = 10_000_000; 433 | 434 | static THREAD_COUNTS: [ThreadCount; THREADS] = [ 435 | ThreadCount(AtomicUsize::new(0)), 436 | ThreadCount(AtomicUsize::new(0)), 437 | ThreadCount(AtomicUsize::new(0)), 438 | ThreadCount(AtomicUsize::new(0)), 439 | ThreadCount(AtomicUsize::new(0)), 440 | ThreadCount(AtomicUsize::new(0)), 441 | ThreadCount(AtomicUsize::new(0)), 442 | ThreadCount(AtomicUsize::new(0)), 443 | ]; 444 | 445 | test_insert_remove(); 446 | test_random(); 447 | 448 | // the single bucket ensures maximum contention 449 | let set = HashSet::with_buckets(1); 450 | 451 | let handles: Vec<_> = (0..THREADS) 452 | .map(|id| { 453 | let mut handle = set.handle(); 454 | thread::spawn(move || { 455 | let mut alloc_count = 0u32; 456 | 457 | for ops in 0..OPS_COUNT { 458 | if ops > 0 && ops % (OPS_COUNT / 10) == 0 { 459 | println!("thread {}: {} out of {} ops", id, ops, OPS_COUNT); 460 | } 461 | 462 | let value: i8 = rand::thread_rng().gen(); 463 | if handle.contains(&value) { 464 | handle.remove(&value); 465 | } else { 466 | handle.insert(DropI8(value, &THREAD_COUNTS[id])); 467 | alloc_count += 1; 468 | } 469 | } 470 | 471 | println!("thread {}: done", id); 472 | alloc_count 473 | }) 474 | }) 475 | .collect(); 476 | 477 | let total_alloc: u32 = handles.into_iter().map(|handle| handle.join().unwrap()).sum(); 478 | mem::drop(set); 479 | let total_drop: usize = THREAD_COUNTS.iter().map(|count| count.0.load(Ordering::Relaxed)).sum(); 480 | assert_eq!(total_alloc as usize, total_drop); 481 | println!( 482 | "main: {} threads reclaimed {} out of {} allocated records", 483 | THREADS, total_drop, total_alloc 484 | ); 485 | println!("success, no leaks detected."); 486 | } 487 | -------------------------------------------------------------------------------- /examples/hash_set/ordered.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | use std::cmp::Ordering::{Equal, Greater}; 3 | use std::mem; 4 | use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; 5 | 6 | use hazptr::typenum; 7 | use reclaim::align::CacheAligned; 8 | use reclaim::prelude::*; 9 | use typenum::U1; 10 | 11 | use crate::Guards; 12 | 13 | use self::FindResult::*; 14 | 15 | pub type Atomic = hazptr::Atomic; 16 | pub type Owned = hazptr::Owned; 17 | pub type Shared<'g, T> = hazptr::Shared<'g, T, U1>; 18 | 19 | const DELETE_TAG: usize = 1; 20 | 21 | /// A concurrent linked-list based ordered set. 22 | #[derive(Debug, Default)] 23 | pub(crate) struct OrderedSet { 24 | head: Atomic>, 25 | } 26 | 27 | impl OrderedSet 28 | where 29 | T: Ord + 'static, 30 | { 31 | /// Inserts a new node for the given `value` and returns `true`, if it did 32 | /// not already exist in the set. 
33 | #[inline] 34 | pub fn insert_node(&self, value: T, guards: &mut Guards) -> bool { 35 | let mut node = Owned::new(Node::new(value)); 36 | 37 | let success = loop { 38 | let elem = node.elem(); 39 | if let Insert { prev, next } = self.find(elem, guards) { 40 | node.next().store(next, Relaxed); 41 | // (ORD:1) this `Release` CAS synchronizes-with the `Acquire` CAS (ORD:3) and the 42 | // `Acquire` loads (ORD:4) and (ORD:5) 43 | match prev.compare_exchange(next, node, Release, Relaxed) { 44 | Ok(_) => break true, 45 | Err(failure) => node = failure.input, 46 | } 47 | } else { 48 | break false; 49 | } 50 | }; 51 | 52 | guards.release_all(); 53 | success 54 | } 55 | 56 | /// Tries to remove a node containing the given `value` from the set and 57 | /// returns `true`, if the value was found and successfully removed. 58 | #[inline] 59 | pub fn remove_node(&self, value: &Q, guards: &mut Guards) -> bool 60 | where 61 | T: Borrow, 62 | Q: Ord, 63 | { 64 | let success = loop { 65 | match self.find(value, guards) { 66 | Insert { .. } => break false, 67 | Found { prev, curr, next } => { 68 | let next_marked = Marked::marked(next, DELETE_TAG); 69 | // (ORD:2) this `Acquire` CAS synchronizes-with the `Release` CAS (ORD:1), 70 | // (ORD:3), (ORD:6) 71 | if curr.next().compare_exchange(next, next_marked, Acquire, Relaxed).is_err() { 72 | continue; 73 | } 74 | 75 | // (ORD:3) this `Release` CAS synchronizes-with the `Acquire` CAS (ORD:2) and 76 | // the `Acquire` loads (ORD:4) and (ORD:5) 77 | match prev.compare_exchange(curr, next, Release, Relaxed) { 78 | Ok(unlinked) => unsafe { unlinked.retire() }, 79 | Err(_) => { 80 | let _ = self.find(value, guards); 81 | } 82 | } 83 | 84 | break true; 85 | } 86 | }; 87 | }; 88 | 89 | guards.release_all(); 90 | success 91 | } 92 | 93 | /// Returns a reference to the value in the set, if any, that is equal to 94 | /// the given `value`. 95 | #[inline] 96 | pub fn get<'g, Q>(&self, value: &Q, guards: &'g mut Guards) -> Option<&'g T> 97 | where 98 | T: Borrow, 99 | Q: Ord, 100 | { 101 | match self.find(value, guards) { 102 | Found { curr, .. } => Some(Shared::into_ref(curr).elem()), 103 | Insert { .. } => None, 104 | } 105 | } 106 | 107 | // this function uses unsafe code internally, but the interface is safe: 108 | // the three guards are each advanced in turn and are guaranteed to eventually protect all of 109 | // the returned references. 110 | // FIXME: Try some refactoring when NLL+ is there? 
111 | fn find<'set, 'g, Q>(&'set self, value: &Q, guards: &'g mut Guards) -> FindResult<'set, 'g, T> 112 | where 113 | T: Borrow, 114 | Q: Ord, 115 | 'g: 'set, 116 | { 117 | 'retry: loop { 118 | // prev is still protected by guards.prev (except in the first iteration where prev == head) 119 | let mut prev = &self.head; 120 | // (ORD:4) this `Acquire` load synchronizes-with the `Release` CAS (ORD:1), (ORD:3) and 121 | // (ORD:6) 122 | // prev is protected by guards.curr and the node holding prev by guards.prev 123 | while let Some(curr_marked) = prev.load(Acquire, &mut guards.curr) { 124 | let (curr, curr_tag) = Shared::decompose(curr_marked); 125 | if curr_tag == DELETE_TAG { 126 | continue 'retry; 127 | } 128 | 129 | let curr_next: &'g Atomic> = unsafe { &*(curr.next() as *const _) }; 130 | let next_raw = curr_next.load_raw(Relaxed); 131 | 132 | // (ORD:5) this `Acquire` load synchronizes-with the `Release`CAS (ORD:1), 133 | // (ORD:3) and (ORD:6) 134 | // next is protected by guards.next 135 | match curr_next.load_marked_if_equal(next_raw, Acquire, &mut guards.next) { 136 | Err(_) => continue 'retry, 137 | Ok(next_marked) => { 138 | if prev.load_raw(Relaxed) != curr.as_marked_ptr() { 139 | continue 'retry; 140 | } 141 | 142 | let (next, next_tag) = Marked::decompose(next_marked); 143 | if next_tag == DELETE_TAG { 144 | // (ORD:6) this `Release` CAS synchronizes-with the `Acquire` CAS 145 | // (ORD:2) and the `Acquire` loads (ORD:4) and (ORD:5) 146 | match prev.compare_exchange(curr, next, Release, Relaxed) { 147 | Ok(unlinked) => unsafe { unlinked.retire() }, 148 | Err(_) => continue 'retry, 149 | }; 150 | } else { 151 | match curr.elem().borrow().cmp(value) { 152 | Equal => return unsafe { found_result(prev, curr, next) }, 153 | Greater => return unsafe { insert_result(prev, curr) }, 154 | _ => {} 155 | }; 156 | 157 | prev = curr_next; 158 | // the old prev is no longer be protected afterwards 159 | mem::swap(&mut guards.prev, &mut guards.curr); 160 | } 161 | } 162 | }; 163 | } 164 | 165 | return Insert { prev, next: None }; 166 | } 167 | } 168 | } 169 | 170 | impl Drop for OrderedSet { 171 | #[inline] 172 | fn drop(&mut self) { 173 | let mut node = self.head.take(); 174 | while let Some(mut curr) = node { 175 | node = curr.next.take(); 176 | } 177 | } 178 | } 179 | 180 | #[inline] 181 | unsafe fn found_result<'a, 'set: 'a, 'g: 'set, T: 'static>( 182 | prev: &'set Atomic>, 183 | curr: Shared<'a, Node>, 184 | next: Marked>>, 185 | ) -> FindResult<'set, 'g, T> { 186 | Found { prev, curr: Shared::cast(curr), next: next.map(|next| Shared::cast(next)) } 187 | } 188 | 189 | #[inline] 190 | unsafe fn insert_result<'a, 'set: 'a, 'g: 'set, T: 'static>( 191 | prev: &'set Atomic>, 192 | curr: Shared<'a, Node>, 193 | ) -> FindResult<'set, 'g, T> { 194 | Insert { prev, next: Some(Shared::cast(curr)) } 195 | } 196 | 197 | //////////////////////////////////////////////////////////////////////////////////////////////////// 198 | // Node 199 | //////////////////////////////////////////////////////////////////////////////////////////////////// 200 | 201 | #[derive(Debug)] 202 | struct Node { 203 | elem: CacheAligned, 204 | next: CacheAligned>>, 205 | } 206 | 207 | impl Node { 208 | #[inline] 209 | fn new(elem: T) -> Self { 210 | Self { elem: CacheAligned(elem), next: CacheAligned(Atomic::null()) } 211 | } 212 | 213 | #[inline] 214 | fn elem(&self) -> &T { 215 | CacheAligned::get(&self.elem) 216 | } 217 | 218 | #[inline] 219 | fn next(&self) -> &Atomic> { 220 | CacheAligned::get(&self.next) 221 | } 222 | 
} 223 | 224 | //////////////////////////////////////////////////////////////////////////////////////////////////// 225 | // FindResult 226 | //////////////////////////////////////////////////////////////////////////////////////////////////// 227 | 228 | enum FindResult<'set, 'g, T> { 229 | Found { 230 | prev: &'set Atomic>, 231 | curr: Shared<'g, Node>, 232 | next: Marked>>, 233 | }, 234 | Insert { 235 | prev: &'set Atomic>, 236 | next: Option>>, 237 | }, 238 | } 239 | -------------------------------------------------------------------------------- /examples/treiber/main.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | use std::sync::{ 3 | atomic::{AtomicUsize, Ordering}, 4 | Arc, 5 | }; 6 | use std::thread; 7 | 8 | mod stack; 9 | 10 | use crate::stack::Stack; 11 | 12 | #[repr(align(64))] 13 | struct ThreadCount(AtomicUsize); 14 | 15 | struct DropCount<'a>(&'a AtomicUsize); 16 | impl Drop for DropCount<'_> { 17 | fn drop(&mut self) { 18 | self.0.fetch_add(1, Ordering::Relaxed); 19 | } 20 | } 21 | 22 | fn main() { 23 | const THREADS: usize = 8; 24 | const PER_THREAD_ALLOCATIONS: usize = 10_000_000 + 1_000; 25 | static COUNTERS: [ThreadCount; THREADS] = [ 26 | ThreadCount(AtomicUsize::new(0)), 27 | ThreadCount(AtomicUsize::new(0)), 28 | ThreadCount(AtomicUsize::new(0)), 29 | ThreadCount(AtomicUsize::new(0)), 30 | ThreadCount(AtomicUsize::new(0)), 31 | ThreadCount(AtomicUsize::new(0)), 32 | ThreadCount(AtomicUsize::new(0)), 33 | ThreadCount(AtomicUsize::new(0)), 34 | ]; 35 | 36 | let stack = Arc::new(Stack::new()); 37 | let handles: Vec<_> = (0..THREADS) 38 | .map(|id| { 39 | let stack = Arc::clone(&stack); 40 | thread::spawn(move || { 41 | let counter = &COUNTERS[id].0; 42 | 43 | for _ in 0..1_000 { 44 | stack.push(DropCount(counter)); 45 | } 46 | 47 | for _ in 0..10_000_000 { 48 | let _res = stack.pop(); 49 | stack.push(DropCount(counter)); 50 | } 51 | 52 | println!( 53 | "thread {} reclaimed {:7} records before exiting", 54 | id, 55 | counter.load(Ordering::Relaxed) 56 | ); 57 | }) 58 | }) 59 | .collect(); 60 | 61 | for handle in handles { 62 | handle.join().unwrap(); 63 | } 64 | 65 | mem::drop(stack); 66 | let drop_sum = COUNTERS.iter().map(|local| local.0.load(Ordering::Relaxed)).sum(); 67 | 68 | assert_eq!(THREADS * PER_THREAD_ALLOCATIONS, drop_sum); 69 | println!("total dropped records: {}, no memory was leaked", drop_sum); 70 | } 71 | -------------------------------------------------------------------------------- /examples/treiber/stack.rs: -------------------------------------------------------------------------------- 1 | //! An implementation of Treiber's stack with hazard pointers. 2 | //! 3 | //! There is a total of three lines of unsafe code. 
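//!
//! A minimal usage sketch (illustrative only), assuming the `Stack` API defined below:
//!
//! ```ignore
//! let stack = Stack::new();
//! stack.push(1);
//! assert_eq!(stack.pop(), Some(1));
//! assert_eq!(stack.pop(), None);
//! ```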
4 | 5 | use std::mem::ManuallyDrop; 6 | use std::ptr; 7 | use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; 8 | 9 | use hazptr::typenum::U0; 10 | use hazptr::{Guard, Owned}; 11 | 12 | type Atomic = hazptr::Atomic; 13 | 14 | #[derive(Default)] 15 | pub struct Stack { 16 | head: Atomic>, 17 | } 18 | 19 | impl Stack { 20 | #[inline] 21 | pub const fn new() -> Self { 22 | Self { head: Atomic::null() } 23 | } 24 | 25 | #[inline] 26 | pub fn push(&self, elem: T) { 27 | let mut node = Owned::new(Node::new(elem)); 28 | let mut guard = Guard::new(); 29 | 30 | loop { 31 | let head = self.head.load(Relaxed, &mut guard); 32 | node.next.store(head, Relaxed); 33 | 34 | // (TRE:1) this `Release` CAS synchronizes-with the `Acquire` load in (TRE:2) 35 | match self.head.compare_exchange_weak(head, node, Release, Relaxed) { 36 | Ok(_) => return, 37 | Err(fail) => node = fail.input, 38 | } 39 | } 40 | } 41 | 42 | #[inline] 43 | pub fn pop(&self) -> Option { 44 | let mut guard = Guard::new(); 45 | 46 | // (TRE:2) this `Acquire` load synchronizes-with the `Release` CAS in (TRE:1) 47 | while let Some(head) = self.head.load(Acquire, &mut guard) { 48 | let next = head.next.load_unprotected(Relaxed); 49 | 50 | // (TRE:3) this `Release` CAS synchronizes-with the `Acquire` load in (TRE:2) 51 | if let Ok(unlinked) = self.head.compare_exchange_weak(head, next, Release, Relaxed) { 52 | // the `Drop` code for T is never called for retired nodes, so it is 53 | // safe to use `retire_unchecked` and not require that `T: 'static`. 54 | unsafe { 55 | let res = ptr::read(&*unlinked.elem); 56 | unlinked.retire_unchecked(); 57 | 58 | return Some(res); 59 | } 60 | } 61 | } 62 | 63 | None 64 | } 65 | } 66 | 67 | impl Drop for Stack { 68 | fn drop(&mut self) { 69 | let mut curr = self.head.take(); 70 | 71 | // it's necessary to manually drop all elements iteratively 72 | while let Some(mut node) = curr { 73 | unsafe { ManuallyDrop::drop(&mut node.elem) } 74 | curr = node.next.take(); 75 | } 76 | } 77 | } 78 | 79 | #[derive(Debug)] 80 | struct Node { 81 | elem: ManuallyDrop, 82 | next: Atomic>, 83 | } 84 | 85 | impl Node { 86 | #[inline] 87 | fn new(elem: T) -> Self { 88 | Self { elem: ManuallyDrop::new(elem), next: Atomic::null() } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /hazptr_rewrite/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hazptr_rewrite" 3 | version = "0.3.0" 4 | authors = ["oliver "] 5 | edition = "2018" 6 | 7 | [features] 8 | default = ["std"] 9 | 10 | std = ["arrayvec/std", "conquer-once/std", "conquer-reclaim/std"] 11 | 12 | [dependencies] 13 | cfg-if = "0.1.10" 14 | 15 | [dependencies.arrayvec] 16 | version = "0.5.1" 17 | default-features = false 18 | 19 | [dependencies.conquer-once] 20 | version = "0.2.0" 21 | default-features = false 22 | 23 | [dependencies.conquer-reclaim] 24 | #git = "https://github.com/oliver-giersch/conquer-reclaim" 25 | path = "../../conquer-reclaim" 26 | default-features = false 27 | 28 | [dependencies.conquer-util] 29 | # version = "0.2.0" 30 | git = "https://github.com/oliver-giersch/conquer-util" 31 | default-features = false 32 | features = ["align"] 33 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/config.rs: -------------------------------------------------------------------------------- 1 | const DEFAULT_SCAN_CACHE_SIZE: usize = 128; 2 | const DEFAULT_MAX_RESERVED_HAZARD_POINTERS: u32 = 
16; 3 | const DEFAULT_OPS_COUNT_THRESHOLD: u32 = 128; 4 | const DEFAULT_COUNT_STRATEGY: Operation = Operation::Retire; 5 | 6 | //////////////////////////////////////////////////////////////////////////////////////////////////// 7 | // ConfigBuilder 8 | //////////////////////////////////////////////////////////////////////////////////////////////////// 9 | 10 | #[derive(Copy, Clone, Debug, Default, Hash, Eq, Ord, PartialEq, PartialOrd)] 11 | pub struct ConfigBuilder { 12 | initial_scan_cache_size: Option, 13 | max_reserved_hazard_pointers: Option, 14 | ops_count_threshold: Option, 15 | count_strategy: Option, 16 | } 17 | 18 | /********** impl inherent *************************************************************************/ 19 | 20 | impl ConfigBuilder { 21 | #[inline] 22 | pub fn new() -> Self { 23 | Self::default() 24 | } 25 | 26 | #[inline] 27 | pub fn initial_scan_cache_size(mut self, val: usize) -> Self { 28 | self.initial_scan_cache_size = Some(val); 29 | self 30 | } 31 | 32 | #[inline] 33 | pub fn build(self) -> Config { 34 | Config { 35 | initial_scan_cache_size: self 36 | .initial_scan_cache_size 37 | .unwrap_or(DEFAULT_SCAN_CACHE_SIZE), 38 | max_reserved_hazard_pointers: self 39 | .max_reserved_hazard_pointers 40 | .unwrap_or(DEFAULT_MAX_RESERVED_HAZARD_POINTERS), 41 | ops_count_threshold: self.ops_count_threshold.unwrap_or(DEFAULT_OPS_COUNT_THRESHOLD), 42 | count_strategy: self.count_strategy.unwrap_or(DEFAULT_COUNT_STRATEGY), 43 | } 44 | } 45 | } 46 | 47 | //////////////////////////////////////////////////////////////////////////////////////////////////// 48 | // Config 49 | //////////////////////////////////////////////////////////////////////////////////////////////////// 50 | 51 | #[derive(Copy, Clone, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)] 52 | #[non_exhaustive] 53 | pub struct Config { 54 | pub initial_scan_cache_size: usize, 55 | pub max_reserved_hazard_pointers: u32, 56 | pub ops_count_threshold: u32, 57 | pub count_strategy: Operation, 58 | } 59 | 60 | /********* impl inherent **************************************************************************/ 61 | 62 | impl Config { 63 | #[inline] 64 | pub fn is_count_release(&self) -> bool { 65 | self.count_strategy == Operation::Release 66 | } 67 | 68 | #[inline] 69 | pub fn is_count_retire(&self) -> bool { 70 | self.count_strategy == Operation::Retire 71 | } 72 | } 73 | 74 | /********** impl Default **************************************************************************/ 75 | 76 | impl Default for Config { 77 | #[inline] 78 | fn default() -> Self { 79 | Self { 80 | initial_scan_cache_size: DEFAULT_SCAN_CACHE_SIZE, 81 | max_reserved_hazard_pointers: DEFAULT_MAX_RESERVED_HAZARD_POINTERS, 82 | ops_count_threshold: DEFAULT_OPS_COUNT_THRESHOLD, 83 | count_strategy: Default::default(), 84 | } 85 | } 86 | } 87 | 88 | //////////////////////////////////////////////////////////////////////////////////////////////////// 89 | // Operation 90 | //////////////////////////////////////////////////////////////////////////////////////////////////// 91 | 92 | #[derive(Copy, Clone, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)] 93 | #[non_exhaustive] 94 | pub enum Operation { 95 | Release, 96 | Retire, 97 | } 98 | 99 | /********** impl Default **************************************************************************/ 100 | 101 | impl Default for Operation { 102 | #[inline] 103 | fn default() -> Self { 104 | DEFAULT_COUNT_STRATEGY 105 | } 106 | } 107 | -------------------------------------------------------------------------------- 
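As a brief usage sketch for the configuration types above (a hypothetical unit test, not present in this excerpt; it only exercises the builder method and the default constants shown in `config.rs`):

```rust
#[cfg(test)]
mod tests {
    use super::{ConfigBuilder, Operation};

    #[test]
    fn builder_falls_back_to_defaults() {
        // only the initial scan cache size is overridden here
        let config = ConfigBuilder::new().initial_scan_cache_size(256).build();

        assert_eq!(config.initial_scan_cache_size, 256);
        // unset fields fall back to the constants defined at the top of the file
        assert_eq!(config.max_reserved_hazard_pointers, 16);
        assert_eq!(config.ops_count_threshold, 128);
        // the default count strategy is `Operation::Retire`
        assert_eq!(config.count_strategy, Operation::Retire);
        assert!(config.is_count_retire());
    }
}
```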
/hazptr_rewrite/src/default.rs: -------------------------------------------------------------------------------- 1 | use std::rc::Rc; 2 | use std::sync::RwLock; 3 | 4 | use conquer_once::Lazy; 5 | use conquer_reclaim::{BuildReclaimRef, GlobalReclaim, Reclaim, ReclaimRef, Retired}; 6 | 7 | use crate::config::Config; 8 | use crate::global::GlobalRef; 9 | use crate::guard::Guard; 10 | use crate::local::LocalHandle; 11 | use crate::retire::LocalRetire; 12 | 13 | type Local = crate::local::Local<'static>; 14 | type Hp = crate::Hp; 15 | 16 | /********** globals & thread-locals ***************************************************************/ 17 | 18 | /// The global hazard pointer configuration. 19 | pub static CONFIG: Lazy> = Lazy::new(Default::default); 20 | 21 | /// The global hazard pointer state. 22 | static HP: Lazy = Lazy::new(Default::default); 23 | 24 | thread_local!(static LOCAL: Rc = { 25 | let config = *CONFIG.read().unwrap(); 26 | Rc::new(Local::new(config, GlobalRef::from_ref(&HP.state))) 27 | }); 28 | 29 | //////////////////////////////////////////////////////////////////////////////////////////////////// 30 | // GlobalHP 31 | //////////////////////////////////////////////////////////////////////////////////////////////////// 32 | 33 | /// A handle to the global hazard pointer state. 34 | #[derive(Debug, Default)] 35 | pub struct GlobalHp; 36 | 37 | /********** impl GlobalReclaimer ******************************************************************/ 38 | 39 | impl GlobalReclaim for GlobalHp { 40 | #[inline] 41 | fn build_global_ref() -> Self::Ref { 42 | GlobalHpRef 43 | } 44 | } 45 | 46 | /********** impl Reclaimer ************************************************************************/ 47 | 48 | unsafe impl Reclaim for GlobalHp { 49 | type Header = ::Header; 50 | type Ref = GlobalHpRef; 51 | 52 | #[inline] 53 | fn new() -> Self { 54 | Self::default() 55 | } 56 | } 57 | 58 | //////////////////////////////////////////////////////////////////////////////////////////////////// 59 | // GlobalHpRef 60 | //////////////////////////////////////////////////////////////////////////////////////////////////// 61 | 62 | #[derive(Copy, Clone, Debug, Default)] 63 | pub struct GlobalHpRef; 64 | 65 | /********** impl BuildReclaimRef ******************************************************************/ 66 | 67 | impl<'a> BuildReclaimRef<'a> for GlobalHpRef { 68 | #[inline] 69 | fn from_ref(_: &'a Self::Reclaimer) -> Self { 70 | Self 71 | } 72 | } 73 | 74 | /********** impl ReclaimRef ***********************************************************************/ 75 | 76 | unsafe impl ReclaimRef for GlobalHpRef { 77 | type Guard = Guard<'static, 'static, Self::Reclaimer>; 78 | type Reclaimer = GlobalHp; 79 | 80 | #[inline] 81 | unsafe fn from_raw(_: &Self::Reclaimer) -> Self { 82 | Self 83 | } 84 | 85 | #[inline] 86 | fn into_guard(self) -> Self::Guard { 87 | LOCAL.with(|local| Guard::with_handle(LocalHandle::from_owned(Rc::clone(local)))) 88 | } 89 | 90 | #[inline] 91 | unsafe fn retire(self, record: Retired) { 92 | LOCAL.with(move |local| { 93 | local.retire(record.into_raw()); 94 | }); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/global.rs: -------------------------------------------------------------------------------- 1 | use core::convert::AsRef; 2 | use core::sync::atomic::{self, Ordering}; 3 | 4 | use crate::hazard::{HazardList, HazardPtr, ProtectStrategy, ProtectedPtr, ProtectedResult}; 5 | use 
crate::retire::GlobalRetireState; 6 | 7 | //////////////////////////////////////////////////////////////////////////////////////////////////// 8 | // GlobalRef 9 | //////////////////////////////////////////////////////////////////////////////////////////////////// 10 | 11 | #[derive(Debug)] 12 | pub(crate) struct GlobalRef<'global> { 13 | inner: Ref<'global>, 14 | } 15 | 16 | /********** impl inherent *************************************************************************/ 17 | 18 | impl<'global> GlobalRef<'global> { 19 | /// Creates a new [`GlobalRef`] from the reference `global` which is 20 | /// consequently bound to its lifetime. 21 | #[inline] 22 | pub fn from_ref(global: &'global Global) -> Self { 23 | Self { inner: Ref::Ref(global) } 24 | } 25 | } 26 | 27 | impl GlobalRef<'_> { 28 | /// Creates a new [`GlobalRef`] from the raw pointer `global`. 29 | /// 30 | /// # Safety 31 | /// 32 | /// The caller has to ensure that the resulting [`GlobalRef`] does not 33 | /// outlive the [`Global`] it points to. 34 | #[inline] 35 | pub unsafe fn from_raw(global: *const Global) -> Self { 36 | Self { inner: Ref::Raw(global) } 37 | } 38 | } 39 | 40 | /********** impl AsRef ****************************************************************************/ 41 | 42 | impl<'global> AsRef for GlobalRef<'global> { 43 | #[inline] 44 | fn as_ref(&self) -> &Global { 45 | match &self.inner { 46 | Ref::Ref(global) => *global, 47 | Ref::Raw(ref global) => unsafe { &**global }, 48 | } 49 | } 50 | } 51 | 52 | //////////////////////////////////////////////////////////////////////////////////////////////////// 53 | // Global 54 | //////////////////////////////////////////////////////////////////////////////////////////////////// 55 | 56 | #[derive(Debug)] 57 | pub(crate) struct Global { 58 | pub(crate) retire_state: GlobalRetireState, 59 | hazards: HazardList, 60 | } 61 | 62 | /********** impl inherent *************************************************************************/ 63 | 64 | impl Global { 65 | #[inline] 66 | pub const fn new(retire_state: GlobalRetireState) -> Self { 67 | Self { retire_state, hazards: HazardList::new() } 68 | } 69 | 70 | #[inline] 71 | pub fn get_hazard(&self, strategy: ProtectStrategy) -> &HazardPtr { 72 | match strategy { 73 | ProtectStrategy::ReserveOnly => self.hazards.get_or_insert_reserved_hazard(), 74 | ProtectStrategy::Protect(protected) => { 75 | self.hazards.get_or_insert_hazard(protected.into_inner()) 76 | } 77 | } 78 | } 79 | 80 | #[inline] 81 | pub fn collect_protected_hazards(&self, vec: &mut Vec, order: Ordering) { 82 | assert_eq!(order, Ordering::SeqCst, "this method must have `SeqCst` ordering"); 83 | vec.clear(); 84 | 85 | atomic::fence(Ordering::SeqCst); 86 | 87 | for hazard in self.hazards.iter() { 88 | match hazard.protected(Ordering::Relaxed) { 89 | ProtectedResult::Protected(protected) => vec.push(protected), 90 | ProtectedResult::Abort => return, 91 | _ => {} 92 | } 93 | } 94 | } 95 | } 96 | 97 | //////////////////////////////////////////////////////////////////////////////////////////////////// 98 | // Ref 99 | //////////////////////////////////////////////////////////////////////////////////////////////////// 100 | 101 | /// A reference to a [`Global`] that is either safe but lifetime-bound or unsafe 102 | /// and lifetime-independent (a raw pointer). 
103 | #[derive(Debug)] 104 | enum Ref<'a> { 105 | Ref(&'a Global), 106 | Raw(*const Global), 107 | } 108 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/guard.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::Ordering; 2 | 3 | use conquer_reclaim::conquer_pointer::{ 4 | MarkedPtr, 5 | MaybeNull::{self, NotNull, Null}, 6 | }; 7 | use conquer_reclaim::typenum::Unsigned; 8 | use conquer_reclaim::{Atomic, NotEqualError, Protect, Reclaim, Shared}; 9 | 10 | use crate::config::Operation; 11 | use crate::hazard::{HazardPtr, ProtectStrategy}; 12 | use crate::local::LocalHandle; 13 | 14 | //////////////////////////////////////////////////////////////////////////////////////////////////// 15 | // Guard 16 | //////////////////////////////////////////////////////////////////////////////////////////////////// 17 | 18 | pub struct Guard<'local, 'global, R> { 19 | /// Hazards are borrowed through the local handle from global state, so they 20 | /// act like `'global` references. 21 | hazard: *const HazardPtr, 22 | /// Each guard contains an e.g. reference-counted local handle which is 23 | /// accessed when a guard is cloned or dropped. 24 | local: LocalHandle<'local, 'global, R>, 25 | } 26 | 27 | /********** impl Clone ****************************************************************************/ 28 | 29 | impl Clone for Guard<'_, '_, R> { 30 | #[inline] 31 | fn clone(&self) -> Self { 32 | let local = self.local.clone(); 33 | let hazard = match unsafe { (*self.hazard).protected(Ordering::Relaxed).protected() } { 34 | Some(protected) => local.as_ref().get_hazard(ProtectStrategy::Protect(protected)), 35 | None => local.as_ref().get_hazard(ProtectStrategy::ReserveOnly), 36 | }; 37 | 38 | Self { hazard, local } 39 | } 40 | 41 | #[inline] 42 | fn clone_from(&mut self, source: &Self) { 43 | unsafe { 44 | // TODO: is relaxed enough? 45 | if let Some(protected) = (*source.hazard).protected(Ordering::Relaxed).protected() { 46 | (*self.hazard).set_protected(protected.into_inner(), Ordering::SeqCst); 47 | } 48 | } 49 | } 50 | } 51 | 52 | /********** impl inherent *************************************************************************/ 53 | 54 | impl<'local, 'global, R> Guard<'local, 'global, R> { 55 | #[inline] 56 | pub fn with_handle(local: LocalHandle<'local, 'global, R>) -> Self { 57 | let hazard = local.as_ref().get_hazard(ProtectStrategy::ReserveOnly); 58 | Self { hazard, local } 59 | } 60 | } 61 | 62 | /********** impl Drop *****************************************************************************/ 63 | 64 | impl<'local, 'global, R> Drop for Guard<'local, 'global, R> { 65 | #[inline] 66 | fn drop(&mut self) { 67 | let local = self.local.as_ref(); 68 | local.try_increase_ops_count(Operation::Release); 69 | let hazard = unsafe { &*self.hazard }; 70 | if local.try_recycle_hazard(hazard).is_err() { 71 | hazard.set_free(Ordering::Release); 72 | } 73 | } 74 | } 75 | 76 | /********** impl Protect **************************************************************************/ 77 | 78 | macro_rules! 
release { 79 | ($self:ident, $tag:expr) => {{ 80 | $self.release(); 81 | Null($tag) 82 | }}; 83 | } 84 | 85 | unsafe impl Protect for Guard<'_, '_, R> { 86 | type Reclaimer = R; 87 | 88 | #[inline] 89 | fn release(&mut self) { 90 | self.local.as_ref().try_increase_ops_count(Operation::Release); 91 | unsafe { (*self.hazard).set_thread_reserved(Ordering::Release) }; 92 | } 93 | 94 | #[inline] 95 | fn protect( 96 | &mut self, 97 | src: &Atomic, 98 | order: Ordering, 99 | ) -> MaybeNull> { 100 | match MaybeNull::from(src.load_raw(Ordering::Relaxed)) { 101 | Null(tag) => release!(self, tag), 102 | NotNull(ptr) => { 103 | let mut protect = ptr.decompose_non_null(); 104 | unsafe { (*self.hazard).set_protected(protect.cast(), Ordering::SeqCst) }; 105 | 106 | loop { 107 | match MaybeNull::from(src.load_raw(order)) { 108 | Null(tag) => return release!(self, tag), 109 | NotNull(ptr) => { 110 | let temp = ptr.decompose_non_null(); 111 | if protect == temp { 112 | return NotNull(unsafe { Shared::from_marked_non_null(ptr) }); 113 | } 114 | 115 | unsafe { (*self.hazard).set_protected(temp.cast(), Ordering::SeqCst) }; 116 | protect = temp; 117 | } 118 | } 119 | } 120 | } 121 | } 122 | } 123 | 124 | #[inline] 125 | fn protect_if_equal( 126 | &mut self, 127 | src: &Atomic, 128 | expected: MarkedPtr, 129 | order: Ordering, 130 | ) -> Result>, NotEqualError> { 131 | let raw = src.load_raw(order); 132 | if raw != expected { 133 | return Err(NotEqualError); 134 | } 135 | 136 | match MaybeNull::from(raw) { 137 | Null(tag) => Ok(release!(self, tag)), 138 | NotNull(ptr) => { 139 | let protect = ptr.decompose_non_null().cast(); 140 | unsafe { (*self.hazard).set_protected(protect, Ordering::SeqCst) }; 141 | 142 | if src.load_raw(order) == ptr.into_marked_ptr() { 143 | Ok(NotNull(unsafe { Shared::from_marked_non_null(ptr) })) 144 | } else { 145 | unsafe { (*self.hazard).set_thread_reserved(Ordering::Release) }; 146 | Err(NotEqualError) 147 | } 148 | } 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/hazard/list.rs: -------------------------------------------------------------------------------- 1 | //! An iterable lock-free data structure for storing hazard pointers. 2 | 3 | use core::iter::FusedIterator; 4 | use core::mem::{self, MaybeUninit}; 5 | use core::ptr::{self, NonNull}; 6 | use core::sync::atomic::{AtomicPtr, Ordering}; 7 | 8 | #[cfg(not(feature = "std"))] 9 | use alloc::boxed::Box; 10 | 11 | use conquer_util::align::Aligned128 as CacheAligned; 12 | 13 | use crate::hazard::{HazardPtr, FREE, NOT_YET_USED, THREAD_RESERVED}; 14 | 15 | /// The number of elements is chosen so that 31 hazards aligned to 128-byte and 16 | /// one likewise aligned next pointer fit into a 4096 byte memory page. 17 | const ELEMENTS: usize = 31; 18 | 19 | //////////////////////////////////////////////////////////////////////////////////////////////////// 20 | // HazardList 21 | //////////////////////////////////////////////////////////////////////////////////////////////////// 22 | 23 | /// A linked list of [`HazardArrayNode`]s containing re-usable hazard pointers. 24 | /// 25 | /// When requesting a hazard pointer, the list is traversed from head to tail 26 | /// and each node is searched for a [`FREE`] hazard pointer. 27 | /// If none can be found a new node is appended to the list's tail. 28 | /// In order to avoid having to deal with memory reclamation the list never 29 | /// shrinks and hence maintains its maximum extent at all times. 
30 | #[derive(Debug, Default)] 31 | pub(crate) struct HazardList { 32 | /// Atomic pointer to the head of the linked list. 33 | head: AtomicPtr, 34 | } 35 | 36 | /********** impl inherent *************************************************************************/ 37 | 38 | impl HazardList { 39 | /// Creates a new empty [`HazardList`]. 40 | #[inline] 41 | pub const fn new() -> Self { 42 | Self { head: AtomicPtr::new(ptr::null_mut()) } 43 | } 44 | 45 | /// Acquires a thread-reserved hazard pointer. 46 | #[cold] 47 | #[inline(never)] 48 | #[must_use = "discarding a reserved hazard pointer without freeing it renders it unusable"] 49 | pub fn get_or_insert_reserved_hazard(&self) -> &HazardPtr { 50 | unsafe { self.get_or_insert_unchecked(THREAD_RESERVED, Ordering::Relaxed) } 51 | } 52 | 53 | /// Acquires a hazard pointer and sets it to point at `protected`. 54 | #[cold] 55 | #[inline(never)] 56 | #[must_use = "discarding a reserved hazard pointer without freeing it renders it unusable"] 57 | pub fn get_or_insert_hazard(&self, protect: NonNull<()>) -> &HazardPtr { 58 | unsafe { self.get_or_insert_unchecked(protect.as_ptr() as _, Ordering::SeqCst) } 59 | } 60 | 61 | /// Returns an iterator over all currently allocated [`HazardPointers`]. 62 | #[inline] 63 | pub fn iter(&self) -> Iter { 64 | Iter { idx: 0, curr: unsafe { self.head.load(Ordering::Acquire).as_ref() } } 65 | } 66 | 67 | #[inline] 68 | unsafe fn get_or_insert_unchecked(&self, protect: *const (), order: Ordering) -> &HazardPtr { 69 | let mut prev = &self.head as *const AtomicPtr; 70 | let mut curr = (*prev).load(Ordering::Acquire); 71 | 72 | // iterate the linked list of hazard nodes 73 | while !curr.is_null() { 74 | // try to acquire a hazard pointer in the current node 75 | if let Some(hazard) = self.try_insert_in_node(curr as *const _, protect, order) { 76 | return hazard; 77 | } 78 | 79 | prev = &(*curr).next.aligned as *const _; 80 | curr = (*prev).load(Ordering::Acquire); 81 | } 82 | 83 | // no hazard pointer could be acquired in any already allocated node, insert a new node at 84 | // the tail of the list 85 | self.insert_back(prev, protect, order) 86 | } 87 | 88 | #[inline] 89 | unsafe fn insert_back( 90 | &self, 91 | mut tail: *const AtomicPtr, 92 | protected: *const (), 93 | order: Ordering, 94 | ) -> &HazardPtr { 95 | // allocates a new hazard node with the first hazard already set to `protected` 96 | let node = Box::into_raw(Box::new(HazardArrayNode::new(protected))); 97 | while let Err(tail_node) = 98 | (*tail).compare_exchange(ptr::null_mut(), node, Ordering::AcqRel, Ordering::Acquire) 99 | { 100 | // try insert in tail node, on success return and deallocate node again 101 | if let Some(hazard) = self.try_insert_in_node(tail_node, protected, order) { 102 | Box::from_raw(node); 103 | return hazard; 104 | } 105 | 106 | // update the local tail pointer 107 | tail = &(*tail_node).next.aligned; 108 | } 109 | 110 | &(*node).elements[0].aligned 111 | } 112 | 113 | #[inline] 114 | unsafe fn try_insert_in_node( 115 | &self, 116 | node: *const HazardArrayNode, 117 | protected: *const (), 118 | order: Ordering, 119 | ) -> Option<&HazardPtr> { 120 | // attempts to acquire every hazard pointer in the current `node` once 121 | for element in &(*node).elements[..] 
{ 122 | let hazard = &element.aligned; 123 | let current = hazard.protected.load(Ordering::Relaxed); 124 | let success = (current == FREE || current == NOT_YET_USED) 125 | && hazard 126 | .protected 127 | .compare_exchange(current, protected as *mut (), order, Ordering::Relaxed) 128 | .is_ok(); 129 | 130 | // the hazard pointer was successfully set to `protected` 131 | if success { 132 | return Some(hazard); 133 | } 134 | } 135 | 136 | None 137 | } 138 | } 139 | 140 | /********** impl Drop *****************************************************************************/ 141 | 142 | impl Drop for HazardList { 143 | #[inline(never)] 144 | fn drop(&mut self) { 145 | let mut curr = self.head.load(Ordering::Relaxed); 146 | while !curr.is_null() { 147 | let node = unsafe { Box::from_raw(curr) }; 148 | curr = node.next.aligned.load(Ordering::Relaxed); 149 | } 150 | } 151 | } 152 | 153 | //////////////////////////////////////////////////////////////////////////////////////////////////// 154 | // Iter 155 | //////////////////////////////////////////////////////////////////////////////////////////////////// 156 | 157 | pub(crate) struct Iter<'a> { 158 | idx: usize, 159 | curr: Option<&'a HazardArrayNode>, 160 | } 161 | 162 | /********** impl Iterator *************************************************************************/ 163 | 164 | impl<'a> Iterator for Iter<'a> { 165 | type Item = &'a HazardPtr; 166 | 167 | #[inline] 168 | fn next(&mut self) -> Option { 169 | // this loop is executed at most twice 170 | while let Some(node) = self.curr { 171 | if self.idx < ELEMENTS { 172 | let idx = self.idx; 173 | self.idx += 1; 174 | return Some(&node.elements[idx].aligned); 175 | } else { 176 | self.curr = unsafe { node.next.aligned.load(Ordering::Acquire).as_ref() }; 177 | self.idx = 0; 178 | } 179 | } 180 | 181 | None 182 | } 183 | } 184 | 185 | /********** impl FusedIterator ********************************************************************/ 186 | 187 | impl FusedIterator for Iter<'_> {} 188 | 189 | //////////////////////////////////////////////////////////////////////////////////////////////////// 190 | // HazardArrayNode 191 | //////////////////////////////////////////////////////////////////////////////////////////////////// 192 | 193 | struct HazardArrayNode { 194 | elements: [CacheAligned; ELEMENTS], 195 | next: CacheAligned>, 196 | } 197 | 198 | /********** impl inherent *************************************************************************/ 199 | 200 | impl HazardArrayNode { 201 | #[inline] 202 | fn new(protected: *const ()) -> Self { 203 | let mut elements: [MaybeUninit>; ELEMENTS] = 204 | unsafe { MaybeUninit::uninit().assume_init() }; 205 | 206 | elements[0] = MaybeUninit::new(CacheAligned::new(HazardPtr::with_protected(protected))); 207 | for elem in &mut elements[1..] 
{ 208 | *elem = MaybeUninit::new(CacheAligned::new(HazardPtr::new())); 209 | } 210 | 211 | Self { 212 | elements: unsafe { mem::transmute(elements) }, 213 | next: CacheAligned::new(AtomicPtr::default()), 214 | } 215 | } 216 | } 217 | 218 | #[cfg(test)] 219 | mod tests { 220 | use core::ptr::NonNull; 221 | use core::sync::atomic::Ordering; 222 | 223 | use super::{HazardList, ELEMENTS}; 224 | use crate::hazard::ProtectedResult::Unprotected; 225 | 226 | #[test] 227 | fn new() { 228 | let list = HazardList::new(); 229 | assert!(list.iter().next().is_none()); 230 | } 231 | 232 | #[test] 233 | fn insert_one() { 234 | let list = HazardList::new(); 235 | let hazard = list.get_or_insert_reserved_hazard(); 236 | assert_eq!(hazard as *const _, list.iter().next().unwrap() as *const _); 237 | } 238 | 239 | #[test] 240 | fn insert_full_node() { 241 | let list = HazardList::new(); 242 | 243 | for _ in 0..ELEMENTS { 244 | let _ = list.get_or_insert_reserved_hazard(); 245 | } 246 | 247 | let vec: Vec<_> = list.iter().collect(); 248 | assert_eq!(vec.len(), ELEMENTS); 249 | } 250 | 251 | #[test] 252 | fn insert_reserved_full_node_plus_one() { 253 | let list = HazardList::new(); 254 | 255 | #[allow(clippy::range_plus_one)] 256 | for _ in 0..ELEMENTS + 1 { 257 | let _ = list.get_or_insert_reserved_hazard(); 258 | } 259 | 260 | let hazards: Vec<_> = list.iter().collect(); 261 | 262 | assert_eq!(hazards.len(), 2 * ELEMENTS); 263 | assert_eq!( 264 | hazards 265 | .iter() 266 | .take_while(|hazard| hazard.protected(Ordering::Relaxed) == Unprotected) 267 | .count(), 268 | ELEMENTS + 1 269 | ); 270 | } 271 | 272 | #[test] 273 | fn insert_protected_full_node_plus_one() { 274 | let list = HazardList::new(); 275 | let protect = NonNull::from(&mut 1); 276 | 277 | #[allow(clippy::range_plus_one)] 278 | for _ in 0..ELEMENTS + 1 { 279 | let _ = list.get_or_insert_hazard(protect.cast()); 280 | } 281 | 282 | let hazards: Vec<_> = list 283 | .iter() 284 | .take_while(|hazard| hazard.protected(Ordering::Relaxed).protected().is_some()) 285 | .collect(); 286 | assert_eq!(hazards.len(), ELEMENTS + 1); 287 | } 288 | 289 | #[test] 290 | fn reuse_hazard_from_list() { 291 | let list = HazardList::new(); 292 | 293 | for _ in 0..ELEMENTS + (ELEMENTS / 2) { 294 | let _ = list.get_or_insert_reserved_hazard(); 295 | } 296 | 297 | let hazards: Vec<_> = list.iter().collect(); 298 | 299 | let inner_hazard = hazards[ELEMENTS - 2]; 300 | inner_hazard.set_free(Ordering::Relaxed); 301 | 302 | let acquired_hazard = list.get_or_insert_reserved_hazard(); 303 | assert_eq!(inner_hazard as *const _, acquired_hazard as *const _); 304 | } 305 | } 306 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/hazard/mod.rs: -------------------------------------------------------------------------------- 1 | mod list; 2 | 3 | use core::ptr::NonNull; 4 | use core::sync::atomic::{AtomicPtr, Ordering}; 5 | 6 | pub(crate) use self::list::HazardList; 7 | 8 | const FREE: *mut () = 0 as *mut (); 9 | const THREAD_RESERVED: *mut () = 1 as *mut (); 10 | const NOT_YET_USED: *mut () = 2 as *mut (); 11 | 12 | //////////////////////////////////////////////////////////////////////////////////////////////////// 13 | // HazardPtr 14 | //////////////////////////////////////////////////////////////////////////////////////////////////// 15 | 16 | /// A pointer that must visible to all threads that indicates that the currently 17 | /// pointed-to value is in use by some thread and therefore protected from 18 | /// reclamation, i.e. 
it must not be de-allocated. 19 | #[derive(Debug)] 20 | pub(crate) struct HazardPtr { 21 | protected: AtomicPtr<()>, 22 | } 23 | 24 | /********** impl Hazard ***************************************************************************/ 25 | 26 | impl HazardPtr { 27 | /// Sets the [`HazardPtr`] free meaning it can be acquired by other threads 28 | /// and the previous value is no longer protected. 29 | #[inline] 30 | pub fn set_free(&self, order: Ordering) { 31 | self.protected.store(FREE, order); 32 | } 33 | 34 | /// Sets the [`HazardPtr`] as thread-reserved meaning the previous value is 35 | /// no longer protected but the pointer is still logically owned by the 36 | /// calling thread. 37 | #[inline] 38 | pub fn set_thread_reserved(&self, order: Ordering) { 39 | self.protected.store(THREAD_RESERVED, order); 40 | } 41 | 42 | #[inline] 43 | pub fn protected(&self, order: Ordering) -> ProtectedResult { 44 | match self.protected.load(order) { 45 | FREE | THREAD_RESERVED => ProtectedResult::Unprotected, 46 | NOT_YET_USED => ProtectedResult::Abort, 47 | ptr => ProtectedResult::Protected(ProtectedPtr(NonNull::new(ptr).unwrap())), 48 | } 49 | } 50 | 51 | #[inline] 52 | pub fn set_protected(&self, protected: NonNull<()>, order: Ordering) { 53 | assert_eq!(order, Ordering::SeqCst, "this method requires sequential consistency"); 54 | self.protected.store(protected.as_ptr(), order); 55 | } 56 | 57 | /// Creates a new [`HazardPointer`]. 58 | #[inline] 59 | const fn new() -> Self { 60 | Self { protected: AtomicPtr::new(NOT_YET_USED) } 61 | } 62 | 63 | /// Creates a new [`HazardPointer`] set to initially set to `protected`. 64 | #[inline] 65 | const fn with_protected(protected: *const ()) -> Self { 66 | Self { protected: AtomicPtr::new(protected as *mut _) } 67 | } 68 | } 69 | 70 | //////////////////////////////////////////////////////////////////////////////////////////////////// 71 | // ProtectedResult 72 | //////////////////////////////////////////////////////////////////////////////////////////////////// 73 | 74 | /// The result of a call to [`protected`][HazardPtr::protected]. 75 | #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] 76 | pub(crate) enum ProtectedResult { 77 | /// Indicates that the hazard pointer currently protects some value. 78 | Protected(ProtectedPtr), 79 | /// Indicates that the hazard pointer currently does not protect any value. 80 | Unprotected, 81 | /// Indicates that hazard pointer has never been used before. 82 | /// 83 | /// Since hazard pointers are acquired in order this means that any 84 | /// iteration of all hazard pointers can abort early, since no subsequent 85 | /// hazards pointers could be in use either. 86 | Abort, 87 | } 88 | 89 | /********** impl inherent *************************************************************************/ 90 | 91 | impl ProtectedResult { 92 | #[inline] 93 | pub fn protected(self) -> Option { 94 | match self { 95 | ProtectedResult::Protected(protected) => Some(protected), 96 | _ => None, 97 | } 98 | } 99 | } 100 | 101 | //////////////////////////////////////////////////////////////////////////////////////////////////// 102 | // ProtectedPtr 103 | //////////////////////////////////////////////////////////////////////////////////////////////////// 104 | 105 | /// An untyped pointer protected from reclamation, because it is stored within a hazard pair. 106 | /// 107 | /// The type information is deliberately stripped as it is not needed in order to determine whether 108 | /// a pointer is protected or not. 
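///
/// # Example (illustrative sketch, not from the original source)
///
/// Type information is erased by casting to `NonNull<()>` before a pointer is
/// stored in a hazard slot (assuming some `hazard: &HazardPtr` is at hand),
/// mirroring what the unit tests in this module do:
///
/// ```ignore
/// let mut value = 1;
/// let untyped: NonNull<()> = NonNull::from(&mut value).cast();
/// hazard.set_protected(untyped, Ordering::SeqCst);
/// ```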
109 | #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] 110 | pub struct ProtectedPtr(NonNull<()>); 111 | 112 | /********** impl inherent *************************************************************************/ 113 | 114 | impl ProtectedPtr { 115 | /// Gets the internal non-nullable pointer. 116 | #[inline] 117 | pub fn into_inner(self) -> NonNull<()> { 118 | self.0 119 | } 120 | 121 | /// Gets the memory address of the [`ProtectedPtr`]. 122 | #[inline] 123 | pub fn address(self) -> usize { 124 | self.0.as_ptr() as usize 125 | } 126 | } 127 | 128 | //////////////////////////////////////////////////////////////////////////////////////////////////// 129 | // ProtectStrategy 130 | //////////////////////////////////////////////////////////////////////////////////////////////////// 131 | 132 | pub(crate) enum ProtectStrategy { 133 | ReserveOnly, 134 | Protect(ProtectedPtr), 135 | } 136 | 137 | #[cfg(test)] 138 | mod tests { 139 | use core::ptr::NonNull; 140 | use core::sync::atomic::Ordering; 141 | 142 | use super::{HazardPtr, ProtectedResult}; 143 | 144 | #[test] 145 | fn hazard_ptr() { 146 | let hazard = HazardPtr::new(); 147 | assert_eq!(hazard.protected(Ordering::Relaxed), ProtectedResult::Abort); 148 | hazard.set_protected(NonNull::from(&mut 1).cast(), Ordering::Relaxed); 149 | assert!(hazard.protected(Ordering::Relaxed).protected().is_some()); 150 | hazard.set_thread_reserved(Ordering::Relaxed); 151 | assert_eq!(hazard.protected(Ordering::Relaxed), ProtectedResult::Unprotected); 152 | hazard.set_free(Ordering::Relaxed); 153 | assert_eq!(hazard.protected(Ordering::Relaxed), ProtectedResult::Unprotected); 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(any(test, feature = "std")), no_std)] 2 | 3 | #[cfg(not(feature = "std"))] 4 | extern crate alloc; 5 | 6 | #[cfg(feature = "std")] 7 | mod default; 8 | 9 | mod config; 10 | mod global; 11 | mod guard; 12 | mod hazard; 13 | mod local; 14 | mod queue; 15 | mod retire; 16 | 17 | use conquer_reclaim::Reclaim; 18 | 19 | pub use crate::config::{Config, ConfigBuilder, Operation}; 20 | pub use crate::local::{Local, LocalHandle}; 21 | pub use crate::retire::{GlobalRetire, LocalRetire}; 22 | 23 | use crate::global::{Global, GlobalRef}; 24 | use crate::retire::global_retire::Header; 25 | use crate::retire::{GlobalRetireState, RetireStrategy}; 26 | 27 | //////////////////////////////////////////////////////////////////////////////////////////////////// 28 | // Hp 29 | //////////////////////////////////////////////////////////////////////////////////////////////////// 30 | 31 | /// The global state for the hazard pointer memory reclamation scheme. 32 | #[derive(Debug)] 33 | pub struct Hp { 34 | state: Global, 35 | retire_strategy: S, 36 | } 37 | 38 | /********** impl inherent *************************************************************************/ 39 | 40 | impl Hp { 41 | /// Builds a new instance of a [`Local`] that stores a reference (i.e. 42 | /// borrows) the internal global state of `self`. 43 | /// 44 | /// If `config` wraps a [`Config`] instance this instance is used to 45 | /// supply the [`Local`]'s internal configuration, otherwise the default 46 | /// configuration is applied. 
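///
/// # Example (illustrative sketch, not from the original source)
///
/// Constructing the reclaimer with the local retire strategy and deriving a
/// lifetime-bound [`Local`] from it:
///
/// ```ignore
/// let hp = Hp::<LocalRetire>::default();
/// let local = hp.build_local(None); // uses the default `Config`
/// ```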
47 | #[inline] 48 | pub fn build_local(&self, config: Option) -> Local { 49 | Local::new(config.unwrap_or_default(), GlobalRef::from_ref(&self.state)) 50 | } 51 | 52 | /// Builds a new instance of a [`Local`] that stores a pointer (i.e. without 53 | /// borrowing) the internal global state of `self`. 54 | /// 55 | /// If `config` wraps a [`Config`] instance this instance is used to 56 | /// supply the [`Local`]'s internal configuration, otherwise the default 57 | /// configuration is applied. 58 | /// 59 | /// # Safety 60 | /// 61 | /// The resulting [`Local`] is not lifetime-dependent on the [`Hp`] instance 62 | /// it is derived from, which allows e.g. self-referential types. 63 | /// The caller is required, however, to ensure that the [`Local`] instance 64 | /// does not outlive `self`. 65 | #[inline] 66 | pub unsafe fn build_local_unchecked(&self, config: Option) -> Local<'_> { 67 | Local::new(config.unwrap_or_default(), GlobalRef::from_raw(&self.state)) 68 | } 69 | } 70 | 71 | /********** impl Default **************************************************************************/ 72 | 73 | impl Default for Hp { 74 | #[inline] 75 | fn default() -> Self { 76 | Self { 77 | state: Global::new(GlobalRetireState::global_strategy()), 78 | retire_strategy: GlobalRetire, 79 | } 80 | } 81 | } 82 | 83 | impl Default for Hp { 84 | #[inline] 85 | fn default() -> Self { 86 | Self { 87 | state: Global::new(GlobalRetireState::local_strategy()), 88 | retire_strategy: LocalRetire, 89 | } 90 | } 91 | } 92 | 93 | /********** impl Reclaim **************************************************************************/ 94 | 95 | unsafe impl Reclaim for Hp { 96 | // the global retire strategy requires each record to have a specific 97 | // header. 98 | type Header = Header; 99 | type Ref = LocalHandle<'static, 'static, Self>; 100 | 101 | #[inline] 102 | fn new() -> Self { 103 | Default::default() 104 | } 105 | } 106 | 107 | unsafe impl Reclaim for Hp { 108 | type Header = (); 109 | type Ref = LocalHandle<'static, 'static, Self>; 110 | 111 | #[inline] 112 | fn new() -> Self { 113 | Default::default() 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/local/inner.rs: -------------------------------------------------------------------------------- 1 | use core::mem::ManuallyDrop; 2 | use core::ptr; 3 | use core::sync::atomic::Ordering; 4 | 5 | use arrayvec::{ArrayVec, CapacityError}; 6 | use conquer_reclaim::RawRetired; 7 | 8 | use crate::config::{Config, Operation}; 9 | use crate::global::GlobalRef; 10 | use crate::hazard::{HazardPtr, ProtectStrategy, ProtectedPtr}; 11 | use crate::retire::{GlobalRetireState, LocalRetireState}; 12 | 13 | //////////////////////////////////////////////////////////////////////////////////////////////////// 14 | // RecycleError 15 | //////////////////////////////////////////////////////////////////////////////////////////////////// 16 | 17 | /// Error type for thread local recycle operations. 
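///
/// # Example (illustrative sketch, not from the original source)
///
/// This error is returned when the bounded per-thread hazard cache
/// (`HAZARD_CACHE`, 16 slots) is already full; the caller then falls back to
/// releasing the hazard globally, as `Guard::drop` does (assuming `local` and
/// `hazard` references are in scope):
///
/// ```ignore
/// if local.try_recycle_hazard(hazard).is_err() {
///     hazard.set_free(Ordering::Release);
/// }
/// ```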
18 | #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] 19 | pub(crate) struct RecycleError; 20 | 21 | /********** impl From *****************************************************************************/ 22 | 23 | impl From> for RecycleError { 24 | #[inline] 25 | fn from(_: CapacityError<&HazardPtr>) -> Self { 26 | RecycleError 27 | } 28 | } 29 | 30 | //////////////////////////////////////////////////////////////////////////////////////////////////// 31 | // LocalInner 32 | //////////////////////////////////////////////////////////////////////////////////////////////////// 33 | 34 | const HAZARD_CACHE: usize = 16; 35 | 36 | #[derive(Debug)] 37 | pub(super) struct LocalInner<'global> { 38 | config: Config, 39 | global: GlobalRef<'global>, 40 | state: ManuallyDrop, 41 | ops_count: u32, 42 | hazard_cache: ArrayVec<[&'global HazardPtr; HAZARD_CACHE]>, 43 | scan_cache: Vec, 44 | } 45 | 46 | /********** impl inherent *************************************************************************/ 47 | 48 | impl<'global> LocalInner<'global> { 49 | #[inline] 50 | pub fn new(config: Config, global: GlobalRef<'global>) -> Self { 51 | let state = ManuallyDrop::new(LocalRetireState::from(&global.as_ref().retire_state)); 52 | Self { 53 | config, 54 | global, 55 | state, 56 | ops_count: Default::default(), 57 | hazard_cache: Default::default(), 58 | scan_cache: Default::default(), 59 | } 60 | } 61 | 62 | #[inline] 63 | pub fn try_increase_ops_count(&mut self, op: Operation) { 64 | if op == self.config.count_strategy { 65 | self.ops_count += 1; 66 | 67 | if self.ops_count == self.config.ops_count_threshold { 68 | self.ops_count = 0; 69 | self.try_reclaim(); 70 | } 71 | } 72 | } 73 | 74 | #[inline] 75 | pub fn retire(&mut self, retired: RawRetired) { 76 | unsafe { self.retire_inner(retired) }; 77 | 78 | if self.config.is_count_retire() { 79 | self.ops_count += 1; 80 | } 81 | } 82 | 83 | #[inline] 84 | pub fn get_hazard(&mut self, strategy: ProtectStrategy) -> &HazardPtr { 85 | match self.hazard_cache.pop() { 86 | Some(hazard) => { 87 | if let ProtectStrategy::Protect(protected) = strategy { 88 | hazard.set_protected(protected.into_inner(), Ordering::SeqCst); 89 | } 90 | 91 | hazard 92 | } 93 | None => self.global.as_ref().get_hazard(strategy), 94 | } 95 | } 96 | 97 | #[inline] 98 | pub fn try_recycle_hazard(&mut self, hazard: &'global HazardPtr) -> Result<(), RecycleError> { 99 | // todo: use small vec, incorporate config? 
100 | self.hazard_cache.try_push(hazard)?; 101 | hazard.set_thread_reserved(Ordering::Release); 102 | 103 | Ok(()) 104 | } 105 | 106 | #[inline] 107 | fn try_reclaim(&mut self) { 108 | if !self.has_retired_records() { 109 | return; 110 | } 111 | 112 | // collect into scan_cache 113 | self.global.as_ref().collect_protected_hazards(&mut self.scan_cache, Ordering::SeqCst); 114 | 115 | unsafe { self.reclaim_all_unprotected() }; 116 | } 117 | 118 | #[inline] 119 | fn has_retired_records(&self) -> bool { 120 | match &*self.state { 121 | LocalRetireState::GlobalStrategy => match &self.global.as_ref().retire_state { 122 | GlobalRetireState::GlobalStrategy(queue) => !queue.is_empty(), 123 | _ => unreachable!(), 124 | }, 125 | LocalRetireState::LocalStrategy(node) => !node.is_empty(), 126 | } 127 | } 128 | 129 | #[inline] 130 | unsafe fn retire_inner(&mut self, retired: RawRetired) { 131 | match &mut *self.state { 132 | LocalRetireState::GlobalStrategy => match &self.global.as_ref().retire_state { 133 | GlobalRetireState::GlobalStrategy(queue) => queue.retire(retired), 134 | _ => unreachable!(), 135 | }, 136 | LocalRetireState::LocalStrategy(node) => node.retire(retired), 137 | } 138 | } 139 | 140 | #[inline] 141 | unsafe fn reclaim_all_unprotected(&mut self) { 142 | match &mut *self.state { 143 | LocalRetireState::GlobalStrategy => match &self.global.as_ref().retire_state { 144 | GlobalRetireState::GlobalStrategy(queue) => { 145 | queue.reclaim_all_unprotected(&self.scan_cache) 146 | } 147 | _ => unreachable!(), 148 | }, 149 | LocalRetireState::LocalStrategy(local) => match &self.global.as_ref().retire_state { 150 | GlobalRetireState::LocalStrategy(queue) => { 151 | if let Some(node) = queue.take_all_and_merge() { 152 | local.merge(node.into_inner()) 153 | } 154 | 155 | self.scan_cache.sort_unstable(); 156 | local.reclaim_all_unprotected(&self.scan_cache) 157 | } 158 | _ => unreachable!(), 159 | }, 160 | } 161 | } 162 | } 163 | 164 | /********** impl Drop *****************************************************************************/ 165 | 166 | impl Drop for LocalInner<'_> { 167 | #[inline(never)] 168 | fn drop(&mut self) { 169 | // set all thread-reserved hazard pointers free 170 | for hazard in self.hazard_cache.iter() { 171 | hazard.set_free(Ordering::Relaxed); 172 | } 173 | 174 | // execute a final reclamation attempt 175 | self.try_reclaim(); 176 | 177 | // with the local retire strategy, any remaining retired records must 178 | // be abandoned, i.e. stored globally so that other threads can adopt 179 | // and eventually reclaim them 180 | let state = unsafe { ptr::read(&*self.state) }; 181 | if let LocalRetireState::LocalStrategy(node) = state { 182 | // if there are no remaining records the node can be de-allocated 183 | // right away 184 | if node.is_empty() { 185 | return; 186 | } 187 | 188 | match &self.global.as_ref().retire_state { 189 | GlobalRetireState::LocalStrategy(queue) => queue.push(node), 190 | _ => unreachable!(), 191 | } 192 | } 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/local/mod.rs: -------------------------------------------------------------------------------- 1 | mod inner; 2 | 3 | use core::cell::UnsafeCell; 4 | use core::convert::AsRef; 5 | use core::marker::PhantomData; 6 | 7 | cfg_if::cfg_if! 
{ 8 | if #[cfg(feature = "std")] { 9 | use std::rc::Rc; 10 | } else { 11 | use alloc::rc::Rc; 12 | use alloc::vec::Vec; 13 | } 14 | } 15 | 16 | use conquer_reclaim::{BuildReclaimRef, RawRetired, Reclaim, ReclaimRef, Retired}; 17 | 18 | use crate::config::{Config, Operation}; 19 | use crate::global::GlobalRef; 20 | use crate::guard::Guard; 21 | use crate::hazard::{HazardPtr, ProtectStrategy}; 22 | use crate::retire::RetireStrategy; 23 | use crate::Hp; 24 | 25 | use self::inner::{LocalInner, RecycleError}; 26 | 27 | //////////////////////////////////////////////////////////////////////////////////////////////////// 28 | // LocalHandle 29 | //////////////////////////////////////////////////////////////////////////////////////////////////// 30 | 31 | #[derive(Debug)] 32 | pub struct LocalHandle<'local, 'global, R> { 33 | inner: Ref<'local, 'global>, 34 | _marker: PhantomData, 35 | } 36 | 37 | /*********** impl Clone ***************************************************************************/ 38 | 39 | impl Clone for LocalHandle<'_, '_, R> { 40 | #[inline] 41 | fn clone(&self) -> Self { 42 | Self { inner: self.inner.clone(), _marker: PhantomData } 43 | } 44 | } 45 | 46 | /********** impl inherent *************************************************************************/ 47 | 48 | impl<'global, R> LocalHandle<'_, 'global, R> { 49 | #[inline] 50 | pub(crate) fn new(config: Config, global: GlobalRef<'global>) -> Self { 51 | Self { inner: Ref::Rc(Rc::new(Local::new(config, global))), _marker: PhantomData } 52 | } 53 | 54 | #[inline] 55 | pub fn from_owned(local: Rc>) -> Self { 56 | Self { inner: Ref::Rc(local), _marker: PhantomData } 57 | } 58 | 59 | #[inline] 60 | pub unsafe fn from_raw(local: *const Local<'global>) -> Self { 61 | Self { inner: Ref::Raw(local), _marker: PhantomData } 62 | } 63 | } 64 | 65 | impl<'local, 'global, R> LocalHandle<'local, 'global, R> { 66 | #[inline] 67 | pub fn from_ref(local: &'local Local<'global>) -> Self { 68 | Self { inner: Ref::Ref(local), _marker: PhantomData } 69 | } 70 | } 71 | 72 | /*********** impl AsRef ***************************************************************************/ 73 | 74 | impl<'global, R> AsRef> for LocalHandle<'_, 'global, R> { 75 | #[inline] 76 | fn as_ref(&self) -> &Local<'global> { 77 | match &self.inner { 78 | Ref::Rc(local) => local.as_ref(), 79 | Ref::Ref(local) => local, 80 | Ref::Raw(local) => unsafe { &**local }, 81 | } 82 | } 83 | } 84 | 85 | /********** impl BuildReclaimRef ******************************************************************/ 86 | 87 | impl<'global, S: RetireStrategy> BuildReclaimRef<'global> for LocalHandle<'_, 'global, Hp> 88 | where 89 | Self: 'global, 90 | Hp: Reclaim, 91 | { 92 | #[inline] 93 | fn from_ref(global: &'global Self::Reclaimer) -> Self { 94 | Self::new(Default::default(), GlobalRef::from_ref(&global.state)) 95 | } 96 | } 97 | 98 | /********** impl ReclaimRef ***********************************************************************/ 99 | 100 | unsafe impl<'local, 'global, S: RetireStrategy> ReclaimRef for LocalHandle<'local, 'global, Hp> 101 | where 102 | Hp: Reclaim, 103 | { 104 | type Guard = Guard<'local, 'global, Self::Reclaimer>; 105 | type Reclaimer = Hp; 106 | 107 | #[inline] 108 | unsafe fn from_raw(global: &Self::Reclaimer) -> Self { 109 | Self::new(Default::default(), GlobalRef::from_raw(&global.state)) 110 | } 111 | 112 | #[inline] 113 | fn into_guard(self) -> Self::Guard { 114 | Guard::with_handle(self) 115 | } 116 | 117 | #[inline] 118 | unsafe fn retire(self, retired: Retired) { 119 | 
self.inner.as_ref().retire(retired.into_raw()) 120 | } 121 | } 122 | 123 | //////////////////////////////////////////////////////////////////////////////////////////////////// 124 | // Local 125 | //////////////////////////////////////////////////////////////////////////////////////////////////// 126 | 127 | #[derive(Debug)] 128 | pub struct Local<'global> { 129 | inner: UnsafeCell>, 130 | } 131 | 132 | /********** impl inherent *************************************************************************/ 133 | 134 | impl<'global> Local<'global> { 135 | #[inline] 136 | pub(crate) fn new(config: Config, global: GlobalRef<'global>) -> Self { 137 | Self { inner: UnsafeCell::new(LocalInner::new(config, global)) } 138 | } 139 | 140 | #[inline] 141 | pub(crate) fn try_increase_ops_count(&self, op: Operation) { 142 | unsafe { (*self.inner.get()).try_increase_ops_count(op) } 143 | } 144 | 145 | #[inline] 146 | pub(crate) fn retire(&self, retired: RawRetired) { 147 | unsafe { (*self.inner.get()).retire(retired) }; 148 | } 149 | 150 | #[inline] 151 | pub(crate) fn get_hazard(&self, strategy: ProtectStrategy) -> &HazardPtr { 152 | unsafe { (*self.inner.get()).get_hazard(strategy) } 153 | } 154 | 155 | #[inline] 156 | pub(crate) fn try_recycle_hazard( 157 | &self, 158 | hazard: &'global HazardPtr, 159 | ) -> Result<(), RecycleError> { 160 | unsafe { (*self.inner.get()).try_recycle_hazard(hazard) } 161 | } 162 | } 163 | 164 | //////////////////////////////////////////////////////////////////////////////////////////////////// 165 | // Ref 166 | //////////////////////////////////////////////////////////////////////////////////////////////////// 167 | 168 | #[derive(Debug)] 169 | enum Ref<'local, 'global> { 170 | Rc(Rc>), 171 | Ref(&'local Local<'global>), 172 | Raw(*const Local<'global>), 173 | } 174 | 175 | /********** impl AsRef ****************************************************************************/ 176 | 177 | impl<'global> AsRef> for Ref<'_, 'global> { 178 | #[inline] 179 | fn as_ref(&self) -> &Local<'global> { 180 | match self { 181 | Ref::Rc(local) => &**local, 182 | Ref::Ref(local) => *local, 183 | Ref::Raw(local) => unsafe { &**local }, 184 | } 185 | } 186 | } 187 | 188 | /********** impl Clone ****************************************************************************/ 189 | 190 | impl<'local, 'global> Clone for Ref<'local, 'global> { 191 | #[inline] 192 | fn clone(&self) -> Self { 193 | match self { 194 | Ref::Rc(local) => Ref::Rc(Rc::clone(local)), 195 | Ref::Ref(local) => Ref::Ref(*local), 196 | Ref::Raw(local) => Ref::Raw(*local), 197 | } 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/queue.rs: -------------------------------------------------------------------------------- 1 | use core::ptr; 2 | use core::sync::atomic::{AtomicPtr, Ordering}; 3 | 4 | //////////////////////////////////////////////////////////////////////////////////////////////////// 5 | // RawNode (trait) 6 | //////////////////////////////////////////////////////////////////////////////////////////////////// 7 | 8 | /// A trait for node types that contain `next` pointers and can be accessed 9 | /// through raw pointers. 10 | pub(crate) trait RawNode { 11 | /// Returns the `node`'s next pointer. 12 | /// 13 | /// # Safety 14 | /// 15 | /// The caller has to ensure `node` is a valid pointer to a mutable node and 16 | /// that the aliasing rules are not violated. 
17 | unsafe fn next(node: *mut Self) -> *mut Self; 18 | 19 | /// Sets the `node`'s next pointer to `next`. 20 | /// 21 | /// # Safety 22 | /// 23 | /// The caller has to ensure `node` is a valid pointer to a mutable node and 24 | /// that the aliasing rules are not violated. 25 | unsafe fn set_next(node: *mut Self, next: *mut Self); 26 | } 27 | 28 | //////////////////////////////////////////////////////////////////////////////////////////////////// 29 | // RawQueue 30 | //////////////////////////////////////////////////////////////////////////////////////////////////// 31 | 32 | /// A concurrent linked-list based queue operating on raw pointers that serves 33 | /// as a building block for more specialized data structures. 34 | /// 35 | /// Elements are inserted at the front (i.e. in FIFO order) and can only be 36 | /// removed all at once by returning the first node which contains a link to the 37 | /// next node and so on and switching the queue to empty. 38 | #[derive(Debug, Default)] 39 | pub(crate) struct RawQueue { 40 | head: AtomicPtr, 41 | } 42 | 43 | /********** impl inherent *************************************************************************/ 44 | 45 | impl RawQueue { 46 | /// Creates a new empty [`RawQueue`]. 47 | #[inline] 48 | pub const fn new() -> Self { 49 | Self { head: AtomicPtr::new(ptr::null_mut()) } 50 | } 51 | } 52 | 53 | impl RawQueue { 54 | #[inline] 55 | pub fn is_empty(&self) -> bool { 56 | self.head.load(Ordering::Relaxed).is_null() 57 | } 58 | 59 | #[inline] 60 | pub unsafe fn push(&self, node: *mut N) { 61 | loop { 62 | let head = self.head.load(Ordering::Relaxed); 63 | N::set_next(node, head); 64 | 65 | if self 66 | .head 67 | .compare_exchange_weak(head, node, Ordering::Release, Ordering::Relaxed) 68 | .is_ok() 69 | { 70 | return; 71 | } 72 | } 73 | } 74 | 75 | #[inline] 76 | pub unsafe fn push_many(&self, (first, last): (*mut N, *mut N)) { 77 | loop { 78 | let head = self.head.load(Ordering::Relaxed); 79 | N::set_next(last, head); 80 | 81 | if self 82 | .head 83 | .compare_exchange_weak(head, first, Ordering::Release, Ordering::Relaxed) 84 | .is_ok() 85 | { 86 | return; 87 | } 88 | } 89 | } 90 | 91 | /// Swaps out the first node and leaves the [`RawQueue`] empty. 92 | /// 93 | /// The returned node (if it is non-`null`) effectively owns all following 94 | /// nodes and can deallocate or mutate them as required. 95 | #[inline] 96 | pub fn take_all(&self) -> *mut N { 97 | self.head.swap(ptr::null_mut(), Ordering::Acquire) 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/retire/global_retire.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of the global retire strategy. 2 | //! 3 | //! With this strategy, all threads store their retired records in a single 4 | //! global data structure. 5 | //! This means, that all threads can potentially reclaim records by all other 6 | //! threads, which is especially useful when only certain threads ever retire 7 | //! any records but all threads should be able to help in reclaiming these 8 | //! records. 9 | //! It can also be applicable if records are only retired fairly infrequently. 10 | //! 11 | //! The disadvantages for this strategy lie in the increased synchronization 12 | //! overhead, since every retired record requires a synchronized access to a 13 | //! single global shared data structure, which limits scalability. 
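//!
//! # Example (illustrative sketch, not from the original source)
//!
//! The strategy is selected through the reclaimer's type parameter; with
//! [`GlobalRetire`], every thread pushes its retired records into the single
//! shared queue defined in this module:
//!
//! ```ignore
//! let hp = Hp::<GlobalRetire>::default();
//! let local = hp.build_local(None);
//! ```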
14 | 
15 | use core::ptr;
16 | 
17 | use conquer_reclaim::RawRetired;
18 | 
19 | use crate::hazard::ProtectedPtr;
20 | use crate::queue::{RawNode, RawQueue};
21 | 
22 | ////////////////////////////////////////////////////////////////////////////////////////////////////
23 | // Header
24 | ////////////////////////////////////////////////////////////////////////////////////////////////////
25 | 
26 | /// With a global retire strategy, every record is allocated in a way that
27 | /// allows it to be inserted into a linked list of retired records, so it
28 | /// contains a next pointer, which is initially `null`.
29 | /// The `retired` field is only set once when a record is retired and inserted
30 | /// into the global linked list (queue) of retired records.
31 | /// A [`RawRetired`] is essentially a fat pointer.
32 | /// The first half points at the record itself and the second half points at its
33 | /// `Drop` implementation (its vtable, actually).
34 | /// By storing it in the record's header itself, the header contains all relevant
35 | /// information for traversing the linked list and reclaiming the record's memory
36 | /// without concern for its concrete type.
37 | #[derive(Debug)]
38 | pub struct Header {
39 | /// The pointer to the header of the next retired record.
40 | next: *mut Self,
41 | /// The handle for the retired record itself.
42 | retired: Option<RawRetired>,
43 | }
44 | 
45 | /********** impl Sync *****************************************************************************/
46 | 
47 | unsafe impl Sync for Header {}
48 | 
49 | /*********** impl Default *************************************************************************/
50 | 
51 | impl Default for Header {
52 | #[inline]
53 | fn default() -> Self {
54 | Self { next: ptr::null_mut(), retired: None }
55 | }
56 | }
57 | 
58 | /*********** impl RawNode *************************************************************************/
59 | 
60 | impl RawNode for Header {
61 | #[inline]
62 | unsafe fn next(node: *mut Self) -> *mut Self {
63 | (*node).next
64 | }
65 | 
66 | #[inline]
67 | unsafe fn set_next(node: *mut Self, next: *mut Self) {
68 | (*node).next = next;
69 | }
70 | }
71 | 
72 | ////////////////////////////////////////////////////////////////////////////////////////////////////
73 | // RetiredQueue
74 | ////////////////////////////////////////////////////////////////////////////////////////////////////
75 | 
76 | /// A linked-list based queue for storing retired records.
77 | ///
78 | /// Every record must be allocated with a [`Header`] that allows it to be
79 | /// inserted into the queue and to be later reclaimed.
80 | #[derive(Debug, Default)]
81 | pub(crate) struct RetiredQueue {
82 | raw: RawQueue<Header>
, 83 | } 84 | 85 | /********** impl inherent *************************************************************************/ 86 | 87 | impl RetiredQueue { 88 | /// Creates a new empty [`RetiredQueue`]. 89 | #[inline] 90 | pub const fn new() -> Self { 91 | Self { raw: RawQueue::new() } 92 | } 93 | 94 | /// Returns `true` if the [`RetiredQueue`] is empty. 95 | #[inline] 96 | pub fn is_empty(&self) -> bool { 97 | self.raw.is_empty() 98 | } 99 | 100 | /// Pushes `retired` into the queue. 101 | /// 102 | /// # Safety 103 | /// 104 | /// The caller has to ensure `retired` points at a record that has a header 105 | /// of the correct type. 106 | /// Specifically, this requires that `retired` was derived from a 107 | /// `Retired>`. 108 | #[inline] 109 | pub unsafe fn retire(&self, retired: RawRetired) { 110 | // `retired` points to a record, which has layout guarantees regarding field ordering 111 | // and the record's header is always first 112 | let header = retired.as_ptr() as *mut () as *mut Header; 113 | // store the retired record in the header itself, because it is necessary for later 114 | // reclamation 115 | (*header).retired = Some(retired); 116 | self.raw.push(header); 117 | } 118 | 119 | #[inline] 120 | pub unsafe fn reclaim_all_unprotected(&self, protected: &[ProtectedPtr]) { 121 | // take all retired records from the global queue 122 | let mut curr = self.raw.take_all(); 123 | // these variables are used to create a simple inline linked list structure 124 | // all records which can not be reclaimed are put back into this list and are 125 | // eventually pushed back into the global queue. 126 | let (mut first, mut last): (*mut Header, *mut Header) = (ptr::null_mut(), ptr::null_mut()); 127 | 128 | // iterate all retired records and reclaim all which are no longer protected 129 | while !curr.is_null() { 130 | let addr = curr as usize; 131 | let next = (*curr).next; 132 | match protected.binary_search_by(|protected| protected.address().cmp(&addr)) { 133 | // the record is still protected by some hazard pointer 134 | Ok(_) => { 135 | // the next pointer must be zeroed since it may still point at some record 136 | // from the global queue 137 | (*curr).next = ptr::null_mut(); 138 | if first.is_null() { 139 | first = curr; 140 | last = curr; 141 | } else { 142 | (*last).next = curr; 143 | last = curr; 144 | } 145 | } 146 | // the record can be reclaimed 147 | Err(_) => (*curr).retired.take().unwrap().reclaim(), 148 | } 149 | 150 | curr = next; 151 | } 152 | 153 | // not all records were reclaimed, push all others back into the global queue in bulk. 154 | if !first.is_null() { 155 | self.raw.push_many((first, last)); 156 | } 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/retire/local_retire.rs: -------------------------------------------------------------------------------- 1 | use core::cmp; 2 | use core::mem; 3 | use core::ptr; 4 | 5 | cfg_if::cfg_if! 
{ 6 | if #[cfg(not(feature = "std"))] { 7 | use alloc::boxed::Box; 8 | use alloc::vec::Vec; 9 | } 10 | } 11 | 12 | use conquer_reclaim::RawRetired; 13 | 14 | use crate::hazard::ProtectedPtr; 15 | use crate::queue::{RawNode, RawQueue}; 16 | 17 | //////////////////////////////////////////////////////////////////////////////////////////////////// 18 | // RetireNode 19 | //////////////////////////////////////////////////////////////////////////////////////////////////// 20 | 21 | #[derive(Debug)] 22 | pub(crate) struct RetireNode { 23 | vec: Vec, 24 | next: *mut Self, 25 | } 26 | 27 | /********** impl inherent *************************************************************************/ 28 | 29 | impl RetireNode { 30 | const DEFAULT_INITIAL_CAPACITY: usize = 128; 31 | 32 | #[inline] 33 | pub fn into_inner(self) -> Vec { 34 | self.vec 35 | } 36 | 37 | #[inline] 38 | pub fn is_empty(&self) -> bool { 39 | self.vec.is_empty() 40 | } 41 | 42 | #[inline] 43 | pub fn merge(&mut self, mut other: Vec) { 44 | if (other.capacity() - other.len()) > self.vec.capacity() { 45 | mem::swap(&mut self.vec, &mut other); 46 | } 47 | 48 | self.vec.append(&mut other); 49 | } 50 | 51 | #[inline] 52 | pub unsafe fn retire(&mut self, retired: RawRetired) { 53 | self.vec.push(ReclaimOnDrop::new(retired)); 54 | } 55 | 56 | #[inline] 57 | pub unsafe fn reclaim_all_unprotected(&mut self, protected: &[ProtectedPtr]) { 58 | self.vec.retain(|retired| { 59 | // retain (i.e. DON'T drop) all records found within the scan cache of protected hazards 60 | protected.binary_search_by(|&protected| retired.compare_with(protected)).is_ok() 61 | }); 62 | } 63 | } 64 | 65 | /********** impl Default **************************************************************************/ 66 | 67 | impl Default for RetireNode { 68 | #[inline] 69 | fn default() -> Self { 70 | Self { vec: Vec::with_capacity(Self::DEFAULT_INITIAL_CAPACITY), next: ptr::null_mut() } 71 | } 72 | } 73 | 74 | /********** impl RawNode **************************************************************************/ 75 | 76 | impl RawNode for RetireNode { 77 | #[inline] 78 | unsafe fn next(node: *mut Self) -> *mut Self { 79 | (*node).next 80 | } 81 | 82 | #[inline] 83 | unsafe fn set_next(node: *mut Self, next: *mut Self) { 84 | (*node).next = next; 85 | } 86 | } 87 | 88 | //////////////////////////////////////////////////////////////////////////////////////////////////// 89 | // AbandonedQueue 90 | //////////////////////////////////////////////////////////////////////////////////////////////////// 91 | 92 | #[derive(Debug, Default)] 93 | pub(crate) struct AbandonedQueue { 94 | raw: RawQueue, 95 | } 96 | 97 | /********** impl inherent *************************************************************************/ 98 | 99 | impl AbandonedQueue { 100 | #[inline] 101 | pub const fn new() -> Self { 102 | Self { raw: RawQueue::new() } 103 | } 104 | 105 | #[inline] 106 | pub fn push(&self, node: Box) { 107 | let node = Box::leak(node); 108 | unsafe { self.raw.push(node) }; 109 | } 110 | 111 | #[inline] 112 | pub fn take_all_and_merge(&self) -> Option> { 113 | unsafe { 114 | match self.raw.take_all() { 115 | ptr if ptr.is_null() => None, 116 | ptr => { 117 | let mut boxed = Box::from_raw(ptr); 118 | let mut curr = boxed.next; 119 | while !curr.is_null() { 120 | let RetireNode { vec: container, next } = *Box::from_raw(curr); 121 | boxed.merge(container); 122 | curr = next; 123 | } 124 | 125 | Some(boxed) 126 | } 127 | } 128 | } 129 | } 130 | } 131 | 132 | 
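// Illustrative sketch (not part of the original source): an exiting thread
// abandons its remaining retired records by pushing them into the global
// `AbandonedQueue`, from which another thread can later adopt and merge them.
#[cfg(test)]
mod abandon_example {
    use super::{AbandonedQueue, RetireNode};

    #[test]
    fn abandon_and_adopt() {
        let queue = AbandonedQueue::new();
        // an exiting thread pushes its node of (here: zero) retired records ...
        queue.push(Box::new(RetireNode::default()));
        // ... and another thread adopts and merges all abandoned nodes
        let adopted = queue.take_all_and_merge().unwrap();
        assert!(adopted.is_empty());
    }
}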
//////////////////////////////////////////////////////////////////////////////////////////////////// 133 | // ReclaimOnDrop 134 | //////////////////////////////////////////////////////////////////////////////////////////////////// 135 | 136 | #[derive(Debug)] 137 | pub(crate) struct ReclaimOnDrop(RawRetired); 138 | 139 | /********** impl inherent *************************************************************************/ 140 | 141 | impl ReclaimOnDrop { 142 | #[inline] 143 | unsafe fn new(retired: RawRetired) -> Self { 144 | Self(retired) 145 | } 146 | 147 | #[inline] 148 | fn compare_with(&self, protected: ProtectedPtr) -> cmp::Ordering { 149 | protected.address().cmp(&self.0.address()) 150 | } 151 | } 152 | 153 | /********** impl Drop *****************************************************************************/ 154 | 155 | impl Drop for ReclaimOnDrop { 156 | #[inline(always)] 157 | fn drop(&mut self) { 158 | unsafe { self.0.reclaim() }; 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /hazptr_rewrite/src/retire/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod global_retire; 2 | pub(crate) mod local_retire; 3 | 4 | use self::global_retire::RetiredQueue; 5 | use self::local_retire::{AbandonedQueue, RetireNode}; 6 | 7 | //////////////////////////////////////////////////////////////////////////////////////////////////// 8 | // RetireStrategy (trait) 9 | //////////////////////////////////////////////////////////////////////////////////////////////////// 10 | 11 | pub trait RetireStrategy: Sized + 'static {} 12 | 13 | //////////////////////////////////////////////////////////////////////////////////////////////////// 14 | // GlobalRetire 15 | //////////////////////////////////////////////////////////////////////////////////////////////////// 16 | 17 | #[derive(Copy, Clone, Debug, Default, Hash, Eq, Ord, PartialEq, PartialOrd)] 18 | pub struct GlobalRetire; 19 | 20 | /********** impl RetireStrategy *******************************************************************/ 21 | 22 | impl RetireStrategy for GlobalRetire {} 23 | 24 | //////////////////////////////////////////////////////////////////////////////////////////////////// 25 | // GlobalRetireState 26 | //////////////////////////////////////////////////////////////////////////////////////////////////// 27 | 28 | #[derive(Debug)] 29 | pub(crate) enum GlobalRetireState { 30 | GlobalStrategy(RetiredQueue), 31 | LocalStrategy(AbandonedQueue), 32 | } 33 | 34 | /********** impl inherent *************************************************************************/ 35 | 36 | impl GlobalRetireState { 37 | pub(crate) const fn global_strategy() -> Self { 38 | GlobalRetireState::GlobalStrategy(RetiredQueue::new()) 39 | } 40 | 41 | pub(crate) const fn local_strategy() -> Self { 42 | GlobalRetireState::LocalStrategy(AbandonedQueue::new()) 43 | } 44 | } 45 | 46 | //////////////////////////////////////////////////////////////////////////////////////////////////// 47 | // LocalRetire 48 | //////////////////////////////////////////////////////////////////////////////////////////////////// 49 | 50 | #[derive(Copy, Clone, Debug, Default, Hash, Eq, Ord, PartialEq, PartialOrd)] 51 | pub struct LocalRetire; 52 | 53 | /********** impl RetireStrategy *******************************************************************/ 54 | 55 | impl RetireStrategy for LocalRetire {} 56 | 57 | 
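// Illustrative sketch (not part of the original source): the marker types above
// only select a strategy at the type level; the matching runtime state is
// created by the constructors shown above, as done by the `Default` impls for
// `Hp` in lib.rs.
#[cfg(test)]
mod strategy_example {
    use super::GlobalRetireState;

    #[test]
    fn construct_states() {
        // state backing `Hp<GlobalRetire>`
        let _global = GlobalRetireState::global_strategy();
        // state backing `Hp<LocalRetire>`
        let _local = GlobalRetireState::local_strategy();
    }
}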
//////////////////////////////////////////////////////////////////////////////////////////////////// 58 | // LocalRetireState 59 | //////////////////////////////////////////////////////////////////////////////////////////////////// 60 | 61 | #[derive(Debug)] 62 | pub(crate) enum LocalRetireState { 63 | GlobalStrategy, 64 | LocalStrategy(Box), 65 | } 66 | 67 | /********** impl From *****************************************************************************/ 68 | 69 | impl From<&GlobalRetireState> for LocalRetireState { 70 | #[inline] 71 | fn from(retire_state: &GlobalRetireState) -> Self { 72 | match retire_state { 73 | GlobalRetireState::GlobalStrategy(_) => LocalRetireState::GlobalStrategy, 74 | GlobalRetireState::LocalStrategy(abandoned) => { 75 | // check if there are any abandoned records that can be used by 76 | // the new thread instead of allocating a new local queue 77 | match abandoned.take_all_and_merge() { 78 | Some(node) => LocalRetireState::LocalStrategy(node), 79 | None => LocalRetireState::LocalStrategy(Box::new(Default::default())), 80 | } 81 | } 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | use_field_init_shorthand = true 2 | use_small_heuristics = "Max" 3 | use_try_shorthand = true 4 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | //! Types for global one-time configuration of the runtime parameters used by 2 | //! the reclamation scheme. 3 | 4 | const DEFAULT_INIT_CACHE: usize = 128; 5 | const DEFAULT_MIN_REQUIRED_RECORDS: u32 = 0; 6 | const DEFAULT_SCAN_THRESHOLD: u32 = 128; 7 | 8 | //////////////////////////////////////////////////////////////////////////////////////////////////// 9 | // Config 10 | //////////////////////////////////////////////////////////////////////////////////////////////////// 11 | 12 | /// Runtime configuration parameters. 13 | #[derive(Copy, Clone, Debug)] 14 | pub struct Config { 15 | init_cache: usize, 16 | min_required_records: u32, 17 | scan_threshold: u32, 18 | } 19 | 20 | /********** impl Default **************************************************************************/ 21 | 22 | impl Default for Config { 23 | #[inline] 24 | fn default() -> Self { 25 | ConfigBuilder::new().build() 26 | } 27 | } 28 | 29 | /********** impl inherent *************************************************************************/ 30 | 31 | impl Config { 32 | /// Creates a new [`Config`] with the given parameters 33 | /// 34 | /// # Panics 35 | /// 36 | /// This function panics, if `scan_threshold` is 0. 37 | #[inline] 38 | pub fn with_params(init_cache: usize, min_required_records: u32, scan_threshold: u32) -> Self { 39 | assert!(scan_threshold > 0, "scan threshold must be greater than 0"); 40 | Self { init_cache, min_required_records, scan_threshold } 41 | } 42 | 43 | /// Returns the initial cache size for newly spawned threads. 44 | #[inline] 45 | pub fn init_cache(&self) -> usize { 46 | self.init_cache 47 | } 48 | 49 | /// Returns the minimum amount of retired records that is required, before 50 | /// an attempt at reclaiming records is initiated. 51 | #[inline] 52 | pub fn min_required_records(&self) -> u32 { 53 | self.min_required_records 54 | } 55 | 56 | /// Returns the scan threshold. 
57 | ///
58 | /// Every retired record or dropped hazard `Guard` (depending on which
59 | /// feature is selected) counts towards this threshold.
60 | /// Once it is reached, an attempt is made to reclaim records.
61 | #[inline]
62 | pub fn scan_threshold(&self) -> u32 {
63 | self.scan_threshold
64 | }
65 | }
66 | 
67 | ////////////////////////////////////////////////////////////////////////////////////////////////////
68 | // ConfigBuilder
69 | ////////////////////////////////////////////////////////////////////////////////////////////////////
70 | 
71 | /// A builder type for gradually initializing a [`Config`].
72 | ///
73 | /// This is mainly useful for preserving stability, in case the internal structure
74 | /// of the [`Config`] type changes in the future, e.g. because further
75 | /// parameters are added.
76 | #[derive(Copy, Clone, Debug, Default)]
77 | pub struct ConfigBuilder {
78 | init_cache: Option<usize>,
79 | min_required_records: Option<u32>,
80 | scan_threshold: Option<u32>,
81 | }
82 | 
83 | impl ConfigBuilder {
84 | /// Creates a new [`ConfigBuilder`] with default values.
85 | #[inline]
86 | pub fn new() -> Self {
87 | Self::default()
88 | }
89 | 
90 | /// Sets the initial size of the cache for retired records of each newly
91 | /// created thread.
92 | ///
93 | /// If this is set to e.g. 0, retiring the first record will require the
94 | /// allocation of memory by the internally used data structure.
95 | #[inline]
96 | pub fn init_cache(mut self, init_cache: usize) -> Self {
97 | self.init_cache = Some(init_cache);
98 | self
99 | }
100 | 
101 | /// Sets the minimum number of records that must have been retired by a
102 | /// thread before it may attempt to reclaim any memory.
103 | #[inline]
104 | pub fn min_required_records(mut self, min_required_records: u32) -> Self {
105 | self.min_required_records = Some(min_required_records);
106 | self
107 | }
108 | 
109 | /// Sets the scan threshold.
110 | #[inline]
111 | pub fn scan_threshold(mut self, scan_threshold: u32) -> Self {
112 | self.scan_threshold = Some(scan_threshold);
113 | self
114 | }
115 | 
116 | /// Consumes the [`ConfigBuilder`] and returns an initialized [`Config`].
117 | ///
118 | /// Unspecified parameters are initialized with their default values.
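///
/// # Example (illustrative sketch, not from the original source)
///
/// Assembling a custom configuration; parameters that are not set explicitly
/// fall back to their defaults:
///
/// ```ignore
/// let config = ConfigBuilder::new()
///     .scan_threshold(64)
///     .build();
/// assert_eq!(config.scan_threshold(), 64);
/// ```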
119 | #[inline] 120 | pub fn build(self) -> Config { 121 | Config::with_params( 122 | self.init_cache.unwrap_or(DEFAULT_INIT_CACHE), 123 | self.min_required_records.unwrap_or(DEFAULT_MIN_REQUIRED_RECORDS), 124 | self.scan_threshold.unwrap_or(DEFAULT_SCAN_THRESHOLD), 125 | ) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/default.rs: -------------------------------------------------------------------------------- 1 | use std::ptr::NonNull; 2 | 3 | use reclaim::typenum::Unsigned; 4 | use reclaim::{GlobalReclaim, Reclaim}; 5 | 6 | use crate::hazard::Hazard; 7 | use crate::local::{Local, LocalAccess, RecycleError}; 8 | use crate::{Unlinked, HP}; 9 | 10 | pub type Guard = crate::guard::Guard; 11 | 12 | // Per-thread instances of `Local` 13 | thread_local!(static LOCAL: Local = Local::new()); 14 | 15 | /********** impl GlobalReclaim ********************************************************************/ 16 | 17 | unsafe impl GlobalReclaim for HP { 18 | type Guard = Guard; 19 | 20 | #[inline] 21 | fn try_flush() { 22 | LOCAL.with(Local::try_flush); 23 | } 24 | 25 | #[inline] 26 | unsafe fn retire(unlinked: Unlinked) { 27 | LOCAL.with(move |local| Self::retire_local(local, unlinked)) 28 | } 29 | 30 | #[inline] 31 | unsafe fn retire_unchecked(unlinked: Unlinked) { 32 | LOCAL.with(move |local| Self::retire_local_unchecked(local, unlinked)) 33 | } 34 | } 35 | 36 | /********** impl inherent *************************************************************************/ 37 | 38 | impl Guard { 39 | #[inline] 40 | pub fn new() -> Self { 41 | Self::with_access(DefaultAccess) 42 | } 43 | } 44 | 45 | /********** impl Default **************************************************************************/ 46 | 47 | impl Default for Guard { 48 | #[inline] 49 | fn default() -> Self { 50 | Self::new() 51 | } 52 | } 53 | 54 | //////////////////////////////////////////////////////////////////////////////////////////////////// 55 | // DefaultAccess 56 | //////////////////////////////////////////////////////////////////////////////////////////////////// 57 | 58 | #[derive(Copy, Clone, Debug, Default)] 59 | pub struct DefaultAccess; 60 | 61 | /********** impl LocalAccess **********************************************************************/ 62 | 63 | impl LocalAccess for DefaultAccess { 64 | #[inline] 65 | fn get_hazard(self, protect: Option>) -> &'static Hazard { 66 | LOCAL.with(|local| local.get_hazard(protect)) 67 | } 68 | 69 | #[inline] 70 | fn try_recycle_hazard(self, hazard: &'static Hazard) -> Result<(), RecycleError> { 71 | LOCAL 72 | .try_with(|local| local.try_recycle_hazard(hazard)) 73 | .unwrap_or(Err(RecycleError::Access)) 74 | } 75 | 76 | #[inline] 77 | fn increase_ops_count(self) { 78 | LOCAL.with(|local| local.increase_ops_count()); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/global.rs: -------------------------------------------------------------------------------- 1 | //! Operations on globally shared data for hazard pointers and abandoned retired 2 | //! records. 
2 | //! records.
3 | 4 | use core::ptr::NonNull; 5 | use core::sync::atomic::{ 6 | self, 7 | Ordering::{self, SeqCst}, 8 | }; 9 | 10 | #[cfg(not(feature = "std"))] 11 | use alloc::{boxed::Box, vec::Vec}; 12 | 13 | use crate::hazard::{Hazard, HazardList, Protected}; 14 | use crate::retired::{AbandonedBags, RetiredBag}; 15 | use crate::sanitize; 16 | 17 | /// The single static `Global` instance 18 | pub(crate) static GLOBAL: Global = Global::new(); 19 | 20 | //////////////////////////////////////////////////////////////////////////////////////////////////// 21 | // Global 22 | //////////////////////////////////////////////////////////////////////////////////////////////////// 23 | 24 | /// Global data structures required for managing memory reclamation with hazard 25 | /// pointers. 26 | #[derive(Debug)] 27 | pub(crate) struct Global { 28 | hazards: HazardList, 29 | abandoned: AbandonedBags, 30 | } 31 | 32 | /********** impl inherent *************************************************************************/ 33 | 34 | impl Global { 35 | /// Creates a new instance of a `Global`. 36 | #[inline] 37 | pub const fn new() -> Self { 38 | Self { hazards: HazardList::new(), abandoned: AbandonedBags::new() } 39 | } 40 | 41 | /// Acquires a hazard pointer from the global list and reserves it for the 42 | /// thread requesting it. 43 | /// 44 | /// This operation traverses the entire list from the head, trying to find 45 | /// an unused hazard. 46 | /// If it does not find one, it allocates a new one and appends it to the 47 | /// end of the list. 48 | #[inline] 49 | pub fn get_hazard(&'static self, protect: Option>) -> &'static Hazard { 50 | self.hazards.get_hazard(protect) 51 | } 52 | 53 | /// Collects all currently active hazard pointers into the supplied `Vec`. 54 | #[inline] 55 | pub fn collect_protected_hazards(&'static self, vec: &mut Vec, order: Ordering) { 56 | debug_assert_eq!(order, SeqCst, "must only be called with `SeqCst`"); 57 | vec.clear(); 58 | 59 | atomic::fence(order); 60 | 61 | for hazard in self.hazards.iter().fuse() { 62 | if let Some(protected) = hazard.protected(sanitize::RELAXED_LOAD) { 63 | vec.push(protected); 64 | } 65 | } 66 | } 67 | 68 | /// Stores an exiting thread's (non-empty) bag of retired records, which 69 | /// could not be reclaimed at the time the thread exited. 70 | #[inline] 71 | pub fn abandon_retired_bag(&'static self, bag: Box) { 72 | debug_assert!(!bag.inner.is_empty()); 73 | self.abandoned.push(bag); 74 | } 75 | 76 | /// Takes and merges all abandoned records and returns them as a single 77 | /// `RetiredBag`. 78 | #[inline] 79 | pub fn try_adopt_abandoned_records(&'static self) -> Option> { 80 | self.abandoned.take_and_merge() 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/guard.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::Ordering::{self, Relaxed, Release, SeqCst}; 2 | 3 | use reclaim::prelude::*; 4 | use reclaim::typenum::Unsigned; 5 | use reclaim::{MarkedNonNull, MarkedPtr, NotEqualError}; 6 | 7 | use crate::hazard::Hazard; 8 | use crate::local::LocalAccess; 9 | use crate::{Atomic, Shared, HP}; 10 | 11 | //////////////////////////////////////////////////////////////////////////////////////////////////// 12 | // Guarded 13 | //////////////////////////////////////////////////////////////////////////////////////////////////// 14 | 15 | /// A guarded pointer that can be used to acquire hazard pointers. 
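///
/// # Example
///
/// A minimal sketch of the public (`std`) API built on top of this type,
/// along the lines of `src/main.rs` and the tests below:
///
/// ```ignore
/// use std::sync::atomic::Ordering::{Acquire, Release};
///
/// use hazptr::typenum::U0;
/// use hazptr::{Guard, Owned, Shared};
///
/// static GLOBAL: hazptr::Atomic<i32, U0> = hazptr::Atomic::null();
///
/// GLOBAL.store(Owned::new(1), Release);
///
/// // loading through the guard publishes a hazard pointer for the read
/// // value, which keeps it from being reclaimed while the guard is in use
/// let mut guard = Guard::new();
/// let shared = GLOBAL.load(Acquire, &mut guard).unwrap();
/// assert_eq!(Shared::into_ref(shared), &1);
/// ```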
16 | #[derive(Debug)] 17 | pub struct Guard { 18 | hazard: &'static Hazard, 19 | local_access: L, 20 | } 21 | 22 | /********** impl LocalAccess **********************************************************************/ 23 | 24 | unsafe impl Send for Guard {} 25 | 26 | /********** impl Clone ****************************************************************************/ 27 | 28 | impl Clone for Guard { 29 | #[inline] 30 | fn clone(&self) -> Self { 31 | let local_access = self.local_access; 32 | match self.hazard.protected(Relaxed) { 33 | Some(protect) => { 34 | Self { hazard: local_access.get_hazard(Some(protect.into_inner())), local_access } 35 | } 36 | None => Self { hazard: local_access.get_hazard(None), local_access }, 37 | } 38 | } 39 | } 40 | 41 | // a small shorthand for a two-line return statement 42 | macro_rules! release { 43 | ($self:ident, $tag:expr) => {{ 44 | // (GUA:1) this `Release` store synchronizes-with the `SeqCst` fence (LOC:2) but WITHOUT 45 | // enforcing a total order 46 | $self.hazard.set_thread_reserved(Release); 47 | Null($tag) 48 | }}; 49 | } 50 | 51 | /********** impl Protect **************************************************************************/ 52 | 53 | unsafe impl Protect for Guard { 54 | type Reclaimer = HP; 55 | 56 | #[inline] 57 | fn release(&mut self) { 58 | // (GUA:2) this `Release` store synchronizes-with the `SeqCst` fence (LOC:2) but WITHOUT 59 | // enforcing a total order 60 | self.hazard.set_thread_reserved(Release); 61 | } 62 | 63 | #[inline] 64 | fn protect( 65 | &mut self, 66 | atomic: &Atomic, 67 | order: Ordering, 68 | ) -> Marked> { 69 | match MarkedNonNull::new(atomic.load_raw(Relaxed)) { 70 | Null(tag) => return release!(self, tag), 71 | Value(ptr) => { 72 | let mut protect = ptr.decompose_non_null(); 73 | // (GUA:3) this `SeqCst` store synchronizes-with the `SeqCst` fence (LOC:2) 74 | self.hazard.set_protected(protect.cast(), SeqCst); 75 | 76 | loop { 77 | match MarkedNonNull::new(atomic.load_raw(order)) { 78 | Null(tag) => return release!(self, tag), 79 | Value(ptr) => { 80 | let unmarked = ptr.decompose_non_null(); 81 | if protect == unmarked { 82 | return Value(unsafe { Shared::from_marked_non_null(ptr) }); 83 | } 84 | 85 | // (GUA:4) this `SeqCst` store synchronizes-with the `SeqCst` fence 86 | // (LOC:2) 87 | self.hazard.set_protected(unmarked.cast(), SeqCst); 88 | protect = unmarked; 89 | } 90 | } 91 | } 92 | } 93 | } 94 | } 95 | 96 | #[inline] 97 | fn protect_if_equal( 98 | &mut self, 99 | atomic: &Atomic, 100 | expected: MarkedPtr, 101 | order: Ordering, 102 | ) -> Result>, NotEqualError> { 103 | let raw = atomic.load_raw(Relaxed); 104 | if raw != expected { 105 | return Err(NotEqualError); 106 | } 107 | 108 | match MarkedNonNull::new(atomic.load_raw(order)) { 109 | Null(tag) => Ok(release!(self, tag)), 110 | Value(ptr) => { 111 | let unmarked = ptr.decompose_non_null(); 112 | // (GUA:5) this `SeqCst` store synchronizes-with the `SeqCst` fence (LOC:2) 113 | self.hazard.set_protected(unmarked.cast(), SeqCst); 114 | 115 | if atomic.load_raw(order) != ptr { 116 | // (GUA:6) this `Release` store synchronizes-with the `SeqCst` fence (LOC:2) but 117 | // WITHOUT enforcing a total order 118 | self.hazard.set_thread_reserved(Release); 119 | Err(NotEqualError) 120 | } else { 121 | Ok(unsafe { Marked::from_marked_non_null(ptr) }) 122 | } 123 | } 124 | } 125 | } 126 | } 127 | 128 | /********** impl inherent *************************************************************************/ 129 | 130 | impl Guard { 131 | /// Creates a new [`Guard`] with 
the given means for `local_access`. 132 | #[inline] 133 | pub fn with_access(local_access: L) -> Self { 134 | Self { hazard: local_access.get_hazard(None), local_access } 135 | } 136 | } 137 | 138 | /********** impl Drop *****************************************************************************/ 139 | 140 | impl Drop for Guard { 141 | #[inline] 142 | fn drop(&mut self) { 143 | if cfg!(feature = "count-release") { 144 | self.local_access.increase_ops_count(); 145 | } 146 | 147 | if self.local_access.try_recycle_hazard(self.hazard).is_err() { 148 | self.hazard.set_free(Release); 149 | } 150 | } 151 | } 152 | 153 | #[cfg(test)] 154 | mod tests { 155 | use std::sync::atomic::Ordering::Relaxed; 156 | 157 | use matches::assert_matches; 158 | 159 | use reclaim::prelude::*; 160 | use reclaim::typenum::U0; 161 | 162 | use crate::guard::Guard; 163 | use crate::local::Local; 164 | use crate::Shared; 165 | 166 | type Atomic = crate::Atomic; 167 | type Owned = crate::Owned; 168 | type MarkedPtr = reclaim::MarkedPtr; 169 | 170 | #[test] 171 | fn new() { 172 | let local = Local::new(); 173 | let guard = Guard::with_access(&local); 174 | assert!(guard.hazard.protected(Relaxed).is_none()); 175 | } 176 | 177 | #[test] 178 | fn protect() { 179 | let local = Local::new(); 180 | let mut guard = Guard::with_access(&local); 181 | 182 | let null = Atomic::null(); 183 | let marked = guard.protect(&null, Relaxed); 184 | assert_matches!(marked, Null(0)); 185 | assert!(guard.hazard.protected(Relaxed).is_none()); 186 | 187 | let atomic = Atomic::new(1); 188 | let shared = guard.protect(&atomic, Relaxed).unwrap_value(); 189 | let reference = Shared::into_ref(shared); 190 | let addr = reference as *const _ as usize; 191 | assert_eq!(reference, &1); 192 | assert_eq!(guard.hazard.protected(Relaxed).unwrap().address(), addr); 193 | 194 | let _ = guard.protect(&null, Relaxed); 195 | assert!(guard.hazard.protected(Relaxed).is_none()); 196 | } 197 | 198 | #[test] 199 | fn protect_if_equal() { 200 | let local = Local::new(); 201 | let mut guard = Guard::with_access(&local); 202 | 203 | let null = Atomic::null(); 204 | let null_ptr = MarkedPtr::null(); 205 | 206 | let res = guard.protect_if_equal(&null, null_ptr, Relaxed); 207 | assert_matches!(res, Ok(Null(0))); 208 | assert!(guard.hazard.protected(Relaxed).is_none()); 209 | 210 | let owned = Owned::new(1); 211 | let marked = Owned::as_marked_ptr(&owned); 212 | let atomic = Atomic::from(owned); 213 | 214 | let res = guard.protect_if_equal(&atomic, null_ptr, Relaxed); 215 | assert_matches!(res, Err(_)); 216 | assert!(guard.hazard.protected(Relaxed).is_none()); 217 | 218 | let res = guard.protect_if_equal(&atomic, marked, Relaxed); 219 | let shared = res.unwrap().unwrap_value(); 220 | let reference = Shared::into_ref(shared); 221 | assert_eq!(reference, &1); 222 | assert_eq!(guard.hazard.protected(Relaxed).unwrap().address(), marked.into_usize()); 223 | 224 | // a failed protection attempt must not alter the previous state 225 | let res = guard.protect_if_equal(&null, marked, Relaxed); 226 | assert!(res.is_err()); 227 | assert_eq!(guard.hazard.protected(Relaxed).unwrap().address(), marked.into_usize()); 228 | 229 | let res = guard.protect_if_equal(&null, null_ptr, Relaxed); 230 | assert_matches!(res, Ok(Null(0))); 231 | assert!(guard.hazard.protected(Relaxed).is_none()); 232 | } 233 | } 234 | -------------------------------------------------------------------------------- /src/hazard/list.rs: -------------------------------------------------------------------------------- 1 | 
//! Concurrent linked list implementation for globally storing all allocated 2 | //! hazard pointers. 3 | //! 4 | //! A thread requesting a hazard pointer first traverses this list and searches 5 | //! for an already allocated one that is not currently in use. 6 | //! If there is none, the list allocates a new one, appends it to the end of the 7 | //! list and returns a reference (`&'static Hazard`) to it. 8 | //! Once allocated, hazard pointers are never de-allocated again during the 9 | //! lifetime of the program (i.e. they have `'static` lifetime). 10 | //! When a thread does no longer need an acquired hazard pointer, marks it as 11 | //! no longer in use, which allows other threads to acquire it during the list 12 | //! traversal instead of having to allocate a new one. 13 | //! Additionally, each thread maintains a small cache of previously acquired 14 | //! hazard pointers, which are specifically reserved for use by that thread. 15 | //! 16 | //! # Synchronization 17 | //! 18 | //! ```ignore 19 | //! struct Node { 20 | //! protected: #[repr(align(64))] AtomicPtr<()>, 21 | //! next: #[repr(align(64))] AtomicPtr, 22 | //! } 23 | //! ``` 24 | //! 25 | //! Above is an approximate and simplified description of a node in the global 26 | //! linked list of hazard pointers. 27 | //! Both fields of this struct are aligned to the size of a cache-line in order 28 | //! to prevent false sharing. 29 | //! This is desirable, since the `next` field is effectively constant once a 30 | //! node is inserted and is no longer at the tail, while the `protected` field 31 | //! can be frequently written to. 32 | //! 33 | //! All atomic operations on the `next` field can be synchronized using 34 | //! acquire-release semantics, since all threads are required to synchronize 35 | //! through the **same** variable (i.e. the current tail of the list). 36 | //! All stores to the `protected` field that mark a specific pointer as 37 | //! protected from reclamation, however, **must** establish a total order and 38 | //! thus require sequential consistency (HAZ:2 and LIS:3P). 39 | //! Similarly, the loads on that field made during a scan of all active hazard 40 | //! pointers must also be sequentially consistent (GLO:1). 41 | //! Otherwise, a thread scanning the global list of hazard pointers might not 42 | //! see a consistent view of all protected pointers, since stores to the various 43 | //! `protected` fields are all independent writes. 44 | //! Consequently, a thread might go ahead and deallocate a retired record for 45 | //! which a hazard pointer has previously been successfully acquired but the 46 | //! corresponding store has not yet become visible to the reclaiming thread, 47 | //! potentially leading to a critical **use after free** error. 48 | //! All stores that write a sentinel value (e.g. `0x0` for `FREE` and `0x1` for 49 | //! `RESERVED`) to a `protected` field, on the other hand, do not require such 50 | //! strict ordering constraints. 51 | //! If such a store is delayed and not visible during a thread's scan prior to 52 | //! reclamation the worst-case outcome is a record not being reclaimed that 53 | //! would actually be a valid candidate for reclamation. 
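//!
//! A condensed sketch of the two sides of this protocol (simplified; not the
//! literal code of this module, but using its names and code markers):
//!
//! ```ignore
//! // acquiring thread: announce the pointer with `SeqCst` (HAZ:2 / LIS:3P),
//! // then re-read the source pointer to validate that the record has not
//! // been removed in the meantime, before dereferencing it
//! hazard.set_protected(ptr.cast(), SeqCst);
//!
//! // reclaiming thread: the `SeqCst` fence (LOC:2) makes all such
//! // announcements visible before the scan (GLO:1) collects them
//! atomic::fence(SeqCst);
//! let protected: Vec<_> = hazards.iter().filter_map(|h| h.protected(Relaxed)).collect();
//! // only retired records whose address does NOT appear in `protected` may
//! // be dropped
//! ```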
54 | 55 | #[cfg(not(any(test, feature = "std")))] 56 | use alloc::boxed::Box; 57 | 58 | use core::iter::FusedIterator; 59 | use core::mem; 60 | use core::ptr::NonNull; 61 | use core::sync::atomic::{ 62 | self, 63 | Ordering::{self, Acquire, Relaxed, Release, SeqCst}, 64 | }; 65 | 66 | use reclaim::align::CacheAligned; 67 | use reclaim::leak::Owned; 68 | 69 | use crate::hazard::{Hazard, FREE, THREAD_RESERVED}; 70 | use crate::sanitize::{RELEASE_FAIL, RELEASE_SUCCESS}; 71 | 72 | type Atomic = reclaim::leak::Atomic; 73 | type Shared<'g, T> = reclaim::leak::Shared<'g, T, reclaim::typenum::U0>; 74 | 75 | //////////////////////////////////////////////////////////////////////////////////////////////////// 76 | // HazardList 77 | //////////////////////////////////////////////////////////////////////////////////////////////////// 78 | 79 | /// Linked list for storing hazard pointers 80 | #[derive(Debug, Default)] 81 | pub(crate) struct HazardList { 82 | head: Atomic, 83 | } 84 | 85 | /********** impl inherent *************************************************************************/ 86 | 87 | impl HazardList { 88 | /// Creates a new empty list. 89 | #[inline] 90 | pub const fn new() -> Self { 91 | Self { head: Atomic::null() } 92 | } 93 | 94 | /// Creates a (fused) iterator for the list. 95 | #[inline] 96 | pub fn iter(&self) -> Iter { 97 | Iter { 98 | // (LIS:1) this `Acquire` load synchronizes-with the `Release` CAS (LIS:5) 99 | current: self.head.load_shared(Acquire), 100 | } 101 | } 102 | 103 | /// Acquires an already inserted and inactive hazard pointer or allocates a 104 | /// new one at the tail and returns a reference to it. 105 | #[cold] 106 | pub fn get_hazard(&self, protect: Option>) -> &Hazard { 107 | // this should be evaluated at compile-time 108 | let (ptr, order) = match protect { 109 | Some(protect) => (protect.as_ptr(), SeqCst), 110 | None => (THREAD_RESERVED, Release), 111 | }; 112 | 113 | self.get_hazard_for(ptr, order) 114 | } 115 | 116 | #[inline] 117 | fn get_hazard_for(&self, ptr: *mut (), order: Ordering) -> &Hazard { 118 | let mut prev = &self.head; 119 | // (LIS:2) this `Acquire` load synchronizes-with the `Release` CAS (LIS:5) 120 | let mut curr = prev.load_shared(Acquire); 121 | 122 | while let Some(node) = curr.map(Shared::into_ref) { 123 | if node.hazard().protected.load(Relaxed) == FREE { 124 | // (LIS:3P) this `SeqCst`/`Release` CAS synchronizes-with the `SeqCst` fence (LOC:2) 125 | // and enforces a total order in case BOTH are `SeqCst` 126 | let prev = node.hazard.protected.compare_and_swap(FREE, ptr, order); 127 | 128 | if prev == FREE { 129 | return node.hazard(); 130 | } 131 | } 132 | 133 | prev = node.next(); 134 | // (LIS:4) this `Acquire` load synchronizes-with the `Release` CAS (LIS:5) 135 | curr = node.next().load_shared(Acquire); 136 | } 137 | 138 | self.insert_back(prev, ptr) 139 | } 140 | 141 | /// Allocates and inserts a new node (hazard pointer) at the tail of the list. 
142 | #[inline] 143 | fn insert_back(&self, mut tail: &Atomic, ptr: *mut ()) -> &Hazard { 144 | let node = unsafe { 145 | Owned::leak_shared(Owned::new(HazardNode { 146 | hazard: CacheAligned(Hazard::new(ptr)), 147 | next: CacheAligned(Atomic::null()), 148 | })) 149 | }; 150 | 151 | loop { 152 | // (LIS:5) this `Release` CAS synchronizes-with the `Acquire` loads (LIS:1), (LIS:2), 153 | // (LIS:4) and the `Acquire` fence (LIS:7) 154 | match tail.compare_exchange_weak(Shared::none(), node, RELEASE_SUCCESS, RELEASE_FAIL) { 155 | Ok(_) => return &*Shared::into_ref(node).hazard, 156 | Err(fail) => { 157 | // (LIS:6) this `Acquire` fence synchronizes-with the `Release` CAS (LIS:5) 158 | atomic::fence(Acquire); 159 | 160 | // this is safe because nodes are never retired or reclaimed 161 | if let Some(node) = fail.loaded { 162 | tail = unsafe { &node.deref_unprotected().next }; 163 | } 164 | } 165 | } 166 | } 167 | } 168 | } 169 | 170 | /********** impl Drop *****************************************************************************/ 171 | 172 | impl Drop for HazardList { 173 | #[inline] 174 | fn drop(&mut self) { 175 | let mut curr = self.head.take(); 176 | while let Some(mut owned) = curr { 177 | curr = owned.next.take(); 178 | mem::drop(owned); 179 | } 180 | } 181 | } 182 | 183 | //////////////////////////////////////////////////////////////////////////////////////////////////// 184 | // Iter 185 | //////////////////////////////////////////////////////////////////////////////////////////////////// 186 | 187 | /// Iterator for a `HazardList` 188 | #[derive(Debug)] 189 | pub(crate) struct Iter<'a> { 190 | current: Option>, 191 | } 192 | 193 | /********** impl Iterator *************************************************************************/ 194 | 195 | impl<'a> Iterator for Iter<'a> { 196 | type Item = &'a Hazard; 197 | 198 | #[inline] 199 | fn next(&mut self) -> Option { 200 | self.current.take().map(|node| { 201 | let node = Shared::into_ref(node); 202 | self.current = node.next.load_shared(Acquire); 203 | &*node.hazard 204 | }) 205 | } 206 | } 207 | 208 | /********** impl FusedIterator ********************************************************************/ 209 | 210 | impl<'a> FusedIterator for Iter<'a> {} 211 | 212 | //////////////////////////////////////////////////////////////////////////////////////////////////// 213 | // HazardNode 214 | //////////////////////////////////////////////////////////////////////////////////////////////////// 215 | 216 | #[derive(Debug)] 217 | struct HazardNode { 218 | hazard: CacheAligned, 219 | next: CacheAligned>, 220 | } 221 | 222 | /********** impl inherent *************************************************************************/ 223 | 224 | impl HazardNode { 225 | #[inline] 226 | fn hazard(&self) -> &Hazard { 227 | &*self.hazard 228 | } 229 | 230 | #[inline] 231 | fn next(&self) -> &Atomic { 232 | &*self.next 233 | } 234 | } 235 | 236 | #[cfg(test)] 237 | mod tests { 238 | use std::ptr::NonNull; 239 | use std::sync::atomic::Ordering; 240 | 241 | use super::HazardList; 242 | 243 | #[test] 244 | fn insert_one() { 245 | let ptr = NonNull::new(0xDEAD_BEEF as *mut ()).unwrap(); 246 | 247 | let list = HazardList::new(); 248 | let hazard = list.get_hazard(Some(ptr)); 249 | assert_eq!(hazard.protected.load(Ordering::Relaxed), 0xDEAD_BEEF as *mut ()); 250 | } 251 | 252 | #[test] 253 | fn iter() { 254 | let ptr = NonNull::new(0xDEAD_BEEF as *mut ()).unwrap(); 255 | 256 | let list = HazardList::new(); 257 | let _ = list.get_hazard(Some(ptr)); 258 | let _ = 
list.get_hazard(Some(ptr)); 259 | let _ = list.get_hazard(Some(ptr)); 260 | 261 | assert!(list 262 | .iter() 263 | .fuse() 264 | .all(|hazard| hazard.protected.load(Ordering::Relaxed) == ptr.as_ptr())); 265 | } 266 | } 267 | -------------------------------------------------------------------------------- /src/hazard/mod.rs: -------------------------------------------------------------------------------- 1 | //! Data structures and functionality for temporarily protecting specific 2 | //! pointers acquired by specific threads from concurrent reclamation. 3 | //! 4 | //! # Global List 5 | //! 6 | //! All hazard pointers are stored in a global linked list. This list can never 7 | //! remove and deallocate any of its entries, since this would require some 8 | //! scheme for concurrent memory reclamation on its own. Consequently, this 9 | //! linked list can only grow for the entire program runtime and is never 10 | //! actually dropped. However, its individual entries can be reused arbitrarily 11 | //! often. 12 | //! 13 | //! # Hazard Pointers 14 | //! 15 | //! Whenever a thread reads a value in a data structure from shared memory it 16 | //! has to acquire a hazard pointer for it before the loaded reference to the 17 | //! value can be safely dereferenced. These pointers are stored in the global 18 | //! list of hazard pointers. Any time a thread wants to reclaim a retired 19 | //! record, it has to ensure that no hazard pointer in this list still protects 20 | //! the retired value. 21 | 22 | mod list; 23 | 24 | use core::ptr::NonNull; 25 | use core::sync::atomic::{AtomicPtr, Ordering}; 26 | 27 | pub(crate) use self::list::HazardList; 28 | 29 | const FREE: *mut () = 0 as *mut (); 30 | const THREAD_RESERVED: *mut () = 1 as *mut (); 31 | 32 | //////////////////////////////////////////////////////////////////////////////////////////////////// 33 | // Hazard 34 | //////////////////////////////////////////////////////////////////////////////////////////////////// 35 | 36 | /// A pointer visible to all threads that is protected from reclamation. 37 | #[derive(Debug)] 38 | pub struct Hazard { 39 | protected: AtomicPtr<()>, 40 | } 41 | 42 | /********** impl inherent *************************************************************************/ 43 | 44 | impl Hazard { 45 | /// Marks the hazard as unused (available for acquisition by any thread). 46 | #[inline] 47 | pub fn set_free(&self, order: Ordering) { 48 | self.protected.store(FREE, order); 49 | } 50 | 51 | /// Marks the hazard as unused but reserved by a specific thread for quick 52 | /// acquisition. 53 | #[inline] 54 | pub fn set_thread_reserved(&self, order: Ordering) { 55 | self.protected.store(THREAD_RESERVED, order); 56 | } 57 | 58 | /// Gets the protected pointer, if there is one. 59 | #[inline] 60 | pub fn protected(&self, order: Ordering) -> Option { 61 | match self.protected.load(order) { 62 | FREE | THREAD_RESERVED => None, 63 | ptr => Some(Protected(unsafe { NonNull::new_unchecked(ptr) })), 64 | } 65 | } 66 | 67 | /// Marks the hazard as actively protecting the given pointer `protect`. 68 | /// 69 | /// The ordering can be specified, but must be `SeqCst`. This is done so the 70 | /// ordering is clearly specified at the call site. 71 | /// 72 | /// # Panics 73 | /// 74 | /// This operation panics if `ordering` is not `SeqCst`. 
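///
/// # Example
///
/// A minimal sketch mirroring the test at the bottom of this module (the
/// constructor is crate-private, so this is illustrative only):
///
/// ```ignore
/// use core::ptr::NonNull;
/// use core::sync::atomic::Ordering;
///
/// let ptr = NonNull::from(&1);
/// let hazard = Hazard::new(ptr.cast().as_ptr());
///
/// // announce the pointer as protected; any ordering other than `SeqCst`
/// // would panic
/// hazard.set_protected(ptr.cast(), Ordering::SeqCst);
/// assert_eq!(hazard.protected(Ordering::Relaxed).unwrap().address(), ptr.as_ptr() as usize);
/// ```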
75 | #[inline] 76 | pub fn set_protected(&self, protect: NonNull<()>, order: Ordering) { 77 | assert_eq!(order, Ordering::SeqCst, "must only be called with `SeqCst`"); 78 | self.protected.store(protect.as_ptr(), order); 79 | } 80 | 81 | /// Creates new hazard for insertion in the global hazards list. 82 | /// 83 | /// The hazard is initially reserved for the thread initiating the request 84 | /// for a hazard. 85 | #[inline] 86 | fn new(ptr: *mut ()) -> Self { 87 | debug_assert_ne!(ptr, FREE); 88 | Self { protected: AtomicPtr::new(ptr) } 89 | } 90 | } 91 | 92 | //////////////////////////////////////////////////////////////////////////////////////////////////// 93 | // Protected 94 | //////////////////////////////////////////////////////////////////////////////////////////////////// 95 | 96 | /// An untyped pointer protected from reclamation, because it is stored within a hazard pair. 97 | /// 98 | /// The type information is deliberately stripped as it is not needed in order to determine whether 99 | /// a pointer is protected or not. 100 | #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] 101 | pub struct Protected(NonNull<()>); 102 | 103 | /********** impl inherent *************************************************************************/ 104 | 105 | impl Protected { 106 | /// Gets the memory address of the protected pointer. 107 | #[inline] 108 | pub fn address(self) -> usize { 109 | self.0.as_ptr() as usize 110 | } 111 | 112 | /// Gets the internal non-nullable pointer. 113 | #[inline] 114 | pub fn into_inner(self) -> NonNull<()> { 115 | self.0 116 | } 117 | } 118 | 119 | #[cfg(test)] 120 | mod tests { 121 | use std::ptr::NonNull; 122 | use std::sync::atomic::Ordering; 123 | 124 | use super::*; 125 | 126 | #[test] 127 | fn protect_hazard() { 128 | let ptr = NonNull::from(&1); 129 | 130 | let hazard = Hazard::new(ptr.cast().as_ptr()); 131 | assert_eq!(ptr.as_ptr() as usize, hazard.protected(Ordering::Relaxed).unwrap().address()); 132 | 133 | hazard.set_free(Ordering::Relaxed); 134 | assert_eq!(None, hazard.protected(Ordering::Relaxed)); 135 | assert_eq!(FREE, hazard.protected.load(Ordering::Relaxed)); 136 | 137 | hazard.set_thread_reserved(Ordering::Relaxed); 138 | assert_eq!(None, hazard.protected(Ordering::Relaxed)); 139 | assert_eq!(THREAD_RESERVED, hazard.protected.load(Ordering::Relaxed)); 140 | 141 | hazard.set_protected(ptr.cast(), Ordering::SeqCst); 142 | assert_eq!(ptr.as_ptr() as usize, hazard.protected(Ordering::Relaxed).unwrap().address()); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Hazard pointer based concurrent memory reclamation. 2 | //! 3 | //! A difficult problem that has to be considered when implementing lock-free 4 | //! collections or data structures is deciding, when a removed entry can be 5 | //! safely deallocated. 6 | //! It is usually not correct to deallocate removed entries right away, because 7 | //! different threads might still hold references to such entries and could 8 | //! consequently access already freed memory. 9 | //! 10 | //! Concurrent memory reclamation schemes solve that problem by extending the 11 | //! lifetime of removed entries for a certain *grace period*. 12 | //! After this period it must be impossible for other threads to have any 13 | //! references to these entries anymore and they can be finally deallocated. 14 | //! 
This is similar to the concept of *Garbage Collection* in languages like Go 15 | //! and Java, but with a much more limited scope. 16 | //! 17 | //! The Hazard-pointer reclamation scheme was described by Maged M. Michael in 18 | //! 2004 [[1]]. 19 | //! It requires every *read* of an entry from shared memory to be accompanied by 20 | //! a global announcement marking the read entry as protected. 21 | //! Threads must store removed (retired) entries in a local cache and regularly 22 | //! attempt to reclaim all cached records in bulk. 23 | //! A record is safe to be reclaimed, once there is no hazard pointer protecting 24 | //! it anymore. 25 | //! 26 | //! # Reclamation Interface and Pointer Types 27 | //! 28 | //! The API of this library follows the abstract interface defined by the 29 | //! [`reclaim`][reclaim] crate. 30 | //! Hence, it uses the following types for atomically reading and writing from 31 | //! and to shared memory: 32 | //! 33 | //! - [`Atomic`] 34 | //! - [`Owned`] 35 | //! - [`Shared`] 36 | //! - [`Unlinked`] 37 | //! - [`Unprotected`] 38 | //! 39 | //! The primary type exposed by this API is [`Atomic`], which is a 40 | //! shared atomic pointer with similar semantics to `Option>`. 41 | //! It provides all operations that are also supported by `AtomicPtr`, such as 42 | //! `store`, `load` or `compare_exchange`. 43 | //! All *load* operations on an [`Atomic`] return (optional) [`Shared`] 44 | //! references. 45 | //! [`Shared`] is a non-nullable pointer type that is protected by a hazard 46 | //! pointer and has similar semantics to `&T`. 47 | //! *Read-Modify-Write* operations (`swap`, `compare_exchange`, 48 | //! `compare_exchange_weak`) return [`Unlinked`] values if they succeed. 49 | //! Only values that are successfully unlinked in this manner can be retired, 50 | //! which means they will be automatically reclaimed at some some point when it 51 | //! is safe to do so. 52 | //! [`Unprotected`] is useful for comparing and storing values, which do not 53 | //! need to be de-referenced and hence don't need to be protected by hazard 54 | //! pointers. 55 | //! 56 | //! # Compare-and-Swap 57 | //! 58 | //! The atomic [`compare_exchange`][reclaim::Atomic::compare_exchange] method of 59 | //! the [`Atomic`] type is highly versatile and uses generics and (internal) 60 | //! traits in order to achieve some degree of argument *overloading*. 61 | //! The `current` and `new` arguments accept a wide variety of pointer types, 62 | //! interchangeably. 63 | //! 64 | //! For instance, `current` accepts values of either types [`Shared`], 65 | //! [`Option`][Option], or [`Marked`][Marked]. 66 | //! The same range of types and wrappers is also accepted for [`Unprotected`] 67 | //! values. 68 | //! A *compare-and-swap* can only succeed if the `current` value is equal to 69 | //! the value that is actually stored in the [`Atomic`]. 70 | //! Consequently, the return type of this method adapts to the input type: 71 | //! When `current` is either a [`Shared`] or an [`Unprotected`], the return 72 | //! type is [`Unlinked`], since all of these types are non-nullable. 73 | //! However, when `current` is an `Option`, the return type is 74 | //! `Option`. 75 | //! 76 | //! The `new` argument accepts types like [`Owned`], [`Shared`], [`Unlinked`], 77 | //! [`Unprotected`] or `Option` thereof. 78 | //! Care has to be taken when inserting a [`Shared`] in this way, as it is 79 | //! possible to insert the value twice at different positions of the same 80 | //! 
collection, which violates the primary reclamation invariant (which is also 81 | //! the reason why `retire` is unsafe): 82 | //! It must be impossible for a thread to read a reference to a value that has 83 | //! previously been retired. 84 | //! 85 | //! When a *compare-and-swap* fails, a [`struct`][reclaim::CompareExchangeFailure] 86 | //! is returned that contains both the *actual* value and the value that was 87 | //! attempted to be inserted. 88 | //! This ensures that move-only types like [`Owned`] and [`Unlinked`] can be 89 | //! retrieved again in the case of a failed *compare-and-swap*. 90 | //! The actually loaded value is returned in the form a [`MarkedPtr`][reclaim::MarkedPtr]. 91 | //! 92 | //! The other methods of [`Atomic`][Atomic] are similarly versatile in terms of 93 | //! accepted argument types. 94 | //! 95 | //! # Pointer Tagging 96 | //! 97 | //! Many concurrent algorithms require the use of atomic pointers with 98 | //! additional information stored in one or more of a pointer's lower bits. 99 | //! For this purpose the [`reclaim`][reclaim] crate provides a type-based 100 | //! generic solution for making pointer types markable. 101 | //! The number of usable lower bits is part of the type signature of types like 102 | //! [`Atomic`] or [`Owned`]. 103 | //! If the pointed-to type is not able to provide the required number of mark 104 | //! bits (which depends on its alignment) this will lead to a compilation error. 105 | //! Since the number of mark bits is part of the types themselves, using zero 106 | //! mark bits also has zero runtime overhead. 107 | //! 108 | //! [1]: https://dl.acm.org/citation.cfm?id=987595 109 | //! [reclaim]: https://github.com/oliver-giersch/reclaim 110 | 111 | #![cfg_attr(not(any(test, feature = "std")), no_std)] 112 | #![warn(missing_docs)] 113 | 114 | #[cfg(not(feature = "std"))] 115 | extern crate alloc; 116 | 117 | #[cfg(any(test, feature = "std"))] 118 | mod default; 119 | 120 | mod config; 121 | mod global; 122 | mod guard; 123 | mod hazard; 124 | mod local; 125 | mod retired; 126 | 127 | pub use reclaim; 128 | pub use reclaim::typenum; 129 | 130 | use cfg_if::cfg_if; 131 | use reclaim::prelude::*; 132 | use typenum::Unsigned; 133 | 134 | pub use crate::config::{Config, ConfigBuilder}; 135 | 136 | /// A specialization of [`Atomic`][reclaim::Atomic] for the [`HP`] reclamation 137 | /// scheme. 138 | pub type Atomic = reclaim::Atomic; 139 | /// A specialization of [`Shared`][reclaim::Shared] for the [`HP`] reclamation 140 | /// scheme. 141 | pub type Shared<'g, T, N> = reclaim::Shared<'g, T, HP, N>; 142 | /// A specialization of [`Owned`][reclaim::Owned] for the [`HP`] reclamation 143 | /// scheme. 144 | pub type Owned = reclaim::Owned; 145 | /// A specialization of [`Unlinked`][reclaim::Unlinked] for the [`HP`] 146 | /// reclamation scheme. 147 | pub type Unlinked = reclaim::Unlinked; 148 | /// A specialization of [`Unprotected`][reclaim::Unprotected] for the [`HP`] 149 | /// reclamation scheme. 150 | pub type Unprotected = reclaim::Unprotected; 151 | 152 | cfg_if! { 153 | if #[cfg(feature = "std")] { 154 | /// A guarded pointer that can be used to acquire hazard pointers. 155 | pub type Guard = crate::default::Guard; 156 | } else { 157 | pub use crate::local::{Local, RecycleError}; 158 | /// A **thread local** guarded pointer that can be used to acquire 159 | /// hazard pointers. 
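///
/// A rough usage sketch for `no_std` builds (default features disabled),
/// where the thread local state is managed explicitly:
///
/// ```ignore
/// use hazptr::{Local, LocalGuard};
///
/// // each thread owns a `Local` and passes a reference to it whenever a
/// // new guard is created
/// let local = Local::new();
/// let mut guard = LocalGuard::with_access(&local);
/// // the guard can now be used with `Atomic::load` as in the `std` case
/// ```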
160 | pub type LocalGuard<'a> = crate::guarded::Guard<&'a Local>; 161 | } 162 | } 163 | 164 | #[cfg(not(feature = "std"))] 165 | use conquer_once::spin::OnceCell; 166 | #[cfg(feature = "std")] 167 | use conquer_once::OnceCell; 168 | 169 | use crate::retired::Retired; 170 | 171 | /// Global one-time configuration for runtime parameters used for memory 172 | /// reclamation. 173 | pub static CONFIG: OnceCell = OnceCell::new(); 174 | 175 | //////////////////////////////////////////////////////////////////////////////////////////////////// 176 | // HP 177 | //////////////////////////////////////////////////////////////////////////////////////////////////// 178 | 179 | /// Hazard Pointer based reclamation scheme. 180 | #[derive(Debug, Default, Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] 181 | pub struct HP; 182 | 183 | /********** impl Reclaim **************************************************************************/ 184 | 185 | unsafe impl Reclaim for HP { 186 | type Local = crate::local::Local; 187 | type RecordHeader = (); // no extra header per allocated record is required 188 | 189 | #[inline] 190 | unsafe fn retire_local(local: &Self::Local, unlinked: Unlinked) { 191 | Self::retire_local_unchecked(local, unlinked) 192 | } 193 | 194 | #[inline] 195 | unsafe fn retire_local_unchecked( 196 | local: &Self::Local, 197 | unlinked: Unlinked, 198 | ) { 199 | let unmarked = Unlinked::into_marked_non_null(unlinked).decompose_non_null(); 200 | local.retire_record(Retired::new_unchecked(unmarked)); 201 | } 202 | } 203 | 204 | // The ThreadSanitizer can not correctly asses ordering restraints from explicit 205 | // fences, so memory operations around such fences need stricter ordering than 206 | // `Relaxed`, when instrumentation is chosen. 207 | 208 | #[cfg(not(feature = "sanitize-threads"))] 209 | mod sanitize { 210 | use core::sync::atomic::Ordering; 211 | 212 | pub const RELAXED_LOAD: Ordering = Ordering::Relaxed; 213 | pub const RELAXED_STORE: Ordering = Ordering::Relaxed; 214 | 215 | pub const RELEASE_SUCCESS: Ordering = Ordering::Release; 216 | pub const RELEASE_FAIL: Ordering = Ordering::Relaxed; 217 | } 218 | 219 | #[cfg(feature = "sanitize-threads")] 220 | mod sanitize { 221 | use core::sync::atomic::Ordering; 222 | 223 | pub const RELAXED_LOAD: Ordering = Ordering::Acquire; 224 | pub const RELAXED_STORE: Ordering = Ordering::Release; 225 | 226 | pub const RELEASE_SUCCESS: Ordering = Ordering::AcqRel; 227 | pub const RELEASE_FAIL: Ordering = Ordering::Acquire; 228 | } 229 | -------------------------------------------------------------------------------- /src/local.rs: -------------------------------------------------------------------------------- 1 | //! Thread local state and caches for reserving hazard pointers and storing 2 | //! retired records. 
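//!
//! A rough sketch of how this state is wired up when the `std` feature is
//! enabled (compare `src/default.rs`):
//!
//! ```ignore
//! // one `Local` instance per thread; guards and retire operations reach it
//! // through the `LocalAccess` trait, which is implemented for `&Local`
//! thread_local!(static LOCAL: Local = Local::new());
//!
//! LOCAL.with(|local| {
//!     let hazard = local.get_hazard(None);
//!     // ... use the hazard pointer, then try to return it to the local cache
//!     let _ = local.try_recycle_hazard(hazard);
//! });
//! ```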
3 | 4 | #[cfg(feature = "std")] 5 | use std::error; 6 | 7 | #[cfg(not(feature = "std"))] 8 | use alloc::{boxed::Box, vec::Vec}; 9 | 10 | use core::cell::UnsafeCell; 11 | use core::fmt; 12 | use core::mem::ManuallyDrop; 13 | use core::ptr::{self, NonNull}; 14 | use core::sync::atomic::{ 15 | self, 16 | Ordering::{Release, SeqCst}, 17 | }; 18 | 19 | use arrayvec::{ArrayVec, CapacityError}; 20 | 21 | use crate::global::GLOBAL; 22 | use crate::hazard::{Hazard, Protected}; 23 | use crate::retired::{ReclaimOnDrop, Retired, RetiredBag}; 24 | use crate::{sanitize, Config, CONFIG}; 25 | 26 | //////////////////////////////////////////////////////////////////////////////////////////////////// 27 | // constants 28 | //////////////////////////////////////////////////////////////////////////////////////////////////// 29 | 30 | const HAZARD_CACHE: usize = 16; 31 | const SCAN_CACHE: usize = 64; 32 | 33 | //////////////////////////////////////////////////////////////////////////////////////////////////// 34 | // LocalAccess (trait) 35 | //////////////////////////////////////////////////////////////////////////////////////////////////// 36 | 37 | /// A trait for abstracting over different means of accessing thread local state 38 | pub trait LocalAccess 39 | where 40 | Self: Clone + Copy + Sized, 41 | { 42 | /// Gets a hazard from local or global storage. 43 | fn get_hazard(self, protect: Option>) -> &'static Hazard; 44 | 45 | /// Attempts to recycle `hazard` in the thread local cache for hazards 46 | /// reserved for the current thread. 47 | /// 48 | /// # Errors 49 | /// 50 | /// This operation can fail in two circumstances: 51 | /// 52 | /// - the thread local cache is full ([`RecycleErr::Capacity`](RecycleErr::Capacity)) 53 | /// - access to the thread local state fails ([`RecycleErr::Access`](RecycleErr::Access)) 54 | fn try_recycle_hazard(self, hazard: &'static Hazard) -> Result<(), RecycleError>; 55 | 56 | /// Increase the internal count of a threads operations counting towards the 57 | /// threshold for initiating a new attempt for reclaiming all retired 58 | /// records. 59 | fn increase_ops_count(self); 60 | } 61 | 62 | //////////////////////////////////////////////////////////////////////////////////////////////////// 63 | // Local 64 | //////////////////////////////////////////////////////////////////////////////////////////////////// 65 | 66 | /// Container for all thread local data required for reclamation with hazard 67 | /// pointers. 68 | #[derive(Debug)] 69 | pub struct Local(UnsafeCell); 70 | 71 | /********** impl Default ***************************************************************************/ 72 | 73 | impl Default for Local { 74 | #[inline] 75 | fn default() -> Self { 76 | Self::new() 77 | } 78 | } 79 | 80 | /********** impl inherent *************************************************************************/ 81 | 82 | impl Local { 83 | /// Creates a new container for the thread local state. 84 | #[inline] 85 | pub fn new() -> Self { 86 | let config = CONFIG.try_get().ok().copied().unwrap_or_default(); 87 | 88 | Self(UnsafeCell::new(LocalInner { 89 | config, 90 | ops_count: 0, 91 | flush_count: 0, 92 | hazard_cache: ArrayVec::new(), 93 | scan_cache: Vec::with_capacity(SCAN_CACHE), 94 | retired_bag: match GLOBAL.try_adopt_abandoned_records() { 95 | Some(boxed) => ManuallyDrop::new(boxed), 96 | None => ManuallyDrop::new(Box::new(RetiredBag::new(config.init_cache()))), 97 | }, 98 | })) 99 | } 100 | 101 | /// Attempts to reclaim some retired records. 
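///
/// This is the operation backing `HP::try_flush` of the `GlobalReclaim`
/// trait (see `src/default.rs`); a rough sketch of that public entry point:
///
/// ```ignore
/// use hazptr::reclaim::GlobalReclaim;
///
/// // eagerly attempt to reclaim the current thread's retired records
/// // instead of waiting for the scan threshold to be reached
/// hazptr::HP::try_flush();
/// ```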
102 | #[inline] 103 | pub(crate) fn try_flush(&self) { 104 | unsafe { &mut *self.0.get() }.try_flush(); 105 | } 106 | 107 | /// Retires a record and increases the operations count. 108 | /// 109 | /// If the operations count reaches a threshold, a scan is triggered which 110 | /// reclaims all records that can be safely reclaimed and resets the 111 | /// operations count. 112 | /// Previously, an attempt is made to adopt all globally abandoned records. 113 | #[inline] 114 | pub(crate) fn retire_record(&self, record: Retired) { 115 | let local = unsafe { &mut *self.0.get() }; 116 | local.retired_bag.inner.push(unsafe { ReclaimOnDrop::new(record) }); 117 | #[cfg(not(feature = "count-release"))] 118 | local.increase_ops_count(); 119 | } 120 | } 121 | 122 | /********** impl LocalAccess **********************************************************************/ 123 | 124 | impl<'a> LocalAccess for &'a Local { 125 | /// Attempts to take a reserved hazard from the thread local cache if there 126 | /// are any. 127 | #[inline] 128 | fn get_hazard(self, protect: Option>) -> &'static Hazard { 129 | // FIXME: `protect` is only protected when a hazard is retrieved globally 130 | let local = unsafe { &mut *self.0.get() }; 131 | match local.hazard_cache.pop() { 132 | Some(hazard) => hazard, 133 | None => GLOBAL.get_hazard(protect), 134 | } 135 | } 136 | 137 | /// Attempts to cache `hazard` in the thread local storage. 138 | /// 139 | /// # Errors 140 | /// 141 | /// The operation can fail if the thread local hazard cache is at maximum 142 | /// capacity. 143 | #[inline] 144 | fn try_recycle_hazard(self, hazard: &'static Hazard) -> Result<(), RecycleError> { 145 | unsafe { &mut *self.0.get() }.hazard_cache.try_push(hazard)?; 146 | 147 | // (LOC:1) this `Release` store synchronizes-with the `SeqCst` fence (LOC:2) but WITHOUT 148 | // enforcing a total order 149 | hazard.set_thread_reserved(Release); 150 | Ok(()) 151 | } 152 | 153 | /// Increases the thread local operations count and triggers a scan if the 154 | /// threshold is reached. 155 | #[inline] 156 | fn increase_ops_count(self) { 157 | unsafe { &mut *self.0.get() }.increase_ops_count(); 158 | } 159 | } 160 | 161 | //////////////////////////////////////////////////////////////////////////////////////////////////// 162 | // LocalInner 163 | //////////////////////////////////////////////////////////////////////////////////////////////////// 164 | 165 | #[derive(Debug)] 166 | struct LocalInner { 167 | /// The copy of the global configuration that is read once during 168 | /// a thread's creation 169 | config: Config, 170 | /// The counter for determining when to attempt to adopt abandoned records 171 | flush_count: u32, 172 | /// The thread local cache for reserved hazard pointers 173 | hazard_cache: ArrayVec<[&'static Hazard; HAZARD_CACHE]>, 174 | /// The counter for determining when to attempt reclamation of retired 175 | /// records. 176 | ops_count: u32, 177 | /// The cache for storing currently protected records during scan attempts 178 | scan_cache: Vec, 179 | /// The cache for storing retired records 180 | retired_bag: ManuallyDrop>, 181 | } 182 | 183 | /********** impl inherent *************************************************************************/ 184 | 185 | impl LocalInner { 186 | /// Increases the operations count and triggers a scan if the threshold is 187 | /// reached. 
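///
/// With the default feature set this is called once per retired record;
/// with the `count-release` feature it is instead called once per dropped
/// hazard `Guard` (see `Config::scan_threshold`).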
188 | #[inline] 189 | fn increase_ops_count(&mut self) { 190 | self.ops_count += 1; 191 | 192 | if self.ops_count == self.config.scan_threshold() { 193 | self.try_flush(); 194 | } 195 | } 196 | 197 | /// Attempts to reclaim some retired records. 198 | #[cold] 199 | fn try_flush(&mut self) { 200 | self.ops_count = 0; 201 | 202 | // try to adopt and merge any (global) abandoned retired bags 203 | if let Some(abandoned_bag) = GLOBAL.try_adopt_abandoned_records() { 204 | self.retired_bag.merge(abandoned_bag.inner); 205 | } 206 | 207 | self.scan_hazards(); 208 | } 209 | 210 | /// Reclaims all locally retired records that are unprotected and returns 211 | /// the number of reclaimed records. 212 | #[inline] 213 | fn scan_hazards(&mut self) { 214 | let len = self.retired_bag.inner.len(); 215 | if len <= self.config.min_required_records() as usize { 216 | return; 217 | } 218 | 219 | // (LOC:2) this `SeqCst` fence synchronizes-with the `SeqCst` stores (GUA:3), (GUA:4), 220 | // (GUA:5) and the `SeqCst` CAS (LIS:3P). 221 | // This enforces a total order between all these operations, which is required in order to 222 | // ensure that all stores PROTECTING pointers are fully visible BEFORE the hazard pointers 223 | // are scanned and unprotected retired records are reclaimed. 224 | GLOBAL.collect_protected_hazards(&mut self.scan_cache, SeqCst); 225 | 226 | self.scan_cache.sort_unstable(); 227 | unsafe { self.reclaim_unprotected_records() }; 228 | } 229 | 230 | // this is declared unsafe because in this function the retired records are actually dropped. 231 | #[allow(unused_unsafe)] 232 | #[inline] 233 | unsafe fn reclaim_unprotected_records(&mut self) { 234 | let scan_cache = &self.scan_cache; 235 | self.retired_bag.inner.retain(|retired| { 236 | // retain (i.e. DON'T drop) all records found within the scan cache of protected hazards 237 | scan_cache.binary_search_by(|&protected| retired.compare_with(protected)).is_ok() 238 | }); 239 | } 240 | } 241 | 242 | /********** impl Drop *****************************************************************************/ 243 | 244 | impl Drop for LocalInner { 245 | #[cold] 246 | fn drop(&mut self) { 247 | // (LOC:3) this `Release` fence synchronizes-with the `SeqCst` fence (LOC:2) but WITHOUT 248 | // enforcing a total order 249 | atomic::fence(Release); 250 | 251 | for hazard in &self.hazard_cache { 252 | hazard.set_free(sanitize::RELAXED_STORE); 253 | } 254 | 255 | self.scan_hazards(); 256 | // this is safe because the field is neither accessed afterwards nor dropped 257 | let bag = unsafe { ptr::read(&*self.retired_bag) }; 258 | 259 | if !bag.inner.is_empty() { 260 | GLOBAL.abandon_retired_bag(bag); 261 | } 262 | } 263 | } 264 | 265 | //////////////////////////////////////////////////////////////////////////////////////////////////// 266 | // RecycleError 267 | //////////////////////////////////////////////////////////////////////////////////////////////////// 268 | 269 | /// Error type for thread local recycle operations. 
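///
/// `Access` indicates that the thread local storage has already been torn
/// down (e.g. during thread shutdown), while `Capacity` indicates that the
/// thread's cache of reserved hazards is already full (compare the `Display`
/// implementation below).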
270 | #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] 271 | pub enum RecycleError { 272 | Access, 273 | Capacity, 274 | } 275 | 276 | /********** impl From *****************************************************************************/ 277 | 278 | impl From> for RecycleError { 279 | #[inline] 280 | fn from(_: CapacityError<&'static Hazard>) -> Self { 281 | RecycleError::Capacity 282 | } 283 | } 284 | 285 | /********** impl Display **************************************************************************/ 286 | 287 | impl fmt::Display for RecycleError { 288 | #[inline] 289 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 290 | use RecycleError::{Access, Capacity}; 291 | match *self { 292 | Access => write!(f, "failed to access already destroyed thread local storage"), 293 | Capacity => write!(f, "thread local cache for hazard pointer already full"), 294 | } 295 | } 296 | } 297 | 298 | /********** impl Error ****************************************************************************/ 299 | 300 | #[cfg(feature = "std")] 301 | impl error::Error for RecycleError {} 302 | 303 | #[cfg(test)] 304 | mod tests { 305 | use std::mem; 306 | use std::ptr::NonNull; 307 | use std::sync::atomic::{AtomicUsize, Ordering}; 308 | 309 | use crate::retired::Retired; 310 | use crate::Config; 311 | 312 | use super::{Local, LocalAccess, HAZARD_CACHE, SCAN_CACHE}; 313 | 314 | struct DropCount<'a>(&'a AtomicUsize); 315 | impl Drop for DropCount<'_> { 316 | fn drop(&mut self) { 317 | self.0.fetch_add(1, Ordering::Relaxed); 318 | } 319 | } 320 | 321 | #[test] 322 | fn acquire_local() { 323 | let local = Local::new(); 324 | let ptr = NonNull::from(&()); 325 | 326 | (0..HAZARD_CACHE) 327 | .map(|_| local.get_hazard(Some(ptr.cast()))) 328 | .collect::>() 329 | .iter() 330 | .try_for_each(|hazard| local.try_recycle_hazard(hazard)) 331 | .unwrap(); 332 | 333 | { 334 | // local hazard cache is full 335 | let inner = unsafe { &*local.0.get() }; 336 | assert_eq!(0, inner.ops_count); 337 | assert_eq!(HAZARD_CACHE, inner.hazard_cache.len()); 338 | assert_eq!(SCAN_CACHE, inner.scan_cache.capacity()); 339 | assert_eq!(0, inner.scan_cache.len()); 340 | } 341 | 342 | // takes all hazards out of local cache and then allocates a new one. 
343 | let hazards: Box<[_]> = 344 | (0..HAZARD_CACHE).map(|_| local.get_hazard(Some(ptr.cast()))).collect(); 345 | let extra = local.get_hazard(Some(ptr.cast())); 346 | 347 | { 348 | // local hazard cache is empty 349 | let inner = unsafe { &*local.0.get() }; 350 | assert_eq!(0, inner.ops_count); 351 | assert_eq!(0, inner.hazard_cache.len()); 352 | assert_eq!(SCAN_CACHE, inner.scan_cache.capacity()); 353 | assert_eq!(0, inner.scan_cache.len()); 354 | } 355 | 356 | hazards.iter().try_for_each(|hazard| local.try_recycle_hazard(*hazard)).unwrap(); 357 | 358 | local.try_recycle_hazard(extra).unwrap_err(); 359 | } 360 | 361 | #[test] 362 | #[cfg_attr(feature = "count-release", ignore)] 363 | fn retire() { 364 | let threshold = Config::default().scan_threshold(); 365 | 366 | let count = AtomicUsize::new(0); 367 | let local = Local::new(); 368 | 369 | // allocate & retire (THRESHOLD - 1) records 370 | (0..threshold - 1) 371 | .map(|_| Box::new(DropCount(&count))) 372 | .map(|record| unsafe { Retired::new_unchecked(NonNull::from(Box::leak(record))) }) 373 | .for_each(|retired| local.retire_record(retired)); 374 | 375 | { 376 | let inner = unsafe { &*local.0.get() }; 377 | assert_eq!(threshold - 1, inner.ops_count); 378 | assert_eq!((threshold - 1) as usize, inner.retired_bag.inner.len()); 379 | } 380 | 381 | // nothing has been dropped so far 382 | assert_eq!(0, count.load(Ordering::Relaxed)); 383 | 384 | // retire another record, triggering a scan which deallocates all records 385 | local.retire_record(unsafe { 386 | Retired::new_unchecked(NonNull::from(Box::leak(Box::new(DropCount(&count))))) 387 | }); 388 | 389 | { 390 | let inner = unsafe { &*local.0.get() }; 391 | assert_eq!(0, inner.ops_count as usize); 392 | assert_eq!(0, inner.retired_bag.inner.len()); 393 | } 394 | 395 | assert_eq!(threshold as usize, count.load(Ordering::Relaxed)); 396 | } 397 | 398 | #[test] 399 | fn drop() { 400 | let below_threshold = Config::default().scan_threshold() / 2; 401 | 402 | let count = AtomicUsize::new(0); 403 | let local = Local::new(); 404 | 405 | (0..below_threshold) 406 | .map(|_| Box::new(DropCount(&count))) 407 | .map(|record| unsafe { Retired::new_unchecked(NonNull::from(Box::leak(record))) }) 408 | .for_each(|retired| local.retire_record(retired)); 409 | 410 | // all retired records are reclaimed when local is dropped 411 | mem::drop(local); 412 | assert_eq!(below_threshold as usize, count.load(Ordering::Relaxed)); 413 | } 414 | } 415 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | // this is mainly useful for checking the assembly output 2 | 3 | use std::sync::atomic::Ordering::{Acquire, Release}; 4 | 5 | use hazptr::{Guard, Owned}; 6 | 7 | type Atomic = hazptr::Atomic; 8 | 9 | static GLOBAL: Atomic = Atomic::null(); 10 | 11 | fn main() { 12 | init(); 13 | let mut guard = Guard::new(); 14 | let _global = GLOBAL.load(Acquire, &mut guard).unwrap(); 15 | } 16 | 17 | #[inline(never)] 18 | fn init() { 19 | GLOBAL.store(Owned::new(1), Release); 20 | } 21 | -------------------------------------------------------------------------------- /src/retired.rs: -------------------------------------------------------------------------------- 1 | //! Caching for retired records until they can be safely dropped and 2 | //! deallocated. 3 | //! 4 | //! # Retired and Retired Bags 5 | //! 6 | //! Pointers to retired records are stored in `Retired` structs. These contain 7 | //! 
fat pointers, so they do maintain dynamic type information, of which only 8 | //! the concrete `Drop` implementation is actually required. 9 | //! They are stored in `RetiredBag` structs and removed (i.e. dropped and 10 | //! de-allocated) only when no thread has an active hazard pointer protecting the 11 | //! same memory address of the reclaimed record. 12 | //! 13 | //! # Abandoned Bags 14 | //! 15 | //! When a thread exits it attempts to reclaim all of its retired records. 16 | //! However, it is possible that some records may not be reclaimed if other 17 | //! threads still have active hazard pointers to these records. 18 | //! In this case, the exiting thread's retired bag with the remaining 19 | //! un-reclaimed records is abandoned, meaning it is stored in a special global 20 | //! queue. 21 | //! Other threads will occasionally attempt to adopt such abandoned records, at 22 | //! which point it becomes the adopting thread's responsibility to reclaim these 23 | //! records. 24 | 25 | #[cfg(not(feature = "std"))] 26 | use alloc::{boxed::Box, vec::Vec}; 27 | 28 | use core::cmp; 29 | use core::mem; 30 | use core::ptr::{self, NonNull}; 31 | use core::sync::atomic::{ 32 | AtomicPtr, 33 | Ordering::{Acquire, Relaxed, Release}, 34 | }; 35 | 36 | use crate::hazard::Protected; 37 | 38 | pub(crate) type Retired = reclaim::Retired; 39 | 40 | //////////////////////////////////////////////////////////////////////////////////////////////////// 41 | // RetiredBag 42 | //////////////////////////////////////////////////////////////////////////////////////////////////// 43 | 44 | /// List for caching reclaimed records before they can be finally 45 | /// dropped/deallocated. 46 | /// 47 | /// This type also functions as potential list node for the global list of 48 | /// abandoned bags. 49 | /// The internal cache uses a `Vec`, which will have to be reallocated if too 50 | /// many retired records are cached at any time. 51 | #[derive(Debug)] 52 | pub(crate) struct RetiredBag { 53 | pub inner: Vec, 54 | next: Option>, 55 | } 56 | 57 | /********** impl inherent *************************************************************************/ 58 | 59 | impl RetiredBag { 60 | /// Creates a new `RetiredBag` with default capacity for retired records. 61 | #[inline] 62 | pub fn new(init_cache: usize) -> Self { 63 | Self { inner: Vec::with_capacity(init_cache), next: None } 64 | } 65 | 66 | /// Merges `self` with the given other `Vec`, which is then dropped 67 | /// (de-allocated). 68 | /// 69 | /// If the `other` bag has substantially higher (free) capacity than `self`, 70 | /// both vectors are swapped before merging. 71 | /// By keeping the larger vector in this case and dropping the smaller one, 72 | /// instead, it could be possible to avoid/defer future re-allocations, when 73 | /// more records are retired. 
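///
/// A rough illustration of this heuristic (crate-internal types, so only
/// a sketch):
///
/// ```ignore
/// let mut bag = RetiredBag::new(4);                          // small allocation
/// let adopted: Vec<ReclaimOnDrop> = Vec::with_capacity(256); // much larger one
/// bag.merge(adopted);
/// // `bag.inner` now keeps the larger allocation, so future retires are
/// // less likely to require a re-allocation
/// ```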
74 |     #[inline]
75 |     pub fn merge(&mut self, mut other: Vec<ReclaimOnDrop>) {
76 |         if (other.capacity() - other.len()) > self.inner.capacity() {
77 |             mem::swap(&mut self.inner, &mut other);
78 |         }
79 |
80 |         self.inner.append(&mut other);
81 |     }
82 | }
83 |
84 | ////////////////////////////////////////////////////////////////////////////////////////////////////
85 | // ReclaimOnDrop
86 | ////////////////////////////////////////////////////////////////////////////////////////////////////
87 |
88 | #[derive(Debug)]
89 | pub(crate) struct ReclaimOnDrop(Retired);
90 |
91 | /********** impl inherent *************************************************************************/
92 |
93 | impl ReclaimOnDrop {
94 |     /// Creates a new [`ReclaimOnDrop`] wrapper for `retired`.
95 |     ///
96 |     /// # Safety
97 |     ///
98 |     /// The caller must ensure that the returned wrapper is only dropped once
99 |     /// no hazard pointer protects the retired value any longer.
100 |     #[allow(unused_unsafe)]
101 |     #[inline]
102 |     pub unsafe fn new(retired: Retired) -> Self {
103 |         Self(retired)
104 |     }
105 |
106 |     /// Compares the address of `protected` with the address of `self`.
107 |     ///
108 |     /// This is used for binary search, so the argument order matters!
109 |     #[inline]
110 |     pub fn compare_with(&self, protected: Protected) -> cmp::Ordering {
111 |         protected.address().cmp(&self.0.address())
112 |     }
113 | }
114 |
115 | /********** impl Drop *****************************************************************************/
116 |
117 | impl Drop for ReclaimOnDrop {
118 |     #[inline]
119 |     fn drop(&mut self) {
120 |         // this is safe because it is guaranteed that, even in case of a panic,
121 |         // retired records are only ever dropped during the course of
122 |         // `LocalInner::scan_hazards`.
123 |         unsafe { self.0.reclaim() };
124 |     }
125 | }
126 |
127 | ////////////////////////////////////////////////////////////////////////////////////////////////////
128 | // AbandonedBags
129 | ////////////////////////////////////////////////////////////////////////////////////////////////////
130 |
131 | /// Concurrent queue containing all retired bags abandoned by exited threads.
132 | #[derive(Debug)]
133 | pub(crate) struct AbandonedBags {
134 |     head: AtomicPtr<RetiredBag>,
135 | }
136 |
137 | /********** impl inherent *************************************************************************/
138 |
139 | impl AbandonedBags {
140 |     /// Creates a new (empty) queue.
141 |     #[inline]
142 |     pub const fn new() -> Self {
143 |         Self { head: AtomicPtr::new(ptr::null_mut()) }
144 |     }
145 |
146 |     /// Adds a new abandoned retired bag to the front of the queue.
147 |     #[inline]
148 |     pub fn push(&self, abandoned: Box<RetiredBag>) {
149 |         let leaked = Box::leak(abandoned);
150 |
151 |         loop {
152 |             let head = self.head.load(Relaxed);
153 |             leaked.next = NonNull::new(head);
154 |
155 |             // (RET:1) this `Release` CAS synchronizes-with the `Acquire` swap in (RET:2)
156 |             if self.head.compare_exchange_weak(head, leaked, Release, Relaxed).is_ok() {
157 |                 return;
158 |             }
159 |         }
160 |     }
161 |
162 |     /// Takes the entire content of the queue and merges the retired records of
163 |     /// all retired bags into one.
164 |     #[inline]
165 |     pub fn take_and_merge(&self) -> Option<Box<RetiredBag>> {
166 |         // probe first in order to avoid the swap if the queue is empty
167 |         if self.head.load(Relaxed).is_null() {
168 |             return None;
169 |         }
170 |
171 |         // (RET:2) this `Acquire` swap synchronizes-with the `Release` CAS in (RET:1)
172 |         let queue = unsafe { self.head.swap(ptr::null_mut(), Acquire).as_mut() };
173 |         queue.map(|bag| {
174 |             let mut boxed = unsafe { Box::from_raw(bag) };
175 |
176 |             let mut curr = boxed.next;
177 |             while let Some(ptr) = curr {
178 |                 let RetiredBag { inner: bag, next } = unsafe { *Box::from_raw(ptr.as_ptr()) };
179 |                 boxed.merge(bag);
180 |                 curr = next;
181 |             }
182 |
183 |             boxed
184 |         })
185 |     }
186 | }
187 |
188 | #[cfg(test)]
189 | mod tests {
190 |     use std::ptr::NonNull;
191 |     use std::sync::atomic::{AtomicUsize, Ordering};
192 |
193 |     use super::{AbandonedBags, ReclaimOnDrop, Retired, RetiredBag};
194 |
195 |     struct DropCount<'a>(&'a AtomicUsize);
196 |     impl Drop for DropCount<'_> {
197 |         fn drop(&mut self) {
198 |             self.0.fetch_add(1, Ordering::Relaxed);
199 |         }
200 |     }
201 |
202 |     #[test]
203 |     fn abandoned_bags() {
204 |         let count = AtomicUsize::new(0);
205 |
206 |         let mut bag1 = Box::new(RetiredBag::new(128));
207 |
208 |         let rec1 = NonNull::from(Box::leak(Box::new(1)));
209 |         let rec2 = NonNull::from(Box::leak(Box::new(2.2)));
210 |         let rec3 = NonNull::from(Box::leak(Box::new(String::from("String"))));
211 |
212 |         bag1.inner.push(unsafe { ReclaimOnDrop::new(Retired::new_unchecked(rec1)) });
213 |         bag1.inner.push(unsafe { ReclaimOnDrop::new(Retired::new_unchecked(rec2)) });
214 |         bag1.inner.push(unsafe { ReclaimOnDrop::new(Retired::new_unchecked(rec3)) });
215 |
216 |         let mut bag2 = Box::new(RetiredBag::new(128));
217 |
218 |         let rec4 = NonNull::from(Box::leak(Box::new(vec![1, 2, 3, 4])));
219 |         let rec5 = NonNull::from(Box::leak(Box::new("slice")));
220 |
221 |         bag2.inner.push(unsafe { ReclaimOnDrop::new(Retired::new_unchecked(rec4)) });
222 |         bag2.inner.push(unsafe { ReclaimOnDrop::new(Retired::new_unchecked(rec5)) });
223 |
224 |         let mut bag3 = Box::new(RetiredBag::new(128));
225 |
226 |         let rec6 = NonNull::from(Box::leak(Box::new(DropCount(&count))));
227 |         let rec7 = NonNull::from(Box::leak(Box::new(DropCount(&count))));
228 |
229 |         bag3.inner.push(unsafe { ReclaimOnDrop::new(Retired::new_unchecked(rec6)) });
230 |         bag3.inner.push(unsafe { ReclaimOnDrop::new(Retired::new_unchecked(rec7)) });
231 |
232 |         let abandoned = AbandonedBags::new();
233 |         abandoned.push(bag1);
234 |         abandoned.push(bag2);
235 |         abandoned.push(bag3);
236 |
237 |         let merged = abandoned.take_and_merge().unwrap();
238 |         assert_eq!(merged.inner.len(), 7);
239 |         assert_eq!(128, merged.inner.capacity());
240 |     }
241 | }
242 |
--------------------------------------------------------------------------------
/tests/integration.rs:
--------------------------------------------------------------------------------
1 | use std::sync::{
2 |     atomic::{AtomicUsize, Ordering},
3 |     Arc, Barrier,
4 | };
5 | use std::thread;
6 |
7 | use hazptr::typenum::U0;
8 | use hazptr::{ConfigBuilder, Guard, Owned, CONFIG};
9 |
10 | type Atomic<T> = hazptr::Atomic<T, U0>;
11 |
12 | struct DropCount(Arc<AtomicUsize>);
13 | impl Drop for DropCount {
14 |     #[inline]
15 |     fn drop(&mut self) {
16 |         self.0.fetch_add(1, Ordering::Relaxed);
17 |     }
18 | }
19 |
20 | #[test]
21 | fn abandon_on_panic() {
22 |     CONFIG.init_once(|| ConfigBuilder::new().scan_threshold(1).build());
23 |
24 |     let drop_count = Arc::new(AtomicUsize::new(0));
25 |
26 |     let records = Arc::new([
27 |         Atomic::new(DropCount(Arc::clone(&drop_count))),
28 |         Atomic::new(DropCount(Arc::clone(&drop_count))),
29 |         Atomic::new(DropCount(Arc::clone(&drop_count))),
30 |     ]);
31 |
32 |     let barrier1 = Arc::new(Barrier::new(2));
33 |     let barrier2 = Arc::new(Barrier::new(2));
34 |
35 |     let t1 = {
36 |         let records = Arc::clone(&records);
37 |         let barrier1 = Arc::clone(&barrier1);
38 |         let barrier2 = Arc::clone(&barrier2);
39 |         thread::spawn(move || {
40 |             let mut guard1 = Guard::new();
41 |             let mut guard2 = Guard::new();
42 |
43 |             let r1 = records[0].load(Ordering::Relaxed, &mut guard1);
44 |             let r2 = records[1].load(Ordering::Relaxed, &mut guard2);
45 |
46 |             barrier1.wait();
47 |             barrier2.wait();
48 |
49 |             assert!(r1.is_some() && r2.is_some(), "references must still be valid");
50 |         })
51 |     };
52 |
53 |     let t2 = {
54 |         let records = Arc::clone(&records);
55 |         let barrier = Arc::clone(&barrier1);
56 |         thread::spawn(move || {
57 |             barrier.wait();
58 |             unsafe {
59 |                 records[0].swap(Owned::none(), Ordering::Relaxed).unwrap().retire();
60 |                 records[1].swap(Owned::none(), Ordering::Relaxed).unwrap().retire();
61 |                 records[2].swap(Owned::none(), Ordering::Relaxed).unwrap().retire();
62 |             }
63 |
64 |             panic!("explicit panic: thread 2 abandons all retired records it can't reclaim");
65 |         })
66 |     };
67 |
68 |     t2.join().unwrap_err();
69 |
70 |     // thread 1 still holds two protected references, so only one record must have been reclaimed
71 |     // when the thread panicked
72 |     assert_eq!(drop_count.load(Ordering::Relaxed), 1);
73 |
74 |     barrier2.wait();
75 |
76 |     t1.join().unwrap();
77 |
78 |     // the "count-release" feature and the scan threshold of 1 ensure that thread 1 initiates two
79 |     // GC scans when r1 and r2 go out of scope, the first of which adopts the retired records
80 |     // abandoned by thread 2 and reclaims them
81 |     assert_eq!(drop_count.load(Ordering::Relaxed), 3);
82 | }
83 |
--------------------------------------------------------------------------------
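A minimal usage sketch (not part of the repository) illustrating the protect-then-retire pattern that the integration test above exercises: a `Guard` protects a loaded value, the value is then unlinked and retired, and its reclamation is deferred until no hazard pointer protects it anymore. Only API calls that already appear in this repository are used (`Guard::new`, `Atomic::new`/`load`/`swap`, `Owned::none`, `retire`); the `Atomic<T>` alias parameters mirror the one assumed for `tests/integration.rs` and are an assumption here, as is the standalone `main` function.

use std::sync::atomic::Ordering::Relaxed;

use hazptr::typenum::U0;
use hazptr::{Guard, Owned};

// assumed alias, mirroring the one in tests/integration.rs
type Atomic<T> = hazptr::Atomic<T, U0>;

fn main() {
    let record = Atomic::new(String::from("some record"));

    // acquire a hazard pointer that protects the loaded value from reclamation
    let mut guard = Guard::new();
    let shared = record.load(Relaxed, &mut guard);

    // unlink the record and retire it; it is cached in the thread's retired bag
    // and can not be reclaimed while the hazard pointer still protects it
    unsafe { record.swap(Owned::none(), Relaxed).unwrap().retire() };

    // the protected reference remains valid here ...
    assert!(shared.is_some());

    // ... and the retired record may only be reclaimed during a later scan,
    // once the guard has been dropped (or when the thread-local state is
    // eventually torn down)
    drop(guard);
}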