├── .github └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── benches └── lib.rs └── src ├── lib.rs ├── link.rs ├── rcu_cell.rs └── rcu_weak.rs /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | paths-ignore: 6 | - '**.md' 7 | pull_request: 8 | paths-ignore: 9 | - '**.md' 10 | workflow_dispatch: 11 | 12 | env: 13 | CARGO_TERM_COLOR: always 14 | 15 | jobs: 16 | lints: 17 | name: Run cargo fmt and cargo clippy 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout sources 21 | uses: actions/checkout@v4 22 | - name: Install toolchain 23 | uses: actions-rs/toolchain@v1 24 | with: 25 | profile: minimal 26 | toolchain: stable 27 | override: true 28 | components: rustfmt, clippy 29 | - name: cargo fmt --check 30 | uses: actions-rs/cargo@v1 31 | with: 32 | command: fmt 33 | args: --all -- --check 34 | - name: Run cargo clippy 35 | uses: actions-rs/cargo@v1 36 | with: 37 | command: clippy 38 | args: -- -D warnings 39 | - name: Run cargo release tests 40 | uses: actions-rs/cargo@v1 41 | with: 42 | command: test 43 | args: --release 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | Cargo.lock 13 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rcu_cell" 3 | edition = "2021" 4 | version = "1.2.1" 5 | description = "a lockless rcu cell implementation" 6 | authors = ["Xudong Huang "] 7 | categories = ["concurrency", "data-structures", "no-std"] 8 | keywords = ["rcu", "lockless", "atomic"] 9 | repository = 
"https://github.com/Xudong-Huang/rcu_cell" 10 | homepage = "https://github.com/Xudong-Huang/rcu_cell" 11 | documentation = "https://docs.rs/rcu_cell" 12 | license = "LGPL-3.0" 13 | readme = "./README.md" 14 | exclude = [".gitignore", "benches/**"] 15 | 16 | [dependencies] 17 | crossbeam-utils = "0.8.20" 18 | 19 | [dev-dependencies] 20 | spin = "0.10" 21 | arc-swap = "1.7" 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 
31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. 
You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 
108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 
145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | 167 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://github.com/Xudong-Huang/rcu_cell/workflows/CI/badge.svg)](https://github.com/Xudong-Huang/rcu_cell/actions?query=workflow%3ACI+branch%3Amaster) 2 | [![Current Crates.io Version](https://img.shields.io/crates/v/rcu_cell.svg)](https://crates.io/crates/rcu_cell) 3 | [![Document](https://img.shields.io/badge/doc-rcu_cell-green.svg)](https://docs.rs/rcu_cell) 4 | 5 | # RcuCell 6 | 7 | A lockless rcu cell implementation that can be used safely in multithread context. 
8 | 9 | ## Features 10 | 11 | - Support multi-thread read and write operations. 12 | - The read operation would not block other read operations. 13 | - The read operation is always waitless. 14 | - The read operation is something like Arc::clone. 15 | - The write operation would not block other read operations. 16 | - The write operation is lockless. 17 | - The write operation is something like Atomic Swap. 18 | - The RcuCell could contain no data 19 | - Could be compiled with no_std 20 | 21 | 22 | ## Usage 23 | 24 | ```rust 25 | use rcu_cell::RcuCell; 26 | use std::sync::Arc; 27 | 28 | let t = Arc::new(RcuCell::new(10)); 29 | let t1 = t.clone(); 30 | let t2 = t.clone(); 31 | let d1 = t1.take().unwrap(); 32 | assert_eq!(*d1, 10); 33 | assert_eq!(t1.read(), None); 34 | let d2 = t2.write(42); 35 | assert!(d2.is_none()); 36 | let d3 = t2.read().unwrap(); 37 | assert_eq!(*d3, 42); 38 | ``` 39 | -------------------------------------------------------------------------------- /benches/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use rcu_cell::RcuCell; 6 | use test::Bencher; 7 | 8 | use std::sync::atomic::{AtomicUsize, Ordering}; 9 | use std::sync::Arc; 10 | 11 | #[bench] 12 | fn rcu_read(b: &mut Bencher) { 13 | let rcu_cell = Arc::new(RcuCell::new(10)); 14 | b.iter(|| { 15 | let v = rcu_cell.read().unwrap(); 16 | test::black_box(&*v); 17 | }); 18 | } 19 | 20 | #[bench] 21 | fn rcu_write(b: &mut Bencher) { 22 | let rcu_cell = Arc::new(RcuCell::new(0)); 23 | let mut i = 0; 24 | b.iter(|| { 25 | i += 1; 26 | let v = rcu_cell.write(i).unwrap(); 27 | assert_eq!(*v, i - 1); 28 | }); 29 | } 30 | 31 | #[bench] 32 | fn read_update_1(b: &mut Bencher) { 33 | static REF: AtomicUsize = AtomicUsize::new(0); 34 | 35 | struct Foo(usize); 36 | 37 | impl Drop for Foo { 38 | fn drop(&mut self) { 39 | REF.fetch_add(1, Ordering::Relaxed); 40 | } 41 | } 42 | 43 | b.iter(|| { 44 | REF.store(0, 
Ordering::Relaxed); 45 | let rcu_cell = Arc::new(RcuCell::new(Foo(42))); 46 | std::thread::scope(|s| { 47 | let rcu = rcu_cell.clone(); 48 | s.spawn(move || { 49 | for i in 0..1000 { 50 | rcu.update(|_| Some(Foo(i))); 51 | } 52 | }); 53 | let readers = 8; 54 | for _ in 0..readers { 55 | let rcu = rcu_cell.clone(); 56 | s.spawn(move || { 57 | for _i in 0..1000 { 58 | let v = rcu.read().unwrap(); 59 | test::black_box(&*v); 60 | } 61 | }); 62 | } 63 | }); 64 | assert_eq!(rcu_cell.read().unwrap().0, 999); 65 | drop(rcu_cell); 66 | assert_eq!(REF.load(Ordering::Relaxed), 1001); 67 | }); 68 | } 69 | 70 | #[bench] 71 | fn read_update_2(b: &mut Bencher) { 72 | static REF: AtomicUsize = AtomicUsize::new(0); 73 | 74 | struct Foo(usize); 75 | 76 | impl Drop for Foo { 77 | fn drop(&mut self) { 78 | REF.fetch_add(1, Ordering::Relaxed); 79 | } 80 | } 81 | 82 | b.iter(|| { 83 | REF.store(0, Ordering::Relaxed); 84 | let rcu_cell = Arc::new(RcuCell::new(Foo(42))); 85 | std::thread::scope(|s| { 86 | let rcu = rcu_cell.clone(); 87 | s.spawn(move || { 88 | for i in 0..1000 { 89 | rcu.update(|_| Some(Foo(i))); 90 | } 91 | }); 92 | 93 | let rcu = rcu_cell.clone(); 94 | s.spawn(move || { 95 | for i in 0..1000 { 96 | rcu.update(|_| Some(Foo(i))); 97 | } 98 | }); 99 | 100 | let readers = 8; 101 | for _ in 0..readers { 102 | let rcu = rcu_cell.clone(); 103 | s.spawn(move || { 104 | for _i in 0..1000 { 105 | let v = rcu.read().unwrap(); 106 | test::black_box(&*v); 107 | } 108 | }); 109 | } 110 | }); 111 | assert_eq!(rcu_cell.read().unwrap().0, 999); 112 | drop(rcu_cell); 113 | assert_eq!(REF.load(Ordering::Relaxed), 2001); 114 | }); 115 | } 116 | 117 | #[bench] 118 | fn read_write_1(b: &mut Bencher) { 119 | static REF: AtomicUsize = AtomicUsize::new(0); 120 | 121 | struct Foo(usize); 122 | 123 | impl Drop for Foo { 124 | fn drop(&mut self) { 125 | REF.fetch_add(1, Ordering::Relaxed); 126 | } 127 | } 128 | 129 | b.iter(|| { 130 | REF.store(0, Ordering::Relaxed); 131 | let rcu_cell = 
Arc::new(RcuCell::new(Foo(42))); 132 | std::thread::scope(|s| { 133 | let rcu = rcu_cell.clone(); 134 | s.spawn(move || { 135 | for i in 0..1000 { 136 | rcu.write(Foo(i)); 137 | } 138 | }); 139 | let readers = 8; 140 | for _ in 0..readers { 141 | let rcu = rcu_cell.clone(); 142 | s.spawn(move || { 143 | for _i in 0..1000 { 144 | let v = rcu.read().unwrap(); 145 | test::black_box(&*v); 146 | } 147 | }); 148 | } 149 | }); 150 | assert_eq!(rcu_cell.read().unwrap().0, 999); 151 | drop(rcu_cell); 152 | assert_eq!(REF.load(Ordering::Relaxed), 1001); 153 | }); 154 | } 155 | 156 | #[bench] 157 | fn read_write_2(b: &mut Bencher) { 158 | static REF: AtomicUsize = AtomicUsize::new(0); 159 | 160 | struct Foo(usize); 161 | 162 | impl Drop for Foo { 163 | fn drop(&mut self) { 164 | REF.fetch_add(1, Ordering::Relaxed); 165 | } 166 | } 167 | 168 | b.iter(|| { 169 | REF.store(0, Ordering::Relaxed); 170 | let rcu_cell = Arc::new(RcuCell::new(Foo(42))); 171 | std::thread::scope(|s| { 172 | let rcu = rcu_cell.clone(); 173 | s.spawn(move || { 174 | for i in 0..1000 { 175 | rcu.write(Foo(i)); 176 | } 177 | }); 178 | 179 | let rcu = rcu_cell.clone(); 180 | s.spawn(move || { 181 | for i in 0..1000 { 182 | rcu.write(Foo(i)); 183 | } 184 | }); 185 | 186 | let readers = 8; 187 | for _ in 0..readers { 188 | let rcu = rcu_cell.clone(); 189 | s.spawn(move || { 190 | for _i in 0..1000 { 191 | let v = rcu.read().unwrap(); 192 | test::black_box(&*v); 193 | } 194 | }); 195 | } 196 | }); 197 | assert_eq!(rcu_cell.read().unwrap().0, 999); 198 | drop(rcu_cell); 199 | assert_eq!(REF.load(Ordering::Relaxed), 2001); 200 | }); 201 | } 202 | 203 | #[bench] 204 | fn arc_swap(b: &mut Bencher) { 205 | use arc_swap::ArcSwap; 206 | static REF: AtomicUsize = AtomicUsize::new(0); 207 | 208 | struct Foo(usize); 209 | 210 | impl Drop for Foo { 211 | fn drop(&mut self) { 212 | REF.fetch_add(1, Ordering::Relaxed); 213 | } 214 | } 215 | 216 | b.iter(|| { 217 | REF.store(0, Ordering::Relaxed); 218 | let arc_swap = 
Arc::new(ArcSwap::new(Arc::new(Foo(42)))); 219 | std::thread::scope(|s| { 220 | let rcu = arc_swap.clone(); 221 | s.spawn(move || { 222 | for i in 0..1000 { 223 | rcu.store(Arc::new(Foo(i))); 224 | } 225 | }); 226 | let readers = 8; 227 | for _ in 0..readers { 228 | let rcu = arc_swap.clone(); 229 | s.spawn(move || { 230 | for _i in 0..1000 { 231 | let v = rcu.load(); 232 | test::black_box(&*v); 233 | } 234 | }); 235 | } 236 | }); 237 | assert_eq!(arc_swap.load().0, 999); 238 | drop(arc_swap); 239 | assert_eq!(REF.load(Ordering::Relaxed), 1001); 240 | }); 241 | } 242 | 243 | #[bench] 244 | fn rwlock_arc(b: &mut Bencher) { 245 | use spin::RwLock; 246 | static REF: AtomicUsize = AtomicUsize::new(0); 247 | 248 | struct Foo(usize); 249 | 250 | impl Drop for Foo { 251 | fn drop(&mut self) { 252 | REF.fetch_add(1, Ordering::Relaxed); 253 | } 254 | } 255 | 256 | b.iter(|| { 257 | REF.store(0, Ordering::Relaxed); 258 | let arc_swap = Arc::new(RwLock::new(Arc::new(Foo(42)))); 259 | std::thread::scope(|s| { 260 | let rcu = arc_swap.clone(); 261 | s.spawn(move || { 262 | for i in 0..1000 { 263 | *rcu.write() = Arc::new(Foo(i)); 264 | } 265 | }); 266 | let readers = 8; 267 | for _ in 0..readers { 268 | let rcu = arc_swap.clone(); 269 | s.spawn(move || { 270 | for _i in 0..1000 { 271 | let _v: Arc<_> = rcu.read().clone(); 272 | } 273 | }); 274 | } 275 | }); 276 | assert_eq!(arc_swap.read().0, 999); 277 | drop(arc_swap); 278 | assert_eq!(REF.load(Ordering::Relaxed), 1001); 279 | }); 280 | } 281 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | #![no_std] 3 | 4 | extern crate alloc; 5 | 6 | mod link; 7 | mod rcu_cell; 8 | mod rcu_weak; 9 | 10 | pub use rcu_cell::RcuCell; 11 | pub use rcu_weak::RcuWeak; 12 | 13 | // we only support 64-bit platform 14 | const _: () = assert!(usize::MAX.count_ones() == 64); 15 | 
const _: () = assert!(core::mem::size_of::<*const ()>() == 8); 16 | 17 | use alloc::sync::Arc; 18 | 19 | pub trait ArcPointer { 20 | fn as_ptr(&self) -> *const T; 21 | fn into_raw(self) -> *const T; 22 | /// # Safety 23 | /// you must ensure the pointer is valid 24 | unsafe fn from_raw(ptr: *const T) -> Self; 25 | } 26 | 27 | impl ArcPointer for Option> { 28 | fn as_ptr(&self) -> *const T { 29 | match self { 30 | Some(v) => Arc::as_ptr(v), 31 | None => core::ptr::null(), 32 | } 33 | } 34 | 35 | fn into_raw(self) -> *const T { 36 | match self { 37 | Some(v) => Arc::into_raw(v), 38 | None => core::ptr::null(), 39 | } 40 | } 41 | 42 | unsafe fn from_raw(ptr: *const T) -> Self { 43 | (!ptr.is_null()).then(|| Arc::from_raw(ptr)) 44 | } 45 | } 46 | 47 | #[cfg(test)] 48 | mod test { 49 | use super::RcuCell; 50 | use alloc::sync::Arc; 51 | use core::sync::atomic::{AtomicUsize, Ordering}; 52 | 53 | #[test] 54 | fn test_default() { 55 | let x = RcuCell::::default(); 56 | assert!(x.read().is_none()); 57 | } 58 | 59 | #[test] 60 | fn simple_drop() { 61 | static REF: AtomicUsize = AtomicUsize::new(0); 62 | struct Foo(usize); 63 | impl Foo { 64 | fn new(data: usize) -> Self { 65 | REF.fetch_add(data, Ordering::Relaxed); 66 | Foo(data) 67 | } 68 | } 69 | impl Drop for Foo { 70 | fn drop(&mut self) { 71 | REF.fetch_sub(self.0, Ordering::Relaxed); 72 | } 73 | } 74 | let a = RcuCell::new(Foo::new(10)); 75 | let b = a.read().unwrap(); 76 | assert_eq!(REF.load(Ordering::Relaxed), 10); 77 | drop(b); 78 | assert_eq!(REF.load(Ordering::Relaxed), 10); 79 | drop(a); 80 | assert_eq!(REF.load(Ordering::Relaxed), 0); 81 | } 82 | 83 | #[test] 84 | fn single_thread() { 85 | let t = RcuCell::new(Some(10)); 86 | let x = t.read(); 87 | let y = t.read(); 88 | t.take(); 89 | let z = t.read(); 90 | let a = z.clone(); 91 | drop(t); // t can be dropped before reader 92 | assert_eq!(x.map(|v| *v), Some(10)); 93 | assert_eq!(y.map(|v| *v), Some(10)); 94 | assert_eq!(z.map(|v| *v), None); 95 | 
assert_eq!(a.map(|v| *v), None); 96 | } 97 | 98 | #[test] 99 | fn single_thread_clone() { 100 | let t = Arc::new(RcuCell::new(Some(10))); 101 | let t1 = t.clone(); 102 | assert_eq!(t1.read().map(|v| *v), Some(10)); 103 | t1.write(5); 104 | assert_eq!(t.read().map(|v| *v), Some(5)); 105 | } 106 | 107 | #[test] 108 | fn test_rcu_update() { 109 | let t = RcuCell::new(Some(10)); 110 | let old = t.update(|v| v.map(|x| *x + 1)); 111 | assert_eq!(t.read().map(|v| *v), Some(11)); 112 | assert_eq!(old.map(|v| *v), Some(10)); 113 | let old = t.update(|v| match v { 114 | Some(x) if *x == 11 => None, 115 | _ => Some(42), 116 | }); 117 | assert!(t.read().is_none()); 118 | assert_eq!(old.map(|v| *v), Some(11)); 119 | } 120 | 121 | #[test] 122 | fn test_is_none() { 123 | let t = RcuCell::new(10); 124 | assert!(!t.is_none()); 125 | t.take(); 126 | assert!(t.is_none()); 127 | } 128 | 129 | #[test] 130 | fn test_clone_rcu_cell() { 131 | let t = Arc::new(RcuCell::new(Some(10))); 132 | let t1 = t.clone(); 133 | let t2 = t.clone(); 134 | let t3 = t.clone(); 135 | t1.write(11); 136 | drop(t1); 137 | assert_eq!(t.read().map(|v| *v), Some(11)); 138 | t2.write(12); 139 | drop(t2); 140 | assert_eq!(t.read().map(|v| *v), Some(12)); 141 | t3.write(13); 142 | drop(t3); 143 | assert_eq!(t.read().map(|v| *v), Some(13)); 144 | } 145 | 146 | #[test] 147 | fn test_rcu_reader() { 148 | let t = Arc::new(RcuCell::new(10)); 149 | let t1 = t.clone(); 150 | let t2 = t.clone(); 151 | let t3 = t.clone(); 152 | let d1 = t1.read().unwrap(); 153 | let d3 = t3.read().unwrap(); 154 | t1.write(11); 155 | let d2 = t2.read().unwrap(); 156 | assert_ne!(d1, d2); 157 | assert_eq!(d1, d3); 158 | assert_ne!(d2, d3); 159 | } 160 | 161 | #[test] 162 | fn test_rcu_take() { 163 | let t = Arc::new(RcuCell::new(10)); 164 | let t1 = t.clone(); 165 | let t2 = t.clone(); 166 | let d1 = t1.take().unwrap(); 167 | assert_eq!(*d1, 10); 168 | assert_eq!(t1.read(), None); 169 | let d2 = t2.write(42); 170 | assert!(d2.is_none()); 171 
| let d3 = t2.read().unwrap(); 172 | assert_eq!(*d3, 42); 173 | } 174 | 175 | #[test] 176 | fn test_arc_eq() { 177 | let t = RcuCell::new(10); 178 | let v = t.read().unwrap(); 179 | assert!(t.arc_eq(&v)); 180 | t.write(11); 181 | assert!(!t.arc_eq(&v)); 182 | let t1 = RcuCell::from(v.clone()); 183 | assert!(t1.arc_eq(&v)); 184 | let v2 = t.write(v); 185 | let t2 = RcuCell::from(v2.clone()); 186 | assert!(RcuCell::ptr_eq(&t, &t1)); 187 | assert!(t2.arc_eq(v2.as_ref().unwrap())); 188 | } 189 | 190 | #[test] 191 | fn cas_test() { 192 | use super::ArcPointer; 193 | use Ordering::SeqCst; 194 | 195 | let a = RcuCell::new(1234); 196 | 197 | let curr = a.read().as_ptr(); 198 | let res1 = unsafe { a.compare_exchange(curr, None, SeqCst, SeqCst) }.unwrap(); 199 | assert_eq!(res1, curr); 200 | assert!(a.is_none()); 201 | let res2 = unsafe { a.compare_exchange(res1, Some(&Arc::new(5678)), SeqCst, SeqCst) }; 202 | assert!(res2.is_err()); 203 | 204 | let null = core::ptr::null(); 205 | let res2 = unsafe { a.compare_exchange(null, Some(&Arc::new(5678)), SeqCst, SeqCst) }; 206 | assert!(res2.is_ok()); 207 | assert_eq!(a.read().map(|v| *v), Some(5678)); 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/link.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::marker::PhantomData; 3 | use core::sync::atomic::{AtomicUsize, Ordering}; 4 | 5 | const LEADING_BITS: usize = 8; 6 | const ALIGN_BITS: usize = 3; 7 | 8 | const LOWER_MASK: usize = (1 << ALIGN_BITS) - 1; 9 | const HIGHER_MASK: usize = !((1 << (usize::MAX.leading_ones() as usize - LEADING_BITS)) - 1); 10 | const REFCOUNT_MASK: usize = (1 << (LEADING_BITS + ALIGN_BITS)) - 1; 11 | const UPDTATE_MASK: usize = 1 << (LEADING_BITS + ALIGN_BITS - 1); 12 | const UPDATE_REF_MASK: usize = REFCOUNT_MASK & !UPDTATE_MASK; 13 | 14 | #[repr(C)] 15 | union Ptr { 16 | addr: usize, 17 | ptr: *const T, 18 | } 19 | 20 | impl Ptr 
{ 21 | #[inline] 22 | const fn addr(self) -> usize { 23 | unsafe { self.addr } 24 | } 25 | 26 | #[inline] 27 | const fn ptr(self) -> *const T { 28 | unsafe { self.ptr } 29 | } 30 | } 31 | 32 | /// A wrapper of the pointer to the inner Arc data 33 | pub(crate) struct LinkWrapper { 34 | ptr: AtomicUsize, 35 | phantom: PhantomData<*const T>, 36 | } 37 | 38 | impl LinkWrapper { 39 | #[inline] 40 | pub(crate) const fn new(ptr: *const T) -> Self { 41 | let addr = Ptr { ptr }.addr(); 42 | debug_assert!(addr & LOWER_MASK == 0); 43 | debug_assert!(addr & HIGHER_MASK == 0); 44 | LinkWrapper { 45 | ptr: AtomicUsize::new(addr << LEADING_BITS), 46 | phantom: PhantomData, 47 | } 48 | } 49 | 50 | pub(crate) unsafe fn compare_exchange( 51 | &self, 52 | current: *const T, 53 | new: *const T, 54 | success: Ordering, 55 | failure: Ordering, 56 | ) -> Result<*const T, *const T> { 57 | let new_addr = Ptr { ptr: new }.addr(); 58 | let new = new_addr << LEADING_BITS; 59 | 60 | let old_addr = Ptr { ptr: current }.addr(); 61 | let old = old_addr << LEADING_BITS; 62 | 63 | let backoff = crossbeam_utils::Backoff::new(); 64 | loop { 65 | match self.ptr.compare_exchange(old, new, success, failure) { 66 | Ok(_addr) => { 67 | // assert_eq!(old, addr); 68 | return Ok(current); 69 | } 70 | Err(addr) => { 71 | let addr = (addr & !REFCOUNT_MASK) >> LEADING_BITS; 72 | if addr != old { 73 | return Err(Ptr { addr }.ptr()); 74 | } 75 | backoff.snooze(); 76 | } 77 | } 78 | } 79 | } 80 | 81 | pub(crate) fn update(&self, ptr: *const T) -> *const T { 82 | use Ordering::*; 83 | let addr = Ptr { ptr }.addr(); 84 | debug_assert!(addr & LOWER_MASK == 0); 85 | debug_assert!(addr & HIGHER_MASK == 0); 86 | let new = addr << LEADING_BITS; 87 | let mut old = self.ptr.load(Relaxed) & !REFCOUNT_MASK; 88 | 89 | let backoff = crossbeam_utils::Backoff::new(); 90 | // wait all reader release 91 | while let Err(addr) = self.ptr.compare_exchange_weak(old, new, Release, Relaxed) { 92 | old = addr & !REFCOUNT_MASK; 93 | 
backoff.snooze(); 94 | } 95 | 96 | core::sync::atomic::fence(Ordering::Acquire); 97 | let addr = old >> LEADING_BITS; 98 | Ptr { addr }.ptr() 99 | } 100 | 101 | // this is only used after lock_read 102 | pub(crate) fn unlock_update(&self, ptr: *const T) -> *const T { 103 | use Ordering::*; 104 | let addr = Ptr { ptr }.addr(); 105 | debug_assert!(addr & LOWER_MASK == 0); 106 | debug_assert!(addr & HIGHER_MASK == 0); 107 | let new = addr << LEADING_BITS; 108 | let mut old = self.ptr.load(Relaxed) & !UPDATE_REF_MASK | UPDTATE_MASK; 109 | 110 | let backoff = crossbeam_utils::Backoff::new(); 111 | // wait all reader release 112 | while let Err(addr) = self.ptr.compare_exchange_weak(old, new, Release, Relaxed) { 113 | old = addr & !UPDATE_REF_MASK | UPDTATE_MASK; 114 | backoff.snooze(); 115 | } 116 | 117 | core::sync::atomic::fence(Ordering::Acquire); 118 | let addr = (old & !UPDTATE_MASK) >> LEADING_BITS; 119 | Ptr { addr }.ptr() 120 | } 121 | 122 | #[inline] 123 | pub(crate) fn is_none(&self) -> bool { 124 | self.ptr.load(Ordering::Relaxed) & !REFCOUNT_MASK == 0 125 | } 126 | 127 | #[inline] 128 | pub(crate) fn inc_ref(&self) -> *const T { 129 | let addr = self.ptr.fetch_add(1, Ordering::Acquire); 130 | let refs = addr & REFCOUNT_MASK; 131 | assert!(refs < REFCOUNT_MASK, "Too many references"); 132 | let addr = (addr & !REFCOUNT_MASK) >> LEADING_BITS; 133 | Ptr { addr }.ptr() 134 | } 135 | 136 | #[inline] 137 | pub(crate) fn get_ref(&self) -> *const T { 138 | let addr = self.ptr.load(Ordering::Acquire); 139 | let addr = (addr & !REFCOUNT_MASK) >> LEADING_BITS; 140 | Ptr { addr }.ptr() 141 | } 142 | 143 | #[inline] 144 | pub(crate) fn dec_ref(&self) { 145 | self.ptr.fetch_sub(1, Ordering::Release); 146 | } 147 | 148 | // read the inner Arc and increase the ref count 149 | // to prevent other writers from updating the inner Arc 150 | // should be used paired with unlock_update 151 | #[inline] 152 | pub(crate) fn lock_read(&self) -> *const T { 153 | use Ordering::*; 154 | 155 | 
let addr = self.ptr.load(Relaxed); 156 | let mut old = addr & !UPDTATE_MASK; // clear the update flag 157 | let mut new = addr | UPDTATE_MASK; // set the update flag 158 | 159 | let refs = old & UPDATE_REF_MASK; 160 | assert!(refs < UPDATE_REF_MASK, "Too many references"); 161 | 162 | let backoff = crossbeam_utils::Backoff::new(); 163 | while let Err(addr) = self.ptr.compare_exchange_weak(old, new, Release, Relaxed) { 164 | old = addr & !UPDTATE_MASK; 165 | new = addr | UPDTATE_MASK; 166 | backoff.snooze(); 167 | } 168 | 169 | core::sync::atomic::fence(Ordering::Acquire); 170 | 171 | let addr = (old & !REFCOUNT_MASK) >> LEADING_BITS; 172 | Ptr { addr }.ptr() 173 | } 174 | } 175 | 176 | impl fmt::Debug for LinkWrapper { 177 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 178 | let ptr = self.get_ref(); 179 | f.debug_struct("Link").field("ptr", &ptr).finish() 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/rcu_cell.rs: -------------------------------------------------------------------------------- 1 | use alloc::sync::Arc; 2 | use core::mem::ManuallyDrop; 3 | use core::ptr; 4 | use core::sync::atomic::Ordering; 5 | 6 | use crate::link::LinkWrapper; 7 | use crate::ArcPointer; 8 | 9 | #[inline] 10 | fn ptr_to_arc(ptr: *const T) -> Option> { 11 | unsafe { ArcPointer::from_raw(ptr) } 12 | } 13 | 14 | /// RCU cell, it behaves like `RwLock>>` 15 | #[derive(Debug)] 16 | pub struct RcuCell { 17 | link: LinkWrapper, 18 | } 19 | 20 | unsafe impl Send for RcuCell {} 21 | unsafe impl Sync for RcuCell {} 22 | 23 | impl Drop for RcuCell { 24 | fn drop(&mut self) { 25 | let ptr = self.link.get_ref(); 26 | let _ = ptr_to_arc(ptr); 27 | } 28 | } 29 | 30 | impl Default for RcuCell { 31 | fn default() -> Self { 32 | RcuCell::none() 33 | } 34 | } 35 | 36 | impl From> for RcuCell { 37 | fn from(data: Arc) -> Self { 38 | let arc_ptr = Arc::into_raw(data); 39 | RcuCell { 40 | link: LinkWrapper::new(arc_ptr), 41 | } 42 | 
} 43 | } 44 | 45 | impl From>> for RcuCell { 46 | fn from(data: Option>) -> Self { 47 | let ptr = data.into_raw(); 48 | RcuCell { 49 | link: LinkWrapper::new(ptr), 50 | } 51 | } 52 | } 53 | 54 | impl RcuCell { 55 | /// create an empty rcu cell instance 56 | #[inline] 57 | pub const fn none() -> Self { 58 | RcuCell { 59 | link: LinkWrapper::new(ptr::null()), 60 | } 61 | } 62 | 63 | /// create rcu cell from a value 64 | #[inline] 65 | pub fn some(data: T) -> Self { 66 | let ptr = Arc::into_raw(Arc::new(data)); 67 | RcuCell { 68 | link: LinkWrapper::new(ptr), 69 | } 70 | } 71 | 72 | /// create rcu cell from value that can be converted to Option 73 | #[inline] 74 | pub fn new(data: impl Into>) -> Self { 75 | let data = data.into(); 76 | match data { 77 | Some(data) => Self::some(data), 78 | None => Self::none(), 79 | } 80 | } 81 | 82 | /// convert the rcu cell to an Arc value 83 | #[inline] 84 | pub fn into_arc(self) -> Option> { 85 | let ptr = self.link.get_ref(); 86 | let ret = ptr_to_arc(ptr); 87 | let _ = ManuallyDrop::new(self); 88 | ret 89 | } 90 | 91 | /// check if the rcu cell is empty 92 | #[inline] 93 | pub fn is_none(&self) -> bool { 94 | self.link.is_none() 95 | } 96 | 97 | /// write an option arc value to the rcu cell and return the old value 98 | #[inline] 99 | pub fn set(&self, data: Option>) -> Option> { 100 | let new_ptr = data.into_raw(); 101 | ptr_to_arc(self.link.update(new_ptr)) 102 | } 103 | 104 | /// take the value from the rcu cell, leave the rcu cell empty 105 | #[inline] 106 | pub fn take(&self) -> Option> { 107 | self.set(None) 108 | } 109 | 110 | /// write a value to the rcu cell and return the old value 111 | #[inline] 112 | pub fn write(&self, data: impl Into>) -> Option> { 113 | let data = data.into(); 114 | self.set(Some(data)) 115 | } 116 | 117 | /// Atomicly update the value with a closure and return the old value. 118 | /// The closure will be called with the old value and return the new value. 
119 | /// The closure should not take too long time, internally it's use a spin 120 | /// lock to prevent other writer to update the value 121 | pub fn update(&self, f: F) -> Option> 122 | where 123 | F: FnOnce(Option>) -> Option, 124 | R: Into>, 125 | { 126 | // increase ref count to lock the inner Arc 127 | let ptr = self.link.lock_read(); 128 | let old_value = ptr_to_arc(ptr); 129 | let new_ptr = match f(old_value.clone()) { 130 | Some(data) => Arc::into_raw(data.into()), 131 | None => ptr::null_mut(), 132 | }; 133 | self.link.unlock_update(new_ptr); 134 | old_value 135 | } 136 | 137 | /// Stores the optional Arc ref `new` into the RcuCell if the current 138 | /// value is the same as `current`. The tag is also taken into account, so two pointers to the 139 | /// same object, but with different tags, will not be considered equal. 140 | /// 141 | /// The return value is a result indicating whether the new value was written and containing the previous value. 142 | /// On success this value is guaranteed to be equal to current. 143 | /// 144 | /// This method takes two `Ordering` arguments to describe the memory 145 | /// ordering of this operation. `success` describes the required ordering for the 146 | /// read-modify-write operation that takes place if the comparison with `current` succeeds. 147 | /// `failure` describes the required ordering for the load operation that takes place when 148 | /// the comparison fails. Using `Acquire` as success ordering makes the store part 149 | /// of this operation `Relaxed`, and using `Release` makes the successful load 150 | /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` 151 | /// and must be equivalent to or weaker than the success ordering. 
152 | /// 153 | /// # Safety 154 | /// 155 | /// don't deref the returned pointer, it's may be dropped by other threads 156 | /// 157 | /// # Examples 158 | /// 159 | /// ``` 160 | /// use rcu_cell::{RcuCell, ArcPointer}; 161 | /// 162 | /// use std::sync::Arc; 163 | /// use std::sync::atomic::Ordering::SeqCst; 164 | /// 165 | /// let a = RcuCell::new(1234); 166 | /// 167 | /// let curr = a.read(); 168 | /// let res1 = unsafe { a.compare_exchange(curr.as_ptr(), None, SeqCst, SeqCst) }.unwrap(); 169 | /// let res2 = unsafe { a.compare_exchange(res1, Some(&Arc::new(5678)), SeqCst, SeqCst) }; 170 | /// ``` 171 | pub unsafe fn compare_exchange<'a>( 172 | &self, 173 | current: *const T, 174 | new: Option<&'a Arc>, 175 | success: Ordering, 176 | failure: Ordering, 177 | ) -> Result<*const T, *const T> 178 | where 179 | T: 'a, 180 | { 181 | let new_ptr = match new { 182 | Some(data) => Arc::as_ptr(data), 183 | None => ptr::null(), 184 | }; 185 | 186 | self.link 187 | .compare_exchange(current, new_ptr, success, failure) 188 | .inspect(|ptr| { 189 | // drop the old arc in the rcu cell 190 | let _ = ptr_to_arc(ptr); 191 | // we have succeed to exchange the arc 192 | if let Some(v) = new { 193 | // clone and forget the arc that hold by rcu cell 194 | let _ = Arc::into_raw(Arc::clone(v)); 195 | } 196 | }) 197 | } 198 | 199 | /// read out the inner Arc value 200 | #[inline] 201 | pub fn read(&self) -> Option> { 202 | let ptr = self.link.inc_ref(); 203 | let v = ManuallyDrop::new(ptr_to_arc(ptr)); 204 | let cloned = v.as_ref().cloned(); 205 | self.link.dec_ref(); 206 | core::sync::atomic::fence(Ordering::Acquire); 207 | cloned 208 | } 209 | 210 | /// read inner ptr and check if it is the same as the given Arc 211 | #[inline] 212 | pub fn arc_eq(&self, data: &Arc) -> bool { 213 | core::ptr::eq(self.link.get_ref(), Arc::as_ptr(data)) 214 | } 215 | 216 | /// check if two RcuCell instances point to the same inner Arc 217 | #[inline] 218 | pub fn ptr_eq(this: &Self, other: &Self) -> 
bool { 219 | core::ptr::eq(this.link.get_ref(), other.link.get_ref()) 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /src/rcu_weak.rs: -------------------------------------------------------------------------------- 1 | use alloc::sync::{Arc, Weak}; 2 | use core::mem::ManuallyDrop; 3 | use core::ptr; 4 | use core::sync::atomic::Ordering; 5 | 6 | use crate::link::LinkWrapper; 7 | 8 | #[inline] 9 | fn ptr_to_weak(ptr: *const T) -> Weak { 10 | if ptr.is_null() { 11 | Weak::new() 12 | } else { 13 | unsafe { Weak::from_raw(ptr) } 14 | } 15 | } 16 | 17 | /// RCU weak cell, it behaves like `RwLock>` 18 | #[derive(Debug)] 19 | pub struct RcuWeak { 20 | link: LinkWrapper, 21 | } 22 | 23 | unsafe impl Send for RcuWeak {} 24 | unsafe impl Sync for RcuWeak {} 25 | 26 | impl Drop for RcuWeak { 27 | fn drop(&mut self) { 28 | let ptr = self.link.get_ref(); 29 | let _ = ptr_to_weak(ptr); 30 | } 31 | } 32 | 33 | impl Default for RcuWeak { 34 | fn default() -> Self { 35 | Self::new() 36 | } 37 | } 38 | 39 | impl From> for RcuWeak { 40 | fn from(data: Weak) -> Self { 41 | let weak_ptr = Weak::into_raw(data); 42 | RcuWeak { 43 | link: LinkWrapper::new(weak_ptr), 44 | } 45 | } 46 | } 47 | 48 | impl RcuWeak { 49 | /// create an dummy rcu weak cell instance, upgrade from it will return None 50 | #[inline] 51 | pub const fn new() -> Self { 52 | RcuWeak { 53 | link: LinkWrapper::new(ptr::null()), 54 | } 55 | } 56 | 57 | /// convert the rcu weak to a `Weak`` value 58 | #[inline] 59 | pub fn into_weak(self) -> Weak { 60 | let ptr = self.link.get_ref(); 61 | let ret = ptr_to_weak(ptr); 62 | let _ = ManuallyDrop::new(self); 63 | ret 64 | } 65 | 66 | /// take the value from the rcu weak, leave the rcu weak with default value 67 | #[inline] 68 | pub fn take(&self) -> Weak { 69 | ptr_to_weak(self.link.update(ptr::null())) 70 | } 71 | 72 | /// write a new weak value to the rcu weak cell and return the old value 73 | #[inline] 74 | pub fn 
write(&self, data: Weak) -> Weak { 75 | let new_ptr = if data.ptr_eq(&Weak::new()) { 76 | ptr::null() 77 | } else { 78 | Weak::into_raw(data) 79 | }; 80 | ptr_to_weak(self.link.update(new_ptr)) 81 | } 82 | 83 | /// write a new `Weak` value downgrade from the `Arc`` to the cell and return the old value 84 | #[inline] 85 | pub fn write_arc(&self, data: &Arc) -> Weak { 86 | let weak = Arc::downgrade(data); 87 | let new_ptr = Weak::into_raw(weak); 88 | ptr_to_weak(self.link.update(new_ptr)) 89 | } 90 | 91 | /// read out the inner weak value 92 | #[inline] 93 | pub fn read(&self) -> Weak { 94 | let ptr = self.link.inc_ref(); 95 | let v = ManuallyDrop::new(ptr_to_weak(ptr)); 96 | let cloned = (*v).clone(); 97 | self.link.dec_ref(); 98 | core::sync::atomic::fence(Ordering::Acquire); 99 | cloned 100 | } 101 | 102 | /// upgrade the innner weak value to an Arc value 103 | #[inline] 104 | pub fn upgrade(&self) -> Option> { 105 | let ptr = self.link.inc_ref(); 106 | let v = ManuallyDrop::new(ptr_to_weak(ptr)); 107 | let cloned = v.upgrade(); 108 | self.link.dec_ref(); 109 | core::sync::atomic::fence(Ordering::Acquire); 110 | cloned 111 | } 112 | 113 | /// read inner ptr and check if it is the same as the given Arc 114 | #[inline] 115 | pub fn arc_eq(&self, data: &Arc) -> bool { 116 | core::ptr::eq(self.link.get_ref(), Arc::as_ptr(data)) 117 | } 118 | 119 | /// read inner ptr and check if it is the same as the given Weak 120 | #[inline] 121 | pub fn weak_eq(&self, data: &Weak) -> bool { 122 | core::ptr::eq(self.link.get_ref(), Weak::as_ptr(data)) 123 | } 124 | 125 | /// check if two RcuWeak instances point to the same inner Weak 126 | #[inline] 127 | pub fn ptr_eq(this: &Self, other: &Self) -> bool { 128 | core::ptr::eq(this.link.get_ref(), other.link.get_ref()) 129 | } 130 | } 131 | --------------------------------------------------------------------------------