├── .github └── workflows │ └── test.yml ├── .gitignore ├── Cargo.toml ├── README.md ├── benches ├── avltree.rs ├── btree.rs ├── queue.rs ├── stack.rs └── util │ ├── concurrent.rs │ ├── mod.rs │ └── sequential.rs ├── rust-toolchain ├── src ├── avltree │ ├── mod.rs │ ├── rwlock.rs │ └── seqlock.rs ├── btree │ └── mod.rs ├── lib.rs ├── linkedlist │ └── mod.rs ├── lock │ ├── fclock.rs │ ├── mod.rs │ ├── mutex.rs │ ├── seqlock.rs │ └── spinlock.rs ├── map.rs ├── queue │ ├── fclock.rs │ ├── lockfree.rs │ ├── mod.rs │ ├── mutex.rs │ └── spinlock.rs ├── stack │ ├── lock.rs │ ├── lockfree.rs │ └── mod.rs └── util │ ├── mod.rs │ └── random.rs └── tests ├── avltree ├── mod.rs ├── rwlock.rs └── seqlock.rs ├── btree └── mod.rs ├── linkedlist ├── linkedlist.rs └── mod.rs ├── lock ├── mod.rs └── spinlock.rs ├── queue ├── fclock.rs ├── lockfree.rs ├── mod.rs ├── mutex.rs └── spinlock.rs ├── stack ├── eb.rs ├── mod.rs ├── mutex.rs ├── spinlock.rs ├── stack.rs └── treiber.rs ├── tests.rs └── util ├── map.rs ├── mod.rs └── queue.rs /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | paths: 6 | - 'src/**/*' 7 | - 'tests/**/*' 8 | - 'benches/**/*' 9 | - 'Cargo.toml' 10 | - 'rust-toolchain' 11 | 12 | jobs: 13 | debug_build_and_test_AMD64: 14 | name: Build & Test(Debug) on AMD64 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v2 18 | - uses: actions/cache@v3 19 | env: 20 | cache-name: debug-cache 21 | with: 22 | path: ~/target 23 | key: ${{ runner.os }}-build-${{ env.cache-name }} 24 | - uses: actions-rs/toolchain@v1 25 | - run: RUST_MIN_STACK=33554432 cargo test -- --nocapture 26 | release_build_and_test_AMD64: 27 | name: Build & Test(Release) on AMD64 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v2 31 | - uses: actions/cache@v3 32 | env: 33 | cache-name: release-cache 34 | with: 35 | path: ~/target 36 | key: ${{ runner.os }}-build-${{ env.cache-name }} 37 | - uses: actions-rs/toolchain@v1 38 | - run: RUST_MIN_STACK=33554432 cargo test --release -- --nocapture 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | .DS_Store 4 | 5 | # disable push flamegraph.svg 6 | *.svg 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cds" 3 | version = "0.1.0" 4 | authors = ["Taewoo An "] 5 | edition = "2018" 6 | 7 | [features] 8 | default = ["concurrent_stat"] 9 | concurrent_stat = [] 10 | 11 | [dependencies] 12 | crossbeam-epoch = "0.9.5" 13 | crossbeam-utils = "0.8.5" 14 | rand = "0.8.4" 15 | thread_local = "1.1.4" 16 | parking_lot = "0.12.1" 17 | 18 | [dev-dependencies] 19 | criterion = "0.3.4" 20 | num_cpus = "1.13.0" 21 | crossbeam-queue = "0.3.5" 22 | 23 | [[bench]] 24 | name = "stack" 25 | harness = false 26 | 27 | [[bench]] 28 | name = "queue" 29 | harness = false 30 | 31 | [[bench]] 32 | name = "avltree" 33 | harness = false 34 | 35 | [[bench]] 36 | name = "btree" 37 | harness = false 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Concurrent Data Structure for Rust 2 | 3 | ## Goal & Status 4 | Implement sequential, lock-based and 
lock-free concurrent data structures below: 5 | 6 | | | Stack | Queue | Linked List | AVL Tree | HashTable | 7 | |------------|-------|-------|-------------|----------|-----------| 8 | | Sequential | Done | Done | Done | Done | | 9 | | Lock-based | Done | Done | | Done | | 10 | | Lock-free | Done | Done | | | | 11 | 12 | ## Benchmark 13 | You can run bench like this: 14 | ```bash 15 | cargo install cargo-criterion 16 | # default feature has accumulating stats on available structure. 17 | cargo criterion --bench {bench_name} --no-default-features 18 | ``` 19 | 20 | Available Benches: 21 | - stack 22 | - queue 23 | - avltree 24 | - btree 25 | 26 | ## Profile 27 | 28 | ### Use CDS stats 29 | Several cds has its own statistics. Use it by printing on test. 30 | 31 | ### Flamegraph 32 | ```bash 33 | cargo install flamegraph 34 | sudo cargo flamegraph --no-default-features --test tests -- {test_name} 35 | ``` 36 | 37 | ## Detail 38 | ### Lock 39 | - common spin lock and sequece lock(SeqLock) 40 | - flat combining lock 41 | 42 | ### Stack 43 | - lock stack(based on std::sync::Mutex and spin lock) 44 | - Treiber's Stack 45 | - Elimination-Backoff Stack 46 | 47 | ### Queue 48 | - lock queue(based on std::sync::Mutex and spin lock) 49 | - two lock queue 50 | - FCQueue(use flat combining lock) 51 | - Michael-Scott queue 52 | 53 | ### Linked List 54 | - TODO: implement Harris linked list 55 | 56 | ### AVL Tree 57 | - SeqLockAVLTree, RwLockAVLTree(use crossbeam_utils::sync::ShardedLock) 58 | 59 | ### HashTable 60 | - TODO: ? 61 | 62 | ## Reference 63 | ### General 64 | - The Art of Multiprocessor Programming 65 | - https://github.com/kaist-cp/cs431 66 | - https://github.com/khizmax/libcds 67 | - https://www.cs.cmu.edu/~yihans/papers/tutorial.pdf 68 | 69 | ### Lock 70 | - flat combining lock: https://people.csail.mit.edu/shanir/publications/Flat%20Combining%20SPAA%2010.pdf 71 | 72 | ### Stack 73 | - Treiber's Stack: https://dominoweb.draco.res.ibm.com/58319a2ed2b1078985257003004617ef.html 74 | - Elimination-Backoff Stack: https://people.csail.mit.edu/shanir/publications/Lock_Free.pdf 75 | 76 | ### Queue 77 | - two lock queue, Michael-Scott Queue: https://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf 78 | 79 | ### Binary Search Tree 80 | - AVL Tree: https://stanford-ppl.github.io/website/papers/ppopp207-bronson.pdf 81 | - B+ Tree: http://www.vldb.org/pvldb/vol4/p795-sewall.pdf 82 | - Red-Black Tree: https://www.cs.umanitoba.ca/~hacamero/Research/RBTreesKim.pdf 83 | - BzTree(B Tree): http://www.vldb.org/pvldb/vol11/p553-arulraj.pdf 84 | -------------------------------------------------------------------------------- /benches/avltree.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | use std::time::Duration; 4 | 5 | use cds::avltree::SeqLockAVLTree; 6 | use criterion::{criterion_group, Criterion}; 7 | use criterion::{criterion_main, SamplingMode, Throughput}; 8 | 9 | use util::concurrent::*; 10 | 11 | const MAP_ALREADY_INSERTED: u64 = 500_000; 12 | 13 | const OPS_RATE: [(u64, u64, u64); 7] = [ 14 | (100, 0, 0), 15 | (0, 100, 0), 16 | (0, 0, 100), 17 | (5, 90, 5), 18 | (30, 50, 20), 19 | (40, 20, 40), 20 | (50, 0, 50), 21 | ]; 22 | 23 | fn bench_mixed_per_seqlockavltree(c: &mut Criterion) { 24 | for (insert, lookup, remove) in OPS_RATE { 25 | let mut group = c.benchmark_group(format!( 26 | "SeqLockAVLTree/{:+e} pre-inserted, Ops(I: {}%, L: {}%, R: {}%, per: scaled by iters)", 27 | MAP_ALREADY_INSERTED, insert, lookup, remove 28 | )); 29 | 
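        // Each Criterion sample below re-creates the map and pre-inserts MAP_ALREADY_INSERTED
        // (5e5) keys before the mixed workload runs, so the sample count is kept low and the
        // measurement window long (assumed rationale for the settings that follow).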
group.sample_size(20); 30 | group.measurement_time(Duration::from_secs(15)); 31 | group.sampling_mode(SamplingMode::Linear); 32 | 33 | for num in get_test_thread_nums() { 34 | group.throughput(Throughput::Elements((100 * num) as u64)); 35 | criterion_linear_bench_mixed_concurrent_map::>( 36 | MAP_ALREADY_INSERTED, 37 | insert, 38 | lookup, 39 | remove, 40 | num, 41 | &mut group, 42 | ); 43 | } 44 | group.finish(); 45 | } 46 | } 47 | 48 | criterion_group!(bench, bench_mixed_per_seqlockavltree,); 49 | criterion_main! { 50 | bench, 51 | } 52 | -------------------------------------------------------------------------------- /benches/btree.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | use std::time::Duration; 4 | 5 | use criterion::{criterion_group, Criterion}; 6 | use criterion::{criterion_main, SamplingMode, Throughput}; 7 | 8 | use cds::{avltree::AVLTree, btree::BTree}; 9 | 10 | use util::sequential::{bench_logs_btreemap, bench_logs_sequential_map, fuzz_sequential_logs}; 11 | 12 | const MAP_ALREADY_INSERTED: u64 = 500_000; 13 | const MAP_TOTAL_OPS: usize = 192_000; 14 | 15 | const OPS_RATE: [(usize, usize, usize); 7] = [ 16 | (100, 0, 0), 17 | (0, 100, 0), 18 | (0, 0, 100), 19 | (5, 90, 5), 20 | (30, 50, 20), 21 | (40, 20, 40), 22 | (50, 0, 50), 23 | ]; 24 | 25 | fn bench_vs_btreemap(c: &mut Criterion) { 26 | for (insert, lookup, remove) in OPS_RATE { 27 | let logs = fuzz_sequential_logs( 28 | 200, 29 | MAP_ALREADY_INSERTED, 30 | MAP_TOTAL_OPS * insert / 100, 31 | MAP_TOTAL_OPS * lookup / 100, 32 | MAP_TOTAL_OPS * remove / 100, 33 | ); 34 | 35 | let mut group = c.benchmark_group(format!( 36 | "Inserted {:+e}, Ops (I: {}%, L: {}%, R: {}%, total: {:+e})", 37 | MAP_ALREADY_INSERTED, insert, lookup, remove, MAP_TOTAL_OPS 38 | )); 39 | group.measurement_time(Duration::from_secs(15)); // Note: make almost same the measurement_time to iters * avg_op_time 40 | group.sampling_mode(SamplingMode::Flat); 41 | group.sample_size(20); 42 | group.throughput(Throughput::Elements(MAP_TOTAL_OPS as u64)); 43 | 44 | bench_logs_btreemap(logs.clone(), &mut group); 45 | bench_logs_sequential_map::>("BTree", logs.clone(), &mut group); 46 | bench_logs_sequential_map::>("AVLTree", logs, &mut group); 47 | } 48 | } 49 | 50 | criterion_group!(bench, bench_vs_btreemap); 51 | criterion_main! 
{ 52 | bench, 53 | } 54 | -------------------------------------------------------------------------------- /benches/queue.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | use std::time::{Duration, Instant}; 4 | 5 | use cds::lock::{RawMutex, RawSpinLock}; 6 | use cds::queue::*; 7 | use criterion::{black_box, criterion_group, Criterion}; 8 | use criterion::{criterion_main, SamplingMode, Throughput}; 9 | use crossbeam_queue::SegQueue; 10 | use crossbeam_utils::thread; 11 | use rand::{thread_rng, Rng}; 12 | 13 | use util::concurrent::{bench_mixed_concurrent_queue, get_test_thread_nums}; 14 | use util::sequential::bench_mixed_sequential_queue; 15 | 16 | const QUEUE_PER_OPS: usize = 10_000; 17 | const QUEUE_PUSH_RATE: usize = 50; 18 | const QUEUE_POP_RATE: usize = 50; 19 | 20 | fn bench_crossbeam_seg_queue(c: &mut Criterion) { 21 | let mut group = c.benchmark_group(format!( 22 | "crossbeam_queue::SegQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 23 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 24 | )); 25 | group.sampling_mode(SamplingMode::Flat); 26 | 27 | for num in get_test_thread_nums() { 28 | group.measurement_time(Duration::from_secs(1 * num as u64)); 29 | group.throughput(Throughput::Elements((QUEUE_PER_OPS * num) as u64)); 30 | group.bench_function(&format!("{} threads", num,), |b| { 31 | b.iter_custom(|iters| { 32 | let queue = SegQueue::new(); 33 | 34 | let mut duration = Duration::ZERO; 35 | for _ in 0..iters { 36 | let batched_time = thread::scope(|s| { 37 | let mut threads = Vec::new(); 38 | 39 | for _ in 0..num { 40 | let t = s.spawn(|_| { 41 | let mut rng = thread_rng(); 42 | let mut duration = Duration::ZERO; 43 | 44 | for _ in 0..QUEUE_PER_OPS { 45 | let op_idx = rng.gen_range(0..QUEUE_PER_OPS); 46 | 47 | if op_idx < QUEUE_PUSH_RATE * QUEUE_PER_OPS / 100 { 48 | let value: u64 = rng.gen(); 49 | 50 | let start = Instant::now(); 51 | let _ = black_box(queue.push(value)); 52 | duration += start.elapsed(); 53 | } else { 54 | let start = Instant::now(); 55 | let _ = black_box(queue.pop()); 56 | duration += start.elapsed(); 57 | } 58 | } 59 | 60 | duration 61 | }); 62 | 63 | threads.push(t); 64 | } 65 | 66 | threads 67 | .into_iter() 68 | .map(|h| h.join().unwrap()) 69 | .collect::>() 70 | .iter() 71 | .sum::() 72 | }) 73 | .unwrap(); 74 | 75 | duration += batched_time 76 | } 77 | 78 | // avg thread time 79 | duration / (num as u32) 80 | }); 81 | }); 82 | } 83 | } 84 | 85 | fn bench_sequential>(name: String, c: &mut Criterion) { 86 | let mut group = c.benchmark_group(name); 87 | group.measurement_time(Duration::from_secs(1)); 88 | group.sampling_mode(SamplingMode::Flat); 89 | group.throughput(Throughput::Elements(QUEUE_PER_OPS as u64)); 90 | bench_mixed_sequential_queue::>( 91 | QUEUE_PER_OPS * QUEUE_PUSH_RATE / 100, 92 | QUEUE_PER_OPS * QUEUE_POP_RATE / 100, 93 | &mut group, 94 | ); 95 | } 96 | 97 | fn bench_concurrent>(name: String, c: &mut Criterion) { 98 | let mut group = c.benchmark_group(name); 99 | group.sampling_mode(SamplingMode::Flat); 100 | 101 | for num in get_test_thread_nums() { 102 | group.measurement_time(Duration::from_secs(1 * num as u64)); 103 | group.throughput(Throughput::Elements((QUEUE_PER_OPS * num) as u64)); 104 | bench_mixed_concurrent_queue::>>( 105 | QUEUE_PER_OPS * QUEUE_PUSH_RATE / 100, 106 | QUEUE_PER_OPS * QUEUE_POP_RATE / 100, 107 | num, 108 | &mut group, 109 | ); 110 | } 111 | } 112 | 113 | fn bench_mixed_queue(c: &mut Criterion) { 114 | bench_sequential::>( 115 | format!( 116 | 
"Queue/Ops(push: {}%, pop: {}%, per: {:+e})", 117 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 118 | ), 119 | c, 120 | ) 121 | } 122 | 123 | fn bench_mixed_fat_node_queue(c: &mut Criterion) { 124 | bench_sequential::>( 125 | format!( 126 | "FatQueueQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 127 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 128 | ), 129 | c, 130 | ) 131 | } 132 | 133 | fn bench_mixed_flat_combining_spinlock_queue(c: &mut Criterion) { 134 | bench_concurrent::>>( 135 | format!( 136 | "FCQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 137 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 138 | ), 139 | c, 140 | ); 141 | } 142 | 143 | fn bench_mixed_flat_combining_spinlock_fat_node_queue(c: &mut Criterion) { 144 | bench_concurrent::>>( 145 | format!( 146 | "FCQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 147 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 148 | ), 149 | c, 150 | ); 151 | } 152 | 153 | fn bench_mixed_flat_combining_mutex_queue(c: &mut Criterion) { 154 | bench_concurrent::>>( 155 | format!( 156 | "FCQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 157 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 158 | ), 159 | c, 160 | ); 161 | } 162 | 163 | fn bench_mixed_flat_combining_mutex_fat_node_queue(c: &mut Criterion) { 164 | bench_concurrent::>>( 165 | format!( 166 | "FCQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 167 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 168 | ), 169 | c, 170 | ); 171 | } 172 | 173 | fn bench_mixed_mutex_queue(c: &mut Criterion) { 174 | bench_concurrent::>( 175 | format!( 176 | "MutexQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 177 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 178 | ), 179 | c, 180 | ); 181 | } 182 | 183 | fn bench_mixed_two_mutex_queue(c: &mut Criterion) { 184 | bench_concurrent::>( 185 | format!( 186 | "TwoMutexQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 187 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 188 | ), 189 | c, 190 | ); 191 | } 192 | 193 | fn bench_mixed_spin_lock_queue(c: &mut Criterion) { 194 | bench_concurrent::>( 195 | format!( 196 | "SpinLockQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 197 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 198 | ), 199 | c, 200 | ); 201 | } 202 | 203 | fn bench_mixed_two_spin_lock_queue(c: &mut Criterion) { 204 | bench_concurrent::>( 205 | format!( 206 | "TwoSpinLockQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 207 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 208 | ), 209 | c, 210 | ); 211 | } 212 | 213 | fn bench_mixed_ms_queue(c: &mut Criterion) { 214 | bench_concurrent::>( 215 | format!( 216 | "MSQueue/Ops(push: {}%, pop: {}%, per: {:+e})", 217 | QUEUE_PUSH_RATE, QUEUE_POP_RATE, QUEUE_PER_OPS 218 | ), 219 | c, 220 | ); 221 | } 222 | 223 | criterion_group!( 224 | bench, 225 | bench_mixed_queue, 226 | bench_mixed_fat_node_queue, 227 | bench_crossbeam_seg_queue, 228 | bench_mixed_flat_combining_spinlock_queue, 229 | bench_mixed_flat_combining_spinlock_fat_node_queue, 230 | bench_mixed_flat_combining_mutex_queue, 231 | bench_mixed_flat_combining_mutex_fat_node_queue, 232 | bench_mixed_mutex_queue, 233 | bench_mixed_spin_lock_queue, 234 | bench_mixed_two_mutex_queue, 235 | bench_mixed_two_spin_lock_queue, 236 | bench_mixed_ms_queue 237 | ); 238 | 239 | criterion_main! 
{ 240 | bench, 241 | } 242 | -------------------------------------------------------------------------------- /benches/stack.rs: -------------------------------------------------------------------------------- 1 | mod util; 2 | 3 | use std::time::{Duration, Instant}; 4 | 5 | use cds::stack::{EBStack, MutexStack, SpinLockStack, Stack, TreiberStack}; 6 | use criterion::{black_box, criterion_group, Criterion}; 7 | use criterion::{criterion_main, SamplingMode, Throughput}; 8 | use rand::{thread_rng, Rng}; 9 | 10 | use util::concurrent::{bench_mixed_concurrent_stack, get_test_thread_nums}; 11 | 12 | const STACK_PER_OPS: usize = 10_000; 13 | const STACK_PUSH_RATE: usize = 50; 14 | const STACK_POP_RATE: usize = 50; 15 | 16 | fn bench_mixed_stack(c: &mut Criterion) { 17 | let mut group = c.benchmark_group(format!( 18 | "Stack/Ops(push: {}%, pop: {}%, per: {:+e})", 19 | STACK_PUSH_RATE, STACK_POP_RATE, STACK_PER_OPS 20 | )); 21 | group.measurement_time(Duration::from_secs(1)); 22 | group.sampling_mode(SamplingMode::Flat); 23 | group.throughput(Throughput::Elements(STACK_PER_OPS as u64)); 24 | 25 | group.bench_function("sequential", |b| { 26 | b.iter_custom(|iters| { 27 | let mut stack = Stack::new(); 28 | 29 | let mut duration = Duration::ZERO; 30 | 31 | for _ in 0..iters { 32 | let mut rng = thread_rng(); 33 | 34 | let op_idx = rng.gen_range(0..STACK_PER_OPS); 35 | 36 | if op_idx < STACK_PER_OPS { 37 | let value: u64 = rng.gen(); 38 | 39 | let start = Instant::now(); 40 | let _ = black_box(stack.push(value)); 41 | duration += start.elapsed(); 42 | } else { 43 | let start = Instant::now(); 44 | let _ = black_box(stack.pop()); 45 | duration += start.elapsed(); 46 | } 47 | } 48 | 49 | duration 50 | }); 51 | }); 52 | } 53 | 54 | fn bench_mixed_mutex_stack(c: &mut Criterion) { 55 | let mut group = c.benchmark_group(format!( 56 | "MutexStack/Ops(push: {}%, pop: {}%, per: {:+e})", 57 | STACK_PUSH_RATE, STACK_POP_RATE, STACK_PER_OPS 58 | )); 59 | group.sampling_mode(SamplingMode::Flat); 60 | 61 | for num in get_test_thread_nums() { 62 | group.measurement_time(Duration::from_secs(1 * num as u64)); 63 | group.throughput(Throughput::Elements((STACK_PER_OPS * num) as u64)); 64 | bench_mixed_concurrent_stack::>( 65 | STACK_PER_OPS * STACK_PUSH_RATE / 100, 66 | STACK_PER_OPS * STACK_POP_RATE / 100, 67 | num, 68 | &mut group, 69 | ); 70 | } 71 | } 72 | 73 | fn bench_mixed_spinlock_stack(c: &mut Criterion) { 74 | let mut group = c.benchmark_group(format!( 75 | "SpinLockStack/Ops(push: {}%, pop: {}%, per: {:+e})", 76 | STACK_PUSH_RATE, STACK_POP_RATE, STACK_PER_OPS 77 | )); 78 | group.sampling_mode(SamplingMode::Flat); 79 | 80 | for num in get_test_thread_nums() { 81 | group.measurement_time(Duration::from_secs(1 * num as u64)); 82 | group.throughput(Throughput::Elements((STACK_PER_OPS * num) as u64)); 83 | bench_mixed_concurrent_stack::>( 84 | STACK_PER_OPS * STACK_PUSH_RATE / 100, 85 | STACK_PER_OPS * STACK_POP_RATE / 100, 86 | num, 87 | &mut group, 88 | ); 89 | } 90 | } 91 | 92 | fn bench_mixed_treiber_stack(c: &mut Criterion) { 93 | let mut group = c.benchmark_group(format!( 94 | "TreiberStack/Ops(push: {}%, pop: {}%, per: {:+e})", 95 | STACK_PUSH_RATE, STACK_POP_RATE, STACK_PER_OPS 96 | )); 97 | group.sampling_mode(SamplingMode::Flat); 98 | 99 | for num in get_test_thread_nums() { 100 | group.measurement_time(Duration::from_secs(1 * num as u64)); 101 | group.throughput(Throughput::Elements((STACK_PER_OPS * num) as u64)); 102 | bench_mixed_concurrent_stack::>( 103 | STACK_PER_OPS * STACK_PUSH_RATE / 100, 104 | 
STACK_PER_OPS * STACK_POP_RATE / 100, 105 | num, 106 | &mut group, 107 | ); 108 | } 109 | } 110 | 111 | fn bench_mixed_ebstack(c: &mut Criterion) { 112 | let mut group = c.benchmark_group(format!( 113 | "EBStack/Ops(push: {}%, pop: {}%, per: {:+e})", 114 | STACK_PUSH_RATE, STACK_POP_RATE, STACK_PER_OPS 115 | )); 116 | group.sampling_mode(SamplingMode::Flat); 117 | 118 | for num in get_test_thread_nums() { 119 | group.measurement_time(Duration::from_secs(1 * num as u64)); 120 | group.throughput(Throughput::Elements((STACK_PER_OPS * num) as u64)); 121 | bench_mixed_concurrent_stack::>( 122 | STACK_PER_OPS * STACK_PUSH_RATE / 100, 123 | STACK_PER_OPS * STACK_POP_RATE / 100, 124 | num, 125 | &mut group, 126 | ); 127 | } 128 | } 129 | 130 | criterion_group!( 131 | bench, 132 | bench_mixed_stack, 133 | bench_mixed_mutex_stack, 134 | bench_mixed_spinlock_stack, 135 | bench_mixed_treiber_stack, 136 | bench_mixed_ebstack, 137 | ); 138 | criterion_main! { 139 | bench, 140 | } 141 | -------------------------------------------------------------------------------- /benches/util/concurrent.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use cds::{map::ConcurrentMap, queue::ConcurrentQueue, stack::ConcurrentStack}; 4 | use criterion::{black_box, measurement::WallTime, BenchmarkGroup}; 5 | use crossbeam_utils::thread; 6 | use rand::{prelude::SliceRandom, thread_rng, Rng}; 7 | 8 | pub fn get_test_thread_nums() -> Vec { 9 | let mut nums = Vec::new(); 10 | let logical_cores = num_cpus::get(); 11 | 12 | let mut num = 1; 13 | 14 | while num <= 2 * logical_cores { 15 | nums.push(num); 16 | 17 | if num <= 16 { 18 | num *= 2; 19 | } else { 20 | num += 16; 21 | } 22 | } 23 | 24 | if *nums.last().unwrap() != logical_cores { 25 | nums.push(logical_cores); 26 | } 27 | 28 | nums 29 | } 30 | 31 | pub fn bench_mixed_concurrent_stack( 32 | push: usize, 33 | pop: usize, 34 | thread_num: usize, 35 | c: &mut BenchmarkGroup, 36 | ) where 37 | S: Sync + ConcurrentStack, 38 | { 39 | let per_ops = push + pop; 40 | 41 | c.bench_function(&format!("{} threads", thread_num,), |b| { 42 | b.iter_custom(|iters| { 43 | let stack = S::new(); 44 | 45 | let mut duration = Duration::ZERO; 46 | for _ in 0..iters { 47 | let batched_time = thread::scope(|s| { 48 | let mut threads = Vec::new(); 49 | 50 | for _ in 0..thread_num { 51 | let t = s.spawn(|_| { 52 | let mut rng = thread_rng(); 53 | let mut duration = Duration::ZERO; 54 | 55 | for _ in 0..per_ops { 56 | let op_idx = rng.gen_range(0..per_ops); 57 | 58 | if op_idx < push { 59 | let value: u64 = rng.gen(); 60 | 61 | let start = Instant::now(); 62 | let _ = black_box(stack.push(value)); 63 | duration += start.elapsed(); 64 | } else { 65 | let start = Instant::now(); 66 | let _ = black_box(stack.try_pop()); 67 | duration += start.elapsed(); 68 | } 69 | } 70 | 71 | duration 72 | }); 73 | 74 | threads.push(t); 75 | } 76 | 77 | threads 78 | .into_iter() 79 | .map(|h| h.join().unwrap()) 80 | .collect::>() 81 | .iter() 82 | .sum::() 83 | }) 84 | .unwrap(); 85 | 86 | duration += batched_time 87 | } 88 | 89 | // avg thread time 90 | duration / (thread_num as u32) 91 | }); 92 | }); 93 | } 94 | 95 | pub fn bench_mixed_concurrent_queue( 96 | push: usize, 97 | pop: usize, 98 | thread_num: usize, 99 | c: &mut BenchmarkGroup, 100 | ) where 101 | S: Sync + ConcurrentQueue, 102 | { 103 | let per_ops = push + pop; 104 | 105 | c.bench_function(&format!("{} threads", thread_num,), |b| { 106 | b.iter_custom(|iters| { 107 | 
let stack = S::new(); 108 | 109 | let mut duration = Duration::ZERO; 110 | for _ in 0..iters { 111 | let batched_time = thread::scope(|s| { 112 | let mut threads = Vec::new(); 113 | 114 | for _ in 0..thread_num { 115 | let t = s.spawn(|_| { 116 | let mut rng = thread_rng(); 117 | let mut duration = Duration::ZERO; 118 | 119 | for _ in 0..per_ops { 120 | let op_idx = rng.gen_range(0..per_ops); 121 | 122 | if op_idx < push { 123 | let value: u64 = rng.gen(); 124 | 125 | let start = Instant::now(); 126 | let _ = black_box(stack.push(value)); 127 | duration += start.elapsed(); 128 | } else { 129 | let start = Instant::now(); 130 | let _ = black_box(stack.try_pop()); 131 | duration += start.elapsed(); 132 | } 133 | } 134 | 135 | duration 136 | }); 137 | 138 | threads.push(t); 139 | } 140 | 141 | threads 142 | .into_iter() 143 | .map(|h| h.join().unwrap()) 144 | .collect::>() 145 | .iter() 146 | .sum::() 147 | }) 148 | .unwrap(); 149 | 150 | duration += batched_time 151 | } 152 | 153 | // avg thread time 154 | duration / (thread_num as u32) 155 | }); 156 | }); 157 | } 158 | 159 | pub fn criterion_flat_bench_mixed_concurrent_map( 160 | already_inserted: u64, 161 | insert: u64, 162 | lookup: u64, 163 | remove: u64, 164 | thread_num: usize, 165 | c: &mut BenchmarkGroup, 166 | ) where 167 | M: Sync + ConcurrentMap, 168 | { 169 | c.bench_function(&format!("{} threads", thread_num,), |b| { 170 | b.iter_custom(|iters| { 171 | let mut duraction = Duration::ZERO; 172 | 173 | for _ in 0..iters { 174 | duraction += bench_mixed_concurrent_map::( 175 | already_inserted, 176 | insert, 177 | lookup, 178 | remove, 179 | thread_num, 180 | ); 181 | } 182 | 183 | duraction 184 | }) 185 | }); 186 | } 187 | 188 | pub fn criterion_linear_bench_mixed_concurrent_map( 189 | already_inserted: u64, 190 | insert: u64, 191 | lookup: u64, 192 | remove: u64, 193 | thread_num: usize, 194 | c: &mut BenchmarkGroup, 195 | ) where 196 | M: Sync + ConcurrentMap, 197 | { 198 | c.bench_function(&format!("{} threads", thread_num,), |b| { 199 | b.iter_custom(|iters| { 200 | bench_mixed_concurrent_map::( 201 | already_inserted, 202 | insert * iters, 203 | lookup * iters, 204 | remove * iters, 205 | thread_num, 206 | ) 207 | }) 208 | }); 209 | } 210 | 211 | pub fn bench_mixed_concurrent_map( 212 | already_inserted: u64, 213 | insert: u64, 214 | lookup: u64, 215 | remove: u64, 216 | thread_num: usize, 217 | ) -> Duration 218 | where 219 | M: Sync + ConcurrentMap, 220 | { 221 | let per_ops = insert + lookup + remove; 222 | 223 | let map = M::new(); 224 | let mut rng = thread_rng(); 225 | 226 | let mut range: Vec = (0..already_inserted).collect(); 227 | range.shuffle(&mut rng); 228 | 229 | // pre-insert 230 | for i in range { 231 | let _ = map.insert(&i, i); 232 | } 233 | 234 | let duration = thread::scope(|s| { 235 | let mut threads = Vec::new(); 236 | 237 | for _ in 0..thread_num { 238 | let t = s.spawn(|_| { 239 | let mut rng = thread_rng(); 240 | let mut duration = Duration::ZERO; 241 | 242 | for _ in 0..per_ops { 243 | let op_idx = rng.gen_range(0..per_ops); 244 | 245 | if op_idx < insert { 246 | let key: u64 = rng.gen_range(already_inserted..u64::MAX); 247 | 248 | let start = Instant::now(); 249 | let _ = black_box(map.insert(&key, key)); 250 | duration += start.elapsed(); 251 | } else if op_idx < insert + lookup { 252 | let key: u64 = rng.gen_range(0..already_inserted); 253 | 254 | let start = Instant::now(); 255 | let _ = black_box(map.get(&key)); 256 | duration += start.elapsed(); 257 | } else { 258 | let key: u64 = 
rng.gen_range(0..already_inserted); 259 | 260 | let start = Instant::now(); 261 | let _ = black_box(map.remove(&key)); 262 | duration += start.elapsed(); 263 | } 264 | } 265 | 266 | duration 267 | }); 268 | 269 | threads.push(t); 270 | } 271 | 272 | threads 273 | .into_iter() 274 | .map(|h| h.join().unwrap()) 275 | .collect::>() 276 | .iter() 277 | .sum::() 278 | }) 279 | .unwrap(); 280 | 281 | // avg thread time 282 | duration / (thread_num as u32) 283 | } 284 | -------------------------------------------------------------------------------- /benches/util/mod.rs: -------------------------------------------------------------------------------- 1 | // since the linter show as dead_code if it is not used at least one bench. 2 | #[allow(dead_code)] 3 | pub mod sequential; 4 | #[allow(dead_code)] 5 | pub mod concurrent; 6 | -------------------------------------------------------------------------------- /benches/util/sequential.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::BTreeMap, 3 | time::{Duration, Instant}, 4 | }; 5 | 6 | use cds::{map::SequentialMap, queue::SequentialQueue}; 7 | use criterion::{black_box, measurement::WallTime, BenchmarkGroup}; 8 | use rand::{prelude::SliceRandom, thread_rng, Rng}; 9 | 10 | #[derive(Clone, Copy)] 11 | pub enum Op { 12 | Insert(u64), 13 | Lookup(u64), 14 | Remove(u64), 15 | } 16 | 17 | pub fn fuzz_sequential_logs( 18 | iters: u64, 19 | already_inserted: u64, 20 | insert: usize, 21 | lookup: usize, 22 | remove: usize, 23 | ) -> Vec<(Vec, Vec)> { 24 | let mut rng = thread_rng(); 25 | let mut result = Vec::new(); 26 | 27 | for _ in 0..iters { 28 | let mut logs = Vec::new(); 29 | 30 | let mut pre_inserted: Vec = (0..already_inserted).collect(); 31 | pre_inserted.shuffle(&mut rng); 32 | 33 | for _ in 0..insert { 34 | logs.push(Op::Insert(rng.gen_range(already_inserted..u64::MAX))); 35 | } 36 | 37 | for _ in 0..lookup { 38 | logs.push(Op::Lookup(rng.gen_range(0..already_inserted))); 39 | } 40 | 41 | for _ in 0..remove { 42 | logs.push(Op::Remove(rng.gen_range(0..already_inserted))); 43 | } 44 | 45 | logs.shuffle(&mut rng); 46 | result.push((pre_inserted, logs)); 47 | } 48 | 49 | result 50 | } 51 | 52 | pub fn bench_mixed_sequential_queue(push: usize, pop: usize, c: &mut BenchmarkGroup) 53 | where 54 | S: SequentialQueue, 55 | { 56 | let per_ops = push + pop; 57 | 58 | c.bench_function("sequential", |b| { 59 | b.iter_custom(|iters| { 60 | let mut queue = S::new(); 61 | 62 | let mut duration = Duration::ZERO; 63 | 64 | for _ in 0..iters { 65 | let mut rng = thread_rng(); 66 | 67 | let op_idx = rng.gen_range(0..per_ops); 68 | 69 | if op_idx < per_ops { 70 | let value: u64 = rng.gen(); 71 | 72 | let start = Instant::now(); 73 | let _ = black_box(queue.push(value)); 74 | duration += start.elapsed(); 75 | } else { 76 | let start = Instant::now(); 77 | let _ = black_box(queue.pop()); 78 | duration += start.elapsed(); 79 | } 80 | } 81 | 82 | duration 83 | }); 84 | }); 85 | } 86 | 87 | pub fn bench_logs_btreemap(mut logs: Vec<(Vec, Vec)>, c: &mut BenchmarkGroup) { 88 | c.bench_function("std::BTreeMap", |b| { 89 | b.iter_custom(|iters| { 90 | let mut duration = Duration::ZERO; 91 | 92 | for _ in 0..iters { 93 | let (pre_inserted, logs) = logs.pop().unwrap(); 94 | let mut map = BTreeMap::new(); 95 | 96 | // pre-insert 97 | for key in pre_inserted { 98 | let _ = map.insert(key, key); 99 | } 100 | 101 | let start = Instant::now(); 102 | for op in logs { 103 | match op { 104 | Op::Insert(key) => { 105 | 
let _ = black_box(map.insert(key, key)); 106 | } 107 | Op::Lookup(key) => { 108 | let _ = black_box(map.get(&key)); 109 | } 110 | Op::Remove(key) => { 111 | let _ = black_box(map.remove(&key)); 112 | } 113 | } 114 | } 115 | duration += start.elapsed(); 116 | } 117 | 118 | duration 119 | }); 120 | }); 121 | } 122 | 123 | pub fn bench_logs_sequential_map( 124 | name: &str, 125 | mut logs: Vec<(Vec, Vec)>, 126 | c: &mut BenchmarkGroup, 127 | ) where 128 | M: SequentialMap, 129 | { 130 | c.bench_function(name, |b| { 131 | b.iter_custom(|iters| { 132 | let mut duration = Duration::ZERO; 133 | 134 | for _ in 0..iters { 135 | let (pre_inserted, logs) = logs.pop().unwrap(); 136 | let mut map = M::new(); 137 | 138 | // pre-insert 139 | for key in pre_inserted { 140 | let _ = map.insert(&key, key); 141 | } 142 | 143 | let start = Instant::now(); 144 | for op in logs { 145 | match op { 146 | Op::Insert(key) => { 147 | let _ = black_box(map.insert(&key, key)); 148 | } 149 | Op::Lookup(key) => { 150 | let _ = black_box(map.lookup(&key)); 151 | } 152 | Op::Remove(key) => { 153 | let _ = black_box(map.remove(&key)); 154 | } 155 | } 156 | } 157 | duration += start.elapsed(); 158 | } 159 | 160 | duration 161 | }); 162 | }); 163 | } 164 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.64.0 2 | -------------------------------------------------------------------------------- /src/avltree/mod.rs: -------------------------------------------------------------------------------- 1 | mod rwlock; 2 | mod seqlock; 3 | 4 | pub use rwlock::RwLockAVLTree; 5 | pub use seqlock::SeqLockAVLTree; 6 | 7 | use crate::map::SequentialMap; 8 | use std::{ 9 | cmp::max, 10 | fmt::Debug, 11 | mem, 12 | ops::DerefMut, 13 | ptr::{drop_in_place, NonNull}, 14 | usize, 15 | }; 16 | 17 | pub struct AVLTree { 18 | root: NonNull>, // root node is dummy for simplicity 19 | } 20 | 21 | impl Debug for AVLTree { 22 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 23 | unsafe { 24 | f.debug_struct("AVLTree") 25 | .field("root", self.root.as_ref()) 26 | .finish() 27 | } 28 | } 29 | } 30 | 31 | #[derive(Debug, Clone, Copy, PartialEq)] 32 | enum Dir { 33 | Left, 34 | Eq, 35 | Right, 36 | } 37 | 38 | #[derive(Debug)] 39 | struct Node { 40 | key: K, 41 | value: V, 42 | height: isize, 43 | left: Option>>, 44 | right: Option>>, 45 | } 46 | 47 | impl Default for Node { 48 | fn default() -> Self { 49 | Self::new(K::default(), V::default()) 50 | } 51 | } 52 | 53 | impl Node { 54 | fn new(key: K, value: V) -> Node { 55 | Node { 56 | key, 57 | value, 58 | height: 1, 59 | left: None, 60 | right: None, 61 | } 62 | } 63 | 64 | /// get the mutable reference of the child of the node by dir 65 | fn child_mut(&mut self, dir: Dir) -> &mut Option>> { 66 | match dir { 67 | Dir::Left => &mut self.left, 68 | Dir::Right => &mut self.right, 69 | Dir::Eq => panic!("There is no 'Eq' child"), 70 | } 71 | } 72 | 73 | /// renew the height of the node from the childs 74 | fn renew_height(&mut self) { 75 | let left_height = if let Some(node) = &self.left { 76 | node.height 77 | } else { 78 | 0 79 | }; 80 | 81 | let right_height = if let Some(node) = &self.right { 82 | node.height 83 | } else { 84 | 0 85 | }; 86 | 87 | self.height = max(left_height, right_height) + 1; 88 | } 89 | 90 | /// get difference of the heights from the childs 91 | fn get_factor(&self) -> isize { 92 | let left_height = if let Some(node) = 
&self.left { 93 | node.height 94 | } else { 95 | 0 96 | }; 97 | 98 | let right_height = if let Some(node) = &self.right { 99 | node.height 100 | } else { 101 | 0 102 | }; 103 | 104 | left_height - right_height 105 | } 106 | 107 | /// rotate left the node 108 | /// 109 | /// Change Parent-Right Child to Left Child-Parent, then return new parent(old right child). 110 | fn rotate_left(mut node: Box>) -> Box> { 111 | let mut new_parent = node.right.take().unwrap(); 112 | let _ = mem::replace(&mut node.right, new_parent.left); 113 | new_parent.left = Some(node); 114 | 115 | new_parent 116 | } 117 | 118 | /// rotate right the node 119 | /// 120 | /// Change Left Child-Parent to Parent-Right Child, then return new parent(old left child). 121 | fn rotate_right(mut node: Box>) -> Box> { 122 | let mut new_parent = node.left.take().unwrap(); 123 | let _ = mem::replace(&mut node.left, new_parent.right); 124 | new_parent.right = Some(node); 125 | 126 | new_parent 127 | } 128 | } 129 | 130 | /// manage the current state of the node 131 | /// 132 | /// ancestors: the parents of the node 133 | /// current: the node which it sees now. 134 | /// dir: the direction that it moves on next. If Eq, the cursor cannot move since it arrived the destination node. 135 | struct Cursor { 136 | ancestors: Vec<(NonNull>, Dir)>, 137 | current: NonNull>, 138 | dir: Dir, 139 | } 140 | 141 | impl<'c, K, V> Cursor 142 | where 143 | K: Default + Ord + Clone, 144 | V: Default, 145 | { 146 | fn new(tree: &AVLTree) -> Cursor { 147 | let cursor = Cursor { 148 | ancestors: Vec::with_capacity(tree.get_height() + 1), 149 | current: tree.root, 150 | dir: Dir::Right, 151 | }; 152 | 153 | cursor 154 | } 155 | 156 | /// get the immutable reference of the next node by the direction 157 | fn next_node(&self) -> Option<&Box>> { 158 | unsafe { 159 | match self.dir { 160 | Dir::Left => self.current.as_ref().left.as_ref(), 161 | Dir::Right => self.current.as_ref().right.as_ref(), 162 | Dir::Eq => panic!("The node is already arrived."), 163 | } 164 | } 165 | } 166 | 167 | /// get the mutable reference of the next node by the direction 168 | fn next_node_mut(&mut self) -> &mut Option>> { 169 | unsafe { 170 | match self.dir { 171 | Dir::Left => &mut self.current.as_mut().left, 172 | Dir::Right => &mut self.current.as_mut().right, 173 | Dir::Eq => panic!("The node is already arrived."), 174 | } 175 | } 176 | } 177 | 178 | /// move the cursor to the direction 179 | /// 180 | /// The cursor's dir is never changed by any functions. You should change it manually like `cursor.dir = Dir::Left`. 181 | fn move_next(&mut self) { 182 | unsafe { 183 | let next = match self.dir { 184 | Dir::Left => self.current.as_mut().left.as_mut().unwrap(), 185 | Dir::Right => self.current.as_mut().right.as_mut().unwrap(), 186 | Dir::Eq => panic!("The node is already arrived."), 187 | }; 188 | 189 | let parent = mem::replace(&mut self.current, NonNull::new(next.deref_mut()).unwrap()); 190 | self.ancestors.push((parent, self.dir)); 191 | } 192 | } 193 | 194 | /// move the node that has the greatest key on the left subtree 195 | /// 196 | /// This function is for removing the node that has two nodes. 
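    /// It steps `Dir::Left` once from the current (Eq) node, then follows `Dir::Right`
    /// children until there is no right child, and finishes with `dir == Dir::Eq` at the
    /// in-order predecessor; if the left subtree is empty it just resets `dir` to `Eq`.
    ///
    /// ```text
    ///        (5) <- cursor, dir == Eq        after the call: cursor at (4), dir == Eq
    ///        / \
    ///      (2) (8)
    ///        \
    ///        (4)
    /// ```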
197 | fn move_greatest_on_left_subtree(&mut self) { 198 | if self.dir != Dir::Eq { 199 | panic!("The node is not arrived at Eq.") 200 | } 201 | 202 | self.dir = Dir::Left; 203 | if self.next_node().is_none() { 204 | self.dir = Dir::Eq; 205 | return; 206 | } 207 | self.move_next(); 208 | 209 | self.dir = Dir::Right; 210 | while self.next_node().is_some() { 211 | self.move_next(); 212 | } 213 | 214 | self.dir = Dir::Eq; 215 | } 216 | 217 | /// rebalance the nodes by the rule of AVL using the cursor's ancestors 218 | fn rebalance(&mut self) { 219 | let parent_rotate_left = |mut node: Box>| -> Box> { 220 | let child_factor = node.right.as_ref().unwrap().get_factor(); 221 | 222 | if child_factor > 0 { 223 | let right_child = node.right.take().unwrap(); 224 | let mut right_child = Node::rotate_right(right_child); 225 | right_child.right.as_mut().unwrap().renew_height(); 226 | node.right = Some(right_child); 227 | } 228 | 229 | Node::rotate_left(node) 230 | }; 231 | 232 | let parent_rotate_right = |mut node: Box>| -> Box> { 233 | let child_factor = node.left.as_ref().unwrap().get_factor(); 234 | 235 | if child_factor < 0 { 236 | let left_child = node.left.take().unwrap(); 237 | let mut left_child = Node::rotate_left(left_child); 238 | left_child.left.as_mut().unwrap().renew_height(); 239 | node.left = Some(left_child); 240 | } 241 | 242 | Node::rotate_right(node) 243 | }; 244 | 245 | while let Some((mut node, dir)) = self.ancestors.pop() { 246 | // the root node for target node 247 | let root = unsafe { node.as_mut() }; 248 | 249 | let target = match dir { 250 | Dir::Left => &mut root.left, 251 | Dir::Right => &mut root.right, 252 | _ => unreachable!(), 253 | }; 254 | 255 | let factor = target.as_ref().unwrap().get_factor(); 256 | 257 | match factor { 258 | -2 => { 259 | let mut new_target = parent_rotate_left(target.take().unwrap()); 260 | new_target.left.as_mut().unwrap().renew_height(); 261 | new_target.renew_height(); 262 | *target = Some(new_target); 263 | } 264 | -1..=1 => target.as_mut().unwrap().renew_height(), 265 | 2 => { 266 | let mut new_target = parent_rotate_right(target.take().unwrap()); 267 | new_target.right.as_mut().unwrap().renew_height(); 268 | new_target.renew_height(); 269 | *target = Some(new_target); 270 | } 271 | _ => unreachable!(), 272 | } 273 | } 274 | } 275 | } 276 | 277 | impl AVLTree 278 | where 279 | K: Default + Ord + Clone, 280 | V: Default, 281 | { 282 | /// find the last state of the cursor by the key 283 | /// 284 | /// If there exists the key on the tree, the cursor's current is the node and the dir is Eq. 285 | /// If there does not exist the key on the tree, the cursor's current is leaf node and the dir is 286 | /// Left if the key is greater than the key of the node, or Right if the key is less than. 
287 | fn find(&self, key: &K) -> Cursor { 288 | let mut cursor = Cursor::new(self); 289 | 290 | loop { 291 | if cursor.next_node().is_none() { 292 | return cursor; 293 | } 294 | 295 | cursor.move_next(); 296 | 297 | unsafe { 298 | if *key == cursor.current.as_ref().key { 299 | cursor.dir = Dir::Eq; 300 | return cursor; 301 | } else if *key < cursor.current.as_ref().key { 302 | cursor.dir = Dir::Left; 303 | } else { 304 | // *key > next.key 305 | cursor.dir = Dir::Right; 306 | } 307 | } 308 | } 309 | } 310 | 311 | /// get the height of the tree 312 | pub fn get_height(&self) -> usize { 313 | if let Some(node) = unsafe { self.root.as_ref().right.as_ref() } { 314 | node.height as usize 315 | } else { 316 | 0 317 | } 318 | } 319 | } 320 | 321 | impl SequentialMap for AVLTree 322 | where 323 | K: Default + Ord + Clone, 324 | V: Default, 325 | { 326 | fn new() -> Self { 327 | let root = Box::new(Node::default()); 328 | 329 | let tree = AVLTree { 330 | root: Box::leak(root).into(), 331 | }; 332 | 333 | tree 334 | } 335 | 336 | fn insert(&mut self, key: &K, value: V) -> Result<(), V> { 337 | let node = Box::new(Node::new(key.clone(), value)); 338 | 339 | let mut cursor = self.find(key); 340 | 341 | if cursor.dir == Dir::Eq { 342 | return Err(node.value); 343 | } 344 | 345 | *(cursor.next_node_mut()) = Some(node); 346 | 347 | unsafe { 348 | cursor.current.as_mut().renew_height(); 349 | } 350 | cursor.rebalance(); 351 | 352 | Ok(()) 353 | } 354 | 355 | fn lookup(&self, key: &K) -> Option<&V> { 356 | let cursor = self.find(key); 357 | 358 | unsafe { 359 | if cursor.dir == Dir::Eq { 360 | return Some(&cursor.current.as_ref().value); 361 | } else { 362 | return None; 363 | } 364 | } 365 | } 366 | 367 | fn remove(&mut self, key: &K) -> Result { 368 | let mut cursor = self.find(key); 369 | 370 | if cursor.dir != Dir::Eq { 371 | return Err(()); 372 | } 373 | 374 | let current = unsafe { cursor.current.as_ref() }; 375 | 376 | let (left, right) = (current.left.is_some(), current.right.is_some()); 377 | 378 | // special case: find largest node from left subtree, swap, and remove 379 | if left && right { 380 | let (mut parent, dir) = cursor.ancestors.last_mut().unwrap(); 381 | let child = unsafe { parent.as_mut().child_mut(*dir).as_mut().unwrap() }; 382 | 383 | cursor.move_greatest_on_left_subtree(); 384 | 385 | let (mut swap_node_parent, dir) = cursor.ancestors.pop().unwrap(); 386 | let swap_node_ptr = unsafe { swap_node_parent.as_mut().child_mut(dir) }; 387 | let swap_node = swap_node_ptr.as_mut().unwrap(); 388 | 389 | mem::swap(&mut child.key, &mut swap_node.key); 390 | mem::swap(&mut child.value, &mut swap_node.value); 391 | 392 | let swap_node = swap_node_ptr.take().unwrap(); 393 | if swap_node.left.is_some() { 394 | *swap_node_ptr = swap_node.left; 395 | } 396 | 397 | cursor.rebalance(); 398 | 399 | return Ok(swap_node.value); 400 | } 401 | 402 | let (mut parent, dir) = cursor.ancestors.pop().unwrap(); 403 | let child = unsafe { parent.as_mut().child_mut(dir) }; 404 | let node = child.take().unwrap(); 405 | 406 | if left { 407 | *child = node.left; 408 | } else if right { 409 | *child = node.right; 410 | } 411 | 412 | cursor.rebalance(); 413 | Ok(node.value) 414 | } 415 | } 416 | 417 | impl Drop for AVLTree { 418 | fn drop(&mut self) { 419 | // since the struct had 'pointer' instead of 'ownership' of the root, 420 | // manually drop the root. Then, the childs are dropped recursively. 
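        // drop_in_place only runs the destructor: the left/right Boxes are freed as the
        // dummy root's fields drop, but the root allocation itself (created via Box::leak
        // in `new`) is not handed back to the allocator here.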
421 | unsafe { drop_in_place(self.root.as_mut()) }; 422 | } 423 | } 424 | -------------------------------------------------------------------------------- /src/avltree/rwlock.rs: -------------------------------------------------------------------------------- 1 | use crossbeam_epoch::pin; 2 | use crossbeam_epoch::unprotected; 3 | use crossbeam_epoch::Atomic; 4 | use crossbeam_epoch::Guard; 5 | use crossbeam_epoch::Owned; 6 | use crossbeam_epoch::Shared; 7 | use crossbeam_utils::sync::ShardedLock; 8 | use crossbeam_utils::sync::ShardedLockReadGuard; 9 | use crossbeam_utils::sync::ShardedLockWriteGuard; 10 | use std::cmp::max; 11 | use std::fmt::Debug; 12 | use std::mem; 13 | use std::mem::ManuallyDrop; 14 | use std::sync::atomic::AtomicIsize; 15 | use std::sync::atomic::Ordering; 16 | 17 | use crate::map::ConcurrentMap; 18 | 19 | struct Node { 20 | key: K, 21 | height: AtomicIsize, 22 | inner: ShardedLock>, 23 | } 24 | 25 | impl Debug for Node { 26 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 27 | f.debug_struct("Node") 28 | .field("key", &self.key) 29 | .field("height", &self.height.load(Ordering::Relaxed)) 30 | .field("inner", &*self.inner.read().unwrap()) 31 | .finish() 32 | } 33 | } 34 | struct NodeInner { 35 | value: Option, 36 | left: Atomic>, 37 | right: Atomic>, 38 | } 39 | 40 | impl Debug for NodeInner { 41 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 42 | unsafe { 43 | let mut result = f.debug_struct("NodeInner"); 44 | result.field("value", &self.value); 45 | 46 | if let Some(left) = self.left.load(Ordering::Acquire, unprotected()).as_ref() { 47 | result.field("left", &left); 48 | } else { 49 | result.field("left", &"null"); 50 | } 51 | 52 | if let Some(right) = self.right.load(Ordering::Acquire, unprotected()).as_ref() { 53 | result.field("right", &right); 54 | } else { 55 | result.field("right", &"null"); 56 | } 57 | 58 | result.finish() 59 | } 60 | } 61 | } 62 | 63 | impl NodeInner { 64 | fn get_child(&self, dir: Dir) -> &Atomic> { 65 | match dir { 66 | Dir::Left => &self.left, 67 | Dir::Right => &self.right, 68 | Dir::Eq => unreachable!(), 69 | } 70 | } 71 | 72 | fn is_same_child(&self, dir: Dir, child: Shared>, guard: &Guard) -> bool { 73 | self.get_child(dir).load(Ordering::Relaxed, guard) == child 74 | } 75 | 76 | fn get_factor(&self, guard: &Guard) -> isize { 77 | let left = self.left.load(Ordering::Relaxed, guard); 78 | let right = self.right.load(Ordering::Relaxed, guard); 79 | 80 | let left_height = if !left.is_null() { 81 | unsafe { left.as_ref().unwrap().height.load(Ordering::Acquire) } 82 | } else { 83 | 0 84 | }; 85 | 86 | let right_height = if !right.is_null() { 87 | unsafe { right.as_ref().unwrap().height.load(Ordering::Acquire) } 88 | } else { 89 | 0 90 | }; 91 | 92 | left_height - right_height 93 | } 94 | 95 | fn get_new_height(&self, guard: &Guard) -> isize { 96 | let left = self.left.load(Ordering::Relaxed, guard); 97 | let right = self.right.load(Ordering::Relaxed, guard); 98 | 99 | let left = if !left.is_null() { 100 | unsafe { left.as_ref().unwrap().height.load(Ordering::Acquire) } 101 | } else { 102 | 0 103 | }; 104 | 105 | let right = if !right.is_null() { 106 | unsafe { right.as_ref().unwrap().height.load(Ordering::Acquire) } 107 | } else { 108 | 0 109 | }; 110 | 111 | max(left, right) + 1 112 | } 113 | } 114 | 115 | impl Default for Node { 116 | fn default() -> Self { 117 | Self::new(K::default(), V::default()) 118 | } 119 | } 120 | 121 | impl Node { 122 | fn new(key: K, value: V) -> Node { 123 | Node 
{ 124 | key, 125 | height: AtomicIsize::new(1), 126 | inner: ShardedLock::new(NodeInner { 127 | value: Some(value), 128 | left: Atomic::null(), 129 | right: Atomic::null(), 130 | }), 131 | } 132 | } 133 | 134 | /// rotate left the node 135 | /// 136 | /// Change Parent-Right Child to Left Child-Parent, then return new parent(old right child). 137 | /// For simple managing locks, the function does not call lock, only use given lock guards. 138 | fn rotate_left<'g>( 139 | current: Shared>, 140 | current_guard: &ShardedLockWriteGuard>, 141 | right_child_guard: &ShardedLockWriteGuard>, 142 | guard: &'g Guard, 143 | ) -> Shared<'g, Node> { 144 | let right_child_left_child = right_child_guard.left.load(Ordering::Relaxed, guard); 145 | let new_parent = current_guard 146 | .right 147 | .swap(right_child_left_child, Ordering::Relaxed, guard); 148 | right_child_guard.left.store(current, Ordering::Relaxed); 149 | 150 | new_parent 151 | } 152 | 153 | /// rotate right the node 154 | /// 155 | /// Change Left Child-Parent to Parent-Right Child, then return new parent(old left child). 156 | /// For simple managing locks, the function does not call lock, only use given lock guards. 157 | fn rotate_right<'g>( 158 | current: Shared>, 159 | current_guard: &ShardedLockWriteGuard>, 160 | left_child_guard: &ShardedLockWriteGuard>, 161 | guard: &'g Guard, 162 | ) -> Shared<'g, Node> { 163 | let left_child_right_child = left_child_guard.right.load(Ordering::Relaxed, guard); 164 | let new_parent = current_guard 165 | .left 166 | .swap(left_child_right_child, Ordering::Relaxed, guard); 167 | left_child_guard.right.store(current, Ordering::Relaxed); 168 | 169 | new_parent 170 | } 171 | 172 | /// cleanup moving to ancestor 173 | /// 174 | /// If the node does not have full childs, delete it and move child to its position. 175 | /// If successing to defer_destroy it, return true else false. 
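    ///
    /// The unlink is validated twice: under the node's read lock it must already be logically
    /// removed (`value` is `None`) and have at most one child; then, holding the parent's
    /// write lock, the parent must still point to `current` for `dir`, and the same conditions
    /// are re-checked under the node's write lock before the remaining child is spliced into
    /// the parent and the unlinked node is handed to `guard.defer_destroy`.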
176 | fn try_cleanup( 177 | current: Shared>, 178 | parent: Shared>, 179 | dir: Dir, 180 | guard: &Guard, 181 | ) -> bool { 182 | let parent_ref = unsafe { parent.as_ref().unwrap() }; 183 | 184 | let current_ref = unsafe { current.as_ref().unwrap() }; 185 | let read_guard = current_ref.inner.read().unwrap(); 186 | 187 | // only already logically removed node can be cleaned up 188 | if read_guard.value.is_none() { 189 | let (left, right) = ( 190 | read_guard.left.load(Ordering::Relaxed, guard).is_null(), 191 | read_guard.right.load(Ordering::Relaxed, guard).is_null(), 192 | ); 193 | 194 | // if the node has one or zero node, the node can be directly removed 195 | if left || right { 196 | drop(read_guard); 197 | 198 | let parent_write_guard = parent_ref.inner.write().unwrap(); 199 | 200 | // check if current's parent is even parent now 201 | if parent_write_guard.is_same_child(dir, current, guard) { 202 | let write_guard = current_ref.inner.write().unwrap(); 203 | 204 | let (left, right) = ( 205 | write_guard.left.load(Ordering::Relaxed, guard), 206 | write_guard.right.load(Ordering::Relaxed, guard), 207 | ); 208 | 209 | // re-check if it can be removed 210 | if write_guard.value.is_none() && (left.is_null() || right.is_null()) { 211 | let replace_node = if !left.is_null() { 212 | write_guard 213 | .left 214 | .swap(Shared::null(), Ordering::Relaxed, guard) 215 | } else { 216 | write_guard 217 | .right 218 | .swap(Shared::null(), Ordering::Relaxed, guard) 219 | }; 220 | 221 | let current = parent_write_guard.get_child(dir).swap( 222 | replace_node, 223 | Ordering::Relaxed, 224 | guard, 225 | ); 226 | 227 | drop(parent_write_guard); 228 | drop(write_guard); 229 | 230 | // request deallocate removed node 231 | unsafe { 232 | guard.defer_destroy(current); 233 | } 234 | 235 | return true; 236 | } 237 | } 238 | } 239 | } 240 | 241 | false 242 | } 243 | 244 | /// rebalance from current to grand_parent and renew all changed nodes 245 | /// 246 | /// If the relation among the nodes is not changed and the heights are needed to rotate, do it. 
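    ///
    /// The balance factor is first checked under the parent's read lock; if it is within
    /// [-1, 1] nothing is done. Otherwise `root`'s and `parent`'s write locks are taken, the
    /// `root`-to-`parent` edge is re-validated, and the usual AVL cases follow: factor <= -2
    /// performs an RR rotation (with a partial RL pre-rotation of the right child when that
    /// child's own factor is positive), factor >= 2 the mirrored LL/LR rotation, and the
    /// affected heights are published with `Release` stores afterwards.
    ///
    /// ```text
    /// RR case (factor <= -2):      rotate_left(parent)
    ///     p                              c
    ///      \                            / \
    ///       c            ==>           p   r
    ///        \
    ///         r
    /// ```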
247 | fn try_rebalance<'g>( 248 | parent: Shared>, 249 | (root, root_dir): &(Shared>, Dir), // if rotating, root's child pointer should be rewritten 250 | guard: &'g Guard, 251 | ) { 252 | let parent_guard = unsafe { parent.as_ref().unwrap().inner.read().unwrap() }; 253 | 254 | if (-1..=1).contains(&parent_guard.get_factor(guard)) { 255 | return; 256 | } 257 | 258 | drop(parent_guard); 259 | 260 | let root_guard = unsafe { root.as_ref().unwrap().inner.write().unwrap() }; 261 | 262 | if !root_guard.is_same_child(*root_dir, parent, guard) { 263 | // The parent is separated from root between parent's read and write guard 264 | return; 265 | } 266 | 267 | let parent_ref = unsafe { parent.as_ref().unwrap() }; 268 | let parent_guard = parent_ref.inner.write().unwrap(); 269 | let mut current: Shared>; 270 | let mut current_guard: ShardedLockWriteGuard>; 271 | 272 | if parent_guard.get_factor(guard) <= -2 { 273 | // R* rotation 274 | current = parent_guard.right.load(Ordering::Relaxed, guard); 275 | let current_ref = unsafe { current.as_ref().unwrap() }; 276 | current_guard = current_ref.inner.write().unwrap(); 277 | 278 | if current_guard.get_factor(guard) > 0 { 279 | // partial RL rotation 280 | let left_child = current_guard.left.load(Ordering::Relaxed, guard); 281 | 282 | let left_child_guard = 283 | unsafe { left_child.as_ref().unwrap().inner.write().unwrap() }; 284 | 285 | parent_guard.right.store( 286 | Node::rotate_right(current, ¤t_guard, &left_child_guard, guard), 287 | Ordering::Relaxed, 288 | ); 289 | 290 | unsafe { 291 | current 292 | .as_ref() 293 | .unwrap() 294 | .height 295 | .store(current_guard.get_new_height(guard), Ordering::Release) 296 | }; 297 | 298 | current = left_child; 299 | current_guard = left_child_guard; 300 | } 301 | 302 | // RR rotation 303 | root_guard.get_child(*root_dir).store( 304 | Node::rotate_left(parent, &parent_guard, ¤t_guard, guard), 305 | Ordering::Relaxed, 306 | ); 307 | } else if parent_guard.get_factor(guard) >= 2 { 308 | // L* rotation 309 | current = parent_guard.left.load(Ordering::Relaxed, guard); 310 | let current_ref = unsafe { current.as_ref().unwrap() }; 311 | current_guard = current_ref.inner.write().unwrap(); 312 | 313 | if current_guard.get_factor(guard) < 0 { 314 | // partial LR rotation 315 | let right_child = current_guard.right.load(Ordering::Relaxed, guard); 316 | 317 | let right_child_guard = 318 | unsafe { right_child.as_ref().unwrap().inner.write().unwrap() }; 319 | 320 | parent_guard.left.store( 321 | Node::rotate_left(current, ¤t_guard, &right_child_guard, guard), 322 | Ordering::Relaxed, 323 | ); 324 | 325 | unsafe { 326 | current 327 | .as_ref() 328 | .unwrap() 329 | .height 330 | .store(current_guard.get_new_height(guard), Ordering::Release) 331 | }; 332 | 333 | current = right_child; 334 | current_guard = right_child_guard; 335 | } 336 | 337 | // LL rotation 338 | root_guard.get_child(*root_dir).store( 339 | Node::rotate_right(parent, &parent_guard, ¤t_guard, guard), 340 | Ordering::Relaxed, 341 | ); 342 | } else { 343 | // The structure is changed stable between read guard and write guard. 
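                // i.e. another thread already rebalanced this subtree between dropping the
                // read guard and acquiring the write guards, so the factor is back in
                // [-1, 1] and no rotation (or height publish) is needed on this path.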
344 | return; 345 | } 346 | 347 | unsafe { 348 | parent 349 | .as_ref() 350 | .unwrap() 351 | .height 352 | .store(parent_guard.get_new_height(guard), Ordering::Release); 353 | current 354 | .as_ref() 355 | .unwrap() 356 | .height 357 | .store(current_guard.get_new_height(guard), Ordering::Release); 358 | } 359 | } 360 | } 361 | 362 | #[derive(Debug, Clone, Copy, PartialEq)] 363 | enum Dir { 364 | Left, 365 | Eq, 366 | Right, 367 | } 368 | 369 | struct Cursor<'g, K, V> { 370 | ancestors: Vec<(Shared<'g, Node>, Dir)>, 371 | current: Shared<'g, Node>, 372 | /// the read lock for current node's inner 373 | /// It keeps current node's inner and is for hand-over-hand locking. 374 | inner_guard: ManuallyDrop>>, 375 | dir: Dir, 376 | } 377 | 378 | impl<'g, K, V> Cursor<'g, K, V> { 379 | fn new(tree: &RwLockAVLTree, guard: &'g Guard) -> Cursor<'g, K, V> { 380 | let root = tree.root.load(Ordering::Relaxed, guard); 381 | let inner_guard = unsafe { root.as_ref().unwrap().inner.read().unwrap() }; 382 | 383 | let cursor = Cursor { 384 | ancestors: Vec::with_capacity(tree.get_height() + 5), 385 | current: root, 386 | inner_guard: ManuallyDrop::new(inner_guard), 387 | dir: Dir::Right, 388 | }; 389 | 390 | cursor 391 | } 392 | 393 | /// move the cursor to the direction using hand-over-hand locking 394 | /// 395 | /// The cursor's dir is never changed by any functions. You should change it manually like `cursor.dir = Dir::Left`. 396 | fn move_next(&mut self, guard: &'g Guard) -> Result<(), ()> { 397 | let next = match self.dir { 398 | Dir::Left => self.inner_guard.left.load(Ordering::Relaxed, guard), 399 | Dir::Right => self.inner_guard.right.load(Ordering::Relaxed, guard), 400 | Dir::Eq => panic!("The node is already arrived."), 401 | }; 402 | 403 | if next.is_null() { 404 | return Err(()); 405 | } 406 | 407 | let next_node = unsafe { next.as_ref().unwrap() }; 408 | let next_guard = next_node.inner.read().unwrap(); 409 | 410 | let parent = mem::replace(&mut self.current, next); 411 | self.ancestors.push((parent, self.dir)); 412 | 413 | // replace with current's read guard, then unlock parent read guard by dropping 414 | let mut parent_guard = mem::replace(&mut self.inner_guard, ManuallyDrop::new(next_guard)); 415 | 416 | unsafe { 417 | ManuallyDrop::drop(&mut parent_guard); 418 | } 419 | 420 | Ok(()) 421 | } 422 | 423 | /// try to cleanup and rebalance the node 424 | /// TODO: manage repair operation by unique on current waiting list 425 | fn repair(mut cursor: Cursor<'g, K, V>, guard: &'g Guard) { 426 | while let Some((parent, dir)) = cursor.ancestors.pop() { 427 | if !Node::try_cleanup(cursor.current, parent, dir, guard) { 428 | { 429 | let current = unsafe { cursor.current.as_ref().unwrap() }; 430 | let current_guard = current.inner.read().unwrap(); 431 | 432 | current 433 | .height 434 | .store(current_guard.get_new_height(guard), Ordering::Release); 435 | } 436 | 437 | // the cursor.current is alive, so try rebalancing 438 | if let Some(root_pair) = cursor.ancestors.last() { 439 | Node::try_rebalance(parent, root_pair, guard); 440 | } 441 | } 442 | 443 | cursor.current = parent; 444 | } 445 | } 446 | } 447 | 448 | pub struct RwLockAVLTree { 449 | root: Atomic>, 450 | } 451 | 452 | impl Debug for RwLockAVLTree { 453 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 454 | unsafe { 455 | f.debug_struct("RwLockAVLTree") 456 | .field( 457 | "root", 458 | self.root 459 | .load(Ordering::Acquire, unprotected()) 460 | .as_ref() 461 | .unwrap(), 462 | ) 463 | .finish() 464 | } 465 | } 466 
| } 467 | 468 | impl RwLockAVLTree { 469 | /// find the last state of the cursor by the key 470 | /// 471 | /// If there exists the key on the tree, the cursor's current is the node and the dir is Eq. 472 | /// If there does not exist the key on the tree, the cursor's current is leaf node and the dir is 473 | /// Left if the key is greater than the key of the node, or Right if the key is less than. 474 | fn find<'g>(&self, key: &K, guard: &'g Guard) -> Cursor<'g, K, V> 475 | where 476 | K: Ord, 477 | { 478 | let mut cursor = Cursor::new(self, guard); 479 | 480 | loop { 481 | if cursor.move_next(guard).is_err() { 482 | return cursor; 483 | } 484 | 485 | unsafe { 486 | if *key == cursor.current.as_ref().unwrap().key { 487 | cursor.dir = Dir::Eq; 488 | return cursor; 489 | } else if *key < cursor.current.as_ref().unwrap().key { 490 | cursor.dir = Dir::Left; 491 | } else { 492 | // *key > next.key 493 | cursor.dir = Dir::Right; 494 | } 495 | } 496 | } 497 | } 498 | 499 | /// get the height of the tree 500 | pub fn get_height(&self) -> usize { 501 | unsafe { 502 | if let Some(node) = self 503 | .root 504 | .load(Ordering::Relaxed, &pin()) 505 | .as_ref() 506 | .unwrap() 507 | .inner 508 | .read() 509 | .unwrap() 510 | .right 511 | .load(Ordering::Acquire, &pin()) 512 | .as_ref() 513 | { 514 | node.height.load(Ordering::Relaxed) as usize 515 | } else { 516 | 0 517 | } 518 | } 519 | } 520 | } 521 | 522 | impl ConcurrentMap for RwLockAVLTree 523 | where 524 | K: Ord + Clone + Default, 525 | V: Default, 526 | { 527 | fn new() -> Self { 528 | RwLockAVLTree { 529 | root: Atomic::new(Node::default()), 530 | } 531 | } 532 | 533 | fn insert(&self, key: &K, value: V) -> Result<(), V> { 534 | let guard = pin(); 535 | 536 | let node = Node::new(key.clone(), value); 537 | 538 | // TODO: it can be optimized by re-search nearby ancestors 539 | loop { 540 | let mut cursor = self.find(key, &guard); 541 | 542 | // unlock read lock and lock write lock... very inefficient, need upgrade from read lock to write lock 543 | unsafe { 544 | ManuallyDrop::drop(&mut cursor.inner_guard); 545 | } 546 | 547 | if cursor.dir == Dir::Eq && cursor.inner_guard.value.is_some() { 548 | let node_inner = node.inner.into_inner().unwrap(); 549 | return Err(node_inner.value.unwrap()); 550 | } 551 | 552 | let current = unsafe { cursor.current.as_ref().unwrap() }; 553 | 554 | // check if the current is alive now by checking parent node. If disconnected, retry 555 | // TODO: is it efficient? It needs to check only whether the current is connected, not checking the current's parent is changed. 556 | let parent_read_guard = if let Some((parent, dir)) = cursor.ancestors.last() { 557 | let parent_read_guard = unsafe { parent.as_ref().unwrap().inner.read().unwrap() }; 558 | if !parent_read_guard.is_same_child(*dir, cursor.current, &guard) { 559 | // Before inserting, the current is already disconnected. 560 | continue; 561 | } 562 | Some(parent_read_guard) 563 | } else { 564 | None 565 | }; 566 | 567 | let mut write_guard = current.inner.write().unwrap(); 568 | 569 | drop(parent_read_guard); 570 | 571 | match cursor.dir { 572 | Dir::Left => { 573 | if !write_guard.left.load(Ordering::Relaxed, &guard).is_null() { 574 | continue; // some thread already writed. Retry 575 | } 576 | 577 | write_guard.left.store(Owned::new(node), Ordering::Relaxed); 578 | } 579 | Dir::Right => { 580 | if !write_guard.right.load(Ordering::Relaxed, &guard).is_null() { 581 | continue; // some thread already writed. 
Retry 582 | } 583 | 584 | write_guard.right.store(Owned::new(node), Ordering::Relaxed); 585 | } 586 | Dir::Eq => { 587 | let value = node.inner.into_inner().unwrap().value.unwrap(); 588 | 589 | if write_guard.value.is_some() { 590 | return Err(value); 591 | } 592 | 593 | write_guard.value = Some(value); 594 | } 595 | } 596 | 597 | drop(write_guard); 598 | 599 | Cursor::repair(cursor, &guard); 600 | 601 | return Ok(()); 602 | } 603 | } 604 | 605 | fn lookup(&self, key: &K, f: F) -> R 606 | where 607 | F: FnOnce(Option<&V>) -> R, 608 | { 609 | let guard = pin(); 610 | 611 | let mut cursor = self.find(key, &guard); 612 | 613 | if cursor.dir == Dir::Eq { 614 | unsafe { 615 | ManuallyDrop::drop(&mut cursor.inner_guard); 616 | } 617 | let current = unsafe { cursor.current.as_ref().unwrap() }; 618 | let write_guard = current.inner.write().unwrap(); 619 | 620 | return f(write_guard.value.as_ref()); 621 | } else { 622 | return f(None); 623 | } 624 | } 625 | 626 | fn get(&self, key: &K) -> Option 627 | where 628 | V: Clone, 629 | { 630 | let guard = pin(); 631 | 632 | let mut cursor = self.find(key, &guard); 633 | 634 | if cursor.dir == Dir::Eq { 635 | let inner_guard = ManuallyDrop::into_inner(cursor.inner_guard); 636 | return inner_guard.value.clone(); 637 | } else { 638 | unsafe { ManuallyDrop::drop(&mut cursor.inner_guard) }; 639 | return None; 640 | } 641 | } 642 | 643 | fn remove(&self, key: &K) -> Result { 644 | let guard = pin(); 645 | 646 | let mut cursor = self.find(key, &guard); 647 | 648 | let current = unsafe { cursor.current.as_ref().unwrap() }; 649 | unsafe { ManuallyDrop::drop(&mut cursor.inner_guard) }; 650 | 651 | if cursor.dir != Dir::Eq { 652 | return Err(()); 653 | } 654 | 655 | // unlock read lock and lock write lock... very inefficient, need upgrade from read lock to write lock 656 | let mut write_guard = current.inner.write().unwrap(); 657 | 658 | if write_guard.value.is_none() { 659 | return Err(()); 660 | } 661 | 662 | let value = write_guard.value.take().unwrap(); 663 | drop(write_guard); 664 | 665 | Cursor::repair(cursor, &guard); 666 | 667 | Ok(value) 668 | } 669 | } 670 | 671 | impl Drop for RwLockAVLTree { 672 | fn drop(&mut self) { 673 | let pin = pin(); 674 | let mut nodes = vec![mem::replace(&mut self.root, Atomic::null())]; 675 | while let Some(node) = nodes.pop() { 676 | let node = unsafe { node.into_owned() }; 677 | let mut write_guard = node.inner.write().unwrap(); 678 | 679 | let left = mem::replace(&mut write_guard.left, Atomic::null()); 680 | let right = mem::replace(&mut write_guard.right, Atomic::null()); 681 | 682 | if !left.load(Ordering::Relaxed, &pin).is_null() { 683 | nodes.push(left); 684 | } 685 | if !right.load(Ordering::Relaxed, &pin).is_null() { 686 | nodes.push(right); 687 | } 688 | } 689 | } 690 | } 691 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod avltree; 2 | pub mod btree; 3 | pub mod linkedlist; 4 | pub mod lock; 5 | pub mod map; 6 | pub mod queue; 7 | pub mod stack; 8 | pub mod util; 9 | -------------------------------------------------------------------------------- /src/linkedlist/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::map::SequentialMap; 2 | 3 | // simple sequential linked list 4 | pub struct LinkedList { 5 | head: Node, // dummy node with key = Default, but the key is not considered on algorithm 6 | } 7 | 8 | struct Node { 9 | key: 
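The `ConcurrentMap` implementation for `RwLockAVLTree` can be exercised as below. This is a hedged, single-threaded sketch (every operation already takes `&self`, so no `mut` binding is needed); the key and value types are arbitrary.

```rust
use cds::{avltree::RwLockAVLTree, map::ConcurrentMap};

fn main() {
    let map: RwLockAVLTree<u64, String> = RwLockAVLTree::new();

    assert_eq!(map.insert(&1, "one".to_string()), Ok(()));
    // A duplicate insert hands the rejected value back in Err.
    assert_eq!(map.insert(&1, "uno".to_string()), Err("uno".to_string()));

    // lookup() runs a closure on Option<&V> without cloning the value.
    assert_eq!(map.lookup(&1, |v| v.map(|s| s.len())), Some(3));

    // get() clones the stored value; remove() returns ownership of it.
    assert_eq!(map.get(&1), Some("one".to_string()));
    assert_eq!(map.remove(&1), Ok("one".to_string()));
    assert_eq!(map.remove(&1), Err(()));
}
```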
K, 10 | value: V, 11 | next: Option>>, 12 | } 13 | 14 | impl Default for Node { 15 | fn default() -> Self { 16 | Self::new(K::default(), V::default()) 17 | } 18 | } 19 | 20 | impl Node { 21 | fn new(key: K, value: V) -> Node { 22 | Node { 23 | key, 24 | value, 25 | next: None, 26 | } 27 | } 28 | } 29 | 30 | impl SequentialMap for LinkedList 31 | where 32 | K: Default + Eq + Clone, 33 | V: Default, 34 | { 35 | fn new() -> LinkedList { 36 | LinkedList { 37 | head: Node::default(), 38 | } 39 | } 40 | 41 | fn insert(&mut self, key: &K, value: V) -> Result<(), V> { 42 | let new = Box::new(Node::new(key.clone(), value)); 43 | 44 | let mut current = &mut self.head.next; 45 | 46 | loop { 47 | match current { 48 | Some(node) => { 49 | if node.key == *key { 50 | return Err(new.value); 51 | } 52 | 53 | current = &mut node.next; 54 | } 55 | None => { 56 | *current = Some(new); 57 | return Ok(()); 58 | } 59 | } 60 | } 61 | } 62 | 63 | fn lookup(&self, key: &K) -> Option<&V> { 64 | let mut current = &self.head.next; 65 | 66 | loop { 67 | match current { 68 | Some(node) => { 69 | let value = &node.value; 70 | 71 | if node.key == *key { 72 | return Some(value); 73 | } 74 | 75 | current = &node.next; 76 | } 77 | None => return None, 78 | } 79 | } 80 | } 81 | 82 | fn remove(&mut self, key: &K) -> Result { 83 | let mut prev = &mut self.head; 84 | 85 | loop { 86 | match prev.next.is_some() { 87 | true => { 88 | if prev.next.as_ref().unwrap().key == *key { 89 | let mut node = prev.next.take(); 90 | prev.next = node.as_mut().unwrap().next.take(); 91 | 92 | return Ok(node.unwrap().value); 93 | } 94 | 95 | prev = prev.next.as_mut().unwrap(); 96 | } 97 | false => return Err(()), 98 | } 99 | } 100 | } 101 | } 102 | 103 | impl Drop for LinkedList { 104 | fn drop(&mut self) { 105 | let mut node = self.head.next.take(); 106 | 107 | while let Some(mut inside) = node { 108 | node = inside.next.take(); 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/lock/fclock.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * This code is refered to https://github.com/khizmax/libcds/blob/master/cds/algo/flat_combining/kernel.h 3 | */ 4 | 5 | use std::{ 6 | cell::UnsafeCell, 7 | fmt::Debug, 8 | ptr, 9 | sync::atomic::{AtomicBool, AtomicUsize, Ordering}, 10 | }; 11 | 12 | use crossbeam_epoch::{pin, unprotected, Atomic, Guard, Owned, Shared}; 13 | use crossbeam_utils::{Backoff, CachePadded}; 14 | use thread_local::ThreadLocal; 15 | 16 | use super::RawSimpleLock; 17 | 18 | pub trait FlatCombining { 19 | fn apply(&mut self, operation: T) -> T; 20 | } 21 | 22 | // libcds constant: 1024 - 1, 8 23 | const COMPACT_FACTOR: usize = 1024 - 1; 24 | const COMBINE_PASS: usize = 8; 25 | 26 | pub struct Record { 27 | operation: Atomic, // The tag 0/1 means response/request. 
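To show the `SequentialMap` side of the API, here is a small usage sketch of the `LinkedList` above; the keys and values are arbitrary.

```rust
use cds::{linkedlist::LinkedList, map::SequentialMap};

fn main() {
    let mut list: LinkedList<String, u32> = LinkedList::new();

    assert_eq!(list.insert(&"a".to_string(), 1), Ok(()));
    assert_eq!(list.insert(&"b".to_string(), 2), Ok(()));
    // Inserting an existing key hands the rejected value back.
    assert_eq!(list.insert(&"a".to_string(), 10), Err(10));

    assert_eq!(list.lookup(&"b".to_string()), Some(&2));
    assert_eq!(list.remove(&"a".to_string()), Ok(1));
    assert_eq!(list.lookup(&"a".to_string()), None);
}
```

The explicit iterative `Drop` above exists so that dropping a long list does not recurse through the `Box` chain and overflow the stack.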
28 | state: AtomicBool, // false: inactive, true: active 29 | age: AtomicUsize, 30 | next: Atomic>, 31 | } 32 | 33 | impl Debug for Record { 34 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 35 | let guard = &pin(); 36 | 37 | unsafe { 38 | let mut debug = f.debug_struct("Record"); 39 | 40 | if let Some(operation) = self.operation.load(Ordering::SeqCst, guard).as_ref() { 41 | debug.field("operation", operation); 42 | } else { 43 | debug.field("operation", &"null"); 44 | } 45 | 46 | debug.field("state", &self.state.load(Ordering::SeqCst)); 47 | 48 | debug.field("age", &self.age.load(Ordering::SeqCst)); 49 | 50 | if let Some(next) = self.next.load(Ordering::SeqCst, guard).as_ref() { 51 | debug.field("next", next).finish() 52 | } else { 53 | debug.field("next", &"null").finish() 54 | } 55 | } 56 | } 57 | } 58 | 59 | impl Record { 60 | #[inline] 61 | pub fn set(&self, operation: T) { 62 | self.operation 63 | .store(Owned::new(operation).with_tag(1), Ordering::Release); 64 | } 65 | 66 | #[inline] 67 | fn is_response(&self, guard: &Guard) -> bool { 68 | self.operation.load(Ordering::Acquire, guard).tag() == 0 69 | } 70 | 71 | #[inline] 72 | pub fn get_operation(&self, guard: &Guard) -> T { 73 | unsafe { ptr::read(self.operation.load(Ordering::Relaxed, guard).deref()) } 74 | } 75 | } 76 | 77 | pub struct FCLock { 78 | publications: Atomic>, 79 | lock: CachePadded, 80 | target: UnsafeCell>>, 81 | thread_local: ThreadLocal>>, 82 | age: AtomicUsize, 83 | stat: FCLockStat, 84 | } 85 | 86 | #[derive(Default, Debug)] 87 | struct FCLockStat { 88 | repush_record: AtomicUsize, 89 | 90 | // the stat on combining 91 | combine: AtomicUsize, 92 | passive_wait: AtomicUsize, 93 | passive_wait_iter: AtomicUsize, 94 | passive_response_after_lock: AtomicUsize, 95 | passive_to_combine: AtomicUsize, 96 | 97 | // the stat on compacting publications 98 | compact_pubs: AtomicUsize, 99 | deactivated_record: AtomicUsize, 100 | } 101 | 102 | impl Drop for FCLock { 103 | fn drop(&mut self) { 104 | unsafe { 105 | let guard = unprotected(); 106 | 107 | for local_record in self.thread_local.iter() { 108 | let dummy = local_record.load(Ordering::Relaxed, guard); 109 | drop(dummy.into_owned()); 110 | } 111 | } 112 | } 113 | } 114 | 115 | impl FCLock { 116 | #[inline] 117 | fn repush_record(&self, record: Shared>, guard: &Guard) { 118 | unsafe { 119 | if !record.deref().state.load(Ordering::Acquire) { 120 | self.push_record(record, guard); 121 | 122 | #[cfg(feature = "concurrent_stat")] 123 | self.stat.repush_record.fetch_add(1, Ordering::Relaxed); 124 | } 125 | } 126 | } 127 | 128 | fn combine(&self, guard: &Guard) { 129 | let current_age = self.age.fetch_add(1, Ordering::Relaxed) + 1; 130 | 131 | // TODO: this way is useful? 
132 | let mut useful_pass = 0; 133 | let mut empty_pass = 0; 134 | for _ in 0..COMBINE_PASS { 135 | if self.combine_pass(current_age, guard) { 136 | useful_pass += 1; 137 | } else { 138 | empty_pass += 1; 139 | 140 | if empty_pass > useful_pass { 141 | break; 142 | } 143 | } 144 | } 145 | 146 | #[cfg(feature = "concurrent_stat")] 147 | self.stat.combine.fetch_add(1, Ordering::Relaxed); 148 | 149 | if current_age & COMPACT_FACTOR == 0 { 150 | self.compact_publications(current_age, guard); 151 | } 152 | } 153 | 154 | fn combine_pass(&self, current_age: usize, guard: &Guard) -> bool { 155 | let mut is_done = false; 156 | 157 | unsafe { 158 | let target = &mut *self.target.get(); 159 | 160 | let mut node = self.publications.load(Ordering::Acquire, guard); 161 | 162 | while !node.is_null() { 163 | let node_ref = node.deref(); 164 | 165 | if node_ref.state.load(Ordering::Acquire) { 166 | // active record 167 | let operation = node_ref.operation.load(Ordering::Acquire, guard); 168 | 169 | if operation.tag() == 1 { 170 | let operation = ptr::read(operation.deref()); 171 | 172 | node_ref.age.store(current_age, Ordering::Relaxed); 173 | 174 | let response = target.apply(operation); 175 | 176 | node_ref 177 | .operation 178 | .store(Owned::new(response).with_tag(0), Ordering::Release); 179 | 180 | is_done = true; 181 | } 182 | } 183 | 184 | node = node_ref.next.load(Ordering::Acquire, guard); 185 | } 186 | } 187 | 188 | is_done 189 | } 190 | 191 | fn compact_publications(&self, current_age: usize, guard: &Guard) { 192 | unsafe { 193 | let mut parent = self.publications.load(Ordering::Acquire, guard); 194 | let mut node = parent.deref().next.load(Ordering::Acquire, guard); 195 | 196 | while !node.is_null() { 197 | let node_ref = node.deref(); 198 | 199 | if node_ref.state.load(Ordering::Acquire) 200 | && current_age.wrapping_sub(node_ref.age.load(Ordering::Relaxed)) 201 | > COMPACT_FACTOR 202 | { 203 | // remove old inactive node 204 | let parent_ref = parent.deref(); 205 | let new = node_ref.next.load(Ordering::Acquire, guard); 206 | 207 | if parent_ref 208 | .next 209 | .compare_exchange(node, new, Ordering::Acquire, Ordering::Relaxed, guard) 210 | .is_ok() 211 | { 212 | node_ref.state.store(false, Ordering::Relaxed); 213 | node = new; 214 | 215 | #[cfg(feature = "concurrent_stat")] 216 | self.stat.deactivated_record.fetch_add(1, Ordering::Relaxed); 217 | } 218 | 219 | continue; 220 | } 221 | 222 | // just move next 223 | parent = node; 224 | node = node_ref.next.load(Ordering::Acquire, guard); 225 | } 226 | } 227 | 228 | #[cfg(feature = "concurrent_stat")] 229 | self.stat.compact_pubs.fetch_add(1, Ordering::Relaxed); 230 | } 231 | 232 | pub fn new(target: impl FlatCombining + 'static) -> Self { 233 | Self { 234 | publications: Atomic::null(), 235 | lock: CachePadded::new(L::new()), 236 | target: UnsafeCell::new(Box::new(target)), 237 | thread_local: ThreadLocal::new(), 238 | age: AtomicUsize::new(0), 239 | stat: FCLockStat::default(), 240 | } 241 | } 242 | 243 | pub fn acquire_record<'a>(&self, guard: &'a Guard) -> Shared<'a, Record> { 244 | let node = self.thread_local.get_or(|| { 245 | Atomic::new(Record { 246 | operation: Atomic::null(), 247 | state: AtomicBool::new(false), 248 | age: AtomicUsize::new(0), 249 | next: Atomic::null(), 250 | }) 251 | }); 252 | 253 | let node = node.load(Ordering::Relaxed, guard); 254 | 255 | if unsafe { !node.deref().state.load(Ordering::Acquire) } { 256 | self.push_record(node, guard); 257 | } 258 | 259 | node 260 | } 261 | 262 | pub fn push_record(&self, record: 
Shared>, guard: &Guard) { 263 | unsafe { 264 | let record_ref = record.deref(); 265 | 266 | debug_assert!(!record_ref.state.load(Ordering::Relaxed)); 267 | 268 | record_ref.state.store(true, Ordering::Relaxed); 269 | 270 | let backoff = Backoff::new(); 271 | 272 | loop { 273 | let head = self.publications.load(Ordering::Relaxed, guard); 274 | 275 | record_ref.next.store(head, Ordering::Release); 276 | 277 | if self 278 | .publications 279 | .compare_exchange(head, record, Ordering::Release, Ordering::Relaxed, guard) 280 | .is_ok() 281 | { 282 | return; 283 | } 284 | 285 | backoff.spin(); 286 | } 287 | } 288 | } 289 | 290 | pub fn try_combine(&self, record: Shared>, guard: &Guard) { 291 | unsafe { 292 | let record_ref = record.deref(); 293 | 294 | if self.lock.try_lock() { 295 | // now the thread is combiner 296 | self.repush_record(record, guard); 297 | 298 | self.combine(guard); 299 | 300 | self.lock.unlock(); 301 | } else { 302 | #[cfg(feature = "concurrent_stat")] 303 | self.stat.passive_wait.fetch_add(1, Ordering::Relaxed); 304 | 305 | // wait and the thread may be combiner if its operation is not finished and it gets lock 306 | let backoff = Backoff::new(); 307 | 308 | while !record_ref.is_response(guard) { 309 | self.repush_record(record, guard); 310 | 311 | #[cfg(feature = "concurrent_stat")] 312 | self.stat.passive_wait_iter.fetch_add(1, Ordering::Relaxed); 313 | 314 | if self.lock.try_lock() { 315 | // Another combiner is finished. So, it can receive response 316 | 317 | if !record_ref.is_response(guard) { 318 | // It does not receive response. So, the thread becomes combiner 319 | self.repush_record(record, guard); 320 | 321 | self.combine(guard); 322 | 323 | #[cfg(feature = "concurrent_stat")] 324 | self.stat.passive_to_combine.fetch_add(1, Ordering::Relaxed); 325 | } else { 326 | #[cfg(feature = "concurrent_stat")] 327 | self.stat 328 | .passive_response_after_lock 329 | .fetch_add(1, Ordering::Relaxed); 330 | } 331 | 332 | self.lock.unlock(); 333 | break; 334 | } 335 | 336 | backoff.snooze(); 337 | } 338 | } 339 | } 340 | } 341 | 342 | #[cfg(feature = "concurrent_stat")] 343 | pub fn print_stat(&self) { 344 | println!("{:?}", self.stat); 345 | } 346 | } 347 | -------------------------------------------------------------------------------- /src/lock/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod fclock; 2 | pub mod mutex; 3 | pub mod seqlock; 4 | pub mod spinlock; 5 | 6 | pub use mutex::RawMutex; 7 | pub use seqlock::SeqLock; 8 | pub use spinlock::RawSpinLock; 9 | pub use spinlock::SpinLock; 10 | 11 | pub unsafe trait RawSimpleLock { 12 | fn new() -> Self; 13 | 14 | /// Non-blocking: Try locking. If succeeding, return true, or false. 
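The flat-combining lock is normally consumed through `FCQueue` (shown later), but its record/publication API can be driven directly. The sketch below wraps a toy sequential counter; `CounterOp`, `Counter`, and the assumption that the generics stripped by the rendering read `FCLock<T, L: RawSimpleLock>` and `FlatCombining<T>` are illustrative, not taken verbatim from the crate.

```rust
use cds::lock::fclock::{FCLock, FlatCombining};
use cds::lock::RawSpinLock;
use crossbeam_epoch::pin;

// Illustrative request/response pair for a toy flat-combined counter.
#[derive(Debug)]
enum CounterOp {
    AddRequest(u64),
    AddResponse(u64),
}

// The sequential object the combiner applies published operations to.
struct Counter(u64);

impl FlatCombining<CounterOp> for Counter {
    fn apply(&mut self, operation: CounterOp) -> CounterOp {
        match operation {
            CounterOp::AddRequest(delta) => {
                self.0 += delta;
                CounterOp::AddResponse(self.0)
            }
            _ => unreachable!("a response is never re-applied"),
        }
    }
}

fn main() {
    let lock: FCLock<CounterOp, RawSpinLock> = FCLock::new(Counter(0));

    let guard = pin();
    let record = lock.acquire_record(&guard);
    let record_ref = unsafe { record.deref() };

    // Publish the request, then either combine ourselves or wait for a combiner.
    record_ref.set(CounterOp::AddRequest(5));
    lock.try_combine(record, &guard);

    match record_ref.get_operation(&guard) {
        CounterOp::AddResponse(total) => assert_eq!(total, 5),
        _ => unreachable!(),
    }
}
```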
15 | fn try_lock(&self) -> bool; 16 | 17 | /// Blocking: Get locking or wait until getting locking 18 | fn lock(&self); 19 | 20 | /// Release lock 21 | fn unlock(&self); 22 | } 23 | -------------------------------------------------------------------------------- /src/lock/mutex.rs: -------------------------------------------------------------------------------- 1 | use parking_lot::lock_api::RawMutex as RMutex; 2 | 3 | use super::RawSimpleLock; 4 | 5 | pub struct RawMutex { 6 | inner: parking_lot::RawMutex, 7 | } 8 | 9 | unsafe impl RawSimpleLock for RawMutex { 10 | #[inline] 11 | fn new() -> Self { 12 | Self { 13 | inner: RMutex::INIT, 14 | } 15 | } 16 | 17 | #[inline] 18 | fn try_lock(&self) -> bool { 19 | self.inner.try_lock() 20 | } 21 | 22 | #[inline] 23 | fn lock(&self) { 24 | self.inner.lock(); 25 | } 26 | 27 | #[inline] 28 | fn unlock(&self) { 29 | unsafe { self.inner.unlock() }; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/lock/seqlock.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * This code is refered to https://github.com/kaist-cp/cs431/blob/main/lock/src/seqlock.rs 3 | */ 4 | 5 | use core::mem; 6 | use core::ops::Deref; 7 | use core::sync::atomic::{fence, AtomicUsize, Ordering}; 8 | 9 | use crossbeam_utils::Backoff; 10 | 11 | #[derive(Debug)] 12 | struct RawSeqLock { 13 | seq: AtomicUsize, 14 | } 15 | 16 | impl RawSeqLock { 17 | const fn new() -> Self { 18 | Self { 19 | seq: AtomicUsize::new(0), 20 | } 21 | } 22 | 23 | fn write_lock(&self) -> usize { 24 | let backoff = Backoff::new(); 25 | 26 | loop { 27 | let seq = self.seq.load(Ordering::Relaxed); 28 | if seq & 1 == 0 29 | && self 30 | .seq 31 | .compare_exchange( 32 | seq, 33 | seq.wrapping_add(1), 34 | Ordering::Acquire, 35 | Ordering::Relaxed, 36 | ) 37 | .is_ok() 38 | { 39 | fence(Ordering::Release); 40 | return seq; 41 | } 42 | 43 | backoff.snooze(); 44 | } 45 | } 46 | 47 | fn write_unlock(&self, seq: usize) { 48 | self.seq.store(seq.wrapping_add(2), Ordering::Release); 49 | } 50 | 51 | fn read_begin(&self) -> usize { 52 | let backoff = Backoff::new(); 53 | 54 | loop { 55 | let seq = self.seq.load(Ordering::Acquire); 56 | if seq & 1 == 0 { 57 | return seq; 58 | } 59 | 60 | backoff.snooze(); 61 | } 62 | } 63 | 64 | fn read_validate(&self, seq: usize) -> bool { 65 | fence(Ordering::Acquire); 66 | 67 | seq == self.seq.load(Ordering::Relaxed) 68 | } 69 | 70 | unsafe fn upgrade(&self, seq: usize) -> Result<(), ()> { 71 | if self 72 | .seq 73 | .compare_exchange( 74 | seq, 75 | seq.wrapping_add(1), 76 | Ordering::Acquire, 77 | Ordering::Relaxed, 78 | ) 79 | .is_err() 80 | { 81 | return Err(()); 82 | } 83 | 84 | fence(Ordering::Release); 85 | Ok(()) 86 | } 87 | } 88 | 89 | #[derive(Debug)] 90 | pub struct SeqLock { 91 | lock: RawSeqLock, 92 | data: T, 93 | } 94 | 95 | #[derive(Debug)] 96 | pub struct WriteGuard<'s, T> { 97 | lock: &'s SeqLock, 98 | seq: usize, 99 | } 100 | 101 | #[derive(Debug)] 102 | pub struct ReadGuard<'s, T> { 103 | lock: &'s SeqLock, 104 | seq: usize, 105 | } 106 | 107 | unsafe impl Send for SeqLock {} 108 | unsafe impl Sync for SeqLock {} 109 | 110 | unsafe impl<'s, T> Send for WriteGuard<'s, T> {} 111 | unsafe impl<'s, T: Send + Sync> Sync for WriteGuard<'s, T> {} 112 | 113 | unsafe impl<'s, T> Send for ReadGuard<'s, T> {} 114 | unsafe impl<'s, T: Send + Sync> Sync for ReadGuard<'s, T> {} 115 | 116 | impl SeqLock { 117 | pub fn new(data: T) -> Self { 118 | SeqLock { 119 | lock: 
RawSeqLock::new(), 120 | data: data.into(), 121 | } 122 | } 123 | 124 | pub fn write_lock(&self) -> WriteGuard { 125 | let seq = self.lock.write_lock(); 126 | WriteGuard { lock: self, seq } 127 | } 128 | 129 | pub unsafe fn read_lock(&self) -> ReadGuard { 130 | let seq = self.lock.read_begin(); 131 | ReadGuard { lock: self, seq } 132 | } 133 | 134 | pub unsafe fn read(&self, f: F) -> Result 135 | where 136 | F: FnOnce(&T) -> R, 137 | { 138 | let guard = self.read_lock(); 139 | let result = f(&guard); 140 | 141 | if guard.finish() { 142 | Ok(result) 143 | } else { 144 | Err(()) 145 | } 146 | } 147 | } 148 | 149 | impl<'s, T> Deref for WriteGuard<'s, T> { 150 | type Target = T; 151 | 152 | fn deref(&self) -> &Self::Target { 153 | &self.lock.data 154 | } 155 | } 156 | 157 | impl<'s, T> Drop for WriteGuard<'s, T> { 158 | fn drop(&mut self) { 159 | self.lock.lock.write_unlock(self.seq); 160 | } 161 | } 162 | 163 | impl<'s, T> Deref for ReadGuard<'s, T> { 164 | type Target = T; 165 | 166 | fn deref(&self) -> &Self::Target { 167 | &self.lock.data 168 | } 169 | } 170 | 171 | impl<'s, T> Clone for ReadGuard<'s, T> { 172 | fn clone(&self) -> Self { 173 | Self { 174 | lock: self.lock, 175 | seq: self.seq, 176 | } 177 | } 178 | } 179 | 180 | impl<'s, T> Drop for ReadGuard<'s, T> { 181 | fn drop(&mut self) { 182 | panic!("For safety, seqlock::ReadGuard cannot be dropped. Use finish or forget."); 183 | } 184 | } 185 | 186 | impl<'s, T> ReadGuard<'s, T> { 187 | pub fn validate(&self) -> bool { 188 | self.lock.lock.read_validate(self.seq) 189 | } 190 | 191 | pub fn restart(&mut self) { 192 | self.seq = self.lock.lock.read_begin(); 193 | } 194 | 195 | pub fn finish(self) -> bool { 196 | let result = self.lock.lock.read_validate(self.seq); 197 | mem::forget(self); 198 | result 199 | } 200 | 201 | /// Just forget ReadGuard without validation. Carefully use it only if it does not need to be validated. 
202 | pub fn forget(self) { 203 | mem::forget(self); 204 | } 205 | 206 | pub fn upgrade(self) -> Result, ()> { 207 | let result = if unsafe { self.lock.lock.upgrade(self.seq).is_ok() } { 208 | Ok(WriteGuard { 209 | lock: self.lock, 210 | seq: self.seq, 211 | }) 212 | } else { 213 | Err(()) 214 | }; 215 | mem::forget(self); 216 | result 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /src/lock/spinlock.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::UnsafeCell, 3 | ops::{Deref, DerefMut}, 4 | sync::atomic::{AtomicBool, Ordering}, 5 | }; 6 | 7 | use crossbeam_utils::Backoff; 8 | 9 | use super::RawSimpleLock; 10 | 11 | pub struct RawSpinLock { 12 | flag: AtomicBool, 13 | } 14 | 15 | unsafe impl RawSimpleLock for RawSpinLock { 16 | fn new() -> Self { 17 | Self { 18 | flag: AtomicBool::new(false), 19 | } 20 | } 21 | 22 | #[inline] 23 | fn try_lock(&self) -> bool { 24 | self.flag 25 | .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) 26 | .is_ok() 27 | } 28 | 29 | #[inline] 30 | fn lock(&self) { 31 | let backoff = Backoff::new(); 32 | 33 | while !self.try_lock() { 34 | backoff.snooze(); 35 | } 36 | } 37 | 38 | #[inline] 39 | fn unlock(&self) { 40 | self.flag.store(false, Ordering::Release); 41 | } 42 | } 43 | 44 | pub struct SpinLock { 45 | lock: RawSpinLock, 46 | data: UnsafeCell, 47 | } 48 | 49 | unsafe impl Send for SpinLock {} 50 | unsafe impl Sync for SpinLock {} 51 | 52 | unsafe impl<'s, T> Send for Guard<'s, T> {} 53 | unsafe impl<'s, T: Send + Sync> Sync for Guard<'s, T> {} 54 | 55 | pub struct Guard<'s, T> { 56 | lock: &'s SpinLock, 57 | } 58 | 59 | impl SpinLock { 60 | pub fn new(data: T) -> Self { 61 | Self { 62 | lock: RawSpinLock::new(), 63 | data: UnsafeCell::new(data), 64 | } 65 | } 66 | 67 | pub fn lock(&self) -> Guard { 68 | self.lock.lock(); 69 | 70 | Guard { lock: self } 71 | } 72 | } 73 | 74 | impl<'s, T> Deref for Guard<'s, T> { 75 | type Target = T; 76 | 77 | fn deref(&self) -> &Self::Target { 78 | unsafe { &*self.lock.data.get() } 79 | } 80 | } 81 | 82 | impl<'s, T> DerefMut for Guard<'s, T> { 83 | fn deref_mut(&mut self) -> &mut Self::Target { 84 | unsafe { &mut *self.lock.data.get() } 85 | } 86 | } 87 | 88 | impl<'s, T> Drop for Guard<'s, T> { 89 | fn drop(&mut self) { 90 | self.lock.lock.unlock(); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/map.rs: -------------------------------------------------------------------------------- 1 | pub trait SequentialMap { 2 | fn new() -> Self; 3 | 4 | /// Insert (key, vaule) into the map. 5 | /// 6 | /// If success, return Ok(()). 7 | /// If fail, return Err(value) that you tried to insert. 8 | fn insert(&mut self, key: &K, value: V) -> Result<(), V>; 9 | 10 | /// Lookup (key, value) from the map with the key. 11 | /// 12 | /// If success, return the reference of the value. 13 | /// If fail, return None. 14 | fn lookup(&self, key: &K) -> Option<&V>; 15 | 16 | /// Remove (key, value) from the map with the key. 17 | /// 18 | /// If success, return Ok(value) which is inserted before. 19 | /// If fail, return Err(()). 20 | fn remove(&mut self, key: &K) -> Result; 21 | } 22 | 23 | pub trait ConcurrentMap { 24 | fn new() -> Self; 25 | 26 | /// Insert (key, vaule) into the map. 27 | /// 28 | /// If success, return Ok(()). 29 | /// If fail, return Err(value) that you tried to insert. 
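A hedged sketch of using `SeqLock` directly: because `WriteGuard` and `ReadGuard` only hand out `&T`, the protected data itself supplies the interior mutability (a pair of atomics here, mirroring how tree nodes are expected to use it), and the reader retries until `read` validates its snapshot.

```rust
use cds::lock::SeqLock;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let lock = SeqLock::new((AtomicUsize::new(0), AtomicUsize::new(0)));

    // Writer: the sequence number makes the two stores appear atomic to readers.
    {
        let guard = lock.write_lock();
        guard.0.store(1, Ordering::Relaxed);
        guard.1.store(2, Ordering::Relaxed);
    } // write_unlock happens when the guard drops

    // Optimistic reader: retry until the snapshot validates.
    let snapshot = loop {
        // Safety: the closure does not keep references past the read section.
        let result = unsafe {
            lock.read(|data| (data.0.load(Ordering::Relaxed), data.1.load(Ordering::Relaxed)))
        };
        if let Ok(pair) = result {
            break pair;
        }
        // A writer raced with this read; try again.
    };

    assert_eq!(snapshot, (1, 2));
}
```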
30 | fn insert(&self, key: &K, value: V) -> Result<(), V>; 31 | 32 | /// Lookup (key, value) from the map with the key. 33 | /// 34 | /// Execute function with the reference of the value, or None if it failed to find. 35 | fn lookup(&self, key: &K, f: F) -> R 36 | where 37 | F: FnOnce(Option<&V>) -> R; 38 | 39 | /// Lookup (key, value) from the map with the key 40 | /// 41 | /// If success, return the copy of value 42 | /// If fail, return None 43 | fn get(&self, key: &K) -> Option 44 | where 45 | V: Clone; 46 | 47 | /// Remove (key, value) from the map with the key. 48 | /// 49 | /// If success, return Ok(value) which is inserted before. 50 | /// If fail, return Err(()). 51 | fn remove(&self, key: &K) -> Result; 52 | } 53 | -------------------------------------------------------------------------------- /src/queue/fclock.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Debug, hint::unreachable_unchecked, marker::PhantomData}; 2 | 3 | use crossbeam_epoch::pin; 4 | use crossbeam_utils::Backoff; 5 | 6 | use crate::lock::{ 7 | fclock::{FCLock, FlatCombining}, 8 | RawSimpleLock, 9 | }; 10 | 11 | use super::{ConcurrentQueue, SequentialQueue}; 12 | 13 | #[derive(Debug, PartialEq)] 14 | enum QueueOp { 15 | EnqRequest(V), 16 | EnqResponse, 17 | DeqRequest, 18 | DeqResponse(Option), 19 | } 20 | 21 | unsafe impl Send for QueueOp {} 22 | unsafe impl Sync for QueueOp {} 23 | 24 | impl> FlatCombining> for Q { 25 | fn apply(&mut self, operation: QueueOp) -> QueueOp { 26 | match operation { 27 | QueueOp::EnqRequest(value) => { 28 | self.push(value); 29 | QueueOp::EnqResponse 30 | } 31 | QueueOp::DeqRequest => QueueOp::DeqResponse(self.pop()), 32 | _ => unreachable!("The response cannot be applied."), 33 | } 34 | } 35 | } 36 | 37 | pub struct FCQueue> { 38 | queue: FCLock, L>, 39 | _marker: PhantomData, 40 | } 41 | 42 | unsafe impl> Send for FCQueue {} 43 | unsafe impl> Sync for FCQueue {} 44 | 45 | impl> FCQueue { 46 | #[cfg(feature = "concurrent_stat")] 47 | pub fn print_stat(&self) { 48 | self.queue.print_stat(); 49 | } 50 | } 51 | 52 | impl + FlatCombining>> 53 | ConcurrentQueue for FCQueue 54 | { 55 | fn new() -> Self { 56 | let queue = Q::new(); 57 | 58 | Self { 59 | queue: FCLock::new(queue), 60 | _marker: PhantomData, 61 | } 62 | } 63 | 64 | fn push(&self, value: V) { 65 | let guard = pin(); 66 | 67 | let record = self.queue.acquire_record(&guard); 68 | let record_ref = unsafe { record.deref() }; 69 | 70 | record_ref.set(QueueOp::EnqRequest(value)); 71 | 72 | self.queue.try_combine(record, &guard); 73 | } 74 | 75 | fn try_pop(&self) -> Option { 76 | let guard = pin(); 77 | 78 | let record = self.queue.acquire_record(&guard); 79 | let record_ref = unsafe { record.deref() }; 80 | 81 | record_ref.set(QueueOp::DeqRequest); 82 | 83 | self.queue.try_combine(record, &guard); 84 | 85 | let operation = record_ref.get_operation(&guard); 86 | 87 | if let QueueOp::DeqResponse(value) = operation { 88 | value 89 | } else { 90 | unsafe { unreachable_unchecked() } 91 | } 92 | } 93 | 94 | fn pop(&self) -> V { 95 | let backoff = Backoff::new(); 96 | 97 | loop { 98 | match self.try_pop() { 99 | Some(value) => return value, 100 | None => backoff.snooze(), 101 | } 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/queue/lockfree.rs: -------------------------------------------------------------------------------- 1 | /* 2 | Refer to 3 | https://github.com/kaist-cp/cs431/blob/main/lockfree/src/queue.rs 
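A usage sketch for `FCQueue`. The order of its type parameters was stripped by the rendering above, so `FCQueue<V, L: RawSimpleLock, Q: SequentialQueue<V>>` is an assumption here; the rest follows the public `ConcurrentQueue` API.

```rust
use cds::lock::RawSpinLock;
use cds::queue::{ConcurrentQueue, FCQueue, Queue};
use std::sync::Arc;
use std::thread;

fn main() {
    // Flat-combining queue over the sequential Queue, guarded by the raw spinlock.
    let queue: Arc<FCQueue<u64, RawSpinLock, Queue<u64>>> = Arc::new(FCQueue::new());

    let producers: Vec<_> = (0..4u64)
        .map(|t| {
            let queue = Arc::clone(&queue);
            thread::spawn(move || {
                for i in 0..1_000u64 {
                    queue.push(t * 1_000 + i);
                }
            })
        })
        .collect();
    for p in producers {
        p.join().unwrap();
    }

    // try_pop() never blocks; pop() would spin with backoff on an empty queue.
    let mut count = 0;
    while queue.try_pop().is_some() {
        count += 1;
    }
    assert_eq!(count, 4_000);
}
```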
and 4 | https://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf 5 | */ 6 | 7 | use std::{mem::MaybeUninit, ptr, sync::atomic::Ordering}; 8 | 9 | use crossbeam_epoch::{pin, unprotected, Atomic, Owned, Shared}; 10 | use crossbeam_utils::{Backoff, CachePadded}; 11 | 12 | use super::ConcurrentQueue; 13 | 14 | pub struct MSQueue { 15 | head: CachePadded>>, 16 | tail: CachePadded>>, 17 | } 18 | 19 | struct Node { 20 | value: MaybeUninit, 21 | next: Atomic>, 22 | } 23 | 24 | impl Node { 25 | fn new(value: MaybeUninit) -> Self { 26 | Self { 27 | value, 28 | next: Atomic::null(), 29 | } 30 | } 31 | } 32 | 33 | impl ConcurrentQueue for MSQueue { 34 | fn new() -> Self { 35 | let queue = Self { 36 | head: CachePadded::new(Atomic::null()), 37 | tail: CachePadded::new(Atomic::null()), 38 | }; 39 | 40 | // store dummy node into both head and tail 41 | unsafe { 42 | let dummy = 43 | Owned::new(Node::new(MaybeUninit::::uninit())).into_shared(unprotected()); 44 | 45 | queue.head.store(dummy, Ordering::Relaxed); 46 | queue.tail.store(dummy, Ordering::Relaxed); 47 | } 48 | 49 | queue 50 | } 51 | 52 | fn push(&self, value: V) { 53 | let guard = pin(); 54 | 55 | let node = Owned::new(Node::new(MaybeUninit::new(value))).into_shared(&guard); 56 | 57 | loop { 58 | let tail = self.tail.load(Ordering::Acquire, &guard); 59 | let tail_ref = unsafe { tail.deref() }; 60 | let tail_next = tail_ref.next.load(Ordering::Acquire, &guard); 61 | 62 | if tail_next.is_null() { 63 | // If null, The tail pointer is real tail at that time. Try CAS 64 | if tail_ref 65 | .next 66 | .compare_exchange( 67 | Shared::null(), 68 | node, 69 | Ordering::Release, 70 | Ordering::Relaxed, 71 | &guard, 72 | ) 73 | .is_ok() 74 | { 75 | // just try move tail pointer to next(node) 76 | let _ = self.tail.compare_exchange( 77 | tail, 78 | node, 79 | Ordering::Release, 80 | Ordering::Relaxed, 81 | &guard, 82 | ); 83 | break; 84 | } 85 | } else { 86 | // The tail pointer is not real tail. Move to next and try again. 87 | let _ = self.tail.compare_exchange( 88 | tail, 89 | tail_next, 90 | Ordering::Release, 91 | Ordering::Relaxed, 92 | &guard, 93 | ); 94 | } 95 | } 96 | } 97 | 98 | fn try_pop(&self) -> Option { 99 | let guard = pin(); 100 | 101 | loop { 102 | let head = self.head.load(Ordering::Acquire, &guard); // the dummy node 103 | let head_next = unsafe { head.deref().next.load(Ordering::Acquire, &guard) }; // the real head 104 | 105 | let tail = self.tail.load(Ordering::Relaxed, &guard); 106 | 107 | if head_next.is_null() { 108 | // if the head's next pointer is null, the queue is observed as empty. 109 | return None; 110 | } 111 | 112 | if head == tail { 113 | // the head's next pointer is not null, but head == tail means that the tail pointer is STALE! 114 | // So, set the tail pointer to head's next. 115 | let _ = self.tail.compare_exchange( 116 | tail, 117 | head_next, 118 | Ordering::Release, 119 | Ordering::Relaxed, 120 | &guard, 121 | ); 122 | } 123 | 124 | // the queue may not be empty. 
Try CAS for moving head to next 125 | if self 126 | .head 127 | .compare_exchange( 128 | head, 129 | head_next, 130 | Ordering::Release, 131 | Ordering::Relaxed, 132 | &guard, 133 | ) 134 | .is_ok() 135 | { 136 | // free head and get head_next's value 137 | unsafe { 138 | guard.defer_destroy(head); 139 | return Some(ptr::read(&head_next.deref().value).assume_init()); 140 | } 141 | } 142 | } 143 | } 144 | 145 | fn pop(&self) -> V { 146 | let backoff = Backoff::new(); 147 | 148 | loop { 149 | match self.try_pop() { 150 | Some(value) => return value, 151 | None => {} 152 | } 153 | 154 | backoff.spin(); 155 | } 156 | } 157 | } 158 | 159 | impl Drop for MSQueue { 160 | fn drop(&mut self) { 161 | unsafe { 162 | let guard = unprotected(); 163 | 164 | while self.try_pop().is_some() {} 165 | 166 | let dummy = self.head.load(Ordering::Relaxed, guard); 167 | drop(dummy.into_owned()); 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/queue/mod.rs: -------------------------------------------------------------------------------- 1 | mod fclock; 2 | mod lockfree; 3 | mod mutex; 4 | mod spinlock; 5 | 6 | pub use fclock::FCQueue; 7 | pub use lockfree::MSQueue; 8 | pub use mutex::MutexQueue; 9 | pub use mutex::TwoMutexQueue; 10 | pub use spinlock::SpinLockQueue; 11 | pub use spinlock::TwoSpinLockQueue; 12 | 13 | use std::{fmt::Debug, mem, mem::MaybeUninit, ptr, ptr::NonNull}; 14 | 15 | pub trait SequentialQueue { 16 | fn new() -> Self; 17 | fn push(&mut self, value: V); 18 | fn pop(&mut self) -> Option; 19 | } 20 | 21 | pub trait ConcurrentQueue { 22 | fn new() -> Self; 23 | fn push(&self, value: V); 24 | /// non-blocking pop that can return `None` when the queue is observed as Empty. 25 | fn try_pop(&self) -> Option; 26 | /// blocking pop that can wait for returing value. 
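The `MSQueue` above is the classic Michael-Scott lock-free queue. A small multi-producer/multi-consumer sketch follows; the thread counts and value ranges are arbitrary.

```rust
use cds::queue::{ConcurrentQueue, MSQueue};
use std::sync::Arc;
use std::thread;

fn main() {
    let queue: Arc<MSQueue<u64>> = Arc::new(MSQueue::new());

    // Two producers push disjoint ranges; two consumers drain the queue.
    let producers: Vec<_> = (0..2u64)
        .map(|t| {
            let q = Arc::clone(&queue);
            thread::spawn(move || {
                for i in 0..10_000u64 {
                    q.push(t * 10_000 + i);
                }
            })
        })
        .collect();
    let consumers: Vec<_> = (0..2)
        .map(|_| {
            let q = Arc::clone(&queue);
            thread::spawn(move || {
                let mut sum = 0u64;
                for _ in 0..10_000 {
                    // pop() spins with backoff until an element is available.
                    sum += q.pop();
                }
                sum
            })
        })
        .collect();

    for p in producers {
        p.join().unwrap();
    }
    let total: u64 = consumers.into_iter().map(|c| c.join().unwrap()).sum();
    assert_eq!(total, (0..20_000u64).sum::<u64>());
}
```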
27 | fn pop(&self) -> V; 28 | } 29 | 30 | // simple sequential queue 31 | pub struct Queue { 32 | head: NonNull>, 33 | tail: NonNull>, 34 | } 35 | 36 | struct Node { 37 | value: MaybeUninit, 38 | next: Option>>, 39 | } 40 | 41 | impl Node { 42 | fn new(value: MaybeUninit) -> Self { 43 | Self { value, next: None } 44 | } 45 | 46 | fn new_non_null(value: MaybeUninit) -> NonNull { 47 | let node = Box::new(Self::new(value)); 48 | NonNull::new(Box::leak(node)).unwrap() 49 | } 50 | } 51 | 52 | impl Queue { 53 | pub fn is_empty(&self) -> bool { 54 | unsafe { self.head.as_ref().next.is_none() } 55 | } 56 | 57 | pub fn top(&self) -> Option<&V> { 58 | unsafe { 59 | match self.head.as_ref().next.as_ref() { 60 | Some(node) => Some(node.as_ref().value.assume_init_ref()), 61 | None => None, 62 | } 63 | } 64 | } 65 | } 66 | 67 | impl SequentialQueue for Queue { 68 | fn new() -> Self { 69 | let dummy = Node::new_non_null(MaybeUninit::uninit()); 70 | 71 | Self { 72 | head: dummy, 73 | tail: dummy, 74 | } 75 | } 76 | 77 | fn push(&mut self, value: V) { 78 | let node = Node::new_non_null(MaybeUninit::new(value)); 79 | 80 | let tail = unsafe { self.tail.as_mut() }; 81 | 82 | tail.next = Some(node); 83 | self.tail = node; 84 | } 85 | 86 | fn pop(&mut self) -> Option { 87 | unsafe { 88 | let head = self.head.as_mut(); 89 | 90 | if let Some(mut next) = head.next { 91 | let value = mem::replace(&mut next.as_mut().value, MaybeUninit::uninit()); 92 | self.head = next; 93 | drop(Box::from_raw(head)); 94 | 95 | Some(value.assume_init()) 96 | } else { 97 | None 98 | } 99 | } 100 | } 101 | } 102 | 103 | impl Drop for Queue { 104 | fn drop(&mut self) { 105 | while self.pop().is_some() {} 106 | 107 | unsafe { 108 | drop(Box::from_raw(self.head.as_ptr())); 109 | } 110 | } 111 | } 112 | 113 | // fat node sequential queue 114 | const FAT_SIZE: u8 = 16; 115 | 116 | pub struct FatNodeQueue { 117 | head: NonNull>, 118 | tail: NonNull>, 119 | } 120 | 121 | struct FatNode { 122 | head: u8, 123 | tail: u8, 124 | values: [V; FAT_SIZE as usize], // very unsafe since assume_init_array is not stable... 
125 | next: Option>>, 126 | } 127 | 128 | impl Debug for FatNode { 129 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 130 | f.debug_struct("FatNode") 131 | .field( 132 | "values", 133 | &self.values.get((self.head as usize)..(self.tail as usize)), 134 | ) 135 | .field("head", &self.head) 136 | .field("tail", &self.tail) 137 | .field("next", &self.next.map(|next| unsafe { next.as_ref() })) 138 | .finish() 139 | } 140 | } 141 | 142 | impl Drop for FatNode { 143 | fn drop(&mut self) { 144 | for i in self.head..self.tail { 145 | unsafe { drop(ptr::read(self.values.get_unchecked(i as usize))) }; 146 | } 147 | 148 | mem::forget(self); 149 | } 150 | } 151 | 152 | impl FatNode { 153 | #[allow(deprecated, invalid_value)] 154 | fn new() -> Self { 155 | Self { 156 | head: 0, 157 | tail: 0, 158 | values: unsafe { mem::uninitialized() }, 159 | next: None, 160 | } 161 | } 162 | } 163 | 164 | impl Debug for FatNodeQueue { 165 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 166 | unsafe { 167 | f.debug_struct("FatNodeQueue") 168 | .field("inner", &self.head.as_ref()) 169 | .finish() 170 | } 171 | } 172 | } 173 | 174 | impl FatNodeQueue { 175 | pub fn is_empty(&self) -> bool { 176 | unsafe { 177 | if let Some(next) = self.head.as_ref().next { 178 | let next_ref = next.as_ref(); 179 | 180 | if next_ref.head != next_ref.tail { 181 | return false; 182 | } 183 | } 184 | } 185 | 186 | true 187 | } 188 | 189 | pub fn top(&self) -> Option<&V> { 190 | unsafe { 191 | match self.head.as_ref().next.as_ref() { 192 | Some(node) => { 193 | let node_ref = node.as_ref(); 194 | 195 | Some(&node_ref.values.get_unchecked(node_ref.head as usize)) 196 | } 197 | None => None, 198 | } 199 | } 200 | } 201 | } 202 | 203 | impl SequentialQueue for FatNodeQueue { 204 | fn new() -> Self { 205 | let dummy = unsafe { NonNull::new_unchecked(Box::leak(Box::new(FatNode::new()))) }; 206 | 207 | Self { 208 | head: dummy, 209 | tail: dummy, 210 | } 211 | } 212 | 213 | fn push(&mut self, value: V) { 214 | unsafe { 215 | let tail = self.tail.as_mut(); 216 | 217 | if self.head != self.tail && tail.tail < FAT_SIZE { 218 | *tail.values.get_unchecked_mut(tail.tail as usize) = value; 219 | tail.tail += 1; 220 | return; 221 | } 222 | 223 | let mut node = FatNode::new(); 224 | node.values[0] = value; 225 | node.tail = 1; 226 | 227 | let node = NonNull::new_unchecked(Box::leak(Box::new(node))); 228 | tail.next = Some(node); 229 | self.tail = node; 230 | } 231 | } 232 | 233 | fn pop(&mut self) -> Option { 234 | unsafe { 235 | let head = self.head.as_mut(); 236 | 237 | if let Some(mut next) = head.next { 238 | let next_ref = next.as_mut(); 239 | 240 | if next_ref.head == next_ref.tail { 241 | return None; 242 | } 243 | 244 | let value = ptr::read(next_ref.values.get_unchecked(next_ref.head as usize)); 245 | next_ref.head += 1; 246 | 247 | if next_ref.head == FAT_SIZE { 248 | self.head = next; 249 | drop(Box::from(head)); 250 | } 251 | 252 | Some(value) 253 | } else { 254 | None 255 | } 256 | } 257 | } 258 | } 259 | 260 | impl Drop for FatNodeQueue { 261 | fn drop(&mut self) { 262 | while self.pop().is_some() {} 263 | 264 | unsafe { 265 | drop(Box::from_raw(self.head.as_ptr())); 266 | } 267 | } 268 | } 269 | -------------------------------------------------------------------------------- /src/queue/mutex.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | mem::{self, MaybeUninit}, 3 | ops::DerefMut, 4 | ptr::NonNull, 5 | sync::Mutex, 6 | }; 7 | 8 | use 
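Both sequential queues above share the `SequentialQueue` interface; `FatNodeQueue` packs up to `FAT_SIZE` (16) values per node to amortize allocation. A hedged usage sketch:

```rust
use cds::queue::{FatNodeQueue, Queue, SequentialQueue};

fn main() {
    let mut plain: Queue<u32> = Queue::new();
    let mut fat: FatNodeQueue<u32> = FatNodeQueue::new();

    for i in 0..100 {
        plain.push(i);
        fat.push(i);
    }

    // top() peeks at the oldest element without removing it.
    assert_eq!(plain.top(), Some(&0));

    // Both dequeue in FIFO order through the same trait.
    for i in 0..100 {
        assert_eq!(plain.pop(), Some(i));
        assert_eq!(fat.pop(), Some(i));
    }
    assert_eq!(plain.pop(), None);
    assert_eq!(fat.pop(), None);
}
```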
crossbeam_utils::{Backoff, CachePadded}; 9 | 10 | use super::{ConcurrentQueue, Node, Queue, SequentialQueue}; 11 | 12 | pub struct MutexQueue { 13 | queue: Mutex>, 14 | } 15 | 16 | unsafe impl Send for MutexQueue {} 17 | unsafe impl Sync for MutexQueue {} 18 | 19 | impl ConcurrentQueue for MutexQueue { 20 | fn new() -> Self { 21 | Self { 22 | queue: Mutex::new(Queue::new()), 23 | } 24 | } 25 | 26 | fn push(&self, value: V) { 27 | self.queue.lock().unwrap().push(value); 28 | } 29 | 30 | fn try_pop(&self) -> Option { 31 | self.queue.lock().unwrap().pop() 32 | } 33 | 34 | fn pop(&self) -> V { 35 | let backoff = Backoff::new(); 36 | 37 | loop { 38 | match self.try_pop() { 39 | Some(value) => return value, 40 | None => {} 41 | } 42 | 43 | backoff.snooze(); 44 | } 45 | } 46 | } 47 | 48 | pub struct TwoMutexQueue { 49 | head: CachePadded>>>, 50 | tail: CachePadded>>>, 51 | } 52 | 53 | unsafe impl Send for TwoMutexQueue {} 54 | unsafe impl Sync for TwoMutexQueue {} 55 | 56 | impl ConcurrentQueue for TwoMutexQueue { 57 | fn new() -> Self { 58 | let dummy = Node::new_non_null(MaybeUninit::uninit()); 59 | 60 | Self { 61 | head: CachePadded::new(Mutex::new(dummy)), 62 | tail: CachePadded::new(Mutex::new(dummy)), 63 | } 64 | } 65 | 66 | fn push(&self, value: V) { 67 | let node = Node::new_non_null(MaybeUninit::new(value)); 68 | 69 | let mut lock_guard = self.tail.lock().unwrap(); 70 | 71 | unsafe { 72 | lock_guard.as_mut().next = Some(node); 73 | *lock_guard.deref_mut() = node; 74 | } 75 | } 76 | 77 | fn try_pop(&self) -> Option { 78 | unsafe { 79 | let mut lock_guard = self.head.lock().unwrap(); 80 | 81 | let head_ref = lock_guard.as_mut(); 82 | 83 | if let Some(mut next) = head_ref.next { 84 | let value = mem::replace(&mut next.as_mut().value, MaybeUninit::uninit()); 85 | *lock_guard.deref_mut() = next; 86 | 87 | Some(value.assume_init()) 88 | } else { 89 | None 90 | } 91 | } 92 | } 93 | 94 | fn pop(&self) -> V { 95 | let backoff = Backoff::new(); 96 | 97 | loop { 98 | match self.try_pop() { 99 | Some(value) => return value, 100 | None => {} 101 | } 102 | 103 | backoff.snooze(); 104 | } 105 | } 106 | } 107 | 108 | impl Drop for TwoMutexQueue { 109 | fn drop(&mut self) { 110 | while let Some(_) = self.try_pop() {} 111 | 112 | unsafe { 113 | drop(Box::from_raw(self.head.lock().unwrap().as_ptr())); 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/queue/spinlock.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | mem::{self, MaybeUninit}, 3 | ops::DerefMut, 4 | ptr::NonNull, 5 | sync::Arc, 6 | }; 7 | 8 | use crossbeam_utils::{Backoff, CachePadded}; 9 | 10 | use super::{ConcurrentQueue, Node, Queue, SequentialQueue}; 11 | 12 | use crate::lock::spinlock::SpinLock; 13 | 14 | pub struct SpinLockQueue { 15 | queue: Arc>>, 16 | } 17 | 18 | unsafe impl Send for SpinLockQueue {} 19 | unsafe impl Sync for SpinLockQueue {} 20 | 21 | impl ConcurrentQueue for SpinLockQueue { 22 | fn new() -> Self { 23 | Self { 24 | queue: Arc::new(SpinLock::new(Queue::new())), 25 | } 26 | } 27 | 28 | fn push(&self, value: V) { 29 | let queue = self.queue.clone(); 30 | let mut lock_guard = queue.lock(); 31 | lock_guard.push(value); 32 | } 33 | 34 | fn try_pop(&self) -> Option { 35 | let queue = self.queue.clone(); 36 | let mut lock_guard = queue.lock(); 37 | lock_guard.pop() 38 | } 39 | 40 | fn pop(&self) -> V { 41 | let backoff = Backoff::new(); 42 | 43 | loop { 44 | match self.try_pop() { 45 | Some(value) => return 
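`TwoMutexQueue` is the two-lock queue from the same Michael & Scott paper cited in `lockfree.rs`: thanks to the dummy node, `push` only takes the tail lock and `try_pop` only the head lock, so a producer and a consumer rarely contend. A small sketch (thread layout arbitrary):

```rust
use cds::queue::{ConcurrentQueue, TwoMutexQueue};
use std::sync::Arc;
use std::thread;

fn main() {
    let queue: Arc<TwoMutexQueue<u64>> = Arc::new(TwoMutexQueue::new());

    let producer = {
        let q = Arc::clone(&queue);
        thread::spawn(move || {
            for i in 0..10_000u64 {
                q.push(i);
            }
        })
    };
    let consumer = {
        let q = Arc::clone(&queue);
        // pop() spins with backoff while the queue is observed empty.
        thread::spawn(move || (0..10_000u64).map(|_| q.pop()).sum::<u64>())
    };

    producer.join().unwrap();
    assert_eq!(consumer.join().unwrap(), (0..10_000u64).sum::<u64>());
}
```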
value, 46 | None => {} 47 | } 48 | 49 | backoff.snooze(); 50 | } 51 | } 52 | } 53 | 54 | pub struct TwoSpinLockQueue { 55 | head: CachePadded>>>, 56 | tail: CachePadded>>>, 57 | } 58 | 59 | unsafe impl Send for TwoSpinLockQueue {} 60 | unsafe impl Sync for TwoSpinLockQueue {} 61 | 62 | impl ConcurrentQueue for TwoSpinLockQueue { 63 | fn new() -> Self { 64 | let dummy = Node::new_non_null(MaybeUninit::uninit()); 65 | 66 | Self { 67 | head: CachePadded::new(SpinLock::new(dummy)), 68 | tail: CachePadded::new(SpinLock::new(dummy)), 69 | } 70 | } 71 | 72 | fn push(&self, value: V) { 73 | let node = Node::new_non_null(MaybeUninit::new(value)); 74 | 75 | let mut lock_guard = self.tail.lock(); 76 | 77 | unsafe { 78 | lock_guard.as_mut().next = Some(node); 79 | *lock_guard.deref_mut() = node; 80 | } 81 | } 82 | 83 | fn try_pop(&self) -> Option { 84 | unsafe { 85 | let mut lock_guard = self.head.lock(); 86 | 87 | let head_ref = lock_guard.as_mut(); 88 | 89 | if let Some(mut next) = head_ref.next { 90 | let value = mem::replace(&mut next.as_mut().value, MaybeUninit::uninit()); 91 | *lock_guard.deref_mut() = next; 92 | 93 | Some(value.assume_init()) 94 | } else { 95 | None 96 | } 97 | } 98 | } 99 | 100 | fn pop(&self) -> V { 101 | let backoff = Backoff::new(); 102 | 103 | loop { 104 | match self.try_pop() { 105 | Some(value) => return value, 106 | None => {} 107 | } 108 | 109 | backoff.snooze(); 110 | } 111 | } 112 | } 113 | 114 | impl Drop for TwoSpinLockQueue { 115 | fn drop(&mut self) { 116 | while let Some(_) = self.try_pop() {} 117 | 118 | unsafe { 119 | drop(Box::from_raw(self.head.lock().as_ptr())); 120 | } 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /src/stack/lock.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Mutex; 2 | 3 | use crossbeam_utils::Backoff; 4 | 5 | use crate::lock::spinlock::SpinLock; 6 | 7 | use super::{ConcurrentStack, Stack}; 8 | 9 | pub struct MutexStack { 10 | stack: Mutex>, 11 | } 12 | 13 | impl ConcurrentStack for MutexStack { 14 | fn new() -> Self { 15 | Self { 16 | stack: Mutex::new(Stack::new()), 17 | } 18 | } 19 | 20 | fn push(&self, value: V) { 21 | self.stack.lock().unwrap().push(value); 22 | } 23 | 24 | fn try_pop(&self) -> Option { 25 | let value = match self.stack.lock() { 26 | Ok(mut guard) => guard.pop(), 27 | Err(_) => unreachable!(), 28 | }; 29 | 30 | value 31 | } 32 | 33 | fn pop(&self) -> V { 34 | let backoff = Backoff::new(); 35 | 36 | loop { 37 | match self.try_pop() { 38 | Some(value) => return value, 39 | None => {} 40 | } 41 | 42 | backoff.snooze(); 43 | } 44 | } 45 | } 46 | 47 | pub struct SpinLockStack { 48 | stack: SpinLock>, 49 | } 50 | 51 | impl ConcurrentStack for SpinLockStack { 52 | fn new() -> Self { 53 | Self { 54 | stack: SpinLock::new(Stack::new()), 55 | } 56 | } 57 | 58 | fn push(&self, value: V) { 59 | let mut guard = self.stack.lock(); 60 | 61 | guard.push(value); 62 | } 63 | 64 | fn try_pop(&self) -> Option { 65 | let mut guard = self.stack.lock(); 66 | 67 | guard.pop() 68 | } 69 | 70 | fn pop(&self) -> V { 71 | let backoff = Backoff::new(); 72 | 73 | loop { 74 | match self.try_pop() { 75 | Some(value) => return value, 76 | None => {} 77 | } 78 | 79 | backoff.snooze(); 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/stack/lockfree.rs: -------------------------------------------------------------------------------- 1 | use std::{mem::ManuallyDrop, ptr, 
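The two lock-based stacks above expose the same `ConcurrentStack` trait as the lock-free ones that follow, so a generic helper can exercise any of them. A hedged sketch:

```rust
use cds::stack::{ConcurrentStack, MutexStack, SpinLockStack};

// Generic over the ConcurrentStack trait, so one test body works for every
// implementation (the lock-free stacks below satisfy it as well).
fn exercise<S: ConcurrentStack<u32>>() {
    let stack = S::new();
    for i in 0..100 {
        stack.push(i);
    }
    // LIFO order: the last pushed value comes out first.
    for i in (0..100).rev() {
        assert_eq!(stack.try_pop(), Some(i));
    }
    assert_eq!(stack.try_pop(), None);
}

fn main() {
    exercise::<MutexStack<u32>>();
    exercise::<SpinLockStack<u32>>();
}
```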
sync::atomic::Ordering, thread, time::Duration}; 2 | 3 | use crossbeam_epoch::{pin, Atomic, Guard, Owned, Shared}; 4 | use crossbeam_utils::Backoff; 5 | use rand::{thread_rng, Rng}; 6 | 7 | use super::ConcurrentStack; 8 | 9 | pub struct TreiberStack { 10 | head: Atomic>, 11 | } 12 | 13 | impl Default for TreiberStack { 14 | fn default() -> Self { 15 | Self::new() 16 | } 17 | } 18 | 19 | struct Node { 20 | value: ManuallyDrop, 21 | next: Atomic>, 22 | } 23 | 24 | impl Node { 25 | fn new(value: V) -> Self { 26 | Self { 27 | value: ManuallyDrop::new(value), 28 | next: Atomic::null(), 29 | } 30 | } 31 | } 32 | 33 | impl TreiberStack { 34 | pub fn is_empty(&self) -> bool { 35 | self.head.load(Ordering::Relaxed, &pin()).is_null() 36 | } 37 | 38 | pub fn top(&self) -> Option 39 | where 40 | V: Clone, 41 | { 42 | if let Some(node) = unsafe { self.head.load(Ordering::Acquire, &pin()).as_ref() } { 43 | Some(ManuallyDrop::into_inner(node.value.clone())) 44 | } else { 45 | None 46 | } 47 | } 48 | 49 | fn treiber_try_push(&self, node: Owned>, guard: &Guard) -> Result<(), Owned>> { 50 | let head = self.head.load(Ordering::Relaxed, guard); 51 | node.next.store(head, Ordering::Relaxed); 52 | 53 | match self 54 | .head 55 | .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed, guard) 56 | { 57 | Ok(_) => Ok(()), 58 | Err(e) => Err(e.new), 59 | } 60 | } 61 | 62 | fn treiber_try_pop(&self, guard: &Guard) -> Result, ()> { 63 | let head = self.head.load(Ordering::Acquire, guard); 64 | 65 | if let Some(h) = unsafe { head.as_ref() } { 66 | let next = h.next.load(Ordering::Relaxed, guard); 67 | 68 | if self 69 | .head 70 | .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed, guard) 71 | .is_ok() 72 | { 73 | unsafe { guard.defer_destroy(head) }; 74 | return unsafe { Ok(Some(ManuallyDrop::into_inner(ptr::read(&(*h).value)))) }; 75 | } 76 | 77 | return Err(()); 78 | } else { 79 | return Ok(None); 80 | } 81 | } 82 | } 83 | 84 | impl ConcurrentStack for TreiberStack { 85 | fn new() -> Self { 86 | Self { 87 | head: Atomic::null(), 88 | } 89 | } 90 | 91 | fn push(&self, value: V) { 92 | let guard = pin(); 93 | 94 | let mut node = Owned::new(Node::new(value)); 95 | let backoff = Backoff::new(); 96 | 97 | while let Err(e) = self.treiber_try_push(node, &guard) { 98 | node = e; 99 | backoff.spin(); 100 | } 101 | } 102 | 103 | fn try_pop(&self) -> Option { 104 | let guard = pin(); 105 | 106 | let backoff = Backoff::new(); 107 | 108 | loop { 109 | if let Ok(value) = self.treiber_try_pop(&guard) { 110 | return value; 111 | } 112 | 113 | backoff.spin(); 114 | } 115 | } 116 | 117 | fn pop(&self) -> V { 118 | let backoff = Backoff::new(); 119 | 120 | loop { 121 | match self.try_pop() { 122 | Some(value) => return value, 123 | None => {} 124 | } 125 | 126 | backoff.spin(); 127 | } 128 | } 129 | } 130 | 131 | const ELIM_SIZE: usize = 4; 132 | const ELIM_DELAY: Duration = Duration::from_millis(1); 133 | 134 | /// Elimination-Backoff Stack 135 | /// 136 | /// the tag of slot 137 | /// 0: empty slot 138 | /// 1: push slot 139 | /// 2: pop slot 140 | /// 3: paired slot 141 | pub struct EBStack { 142 | stack: TreiberStack, 143 | slots: [Atomic>; ELIM_SIZE], 144 | } 145 | 146 | #[inline] 147 | fn rand_idx() -> usize { 148 | thread_rng().gen_range(0..ELIM_SIZE) 149 | } 150 | 151 | impl Default for EBStack { 152 | fn default() -> Self { 153 | Self::new() 154 | } 155 | } 156 | 157 | impl EBStack { 158 | fn elem_try_push(&self, node: Owned>, guard: &Guard) -> Result<(), Owned>> { 159 | let node = match 
self.stack.treiber_try_push(node, guard) { 160 | Ok(_) => return Ok(()), 161 | Err(node) => node.into_shared(guard), 162 | }; 163 | 164 | let slot = unsafe { self.slots.get_unchecked(rand_idx()) }; 165 | let s = slot.load(Ordering::Relaxed, guard); 166 | let tag = s.tag(); 167 | 168 | let result = match tag { 169 | 0 => slot.compare_exchange( 170 | s, 171 | node.with_tag(1), 172 | Ordering::Release, 173 | Ordering::Relaxed, 174 | guard, 175 | ), 176 | 2 => slot.compare_exchange( 177 | s, 178 | node.with_tag(3), 179 | Ordering::Release, 180 | Ordering::Relaxed, 181 | guard, 182 | ), 183 | _ => return unsafe { Err(node.into_owned()) }, 184 | }; 185 | 186 | if let Err(e) = result { 187 | return unsafe { Err(e.new.into_owned()) }; 188 | } 189 | 190 | thread::sleep(ELIM_DELAY); 191 | 192 | let s = slot.load(Ordering::Relaxed, guard); 193 | 194 | if tag == 0 && s.tag() == 1 { 195 | return match slot.compare_exchange( 196 | node.with_tag(1), 197 | Shared::null(), 198 | Ordering::Relaxed, 199 | Ordering::Relaxed, 200 | guard, 201 | ) { 202 | Ok(_) => unsafe { Err(s.into_owned()) }, 203 | Err(_) => Ok(()), 204 | }; 205 | } 206 | 207 | Ok(()) 208 | } 209 | 210 | fn elem_try_pop(&self, guard: &Guard) -> Result, ()> { 211 | if let Ok(value) = self.stack.treiber_try_pop(guard) { 212 | return Ok(value); 213 | } 214 | 215 | let slot = unsafe { self.slots.get_unchecked(rand_idx()) }; 216 | let s = slot.load(Ordering::Relaxed, guard); 217 | 218 | let result = match s.tag() { 219 | 0 => slot.compare_exchange( 220 | s, 221 | s.with_tag(2), 222 | Ordering::Relaxed, 223 | Ordering::Relaxed, 224 | guard, 225 | ), 226 | 1 => slot.compare_exchange( 227 | s, 228 | s.with_tag(3), 229 | Ordering::Relaxed, 230 | Ordering::Relaxed, 231 | guard, 232 | ), 233 | _ => return Err(()), 234 | }; 235 | 236 | if result.is_err() { 237 | return Err(()); 238 | } 239 | 240 | thread::sleep(ELIM_DELAY); 241 | 242 | let s = slot.load(Ordering::Acquire, guard); 243 | 244 | if s.tag() == 3 { 245 | slot.store(Shared::null(), Ordering::Relaxed); 246 | let node = unsafe { s.into_owned() }; 247 | let value = ManuallyDrop::into_inner(node.into_box().value); 248 | Ok(Some(value)) 249 | } else { 250 | slot.store(Shared::null(), Ordering::Relaxed); 251 | Err(()) 252 | } 253 | } 254 | } 255 | 256 | impl ConcurrentStack for EBStack { 257 | fn new() -> Self { 258 | Self { 259 | stack: TreiberStack::new(), 260 | slots: Default::default(), 261 | } 262 | } 263 | 264 | fn push(&self, value: V) { 265 | let guard = pin(); 266 | 267 | let mut node = Owned::new(Node::new(value)); 268 | 269 | while let Err(e) = self.elem_try_push(node, &guard) { 270 | node = e; 271 | } 272 | } 273 | 274 | fn try_pop(&self) -> Option { 275 | let guard = pin(); 276 | 277 | loop { 278 | if let Ok(value) = self.elem_try_pop(&guard) { 279 | return value; 280 | } 281 | } 282 | } 283 | 284 | fn pop(&self) -> V { 285 | let backoff = Backoff::new(); 286 | 287 | loop { 288 | match self.try_pop() { 289 | Some(value) => return value, 290 | None => {} 291 | } 292 | 293 | backoff.spin(); 294 | } 295 | } 296 | } 297 | -------------------------------------------------------------------------------- /src/stack/mod.rs: -------------------------------------------------------------------------------- 1 | mod lock; 2 | mod lockfree; 3 | 4 | pub use lock::MutexStack; 5 | pub use lock::SpinLockStack; 6 | pub use lockfree::EBStack; 7 | pub use lockfree::TreiberStack; 8 | 9 | use std::mem; 10 | 11 | pub trait ConcurrentStack { 12 | fn new() -> Self; 13 | fn push(&self, value: V); 14 | // 
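`EBStack` layers elimination backoff on top of `TreiberStack`: a push and a pop that collide on the same slot exchange the value directly and never touch the head pointer. A hedged stress-style sketch follows; the thread and iteration counts are arbitrary.

```rust
use cds::stack::{ConcurrentStack, EBStack, TreiberStack};
use std::sync::Arc;
use std::thread;

fn main() {
    let stack: Arc<EBStack<u64>> = Arc::new(EBStack::new());

    let handles: Vec<_> = (0..8)
        .map(|_| {
            let s = Arc::clone(&stack);
            thread::spawn(move || {
                for i in 0..1_000u64 {
                    s.push(i);
                    // Popping right after pushing maximizes elimination chances.
                    s.pop();
                }
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }

    // Every pushed value was popped again, so the stack ends up empty.
    assert!(stack.try_pop().is_none());

    // TreiberStack offers the same interface without the elimination array.
    let plain: TreiberStack<u64> = TreiberStack::new();
    plain.push(1);
    assert_eq!(plain.pop(), 1);
}
```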
non-blocking pop that can return `None` when the stack is observed as Empty. 15 | fn try_pop(&self) -> Option; 16 | // blocking pop that can wait for returing value. 17 | fn pop(&self) -> V; 18 | } 19 | 20 | // simple sequential stack 21 | pub struct Stack { 22 | head: Option>>, 23 | } 24 | 25 | struct Node { 26 | value: V, 27 | next: Option>>, 28 | } 29 | 30 | impl Node { 31 | fn new(value: V) -> Node { 32 | Node { value, next: None } 33 | } 34 | } 35 | 36 | impl Stack { 37 | pub fn new() -> Stack { 38 | Stack { head: None } 39 | } 40 | 41 | pub fn is_empty(&self) -> bool { 42 | self.head.is_none() 43 | } 44 | 45 | pub fn top(&self) -> Option<&V> { 46 | match &self.head { 47 | Some(node) => Some(&node.as_ref().value), 48 | None => None, 49 | } 50 | } 51 | 52 | pub fn push(&mut self, value: V) { 53 | let node = Box::new(Node::new(value)); 54 | 55 | let prev = mem::replace(&mut self.head, Some(node)); 56 | self.head.as_mut().unwrap().next = prev; 57 | } 58 | 59 | pub fn pop(&mut self) -> Option { 60 | if self.head.is_some() { 61 | let mut top = mem::replace(&mut self.head, None); 62 | self.head = mem::replace(&mut top.as_mut().unwrap().next, None); 63 | 64 | return Some(top.unwrap().value); 65 | } 66 | 67 | None 68 | } 69 | } 70 | 71 | impl Drop for Stack { 72 | fn drop(&mut self) { 73 | while self.pop().is_some() {} 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod random; 2 | 3 | #[macro_export] 4 | macro_rules! ok_or { 5 | ($e:expr, $err:expr) => {{ 6 | match $e { 7 | Ok(r) => r, 8 | Err(_) => $err, 9 | } 10 | }}; 11 | } 12 | 13 | #[macro_export] 14 | macro_rules! some_or { 15 | ($e:expr, $err:expr) => {{ 16 | match $e { 17 | Some(r) => r, 18 | None => $err, 19 | } 20 | }}; 21 | } 22 | -------------------------------------------------------------------------------- /src/util/random.rs: -------------------------------------------------------------------------------- 1 | use std::u128; 2 | 3 | use rand::{distributions::Alphanumeric, prelude::ThreadRng, Rng}; 4 | pub trait Random { 5 | fn gen(rng: &mut ThreadRng) -> Self; 6 | } 7 | 8 | const RANDOM_STRING_MIN: usize = 0; 9 | const RANDOM_STRING_MAX: usize = 10; 10 | 11 | impl Random for String { 12 | // get random string whose length is in [RANDOM_STRING_MIN, RANDOM_STRING_MAX) 13 | fn gen(rng: &mut ThreadRng) -> Self { 14 | let length: usize = rng.gen_range(RANDOM_STRING_MIN..RANDOM_STRING_MAX); 15 | 16 | rng.sample_iter(&Alphanumeric) 17 | .map(char::from) 18 | .take(length) 19 | .collect() 20 | } 21 | } 22 | 23 | impl Random for u128 { 24 | fn gen(rng: &mut ThreadRng) -> Self { 25 | rng.gen() 26 | } 27 | } 28 | 29 | impl Random for u64 { 30 | fn gen(rng: &mut ThreadRng) -> Self { 31 | rng.gen() 32 | } 33 | } 34 | 35 | impl Random for u32 { 36 | fn gen(rng: &mut ThreadRng) -> Self { 37 | rng.gen() 38 | } 39 | } 40 | 41 | impl Random for u16 { 42 | fn gen(rng: &mut ThreadRng) -> Self { 43 | rng.gen() 44 | } 45 | } 46 | 47 | impl Random for u8 { 48 | fn gen(rng: &mut ThreadRng) -> Self { 49 | rng.gen() 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /tests/avltree/mod.rs: -------------------------------------------------------------------------------- 1 | mod rwlock; 2 | mod seqlock; 3 | 4 | use crate::util::map::stress_sequential; 5 | use cds::{avltree::AVLTree, map::SequentialMap}; 6 | 7 | #[test] 8 | fn 
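The `ok_or!`/`some_or!` macros and the `Random` trait above are small helpers used by the tests and benchmarks. A hedged sketch of what they do, assuming the same `rand` 0.8 the crate depends on:

```rust
use cds::util::random::Random;
use cds::{ok_or, some_or};
use rand::thread_rng;

fn main() {
    // The macros unwrap a Result/Option or evaluate the fallback expression,
    // which may also be `continue`, `break`, or `return` inside a loop body.
    let parsed: Result<u32, std::num::ParseIntError> = "42".parse();
    assert_eq!(ok_or!(parsed, 0), 42);

    let missing: Option<u32> = None;
    assert_eq!(some_or!(missing, 7), 7);

    // Random::gen produces the random keys used by the stress tests, e.g.
    // alphanumeric strings of length in [0, 10) and plain random integers.
    let mut rng = thread_rng();
    let key = String::gen(&mut rng);
    assert!(key.len() < 10);
    let _id: u64 = Random::gen(&mut rng);
}
```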
test_insert_lookup_avl_tree() { 9 | let mut avl: AVLTree = AVLTree::new(); 10 | 11 | for i in 0..65535 { 12 | // 65535 = 2^16 - 1 13 | assert_eq!(avl.insert(&i, i), Ok(())); 14 | } 15 | 16 | assert_eq!(avl.get_height(), 16); 17 | assert_eq!(avl.insert(&65536, 65536), Ok(())); 18 | assert_eq!(avl.get_height(), 17); 19 | 20 | for i in 0..65535 { 21 | assert_eq!(avl.lookup(&i), Some(&i)); 22 | } 23 | } 24 | 25 | #[test] 26 | fn test_remove_avl_tree() { 27 | let mut avl: AVLTree = AVLTree::new(); 28 | 29 | /* make tree like this 30 | * 31 | * 3 32 | * 2 4 33 | * 1 5 34 | */ 35 | 36 | assert_eq!(avl.insert(&3, 3), Ok(())); 37 | assert_eq!(avl.insert(&2, 2), Ok(())); 38 | assert_eq!(avl.insert(&4, 4), Ok(())); 39 | assert_eq!(avl.insert(&1, 1), Ok(())); 40 | assert_eq!(avl.insert(&5, 5), Ok(())); 41 | 42 | assert_eq!(avl.remove(&1), Ok(1)); // remove when the node is leaf 43 | assert_eq!(avl.insert(&1, 1), Ok(())); 44 | 45 | assert_eq!(avl.remove(&2), Ok(2)); // remove when the node has only left node 46 | assert_eq!(avl.remove(&4), Ok(4)); // remove when the node has only right node 47 | assert_eq!(avl.remove(&3), Ok(3)); // remove when the node has two nodes 48 | 49 | assert_eq!(avl.lookup(&1), Some(&1)); 50 | assert_eq!(avl.lookup(&5), Some(&5)); 51 | 52 | // side case of remove when the node has two nodes 53 | let mut avl: AVLTree = AVLTree::new(); 54 | assert_eq!(avl.insert(&4, 4), Ok(())); 55 | assert_eq!(avl.insert(&0, 0), Ok(())); 56 | assert_eq!(avl.insert(&-1, -1), Ok(())); 57 | assert_eq!(avl.insert(&5, 5), Ok(())); 58 | assert_eq!(avl.insert(&6, 6), Ok(())); 59 | assert_eq!(avl.insert(&2, 2), Ok(())); 60 | assert_eq!(avl.insert(&1, 1), Ok(())); 61 | 62 | assert_eq!(avl.remove(&4), Ok(4)); 63 | 64 | assert_eq!(avl.lookup(&-1), Some(&-1)); 65 | assert_eq!(avl.lookup(&0), Some(&0)); 66 | assert_eq!(avl.lookup(&1), Some(&1)); 67 | assert_eq!(avl.lookup(&2), Some(&2)); 68 | assert_eq!(avl.lookup(&5), Some(&5)); 69 | assert_eq!(avl.lookup(&6), Some(&6)); 70 | } 71 | 72 | #[test] 73 | fn stress_avl_tree() { 74 | stress_sequential::>(100_000); 75 | } 76 | -------------------------------------------------------------------------------- /tests/avltree/rwlock.rs: -------------------------------------------------------------------------------- 1 | use cds::{avltree::RwLockAVLTree, map::ConcurrentMap}; 2 | 3 | use crate::util::map::stress_concurrent_as_sequential; 4 | 5 | #[test] 6 | fn test_rwlock_avl_tree() { 7 | let num = 64; 8 | let avl: RwLockAVLTree = RwLockAVLTree::new(); 9 | 10 | for i in 0..num { 11 | assert_eq!(avl.insert(&i, i), Ok(())); 12 | } 13 | 14 | for i in 0..num { 15 | assert_eq!(avl.insert(&i, i), Err(i)); 16 | } 17 | 18 | assert_eq!(avl.get_height(), f32::log2(num as f32) as usize + 1); 19 | 20 | for i in 0..num { 21 | assert_eq!(avl.get(&i), Some(i)); 22 | } 23 | 24 | for i in 0..num { 25 | assert_eq!(avl.remove(&i), Ok(i)); 26 | } 27 | 28 | for i in 0..num { 29 | assert_eq!(avl.remove(&i), Err(())); 30 | } 31 | } 32 | 33 | #[test] 34 | fn stress_rwlock_avl_tree_sequential() { 35 | stress_concurrent_as_sequential::>(100_000); 36 | } 37 | 38 | // TODO: this DS may have deadlock 39 | // #[test] 40 | // fn stress_rwlock_avl_tree_concurrent() { 41 | // stress_concurrent::>(200_000, 16, false); 42 | // } 43 | 44 | // #[test] 45 | // fn assert_rwlock_avl_tree_concurrent() { 46 | // stress_concurrent::>(100_000, 32, true); 47 | // } 48 | -------------------------------------------------------------------------------- /tests/avltree/seqlock.rs: 
-------------------------------------------------------------------------------- 1 | use cds::{avltree::SeqLockAVLTree, map::ConcurrentMap}; 2 | 3 | use crate::util::map::{stress_concurrent, stress_concurrent_as_sequential}; 4 | 5 | #[test] 6 | fn test_seqlock_avl_tree() { 7 | let num = 64; 8 | let avl: SeqLockAVLTree = SeqLockAVLTree::new(); 9 | 10 | for i in 0..num { 11 | assert_eq!(avl.insert(&i, i), Ok(())); 12 | } 13 | 14 | for i in 0..num { 15 | assert_eq!(avl.insert(&i, i), Err(i)); 16 | } 17 | 18 | assert_eq!(avl.get_height(), f32::log2(num as f32) as usize + 1); 19 | 20 | for i in 0..num { 21 | assert_eq!(avl.get(&i), Some(i)); 22 | } 23 | 24 | for i in 0..num { 25 | assert_eq!(avl.remove(&i), Ok(i)); 26 | } 27 | 28 | for i in 0..num { 29 | assert_eq!(avl.remove(&i), Err(())); 30 | } 31 | } 32 | 33 | #[test] 34 | fn stress_seqlock_avl_tree_sequential() { 35 | stress_concurrent_as_sequential::>(100_000); 36 | } 37 | 38 | #[test] 39 | fn stress_seqlock_avl_tree_concurrent() { 40 | stress_concurrent::>(200_000, 16, false); 41 | } 42 | 43 | #[test] 44 | fn assert_seqlock_avl_tree_concurrent() { 45 | stress_concurrent::>(100_000, 32, true); 46 | stress_concurrent::>(100_000, 32, true); 47 | } 48 | -------------------------------------------------------------------------------- /tests/btree/mod.rs: -------------------------------------------------------------------------------- 1 | use cds::{btree::BTree, map::SequentialMap}; 2 | 3 | use crate::util::map::{stress_sequential}; 4 | 5 | #[test] 6 | fn test_insert_lookup_btree() { 7 | let num = 4095; 8 | let mut tree: BTree = BTree::new(); 9 | 10 | for i in 0..num { 11 | assert_eq!(tree.insert(&i, i), Ok(())); 12 | tree.assert(); 13 | // tree.print(); 14 | } 15 | 16 | for i in 0..num { 17 | assert_eq!(tree.lookup(&i), Some(&i)); 18 | } 19 | } 20 | 21 | #[test] 22 | fn test_remove_btree() { 23 | // CASE 0-1: remove on leaf root 24 | { 25 | let mut tree: BTree = BTree::new(); 26 | assert_eq!(tree.insert(&1, 1), Ok(())); 27 | // tree.print(); 28 | assert_eq!(tree.remove(&1), Ok(1)); 29 | // tree.print(); 30 | tree.assert(); 31 | } 32 | 33 | // CASE 0-2: remove on non-leaf root 34 | { 35 | let target = 2; 36 | 37 | let mut tree: BTree = BTree::new(); 38 | 39 | assert_eq!(tree.insert(&0, 0), Ok(())); 40 | assert_eq!(tree.insert(&2, 2), Ok(())); 41 | assert_eq!(tree.insert(&3, 3), Ok(())); 42 | assert_eq!(tree.insert(&4, 4), Ok(())); 43 | assert_eq!(tree.insert(&5, 5), Ok(())); 44 | assert_eq!(tree.insert(&1, 1), Ok(())); 45 | 46 | // tree.print(); 47 | assert_eq!(tree.remove(&target), Ok(target)); 48 | // tree.print(); 49 | 50 | for i in 0..5 { 51 | if i == target { 52 | assert_eq!(tree.lookup(&i), None); 53 | } else { 54 | assert_eq!(tree.lookup(&i), Some(&i)); 55 | } 56 | } 57 | tree.assert(); 58 | } 59 | 60 | // (parent_size, sibiling_size) 61 | // CASE 1-1: remove on (1, 1) with left leaf node 62 | { 63 | let target = 0; 64 | 65 | let mut tree: BTree = BTree::new(); 66 | 67 | for i in 0..3 { 68 | assert_eq!(tree.insert(&i, i), Ok(())); 69 | } 70 | 71 | // tree.print(); 72 | assert_eq!(tree.remove(&target), Ok(target)); 73 | // tree.print(); 74 | 75 | for i in 0..3 { 76 | if i == target { 77 | assert_eq!(tree.lookup(&i), None); 78 | } else { 79 | assert_eq!(tree.lookup(&i), Some(&i)); 80 | } 81 | } 82 | tree.assert(); 83 | } 84 | 85 | // CASE 5-1: remove on (1, 1) with right leaf node 86 | { 87 | let target = 2; 88 | 89 | let mut tree: BTree = BTree::new(); 90 | 91 | for i in 0..3 { 92 | assert_eq!(tree.insert(&i, i), Ok(())); 93 | } 94 | 95 
| // tree.print(); 96 | assert_eq!(tree.remove(&target), Ok(target)); 97 | // tree.print(); 98 | 99 | for i in 0..3 { 100 | if i == target { 101 | assert_eq!(tree.lookup(&i), None); 102 | } else { 103 | assert_eq!(tree.lookup(&i), Some(&i)); 104 | } 105 | } 106 | tree.assert(); 107 | } 108 | 109 | // (CASE 5-1 ->) CASE 5-2: remove on (1, 1) with right non-leaf node 110 | { 111 | let target = 5; 112 | let mut tree: BTree = BTree::new(); 113 | 114 | for i in 0..7 { 115 | assert_eq!(tree.insert(&i, i), Ok(())); 116 | } 117 | 118 | // tree.print(); 119 | assert_eq!(tree.remove(&target), Ok(target)); 120 | // tree.print(); 121 | 122 | for i in 0..3 { 123 | if i == target { 124 | assert_eq!(tree.lookup(&i), None); 125 | } else { 126 | assert_eq!(tree.lookup(&i), Some(&i)); 127 | } 128 | } 129 | tree.assert(); 130 | } 131 | 132 | // (CASE 5-1 ->) CASE 1-2: remove on (1, 1) with left non-leaf node 133 | { 134 | let target = 1; 135 | let mut tree: BTree = BTree::new(); 136 | 137 | for i in 0..7 { 138 | assert_eq!(tree.insert(&i, i), Ok(())); 139 | } 140 | 141 | // tree.print(); 142 | assert_eq!(tree.remove(&target), Ok(target)); 143 | // tree.print(); 144 | 145 | for i in 0..3 { 146 | if i == target { 147 | assert_eq!(tree.lookup(&i), None); 148 | } else { 149 | assert_eq!(tree.lookup(&i), Some(&i)); 150 | } 151 | } 152 | tree.assert(); 153 | } 154 | 155 | // CASE 2-1: remove on (1, 2) with left leaf node 156 | { 157 | let target = 0; 158 | 159 | let mut tree: BTree = BTree::new(); 160 | 161 | for i in 0..4 { 162 | assert_eq!(tree.insert(&i, i), Ok(())); 163 | } 164 | 165 | // tree.print(); 166 | assert_eq!(tree.remove(&target), Ok(target)); 167 | // tree.print(); 168 | 169 | for i in 0..4 { 170 | if i == target { 171 | assert_eq!(tree.lookup(&i), None); 172 | } else { 173 | assert_eq!(tree.lookup(&i), Some(&i)); 174 | } 175 | } 176 | tree.assert(); 177 | } 178 | 179 | // (CASE 5-1 ->) CASE 2-2: remove on (1, 2) with left non-leaf node 180 | { 181 | let target = 1; 182 | 183 | let mut tree: BTree = BTree::new(); 184 | 185 | for i in 0..9 { 186 | assert_eq!(tree.insert(&i, i), Ok(())); 187 | } 188 | 189 | // tree.print(); 190 | assert_eq!(tree.remove(&target), Ok(target)); 191 | // tree.print(); 192 | 193 | for i in 0..9 { 194 | if i == target { 195 | assert_eq!(tree.lookup(&i), None); 196 | } else { 197 | assert_eq!(tree.lookup(&i), Some(&i)); 198 | } 199 | } 200 | tree.assert(); 201 | } 202 | 203 | // CASE 6-1: remove on (1, 2) with right leaf node 204 | { 205 | let target = 3; 206 | 207 | let mut tree: BTree = BTree::new(); 208 | 209 | for i in 1..4 { 210 | assert_eq!(tree.insert(&i, i), Ok(())); 211 | } 212 | assert_eq!(tree.insert(&0, 0), Ok(())); 213 | 214 | // tree.print(); 215 | assert_eq!(tree.remove(&target), Ok(target)); 216 | // tree.print(); 217 | 218 | for i in 0..4 { 219 | if i == target { 220 | assert_eq!(tree.lookup(&i), None); 221 | } else { 222 | assert_eq!(tree.lookup(&i), Some(&i)); 223 | } 224 | } 225 | tree.assert(); 226 | } 227 | 228 | // (CASE 5-1 ->) CASE 6-2: remove on (1, 2) with right non-leaf node 229 | { 230 | let target = 7; 231 | 232 | let mut tree: BTree = BTree::new(); 233 | 234 | for i in 2..9 { 235 | assert_eq!(tree.insert(&i, i), Ok(())); 236 | } 237 | assert_eq!(tree.insert(&1, 1), Ok(())); 238 | assert_eq!(tree.insert(&0, 0), Ok(())); 239 | 240 | // tree.print(); 241 | assert_eq!(tree.remove(&target), Ok(target)); 242 | // tree.print(); 243 | 244 | for i in 0..9 { 245 | if i == target { 246 | assert_eq!(tree.lookup(&i), None); 247 | } else { 248 | 
assert_eq!(tree.lookup(&i), Some(&i)); 249 | } 250 | } 251 | tree.assert(); 252 | } 253 | 254 | // CASE 3-1: remove on (2, 1) with left leaf node 255 | { 256 | let target = 0; 257 | 258 | let mut tree: BTree = BTree::new(); 259 | 260 | for i in 0..5 { 261 | assert_eq!(tree.insert(&i, i), Ok(())); 262 | } 263 | 264 | // tree.print(); 265 | assert_eq!(tree.remove(&target), Ok(target)); 266 | // tree.print(); 267 | 268 | for i in 0..5 { 269 | if i == target { 270 | assert_eq!(tree.lookup(&i), None); 271 | } else { 272 | assert_eq!(tree.lookup(&i), Some(&i)); 273 | } 274 | } 275 | tree.assert(); 276 | } 277 | 278 | // (CASE 5-1 ->) CASE 3-2: remove on (2, 1) with left non-leaf node 279 | { 280 | let target = 1; 281 | 282 | let mut tree: BTree = BTree::new(); 283 | 284 | for i in 0..11 { 285 | assert_eq!(tree.insert(&i, i), Ok(())); 286 | } 287 | 288 | // tree.print(); 289 | assert_eq!(tree.remove(&target), Ok(target)); 290 | // tree.print(); 291 | 292 | for i in 0..11 { 293 | if i == target { 294 | assert_eq!(tree.lookup(&i), None); 295 | } else { 296 | assert_eq!(tree.lookup(&i), Some(&i)); 297 | } 298 | } 299 | tree.assert(); 300 | } 301 | 302 | // CASE 7-1: remove on (2, 1) with right leaf node 303 | { 304 | let target = 4; 305 | 306 | let mut tree: BTree = BTree::new(); 307 | 308 | for i in 0..5 { 309 | assert_eq!(tree.insert(&i, i), Ok(())); 310 | } 311 | 312 | // tree.print(); 313 | assert_eq!(tree.remove(&target), Ok(target)); 314 | // tree.print(); 315 | 316 | for i in 0..5 { 317 | if i == target { 318 | assert_eq!(tree.lookup(&i), None); 319 | } else { 320 | assert_eq!(tree.lookup(&i), Some(&i)); 321 | } 322 | } 323 | tree.assert(); 324 | } 325 | 326 | // (CASE 5-1 ->) CASE 7-2: remove on (2, 1) with right non-leaf node 327 | { 328 | let target = 9; 329 | 330 | let mut tree: BTree = BTree::new(); 331 | 332 | for i in 0..11 { 333 | assert_eq!(tree.insert(&i, i), Ok(())); 334 | } 335 | 336 | // tree.print(); 337 | assert_eq!(tree.remove(&target), Ok(target)); 338 | // tree.print(); 339 | 340 | for i in 0..11 { 341 | if i == target { 342 | assert_eq!(tree.lookup(&i), None); 343 | } else { 344 | assert_eq!(tree.lookup(&i), Some(&i)); 345 | } 346 | } 347 | tree.assert(); 348 | } 349 | 350 | // CASE 4-1: remove on (2, 2) with left leaf node 351 | { 352 | let target = 0; 353 | 354 | let mut tree: BTree = BTree::new(); 355 | 356 | assert_eq!(tree.insert(&0, 0), Ok(())); 357 | assert_eq!(tree.insert(&1, 1), Ok(())); 358 | assert_eq!(tree.insert(&2, 2), Ok(())); 359 | assert_eq!(tree.insert(&4, 4), Ok(())); 360 | assert_eq!(tree.insert(&5, 5), Ok(())); 361 | assert_eq!(tree.insert(&3, 3), Ok(())); 362 | 363 | // tree.print(); 364 | assert_eq!(tree.remove(&target), Ok(target)); 365 | // tree.print(); 366 | 367 | for i in 0..6 { 368 | if i == target { 369 | assert_eq!(tree.lookup(&i), None); 370 | } else { 371 | assert_eq!(tree.lookup(&i), Some(&i)); 372 | } 373 | } 374 | tree.assert(); 375 | } 376 | 377 | // (CASE 5-1 ->) CASE 4-2: remove on (2, 2) with left non-leaf node 378 | { 379 | let target = 1; 380 | 381 | let mut tree: BTree = BTree::new(); 382 | 383 | for i in 0..7 { 384 | assert_eq!(tree.insert(&i, i), Ok(())); 385 | } 386 | 387 | for i in 9..13 { 388 | assert_eq!(tree.insert(&i, i), Ok(())); 389 | } 390 | 391 | assert_eq!(tree.insert(&7, 7), Ok(())); 392 | assert_eq!(tree.insert(&8, 8), Ok(())); 393 | 394 | // tree.print(); 395 | assert_eq!(tree.remove(&target), Ok(target)); 396 | // tree.print(); 397 | 398 | for i in 0..13 { 399 | if i == target { 400 | assert_eq!(tree.lookup(&i), 
None); 401 | } else { 402 | assert_eq!(tree.lookup(&i), Some(&i)); 403 | } 404 | } 405 | tree.assert(); 406 | } 407 | 408 | // CASE 8-1: remove on (2, 2) with right leaf node 409 | { 410 | let target = 5; 411 | 412 | let mut tree: BTree = BTree::new(); 413 | 414 | assert_eq!(tree.insert(&0, 0), Ok(())); 415 | assert_eq!(tree.insert(&1, 1), Ok(())); 416 | assert_eq!(tree.insert(&2, 2), Ok(())); 417 | assert_eq!(tree.insert(&4, 4), Ok(())); 418 | assert_eq!(tree.insert(&5, 5), Ok(())); 419 | assert_eq!(tree.insert(&3, 3), Ok(())); 420 | 421 | // tree.print(); 422 | assert_eq!(tree.remove(&target), Ok(target)); 423 | // tree.print(); 424 | 425 | for i in 0..6 { 426 | if i == target { 427 | assert_eq!(tree.lookup(&i), None); 428 | } else { 429 | assert_eq!(tree.lookup(&i), Some(&i)); 430 | } 431 | } 432 | tree.assert(); 433 | } 434 | 435 | // CASE 8-2: remove on (2, 2) with right non-leaf node 436 | { 437 | let target = 11; 438 | 439 | let mut tree: BTree = BTree::new(); 440 | 441 | for i in 0..7 { 442 | assert_eq!(tree.insert(&i, i), Ok(())); 443 | } 444 | 445 | for i in 9..13 { 446 | assert_eq!(tree.insert(&i, i), Ok(())); 447 | } 448 | 449 | assert_eq!(tree.insert(&7, 7), Ok(())); 450 | assert_eq!(tree.insert(&8, 8), Ok(())); 451 | 452 | // tree.print(); 453 | assert_eq!(tree.remove(&target), Ok(target)); 454 | // tree.print(); 455 | 456 | for i in 0..13 { 457 | if i == target { 458 | assert_eq!(tree.lookup(&i), None); 459 | } else { 460 | assert_eq!(tree.lookup(&i), Some(&i)); 461 | } 462 | } 463 | tree.assert(); 464 | } 465 | } 466 | 467 | #[test] 468 | fn stress_btree() { 469 | stress_sequential::>(100_000); 470 | } 471 | -------------------------------------------------------------------------------- /tests/linkedlist/linkedlist.rs: -------------------------------------------------------------------------------- 1 | use crate::util::map::stress_sequential; 2 | use cds::linkedlist::LinkedList; 3 | use cds::map::SequentialMap; 4 | 5 | #[test] 6 | fn test_linkedlist() { 7 | let mut list: LinkedList = LinkedList::new(); 8 | 9 | assert_eq!(list.lookup(&1), None); 10 | 11 | assert_eq!(list.insert(&1, 1), Ok(())); 12 | assert_eq!(list.insert(&2, 2), Ok(())); 13 | assert_eq!(list.insert(&3, 3), Ok(())); 14 | assert_eq!(list.insert(&4, 4), Ok(())); 15 | assert_eq!(list.insert(&5, 5), Ok(())); 16 | 17 | assert_eq!(list.lookup(&1), Some(&1)); 18 | assert_eq!(list.lookup(&2), Some(&2)); 19 | assert_eq!(list.lookup(&3), Some(&3)); 20 | assert_eq!(list.lookup(&4), Some(&4)); 21 | assert_eq!(list.lookup(&5), Some(&5)); 22 | 23 | assert_eq!(list.remove(&1), Ok(1)); 24 | assert_eq!(list.remove(&3), Ok(3)); 25 | assert_eq!(list.remove(&5), Ok(5)); 26 | 27 | assert_eq!(list.lookup(&1), None); 28 | assert_eq!(list.lookup(&2), Some(&2)); 29 | assert_eq!(list.lookup(&3), None); 30 | assert_eq!(list.lookup(&4), Some(&4)); 31 | assert_eq!(list.lookup(&5), None); 32 | 33 | assert_eq!(list.remove(&4), Ok(4)); 34 | assert_eq!(list.remove(&2), Ok(2)); 35 | 36 | assert_eq!(list.insert(&0, 0), Ok(())); 37 | assert_eq!(list.lookup(&0), Some(&0)); 38 | assert_eq!(list.remove(&0), Ok(0)); 39 | assert_eq!(list.lookup(&0), None); 40 | } 41 | 42 | #[test] 43 | fn stress_linkedlist() { 44 | stress_sequential::>(100_000); 45 | } 46 | -------------------------------------------------------------------------------- /tests/linkedlist/mod.rs: -------------------------------------------------------------------------------- 1 | mod linkedlist; 2 | -------------------------------------------------------------------------------- 
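The linked-list and B-tree stress tests above both funnel into the shared stress_sequential helper from tests/util/map.rs. As a rough sketch of how another SequentialMap implementation would plug into the same harness (the String key type and the iteration count below are illustrative assumptions, not taken from the repository):

use crate::util::map::stress_sequential;
use cds::{avltree::AVLTree, map::SequentialMap};

#[test]
fn stress_avl_tree_string_keys() {
    // String implements the crate's Random trait (src/util/random.rs), so it can
    // also serve as a key type for the harness, assuming AVLTree accepts it.
    stress_sequential::<String, AVLTree<String, u64>>(50_000);
}
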
/tests/lock/mod.rs: -------------------------------------------------------------------------------- 1 | mod spinlock; 2 | -------------------------------------------------------------------------------- /tests/lock/spinlock.rs: -------------------------------------------------------------------------------- 1 | use std::ops::DerefMut; 2 | 3 | use cds::lock::SpinLock; 4 | use crossbeam_utils::thread::scope; 5 | 6 | #[test] 7 | fn test_spin_lock() { 8 | let counter = SpinLock::new(0); 9 | 10 | scope(|scope| { 11 | for _ in 0..50 { 12 | scope.spawn(|_| { 13 | for _ in 0..1_000 { 14 | let mut lock_guard = counter.lock(); 15 | *lock_guard.deref_mut() += 1; 16 | } 17 | }); 18 | } 19 | }) 20 | .unwrap(); 21 | 22 | assert_eq!(*counter.lock(), 50_000); 23 | } 24 | -------------------------------------------------------------------------------- /tests/queue/fclock.rs: -------------------------------------------------------------------------------- 1 | use cds::{ 2 | lock::{spinlock::RawSpinLock, RawMutex}, 3 | queue::{FCQueue, FatNodeQueue, Queue}, 4 | }; 5 | 6 | use super::*; 7 | 8 | // Maybe general macro for generating all cases of DS is very useful, but it is too complicated. 9 | 10 | #[test] 11 | fn test_fc_queue_sequential() { 12 | test_sequential_concurrent_queue::>>(); 13 | test_sequential_concurrent_queue::>>(); 14 | test_sequential_concurrent_queue::>>(); 15 | test_sequential_concurrent_queue::>>(); 16 | } 17 | 18 | #[test] 19 | fn test_fc_queue_simple() { 20 | test_simple_concurrent_queue::>>(); 21 | test_simple_concurrent_queue::>>(); 22 | test_simple_concurrent_queue::>>(); 23 | test_simple_concurrent_queue::>>(); 24 | } 25 | 26 | #[test] 27 | fn test_fc_queue_spsc() { 28 | test_spsc_concurrent_queue::>>(); 29 | test_spsc_concurrent_queue::>>(); 30 | test_spsc_concurrent_queue::>>(); 31 | test_spsc_concurrent_queue::>>(); 32 | } 33 | 34 | #[test] 35 | fn test_fc_queue_spmc() { 36 | test_spmc_concurrent_queue::>>(); 37 | test_spmc_concurrent_queue::>>(); 38 | test_spmc_concurrent_queue::>>(); 39 | test_spmc_concurrent_queue::>>(); 40 | } 41 | 42 | #[test] 43 | fn test_fc_queue_mpsc() { 44 | test_mpsc_concurrent_queue::>>(); 45 | test_mpsc_concurrent_queue::>>(); 46 | test_mpsc_concurrent_queue::>>(); 47 | test_mpsc_concurrent_queue::>>(); 48 | } 49 | 50 | #[test] 51 | fn test_fc_queue_mpmc() { 52 | test_mpmc_concurrent_queue::>>(); 53 | test_mpmc_concurrent_queue::>>(); 54 | test_mpmc_concurrent_queue::>>(); 55 | test_mpmc_concurrent_queue::>>(); 56 | } 57 | -------------------------------------------------------------------------------- /tests/queue/lockfree.rs: -------------------------------------------------------------------------------- 1 | use cds::queue::MSQueue; 2 | 3 | use super::*; 4 | 5 | #[test] 6 | fn test_ms_queue_sequential() { 7 | test_sequential_concurrent_queue::>(); 8 | } 9 | 10 | #[test] 11 | fn test_ms_queue_simple() { 12 | test_simple_concurrent_queue::>(); 13 | } 14 | 15 | #[test] 16 | fn test_ms_queue_spsc() { 17 | test_spsc_concurrent_queue::>(); 18 | } 19 | 20 | #[test] 21 | fn test_ms_queue_spmc() { 22 | test_spmc_concurrent_queue::>(); 23 | } 24 | 25 | #[test] 26 | fn test_ms_queue_mpsc() { 27 | test_mpsc_concurrent_queue::>(); 28 | } 29 | 30 | #[test] 31 | fn test_ms_queue_mpmc() { 32 | test_mpmc_concurrent_queue::>(); 33 | } 34 | -------------------------------------------------------------------------------- /tests/queue/mod.rs: -------------------------------------------------------------------------------- 1 | mod fclock; 2 | mod lockfree; 3 | mod 
mutex; 4 | mod spinlock; 5 | 6 | use cds::queue::{FatNodeQueue, Queue}; 7 | 8 | use crate::util::queue::*; 9 | 10 | #[test] 11 | fn test_simple_queue() { 12 | test_simple_sequential_queue::>(); 13 | } 14 | 15 | #[test] 16 | fn test_deep_queue() { 17 | test_deep_sequential_queue::>(); 18 | } 19 | 20 | #[test] 21 | fn test_fat_node_queue() { 22 | test_simple_sequential_queue::>(); 23 | } 24 | 25 | #[test] 26 | fn test_deep_fat_node_queue() { 27 | test_deep_sequential_queue::>(); 28 | } 29 | -------------------------------------------------------------------------------- /tests/queue/mutex.rs: -------------------------------------------------------------------------------- 1 | use cds::queue::{MutexQueue, TwoMutexQueue}; 2 | 3 | use super::*; 4 | 5 | #[test] 6 | fn test_mutex_queue_sequential() { 7 | test_sequential_concurrent_queue::>(); 8 | } 9 | 10 | #[test] 11 | fn test_mutex_queue_simple() { 12 | test_simple_concurrent_queue::>(); 13 | } 14 | 15 | #[test] 16 | fn test_mutex_queue_spsc() { 17 | test_spsc_concurrent_queue::>(); 18 | } 19 | 20 | #[test] 21 | fn test_mutex_queue_spmc() { 22 | test_spmc_concurrent_queue::>(); 23 | } 24 | 25 | #[test] 26 | fn test_mutex_queue_mpsc() { 27 | test_mpsc_concurrent_queue::>(); 28 | } 29 | 30 | #[test] 31 | fn test_mutex_queue_mpmc() { 32 | test_mpmc_concurrent_queue::>(); 33 | } 34 | 35 | #[test] 36 | fn test_two_mutex_queue_sequential() { 37 | test_sequential_concurrent_queue::>(); 38 | } 39 | 40 | #[test] 41 | fn test_two_mutex_queue_simple() { 42 | test_simple_concurrent_queue::>(); 43 | } 44 | 45 | #[test] 46 | fn test_two_mutex_queue_spsc() { 47 | test_spsc_concurrent_queue::>(); 48 | } 49 | 50 | #[test] 51 | fn test_two_mutex_queue_spmc() { 52 | test_spmc_concurrent_queue::>(); 53 | } 54 | 55 | #[test] 56 | fn test_two_mutex_queue_mpsc() { 57 | test_mpsc_concurrent_queue::>(); 58 | } 59 | 60 | #[test] 61 | fn test_two_mutex_queue_mpmc() { 62 | test_mpmc_concurrent_queue::>(); 63 | } 64 | -------------------------------------------------------------------------------- /tests/queue/spinlock.rs: -------------------------------------------------------------------------------- 1 | use cds::queue::{SpinLockQueue, TwoSpinLockQueue}; 2 | 3 | use super::*; 4 | 5 | #[test] 6 | fn test_spin_lock_queue_sequential() { 7 | test_sequential_concurrent_queue::>(); 8 | } 9 | 10 | #[test] 11 | fn test_spin_lock_queue_simple() { 12 | test_simple_concurrent_queue::>(); 13 | } 14 | 15 | #[test] 16 | fn test_spin_lock_queue_spsc() { 17 | test_spsc_concurrent_queue::>(); 18 | } 19 | 20 | #[test] 21 | fn test_spin_lock_queue_spmc() { 22 | test_spmc_concurrent_queue::>(); 23 | } 24 | 25 | #[test] 26 | fn test_spin_lock_queue_mpsc() { 27 | test_mpsc_concurrent_queue::>(); 28 | } 29 | 30 | #[test] 31 | fn test_spin_lock_queue_mpmc() { 32 | test_mpmc_concurrent_queue::>(); 33 | } 34 | 35 | #[test] 36 | fn test_two_spin_lock_queue_sequential() { 37 | test_sequential_concurrent_queue::>(); 38 | } 39 | 40 | #[test] 41 | fn test_two_spin_lock_queue_simple() { 42 | test_simple_concurrent_queue::>(); 43 | } 44 | 45 | #[test] 46 | fn test_two_spin_lock_queue_spsc() { 47 | test_spsc_concurrent_queue::>(); 48 | } 49 | 50 | #[test] 51 | fn test_two_spin_lock_queue_spmc() { 52 | test_spmc_concurrent_queue::>(); 53 | } 54 | 55 | #[test] 56 | fn test_two_spin_lock_queue_mpsc() { 57 | test_mpsc_concurrent_queue::>(); 58 | } 59 | 60 | #[test] 61 | fn test_two_spin_lock_queue_mpmc() { 62 | test_mpmc_concurrent_queue::>(); 63 | } 64 | 
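All of the queue test files in this directory reduce to the same six generic helpers defined in tests/util/queue.rs, so each implementation only needs the thin per-type wrappers shown above. A minimal standalone sketch of the trait surface those helpers rely on (assuming SpinLockQueue takes a single element type parameter, as the calls above suggest):

use cds::queue::{ConcurrentQueue, SpinLockQueue};

#[test]
fn spin_lock_queue_fifo_sketch() {
    let queue: SpinLockQueue<i32> = SpinLockQueue::new();

    queue.push(10);
    queue.push(20);

    // try_pop returns Option<i32>; a FIFO queue hands back the oldest element first.
    assert_eq!(queue.try_pop(), Some(10));
    assert_eq!(queue.try_pop(), Some(20));
    assert!(queue.try_pop().is_none());
}
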
-------------------------------------------------------------------------------- /tests/stack/eb.rs: -------------------------------------------------------------------------------- 1 | use cds::stack::{ConcurrentStack, EBStack}; 2 | use crossbeam_utils::thread::scope; 3 | 4 | #[test] 5 | fn test_ebstack() { 6 | let stack = EBStack::new(); 7 | 8 | scope(|scope| { 9 | for _ in 0..10 { 10 | scope.spawn(|_| { 11 | for i in 0..10_000 { 12 | stack.push(i); 13 | assert!(stack.try_pop().is_some()); 14 | } 15 | }); 16 | } 17 | }) 18 | .unwrap(); 19 | 20 | assert!(stack.try_pop().is_none()); 21 | } 22 | -------------------------------------------------------------------------------- /tests/stack/mod.rs: -------------------------------------------------------------------------------- 1 | mod eb; 2 | mod mutex; 3 | mod spinlock; 4 | mod stack; 5 | mod treiber; 6 | -------------------------------------------------------------------------------- /tests/stack/mutex.rs: -------------------------------------------------------------------------------- 1 | use cds::stack::{ConcurrentStack, MutexStack}; 2 | use crossbeam_utils::thread::scope; 3 | 4 | #[test] 5 | fn test_mutex_stack() { 6 | let stack = MutexStack::new(); 7 | 8 | scope(|scope| { 9 | for _ in 0..10 { 10 | scope.spawn(|_| { 11 | for i in 0..10_000 { 12 | stack.push(i); 13 | assert!(stack.try_pop().is_some()); 14 | } 15 | }); 16 | } 17 | }) 18 | .unwrap(); 19 | 20 | assert!(stack.try_pop().is_none()); 21 | } 22 | -------------------------------------------------------------------------------- /tests/stack/spinlock.rs: -------------------------------------------------------------------------------- 1 | use cds::stack::{ConcurrentStack, SpinLockStack}; 2 | use crossbeam_utils::thread::scope; 3 | 4 | #[test] 5 | fn test_spinlock_stack() { 6 | let stack = SpinLockStack::new(); 7 | 8 | scope(|scope| { 9 | for _ in 0..10 { 10 | scope.spawn(|_| { 11 | for i in 0..10_000 { 12 | stack.push(i); 13 | assert!(stack.try_pop().is_some()); 14 | } 15 | }); 16 | } 17 | }) 18 | .unwrap(); 19 | 20 | assert!(stack.try_pop().is_none()); 21 | } 22 | -------------------------------------------------------------------------------- /tests/stack/stack.rs: -------------------------------------------------------------------------------- 1 | use cds::stack::Stack; 2 | 3 | #[test] 4 | fn test_stack() { 5 | let mut stack = Stack::new(); 6 | assert_eq!(stack.is_empty(), true); 7 | 8 | stack.push(1); 9 | stack.push(2); 10 | stack.push(3); 11 | stack.push(4); 12 | stack.push(5); 13 | 14 | assert_eq!(stack.is_empty(), false); 15 | assert_eq!(stack.top(), Some(&5)); 16 | 17 | assert_eq!(stack.pop(), Some(5)); 18 | assert_eq!(stack.pop(), Some(4)); 19 | assert_eq!(stack.pop(), Some(3)); 20 | assert_eq!(stack.pop(), Some(2)); 21 | assert_eq!(stack.pop(), Some(1)); 22 | 23 | assert_eq!(stack.is_empty(), true); 24 | assert_eq!(stack.pop(), None); 25 | } 26 | 27 | #[test] 28 | fn test_deep_stack() { 29 | let mut stack = Stack::new(); 30 | 31 | for n in 1..100_000 { 32 | stack.push(n); 33 | } 34 | 35 | for n in (1..100_000).rev() { 36 | assert_eq!(stack.pop(), Some(n)); 37 | } 38 | 39 | assert_eq!(stack.is_empty(), true); 40 | } 41 | -------------------------------------------------------------------------------- /tests/stack/treiber.rs: -------------------------------------------------------------------------------- 1 | use cds::stack::{ConcurrentStack, TreiberStack}; 2 | 3 | #[test] 4 | fn test_treiber_stack() { 5 | let stack = TreiberStack::new(); 6 | 7 | assert_eq!(stack.is_empty(), 
true); 8 | 9 | stack.push(1); 10 | stack.push(2); 11 | stack.push(3); 12 | stack.push(4); 13 | stack.push(5); 14 | 15 | assert_eq!(stack.is_empty(), false); 16 | assert_eq!(stack.top(), Some(5)); 17 | 18 | assert_eq!(stack.try_pop(), Some(5)); 19 | assert_eq!(stack.try_pop(), Some(4)); 20 | assert_eq!(stack.try_pop(), Some(3)); 21 | assert_eq!(stack.try_pop(), Some(2)); 22 | assert_eq!(stack.try_pop(), Some(1)); 23 | 24 | assert_eq!(stack.is_empty(), true); 25 | assert_eq!(stack.try_pop(), None); 26 | } 27 | -------------------------------------------------------------------------------- /tests/tests.rs: -------------------------------------------------------------------------------- 1 | mod avltree; 2 | mod btree; 3 | mod linkedlist; 4 | mod lock; 5 | mod queue; 6 | mod stack; 7 | mod util; 8 | -------------------------------------------------------------------------------- /tests/util/map.rs: -------------------------------------------------------------------------------- 1 | use cds::map::ConcurrentMap; 2 | use cds::map::SequentialMap; 3 | use cds::util::random::Random; 4 | use crossbeam_utils::thread; 5 | use rand::prelude::SliceRandom; 6 | use rand::prelude::ThreadRng; 7 | use rand::thread_rng; 8 | use rand::Rng; 9 | use std::cmp::Ordering; 10 | use std::collections::BTreeMap; 11 | use std::collections::HashMap; 12 | use std::collections::VecDeque; 13 | use std::fmt::Debug; 14 | use std::hash::Hash; 15 | use std::marker::PhantomData; 16 | use std::time::Duration; 17 | use std::time::Instant; 18 | 19 | #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] 20 | enum Operation { 21 | Insert, 22 | Lookup, 23 | Remove, 24 | } 25 | 26 | #[derive(PartialEq)] 27 | enum OperationType { 28 | Some, // the operation for existing key on the map 29 | None, // the operation for not existing key on the map 30 | } 31 | 32 | pub fn stress_sequential(iter: u64) 33 | where 34 | K: Ord + Clone + Random + Debug, 35 | M: SequentialMap, 36 | { 37 | // 10 times try to get not existing key, or return if failing 38 | let gen_not_existing_key = |rng: &mut ThreadRng, map: &BTreeMap| { 39 | let mut key = K::gen(rng); 40 | 41 | for _ in 0..10 { 42 | if !map.contains_key(&key) { 43 | return Ok(key); 44 | } 45 | 46 | key = K::gen(rng); 47 | } 48 | 49 | Err(()) 50 | }; 51 | 52 | let ops = [Operation::Insert, Operation::Lookup, Operation::Remove]; 53 | let types = [OperationType::Some, OperationType::None]; 54 | 55 | let mut map = M::new(); 56 | let mut ref_map: BTreeMap = BTreeMap::new(); 57 | let mut rng = thread_rng(); 58 | 59 | for _ in 1..=iter { 60 | let t = types.choose(&mut rng).unwrap(); 61 | let ref_map_keys = ref_map.keys().collect::>(); 62 | let existing_key = ref_map_keys.choose(&mut rng); 63 | 64 | if existing_key.is_none() || *t == OperationType::None { 65 | // run operation with not existing key 66 | let not_existing_key = if let Ok(key) = gen_not_existing_key(&mut rng, &ref_map) { 67 | key 68 | } else { 69 | continue; 70 | }; 71 | 72 | match ops.choose(&mut rng).unwrap() { 73 | Operation::Insert => { 74 | // should success 75 | let value: u64 = rng.gen(); 76 | 77 | // println!( 78 | // "[{:0>10}] InsertNone: ({:?}, {})", 79 | // i, not_existing_key, value 80 | // ); 81 | assert_eq!(ref_map.insert(not_existing_key.clone(), value), None); 82 | assert_eq!(map.insert(¬_existing_key, value), Ok(())); 83 | } 84 | Operation::Lookup => { 85 | // should fail 86 | // println!("[{:0>10}] LookupNone: ({:?}, None)", i, not_existing_key); 87 | assert_eq!(ref_map.get(¬_existing_key), None); 88 | 
assert_eq!(map.lookup(¬_existing_key), None); 89 | } 90 | Operation::Remove => { 91 | // should fail 92 | // println!("[{:0>10}] RemoveNone: ({:?}, Err)", i, not_existing_key); 93 | assert_eq!(ref_map.remove(¬_existing_key), None); 94 | assert_eq!(map.remove(¬_existing_key), Err(())); 95 | } 96 | } 97 | } else { 98 | // run operation with existing key 99 | let existing_key = (*existing_key.unwrap()).clone(); 100 | 101 | match ops.choose(&mut rng).unwrap() { 102 | Operation::Insert => { 103 | // should fail 104 | let value: u64 = rng.gen(); 105 | 106 | // println!("[{:0>10}] InsertSome: ({:?}, {})", i, existing_key, value); 107 | assert_eq!(map.insert(&existing_key, value), Err(value)); 108 | } 109 | Operation::Lookup => { 110 | // should success 111 | let value = ref_map.get(&existing_key); 112 | 113 | // println!( 114 | // "[{:0>10}] LookupSome: ({:?}, {})", 115 | // i, 116 | // existing_key, 117 | // value.unwrap() 118 | // ); 119 | assert_eq!(map.lookup(&existing_key), value); 120 | } 121 | Operation::Remove => { 122 | // should success 123 | let value = ref_map.remove(&existing_key); 124 | 125 | // println!( 126 | // "[{:0>10}] RemoveSome: ({:?}, {})", 127 | // i, 128 | // existing_key, 129 | // value.unwrap() 130 | // ); 131 | assert_eq!(map.remove(&existing_key).ok(), value); 132 | 133 | // early stop code if the remove has any problems 134 | // for key in ref_map.keys().collect::>() { 135 | // assert_eq!(map.lookup(key).is_some(), true, "the key {:?} is not found.", key); 136 | // } 137 | } 138 | } 139 | } 140 | } 141 | } 142 | 143 | struct Sequentialized 144 | where 145 | K: Eq, 146 | M: ConcurrentMap, 147 | { 148 | inner: M, 149 | temp: *const Option, 150 | _marker: PhantomData<(*const K, V)>, 151 | } 152 | 153 | impl SequentialMap for Sequentialized 154 | where 155 | K: Eq, 156 | V: Clone, 157 | M: ConcurrentMap, 158 | { 159 | fn new() -> Self { 160 | let empty: Box> = Box::new(None); 161 | 162 | Self { 163 | inner: M::new(), 164 | temp: Box::leak(empty) as *const Option, 165 | _marker: PhantomData, 166 | } 167 | } 168 | 169 | fn insert(&mut self, key: &K, value: V) -> Result<(), V> { 170 | self.inner.insert(key, value) 171 | } 172 | 173 | fn lookup(&self, key: &K) -> Option<&V> { 174 | let value = self.inner.get(key); 175 | 176 | // HACK: temporarily save the value, and get its reference safely 177 | unsafe { 178 | *(self.temp as *mut Option) = value; 179 | (*self.temp).as_ref() 180 | } 181 | } 182 | 183 | fn remove(&mut self, key: &K) -> Result { 184 | self.inner.remove(key) 185 | } 186 | } 187 | 188 | pub fn stress_concurrent_as_sequential(iter: u64) 189 | where 190 | K: Ord + Clone + Random + Debug, 191 | M: ConcurrentMap, 192 | { 193 | stress_sequential::>(iter) 194 | } 195 | 196 | #[derive(Clone, Debug)] 197 | struct Log { 198 | start: Instant, 199 | end: Instant, 200 | op: Operation, 201 | key: K, 202 | // insert: Try inserting (K, V). If success, Ok(V) 203 | // lookup: Try looking up (K, ). If existing (K, V), Ok(V) 204 | // remove: Try removing (K, ). If success to remove (K, V), Ok(V) 205 | result: Result, 206 | } 207 | 208 | // LogBunch: (start, end) of Insert, (start, end) of Remove, logs 209 | type LogBunch = (Instant, Instant, Instant, Instant, Vec>); 210 | 211 | /// stress and assert on the concurrent model 212 | /// 213 | /// Since asserting logs is based on recursion, 214 | /// I recommend to use at most stress_concurrent(100_000, 32) on 8KiB stack memory. 
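///
/// A typical invocation mirrors tests/avltree/seqlock.rs (the u64 key and value types
/// here are assumptions, since the original type parameters are not shown in this dump):
///
///     stress_concurrent::<u64, SeqLockAVLTree<u64, u64>>(200_000, 16, false);
///
/// The last argument controls whether the collected logs are checked with assert_logs
/// afterwards.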
215 | pub fn stress_concurrent(iter: u64, thread_num: u64, assert_log: bool) 216 | where 217 | K: Send + Ord + Clone + Random + Debug + Hash, 218 | M: Sync + ConcurrentMap, 219 | { 220 | let ops = [Operation::Insert, Operation::Lookup, Operation::Remove]; 221 | 222 | let map = M::new(); 223 | 224 | let logs = thread::scope(|s| { 225 | let mut threads = Vec::new(); 226 | 227 | for _ in 0..thread_num { 228 | let t = s.spawn(|_| { 229 | let mut rng = thread_rng(); 230 | let mut logs = Vec::new(); 231 | 232 | for _ in 0..iter { 233 | let key = K::gen(&mut rng); 234 | let op = ops.choose(&mut rng).unwrap().clone(); 235 | 236 | let (start, result, end) = match op { 237 | Operation::Insert => { 238 | let value = u64::gen(&mut rng); 239 | let start = Instant::now(); 240 | let result = match map.insert(&key, value) { 241 | Ok(()) => Ok(value), 242 | Err(_) => Err(()), 243 | }; 244 | let end = Instant::now(); 245 | 246 | (start, result, end) 247 | } 248 | Operation::Lookup => { 249 | let start = Instant::now(); 250 | let result = match map.get(&key) { 251 | Some(value) => Ok(value), 252 | None => Err(()), 253 | }; 254 | let end = Instant::now(); 255 | 256 | (start, result, end) 257 | } 258 | Operation::Remove => { 259 | let start = Instant::now(); 260 | let result = map.remove(&key); 261 | let end = Instant::now(); 262 | 263 | (start, result, end) 264 | } 265 | }; 266 | 267 | let log = Log { 268 | start, 269 | end, 270 | op, 271 | key, 272 | result, 273 | }; 274 | 275 | // println!("{:?} [{:0>10}] {:?}", std::thread::current().id(), i, log); 276 | 277 | logs.push(log); 278 | } 279 | 280 | logs 281 | }); 282 | 283 | threads.push(t); 284 | } 285 | 286 | threads 287 | .into_iter() 288 | .map(|h| h.join().unwrap()) 289 | .flatten() 290 | .collect::>() 291 | }) 292 | .unwrap(); 293 | 294 | if assert_log { 295 | // println!("Asserting logs..."); 296 | assert_logs(logs); 297 | } 298 | } 299 | 300 | // rearrange logs and check if they are consistent and have no contradiction 301 | fn assert_logs(logs: Vec>) { 302 | let mut key_logs = HashMap::new(); 303 | 304 | // classify logs by key 305 | for log in logs { 306 | key_logs 307 | .entry(log.key.clone()) 308 | .or_insert_with(|| Vec::new()) 309 | .push(log); 310 | } 311 | 312 | for (key, mut key_logs) in key_logs { 313 | key_logs.sort_by(|a, b| a.start.cmp(&b.start)); 314 | 315 | let mut value_logs = HashMap::new(); 316 | 317 | for log in &key_logs { 318 | value_logs 319 | .entry(log.result.clone()) 320 | .or_insert_with(|| Vec::new()) 321 | .push(log.clone()); 322 | } 323 | 324 | let mut error_logs = Vec::new(); 325 | 326 | let mut log_bunches: Vec> = Vec::new(); 327 | let mut last_flag = false; 328 | for (value, mut logs) in value_logs { 329 | if value == Err(()) { 330 | // Error logs cannot cause side effect. Therefore, collect all in one place and check correctness 331 | error_logs = logs; 332 | continue; 333 | } 334 | 335 | // make logs like [Insert, ..., Remove] 336 | logs.sort_by(|a, b| { 337 | let op = a.op.cmp(&b.op); 338 | 339 | if op == Ordering::Equal { 340 | a.start.cmp(&b.start) 341 | } else { 342 | op 343 | } 344 | }); 345 | 346 | assert!( 347 | verify_logs(logs.iter().collect::>()), 348 | "The logs of (key, value) failed to assert:\n{:?}", 349 | logs 350 | ); 351 | 352 | // TODO: split bunch into multiple bunches if multiple insert-remove pairs exist. 
353 | let insert = (&logs) 354 | .into_iter() 355 | .filter(|x| x.op == Operation::Insert) 356 | .collect::>(); 357 | let remove = (&logs) 358 | .into_iter() 359 | .filter(|x| x.op == Operation::Remove) 360 | .collect::>(); 361 | 362 | assert_eq!( 363 | insert.len(), 364 | 1, 365 | "On one value, multiple insert is not checked right now." 366 | ); 367 | assert!( 368 | remove.len() <= 1, 369 | "On one value, multiple remove is not checked right now." 370 | ); 371 | 372 | let insert = logs.first().unwrap(); 373 | 374 | if remove.len() == 0 { 375 | // the latest insertion on the key 376 | if last_flag { 377 | panic!( 378 | "({:?}, {:?}) Multiple Insertion on last:\n {:?}", 379 | key, value, key_logs 380 | ); 381 | } 382 | 383 | last_flag = true; 384 | let last_instant = Instant::now() 385 | .checked_add(Duration::from_secs(300)) 386 | .unwrap(); 387 | log_bunches.push((insert.start, insert.end, last_instant, last_instant, logs)); 388 | } else { 389 | let remove = logs.last().unwrap(); 390 | log_bunches.push((insert.start, insert.end, remove.start, remove.end, logs)); 391 | } 392 | } 393 | 394 | if log_bunches.is_empty() { 395 | // There are only error logs or not. Therefore, we just check if the log is lookup(error) or remove(error). 396 | for error_log in error_logs { 397 | if error_log.op == Operation::Insert { 398 | panic!("If there are only error logs, they should be lookup or removal."); 399 | } 400 | } 401 | 402 | continue; 403 | } 404 | 405 | // rearrange batches by correctness 406 | log_bunches.sort_by(|a, b| a.0.cmp(&b.0)); 407 | 408 | let before = log_bunches.len(); 409 | 410 | let mut log_bunches = VecDeque::from(log_bunches); 411 | let mut final_log_bunches = vec![log_bunches.pop_front().unwrap()]; 412 | 413 | rearrange_log_bunches(&mut final_log_bunches, &mut log_bunches) 414 | .expect("Failed to rearrange logs to be correct"); 415 | 416 | assert_eq!(before, final_log_bunches.len()); 417 | 418 | if last_flag { 419 | let last_op = &final_log_bunches.last().unwrap().4.last().unwrap().op; 420 | assert!(*last_op != Operation::Remove); 421 | } 422 | 423 | // check if the error log has contradiction 424 | // 425 | // insert: if the error log occurs between finishing removing and starting inserting, it is contradiction 426 | // lookup/remove: if the error log occurs between finishing inserting and starting removing, it is contradiction 427 | error_logs.sort_by(|a, b| a.start.cmp(&b.start)); 428 | 429 | let mut error_logs = VecDeque::from(error_logs); 430 | 431 | // check the first range by first log bunch 432 | { 433 | let first_log_bunch = final_log_bunches.first().unwrap(); 434 | 435 | let mut i = 0; 436 | while i < error_logs.len() { 437 | let error_log = &error_logs[i]; 438 | 439 | if error_log.start < first_log_bunch.3 { 440 | // the error log is overlapped by the range of the bunch 441 | match error_log.op { 442 | Operation::Insert => { 443 | if error_log.end < first_log_bunch.0 { 444 | panic!( 445 | "The error log {:?} has contradiction on {:?}.", 446 | error_log, first_log_bunch 447 | ); 448 | } else { 449 | error_logs.remove(i); 450 | } 451 | } 452 | _ => i += 1, 453 | } 454 | } else { 455 | break; 456 | } 457 | } 458 | } 459 | 460 | // check the middle range by the two log bunches 461 | for bunches in final_log_bunches.windows(2) { 462 | let old = &bunches[0]; 463 | let new = &bunches[1]; 464 | let (start, end) = ( 465 | vec![old.0, old.2, new.0, new.2].into_iter().min().unwrap(), 466 | vec![old.1, old.3, new.1, new.3].into_iter().max().unwrap(), 467 | ); // the range of the 
bunch 468 | 469 | while let Some(error_log) = error_logs.front() { 470 | if error_log.start < end && error_log.end > start { 471 | // the error log is overlapped by the range 472 | match error_log.op { 473 | Operation::Insert => { 474 | if old.3 < error_log.start && error_log.end < new.0 { 475 | panic!( 476 | "The error log {:?} has contradiction on: {:?}.", 477 | error_log, old 478 | ); 479 | } else { 480 | error_logs.pop_front(); 481 | } 482 | } 483 | Operation::Lookup | Operation::Remove => { 484 | if old.1 < error_log.start && error_log.end < old.2 { 485 | panic!( 486 | "The error log {:?} has contradiction on {:?}, {:?}.", 487 | error_log, old, new 488 | ); 489 | } else { 490 | error_logs.pop_front(); 491 | } 492 | } 493 | } 494 | } else { 495 | break; 496 | } 497 | } 498 | } 499 | 500 | // check the last range by the log bunch 501 | { 502 | let last_log_bunch = final_log_bunches.last().unwrap(); 503 | 504 | while let Some(error_log) = error_logs.front() { 505 | if error_log.start < last_log_bunch.3 { 506 | // the error log is overlapped by the range of the bunch 507 | match error_log.op { 508 | Operation::Insert => { 509 | if last_log_bunch.4.last().unwrap().op == Operation::Remove 510 | && last_log_bunch.3 < error_log.start 511 | { 512 | panic!( 513 | "The error log {:?} has contradiction on {:?}.", 514 | error_log, last_log_bunch 515 | ); 516 | } else { 517 | error_logs.pop_front(); 518 | } 519 | } 520 | Operation::Lookup | Operation::Remove => { 521 | if last_log_bunch.1 < error_log.start 522 | && error_log.end < last_log_bunch.2 523 | { 524 | panic!( 525 | "The error log {:?} has contradiction on {:?}.", 526 | error_log, last_log_bunch 527 | ); 528 | } else { 529 | error_logs.pop_front(); 530 | } 531 | } 532 | } 533 | } else { 534 | break; 535 | } 536 | } 537 | } 538 | 539 | // after bunches, all error log should be 540 | while let Some(error_log) = error_logs.pop_front() { 541 | if !last_flag && error_log.op == Operation::Insert { 542 | panic!("Finishing with removal, the error log {:?} has contradiction since it is empty.", error_log); 543 | } else if last_flag 544 | && (error_log.op == Operation::Lookup || error_log.op == Operation::Remove) 545 | { 546 | panic!("Finishing without removal, the error log {:?} has contradiction since it is not empty.", error_log); 547 | } 548 | } 549 | 550 | // merge log bunches into single log 551 | let logs: Vec> = final_log_bunches 552 | .into_iter() 553 | .map(|bunch| bunch.4) 554 | .flatten() 555 | .collect(); 556 | 557 | assert!(verify_logs(logs.iter().collect::>())); 558 | } 559 | } 560 | 561 | // rearrange log bunches to be correct 562 | // 563 | // to use this function, please set first element into now_log_bunches by front poping from rest_log_bunches 564 | // use DFS: 565 | // 1) Insert b_1(bunch) into [] 566 | // i+1) For [b_1, ..., b_i], try insert b_{i+1}. If failed, insert b_{i+1} moving backward. 567 | // (ex. try inserting [b_1, ..., b_{i - 3}, b_{i + 1}, b_{i - 2}, b_{i - 1}, b_i]) 568 | // If failed to try all case on [b_1, ..., b_i], go back [b_1, ..., b_{i - 1}] and try inserting b_i on other place. 569 | // If failed to try all case on the list, the program is incorrect. 
570 | fn rearrange_log_bunches( 571 | now_log_bunches: &mut Vec>, 572 | rest_log_bunches: &mut VecDeque>, 573 | ) -> Result<(), ()> { 574 | if rest_log_bunches.is_empty() { 575 | return Ok(()); 576 | } 577 | 578 | if verify_log_bunches(vec![ 579 | now_log_bunches.last().unwrap(), 580 | rest_log_bunches.front().unwrap(), 581 | ]) { 582 | // very good case: just push now log bunch into full logs 583 | now_log_bunches.push(rest_log_bunches.pop_front().unwrap()); 584 | 585 | let result = rearrange_log_bunches(now_log_bunches, rest_log_bunches); 586 | 587 | if result.is_ok() { 588 | return Ok(()); 589 | } 590 | 591 | rest_log_bunches.push_front(now_log_bunches.pop().unwrap()); 592 | } 593 | 594 | // try to insert it on best place like [i - 1, it, i] 595 | for i in (0..now_log_bunches.len()).rev() { 596 | if now_log_bunches[i].3 < rest_log_bunches.front().unwrap().0 { 597 | // if the target cannot be followed by now_log_bunches[i], it cannot be inserted. So, break. 598 | break; 599 | } 600 | 601 | let mut test_bunches = vec![]; 602 | 603 | if i >= 1 { 604 | test_bunches.push(&now_log_bunches[i - 1]); 605 | } 606 | 607 | test_bunches.push(rest_log_bunches.front().unwrap()); 608 | test_bunches.push(&now_log_bunches[i]); 609 | 610 | if verify_log_bunches(test_bunches) { 611 | now_log_bunches.insert(i, rest_log_bunches.pop_front().unwrap()); 612 | 613 | let result = rearrange_log_bunches(now_log_bunches, rest_log_bunches); 614 | 615 | if result.is_ok() { 616 | return Ok(()); 617 | } 618 | 619 | rest_log_bunches.push_front(now_log_bunches.pop().unwrap()); 620 | } 621 | } 622 | 623 | Err(()) 624 | } 625 | 626 | fn verify_log_bunches( 627 | log_bunches: Vec<&LogBunch>, 628 | ) -> bool { 629 | let merged_logs = log_bunches 630 | .iter() 631 | .map(|x| &x.4) 632 | .flatten() 633 | .collect::>(); 634 | verify_logs(merged_logs) 635 | } 636 | 637 | // verify if the logs have no contradiction on order 638 | fn verify_logs(logs: Vec<&Log>) -> bool { 639 | let mut old_log = &logs[0]; 640 | let mut state = if let Ok(state) = verify_state_log(None, &old_log) { 641 | state 642 | } else { 643 | panic!("Logs is contradiction: {:?}", logs); 644 | }; 645 | 646 | for log in logs.iter().skip(1) { 647 | // the old log should be former or overlapped 648 | if old_log.start <= log.end { 649 | if let Ok(new_state) = verify_state_log(state, &log) { 650 | state = new_state; 651 | old_log = log; 652 | } else { 653 | // The log has contradition on data 654 | return false; 655 | } 656 | } else { 657 | // The log is inconsistent on time 658 | return false; 659 | } 660 | } 661 | 662 | true 663 | } 664 | 665 | // verify if the log is correct to set on right next of the state 666 | // if correct, return Ok() with next state 667 | // if not correct, Err(()) 668 | fn verify_state_log( 669 | state: Option, 670 | log: &Log, 671 | ) -> Result, ()> { 672 | match log.op { 673 | Operation::Insert => { 674 | if let Some(_) = state.clone() { 675 | if let Ok(_) = log.result { 676 | Err(()) 677 | } else { 678 | Ok(state) 679 | } 680 | } else { 681 | if let Ok(v) = log.result.clone() { 682 | Ok(Some(v)) 683 | } else { 684 | Err(()) 685 | } 686 | } 687 | } 688 | Operation::Lookup => { 689 | if let Some(s) = state.clone() { 690 | if let Ok(v) = log.result.clone() { 691 | if s == v { 692 | Ok(state) 693 | } else { 694 | Err(()) 695 | } 696 | } else { 697 | Err(()) 698 | } 699 | } else { 700 | if let Ok(_) = log.result { 701 | Err(()) 702 | } else { 703 | Ok(state) 704 | } 705 | } 706 | } 707 | Operation::Remove => { 708 | if let Some(s) = 
state.clone() { 709 | if let Ok(v) = log.result.clone() { 710 | if s == v { 711 | Ok(None) 712 | } else { 713 | Err(()) 714 | } 715 | } else { 716 | Err(()) 717 | } 718 | } else { 719 | if let Ok(_) = log.result { 720 | Err(()) 721 | } else { 722 | Ok(state) 723 | } 724 | } 725 | } 726 | } 727 | } 728 | -------------------------------------------------------------------------------- /tests/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod map; 2 | pub mod queue; 3 | -------------------------------------------------------------------------------- /tests/util/queue.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | use cds::queue::{ConcurrentQueue, SequentialQueue}; 4 | 5 | pub fn test_simple_sequential_queue>() { 6 | let mut queue = Q::new(); 7 | 8 | queue.push(1); 9 | queue.push(2); 10 | queue.push(3); 11 | queue.push(4); 12 | queue.push(5); 13 | 14 | assert_eq!(queue.pop(), Some(1)); 15 | assert_eq!(queue.pop(), Some(2)); 16 | assert_eq!(queue.pop(), Some(3)); 17 | assert_eq!(queue.pop(), Some(4)); 18 | assert_eq!(queue.pop(), Some(5)); 19 | 20 | assert_eq!(queue.pop(), None); 21 | } 22 | 23 | pub fn test_deep_sequential_queue>() { 24 | let mut queue = Q::new(); 25 | 26 | for n in 1..100_000 { 27 | queue.push(n); 28 | } 29 | 30 | for n in 1..100_000 { 31 | assert_eq!(queue.pop(), Some(n)); 32 | } 33 | 34 | assert_eq!(queue.pop(), None); 35 | } 36 | 37 | pub fn test_sequential_concurrent_queue>() { 38 | let queue = Q::new(); 39 | 40 | for i in 0..1_000 { 41 | queue.push(i); 42 | queue.pop(); 43 | } 44 | 45 | assert!(queue.try_pop().is_none()); 46 | } 47 | 48 | pub fn test_simple_concurrent_queue>() { 49 | let queue = Q::new(); 50 | 51 | thread::scope(|scope| { 52 | for _ in 0..10 { 53 | scope.spawn(|| { 54 | for i in 0..1_000 { 55 | queue.push(i); 56 | queue.pop(); 57 | } 58 | }); 59 | } 60 | }); 61 | 62 | assert!(queue.try_pop().is_none()); 63 | } 64 | 65 | pub fn test_spsc_concurrent_queue>() { 66 | let queue = Q::new(); 67 | 68 | thread::scope(|scope| { 69 | scope.spawn(|| { 70 | for i in 0..1_000_000 { 71 | queue.push(i); 72 | } 73 | }); 74 | 75 | scope.spawn(|| { 76 | let mut result = Vec::new(); 77 | 78 | for _ in 0..1_000_000 { 79 | result.push(queue.pop()); 80 | } 81 | 82 | let mut expected = result.clone(); 83 | expected.sort(); 84 | 85 | assert_eq!(expected, result); 86 | }); 87 | }); 88 | 89 | assert!(queue.try_pop().is_none()); 90 | } 91 | 92 | pub fn test_spmc_concurrent_queue>() { 93 | let queue = Q::new(); 94 | 95 | thread::scope(|scope| { 96 | scope.spawn(|| { 97 | for i in 0..1_000_000 { 98 | queue.push(i); 99 | } 100 | }); 101 | 102 | for _ in 0..10 { 103 | scope.spawn(|| { 104 | for _ in 0..100_000 { 105 | queue.pop(); 106 | } 107 | }); 108 | } 109 | }); 110 | 111 | assert!(queue.try_pop().is_none()); 112 | } 113 | 114 | pub fn test_mpsc_concurrent_queue>() { 115 | let queue = Q::new(); 116 | 117 | thread::scope(|scope| { 118 | for _ in 0..10 { 119 | scope.spawn(|| { 120 | for i in 0..100_000 { 121 | queue.push(i); 122 | } 123 | }); 124 | } 125 | 126 | scope.spawn(|| { 127 | for _ in 0..1_000_000 { 128 | queue.pop(); 129 | } 130 | }); 131 | }); 132 | 133 | assert!(queue.try_pop().is_none()); 134 | } 135 | 136 | pub fn test_mpmc_concurrent_queue>() { 137 | let queue = Q::new(); 138 | 139 | thread::scope(|scope| { 140 | for _ in 0..10 { 141 | scope.spawn(|| { 142 | for i in 0..100_000 { 143 | queue.push(i); 144 | } 145 | }); 146 | 147 | scope.spawn(|| { 
148 | for _ in 0..100_000 { 149 | queue.pop(); 150 | } 151 | }); 152 | } 153 | }); 154 | 155 | assert!(queue.try_pop().is_none()); 156 | } 157 | --------------------------------------------------------------------------------
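One detail worth spelling out about test_spsc_concurrent_queue above: the single producer pushes 0, 1, 2, ... in increasing order, so a FIFO queue must deliver a non-decreasing sequence to the consumer (assuming pop blocks until an element is available; its exact signature is not shown in this dump), and sorting leaves exactly those sequences unchanged. Comparing the popped sequence with its own sorted copy therefore detects any reordering. A self-contained illustration of that argument, not a test from the repository:

// Sorting is the identity precisely on already-non-decreasing sequences, which is
// what the SPSC helper exploits to verify FIFO order.
fn sorted_copy_check_sketch() {
    let in_order = vec![0, 1, 2, 3];
    let mut expected = in_order.clone();
    expected.sort();
    assert_eq!(expected, in_order); // order preserved: the check passes

    let reordered = vec![0, 2, 1, 3];
    let mut expected = reordered.clone();
    expected.sort();
    assert_ne!(expected, reordered); // a swap is caught by the same comparison
}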