├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── examples └── mpmc.rs ├── fuzz ├── .gitignore ├── Cargo.toml └── fuzz_targets │ ├── fuzz_iter.rs │ ├── fuzz_model.rs │ └── fuzz_serde.rs ├── src ├── lib.rs └── serde.rs └── tests ├── concurrent.rs ├── fuzzer_regression_tests.rs └── leak_check.rs /.gitignore: -------------------------------------------------------------------------------- 1 | *Cargo.lock 2 | target 3 | flamegraph* 4 | perf* 5 | cachegrind* 6 | callgrind* 7 | dhat* 8 | massif* 9 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "concurrent-map" 3 | authors = ["Tyler Neely "] 4 | description = "a lock-free linearizable B+ tree" 5 | version = "5.0.37" 6 | edition = "2021" 7 | license = "MIT OR Apache-2.0" 8 | repository = "https://github.com/komora-io/concurrent-map" 9 | keywords = ["lock-free", "multi-threaded", "performance", "index", "tree"] 10 | categories = ["concurrency", "data-structures", "rust-patterns"] 11 | readme = "README.md" 12 | 13 | [profile.dev] 14 | opt-level = 1 15 | debug = true 16 | 17 | [profile.release] 18 | debug = true 19 | 20 | [features] 21 | fault_injection = ["rand"] 22 | timing = [] 23 | print_utilization_on_drop = [] 24 | fuzz_constants = [] 25 | serde = ["dep:serde"] 26 | 27 | [dependencies] 28 | ebr = { version = "0.2.13" } 29 | stack-map = "1.0.5" 30 | rand = { version = "0.8.5", optional = true } 31 | serde = { version = "1.0.192", optional = true } 32 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 Tyler Neely 190 | Copyright 2023 Tyler Neely 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022 Tyler Neely 2 | Copyright (c) 2023 Tyler Neely 3 | 4 | Permission is hereby granted, free of charge, to any 5 | person obtaining a copy of this software and associated 6 | documentation files (the "Software"), to deal in the 7 | Software without restriction, including without 8 | limitation the rights to use, copy, modify, merge, 9 | publish, distribute, sublicense, and/or sell copies of 10 | the Software, and to permit persons to whom the Software 11 | is furnished to do so, subject to the following 12 | conditions: 13 | 14 | The above copyright notice and this permission notice 15 | shall be included in all copies or substantial portions 16 | of the Software. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 19 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 20 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 21 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 22 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 23 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 24 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 25 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 26 | DEALINGS IN THE SOFTWARE. 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # concurrent-map 2 | 3 | 4 | 5 | Lock-free linearizable map. 6 | 7 | * `get`, `insert`, `cas`, `remove`, `iter`, `range`, `get_gt`, `get_gte`, `get_lt`, `get_lte`, `first`, `last`, `pop_first`, `pop_last` 8 | * fully lock-free node splits and merges based on the [sled](https://sled.rs) battle-tested implementation. `concurrent-map` can be though of in some ways as a simplified, in-memory `sled` that supports high-level types. 9 | * initially designed for use in sled's next generation object store, [marble](https://github.com/komora-io/marble). 10 | 11 | The `ConcurrentMap` allows users to tune the tree fan-out (`FANOUT`) 12 | and the underlying memory reclamation granularity (`LOCAL_GC_BUFFER_SIZE`) 13 | for achieving desired performance properties. The defaults are pretty good 14 | for most use cases but if you want to squeeze every bit of performance out 15 | for your particular workload, tweaking them based on realistic measurements 16 | may be beneficial. See the `ConcurrentMap` docs for more details. 17 | 18 | If you want to use a custom key type, you must 19 | implement the `Minimum` trait, 20 | allowing the left-most side of the tree to be 21 | created before inserting any data. 22 | 23 | This is an ordered data structure, and supports very high throughput iteration over 24 | lexicographically sorted ranges of values. If you are looking for simple point operation 25 | performance, you may find a better option among one of the many concurrent 26 | hashmap implementations that are floating around. Pay for what you actually use :) 27 | 28 | The minimum supported rust version (MSRV) is `1.70`. 29 | -------------------------------------------------------------------------------- /examples/mpmc.rs: -------------------------------------------------------------------------------- 1 | use std::thread::scope; 2 | use std::time::Instant; 3 | 4 | use concurrent_map::ConcurrentMap; 5 | 6 | const PRODUCERS: usize = 128; 7 | const CONSUMERS: usize = 128; 8 | const N: usize = 1024 * 1024; 9 | const PRODUCER_N: usize = N / PRODUCERS; 10 | const CONSUMER_N: usize = N / CONSUMERS; 11 | 12 | fn producer(cm: ConcurrentMap, min: usize, max: usize) { 13 | for i in min..max { 14 | cm.insert(i, i); 15 | } 16 | } 17 | 18 | fn consumer(cm: ConcurrentMap, n: usize) { 19 | let mut popped = 0; 20 | while popped < n { 21 | if let Some((k, v)) = cm.pop_first() { 22 | assert_eq!(k, v); 23 | popped += 1; 24 | } 25 | } 26 | } 27 | 28 | fn main() { 29 | let cm = ConcurrentMap::default(); 30 | 31 | let before = Instant::now(); 32 | scope(|s| { 33 | let mut handles = vec![]; 34 | 35 | for i in 0..PRODUCERS { 36 | let min = i * PRODUCER_N; 37 | let max = (i + 1) * PRODUCER_N; 38 | let cm = cm.clone(); 39 | let handle = s.spawn(move || producer(cm, min, max)); 40 | handles.push(handle); 41 | } 42 | 43 | for _ in 0..CONSUMERS { 44 | let cm = cm.clone(); 45 | let handle = s.spawn(move || consumer(cm, CONSUMER_N)); 46 | handles.push(handle); 47 | } 48 | 49 | for handle in handles.into_iter() { 50 | handle.join().unwrap() 51 | } 52 | }); 53 | 54 | let elapsed = before.elapsed(); 55 | 56 | let per_second = N as u128 * 1000 / elapsed.as_millis(); 57 | 58 | println!( 59 | "with {} producers and {} consumers, took {:?} to transfer {} items ({} per second)", 60 | PRODUCERS, CONSUMERS, elapsed, N, per_second 61 | ); 62 | } 63 | -------------------------------------------------------------------------------- /fuzz/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | corpus 3 | artifacts 4 | -------------------------------------------------------------------------------- /fuzz/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "concurrent-map-fuzz" 3 | version = "0.0.0" 4 | authors = ["Automatically generated"] 5 | publish = false 6 | edition = "2018" 7 | 8 | [package.metadata] 9 | cargo-fuzz = true 10 | 11 | [dependencies.libfuzzer-sys] 12 | version = "0.4.0" 13 | features = ["arbitrary-derive"] 14 | 15 | [dependencies] 16 | arbitrary = { version = "1.0.3", features = ["derive"] } 17 | bincode = "1.3.3" 18 | 19 | [dependencies.concurrent-map] 20 | path = ".." 21 | features = ["fuzz_constants", "serde", "fault_injection"] 22 | 23 | # Prevent this from interfering with workspaces 24 | [workspace] 25 | members = ["."] 26 | 27 | [[bin]] 28 | name = "fuzz_model" 29 | path = "fuzz_targets/fuzz_model.rs" 30 | test = false 31 | doc = false 32 | 33 | [[bin]] 34 | name = "fuzz_serde" 35 | path = "fuzz_targets/fuzz_serde.rs" 36 | test = false 37 | doc = false 38 | 39 | [[bin]] 40 | name = "fuzz_iter" 41 | path = "fuzz_targets/fuzz_iter.rs" 42 | test = false 43 | doc = false 44 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/fuzz_iter.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | #[macro_use] 3 | extern crate libfuzzer_sys; 4 | extern crate concurrent_map; 5 | 6 | fuzz_target!(|data: Vec| { 7 | let mut model = std::collections::BTreeMap::default(); 8 | 9 | for item in &data { 10 | model.insert(*item, *item); 11 | } 12 | 13 | let model_iter: Vec<_> = model.into_iter().collect(); 14 | 15 | { 16 | let tree = concurrent_map::ConcurrentMap::<_, _, 4>::default(); 17 | 18 | for item in &data { 19 | tree.insert(*item, *item); 20 | } 21 | 22 | let self_iter: Vec<_> = tree.iter().collect(); 23 | 24 | assert_eq!(self_iter, model_iter); 25 | } 26 | { 27 | let tree = concurrent_map::ConcurrentMap::<_, _, 5>::default(); 28 | 29 | for item in &data { 30 | tree.insert(*item, *item); 31 | } 32 | 33 | let self_iter: Vec<_> = tree.iter().collect(); 34 | 35 | assert_eq!(self_iter, model_iter); 36 | } 37 | { 38 | let tree = concurrent_map::ConcurrentMap::<_, _, 7>::default(); 39 | 40 | for item in &data { 41 | tree.insert(*item, *item); 42 | } 43 | 44 | let self_iter: Vec<_> = tree.iter().collect(); 45 | 46 | assert_eq!(self_iter, model_iter); 47 | } 48 | { 49 | let tree = concurrent_map::ConcurrentMap::<_, _, 8>::default(); 50 | 51 | for item in &data { 52 | tree.insert(*item, *item); 53 | } 54 | 55 | let self_iter: Vec<_> = tree.iter().collect(); 56 | 57 | assert_eq!(self_iter, model_iter); 58 | } 59 | }); 60 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/fuzz_model.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | #[macro_use] 3 | extern crate libfuzzer_sys; 4 | extern crate arbitrary; 5 | extern crate concurrent_map; 6 | 7 | use arbitrary::Arbitrary; 8 | 9 | const KEYSPACE: u64 = 128; 10 | 11 | #[derive(Debug)] 12 | enum Op { 13 | Insert { 14 | key: u64, 15 | value: u64, 16 | }, 17 | Remove { 18 | key: u64, 19 | }, 20 | Cas { 21 | key: u64, 22 | old: Option, 23 | new: Option, 24 | }, 25 | Range { 26 | start: u64, 27 | end: u64, 28 | }, 29 | } 30 | 31 | impl<'a> Arbitrary<'a> for Op { 32 | fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { 33 | Ok(if u.ratio(1, 2)? { 34 | Op::Insert { 35 | key: u.int_in_range(0..=KEYSPACE as u64)?, 36 | value: u.int_in_range(0..=KEYSPACE as u64)?, 37 | } 38 | } else if u.ratio(1, 2)? { 39 | Op::Remove { 40 | key: u.int_in_range(0..=KEYSPACE as u64)?, 41 | } 42 | } else if u.ratio(1, 2)? { 43 | Op::Cas { 44 | key: u.int_in_range(0..=KEYSPACE as u64)?, 45 | old: if u.ratio(1, 2)? { 46 | Some(u.int_in_range(0..=KEYSPACE as u64)?) 47 | } else { 48 | None 49 | }, 50 | new: if u.ratio(1, 2)? { 51 | Some(u.int_in_range(0..=KEYSPACE as u64)?) 52 | } else { 53 | None 54 | }, 55 | } 56 | } else { 57 | let start = u.int_in_range(0..=KEYSPACE as u64)?; 58 | let end = (start + 1).max(u.int_in_range(0..=KEYSPACE as u64)?); 59 | Op::Range { start, end } 60 | }) 61 | } 62 | } 63 | 64 | fuzz_target!(|ops: Vec| { 65 | let tree = concurrent_map::ConcurrentMap::<_, _, 8>::default(); 66 | let mut model = std::collections::BTreeMap::new(); 67 | 68 | for (_i, op) in ops.into_iter().enumerate() { 69 | match op { 70 | Op::Insert { key, value } => { 71 | assert_eq!(tree.insert(key, value), model.insert(key, value)); 72 | } 73 | Op::Remove { key } => { 74 | assert_eq!(tree.remove(&key), model.remove(&key)); 75 | } 76 | Op::Range { start, end } => { 77 | let model_iter = model.range(start..end); 78 | let mut tree_iter = tree.range(start..end); 79 | 80 | for (k1, v1) in model_iter { 81 | let (k2, v2) = tree_iter.next().unwrap(); 82 | assert_eq!((k1, v1), (&k2, &v2)); 83 | } 84 | 85 | assert_eq!(tree_iter.next(), None); 86 | 87 | let model_iter = model.range(start..end).rev(); 88 | let mut tree_iter = tree.range(start..end); 89 | 90 | for (k1, v1) in model_iter { 91 | let (k2, v2) = tree_iter.next_back().unwrap(); 92 | assert_eq!((k1, v1), (&k2, &v2)); 93 | } 94 | 95 | assert_eq!(tree_iter.next_back(), None); 96 | } 97 | Op::Cas { key, old, new } => { 98 | let succ = if old == model.get(&key).copied() { 99 | if let Some(n) = new { 100 | model.insert(key, n); 101 | } else { 102 | model.remove(&key); 103 | } 104 | true 105 | } else { 106 | false 107 | }; 108 | 109 | let res = tree.cas(key, old.as_ref(), new); 110 | 111 | if succ { 112 | assert!(res.is_ok()); 113 | } else { 114 | assert!(res.is_err()); 115 | } 116 | } 117 | }; 118 | 119 | for (key, value) in &model { 120 | assert_eq!(tree.get(key), Some(*value)); 121 | } 122 | 123 | for (key, value) in &tree { 124 | assert_eq!(model.get(&key), Some(&value)); 125 | } 126 | 127 | assert_eq!(tree.len(), model.len()); 128 | } 129 | 130 | let mut model_iter = model.iter(); 131 | let mut tree_iter = tree.iter(); 132 | 133 | for (k1, v1) in &mut model_iter { 134 | let (k2, v2) = tree_iter.next().unwrap(); 135 | assert_eq!((k1, v1), (&k2, &v2)); 136 | } 137 | 138 | assert_eq!(tree_iter.next(), None); 139 | }); 140 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/fuzz_serde.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | #[macro_use] 3 | extern crate libfuzzer_sys; 4 | extern crate concurrent_map; 5 | 6 | fuzz_target!(|data: Vec| { 7 | let tree = concurrent_map::ConcurrentMap::<_, _, 4>::default(); 8 | 9 | for item in data { 10 | tree.insert(item, item); 11 | } 12 | 13 | let serialized = bincode::serialize(&tree).unwrap(); 14 | let deserialized = bincode::deserialize(&serialized).unwrap(); 15 | assert_eq!(tree, deserialized); 16 | }); 17 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr( 2 | test, 3 | deny( 4 | missing_docs, 5 | future_incompatible, 6 | nonstandard_style, 7 | rust_2018_idioms, 8 | missing_copy_implementations, 9 | trivial_casts, 10 | trivial_numeric_casts, 11 | unused_qualifications, 12 | ) 13 | )] 14 | #![cfg_attr(test, deny( 15 | clippy::cast_lossless, 16 | clippy::cast_possible_truncation, 17 | clippy::cast_possible_wrap, 18 | clippy::cast_precision_loss, 19 | clippy::cast_sign_loss, 20 | clippy::decimal_literal_representation, 21 | clippy::doc_markdown, 22 | // clippy::else_if_without_else, 23 | clippy::empty_enum, 24 | clippy::explicit_into_iter_loop, 25 | clippy::explicit_iter_loop, 26 | clippy::expl_impl_clone_on_copy, 27 | clippy::fallible_impl_from, 28 | clippy::filter_map_next, 29 | clippy::float_arithmetic, 30 | clippy::get_unwrap, 31 | clippy::if_not_else, 32 | clippy::indexing_slicing, 33 | clippy::inline_always, 34 | clippy::integer_arithmetic, 35 | clippy::invalid_upcast_comparisons, 36 | clippy::items_after_statements, 37 | clippy::manual_find_map, 38 | clippy::map_entry, 39 | clippy::map_flatten, 40 | clippy::match_like_matches_macro, 41 | clippy::match_same_arms, 42 | clippy::maybe_infinite_iter, 43 | clippy::mem_forget, 44 | // clippy::missing_docs_in_private_items, 45 | clippy::module_name_repetitions, 46 | clippy::multiple_inherent_impl, 47 | clippy::mut_mut, 48 | clippy::needless_borrow, 49 | clippy::needless_continue, 50 | clippy::needless_pass_by_value, 51 | clippy::non_ascii_literal, 52 | clippy::path_buf_push_overwrite, 53 | // clippy::print_stdout, 54 | clippy::redundant_closure_for_method_calls, 55 | clippy::shadow_reuse, 56 | clippy::shadow_same, 57 | clippy::shadow_unrelated, 58 | clippy::single_match_else, 59 | clippy::string_add, 60 | clippy::string_add_assign, 61 | clippy::type_repetition_in_bounds, 62 | clippy::unicode_not_nfc, 63 | clippy::unimplemented, 64 | clippy::unseparated_literal_suffix, 65 | clippy::used_underscore_binding, 66 | clippy::wildcard_dependencies, 67 | ))] 68 | #![cfg_attr( 69 | test, 70 | warn( 71 | clippy::missing_const_for_fn, 72 | clippy::multiple_crate_versions, 73 | clippy::wildcard_enum_match_arm, 74 | ) 75 | )] 76 | 77 | //! A lock-free B+ tree based on [sled](https://github.com/spacejam/sled)'s internal 78 | //! index structure, but supporting richer Rust types as keys and values than raw bytes. 79 | //! 80 | //! This structure supports atomic compare and swap operations with the 81 | //! [`ConcurrentMap::cas`] method. 82 | //! 83 | //! The [`ConcurrentMap`] allows users to tune the tree fan-out (`FANOUT`) 84 | //! and the underlying memory reclamation granularity (`LOCAL_GC_BUFFER_SIZE`) 85 | //! for achieving desired performance properties. The defaults are pretty good 86 | //! for most use cases but if you want to squeeze every bit of performance out 87 | //! for your particular workload, tweaking them based on realistic measurements 88 | //! may be beneficial. See the [`ConcurrentMap`] docs for more details. 89 | //! 90 | //! If you want to use a custom key type, you must 91 | //! implement the [`Minimum`] trait, 92 | //! allowing the left-most side of the tree to be 93 | //! created before inserting any data. If you wish 94 | //! to perform scans in reverse lexicographical order, 95 | //! you may instead implement [`Maximum`] for your key 96 | //! type and use [`std::cmp::Reverse`]. 97 | //! 98 | //! This is an ordered data structure, and supports very high throughput iteration over 99 | //! lexicographically sorted ranges of values. If you are looking for simple point operation 100 | //! performance, you may find a better option among one of the many concurrent 101 | //! hashmap implementations that are floating around. 102 | 103 | #[cfg(feature = "serde")] 104 | mod serde; 105 | 106 | #[cfg(not(feature = "fault_injection"))] 107 | #[inline] 108 | const fn debug_delay() -> bool { 109 | false 110 | } 111 | 112 | /// This function is useful for inducing random jitter into 113 | /// our atomic operations, shaking out more possible 114 | /// interleavings quickly. It gets fully eliminated by the 115 | /// compiler in non-test code. 116 | #[cfg(feature = "fault_injection")] 117 | fn debug_delay() -> bool { 118 | use std::thread; 119 | use std::time::Duration; 120 | 121 | use rand::{thread_rng, Rng}; 122 | 123 | let mut rng = thread_rng(); 124 | 125 | match rng.gen_range(0..100) { 126 | 0..=98 => false, 127 | _ => { 128 | thread::yield_now(); 129 | true 130 | } 131 | } 132 | } 133 | 134 | use stack_map::StackMap; 135 | 136 | use std::borrow::Borrow; 137 | use std::fmt; 138 | use std::mem::size_of; 139 | use std::num::{ 140 | NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, 141 | NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, 142 | }; 143 | use std::ops::{Bound, Deref}; 144 | use std::ptr::NonNull; 145 | use std::sync::{ 146 | atomic::{AtomicPtr, AtomicUsize, Ordering}, 147 | Arc, 148 | }; 149 | 150 | #[cfg(feature = "timing")] 151 | use std::time::{Duration, Instant}; 152 | 153 | use ebr::{Ebr, Guard}; 154 | 155 | // NB this must always be 1 156 | const MERGE_SIZE: usize = 1; 157 | 158 | #[derive(Debug)] 159 | enum Deferred< 160 | K: 'static + Clone + Minimum + Send + Sync + Ord, 161 | V: 'static + Clone + Send + Sync, 162 | const FANOUT: usize, 163 | > { 164 | #[allow(unused)] 165 | Node(Box>), 166 | BoxedAtomicPtr(BoxedAtomicPtr), 167 | } 168 | 169 | impl< 170 | K: 'static + Clone + Minimum + Send + Sync + Ord, 171 | V: 'static + Clone + Send + Sync, 172 | const FANOUT: usize, 173 | > Drop for Deferred 174 | { 175 | fn drop(&mut self) { 176 | if let Deferred::BoxedAtomicPtr(id) = self { 177 | assert!(!id.0.is_null()); 178 | let reclaimed: Box>> = 179 | unsafe { Box::from_raw(id.0 as *mut _) }; 180 | drop(reclaimed); 181 | } 182 | } 183 | } 184 | 185 | #[derive(Debug, Clone, Eq)] 186 | struct BoxedAtomicPtr< 187 | K: 'static + Clone + Minimum + Send + Sync + Ord, 188 | V: 'static + Clone + Send + Sync, 189 | const FANOUT: usize, 190 | >(*const AtomicPtr>); 191 | 192 | impl< 193 | K: 'static + Clone + Minimum + Send + Sync + Ord, 194 | V: 'static + Clone + Send + Sync, 195 | const FANOUT: usize, 196 | > Copy for BoxedAtomicPtr 197 | { 198 | } 199 | 200 | impl< 201 | K: 'static + Clone + Minimum + Send + Sync + Ord, 202 | V: 'static + Clone + Send + Sync, 203 | const FANOUT: usize, 204 | > PartialEq for BoxedAtomicPtr 205 | { 206 | fn eq(&self, other: &Self) -> bool { 207 | self.0 == other.0 208 | } 209 | } 210 | 211 | unsafe impl< 212 | K: 'static + Clone + Minimum + Send + Sync + Ord, 213 | V: 'static + Clone + Send + Sync, 214 | const FANOUT: usize, 215 | > Send for BoxedAtomicPtr 216 | { 217 | } 218 | 219 | unsafe impl< 220 | K: 'static + Clone + Minimum + Send + Sync + Ord, 221 | V: 'static + Clone + Send + Sync, 222 | const FANOUT: usize, 223 | > Sync for BoxedAtomicPtr 224 | { 225 | } 226 | 227 | impl< 228 | K: 'static + Clone + Minimum + Send + Sync + Ord, 229 | V: 'static + Clone + Send + Sync, 230 | const FANOUT: usize, 231 | > Deref for BoxedAtomicPtr 232 | { 233 | type Target = AtomicPtr>; 234 | 235 | fn deref(&self) -> &AtomicPtr> { 236 | unsafe { &*self.0 } 237 | } 238 | } 239 | 240 | impl< 241 | K: 'static + Clone + Minimum + Send + Sync + Ord, 242 | V: 'static + Clone + Send + Sync, 243 | const FANOUT: usize, 244 | > BoxedAtomicPtr 245 | { 246 | fn new(node: Box>) -> BoxedAtomicPtr { 247 | let pointee_ptr = Box::into_raw(node); 248 | let pointer_ptr = Box::into_raw(Box::new(AtomicPtr::new(pointee_ptr))); 249 | BoxedAtomicPtr(pointer_ptr) 250 | } 251 | 252 | fn node_view( 253 | &self, 254 | _guard: &mut Guard<'_, Deferred, LOCAL_GC_BUFFER_SIZE>, 255 | ) -> Option> { 256 | let ptr = NonNull::new(self.load(Ordering::Acquire))?; 257 | 258 | Some(NodeView { ptr, id: *self }) 259 | } 260 | } 261 | 262 | /// Error type for the [`ConcurrentMap::cas`] operation. 263 | #[derive(Debug, PartialEq, Eq)] 264 | pub struct CasFailure { 265 | /// The current actual value that failed the comparison 266 | pub actual: Option, 267 | /// The value that was proposed as a new value, which could 268 | /// not be installed due to the comparison failure. 269 | pub returned_new_value: Option, 270 | } 271 | 272 | #[derive(Debug)] 273 | struct NodeView 274 | where 275 | K: 'static + Clone + Minimum + Ord + Send + Sync, 276 | V: 'static + Clone + Send + Sync, 277 | { 278 | ptr: NonNull>, 279 | id: BoxedAtomicPtr, 280 | } 281 | 282 | impl NodeView 283 | where 284 | K: 'static + Clone + Minimum + Ord + Send + Sync, 285 | V: 'static + Clone + Send + Sync, 286 | { 287 | /// Try to replace. If the node has been deleted since we got our view, 288 | /// an Err(None) is returned. 289 | fn cas( 290 | &self, 291 | replacement: Box>, 292 | guard: &mut Guard<'_, Deferred, LOCAL_GC_BUFFER_SIZE>, 293 | ) -> Result, Option>> { 294 | assert!( 295 | !(replacement.hi.is_some() ^ replacement.next.is_some()), 296 | "hi and next must both either be None or Some" 297 | ); 298 | 299 | if debug_delay() { 300 | return Err(Some(NodeView { 301 | ptr: self.ptr, 302 | id: self.id, 303 | })); 304 | } 305 | 306 | let replacement_ptr = Box::into_raw(replacement); 307 | let res = self.id.compare_exchange( 308 | self.ptr.as_ptr(), 309 | replacement_ptr, 310 | Ordering::AcqRel, 311 | Ordering::Acquire, 312 | ); 313 | 314 | match res { 315 | Ok(_) => { 316 | let replaced: Box> = unsafe { Box::from_raw(self.ptr.as_ptr()) }; 317 | guard.defer_drop(Deferred::Node(replaced)); 318 | Ok(NodeView { 319 | ptr: NonNull::new(replacement_ptr).unwrap(), 320 | id: self.id, 321 | }) 322 | } 323 | Err(actual) => { 324 | let failed_value: Box> = 325 | unsafe { Box::from_raw(replacement_ptr) }; 326 | drop(failed_value); 327 | 328 | if actual.is_null() { 329 | Err(None) 330 | } else { 331 | Err(Some(NodeView { 332 | ptr: NonNull::new(actual).unwrap(), 333 | id: self.id, 334 | })) 335 | } 336 | } 337 | } 338 | } 339 | 340 | /// This function is used to get a mutable reference to 341 | /// the node. It is intended as an optimization to avoid 342 | /// RCU overhead when the overall ConcurrentMap's inner 343 | /// Arc only has a single copy, giving us enough runtime 344 | /// information to uphold the required invariant that there 345 | /// is at most one accessing thread for the overall structure. 346 | /// 347 | /// Additional care must be taken to ensure that at any time, 348 | /// there is only ever a single mutable reference to this 349 | /// inner Node, otherwise various optimizations may cause 350 | /// memory corruption. When in doubt, don't use this. 351 | unsafe fn get_mut(&mut self) -> &mut Node { 352 | self.ptr.as_mut() 353 | } 354 | } 355 | 356 | impl Deref for NodeView 357 | where 358 | K: 'static + Clone + Minimum + Ord + Send + Sync, 359 | V: 'static + Clone + Send + Sync, 360 | { 361 | type Target = Node; 362 | 363 | fn deref(&self) -> &Self::Target { 364 | unsafe { self.ptr.as_ref() } 365 | } 366 | } 367 | 368 | /// Trait for types for which a minimum possible value exists. 369 | /// 370 | /// This trait must be implemented for any `K` key type in the [`ConcurrentMap`]. 371 | pub trait Minimum: Ord { 372 | /// The returned value must be less than or equal 373 | /// to all possible values for this type. 374 | const MIN: Self; 375 | } 376 | 377 | /// Trait for types for which a maximum possible value exists. 378 | /// 379 | /// This exists primarily to play nicely with [`std::cmp::Reverse`] keys 380 | /// for achieving high performance reverse iteration. 381 | pub trait Maximum: Ord { 382 | /// The returned value must be greater than or equal 383 | /// to all possible values for this type. 384 | const MAX: Self; 385 | } 386 | 387 | impl Minimum for () { 388 | const MIN: Self = (); 389 | } 390 | 391 | impl Minimum for bool { 392 | const MIN: Self = false; 393 | } 394 | 395 | impl Minimum for std::cmp::Reverse { 396 | const MIN: Self = std::cmp::Reverse(T::MAX); 397 | } 398 | 399 | macro_rules! impl_integer { 400 | ($($t:ty),+) => { 401 | $( 402 | impl Minimum for $t { 403 | const MIN: Self = <$t>::MIN; 404 | } 405 | 406 | impl Maximum for $t { 407 | const MAX: Self = <$t>::MAX; 408 | } 409 | )* 410 | } 411 | } 412 | 413 | impl_integer!( 414 | usize, 415 | u8, 416 | u16, 417 | u32, 418 | u64, 419 | u128, 420 | isize, 421 | i8, 422 | i16, 423 | i32, 424 | i64, 425 | i128, 426 | NonZeroI128, 427 | NonZeroI16, 428 | NonZeroI32, 429 | NonZeroI64, 430 | NonZeroI8, 431 | NonZeroIsize, 432 | NonZeroU128, 433 | NonZeroU16, 434 | NonZeroU32, 435 | NonZeroU64, 436 | NonZeroU8, 437 | NonZeroUsize 438 | ); 439 | 440 | impl Minimum for Vec { 441 | const MIN: Self = Vec::new(); 442 | } 443 | 444 | impl Minimum for &[T] { 445 | const MIN: Self = &[]; 446 | } 447 | 448 | impl Minimum for [T; LEN] { 449 | const MIN: Self = [T::MIN; LEN]; 450 | } 451 | 452 | impl Minimum for String { 453 | const MIN: Self = String::new(); 454 | } 455 | 456 | impl Minimum for &str { 457 | const MIN: Self = ""; 458 | } 459 | 460 | impl Minimum for (A, B) { 461 | const MIN: Self = (A::MIN, B::MIN); 462 | } 463 | impl Minimum for (A, B, C) { 464 | const MIN: Self = (A::MIN, B::MIN, C::MIN); 465 | } 466 | impl Minimum for (A, B, C, D) { 467 | const MIN: Self = (A::MIN, B::MIN, C::MIN, D::MIN); 468 | } 469 | impl Minimum for (A, B, C, D, E) { 470 | const MIN: Self = (A::MIN, B::MIN, C::MIN, D::MIN, E::MIN); 471 | } 472 | impl Minimum 473 | for (A, B, C, D, E, F) 474 | { 475 | const MIN: Self = (A::MIN, B::MIN, C::MIN, D::MIN, E::MIN, F::MIN); 476 | } 477 | 478 | /// A lock-free B+ tree. 479 | /// 480 | /// Note that this structure is `Send` but NOT `Sync`, 481 | /// despite being a lock-free tree. This is because the 482 | /// inner reclamation system, provided by the `ebr` crate 483 | /// completely avoids atomic operations in its hot path 484 | /// for efficiency. If you want to share [`ConcurrentMap`] 485 | /// between threads, simply clone it, and this will set up 486 | /// a new efficient thread-local memory reclamation state. 487 | /// 488 | /// If you want to use a custom key type, you must 489 | /// implement the [`Minimum`] trait, 490 | /// allowing the left-most side of the tree to be 491 | /// created before inserting any data. If you wish 492 | /// to perform scans in reverse lexicographical order, 493 | /// you may instead implement [`Maximum`] for your key 494 | /// type and use [`std::cmp::Reverse`]. 495 | /// 496 | /// The `FANOUT` const generic must be greater than 3. 497 | /// This const generic controls how large the fixed-size 498 | /// array for either child pointers (for index nodes) or 499 | /// values (for leaf nodes) will be. A higher value may 500 | /// make reads and scans faster, but writes will be slower 501 | /// because each modification performs a read-copy-update 502 | /// of the full node. In some cases, it may be preferable 503 | /// to wrap complex values in an `Arc` to avoid higher 504 | /// copy costs. 505 | /// 506 | /// The `LOCAL_GC_BUFFER_SIZE` const generic must be greater than 0. 507 | /// This controls the epoch-based reclamation granularity. 508 | /// Garbage is placed into fixed-size arrays, and garbage collection 509 | /// only happens after this array fills up and a final timestamp is 510 | /// assigned to it. Lower values will cause replaced values to be dropped 511 | /// more quickly, but the efficiency will be lower. Values that are 512 | /// extremely high may cause undesirable memory usage because it will 513 | /// take more time to fill up each thread-local garbage segment. 514 | /// 515 | /// # Examples 516 | /// 517 | /// ``` 518 | /// let map = concurrent_map::ConcurrentMap::::default(); 519 | /// 520 | /// // insert and remove atomically returns the last value, if it was set, 521 | /// // similarly to a BTreeMap 522 | /// assert_eq!(map.insert(1, 10), None); 523 | /// assert_eq!(map.insert(1, 11), Some(10)); 524 | /// assert_eq!(map.remove(&1), Some(11)); 525 | /// 526 | /// // get also functions similarly to BTreeMap, except it 527 | /// // returns a cloned version of the value rather than a 528 | /// // reference to it, so that no locks need to be maintained. 529 | /// // For this reason, it can be a good idea to use types that 530 | /// // are cheap to clone for values, which can be easily handled 531 | /// // with `Arc` etc... 532 | /// assert_eq!(map.insert(1, 12), None); 533 | /// assert_eq!(map.get(&1), Some(12)); 534 | /// 535 | /// // compare and swap from value 12 to value 20 536 | /// map.cas(1, Some(&12_usize), Some(20)).unwrap(); 537 | /// 538 | /// assert_eq!(map.get(&1).unwrap(), 20); 539 | /// 540 | /// // there are a lot of methods that are not covered 541 | /// // here - check out the docs! 542 | /// ``` 543 | #[derive(Clone)] 544 | pub struct ConcurrentMap 545 | where 546 | K: 'static + Clone + Minimum + Ord + Send + Sync, 547 | V: 'static + Clone + Send + Sync, 548 | { 549 | // epoch-based reclamation 550 | ebr: Ebr, LOCAL_GC_BUFFER_SIZE>, 551 | // the tree structure, separate from the other 552 | // types so that we can mix mutable references 553 | // to ebr with immutable references to other 554 | // things. 555 | inner: Arc>, 556 | // an eventually consistent, lagging count of the 557 | // number of items in this structure. 558 | len: Arc, 559 | } 560 | 561 | impl PartialEq 562 | for ConcurrentMap 563 | where 564 | K: 'static + fmt::Debug + Clone + Minimum + Ord + Send + Sync + PartialEq, 565 | V: 'static + fmt::Debug + Clone + Send + Sync + PartialEq, 566 | { 567 | fn eq(&self, other: &Self) -> bool { 568 | let literally_the_same = Arc::as_ptr(&self.inner) == Arc::as_ptr(&other.inner); 569 | if literally_the_same { 570 | return true; 571 | } 572 | 573 | let self_iter = self.iter(); 574 | let mut other_iter = other.iter(); 575 | 576 | for self_kv in self_iter { 577 | let other_kv = other_iter.next(); 578 | if !Some(self_kv).eq(&other_kv) { 579 | return false; 580 | } 581 | } 582 | 583 | other_iter.next().is_none() 584 | } 585 | } 586 | 587 | impl fmt::Debug 588 | for ConcurrentMap 589 | where 590 | K: 'static + fmt::Debug + Clone + Minimum + Ord + Send + Sync, 591 | V: 'static + fmt::Debug + Clone + Send + Sync, 592 | { 593 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 594 | f.write_str("ConcurrentMap ")?; 595 | f.debug_map().entries(self.iter()).finish() 596 | } 597 | } 598 | 599 | impl Default 600 | for ConcurrentMap 601 | where 602 | K: 'static + Clone + Minimum + Ord + Send + Sync, 603 | V: 'static + Clone + Send + Sync, 604 | { 605 | fn default() -> ConcurrentMap { 606 | assert!(FANOUT > 3, "ConcurrentMap FANOUT must be greater than 3"); 607 | assert!( 608 | LOCAL_GC_BUFFER_SIZE > 0, 609 | "LOCAL_GC_BUFFER_SIZE must be greater than 0" 610 | ); 611 | let mut root_node = Node::::new_root(); 612 | let root_node_lo = root_node.lo.clone(); 613 | let leaf_node = Node::::new_leaf(root_node.lo.clone()); 614 | let leaf = BoxedAtomicPtr::new(leaf_node); 615 | root_node.index_mut().insert(root_node_lo, leaf); 616 | 617 | let root = BoxedAtomicPtr::new(root_node); 618 | 619 | let inner = Arc::new(Inner { 620 | root, 621 | #[cfg(feature = "timing")] 622 | slowest_op: u64::MIN.into(), 623 | #[cfg(feature = "timing")] 624 | fastest_op: u64::MAX.into(), 625 | }); 626 | 627 | ConcurrentMap { 628 | ebr: Ebr::default(), 629 | inner, 630 | len: Arc::new(0.into()), 631 | } 632 | } 633 | } 634 | 635 | struct Inner 636 | where 637 | K: 'static + Clone + Minimum + Ord + Send + Sync, 638 | V: 'static + Clone + Send + Sync, 639 | { 640 | root: BoxedAtomicPtr, 641 | #[cfg(feature = "timing")] 642 | slowest_op: AtomicU64, 643 | #[cfg(feature = "timing")] 644 | fastest_op: AtomicU64, 645 | } 646 | 647 | impl Drop 648 | for Inner 649 | where 650 | K: 'static + Clone + Minimum + Ord + Send + Sync, 651 | V: 'static + Clone + Send + Sync, 652 | { 653 | fn drop(&mut self) { 654 | #[cfg(feature = "timing")] 655 | self.print_timing(); 656 | 657 | let ebr = Ebr::default(); 658 | let mut guard = ebr.pin(); 659 | 660 | let mut cursor: NodeView = self.root(&mut guard); 661 | 662 | let mut lhs_chain: Vec> = vec![]; 663 | 664 | loop { 665 | lhs_chain.push(cursor.id); 666 | if cursor.is_leaf() { 667 | break; 668 | } 669 | let child_ptr: BoxedAtomicPtr = cursor.index().get_index(0).unwrap().1; 670 | 671 | cursor = child_ptr.node_view(&mut guard).unwrap(); 672 | } 673 | 674 | let mut layer = 0; 675 | for lhs_ptr in lhs_chain { 676 | layer += 1; 677 | 678 | let mut min_fill_physical: f64 = 1.0; 679 | let mut max_fill_physical: f64 = 0.0; 680 | let mut fill_sum_physical: f64 = 0.0; 681 | 682 | let mut min_fill_logical: f64 = 1.0; 683 | let mut max_fill_logical: f64 = 0.0; 684 | let mut fill_sum_logical: f64 = 0.0; 685 | let mut nodes_counted: usize = 0; 686 | 687 | let mut next_opt: Option> = Some(lhs_ptr); 688 | while let Some(next) = next_opt { 689 | assert!(!next.0.is_null()); 690 | let sibling_cursor = next.node_view(&mut guard).unwrap(); 691 | 692 | let fill_phy = ((size_of::() + size_of::()) * sibling_cursor.len()) as f64 693 | / size_of::>() as f64; 694 | min_fill_physical = min_fill_physical.min(fill_phy); 695 | max_fill_physical = max_fill_physical.max(fill_phy); 696 | fill_sum_physical += fill_phy; 697 | 698 | let fill_log = sibling_cursor.len() as f64 / FANOUT as f64; 699 | min_fill_logical = min_fill_logical.min(fill_log); 700 | max_fill_logical = max_fill_logical.max(fill_log); 701 | fill_sum_logical += fill_log; 702 | nodes_counted += 1; 703 | 704 | next_opt = sibling_cursor.next; 705 | let node_box = unsafe { Box::from_raw(sibling_cursor.ptr.as_ptr()) }; 706 | drop(node_box); 707 | 708 | let reclaimed_ptr: Box>> = 709 | unsafe { Box::from_raw(next.0 as *mut _) }; 710 | drop(reclaimed_ptr); 711 | } 712 | 713 | if cfg!(feature = "print_utilization_on_drop") { 714 | println!("layer {layer} count {nodes_counted}"); 715 | println!( 716 | "logical: min: {min_fill_logical} max: {max_fill_logical} avg: {}", 717 | fill_sum_logical / nodes_counted as f64 718 | ); 719 | println!( 720 | "physical: min: {min_fill_physical} max: {max_fill_physical} avg: {}", 721 | fill_sum_physical / nodes_counted as f64 722 | ); 723 | } 724 | } 725 | } 726 | } 727 | 728 | impl 729 | ConcurrentMap 730 | where 731 | K: 'static + Clone + Minimum + Ord + Send + Sync, 732 | V: 'static + Clone + Send + Sync, 733 | { 734 | /// Creates a new empty `ConcurrentMap`. 735 | /// 736 | /// # Examples 737 | /// ``` 738 | /// use concurrent_map::ConcurrentMap; 739 | /// 740 | /// let cm: ConcurrentMap = ConcurrentMap::new(); 741 | /// ``` 742 | pub fn new() -> Self { 743 | Self::default() 744 | } 745 | 746 | /// Atomically get a value out of the map that is associated with this key. 747 | /// 748 | /// # Examples 749 | /// ``` 750 | /// let map = concurrent_map::ConcurrentMap::::default(); 751 | /// 752 | /// map.insert(1, 1); 753 | /// 754 | /// let actual = map.get(&0); 755 | /// let expected = None; 756 | /// assert_eq!(expected, actual); 757 | /// 758 | /// let actual = map.get(&1); 759 | /// let expected = Some(1); 760 | /// assert_eq!(expected, actual); 761 | /// ``` 762 | pub fn get(&self, key: &Q) -> Option 763 | where 764 | K: Borrow, 765 | Q: Ord + ?Sized, 766 | { 767 | let mut guard = self.ebr.pin(); 768 | 769 | let leaf = self.inner.leaf_for_key(LeafSearch::Eq(key), &mut guard); 770 | 771 | leaf.get(key) 772 | } 773 | 774 | /// Returns `true` if the `ConcurrentMap` contains the specified key. 775 | /// 776 | /// # Examples 777 | /// ``` 778 | /// let map = concurrent_map::ConcurrentMap::::default(); 779 | /// 780 | /// map.insert(1, 1); 781 | /// 782 | /// assert!(map.contains_key(&1)); 783 | /// assert!(!map.contains_key(&2)); 784 | /// ``` 785 | pub fn contains_key(&self, key: &Q) -> bool 786 | where 787 | K: Borrow, 788 | Q: Ord + ?Sized, 789 | { 790 | self.get(key).is_some() 791 | } 792 | 793 | /// Atomically get a key and value out of the map that is associated with the key that 794 | /// is lexicographically less than the provided key. 795 | /// 796 | /// This will always return `None` if the key passed to `get_lt` == `K::MIN`. 797 | /// 798 | /// # Examples 799 | /// ``` 800 | /// let map = concurrent_map::ConcurrentMap::::default(); 801 | /// 802 | /// map.insert(1, 1); 803 | /// 804 | /// let actual = map.get_lt(&0); 805 | /// let expected = None; 806 | /// assert_eq!(expected, actual); 807 | /// 808 | /// let actual = map.get_lt(&1); 809 | /// let expected = None; 810 | /// assert_eq!(expected, actual); 811 | /// 812 | /// let actual = map.get_lt(&2); 813 | /// let expected = Some((1, 1)); 814 | /// assert_eq!(expected, actual); 815 | /// ``` 816 | pub fn get_lt(&self, key: &Q) -> Option<(K, V)> 817 | where 818 | K: Borrow, 819 | Q: ?Sized + Ord + PartialEq, 820 | { 821 | if key == K::MIN.borrow() { 822 | return None; 823 | } 824 | 825 | let start = Bound::Unbounded; 826 | let end = Bound::Excluded(key); 827 | self.range((start, end)).next_back() 828 | } 829 | 830 | /// Atomically get a key and value out of the map that is associated with the key that 831 | /// is lexicographically less than or equal to the provided key. 832 | /// 833 | /// # Examples 834 | /// ``` 835 | /// let map = concurrent_map::ConcurrentMap::::default(); 836 | /// 837 | /// map.insert(1, 1); 838 | /// 839 | /// let actual = map.get_lte(&0); 840 | /// let expected = None; 841 | /// assert_eq!(expected, actual); 842 | /// 843 | /// let actual = map.get_lte(&1); 844 | /// let expected = Some((1, 1)); 845 | /// assert_eq!(expected, actual); 846 | /// 847 | /// let actual = map.get_lte(&2); 848 | /// let expected = Some((1, 1)); 849 | /// assert_eq!(expected, actual); 850 | /// ``` 851 | pub fn get_lte(&self, key: &Q) -> Option<(K, V)> 852 | where 853 | K: Borrow, 854 | Q: ?Sized + Ord + PartialEq, 855 | { 856 | let mut guard = self.ebr.pin(); 857 | let end = LeafSearch::Eq(key); 858 | let current_back = self.inner.leaf_for_key(end, &mut guard); 859 | 860 | // fast path 861 | if let Some((k, v)) = current_back.leaf().get_less_than_or_equal(key) { 862 | return Some((k.clone(), v.clone())); 863 | } 864 | 865 | // slow path: fall back to reverse iterator 866 | let current = self 867 | .inner 868 | .leaf_for_key(LeafSearch::Eq(K::MIN.borrow()), &mut guard); 869 | 870 | Iter { 871 | guard, 872 | inner: &self.inner, 873 | range: (Bound::Unbounded, Bound::Included(key)), 874 | current, 875 | current_back, 876 | next_index: 0, 877 | next_index_from_back: 0, 878 | q: std::marker::PhantomData, 879 | } 880 | .next_back() 881 | } 882 | 883 | /// Atomically get a key and value out of the map that is associated with the key 884 | /// that is lexicographically greater than the provided key. 885 | /// 886 | /// # Examples 887 | /// ``` 888 | /// let map = concurrent_map::ConcurrentMap::::default(); 889 | /// 890 | /// map.insert(1, 1); 891 | /// 892 | /// let actual = map.get_gt(&0); 893 | /// let expected = Some((1, 1)); 894 | /// assert_eq!(expected, actual); 895 | /// 896 | /// let actual = map.get_gt(&1); 897 | /// let expected = None; 898 | /// assert_eq!(expected, actual); 899 | /// 900 | /// let actual = map.get_gt(&2); 901 | /// let expected = None; 902 | /// assert_eq!(expected, actual); 903 | /// ``` 904 | pub fn get_gt(&self, key: &Q) -> Option<(K, V)> 905 | where 906 | K: Borrow, 907 | Q: ?Sized + Ord + PartialEq, 908 | { 909 | self.range((Bound::Excluded(key), Bound::Unbounded)).next() 910 | } 911 | 912 | /// Atomically get a key and value out of the map that is associated with the key 913 | /// that is lexicographically greater than or equal to the provided key. 914 | /// 915 | /// # Examples 916 | /// ``` 917 | /// let map = concurrent_map::ConcurrentMap::::default(); 918 | /// 919 | /// map.insert(1, 1); 920 | /// 921 | /// let actual = map.get_gte(&0); 922 | /// let expected = Some((1, 1)); 923 | /// assert_eq!(expected, actual); 924 | /// 925 | /// let actual = map.get_gte(&1); 926 | /// let expected = Some((1, 1)); 927 | /// assert_eq!(expected, actual); 928 | /// 929 | /// let actual = map.get_gte(&2); 930 | /// let expected = None; 931 | /// assert_eq!(expected, actual); 932 | /// ``` 933 | pub fn get_gte(&self, key: &Q) -> Option<(K, V)> 934 | where 935 | K: Borrow, 936 | Q: ?Sized + Ord + PartialEq, 937 | { 938 | self.range((Bound::Included(key), Bound::Unbounded)).next() 939 | } 940 | 941 | /// Get the minimum item stored in this structure. 942 | /// 943 | /// # Examples 944 | /// ``` 945 | /// let map = concurrent_map::ConcurrentMap::::default(); 946 | /// 947 | /// map.insert(1, 1); 948 | /// map.insert(2, 2); 949 | /// map.insert(3, 3); 950 | /// 951 | /// let actual = map.first(); 952 | /// let expected = Some((1, 1)); 953 | /// assert_eq!(actual, expected); 954 | /// ``` 955 | pub fn first(&self) -> Option<(K, V)> { 956 | self.iter().next() 957 | } 958 | 959 | /// Atomically remove the minimum item stored in this structure. 960 | /// 961 | /// # Examples 962 | /// ``` 963 | /// let map = concurrent_map::ConcurrentMap::::default(); 964 | /// 965 | /// map.insert(1, 1); 966 | /// map.insert(2, 2); 967 | /// map.insert(3, 3); 968 | /// 969 | /// let actual = map.pop_first(); 970 | /// let expected = Some((1, 1)); 971 | /// assert_eq!(actual, expected); 972 | /// 973 | /// assert_eq!(map.get(&1), None); 974 | /// ``` 975 | pub fn pop_first(&self) -> Option<(K, V)> 976 | where 977 | V: PartialEq, 978 | { 979 | loop { 980 | let (k, v) = self.first()?; 981 | if self.cas(k.clone(), Some(&v), None).is_ok() { 982 | return Some((k, v)); 983 | } 984 | } 985 | } 986 | 987 | /// Pops the first kv pair in the provided range, or returns `None` if nothing 988 | /// exists within that range. 989 | /// 990 | /// # Panics 991 | /// 992 | /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN). 993 | /// 994 | /// # Examples 995 | /// 996 | /// ``` 997 | /// use concurrent_map::ConcurrentMap; 998 | /// 999 | /// let data = vec![ 1000 | /// ("key 1", 1), 1001 | /// ("key 2", 2), 1002 | /// ("key 3", 3) 1003 | /// ]; 1004 | /// 1005 | /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect(); 1006 | /// 1007 | /// let r1 = map.pop_first_in_range("key 1"..="key 3"); 1008 | /// assert_eq!(Some(("key 1", 1_usize)), r1); 1009 | /// 1010 | /// let r2 = map.pop_first_in_range("key 1".."key 3"); 1011 | /// assert_eq!(Some(("key 2", 2_usize)), r2); 1012 | /// 1013 | /// let r3: Vec<_> = map.range("key 4"..).collect(); 1014 | /// assert!(r3.is_empty()); 1015 | /// 1016 | /// let r4 = map.pop_first_in_range("key 2"..="key 3"); 1017 | /// assert_eq!(Some(("key 3", 3_usize)), r4); 1018 | /// ``` 1019 | pub fn pop_first_in_range(&self, range: R) -> Option<(K, V)> 1020 | where 1021 | R: std::ops::RangeBounds + Clone, 1022 | K: Borrow, 1023 | V: PartialEq, 1024 | Q: ?Sized + Ord + PartialEq, 1025 | { 1026 | loop { 1027 | let mut r = self.range(range.clone()); 1028 | let (k, v) = r.next()?; 1029 | if self.cas(k.clone(), Some(&v), None).is_ok() { 1030 | return Some((k, v)); 1031 | } 1032 | } 1033 | } 1034 | 1035 | /// Get the maximum item stored in this structure. 1036 | /// 1037 | /// # Examples 1038 | /// ``` 1039 | /// let map = concurrent_map::ConcurrentMap::::default(); 1040 | /// 1041 | /// map.insert(1, 1); 1042 | /// map.insert(2, 2); 1043 | /// map.insert(3, 3); 1044 | /// 1045 | /// let actual = map.last(); 1046 | /// let expected = Some((3, 3)); 1047 | /// assert_eq!(actual, expected); 1048 | /// ``` 1049 | pub fn last(&self) -> Option<(K, V)> { 1050 | self.iter().next_back() 1051 | } 1052 | 1053 | /// Atomically remove the maximum item stored in this structure. 1054 | /// 1055 | /// # Examples 1056 | /// ``` 1057 | /// let map = concurrent_map::ConcurrentMap::::default(); 1058 | /// 1059 | /// map.insert(1, 1); 1060 | /// map.insert(2, 2); 1061 | /// map.insert(3, 3); 1062 | /// 1063 | /// let actual = map.pop_last(); 1064 | /// let expected = Some((3, 3)); 1065 | /// assert_eq!(actual, expected); 1066 | /// 1067 | /// assert_eq!(map.get(&3), None); 1068 | /// ``` 1069 | pub fn pop_last(&self) -> Option<(K, V)> 1070 | where 1071 | V: PartialEq, 1072 | { 1073 | loop { 1074 | let (k, v) = self.last()?; 1075 | if self.cas(k.clone(), Some(&v), None).is_ok() { 1076 | return Some((k, v)); 1077 | } 1078 | } 1079 | } 1080 | 1081 | /// Pops the last kv pair in the provided range, or returns `None` if nothing 1082 | /// exists within that range. 1083 | /// 1084 | /// # Panics 1085 | /// 1086 | /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN). 1087 | /// 1088 | /// # Examples 1089 | /// 1090 | /// ``` 1091 | /// use concurrent_map::ConcurrentMap; 1092 | /// 1093 | /// let data = vec![ 1094 | /// ("key 1", 1), 1095 | /// ("key 2", 2), 1096 | /// ("key 3", 3) 1097 | /// ]; 1098 | /// 1099 | /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect(); 1100 | /// 1101 | /// let r1 = map.pop_last_in_range("key 1"..="key 3"); 1102 | /// assert_eq!(Some(("key 3", 3_usize)), r1); 1103 | /// 1104 | /// let r2 = map.pop_last_in_range("key 1".."key 3"); 1105 | /// assert_eq!(Some(("key 2", 2_usize)), r2); 1106 | /// 1107 | /// let r3 = map.pop_last_in_range("key 4"..); 1108 | /// assert!(r3.is_none()); 1109 | /// 1110 | /// let r4 = map.pop_last_in_range("key 2"..="key 3"); 1111 | /// assert!(r4.is_none()); 1112 | /// 1113 | /// let r5 = map.pop_last_in_range("key 0"..="key 3"); 1114 | /// assert_eq!(Some(("key 1", 1_usize)), r5); 1115 | /// 1116 | /// let r6 = map.pop_last_in_range("key 0"..="key 3"); 1117 | /// assert!(r6.is_none()); 1118 | /// ``` 1119 | pub fn pop_last_in_range(&self, range: R) -> Option<(K, V)> 1120 | where 1121 | R: std::ops::RangeBounds + Clone, 1122 | K: Borrow, 1123 | V: PartialEq, 1124 | Q: ?Sized + Ord + PartialEq, 1125 | { 1126 | loop { 1127 | let mut r = self.range(range.clone()); 1128 | let (k, v) = r.next_back()?; 1129 | if self.cas(k.clone(), Some(&v), None).is_ok() { 1130 | return Some((k, v)); 1131 | } 1132 | } 1133 | } 1134 | 1135 | /// Atomically insert a key-value pair into the map, returning the previous value associated with this key if one existed. 1136 | /// 1137 | /// This method has an optimization that skips lock-free RCU when the internal `Arc` has a 1138 | /// strong count of `1`, significantly increasing insertion throughput when used from a 1139 | /// single thread. 1140 | /// 1141 | /// # Examples 1142 | /// 1143 | /// ``` 1144 | /// let map = concurrent_map::ConcurrentMap::::default(); 1145 | /// 1146 | /// assert_eq!(map.insert(1, 1), None); 1147 | /// assert_eq!(map.insert(1, 1), Some(1)); 1148 | /// ``` 1149 | pub fn insert(&self, key: K, value: V) -> Option { 1150 | let strong_count = Arc::strong_count(&self.inner); 1151 | let direct_mutations_safe = strong_count == 1; 1152 | 1153 | // This optimization allows us to completely skip RCU. 1154 | // We use the debug_delay here to exercise both paths 1155 | // even when testing with a single thread. 1156 | if direct_mutations_safe && !debug_delay() { 1157 | let mut guard = self.ebr.pin(); 1158 | let mut leaf = self.inner.leaf_for_key(LeafSearch::Eq(&key), &mut guard); 1159 | let node_mut_ref: &mut Node = unsafe { leaf.get_mut() }; 1160 | assert!(!node_mut_ref.should_split(), "bad leaf: should split",); 1161 | 1162 | let ret = node_mut_ref.insert(key, value); 1163 | 1164 | if node_mut_ref.should_split() { 1165 | // don't need to track this for potential cleanup due to the fact that it's 1166 | // guaranteed to succeed. 1167 | node_mut_ref.split(); 1168 | } 1169 | 1170 | if ret.is_none() { 1171 | self.len.fetch_add(1, Ordering::Relaxed); 1172 | } 1173 | 1174 | return ret; 1175 | } 1176 | 1177 | // Concurrent workloads need to do the normal RCU loop. 1178 | loop { 1179 | let mut guard = self.ebr.pin(); 1180 | let leaf = self.inner.leaf_for_key(LeafSearch::Eq(&key), &mut guard); 1181 | 1182 | let mut leaf_clone: Box> = Box::new((*leaf).clone()); 1183 | assert!(!leaf_clone.should_split(), "bad leaf: should split",); 1184 | 1185 | let ret = leaf_clone.insert(key.clone(), value.clone()); 1186 | 1187 | let rhs_ptr_opt = if leaf_clone.should_split() { 1188 | Some(leaf_clone.split()) 1189 | } else { 1190 | None 1191 | }; 1192 | 1193 | let install_attempt = leaf.cas(leaf_clone, &mut guard); 1194 | 1195 | if install_attempt.is_ok() { 1196 | if ret.is_none() { 1197 | self.len.fetch_add(1, Ordering::Relaxed); 1198 | } 1199 | return ret; 1200 | } else if let Some(new_ptr) = rhs_ptr_opt { 1201 | // clear dangling BoxedAtomicPtr (cas freed the pointee already) 1202 | let reclaimed_ptr: Box>> = 1203 | unsafe { Box::from_raw(new_ptr.0 as *mut _) }; 1204 | 1205 | let _dropping_reclaimed_rhs: Box> = 1206 | unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) }; 1207 | } 1208 | } 1209 | } 1210 | 1211 | /// Atomically remove the value associated with this key from the map, returning the previous value if one existed. 1212 | /// 1213 | /// # Examples 1214 | /// 1215 | /// ``` 1216 | /// let map = concurrent_map::ConcurrentMap::::default(); 1217 | /// 1218 | /// assert_eq!(map.remove(&1), None); 1219 | /// assert_eq!(map.insert(1, 1), None); 1220 | /// assert_eq!(map.remove(&1), Some(1)); 1221 | /// ``` 1222 | pub fn remove(&self, key: &Q) -> Option 1223 | where 1224 | K: Borrow, 1225 | Q: Ord + ?Sized, 1226 | { 1227 | loop { 1228 | let mut guard = self.ebr.pin(); 1229 | let leaf = self.inner.leaf_for_key(LeafSearch::Eq(key), &mut guard); 1230 | let mut leaf_clone: Box> = Box::new((*leaf).clone()); 1231 | let ret = leaf_clone.remove(key); 1232 | let install_attempt = leaf.cas(leaf_clone, &mut guard); 1233 | if install_attempt.is_ok() { 1234 | if ret.is_some() { 1235 | self.len.fetch_sub(1, Ordering::Relaxed); 1236 | } 1237 | return ret; 1238 | } 1239 | } 1240 | } 1241 | 1242 | /// Atomically compare and swap the value associated with this key from the old value to the 1243 | /// new one. An old value of `None` means "only create this value if it does not already 1244 | /// exist". A new value of `None` means "delete this value, if it matches the provided old value". 1245 | /// If successful, returns the old value if it existed. If unsuccessful, returns both the proposed 1246 | /// new value that failed to be installed as well as the current actual value in a [`CasFailure`] 1247 | /// struct. 1248 | /// 1249 | /// # Examples 1250 | /// 1251 | /// ``` 1252 | /// let map = concurrent_map::ConcurrentMap::::default(); 1253 | /// 1254 | /// // key 1 does not yet exist 1255 | /// assert_eq!(map.get(&1), None); 1256 | /// 1257 | /// // uniquely create value 10 1258 | /// map.cas(1, None, Some(10)).unwrap(); 1259 | /// 1260 | /// assert_eq!(map.get(&1).unwrap(), 10); 1261 | /// 1262 | /// // compare and swap from value 10 to value 20 1263 | /// map.cas(1, Some(&10_usize), Some(20)).unwrap(); 1264 | /// 1265 | /// assert_eq!(map.get(&1).unwrap(), 20); 1266 | /// 1267 | /// // if we guess the wrong current value, a CasFailure is returned 1268 | /// // which will tell us what the actual current value is (which we 1269 | /// // failed to provide) and it will give us back our proposed new 1270 | /// // value. 1271 | /// let cas_result = map.cas(1, Some(&999999_usize), Some(30)); 1272 | /// 1273 | /// let expected_cas_failure = Err(concurrent_map::CasFailure { 1274 | /// actual: Some(20), 1275 | /// returned_new_value: Some(30), 1276 | /// }); 1277 | /// 1278 | /// assert_eq!(cas_result, expected_cas_failure); 1279 | /// 1280 | /// // conditionally delete 1281 | /// map.cas(1, Some(&20_usize), None).unwrap(); 1282 | /// 1283 | /// assert_eq!(map.get(&1), None); 1284 | /// ``` 1285 | pub fn cas( 1286 | &self, 1287 | key: K, 1288 | old: Option<&VRef>, 1289 | new: Option, 1290 | ) -> Result, CasFailure> 1291 | where 1292 | V: Borrow, 1293 | VRef: PartialEq + ?Sized, 1294 | { 1295 | loop { 1296 | let mut guard = self.ebr.pin(); 1297 | let leaf = self.inner.leaf_for_key(LeafSearch::Eq(&key), &mut guard); 1298 | let mut leaf_clone: Box> = Box::new((*leaf).clone()); 1299 | let ret = leaf_clone.cas(key.clone(), old, new.clone()); 1300 | 1301 | let rhs_ptr_opt = if leaf_clone.should_split() { 1302 | Some(leaf_clone.split()) 1303 | } else { 1304 | None 1305 | }; 1306 | 1307 | let install_attempt = leaf.cas(leaf_clone, &mut guard); 1308 | 1309 | if install_attempt.is_ok() { 1310 | if matches!(ret, Ok(Some(_))) && new.is_none() { 1311 | self.len.fetch_sub(1, Ordering::Relaxed); 1312 | } else if matches!(ret, Ok(None)) && new.is_some() { 1313 | self.len.fetch_add(1, Ordering::Relaxed); 1314 | } 1315 | return ret; 1316 | } else if let Some(new_ptr) = rhs_ptr_opt { 1317 | // clear dangling BoxedAtomicPtr (cas freed pointee already) 1318 | let reclaimed_ptr: Box>> = 1319 | unsafe { Box::from_raw(new_ptr.0 as *mut _) }; 1320 | 1321 | let _dropping_reclaimed_rhs: Box> = 1322 | unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) }; 1323 | } 1324 | } 1325 | } 1326 | 1327 | /// A **lagging**, eventually-consistent length count. This is NOT atomically 1328 | /// updated with [`insert`] / [`remove`] / [`cas`], but is updated after those 1329 | /// operations complete their atomic modifications to the shared map. 1330 | pub fn len(&self) -> usize { 1331 | self.len.load(Ordering::Relaxed) 1332 | } 1333 | 1334 | /// A **lagging**, eventually-consistent check for emptiness, based on the correspondingly 1335 | /// non-atomic `len` method. 1336 | pub fn is_empty(&self) -> bool { 1337 | self.len() == 0 1338 | } 1339 | 1340 | /// Iterate over the map. 1341 | /// 1342 | /// This is not an atomic snapshot, and it caches B+tree leaf 1343 | /// nodes as it iterates through them to achieve high throughput. 1344 | /// As a result, the following behaviors are possible: 1345 | /// 1346 | /// * may (or may not!) return values that were concurrently added to the map after the 1347 | /// iterator was created 1348 | /// * may (or may not!) return items that were concurrently deleted from the map after 1349 | /// the iterator was created 1350 | /// * If a key's value is changed from one value to another one after this iterator 1351 | /// is created, this iterator might return the old or the new value. 1352 | /// 1353 | /// But, you can be certain that any key that existed prior to the creation of this 1354 | /// iterator, and was not changed during iteration, will be observed as expected. 1355 | /// 1356 | /// # Examples 1357 | /// 1358 | /// ``` 1359 | /// use concurrent_map::ConcurrentMap; 1360 | /// 1361 | /// let data = vec![ 1362 | /// ("key 1", 1), 1363 | /// ("key 2", 2), 1364 | /// ("key 3", 3) 1365 | /// ]; 1366 | /// 1367 | /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect(); 1368 | /// 1369 | /// let r: Vec<_> = map.iter().collect(); 1370 | /// 1371 | /// assert_eq!(&data, &r); 1372 | /// ``` 1373 | pub fn iter(&self) -> Iter<'_, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE> { 1374 | let mut guard = self.ebr.pin(); 1375 | 1376 | let current = self.inner.leaf_for_key(LeafSearch::Eq(&K::MIN), &mut guard); 1377 | let current_back = self.inner.leaf_for_key(LeafSearch::Max, &mut guard); 1378 | let next_index_from_back = 0; 1379 | 1380 | Iter { 1381 | guard, 1382 | inner: &self.inner, 1383 | current, 1384 | range: std::ops::RangeFull, 1385 | next_index: 0, 1386 | current_back, 1387 | next_index_from_back, 1388 | q: std::marker::PhantomData, 1389 | } 1390 | } 1391 | 1392 | /// Iterate over a range of the map. 1393 | /// 1394 | /// This is not an atomic snapshot, and it caches B+tree leaf 1395 | /// nodes as it iterates through them to achieve high throughput. 1396 | /// As a result, the following behaviors are possible: 1397 | /// 1398 | /// * may (or may not!) return values that were concurrently added to the map after the 1399 | /// iterator was created 1400 | /// * may (or may not!) return items that were concurrently deleted from the map after 1401 | /// the iterator was created 1402 | /// * If a key's value is changed from one value to another one after this iterator 1403 | /// is created, this iterator might return the old or the new value. 1404 | /// 1405 | /// But, you can be certain that any key that existed prior to the creation of this 1406 | /// iterator, and was not changed during iteration, will be observed as expected. 1407 | /// 1408 | /// # Panics 1409 | /// 1410 | /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN). 1411 | /// 1412 | /// # Examples 1413 | /// 1414 | /// ``` 1415 | /// use concurrent_map::ConcurrentMap; 1416 | /// 1417 | /// let data = vec![ 1418 | /// ("key 1", 1), 1419 | /// ("key 2", 2), 1420 | /// ("key 3", 3) 1421 | /// ]; 1422 | /// 1423 | /// let map: ConcurrentMap<&'static str, usize> = data.iter().copied().collect(); 1424 | /// 1425 | /// let r1: Vec<_> = map.range("key 1"..="key 3").collect(); 1426 | /// assert_eq!(&data, &r1); 1427 | /// 1428 | /// let r2: Vec<_> = map.range("key 1".."key 3").collect(); 1429 | /// assert_eq!(&data[..2], &r2); 1430 | /// 1431 | /// let r3: Vec<_> = map.range("key 2"..="key 3").collect(); 1432 | /// assert_eq!(&data[1..], &r3); 1433 | /// 1434 | /// let r4: Vec<_> = map.range("key 4"..).collect(); 1435 | /// assert!(r4.is_empty()); 1436 | /// ``` 1437 | pub fn range(&self, range: R) -> Iter<'_, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, R, Q> 1438 | where 1439 | R: std::ops::RangeBounds, 1440 | K: Borrow, 1441 | Q: ?Sized + Ord + PartialEq, 1442 | { 1443 | let mut guard = self.ebr.pin(); 1444 | 1445 | let kmin = &K::MIN; 1446 | let min = kmin.borrow(); 1447 | let start = match range.start_bound() { 1448 | Bound::Unbounded => min, 1449 | Bound::Included(k) | Bound::Excluded(k) => k, 1450 | }; 1451 | 1452 | let end = match range.end_bound() { 1453 | Bound::Unbounded => LeafSearch::Max, 1454 | Bound::Included(k) => LeafSearch::Eq(k), 1455 | Bound::Excluded(k) => { 1456 | assert!(k != K::MIN.borrow()); 1457 | LeafSearch::Lt(k) 1458 | } 1459 | }; 1460 | 1461 | let current = self.inner.leaf_for_key(LeafSearch::Eq(start), &mut guard); 1462 | let current_back = self.inner.leaf_for_key(end, &mut guard); 1463 | 1464 | Iter { 1465 | guard, 1466 | inner: &self.inner, 1467 | range, 1468 | current, 1469 | current_back, 1470 | next_index: 0, 1471 | next_index_from_back: 0, 1472 | q: std::marker::PhantomData, 1473 | } 1474 | } 1475 | 1476 | /// Fetch the value, apply a function to it and return the result. 1477 | /// Similar to [`ConcurrentMap::cas`], returning a `None` from the provided 1478 | /// closure will cause a deletion of the value. 1479 | /// 1480 | /// # Note 1481 | /// 1482 | /// This may call the function multiple times if the value has been 1483 | /// changed from other threads in the meantime. 1484 | /// This function essentially implements the common CAS loop pattern 1485 | /// for atomically pushing a function to some shared data. 1486 | /// 1487 | /// # Examples 1488 | /// 1489 | /// ``` 1490 | /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default(); 1491 | /// 1492 | /// fn increment(old_opt: Option<&usize>) -> Option { 1493 | /// let incremented = match old_opt { 1494 | /// Some(old) => { 1495 | /// old + 1 1496 | /// } 1497 | /// None => 0, 1498 | /// }; 1499 | /// 1500 | /// // returning `None` here means "delete this value" 1501 | /// Some(incremented) 1502 | /// } 1503 | /// 1504 | /// assert_eq!(map.update_and_fetch("counter", increment), Some(0)); 1505 | /// assert_eq!(map.update_and_fetch("counter", increment), Some(1)); 1506 | /// assert_eq!(map.update_and_fetch("counter", increment), Some(2)); 1507 | /// assert_eq!(map.update_and_fetch("counter", increment), Some(3)); 1508 | /// 1509 | /// // push a "deletion" that returns None 1510 | /// assert_eq!(map.update_and_fetch("counter", |_| None), None); 1511 | /// ``` 1512 | pub fn update_and_fetch(&self, key: K, mut f: F) -> Option 1513 | where 1514 | F: FnMut(Option<&V>) -> Option, 1515 | V: PartialEq, 1516 | { 1517 | let mut current_opt = self.get(&key); 1518 | 1519 | loop { 1520 | let next = f(current_opt.as_ref()); 1521 | match self.cas(key.clone(), current_opt.as_ref(), next.clone()) { 1522 | Ok(_) => return next, 1523 | Err(CasFailure { actual: cur, .. }) => { 1524 | current_opt = cur; 1525 | } 1526 | } 1527 | } 1528 | } 1529 | 1530 | /// Fetch the value, apply a function to it and return the previous value. 1531 | /// Similar to [`ConcurrentMap::cas`], returning a `None` from the provided 1532 | /// closure will cause a deletion of the value. 1533 | /// 1534 | /// # Note 1535 | /// 1536 | /// This may call the function multiple times if the value has been 1537 | /// changed from other threads in the meantime. 1538 | /// This function essentially implements the common CAS loop pattern 1539 | /// for atomically pushing a function to some shared data. 1540 | /// 1541 | /// # Examples 1542 | /// 1543 | /// ``` 1544 | /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default(); 1545 | /// 1546 | /// fn increment(old_opt: Option<&usize>) -> Option { 1547 | /// let incremented = match old_opt { 1548 | /// Some(old) => { 1549 | /// old + 1 1550 | /// } 1551 | /// None => 0, 1552 | /// }; 1553 | /// 1554 | /// // returning `None` here means "delete this value" 1555 | /// Some(incremented) 1556 | /// } 1557 | /// 1558 | /// assert_eq!(map.fetch_and_update("counter", increment), None); 1559 | /// assert_eq!(map.fetch_and_update("counter", increment), Some(0)); 1560 | /// assert_eq!(map.fetch_and_update("counter", increment), Some(1)); 1561 | /// assert_eq!(map.fetch_and_update("counter", increment), Some(2)); 1562 | /// 1563 | /// // push a "deletion" that returns the previous value, essentially 1564 | /// // mimicking the ConcurrentMap::remove method. 1565 | /// assert_eq!(map.fetch_and_update("counter", |_| None), Some(3)); 1566 | /// 1567 | /// // verify that it's not present 1568 | /// assert_eq!(map.get("counter"), None); 1569 | /// ``` 1570 | pub fn fetch_and_update(&self, key: K, mut f: F) -> Option 1571 | where 1572 | F: FnMut(Option<&V>) -> Option, 1573 | V: PartialEq, 1574 | { 1575 | let mut current_opt = self.get(&key); 1576 | 1577 | loop { 1578 | let next = f(current_opt.as_ref()); 1579 | match self.cas(key.clone(), current_opt.as_ref(), next) { 1580 | Ok(_) => return current_opt, 1581 | Err(CasFailure { actual: cur, .. }) => { 1582 | current_opt = cur; 1583 | } 1584 | } 1585 | } 1586 | } 1587 | } 1588 | 1589 | // This impl block is for `fetch_max` and `fetch_min` operations on 1590 | // values that implement `Ord`. 1591 | impl 1592 | ConcurrentMap 1593 | where 1594 | K: 'static + Clone + Minimum + Ord + Send + Sync, 1595 | V: 'static + Clone + Send + Sync + Ord, 1596 | { 1597 | /// Similar to [`std::sync::atomic::AtomicU64::fetch_min`] in spirit, this 1598 | /// atomically sets the value to the minimum of the 1599 | /// previous value and the provided value. 1600 | /// 1601 | /// The previous value is returned. None is returned if 1602 | /// there was no previous value, in which case the 1603 | /// value is set to the provided value. The value is 1604 | /// unchanged if the current value is already lower 1605 | /// than the provided value. 1606 | /// 1607 | /// # Examples 1608 | /// 1609 | /// ``` 1610 | /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default(); 1611 | /// 1612 | /// // acts as an insertion if no value is present 1613 | /// assert_eq!(map.fetch_min("key 1", 5), None); 1614 | /// 1615 | /// // sets the value to the new lower value, returns the old value 1616 | /// assert_eq!(map.fetch_min("key 1", 2), Some(5)); 1617 | /// 1618 | /// // fails to set the value to a lower number, returns the 1619 | /// // current value. 1620 | /// assert_eq!(map.fetch_min("key 1", 10), Some(2)); 1621 | /// 1622 | /// ``` 1623 | pub fn fetch_min(&self, key: K, value: V) -> Option { 1624 | let f = move |prev_opt: Option<&V>| { 1625 | if let Some(prev) = prev_opt { 1626 | Some(prev.min(&value).clone()) 1627 | } else { 1628 | Some(value.clone()) 1629 | } 1630 | }; 1631 | 1632 | self.fetch_and_update(key, f) 1633 | } 1634 | 1635 | /// Similar to [`std::sync::atomic::AtomicU64::fetch_max`] in spirit, this 1636 | /// atomically sets the value to the maximum of the 1637 | /// previous value and the provided value. 1638 | /// 1639 | /// The previous value is returned. None is returned if 1640 | /// there was no previous value, in which case the 1641 | /// value is set to the provided value. The value is 1642 | /// unchanged if the current value is already higher 1643 | /// than the provided value. 1644 | /// 1645 | /// # Examples 1646 | /// 1647 | /// ``` 1648 | /// let map = concurrent_map::ConcurrentMap::<&'static str, usize>::default(); 1649 | /// 1650 | /// // acts as an insertion if no value is present 1651 | /// assert_eq!(map.fetch_max("key 1", 5), None); 1652 | /// 1653 | /// // sets the value to the new higher value, returns the old value 1654 | /// assert_eq!(map.fetch_max("key 1", 10), Some(5)); 1655 | /// 1656 | /// // fails to set the value to a higher number, returns the 1657 | /// // current value. 1658 | /// assert_eq!(map.fetch_max("key 1", 2), Some(10)); 1659 | /// 1660 | /// ``` 1661 | pub fn fetch_max(&self, key: K, value: V) -> Option { 1662 | let f = move |prev_opt: Option<&V>| { 1663 | if let Some(prev) = prev_opt { 1664 | Some(prev.max(&value).clone()) 1665 | } else { 1666 | Some(value.clone()) 1667 | } 1668 | }; 1669 | 1670 | self.fetch_and_update(key, f) 1671 | } 1672 | } 1673 | 1674 | /// An iterator over a [`ConcurrentMap`]. Note that this is 1675 | /// not an atomic snapshot of the overall shared state, but 1676 | /// it will contain any data that existed before the iterator 1677 | /// was created. 1678 | /// 1679 | /// Note that this iterator contains an epoch-based reclamation 1680 | /// guard, and the overall concurrent structure will be unable 1681 | /// to free any memory until this iterator drops again. 1682 | /// 1683 | /// There are a lot of generics on this struct. Most of them directly 1684 | /// correspond to the generics of the [`ConcurrentMap`] itself. But 1685 | /// there are two that don't: 1686 | /// 1687 | /// * `R` is The type of the range that is stored in the iterator 1688 | /// * `Q` is the type that exists INSIDE of `R` 1689 | /// 1690 | /// So, if an `Iter` is created from: 1691 | /// 1692 | /// ``` 1693 | /// let map = concurrent_map::ConcurrentMap::::default(); 1694 | /// let start = std::ops::Bound::Excluded(0_usize); 1695 | /// let end = std::ops::Bound::Included(5_usize); 1696 | /// let iter = map.range((start, end)); 1697 | /// ``` 1698 | /// 1699 | /// then the type of `R` is `(std::ops::Bound, std::ops::Bound)` 1700 | /// (a 2-tuple of `std::ops::Bound`), and the type of `Q` is `usize`. 1701 | pub struct Iter< 1702 | 'a, 1703 | K, 1704 | V, 1705 | const FANOUT: usize, 1706 | const LOCAL_GC_BUFFER_SIZE: usize, 1707 | R = std::ops::RangeFull, 1708 | Q = K, 1709 | > where 1710 | K: 'static + Clone + Minimum + Ord + Send + Sync, 1711 | V: 'static + Clone + Send + Sync, 1712 | R: std::ops::RangeBounds, 1713 | K: Borrow, 1714 | Q: ?Sized, 1715 | { 1716 | inner: &'a Inner, 1717 | guard: Guard<'a, Deferred, LOCAL_GC_BUFFER_SIZE>, 1718 | range: R, 1719 | current: NodeView, 1720 | next_index: usize, 1721 | current_back: NodeView, 1722 | next_index_from_back: usize, 1723 | q: std::marker::PhantomData<&'a Q>, 1724 | } 1725 | 1726 | impl<'a, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize, R, Q> Iterator 1727 | for Iter<'a, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, R, Q> 1728 | where 1729 | K: 'static + Clone + Minimum + Ord + Send + Sync, 1730 | V: 'static + Clone + Send + Sync, 1731 | R: std::ops::RangeBounds, 1732 | K: Borrow, 1733 | Q: ?Sized + PartialEq + Ord, 1734 | { 1735 | type Item = (K, V); 1736 | 1737 | fn next(&mut self) -> Option { 1738 | loop { 1739 | if let Some((k, v)) = self.current.leaf().get_index(self.next_index) { 1740 | // iterate over current cached b+ tree leaf node 1741 | self.next_index += 1; 1742 | if !self.range.contains(k.borrow()) { 1743 | // we might hit this on the first iteration 1744 | continue; 1745 | } 1746 | return Some((k.clone(), v.clone())); 1747 | } else if let Some(next_ptr) = self.current.next { 1748 | if !self 1749 | .range 1750 | .contains(self.current.hi.as_ref().unwrap().borrow()) 1751 | { 1752 | // we have reached the end of our range 1753 | return None; 1754 | } 1755 | if let Some(next_current) = next_ptr.node_view(&mut self.guard) { 1756 | // we were able to take the fast path by following the sibling pointer 1757 | 1758 | // it's possible that nodes were merged etc... so we need to make sure 1759 | // that we make forward progress 1760 | self.next_index = next_current 1761 | .leaf() 1762 | .iter() 1763 | .position(|(k, _v)| k >= self.current.hi.as_ref().unwrap()) 1764 | .unwrap_or(0); 1765 | 1766 | self.current = next_current; 1767 | } else if let Some(ref hi) = self.current.hi { 1768 | // we have to take the slow path by traversing the 1769 | // map due to a concurrent merge that deleted the 1770 | // right sibling. we are protected from a use after 1771 | // free of the ID itself due to holding an ebr Guard 1772 | // on the Iter struct, holding a barrier against re-use. 1773 | let next_current = self 1774 | .inner 1775 | .leaf_for_key(LeafSearch::Eq(hi.borrow()), &mut self.guard); 1776 | 1777 | // it's possible that nodes were merged etc... so we need to make sure 1778 | // that we make forward progress 1779 | self.next_index = next_current 1780 | .leaf() 1781 | .iter() 1782 | .position(|(k, _v)| k >= hi) 1783 | .unwrap_or(0); 1784 | self.current = next_current; 1785 | } else { 1786 | panic!("somehow hit a node that has a next but not a hi key"); 1787 | } 1788 | } else { 1789 | // end of the collection 1790 | return None; 1791 | } 1792 | } 1793 | } 1794 | } 1795 | 1796 | impl<'a, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize, R, Q> DoubleEndedIterator 1797 | for Iter<'a, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, R, Q> 1798 | where 1799 | K: 'static + Clone + Minimum + Ord + Send + Sync, 1800 | V: 'static + Clone + Send + Sync, 1801 | R: std::ops::RangeBounds, 1802 | K: Borrow, 1803 | Q: ?Sized + PartialEq + Ord, 1804 | { 1805 | fn next_back(&mut self) -> Option { 1806 | loop { 1807 | if self.next_index_from_back >= self.current_back.leaf().len() { 1808 | if !self.range.contains(self.current_back.lo.borrow()) 1809 | || self.current_back.lo == K::MIN 1810 | { 1811 | // finished 1812 | return None; 1813 | } 1814 | 1815 | let next_current_back = self.inner.leaf_for_key( 1816 | LeafSearch::Lt(self.current_back.lo.borrow()), 1817 | &mut self.guard, 1818 | ); 1819 | assert!(next_current_back.lo != self.current_back.lo); 1820 | 1821 | self.next_index_from_back = next_current_back 1822 | .leaf() 1823 | .iter() 1824 | .rev() 1825 | .position(|(k, _v)| k < &self.current_back.lo) 1826 | .unwrap_or(0); 1827 | 1828 | self.current_back = next_current_back; 1829 | 1830 | if self.current_back.leaf().is_empty() { 1831 | continue; 1832 | } 1833 | } 1834 | 1835 | let offset_to_return = self.current_back.leaf().len() - (1 + self.next_index_from_back); 1836 | let (k, v) = self 1837 | .current_back 1838 | .leaf() 1839 | .get_index(offset_to_return) 1840 | .unwrap(); 1841 | 1842 | self.next_index_from_back += 1; 1843 | if !self.range.contains(k.borrow()) { 1844 | continue; 1845 | } else { 1846 | return Some((k.clone(), v.clone())); 1847 | } 1848 | } 1849 | } 1850 | } 1851 | 1852 | enum LeafSearch { 1853 | // For finding a leaf that would contain this key, if present. 1854 | Eq(K), 1855 | // For finding the direct left sibling of a node during reverse 1856 | // iteration. The actual semantic is to find a leaf that has a lo key 1857 | // that is less than K and a hi key that is >= K 1858 | Lt(K), 1859 | Max, 1860 | } 1861 | 1862 | impl 1863 | Inner 1864 | where 1865 | K: 'static + Clone + Minimum + Ord + Send + Sync, 1866 | V: 'static + Clone + Send + Sync, 1867 | { 1868 | fn root( 1869 | &self, 1870 | _guard: &mut Guard<'_, Deferred, LOCAL_GC_BUFFER_SIZE>, 1871 | ) -> NodeView { 1872 | loop { 1873 | if let Some(ptr) = NonNull::new(self.root.load(Ordering::Acquire)) { 1874 | return NodeView { ptr, id: self.root }; 1875 | } 1876 | } 1877 | } 1878 | 1879 | // lock-free merging: 1880 | // 1. try to mark the parent's merging_child 1881 | // a. must not be the left-most child 1882 | // b. if unsuccessful, give up 1883 | // 2. mark the child as merging 1884 | // 3. find the left sibling 1885 | // 4. cas the left sibling to eat the right sibling 1886 | // a. loop until successful 1887 | // b. go right if the left-most child split and no-longer points to merging child 1888 | // c. split the new larger left sibling if it is at the split threshold 1889 | // 5. cas the parent to remove the merged child 1890 | // 6. remove the child's pointer in the page table 1891 | // 7. defer the reclamation of the BoxedAtomicPtr 1892 | // 8. defer putting the child's ID into the free stack 1893 | // 1894 | // merge threshold must be >= 1, because otherwise index nodes with 1 empty 1895 | // child will never be compactible. 1896 | 1897 | fn install_parent_merge<'a>( 1898 | &'a self, 1899 | parent: &NodeView, 1900 | child: &NodeView, 1901 | guard: &mut Guard<'a, Deferred, LOCAL_GC_BUFFER_SIZE>, 1902 | ) -> Result, ()> { 1903 | // 1. try to mark the parent's merging_child 1904 | // a. must not be the left-most child 1905 | // b. if unsuccessful, give up 1906 | let is_leftmost_child = parent.index().get_index(0).unwrap().0 == child.lo; 1907 | 1908 | if is_leftmost_child { 1909 | return Err(()); 1910 | } 1911 | 1912 | if !parent.index().contains_key(&child.lo) { 1913 | // can't install a parent merge if the child is unknown to the parent. 1914 | return Err(()); 1915 | } 1916 | 1917 | if parent.merging_child.is_some() { 1918 | // there is already a merge in-progress for a different child. 1919 | return Err(()); 1920 | } 1921 | 1922 | let mut parent_clone: Box> = Box::new((*parent).clone()); 1923 | parent_clone.merging_child = Some(child.id); 1924 | parent.cas(parent_clone, guard).map_err(|_| ()) 1925 | } 1926 | 1927 | fn merge_child<'a>( 1928 | &'a self, 1929 | parent: &mut NodeView, 1930 | child: &mut NodeView, 1931 | guard: &mut Guard<'a, Deferred, LOCAL_GC_BUFFER_SIZE>, 1932 | ) { 1933 | // 2. mark child as merging 1934 | while !child.is_merging { 1935 | let mut child_clone: Box> = Box::new((*child).clone()); 1936 | child_clone.is_merging = true; 1937 | *child = match child.cas(child_clone, guard) { 1938 | Ok(new_child) | Err(Some(new_child)) => new_child, 1939 | Err(None) => { 1940 | // child already removed 1941 | return; 1942 | } 1943 | }; 1944 | } 1945 | 1946 | // 3. find the left sibling 1947 | let first_left_sibling_guess = parent 1948 | .index() 1949 | .iter() 1950 | .filter(|(k, _v)| (..&child.lo).contains(&k)) 1951 | .next_back() 1952 | .unwrap() 1953 | .1; 1954 | 1955 | let mut left_sibling = if let Some(view) = first_left_sibling_guess.node_view(guard) { 1956 | view 1957 | } else { 1958 | // the merge already completed and this left sibling has already also been merged 1959 | return; 1960 | }; 1961 | 1962 | loop { 1963 | if left_sibling.next.is_none() { 1964 | // the merge completed and the left sibling became the infinity node in the mean 1965 | // time 1966 | return; 1967 | } 1968 | 1969 | if child.hi.is_some() && left_sibling.hi.is_some() && left_sibling.hi >= child.hi { 1970 | // step 4 happened concurrently 1971 | break; 1972 | } 1973 | 1974 | let next = left_sibling.next.unwrap(); 1975 | if next != child.id { 1976 | left_sibling = if let Some(view) = next.node_view(guard) { 1977 | view 1978 | } else { 1979 | // the merge already completed and this left sibling has already also been merged 1980 | return; 1981 | }; 1982 | continue; 1983 | } 1984 | 1985 | // 4. cas the left sibling to eat the right sibling 1986 | // a. loop until successful 1987 | // b. go right if the left-most child split and no-longer points to merging child 1988 | // c. split the new larger left sibling if it is at the split threshold 1989 | let mut left_sibling_clone: Box> = Box::new((*left_sibling).clone()); 1990 | left_sibling_clone.merge(child); 1991 | 1992 | let rhs_ptr_opt = if left_sibling_clone.should_split() { 1993 | // we have to try to split the sibling, funny enough. 1994 | // this is the consequence of using fixed-size arrays 1995 | // for storing items with no flexibility. 1996 | 1997 | Some(left_sibling_clone.split()) 1998 | } else { 1999 | None 2000 | }; 2001 | 2002 | let cas_result = left_sibling.cas(left_sibling_clone, guard); 2003 | if let (Err(_), Some(rhs_ptr)) = (&cas_result, rhs_ptr_opt) { 2004 | // We need to free the split right sibling that we installed 2005 | let reclaimed_ptr: Box>> = 2006 | unsafe { Box::from_raw(rhs_ptr.0 as *mut _) }; 2007 | 2008 | let _dropping_reclaimed_rhs: Box> = 2009 | unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) }; 2010 | } 2011 | 2012 | match cas_result { 2013 | Ok(_) => { 2014 | break; 2015 | } 2016 | Err(Some(actual)) => left_sibling = actual, 2017 | Err(None) => { 2018 | return; 2019 | } 2020 | } 2021 | } 2022 | 2023 | // 5. cas the parent to remove the merged child 2024 | while parent.merging_child == Some(child.id) { 2025 | let mut parent_clone: Box> = Box::new((*parent).clone()); 2026 | 2027 | assert!(parent_clone.merging_child.is_some()); 2028 | assert!(parent_clone.index().contains_key(&child.lo)); 2029 | 2030 | parent_clone.merging_child = None; 2031 | parent_clone.index_mut().remove(&child.lo).unwrap(); 2032 | 2033 | let cas_result = parent.cas(parent_clone, guard); 2034 | match cas_result { 2035 | Ok(new_parent) | Err(Some(new_parent)) => *parent = new_parent, 2036 | Err(None) => { 2037 | return; 2038 | } 2039 | } 2040 | } 2041 | 2042 | // 6. remove the child's pointer in the page table 2043 | if child 2044 | .id 2045 | .compare_exchange( 2046 | child.ptr.as_ptr(), 2047 | std::ptr::null_mut(), 2048 | Ordering::AcqRel, 2049 | Ordering::Acquire, 2050 | ) 2051 | .is_err() 2052 | { 2053 | // only the thread that uninstalls this pointer gets to 2054 | // mark resources for reuse. 2055 | return; 2056 | } 2057 | 2058 | // 7. defer the reclamation of the BoxedAtomicPtr 2059 | guard.defer_drop(Deferred::BoxedAtomicPtr(child.id)); 2060 | 2061 | // 8. defer the reclamation of the child node 2062 | let replaced: Box> = unsafe { Box::from_raw(child.ptr.as_ptr()) }; 2063 | guard.defer_drop(Deferred::Node(replaced)); 2064 | } 2065 | 2066 | #[cfg(feature = "timing")] 2067 | fn print_timing(&self) { 2068 | println!( 2069 | "min : {:?}", 2070 | Duration::from_nanos(self.fastest_op.load(Ordering::Acquire)) 2071 | ); 2072 | println!( 2073 | "max : {:?}", 2074 | Duration::from_nanos(self.slowest_op.load(Ordering::Acquire)) 2075 | ); 2076 | } 2077 | 2078 | #[cfg(feature = "timing")] 2079 | fn record_timing(&self, time: Duration) { 2080 | let nanos = time.as_nanos() as u64; 2081 | let min = self.fastest_op.load(Ordering::Relaxed); 2082 | if nanos < min { 2083 | self.fastest_op.fetch_min(nanos, Ordering::Relaxed); 2084 | } 2085 | 2086 | let max = self.slowest_op.load(Ordering::Relaxed); 2087 | if nanos > max { 2088 | self.slowest_op.fetch_max(nanos, Ordering::Relaxed); 2089 | } 2090 | } 2091 | 2092 | fn leaf_for_key<'a, Q>( 2093 | &'a self, 2094 | search: LeafSearch<&Q>, 2095 | guard: &mut Guard<'a, Deferred, LOCAL_GC_BUFFER_SIZE>, 2096 | ) -> NodeView 2097 | where 2098 | K: Borrow, 2099 | Q: Ord + ?Sized, 2100 | { 2101 | let mut parent_cursor_opt: Option> = None; 2102 | let mut cursor = self.root(guard); 2103 | let mut root_cursor = NodeView { 2104 | ptr: cursor.ptr, 2105 | id: cursor.id, 2106 | }; 2107 | 2108 | macro_rules! reset { 2109 | ($reason:expr) => { 2110 | // println!("resetting because of {:?}", $reason); 2111 | parent_cursor_opt = None; 2112 | cursor = self.root(guard); 2113 | root_cursor = NodeView { 2114 | ptr: cursor.ptr, 2115 | id: cursor.id, 2116 | }; 2117 | continue; 2118 | }; 2119 | } 2120 | 2121 | #[cfg(feature = "timing")] 2122 | let before = Instant::now(); 2123 | 2124 | loop { 2125 | if let Some(merging_child_ptr) = cursor.merging_child { 2126 | let mut child = if let Some(view) = merging_child_ptr.node_view(guard) { 2127 | view 2128 | } else { 2129 | reset!("merging child of marked parent already freed"); 2130 | }; 2131 | self.merge_child(&mut cursor, &mut child, guard); 2132 | reset!("cooperatively performed merge_child after detecting parent"); 2133 | } 2134 | 2135 | if cursor.is_merging { 2136 | reset!("resetting after detected child merging without corresponding parent child_merge"); 2137 | } 2138 | 2139 | if cursor.should_merge() { 2140 | if let Some(ref mut parent_cursor) = parent_cursor_opt { 2141 | let is_leftmost_child = 2142 | parent_cursor.index().get_index(0).unwrap().0 == cursor.lo; 2143 | 2144 | if !is_leftmost_child { 2145 | if let Ok(new_parent) = 2146 | self.install_parent_merge(parent_cursor, &cursor, guard) 2147 | { 2148 | *parent_cursor = new_parent; 2149 | } else { 2150 | reset!("failed to install parent merge"); 2151 | } 2152 | 2153 | self.merge_child(parent_cursor, &mut cursor, guard); 2154 | reset!("completed merge_child"); 2155 | } 2156 | } else { 2157 | assert!(!cursor.is_leaf()); 2158 | } 2159 | } 2160 | 2161 | match search { 2162 | LeafSearch::Eq(k) | LeafSearch::Lt(k) => assert!(k >= cursor.lo.borrow()), 2163 | LeafSearch::Max => {} 2164 | } 2165 | 2166 | if let Some(hi) = &cursor.hi { 2167 | let go_right = match search { 2168 | LeafSearch::Eq(k) => k >= hi.borrow(), 2169 | // Lt looks for a node with lo < K, hi >= K 2170 | LeafSearch::Lt(k) => k > hi.borrow(), 2171 | LeafSearch::Max => true, 2172 | }; 2173 | if go_right { 2174 | // go right to the tree sibling 2175 | let next = cursor.next.unwrap(); 2176 | let rhs = if let Some(view) = next.node_view(guard) { 2177 | view 2178 | } else { 2179 | reset!("right child already freed"); 2180 | }; 2181 | 2182 | if let Some(ref mut parent_cursor) = parent_cursor_opt { 2183 | if parent_cursor.is_viable_parent_for(&rhs) { 2184 | let mut parent_clone: Box> = 2185 | Box::new((*parent_cursor).clone()); 2186 | assert!(!parent_clone.is_leaf()); 2187 | parent_clone.index_mut().insert(rhs.lo.clone(), next); 2188 | 2189 | let rhs_ptr_opt = if parent_clone.should_split() { 2190 | Some(parent_clone.split()) 2191 | } else { 2192 | None 2193 | }; 2194 | 2195 | if let Ok(new_parent_view) = parent_cursor.cas(parent_clone, guard) { 2196 | parent_cursor_opt = Some(new_parent_view); 2197 | } else if let Some(rhs_ptr) = rhs_ptr_opt { 2198 | let reclaimed_ptr: Box>> = 2199 | unsafe { Box::from_raw(rhs_ptr.0 as *mut _) }; 2200 | 2201 | let _dropping_reclaimed_rhs: Box> = 2202 | unsafe { Box::from_raw(reclaimed_ptr.load(Ordering::Acquire)) }; 2203 | } 2204 | } 2205 | } else { 2206 | // root hoist 2207 | let current_root_ptr: AtomicPtr<_> = root_cursor.ptr.as_ptr().into(); 2208 | let new_index_ptr = 2209 | BoxedAtomicPtr(Box::into_raw(Box::new(current_root_ptr))); 2210 | 2211 | let mut new_root_node = Node::::new_root(); 2212 | new_root_node 2213 | .index_mut() 2214 | .insert(cursor.lo.clone(), new_index_ptr); 2215 | new_root_node.index_mut().insert(rhs.lo.clone(), next); 2216 | let new_root_ptr = Box::into_raw(new_root_node); 2217 | 2218 | let worked = !debug_delay() 2219 | && self 2220 | .root 2221 | .compare_exchange( 2222 | root_cursor.ptr.as_ptr(), 2223 | new_root_ptr, 2224 | Ordering::AcqRel, 2225 | Ordering::Acquire, 2226 | ) 2227 | .is_ok(); 2228 | 2229 | if worked { 2230 | let parent_view = NodeView { 2231 | id: self.root, 2232 | ptr: NonNull::new(new_root_ptr).unwrap(), 2233 | }; 2234 | parent_cursor_opt = Some(parent_view); 2235 | } else { 2236 | let dangling_root = unsafe { Box::from_raw(new_root_ptr) }; 2237 | drop(dangling_root); 2238 | 2239 | let reclaimed_ptr: Box>> = 2240 | unsafe { Box::from_raw(new_index_ptr.0 as *mut _) }; 2241 | drop(reclaimed_ptr); 2242 | } 2243 | } 2244 | 2245 | cursor = rhs; 2246 | continue; 2247 | } 2248 | } 2249 | 2250 | if cursor.is_leaf() { 2251 | assert!(!cursor.is_merging); 2252 | assert!(cursor.merging_child.is_none()); 2253 | if let Some(ref hi) = cursor.hi { 2254 | match search { 2255 | LeafSearch::Eq(k) => assert!(k < hi.borrow()), 2256 | LeafSearch::Lt(k) => assert!(k <= hi.borrow()), 2257 | LeafSearch::Max => { 2258 | unreachable!("leaf should have no hi key if we're searching for Max") 2259 | } 2260 | } 2261 | } 2262 | break; 2263 | } 2264 | 2265 | // go down the tree 2266 | let index = cursor.index(); 2267 | let child_ptr = match search { 2268 | LeafSearch::Eq(k) => index.get_less_than_or_equal(k).unwrap().1, 2269 | LeafSearch::Lt(k) => { 2270 | // Lt looks for a node with lo < K and hi >= K 2271 | // so we find the first child with a lo key > K and 2272 | // return its left sibling 2273 | index.get_less_than(k).unwrap().1 2274 | } 2275 | LeafSearch::Max => { 2276 | index 2277 | .get_index(index.len().checked_sub(1).unwrap()) 2278 | .unwrap() 2279 | .1 2280 | } 2281 | }; 2282 | 2283 | parent_cursor_opt = Some(cursor); 2284 | cursor = if let Some(view) = child_ptr.node_view(guard) { 2285 | view 2286 | } else { 2287 | reset!("attempt to traverse to child failed because the child has been freed"); 2288 | }; 2289 | } 2290 | 2291 | #[cfg(feature = "timing")] 2292 | self.record_timing(before.elapsed()); 2293 | 2294 | cursor 2295 | } 2296 | } 2297 | 2298 | #[derive(Debug, Clone)] 2299 | #[repr(u8)] 2300 | enum Data 2301 | where 2302 | K: 'static + Clone + Minimum + Ord + Send + Sync, 2303 | V: 'static + Clone + Send + Sync, 2304 | { 2305 | Leaf(StackMap), 2306 | Index(StackMap, FANOUT>), 2307 | } 2308 | 2309 | impl Data 2310 | where 2311 | K: 'static + Clone + Minimum + Ord + Send + Sync, 2312 | V: 'static + Clone + Send + Sync, 2313 | { 2314 | const fn len(&self) -> usize { 2315 | match self { 2316 | Data::Leaf(ref leaf) => leaf.len(), 2317 | Data::Index(ref index) => index.len(), 2318 | } 2319 | } 2320 | } 2321 | 2322 | #[derive(Debug)] 2323 | struct Node 2324 | where 2325 | K: 'static + Clone + Minimum + Ord + Send + Sync, 2326 | V: 'static + Clone + Send + Sync, 2327 | { 2328 | next: Option>, 2329 | merging_child: Option>, 2330 | data: Data, 2331 | lo: K, 2332 | hi: Option, 2333 | is_merging: bool, 2334 | } 2335 | 2336 | impl Clone for Node 2337 | where 2338 | K: 'static + Clone + Minimum + Ord + Send + Sync, 2339 | V: 'static + Clone + Send + Sync, 2340 | { 2341 | fn clone(&self) -> Node { 2342 | Node { 2343 | lo: self.lo.clone(), 2344 | hi: self.hi.clone(), 2345 | next: self.next, 2346 | data: self.data.clone(), 2347 | merging_child: self.merging_child, 2348 | is_merging: self.is_merging, 2349 | } 2350 | } 2351 | } 2352 | 2353 | impl Node 2354 | where 2355 | K: 'static + Clone + Minimum + Ord + Send + Sync, 2356 | V: 'static + Clone + Send + Sync, 2357 | { 2358 | const fn index(&self) -> &StackMap, FANOUT> { 2359 | if let Data::Index(ref index) = self.data { 2360 | index 2361 | } else { 2362 | unreachable!() 2363 | } 2364 | } 2365 | 2366 | fn index_mut(&mut self) -> &mut StackMap, FANOUT> { 2367 | if let Data::Index(ref mut index) = self.data { 2368 | index 2369 | } else { 2370 | unreachable!() 2371 | } 2372 | } 2373 | 2374 | const fn leaf(&self) -> &StackMap { 2375 | if let Data::Leaf(ref leaf) = self.data { 2376 | leaf 2377 | } else { 2378 | unreachable!() 2379 | } 2380 | } 2381 | 2382 | fn leaf_mut(&mut self) -> &mut StackMap { 2383 | if let Data::Leaf(ref mut leaf) = self.data { 2384 | leaf 2385 | } else { 2386 | unreachable!() 2387 | } 2388 | } 2389 | 2390 | const fn is_leaf(&self) -> bool { 2391 | matches!(self.data, Data::Leaf(..)) 2392 | } 2393 | 2394 | fn new_root() -> Box> { 2395 | let min_key = K::MIN; 2396 | Box::new(Node { 2397 | lo: min_key, 2398 | hi: None, 2399 | next: None, 2400 | data: Data::Index(StackMap::new()), 2401 | merging_child: None, 2402 | is_merging: false, 2403 | }) 2404 | } 2405 | 2406 | fn new_leaf(lo: K) -> Box> { 2407 | Box::new(Node { 2408 | lo, 2409 | hi: None, 2410 | next: None, 2411 | data: Data::Leaf(StackMap::new()), 2412 | merging_child: None, 2413 | is_merging: false, 2414 | }) 2415 | } 2416 | 2417 | fn get(&self, key: &Q) -> Option 2418 | where 2419 | K: Borrow, 2420 | Q: Ord + ?Sized, 2421 | { 2422 | assert!(!self.is_merging); 2423 | assert!(self.merging_child.is_none()); 2424 | assert!(self.is_leaf()); 2425 | 2426 | self.leaf().get(key).cloned() 2427 | } 2428 | 2429 | fn remove(&mut self, key: &Q) -> Option 2430 | where 2431 | K: Borrow, 2432 | Q: Ord + ?Sized, 2433 | { 2434 | assert!(!self.is_merging); 2435 | assert!(self.merging_child.is_none()); 2436 | 2437 | self.leaf_mut().remove(key) 2438 | } 2439 | 2440 | fn insert(&mut self, key: K, value: V) -> Option { 2441 | assert!(!self.is_merging); 2442 | assert!(self.merging_child.is_none()); 2443 | assert!(!self.should_split()); 2444 | 2445 | self.leaf_mut().insert(key, value) 2446 | } 2447 | 2448 | fn cas( 2449 | &mut self, 2450 | key: K, 2451 | old: Option<&V2>, 2452 | new: Option, 2453 | ) -> Result, CasFailure> 2454 | where 2455 | V: Borrow, 2456 | V2: ?Sized + PartialEq, 2457 | { 2458 | assert!(!self.is_merging); 2459 | assert!(self.merging_child.is_none()); 2460 | 2461 | // anything that should be split should have been split 2462 | // prior to becoming globally visible via this codepath. 2463 | assert!(!self.should_split()); 2464 | 2465 | match (old, self.leaf().get(&key)) { 2466 | (expected, actual) if expected == actual.map(Borrow::borrow) => { 2467 | if let Some(to_insert) = new { 2468 | Ok(self.leaf_mut().insert(key, to_insert)) 2469 | } else { 2470 | Ok(self.leaf_mut().remove(&key)) 2471 | } 2472 | } 2473 | (_, actual) => Err(CasFailure { 2474 | actual: actual.cloned(), 2475 | returned_new_value: new, 2476 | }), 2477 | } 2478 | } 2479 | 2480 | const fn should_merge(&self) -> bool { 2481 | if self.merging_child.is_some() || self.is_merging { 2482 | return false; 2483 | } 2484 | self.len() <= MERGE_SIZE 2485 | } 2486 | 2487 | const fn should_split(&self) -> bool { 2488 | if self.merging_child.is_some() || self.is_merging { 2489 | return false; 2490 | } 2491 | self.len() > FANOUT - MERGE_SIZE 2492 | } 2493 | 2494 | const fn len(&self) -> usize { 2495 | self.data.len() 2496 | } 2497 | 2498 | fn split(&mut self) -> BoxedAtomicPtr { 2499 | assert!(!self.is_merging); 2500 | assert!(self.merging_child.is_none()); 2501 | 2502 | let split_idx = if self.hi.is_none() { 2503 | // the infinity node should split almost at the end to improve fill ratio 2504 | self.len() - 2 2505 | } else if self.lo == K::MIN { 2506 | // the left-most node should split almost at the beginning to improve fill ratio 2507 | 2 2508 | } else { 2509 | FANOUT / 2 2510 | }; 2511 | 2512 | let (split_point, rhs_data) = match self.data { 2513 | Data::Leaf(ref mut leaf) => { 2514 | let rhs_leaf = leaf.split_off(split_idx); 2515 | let split_point = rhs_leaf.first().unwrap().0.clone(); 2516 | assert!(leaf.len() > MERGE_SIZE); 2517 | (split_point, Data::Leaf(rhs_leaf)) 2518 | } 2519 | Data::Index(ref mut index) => { 2520 | let rhs_index = index.split_off(split_idx); 2521 | let split_point = rhs_index.first().unwrap().0.clone(); 2522 | assert!(index.len() > MERGE_SIZE); 2523 | (split_point, Data::Index(rhs_index)) 2524 | } 2525 | }; 2526 | 2527 | assert!(rhs_data.len() < FANOUT - MERGE_SIZE); 2528 | assert!(rhs_data.len() > MERGE_SIZE); 2529 | 2530 | let rhs_hi = std::mem::replace(&mut self.hi, Some(split_point.clone())); 2531 | 2532 | let rhs = BoxedAtomicPtr::new(Box::new(Node { 2533 | lo: split_point, 2534 | hi: rhs_hi, 2535 | next: self.next, 2536 | data: rhs_data, 2537 | merging_child: None, 2538 | is_merging: false, 2539 | })); 2540 | 2541 | self.next = Some(rhs); 2542 | 2543 | assert!(!self.should_split()); 2544 | 2545 | rhs 2546 | } 2547 | 2548 | fn merge(&mut self, rhs: &NodeView) { 2549 | assert!(rhs.is_merging); 2550 | assert!(!self.is_merging); 2551 | 2552 | self.hi = rhs.hi.clone(); 2553 | self.next = rhs.next; 2554 | 2555 | match self.data { 2556 | Data::Leaf(ref mut leaf) => { 2557 | for (k, v) in rhs.leaf().iter() { 2558 | let prev = leaf.insert(k.clone(), v.clone()); 2559 | assert!(prev.is_none()); 2560 | } 2561 | } 2562 | Data::Index(ref mut index) => { 2563 | for (k, v) in rhs.index().iter() { 2564 | let prev = index.insert(k.clone(), *v); 2565 | assert!(prev.is_none()); 2566 | } 2567 | } 2568 | } 2569 | } 2570 | 2571 | fn is_viable_parent_for(&self, possible_child: &NodeView) -> bool { 2572 | match (&self.hi, &possible_child.hi) { 2573 | (Some(_), None) => return false, 2574 | (Some(parent_hi), Some(child_hi)) if parent_hi < child_hi => return false, 2575 | _ => {} 2576 | } 2577 | self.lo <= possible_child.lo 2578 | } 2579 | } 2580 | 2581 | impl FromIterator<(K, V)> 2582 | for ConcurrentMap 2583 | where 2584 | K: 'static + Clone + Minimum + Ord + Send + Sync, 2585 | V: 'static + Clone + Send + Sync, 2586 | { 2587 | fn from_iter>(iter: I) -> Self { 2588 | let map = ConcurrentMap::default(); 2589 | 2590 | for (k, v) in iter { 2591 | map.insert(k, v); 2592 | } 2593 | 2594 | map 2595 | } 2596 | } 2597 | 2598 | impl<'a, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> IntoIterator 2599 | for &'a ConcurrentMap 2600 | where 2601 | K: 'static + Clone + Minimum + Ord + Send + Sync, 2602 | V: 'static + Clone + Send + Sync, 2603 | { 2604 | type Item = (K, V); 2605 | type IntoIter = Iter<'a, K, V, FANOUT, LOCAL_GC_BUFFER_SIZE, std::ops::RangeFull>; 2606 | 2607 | fn into_iter(self) -> Self::IntoIter { 2608 | self.iter() 2609 | } 2610 | } 2611 | 2612 | // This ensures that ConcurrentMap is Send and Clone. 2613 | const fn _test_impls() { 2614 | const fn send() {} 2615 | const fn clone() {} 2616 | send::>(); 2617 | clone::>(); 2618 | } 2619 | 2620 | #[test] 2621 | fn basic_map() { 2622 | let map = ConcurrentMap::::default(); 2623 | 2624 | let n = 64; // SPLIT_SIZE 2625 | for i in 0..=n { 2626 | assert_eq!(map.get(&i), None); 2627 | map.insert(i, i); 2628 | assert_eq!(map.get(&i), Some(i), "failed to get key {i}"); 2629 | } 2630 | 2631 | for (i, (k, _v)) in map.range(..).enumerate() { 2632 | assert_eq!(i, k); 2633 | } 2634 | 2635 | for (i, (k, _v)) in map.range(..).rev().enumerate() { 2636 | assert_eq!(n - i, k); 2637 | } 2638 | 2639 | for (i, (k, _v)) in map.iter().enumerate() { 2640 | assert_eq!(i, k); 2641 | } 2642 | 2643 | for (i, (k, _v)) in map.iter().rev().enumerate() { 2644 | assert_eq!(n - i, k); 2645 | } 2646 | 2647 | for (i, (k, _v)) in map.range(0..).enumerate() { 2648 | assert_eq!(i, k); 2649 | } 2650 | 2651 | for (i, (k, _v)) in map.range(0..).rev().enumerate() { 2652 | assert_eq!(n - i, k); 2653 | } 2654 | 2655 | for (i, (k, _v)) in map.range(0..n).enumerate() { 2656 | assert_eq!(i, k); 2657 | } 2658 | 2659 | for (i, (k, _v)) in map.range(0..n).rev().enumerate() { 2660 | assert_eq!((n - 1) - i, k); 2661 | } 2662 | 2663 | for (i, (k, _v)) in map.range(0..=n).enumerate() { 2664 | assert_eq!(i, k); 2665 | } 2666 | 2667 | for (i, (k, _v)) in map.range(0..=n).rev().enumerate() { 2668 | assert_eq!(n - i, k); 2669 | } 2670 | 2671 | for i in 0..=n { 2672 | assert_eq!(map.get(&i), Some(i), "failed to get key {i}"); 2673 | } 2674 | } 2675 | 2676 | #[test] 2677 | fn timing_map() { 2678 | use std::time::Instant; 2679 | 2680 | let map = ConcurrentMap::::default(); 2681 | 2682 | let n = 1024 * 1024; 2683 | 2684 | let insert = Instant::now(); 2685 | for i in 0..n { 2686 | map.insert(i, i); 2687 | } 2688 | let insert_elapsed = insert.elapsed(); 2689 | println!( 2690 | "{} inserts/s, total {:?}", 2691 | (n * 1_000_000) / u64::try_from(insert_elapsed.as_micros().max(1)).unwrap_or(u64::MAX), 2692 | insert_elapsed 2693 | ); 2694 | 2695 | let scan = Instant::now(); 2696 | let count = map.range(..).count(); 2697 | assert_eq!(count as u64, n); 2698 | let scan_elapsed = scan.elapsed(); 2699 | println!( 2700 | "{} scanned items/s, total {:?}", 2701 | (n * 1_000_000) / u64::try_from(scan_elapsed.as_micros().max(1)).unwrap_or(u64::MAX), 2702 | scan_elapsed 2703 | ); 2704 | 2705 | let scan_rev = Instant::now(); 2706 | let count = map.range(..).rev().count(); 2707 | assert_eq!(count as u64, n); 2708 | let scan_rev_elapsed = scan_rev.elapsed(); 2709 | println!( 2710 | "{} reverse-scanned items/s, total {:?}", 2711 | (n * 1_000_000) / u64::try_from(scan_rev_elapsed.as_micros().max(1)).unwrap_or(u64::MAX), 2712 | scan_rev_elapsed 2713 | ); 2714 | 2715 | let gets = Instant::now(); 2716 | for i in 0..n { 2717 | map.get(&i); 2718 | } 2719 | let gets_elapsed = gets.elapsed(); 2720 | println!( 2721 | "{} gets/s, total {:?}", 2722 | (n * 1_000_000) / u64::try_from(gets_elapsed.as_micros().max(1)).unwrap_or(u64::MAX), 2723 | gets_elapsed 2724 | ); 2725 | } 2726 | -------------------------------------------------------------------------------- /src/serde.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use serde::de::{Deserializer, MapAccess, Visitor}; 4 | use serde::{Deserialize, Serialize, Serializer}; 5 | 6 | use crate::{ConcurrentMap, Minimum}; 7 | 8 | impl Serialize 9 | for ConcurrentMap 10 | where 11 | K: 'static + Serialize + Clone + Minimum + Ord + Send + Sync, 12 | V: 'static + Serialize + Clone + Send + Sync, 13 | { 14 | fn serialize(&self, s: S) -> Result 15 | where 16 | S: Serializer, 17 | { 18 | use serde::ser::SerializeMap; 19 | let mut map = s.serialize_map(Some(self.len()))?; 20 | //let mut map = s.serialize_map(None)?; 21 | for (k, v) in self.iter() { 22 | map.serialize_entry(&k, &v)?; 23 | } 24 | map.end() 25 | } 26 | } 27 | 28 | struct ConcurrentMapVisitor { 29 | pd: PhantomData<(K, V)>, 30 | } 31 | 32 | impl<'de, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> Visitor<'de> 33 | for ConcurrentMapVisitor 34 | where 35 | K: 'static + Deserialize<'de> + Clone + Minimum + Ord + Send + Sync, 36 | V: 'static + Deserialize<'de> + Clone + Send + Sync, 37 | { 38 | type Value = ConcurrentMap; 39 | 40 | fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { 41 | formatter.write_str("a StackMap") 42 | } 43 | 44 | fn visit_map(self, mut access: M) -> Result 45 | where 46 | M: MapAccess<'de>, 47 | { 48 | let map = ConcurrentMap::default(); 49 | 50 | while let Some((key, value)) = access.next_entry()? { 51 | map.insert(key, value); 52 | } 53 | 54 | Ok(map) 55 | } 56 | } 57 | 58 | impl<'de, K, V, const FANOUT: usize, const LOCAL_GC_BUFFER_SIZE: usize> Deserialize<'de> 59 | for ConcurrentMap 60 | where 61 | K: 'static + Deserialize<'de> + Clone + Minimum + Ord + Send + Sync, 62 | V: 'static + Deserialize<'de> + Clone + Send + Sync, 63 | { 64 | fn deserialize(d: D) -> Result, D::Error> 65 | where 66 | D: Deserializer<'de>, 67 | { 68 | d.deserialize_map(ConcurrentMapVisitor { pd: PhantomData }) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /tests/concurrent.rs: -------------------------------------------------------------------------------- 1 | use concurrent_map::ConcurrentMap; 2 | 3 | #[test] 4 | fn concurrent_tree() { 5 | let n: u16 = 1024; 6 | let concurrency = std::thread::available_parallelism() 7 | .map(std::num::NonZeroUsize::get) 8 | .unwrap_or(8) 9 | * 2; 10 | 11 | let run = |tree: ConcurrentMap, barrier: &std::sync::Barrier, low_bits| { 12 | let shift = concurrency.next_power_of_two().trailing_zeros(); 13 | let unique_key = |key| (key << shift) | low_bits; 14 | 15 | barrier.wait(); 16 | for key in 0..n { 17 | let i = unique_key(key); 18 | assert_eq!(tree.get(&i), None); 19 | tree.insert(i, i); 20 | assert_eq!(tree.get(&i), Some(i), "failed to get key {i}"); 21 | } 22 | for key in 0_u16..n { 23 | let i = unique_key(key); 24 | assert_eq!(tree.get(&i), Some(i), "failed to get key {i}"); 25 | } 26 | for key in 0_u16..n { 27 | let i = unique_key(key); 28 | assert_eq!( 29 | tree.cas(i, Some(&i), Some(unique_key(key * 2))), 30 | Ok(Some(i)), 31 | "failed to get key {i}" 32 | ); 33 | } 34 | let visible: std::collections::HashMap = tree.iter().collect(); 35 | let visible_rev: std::collections::HashMap = tree.iter().rev().collect(); 36 | 37 | for key in 0_u16..n { 38 | let i = unique_key(key); 39 | let v = unique_key(key * 2); 40 | assert_eq!(visible.get(&i).copied(), Some(v), "failed to get key {i}"); 41 | assert_eq!( 42 | visible_rev.get(&i).copied(), 43 | Some(v), 44 | "failed to get key {i}" 45 | ); 46 | } 47 | 48 | for key in 0..n { 49 | let i = unique_key(key); 50 | let v = unique_key(key * 2); 51 | assert_eq!(tree.remove(&i), Some(v)); 52 | } 53 | for key in 0..n { 54 | let i = unique_key(key); 55 | assert_eq!(tree.get(&i), None, "failed to get key {i}"); 56 | } 57 | }; 58 | 59 | let tree = ConcurrentMap::default(); 60 | 61 | std::thread::scope(|s| { 62 | for _ in 0..64 { 63 | let barrier = std::sync::Arc::new(std::sync::Barrier::new(concurrency)); 64 | let mut threads = vec![]; 65 | for i in 0..concurrency { 66 | let tree_2 = tree.clone(); 67 | let barrier_2 = barrier.clone(); 68 | 69 | let thread = s.spawn(move || run(tree_2, &barrier_2, u16::try_from(i).unwrap())); 70 | threads.push(thread); 71 | } 72 | for thread in threads { 73 | thread.join().unwrap(); 74 | } 75 | } 76 | }); 77 | } 78 | 79 | #[test] 80 | fn big_scan() { 81 | let n: u32 = 16 * 1024 * 1024; 82 | let concurrency = 8; 83 | let stride = n / concurrency; 84 | 85 | let fill = 86 | |tree: ConcurrentMap, barrier: &std::sync::Barrier, start_fill, stop_fill| { 87 | barrier.wait(); 88 | let insert = std::time::Instant::now(); 89 | for i in start_fill..stop_fill { 90 | tree.insert(i, i); 91 | } 92 | let insert_elapsed = insert.elapsed(); 93 | println!( 94 | "{} inserts/s, total {} in {:?}", 95 | (u64::from(stride) * 1000) 96 | / u64::try_from(insert_elapsed.as_millis()).unwrap_or(u64::MAX), 97 | stop_fill - start_fill, 98 | insert_elapsed 99 | ); 100 | }; 101 | 102 | let read = |tree: ConcurrentMap, barrier: &std::sync::Barrier| { 103 | barrier.wait(); 104 | let scan = std::time::Instant::now(); 105 | let count = tree.range(..).take(stride as _).count(); 106 | assert_eq!(count, stride as _); 107 | let scan_elapsed = scan.elapsed(); 108 | let scan_micros = scan_elapsed.as_micros().max(1) as f64; 109 | println!( 110 | "{} scanned items/s, total {:?}", 111 | ((f64::from(stride) * 1_000_000.0) / scan_micros) as u64, 112 | scan_elapsed 113 | ); 114 | }; 115 | 116 | let tree = ConcurrentMap::default(); 117 | let barrier = std::sync::Barrier::new(concurrency as _); 118 | 119 | std::thread::scope(|s| { 120 | let mut threads = vec![]; 121 | for i in 0..concurrency { 122 | let tree_2 = tree.clone(); 123 | let barrier_2 = &barrier; 124 | 125 | let start_fill = i * stride; 126 | let stop_fill = (i + 1) * stride; 127 | 128 | let thread = s.spawn(move || fill(tree_2, barrier_2, start_fill, stop_fill)); 129 | threads.push(thread); 130 | } 131 | for thread in threads { 132 | thread.join().unwrap(); 133 | } 134 | }); 135 | 136 | std::thread::scope(|s| { 137 | let mut threads = vec![]; 138 | for _ in 0..concurrency { 139 | let tree_2 = tree.clone(); 140 | let barrier_2 = &barrier; 141 | 142 | let thread = s.spawn(move || read(tree_2, barrier_2)); 143 | threads.push(thread); 144 | } 145 | for thread in threads { 146 | thread.join().unwrap(); 147 | } 148 | }); 149 | } 150 | 151 | #[test] 152 | fn bulk_load() { 153 | let n: u64 = 16 * 1024 * 1024; 154 | 155 | let concurrency = std::thread::available_parallelism() 156 | .map(std::num::NonZeroUsize::get) 157 | .unwrap_or(8) as u64; 158 | 159 | let run = |tree: ConcurrentMap, barrier: &std::sync::Barrier, low_bits| { 160 | let shift = concurrency.next_power_of_two().trailing_zeros(); 161 | let unique_key = |key| (key << shift) | low_bits; 162 | 163 | barrier.wait(); 164 | for key in 0..n / concurrency { 165 | let i = unique_key(key); 166 | tree.insert(i, i); 167 | } 168 | }; 169 | 170 | let tree = ConcurrentMap::default(); 171 | 172 | std::thread::scope(|s| { 173 | let barrier = std::sync::Arc::new(std::sync::Barrier::new(1 + concurrency as usize)); 174 | let mut threads = vec![]; 175 | for i in 0..concurrency { 176 | let tree_2 = tree.clone(); 177 | let barrier_2 = barrier.clone(); 178 | 179 | let thread = s.spawn(move || run(tree_2, &barrier_2, i)); 180 | threads.push(thread); 181 | } 182 | barrier.wait(); 183 | let insert = std::time::Instant::now(); 184 | for thread in threads { 185 | thread.join().unwrap(); 186 | } 187 | let insert_elapsed = insert.elapsed(); 188 | println!( 189 | "{} bulk inserts/s, total {:?}", 190 | (n * 1000) / u64::try_from(insert_elapsed.as_millis()).unwrap_or(u64::MAX), 191 | insert_elapsed 192 | ); 193 | }); 194 | } 195 | -------------------------------------------------------------------------------- /tests/fuzzer_regression_tests.rs: -------------------------------------------------------------------------------- 1 | /// Tests are placed here when the fuzzer finds bugs, so we can re-play them deterministically 2 | /// even if tested on a machine without a fuzzer corpus loaded etc... 3 | use concurrent_map::ConcurrentMap; 4 | use std::collections::BTreeMap; 5 | 6 | fn map_model( 7 | items: &[u64], 8 | ) -> (ConcurrentMap, BTreeMap) { 9 | let map = concurrent_map::ConcurrentMap::<_, _, FANOUT>::default(); 10 | let mut model = std::collections::BTreeMap::new(); 11 | 12 | for item in items { 13 | map.insert(*item, *item); 14 | model.insert(*item, *item); 15 | } 16 | 17 | (map, model) 18 | } 19 | 20 | fn prop_iter_matches( 21 | map: &ConcurrentMap, 22 | model: &BTreeMap, 23 | ) { 24 | let expected = model.iter().map(|(k, v)| (*k, *v)).collect::>(); 25 | let actual = map.iter().collect::>(); 26 | assert_eq!(expected, actual); 27 | } 28 | 29 | fn prop_rev_iter_matches( 30 | map: &ConcurrentMap, 31 | model: &BTreeMap, 32 | ) { 33 | let expected = model 34 | .iter() 35 | .rev() 36 | .map(|(k, v)| (*k, *v)) 37 | .collect::>(); 38 | let actual = map.iter().rev().collect::>(); 39 | assert_eq!(expected, actual); 40 | } 41 | 42 | fn prop_range_matches( 43 | map: &ConcurrentMap, 44 | model: &BTreeMap, 45 | bounds: std::ops::Range, 46 | ) { 47 | let expected = model 48 | .range(bounds.clone()) 49 | .map(|(k, _v)| *k) 50 | .collect::>(); 51 | let actual = map 52 | .range(bounds.clone()) 53 | .map(|(k, _v)| k) 54 | .collect::>(); 55 | assert_eq!(expected, actual); 56 | } 57 | 58 | fn prop_rev_range_matches( 59 | map: &ConcurrentMap, 60 | model: &BTreeMap, 61 | bounds: std::ops::Range, 62 | ) { 63 | let expected_rev = model 64 | .range(bounds.clone()) 65 | .rev() 66 | .map(|(k, _v)| *k) 67 | .collect::>(); 68 | let actual_rev = map.range(bounds).rev().map(|(k, _v)| k).collect::>(); 69 | 70 | assert_eq!(expected_rev, actual_rev); 71 | } 72 | 73 | #[test] 74 | fn test_00() { 75 | let map = concurrent_map::ConcurrentMap::<_, _, 8>::default(); 76 | map.insert(0, 0); 77 | map.insert(103, 103); 78 | 79 | let mut range = map.range(0..1); 80 | 81 | let expected = (0, 0); 82 | 83 | assert_eq!(range.next().unwrap(), expected); 84 | assert_eq!(range.next_back().unwrap(), expected); 85 | } 86 | 87 | #[test] 88 | fn test_01() { 89 | let items = [95, 126, 2, 73, 0, 106, 54]; 90 | 91 | let (map, model) = map_model::<4>(&items); 92 | 93 | let bounds = 81..124; 94 | 95 | prop_iter_matches(&map, &model); 96 | prop_rev_iter_matches(&map, &model); 97 | prop_range_matches(&map, &model, bounds.clone()); 98 | prop_rev_range_matches(&map, &model, bounds.clone()); 99 | } 100 | 101 | #[test] 102 | fn test_02() { 103 | let items = [2365587456, 12989, 18446742974197923840, 1099511627775]; 104 | 105 | let (map, model) = map_model::<4>(&items); 106 | 107 | prop_iter_matches(&map, &model); 108 | prop_rev_iter_matches(&map, &model); 109 | } 110 | 111 | #[test] 112 | fn test_03() { 113 | let items = [ 114 | 838873789, 115 | 49478023249920, 116 | 5859553998519926784, 117 | 0, 118 | 11936128518274744320, 119 | 165, 120 | ]; 121 | 122 | let (map, model) = map_model::<4>(&items); 123 | 124 | prop_iter_matches(&map, &model); 125 | prop_rev_iter_matches(&map, &model); 126 | } 127 | -------------------------------------------------------------------------------- /tests/leak_check.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use concurrent_map::ConcurrentMap; 4 | 5 | mod alloc { 6 | use std::alloc::{Layout, System}; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | 9 | #[global_allocator] 10 | static ALLOCATOR: Alloc = Alloc; 11 | 12 | static ALLOCATED: AtomicUsize = AtomicUsize::new(0); 13 | static FREED: AtomicUsize = AtomicUsize::new(0); 14 | static RESIDENT: AtomicUsize = AtomicUsize::new(0); 15 | 16 | pub fn allocated() -> usize { 17 | ALLOCATED.swap(0, Ordering::Relaxed) / 1_000_000 18 | } 19 | 20 | pub fn freed() -> usize { 21 | FREED.swap(0, Ordering::Relaxed) / 1_000_000 22 | } 23 | 24 | pub fn resident() -> usize { 25 | RESIDENT.load(Ordering::Relaxed) / 1_000_000 26 | } 27 | 28 | #[derive(Default, Debug, Clone, Copy)] 29 | struct Alloc; 30 | 31 | unsafe impl std::alloc::GlobalAlloc for Alloc { 32 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 33 | let ret = System.alloc(layout); 34 | assert_ne!( 35 | ret, 36 | std::ptr::null_mut(), 37 | "alloc returned null pointer for layout {layout:?}" 38 | ); 39 | ALLOCATED.fetch_add(layout.size(), Ordering::Relaxed); 40 | RESIDENT.fetch_add(layout.size(), Ordering::Relaxed); 41 | std::ptr::write_bytes(ret, 0xa1, layout.size()); 42 | ret 43 | } 44 | 45 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 46 | std::ptr::write_bytes(ptr, 0xde, layout.size()); 47 | FREED.fetch_add(layout.size(), Ordering::Relaxed); 48 | RESIDENT.fetch_sub(layout.size(), Ordering::Relaxed); 49 | System.dealloc(ptr, layout) 50 | } 51 | } 52 | } 53 | 54 | #[test] 55 | fn leak_check() { 56 | let n: u32 = 16 * 1024; 57 | 58 | let concurrency = std::thread::available_parallelism() 59 | .map(std::num::NonZeroUsize::get) 60 | .unwrap_or(8) 61 | * 2; 62 | 63 | let run = |tree: ConcurrentMap, barrier: &std::sync::Barrier, low_bits| { 64 | let shift = concurrency.next_power_of_two().trailing_zeros(); 65 | let unique_key = |key| (key << shift) | low_bits; 66 | 67 | barrier.wait(); 68 | for key in 0..n { 69 | let i = unique_key(key); 70 | assert_eq!(tree.get(&i), None); 71 | tree.insert(i, i); 72 | assert_eq!(tree.get(&i), Some(i), "failed to get key {i}"); 73 | } 74 | for key in 0_u32..n { 75 | let i = unique_key(key); 76 | assert_eq!(tree.get(&i), Some(i), "failed to get key {i}"); 77 | } 78 | for key in 0_u32..n { 79 | let i = unique_key(key); 80 | assert_eq!( 81 | tree.cas(i, Some(&i), Some(unique_key(key * 2))), 82 | Ok(Some(i)), 83 | "failed to get key {i}" 84 | ); 85 | } 86 | let visible: std::collections::HashMap = tree.iter().collect(); 87 | 88 | for key in 0_u32..n { 89 | let i = unique_key(key); 90 | let v = unique_key(key * 2); 91 | assert_eq!(visible.get(&i).copied(), Some(v), "failed to get key {i}"); 92 | } 93 | 94 | for key in 0..n { 95 | let i = unique_key(key); 96 | let v = unique_key(key * 2); 97 | assert_eq!(tree.remove(&i), Some(v)); 98 | } 99 | for key in 0..n { 100 | let i = unique_key(key); 101 | assert_eq!(tree.get(&i), None, "failed to get key {i}"); 102 | } 103 | }; 104 | 105 | let before = Instant::now(); 106 | let resident_before = alloc::resident(); 107 | 108 | let tree = ConcurrentMap::default(); 109 | std::thread::scope(|s| { 110 | for _ in 0..64 { 111 | let barrier = std::sync::Arc::new(std::sync::Barrier::new(concurrency)); 112 | let mut threads = vec![]; 113 | for i in 0..concurrency { 114 | let tree_2 = tree.clone(); 115 | let barrier_2 = barrier.clone(); 116 | 117 | let thread = s.spawn(move || run(tree_2, &barrier_2, u32::try_from(i).unwrap())); 118 | threads.push(thread); 119 | } 120 | for thread in threads { 121 | thread.join().unwrap(); 122 | } 123 | } 124 | }); 125 | 126 | drop(tree); 127 | 128 | let resident_after = alloc::resident(); 129 | 130 | println!( 131 | "{:.2} million wps {} mb allocated {} mb freed {} mb resident to insert {} items", 132 | n as f64 / (before.elapsed().as_micros().max(1)) as f64, 133 | alloc::allocated(), 134 | alloc::freed(), 135 | resident_after, 136 | n, 137 | ); 138 | 139 | assert_eq!( 140 | resident_after - resident_before, 141 | 0, 142 | "leaked {}mb", 143 | resident_after 144 | ); 145 | } 146 | --------------------------------------------------------------------------------