├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .rustfmt.toml ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── RELEASES.md ├── benches ├── bench.rs └── faststring.rs ├── src ├── arbitrary.rs ├── borsh.rs ├── lib.rs ├── macros.rs ├── map.rs ├── map │ ├── core.rs │ ├── core │ │ ├── entry.rs │ │ └── raw_entry_v1.rs │ ├── iter.rs │ ├── mutable.rs │ ├── serde_seq.rs │ ├── slice.rs │ └── tests.rs ├── rayon │ ├── map.rs │ ├── mod.rs │ └── set.rs ├── serde.rs ├── set.rs ├── set │ ├── iter.rs │ ├── mutable.rs │ ├── slice.rs │ └── tests.rs └── util.rs ├── test-nostd ├── Cargo.toml └── src │ └── lib.rs ├── test-serde ├── Cargo.toml └── src │ └── lib.rs └── tests ├── equivalent_trait.rs ├── macros_full_path.rs ├── quick.rs └── tests.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | merge_group: 7 | 8 | name: CI 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | CARGO_INCREMENTAL: 0 13 | 14 | jobs: 15 | tests: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | include: 20 | - rust: 1.68.0 # MSRV 21 | features: 22 | - rust: stable 23 | features: arbitrary 24 | - rust: stable 25 | features: quickcheck 26 | - rust: stable 27 | features: rayon 28 | - rust: stable 29 | features: serde 30 | - rust: stable 31 | features: borsh 32 | - rust: stable 33 | features: std 34 | - rust: beta 35 | features: 36 | - rust: nightly 37 | bench: test build benchmarks 38 | 39 | steps: 40 | - uses: actions/checkout@v4 41 | - name: Lock MSRV-compatible dependencies 42 | if: matrix.rust == '1.68.0' 43 | env: 44 | CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback 45 | # Note that this uses the runner's pre-installed stable cargo 46 | run: cargo generate-lockfile 47 | - uses: dtolnay/rust-toolchain@master 48 | with: 49 | toolchain: ${{ matrix.rust }} 50 | - name: Tests 51 | run: | 52 | cargo build --verbose --features "${{ matrix.features }}" 53 | cargo doc --verbose --features "${{ matrix.features }}" 54 | cargo test --verbose --features "${{ matrix.features }}" 55 | cargo test --release --verbose --features "${{ matrix.features }}" 56 | - name: Tests (serde) 57 | if: matrix.features == 'serde' 58 | run: | 59 | cargo test --verbose -p test-serde 60 | - name: Test run benchmarks 61 | if: matrix.bench != '' 62 | run: cargo test -v --benches 63 | 64 | nostd_build: 65 | runs-on: ubuntu-latest 66 | strategy: 67 | matrix: 68 | include: 69 | - rust: 1.68.0 70 | target: thumbv6m-none-eabi 71 | - rust: stable 72 | target: thumbv6m-none-eabi 73 | 74 | steps: 75 | - uses: actions/checkout@v4 76 | - name: Lock MSRV-compatible dependencies 77 | if: matrix.rust == '1.68.0' 78 | env: 79 | CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback 80 | # Note that this uses the runner's pre-installed stable cargo 81 | run: cargo generate-lockfile 82 | - uses: dtolnay/rust-toolchain@master 83 | with: 84 | toolchain: ${{ matrix.rust }} 85 | target: ${{ matrix.target }} 86 | - name: Tests 87 | run: | 88 | cargo build -vv --target=${{ matrix.target }} --no-default-features 89 | cargo build -v -p test-nostd --target=${{ matrix.target }} 90 | 91 | clippy: 92 | runs-on: ubuntu-latest 93 | steps: 94 | - uses: actions/checkout@v4 95 | - uses: dtolnay/rust-toolchain@beta 96 | with: 97 | components: clippy 98 | - run: cargo clippy --all-features 99 | 100 | miri: 101 | runs-on: ubuntu-latest 102 | steps: 103 | - uses: actions/checkout@v4 104 | - uses: dtolnay/rust-toolchain@nightly 
105 | with: 106 | components: miri, rust-src 107 | - uses: taiki-e/install-action@v2 108 | with: 109 | tool: cargo-nextest 110 | - run: cargo miri nextest run 111 | - run: cargo miri test --doc 112 | 113 | minimal-versions: 114 | name: Check MSRV and minimal-versions 115 | runs-on: ubuntu-latest 116 | steps: 117 | - uses: actions/checkout@v4 118 | - uses: dtolnay/rust-toolchain@nightly 119 | - uses: dtolnay/rust-toolchain@1.68.0 # MSRV 120 | - uses: taiki-e/install-action@v2 121 | with: 122 | tool: cargo-hack 123 | - name: Lock minimal direct dependencies 124 | run: cargo +nightly hack generate-lockfile --remove-dev-deps -Z direct-minimal-versions 125 | env: 126 | CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback 127 | - name: Build (nightly) 128 | run: cargo +nightly build --verbose --all-features 129 | - name: Build (MSRV) 130 | run: cargo build --verbose --features arbitrary,quickcheck,serde,rayon 131 | 132 | # One job that "summarizes" the success state of this pipeline. This can then be added to branch 133 | # protection, rather than having to add each job separately. 134 | success: 135 | name: Success 136 | runs-on: ubuntu-latest 137 | needs: [tests, nostd_build, clippy, miri, minimal-versions] 138 | # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency 139 | # failed" as success. So we have to do some contortions to ensure the job fails if any of its 140 | # dependencies fails. 141 | if: always() # make sure this is never "skipped" 142 | steps: 143 | # Manually check the status of all dependencies. `if: failure()` does not work. 144 | - name: check if any dependency failed 145 | run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' 146 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ringmap" 3 | edition = "2021" 4 | version = "0.1.3" 5 | documentation = "https://docs.rs/ringmap/" 6 | repository = "https://github.com/indexmap-rs/ringmap" 7 | license = "Apache-2.0 OR MIT" 8 | description = "A hash table with consistent deque-like order and fast iteration." 
9 | keywords = ["hashmap", "no_std"] 10 | categories = ["data-structures", "no-std"] 11 | rust-version = "1.68" 12 | 13 | [lib] 14 | bench = false 15 | 16 | [dependencies] 17 | equivalent = { version = "1.0", default-features = false } 18 | 19 | arbitrary = { version = "1.0", optional = true, default-features = false } 20 | quickcheck = { version = "1.0", optional = true, default-features = false } 21 | serde = { version = "1.0", optional = true, default-features = false } 22 | borsh = { version = "1.2", optional = true, default-features = false } 23 | rayon = { version = "1.9", optional = true } 24 | 25 | [dependencies.hashbrown] 26 | version = "0.15.0" 27 | default-features = false 28 | 29 | [dev-dependencies] 30 | itertools = "0.14" 31 | rand = {version = "0.9", features = ["small_rng"] } 32 | quickcheck = { version = "1.0", default-features = false } 33 | fnv = "1.0" 34 | lazy_static = "1.3" 35 | serde_derive = "1.0" 36 | 37 | [features] 38 | default = ["std"] 39 | std = [] 40 | 41 | # for testing only, of course 42 | test_debug = [] 43 | 44 | [profile.bench] 45 | debug = true 46 | 47 | [package.metadata.release] 48 | allow-branch = ["main"] 49 | sign-tag = true 50 | tag-name = "{{version}}" 51 | 52 | [package.metadata.docs.rs] 53 | features = ["arbitrary", "quickcheck", "serde", "borsh", "rayon"] 54 | rustdoc-args = ["--cfg", "docsrs"] 55 | 56 | [workspace] 57 | members = ["test-nostd", "test-serde"] 58 | 59 | [lints.clippy] 60 | style = "allow" 61 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016--2017 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ringmap 2 | 3 | [![build status](https://github.com/indexmap-rs/ringmap/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/indexmap-rs/ringmap/actions) 4 | [![crates.io](https://img.shields.io/crates/v/ringmap.svg)](https://crates.io/crates/ringmap) 5 | [![docs](https://docs.rs/ringmap/badge.svg)](https://docs.rs/ringmap) 6 | [![rustc](https://img.shields.io/badge/rust-1.68%2B-orange.svg)](https://img.shields.io/badge/rust-1.68%2B-orange.svg) 7 | 8 | A pure-Rust hash table which preserves (in a limited sense) insertion order, 9 | with efficient deque-like manipulation of both the front and back ends. 10 | 11 | This crate implements compact map and set data-structures, 12 | where the iteration order of the keys is independent from their hash or 13 | value. It preserves insertion order in most mutating operations, and it 14 | allows lookup of entries by either hash table key or numerical index. 15 | 16 | # Background 17 | 18 | This crate was forked from [`indexmap`](https://crates.io/crates/indexmap), 19 | with the primary difference being a change from `Vec` to `VecDeque` for the 20 | primary item storage. As a result, it has many of the same properties, as 21 | well as a few new ones: 22 | 23 | - Order is **independent of hash function** and hash values of keys. 24 | - Fast to iterate. 25 | - Indexed in compact space. 26 | - Efficient pushing and popping from both the front and back. 27 | - Preserves insertion order **as long** as you don't call `.swap_remove_back()` 28 | or other methods that explicitly change order. 29 | - In `ringmap`, the regular `.remove()` **does** preserve insertion order, 30 | equivalent to what `indexmap` calls `.shift_remove()`. 31 | - Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does. 
32 | 33 | `ringmap` also follows [`ordermap`](https://crates.io/crates/ordermap) in using 34 | its entry order for `PartialEq` and `Eq`, whereas `indexmap` considers the same 35 | entries in *any* order to be equal for drop-in compatibility with `HashMap` 36 | semantics. Using the order is faster, and also allows `ringmap` to implement 37 | `PartialOrd`, `Ord`, and `Hash`. 38 | 39 | # Recent Changes 40 | 41 | See [RELEASES.md](https://github.com/indexmap-rs/ringmap/blob/main/RELEASES.md). 42 | -------------------------------------------------------------------------------- /RELEASES.md: -------------------------------------------------------------------------------- 1 | # Releases 2 | 3 | ## 0.1.3 (2025-04-04) 4 | 5 | - Added a `get_disjoint_mut` method to `RingMap`, matching Rust 1.86's 6 | `HashMap` method. 7 | - Added a `get_disjoint_indices_mut` method to `RingMap` and `map::Slice`, 8 | matching Rust 1.86's `get_disjoint_mut` method on slices. 9 | 10 | ## 0.1.2 (2025-03-10) 11 | 12 | - Added `ringmap_with_default!` and `ringset_with_default!` to be used with 13 | alternative hashers, especially when using the crate without `std`. 14 | - Implemented `PartialEq` between each `Slice` and `[]`/arrays. 15 | 16 | ## 0.1.1 (2025-01-29) 17 | 18 | - Optimized the branch behavior of the iterators. 19 | 20 | ## 0.1.0 (2025-01-21) 21 | 22 | - Initial release, based on `indexmap v2.7.1`. 23 | -------------------------------------------------------------------------------- /benches/bench.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | #[macro_use] 5 | extern crate lazy_static; 6 | 7 | use fnv::FnvHasher; 8 | use std::hash::BuildHasherDefault; 9 | use std::hash::Hash; 10 | type FnvBuilder = BuildHasherDefault; 11 | 12 | use test::black_box; 13 | use test::Bencher; 14 | 15 | use ringmap::RingMap; 16 | 17 | use std::collections::HashMap; 18 | 19 | use rand::rngs::SmallRng; 20 | use rand::seq::SliceRandom; 21 | use rand::SeedableRng; 22 | 23 | /// Use a consistently seeded Rng for benchmark stability 24 | fn small_rng() -> SmallRng { 25 | let seed = u64::from_le_bytes(*b"ringmap!"); 26 | SmallRng::seed_from_u64(seed) 27 | } 28 | 29 | #[bench] 30 | fn new_hashmap(b: &mut Bencher) { 31 | b.iter(|| HashMap::::new()); 32 | } 33 | 34 | #[bench] 35 | fn new_ringmap(b: &mut Bencher) { 36 | b.iter(|| RingMap::::new()); 37 | } 38 | 39 | #[bench] 40 | fn with_capacity_10e5_hashmap(b: &mut Bencher) { 41 | b.iter(|| HashMap::::with_capacity(10_000)); 42 | } 43 | 44 | #[bench] 45 | fn with_capacity_10e5_ringmap(b: &mut Bencher) { 46 | b.iter(|| RingMap::::with_capacity(10_000)); 47 | } 48 | 49 | #[bench] 50 | fn insert_hashmap_10_000(b: &mut Bencher) { 51 | let c = 10_000; 52 | b.iter(|| { 53 | let mut map = HashMap::with_capacity(c); 54 | for x in 0..c { 55 | map.insert(x, ()); 56 | } 57 | map 58 | }); 59 | } 60 | 61 | #[bench] 62 | fn insert_ringmap_10_000(b: &mut Bencher) { 63 | let c = 10_000; 64 | b.iter(|| { 65 | let mut map = RingMap::with_capacity(c); 66 | for x in 0..c { 67 | map.insert(x, ()); 68 | } 69 | map 70 | }); 71 | } 72 | 73 | #[bench] 74 | fn insert_ringmap_10_000_before_0(b: &mut Bencher) { 75 | let c = 10_000; 76 | b.iter(|| { 77 | let mut map = RingMap::with_capacity(c); 78 | for x in 0..c { 79 | map.insert_before(0, x, ()); 80 | } 81 | map 82 | }); 83 | } 84 | 85 | #[bench] 86 | fn insert_hashmap_string_10_000(b: &mut Bencher) { 87 | let c = 10_000; 88 | b.iter(|| { 89 | let mut map = 
HashMap::with_capacity(c); 90 | for x in 0..c { 91 | map.insert(x.to_string(), ()); 92 | } 93 | map 94 | }); 95 | } 96 | 97 | #[bench] 98 | fn insert_ringmap_string_10_000(b: &mut Bencher) { 99 | let c = 10_000; 100 | b.iter(|| { 101 | let mut map = RingMap::with_capacity(c); 102 | for x in 0..c { 103 | map.insert(x.to_string(), ()); 104 | } 105 | map 106 | }); 107 | } 108 | 109 | #[bench] 110 | fn insert_hashmap_str_10_000(b: &mut Bencher) { 111 | let c = 10_000; 112 | let ss = Vec::from_iter((0..c).map(|x| x.to_string())); 113 | b.iter(|| { 114 | let mut map = HashMap::with_capacity(c); 115 | for key in &ss { 116 | map.insert(&key[..], ()); 117 | } 118 | map 119 | }); 120 | } 121 | 122 | #[bench] 123 | fn insert_ringmap_str_10_000(b: &mut Bencher) { 124 | let c = 10_000; 125 | let ss = Vec::from_iter((0..c).map(|x| x.to_string())); 126 | b.iter(|| { 127 | let mut map = RingMap::with_capacity(c); 128 | for key in &ss { 129 | map.insert(&key[..], ()); 130 | } 131 | map 132 | }); 133 | } 134 | 135 | #[bench] 136 | fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { 137 | let c = 10_000; 138 | let value = [0u64; 10]; 139 | b.iter(|| { 140 | let mut map = HashMap::with_capacity(c); 141 | for i in 0..c { 142 | map.insert(i, value); 143 | } 144 | map 145 | }); 146 | } 147 | 148 | #[bench] 149 | fn insert_ringmap_int_bigvalue_10_000(b: &mut Bencher) { 150 | let c = 10_000; 151 | let value = [0u64; 10]; 152 | b.iter(|| { 153 | let mut map = RingMap::with_capacity(c); 154 | for i in 0..c { 155 | map.insert(i, value); 156 | } 157 | map 158 | }); 159 | } 160 | 161 | #[bench] 162 | fn insert_hashmap_100_000(b: &mut Bencher) { 163 | let c = 100_000; 164 | b.iter(|| { 165 | let mut map = HashMap::with_capacity(c); 166 | for x in 0..c { 167 | map.insert(x, ()); 168 | } 169 | map 170 | }); 171 | } 172 | 173 | #[bench] 174 | fn insert_ringmap_100_000(b: &mut Bencher) { 175 | let c = 100_000; 176 | b.iter(|| { 177 | let mut map = RingMap::with_capacity(c); 178 | for x in 0..c { 179 | map.insert(x, ()); 180 | } 181 | map 182 | }); 183 | } 184 | 185 | #[bench] 186 | fn insert_hashmap_150(b: &mut Bencher) { 187 | let c = 150; 188 | b.iter(|| { 189 | let mut map = HashMap::with_capacity(c); 190 | for x in 0..c { 191 | map.insert(x, ()); 192 | } 193 | map 194 | }); 195 | } 196 | 197 | #[bench] 198 | fn insert_ringmap_150(b: &mut Bencher) { 199 | let c = 150; 200 | b.iter(|| { 201 | let mut map = RingMap::with_capacity(c); 202 | for x in 0..c { 203 | map.insert(x, ()); 204 | } 205 | map 206 | }); 207 | } 208 | 209 | #[bench] 210 | fn insert_ringmap_150_before_0(b: &mut Bencher) { 211 | let c = 150; 212 | b.iter(|| { 213 | let mut map = RingMap::with_capacity(c); 214 | for x in 0..c { 215 | map.insert_before(0, x, ()); 216 | } 217 | map 218 | }); 219 | } 220 | 221 | #[bench] 222 | fn entry_hashmap_150(b: &mut Bencher) { 223 | let c = 150; 224 | b.iter(|| { 225 | let mut map = HashMap::with_capacity(c); 226 | for x in 0..c { 227 | map.entry(x).or_insert(()); 228 | } 229 | map 230 | }); 231 | } 232 | 233 | #[bench] 234 | fn entry_ringmap_150(b: &mut Bencher) { 235 | let c = 150; 236 | b.iter(|| { 237 | let mut map = RingMap::with_capacity(c); 238 | for x in 0..c { 239 | map.entry(x).or_insert(()); 240 | } 241 | map 242 | }); 243 | } 244 | 245 | #[bench] 246 | fn iter_sum_hashmap_10_000(b: &mut Bencher) { 247 | let c = 10_000; 248 | let mut map = HashMap::with_capacity(c); 249 | let len = c - c / 10; 250 | for x in 0..len { 251 | map.insert(x, ()); 252 | } 253 | assert_eq!(map.len(), len); 254 | b.iter(|| 
map.keys().sum::()); 255 | } 256 | 257 | #[bench] 258 | fn iter_sum_ringmap_10_000(b: &mut Bencher) { 259 | let c = 10_000; 260 | let mut map = RingMap::with_capacity(c); 261 | let len = c - c / 10; 262 | for x in 0..len { 263 | map.insert(x, ()); 264 | } 265 | assert_eq!(map.len(), len); 266 | b.iter(|| map.keys().sum::()); 267 | } 268 | 269 | #[bench] 270 | fn iter_black_box_hashmap_10_000(b: &mut Bencher) { 271 | let c = 10_000; 272 | let mut map = HashMap::with_capacity(c); 273 | let len = c - c / 10; 274 | for x in 0..len { 275 | map.insert(x, ()); 276 | } 277 | assert_eq!(map.len(), len); 278 | b.iter(|| { 279 | for &key in map.keys() { 280 | black_box(key); 281 | } 282 | }); 283 | } 284 | 285 | #[bench] 286 | fn iter_black_box_ringmap_10_000(b: &mut Bencher) { 287 | let c = 10_000; 288 | let mut map = RingMap::with_capacity(c); 289 | let len = c - c / 10; 290 | for x in 0..len { 291 | map.insert(x, ()); 292 | } 293 | assert_eq!(map.len(), len); 294 | b.iter(|| { 295 | for &key in map.keys() { 296 | black_box(key); 297 | } 298 | }); 299 | } 300 | 301 | fn shuffled_keys(iter: I) -> Vec 302 | where 303 | I: IntoIterator, 304 | { 305 | let mut v = Vec::from_iter(iter); 306 | let mut rng = small_rng(); 307 | v.shuffle(&mut rng); 308 | v 309 | } 310 | 311 | #[bench] 312 | fn lookup_hashmap_10_000_exist(b: &mut Bencher) { 313 | let c = 10_000; 314 | let mut map = HashMap::with_capacity(c); 315 | let keys = shuffled_keys(0..c); 316 | for &key in &keys { 317 | map.insert(key, 1); 318 | } 319 | b.iter(|| { 320 | let mut found = 0; 321 | for key in 5000..c { 322 | found += map.get(&key).is_some() as i32; 323 | } 324 | found 325 | }); 326 | } 327 | 328 | #[bench] 329 | fn lookup_hashmap_10_000_noexist(b: &mut Bencher) { 330 | let c = 10_000; 331 | let mut map = HashMap::with_capacity(c); 332 | let keys = shuffled_keys(0..c); 333 | for &key in &keys { 334 | map.insert(key, 1); 335 | } 336 | b.iter(|| { 337 | let mut found = 0; 338 | for key in c..15000 { 339 | found += map.get(&key).is_some() as i32; 340 | } 341 | found 342 | }); 343 | } 344 | 345 | #[bench] 346 | fn lookup_ringmap_10_000_exist(b: &mut Bencher) { 347 | let c = 10_000; 348 | let mut map = RingMap::with_capacity(c); 349 | let keys = shuffled_keys(0..c); 350 | for &key in &keys { 351 | map.insert(key, 1); 352 | } 353 | b.iter(|| { 354 | let mut found = 0; 355 | for key in 5000..c { 356 | found += map.get(&key).is_some() as i32; 357 | } 358 | found 359 | }); 360 | } 361 | 362 | #[bench] 363 | fn lookup_ringmap_10_000_noexist(b: &mut Bencher) { 364 | let c = 10_000; 365 | let mut map = RingMap::with_capacity(c); 366 | let keys = shuffled_keys(0..c); 367 | for &key in &keys { 368 | map.insert(key, 1); 369 | } 370 | b.iter(|| { 371 | let mut found = 0; 372 | for key in c..15000 { 373 | found += map.get(&key).is_some() as i32; 374 | } 375 | found 376 | }); 377 | } 378 | 379 | // number of items to look up 380 | const LOOKUP_MAP_SIZE: u32 = 100_000_u32; 381 | const LOOKUP_SAMPLE_SIZE: u32 = 5000; 382 | const SORT_MAP_SIZE: usize = 10_000; 383 | 384 | // use lazy_static so that comparison benchmarks use the exact same inputs 385 | lazy_static! { 386 | static ref KEYS: Vec = shuffled_keys(0..LOOKUP_MAP_SIZE); 387 | } 388 | 389 | lazy_static! { 390 | static ref HMAP_100K: HashMap = { 391 | let c = LOOKUP_MAP_SIZE; 392 | let mut map = HashMap::with_capacity(c as usize); 393 | let keys = &*KEYS; 394 | for &key in keys { 395 | map.insert(key, key); 396 | } 397 | map 398 | }; 399 | } 400 | 401 | lazy_static! 
{ 402 | static ref IMAP_100K: RingMap = { 403 | let c = LOOKUP_MAP_SIZE; 404 | let mut map = RingMap::with_capacity(c as usize); 405 | let keys = &*KEYS; 406 | for &key in keys { 407 | map.insert(key, key); 408 | } 409 | map 410 | }; 411 | } 412 | 413 | lazy_static! { 414 | static ref IMAP_SORT_U32: RingMap = { 415 | let mut map = RingMap::with_capacity(SORT_MAP_SIZE); 416 | for &key in &KEYS[..SORT_MAP_SIZE] { 417 | map.insert(key, key); 418 | } 419 | map 420 | }; 421 | } 422 | lazy_static! { 423 | static ref IMAP_SORT_S: RingMap = { 424 | let mut map = RingMap::with_capacity(SORT_MAP_SIZE); 425 | for &key in &KEYS[..SORT_MAP_SIZE] { 426 | map.insert(format!("{:^16x}", &key), String::new()); 427 | } 428 | map 429 | }; 430 | } 431 | 432 | #[bench] 433 | fn lookup_hashmap_100_000_multi(b: &mut Bencher) { 434 | let map = &*HMAP_100K; 435 | b.iter(|| { 436 | let mut found = 0; 437 | for key in 0..LOOKUP_SAMPLE_SIZE { 438 | found += map.get(&key).is_some() as u32; 439 | } 440 | found 441 | }); 442 | } 443 | 444 | #[bench] 445 | fn lookup_ringmap_100_000_multi(b: &mut Bencher) { 446 | let map = &*IMAP_100K; 447 | b.iter(|| { 448 | let mut found = 0; 449 | for key in 0..LOOKUP_SAMPLE_SIZE { 450 | found += map.get(&key).is_some() as u32; 451 | } 452 | found 453 | }); 454 | } 455 | 456 | // inorder: Test looking up keys in the same order as they were inserted 457 | #[bench] 458 | fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { 459 | let map = &*HMAP_100K; 460 | let keys = &*KEYS; 461 | b.iter(|| { 462 | let mut found = 0; 463 | for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { 464 | found += map.get(key).is_some() as u32; 465 | } 466 | found 467 | }); 468 | } 469 | 470 | #[bench] 471 | fn lookup_ringmap_100_000_inorder_multi(b: &mut Bencher) { 472 | let map = &*IMAP_100K; 473 | let keys = &*KEYS; 474 | b.iter(|| { 475 | let mut found = 0; 476 | for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { 477 | found += map.get(key).is_some() as u32; 478 | } 479 | found 480 | }); 481 | } 482 | 483 | #[bench] 484 | fn lookup_hashmap_100_000_single(b: &mut Bencher) { 485 | let map = &*HMAP_100K; 486 | let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); 487 | b.iter(|| { 488 | let key = iter.next().unwrap(); 489 | map.get(&key).is_some() 490 | }); 491 | } 492 | 493 | #[bench] 494 | fn lookup_ringmap_100_000_single(b: &mut Bencher) { 495 | let map = &*IMAP_100K; 496 | let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); 497 | b.iter(|| { 498 | let key = iter.next().unwrap(); 499 | map.get(&key).is_some() 500 | }); 501 | } 502 | 503 | const GROW_SIZE: usize = 100_000; 504 | type GrowKey = u32; 505 | 506 | // Test grow/resize without preallocation 507 | #[bench] 508 | fn grow_fnv_hashmap_100_000(b: &mut Bencher) { 509 | b.iter(|| { 510 | let mut map: HashMap<_, _, FnvBuilder> = HashMap::default(); 511 | for x in 0..GROW_SIZE { 512 | map.insert(x as GrowKey, x as GrowKey); 513 | } 514 | map 515 | }); 516 | } 517 | 518 | #[bench] 519 | fn grow_fnv_ringmap_100_000(b: &mut Bencher) { 520 | b.iter(|| { 521 | let mut map: RingMap<_, _, FnvBuilder> = RingMap::default(); 522 | for x in 0..GROW_SIZE { 523 | map.insert(x as GrowKey, x as GrowKey); 524 | } 525 | map 526 | }); 527 | } 528 | 529 | const MERGE: u64 = 10_000; 530 | #[bench] 531 | fn hashmap_merge_simple(b: &mut Bencher) { 532 | let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); 533 | let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 534 | b.iter(|| { 535 | let mut merged = 
first_map.clone(); 536 | merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); 537 | merged 538 | }); 539 | } 540 | 541 | #[bench] 542 | fn hashmap_merge_shuffle(b: &mut Bencher) { 543 | let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); 544 | let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 545 | let mut v = Vec::new(); 546 | let mut rng = small_rng(); 547 | b.iter(|| { 548 | let mut merged = first_map.clone(); 549 | v.extend(second_map.iter().map(|(&k, &v)| (k, v))); 550 | v.shuffle(&mut rng); 551 | merged.extend(v.drain(..)); 552 | 553 | merged 554 | }); 555 | } 556 | 557 | #[bench] 558 | fn ringmap_merge_simple(b: &mut Bencher) { 559 | let first_map: RingMap = (0..MERGE).map(|i| (i, ())).collect(); 560 | let second_map: RingMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 561 | b.iter(|| { 562 | let mut merged = first_map.clone(); 563 | merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); 564 | merged 565 | }); 566 | } 567 | 568 | #[bench] 569 | fn ringmap_merge_shuffle(b: &mut Bencher) { 570 | let first_map: RingMap = (0..MERGE).map(|i| (i, ())).collect(); 571 | let second_map: RingMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); 572 | let mut v = Vec::new(); 573 | let mut rng = small_rng(); 574 | b.iter(|| { 575 | let mut merged = first_map.clone(); 576 | v.extend(second_map.iter().map(|(&k, &v)| (k, v))); 577 | v.shuffle(&mut rng); 578 | merged.extend(v.drain(..)); 579 | 580 | merged 581 | }); 582 | } 583 | 584 | #[bench] 585 | fn swap_remove_back_ringmap_100_000(b: &mut Bencher) { 586 | let map = IMAP_100K.clone(); 587 | let mut keys = Vec::from_iter(map.keys().copied()); 588 | let mut rng = small_rng(); 589 | keys.shuffle(&mut rng); 590 | 591 | b.iter(|| { 592 | let mut map = map.clone(); 593 | for key in &keys { 594 | map.swap_remove_back(key); 595 | } 596 | assert_eq!(map.len(), 0); 597 | map 598 | }); 599 | } 600 | 601 | #[bench] 602 | fn swap_remove_front_ringmap_100_000(b: &mut Bencher) { 603 | let map = IMAP_100K.clone(); 604 | let mut keys = Vec::from_iter(map.keys().copied()); 605 | let mut rng = small_rng(); 606 | keys.shuffle(&mut rng); 607 | 608 | b.iter(|| { 609 | let mut map = map.clone(); 610 | for key in &keys { 611 | map.swap_remove_front(key); 612 | } 613 | assert_eq!(map.len(), 0); 614 | map 615 | }); 616 | } 617 | 618 | #[bench] 619 | fn remove_ringmap_100_000_few(b: &mut Bencher) { 620 | let map = IMAP_100K.clone(); 621 | let mut keys = Vec::from_iter(map.keys().copied()); 622 | let mut rng = small_rng(); 623 | keys.shuffle(&mut rng); 624 | keys.truncate(50); 625 | 626 | b.iter(|| { 627 | let mut map = map.clone(); 628 | for key in &keys { 629 | map.remove(key); 630 | } 631 | assert_eq!(map.len(), IMAP_100K.len() - keys.len()); 632 | map 633 | }); 634 | } 635 | 636 | #[bench] 637 | fn remove_ringmap_2_000_full(b: &mut Bencher) { 638 | let mut keys = KEYS[..2_000].to_vec(); 639 | let mut map = RingMap::with_capacity(keys.len()); 640 | for &key in &keys { 641 | map.insert(key, key); 642 | } 643 | let mut rng = small_rng(); 644 | keys.shuffle(&mut rng); 645 | 646 | b.iter(|| { 647 | let mut map = map.clone(); 648 | for key in &keys { 649 | map.remove(key); 650 | } 651 | assert_eq!(map.len(), 0); 652 | map 653 | }); 654 | } 655 | 656 | #[bench] 657 | fn pop_back_ringmap_100_000(b: &mut Bencher) { 658 | let map = IMAP_100K.clone(); 659 | 660 | b.iter(|| { 661 | let mut map = map.clone(); 662 | while !map.is_empty() { 663 | map.pop_back(); 664 | } 665 | assert_eq!(map.len(), 0); 666 | map 667 | }); 668 | } 669 | 670 
| #[bench] 671 | fn pop_front_ringmap_100_000(b: &mut Bencher) { 672 | let map = IMAP_100K.clone(); 673 | 674 | b.iter(|| { 675 | let mut map = map.clone(); 676 | while !map.is_empty() { 677 | map.pop_front(); 678 | } 679 | assert_eq!(map.len(), 0); 680 | map 681 | }); 682 | } 683 | 684 | #[bench] 685 | fn push_back_ringmap_100_000(b: &mut Bencher) { 686 | let static_map = &*IMAP_100K; 687 | 688 | b.iter(|| { 689 | let mut map = RingMap::with_capacity(static_map.len()); 690 | for (&key, &value) in static_map { 691 | map.push_back(key, value); 692 | } 693 | assert_eq!(map.len(), static_map.len()); 694 | map 695 | }); 696 | } 697 | 698 | #[bench] 699 | fn push_front_ringmap_100_000(b: &mut Bencher) { 700 | let static_map = &*IMAP_100K; 701 | 702 | b.iter(|| { 703 | let mut map = RingMap::with_capacity(static_map.len()); 704 | for (&key, &value) in static_map { 705 | map.push_front(key, value); 706 | } 707 | assert_eq!(map.len(), static_map.len()); 708 | map 709 | }); 710 | } 711 | 712 | #[bench] 713 | fn few_retain_ringmap_100_000(b: &mut Bencher) { 714 | let map = IMAP_100K.clone(); 715 | 716 | b.iter(|| { 717 | let mut map = map.clone(); 718 | map.retain(|k, _| *k % 7 == 0); 719 | map 720 | }); 721 | } 722 | 723 | #[bench] 724 | fn few_retain_hashmap_100_000(b: &mut Bencher) { 725 | let map = HMAP_100K.clone(); 726 | 727 | b.iter(|| { 728 | let mut map = map.clone(); 729 | map.retain(|k, _| *k % 7 == 0); 730 | map 731 | }); 732 | } 733 | 734 | #[bench] 735 | fn half_retain_ringmap_100_000(b: &mut Bencher) { 736 | let map = IMAP_100K.clone(); 737 | 738 | b.iter(|| { 739 | let mut map = map.clone(); 740 | map.retain(|k, _| *k % 2 == 0); 741 | map 742 | }); 743 | } 744 | 745 | #[bench] 746 | fn half_retain_hashmap_100_000(b: &mut Bencher) { 747 | let map = HMAP_100K.clone(); 748 | 749 | b.iter(|| { 750 | let mut map = map.clone(); 751 | map.retain(|k, _| *k % 2 == 0); 752 | map 753 | }); 754 | } 755 | 756 | #[bench] 757 | fn many_retain_ringmap_100_000(b: &mut Bencher) { 758 | let map = IMAP_100K.clone(); 759 | 760 | b.iter(|| { 761 | let mut map = map.clone(); 762 | map.retain(|k, _| *k % 100 != 0); 763 | map 764 | }); 765 | } 766 | 767 | #[bench] 768 | fn many_retain_hashmap_100_000(b: &mut Bencher) { 769 | let map = HMAP_100K.clone(); 770 | 771 | b.iter(|| { 772 | let mut map = map.clone(); 773 | map.retain(|k, _| *k % 100 != 0); 774 | map 775 | }); 776 | } 777 | 778 | // simple sort impl for comparison 779 | pub fn simple_sort(m: &mut RingMap) { 780 | let mut ordered: Vec<_> = m.drain(..).collect(); 781 | ordered.sort_by(|left, right| left.0.cmp(&right.0)); 782 | m.extend(ordered); 783 | } 784 | 785 | #[bench] 786 | fn ringmap_sort_s(b: &mut Bencher) { 787 | let map = IMAP_SORT_S.clone(); 788 | 789 | // there's a map clone there, but it's still useful to profile this 790 | b.iter(|| { 791 | let mut map = map.clone(); 792 | map.sort_keys(); 793 | map 794 | }); 795 | } 796 | 797 | #[bench] 798 | fn ringmap_simple_sort_s(b: &mut Bencher) { 799 | let map = IMAP_SORT_S.clone(); 800 | 801 | // there's a map clone there, but it's still useful to profile this 802 | b.iter(|| { 803 | let mut map = map.clone(); 804 | simple_sort(&mut map); 805 | map 806 | }); 807 | } 808 | 809 | #[bench] 810 | fn ringmap_sort_u32(b: &mut Bencher) { 811 | let map = IMAP_SORT_U32.clone(); 812 | 813 | // there's a map clone there, but it's still useful to profile this 814 | b.iter(|| { 815 | let mut map = map.clone(); 816 | map.sort_keys(); 817 | map 818 | }); 819 | } 820 | 821 | #[bench] 822 | fn 
ringmap_simple_sort_u32(b: &mut Bencher) { 823 | let map = IMAP_SORT_U32.clone(); 824 | 825 | // there's a map clone there, but it's still useful to profile this 826 | b.iter(|| { 827 | let mut map = map.clone(); 828 | simple_sort(&mut map); 829 | map 830 | }); 831 | } 832 | 833 | // measure the fixed overhead of cloning in sort benchmarks 834 | #[bench] 835 | fn ringmap_clone_for_sort_s(b: &mut Bencher) { 836 | let map = IMAP_SORT_S.clone(); 837 | 838 | b.iter(|| map.clone()); 839 | } 840 | 841 | #[bench] 842 | fn ringmap_clone_for_sort_u32(b: &mut Bencher) { 843 | let map = IMAP_SORT_U32.clone(); 844 | 845 | b.iter(|| map.clone()); 846 | } 847 | -------------------------------------------------------------------------------- /benches/faststring.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use test::Bencher; 6 | 7 | use ringmap::RingMap; 8 | 9 | use std::collections::HashMap; 10 | 11 | use rand::rngs::SmallRng; 12 | use rand::seq::SliceRandom; 13 | use rand::SeedableRng; 14 | 15 | use std::hash::{Hash, Hasher}; 16 | 17 | use std::borrow::Borrow; 18 | use std::ops::Deref; 19 | 20 | /// Use a consistently seeded Rng for benchmark stability 21 | fn small_rng() -> SmallRng { 22 | let seed = u64::from_le_bytes(*b"ringmap!"); 23 | SmallRng::seed_from_u64(seed) 24 | } 25 | 26 | #[derive(PartialEq, Eq, Copy, Clone)] 27 | #[repr(transparent)] 28 | pub struct OneShot(pub T); 29 | 30 | impl Hash for OneShot { 31 | fn hash(&self, h: &mut H) { 32 | h.write(self.0.as_bytes()) 33 | } 34 | } 35 | 36 | impl<'a, S> From<&'a S> for &'a OneShot 37 | where 38 | S: AsRef, 39 | { 40 | fn from(s: &'a S) -> Self { 41 | let s: &str = s.as_ref(); 42 | unsafe { &*(s as *const str as *const OneShot) } 43 | } 44 | } 45 | 46 | impl Hash for OneShot { 47 | fn hash(&self, h: &mut H) { 48 | h.write(self.0.as_bytes()) 49 | } 50 | } 51 | 52 | impl Borrow> for OneShot { 53 | fn borrow(&self) -> &OneShot { 54 | <&OneShot>::from(&self.0) 55 | } 56 | } 57 | 58 | impl Deref for OneShot { 59 | type Target = T; 60 | fn deref(&self) -> &T { 61 | &self.0 62 | } 63 | } 64 | 65 | fn shuffled_keys(iter: I) -> Vec 66 | where 67 | I: IntoIterator, 68 | { 69 | let mut v = Vec::from_iter(iter); 70 | let mut rng = small_rng(); 71 | v.shuffle(&mut rng); 72 | v 73 | } 74 | 75 | #[bench] 76 | fn insert_hashmap_string_10_000(b: &mut Bencher) { 77 | let c = 10_000; 78 | b.iter(|| { 79 | let mut map = HashMap::with_capacity(c); 80 | for x in 0..c { 81 | map.insert(x.to_string(), ()); 82 | } 83 | map 84 | }); 85 | } 86 | 87 | #[bench] 88 | fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) { 89 | let c = 10_000; 90 | b.iter(|| { 91 | let mut map = HashMap::with_capacity(c); 92 | for x in 0..c { 93 | map.insert(OneShot(x.to_string()), ()); 94 | } 95 | map 96 | }); 97 | } 98 | 99 | #[bench] 100 | fn insert_ringmap_string_10_000(b: &mut Bencher) { 101 | let c = 10_000; 102 | b.iter(|| { 103 | let mut map = RingMap::with_capacity(c); 104 | for x in 0..c { 105 | map.insert(x.to_string(), ()); 106 | } 107 | map 108 | }); 109 | } 110 | 111 | #[bench] 112 | fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) { 113 | let c = 10_000; 114 | let mut map = HashMap::with_capacity(c); 115 | let keys = shuffled_keys(0..c); 116 | for &key in &keys { 117 | map.insert(key.to_string(), 1); 118 | } 119 | let lookups = (5000..c).map(|x| x.to_string()).collect::>(); 120 | b.iter(|| { 121 | let mut found = 0; 122 | for key in &lookups { 123 | found += 
map.get(key).is_some() as i32; 124 | } 125 | found 126 | }); 127 | } 128 | 129 | #[bench] 130 | fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { 131 | let c = 10_000; 132 | let mut map = HashMap::with_capacity(c); 133 | let keys = shuffled_keys(0..c); 134 | for &key in &keys { 135 | map.insert(OneShot(key.to_string()), 1); 136 | } 137 | let lookups = (5000..c) 138 | .map(|x| OneShot(x.to_string())) 139 | .collect::>(); 140 | b.iter(|| { 141 | let mut found = 0; 142 | for key in &lookups { 143 | found += map.get(key).is_some() as i32; 144 | } 145 | found 146 | }); 147 | } 148 | 149 | #[bench] 150 | fn lookup_ringmap_10_000_exist_string(b: &mut Bencher) { 151 | let c = 10_000; 152 | let mut map = RingMap::with_capacity(c); 153 | let keys = shuffled_keys(0..c); 154 | for &key in &keys { 155 | map.insert(key.to_string(), 1); 156 | } 157 | let lookups = (5000..c).map(|x| x.to_string()).collect::>(); 158 | b.iter(|| { 159 | let mut found = 0; 160 | for key in &lookups { 161 | found += map.get(key).is_some() as i32; 162 | } 163 | found 164 | }); 165 | } 166 | 167 | #[bench] 168 | fn lookup_ringmap_10_000_exist_string_oneshot(b: &mut Bencher) { 169 | let c = 10_000; 170 | let mut map = RingMap::with_capacity(c); 171 | let keys = shuffled_keys(0..c); 172 | for &key in &keys { 173 | map.insert(OneShot(key.to_string()), 1); 174 | } 175 | let lookups = (5000..c) 176 | .map(|x| OneShot(x.to_string())) 177 | .collect::>(); 178 | b.iter(|| { 179 | let mut found = 0; 180 | for key in &lookups { 181 | found += map.get(key).is_some() as i32; 182 | } 183 | found 184 | }); 185 | } 186 | -------------------------------------------------------------------------------- /src/arbitrary.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "arbitrary")] 2 | #[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))] 3 | mod impl_arbitrary { 4 | use crate::{RingMap, RingSet}; 5 | use arbitrary::{Arbitrary, Result, Unstructured}; 6 | use core::hash::{BuildHasher, Hash}; 7 | 8 | impl<'a, K, V, S> Arbitrary<'a> for RingMap 9 | where 10 | K: Arbitrary<'a> + Hash + Eq, 11 | V: Arbitrary<'a>, 12 | S: BuildHasher + Default, 13 | { 14 | fn arbitrary(u: &mut Unstructured<'a>) -> Result { 15 | u.arbitrary_iter()?.collect() 16 | } 17 | 18 | fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { 19 | u.arbitrary_take_rest_iter()?.collect() 20 | } 21 | } 22 | 23 | impl<'a, T, S> Arbitrary<'a> for RingSet 24 | where 25 | T: Arbitrary<'a> + Hash + Eq, 26 | S: BuildHasher + Default, 27 | { 28 | fn arbitrary(u: &mut Unstructured<'a>) -> Result { 29 | u.arbitrary_iter()?.collect() 30 | } 31 | 32 | fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { 33 | u.arbitrary_take_rest_iter()?.collect() 34 | } 35 | } 36 | } 37 | 38 | #[cfg(feature = "quickcheck")] 39 | #[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))] 40 | mod impl_quickcheck { 41 | use crate::{RingMap, RingSet}; 42 | use alloc::boxed::Box; 43 | use alloc::vec::Vec; 44 | use core::hash::{BuildHasher, Hash}; 45 | use quickcheck::{Arbitrary, Gen}; 46 | 47 | impl Arbitrary for RingMap 48 | where 49 | K: Arbitrary + Hash + Eq, 50 | V: Arbitrary, 51 | S: BuildHasher + Default + Clone + 'static, 52 | { 53 | fn arbitrary(g: &mut Gen) -> Self { 54 | Self::from_iter(Vec::arbitrary(g)) 55 | } 56 | 57 | fn shrink(&self) -> Box> { 58 | let vec = Vec::from_iter(self.clone()); 59 | Box::new(vec.shrink().map(Self::from_iter)) 60 | } 61 | } 62 | 63 | impl Arbitrary for RingSet 64 | where 65 | T: Arbitrary + Hash + Eq, 66 | S: 
BuildHasher + Default + Clone + 'static,
67 |     {
68 |         fn arbitrary(g: &mut Gen) -> Self {
69 |             Self::from_iter(Vec::arbitrary(g))
70 |         }
71 | 
72 |         fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
73 |             let vec = Vec::from_iter(self.clone());
74 |             Box::new(vec.shrink().map(Self::from_iter))
75 |         }
76 |     }
77 | }
78 | 
--------------------------------------------------------------------------------
/src/borsh.rs:
--------------------------------------------------------------------------------
1 | #![cfg_attr(docsrs, doc(cfg(feature = "borsh")))]
2 | 
3 | use alloc::vec::Vec;
4 | use core::hash::BuildHasher;
5 | use core::hash::Hash;
6 | use core::mem::size_of;
7 | 
8 | use borsh::error::ERROR_ZST_FORBIDDEN;
9 | use borsh::io::{Error, ErrorKind, Read, Result, Write};
10 | use borsh::{BorshDeserialize, BorshSerialize};
11 | 
12 | use crate::map::RingMap;
13 | use crate::set::RingSet;
14 | 
15 | impl<K, V, S> BorshSerialize for RingMap<K, V, S>
16 | where
17 |     K: BorshSerialize,
18 |     V: BorshSerialize,
19 | {
20 |     #[inline]
21 |     fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> {
22 |         check_zst::<K>()?;
23 | 
24 |         let iterator = self.iter();
25 | 
26 |         u32::try_from(iterator.len())
27 |             .map_err(|_| ErrorKind::InvalidData)?
28 |             .serialize(writer)?;
29 | 
30 |         for (key, value) in iterator {
31 |             key.serialize(writer)?;
32 |             value.serialize(writer)?;
33 |         }
34 | 
35 |         Ok(())
36 |     }
37 | }
38 | 
39 | impl<K, V, S> BorshDeserialize for RingMap<K, V, S>
40 | where
41 |     K: BorshDeserialize + Eq + Hash,
42 |     V: BorshDeserialize,
43 |     S: BuildHasher + Default,
44 | {
45 |     #[inline]
46 |     fn deserialize_reader<R: Read>(reader: &mut R) -> Result<Self> {
47 |         check_zst::<K>()?;
48 |         let vec = <Vec<(K, V)>>::deserialize_reader(reader)?;
49 |         Ok(vec.into_iter().collect::<RingMap<K, V, S>>())
50 |     }
51 | }
52 | 
53 | impl<T, S> BorshSerialize for RingSet<T, S>
54 | where
55 |     T: BorshSerialize,
56 | {
57 |     #[inline]
58 |     fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> {
59 |         check_zst::<T>()?;
60 | 
61 |         let iterator = self.iter();
62 | 
63 |         u32::try_from(iterator.len())
64 |             .map_err(|_| ErrorKind::InvalidData)?
65 |             .serialize(writer)?;
66 | 
67 |         for item in iterator {
68 |             item.serialize(writer)?;
69 |         }
70 | 
71 |         Ok(())
72 |     }
73 | }
74 | 
75 | impl<T, S> BorshDeserialize for RingSet<T, S>
76 | where
77 |     T: BorshDeserialize + Eq + Hash,
78 |     S: BuildHasher + Default,
79 | {
80 |     #[inline]
81 |     fn deserialize_reader<R: Read>(reader: &mut R) -> Result<Self> {
82 |         check_zst::<T>()?;
83 |         let vec = <Vec<T>>::deserialize_reader(reader)?;
84 |         Ok(vec.into_iter().collect::<RingSet<T, S>>())
85 |     }
86 | }
87 | 
88 | fn check_zst<T>() -> Result<()> {
89 |     if size_of::<T>() == 0 {
90 |         return Err(Error::new(ErrorKind::InvalidData, ERROR_ZST_FORBIDDEN));
91 |     }
92 |     Ok(())
93 | }
94 | 
95 | #[cfg(test)]
96 | mod borsh_tests {
97 |     use super::*;
98 | 
99 |     #[test]
100 |     fn map_borsh_roundtrip() {
101 |         let original_map: RingMap<i32, i32> = {
102 |             let mut map = RingMap::new();
103 |             map.insert(1, 2);
104 |             map.insert(3, 4);
105 |             map.insert(5, 6);
106 |             map
107 |         };
108 |         let serialized_map = borsh::to_vec(&original_map).unwrap();
109 |         let deserialized_map: RingMap<i32, i32> =
110 |             BorshDeserialize::try_from_slice(&serialized_map).unwrap();
111 |         assert_eq!(original_map, deserialized_map);
112 |     }
113 | 
114 |     #[test]
115 |     fn set_borsh_roundtrip() {
116 |         let original_map: RingSet<i32> = [1, 2, 3, 4, 5, 6].into_iter().collect();
117 |         let serialized_map = borsh::to_vec(&original_map).unwrap();
118 |         let deserialized_map: RingSet<i32> =
119 |             BorshDeserialize::try_from_slice(&serialized_map).unwrap();
120 |         assert_eq!(original_map, deserialized_map);
121 |     }
122 | }
123 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | // We *mostly* avoid unsafe code, but `Slice` allows it for DST casting.
2 | #![deny(unsafe_code)]
3 | #![warn(rust_2018_idioms)]
4 | #![no_std]
5 | 
6 | //! [`RingMap`] is a hash table where the iteration order of the key-value pairs is
7 | //! independent of the hash values of the keys. Entries are stored in a ring buffer
8 | //! (`VecDeque`), allowing efficient manipulation of both the front and back ends.
9 | //!
10 | //! [`RingSet`] is a corresponding hash set using the same implementation and
11 | //! with similar properties.
12 | //!
13 | //! ### Highlights
14 | //!
15 | //! [`RingMap`] and [`RingSet`] are nearly drop-in compatible with the std `HashMap`
16 | //! and `HashSet`, but they also have some features of note:
17 | //!
18 | //! - The ordering semantics (see their documentation for details)
19 | //! - Sorting methods and the [`.pop_back()`][RingMap::pop_back] and
20 | //!   [`.pop_front()`][RingMap::pop_front] methods.
21 | //! - The [`Equivalent`] trait, which offers more flexible equality definitions
22 | //!   between borrowed and owned versions of keys.
23 | //! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable
24 | //!   access to map keys, and [`MutableValues`][set::MutableValues] for sets.
25 | //!
26 | //! ### Feature Flags
27 | //!
28 | //! To reduce the amount of compiled code in the crate by default, certain
29 | //! features are gated behind [feature flags]. These allow you to opt in to (or
30 | //! out of) functionality. Below is a list of the features available in this
31 | //! crate.
32 | //!
33 | //! * `std`: Enables features which require the Rust standard library. For more
34 | //!   information see the section on [`no_std`].
35 | //! * `rayon`: Enables parallel iteration and other parallel methods.
36 | //! * `serde`: Adds implementations for [`Serialize`] and [`Deserialize`]
37 | //!
to [`RingMap`] and [`RingSet`]. Alternative implementations for 38 | //! (de)serializing [`RingMap`] as an ordered sequence are available in the 39 | //! [`map::serde_seq`] module. 40 | //! * `borsh`: Adds implementations for [`BorshSerialize`] and [`BorshDeserialize`] 41 | //! to [`RingMap`] and [`RingSet`]. 42 | //! * `arbitrary`: Adds implementations for the [`arbitrary::Arbitrary`] trait 43 | //! to [`RingMap`] and [`RingSet`]. 44 | //! * `quickcheck`: Adds implementations for the [`quickcheck::Arbitrary`] trait 45 | //! to [`RingMap`] and [`RingSet`]. 46 | //! 47 | //! _Note: only the `std` feature is enabled by default._ 48 | //! 49 | //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section 50 | //! [`no_std`]: #no-standard-library-targets 51 | //! [`Serialize`]: `::serde::Serialize` 52 | //! [`Deserialize`]: `::serde::Deserialize` 53 | //! [`BorshSerialize`]: `::borsh::BorshSerialize` 54 | //! [`BorshDeserialize`]: `::borsh::BorshDeserialize` 55 | //! [`arbitrary::Arbitrary`]: `::arbitrary::Arbitrary` 56 | //! [`quickcheck::Arbitrary`]: `::quickcheck::Arbitrary` 57 | //! 58 | //! ### Alternate Hashers 59 | //! 60 | //! [`RingMap`] and [`RingSet`] have a default hasher type 61 | //! [`S = RandomState`][std::collections::hash_map::RandomState], 62 | //! just like the standard `HashMap` and `HashSet`, which is resistant to 63 | //! HashDoS attacks but not the most performant. Type aliases can make it easier 64 | //! to use alternate hashers: 65 | //! 66 | //! ``` 67 | //! use fnv::FnvBuildHasher; 68 | //! use ringmap::{RingMap, RingSet}; 69 | //! 70 | //! type FnvRingMap = RingMap; 71 | //! type FnvRingSet = RingSet; 72 | //! 73 | //! let std: RingSet = (0..100).collect(); 74 | //! let fnv: FnvRingSet = (0..100).collect(); 75 | //! assert_eq!(std, fnv); 76 | //! ``` 77 | //! 78 | //! ### Rust Version 79 | //! 80 | //! This version of ringmap requires Rust 1.68 or later. 81 | //! 82 | //! The ringmap 0.1 release series will use a carefully considered version 83 | //! upgrade policy, where in a later 0.x version, we will raise the minimum 84 | //! required Rust version. 85 | //! 86 | //! ## No Standard Library Targets 87 | //! 88 | //! This crate supports being built without `std`, requiring `alloc` instead. 89 | //! This is chosen by disabling the default "std" cargo feature, by adding 90 | //! `default-features = false` to your dependency specification. 91 | //! 92 | //! - Creating maps and sets using [`new`][RingMap::new] and 93 | //! [`with_capacity`][RingMap::with_capacity] is unavailable without `std`. 94 | //! Use methods [`RingMap::default`], [`with_hasher`][RingMap::with_hasher], 95 | //! [`with_capacity_and_hasher`][RingMap::with_capacity_and_hasher] instead. 96 | //! A no-std compatible hasher will be needed as well, for example 97 | //! from the crate `twox-hash`. 98 | //! - Macros [`ringmap!`] and [`ringset!`] are unavailable without `std`. Use 99 | //! the macros [`ringmap_with_default!`] and [`ringset_with_default!`] instead. 
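//!
//! As a rough sketch of the `with_hasher` route (`fnv` is used here only as an
//! example of a non-default hasher; any `BuildHasher + Default` type will do):
//!
//! ```
//! use core::hash::BuildHasherDefault;
//! use fnv::FnvHasher;
//! use ringmap::RingMap;
//!
//! let mut map: RingMap<i32, &str, BuildHasherDefault<FnvHasher>> =
//!     RingMap::with_hasher(BuildHasherDefault::default());
//! map.insert(1, "one");
//! assert_eq!(map.get(&1), Some(&"one"));
//! ```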
100 | 101 | #![cfg_attr(docsrs, feature(doc_cfg))] 102 | 103 | extern crate alloc; 104 | 105 | #[cfg(feature = "std")] 106 | #[macro_use] 107 | extern crate std; 108 | 109 | use alloc::collections::vec_deque::{self, VecDeque}; 110 | 111 | mod arbitrary; 112 | #[macro_use] 113 | mod macros; 114 | #[cfg(feature = "borsh")] 115 | mod borsh; 116 | #[cfg(feature = "serde")] 117 | mod serde; 118 | mod util; 119 | 120 | pub mod map; 121 | pub mod set; 122 | 123 | // Placed after `map` and `set` so new `rayon` methods on the types 124 | // are documented after the "normal" methods. 125 | #[cfg(feature = "rayon")] 126 | mod rayon; 127 | 128 | pub use crate::map::RingMap; 129 | pub use crate::set::RingSet; 130 | pub use equivalent::Equivalent; 131 | 132 | // shared private items 133 | 134 | /// Hash value newtype. Not larger than usize, since anything larger 135 | /// isn't used for selecting position anyway. 136 | #[derive(Clone, Copy, Debug, PartialEq)] 137 | struct HashValue(usize); 138 | 139 | impl HashValue { 140 | #[inline(always)] 141 | fn get(self) -> u64 { 142 | self.0 as u64 143 | } 144 | } 145 | 146 | #[derive(Copy, Debug)] 147 | struct Bucket { 148 | hash: HashValue, 149 | key: K, 150 | value: V, 151 | } 152 | 153 | impl Clone for Bucket 154 | where 155 | K: Clone, 156 | V: Clone, 157 | { 158 | fn clone(&self) -> Self { 159 | Bucket { 160 | hash: self.hash, 161 | key: self.key.clone(), 162 | value: self.value.clone(), 163 | } 164 | } 165 | 166 | fn clone_from(&mut self, other: &Self) { 167 | self.hash = other.hash; 168 | self.key.clone_from(&other.key); 169 | self.value.clone_from(&other.value); 170 | } 171 | } 172 | 173 | impl Bucket { 174 | // field accessors -- used for `f` instead of closures in `.map(f)` 175 | fn key_ref(&self) -> &K { 176 | &self.key 177 | } 178 | fn value_ref(&self) -> &V { 179 | &self.value 180 | } 181 | fn value_mut(&mut self) -> &mut V { 182 | &mut self.value 183 | } 184 | fn key(self) -> K { 185 | self.key 186 | } 187 | fn value(self) -> V { 188 | self.value 189 | } 190 | fn key_value(self) -> (K, V) { 191 | (self.key, self.value) 192 | } 193 | fn refs(&self) -> (&K, &V) { 194 | (&self.key, &self.value) 195 | } 196 | fn ref_mut(&mut self) -> (&K, &mut V) { 197 | (&self.key, &mut self.value) 198 | } 199 | fn muts(&mut self) -> (&mut K, &mut V) { 200 | (&mut self.key, &mut self.value) 201 | } 202 | } 203 | 204 | trait Entries { 205 | type Entry; 206 | fn into_entries(self) -> VecDeque; 207 | fn as_entries(&self) -> &VecDeque; 208 | fn as_entries_mut(&mut self) -> &mut VecDeque; 209 | fn with_contiguous_entries(&mut self, f: F) 210 | where 211 | F: FnOnce(&mut [Self::Entry]); 212 | } 213 | 214 | /// The error type for [`try_reserve`][RingMap::try_reserve] methods. 215 | #[derive(Clone, PartialEq, Eq, Debug)] 216 | pub struct TryReserveError { 217 | kind: TryReserveErrorKind, 218 | } 219 | 220 | #[derive(Clone, PartialEq, Eq, Debug)] 221 | enum TryReserveErrorKind { 222 | // The standard library's kind is currently opaque to us, otherwise we could unify this. 223 | Std(alloc::collections::TryReserveError), 224 | CapacityOverflow, 225 | AllocError { layout: alloc::alloc::Layout }, 226 | } 227 | 228 | // These are not `From` so we don't expose them in our public API. 
229 | impl TryReserveError { 230 | fn from_alloc(error: alloc::collections::TryReserveError) -> Self { 231 | Self { 232 | kind: TryReserveErrorKind::Std(error), 233 | } 234 | } 235 | 236 | fn from_hashbrown(error: hashbrown::TryReserveError) -> Self { 237 | Self { 238 | kind: match error { 239 | hashbrown::TryReserveError::CapacityOverflow => { 240 | TryReserveErrorKind::CapacityOverflow 241 | } 242 | hashbrown::TryReserveError::AllocError { layout } => { 243 | TryReserveErrorKind::AllocError { layout } 244 | } 245 | }, 246 | } 247 | } 248 | } 249 | 250 | impl core::fmt::Display for TryReserveError { 251 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 252 | let reason = match &self.kind { 253 | TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f), 254 | TryReserveErrorKind::CapacityOverflow => { 255 | " because the computed capacity exceeded the collection's maximum" 256 | } 257 | TryReserveErrorKind::AllocError { .. } => { 258 | " because the memory allocator returned an error" 259 | } 260 | }; 261 | f.write_str("memory allocation failed")?; 262 | f.write_str(reason) 263 | } 264 | } 265 | 266 | #[cfg(feature = "std")] 267 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 268 | impl std::error::Error for TryReserveError {} 269 | 270 | // NOTE: This is copied from the slice module in the std lib. 271 | /// The error type returned by [`get_disjoint_indices_mut`][`RingMap::get_disjoint_indices_mut`]. 272 | /// 273 | /// It indicates one of two possible errors: 274 | /// - An index is out-of-bounds. 275 | /// - The same index appeared multiple times in the array. 276 | // (or different but overlapping indices when ranges are provided) 277 | #[derive(Debug, Clone, PartialEq, Eq)] 278 | pub enum GetDisjointMutError { 279 | /// An index provided was out-of-bounds for the slice. 280 | IndexOutOfBounds, 281 | /// Two indices provided were overlapping. 282 | OverlappingIndices, 283 | } 284 | 285 | impl core::fmt::Display for GetDisjointMutError { 286 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 287 | let msg = match self { 288 | GetDisjointMutError::IndexOutOfBounds => "an index is out of bounds", 289 | GetDisjointMutError::OverlappingIndices => "there were overlapping indices", 290 | }; 291 | 292 | core::fmt::Display::fmt(msg, f) 293 | } 294 | } 295 | 296 | #[cfg(feature = "std")] 297 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 298 | impl std::error::Error for GetDisjointMutError {} 299 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | /// Create an [`RingMap`][crate::RingMap] from a list of key-value pairs 2 | /// and a [`BuildHasherDefault`][core::hash::BuildHasherDefault]-wrapped custom hasher. 3 | /// 4 | /// ## Example 5 | /// 6 | /// ``` 7 | /// use ringmap::ringmap_with_default; 8 | /// use fnv::FnvHasher; 9 | /// 10 | /// let map = ringmap_with_default!{ 11 | /// FnvHasher; 12 | /// "a" => 1, 13 | /// "b" => 2, 14 | /// }; 15 | /// assert_eq!(map["a"], 1); 16 | /// assert_eq!(map["b"], 2); 17 | /// assert_eq!(map.get("c"), None); 18 | /// 19 | /// // "a" is the first key 20 | /// assert_eq!(map.keys().next(), Some(&"a")); 21 | /// ``` 22 | #[macro_export] 23 | macro_rules! 
ringmap_with_default { 24 | ($H:ty; $($key:expr => $value:expr,)+) => { $crate::ringmap_with_default!($H; $($key => $value),+) }; 25 | ($H:ty; $($key:expr => $value:expr),*) => {{ 26 | let builder = ::core::hash::BuildHasherDefault::<$H>::default(); 27 | const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); 28 | #[allow(unused_mut)] 29 | // Specify your custom `H` (must implement Default + Hasher) as the hasher: 30 | let mut map = $crate::RingMap::with_capacity_and_hasher(CAP, builder); 31 | $( 32 | map.insert($key, $value); 33 | )* 34 | map 35 | }}; 36 | } 37 | 38 | #[cfg(feature = "std")] 39 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 40 | #[macro_export] 41 | /// Create an [`RingMap`][crate::RingMap] from a list of key-value pairs 42 | /// 43 | /// ## Example 44 | /// 45 | /// ``` 46 | /// use ringmap::ringmap; 47 | /// 48 | /// let map = ringmap!{ 49 | /// "a" => 1, 50 | /// "b" => 2, 51 | /// }; 52 | /// assert_eq!(map["a"], 1); 53 | /// assert_eq!(map["b"], 2); 54 | /// assert_eq!(map.get("c"), None); 55 | /// 56 | /// // "a" is the first key 57 | /// assert_eq!(map.keys().next(), Some(&"a")); 58 | /// ``` 59 | macro_rules! ringmap { 60 | ($($key:expr => $value:expr,)+) => { $crate::ringmap!($($key => $value),+) }; 61 | ($($key:expr => $value:expr),*) => { 62 | { 63 | // Note: `stringify!($key)` is just here to consume the repetition, 64 | // but we throw away that string literal during constant evaluation. 65 | const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); 66 | let mut map = $crate::RingMap::with_capacity(CAP); 67 | $( 68 | map.insert($key, $value); 69 | )* 70 | map 71 | } 72 | }; 73 | } 74 | 75 | /// Create an [`RingSet`][crate::RingSet] from a list of values 76 | /// and a [`BuildHasherDefault`][core::hash::BuildHasherDefault]-wrapped custom hasher. 77 | /// 78 | /// ## Example 79 | /// 80 | /// ``` 81 | /// use ringmap::ringset_with_default; 82 | /// use fnv::FnvHasher; 83 | /// 84 | /// let set = ringset_with_default!{ 85 | /// FnvHasher; 86 | /// "a", 87 | /// "b", 88 | /// }; 89 | /// assert!(set.contains("a")); 90 | /// assert!(set.contains("b")); 91 | /// assert!(!set.contains("c")); 92 | /// 93 | /// // "a" is the first value 94 | /// assert_eq!(set.iter().next(), Some(&"a")); 95 | /// ``` 96 | #[macro_export] 97 | macro_rules! ringset_with_default { 98 | ($H:ty; $($value:expr,)+) => { $crate::ringset_with_default!($H; $($value),+) }; 99 | ($H:ty; $($value:expr),*) => {{ 100 | let builder = ::core::hash::BuildHasherDefault::<$H>::default(); 101 | const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); 102 | #[allow(unused_mut)] 103 | // Specify your custom `H` (must implement Default + Hash) as the hasher: 104 | let mut set = $crate::RingSet::with_capacity_and_hasher(CAP, builder); 105 | $( 106 | set.insert($value); 107 | )* 108 | set 109 | }}; 110 | } 111 | 112 | #[cfg(feature = "std")] 113 | #[cfg_attr(docsrs, doc(cfg(feature = "std")))] 114 | #[macro_export] 115 | /// Create an [`RingSet`][crate::RingSet] from a list of values 116 | /// 117 | /// ## Example 118 | /// 119 | /// ``` 120 | /// use ringmap::ringset; 121 | /// 122 | /// let set = ringset!{ 123 | /// "a", 124 | /// "b", 125 | /// }; 126 | /// assert!(set.contains("a")); 127 | /// assert!(set.contains("b")); 128 | /// assert!(!set.contains("c")); 129 | /// 130 | /// // "a" is the first value 131 | /// assert_eq!(set.iter().next(), Some(&"a")); 132 | /// ``` 133 | macro_rules! 
ringset { 134 | ($($value:expr,)+) => { $crate::ringset!($($value),+) }; 135 | ($($value:expr),*) => { 136 | { 137 | // Note: `stringify!($value)` is just here to consume the repetition, 138 | // but we throw away that string literal during constant evaluation. 139 | const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); 140 | let mut set = $crate::RingSet::with_capacity(CAP); 141 | $( 142 | set.insert($value); 143 | )* 144 | set 145 | } 146 | }; 147 | } 148 | 149 | // generate all the Iterator methods by just forwarding to the underlying 150 | // self.iter and mapping its element. 151 | macro_rules! iterator_methods { 152 | // $map_elt is the mapping function from the underlying iterator's element 153 | // same mapping function for both options and iterators 154 | ($map_elt:expr) => { 155 | fn next(&mut self) -> Option { 156 | self.iter.next().map($map_elt) 157 | } 158 | 159 | fn size_hint(&self) -> (usize, Option) { 160 | self.iter.size_hint() 161 | } 162 | 163 | fn count(self) -> usize { 164 | self.iter.len() 165 | } 166 | 167 | fn nth(&mut self, n: usize) -> Option { 168 | self.iter.nth(n).map($map_elt) 169 | } 170 | 171 | fn last(mut self) -> Option { 172 | self.next_back() 173 | } 174 | 175 | fn fold(self, acc: Acc, f: F) -> Acc 176 | where 177 | F: FnMut(Acc, Self::Item) -> Acc, 178 | { 179 | self.iter.map($map_elt).fold(acc, f) 180 | } 181 | 182 | fn collect(self) -> C 183 | where 184 | C: FromIterator, 185 | { 186 | // NB: forwarding this directly to standard iterators will 187 | // allow it to leverage unstable traits like `TrustedLen`. 188 | self.iter.map($map_elt).collect() 189 | } 190 | }; 191 | } 192 | 193 | macro_rules! double_ended_iterator_methods { 194 | // $map_elt is the mapping function from the underlying iterator's element 195 | // same mapping function for both options and iterators 196 | ($map_elt:expr) => { 197 | fn next_back(&mut self) -> Option { 198 | self.iter.next_back().map($map_elt) 199 | } 200 | 201 | fn nth_back(&mut self, n: usize) -> Option { 202 | self.iter.nth_back(n).map($map_elt) 203 | } 204 | 205 | fn rfold(self, acc: Acc, f: F) -> Acc 206 | where 207 | F: FnMut(Acc, Self::Item) -> Acc, 208 | { 209 | self.iter.map($map_elt).rfold(acc, f) 210 | } 211 | }; 212 | } 213 | 214 | // generate `ParallelIterator` methods by just forwarding to the underlying 215 | // self.entries and mapping its elements. 216 | #[cfg(feature = "rayon")] 217 | macro_rules! parallel_iterator_methods { 218 | // $map_elt is the mapping function from the underlying iterator's element 219 | ($map_elt:expr) => { 220 | fn drive_unindexed(self, consumer: C) -> C::Result 221 | where 222 | C: UnindexedConsumer, 223 | { 224 | self.entries 225 | .into_par_iter() 226 | .map($map_elt) 227 | .drive_unindexed(consumer) 228 | } 229 | 230 | // NB: This allows indexed collection, e.g. directly into a `Vec`, but the 231 | // underlying iterator must really be indexed. We should remove this if we 232 | // start having tombstones that must be filtered out. 233 | fn opt_len(&self) -> Option { 234 | Some(self.entries.len()) 235 | } 236 | }; 237 | } 238 | 239 | // generate `IndexedParallelIterator` methods by just forwarding to the underlying 240 | // self.entries and mapping its elements. 241 | #[cfg(feature = "rayon")] 242 | macro_rules! 
indexed_parallel_iterator_methods { 243 | // $map_elt is the mapping function from the underlying iterator's element 244 | ($map_elt:expr) => { 245 | fn drive(self, consumer: C) -> C::Result 246 | where 247 | C: Consumer, 248 | { 249 | self.entries.into_par_iter().map($map_elt).drive(consumer) 250 | } 251 | 252 | fn len(&self) -> usize { 253 | self.entries.len() 254 | } 255 | 256 | fn with_producer(self, callback: CB) -> CB::Output 257 | where 258 | CB: ProducerCallback, 259 | { 260 | self.entries 261 | .into_par_iter() 262 | .map($map_elt) 263 | .with_producer(callback) 264 | } 265 | }; 266 | } 267 | -------------------------------------------------------------------------------- /src/map/mutable.rs: -------------------------------------------------------------------------------- 1 | use core::hash::{BuildHasher, Hash}; 2 | 3 | use super::{ 4 | Bucket, Entries, Entry, Equivalent, IndexedEntry, IterMut2, OccupiedEntry, RingMap, VacantEntry, 5 | }; 6 | 7 | /// Opt-in mutable access to [`RingMap`] keys. 8 | /// 9 | /// These methods expose `&mut K`, mutable references to the key as it is stored 10 | /// in the map. 11 | /// You are allowed to modify the keys in the map **if the modification 12 | /// does not change the key’s hash and equality**. 13 | /// 14 | /// If keys are modified erroneously, you can no longer look them up. 15 | /// This is sound (memory safe) but a logical error hazard (just like 16 | /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). 17 | /// 18 | /// `use` this trait to enable its methods for `RingMap`. 19 | /// 20 | /// This trait is sealed and cannot be implemented for types outside this crate. 21 | pub trait MutableKeys: private::Sealed { 22 | type Key; 23 | type Value; 24 | 25 | /// Return item index, mutable reference to key and value 26 | /// 27 | /// Computes in **O(1)** time (average). 28 | fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut Self::Key, &mut Self::Value)> 29 | where 30 | Q: ?Sized + Hash + Equivalent; 31 | 32 | /// Return mutable reference to key and value at an index. 33 | /// 34 | /// Valid indices are `0 <= index < self.len()`. 35 | /// 36 | /// Computes in **O(1)** time. 37 | fn get_index_mut2(&mut self, index: usize) -> Option<(&mut Self::Key, &mut Self::Value)>; 38 | 39 | /// Return an iterator over the key-value pairs of the map, in their order 40 | fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value>; 41 | 42 | /// Scan through each key-value pair in the map and keep those where the 43 | /// closure `keep` returns `true`. 44 | /// 45 | /// The elements are visited in order, and remaining elements keep their 46 | /// order. 47 | /// 48 | /// Computes in **O(n)** time (average). 49 | fn retain2(&mut self, keep: F) 50 | where 51 | F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; 52 | } 53 | 54 | /// Opt-in mutable access to [`RingMap`] keys. 55 | /// 56 | /// See [`MutableKeys`] for more information. 
57 | impl MutableKeys for RingMap 58 | where 59 | S: BuildHasher, 60 | { 61 | type Key = K; 62 | type Value = V; 63 | 64 | fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> 65 | where 66 | Q: ?Sized + Hash + Equivalent, 67 | { 68 | if let Some(i) = self.get_index_of(key) { 69 | let entry = &mut self.as_entries_mut()[i]; 70 | Some((i, &mut entry.key, &mut entry.value)) 71 | } else { 72 | None 73 | } 74 | } 75 | 76 | fn get_index_mut2(&mut self, index: usize) -> Option<(&mut K, &mut V)> { 77 | self.as_entries_mut().get_mut(index).map(Bucket::muts) 78 | } 79 | 80 | fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value> { 81 | IterMut2::new(self.as_entries_mut()) 82 | } 83 | 84 | fn retain2(&mut self, keep: F) 85 | where 86 | F: FnMut(&mut K, &mut V) -> bool, 87 | { 88 | self.core.retain_in_order(keep); 89 | } 90 | } 91 | 92 | /// Opt-in mutable access to [`Entry`] keys. 93 | /// 94 | /// These methods expose `&mut K`, mutable references to the key as it is stored 95 | /// in the map. 96 | /// You are allowed to modify the keys in the map **if the modification 97 | /// does not change the key’s hash and equality**. 98 | /// 99 | /// If keys are modified erroneously, you can no longer look them up. 100 | /// This is sound (memory safe) but a logical error hazard (just like 101 | /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). 102 | /// 103 | /// `use` this trait to enable its methods for `Entry`. 104 | /// 105 | /// This trait is sealed and cannot be implemented for types outside this crate. 106 | pub trait MutableEntryKey: private::Sealed { 107 | type Key; 108 | 109 | /// Gets a mutable reference to the entry's key, either within the map if occupied, 110 | /// or else the new key that was used to find the entry. 111 | fn key_mut(&mut self) -> &mut Self::Key; 112 | } 113 | 114 | /// Opt-in mutable access to [`Entry`] keys. 115 | /// 116 | /// See [`MutableEntryKey`] for more information. 117 | impl MutableEntryKey for Entry<'_, K, V> { 118 | type Key = K; 119 | fn key_mut(&mut self) -> &mut Self::Key { 120 | match self { 121 | Entry::Occupied(e) => e.key_mut(), 122 | Entry::Vacant(e) => e.key_mut(), 123 | } 124 | } 125 | } 126 | 127 | /// Opt-in mutable access to [`OccupiedEntry`] keys. 128 | /// 129 | /// See [`MutableEntryKey`] for more information. 130 | impl MutableEntryKey for OccupiedEntry<'_, K, V> { 131 | type Key = K; 132 | fn key_mut(&mut self) -> &mut Self::Key { 133 | self.key_mut() 134 | } 135 | } 136 | 137 | /// Opt-in mutable access to [`VacantEntry`] keys. 138 | /// 139 | /// See [`MutableEntryKey`] for more information. 140 | impl MutableEntryKey for VacantEntry<'_, K, V> { 141 | type Key = K; 142 | fn key_mut(&mut self) -> &mut Self::Key { 143 | self.key_mut() 144 | } 145 | } 146 | 147 | /// Opt-in mutable access to [`IndexedEntry`] keys. 148 | /// 149 | /// See [`MutableEntryKey`] for more information. 
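A small sketch of how the opt-in key access above is meant to be used. The `Key` type here is hypothetical: its hashing and equality are driven only by `id`, so mutating the `hits` counter through `MutableKeys` cannot break lookups.

```
use core::hash::{Hash, Hasher};
use ringmap::map::MutableKeys;
use ringmap::RingMap;

// Hypothetical key: `id` drives Eq/Hash, `hits` is bookkeeping they ignore.
struct Key {
    id: u32,
    hits: u32,
}

impl PartialEq for Key {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
impl Eq for Key {}
impl Hash for Key {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}

fn main() {
    let mut map = RingMap::new();
    map.insert(Key { id: 1, hits: 0 }, "one");

    // `get_full_mut2` exposes `&mut Key`; bumping `hits` leaves hash/equality intact.
    if let Some((_index, key, _value)) = map.get_full_mut2(&Key { id: 1, hits: 0 }) {
        key.hits += 1;
    }

    // `retain2` also hands out `&mut K` while filtering in order.
    map.retain2(|key, _value| key.hits < 10);
    assert_eq!(map.len(), 1);
}
```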
150 | impl MutableEntryKey for IndexedEntry<'_, K, V> { 151 | type Key = K; 152 | fn key_mut(&mut self) -> &mut Self::Key { 153 | self.key_mut() 154 | } 155 | } 156 | 157 | mod private { 158 | pub trait Sealed {} 159 | 160 | impl Sealed for super::RingMap {} 161 | impl Sealed for super::Entry<'_, K, V> {} 162 | impl Sealed for super::OccupiedEntry<'_, K, V> {} 163 | impl Sealed for super::VacantEntry<'_, K, V> {} 164 | impl Sealed for super::IndexedEntry<'_, K, V> {} 165 | } 166 | -------------------------------------------------------------------------------- /src/map/serde_seq.rs: -------------------------------------------------------------------------------- 1 | //! Functions to serialize and deserialize an [`RingMap`] as an ordered sequence. 2 | //! 3 | //! The default `serde` implementation serializes `RingMap` as a normal map, 4 | //! but there is no guarantee that serialization formats will preserve the order 5 | //! of the key-value pairs. This module serializes `RingMap` as a sequence of 6 | //! `(key, value)` elements instead, in order. 7 | //! 8 | //! This module may be used in a field attribute for derived implementations: 9 | //! 10 | //! ``` 11 | //! # use ringmap::RingMap; 12 | //! # use serde_derive::{Deserialize, Serialize}; 13 | //! #[derive(Deserialize, Serialize)] 14 | //! struct Data { 15 | //! #[serde(with = "ringmap::map::serde_seq")] 16 | //! map: RingMap, 17 | //! // ... 18 | //! } 19 | //! ``` 20 | 21 | use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; 22 | use serde::ser::{Serialize, Serializer}; 23 | 24 | use core::fmt::{self, Formatter}; 25 | use core::hash::{BuildHasher, Hash}; 26 | use core::marker::PhantomData; 27 | 28 | use crate::map::Slice as MapSlice; 29 | use crate::serde::cautious_capacity; 30 | use crate::set::Slice as SetSlice; 31 | use crate::RingMap; 32 | 33 | /// Serializes a [`map::Slice`][MapSlice] as an ordered sequence. 34 | /// 35 | /// This behaves like [`crate::map::serde_seq`] for `RingMap`, serializing a sequence 36 | /// of `(key, value)` pairs, rather than as a map that might not preserve order. 37 | impl Serialize for MapSlice 38 | where 39 | K: Serialize, 40 | V: Serialize, 41 | { 42 | fn serialize(&self, serializer: T) -> Result 43 | where 44 | T: Serializer, 45 | { 46 | serializer.collect_seq(self) 47 | } 48 | } 49 | 50 | /// Serializes a [`set::Slice`][SetSlice] as an ordered sequence. 51 | impl Serialize for SetSlice 52 | where 53 | T: Serialize, 54 | { 55 | fn serialize(&self, serializer: Se) -> Result 56 | where 57 | Se: Serializer, 58 | { 59 | serializer.collect_seq(self) 60 | } 61 | } 62 | 63 | /// Serializes an [`RingMap`] as an ordered sequence. 64 | /// 65 | /// This function may be used in a field attribute for deriving [`Serialize`]: 66 | /// 67 | /// ``` 68 | /// # use ringmap::RingMap; 69 | /// # use serde_derive::Serialize; 70 | /// #[derive(Serialize)] 71 | /// struct Data { 72 | /// #[serde(serialize_with = "ringmap::map::serde_seq::serialize")] 73 | /// map: RingMap, 74 | /// // ... 
75 | /// } 76 | /// ``` 77 | pub fn serialize(map: &RingMap, serializer: T) -> Result 78 | where 79 | K: Serialize, 80 | V: Serialize, 81 | T: Serializer, 82 | { 83 | serializer.collect_seq(map) 84 | } 85 | 86 | /// Visitor to deserialize a *sequenced* `RingMap` 87 | struct SeqVisitor(PhantomData<(K, V, S)>); 88 | 89 | impl<'de, K, V, S> Visitor<'de> for SeqVisitor 90 | where 91 | K: Deserialize<'de> + Eq + Hash, 92 | V: Deserialize<'de>, 93 | S: Default + BuildHasher, 94 | { 95 | type Value = RingMap; 96 | 97 | fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { 98 | write!(formatter, "a sequenced map") 99 | } 100 | 101 | fn visit_seq(self, mut seq: A) -> Result 102 | where 103 | A: SeqAccess<'de>, 104 | { 105 | let capacity = cautious_capacity::(seq.size_hint()); 106 | let mut map = RingMap::with_capacity_and_hasher(capacity, S::default()); 107 | 108 | while let Some((key, value)) = seq.next_element()? { 109 | map.insert(key, value); 110 | } 111 | 112 | Ok(map) 113 | } 114 | } 115 | 116 | /// Deserializes an [`RingMap`] from an ordered sequence. 117 | /// 118 | /// This function may be used in a field attribute for deriving [`Deserialize`]: 119 | /// 120 | /// ``` 121 | /// # use ringmap::RingMap; 122 | /// # use serde_derive::Deserialize; 123 | /// #[derive(Deserialize)] 124 | /// struct Data { 125 | /// #[serde(deserialize_with = "ringmap::map::serde_seq::deserialize")] 126 | /// map: RingMap, 127 | /// // ... 128 | /// } 129 | /// ``` 130 | pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result, D::Error> 131 | where 132 | D: Deserializer<'de>, 133 | K: Deserialize<'de> + Eq + Hash, 134 | V: Deserialize<'de>, 135 | S: Default + BuildHasher, 136 | { 137 | deserializer.deserialize_seq(SeqVisitor(PhantomData)) 138 | } 139 | -------------------------------------------------------------------------------- /src/map/slice.rs: -------------------------------------------------------------------------------- 1 | use super::{Bucket, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, ValuesMut}; 2 | use crate::util::{slice_eq, try_simplify_range}; 3 | use crate::GetDisjointMutError; 4 | 5 | use alloc::boxed::Box; 6 | use alloc::collections::VecDeque; 7 | use core::cmp::Ordering; 8 | use core::fmt; 9 | use core::hash::{Hash, Hasher}; 10 | use core::ops::{self, Bound, Index, IndexMut, RangeBounds}; 11 | 12 | /// A dynamically-sized slice of key-value pairs in an [`RingMap`][super::RingMap]. 13 | /// 14 | /// This supports indexed operations much like a `[(K, V)]` slice, 15 | /// but not any hashed operations on the map keys. 16 | #[repr(transparent)] 17 | pub struct Slice { 18 | pub(crate) entries: [Bucket], 19 | } 20 | 21 | // SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, 22 | // and reference lifetimes are bound together in function signatures. 
23 | #[allow(unsafe_code)] 24 | impl Slice { 25 | pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { 26 | unsafe { &*(entries as *const [Bucket] as *const Self) } 27 | } 28 | 29 | pub(super) fn from_mut_slice(entries: &mut [Bucket]) -> &mut Self { 30 | unsafe { &mut *(entries as *mut [Bucket] as *mut Self) } 31 | } 32 | 33 | pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { 34 | unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } 35 | } 36 | 37 | fn into_boxed(self: Box) -> Box<[Bucket]> { 38 | unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } 39 | } 40 | } 41 | 42 | impl Slice { 43 | pub(crate) fn into_entries(self: Box) -> VecDeque> { 44 | self.into_boxed().into_vec().into() 45 | } 46 | 47 | /// Returns an empty slice. 48 | pub const fn new<'a>() -> &'a Self { 49 | Self::from_slice(&[]) 50 | } 51 | 52 | /// Returns an empty mutable slice. 53 | pub fn new_mut<'a>() -> &'a mut Self { 54 | Self::from_mut_slice(&mut []) 55 | } 56 | 57 | /// Return the number of key-value pairs in the map slice. 58 | #[inline] 59 | pub const fn len(&self) -> usize { 60 | self.entries.len() 61 | } 62 | 63 | /// Returns true if the map slice contains no elements. 64 | #[inline] 65 | pub const fn is_empty(&self) -> bool { 66 | self.entries.is_empty() 67 | } 68 | 69 | /// Get a key-value pair by index. 70 | /// 71 | /// Valid indices are `0 <= index < self.len()`. 72 | pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { 73 | self.entries.get(index).map(Bucket::refs) 74 | } 75 | 76 | /// Get a key-value pair by index, with mutable access to the value. 77 | /// 78 | /// Valid indices are `0 <= index < self.len()`. 79 | pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { 80 | self.entries.get_mut(index).map(Bucket::ref_mut) 81 | } 82 | 83 | /// Returns a slice of key-value pairs in the given range of indices. 84 | /// 85 | /// Valid indices are `0 <= index < self.len()`. 86 | pub fn get_range>(&self, range: R) -> Option<&Self> { 87 | let range = try_simplify_range(range, self.entries.len())?; 88 | self.entries.get(range).map(Slice::from_slice) 89 | } 90 | 91 | /// Returns a mutable slice of key-value pairs in the given range of indices. 92 | /// 93 | /// Valid indices are `0 <= index < self.len()`. 94 | pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Self> { 95 | let range = try_simplify_range(range, self.entries.len())?; 96 | self.entries.get_mut(range).map(Slice::from_mut_slice) 97 | } 98 | 99 | /// Get the first key-value pair. 100 | pub fn first(&self) -> Option<(&K, &V)> { 101 | self.entries.first().map(Bucket::refs) 102 | } 103 | 104 | /// Get the first key-value pair, with mutable access to the value. 105 | pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { 106 | self.entries.first_mut().map(Bucket::ref_mut) 107 | } 108 | 109 | /// Get the last key-value pair. 110 | pub fn last(&self) -> Option<(&K, &V)> { 111 | self.entries.last().map(Bucket::refs) 112 | } 113 | 114 | /// Get the last key-value pair, with mutable access to the value. 115 | pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { 116 | self.entries.last_mut().map(Bucket::ref_mut) 117 | } 118 | 119 | /// Divides one slice into two at an index. 120 | /// 121 | /// ***Panics*** if `index > len`. 122 | pub fn split_at(&self, index: usize) -> (&Self, &Self) { 123 | let (first, second) = self.entries.split_at(index); 124 | (Self::from_slice(first), Self::from_slice(second)) 125 | } 126 | 127 | /// Divides one mutable slice into two at an index. 
128 | /// 129 | /// ***Panics*** if `index > len`. 130 | pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) { 131 | let (first, second) = self.entries.split_at_mut(index); 132 | (Self::from_mut_slice(first), Self::from_mut_slice(second)) 133 | } 134 | 135 | /// Returns the first key-value pair and the rest of the slice, 136 | /// or `None` if it is empty. 137 | pub fn split_first(&self) -> Option<((&K, &V), &Self)> { 138 | if let [first, rest @ ..] = &self.entries { 139 | Some((first.refs(), Self::from_slice(rest))) 140 | } else { 141 | None 142 | } 143 | } 144 | 145 | /// Returns the first key-value pair and the rest of the slice, 146 | /// with mutable access to the value, or `None` if it is empty. 147 | pub fn split_first_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { 148 | if let [first, rest @ ..] = &mut self.entries { 149 | Some((first.ref_mut(), Self::from_mut_slice(rest))) 150 | } else { 151 | None 152 | } 153 | } 154 | 155 | /// Returns the last key-value pair and the rest of the slice, 156 | /// or `None` if it is empty. 157 | pub fn split_last(&self) -> Option<((&K, &V), &Self)> { 158 | if let [rest @ .., last] = &self.entries { 159 | Some((last.refs(), Self::from_slice(rest))) 160 | } else { 161 | None 162 | } 163 | } 164 | 165 | /// Returns the last key-value pair and the rest of the slice, 166 | /// with mutable access to the value, or `None` if it is empty. 167 | pub fn split_last_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { 168 | if let [rest @ .., last] = &mut self.entries { 169 | Some((last.ref_mut(), Self::from_mut_slice(rest))) 170 | } else { 171 | None 172 | } 173 | } 174 | 175 | /// Return an iterator over the key-value pairs of the map slice. 176 | pub fn iter(&self) -> Iter<'_, K, V> { 177 | Iter::from_slice(&self.entries) 178 | } 179 | 180 | /// Return an iterator over the key-value pairs of the map slice. 181 | pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { 182 | IterMut::from_mut_slice(&mut self.entries) 183 | } 184 | 185 | /// Return an iterator over the keys of the map slice. 186 | pub fn keys(&self) -> Keys<'_, K, V> { 187 | Keys::from_slice(&self.entries) 188 | } 189 | 190 | /// Return an owning iterator over the keys of the map slice. 191 | pub fn into_keys(self: Box) -> IntoKeys { 192 | IntoKeys::new(self.into_entries()) 193 | } 194 | 195 | /// Return an iterator over the values of the map slice. 196 | pub fn values(&self) -> Values<'_, K, V> { 197 | Values::from_slice(&self.entries) 198 | } 199 | 200 | /// Return an iterator over mutable references to the the values of the map slice. 201 | pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { 202 | ValuesMut::from_mut_slice(&mut self.entries) 203 | } 204 | 205 | /// Return an owning iterator over the values of the map slice. 206 | pub fn into_values(self: Box) -> IntoValues { 207 | IntoValues::new(self.into_entries()) 208 | } 209 | 210 | /// Search over a sorted map for a key. 211 | /// 212 | /// Returns the position where that key is present, or the position where it can be inserted to 213 | /// maintain the sort. See [`slice::binary_search`] for more details. 214 | /// 215 | /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up in 216 | /// the map this is a slice from using [`RingMap::get_index_of`][super::RingMap::get_index_of], 217 | /// but this can also position missing keys. 
218 | pub fn binary_search_keys(&self, x: &K) -> Result 219 | where 220 | K: Ord, 221 | { 222 | self.binary_search_by(|p, _| p.cmp(x)) 223 | } 224 | 225 | /// Search over a sorted map with a comparator function. 226 | /// 227 | /// Returns the position where that value is present, or the position where it can be inserted 228 | /// to maintain the sort. See [`slice::binary_search_by`] for more details. 229 | /// 230 | /// Computes in **O(log(n))** time. 231 | #[inline] 232 | pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result 233 | where 234 | F: FnMut(&'a K, &'a V) -> Ordering, 235 | { 236 | self.entries.binary_search_by(move |a| f(&a.key, &a.value)) 237 | } 238 | 239 | /// Search over a sorted map with an extraction function. 240 | /// 241 | /// Returns the position where that value is present, or the position where it can be inserted 242 | /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. 243 | /// 244 | /// Computes in **O(log(n))** time. 245 | #[inline] 246 | pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result 247 | where 248 | F: FnMut(&'a K, &'a V) -> B, 249 | B: Ord, 250 | { 251 | self.binary_search_by(|k, v| f(k, v).cmp(b)) 252 | } 253 | 254 | /// Returns the index of the partition point of a sorted map according to the given predicate 255 | /// (the index of the first element of the second partition). 256 | /// 257 | /// See [`slice::partition_point`] for more details. 258 | /// 259 | /// Computes in **O(log(n))** time. 260 | #[must_use] 261 | pub fn partition_point
<P>
(&self, mut pred: P) -> usize 262 | where 263 | P: FnMut(&K, &V) -> bool, 264 | { 265 | self.entries 266 | .partition_point(move |a| pred(&a.key, &a.value)) 267 | } 268 | 269 | /// Get an array of `N` key-value pairs by `N` indices 270 | /// 271 | /// Valid indices are *0 <= index < self.len()* and each index needs to be unique. 272 | pub fn get_disjoint_mut( 273 | &mut self, 274 | indices: [usize; N], 275 | ) -> Result<[(&K, &mut V); N], GetDisjointMutError> { 276 | let indices = indices.map(Some); 277 | let empty_tail = Self::new_mut(); 278 | let key_values = Self::get_disjoint_opt_mut(self, empty_tail, indices)?; 279 | Ok(key_values.map(Option::unwrap)) 280 | } 281 | 282 | #[allow(unsafe_code)] 283 | pub(crate) fn get_disjoint_opt_mut<'a, const N: usize>( 284 | head: &mut Self, 285 | tail: &mut Self, 286 | indices: [Option; N], 287 | ) -> Result<[Option<(&'a K, &'a mut V)>; N], GetDisjointMutError> { 288 | let mid = head.len(); 289 | let len = mid + tail.len(); 290 | 291 | // SAFETY: Can't allow duplicate indices as we would return several mutable refs to the same data. 292 | for i in 0..N { 293 | if let Some(idx) = indices[i] { 294 | if idx >= len { 295 | return Err(GetDisjointMutError::IndexOutOfBounds); 296 | } else if indices[..i].contains(&Some(idx)) { 297 | return Err(GetDisjointMutError::OverlappingIndices); 298 | } 299 | } 300 | } 301 | 302 | let head_ptr = head.entries.as_mut_ptr(); 303 | let tail_ptr = tail.entries.as_mut_ptr(); 304 | let out = indices.map(|idx_opt| { 305 | match idx_opt { 306 | Some(idx) => { 307 | // SAFETY: The base pointers are valid as they come from slices and the reference is always 308 | // in-bounds & unique as we've already checked the indices above. 309 | unsafe { 310 | let ptr = match idx.checked_sub(mid) { 311 | None => head_ptr.add(idx), 312 | Some(tidx) => tail_ptr.add(tidx), 313 | }; 314 | Some((*ptr).ref_mut()) 315 | } 316 | } 317 | None => None, 318 | } 319 | }); 320 | 321 | Ok(out) 322 | } 323 | } 324 | 325 | impl<'a, K, V> IntoIterator for &'a Slice { 326 | type IntoIter = Iter<'a, K, V>; 327 | type Item = (&'a K, &'a V); 328 | 329 | fn into_iter(self) -> Self::IntoIter { 330 | self.iter() 331 | } 332 | } 333 | 334 | impl<'a, K, V> IntoIterator for &'a mut Slice { 335 | type IntoIter = IterMut<'a, K, V>; 336 | type Item = (&'a K, &'a mut V); 337 | 338 | fn into_iter(self) -> Self::IntoIter { 339 | self.iter_mut() 340 | } 341 | } 342 | 343 | impl IntoIterator for Box> { 344 | type IntoIter = IntoIter; 345 | type Item = (K, V); 346 | 347 | fn into_iter(self) -> Self::IntoIter { 348 | IntoIter::new(self.into_entries()) 349 | } 350 | } 351 | 352 | impl Default for &'_ Slice { 353 | fn default() -> Self { 354 | Slice::from_slice(&[]) 355 | } 356 | } 357 | 358 | impl Default for &'_ mut Slice { 359 | fn default() -> Self { 360 | Slice::from_mut_slice(&mut []) 361 | } 362 | } 363 | 364 | impl Default for Box> { 365 | fn default() -> Self { 366 | Slice::from_boxed(Box::default()) 367 | } 368 | } 369 | 370 | impl Clone for Box> { 371 | fn clone(&self) -> Self { 372 | Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) 373 | } 374 | } 375 | 376 | impl From<&Slice> for Box> { 377 | fn from(slice: &Slice) -> Self { 378 | Slice::from_boxed(Box::from(&slice.entries)) 379 | } 380 | } 381 | 382 | impl fmt::Debug for Slice { 383 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 384 | f.debug_list().entries(self).finish() 385 | } 386 | } 387 | 388 | impl PartialEq> for Slice 389 | where 390 | K: PartialEq, 391 | V: PartialEq, 392 | { 393 | 
fn eq(&self, other: &Slice) -> bool { 394 | slice_eq(&self.entries, &other.entries, |b1, b2| { 395 | b1.key == b2.key && b1.value == b2.value 396 | }) 397 | } 398 | } 399 | 400 | impl PartialEq<[(K2, V2)]> for Slice 401 | where 402 | K: PartialEq, 403 | V: PartialEq, 404 | { 405 | fn eq(&self, other: &[(K2, V2)]) -> bool { 406 | slice_eq(&self.entries, other, |b, t| b.key == t.0 && b.value == t.1) 407 | } 408 | } 409 | 410 | impl PartialEq> for [(K, V)] 411 | where 412 | K: PartialEq, 413 | V: PartialEq, 414 | { 415 | fn eq(&self, other: &Slice) -> bool { 416 | slice_eq(self, &other.entries, |t, b| t.0 == b.key && t.1 == b.value) 417 | } 418 | } 419 | 420 | impl PartialEq<[(K2, V2); N]> for Slice 421 | where 422 | K: PartialEq, 423 | V: PartialEq, 424 | { 425 | fn eq(&self, other: &[(K2, V2); N]) -> bool { 426 | >::eq(self, other) 427 | } 428 | } 429 | 430 | impl PartialEq> for [(K, V); N] 431 | where 432 | K: PartialEq, 433 | V: PartialEq, 434 | { 435 | fn eq(&self, other: &Slice) -> bool { 436 | <[_] as PartialEq<_>>::eq(self, other) 437 | } 438 | } 439 | 440 | impl Eq for Slice {} 441 | 442 | impl PartialOrd for Slice { 443 | fn partial_cmp(&self, other: &Self) -> Option { 444 | self.iter().partial_cmp(other) 445 | } 446 | } 447 | 448 | impl Ord for Slice { 449 | fn cmp(&self, other: &Self) -> Ordering { 450 | self.iter().cmp(other) 451 | } 452 | } 453 | 454 | impl Hash for Slice { 455 | fn hash(&self, state: &mut H) { 456 | self.len().hash(state); 457 | for (key, value) in self { 458 | key.hash(state); 459 | value.hash(state); 460 | } 461 | } 462 | } 463 | 464 | impl Index for Slice { 465 | type Output = V; 466 | 467 | fn index(&self, index: usize) -> &V { 468 | &self.entries[index].value 469 | } 470 | } 471 | 472 | impl IndexMut for Slice { 473 | fn index_mut(&mut self, index: usize) -> &mut V { 474 | &mut self.entries[index].value 475 | } 476 | } 477 | 478 | // We can't have `impl> Index` because that conflicts 479 | // both upstream with `Index` and downstream with `Index<&Q>`. 480 | // Instead, we repeat the implementations for all the core range types. 481 | macro_rules! 
impl_index { 482 | ($($range:ty),*) => {$( 483 | impl Index<$range> for Slice { 484 | type Output = Slice; 485 | 486 | fn index(&self, range: $range) -> &Self { 487 | Self::from_slice(&self.entries[range]) 488 | } 489 | } 490 | 491 | impl IndexMut<$range> for Slice { 492 | fn index_mut(&mut self, range: $range) -> &mut Self { 493 | Self::from_mut_slice(&mut self.entries[range]) 494 | } 495 | } 496 | )*} 497 | } 498 | impl_index!( 499 | ops::Range, 500 | ops::RangeFrom, 501 | ops::RangeFull, 502 | ops::RangeInclusive, 503 | ops::RangeTo, 504 | ops::RangeToInclusive, 505 | (Bound, Bound) 506 | ); 507 | 508 | #[cfg(test)] 509 | mod tests { 510 | use super::*; 511 | use crate::RingMap; 512 | use alloc::vec::Vec; 513 | 514 | #[test] 515 | fn slice_index() { 516 | fn check(vec_slice: &[(i32, i32)], map_slice: &Slice) { 517 | itertools::assert_equal( 518 | vec_slice.iter().copied(), 519 | map_slice.iter().map(|(&k, &v)| (k, v)), 520 | ); 521 | itertools::assert_equal(vec_slice.iter().map(|(k, _)| k), map_slice.keys()); 522 | itertools::assert_equal(vec_slice.iter().map(|(_, v)| v), map_slice.values()); 523 | } 524 | 525 | let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); 526 | let map: RingMap = vec.iter().cloned().collect(); 527 | let (slice, tail) = map.as_slices(); 528 | assert!(tail.is_empty()); 529 | 530 | // RangeFull 531 | check(&vec[..], &slice[..]); 532 | 533 | for i in 0usize..10 { 534 | // Index 535 | assert_eq!(vec[i].1, map[i]); 536 | assert_eq!(vec[i].1, slice[i]); 537 | assert_eq!(map[&(i as i32)], map[i]); 538 | assert_eq!(map[&(i as i32)], slice[i]); 539 | 540 | // RangeFrom 541 | check(&vec[i..], &slice[i..]); 542 | 543 | // RangeTo 544 | check(&vec[..i], &slice[..i]); 545 | 546 | // RangeToInclusive 547 | check(&vec[..=i], &slice[..=i]); 548 | 549 | // (Bound, Bound) 550 | let bounds = (Bound::Excluded(i), Bound::Unbounded); 551 | check(&vec[i + 1..], &slice[bounds]); 552 | 553 | for j in i..=10 { 554 | // Range 555 | check(&vec[i..j], &slice[i..j]); 556 | } 557 | 558 | for j in i..10 { 559 | // RangeInclusive 560 | check(&vec[i..=j], &slice[i..=j]); 561 | } 562 | } 563 | } 564 | 565 | #[test] 566 | fn slice_index_mut() { 567 | fn check_mut(vec_slice: &[(i32, i32)], map_slice: &mut Slice) { 568 | itertools::assert_equal( 569 | vec_slice.iter().copied(), 570 | map_slice.iter_mut().map(|(&k, &mut v)| (k, v)), 571 | ); 572 | itertools::assert_equal( 573 | vec_slice.iter().map(|&(_, v)| v), 574 | map_slice.values_mut().map(|&mut v| v), 575 | ); 576 | } 577 | 578 | let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); 579 | let mut map: RingMap = vec.iter().cloned().collect(); 580 | let mut map2 = map.clone(); 581 | let (slice, tail) = map2.as_mut_slices(); 582 | assert!(tail.is_empty()); 583 | 584 | // RangeFull 585 | check_mut(&vec[..], &mut slice[..]); 586 | 587 | for i in 0usize..10 { 588 | // IndexMut 589 | assert_eq!(&mut map[i], &mut slice[i]); 590 | 591 | // RangeFrom 592 | check_mut(&vec[i..], &mut slice[i..]); 593 | 594 | // RangeTo 595 | check_mut(&vec[..i], &mut slice[..i]); 596 | 597 | // RangeToInclusive 598 | check_mut(&vec[..=i], &mut slice[..=i]); 599 | 600 | // (Bound, Bound) 601 | let bounds = (Bound::Excluded(i), Bound::Unbounded); 602 | check_mut(&vec[i + 1..], &mut slice[bounds]); 603 | 604 | for j in i..=10 { 605 | // Range 606 | check_mut(&vec[i..j], &mut slice[i..j]); 607 | } 608 | 609 | for j in i..10 { 610 | // RangeInclusive 611 | check_mut(&vec[i..=j], &mut slice[i..=j]); 612 | } 613 | } 614 | } 615 | } 616 | 
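A short usage sketch for the slice API defined above, following the tests' use of `RingMap::as_slices`: a freshly collected map keeps all entries in the head slice, and because the keys below are inserted in ascending order, the sorted-slice helpers apply directly.

```
use ringmap::RingMap;

fn main() {
    let map: RingMap<i32, i32> = (0..10).map(|i| (i, i * i)).collect();

    // The ring buffer is exposed as (head, tail) slices; a freshly built map
    // has not wrapped around, so the tail is empty.
    let (head, tail) = map.as_slices();
    assert!(tail.is_empty());

    // Indexed access and range indexing, much like `[(K, V)]`.
    assert_eq!(head.get_index(3), Some((&3, &9)));
    assert_eq!(head[2..5].len(), 3);

    // Keys were inserted in sorted order, so binary search is meaningful here.
    assert_eq!(head.binary_search_keys(&7), Ok(7));
    assert_eq!(head.partition_point(|k, _v| *k < 5), 5);
}
```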
-------------------------------------------------------------------------------- /src/rayon/map.rs: -------------------------------------------------------------------------------- 1 | //! Parallel iterator types for [`RingMap`] with [`rayon`][::rayon]. 2 | //! 3 | //! You will rarely need to interact with this module directly unless you need to name one of the 4 | //! iterator types. 5 | 6 | use super::collect; 7 | use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; 8 | use rayon::prelude::*; 9 | 10 | use alloc::boxed::Box; 11 | use alloc::collections::VecDeque; 12 | use alloc::vec::Vec; 13 | use core::cmp::Ordering; 14 | use core::fmt; 15 | use core::hash::{BuildHasher, Hash}; 16 | use core::ops::RangeBounds; 17 | 18 | use crate::map::Slice; 19 | use crate::Bucket; 20 | use crate::Entries; 21 | use crate::RingMap; 22 | 23 | impl IntoParallelIterator for RingMap 24 | where 25 | K: Send, 26 | V: Send, 27 | { 28 | type Item = (K, V); 29 | type Iter = IntoParIter; 30 | 31 | fn into_par_iter(self) -> Self::Iter { 32 | IntoParIter { 33 | entries: self.into_entries(), 34 | } 35 | } 36 | } 37 | 38 | impl IntoParallelIterator for Box> 39 | where 40 | K: Send, 41 | V: Send, 42 | { 43 | type Item = (K, V); 44 | type Iter = IntoParIter; 45 | 46 | fn into_par_iter(self) -> Self::Iter { 47 | IntoParIter { 48 | entries: self.into_entries(), 49 | } 50 | } 51 | } 52 | 53 | /// A parallel owning iterator over the entries of an [`RingMap`]. 54 | /// 55 | /// This `struct` is created by the [`RingMap::into_par_iter`] method 56 | /// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more. 57 | pub struct IntoParIter { 58 | entries: VecDeque>, 59 | } 60 | 61 | impl fmt::Debug for IntoParIter { 62 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 63 | let iter = self.entries.iter().map(Bucket::refs); 64 | f.debug_list().entries(iter).finish() 65 | } 66 | } 67 | 68 | impl ParallelIterator for IntoParIter { 69 | type Item = (K, V); 70 | 71 | parallel_iterator_methods!(Bucket::key_value); 72 | } 73 | 74 | impl IndexedParallelIterator for IntoParIter { 75 | indexed_parallel_iterator_methods!(Bucket::key_value); 76 | } 77 | 78 | /// Internal iterator over `VecDeque` slices 79 | pub(super) struct ParBuckets<'a, K, V> { 80 | head: &'a [Bucket], 81 | tail: &'a [Bucket], 82 | } 83 | 84 | impl<'a, K, V> ParBuckets<'a, K, V> { 85 | pub(super) fn new(entries: &'a VecDeque>) -> Self { 86 | let (head, tail) = entries.as_slices(); 87 | Self { head, tail } 88 | } 89 | 90 | pub(super) fn from_slice(slice: &'a [Bucket]) -> Self { 91 | Self { 92 | head: slice, 93 | tail: &[], 94 | } 95 | } 96 | 97 | pub(super) fn iter(&self) -> impl Iterator> { 98 | self.head.iter().chain(self.tail) 99 | } 100 | } 101 | 102 | impl Clone for ParBuckets<'_, K, V> { 103 | fn clone(&self) -> Self { 104 | Self { ..*self } 105 | } 106 | } 107 | 108 | impl<'a, K: Sync, V: Sync> ParallelIterator for ParBuckets<'a, K, V> { 109 | type Item = &'a Bucket; 110 | 111 | fn drive_unindexed(self, consumer: C) -> C::Result 112 | where 113 | C: UnindexedConsumer, 114 | { 115 | self.head 116 | .par_iter() 117 | .chain(self.tail) 118 | .drive_unindexed(consumer) 119 | } 120 | 121 | fn opt_len(&self) -> Option { 122 | Some(self.len()) 123 | } 124 | } 125 | 126 | impl IndexedParallelIterator for ParBuckets<'_, K, V> { 127 | fn drive(self, consumer: C) -> C::Result 128 | where 129 | C: Consumer, 130 | { 131 | self.head.par_iter().chain(self.tail).drive(consumer) 132 | } 133 | 134 | fn len(&self) -> usize 
{ 135 | self.head.len() + self.tail.len() 136 | } 137 | 138 | fn with_producer(self, callback: CB) -> CB::Output 139 | where 140 | CB: ProducerCallback, 141 | { 142 | self.head 143 | .par_iter() 144 | .chain(self.tail) 145 | .with_producer(callback) 146 | } 147 | } 148 | 149 | impl<'a, K, V, S> IntoParallelIterator for &'a RingMap 150 | where 151 | K: Sync, 152 | V: Sync, 153 | { 154 | type Item = (&'a K, &'a V); 155 | type Iter = ParIter<'a, K, V>; 156 | 157 | fn into_par_iter(self) -> Self::Iter { 158 | ParIter { 159 | entries: ParBuckets::new(self.as_entries()), 160 | } 161 | } 162 | } 163 | 164 | impl<'a, K, V> IntoParallelIterator for &'a Slice 165 | where 166 | K: Sync, 167 | V: Sync, 168 | { 169 | type Item = (&'a K, &'a V); 170 | type Iter = ParIter<'a, K, V>; 171 | 172 | fn into_par_iter(self) -> Self::Iter { 173 | ParIter { 174 | entries: ParBuckets::from_slice(&self.entries), 175 | } 176 | } 177 | } 178 | 179 | /// A parallel iterator over the entries of an [`RingMap`]. 180 | /// 181 | /// This `struct` is created by the [`RingMap::par_iter`] method 182 | /// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more. 183 | /// 184 | /// [`RingMap::par_iter`]: ../struct.RingMap.html#method.par_iter 185 | pub struct ParIter<'a, K, V> { 186 | entries: ParBuckets<'a, K, V>, 187 | } 188 | 189 | impl Clone for ParIter<'_, K, V> { 190 | fn clone(&self) -> Self { 191 | ParIter { 192 | entries: self.entries.clone(), 193 | } 194 | } 195 | } 196 | 197 | impl fmt::Debug for ParIter<'_, K, V> { 198 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 199 | let iter = self.entries.iter().map(Bucket::refs); 200 | f.debug_list().entries(iter).finish() 201 | } 202 | } 203 | 204 | impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { 205 | type Item = (&'a K, &'a V); 206 | 207 | parallel_iterator_methods!(Bucket::refs); 208 | } 209 | 210 | impl IndexedParallelIterator for ParIter<'_, K, V> { 211 | indexed_parallel_iterator_methods!(Bucket::refs); 212 | } 213 | 214 | /// Internal iterator over `VecDeque` mutable slices 215 | struct ParBucketsMut<'a, K, V> { 216 | head: &'a mut [Bucket], 217 | tail: &'a mut [Bucket], 218 | } 219 | 220 | impl<'a, K, V> ParBucketsMut<'a, K, V> { 221 | fn new(entries: &'a mut VecDeque>) -> Self { 222 | let (head, tail) = entries.as_mut_slices(); 223 | Self { head, tail } 224 | } 225 | 226 | fn from_mut_slice(slice: &'a mut [Bucket]) -> Self { 227 | Self { 228 | head: slice, 229 | tail: &mut [], 230 | } 231 | } 232 | 233 | fn iter(&self) -> impl Iterator> { 234 | self.head.iter().chain(&*self.tail) 235 | } 236 | } 237 | 238 | impl<'a, K: Send, V: Send> ParallelIterator for ParBucketsMut<'a, K, V> { 239 | type Item = &'a mut Bucket; 240 | 241 | fn drive_unindexed(self, consumer: C) -> C::Result 242 | where 243 | C: UnindexedConsumer, 244 | { 245 | self.head 246 | .par_iter_mut() 247 | .chain(self.tail) 248 | .drive_unindexed(consumer) 249 | } 250 | 251 | fn opt_len(&self) -> Option { 252 | Some(self.len()) 253 | } 254 | } 255 | 256 | impl IndexedParallelIterator for ParBucketsMut<'_, K, V> { 257 | fn drive(self, consumer: C) -> C::Result 258 | where 259 | C: Consumer, 260 | { 261 | self.head.par_iter_mut().chain(self.tail).drive(consumer) 262 | } 263 | 264 | fn len(&self) -> usize { 265 | self.head.len() + self.tail.len() 266 | } 267 | 268 | fn with_producer(self, callback: CB) -> CB::Output 269 | where 270 | CB: ProducerCallback, 271 | { 272 | self.head 273 | .par_iter_mut() 274 | .chain(self.tail) 275 | 
.with_producer(callback) 276 | } 277 | } 278 | 279 | impl<'a, K, V, S> IntoParallelIterator for &'a mut RingMap 280 | where 281 | K: Sync + Send, 282 | V: Send, 283 | { 284 | type Item = (&'a K, &'a mut V); 285 | type Iter = ParIterMut<'a, K, V>; 286 | 287 | fn into_par_iter(self) -> Self::Iter { 288 | ParIterMut { 289 | entries: ParBucketsMut::new(self.as_entries_mut()), 290 | } 291 | } 292 | } 293 | 294 | impl<'a, K, V> IntoParallelIterator for &'a mut Slice 295 | where 296 | K: Sync + Send, 297 | V: Send, 298 | { 299 | type Item = (&'a K, &'a mut V); 300 | type Iter = ParIterMut<'a, K, V>; 301 | 302 | fn into_par_iter(self) -> Self::Iter { 303 | ParIterMut { 304 | entries: ParBucketsMut::from_mut_slice(&mut self.entries), 305 | } 306 | } 307 | } 308 | 309 | /// A parallel mutable iterator over the entries of an [`RingMap`]. 310 | /// 311 | /// This `struct` is created by the [`RingMap::par_iter_mut`] method 312 | /// (provided by rayon's [`IntoParallelRefMutIterator`] trait). See its documentation for more. 313 | /// 314 | /// [`RingMap::par_iter_mut`]: ../struct.RingMap.html#method.par_iter_mut 315 | pub struct ParIterMut<'a, K, V> { 316 | entries: ParBucketsMut<'a, K, V>, 317 | } 318 | 319 | impl fmt::Debug for ParIterMut<'_, K, V> { 320 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 321 | let iter = self.entries.iter().map(Bucket::refs); 322 | f.debug_list().entries(iter).finish() 323 | } 324 | } 325 | 326 | impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> { 327 | type Item = (&'a K, &'a mut V); 328 | 329 | parallel_iterator_methods!(Bucket::ref_mut); 330 | } 331 | 332 | impl IndexedParallelIterator for ParIterMut<'_, K, V> { 333 | indexed_parallel_iterator_methods!(Bucket::ref_mut); 334 | } 335 | 336 | impl<'a, K, V, S> ParallelDrainRange for &'a mut RingMap 337 | where 338 | K: Send, 339 | V: Send, 340 | { 341 | type Item = (K, V); 342 | type Iter = ParDrain<'a, K, V>; 343 | 344 | fn par_drain>(self, range: R) -> Self::Iter { 345 | ParDrain { 346 | entries: self.core.par_drain(range), 347 | } 348 | } 349 | } 350 | 351 | /// A parallel draining iterator over the entries of an [`RingMap`]. 352 | /// 353 | /// This `struct` is created by the [`RingMap::par_drain`] method 354 | /// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more. 355 | /// 356 | /// [`RingMap::par_drain`]: ../struct.RingMap.html#method.par_drain 357 | pub struct ParDrain<'a, K: Send, V: Send> { 358 | entries: rayon::collections::vec_deque::Drain<'a, Bucket>, 359 | } 360 | 361 | impl ParallelIterator for ParDrain<'_, K, V> { 362 | type Item = (K, V); 363 | 364 | parallel_iterator_methods!(Bucket::key_value); 365 | } 366 | 367 | impl IndexedParallelIterator for ParDrain<'_, K, V> { 368 | indexed_parallel_iterator_methods!(Bucket::key_value); 369 | } 370 | 371 | /// Parallel iterator methods and other parallel methods. 372 | /// 373 | /// The following methods **require crate feature `"rayon"`**. 374 | /// 375 | /// See also the `IntoParallelIterator` implementations. 376 | impl RingMap 377 | where 378 | K: Sync, 379 | V: Sync, 380 | { 381 | /// Return a parallel iterator over the keys of the map. 382 | /// 383 | /// While parallel iterators can process items in any order, their relative order 384 | /// in the map is still preserved for operations like `reduce` and `collect`. 
385 | pub fn par_keys(&self) -> ParKeys<'_, K, V> { 386 | ParKeys { 387 | entries: ParBuckets::new(self.as_entries()), 388 | } 389 | } 390 | 391 | /// Return a parallel iterator over the values of the map. 392 | /// 393 | /// While parallel iterators can process items in any order, their relative order 394 | /// in the map is still preserved for operations like `reduce` and `collect`. 395 | pub fn par_values(&self) -> ParValues<'_, K, V> { 396 | ParValues { 397 | entries: ParBuckets::new(self.as_entries()), 398 | } 399 | } 400 | } 401 | 402 | /// Parallel iterator methods and other parallel methods. 403 | /// 404 | /// The following methods **require crate feature `"rayon"`**. 405 | /// 406 | /// See also the `IntoParallelIterator` implementations. 407 | impl Slice 408 | where 409 | K: Sync, 410 | V: Sync, 411 | { 412 | /// Return a parallel iterator over the keys of the map slice. 413 | /// 414 | /// While parallel iterators can process items in any order, their relative order 415 | /// in the slice is still preserved for operations like `reduce` and `collect`. 416 | pub fn par_keys(&self) -> ParKeys<'_, K, V> { 417 | ParKeys { 418 | entries: ParBuckets::from_slice(&self.entries), 419 | } 420 | } 421 | 422 | /// Return a parallel iterator over the values of the map slice. 423 | /// 424 | /// While parallel iterators can process items in any order, their relative order 425 | /// in the slice is still preserved for operations like `reduce` and `collect`. 426 | pub fn par_values(&self) -> ParValues<'_, K, V> { 427 | ParValues { 428 | entries: ParBuckets::from_slice(&self.entries), 429 | } 430 | } 431 | } 432 | 433 | impl RingMap 434 | where 435 | K: PartialEq + Sync, 436 | V: Sync, 437 | { 438 | /// Returns `true` if `self` contains all of the same key-value pairs as `other`, 439 | /// in the same indexed order, determined in parallel. 440 | pub fn par_eq(&self, other: &RingMap) -> bool 441 | where 442 | V: PartialEq, 443 | { 444 | self.len() == other.len() && self.par_iter().eq(other) 445 | } 446 | } 447 | 448 | /// A parallel iterator over the keys of an [`RingMap`]. 449 | /// 450 | /// This `struct` is created by the [`RingMap::par_keys`] method. 451 | /// See its documentation for more. 452 | pub struct ParKeys<'a, K, V> { 453 | entries: ParBuckets<'a, K, V>, 454 | } 455 | 456 | impl Clone for ParKeys<'_, K, V> { 457 | fn clone(&self) -> Self { 458 | ParKeys { 459 | entries: self.entries.clone(), 460 | } 461 | } 462 | } 463 | 464 | impl fmt::Debug for ParKeys<'_, K, V> { 465 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 466 | let iter = self.entries.iter().map(Bucket::key_ref); 467 | f.debug_list().entries(iter).finish() 468 | } 469 | } 470 | 471 | impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { 472 | type Item = &'a K; 473 | 474 | parallel_iterator_methods!(Bucket::key_ref); 475 | } 476 | 477 | impl IndexedParallelIterator for ParKeys<'_, K, V> { 478 | indexed_parallel_iterator_methods!(Bucket::key_ref); 479 | } 480 | 481 | /// A parallel iterator over the values of an [`RingMap`]. 482 | /// 483 | /// This `struct` is created by the [`RingMap::par_values`] method. 484 | /// See its documentation for more. 
485 | pub struct ParValues<'a, K, V> { 486 | entries: ParBuckets<'a, K, V>, 487 | } 488 | 489 | impl Clone for ParValues<'_, K, V> { 490 | fn clone(&self) -> Self { 491 | ParValues { 492 | entries: self.entries.clone(), 493 | } 494 | } 495 | } 496 | 497 | impl fmt::Debug for ParValues<'_, K, V> { 498 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 499 | let iter = self.entries.iter().map(Bucket::value_ref); 500 | f.debug_list().entries(iter).finish() 501 | } 502 | } 503 | 504 | impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { 505 | type Item = &'a V; 506 | 507 | parallel_iterator_methods!(Bucket::value_ref); 508 | } 509 | 510 | impl IndexedParallelIterator for ParValues<'_, K, V> { 511 | indexed_parallel_iterator_methods!(Bucket::value_ref); 512 | } 513 | 514 | impl RingMap 515 | where 516 | K: Send, 517 | V: Send, 518 | { 519 | /// Return a parallel iterator over mutable references to the values of the map 520 | /// 521 | /// While parallel iterators can process items in any order, their relative order 522 | /// in the map is still preserved for operations like `reduce` and `collect`. 523 | pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { 524 | ParValuesMut { 525 | entries: ParBucketsMut::new(self.as_entries_mut()), 526 | } 527 | } 528 | } 529 | 530 | impl Slice 531 | where 532 | K: Send, 533 | V: Send, 534 | { 535 | /// Return a parallel iterator over mutable references to the the values of the map slice. 536 | /// 537 | /// While parallel iterators can process items in any order, their relative order 538 | /// in the slice is still preserved for operations like `reduce` and `collect`. 539 | pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { 540 | ParValuesMut { 541 | entries: ParBucketsMut::from_mut_slice(&mut self.entries), 542 | } 543 | } 544 | } 545 | 546 | impl RingMap 547 | where 548 | K: Send, 549 | V: Send, 550 | { 551 | /// Sort the map’s key-value pairs in parallel, by the default ordering of the keys. 552 | pub fn par_sort_keys(&mut self) 553 | where 554 | K: Ord, 555 | { 556 | self.with_contiguous_entries(|entries| { 557 | entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key)); 558 | }); 559 | } 560 | 561 | /// Sort the map’s key-value pairs in place and in parallel, using the comparison 562 | /// function `cmp`. 563 | /// 564 | /// The comparison function receives two key and value pairs to compare (you 565 | /// can sort by keys or values or their combination as needed). 566 | pub fn par_sort_by(&mut self, cmp: F) 567 | where 568 | F: Fn(&K, &V, &K, &V) -> Ordering + Sync, 569 | { 570 | self.with_contiguous_entries(|entries| { 571 | entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); 572 | }); 573 | } 574 | 575 | /// Sort the key-value pairs of the map in parallel and return a by-value parallel 576 | /// iterator of the key-value pairs with the result. 577 | pub fn par_sorted_by(self, cmp: F) -> IntoParIter 578 | where 579 | F: Fn(&K, &V, &K, &V) -> Ordering + Sync, 580 | { 581 | let mut entries = self.into_entries(); 582 | entries 583 | .make_contiguous() 584 | .par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); 585 | IntoParIter { entries } 586 | } 587 | 588 | /// Sort the map's key-value pairs in parallel, by the default ordering of the keys. 
589 | pub fn par_sort_unstable_keys(&mut self) 590 | where 591 | K: Ord, 592 | { 593 | self.with_contiguous_entries(|entries| { 594 | entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key)); 595 | }); 596 | } 597 | 598 | /// Sort the map's key-value pairs in place and in parallel, using the comparison 599 | /// function `cmp`. 600 | /// 601 | /// The comparison function receives two key and value pairs to compare (you 602 | /// can sort by keys or values or their combination as needed). 603 | pub fn par_sort_unstable_by(&mut self, cmp: F) 604 | where 605 | F: Fn(&K, &V, &K, &V) -> Ordering + Sync, 606 | { 607 | self.with_contiguous_entries(|entries| { 608 | entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); 609 | }); 610 | } 611 | 612 | /// Sort the key-value pairs of the map in parallel and return a by-value parallel 613 | /// iterator of the key-value pairs with the result. 614 | pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter 615 | where 616 | F: Fn(&K, &V, &K, &V) -> Ordering + Sync, 617 | { 618 | let mut entries = self.into_entries(); 619 | entries 620 | .make_contiguous() 621 | .par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); 622 | IntoParIter { entries } 623 | } 624 | 625 | /// Sort the map’s key-value pairs in place and in parallel, using a sort-key extraction 626 | /// function. 627 | pub fn par_sort_by_cached_key(&mut self, sort_key: F) 628 | where 629 | T: Ord + Send, 630 | F: Fn(&K, &V) -> T + Sync, 631 | { 632 | self.with_contiguous_entries(move |entries| { 633 | entries.par_sort_by_cached_key(move |a| sort_key(&a.key, &a.value)); 634 | }); 635 | } 636 | } 637 | 638 | /// A parallel mutable iterator over the values of an [`RingMap`]. 639 | /// 640 | /// This `struct` is created by the [`RingMap::par_values_mut`] method. 641 | /// See its documentation for more. 
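A hedged sketch of the cached-key and consuming sort variants above, again assuming `ringmap` with the `rayon` feature.

use rayon::prelude::*;
use ringmap::RingMap;

fn main() {
    let mut map = RingMap::new();
    for word in ["ccc", "a", "bb"] {
        map.insert(word, word.len());
    }

    // The sort key is computed once per entry and cached, which pays off
    // when key extraction costs more than a comparison.
    map.par_sort_by_cached_key(|k, _| k.len());
    assert_eq!(map.par_keys().copied().collect::<Vec<_>>(), ["a", "bb", "ccc"]);

    // The consuming variant returns the sorted pairs as a by-value
    // parallel iterator.
    let pairs: Vec<(&str, usize)> = map
        .par_sorted_unstable_by(|k1, _, k2, _| k2.cmp(k1))
        .collect();
    assert_eq!(pairs, [("ccc", 3), ("bb", 2), ("a", 1)]);
}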
642 | pub struct ParValuesMut<'a, K, V> { 643 | entries: ParBucketsMut<'a, K, V>, 644 | } 645 | 646 | impl fmt::Debug for ParValuesMut<'_, K, V> { 647 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 648 | let iter = self.entries.iter().map(Bucket::value_ref); 649 | f.debug_list().entries(iter).finish() 650 | } 651 | } 652 | 653 | impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> { 654 | type Item = &'a mut V; 655 | 656 | parallel_iterator_methods!(Bucket::value_mut); 657 | } 658 | 659 | impl IndexedParallelIterator for ParValuesMut<'_, K, V> { 660 | indexed_parallel_iterator_methods!(Bucket::value_mut); 661 | } 662 | 663 | impl FromParallelIterator<(K, V)> for RingMap 664 | where 665 | K: Eq + Hash + Send, 666 | V: Send, 667 | S: BuildHasher + Default + Send, 668 | { 669 | fn from_par_iter(iter: I) -> Self 670 | where 671 | I: IntoParallelIterator, 672 | { 673 | let list = collect(iter); 674 | let len = list.iter().map(Vec::len).sum(); 675 | let mut map = Self::with_capacity_and_hasher(len, S::default()); 676 | for vec in list { 677 | map.extend(vec); 678 | } 679 | map 680 | } 681 | } 682 | 683 | impl ParallelExtend<(K, V)> for RingMap 684 | where 685 | K: Eq + Hash + Send, 686 | V: Send, 687 | S: BuildHasher + Send, 688 | { 689 | fn par_extend(&mut self, iter: I) 690 | where 691 | I: IntoParallelIterator, 692 | { 693 | for vec in collect(iter) { 694 | self.extend(vec); 695 | } 696 | } 697 | } 698 | 699 | impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for RingMap 700 | where 701 | K: Copy + Eq + Hash + Send + Sync, 702 | V: Copy + Send + Sync, 703 | S: BuildHasher + Send, 704 | { 705 | fn par_extend(&mut self, iter: I) 706 | where 707 | I: IntoParallelIterator, 708 | { 709 | for vec in collect(iter) { 710 | self.extend(vec); 711 | } 712 | } 713 | } 714 | 715 | #[cfg(test)] 716 | mod tests { 717 | use super::*; 718 | 719 | #[test] 720 | fn insert_order() { 721 | let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; 722 | let mut map = RingMap::new(); 723 | 724 | for &elt in &insert { 725 | map.insert(elt, ()); 726 | } 727 | 728 | assert_eq!(map.par_keys().count(), map.len()); 729 | assert_eq!(map.par_keys().count(), insert.len()); 730 | insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| { 731 | assert_eq!(a, b); 732 | }); 733 | (0..insert.len()) 734 | .into_par_iter() 735 | .zip(map.par_keys()) 736 | .for_each(|(i, k)| { 737 | assert_eq!(map.get_index(i).unwrap().0, k); 738 | }); 739 | } 740 | 741 | #[test] 742 | fn partial_eq_and_eq() { 743 | let mut map_a = RingMap::new(); 744 | map_a.insert(1, "1"); 745 | map_a.insert(2, "2"); 746 | let mut map_b = map_a.clone(); 747 | assert!(map_a.par_eq(&map_b)); 748 | map_b.swap_remove_back(&1); 749 | assert!(!map_a.par_eq(&map_b)); 750 | map_b.insert(3, "3"); 751 | assert!(!map_a.par_eq(&map_b)); 752 | } 753 | 754 | #[test] 755 | fn extend() { 756 | let mut map = RingMap::new(); 757 | map.par_extend(vec![(&1, &2), (&3, &4)]); 758 | map.par_extend(vec![(5, 6)]); 759 | assert_eq!( 760 | map.into_par_iter().collect::>(), 761 | vec![(1, 2), (3, 4), (5, 6)] 762 | ); 763 | } 764 | 765 | #[test] 766 | fn keys() { 767 | let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; 768 | let map: RingMap<_, _> = vec.into_par_iter().collect(); 769 | let keys: Vec<_> = map.par_keys().copied().collect(); 770 | assert_eq!(keys.len(), 3); 771 | assert!(keys.contains(&1)); 772 | assert!(keys.contains(&2)); 773 | assert!(keys.contains(&3)); 774 | } 775 | 776 | #[test] 777 | fn values() { 778 | let vec = vec![(1, 'a'), (2, 
'b'), (3, 'c')]; 779 | let map: RingMap<_, _> = vec.into_par_iter().collect(); 780 | let values: Vec<_> = map.par_values().copied().collect(); 781 | assert_eq!(values.len(), 3); 782 | assert!(values.contains(&'a')); 783 | assert!(values.contains(&'b')); 784 | assert!(values.contains(&'c')); 785 | } 786 | 787 | #[test] 788 | fn values_mut() { 789 | let vec = vec![(1, 1), (2, 2), (3, 3)]; 790 | let mut map: RingMap<_, _> = vec.into_par_iter().collect(); 791 | map.par_values_mut().for_each(|value| *value *= 2); 792 | let values: Vec<_> = map.par_values().copied().collect(); 793 | assert_eq!(values.len(), 3); 794 | assert!(values.contains(&2)); 795 | assert!(values.contains(&4)); 796 | assert!(values.contains(&6)); 797 | } 798 | } 799 | -------------------------------------------------------------------------------- /src/rayon/mod.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, doc(cfg(feature = "rayon")))] 2 | 3 | use rayon::prelude::*; 4 | 5 | use alloc::collections::LinkedList; 6 | use alloc::vec::Vec; 7 | 8 | pub mod map; 9 | pub mod set; 10 | 11 | // This form of intermediate collection is also how Rayon collects `HashMap`. 12 | // Note that the order will also be preserved! 13 | fn collect(iter: I) -> LinkedList> { 14 | iter.into_par_iter().collect_vec_list() 15 | } 16 | -------------------------------------------------------------------------------- /src/rayon/set.rs: -------------------------------------------------------------------------------- 1 | //! Parallel iterator types for [`RingSet`] with [rayon][::rayon]. 2 | //! 3 | //! You will rarely need to interact with this module directly unless you need to name one of the 4 | //! iterator types. 5 | 6 | use super::collect; 7 | use super::map::ParBuckets; 8 | use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; 9 | use rayon::prelude::*; 10 | 11 | use alloc::boxed::Box; 12 | use alloc::collections::VecDeque; 13 | use alloc::vec::Vec; 14 | use core::cmp::Ordering; 15 | use core::fmt; 16 | use core::hash::{BuildHasher, Hash}; 17 | use core::ops::RangeBounds; 18 | 19 | use crate::set::Slice; 20 | use crate::Entries; 21 | use crate::RingSet; 22 | 23 | type Bucket = crate::Bucket; 24 | 25 | impl IntoParallelIterator for RingSet 26 | where 27 | T: Send, 28 | { 29 | type Item = T; 30 | type Iter = IntoParIter; 31 | 32 | fn into_par_iter(self) -> Self::Iter { 33 | IntoParIter { 34 | entries: self.into_entries(), 35 | } 36 | } 37 | } 38 | 39 | impl IntoParallelIterator for Box> 40 | where 41 | T: Send, 42 | { 43 | type Item = T; 44 | type Iter = IntoParIter; 45 | 46 | fn into_par_iter(self) -> Self::Iter { 47 | IntoParIter { 48 | entries: self.into_entries(), 49 | } 50 | } 51 | } 52 | 53 | /// A parallel owning iterator over the items of an [`RingSet`]. 54 | /// 55 | /// This `struct` is created by the [`RingSet::into_par_iter`] method 56 | /// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more. 
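A sketch (not from the crate) of building `RingMap` and `RingSet` from parallel iterators. Because `from_par_iter` funnels results through the order-preserving `collect_vec_list` intermediate shown above, the input order survives.

use rayon::prelude::*;
use ringmap::{RingMap, RingSet};

fn main() {
    let map: RingMap<u32, u32> = (0..8u32).into_par_iter().map(|i| (i, i * i)).collect();
    assert!(map.par_keys().copied().eq(0..8u32));

    let set: RingSet<u32> = (0..8u32).into_par_iter().collect();
    assert!(set.par_iter().copied().eq(0..8u32));

    // An owned set is itself a parallel iterator over its values, in order.
    let doubled: Vec<u32> = set.into_par_iter().map(|x| x * 2).collect();
    assert_eq!(doubled, [0, 2, 4, 6, 8, 10, 12, 14]);
}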
57 | pub struct IntoParIter { 58 | entries: VecDeque>, 59 | } 60 | 61 | impl fmt::Debug for IntoParIter { 62 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 63 | let iter = self.entries.iter().map(Bucket::key_ref); 64 | f.debug_list().entries(iter).finish() 65 | } 66 | } 67 | 68 | impl ParallelIterator for IntoParIter { 69 | type Item = T; 70 | 71 | parallel_iterator_methods!(Bucket::key); 72 | } 73 | 74 | impl IndexedParallelIterator for IntoParIter { 75 | indexed_parallel_iterator_methods!(Bucket::key); 76 | } 77 | 78 | impl<'a, T, S> IntoParallelIterator for &'a RingSet 79 | where 80 | T: Sync, 81 | { 82 | type Item = &'a T; 83 | type Iter = ParIter<'a, T>; 84 | 85 | fn into_par_iter(self) -> Self::Iter { 86 | ParIter { 87 | entries: ParBuckets::new(self.as_entries()), 88 | } 89 | } 90 | } 91 | 92 | impl<'a, T> IntoParallelIterator for &'a Slice 93 | where 94 | T: Sync, 95 | { 96 | type Item = &'a T; 97 | type Iter = ParIter<'a, T>; 98 | 99 | fn into_par_iter(self) -> Self::Iter { 100 | ParIter { 101 | entries: ParBuckets::from_slice(&self.entries), 102 | } 103 | } 104 | } 105 | 106 | /// A parallel iterator over the items of an [`RingSet`]. 107 | /// 108 | /// This `struct` is created by the [`RingSet::par_iter`] method 109 | /// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more. 110 | /// 111 | /// [`RingSet::par_iter`]: ../struct.RingSet.html#method.par_iter 112 | pub struct ParIter<'a, T> { 113 | entries: ParBuckets<'a, T, ()>, 114 | } 115 | 116 | impl Clone for ParIter<'_, T> { 117 | fn clone(&self) -> Self { 118 | ParIter { 119 | entries: self.entries.clone(), 120 | } 121 | } 122 | } 123 | 124 | impl fmt::Debug for ParIter<'_, T> { 125 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 126 | let iter = self.entries.iter().map(Bucket::key_ref); 127 | f.debug_list().entries(iter).finish() 128 | } 129 | } 130 | 131 | impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { 132 | type Item = &'a T; 133 | 134 | parallel_iterator_methods!(Bucket::key_ref); 135 | } 136 | 137 | impl IndexedParallelIterator for ParIter<'_, T> { 138 | indexed_parallel_iterator_methods!(Bucket::key_ref); 139 | } 140 | 141 | impl<'a, T, S> ParallelDrainRange for &'a mut RingSet 142 | where 143 | T: Send, 144 | { 145 | type Item = T; 146 | type Iter = ParDrain<'a, T>; 147 | 148 | fn par_drain>(self, range: R) -> Self::Iter { 149 | ParDrain { 150 | entries: self.map.core.par_drain(range), 151 | } 152 | } 153 | } 154 | 155 | /// A parallel draining iterator over the items of an [`RingSet`]. 156 | /// 157 | /// This `struct` is created by the [`RingSet::par_drain`] method 158 | /// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more. 159 | /// 160 | /// [`RingSet::par_drain`]: ../struct.RingSet.html#method.par_drain 161 | pub struct ParDrain<'a, T: Send> { 162 | entries: rayon::collections::vec_deque::Drain<'a, Bucket>, 163 | } 164 | 165 | impl ParallelIterator for ParDrain<'_, T> { 166 | type Item = T; 167 | 168 | parallel_iterator_methods!(Bucket::key); 169 | } 170 | 171 | impl IndexedParallelIterator for ParDrain<'_, T> { 172 | indexed_parallel_iterator_methods!(Bucket::key); 173 | } 174 | 175 | impl RingSet 176 | where 177 | T: PartialEq + Sync, 178 | { 179 | /// Returns `true` if `self` contains all of the same values as `other`, 180 | /// in the same indexed order, determined in parallel. 
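A sketch of `par_drain` (via rayon's `ParallelDrainRange` trait) and the order-sensitive `par_eq` comparison; the crate and feature assumptions are as in the earlier sketches.

use rayon::prelude::*;
use ringmap::RingSet;

fn main() {
    let mut set: RingSet<i32> = (0..6).collect();

    // Drain the first three entries in parallel; the drained values keep
    // their order and the tail stays in the set.
    let drained: Vec<i32> = set.par_drain(..3).collect();
    assert_eq!(drained, [0, 1, 2]);

    // Pairwise, order-sensitive comparison of what is left.
    let tail: RingSet<i32> = (3..6).collect();
    assert!(set.par_eq(&tail));
}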
181 | pub fn par_eq(&self, other: &RingSet) -> bool 182 | where 183 | S2: BuildHasher + Sync, 184 | { 185 | self.len() == other.len() && self.par_iter().eq(other) 186 | } 187 | } 188 | 189 | /// Parallel iterator methods and other parallel methods. 190 | /// 191 | /// The following methods **require crate feature `"rayon"`**. 192 | /// 193 | /// See also the `IntoParallelIterator` implementations. 194 | impl RingSet 195 | where 196 | T: Hash + Eq + Sync, 197 | S: BuildHasher + Sync, 198 | { 199 | /// Return a parallel iterator over the values that are in `self` but not `other`. 200 | /// 201 | /// While parallel iterators can process items in any order, their relative order 202 | /// in the `self` set is still preserved for operations like `reduce` and `collect`. 203 | pub fn par_difference<'a, S2>( 204 | &'a self, 205 | other: &'a RingSet, 206 | ) -> ParDifference<'a, T, S, S2> 207 | where 208 | S2: BuildHasher + Sync, 209 | { 210 | ParDifference { 211 | set1: self, 212 | set2: other, 213 | } 214 | } 215 | 216 | /// Return a parallel iterator over the values that are in `self` or `other`, 217 | /// but not in both. 218 | /// 219 | /// While parallel iterators can process items in any order, their relative order 220 | /// in the sets is still preserved for operations like `reduce` and `collect`. 221 | /// Values from `self` are produced in their original order, followed by 222 | /// values from `other` in their original order. 223 | pub fn par_symmetric_difference<'a, S2>( 224 | &'a self, 225 | other: &'a RingSet, 226 | ) -> ParSymmetricDifference<'a, T, S, S2> 227 | where 228 | S2: BuildHasher + Sync, 229 | { 230 | ParSymmetricDifference { 231 | set1: self, 232 | set2: other, 233 | } 234 | } 235 | 236 | /// Return a parallel iterator over the values that are in both `self` and `other`. 237 | /// 238 | /// While parallel iterators can process items in any order, their relative order 239 | /// in the `self` set is still preserved for operations like `reduce` and `collect`. 240 | pub fn par_intersection<'a, S2>( 241 | &'a self, 242 | other: &'a RingSet, 243 | ) -> ParIntersection<'a, T, S, S2> 244 | where 245 | S2: BuildHasher + Sync, 246 | { 247 | ParIntersection { 248 | set1: self, 249 | set2: other, 250 | } 251 | } 252 | 253 | /// Return a parallel iterator over all values that are in `self` or `other`. 254 | /// 255 | /// While parallel iterators can process items in any order, their relative order 256 | /// in the sets is still preserved for operations like `reduce` and `collect`. 257 | /// Values from `self` are produced in their original order, followed by 258 | /// values that are unique to `other` in their original order. 259 | pub fn par_union<'a, S2>(&'a self, other: &'a RingSet) -> ParUnion<'a, T, S, S2> 260 | where 261 | S2: BuildHasher + Sync, 262 | { 263 | ParUnion { 264 | set1: self, 265 | set2: other, 266 | } 267 | } 268 | 269 | /// Returns `true` if `self` contains all of the same values as `other`, 270 | /// regardless of each set's indexed order, determined in parallel. 271 | pub fn par_set_eq(&self, other: &RingSet) -> bool 272 | where 273 | S2: BuildHasher + Sync, 274 | { 275 | self.len() == other.len() && self.par_is_subset(other) 276 | } 277 | 278 | /// Returns `true` if `self` has no elements in common with `other`, 279 | /// determined in parallel. 
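A sketch of the parallel set operations above, showing the documented ordering of their results; `par_set_eq` is contrasted with the order-sensitive `par_eq`.

use rayon::prelude::*;
use ringmap::RingSet;

fn main() {
    let a: RingSet<i32> = (0..4).collect(); // {0, 1, 2, 3}
    let b: RingSet<i32> = (2..6).collect(); // {2, 3, 4, 5}

    // Each adapter preserves the order of the contributing set(s).
    assert_eq!(a.par_difference(&b).copied().collect::<Vec<_>>(), [0, 1]);
    assert_eq!(a.par_intersection(&b).copied().collect::<Vec<_>>(), [2, 3]);
    assert_eq!(a.par_union(&b).copied().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5]);
    assert_eq!(
        a.par_symmetric_difference(&b).copied().collect::<Vec<_>>(),
        [0, 1, 4, 5]
    );

    // `par_set_eq` ignores indexed order, unlike `par_eq`.
    let reversed: RingSet<i32> = (0..4).rev().collect();
    assert!(a.par_set_eq(&reversed) && !a.par_eq(&reversed));
}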
280 | pub fn par_is_disjoint(&self, other: &RingSet) -> bool 281 | where 282 | S2: BuildHasher + Sync, 283 | { 284 | if self.len() <= other.len() { 285 | self.par_iter().all(move |value| !other.contains(value)) 286 | } else { 287 | other.par_iter().all(move |value| !self.contains(value)) 288 | } 289 | } 290 | 291 | /// Returns `true` if all elements of `other` are contained in `self`, 292 | /// determined in parallel. 293 | pub fn par_is_superset(&self, other: &RingSet) -> bool 294 | where 295 | S2: BuildHasher + Sync, 296 | { 297 | other.par_is_subset(self) 298 | } 299 | 300 | /// Returns `true` if all elements of `self` are contained in `other`, 301 | /// determined in parallel. 302 | pub fn par_is_subset(&self, other: &RingSet) -> bool 303 | where 304 | S2: BuildHasher + Sync, 305 | { 306 | self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value)) 307 | } 308 | } 309 | 310 | /// A parallel iterator producing elements in the difference of [`RingSet`]s. 311 | /// 312 | /// This `struct` is created by the [`RingSet::par_difference`] method. 313 | /// See its documentation for more. 314 | pub struct ParDifference<'a, T, S1, S2> { 315 | set1: &'a RingSet, 316 | set2: &'a RingSet, 317 | } 318 | 319 | impl Clone for ParDifference<'_, T, S1, S2> { 320 | fn clone(&self) -> Self { 321 | ParDifference { ..*self } 322 | } 323 | } 324 | 325 | impl fmt::Debug for ParDifference<'_, T, S1, S2> 326 | where 327 | T: fmt::Debug + Eq + Hash, 328 | S1: BuildHasher, 329 | S2: BuildHasher, 330 | { 331 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 332 | f.debug_list() 333 | .entries(self.set1.difference(self.set2)) 334 | .finish() 335 | } 336 | } 337 | 338 | impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2> 339 | where 340 | T: Hash + Eq + Sync, 341 | S1: BuildHasher + Sync, 342 | S2: BuildHasher + Sync, 343 | { 344 | type Item = &'a T; 345 | 346 | fn drive_unindexed(self, consumer: C) -> C::Result 347 | where 348 | C: UnindexedConsumer, 349 | { 350 | let Self { set1, set2 } = self; 351 | 352 | set1.par_iter() 353 | .filter(move |&item| !set2.contains(item)) 354 | .drive_unindexed(consumer) 355 | } 356 | } 357 | 358 | /// A parallel iterator producing elements in the intersection of [`RingSet`]s. 359 | /// 360 | /// This `struct` is created by the [`RingSet::par_intersection`] method. 361 | /// See its documentation for more. 
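A short sketch of the containment checks above; note that `par_is_disjoint` iterates whichever set is smaller, while the subset check always walks `self`.

use ringmap::RingSet;

fn main() {
    let small: RingSet<i32> = (0..3).collect();
    let big: RingSet<i32> = (0..10).collect();
    let other: RingSet<i32> = (100..103).collect();

    assert!(small.par_is_subset(&big));
    assert!(big.par_is_superset(&small));
    assert!(small.par_is_disjoint(&other));
    assert!(!small.par_is_disjoint(&big));
}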
362 | pub struct ParIntersection<'a, T, S1, S2> { 363 | set1: &'a RingSet, 364 | set2: &'a RingSet, 365 | } 366 | 367 | impl Clone for ParIntersection<'_, T, S1, S2> { 368 | fn clone(&self) -> Self { 369 | ParIntersection { ..*self } 370 | } 371 | } 372 | 373 | impl fmt::Debug for ParIntersection<'_, T, S1, S2> 374 | where 375 | T: fmt::Debug + Eq + Hash, 376 | S1: BuildHasher, 377 | S2: BuildHasher, 378 | { 379 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 380 | f.debug_list() 381 | .entries(self.set1.intersection(self.set2)) 382 | .finish() 383 | } 384 | } 385 | 386 | impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2> 387 | where 388 | T: Hash + Eq + Sync, 389 | S1: BuildHasher + Sync, 390 | S2: BuildHasher + Sync, 391 | { 392 | type Item = &'a T; 393 | 394 | fn drive_unindexed(self, consumer: C) -> C::Result 395 | where 396 | C: UnindexedConsumer, 397 | { 398 | let Self { set1, set2 } = self; 399 | 400 | set1.par_iter() 401 | .filter(move |&item| set2.contains(item)) 402 | .drive_unindexed(consumer) 403 | } 404 | } 405 | 406 | /// A parallel iterator producing elements in the symmetric difference of [`RingSet`]s. 407 | /// 408 | /// This `struct` is created by the [`RingSet::par_symmetric_difference`] method. 409 | /// See its documentation for more. 410 | pub struct ParSymmetricDifference<'a, T, S1, S2> { 411 | set1: &'a RingSet, 412 | set2: &'a RingSet, 413 | } 414 | 415 | impl Clone for ParSymmetricDifference<'_, T, S1, S2> { 416 | fn clone(&self) -> Self { 417 | ParSymmetricDifference { ..*self } 418 | } 419 | } 420 | 421 | impl fmt::Debug for ParSymmetricDifference<'_, T, S1, S2> 422 | where 423 | T: fmt::Debug + Eq + Hash, 424 | S1: BuildHasher, 425 | S2: BuildHasher, 426 | { 427 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 428 | f.debug_list() 429 | .entries(self.set1.symmetric_difference(self.set2)) 430 | .finish() 431 | } 432 | } 433 | 434 | impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2> 435 | where 436 | T: Hash + Eq + Sync, 437 | S1: BuildHasher + Sync, 438 | S2: BuildHasher + Sync, 439 | { 440 | type Item = &'a T; 441 | 442 | fn drive_unindexed(self, consumer: C) -> C::Result 443 | where 444 | C: UnindexedConsumer, 445 | { 446 | let Self { set1, set2 } = self; 447 | 448 | set1.par_difference(set2) 449 | .chain(set2.par_difference(set1)) 450 | .drive_unindexed(consumer) 451 | } 452 | } 453 | 454 | /// A parallel iterator producing elements in the union of [`RingSet`]s. 455 | /// 456 | /// This `struct` is created by the [`RingSet::par_union`] method. 457 | /// See its documentation for more. 
458 | pub struct ParUnion<'a, T, S1, S2> { 459 | set1: &'a RingSet, 460 | set2: &'a RingSet, 461 | } 462 | 463 | impl Clone for ParUnion<'_, T, S1, S2> { 464 | fn clone(&self) -> Self { 465 | ParUnion { ..*self } 466 | } 467 | } 468 | 469 | impl fmt::Debug for ParUnion<'_, T, S1, S2> 470 | where 471 | T: fmt::Debug + Eq + Hash, 472 | S1: BuildHasher, 473 | S2: BuildHasher, 474 | { 475 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 476 | f.debug_list().entries(self.set1.union(self.set2)).finish() 477 | } 478 | } 479 | 480 | impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2> 481 | where 482 | T: Hash + Eq + Sync, 483 | S1: BuildHasher + Sync, 484 | S2: BuildHasher + Sync, 485 | { 486 | type Item = &'a T; 487 | 488 | fn drive_unindexed(self, consumer: C) -> C::Result 489 | where 490 | C: UnindexedConsumer, 491 | { 492 | let Self { set1, set2 } = self; 493 | 494 | set1.par_iter() 495 | .chain(set2.par_difference(set1)) 496 | .drive_unindexed(consumer) 497 | } 498 | } 499 | 500 | /// Parallel sorting methods. 501 | /// 502 | /// The following methods **require crate feature `"rayon"`**. 503 | impl RingSet 504 | where 505 | T: Send, 506 | { 507 | /// Sort the set’s values in parallel by their default ordering. 508 | pub fn par_sort(&mut self) 509 | where 510 | T: Ord, 511 | { 512 | self.with_contiguous_entries(|entries| { 513 | entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key)); 514 | }); 515 | } 516 | 517 | /// Sort the set’s values in place and in parallel, using the comparison function `cmp`. 518 | pub fn par_sort_by(&mut self, cmp: F) 519 | where 520 | F: Fn(&T, &T) -> Ordering + Sync, 521 | { 522 | self.with_contiguous_entries(|entries| { 523 | entries.par_sort_by(move |a, b| cmp(&a.key, &b.key)); 524 | }); 525 | } 526 | 527 | /// Sort the values of the set in parallel and return a by-value parallel iterator of 528 | /// the values with the result. 529 | pub fn par_sorted_by(self, cmp: F) -> IntoParIter 530 | where 531 | F: Fn(&T, &T) -> Ordering + Sync, 532 | { 533 | let mut entries = self.into_entries(); 534 | entries 535 | .make_contiguous() 536 | .par_sort_by(move |a, b| cmp(&a.key, &b.key)); 537 | IntoParIter { entries } 538 | } 539 | 540 | /// Sort the set's values in parallel by their default ordering. 541 | pub fn par_sort_unstable(&mut self) 542 | where 543 | T: Ord, 544 | { 545 | self.with_contiguous_entries(|entries| { 546 | entries.par_sort_unstable_by(|a, b| T::cmp(&a.key, &b.key)); 547 | }); 548 | } 549 | 550 | /// Sort the set’s values in place and in parallel, using the comparison function `cmp`. 551 | pub fn par_sort_unstable_by(&mut self, cmp: F) 552 | where 553 | F: Fn(&T, &T) -> Ordering + Sync, 554 | { 555 | self.with_contiguous_entries(|entries| { 556 | entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); 557 | }); 558 | } 559 | 560 | /// Sort the values of the set in parallel and return a by-value parallel iterator of 561 | /// the values with the result. 562 | pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter 563 | where 564 | F: Fn(&T, &T) -> Ordering + Sync, 565 | { 566 | let mut entries = self.into_entries(); 567 | entries 568 | .make_contiguous() 569 | .par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); 570 | IntoParIter { entries } 571 | } 572 | 573 | /// Sort the set’s values in place and in parallel, using a key extraction function. 
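A sketch of the set's parallel sorts, including the consuming `par_sorted_by` variant, under the same `ringmap` + `rayon` assumptions.

use rayon::prelude::*;
use ringmap::RingSet;

fn main() {
    let mut set = RingSet::new();
    for x in [3, 1, 2] {
        set.insert(x);
    }

    // In-place parallel sort by the values' natural order.
    set.par_sort();
    assert!(set.par_iter().copied().eq(1..4));

    // The consuming variant hands back the sorted values as a
    // by-value parallel iterator.
    let descending: Vec<i32> = set.par_sorted_by(|a, b| b.cmp(a)).collect();
    assert_eq!(descending, [3, 2, 1]);
}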
574 | pub fn par_sort_by_cached_key(&mut self, sort_key: F) 575 | where 576 | K: Ord + Send, 577 | F: Fn(&T) -> K + Sync, 578 | { 579 | self.with_contiguous_entries(move |entries| { 580 | entries.par_sort_by_cached_key(move |a| sort_key(&a.key)); 581 | }); 582 | } 583 | } 584 | 585 | impl FromParallelIterator for RingSet 586 | where 587 | T: Eq + Hash + Send, 588 | S: BuildHasher + Default + Send, 589 | { 590 | fn from_par_iter(iter: I) -> Self 591 | where 592 | I: IntoParallelIterator, 593 | { 594 | let list = collect(iter); 595 | let len = list.iter().map(Vec::len).sum(); 596 | let mut set = Self::with_capacity_and_hasher(len, S::default()); 597 | for vec in list { 598 | set.extend(vec); 599 | } 600 | set 601 | } 602 | } 603 | 604 | impl ParallelExtend for RingSet 605 | where 606 | T: Eq + Hash + Send, 607 | S: BuildHasher + Send, 608 | { 609 | fn par_extend(&mut self, iter: I) 610 | where 611 | I: IntoParallelIterator, 612 | { 613 | for vec in collect(iter) { 614 | self.extend(vec); 615 | } 616 | } 617 | } 618 | 619 | impl<'a, T: 'a, S> ParallelExtend<&'a T> for RingSet 620 | where 621 | T: Copy + Eq + Hash + Send + Sync, 622 | S: BuildHasher + Send, 623 | { 624 | fn par_extend(&mut self, iter: I) 625 | where 626 | I: IntoParallelIterator, 627 | { 628 | for vec in collect(iter) { 629 | self.extend(vec); 630 | } 631 | } 632 | } 633 | 634 | #[cfg(test)] 635 | mod tests { 636 | use super::*; 637 | 638 | #[test] 639 | fn insert_order() { 640 | let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; 641 | let mut set = RingSet::new(); 642 | 643 | for &elt in &insert { 644 | set.insert(elt); 645 | } 646 | 647 | assert_eq!(set.par_iter().count(), set.len()); 648 | assert_eq!(set.par_iter().count(), insert.len()); 649 | insert.par_iter().zip(&set).for_each(|(a, b)| { 650 | assert_eq!(a, b); 651 | }); 652 | (0..insert.len()) 653 | .into_par_iter() 654 | .zip(&set) 655 | .for_each(|(i, v)| { 656 | assert_eq!(set.get_index(i).unwrap(), v); 657 | }); 658 | } 659 | 660 | #[test] 661 | fn partial_eq_and_eq() { 662 | let mut set_a = RingSet::new(); 663 | set_a.insert(1); 664 | set_a.insert(2); 665 | let mut set_b = set_a.clone(); 666 | assert!(set_a.par_eq(&set_b)); 667 | set_b.swap_remove_back(&1); 668 | assert!(!set_a.par_eq(&set_b)); 669 | set_b.insert(3); 670 | assert!(!set_a.par_eq(&set_b)); 671 | 672 | let set_c: RingSet<_> = set_b.into_par_iter().collect(); 673 | assert!(!set_a.par_eq(&set_c)); 674 | assert!(!set_c.par_eq(&set_a)); 675 | } 676 | 677 | #[test] 678 | fn extend() { 679 | let mut set = RingSet::new(); 680 | set.par_extend(vec![&1, &2, &3, &4]); 681 | set.par_extend(vec![5, 6]); 682 | assert_eq!( 683 | set.into_par_iter().collect::>(), 684 | vec![1, 2, 3, 4, 5, 6] 685 | ); 686 | } 687 | 688 | #[test] 689 | fn comparisons() { 690 | let set_a: RingSet<_> = (0..3).collect(); 691 | let set_b: RingSet<_> = (3..6).collect(); 692 | let set_c: RingSet<_> = (0..6).collect(); 693 | let set_d: RingSet<_> = (3..9).collect(); 694 | 695 | assert!(!set_a.par_is_disjoint(&set_a)); 696 | assert!(set_a.par_is_subset(&set_a)); 697 | assert!(set_a.par_is_superset(&set_a)); 698 | 699 | assert!(set_a.par_is_disjoint(&set_b)); 700 | assert!(set_b.par_is_disjoint(&set_a)); 701 | assert!(!set_a.par_is_subset(&set_b)); 702 | assert!(!set_b.par_is_subset(&set_a)); 703 | assert!(!set_a.par_is_superset(&set_b)); 704 | assert!(!set_b.par_is_superset(&set_a)); 705 | 706 | assert!(!set_a.par_is_disjoint(&set_c)); 707 | assert!(!set_c.par_is_disjoint(&set_a)); 708 | assert!(set_a.par_is_subset(&set_c)); 709 | 
assert!(!set_c.par_is_subset(&set_a)); 710 | assert!(!set_a.par_is_superset(&set_c)); 711 | assert!(set_c.par_is_superset(&set_a)); 712 | 713 | assert!(!set_c.par_is_disjoint(&set_d)); 714 | assert!(!set_d.par_is_disjoint(&set_c)); 715 | assert!(!set_c.par_is_subset(&set_d)); 716 | assert!(!set_d.par_is_subset(&set_c)); 717 | assert!(!set_c.par_is_superset(&set_d)); 718 | assert!(!set_d.par_is_superset(&set_c)); 719 | } 720 | 721 | #[test] 722 | fn iter_comparisons() { 723 | use std::iter::empty; 724 | 725 | fn check<'a, I1, I2>(iter1: I1, iter2: I2) 726 | where 727 | I1: ParallelIterator, 728 | I2: Iterator, 729 | { 730 | let v1: Vec<_> = iter1.copied().collect(); 731 | let v2: Vec<_> = iter2.collect(); 732 | assert_eq!(v1, v2); 733 | } 734 | 735 | let set_a: RingSet<_> = (0..3).collect(); 736 | let set_b: RingSet<_> = (3..6).collect(); 737 | let set_c: RingSet<_> = (0..6).collect(); 738 | let set_d: RingSet<_> = (3..9).rev().collect(); 739 | 740 | check(set_a.par_difference(&set_a), empty()); 741 | check(set_a.par_symmetric_difference(&set_a), empty()); 742 | check(set_a.par_intersection(&set_a), 0..3); 743 | check(set_a.par_union(&set_a), 0..3); 744 | 745 | check(set_a.par_difference(&set_b), 0..3); 746 | check(set_b.par_difference(&set_a), 3..6); 747 | check(set_a.par_symmetric_difference(&set_b), 0..6); 748 | check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3)); 749 | check(set_a.par_intersection(&set_b), empty()); 750 | check(set_b.par_intersection(&set_a), empty()); 751 | check(set_a.par_union(&set_b), 0..6); 752 | check(set_b.par_union(&set_a), (3..6).chain(0..3)); 753 | 754 | check(set_a.par_difference(&set_c), empty()); 755 | check(set_c.par_difference(&set_a), 3..6); 756 | check(set_a.par_symmetric_difference(&set_c), 3..6); 757 | check(set_c.par_symmetric_difference(&set_a), 3..6); 758 | check(set_a.par_intersection(&set_c), 0..3); 759 | check(set_c.par_intersection(&set_a), 0..3); 760 | check(set_a.par_union(&set_c), 0..6); 761 | check(set_c.par_union(&set_a), 0..6); 762 | 763 | check(set_c.par_difference(&set_d), 0..3); 764 | check(set_d.par_difference(&set_c), (6..9).rev()); 765 | check( 766 | set_c.par_symmetric_difference(&set_d), 767 | (0..3).chain((6..9).rev()), 768 | ); 769 | check( 770 | set_d.par_symmetric_difference(&set_c), 771 | (6..9).rev().chain(0..3), 772 | ); 773 | check(set_c.par_intersection(&set_d), 3..6); 774 | check(set_d.par_intersection(&set_c), (3..6).rev()); 775 | check(set_c.par_union(&set_d), (0..6).chain((6..9).rev())); 776 | check(set_d.par_union(&set_c), (3..9).rev().chain(0..3)); 777 | } 778 | } 779 | -------------------------------------------------------------------------------- /src/serde.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, doc(cfg(feature = "serde")))] 2 | 3 | use serde::de::value::{MapDeserializer, SeqDeserializer}; 4 | use serde::de::{ 5 | Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor, 6 | }; 7 | use serde::ser::{Serialize, Serializer}; 8 | 9 | use core::fmt::{self, Formatter}; 10 | use core::hash::{BuildHasher, Hash}; 11 | use core::marker::PhantomData; 12 | use core::{cmp, mem}; 13 | 14 | use crate::{Bucket, RingMap, RingSet}; 15 | 16 | /// Limit our preallocated capacity from a deserializer `size_hint()`. 17 | /// 18 | /// We do account for the `Bucket` overhead from its saved `hash` field, but we don't count the 19 | /// `RawTable` allocation or the fact that its raw capacity will be rounded up to a power of two. 
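A round-trip sketch for the serde support; `serde_json` is used purely for illustration and is not a dependency of this crate, and the `serde` feature must be enabled.

use ringmap::RingMap;

fn main() -> Result<(), serde_json::Error> {
    let mut map = RingMap::new();
    map.insert("first", 1);
    map.insert("second", 2);

    // Maps serialize as ordinary JSON objects, in indexed order...
    let json = serde_json::to_string(&map)?;
    assert_eq!(json, r#"{"first":1,"second":2}"#);

    // ...and deserialize back in the same order, with preallocation capped
    // by `cautious_capacity` regardless of the reported size hint.
    let back: RingMap<String, i32> = serde_json::from_str(&json)?;
    assert_eq!(back.get_index(0), Some((&"first".to_string(), &1)));
    Ok(())
}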
20 | /// The "max" is an arbitrary choice anyway, not something that needs precise adherence. 21 | /// 22 | /// This is based on the internal `serde::de::size_hint::cautious(hint)` function. 23 | pub(crate) fn cautious_capacity(hint: Option) -> usize { 24 | const MAX_PREALLOC_BYTES: usize = 1024 * 1024; 25 | 26 | cmp::min( 27 | hint.unwrap_or(0), 28 | MAX_PREALLOC_BYTES / mem::size_of::>(), 29 | ) 30 | } 31 | 32 | impl Serialize for RingMap 33 | where 34 | K: Serialize, 35 | V: Serialize, 36 | { 37 | fn serialize(&self, serializer: T) -> Result 38 | where 39 | T: Serializer, 40 | { 41 | serializer.collect_map(self) 42 | } 43 | } 44 | 45 | struct RingMapVisitor(PhantomData<(K, V, S)>); 46 | 47 | impl<'de, K, V, S> Visitor<'de> for RingMapVisitor 48 | where 49 | K: Deserialize<'de> + Eq + Hash, 50 | V: Deserialize<'de>, 51 | S: Default + BuildHasher, 52 | { 53 | type Value = RingMap; 54 | 55 | fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { 56 | write!(formatter, "a map") 57 | } 58 | 59 | fn visit_map(self, mut map: A) -> Result 60 | where 61 | A: MapAccess<'de>, 62 | { 63 | let capacity = cautious_capacity::(map.size_hint()); 64 | let mut values = RingMap::with_capacity_and_hasher(capacity, S::default()); 65 | 66 | while let Some((key, value)) = map.next_entry()? { 67 | values.insert(key, value); 68 | } 69 | 70 | Ok(values) 71 | } 72 | } 73 | 74 | impl<'de, K, V, S> Deserialize<'de> for RingMap 75 | where 76 | K: Deserialize<'de> + Eq + Hash, 77 | V: Deserialize<'de>, 78 | S: Default + BuildHasher, 79 | { 80 | fn deserialize(deserializer: D) -> Result 81 | where 82 | D: Deserializer<'de>, 83 | { 84 | deserializer.deserialize_map(RingMapVisitor(PhantomData)) 85 | } 86 | } 87 | 88 | impl<'de, K, V, S, E> IntoDeserializer<'de, E> for RingMap 89 | where 90 | K: IntoDeserializer<'de, E> + Eq + Hash, 91 | V: IntoDeserializer<'de, E>, 92 | S: BuildHasher, 93 | E: Error, 94 | { 95 | type Deserializer = MapDeserializer<'de, ::IntoIter, E>; 96 | 97 | fn into_deserializer(self) -> Self::Deserializer { 98 | MapDeserializer::new(self.into_iter()) 99 | } 100 | } 101 | 102 | impl Serialize for RingSet 103 | where 104 | T: Serialize, 105 | { 106 | fn serialize(&self, serializer: Se) -> Result 107 | where 108 | Se: Serializer, 109 | { 110 | serializer.collect_seq(self) 111 | } 112 | } 113 | 114 | struct RingSetVisitor(PhantomData<(T, S)>); 115 | 116 | impl<'de, T, S> Visitor<'de> for RingSetVisitor 117 | where 118 | T: Deserialize<'de> + Eq + Hash, 119 | S: Default + BuildHasher, 120 | { 121 | type Value = RingSet; 122 | 123 | fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { 124 | write!(formatter, "a set") 125 | } 126 | 127 | fn visit_seq(self, mut seq: A) -> Result 128 | where 129 | A: SeqAccess<'de>, 130 | { 131 | let capacity = cautious_capacity::(seq.size_hint()); 132 | let mut values = RingSet::with_capacity_and_hasher(capacity, S::default()); 133 | 134 | while let Some(value) = seq.next_element()? 
{ 135 | values.insert(value); 136 | } 137 | 138 | Ok(values) 139 | } 140 | } 141 | 142 | impl<'de, T, S> Deserialize<'de> for RingSet 143 | where 144 | T: Deserialize<'de> + Eq + Hash, 145 | S: Default + BuildHasher, 146 | { 147 | fn deserialize(deserializer: D) -> Result 148 | where 149 | D: Deserializer<'de>, 150 | { 151 | deserializer.deserialize_seq(RingSetVisitor(PhantomData)) 152 | } 153 | } 154 | 155 | impl<'de, T, S, E> IntoDeserializer<'de, E> for RingSet 156 | where 157 | T: IntoDeserializer<'de, E> + Eq + Hash, 158 | S: BuildHasher, 159 | E: Error, 160 | { 161 | type Deserializer = SeqDeserializer<::IntoIter, E>; 162 | 163 | fn into_deserializer(self) -> Self::Deserializer { 164 | SeqDeserializer::new(self.into_iter()) 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /src/set/iter.rs: -------------------------------------------------------------------------------- 1 | use super::{Bucket, Entries, RingSet}; 2 | use crate::map::Buckets; 3 | 4 | use alloc::collections::vec_deque::{self, VecDeque}; 5 | use core::fmt; 6 | use core::hash::{BuildHasher, Hash}; 7 | use core::iter::{Chain, FusedIterator}; 8 | use core::ops::RangeBounds; 9 | 10 | impl<'a, T, S> IntoIterator for &'a RingSet { 11 | type Item = &'a T; 12 | type IntoIter = Iter<'a, T>; 13 | 14 | fn into_iter(self) -> Self::IntoIter { 15 | self.iter() 16 | } 17 | } 18 | 19 | impl IntoIterator for RingSet { 20 | type Item = T; 21 | type IntoIter = IntoIter; 22 | 23 | fn into_iter(self) -> Self::IntoIter { 24 | IntoIter::new(self.into_entries()) 25 | } 26 | } 27 | 28 | /// An iterator over the items of an [`RingSet`]. 29 | /// 30 | /// This `struct` is created by the [`RingSet::iter`] method. 31 | /// See its documentation for more. 32 | pub struct Iter<'a, T> { 33 | iter: Buckets<'a, T, ()>, 34 | } 35 | 36 | impl<'a, T> Iter<'a, T> { 37 | pub(super) fn new(entries: &'a VecDeque>) -> Self { 38 | Self { 39 | iter: Buckets::new(entries), 40 | } 41 | } 42 | 43 | pub(super) fn from_slice(slice: &'a [Bucket]) -> Self { 44 | Self { 45 | iter: Buckets::from_slice(slice), 46 | } 47 | } 48 | } 49 | 50 | impl<'a, T> Iterator for Iter<'a, T> { 51 | type Item = &'a T; 52 | 53 | iterator_methods!(Bucket::key_ref); 54 | } 55 | 56 | impl DoubleEndedIterator for Iter<'_, T> { 57 | double_ended_iterator_methods!(Bucket::key_ref); 58 | } 59 | 60 | impl ExactSizeIterator for Iter<'_, T> { 61 | fn len(&self) -> usize { 62 | self.iter.len() 63 | } 64 | } 65 | 66 | impl FusedIterator for Iter<'_, T> {} 67 | 68 | impl Clone for Iter<'_, T> { 69 | fn clone(&self) -> Self { 70 | Iter { 71 | iter: self.iter.clone(), 72 | } 73 | } 74 | } 75 | 76 | impl fmt::Debug for Iter<'_, T> { 77 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 78 | f.debug_list().entries(self.clone()).finish() 79 | } 80 | } 81 | 82 | impl Default for Iter<'_, T> { 83 | fn default() -> Self { 84 | Self { 85 | iter: Default::default(), 86 | } 87 | } 88 | } 89 | 90 | /// An owning iterator over the items of an [`RingSet`]. 91 | /// 92 | /// This `struct` is created by the [`RingSet::into_iter`] method 93 | /// (provided by the [`IntoIterator`] trait). See its documentation for more. 
94 | #[derive(Clone)] 95 | pub struct IntoIter { 96 | iter: vec_deque::IntoIter>, 97 | } 98 | 99 | impl IntoIter { 100 | pub(super) fn new(entries: VecDeque>) -> Self { 101 | Self { 102 | iter: entries.into_iter(), 103 | } 104 | } 105 | } 106 | 107 | impl Iterator for IntoIter { 108 | type Item = T; 109 | 110 | iterator_methods!(Bucket::key); 111 | } 112 | 113 | impl DoubleEndedIterator for IntoIter { 114 | double_ended_iterator_methods!(Bucket::key); 115 | } 116 | 117 | impl ExactSizeIterator for IntoIter { 118 | fn len(&self) -> usize { 119 | self.iter.len() 120 | } 121 | } 122 | 123 | impl FusedIterator for IntoIter {} 124 | 125 | impl fmt::Debug for IntoIter { 126 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 127 | // FIXME 128 | // let iter = self.iter.as_slice().iter().map(Bucket::key_ref); 129 | // f.debug_list().entries(iter).finish() 130 | f.debug_struct("IntoIter").finish_non_exhaustive() 131 | } 132 | } 133 | 134 | impl Default for IntoIter { 135 | fn default() -> Self { 136 | Self { 137 | iter: VecDeque::new().into_iter(), 138 | } 139 | } 140 | } 141 | 142 | /// A draining iterator over the items of an [`RingSet`]. 143 | /// 144 | /// This `struct` is created by the [`RingSet::drain`] method. 145 | /// See its documentation for more. 146 | pub struct Drain<'a, T> { 147 | iter: vec_deque::Drain<'a, Bucket>, 148 | } 149 | 150 | impl<'a, T> Drain<'a, T> { 151 | pub(super) fn new(iter: vec_deque::Drain<'a, Bucket>) -> Self { 152 | Self { iter } 153 | } 154 | } 155 | 156 | impl Iterator for Drain<'_, T> { 157 | type Item = T; 158 | 159 | iterator_methods!(Bucket::key); 160 | } 161 | 162 | impl DoubleEndedIterator for Drain<'_, T> { 163 | double_ended_iterator_methods!(Bucket::key); 164 | } 165 | 166 | impl ExactSizeIterator for Drain<'_, T> { 167 | fn len(&self) -> usize { 168 | self.iter.len() 169 | } 170 | } 171 | 172 | impl FusedIterator for Drain<'_, T> {} 173 | 174 | impl fmt::Debug for Drain<'_, T> { 175 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 176 | // FIXME 177 | // let iter = self.iter.as_slice().iter().map(Bucket::key_ref); 178 | // f.debug_list().entries(iter).finish() 179 | f.debug_struct("Drain").finish_non_exhaustive() 180 | } 181 | } 182 | 183 | /// A lazy iterator producing elements in the difference of [`RingSet`]s. 184 | /// 185 | /// This `struct` is created by the [`RingSet::difference`] method. 186 | /// See its documentation for more. 
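A sketch of the sequential iterators above: `Iter` is double-ended and exact-size, and `drain` (assumed here to take an index range, matching the `vec_deque::Drain` it wraps) yields the removed values front to back.

use ringmap::RingSet;

fn main() {
    let mut set: RingSet<i32> = (0..6).collect();

    // Double-ended iteration over the set's order.
    let rev: Vec<i32> = set.iter().rev().copied().collect();
    assert_eq!(rev, [5, 4, 3, 2, 1, 0]);

    // Drain a range of indices; the drained values come out in order.
    let drained: Vec<i32> = set.drain(1..4).collect();
    assert_eq!(drained, [1, 2, 3]);
    assert_eq!(set.iter().copied().collect::<Vec<_>>(), [0, 4, 5]);
}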
187 | pub struct Difference<'a, T, S> { 188 | iter: Iter<'a, T>, 189 | other: &'a RingSet, 190 | } 191 | 192 | impl<'a, T, S> Difference<'a, T, S> { 193 | pub(super) fn new(set: &'a RingSet, other: &'a RingSet) -> Self { 194 | Self { 195 | iter: set.iter(), 196 | other, 197 | } 198 | } 199 | } 200 | 201 | impl<'a, T, S> Iterator for Difference<'a, T, S> 202 | where 203 | T: Eq + Hash, 204 | S: BuildHasher, 205 | { 206 | type Item = &'a T; 207 | 208 | fn next(&mut self) -> Option { 209 | while let Some(item) = self.iter.next() { 210 | if !self.other.contains(item) { 211 | return Some(item); 212 | } 213 | } 214 | None 215 | } 216 | 217 | fn size_hint(&self) -> (usize, Option) { 218 | (0, self.iter.size_hint().1) 219 | } 220 | } 221 | 222 | impl DoubleEndedIterator for Difference<'_, T, S> 223 | where 224 | T: Eq + Hash, 225 | S: BuildHasher, 226 | { 227 | fn next_back(&mut self) -> Option { 228 | while let Some(item) = self.iter.next_back() { 229 | if !self.other.contains(item) { 230 | return Some(item); 231 | } 232 | } 233 | None 234 | } 235 | } 236 | 237 | impl FusedIterator for Difference<'_, T, S> 238 | where 239 | T: Eq + Hash, 240 | S: BuildHasher, 241 | { 242 | } 243 | 244 | impl Clone for Difference<'_, T, S> { 245 | fn clone(&self) -> Self { 246 | Difference { 247 | iter: self.iter.clone(), 248 | ..*self 249 | } 250 | } 251 | } 252 | 253 | impl fmt::Debug for Difference<'_, T, S> 254 | where 255 | T: fmt::Debug + Eq + Hash, 256 | S: BuildHasher, 257 | { 258 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 259 | f.debug_list().entries(self.clone()).finish() 260 | } 261 | } 262 | 263 | /// A lazy iterator producing elements in the intersection of [`RingSet`]s. 264 | /// 265 | /// This `struct` is created by the [`RingSet::intersection`] method. 266 | /// See its documentation for more. 
267 | pub struct Intersection<'a, T, S> { 268 | iter: Iter<'a, T>, 269 | other: &'a RingSet, 270 | } 271 | 272 | impl<'a, T, S> Intersection<'a, T, S> { 273 | pub(super) fn new(set: &'a RingSet, other: &'a RingSet) -> Self { 274 | Self { 275 | iter: set.iter(), 276 | other, 277 | } 278 | } 279 | } 280 | 281 | impl<'a, T, S> Iterator for Intersection<'a, T, S> 282 | where 283 | T: Eq + Hash, 284 | S: BuildHasher, 285 | { 286 | type Item = &'a T; 287 | 288 | fn next(&mut self) -> Option { 289 | while let Some(item) = self.iter.next() { 290 | if self.other.contains(item) { 291 | return Some(item); 292 | } 293 | } 294 | None 295 | } 296 | 297 | fn size_hint(&self) -> (usize, Option) { 298 | (0, self.iter.size_hint().1) 299 | } 300 | } 301 | 302 | impl DoubleEndedIterator for Intersection<'_, T, S> 303 | where 304 | T: Eq + Hash, 305 | S: BuildHasher, 306 | { 307 | fn next_back(&mut self) -> Option { 308 | while let Some(item) = self.iter.next_back() { 309 | if self.other.contains(item) { 310 | return Some(item); 311 | } 312 | } 313 | None 314 | } 315 | } 316 | 317 | impl FusedIterator for Intersection<'_, T, S> 318 | where 319 | T: Eq + Hash, 320 | S: BuildHasher, 321 | { 322 | } 323 | 324 | impl Clone for Intersection<'_, T, S> { 325 | fn clone(&self) -> Self { 326 | Intersection { 327 | iter: self.iter.clone(), 328 | ..*self 329 | } 330 | } 331 | } 332 | 333 | impl fmt::Debug for Intersection<'_, T, S> 334 | where 335 | T: fmt::Debug + Eq + Hash, 336 | S: BuildHasher, 337 | { 338 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 339 | f.debug_list().entries(self.clone()).finish() 340 | } 341 | } 342 | 343 | /// A lazy iterator producing elements in the symmetric difference of [`RingSet`]s. 344 | /// 345 | /// This `struct` is created by the [`RingSet::symmetric_difference`] method. 346 | /// See its documentation for more. 
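A sketch of the lazy `Difference` and `Intersection` adapters; they filter `self`'s iterator against membership in the other set, so they can be consumed from either end.

use ringmap::RingSet;

fn main() {
    let a: RingSet<i32> = (0..5).collect();
    let b: RingSet<i32> = (3..8).collect();

    let mut diff = a.difference(&b).copied();
    assert_eq!(diff.next(), Some(0));
    assert_eq!(diff.next_back(), Some(2));

    let common: Vec<i32> = a.intersection(&b).copied().collect();
    assert_eq!(common, [3, 4]);
}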
347 | pub struct SymmetricDifference<'a, T, S1, S2> { 348 | iter: Chain, Difference<'a, T, S1>>, 349 | } 350 | 351 | impl<'a, T, S1, S2> SymmetricDifference<'a, T, S1, S2> 352 | where 353 | T: Eq + Hash, 354 | S1: BuildHasher, 355 | S2: BuildHasher, 356 | { 357 | pub(super) fn new(set1: &'a RingSet, set2: &'a RingSet) -> Self { 358 | let diff1 = set1.difference(set2); 359 | let diff2 = set2.difference(set1); 360 | Self { 361 | iter: diff1.chain(diff2), 362 | } 363 | } 364 | } 365 | 366 | impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> 367 | where 368 | T: Eq + Hash, 369 | S1: BuildHasher, 370 | S2: BuildHasher, 371 | { 372 | type Item = &'a T; 373 | 374 | fn next(&mut self) -> Option { 375 | self.iter.next() 376 | } 377 | 378 | fn size_hint(&self) -> (usize, Option) { 379 | self.iter.size_hint() 380 | } 381 | 382 | fn fold(self, init: B, f: F) -> B 383 | where 384 | F: FnMut(B, Self::Item) -> B, 385 | { 386 | self.iter.fold(init, f) 387 | } 388 | } 389 | 390 | impl DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2> 391 | where 392 | T: Eq + Hash, 393 | S1: BuildHasher, 394 | S2: BuildHasher, 395 | { 396 | fn next_back(&mut self) -> Option { 397 | self.iter.next_back() 398 | } 399 | 400 | fn rfold(self, init: B, f: F) -> B 401 | where 402 | F: FnMut(B, Self::Item) -> B, 403 | { 404 | self.iter.rfold(init, f) 405 | } 406 | } 407 | 408 | impl FusedIterator for SymmetricDifference<'_, T, S1, S2> 409 | where 410 | T: Eq + Hash, 411 | S1: BuildHasher, 412 | S2: BuildHasher, 413 | { 414 | } 415 | 416 | impl Clone for SymmetricDifference<'_, T, S1, S2> { 417 | fn clone(&self) -> Self { 418 | SymmetricDifference { 419 | iter: self.iter.clone(), 420 | } 421 | } 422 | } 423 | 424 | impl fmt::Debug for SymmetricDifference<'_, T, S1, S2> 425 | where 426 | T: fmt::Debug + Eq + Hash, 427 | S1: BuildHasher, 428 | S2: BuildHasher, 429 | { 430 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 431 | f.debug_list().entries(self.clone()).finish() 432 | } 433 | } 434 | 435 | /// A lazy iterator producing elements in the union of [`RingSet`]s. 436 | /// 437 | /// This `struct` is created by the [`RingSet::union`] method. 438 | /// See its documentation for more. 
439 | pub struct Union<'a, T, S> { 440 | iter: Chain, Difference<'a, T, S>>, 441 | } 442 | 443 | impl<'a, T, S> Union<'a, T, S> 444 | where 445 | T: Eq + Hash, 446 | S: BuildHasher, 447 | { 448 | pub(super) fn new(set1: &'a RingSet, set2: &'a RingSet) -> Self 449 | where 450 | S2: BuildHasher, 451 | { 452 | Self { 453 | iter: set1.iter().chain(set2.difference(set1)), 454 | } 455 | } 456 | } 457 | 458 | impl<'a, T, S> Iterator for Union<'a, T, S> 459 | where 460 | T: Eq + Hash, 461 | S: BuildHasher, 462 | { 463 | type Item = &'a T; 464 | 465 | fn next(&mut self) -> Option { 466 | self.iter.next() 467 | } 468 | 469 | fn size_hint(&self) -> (usize, Option) { 470 | self.iter.size_hint() 471 | } 472 | 473 | fn fold(self, init: B, f: F) -> B 474 | where 475 | F: FnMut(B, Self::Item) -> B, 476 | { 477 | self.iter.fold(init, f) 478 | } 479 | } 480 | 481 | impl DoubleEndedIterator for Union<'_, T, S> 482 | where 483 | T: Eq + Hash, 484 | S: BuildHasher, 485 | { 486 | fn next_back(&mut self) -> Option { 487 | self.iter.next_back() 488 | } 489 | 490 | fn rfold(self, init: B, f: F) -> B 491 | where 492 | F: FnMut(B, Self::Item) -> B, 493 | { 494 | self.iter.rfold(init, f) 495 | } 496 | } 497 | 498 | impl FusedIterator for Union<'_, T, S> 499 | where 500 | T: Eq + Hash, 501 | S: BuildHasher, 502 | { 503 | } 504 | 505 | impl Clone for Union<'_, T, S> { 506 | fn clone(&self) -> Self { 507 | Union { 508 | iter: self.iter.clone(), 509 | } 510 | } 511 | } 512 | 513 | impl fmt::Debug for Union<'_, T, S> 514 | where 515 | T: fmt::Debug + Eq + Hash, 516 | S: BuildHasher, 517 | { 518 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 519 | f.debug_list().entries(self.clone()).finish() 520 | } 521 | } 522 | 523 | /// A splicing iterator for `RingSet`. 524 | /// 525 | /// This `struct` is created by [`RingSet::splice()`]. 526 | /// See its documentation for more. 
527 | pub struct Splice<'a, I, T, S> 528 | where 529 | I: Iterator, 530 | T: Hash + Eq, 531 | S: BuildHasher, 532 | { 533 | iter: crate::map::Splice<'a, UnitValue, T, (), S>, 534 | } 535 | 536 | impl<'a, I, T, S> Splice<'a, I, T, S> 537 | where 538 | I: Iterator, 539 | T: Hash + Eq, 540 | S: BuildHasher, 541 | { 542 | #[track_caller] 543 | pub(super) fn new(set: &'a mut RingSet, range: R, replace_with: I) -> Self 544 | where 545 | R: RangeBounds, 546 | { 547 | Self { 548 | iter: set.map.splice(range, UnitValue(replace_with)), 549 | } 550 | } 551 | } 552 | 553 | impl Iterator for Splice<'_, I, T, S> 554 | where 555 | I: Iterator, 556 | T: Hash + Eq, 557 | S: BuildHasher, 558 | { 559 | type Item = T; 560 | 561 | fn next(&mut self) -> Option { 562 | Some(self.iter.next()?.0) 563 | } 564 | 565 | fn size_hint(&self) -> (usize, Option) { 566 | self.iter.size_hint() 567 | } 568 | } 569 | 570 | impl DoubleEndedIterator for Splice<'_, I, T, S> 571 | where 572 | I: Iterator, 573 | T: Hash + Eq, 574 | S: BuildHasher, 575 | { 576 | fn next_back(&mut self) -> Option { 577 | Some(self.iter.next_back()?.0) 578 | } 579 | } 580 | 581 | impl ExactSizeIterator for Splice<'_, I, T, S> 582 | where 583 | I: Iterator, 584 | T: Hash + Eq, 585 | S: BuildHasher, 586 | { 587 | fn len(&self) -> usize { 588 | self.iter.len() 589 | } 590 | } 591 | 592 | impl FusedIterator for Splice<'_, I, T, S> 593 | where 594 | I: Iterator, 595 | T: Hash + Eq, 596 | S: BuildHasher, 597 | { 598 | } 599 | 600 | struct UnitValue(I); 601 | 602 | impl Iterator for UnitValue { 603 | type Item = (I::Item, ()); 604 | 605 | fn next(&mut self) -> Option { 606 | self.0.next().map(|x| (x, ())) 607 | } 608 | } 609 | 610 | impl fmt::Debug for Splice<'_, I, T, S> 611 | where 612 | I: fmt::Debug + Iterator, 613 | T: fmt::Debug + Hash + Eq, 614 | S: BuildHasher, 615 | { 616 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 617 | fmt::Debug::fmt(&self.iter, f) 618 | } 619 | } 620 | 621 | impl fmt::Debug for UnitValue { 622 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 623 | fmt::Debug::fmt(&self.0, f) 624 | } 625 | } 626 | -------------------------------------------------------------------------------- /src/set/mutable.rs: -------------------------------------------------------------------------------- 1 | use core::hash::{BuildHasher, Hash}; 2 | 3 | use super::{Equivalent, RingSet}; 4 | use crate::map::MutableKeys; 5 | 6 | /// Opt-in mutable access to [`RingSet`] values. 7 | /// 8 | /// These methods expose `&mut T`, mutable references to the value as it is stored 9 | /// in the set. 10 | /// You are allowed to modify the values in the set **if the modification 11 | /// does not change the value’s hash and equality**. 12 | /// 13 | /// If values are modified erroneously, you can no longer look them up. 14 | /// This is sound (memory safe) but a logical error hazard (just like 15 | /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). 16 | /// 17 | /// `use` this trait to enable its methods for `RingSet`. 18 | /// 19 | /// This trait is sealed and cannot be implemented for types outside this crate. 20 | pub trait MutableValues: private::Sealed { 21 | type Value; 22 | 23 | /// Return item index and mutable reference to the value 24 | /// 25 | /// Computes in **O(1)** time (average). 26 | fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut Self::Value)> 27 | where 28 | Q: ?Sized + Hash + Equivalent; 29 | 30 | /// Return mutable reference to the value at an index. 
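A sketch of `splice`, assuming it follows the indexmap-style contract suggested by this code: the index range is removed, the replacement values take its place, and the returned iterator yields the removed values.

use ringmap::RingSet;

fn main() {
    let mut set: RingSet<i32> = (0..5).collect();

    // Replace indices 1..4 with two new values.
    let removed: Vec<i32> = set.splice(1..4, [10, 20]).collect();
    assert_eq!(removed, [1, 2, 3]);
    assert_eq!(set.iter().copied().collect::<Vec<_>>(), [0, 10, 20, 4]);
}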
31 | /// 32 | /// Valid indices are `0 <= index < self.len()`. 33 | /// 34 | /// Computes in **O(1)** time. 35 | fn get_index_mut2(&mut self, index: usize) -> Option<&mut Self::Value>; 36 | 37 | /// Scan through each value in the set and keep those where the 38 | /// closure `keep` returns `true`. 39 | /// 40 | /// The values are visited in order, and remaining values keep their order. 41 | /// 42 | /// Computes in **O(n)** time (average). 43 | fn retain2(&mut self, keep: F) 44 | where 45 | F: FnMut(&mut Self::Value) -> bool; 46 | } 47 | 48 | /// Opt-in mutable access to [`RingSet`] values. 49 | /// 50 | /// See [`MutableValues`] for more information. 51 | impl MutableValues for RingSet 52 | where 53 | S: BuildHasher, 54 | { 55 | type Value = T; 56 | 57 | fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut T)> 58 | where 59 | Q: ?Sized + Hash + Equivalent, 60 | { 61 | match self.map.get_full_mut2(value) { 62 | Some((index, value, ())) => Some((index, value)), 63 | None => None, 64 | } 65 | } 66 | 67 | fn get_index_mut2(&mut self, index: usize) -> Option<&mut T> { 68 | match self.map.get_index_mut2(index) { 69 | Some((value, ())) => Some(value), 70 | None => None, 71 | } 72 | } 73 | 74 | fn retain2(&mut self, mut keep: F) 75 | where 76 | F: FnMut(&mut T) -> bool, 77 | { 78 | self.map.retain2(move |value, ()| keep(value)); 79 | } 80 | } 81 | 82 | mod private { 83 | pub trait Sealed {} 84 | 85 | impl Sealed for super::RingSet {} 86 | } 87 | -------------------------------------------------------------------------------- /src/set/slice.rs: -------------------------------------------------------------------------------- 1 | use super::{Bucket, IntoIter, Iter}; 2 | use crate::util::{slice_eq, try_simplify_range}; 3 | 4 | use alloc::boxed::Box; 5 | use alloc::collections::VecDeque; 6 | use core::cmp::Ordering; 7 | use core::fmt; 8 | use core::hash::{Hash, Hasher}; 9 | use core::ops::{self, Bound, Index, RangeBounds}; 10 | 11 | /// A dynamically-sized slice of values in an [`RingSet`][super::RingSet]. 12 | /// 13 | /// This supports indexed operations much like a `[T]` slice, 14 | /// but not any hashed operations on the values. 15 | #[repr(transparent)] 16 | pub struct Slice { 17 | pub(crate) entries: [Bucket], 18 | } 19 | 20 | // SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, 21 | // and reference lifetimes are bound together in function signatures. 22 | #[allow(unsafe_code)] 23 | impl Slice { 24 | pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { 25 | unsafe { &*(entries as *const [Bucket] as *const Self) } 26 | } 27 | 28 | pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { 29 | unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } 30 | } 31 | 32 | fn into_boxed(self: Box) -> Box<[Bucket]> { 33 | unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } 34 | } 35 | } 36 | 37 | impl Slice { 38 | pub(crate) fn into_entries(self: Box) -> VecDeque> { 39 | self.into_boxed().into_vec().into() 40 | } 41 | 42 | /// Returns an empty slice. 43 | pub const fn new<'a>() -> &'a Self { 44 | Self::from_slice(&[]) 45 | } 46 | 47 | /// Return the number of elements in the set slice. 48 | pub const fn len(&self) -> usize { 49 | self.entries.len() 50 | } 51 | 52 | /// Returns true if the set slice contains no elements. 53 | pub const fn is_empty(&self) -> bool { 54 | self.entries.is_empty() 55 | } 56 | 57 | /// Get a value by index. 58 | /// 59 | /// Valid indices are `0 <= index < self.len()`. 
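A sketch of the opt-in `MutableValues` trait; the import path `ringmap::set::MutableValues` is assumed. The type's hash and equality are keyed on `id` alone, so mutating the other field keeps lookups valid, as the trait docs require.

use std::hash::{Hash, Hasher};

use ringmap::set::MutableValues;
use ringmap::RingSet;

struct Counter {
    id: u32,
    hits: u32,
}

// Identity is the `id` alone, so `hits` may be edited in place safely.
impl PartialEq for Counter {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
impl Eq for Counter {}
impl Hash for Counter {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}

fn main() {
    let mut set = RingSet::new();
    set.insert(Counter { id: 1, hits: 0 });

    // `get_full_mut2` exposes `&mut Counter`; only non-identity fields change.
    if let Some((_, counter)) = set.get_full_mut2(&Counter { id: 1, hits: 0 }) {
        counter.hits += 1;
    }
    assert_eq!(set.get_index_mut2(0).map(|c| c.hits), Some(1));

    // `retain2` keeps entries for which the closure returns true.
    set.retain2(|c| c.hits > 0);
    assert_eq!(set.len(), 1);
}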
60 | pub fn get_index(&self, index: usize) -> Option<&T> { 61 | self.entries.get(index).map(Bucket::key_ref) 62 | } 63 | 64 | /// Returns a slice of values in the given range of indices. 65 | /// 66 | /// Valid indices are `0 <= index < self.len()`. 67 | pub fn get_range>(&self, range: R) -> Option<&Self> { 68 | let range = try_simplify_range(range, self.entries.len())?; 69 | self.entries.get(range).map(Self::from_slice) 70 | } 71 | 72 | /// Get the first value. 73 | pub fn first(&self) -> Option<&T> { 74 | self.entries.first().map(Bucket::key_ref) 75 | } 76 | 77 | /// Get the last value. 78 | pub fn last(&self) -> Option<&T> { 79 | self.entries.last().map(Bucket::key_ref) 80 | } 81 | 82 | /// Divides one slice into two at an index. 83 | /// 84 | /// ***Panics*** if `index > len`. 85 | pub fn split_at(&self, index: usize) -> (&Self, &Self) { 86 | let (first, second) = self.entries.split_at(index); 87 | (Self::from_slice(first), Self::from_slice(second)) 88 | } 89 | 90 | /// Returns the first value and the rest of the slice, 91 | /// or `None` if it is empty. 92 | pub fn split_first(&self) -> Option<(&T, &Self)> { 93 | if let [first, rest @ ..] = &self.entries { 94 | Some((&first.key, Self::from_slice(rest))) 95 | } else { 96 | None 97 | } 98 | } 99 | 100 | /// Returns the last value and the rest of the slice, 101 | /// or `None` if it is empty. 102 | pub fn split_last(&self) -> Option<(&T, &Self)> { 103 | if let [rest @ .., last] = &self.entries { 104 | Some((&last.key, Self::from_slice(rest))) 105 | } else { 106 | None 107 | } 108 | } 109 | 110 | /// Return an iterator over the values of the set slice. 111 | pub fn iter(&self) -> Iter<'_, T> { 112 | Iter::from_slice(&self.entries) 113 | } 114 | 115 | /// Search over a sorted set for a value. 116 | /// 117 | /// Returns the position where that value is present, or the position where it can be inserted 118 | /// to maintain the sort. See [`slice::binary_search`] for more details. 119 | /// 120 | /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up in 121 | /// the set this is a slice from using [`RingSet::get_index_of`][super::RingSet::get_index_of], 122 | /// but this can also position missing values. 123 | pub fn binary_search(&self, x: &T) -> Result 124 | where 125 | T: Ord, 126 | { 127 | self.binary_search_by(|p| p.cmp(x)) 128 | } 129 | 130 | /// Search over a sorted set with a comparator function. 131 | /// 132 | /// Returns the position where that value is present, or the position where it can be inserted 133 | /// to maintain the sort. See [`slice::binary_search_by`] for more details. 134 | /// 135 | /// Computes in **O(log(n))** time. 136 | #[inline] 137 | pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result 138 | where 139 | F: FnMut(&'a T) -> Ordering, 140 | { 141 | self.entries.binary_search_by(move |a| f(&a.key)) 142 | } 143 | 144 | /// Search over a sorted set with an extraction function. 145 | /// 146 | /// Returns the position where that value is present, or the position where it can be inserted 147 | /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. 148 | /// 149 | /// Computes in **O(log(n))** time. 
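A small sketch of the `Slice` binary-search helpers, using only the constructors shown here (the module path `ringmap::set::Slice` is assumed); `binary_search` reports either the position of a hit or the insertion point that keeps the slice sorted.

use ringmap::set::Slice;

// Works on any sorted set slice: `Ok` carries the position of the value,
// `Err` the index at which it could be inserted to keep the sort.
fn position_of(sorted: &Slice<i32>, value: i32) -> usize {
    match sorted.binary_search(&value) {
        Ok(index) | Err(index) => index,
    }
}

fn main() {
    let empty: &Slice<i32> = Slice::new();
    assert_eq!(position_of(empty, 7), 0);

    // `partition_point` returns the first index where the predicate flips.
    assert_eq!(empty.partition_point(|&x| x < 7), 0);
}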
--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
1 | use core::ops::{Bound, Range, RangeBounds};
2 | 
3 | pub(crate) fn third<A, B, C>(t: (A, B, C)) -> C {
4 |     t.2
5 | }
6 | 
7 | #[track_caller]
8 | pub(crate) fn simplify_range<R>(range: R, len: usize) -> Range<usize>
9 | where
10 |     R: RangeBounds<usize>,
11 | {
12 |     let start = match range.start_bound() {
13 |         Bound::Unbounded => 0,
14 |         Bound::Included(&i) if i <= len => i,
15 |         Bound::Excluded(&i) if i < len => i + 1,
16 |         Bound::Included(i) | Bound::Excluded(i) => {
17 |             panic!("range start index {i} out of range for slice of length {len}")
18 |         }
19 |     };
20 |     let end = match range.end_bound() {
21 |         Bound::Unbounded => len,
22 |         Bound::Excluded(&i) if i <= len => i,
23 |         Bound::Included(&i) if i < len => i + 1,
24 |         Bound::Included(i) | Bound::Excluded(i) => {
25 |             panic!("range end index {i} out of range for slice of length {len}")
26 |         }
27 |     };
28 |     if start > end {
29 |         panic!(
30 |             "range start index {:?} should be <= range end index {:?}",
31 |             range.start_bound(),
32 |             range.end_bound()
33 |         );
34 |     }
35 |     start..end
36 | }
37 | 
38 | pub(crate) fn try_simplify_range<R>(range: R, len: usize) -> Option<Range<usize>>
39 | where
40 |     R: RangeBounds<usize>,
41 | {
42 |     let start = match range.start_bound() {
43 |         Bound::Unbounded => 0,
44 |         Bound::Included(&i) if i <= len => i,
45 |         Bound::Excluded(&i) if i < len => i + 1,
46 |         _ => return None,
47 |     };
48 |     let end = match range.end_bound() {
49 |         Bound::Unbounded => len,
50 |         Bound::Excluded(&i) if i <= len => i,
51 |         Bound::Included(&i) if i < len => i + 1,
52 |         _ => return None,
53 |     };
54 |     if start > end {
55 |         return None;
56 |     }
57 |     Some(start..end)
58 | }
59 | 
60 | // Generic slice equality -- copied from the standard library but adding a custom comparator,
61 | // allowing for our `Bucket` wrapper on either or both sides.
62 | pub(crate) fn slice_eq<T, U>(left: &[T], right: &[U], eq: impl Fn(&T, &U) -> bool) -> bool {
63 |     if left.len() != right.len() {
64 |         return false;
65 |     }
66 | 
67 |     // Implemented as explicit indexing rather
68 |     // than zipped iterators for performance reasons.
69 |     // See PR https://github.com/rust-lang/rust/pull/116846
70 |     for i in 0..left.len() {
71 |         // bound checks are optimized away
72 |         if !eq(&left[i], &right[i]) {
73 |             return false;
74 |         }
75 |     }
76 | 
77 |     true
78 | }
79 | 
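For illustration, a test sketch (not in the sources above) pinning down the bound arithmetic of `try_simplify_range`. Since these helpers are `pub(crate)`, such a test would have to live inside the crate, e.g. at the bottom of `src/util.rs`; the module name `range_tests` is only a placeholder.

#[cfg(test)]
mod range_tests {
    use super::*;
    use core::ops::Bound;

    #[test]
    fn simplify_bounds() {
        // Unbounded ends clamp to `0..len`.
        assert_eq!(try_simplify_range(.., 5), Some(0..5));
        // An inclusive end becomes the exclusive index one past it.
        assert_eq!(try_simplify_range(1..=3, 5), Some(1..4));
        // An excluded start begins one past the given index.
        let bounds = (Bound::Excluded(1), Bound::Unbounded);
        assert_eq!(try_simplify_range(bounds, 5), Some(2..5));
        // Out-of-range or inverted ranges yield `None` rather than panicking,
        // unlike `simplify_range`, which is the panicking variant.
        assert_eq!(try_simplify_range(3..7, 5), None);
        assert_eq!(try_simplify_range(4..2, 5), None);
    }
}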

--------------------------------------------------------------------------------
/test-nostd/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "test-nostd"
3 | version = "0.1.0"
4 | publish = false
5 | edition = "2021"
6 | 
7 | [dependencies.ringmap]
8 | path = ".."
9 | default-features = false
10 | features = ["serde"]
11 | 
12 | [dev-dependencies]
13 | 
--------------------------------------------------------------------------------
/test-nostd/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![no_std]
2 | 
3 | use core::hash::BuildHasherDefault;
4 | use core::hash::Hasher;
5 | 
6 | use ringmap::RingMap;
7 | use ringmap::RingSet;
8 | 
9 | #[derive(Default)]
10 | struct BadHasher(u64);
11 | 
12 | impl Hasher for BadHasher {
13 |     fn finish(&self) -> u64 {
14 |         self.0
15 |     }
16 |     fn write(&mut self, bytes: &[u8]) {
17 |         for &byte in bytes {
18 |             self.0 += byte as u64
19 |         }
20 |     }
21 | }
22 | 
23 | type Map = RingMap<i32, i32, BuildHasherDefault<BadHasher>>;
24 | type Set = RingSet<&'static str, BuildHasherDefault<BadHasher>>;
25 | 
26 | pub fn test_compile() {
27 |     let mut map = Map::default();
28 |     map.insert(1, 1);
29 |     map.insert(2, 4);
30 |     for (_, _) in map.iter() {}
31 | 
32 |     let _map2 = Map::from_iter(Some((1, 1)));
33 | 
34 |     let mut set = Set::default();
35 |     set.insert("a");
36 | }
37 | 
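For illustration, one more compile-only sketch in the spirit of the file above: constructing the containers from an explicit hasher value rather than `Default`. It assumes ringmap exposes `with_hasher` constructors like indexmap's; in a `no_std` build there is no `RandomState`, so an explicit hasher is the usual entry point. `test_compile_with_hasher` is only a hypothetical name.

pub fn test_compile_with_hasher() {
    // Assumed API: `with_hasher` mirroring indexmap's constructor.
    let mut map: Map = RingMap::with_hasher(BuildHasherDefault::default());
    map.insert(1, 1);
    assert_eq!(map.get(&1), Some(&1));

    let mut set: Set = RingSet::with_hasher(BuildHasherDefault::default());
    set.insert("a");
    assert!(set.contains("a"));
}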
impl_index { 301 | ($($range:ty),*) => {$( 302 | impl Index<$range> for Slice { 303 | type Output = Self; 304 | 305 | fn index(&self, range: $range) -> &Self::Output { 306 | Slice::from_slice(&self.entries[range]) 307 | } 308 | } 309 | )*} 310 | } 311 | impl_index!( 312 | ops::Range, 313 | ops::RangeFrom, 314 | ops::RangeFull, 315 | ops::RangeInclusive, 316 | ops::RangeTo, 317 | ops::RangeToInclusive, 318 | (Bound, Bound) 319 | ); 320 | 321 | #[cfg(test)] 322 | mod tests { 323 | use super::*; 324 | use crate::RingSet; 325 | use alloc::vec::Vec; 326 | 327 | #[test] 328 | fn slice_index() { 329 | fn check(vec_slice: &[i32], set_slice: &Slice) { 330 | itertools::assert_equal(vec_slice, set_slice); 331 | } 332 | 333 | let vec: Vec = (0..10).map(|i| i * i).collect(); 334 | let set: RingSet = vec.iter().cloned().collect(); 335 | let (slice, tail) = set.as_slices(); 336 | assert!(tail.is_empty()); 337 | 338 | // RangeFull 339 | check(&vec[..], &slice[..]); 340 | 341 | for i in 0usize..10 { 342 | // Index 343 | assert_eq!(vec[i], set[i]); 344 | assert_eq!(vec[i], slice[i]); 345 | 346 | // RangeFrom 347 | check(&vec[i..], &slice[i..]); 348 | 349 | // RangeTo 350 | check(&vec[..i], &slice[..i]); 351 | 352 | // RangeToInclusive 353 | check(&vec[..=i], &slice[..=i]); 354 | 355 | // (Bound, Bound) 356 | let bounds = (Bound::Excluded(i), Bound::Unbounded); 357 | check(&vec[i + 1..], &slice[bounds]); 358 | 359 | for j in i..=10 { 360 | // Range 361 | check(&vec[i..j], &slice[i..j]); 362 | } 363 | 364 | for j in i..10 { 365 | // RangeInclusive 366 | check(&vec[i..=j], &slice[i..=j]); 367 | } 368 | } 369 | } 370 | } 371 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Bound, Range, RangeBounds}; 2 | 3 | pub(crate) fn third(t: (A, B, C)) -> C { 4 | t.2 5 | } 6 | 7 | #[track_caller] 8 | pub(crate) fn simplify_range(range: R, len: usize) -> Range 9 | where 10 | R: RangeBounds, 11 | { 12 | let start = match range.start_bound() { 13 | Bound::Unbounded => 0, 14 | Bound::Included(&i) if i <= len => i, 15 | Bound::Excluded(&i) if i < len => i + 1, 16 | Bound::Included(i) | Bound::Excluded(i) => { 17 | panic!("range start index {i} out of range for slice of length {len}") 18 | } 19 | }; 20 | let end = match range.end_bound() { 21 | Bound::Unbounded => len, 22 | Bound::Excluded(&i) if i <= len => i, 23 | Bound::Included(&i) if i < len => i + 1, 24 | Bound::Included(i) | Bound::Excluded(i) => { 25 | panic!("range end index {i} out of range for slice of length {len}") 26 | } 27 | }; 28 | if start > end { 29 | panic!( 30 | "range start index {:?} should be <= range end index {:?}", 31 | range.start_bound(), 32 | range.end_bound() 33 | ); 34 | } 35 | start..end 36 | } 37 | 38 | pub(crate) fn try_simplify_range(range: R, len: usize) -> Option> 39 | where 40 | R: RangeBounds, 41 | { 42 | let start = match range.start_bound() { 43 | Bound::Unbounded => 0, 44 | Bound::Included(&i) if i <= len => i, 45 | Bound::Excluded(&i) if i < len => i + 1, 46 | _ => return None, 47 | }; 48 | let end = match range.end_bound() { 49 | Bound::Unbounded => len, 50 | Bound::Excluded(&i) if i <= len => i, 51 | Bound::Included(&i) if i < len => i + 1, 52 | _ => return None, 53 | }; 54 | if start > end { 55 | return None; 56 | } 57 | Some(start..end) 58 | } 59 | 60 | // Generic slice equality -- copied from the standard library but adding a custom comparator, 61 | // allowing for our `Bucket` 
--------------------------------------------------------------------------------
/test-serde/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![cfg(test)]
2 | 
3 | use fnv::FnvBuildHasher;
4 | use ringmap::{ringmap, ringset, RingMap, RingSet};
5 | use serde::{Deserialize, Serialize};
6 | use serde_test::{assert_tokens, Token};
7 | 
8 | #[test]
9 | fn test_serde_map() {
10 |     let map = ringmap! { 1 => 2, 3 => 4 };
11 |     assert_tokens(
12 |         &map,
13 |         &[
14 |             Token::Map { len: Some(2) },
15 |             Token::I32(1),
16 |             Token::I32(2),
17 |             Token::I32(3),
18 |             Token::I32(4),
19 |             Token::MapEnd,
20 |         ],
21 |     );
22 | }
23 | 
24 | #[test]
25 | fn test_serde_set() {
26 |     let set = ringset! { 1, 2, 3, 4 };
27 |     assert_tokens(
28 |         &set,
29 |         &[
30 |             Token::Seq { len: Some(4) },
31 |             Token::I32(1),
32 |             Token::I32(2),
33 |             Token::I32(3),
34 |             Token::I32(4),
35 |             Token::SeqEnd,
36 |         ],
37 |     );
38 | }
39 | 
40 | #[test]
41 | fn test_serde_map_fnv_hasher() {
42 |     let mut map: RingMap<i32, i32, FnvBuildHasher> = Default::default();
43 |     map.insert(1, 2);
44 |     map.insert(3, 4);
45 |     assert_tokens(
46 |         &map,
47 |         &[
48 |             Token::Map { len: Some(2) },
49 |             Token::I32(1),
50 |             Token::I32(2),
51 |             Token::I32(3),
52 |             Token::I32(4),
53 |             Token::MapEnd,
54 |         ],
55 |     );
56 | }
57 | 
58 | #[test]
59 | fn test_serde_set_fnv_hasher() {
60 |     let mut set: RingSet<i32, FnvBuildHasher> = Default::default();
61 |     set.extend(1..5);
62 |     assert_tokens(
63 |         &set,
64 |         &[
65 |             Token::Seq { len: Some(4) },
66 |             Token::I32(1),
67 |             Token::I32(2),
68 |             Token::I32(3),
69 |             Token::I32(4),
70 |             Token::SeqEnd,
71 |         ],
72 |     );
73 | }
74 | 
75 | #[test]
76 | fn test_serde_seq_map() {
77 |     #[derive(Debug, Deserialize, Serialize)]
78 |     #[serde(transparent)]
79 |     struct SeqRingMap {
80 |         #[serde(with = "ringmap::map::serde_seq")]
81 |         map: RingMap<i32, i32>,
82 |     }
83 | 
84 |     impl PartialEq for SeqRingMap {
85 |         fn eq(&self, other: &Self) -> bool {
86 |             // explicitly compare items in order
87 |             self.map.iter().eq(&other.map)
88 |         }
89 |     }
90 | 
91 |     let map = ringmap! { 1 => 2, 3 => 4, -1 => -2, -3 => -4 };
92 |     assert_tokens(
93 |         &SeqRingMap { map },
94 |         &[
95 |             Token::Seq { len: Some(4) },
96 |             Token::Tuple { len: 2 },
97 |             Token::I32(1),
98 |             Token::I32(2),
99 |             Token::TupleEnd,
100 |             Token::Tuple { len: 2 },
101 |             Token::I32(3),
102 |             Token::I32(4),
103 |             Token::TupleEnd,
104 |             Token::Tuple { len: 2 },
105 |             Token::I32(-1),
106 |             Token::I32(-2),
107 |             Token::TupleEnd,
108 |             Token::Tuple { len: 2 },
109 |             Token::I32(-3),
110 |             Token::I32(-4),
111 |             Token::TupleEnd,
112 |             Token::SeqEnd,
113 |         ],
114 |     );
115 | }
116 | 
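For a more concrete picture of what those token streams mean on the wire, here is a sketch using `serde_json` (which would need to be added as a dev-dependency; it is not one today). The default impl serializes a `RingMap` as a JSON object in insertion order, while `serde_seq` emits a sequence of key-value pairs.

use ringmap::{ringmap, RingMap};
use serde::Serialize;

#[derive(Serialize)]
#[serde(transparent)]
struct AsSeq {
    #[serde(with = "ringmap::map::serde_seq")]
    map: RingMap<i32, i32>,
}

#[test]
fn test_json_shapes() -> serde_json::Result<()> {
    let map = ringmap! { 3 => 4, 1 => 2 };

    // Default impl: a JSON object, keys in insertion order (JSON stringifies them).
    assert_eq!(serde_json::to_string(&map)?, r#"{"3":4,"1":2}"#);

    // serde_seq: the same entries as a sequence of `[key, value]` pairs.
    assert_eq!(serde_json::to_string(&AsSeq { map })?, "[[3,4],[1,2]]");
    Ok(())
}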
--------------------------------------------------------------------------------
/tests/equivalent_trait.rs:
--------------------------------------------------------------------------------
1 | use ringmap::ringmap;
2 | use ringmap::Equivalent;
3 | 
4 | use std::hash::Hash;
5 | 
6 | #[derive(Debug, Hash)]
7 | pub struct Pair<A, B>(pub A, pub B);
8 | 
9 | impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
10 | where
11 |     C: PartialEq<A>,
12 |     D: PartialEq<B>,
13 | {
14 |     fn eq(&self, rhs: &(A, B)) -> bool {
15 |         self.0 == rhs.0 && self.1 == rhs.1
16 |     }
17 | }
18 | 
19 | impl<A, B, X> Equivalent<X> for Pair<A, B>
20 | where
21 |     Pair<A, B>: PartialEq<X>,
22 |     A: Hash + Eq,
23 |     B: Hash + Eq,
24 | {
25 |     fn equivalent(&self, other: &X) -> bool {
26 |         *self == *other
27 |     }
28 | }
29 | 
30 | #[test]
31 | fn test_lookup() {
32 |     let s = String::from;
33 |     let map = ringmap! {
34 |         (s("a"), s("b")) => 1,
35 |         (s("a"), s("x")) => 2,
36 |     };
37 | 
38 |     assert!(map.contains_key(&Pair("a", "b")));
39 |     assert!(!map.contains_key(&Pair("b", "a")));
40 | }
41 | 
42 | #[test]
43 | fn test_string_str() {
44 |     let s = String::from;
45 |     let mut map = ringmap! {
46 |         s("a") => 1, s("b") => 2,
47 |         s("x") => 3, s("y") => 4,
48 |     };
49 | 
50 |     assert!(map.contains_key("a"));
51 |     assert!(!map.contains_key("z"));
52 |     assert_eq!(map.swap_remove_back("b"), Some(2));
53 | }
54 | 
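The payoff of the `Pair` adapter above is heterogeneous lookup without allocating owned keys. A sketch of one more test in the same vein, which assumes `RingMap::get` accepts any `Q: Hash + Equivalent<K>` just as `contains_key` evidently does:

#[test]
fn test_get_with_pair() {
    let s = String::from;
    let map = ringmap! {
        (s("a"), s("b")) => 1,
        (s("a"), s("x")) => 2,
    };

    // No temporary `(String, String)` key is built for these lookups.
    assert_eq!(map.get(&Pair("a", "x")), Some(&2));
    assert_eq!(map.get(&Pair("x", "a")), None);
}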
--------------------------------------------------------------------------------
/tests/macros_full_path.rs:
--------------------------------------------------------------------------------
1 | #[test]
2 | fn test_create_map() {
3 |     let _m = ringmap::ringmap! {
4 |         1 => 2,
5 |         7 => 1,
6 |         2 => 2,
7 |         3 => 3,
8 |     };
9 | }
10 | 
11 | #[test]
12 | fn test_create_set() {
13 |     let _s = ringmap::ringset! {
14 |         1,
15 |         7,
16 |         2,
17 |         3,
18 |     };
19 | }
20 | 
--------------------------------------------------------------------------------
/tests/tests.rs:
--------------------------------------------------------------------------------
1 | use ringmap::{ringmap, ringset};
2 | 
3 | #[test]
4 | fn test_sort() {
5 |     let m = ringmap! {
6 |         1 => 2,
7 |         7 => 1,
8 |         2 => 2,
9 |         3 => 3,
10 |     };
11 | 
12 |     itertools::assert_equal(
13 |         m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)),
14 |         vec![(7, 1), (1, 2), (2, 2), (3, 3)],
15 |     );
16 | }
17 | 
18 | #[test]
19 | fn test_sort_set() {
20 |     let s = ringset! {
21 |         1,
22 |         7,
23 |         2,
24 |         3,
25 |     };
26 | 
27 |     itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), vec![1, 2, 3, 7]);
28 | }
29 | 
--------------------------------------------------------------------------------
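For contrast with `test_sort` above, the same `sorted_by` call can order by key instead of by value; a sketch reusing only the APIs already exercised in `tests.rs`:

#[test]
fn test_sort_by_key() {
    let m = ringmap! {
        1 => 2,
        7 => 1,
        2 => 2,
        3 => 3,
    };

    // Same entries as `test_sort`, now ordered by key rather than by value.
    itertools::assert_equal(
        m.sorted_by(|k1, _v1, k2, _v2| k1.cmp(k2)),
        vec![(1, 2), (2, 2), (3, 3), (7, 1)],
    );
}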