├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ └── fuzz.yml ├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── Cargo.toml ├── LICENCE.md ├── README.md ├── fuzz ├── .gitignore ├── Cargo.toml └── fuzz_targets │ ├── assert.rs │ ├── inline_array.rs │ ├── ring_buffer.rs │ ├── sized_chunk.rs │ └── sparse_chunk.rs ├── proptest-regressions ├── sparse_chunk │ └── iter.txt └── tests │ ├── ring_buffer.txt │ └── sized_chunk.txt ├── release.toml └── src ├── arbitrary.rs ├── inline_array ├── iter.rs └── mod.rs ├── lib.rs ├── ring_buffer ├── index.rs ├── iter.rs ├── mod.rs ├── refpool.rs └── slice.rs ├── sized_chunk ├── iter.rs ├── mod.rs └── refpool.rs ├── sparse_chunk ├── iter.rs ├── mod.rs └── refpool.rs └── tests.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | ignore: 9 | - dependency-name: arbitrary 10 | versions: 11 | - 1.0.0 12 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: 3 | push: 4 | pull_request: 5 | schedule: 6 | - cron: "0 0 1,15 * *" 7 | 8 | jobs: 9 | check: 10 | name: Check 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | rust: 15 | - stable 16 | - nightly 17 | - 1.51.0 # lowest supported version 18 | steps: 19 | - uses: actions/checkout@v2 20 | - uses: actions-rs/toolchain@v1 21 | with: 22 | profile: minimal 23 | toolchain: ${{ matrix.rust }} 24 | override: true 25 | - uses: actions-rs/cargo@v1 26 | with: 27 | command: check 28 | args: --all-features 29 | 30 | test: 31 | name: Tests 32 | runs-on: ubuntu-latest 33 | strategy: 34 | matrix: 35 | rust: 36 | - stable 37 | - nightly 38 | - 1.51.0 # lowest supported version 39 | steps: 40 | - uses: actions/checkout@v2 41 | - uses: actions-rs/toolchain@v1 42 | with: 43 | profile: minimal 44 | toolchain: ${{ matrix.rust }} 45 | override: true 46 | - uses: actions-rs/cargo@v1 47 | with: 48 | command: test 49 | args: --all-features 50 | 51 | clippy: 52 | name: Clippy 53 | runs-on: ubuntu-latest 54 | strategy: 55 | matrix: 56 | rust: 57 | - stable 58 | - nightly 59 | steps: 60 | - uses: actions/checkout@v2 61 | - uses: actions-rs/toolchain@v1 62 | with: 63 | profile: minimal 64 | toolchain: ${{ matrix.rust }} 65 | override: true 66 | components: clippy 67 | - uses: actions-rs/clippy-check@v1 68 | with: 69 | name: Clippy-${{ matrix.rust }} 70 | token: ${{ secrets.GITHUB_TOKEN }} 71 | args: --all-features 72 | 73 | miri: 74 | name: Miri 75 | runs-on: ubuntu-latest 76 | steps: 77 | - uses: actions/checkout@v2 78 | - uses: actions-rs/toolchain@v1 79 | with: 80 | profile: minimal 81 | toolchain: nightly 82 | override: true 83 | components: miri 84 | - name: Run Miri 85 | run: | 86 | cargo miri setup 87 | cargo miri test 88 | -------------------------------------------------------------------------------- /.github/workflows/fuzz.yml: -------------------------------------------------------------------------------- 1 | name: libFuzzer 2 | 3 | on: 4 | push: 5 | pull_request: 6 | schedule: 7 | - cron: "8 0 * * *" 8 | 9 | jobs: 10 | fuzz: 11 | name: libFuzzer 12 | runs-on: ubuntu-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | target: 17 | - inline_array 18 | - ring_buffer 19 | - sized_chunk 20 | - sparse_chunk 
21 | steps: 22 | - uses: actions/checkout@v2 23 | name: Checkout project 24 | - uses: actions/cache@v1 25 | name: Cache corpus 26 | id: cache-corpus 27 | with: 28 | path: fuzz/corpus/${{ matrix.target }} 29 | key: fuzz-corpus-${{ matrix.target }}-${{ github.run_id }} 30 | restore-keys: | 31 | fuzz-corpus-${{ matrix.target }}- 32 | - uses: actions-rs/toolchain@v1 33 | name: Install Rust 34 | with: 35 | profile: minimal 36 | toolchain: nightly 37 | override: true 38 | - uses: actions-rs/install@v0.1 39 | name: Install cargo-fuzz 40 | with: 41 | crate: cargo-fuzz 42 | version: latest 43 | use-tool-cache: true 44 | - name: Fuzz for 10 minutes 45 | run: cargo fuzz run ${{ matrix.target }} -- -max_total_time=600 # seconds 46 | - uses: actions/upload-artifact@v1 47 | name: Publish artifacts 48 | with: 49 | name: fuzz-artifacts 50 | path: fuzz/artifacts 51 | - uses: actions/upload-artifact@v2 52 | name: Publish corpus 53 | with: 54 | name: fuzz-corpus 55 | path: fuzz/corpus 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | lcov.info 5 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | - beta 5 | - nightly 6 | arch: 7 | - amd64 8 | - arm64 9 | - ppc64le 10 | cache: 11 | directories: 12 | - /home/travis/.rustup 13 | - /home/travis/.cargo 14 | - /home/travis/target 15 | 16 | addons: 17 | apt: 18 | packages: 19 | - cmake 20 | - libssl-dev 21 | - pkg-config 22 | - zlib1g-dev 23 | 24 | install: 25 | - rustup update 26 | - mkdir -p .cargo 27 | - echo '[build]' > .cargo/config 28 | - echo 'target-dir = "/home/travis/target"' >> .cargo/config 29 | 30 | matrix: 31 | include: 32 | - name: Clippy 33 | arch: amd64 34 | rust: stable 35 | env: CLIPPY=1 36 | install: 37 | - rustup component add clippy; true 38 | script: cargo clippy -- -D warnings 39 | 40 | - name: Coveralls 41 | arch: amd64 42 | rust: nightly 43 | install: 44 | - cargo install cargo-tarpaulin; true 45 | script: | 46 | cargo tarpaulin --ignore-tests --run-types Tests --run-types Doctests \ 47 | --exclude-files 'fuzz/*' --coveralls $TRAVIS_JOB_ID --ciserver travis-ci 48 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) and this project 6 | adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [0.7.0] - 2022-04-29 9 | 10 | ### CHANGED 11 | 12 | - Switched to const generics instead of the `typenum` crate. Rust 1.51 or later is required. 13 | 14 | ## [0.6.5] - 2021-04-16 15 | 16 | ### FIXED 17 | 18 | - When `InlineArray` cannot hold any values because of misalignment, report it as capacity 0 19 | instead of panicking at runtime. (#22) 20 | 21 | ## [0.6.4] - 2021-02-17 22 | 23 | ### FIXED 24 | 25 | - `InlineArray` can be used in recursive types again. 26 | 27 | ### CHANGED 28 | 29 | - `InlineArray::new()` now panics when it can't store elements with large alignment (this was UB 30 | prior to 0.6.3). Alignments of `usize` and smaller are always supported. 
Larger alignments are
31 | supported if the capacity-providing type has sufficient alignment.
32 |
33 | ## [0.6.3] - 2021-02-14
34 |
35 | ### FIXED
36 |
37 | - Multiple soundness fixes: `InlineArray` handles large alignment, panic safety in `Chunk`'s
38 | `clone` and `from_iter`, capacity checks in `unit()`, `pair()` and `from()`.
39 | - `InlineArray` can now handle zero sized values. This relies on conditionals in const functions,
40 | a feature which was introduced in Rust 1.46.0, which means this is now the minimum Rust version
41 | this crate will work on.
42 |
43 | ## [0.6.2] - 2020-05-15
44 |
45 | ### FIXED
46 |
47 | - This release exists for no other purpose than to bump the `refpool` optional dependency.
48 |
49 | ## [0.6.1] - 2020-03-26
50 |
51 | ### ADDED
52 |
53 | - The crate now has a `std` feature flag, which is on by default, and will make the crate `no_std`
54 | if disabled.
55 |
56 | ### FIXED
57 |
58 | - Fixed a compilation error if you had the `arbitrary` feature flag enabled without the
59 | `ringbuffer` flag.
60 |
61 | ## [0.6.0] - 2020-03-24
62 |
63 | ### CHANGED
64 |
65 | - `RingBuffer` and its accompanying slice types `Slice` and `SliceMut` now implement `Array` and
66 | `ArrayMut` from [`array-ops`](http://docs.rs/array-ops), giving them most of the methods that
67 | would be available on primitive slice types and cutting down on code duplication in the
68 | implementation, but at the price of having to pull `Array` et al into scope when you need them.
69 | Because this means adding a dependency to `array-ops`, `RingBuffer` has now been moved behind
70 | the `ringbuffer` feature flag. `Chunk` and `InlineArray` don't and won't implement `Array`,
71 | because they are both able to implement `Deref<[A]>`, which provides the same functionality more
72 | efficiently.
73 |
74 | ### ADDED
75 |
76 | - The `insert_from` and `insert_ordered` methods recently added to `Chunk` have now also been
77 | added to `RingBuffer`.
78 | - `RingBuffer`'s `Slice` and `SliceMut` now also have the three `binary_search` methods regular
79 | slices have.
80 | - `SparseChunk`, `RingBuffer`, `Slice` and `SliceMut` now have unsafe `get_unchecked` and
81 | `get_unchecked_mut` methods.
82 | - `PartialEq` implementations allowing you to compare `RingBuffer`s, `Slice`s and `SliceMut`s
83 | interchangeably have been added.
84 |
85 | ### FIXED
86 |
87 | - Fixed an aliasing issue in `RingBuffer`'s mutable iterator, as uncovered by Miri. Behind the
88 | scenes, the full non-fuzzing unit test suite is now able to run on Miri without crashing it
89 | (after migrating the last Proptest tests away from the test suite into the fuzz targets), and
90 | this has been included in its CI build. (#6)
91 |
92 | ## [0.5.3] - 2020-03-11
93 |
94 | ### FIXED
95 |
96 | - Debug only assertions made it into the previous release by accident, and this change has been
97 | reverted. (#7)
98 |
99 | ## [0.5.2] - 2020-03-10
100 |
101 | ### ADDED
102 |
103 | - `Chunk` now has an `insert_from` method for inserting multiple values at an index in one go.
104 | - `Chunk` now also has an `insert_ordered` method for inserting values into a sorted chunk.
105 | - `SparseChunk` now has the methods `option_iter()`, `option_iter_mut()` and `option_drain()` with
106 | their corresponding iterators to iterate over a chunk as if it were an array of `Option`s.
107 | - [`Arbitrary`](https://docs.rs/arbitrary/latest/arbitrary/trait.Arbitrary.html) implementations
108 | for all data types have been added behind the `arbitrary` feature flag.
109 |
110 | ### FIXED
111 |
112 | - Internal consistency assertions are now only performed in debug mode (like with
113 | `debug_assert!`). This means `sized_chunks` will no longer cause panics in release mode when you
114 | do things like pushing to a full chunk, but do bad and undefined things instead. It also means a
115 | very slight performance gain.
116 |
117 | ## [0.5.1] - 2019-12-12
118 |
119 | ### ADDED
120 |
121 | - `PoolDefault` and `PoolClone` implementations, from the
122 | [`refpool`](https://crates.io/crates/refpool) crate, are available for `Chunk`, `SparseChunk`
123 | and `RingBuffer`, behind the `refpool` feature flag.
124 |
125 | ## [0.5.0] - 2019-09-09
126 |
127 | ### CHANGED
128 |
129 | - The `Bitmap` type (and its helper type, `Bits`) has been split off into a separate crate, named
130 | `bitmaps`. If you need it, it's in that crate now. `sized-chunks` does not re-export it. Of
131 | course, this means `sized-chunks` has gained `bitmaps` as its second hard dependency.
132 |
133 | ## [0.4.0] - 2019-09-02
134 |
135 | ### CHANGED
136 |
137 | - The 0.3.2 release increased the minimum rustc version required, which should have been a major
138 | version bump, so 0.3.2 is being yanked and re-tagged as 0.4.0.
139 |
140 | ## [0.3.2] - 2019-08-29
141 |
142 | ### ADDED
143 |
144 | - Chunk/bitmap sizes up to 1024 are now supported.
145 |
146 | ### FIXED
147 |
148 | - Replaced `ManuallyDrop` in implementations with `MaybeUninit`, along with a general unsafe code
149 | cleanup. (#3)
150 |
151 | ## [0.3.1] - 2019-08-03
152 |
153 | ### ADDED
154 |
155 | - Chunk sizes up to 256 are now supported.
156 |
157 | ## [0.3.0] - 2019-05-18
158 |
159 | ### ADDED
160 |
161 | - A new data structure, `InlineArray`, which is a stack allocated array matching the size of a
162 | given type, intended for optimising for the case of very small vectors.
163 | - `Chunk` has an implementation of `From<InlineArray<A, T>>` which is considerably faster than going via
164 | iterators.
165 |
166 | ## [0.2.2] - 2019-05-10
167 |
168 | ### ADDED
169 |
170 | - `Slice::get` methods now return references with the lifetime of the underlying `RingBuffer`
171 | rather than the lifetime of the slice.
172 |
173 | ## [0.2.1] - 2019-04-15
174 |
175 | ### ADDED
176 |
177 | - A lot of documentation.
178 | - `std::io::Read` implementations for `Chunk` and `RingBuffer` to match their `Write`
179 | implementations.
180 |
181 | ## [0.2.0] - 2019-04-14
182 |
183 | ### CHANGED
184 |
185 | - The `capacity()` method has been replaced with a `CAPACITY` const on each type.
186 |
187 | ### ADDED
188 |
189 | - There is now a `RingBuffer` implementation, which should be nearly a drop-in replacement for
190 | `SizedChunk` but is always O(1) on push and cannot be dereferenced to slices (but it has a set
191 | of custom slice-like implementations to make that less of a drawback).
192 | - The `Drain` iterator for `SizedChunk` now implements `DoubleEndedIterator`.
193 |
194 | ### FIXED
195 |
196 | - `SizedChunk::drain_from_front/back` will now always panic if the iterator underflows, instead of
197 | only doing it in debug mode.
198 |
199 | ## [0.1.3] - 2019-04-12
200 |
201 | ### ADDED
202 |
203 | - `SparseChunk` now has a default length of `U64`.
204 | - `Chunk` now has `PartialEq` defined for anything that can be borrowed as a slice.
205 | - `SparseChunk` likewise has `PartialEq` defined for `BTreeMap<usize, A>` and
206 | `HashMap<usize, A>`. These are intended for debugging and aren't optimally efficient.
207 | - `Chunk` and `SparseChunk` now have a new method `capacity()` which returns its maximum capacity 208 | (the number in the type) as a usize. 209 | - Added an `entries()` method to `SparseChunk`. 210 | - `SparseChunk` now has a `Debug` implementation. 211 | 212 | ### FIXED 213 | 214 | - Extensive integration tests were added for `Chunk` and `SparseChunk`. 215 | - `Chunk::clear` is now very slightly faster. 216 | 217 | ## [0.1.2] - 2019-03-11 218 | 219 | ### FIXED 220 | 221 | - Fixed an alignment issue in `Chunk::drain_from_back`. (#1) 222 | 223 | ## [0.1.1] - 2019-02-19 224 | 225 | ### FIXED 226 | 227 | - Some 2018 edition issues. 228 | 229 | ## [0.1.0] - 2019-02-19 230 | 231 | Initial release. 232 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | education, socio-economic status, nationality, personal appearance, race, 10 | religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 
54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at admin@immutable.rs. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sized-chunks" 3 | version = "0.7.0" 4 | authors = ["Bodil Stokke "] 5 | edition = "2018" 6 | rust-version = "1.51.0" 7 | license = "MPL-2.0+" 8 | description = "Efficient sized chunk datatypes" 9 | repository = "https://github.com/bodil/sized-chunks" 10 | documentation = "http://docs.rs/sized-chunks" 11 | readme = "./README.md" 12 | categories = ["data-structures"] 13 | keywords = ["sparse-array"] 14 | exclude = ["release.toml", "proptest-regressions/**"] 15 | 16 | [package.metadata.docs.rs] 17 | all-features = true 18 | 19 | [features] 20 | default = ["std"] 21 | std = [] 22 | ringbuffer = ["array-ops"] 23 | 24 | [dependencies] 25 | bitmaps = { version = "3.1.0", default-features = false } 26 | array-ops = { version = "0.1.0", optional = true } 27 | refpool = { version = "0.4.3", optional = true } 28 | arbitrary = { version = "1.0.2", optional = true } 29 | -------------------------------------------------------------------------------- /LICENCE.md: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | ### 1. Definitions 5 | 6 | **1.1. “Contributor”** 7 | means each individual or legal entity that creates, contributes to 8 | the creation of, or owns Covered Software. 9 | 10 | **1.2. “Contributor Version”** 11 | means the combination of the Contributions of others (if any) used 12 | by a Contributor and that particular Contributor's Contribution. 13 | 14 | **1.3. “Contribution”** 15 | means Covered Software of a particular Contributor. 16 | 17 | **1.4. “Covered Software”** 18 | means Source Code Form to which the initial Contributor has attached 19 | the notice in Exhibit A, the Executable Form of such Source Code 20 | Form, and Modifications of such Source Code Form, in each case 21 | including portions thereof. 22 | 23 | **1.5. “Incompatible With Secondary Licenses”** 24 | means 25 | 26 | * **(a)** that the initial Contributor has attached the notice described 27 | in Exhibit B to the Covered Software; or 28 | * **(b)** that the Covered Software was made available under the terms of 29 | version 1.1 or earlier of the License, but not also under the 30 | terms of a Secondary License. 31 | 32 | **1.6. “Executable Form”** 33 | means any form of the work other than Source Code Form. 34 | 35 | **1.7. 
“Larger Work”** 36 | means a work that combines Covered Software with other material, in 37 | a separate file or files, that is not Covered Software. 38 | 39 | **1.8. “License”** 40 | means this document. 41 | 42 | **1.9. “Licensable”** 43 | means having the right to grant, to the maximum extent possible, 44 | whether at the time of the initial grant or subsequently, any and 45 | all of the rights conveyed by this License. 46 | 47 | **1.10. “Modifications”** 48 | means any of the following: 49 | 50 | * **(a)** any file in Source Code Form that results from an addition to, 51 | deletion from, or modification of the contents of Covered 52 | Software; or 53 | * **(b)** any new file in Source Code Form that contains any Covered 54 | Software. 55 | 56 | **1.11. “Patent Claims” of a Contributor** 57 | means any patent claim(s), including without limitation, method, 58 | process, and apparatus claims, in any patent Licensable by such 59 | Contributor that would be infringed, but for the grant of the 60 | License, by the making, using, selling, offering for sale, having 61 | made, import, or transfer of either its Contributions or its 62 | Contributor Version. 63 | 64 | **1.12. “Secondary License”** 65 | means either the GNU General Public License, Version 2.0, the GNU 66 | Lesser General Public License, Version 2.1, the GNU Affero General 67 | Public License, Version 3.0, or any later versions of those 68 | licenses. 69 | 70 | **1.13. “Source Code Form”** 71 | means the form of the work preferred for making modifications. 72 | 73 | **1.14. “You” (or “Your”)** 74 | means an individual or a legal entity exercising rights under this 75 | License. For legal entities, “You” includes any entity that 76 | controls, is controlled by, or is under common control with You. For 77 | purposes of this definition, “control” means **(a)** the power, direct 78 | or indirect, to cause the direction or management of such entity, 79 | whether by contract or otherwise, or **(b)** ownership of more than 80 | fifty percent (50%) of the outstanding shares or beneficial 81 | ownership of such entity. 82 | 83 | 84 | ### 2. License Grants and Conditions 85 | 86 | #### 2.1. Grants 87 | 88 | Each Contributor hereby grants You a world-wide, royalty-free, 89 | non-exclusive license: 90 | 91 | * **(a)** under intellectual property rights (other than patent or trademark) 92 | Licensable by such Contributor to use, reproduce, make available, 93 | modify, display, perform, distribute, and otherwise exploit its 94 | Contributions, either on an unmodified basis, with Modifications, or 95 | as part of a Larger Work; and 96 | * **(b)** under Patent Claims of such Contributor to make, use, sell, offer 97 | for sale, have made, import, and otherwise transfer either its 98 | Contributions or its Contributor Version. 99 | 100 | #### 2.2. Effective Date 101 | 102 | The licenses granted in Section 2.1 with respect to any Contribution 103 | become effective for each Contribution on the date the Contributor first 104 | distributes such Contribution. 105 | 106 | #### 2.3. Limitations on Grant Scope 107 | 108 | The licenses granted in this Section 2 are the only rights granted under 109 | this License. No additional rights or licenses will be implied from the 110 | distribution or licensing of Covered Software under this License. 
111 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 112 | Contributor: 113 | 114 | * **(a)** for any code that a Contributor has removed from Covered Software; 115 | or 116 | * **(b)** for infringements caused by: **(i)** Your and any other third party's 117 | modifications of Covered Software, or **(ii)** the combination of its 118 | Contributions with other software (except as part of its Contributor 119 | Version); or 120 | * **(c)** under Patent Claims infringed by Covered Software in the absence of 121 | its Contributions. 122 | 123 | This License does not grant any rights in the trademarks, service marks, 124 | or logos of any Contributor (except as may be necessary to comply with 125 | the notice requirements in Section 3.4). 126 | 127 | #### 2.4. Subsequent Licenses 128 | 129 | No Contributor makes additional grants as a result of Your choice to 130 | distribute the Covered Software under a subsequent version of this 131 | License (see Section 10.2) or under the terms of a Secondary License (if 132 | permitted under the terms of Section 3.3). 133 | 134 | #### 2.5. Representation 135 | 136 | Each Contributor represents that the Contributor believes its 137 | Contributions are its original creation(s) or it has sufficient rights 138 | to grant the rights to its Contributions conveyed by this License. 139 | 140 | #### 2.6. Fair Use 141 | 142 | This License is not intended to limit any rights You have under 143 | applicable copyright doctrines of fair use, fair dealing, or other 144 | equivalents. 145 | 146 | #### 2.7. Conditions 147 | 148 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 149 | in Section 2.1. 150 | 151 | 152 | ### 3. Responsibilities 153 | 154 | #### 3.1. Distribution of Source Form 155 | 156 | All distribution of Covered Software in Source Code Form, including any 157 | Modifications that You create or to which You contribute, must be under 158 | the terms of this License. You must inform recipients that the Source 159 | Code Form of the Covered Software is governed by the terms of this 160 | License, and how they can obtain a copy of this License. You may not 161 | attempt to alter or restrict the recipients' rights in the Source Code 162 | Form. 163 | 164 | #### 3.2. Distribution of Executable Form 165 | 166 | If You distribute Covered Software in Executable Form then: 167 | 168 | * **(a)** such Covered Software must also be made available in Source Code 169 | Form, as described in Section 3.1, and You must inform recipients of 170 | the Executable Form how they can obtain a copy of such Source Code 171 | Form by reasonable means in a timely manner, at a charge no more 172 | than the cost of distribution to the recipient; and 173 | 174 | * **(b)** You may distribute such Executable Form under the terms of this 175 | License, or sublicense it under different terms, provided that the 176 | license for the Executable Form does not attempt to limit or alter 177 | the recipients' rights in the Source Code Form under this License. 178 | 179 | #### 3.3. Distribution of a Larger Work 180 | 181 | You may create and distribute a Larger Work under terms of Your choice, 182 | provided that You also comply with the requirements of this License for 183 | the Covered Software. 
If the Larger Work is a combination of Covered 184 | Software with a work governed by one or more Secondary Licenses, and the 185 | Covered Software is not Incompatible With Secondary Licenses, this 186 | License permits You to additionally distribute such Covered Software 187 | under the terms of such Secondary License(s), so that the recipient of 188 | the Larger Work may, at their option, further distribute the Covered 189 | Software under the terms of either this License or such Secondary 190 | License(s). 191 | 192 | #### 3.4. Notices 193 | 194 | You may not remove or alter the substance of any license notices 195 | (including copyright notices, patent notices, disclaimers of warranty, 196 | or limitations of liability) contained within the Source Code Form of 197 | the Covered Software, except that You may alter any license notices to 198 | the extent required to remedy known factual inaccuracies. 199 | 200 | #### 3.5. Application of Additional Terms 201 | 202 | You may choose to offer, and to charge a fee for, warranty, support, 203 | indemnity or liability obligations to one or more recipients of Covered 204 | Software. However, You may do so only on Your own behalf, and not on 205 | behalf of any Contributor. You must make it absolutely clear that any 206 | such warranty, support, indemnity, or liability obligation is offered by 207 | You alone, and You hereby agree to indemnify every Contributor for any 208 | liability incurred by such Contributor as a result of warranty, support, 209 | indemnity or liability terms You offer. You may include additional 210 | disclaimers of warranty and limitations of liability specific to any 211 | jurisdiction. 212 | 213 | 214 | ### 4. Inability to Comply Due to Statute or Regulation 215 | 216 | If it is impossible for You to comply with any of the terms of this 217 | License with respect to some or all of the Covered Software due to 218 | statute, judicial order, or regulation then You must: **(a)** comply with 219 | the terms of this License to the maximum extent possible; and **(b)** 220 | describe the limitations and the code they affect. Such description must 221 | be placed in a text file included with all distributions of the Covered 222 | Software under this License. Except to the extent prohibited by statute 223 | or regulation, such description must be sufficiently detailed for a 224 | recipient of ordinary skill to be able to understand it. 225 | 226 | 227 | ### 5. Termination 228 | 229 | **5.1.** The rights granted under this License will terminate automatically 230 | if You fail to comply with any of its terms. However, if You become 231 | compliant, then the rights granted under this License from a particular 232 | Contributor are reinstated **(a)** provisionally, unless and until such 233 | Contributor explicitly and finally terminates Your grants, and **(b)** on an 234 | ongoing basis, if such Contributor fails to notify You of the 235 | non-compliance by some reasonable means prior to 60 days after You have 236 | come back into compliance. Moreover, Your grants from a particular 237 | Contributor are reinstated on an ongoing basis if such Contributor 238 | notifies You of the non-compliance by some reasonable means, this is the 239 | first time You have received notice of non-compliance with this License 240 | from such Contributor, and You become compliant prior to 30 days after 241 | Your receipt of the notice. 
242 | 243 | **5.2.** If You initiate litigation against any entity by asserting a patent 244 | infringement claim (excluding declaratory judgment actions, 245 | counter-claims, and cross-claims) alleging that a Contributor Version 246 | directly or indirectly infringes any patent, then the rights granted to 247 | You by any and all Contributors for the Covered Software under Section 248 | 2.1 of this License shall terminate. 249 | 250 | **5.3.** In the event of termination under Sections 5.1 or 5.2 above, all 251 | end user license agreements (excluding distributors and resellers) which 252 | have been validly granted by You or Your distributors under this License 253 | prior to termination shall survive termination. 254 | 255 | 256 | ### 6. Disclaimer of Warranty 257 | 258 | > Covered Software is provided under this License on an “as is” 259 | > basis, without warranty of any kind, either expressed, implied, or 260 | > statutory, including, without limitation, warranties that the 261 | > Covered Software is free of defects, merchantable, fit for a 262 | > particular purpose or non-infringing. The entire risk as to the 263 | > quality and performance of the Covered Software is with You. 264 | > Should any Covered Software prove defective in any respect, You 265 | > (not any Contributor) assume the cost of any necessary servicing, 266 | > repair, or correction. This disclaimer of warranty constitutes an 267 | > essential part of this License. No use of any Covered Software is 268 | > authorized under this License except under this disclaimer. 269 | 270 | ### 7. Limitation of Liability 271 | 272 | > Under no circumstances and under no legal theory, whether tort 273 | > (including negligence), contract, or otherwise, shall any 274 | > Contributor, or anyone who distributes Covered Software as 275 | > permitted above, be liable to You for any direct, indirect, 276 | > special, incidental, or consequential damages of any character 277 | > including, without limitation, damages for lost profits, loss of 278 | > goodwill, work stoppage, computer failure or malfunction, or any 279 | > and all other commercial damages or losses, even if such party 280 | > shall have been informed of the possibility of such damages. This 281 | > limitation of liability shall not apply to liability for death or 282 | > personal injury resulting from such party's negligence to the 283 | > extent applicable law prohibits such limitation. Some 284 | > jurisdictions do not allow the exclusion or limitation of 285 | > incidental or consequential damages, so this exclusion and 286 | > limitation may not apply to You. 287 | 288 | 289 | ### 8. Litigation 290 | 291 | Any litigation relating to this License may be brought only in the 292 | courts of a jurisdiction where the defendant maintains its principal 293 | place of business and such litigation shall be governed by laws of that 294 | jurisdiction, without reference to its conflict-of-law provisions. 295 | Nothing in this Section shall prevent a party's ability to bring 296 | cross-claims or counter-claims. 297 | 298 | 299 | ### 9. Miscellaneous 300 | 301 | This License represents the complete agreement concerning the subject 302 | matter hereof. If any provision of this License is held to be 303 | unenforceable, such provision shall be reformed only to the extent 304 | necessary to make it enforceable. 
Any law or regulation which provides 305 | that the language of a contract shall be construed against the drafter 306 | shall not be used to construe this License against a Contributor. 307 | 308 | 309 | ### 10. Versions of the License 310 | 311 | #### 10.1. New Versions 312 | 313 | Mozilla Foundation is the license steward. Except as provided in Section 314 | 10.3, no one other than the license steward has the right to modify or 315 | publish new versions of this License. Each version will be given a 316 | distinguishing version number. 317 | 318 | #### 10.2. Effect of New Versions 319 | 320 | You may distribute the Covered Software under the terms of the version 321 | of the License under which You originally received the Covered Software, 322 | or under the terms of any subsequent version published by the license 323 | steward. 324 | 325 | #### 10.3. Modified Versions 326 | 327 | If you create software not governed by this License, and you want to 328 | create a new license for such software, you may create and use a 329 | modified version of this License if you rename the license and remove 330 | any references to the name of the license steward (except to note that 331 | such modified license differs from this License). 332 | 333 | #### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses 334 | 335 | If You choose to distribute Source Code Form that is Incompatible With 336 | Secondary Licenses under the terms of this version of the License, the 337 | notice described in Exhibit B of this License must be attached. 338 | 339 | ## Exhibit A - Source Code Form License Notice 340 | 341 | This Source Code Form is subject to the terms of the Mozilla Public 342 | License, v. 2.0. If a copy of the MPL was not distributed with this 343 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 344 | 345 | If it is not possible or desirable to put the notice in a particular 346 | file, then You may include the notice in a location (such as a LICENSE 347 | file in a relevant directory) where a recipient would be likely to look 348 | for such a notice. 349 | 350 | You may add additional accurate notices of copyright ownership. 351 | 352 | ## Exhibit B - “Incompatible With Secondary Licenses” Notice 353 | 354 | This Source Code Form is "Incompatible With Secondary Licenses", as 355 | defined by the Mozilla Public License, v. 2.0. 356 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sized-chunks 2 | 3 | Various fixed length array data types, designed for [immutable.rs]. 4 | 5 | ## Overview 6 | 7 | This crate provides the core building blocks for the immutable data structures 8 | in [immutable.rs]: a sized array with O(1) amortised double ended push/pop and 9 | smarter insert/remove performance (used by `im::Vector` and `im::OrdMap`), and a 10 | fixed size sparse array (used by `im::HashMap`). 11 | 12 | In a nutshell, this crate contains the unsafe bits from [immutable.rs], which 13 | may or may not be useful to anyone else, and have been split out for ease of 14 | auditing. 15 | 16 | ## Documentation 17 | 18 | * [API docs](https://docs.rs/sized-chunks) 19 | 20 | ## Licence 21 | 22 | Copyright 2019 Bodil Stokke 23 | 24 | This software is subject to the terms of the Mozilla Public 25 | License, v. 2.0. If a copy of the MPL was not distributed with this 26 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 
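
## Example

Below is a minimal usage sketch of the two core types described in the overview; it is illustrative rather than part of the crate's test suite, and assumes the default capacity of 64 per chunk plus the documented `Chunk`/`SparseChunk` methods (`push_back`, `pop_front`, `insert`, `get`).

```rust
use sized_chunks::{Chunk, SparseChunk};

fn main() {
    // Chunk: a fixed-capacity array (64 elements by default) with cheap
    // push/pop at both ends. It derefs to a slice.
    let mut chunk = Chunk::<i32>::new();
    chunk.push_back(2);
    chunk.push_back(3);
    chunk.push_front(1);
    assert_eq!(1, chunk.pop_front());
    assert_eq!(&[2, 3][..], &chunk[..]);

    // SparseChunk: a fixed-size array of optional slots, tracked by a bitmap.
    // `insert` returns the previous value at that index, if any.
    let mut sparse = SparseChunk::<i32>::new();
    assert_eq!(None, sparse.insert(5, 42));
    assert_eq!(Some(&42), sparse.get(5));
    assert_eq!(None, sparse.get(6));
}
```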
27 |
28 | ## Code of Conduct
29 |
30 | Please note that this project is released with a [Contributor Code of
31 | Conduct][coc]. By participating in this project you agree to abide by its
32 | terms.
33 |
34 | [immutable.rs]: https://immutable.rs/
35 | [coc]: https://github.com/bodil/sized-chunks/blob/master/CODE_OF_CONDUCT.md
36 |
--------------------------------------------------------------------------------
/fuzz/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | target
3 | corpus
4 | artifacts
5 |
--------------------------------------------------------------------------------
/fuzz/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "sized-chunks-fuzz"
3 | version = "0.0.0"
4 | authors = ["Automatically generated"]
5 | publish = false
6 | edition = "2018"
7 |
8 | [package.metadata]
9 | cargo-fuzz = true
10 |
11 | [dependencies]
12 | array-ops = "0.1.0"
13 | libfuzzer-sys = "0.4.0"
14 | arbitrary = { version = "1.0.0", features = ["derive"] }
15 |
16 | [dependencies.sized-chunks]
17 | path = ".."
18 | features = ["arbitrary", "ringbuffer"]
19 |
20 | # Prevent this from interfering with workspaces
21 | [workspace]
22 | members = ["."]
23 |
24 | [[bin]]
25 | name = "sized_chunk"
26 | path = "fuzz_targets/sized_chunk.rs"
27 |
28 | [[bin]]
29 | name = "sparse_chunk"
30 | path = "fuzz_targets/sparse_chunk.rs"
31 |
32 | [[bin]]
33 | name = "inline_array"
34 | path = "fuzz_targets/inline_array.rs"
35 |
36 | [[bin]]
37 | name = "ring_buffer"
38 | path = "fuzz_targets/ring_buffer.rs"
39 |
--------------------------------------------------------------------------------
/fuzz/fuzz_targets/assert.rs:
--------------------------------------------------------------------------------
1 | use std::panic::{catch_unwind, set_hook, take_hook, AssertUnwindSafe};
2 |
3 | pub fn assert_panic<A, F>(f: F)
4 | where
5 |     F: FnOnce() -> A,
6 | {
7 |     let old_hook = take_hook();
8 |     set_hook(Box::new(|_| {}));
9 |     let result = catch_unwind(AssertUnwindSafe(f));
10 |     set_hook(old_hook);
11 |     assert!(
12 |         result.is_err(),
13 |         "action that should have panicked didn't panic"
14 |     );
15 | }
16 |
--------------------------------------------------------------------------------
/fuzz/fuzz_targets/inline_array.rs:
--------------------------------------------------------------------------------
1 | #![no_main]
2 |
3 | use std::fmt::Debug;
4 |
5 | use arbitrary::Arbitrary;
6 | use libfuzzer_sys::fuzz_target;
7 |
8 | use sized_chunks::InlineArray;
9 |
10 | mod assert;
11 | use assert::assert_panic;
12 |
13 | type TestType = [usize; 16];
14 |
15 | #[derive(Arbitrary, Debug)]
16 | enum Action<A> {
17 |     Push(A),
18 |     Pop,
19 |     Set((usize, A)),
20 |     Insert(usize, A),
21 |     Remove(usize),
22 |     SplitOff(usize),
23 |     Drain,
24 |     Clear,
25 | }
26 |
27 | fuzz_target!(|actions: Vec<Action<usize>>| {
28 |     let capacity = InlineArray::<usize, TestType>::CAPACITY;
29 |     let mut chunk = InlineArray::<usize, TestType>::new();
30 |     let mut guide: Vec<_> = chunk.iter().cloned().collect();
31 |     for action in actions {
32 |         match action {
33 |             Action::Push(value) => {
34 |                 if chunk.is_full() {
35 |                     assert_panic(|| chunk.push(value));
36 |                 } else {
37 |                     chunk.push(value);
38 |                     guide.push(value);
39 |                 }
40 |             }
41 |             Action::Pop => {
42 |                 assert_eq!(chunk.pop(), guide.pop());
43 |             }
44 |             Action::Set((index, value)) => {
45 |                 if index >= chunk.len() {
46 |                     assert_panic(|| chunk[index] = value);
47 |                 } else {
48 |                     chunk[index] = value;
49 |                     guide[index] = value;
50 |                 }
51 |             }
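            // `insert` shifts later elements along, so it must panic both for an
            // out-of-bounds index and for a full array; valid inserts are
            // mirrored on the guide Vec and compared at the end of each step.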
52 |             Action::Insert(index, value) => {
53 |                 if index > chunk.len() || chunk.is_full() {
54 |                     assert_panic(|| chunk.insert(index, value));
55 |                 } else {
56 |                     chunk.insert(index, value);
57 |                     guide.insert(index, value);
58 |                 }
59 |             }
60 |             Action::Remove(index) => {
61 |                 if index >= chunk.len() {
62 |                     assert_eq!(None, chunk.remove(index));
63 |                 } else {
64 |                     assert_eq!(chunk.remove(index), Some(guide.remove(index)));
65 |                 }
66 |             }
67 |             Action::SplitOff(index) => {
68 |                 if index > chunk.len() {
69 |                     assert_panic(|| chunk.split_off(index));
70 |                 } else {
71 |                     let chunk_off = chunk.split_off(index);
72 |                     let guide_off = guide.split_off(index);
73 |                     assert_eq!(chunk_off, guide_off);
74 |                 }
75 |             }
76 |             Action::Drain => {
77 |                 let drained: Vec<_> = chunk.drain().collect();
78 |                 let drained_guide: Vec<_> = guide.drain(..).collect();
79 |                 assert_eq!(drained, drained_guide);
80 |             }
81 |             Action::Clear => {
82 |                 chunk.clear();
83 |                 guide.clear();
84 |             }
85 |         }
86 |         assert_eq!(chunk, guide);
87 |         assert!(guide.len() <= capacity);
88 |     }
89 | });
90 |
--------------------------------------------------------------------------------
/fuzz/fuzz_targets/ring_buffer.rs:
--------------------------------------------------------------------------------
1 | #![no_main]
2 | #![feature(is_sorted)]
3 |
4 | use std::fmt::Debug;
5 | use std::iter::FromIterator;
6 |
7 | use arbitrary::Arbitrary;
8 | use array_ops::{ArrayMut, HasLength};
9 | use libfuzzer_sys::fuzz_target;
10 |
11 | use sized_chunks::RingBuffer;
12 |
13 | mod assert;
14 | use assert::assert_panic;
15 |
16 | #[derive(Arbitrary, Debug)]
17 | enum Construct<A> {
18 |     Empty,
19 |     Single(A),
20 |     Pair((A, A)),
21 |     DrainFrom(RingBuffer<A>),
22 |     CollectFrom(RingBuffer<A>, usize),
23 |     FromFront(RingBuffer<A>, usize),
24 |     FromBack(RingBuffer<A>, usize),
25 |     FromIter(RingBuffer<A>),
26 | }
27 |
28 | #[derive(Arbitrary, Debug)]
29 | enum Action<A> {
30 |     PushFront(A),
31 |     PushBack(A),
32 |     PopFront,
33 |     PopBack,
34 |     DropLeft(usize),
35 |     DropRight(usize),
36 |     SplitOff(usize),
37 |     Append(Construct<A>),
38 |     DrainFromFront(Construct<A>, usize),
39 |     DrainFromBack(Construct<A>, usize),
40 |     Set(usize, A),
41 |     Insert(usize, A),
42 |     InsertFrom(Vec<A>, usize),
43 |     InsertOrdered(A),
44 |     Remove(usize),
45 |     Drain,
46 |     Clear,
47 | }
48 |
49 | impl<A> Construct<A>
50 | where
51 |     A: Arbitrary<'static> + Clone + Debug + Eq,
52 | {
53 |     fn make(self) -> RingBuffer<A> {
54 |         match self {
55 |             Construct::Empty => {
56 |                 let out = RingBuffer::new();
57 |                 assert!(out.is_empty());
58 |                 out
59 |             }
60 |             Construct::Single(value) => {
61 |                 let out = RingBuffer::unit(value.clone());
62 |                 assert_eq!(out, vec![value]);
63 |                 out
64 |             }
65 |             Construct::Pair((left, right)) => {
66 |                 let out = RingBuffer::pair(left.clone(), right.clone());
67 |                 assert_eq!(out, vec![left, right]);
68 |                 out
69 |             }
70 |             Construct::DrainFrom(vec) => {
71 |                 let mut source = RingBuffer::from_iter(vec.iter().cloned());
72 |                 let out = RingBuffer::drain_from(&mut source);
73 |                 assert!(source.is_empty());
74 |                 assert_eq!(out, vec);
75 |                 out
76 |             }
77 |             Construct::CollectFrom(mut vec, len) => {
78 |                 if vec.is_empty() {
79 |                     return RingBuffer::new();
80 |                 }
81 |                 let len = len % vec.len();
82 |                 let mut source = vec.clone().into_iter();
83 |                 let out = RingBuffer::collect_from(&mut source, len);
84 |                 let expected_remainder = vec.split_off(len);
85 |                 let remainder: Vec<_> = source.collect();
86 |                 assert_eq!(expected_remainder, remainder);
87 |                 assert_eq!(out, vec);
88 |                 out
89 |             }
90 |             Construct::FromFront(mut vec, len) => {
91 |                 if vec.is_empty() {
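                    // The `len % vec.len()` reduction below would divide by zero
                    // on an empty source, so fall back to an empty buffer.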
92 |                     return RingBuffer::new();
93 |                 }
94 |                 let len = len % vec.len();
95 |                 let mut source = RingBuffer::from_iter(vec.iter().cloned());
96 |                 let out = RingBuffer::from_front(&mut source, len);
97 |                 let remainder = vec.split_off(len);
98 |                 assert_eq!(source, remainder);
99 |                 assert_eq!(out, vec);
100 |                 out
101 |             }
102 |             Construct::FromBack(mut vec, len) => {
103 |                 if vec.is_empty() {
104 |                     return RingBuffer::new();
105 |                 }
106 |                 let len = len % vec.len();
107 |                 let mut source = RingBuffer::from_iter(vec.iter().cloned());
108 |                 let out = RingBuffer::from_back(&mut source, len);
109 |                 let remainder = vec.split_off(vec.len() - len);
110 |                 assert_eq!(out, remainder);
111 |                 assert_eq!(source, vec);
112 |                 out
113 |             }
114 |             Construct::FromIter(vec) => {
115 |                 let out = vec.clone().into_iter().collect();
116 |                 assert_eq!(out, vec);
117 |                 out
118 |             }
119 |         }
120 |     }
121 | }
122 |
123 | fuzz_target!(|input: (Construct<u32>, Vec<Action<u32>>)| {
124 |     let (cons, actions) = input;
125 |     let capacity = RingBuffer::<u32>::CAPACITY;
126 |     let mut chunk = cons.make();
127 |     let mut guide: Vec<_> = chunk.iter().cloned().collect();
128 |     for action in actions {
129 |         match action {
130 |             Action::PushFront(value) => {
131 |                 if chunk.is_full() {
132 |                     assert_panic(|| chunk.push_front(value));
133 |                 } else {
134 |                     chunk.push_front(value);
135 |                     guide.insert(0, value);
136 |                 }
137 |             }
138 |             Action::PushBack(value) => {
139 |                 if chunk.is_full() {
140 |                     assert_panic(|| chunk.push_back(value));
141 |                 } else {
142 |                     chunk.push_back(value);
143 |                     guide.push(value);
144 |                 }
145 |             }
146 |             Action::PopFront => {
147 |                 assert_eq!(
148 |                     chunk.pop_front(),
149 |                     if guide.is_empty() {
150 |                         None
151 |                     } else {
152 |                         Some(guide.remove(0))
153 |                     }
154 |                 );
155 |             }
156 |             Action::PopBack => {
157 |                 assert_eq!(chunk.pop_back(), guide.pop());
158 |             }
159 |             Action::DropLeft(index) => {
160 |                 if index > chunk.len() {
161 |                     assert_panic(|| chunk.drop_left(index));
162 |                 } else {
163 |                     chunk.drop_left(index);
164 |                     guide.drain(..index);
165 |                 }
166 |             }
167 |             Action::DropRight(index) => {
168 |                 if index > chunk.len() {
169 |                     assert_panic(|| chunk.drop_right(index));
170 |                 } else {
171 |                     chunk.drop_right(index);
172 |                     guide.drain(index..);
173 |                 }
174 |             }
175 |             Action::SplitOff(index) => {
176 |                 if index > chunk.len() {
177 |                     assert_panic(|| chunk.split_off(index));
178 |                 } else {
179 |                     let chunk_off = chunk.split_off(index);
180 |                     let guide_off = guide.split_off(index);
181 |                     assert_eq!(chunk_off, guide_off);
182 |                 }
183 |             }
184 |             Action::Append(other) => {
185 |                 let mut other = other.make();
186 |                 let mut other_guide: Vec<_> = other.iter().cloned().collect();
187 |                 if other.len() + chunk.len() > capacity {
188 |                     assert_panic(|| chunk.append(&mut other));
189 |                 } else {
190 |                     chunk.append(&mut other);
191 |                     guide.append(&mut other_guide);
192 |                 }
193 |             }
194 |             Action::DrainFromFront(other, count) => {
195 |                 let mut other = other.make();
196 |                 let mut other_guide: Vec<_> = other.iter().cloned().collect();
197 |                 if count > other.len() || chunk.len() + count > capacity {
198 |                     assert_panic(|| chunk.drain_from_front(&mut other, count));
199 |                 } else {
200 |                     chunk.drain_from_front(&mut other, count);
201 |                     guide.extend(other_guide.drain(..count));
202 |                     assert_eq!(other, other_guide);
203 |                 }
204 |             }
205 |             Action::DrainFromBack(other, count) => {
206 |                 let mut other = other.make();
207 |                 let mut other_guide: Vec<_> = other.iter().cloned().collect();
208 |                 if count > other.len() || chunk.len() + count > capacity {
209 |                     assert_panic(|| chunk.drain_from_back(&mut other, count));
210 |                 } else {
211 |                     let other_index = other.len() - count;
212 |                     chunk.drain_from_back(&mut other, count);
213 |                     guide = other_guide
214 |                         .drain(other_index..)
215 |                         .chain(guide.into_iter())
216 |                         .collect();
217 |                     assert_eq!(other, other_guide);
218 |                 }
219 |             }
220 |             Action::Set(index, value) => {
221 |                 if index >= chunk.len() {
222 |                     assert_eq!(None, chunk.set(index, value));
223 |                 } else {
224 |                     chunk.set(index, value);
225 |                     guide[index] = value;
226 |                 }
227 |             }
228 |             Action::Insert(index, value) => {
229 |                 if index > chunk.len() || chunk.is_full() {
230 |                     assert_panic(|| chunk.insert(index, value));
231 |                 } else {
232 |                     chunk.insert(index, value);
233 |                     guide.insert(index, value);
234 |                 }
235 |             }
236 |             Action::InsertFrom(values, index) => {
237 |                 if index > chunk.len() || chunk.len() + values.len() > capacity {
238 |                     assert_panic(|| chunk.insert_from(index, values));
239 |                 } else {
240 |                     chunk.insert_from(index, values.clone());
241 |                     for value in values.into_iter().rev() {
242 |                         guide.insert(index, value);
243 |                     }
244 |                 }
245 |             }
246 |             Action::InsertOrdered(value) => {
247 |                 if chunk.iter().is_sorted() {
248 |                     if chunk.is_full() {
249 |                         assert_panic(|| chunk.insert_ordered(value));
250 |                     } else {
251 |                         chunk.insert_ordered(value);
252 |                         match guide.binary_search(&value) {
253 |                             Ok(index) => guide.insert(index, value),
254 |                             Err(index) => guide.insert(index, value),
255 |                         }
256 |                     }
257 |                 }
258 |             }
259 |             Action::Remove(index) => {
260 |                 if index >= chunk.len() {
261 |                     assert_panic(|| chunk.remove(index));
262 |                 } else {
263 |                     assert_eq!(chunk.remove(index), guide.remove(index));
264 |                 }
265 |             }
266 |             Action::Drain => {
267 |                 let drained: Vec<_> = chunk.drain().collect();
268 |                 let drained_guide: Vec<_> = guide.drain(..).collect();
269 |                 assert_eq!(drained, drained_guide);
270 |             }
271 |             Action::Clear => {
272 |                 chunk.clear();
273 |                 guide.clear();
274 |             }
275 |         }
276 |         assert_eq!(chunk, guide);
277 |         assert!(guide.len() <= capacity);
278 |     }
279 | });
280 |
--------------------------------------------------------------------------------
/fuzz/fuzz_targets/sized_chunk.rs:
--------------------------------------------------------------------------------
1 | #![no_main]
2 |
3 | use std::fmt::Debug;
4 | use std::iter::FromIterator;
5 |
6 | use arbitrary::Arbitrary;
7 | use libfuzzer_sys::fuzz_target;
8 |
9 | use sized_chunks::Chunk;
10 |
11 | mod assert;
12 | use assert::assert_panic;
13 |
14 | #[derive(Arbitrary, Debug)]
15 | enum Construct<A> {
16 |     Empty,
17 |     Single(A),
18 |     Pair((A, A)),
19 |     DrainFrom(Chunk<A>),
20 |     CollectFrom(Chunk<A>, usize),
21 |     FromFront(Chunk<A>, usize),
22 |     FromBack(Chunk<A>, usize),
23 | }
24 |
25 | #[derive(Arbitrary, Debug)]
26 | enum Action<A> {
27 |     PushFront(A),
28 |     PushBack(A),
29 |     PopFront,
30 |     PopBack,
31 |     DropLeft(usize),
32 |     DropRight(usize),
33 |     SplitOff(usize),
34 |     Append(Construct<A>),
35 |     DrainFromFront(Construct<A>, usize),
36 |     DrainFromBack(Construct<A>, usize),
37 |     Set(usize, A),
38 |     Insert(usize, A),
39 |     InsertFrom(Vec<A>, usize),
40 |     InsertOrdered(A),
41 |     Remove(usize),
42 |     Drain,
43 |     Clear,
44 | }
45 |
46 | impl<A> Construct<A>
47 | where
48 |     A: Arbitrary<'static> + Clone + Debug + Eq,
49 | {
50 |     fn make(self) -> Chunk<A> {
51 |         match self {
52 |             Construct::Empty => {
53 |                 let out = Chunk::new();
54 |                 assert!(out.is_empty());
55 |                 out
56 |             }
57 |             Construct::Single(value) => {
58 |                 let out = Chunk::unit(value.clone());
59 |                 assert_eq!(out, vec![value]);
60 |                 out
61 |             }
62 |             Construct::Pair((left, right)) => {
63 |                 let out = Chunk::pair(left.clone(), right.clone());
64 |                 assert_eq!(out, vec![left, right]);
65 |                 out
66 |             }
67 |             Construct::DrainFrom(vec) => {
68 |                 let mut source = Chunk::from_iter(vec.iter().cloned());
69 |                 let out = Chunk::drain_from(&mut source);
70 |                 assert!(source.is_empty());
71 |                 assert_eq!(out, vec);
72 |                 out
73 |             }
74 |             Construct::CollectFrom(mut vec, len) => {
75 |                 if vec.is_empty() {
76 |                     return Chunk::new();
77 |                 }
78 |                 let len = len % vec.len();
79 |                 let mut source = vec.clone().into_iter();
80 |                 let out = Chunk::collect_from(&mut source, len);
81 |                 let expected_remainder = vec.split_off(len);
82 |                 let remainder: Vec<_> = source.collect();
83 |                 assert_eq!(expected_remainder, remainder);
84 |                 assert_eq!(out, vec);
85 |                 out
86 |             }
87 |             Construct::FromFront(mut vec, len) => {
88 |                 if vec.is_empty() {
89 |                     return Chunk::new();
90 |                 }
91 |                 let len = len % vec.len();
92 |                 let mut source = Chunk::from_iter(vec.iter().cloned());
93 |                 let out = Chunk::from_front(&mut source, len);
94 |                 let remainder = vec.split_off(len);
95 |                 assert_eq!(source, remainder);
96 |                 assert_eq!(out, vec);
97 |                 out
98 |             }
99 |             Construct::FromBack(mut vec, len) => {
100 |                 if vec.is_empty() {
101 |                     return Chunk::new();
102 |                 }
103 |                 let len = len % vec.len();
104 |                 let mut source = Chunk::from_iter(vec.iter().cloned());
105 |                 let out = Chunk::from_back(&mut source, len);
106 |                 let remainder = vec.split_off(vec.len() - len);
107 |                 assert_eq!(out, remainder);
108 |                 assert_eq!(source, vec);
109 |                 out
110 |             }
111 |         }
112 |     }
113 | }
114 |
115 | fuzz_target!(|input: (Construct<u32>, Vec<Action<u32>>)| {
116 |     let (cons, actions) = input;
117 |     let capacity = Chunk::<u32>::CAPACITY;
118 |     let mut chunk = cons.make();
119 |     let mut guide: Vec<_> = chunk.iter().cloned().collect();
120 |     for action in actions {
121 |         match action {
122 |             Action::PushFront(value) => {
123 |                 if chunk.is_full() {
124 |                     assert_panic(|| chunk.push_front(value));
125 |                 } else {
126 |                     chunk.push_front(value);
127 |                     guide.insert(0, value);
128 |                 }
129 |             }
130 |             Action::PushBack(value) => {
131 |                 if chunk.is_full() {
132 |                     assert_panic(|| chunk.push_back(value));
133 |                 } else {
134 |                     chunk.push_back(value);
135 |                     guide.push(value);
136 |                 }
137 |             }
138 |             Action::PopFront => {
139 |                 if chunk.is_empty() {
140 |                     assert_panic(|| chunk.pop_front());
141 |                 } else {
142 |                     assert_eq!(chunk.pop_front(), guide.remove(0));
143 |                 }
144 |             }
145 |             Action::PopBack => {
146 |                 if chunk.is_empty() {
147 |                     assert_panic(|| chunk.pop_back());
148 |                 } else {
149 |                     assert_eq!(chunk.pop_back(), guide.pop().unwrap());
150 |                 }
151 |             }
152 |             Action::DropLeft(index) => {
153 |                 if index > chunk.len() {
154 |                     assert_panic(|| chunk.drop_left(index));
155 |                 } else {
156 |                     chunk.drop_left(index);
157 |                     guide.drain(..index);
158 |                 }
159 |             }
160 |             Action::DropRight(index) => {
161 |                 if index > chunk.len() {
162 |                     assert_panic(|| chunk.drop_right(index));
163 |                 } else {
164 |                     chunk.drop_right(index);
165 |                     guide.drain(index..);
166 |                 }
167 |             }
168 |             Action::SplitOff(index) => {
169 |                 if index > chunk.len() {
170 |                     assert_panic(|| chunk.split_off(index));
171 |                 } else {
172 |                     let chunk_off = chunk.split_off(index);
173 |                     let guide_off = guide.split_off(index);
174 |                     assert_eq!(chunk_off, guide_off);
175 |                 }
176 |             }
177 |             Action::Append(other) => {
178 |                 let mut other = other.make();
179 |                 let mut other_guide: Vec<_> = other.iter().cloned().collect();
180 |                 if other.len() + chunk.len() > capacity {
181 |                     assert_panic(|| chunk.append(&mut other));
182 |                 } else {
183 |                     chunk.append(&mut other);
184 |                     guide.append(&mut other_guide);
185 |                 }
186 |             }
187 |             Action::DrainFromFront(other, count) => {
188 |                 let mut other = other.make();
189 |                 let mut other_guide: Vec<_> = other.iter().cloned().collect();
190 |                 if count > other.len() || chunk.len() + count > capacity {
191 |                     assert_panic(|| chunk.drain_from_front(&mut other, count));
192 |                 } else {
193 |                     chunk.drain_from_front(&mut other, count);
194 |                     guide.extend(other_guide.drain(..count));
195 |                     assert_eq!(other, other_guide);
196 |                 }
197 |             }
198 |             Action::DrainFromBack(other, count) => {
199 |                 let mut other = other.make();
200 |                 let mut other_guide: Vec<_> = other.iter().cloned().collect();
201 |                 if count > other.len() || chunk.len() + count > capacity {
202 |                     assert_panic(|| chunk.drain_from_back(&mut other, count));
203 |                 } else {
204 |                     let other_index = other.len() - count;
205 |                     chunk.drain_from_back(&mut other, count);
206 |                     guide = other_guide
207 |                         .drain(other_index..)
208 |                         .chain(guide.into_iter())
209 |                         .collect();
210 |                     assert_eq!(other, other_guide);
211 |                 }
212 |             }
213 |             Action::Set(index, value) => {
214 |                 if index >= chunk.len() {
215 |                     assert_panic(|| chunk.set(index, value));
216 |                 } else {
217 |                     chunk.set(index, value);
218 |                     guide[index] = value;
219 |                 }
220 |             }
221 |             Action::Insert(index, value) => {
222 |                 if index > chunk.len() || chunk.is_full() {
223 |                     assert_panic(|| chunk.insert(index, value));
224 |                 } else {
225 |                     chunk.insert(index, value);
226 |                     guide.insert(index, value);
227 |                 }
228 |             }
229 |             Action::InsertFrom(values, index) => {
230 |                 if index > chunk.len() || chunk.len() + values.len() > capacity {
231 |                     assert_panic(|| chunk.insert_from(index, values));
232 |                 } else {
233 |                     chunk.insert_from(index, values.clone());
234 |                     for value in values.into_iter().rev() {
235 |                         guide.insert(index, value);
236 |                     }
237 |                 }
238 |             }
239 |             Action::InsertOrdered(value) => {
240 |                 if chunk.is_full() {
241 |                     assert_panic(|| chunk.insert_ordered(value));
242 |                 } else {
243 |                     chunk.insert_ordered(value);
244 |                     match guide.binary_search(&value) {
245 |                         Ok(index) => guide.insert(index, value),
246 |                         Err(index) => guide.insert(index, value),
247 |                     }
248 |                 }
249 |             }
250 |             Action::Remove(index) => {
251 |                 if index >= chunk.len() {
252 |                     assert_panic(|| chunk.remove(index));
253 |                 } else {
254 |                     assert_eq!(chunk.remove(index), guide.remove(index));
255 |                 }
256 |             }
257 |             Action::Drain => {
258 |                 let drained: Vec<_> = chunk.drain().collect();
259 |                 let drained_guide: Vec<_> = guide.drain(..).collect();
260 |                 assert_eq!(drained, drained_guide);
261 |             }
262 |             Action::Clear => {
263 |                 chunk.clear();
264 |                 guide.clear();
265 |             }
266 |         }
267 |         assert_eq!(chunk, guide);
268 |         assert!(guide.len() <= capacity);
269 |     }
270 | });
271 |
--------------------------------------------------------------------------------
/fuzz/fuzz_targets/sparse_chunk.rs:
--------------------------------------------------------------------------------
1 | #![no_main]
2 |
3 | use std::collections::BTreeMap;
4 | use std::fmt::Debug;
5 |
6 | use arbitrary::Arbitrary;
7 | use libfuzzer_sys::fuzz_target;
8 |
9 | use sized_chunks::SparseChunk;
10 |
11 | mod assert;
12 | use assert::assert_panic;
13 |
14 | #[derive(Arbitrary, Debug)]
15 | enum Construct<A> {
16 |     Empty,
17 |     Single((usize, A)),
18 |     Pair((usize, A, usize, A)),
19 | }
20 |
21 | #[derive(Arbitrary, Debug)]
22 | enum Action<A> {
23 |     Insert(usize, A),
24 |     Remove(usize),
25 |     Pop,
26 | }
27 |
28 | impl<A> Construct<A>
29 | where
30 |     A: Arbitrary<'static> + Clone + Debug + Eq,
31 | {
32 |     fn make(self) -> SparseChunk<A> {
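        // Construction wraps arbitrary indices modulo CAPACITY so that building
        // the chunk never panics; out-of-range indices are exercised separately
        // via Action::Insert and Action::Remove in the fuzz target below.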
33 | match self { 34 | Construct::Empty => { 35 | let out = SparseChunk::new(); 36 | assert!(out.is_empty()); 37 | out 38 | } 39 | Construct::Single((index, value)) => { 40 | let index = index % SparseChunk::::CAPACITY; 41 | let out = SparseChunk::unit(index, value.clone()); 42 | let mut guide = BTreeMap::new(); 43 | guide.insert(index, value); 44 | assert_eq!(out, guide); 45 | out 46 | } 47 | Construct::Pair((left_index, left, right_index, right)) => { 48 | let left_index = left_index % SparseChunk::::CAPACITY; 49 | let right_index = right_index % SparseChunk::::CAPACITY; 50 | let out = SparseChunk::pair(left_index, left.clone(), right_index, right.clone()); 51 | let mut guide = BTreeMap::new(); 52 | guide.insert(left_index, left); 53 | guide.insert(right_index, right); 54 | assert_eq!(out, guide); 55 | out 56 | } 57 | } 58 | } 59 | } 60 | 61 | fuzz_target!(|input: (Construct, Vec>)| { 62 | let (cons, actions) = input; 63 | let capacity = SparseChunk::::CAPACITY; 64 | let mut chunk = cons.make(); 65 | let mut guide: BTreeMap<_, _> = chunk.entries().map(|(i, v)| (i, *v)).collect(); 66 | for action in actions { 67 | match action { 68 | Action::Insert(index, value) => { 69 | if index >= capacity { 70 | assert_panic(|| chunk.insert(index, value)); 71 | } else { 72 | assert_eq!(chunk.insert(index, value), guide.insert(index, value)); 73 | } 74 | } 75 | Action::Remove(index) => { 76 | if index >= capacity { 77 | assert_panic(|| chunk.remove(index)); 78 | } else { 79 | assert_eq!(chunk.remove(index), guide.remove(&index)); 80 | } 81 | } 82 | Action::Pop => { 83 | if let Some(index) = chunk.first_index() { 84 | assert_eq!(chunk.pop(), guide.remove(&index)); 85 | } else { 86 | assert_eq!(chunk.pop(), None); 87 | } 88 | } 89 | } 90 | assert_eq!(chunk, guide); 91 | assert!(guide.len() <= SparseChunk::::CAPACITY); 92 | } 93 | }); 94 | -------------------------------------------------------------------------------- /proptest-regressions/sparse_chunk/iter.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 2ad229d4eddb85b6333670a1903b6963a430957e19d1baa8828d8509e4303bbd # shrinks to ref vec = [] 8 | cc da4a8ca46d2ae07c1f8d9d1fbcaa3050dfcdb90ffa7a906dac81f1803d686f8a # shrinks to ref vec = [] 9 | cc d275ef825ba16af3b9c42bb48ca1b7cee1a4cfbbf031205652338b55182ca789 # shrinks to ref vec = [] 10 | -------------------------------------------------------------------------------- /proptest-regressions/tests/ring_buffer.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 
7 | cc 2c928999051820c283f7ab9feb17d6602c7408e6eb2bccba5ffb7d6c03f73b68 # shrinks to cons = Empty, actions = [Append(Single(0))] 8 | cc a285955a949b92349e5b7a7a97fde8d38727566ff882d8d3edc4cdec41bafab4 # shrinks to cons = Empty, actions = [PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0)] 9 | -------------------------------------------------------------------------------- /proptest-regressions/tests/sized_chunk.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc c6400035ce2c932a234174d17724f6630d9aa38b1a0908a2757c6ba8eac6159b # shrinks to cons = FromBack([], 0) 8 | cc 9f0b6f83a6ebf03af4b559442bfa6ec9763814b97c5cbfc9c9d940de948ada6b # shrinks to cons = DrainFrom(InputVec([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])), actions = [PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0), PushFront(0)] 9 | cc 5c46cc6b716f841511a6d6538676ad2ca4293005ed998e37f0d8cbfd7d290004 # shrinks to cons = Single(0), actions = [Append(FromFront(InputVec([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), 12970592535785856016)), Append(DrainFrom(InputVec([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])))] 10 | -------------------------------------------------------------------------------- /release.toml: -------------------------------------------------------------------------------- 1 | sign-commit = true 2 | sign-tag = true 3 | pre-release-replacements = [ 4 | { file = "CHANGELOG.md", search = "\\[Unreleased\\]", replace = "[{{version}}] - {{date}}" } 5 | ] 6 | -------------------------------------------------------------------------------- /src/arbitrary.rs: -------------------------------------------------------------------------------- 1 | // This Source Code Form is subject to the terms of the Mozilla Public 2 | // License, v. 2.0. If a copy of the MPL was not distributed with this 3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/. 
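// Sketch, not repository code: both fuzz targets above use the same
// differential-testing pattern, applying every operation to the chunk under
// test and to a trusted std collection, then comparing the two after each
// step. A minimal version of that pattern as a plain function, assuming the
// `sized_chunks` API shown above:

use sized_chunks::Chunk;

fn mirror_ops(values: &[u8]) {
    let mut chunk = Chunk::<u8, 64>::new();
    let mut guide: Vec<u8> = Vec::new();
    for &value in values.iter().take(Chunk::<u8, 64>::CAPACITY) {
        chunk.push_back(value);
        guide.push(value);
        // `Chunk` compares equal to anything that borrows as a slice.
        assert_eq!(chunk, guide);
    }
    while !guide.is_empty() {
        // `Chunk::pop_front` returns the element directly and panics on an
        // empty chunk, which is why the fuzz targets gate such calls.
        assert_eq!(chunk.pop_front(), guide.remove(0));
    }
    assert!(chunk.is_empty());
}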
4 |
5 | use bitmaps::{Bits, BitsImpl};
6 |
7 | use ::arbitrary::{size_hint, Arbitrary, Result, Unstructured};
8 |
9 | use crate::{Chunk, InlineArray, SparseChunk};
10 |
11 | #[cfg(feature = "ringbuffer")]
12 | use crate::RingBuffer;
13 |
14 | impl<'a, A, const N: usize> Arbitrary<'a> for Chunk<A, N>
15 | where
16 |     A: Arbitrary<'a>,
17 |     BitsImpl<N>: Bits,
18 | {
19 |     fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
20 |         u.arbitrary_iter()?.take(Self::CAPACITY).collect()
21 |     }
22 |
23 |     fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
24 |         u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
25 |     }
26 |
27 |     fn size_hint(depth: usize) -> (usize, Option<usize>) {
28 |         size_hint::recursion_guard(depth, |depth| {
29 |             let (_, upper) = A::size_hint(depth);
30 |             (0, upper.map(|upper| upper * Self::CAPACITY))
31 |         })
32 |     }
33 | }
34 |
35 | #[cfg(feature = "ringbuffer")]
36 | impl<'a, A, const N: usize> Arbitrary<'a> for RingBuffer<A, N>
37 | where
38 |     A: Arbitrary<'a>,
39 |     BitsImpl<N>: Bits,
40 | {
41 |     fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
42 |         u.arbitrary_iter()?.take(Self::CAPACITY).collect()
43 |     }
44 |
45 |     fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
46 |         u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
47 |     }
48 |
49 |     fn size_hint(depth: usize) -> (usize, Option<usize>) {
50 |         size_hint::recursion_guard(depth, |depth| {
51 |             let (_, upper) = A::size_hint(depth);
52 |             (0, upper.map(|upper| upper * Self::CAPACITY))
53 |         })
54 |     }
55 | }
56 |
57 | impl<'a, A, const N: usize> Arbitrary<'a> for SparseChunk<A, N>
58 | where
59 |     A: Clone,
60 |     Option<A>: Arbitrary<'a>,
61 |     BitsImpl<N>: Bits,
62 | {
63 |     fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
64 |         u.arbitrary_iter()?.take(Self::CAPACITY).collect()
65 |     }
66 |
67 |     fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
68 |         u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
69 |     }
70 |
71 |     fn size_hint(depth: usize) -> (usize, Option<usize>) {
72 |         size_hint::recursion_guard(depth, |depth| {
73 |             let (_, upper) = Option::<A>::size_hint(depth);
74 |             (0, upper.map(|upper| upper * Self::CAPACITY))
75 |         })
76 |     }
77 | }
78 |
79 | impl<'a, A, T> Arbitrary<'a> for InlineArray<A, T>
80 | where
81 |     A: Arbitrary<'a>,
82 |     T: 'static,
83 | {
84 |     fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
85 |         u.arbitrary_iter()?.take(Self::CAPACITY).collect()
86 |     }
87 |
88 |     fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
89 |         u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
90 |     }
91 |
92 |     fn size_hint(depth: usize) -> (usize, Option<usize>) {
93 |         size_hint::recursion_guard(depth, |depth| {
94 |             let (_, upper) = A::size_hint(depth);
95 |             (0, upper.map(|upper| upper * Self::CAPACITY))
96 |         })
97 |     }
98 | }
99 |
--------------------------------------------------------------------------------
/src/inline_array/iter.rs:
--------------------------------------------------------------------------------
1 | use core::iter::FusedIterator;
2 |
3 | use crate::InlineArray;
4 |
5 | /// A consuming iterator over the elements of an `InlineArray`.
6 | pub struct Iter<A, T> {
7 |     pub(crate) array: InlineArray<A, T>,
8 | }
9 |
10 | impl<A, T> Iterator for Iter<A, T> {
11 |     type Item = A;
12 |
13 |     fn next(&mut self) -> Option<Self::Item> {
14 |         self.array.remove(0)
15 |     }
16 |
17 |     fn size_hint(&self) -> (usize, Option<usize>) {
18 |         (self.array.len(), Some(self.array.len()))
19 |     }
20 | }
21 |
22 | impl<A, T> DoubleEndedIterator for Iter<A, T> {
23 |     fn next_back(&mut self) -> Option<Self::Item> {
24 |         self.array.pop()
25 |     }
26 | }
27 |
28 | impl<A, T> ExactSizeIterator for Iter<A, T> {}
29 |
30 | impl<A, T> FusedIterator for Iter<A, T> {}
31 |
32 | /// A draining iterator over the elements of an `InlineArray`.
33 | ///
34 | /// "Draining" means that as the iterator yields each element, it's removed from
35 | /// the `InlineArray`. When the iterator terminates, the array will be empty.
36 | /// This is different from the consuming iterator `Iter` in that `Iter` will
37 | /// take ownership of the `InlineArray` and discard it when you're done
38 | /// iterating, while `Drain` leaves you still owning the drained `InlineArray`.
39 | pub struct Drain<'a, A, T> {
40 |     pub(crate) array: &'a mut InlineArray<A, T>,
41 | }
42 |
43 | impl<'a, A, T> Iterator for Drain<'a, A, T> {
44 |     type Item = A;
45 |
46 |     fn next(&mut self) -> Option<Self::Item> {
47 |         self.array.remove(0)
48 |     }
49 |
50 |     fn size_hint(&self) -> (usize, Option<usize>) {
51 |         (self.array.len(), Some(self.array.len()))
52 |     }
53 | }
54 |
55 | impl<'a, A, T> DoubleEndedIterator for Drain<'a, A, T> {
56 |     fn next_back(&mut self) -> Option<Self::Item> {
57 |         self.array.pop()
58 |     }
59 | }
60 |
61 | impl<'a, A, T> ExactSizeIterator for Drain<'a, A, T> {}
62 |
63 | impl<'a, A, T> FusedIterator for Drain<'a, A, T> {}
64 |
--------------------------------------------------------------------------------
/src/inline_array/mod.rs:
--------------------------------------------------------------------------------
1 | // This Source Code Form is subject to the terms of the Mozilla Public
2 | // License, v. 2.0. If a copy of the MPL was not distributed with this
3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/.
4 |
5 | //! A fixed capacity array sized to match some other type `T`.
6 | //!
7 | //! See [`InlineArray`](struct.InlineArray.html)
8 |
9 | use core::borrow::{Borrow, BorrowMut};
10 | use core::cmp::Ordering;
11 | use core::fmt::{Debug, Error, Formatter};
12 | use core::hash::{Hash, Hasher};
13 | use core::iter::FromIterator;
14 | use core::marker::PhantomData;
15 | use core::mem::{self, MaybeUninit};
16 | use core::ops::{Deref, DerefMut};
17 | use core::ptr;
18 | use core::ptr::NonNull;
19 | use core::slice::{from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut};
20 |
21 | mod iter;
22 | pub use self::iter::{Drain, Iter};
23 |
24 | /// A fixed capacity array sized to match some other type `T`.
25 | ///
26 | /// This works like a vector, but allocated on the stack (and thus marginally
27 | /// faster than `Vec`), with the allocated space exactly matching the size of
28 | /// the given type `T`. The vector consists of a `usize` tracking its current
29 | /// length and zero or more elements of type `A`. The capacity is thus
30 | /// `( size_of::<T>() - size_of::<usize>() ) / size_of::<A>()`. This could lead
31 | /// to situations where the capacity is zero, if `size_of::<A>()` is greater
32 | /// than `size_of::<T>() - size_of::<usize>()`, which is not an error and
33 | /// handled properly by the data structure.
34 | ///
35 | /// If `size_of::<T>()` is less than `size_of::<usize>()`, meaning the vector
36 | /// has no space to store its length, `InlineArray::new()` will panic.
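// Sketch, not crate code: a quick check of the capacity formula above, using
// this crate's type. The concrete numbers assume a 64-bit target, where
// size_of::<usize>() is 8.

use sized_chunks::InlineArray;

fn capacity_examples() {
    // T = [u64; 8] is 64 bytes; the usize length header takes 8 of them,
    // leaving (64 - 8) / 1 = 56 slots for u8 elements.
    assert_eq!(56, InlineArray::<u8, [u64; 8]>::CAPACITY);
    // With u64 elements: (64 - 8) / 8 = 7.
    assert_eq!(7, InlineArray::<u64, [u64; 8]>::CAPACITY);
    // When T has no room beyond the header, the capacity is simply zero.
    assert_eq!(0, InlineArray::<u64, [u64; 1]>::CAPACITY);
}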
37 | /// 38 | /// This is meant to facilitate optimisations where a list data structure 39 | /// allocates a fairly large struct for itself, allowing you to replace it with 40 | /// an `InlineArray` until it grows beyond its capacity. This not only gives you 41 | /// a performance boost at very small sizes, it also saves you from having to 42 | /// allocate anything on the heap until absolutely necessary. 43 | /// 44 | /// For instance, `im::Vector` in its final form currently looks like this 45 | /// (approximately): 46 | /// 47 | /// ```rust, ignore 48 | /// struct RRB { 49 | /// length: usize, 50 | /// tree_height: usize, 51 | /// outer_head: Rc>, 52 | /// inner_head: Rc>, 53 | /// tree: Rc>, 54 | /// inner_tail: Rc>, 55 | /// outer_tail: Rc>, 56 | /// } 57 | /// ``` 58 | /// 59 | /// That's two `usize`s and five `Rc`s, which comes in at 56 bytes on x86_64 60 | /// architectures. With `InlineArray`, that leaves us with 56 - 61 | /// `size_of::()` = 48 bytes we can use before having to expand into the 62 | /// full data struture. If `A` is `u8`, that's 48 elements, and even if `A` is a 63 | /// pointer we can still keep 6 of them inline before we run out of capacity. 64 | /// 65 | /// We can declare an enum like this: 66 | /// 67 | /// ```rust, ignore 68 | /// enum VectorWrapper { 69 | /// Inline(InlineArray>), 70 | /// Full(RRB), 71 | /// } 72 | /// ``` 73 | /// 74 | /// Both of these will have the same size, and we can swap the `Inline` case out 75 | /// with the `Full` case once the `InlineArray` runs out of capacity. 76 | #[repr(C)] 77 | pub struct InlineArray { 78 | // Alignment tricks 79 | // 80 | // We need both the `_header_align` and `data` to be properly aligned in memory. We do a few tricks 81 | // to handle that. 82 | // 83 | // * An alignment is always power of 2. Therefore, with a pair of alignments, one is always 84 | // a multiple of the other (one way or the other). 85 | // * A struct is aligned to at least the max alignment of each of its fields. 86 | // * A `repr(C)` struct follows the order of fields and pushes each as close to the previous one 87 | // as allowed by alignment. 88 | // 89 | // By placing two "fake" fields that have 0 size, but an alignment first, we make sure that all 90 | // 3 start at the beginning of the struct and that all of them are aligned to their maximum 91 | // alignment. 92 | // 93 | // Unfortunately, we can't use `[A; 0]` to align to actual alignment of the type `A`, because 94 | // it prevents use of `InlineArray` in recursive types. 95 | // We rely on alignment of `u64`/`usize` or `T` to be sufficient, and panic otherwise. We use 96 | // `u64` to handle all common types on 32-bit systems too. 97 | // 98 | // Furthermore, because we don't know if `u64` or `A` has bigger alignment, we decide on case by 99 | // case basis if the header or the elements go first. By placing the one with higher alignment 100 | // requirements first, we align that one and the other one will be aligned "automatically" when 101 | // placed just after it. 102 | // 103 | // To the best of our knowledge, this is all guaranteed by the compiler. But just to make sure, 104 | // we have bunch of asserts in the constructor to check; as these are invariants enforced by 105 | // the compiler, it should be trivial for it to remove the checks so they are for free (if we 106 | // are correct) or will save us (if we are not). 
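// Standalone sketch, not crate code: the zero-sized alignment field described
// in the comment above can be checked in isolation. A [(u64, usize); 0] field
// contributes no size but does force its alignment onto the containing struct.

use core::mem::{align_of, size_of};

#[repr(C)]
struct Header {
    _align: [(u64, usize); 0],
    byte: u8,
}

fn alignment_demo() {
    // The zero-length array itself takes no space...
    assert_eq!(0, size_of::<[(u64, usize); 0]>());
    // ...but the struct is aligned (and padded) as if it held one element,
    // which is what pins the length header to a validly aligned address.
    assert_eq!(align_of::<(u64, usize)>(), align_of::<Header>());
    assert_eq!(align_of::<Header>(), size_of::<Header>());
}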
107 | _header_align: [(u64, usize); 0], 108 | _phantom: PhantomData, 109 | data: MaybeUninit, 110 | } 111 | 112 | const fn capacity( 113 | host_size: usize, 114 | header_size: usize, 115 | element_size: usize, 116 | element_align: usize, 117 | container_align: usize, 118 | ) -> usize { 119 | if element_size == 0 { 120 | usize::MAX 121 | } else if element_align <= container_align && host_size > header_size { 122 | (host_size - header_size) / element_size 123 | } else { 124 | 0 // larger alignment can't be guaranteed, so it'd be unsafe to store any elements 125 | } 126 | } 127 | 128 | impl InlineArray { 129 | const HOST_SIZE: usize = mem::size_of::(); 130 | const ELEMENT_SIZE: usize = mem::size_of::(); 131 | const HEADER_SIZE: usize = mem::size_of::(); 132 | // Do we place the header before the elements or the other way around? 133 | const HEADER_FIRST: bool = mem::align_of::() >= mem::align_of::(); 134 | // Note: one of the following is always 0 135 | // How many usizes to skip before the first element? 136 | const ELEMENT_SKIP: usize = Self::HEADER_FIRST as usize; 137 | // How many elements to skip before the header 138 | const HEADER_SKIP: usize = Self::CAPACITY * (1 - Self::ELEMENT_SKIP); 139 | 140 | /// The maximum number of elements the `InlineArray` can hold. 141 | pub const CAPACITY: usize = capacity( 142 | Self::HOST_SIZE, 143 | Self::HEADER_SIZE, 144 | Self::ELEMENT_SIZE, 145 | mem::align_of::(), 146 | mem::align_of::(), 147 | ); 148 | 149 | #[inline] 150 | #[must_use] 151 | unsafe fn len_const(&self) -> *const usize { 152 | let ptr = self 153 | .data 154 | .as_ptr() 155 | .cast::() 156 | .add(Self::HEADER_SKIP) 157 | .cast::(); 158 | debug_assert!(ptr as usize % mem::align_of::() == 0); 159 | ptr 160 | } 161 | 162 | #[inline] 163 | #[must_use] 164 | pub(crate) unsafe fn len_mut(&mut self) -> *mut usize { 165 | let ptr = self 166 | .data 167 | .as_mut_ptr() 168 | .cast::() 169 | .add(Self::HEADER_SKIP) 170 | .cast::(); 171 | debug_assert!(ptr as usize % mem::align_of::() == 0); 172 | ptr 173 | } 174 | 175 | #[inline] 176 | #[must_use] 177 | pub(crate) unsafe fn data(&self) -> *const A { 178 | if Self::CAPACITY == 0 { 179 | return NonNull::::dangling().as_ptr(); 180 | } 181 | let ptr = self 182 | .data 183 | .as_ptr() 184 | .cast::() 185 | .add(Self::ELEMENT_SKIP) 186 | .cast::(); 187 | debug_assert!(ptr as usize % mem::align_of::() == 0); 188 | ptr 189 | } 190 | 191 | #[inline] 192 | #[must_use] 193 | unsafe fn data_mut(&mut self) -> *mut A { 194 | if Self::CAPACITY == 0 { 195 | return NonNull::::dangling().as_ptr(); 196 | } 197 | let ptr = self 198 | .data 199 | .as_mut_ptr() 200 | .cast::() 201 | .add(Self::ELEMENT_SKIP) 202 | .cast::(); 203 | debug_assert!(ptr as usize % mem::align_of::() == 0); 204 | ptr 205 | } 206 | 207 | #[inline] 208 | #[must_use] 209 | unsafe fn ptr_at(&self, index: usize) -> *const A { 210 | debug_assert!(index < Self::CAPACITY); 211 | self.data().add(index) 212 | } 213 | 214 | #[inline] 215 | #[must_use] 216 | unsafe fn ptr_at_mut(&mut self, index: usize) -> *mut A { 217 | debug_assert!(index < Self::CAPACITY); 218 | self.data_mut().add(index) 219 | } 220 | 221 | #[inline] 222 | unsafe fn read_at(&self, index: usize) -> A { 223 | ptr::read(self.ptr_at(index)) 224 | } 225 | 226 | #[inline] 227 | unsafe fn write_at(&mut self, index: usize, value: A) { 228 | ptr::write(self.ptr_at_mut(index), value); 229 | } 230 | 231 | /// Get the length of the array. 
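// Sketch, not crate code: the accessors above all rely on one scheme, where
// the usize length and the element slots sit at fixed offsets inside the
// MaybeUninit buffer and are reached with raw pointer reads and writes. A
// stripped-down version of the header-first ordering (the real type also
// supports the elements-first ordering):

use core::mem::MaybeUninit;
use core::ptr;

fn header_then_slots() {
    let mut buf: MaybeUninit<[usize; 4]> = MaybeUninit::uninit();
    unsafe {
        let header = buf.as_mut_ptr().cast::<usize>();
        let slot = header.add(1); // first element lives just past the header
        ptr::write(header, 0); // start with length zero
        ptr::write(slot, 42usize); // "push" one element...
        ptr::write(header, 1); // ...then bump the length
        assert_eq!(1, ptr::read(header));
        assert_eq!(42, ptr::read(slot));
    }
}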
232 | #[inline] 233 | #[must_use] 234 | pub fn len(&self) -> usize { 235 | unsafe { *self.len_const() } 236 | } 237 | 238 | /// Test if the array is empty. 239 | #[inline] 240 | #[must_use] 241 | pub fn is_empty(&self) -> bool { 242 | self.len() == 0 243 | } 244 | 245 | /// Test if the array is at capacity. 246 | #[inline] 247 | #[must_use] 248 | pub fn is_full(&self) -> bool { 249 | self.len() >= Self::CAPACITY 250 | } 251 | 252 | /// Construct a new empty array. 253 | /// 254 | /// # Panics 255 | /// 256 | /// If the element type requires large alignment, which is larger than 257 | /// both alignment of `usize` and alignment of the type that provides the capacity. 258 | #[inline] 259 | #[must_use] 260 | pub fn new() -> Self { 261 | assert!(Self::HOST_SIZE > Self::HEADER_SIZE); 262 | assert!( 263 | (Self::CAPACITY == 0) || (mem::align_of::() % mem::align_of::() == 0), 264 | "InlineArray can't satisfy alignment of {}", 265 | core::any::type_name::() 266 | ); 267 | 268 | let mut self_ = Self { 269 | _header_align: [], 270 | _phantom: PhantomData, 271 | data: MaybeUninit::uninit(), 272 | }; 273 | // Sanity check our assumptions about what is guaranteed by the compiler. If we are right, 274 | // these should completely optimize out of the resulting binary. 275 | assert_eq!( 276 | &self_ as *const _ as usize, 277 | self_.data.as_ptr() as usize, 278 | "Padding at the start of struct", 279 | ); 280 | assert_eq!( 281 | self_.data.as_ptr() as usize % mem::align_of::(), 282 | 0, 283 | "Unaligned header" 284 | ); 285 | assert!(mem::size_of::() == mem::size_of::() || mem::align_of::() < mem::align_of::()); 286 | assert_eq!(0, unsafe { self_.data() } as usize % mem::align_of::()); 287 | assert_eq!(0, unsafe { self_.data_mut() } as usize % mem::align_of::()); 288 | assert!(Self::ELEMENT_SKIP == 0 || Self::HEADER_SKIP == 0); 289 | unsafe { ptr::write(self_.len_mut(), 0usize) }; 290 | self_ 291 | } 292 | 293 | /// Push an item to the back of the array. 294 | /// 295 | /// Panics if the capacity of the array is exceeded. 296 | /// 297 | /// Time: O(1) 298 | pub fn push(&mut self, value: A) { 299 | if self.is_full() { 300 | panic!("InlineArray::push: chunk size overflow"); 301 | } 302 | unsafe { 303 | self.write_at(self.len(), value); 304 | *self.len_mut() += 1; 305 | } 306 | } 307 | 308 | /// Pop an item from the back of the array. 309 | /// 310 | /// Returns `None` if the array is empty. 311 | /// 312 | /// Time: O(1) 313 | pub fn pop(&mut self) -> Option { 314 | if self.is_empty() { 315 | None 316 | } else { 317 | unsafe { 318 | *self.len_mut() -= 1; 319 | } 320 | Some(unsafe { self.read_at(self.len()) }) 321 | } 322 | } 323 | 324 | /// Insert a new value at index `index`, shifting all the following values 325 | /// to the right. 326 | /// 327 | /// Panics if the index is out of bounds or the array is at capacity. 328 | /// 329 | /// Time: O(n) for the number of items shifted 330 | pub fn insert(&mut self, index: usize, value: A) { 331 | if self.is_full() { 332 | panic!("InlineArray::push: chunk size overflow"); 333 | } 334 | if index > self.len() { 335 | panic!("InlineArray::insert: index out of bounds"); 336 | } 337 | unsafe { 338 | let src = self.ptr_at_mut(index); 339 | ptr::copy(src, src.add(1), self.len() - index); 340 | ptr::write(src, value); 341 | *self.len_mut() += 1; 342 | } 343 | } 344 | 345 | /// Remove the value at index `index`, shifting all the following values to 346 | /// the left. 
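// Sketch, not crate code: a short usage example of the operations above. The
// host type and its capacity assume a 64-bit target.

use sized_chunks::InlineArray;

fn basic_ops() {
    let mut arr: InlineArray<u8, [usize; 4]> = InlineArray::new();
    arr.push(1);
    arr.push(3);
    arr.insert(1, 2); // shifts the tail right: [1, 2, 3]
    assert_eq!(arr, [1, 2, 3]);
    assert_eq!(Some(2), arr.remove(1)); // shifts the tail left again
    assert_eq!(Some(3), arr.pop());
    assert_eq!(arr, [1]);
}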
347 | /// 348 | /// Returns the removed value, or `None` if the array is empty or the index 349 | /// is out of bounds. 350 | /// 351 | /// Time: O(n) for the number of items shifted 352 | pub fn remove(&mut self, index: usize) -> Option { 353 | if index >= self.len() { 354 | None 355 | } else { 356 | unsafe { 357 | let src = self.ptr_at_mut(index); 358 | let value = ptr::read(src); 359 | *self.len_mut() -= 1; 360 | ptr::copy(src.add(1), src, self.len() - index); 361 | Some(value) 362 | } 363 | } 364 | } 365 | 366 | /// Split an array into two, the original array containing 367 | /// everything up to `index` and the returned array containing 368 | /// everything from `index` onwards. 369 | /// 370 | /// Panics if `index` is out of bounds. 371 | /// 372 | /// Time: O(n) for the number of items in the new chunk 373 | pub fn split_off(&mut self, index: usize) -> Self { 374 | if index > self.len() { 375 | panic!("InlineArray::split_off: index out of bounds"); 376 | } 377 | let mut out = Self::new(); 378 | if index < self.len() { 379 | unsafe { 380 | ptr::copy(self.ptr_at(index), out.data_mut(), self.len() - index); 381 | *out.len_mut() = self.len() - index; 382 | *self.len_mut() = index; 383 | } 384 | } 385 | out 386 | } 387 | 388 | #[inline] 389 | unsafe fn drop_contents(&mut self) { 390 | ptr::drop_in_place::<[A]>(&mut **self) // uses DerefMut 391 | } 392 | 393 | /// Discard the contents of the array. 394 | /// 395 | /// Time: O(n) 396 | pub fn clear(&mut self) { 397 | unsafe { 398 | self.drop_contents(); 399 | *self.len_mut() = 0; 400 | } 401 | } 402 | 403 | /// Construct an iterator that drains values from the front of the array. 404 | pub fn drain(&mut self) -> Drain<'_, A, T> { 405 | Drain { array: self } 406 | } 407 | } 408 | 409 | impl Drop for InlineArray { 410 | fn drop(&mut self) { 411 | unsafe { self.drop_contents() } 412 | } 413 | } 414 | 415 | impl Default for InlineArray { 416 | fn default() -> Self { 417 | Self::new() 418 | } 419 | } 420 | 421 | // WANT: 422 | // impl Copy for InlineArray where A: Copy {} 423 | 424 | impl Clone for InlineArray 425 | where 426 | A: Clone, 427 | { 428 | fn clone(&self) -> Self { 429 | let mut copy = Self::new(); 430 | for i in 0..self.len() { 431 | unsafe { 432 | copy.write_at(i, self.get_unchecked(i).clone()); 433 | } 434 | } 435 | unsafe { 436 | *copy.len_mut() = self.len(); 437 | } 438 | copy 439 | } 440 | } 441 | 442 | impl Deref for InlineArray { 443 | type Target = [A]; 444 | fn deref(&self) -> &Self::Target { 445 | unsafe { from_raw_parts(self.data(), self.len()) } 446 | } 447 | } 448 | 449 | impl DerefMut for InlineArray { 450 | fn deref_mut(&mut self) -> &mut Self::Target { 451 | unsafe { from_raw_parts_mut(self.data_mut(), self.len()) } 452 | } 453 | } 454 | 455 | impl Borrow<[A]> for InlineArray { 456 | fn borrow(&self) -> &[A] { 457 | self.deref() 458 | } 459 | } 460 | 461 | impl BorrowMut<[A]> for InlineArray { 462 | fn borrow_mut(&mut self) -> &mut [A] { 463 | self.deref_mut() 464 | } 465 | } 466 | 467 | impl AsRef<[A]> for InlineArray { 468 | fn as_ref(&self) -> &[A] { 469 | self.deref() 470 | } 471 | } 472 | 473 | impl AsMut<[A]> for InlineArray { 474 | fn as_mut(&mut self) -> &mut [A] { 475 | self.deref_mut() 476 | } 477 | } 478 | impl PartialEq for InlineArray 479 | where 480 | Slice: Borrow<[A]>, 481 | A: PartialEq, 482 | { 483 | fn eq(&self, other: &Slice) -> bool { 484 | self.deref() == other.borrow() 485 | } 486 | } 487 | 488 | impl Eq for InlineArray where A: Eq {} 489 | 490 | impl PartialOrd for InlineArray 491 | where 492 
| A: PartialOrd, 493 | { 494 | fn partial_cmp(&self, other: &Self) -> Option { 495 | self.iter().partial_cmp(other.iter()) 496 | } 497 | } 498 | 499 | impl Ord for InlineArray 500 | where 501 | A: Ord, 502 | { 503 | fn cmp(&self, other: &Self) -> Ordering { 504 | self.iter().cmp(other.iter()) 505 | } 506 | } 507 | 508 | impl Debug for InlineArray 509 | where 510 | A: Debug, 511 | { 512 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { 513 | f.write_str("Chunk")?; 514 | f.debug_list().entries(self.iter()).finish() 515 | } 516 | } 517 | 518 | impl Hash for InlineArray 519 | where 520 | A: Hash, 521 | { 522 | fn hash(&self, hasher: &mut H) 523 | where 524 | H: Hasher, 525 | { 526 | for item in self { 527 | item.hash(hasher) 528 | } 529 | } 530 | } 531 | 532 | impl IntoIterator for InlineArray { 533 | type Item = A; 534 | type IntoIter = Iter; 535 | fn into_iter(self) -> Self::IntoIter { 536 | Iter { array: self } 537 | } 538 | } 539 | 540 | impl FromIterator for InlineArray { 541 | fn from_iter(it: I) -> Self 542 | where 543 | I: IntoIterator, 544 | { 545 | let mut chunk = Self::new(); 546 | for item in it { 547 | chunk.push(item); 548 | } 549 | chunk 550 | } 551 | } 552 | 553 | impl<'a, A, T> IntoIterator for &'a InlineArray { 554 | type Item = &'a A; 555 | type IntoIter = SliceIter<'a, A>; 556 | fn into_iter(self) -> Self::IntoIter { 557 | self.iter() 558 | } 559 | } 560 | 561 | impl<'a, A, T> IntoIterator for &'a mut InlineArray { 562 | type Item = &'a mut A; 563 | type IntoIter = SliceIterMut<'a, A>; 564 | fn into_iter(self) -> Self::IntoIter { 565 | self.iter_mut() 566 | } 567 | } 568 | 569 | impl Extend for InlineArray { 570 | /// Append the contents of the iterator to the back of the array. 571 | /// 572 | /// Panics if the array exceeds its capacity. 573 | /// 574 | /// Time: O(n) for the length of the iterator 575 | fn extend(&mut self, it: I) 576 | where 577 | I: IntoIterator, 578 | { 579 | for item in it { 580 | self.push(item); 581 | } 582 | } 583 | } 584 | 585 | impl<'a, A, T> Extend<&'a A> for InlineArray 586 | where 587 | A: 'a + Copy, 588 | { 589 | /// Append the contents of the iterator to the back of the array. 590 | /// 591 | /// Panics if the array exceeds its capacity. 
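// Sketch, not crate code: because FromIterator and Extend panic once the
// capacity is exceeded, a longer iterator needs clamping first, the same way
// the Arbitrary impls in src/arbitrary.rs clamp with take(Self::CAPACITY).
// The capacity arithmetic assumes a 64-bit target.

use sized_chunks::InlineArray;

fn collect_clamped() {
    type Small = InlineArray<u64, [u64; 4]>; // (32 - 8) / 8 = 3 elements
    let arr: Small = (0u64..100).take(Small::CAPACITY).collect();
    assert_eq!(Small::CAPACITY, arr.len());
    assert!(arr.is_full());
}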
592 | /// 593 | /// Time: O(n) for the length of the iterator 594 | fn extend(&mut self, it: I) 595 | where 596 | I: IntoIterator, 597 | { 598 | for item in it { 599 | self.push(*item); 600 | } 601 | } 602 | } 603 | 604 | #[cfg(test)] 605 | mod test { 606 | use super::*; 607 | use crate::tests::DropTest; 608 | use std::sync::atomic::{AtomicUsize, Ordering}; 609 | 610 | #[test] 611 | fn dropping() { 612 | let counter = AtomicUsize::new(0); 613 | { 614 | let mut chunk: InlineArray, [usize; 32]> = InlineArray::new(); 615 | for _i in 0..16 { 616 | chunk.push(DropTest::new(&counter)); 617 | } 618 | assert_eq!(16, counter.load(Ordering::Relaxed)); 619 | for _i in 0..8 { 620 | chunk.pop(); 621 | } 622 | assert_eq!(8, counter.load(Ordering::Relaxed)); 623 | } 624 | assert_eq!(0, counter.load(Ordering::Relaxed)); 625 | } 626 | 627 | #[test] 628 | fn zero_sized_values() { 629 | let mut chunk: InlineArray<(), [usize; 32]> = InlineArray::new(); 630 | for _i in 0..65536 { 631 | chunk.push(()); 632 | } 633 | assert_eq!(65536, chunk.len()); 634 | assert_eq!(Some(()), chunk.pop()); 635 | } 636 | 637 | #[test] 638 | fn low_align_base() { 639 | let mut chunk: InlineArray = InlineArray::new(); 640 | chunk.push("Hello".to_owned()); 641 | assert_eq!(chunk[0], "Hello"); 642 | 643 | let mut chunk: InlineArray = InlineArray::new(); 644 | chunk.push("Hello".to_owned()); 645 | assert_eq!(chunk[0], "Hello"); 646 | } 647 | 648 | #[test] 649 | fn float_align() { 650 | let mut chunk: InlineArray = InlineArray::new(); 651 | chunk.push(1234.); 652 | assert_eq!(chunk[0], 1234.); 653 | 654 | let mut chunk: InlineArray = InlineArray::new(); 655 | chunk.push(1234.); 656 | assert_eq!(chunk[0], 1234.); 657 | } 658 | 659 | #[test] 660 | fn recursive_types_compile() { 661 | #[allow(dead_code)] 662 | enum Recursive { 663 | A(InlineArray), 664 | B, 665 | } 666 | } 667 | 668 | #[test] 669 | fn insufficient_alignment1() { 670 | #[repr(align(256))] 671 | struct BigAlign(u8); 672 | #[repr(align(32))] 673 | struct MediumAlign(u8); 674 | 675 | assert_eq!(0, InlineArray::::CAPACITY); 676 | assert_eq!(0, InlineArray::::CAPACITY); 677 | assert_eq!(0, InlineArray::::CAPACITY); 678 | assert_eq!(0, InlineArray::::CAPACITY); 679 | } 680 | 681 | #[test] 682 | fn insufficient_alignment2() { 683 | #[repr(align(256))] 684 | struct BigAlign(usize); 685 | 686 | let mut bad: InlineArray = InlineArray::new(); 687 | assert_eq!(0, InlineArray::::CAPACITY); 688 | assert_eq!(0, bad.len()); 689 | assert_eq!(0, bad[..].len()); 690 | assert_eq!(true, bad.is_full()); 691 | assert_eq!(0, bad.drain().count()); 692 | assert!(bad.pop().is_none()); 693 | assert!(bad.remove(0).is_none()); 694 | assert!(bad.split_off(0).is_full()); 695 | bad.clear(); 696 | } 697 | 698 | #[test] 699 | fn sufficient_alignment1() { 700 | #[repr(align(256))] 701 | struct BigAlign(u8); 702 | 703 | assert_eq!(13, InlineArray::::CAPACITY); 704 | assert_eq!(1, InlineArray::::CAPACITY); 705 | assert_eq!(0, InlineArray::::CAPACITY); 706 | 707 | let mut chunk: InlineArray = InlineArray::new(); 708 | chunk.push(BigAlign(42)); 709 | assert_eq!( 710 | chunk.get(0).unwrap() as *const _ as usize % mem::align_of::(), 711 | 0 712 | ); 713 | } 714 | 715 | #[test] 716 | fn sufficient_alignment2() { 717 | #[repr(align(128))] 718 | struct BigAlign([u8; 64]); 719 | #[repr(align(256))] 720 | struct BiggerAlign(u8); 721 | assert_eq!(128, mem::align_of::()); 722 | assert_eq!(256, mem::align_of::()); 723 | 724 | assert_eq!(199, InlineArray::::CAPACITY); 725 | assert_eq!(3, InlineArray::::CAPACITY); 726 | 
assert_eq!(1, InlineArray::::CAPACITY); 727 | assert_eq!(0, InlineArray::::CAPACITY); 728 | 729 | let mut chunk: InlineArray = InlineArray::new(); 730 | chunk.push(BigAlign([0; 64])); 731 | assert_eq!( 732 | chunk.get(0).unwrap() as *const _ as usize % mem::align_of::(), 733 | 0 734 | ); 735 | } 736 | } 737 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // This Source Code Form is subject to the terms of the Mozilla Public 2 | // License, v. 2.0. If a copy of the MPL was not distributed with this 3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | //! # Sized Chunks 6 | //! 7 | //! This crate contains three fixed size low level array like data structures, 8 | //! primarily intended for use in [immutable.rs], but fully supported as a 9 | //! standalone crate. 10 | //! 11 | //! Their sizing information is encoded in the type using the 12 | //! [`typenum`][typenum] crate, which you may want to take a look at before 13 | //! reading on, but usually all you need to know about it is that it provides 14 | //! types `U1` to `U128` to represent numbers, which the data types take as type 15 | //! parameters, eg. `SparseChunk` would give you a sparse array with 16 | //! room for 32 elements of type `A`. You can also omit the size, as they all 17 | //! default to a size of 64, so `SparseChunk` would be a sparse array with a 18 | //! capacity of 64. 19 | //! 20 | //! All data structures always allocate the same amount of space, as determined 21 | //! by their capacity, regardless of how many elements they contain, and when 22 | //! they run out of space, they will panic. 23 | //! 24 | //! ## Data Structures 25 | //! 26 | //! | Type | Description | Push | Pop | Deref to `&[A]` | 27 | //! | ---- | ----------- | ---- | --- | --------------- | 28 | //! | [`Chunk`][Chunk] | Contiguous array | O(1)/O(n) | O(1) | Yes | 29 | //! | [`RingBuffer`][RingBuffer] | Non-contiguous array | O(1) | O(1) | No | 30 | //! | [`SparseChunk`][SparseChunk] | Sparse array | N/A | N/A | No | 31 | //! 32 | //! The [`Chunk`][Chunk] and [`RingBuffer`][RingBuffer] are very similar in 33 | //! practice, in that they both work like a plain array, except that you can 34 | //! push to either end with some expectation of performance. The difference is 35 | //! that [`RingBuffer`][RingBuffer] always allows you to do this in constant 36 | //! time, but in order to give that guarantee, it doesn't lay out its elements 37 | //! contiguously in memory, which means that you can't dereference it to a slice 38 | //! `&[A]`. 39 | //! 40 | //! [`Chunk`][Chunk], on the other hand, will shift its contents around when 41 | //! necessary to accommodate a push to a full side, but is able to guarantee a 42 | //! contiguous memory layout in this way, so it can always be dereferenced into 43 | //! a slice. Performance wise, repeated pushes to the same side will always run 44 | //! in constant time, but a push to one side followed by a push to the other 45 | //! side will cause the latter to run in linear time if there's no room (which 46 | //! there would only be if you've popped from that side). 47 | //! 48 | //! To choose between them, you can use the following rules: 49 | //! - I only ever want to push to the back: you don't need this crate, try 50 | //! [`ArrayVec`][ArrayVec]. 51 | //! - I need to push to either side but probably not both on the same array: use 52 | //! [`Chunk`][Chunk]. 53 | //! 
- I need to push to both sides and I don't need slices: use 54 | //! [`RingBuffer`][RingBuffer]. 55 | //! - I need to push to both sides but I do need slices: use [`Chunk`][Chunk]. 56 | //! 57 | //! Finally, [`SparseChunk`][SparseChunk] is a more efficient version of 58 | //! `Vec>`: each index is either inhabited or not, but instead of 59 | //! using the `Option` discriminant to decide which is which, it uses a compact 60 | //! bitmap. You can also think of `SparseChunk` as a `BTreeMap` 61 | //! where the `usize` must be less than `N`, but without the performance 62 | //! overhead. Its API is also more consistent with a map than an array - there's 63 | //! no push, pop, append, etc, just insert, remove and lookup. 64 | //! 65 | //! # [`InlineArray`][InlineArray] 66 | //! 67 | //! Finally, there's [`InlineArray`][InlineArray], which is a simple vector that's 68 | //! sized to fit inside any `Sized` type that's big enough to hold a size counter 69 | //! and at least one instance of the array element type. This can be a useful 70 | //! optimisation when implementing a list like data structure with a nontrivial 71 | //! set of pointers in its full form, where you could plausibly fit several 72 | //! elements inside the space allocated for the pointers. `im::Vector` is a 73 | //! good example of that, and the use case for which [`InlineArray`][InlineArray] 74 | //! was implemented. 75 | //! 76 | //! # Feature Flags 77 | //! 78 | //! The following feature flags are available: 79 | //! 80 | //! | Feature | Description | 81 | //! | ------- | ----------- | 82 | //! | `arbitrary` | Provides [`Arbitrary`][Arbitrary] implementations from the [`arbitrary`][arbitrary_crate] crate. Requires the `std` flag. | 83 | //! | `refpool` | Provides [`PoolDefault`][PoolDefault] and [`PoolClone`][PoolClone] implemetations from the [`refpool`][refpool] crate. | 84 | //! | `ringbuffer` | Enables the [`RingBuffer`][RingBuffer] data structure. | 85 | //! | `std` | Without this flag (enabled by default), the crate will be `no_std`, and absent traits relating to `std::collections` and `std::io`. | 86 | //! 87 | //! [immutable.rs]: https://immutable.rs/ 88 | //! [typenum]: https://docs.rs/typenum/ 89 | //! [Chunk]: struct.Chunk.html 90 | //! [RingBuffer]: struct.RingBuffer.html 91 | //! [SparseChunk]: struct.SparseChunk.html 92 | //! [InlineArray]: struct.InlineArray.html 93 | //! [ArrayVec]: https://docs.rs/arrayvec/ 94 | //! [Arbitrary]: https://docs.rs/arbitrary/latest/arbitrary/trait.Arbitrary.html 95 | //! [arbitrary_crate]: https://docs.rs/arbitrary 96 | //! [refpool]: https://docs.rs/refpool 97 | //! [PoolDefault]: https://docs.rs/refpool/latest/refpool/trait.PoolDefault.html 98 | //! 
[PoolClone]: https://docs.rs/refpool/latest/refpool/trait.PoolClone.html 99 | 100 | #![forbid(rust_2018_idioms)] 101 | #![deny(nonstandard_style)] 102 | #![warn(unreachable_pub, missing_docs)] 103 | #![cfg_attr(test, deny(warnings))] 104 | #![cfg_attr(not(any(feature = "std", test)), no_std)] 105 | // Jeremy Francis Corbyn, clippy devs need to calm down 🤦‍♀️ 106 | #![allow(clippy::suspicious_op_assign_impl, clippy::suspicious_arithmetic_impl)] 107 | 108 | pub mod inline_array; 109 | pub mod sized_chunk; 110 | pub mod sparse_chunk; 111 | 112 | #[cfg(test)] 113 | mod tests; 114 | 115 | #[cfg(feature = "arbitrary")] 116 | mod arbitrary; 117 | 118 | pub use crate::inline_array::InlineArray; 119 | pub use crate::sized_chunk::Chunk; 120 | pub use crate::sparse_chunk::SparseChunk; 121 | 122 | #[cfg(feature = "ringbuffer")] 123 | pub mod ring_buffer; 124 | #[cfg(feature = "ringbuffer")] 125 | pub use crate::ring_buffer::RingBuffer; 126 | -------------------------------------------------------------------------------- /src/ring_buffer/index.rs: -------------------------------------------------------------------------------- 1 | // This Source Code Form is subject to the terms of the Mozilla Public 2 | // License, v. 2.0. If a copy of the MPL was not distributed with this 3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | use core::iter::FusedIterator; 6 | use core::ops::{Add, AddAssign, Sub, SubAssign}; 7 | 8 | pub(crate) struct RawIndex(usize); 9 | 10 | impl Clone for RawIndex { 11 | #[inline] 12 | #[must_use] 13 | fn clone(&self) -> Self { 14 | self.0.into() 15 | } 16 | } 17 | 18 | impl Copy for RawIndex {} 19 | 20 | impl RawIndex { 21 | #[inline] 22 | #[must_use] 23 | pub(crate) fn to_usize(self) -> usize { 24 | self.0 25 | } 26 | 27 | /// Increments the index and returns a copy of the index /before/ incrementing. 28 | #[inline] 29 | #[must_use] 30 | pub(crate) fn inc(&mut self) -> Self { 31 | let old = *self; 32 | self.0 = if self.0 == N - 1 { 0 } else { self.0 + 1 }; 33 | old 34 | } 35 | 36 | /// Decrements the index and returns a copy of the new value. 
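// Standalone sketch, not crate code: the wrap-around rules above are compact
// but easy to misread. `inc` is a post-increment and `dec` a pre-decrement,
// both modulo the capacity N. The same arithmetic on a plain usize:

fn inc(index: &mut usize, n: usize) -> usize {
    let old = *index;
    *index = if *index == n - 1 { 0 } else { *index + 1 };
    old
}

fn dec(index: &mut usize, n: usize) -> usize {
    *index = if *index == 0 { n - 1 } else { *index - 1 };
    *index
}

fn wrap_demo() {
    const N: usize = 4;
    let mut i = 3;
    assert_eq!(3, inc(&mut i, N)); // returns the old value...
    assert_eq!(0, i); // ...after wrapping 3 -> 0
    let mut j = 0;
    assert_eq!(3, dec(&mut j, N)); // wraps 0 -> 3 and returns the new value
}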
37 | #[inline] 38 | #[must_use] 39 | pub(crate) fn dec(&mut self) -> Self { 40 | self.0 = if self.0 == 0 { N - 1 } else { self.0 - 1 }; 41 | *self 42 | } 43 | } 44 | 45 | impl From for RawIndex { 46 | #[inline] 47 | #[must_use] 48 | fn from(index: usize) -> Self { 49 | debug_assert!(index < N); 50 | RawIndex(index) 51 | } 52 | } 53 | 54 | impl PartialEq for RawIndex { 55 | #[inline] 56 | #[must_use] 57 | fn eq(&self, other: &Self) -> bool { 58 | self.0 == other.0 59 | } 60 | } 61 | 62 | impl Eq for RawIndex {} 63 | 64 | impl Add for RawIndex { 65 | type Output = RawIndex; 66 | #[inline] 67 | #[must_use] 68 | fn add(self, other: Self) -> Self::Output { 69 | self + other.0 70 | } 71 | } 72 | 73 | impl Add for RawIndex { 74 | type Output = RawIndex; 75 | #[inline] 76 | #[must_use] 77 | fn add(self, other: usize) -> Self::Output { 78 | let mut result = self.0 + other; 79 | while result >= N { 80 | result -= N; 81 | } 82 | result.into() 83 | } 84 | } 85 | 86 | impl AddAssign for RawIndex { 87 | #[inline] 88 | fn add_assign(&mut self, other: usize) { 89 | self.0 += other; 90 | while self.0 >= N { 91 | self.0 -= N; 92 | } 93 | } 94 | } 95 | 96 | impl Sub for RawIndex { 97 | type Output = RawIndex; 98 | #[inline] 99 | #[must_use] 100 | fn sub(self, other: Self) -> Self::Output { 101 | self - other.0 102 | } 103 | } 104 | 105 | impl Sub for RawIndex { 106 | type Output = RawIndex; 107 | #[inline] 108 | #[must_use] 109 | fn sub(self, other: usize) -> Self::Output { 110 | let mut start = self.0; 111 | while other > start { 112 | start += N; 113 | } 114 | (start - other).into() 115 | } 116 | } 117 | 118 | impl SubAssign for RawIndex { 119 | #[inline] 120 | fn sub_assign(&mut self, other: usize) { 121 | while other > self.0 { 122 | self.0 += N; 123 | } 124 | self.0 -= other; 125 | } 126 | } 127 | 128 | pub(crate) struct IndexIter { 129 | pub(crate) remaining: usize, 130 | pub(crate) left_index: RawIndex, 131 | pub(crate) right_index: RawIndex, 132 | } 133 | 134 | impl Iterator for IndexIter { 135 | type Item = RawIndex; 136 | #[inline] 137 | fn next(&mut self) -> Option { 138 | if self.remaining > 0 { 139 | self.remaining -= 1; 140 | Some(self.left_index.inc()) 141 | } else { 142 | None 143 | } 144 | } 145 | 146 | #[inline] 147 | #[must_use] 148 | fn size_hint(&self) -> (usize, Option) { 149 | (self.remaining, Some(self.remaining)) 150 | } 151 | } 152 | 153 | impl DoubleEndedIterator for IndexIter { 154 | #[inline] 155 | fn next_back(&mut self) -> Option { 156 | if self.remaining > 0 { 157 | self.remaining -= 1; 158 | Some(self.right_index.dec()) 159 | } else { 160 | None 161 | } 162 | } 163 | } 164 | 165 | impl ExactSizeIterator for IndexIter {} 166 | 167 | impl FusedIterator for IndexIter {} 168 | -------------------------------------------------------------------------------- /src/ring_buffer/iter.rs: -------------------------------------------------------------------------------- 1 | // This Source Code Form is subject to the terms of the Mozilla Public 2 | // License, v. 2.0. If a copy of the MPL was not distributed with this 3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | use core::iter::FusedIterator; 6 | use core::marker::PhantomData; 7 | 8 | use super::{index::RawIndex, RingBuffer}; 9 | use array_ops::HasLength; 10 | 11 | /// A reference iterator over a `RingBuffer`. 
12 | pub struct Iter<'a, A, const N: usize> { 13 | pub(crate) buffer: &'a RingBuffer, 14 | pub(crate) left_index: RawIndex, 15 | pub(crate) right_index: RawIndex, 16 | pub(crate) remaining: usize, 17 | } 18 | 19 | impl<'a, A, const N: usize> Iterator for Iter<'a, A, N> { 20 | type Item = &'a A; 21 | 22 | fn next(&mut self) -> Option { 23 | if self.remaining == 0 { 24 | None 25 | } else { 26 | self.remaining -= 1; 27 | Some(unsafe { &*self.buffer.ptr(self.left_index.inc()) }) 28 | } 29 | } 30 | 31 | #[inline] 32 | #[must_use] 33 | fn size_hint(&self) -> (usize, Option) { 34 | (self.remaining, Some(self.remaining)) 35 | } 36 | } 37 | 38 | impl<'a, A, const N: usize> DoubleEndedIterator for Iter<'a, A, N> { 39 | fn next_back(&mut self) -> Option { 40 | if self.remaining == 0 { 41 | None 42 | } else { 43 | self.remaining -= 1; 44 | Some(unsafe { &*self.buffer.ptr(self.right_index.dec()) }) 45 | } 46 | } 47 | } 48 | 49 | impl<'a, A, const N: usize> ExactSizeIterator for Iter<'a, A, N> {} 50 | 51 | impl<'a, A, const N: usize> FusedIterator for Iter<'a, A, N> {} 52 | 53 | /// A mutable reference iterator over a `RingBuffer`. 54 | pub struct IterMut<'a, A, const N: usize> { 55 | data: *mut A, 56 | left_index: RawIndex, 57 | right_index: RawIndex, 58 | remaining: usize, 59 | phantom: PhantomData<&'a ()>, 60 | } 61 | 62 | impl<'a, A, const N: usize> IterMut<'a, A, N> 63 | where 64 | A: 'a, 65 | { 66 | pub(crate) fn new(buffer: &mut RingBuffer) -> Self { 67 | Self::new_slice(buffer, buffer.origin, buffer.len()) 68 | } 69 | 70 | pub(crate) fn new_slice( 71 | buffer: &mut RingBuffer, 72 | origin: RawIndex, 73 | len: usize, 74 | ) -> Self { 75 | Self { 76 | left_index: origin, 77 | right_index: origin + len, 78 | remaining: len, 79 | phantom: PhantomData, 80 | data: buffer.data.as_mut_ptr().cast(), 81 | } 82 | } 83 | 84 | unsafe fn mut_ptr(&mut self, index: RawIndex) -> *mut A { 85 | self.data.add(index.to_usize()) 86 | } 87 | } 88 | 89 | impl<'a, A, const N: usize> Iterator for IterMut<'a, A, N> 90 | where 91 | A: 'a, 92 | { 93 | type Item = &'a mut A; 94 | 95 | fn next(&mut self) -> Option { 96 | if self.remaining == 0 { 97 | None 98 | } else { 99 | self.remaining -= 1; 100 | let index = self.left_index.inc(); 101 | Some(unsafe { &mut *self.mut_ptr(index) }) 102 | } 103 | } 104 | 105 | #[inline] 106 | #[must_use] 107 | fn size_hint(&self) -> (usize, Option) { 108 | (self.remaining, Some(self.remaining)) 109 | } 110 | } 111 | 112 | impl<'a, A, const N: usize> DoubleEndedIterator for IterMut<'a, A, N> 113 | where 114 | A: 'a, 115 | { 116 | fn next_back(&mut self) -> Option { 117 | if self.remaining == 0 { 118 | None 119 | } else { 120 | self.remaining -= 1; 121 | let index = self.right_index.dec(); 122 | Some(unsafe { &mut *self.mut_ptr(index) }) 123 | } 124 | } 125 | } 126 | 127 | impl<'a, A, const N: usize> ExactSizeIterator for IterMut<'a, A, N> where A: 'a {} 128 | 129 | impl<'a, A, const N: usize> FusedIterator for IterMut<'a, A, N> where A: 'a {} 130 | 131 | /// A draining iterator over a `RingBuffer`. 
132 | pub struct Drain<'a, A, const N: usize> { 133 | pub(crate) buffer: &'a mut RingBuffer, 134 | } 135 | 136 | impl<'a, A: 'a, const N: usize> Iterator for Drain<'a, A, N> { 137 | type Item = A; 138 | 139 | #[inline] 140 | fn next(&mut self) -> Option { 141 | self.buffer.pop_front() 142 | } 143 | 144 | #[inline] 145 | #[must_use] 146 | fn size_hint(&self) -> (usize, Option) { 147 | (self.buffer.len(), Some(self.buffer.len())) 148 | } 149 | } 150 | 151 | impl<'a, A: 'a, const N: usize> DoubleEndedIterator for Drain<'a, A, N> { 152 | #[inline] 153 | fn next_back(&mut self) -> Option { 154 | self.buffer.pop_back() 155 | } 156 | } 157 | 158 | impl<'a, A: 'a, const N: usize> ExactSizeIterator for Drain<'a, A, N> {} 159 | 160 | impl<'a, A: 'a, const N: usize> FusedIterator for Drain<'a, A, N> {} 161 | 162 | /// A consuming iterator over a `RingBuffer`. 163 | pub struct OwnedIter { 164 | pub(crate) buffer: RingBuffer, 165 | } 166 | 167 | impl Iterator for OwnedIter { 168 | type Item = A; 169 | 170 | #[inline] 171 | fn next(&mut self) -> Option { 172 | self.buffer.pop_front() 173 | } 174 | 175 | #[inline] 176 | #[must_use] 177 | fn size_hint(&self) -> (usize, Option) { 178 | (self.buffer.len(), Some(self.buffer.len())) 179 | } 180 | } 181 | 182 | impl DoubleEndedIterator for OwnedIter { 183 | #[inline] 184 | fn next_back(&mut self) -> Option { 185 | self.buffer.pop_back() 186 | } 187 | } 188 | 189 | impl ExactSizeIterator for OwnedIter {} 190 | 191 | impl FusedIterator for OwnedIter {} 192 | -------------------------------------------------------------------------------- /src/ring_buffer/mod.rs: -------------------------------------------------------------------------------- 1 | // This Source Code Form is subject to the terms of the Mozilla Public 2 | // License, v. 2.0. If a copy of the MPL was not distributed with this 3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | //! A fixed capacity ring buffer. 6 | //! 7 | //! See [`RingBuffer`](struct.RingBuffer.html) 8 | 9 | use core::borrow::Borrow; 10 | use core::cmp::Ordering; 11 | use core::fmt::{Debug, Error, Formatter}; 12 | use core::hash::{Hash, Hasher}; 13 | use core::iter::FromIterator; 14 | use core::mem::{replace, MaybeUninit}; 15 | use core::ops::{Bound, Range, RangeBounds}; 16 | use core::ops::{Index, IndexMut}; 17 | 18 | pub use array_ops::{Array, ArrayMut, HasLength}; 19 | 20 | mod index; 21 | use index::{IndexIter, RawIndex}; 22 | 23 | mod iter; 24 | pub use iter::{Drain, Iter, IterMut, OwnedIter}; 25 | 26 | mod slice; 27 | pub use slice::{Slice, SliceMut}; 28 | 29 | #[cfg(feature = "refpool")] 30 | mod refpool; 31 | 32 | /// A fixed capacity ring buffer. 33 | /// 34 | /// A ring buffer is an array where the first logical index is at some arbitrary 35 | /// location inside the array, and the indices wrap around to the start of the 36 | /// array once they overflow its bounds. 37 | /// 38 | /// This gives us the ability to push to either the front or the end of the 39 | /// array in constant time, at the cost of losing the ability to get a single 40 | /// contiguous slice reference to the contents. 41 | /// 42 | /// It differs from the [`Chunk`][Chunk] in that the latter will have mostly 43 | /// constant time pushes, but may occasionally need to shift its contents around 44 | /// to make room. They both have constant time pop, and they both have linear 45 | /// time insert and remove. 
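// Sketch, not crate code: a short usage example of the trade-off described
// above, constant-time pushes at both ends in exchange for no contiguous
// slice. Requires the `ringbuffer` feature; the element type and size here
// are illustrative.

use sized_chunks::RingBuffer;

fn ring_ops() {
    let mut buffer: RingBuffer<i32, 8> = RingBuffer::new();
    buffer.push_back(2);
    buffer.push_back(3);
    buffer.push_front(1); // wraps behind the origin, still O(1)
    assert_eq!(Some(1), buffer.pop_front());
    assert_eq!(vec![2, 3], buffer.iter().cloned().collect::<Vec<_>>());
}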
46 | /// 47 | /// The `RingBuffer` offers its own [`Slice`][Slice] and [`SliceMut`][SliceMut] 48 | /// types to compensate for the loss of being able to take a slice, but they're 49 | /// somewhat less efficient, so the general rule should be that you shouldn't 50 | /// choose a `RingBuffer` if you rely heavily on slices - but if you don't, 51 | /// it's probably a marginally better choice overall than [`Chunk`][Chunk]. 52 | /// 53 | /// # Feature Flag 54 | /// 55 | /// To use this data structure, you need to enable the `ringbuffer` feature. 56 | /// 57 | /// [Chunk]: ../sized_chunk/struct.Chunk.html 58 | /// [Slice]: struct.Slice.html 59 | /// [SliceMut]: struct.SliceMut.html 60 | pub struct RingBuffer { 61 | origin: RawIndex, 62 | length: usize, 63 | data: MaybeUninit<[A; N]>, 64 | } 65 | 66 | impl Drop for RingBuffer { 67 | #[inline] 68 | fn drop(&mut self) { 69 | if core::mem::needs_drop::() { 70 | for i in self.range() { 71 | unsafe { self.force_drop(i) } 72 | } 73 | } 74 | } 75 | } 76 | 77 | impl HasLength for RingBuffer { 78 | /// Get the length of the ring buffer. 79 | #[inline] 80 | #[must_use] 81 | fn len(&self) -> usize { 82 | self.length 83 | } 84 | } 85 | 86 | impl Array for RingBuffer { 87 | /// Get a reference to the value at a given index. 88 | #[must_use] 89 | fn get(&self, index: usize) -> Option<&A> { 90 | if index >= self.len() { 91 | None 92 | } else { 93 | Some(unsafe { self.get_unchecked(index) }) 94 | } 95 | } 96 | } 97 | 98 | impl ArrayMut for RingBuffer { 99 | /// Get a mutable reference to the value at a given index. 100 | #[must_use] 101 | fn get_mut(&mut self, index: usize) -> Option<&mut A> { 102 | if index >= self.len() { 103 | None 104 | } else { 105 | Some(unsafe { self.get_unchecked_mut(index) }) 106 | } 107 | } 108 | } 109 | 110 | impl RingBuffer { 111 | /// The capacity of this ring buffer, as a `usize`. 112 | pub const CAPACITY: usize = N; 113 | 114 | /// Get the raw index for a logical index. 115 | #[inline] 116 | fn raw(&self, index: usize) -> RawIndex { 117 | self.origin + index 118 | } 119 | 120 | #[inline] 121 | unsafe fn ptr(&self, index: RawIndex) -> *const A { 122 | debug_assert!(index.to_usize() < Self::CAPACITY); 123 | (&self.data as *const _ as *const A).add(index.to_usize()) 124 | } 125 | 126 | #[inline] 127 | unsafe fn mut_ptr(&mut self, index: RawIndex) -> *mut A { 128 | debug_assert!(index.to_usize() < Self::CAPACITY); 129 | (&mut self.data as *mut _ as *mut A).add(index.to_usize()) 130 | } 131 | 132 | /// Drop the value at a raw index. 133 | #[inline] 134 | unsafe fn force_drop(&mut self, index: RawIndex) { 135 | core::ptr::drop_in_place(self.mut_ptr(index)) 136 | } 137 | 138 | /// Copy the value at a raw index, discarding ownership of the copied value 139 | #[inline] 140 | unsafe fn force_read(&self, index: RawIndex) -> A { 141 | core::ptr::read(self.ptr(index)) 142 | } 143 | 144 | /// Write a value at a raw index without trying to drop what's already there 145 | #[inline] 146 | unsafe fn force_write(&mut self, index: RawIndex, value: A) { 147 | core::ptr::write(self.mut_ptr(index), value) 148 | } 149 | 150 | /// Copy a range of raw indices from another buffer. 
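// Standalone sketch, not crate code: the copy helpers below all deal with the
// same seam problem. A logically contiguous range can wrap past the end of
// the backing array, and must then be copied as two physical segments. The
// same split in safe code, using slices instead of raw pointers:

fn copy_wrapping(target: &mut [u8], source: &[u8], to: usize) {
    let n = target.len();
    assert!(source.len() <= n && to < n);
    if to + source.len() > n {
        let first = n - to; // how much fits before the seam
        target[to..].copy_from_slice(&source[..first]);
        target[..source.len() - first].copy_from_slice(&source[first..]);
    } else {
        target[to..to + source.len()].copy_from_slice(source);
    }
}

fn seam_demo() {
    let mut ring = [0u8; 8];
    copy_wrapping(&mut ring, &[1, 2, 3, 4], 6); // two at the end, two at the start
    assert_eq!([3, 4, 0, 0, 0, 0, 1, 2], ring);
}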
151 | unsafe fn copy_from( 152 | &mut self, 153 | source: &mut Self, 154 | from: RawIndex, 155 | to: RawIndex, 156 | count: usize, 157 | ) { 158 | #[inline] 159 | unsafe fn force_copy_to( 160 | source: &mut RingBuffer, 161 | from: RawIndex, 162 | target: &mut RingBuffer, 163 | to: RawIndex, 164 | count: usize, 165 | ) { 166 | if count > 0 { 167 | debug_assert!(from.to_usize() + count <= RingBuffer::::CAPACITY); 168 | debug_assert!(to.to_usize() + count <= RingBuffer::::CAPACITY); 169 | core::ptr::copy_nonoverlapping(source.mut_ptr(from), target.mut_ptr(to), count) 170 | } 171 | } 172 | 173 | if from.to_usize() + count > Self::CAPACITY { 174 | let first_length = Self::CAPACITY - from.to_usize(); 175 | let last_length = count - first_length; 176 | self.copy_from(source, from, to, first_length); 177 | self.copy_from(source, 0.into(), to + first_length, last_length); 178 | } else if to.to_usize() + count > Self::CAPACITY { 179 | let first_length = Self::CAPACITY - to.to_usize(); 180 | let last_length = count - first_length; 181 | force_copy_to(source, from, self, to, first_length); 182 | force_copy_to(source, from + first_length, self, 0.into(), last_length); 183 | } else { 184 | force_copy_to(source, from, self, to, count); 185 | } 186 | } 187 | 188 | /// Copy values from a slice. 189 | #[allow(dead_code)] 190 | unsafe fn copy_from_slice(&mut self, source: &[A], to: RawIndex) { 191 | let count = source.len(); 192 | debug_assert!(to.to_usize() + count <= Self::CAPACITY); 193 | if to.to_usize() + count > Self::CAPACITY { 194 | let first_length = Self::CAPACITY - to.to_usize(); 195 | let first_slice = &source[..first_length]; 196 | let last_slice = &source[first_length..]; 197 | core::ptr::copy_nonoverlapping( 198 | first_slice.as_ptr(), 199 | self.mut_ptr(to), 200 | first_slice.len(), 201 | ); 202 | core::ptr::copy_nonoverlapping( 203 | last_slice.as_ptr(), 204 | self.mut_ptr(0.into()), 205 | last_slice.len(), 206 | ); 207 | } else { 208 | core::ptr::copy_nonoverlapping(source.as_ptr(), self.mut_ptr(to), count) 209 | } 210 | } 211 | 212 | /// Get an iterator over the raw indices of the buffer from left to right. 213 | #[inline] 214 | fn range(&self) -> IndexIter { 215 | IndexIter { 216 | remaining: self.len(), 217 | left_index: self.origin, 218 | right_index: self.origin + self.len(), 219 | } 220 | } 221 | 222 | /// Construct an empty ring buffer. 223 | #[inline] 224 | #[must_use] 225 | pub fn new() -> Self { 226 | Self { 227 | origin: 0.into(), 228 | length: 0, 229 | data: MaybeUninit::uninit(), 230 | } 231 | } 232 | 233 | /// Construct a ring buffer with a single item. 234 | #[inline] 235 | #[must_use] 236 | pub fn unit(value: A) -> Self { 237 | assert!(Self::CAPACITY >= 1); 238 | let mut buffer = Self { 239 | origin: 0.into(), 240 | length: 1, 241 | data: MaybeUninit::uninit(), 242 | }; 243 | unsafe { 244 | buffer.force_write(0.into(), value); 245 | } 246 | buffer 247 | } 248 | 249 | /// Construct a ring buffer with two items. 250 | #[inline] 251 | #[must_use] 252 | pub fn pair(value1: A, value2: A) -> Self { 253 | assert!(Self::CAPACITY >= 2); 254 | let mut buffer = Self { 255 | origin: 0.into(), 256 | length: 2, 257 | data: MaybeUninit::uninit(), 258 | }; 259 | unsafe { 260 | buffer.force_write(0.into(), value1); 261 | buffer.force_write(1.into(), value2); 262 | } 263 | buffer 264 | } 265 | 266 | /// Construct a new ring buffer and move every item from `other` into the 267 | /// new buffer. 
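// Sketch, not crate code: a brief example of the constructors above.
// Requires the `ringbuffer` feature; the `HasLength` trait (re-exported from
// `array_ops` by this module) must be in scope for len() and is_empty().

use sized_chunks::ring_buffer::HasLength;
use sized_chunks::RingBuffer;

fn constructors() {
    let mut first: RingBuffer<i32, 8> = RingBuffer::pair(1, 2);
    let second = RingBuffer::drain_from(&mut first); // moves everything out
    assert!(first.is_empty());
    assert_eq!(2, second.len());
    // collect_from takes exactly `count` items, panicking if the iterator
    // runs dry, and leaves the rest of the iterator untouched.
    let mut numbers = 1..=100;
    let third: RingBuffer<i32, 8> = RingBuffer::collect_from(&mut numbers, 3);
    assert_eq!(3, third.len());
    assert_eq!(Some(4), numbers.next());
}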
268 |     ///
269 |     /// Time: O(n)
270 |     #[inline]
271 |     #[must_use]
272 |     pub fn drain_from(other: &mut Self) -> Self {
273 |         Self::from_front(other, other.len())
274 |     }
275 | 
276 |     /// Construct a new ring buffer and populate it by taking `count` items from
277 |     /// the iterator `iter`.
278 |     ///
279 |     /// Panics if the iterator contains fewer than `count` items.
280 |     ///
281 |     /// Time: O(n)
282 |     #[must_use]
283 |     pub fn collect_from<I>(iter: &mut I, count: usize) -> Self
284 |     where
285 |         I: Iterator<Item = A>,
286 |     {
287 |         let buffer = Self::from_iter(iter.take(count));
288 |         if buffer.len() < count {
289 |             panic!("RingBuffer::collect_from: underfull iterator");
290 |         }
291 |         buffer
292 |     }
293 | 
294 |     /// Construct a new ring buffer and populate it by taking `count` items from
295 |     /// the front of `other`.
296 |     ///
297 |     /// Time: O(n) for the number of items moved
298 |     #[must_use]
299 |     pub fn from_front(other: &mut Self, count: usize) -> Self {
300 |         let mut buffer = Self::new();
301 |         buffer.drain_from_front(other, count);
302 |         buffer
303 |     }
304 | 
305 |     /// Construct a new ring buffer and populate it by taking `count` items from
306 |     /// the back of `other`.
307 |     ///
308 |     /// Time: O(n) for the number of items moved
309 |     #[must_use]
310 |     pub fn from_back(other: &mut Self, count: usize) -> Self {
311 |         let mut buffer = Self::new();
312 |         buffer.drain_from_back(other, count);
313 |         buffer
314 |     }
315 | 
316 |     /// Test if the ring buffer is full.
317 |     #[inline]
318 |     #[must_use]
319 |     pub fn is_full(&self) -> bool {
320 |         self.len() == Self::CAPACITY
321 |     }
322 | 
323 |     /// Get an iterator over references to the items in the ring buffer in
324 |     /// order.
325 |     #[inline]
326 |     #[must_use]
327 |     pub fn iter(&self) -> Iter<'_, A, N> {
328 |         Iter {
329 |             buffer: self,
330 |             left_index: self.origin,
331 |             right_index: self.origin + self.len(),
332 |             remaining: self.len(),
333 |         }
334 |     }
335 | 
336 |     /// Get an iterator over mutable references to the items in the ring buffer
337 |     /// in order.
338 |     #[inline]
339 |     #[must_use]
340 |     pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
341 |         IterMut::new(self)
342 |     }
343 | 
344 |     #[must_use]
345 |     fn parse_range<R: RangeBounds<usize>>(&self, range: R) -> Range<usize> {
346 |         let new_range = Range {
347 |             start: match range.start_bound() {
348 |                 Bound::Unbounded => 0,
349 |                 Bound::Included(index) => *index,
350 |                 Bound::Excluded(_) => unimplemented!(),
351 |             },
352 |             end: match range.end_bound() {
353 |                 Bound::Unbounded => self.len(),
354 |                 Bound::Included(index) => *index + 1,
355 |                 Bound::Excluded(index) => *index,
356 |             },
357 |         };
358 |         if new_range.end > self.len() || new_range.start > new_range.end {
359 |             panic!("RingBuffer::parse_range: index out of bounds");
360 |         }
361 |         new_range
362 |     }
363 | 
364 |     /// Get a `Slice` for a subset of the ring buffer.
365 |     #[must_use]
366 |     pub fn slice<R: RangeBounds<usize>>(&self, range: R) -> Slice<'_, A, N> {
367 |         Slice {
368 |             buffer: self,
369 |             range: self.parse_range(range),
370 |         }
371 |     }
372 | 
373 |     /// Get a `SliceMut` for a subset of the ring buffer.
374 |     #[must_use]
375 |     pub fn slice_mut<R: RangeBounds<usize>>(&mut self, range: R) -> SliceMut<'_, A, N> {
376 |         SliceMut {
377 |             range: self.parse_range(range),
378 |             buffer: self,
379 |         }
380 |     }
381 | 
382 |     /// Get an unchecked reference to the value at the given index.
383 |     ///
384 |     /// # Safety
385 |     ///
386 |     /// You must ensure the index is not out of bounds.
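    // A minimal usage sketch for `slice`/`slice_mut` above (assuming the
    // `ringbuffer` feature and `array_ops::HasLength` in scope for `len`):
    //
    //     let mut buffer: RingBuffer<i32, 8> = (0..8).collect();
    //     let read_only = buffer.slice(2..6);     // logical indices 2..=5
    //     assert_eq!(4, read_only.len());
    //     let mut writable = buffer.slice_mut(..4);
    //     writable[0] = 100;                      // writes through to the buffer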
387 |     #[must_use]
388 |     pub unsafe fn get_unchecked(&self, index: usize) -> &A {
389 |         &*self.ptr(self.raw(index))
390 |     }
391 | 
392 |     /// Get an unchecked mutable reference to the value at the given index.
393 |     ///
394 |     /// # Safety
395 |     ///
396 |     /// You must ensure the index is not out of bounds.
397 |     #[must_use]
398 |     pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A {
399 |         &mut *self.mut_ptr(self.raw(index))
400 |     }
401 | 
402 |     /// Push a value to the back of the buffer.
403 |     ///
404 |     /// Panics if the capacity of the buffer is exceeded.
405 |     ///
406 |     /// Time: O(1)
407 |     pub fn push_back(&mut self, value: A) {
408 |         if self.is_full() {
409 |             panic!("RingBuffer::push_back: can't push to a full buffer")
410 |         } else {
411 |             unsafe { self.force_write(self.raw(self.length), value) }
412 |             self.length += 1;
413 |         }
414 |     }
415 | 
416 |     /// Push a value to the front of the buffer.
417 |     ///
418 |     /// Panics if the capacity of the buffer is exceeded.
419 |     ///
420 |     /// Time: O(1)
421 |     pub fn push_front(&mut self, value: A) {
422 |         if self.is_full() {
423 |             panic!("RingBuffer::push_front: can't push to a full buffer")
424 |         } else {
425 |             let origin = self.origin.dec();
426 |             self.length += 1;
427 |             unsafe { self.force_write(origin, value) }
428 |         }
429 |     }
430 | 
431 |     /// Pop a value from the back of the buffer.
432 |     ///
433 |     /// Returns `None` if the buffer is empty.
434 |     ///
435 |     /// Time: O(1)
436 |     pub fn pop_back(&mut self) -> Option<A> {
437 |         if self.is_empty() {
438 |             None
439 |         } else {
440 |             self.length -= 1;
441 |             Some(unsafe { self.force_read(self.raw(self.length)) })
442 |         }
443 |     }
444 | 
445 |     /// Pop a value from the front of the buffer.
446 |     ///
447 |     /// Returns `None` if the buffer is empty.
448 |     ///
449 |     /// Time: O(1)
450 |     pub fn pop_front(&mut self) -> Option<A> {
451 |         if self.is_empty() {
452 |             None
453 |         } else {
454 |             self.length -= 1;
455 |             let index = self.origin.inc();
456 |             Some(unsafe { self.force_read(index) })
457 |         }
458 |     }
459 | 
460 |     /// Discard all items up to but not including `index`.
461 |     ///
462 |     /// Panics if `index` is out of bounds.
463 |     ///
464 |     /// Time: O(n) for the number of items dropped
465 |     pub fn drop_left(&mut self, index: usize) {
466 |         if index > 0 {
467 |             if index > self.len() {
468 |                 panic!("RingBuffer::drop_left: index out of bounds");
469 |             }
470 |             for i in self.range().take(index) {
471 |                 unsafe { self.force_drop(i) }
472 |             }
473 |             self.origin += index;
474 |             self.length -= index;
475 |         }
476 |     }
477 | 
478 |     /// Discard all items from `index` onward.
479 |     ///
480 |     /// Panics if `index` is out of bounds.
481 |     ///
482 |     /// Time: O(n) for the number of items dropped
483 |     pub fn drop_right(&mut self, index: usize) {
484 |         if index > self.len() {
485 |             panic!("RingBuffer::drop_right: index out of bounds");
486 |         }
487 |         if index == self.len() {
488 |             return;
489 |         }
490 |         for i in self.range().skip(index) {
491 |             unsafe { self.force_drop(i) }
492 |         }
493 |         self.length = index;
494 |     }
495 | 
496 |     /// Split a buffer into two, the original buffer containing
497 |     /// everything up to `index` and the returned buffer containing
498 |     /// everything from `index` onwards.
499 |     ///
500 |     /// Panics if `index` is out of bounds.
501 |     ///
502 |     /// Time: O(n) for the number of items in the new buffer
503 |     #[must_use]
504 |     pub fn split_off(&mut self, index: usize) -> Self {
505 |         if index > self.len() {
506 |             panic!("RingBuffer::split_off: index out of bounds");
507 |         }
508 |         if index == self.len() {
509 |             return Self::new();
510 |         }
511 |         let mut right = Self::new();
512 |         let length = self.length - index;
513 |         unsafe { right.copy_from(self, self.raw(index), 0.into(), length) };
514 |         self.length = index;
515 |         right.length = length;
516 |         right
517 |     }
518 | 
519 |     /// Remove all items from `other` and append them to the back of `self`.
520 |     ///
521 |     /// Panics if the capacity of `self` is exceeded.
522 |     ///
523 |     /// `other` will be an empty buffer after this operation.
524 |     ///
525 |     /// Time: O(n) for the number of items moved
526 |     #[inline]
527 |     pub fn append(&mut self, other: &mut Self) {
528 |         self.drain_from_front(other, other.len());
529 |     }
530 | 
531 |     /// Remove `count` items from the front of `other` and append them to the
532 |     /// back of `self`.
533 |     ///
534 |     /// Panics if `self` doesn't have room for `count` more items, or if
535 |     /// `other` has fewer than `count` items.
536 |     ///
537 |     /// Time: O(n) for the number of items moved
538 |     pub fn drain_from_front(&mut self, other: &mut Self, count: usize) {
539 |         let self_len = self.len();
540 |         let other_len = other.len();
541 |         if self_len + count > Self::CAPACITY {
542 |             panic!("RingBuffer::drain_from_front: chunk size overflow");
543 |         }
544 |         if other_len < count {
545 |             panic!("RingBuffer::drain_from_front: index out of bounds");
546 |         }
547 |         unsafe { self.copy_from(other, other.origin, self.raw(self.len()), count) };
548 |         other.origin += count;
549 |         other.length -= count;
550 |         self.length += count;
551 |     }
552 | 
553 |     /// Remove `count` items from the back of `other` and append them to the
554 |     /// front of `self`.
555 |     ///
556 |     /// Panics if `self` doesn't have room for `count` more items, or if
557 |     /// `other` has fewer than `count` items.
558 |     ///
559 |     /// Time: O(n) for the number of items moved
560 |     pub fn drain_from_back(&mut self, other: &mut Self, count: usize) {
561 |         let self_len = self.len();
562 |         let other_len = other.len();
563 |         if self_len + count > Self::CAPACITY {
564 |             panic!("RingBuffer::drain_from_back: chunk size overflow");
565 |         }
566 |         if other_len < count {
567 |             panic!("RingBuffer::drain_from_back: index out of bounds");
568 |         }
569 |         self.origin -= count;
570 |         let source_index = other.origin + (other.len() - count);
571 |         unsafe { self.copy_from(other, source_index, self.origin, count) };
572 |         other.length -= count;
573 |         self.length += count;
574 |     }
575 | 
576 |     /// Insert a new value at index `index`, shifting all the following values
577 |     /// to the right.
578 |     ///
579 |     /// Panics if the index is out of bounds or the buffer is full.
580 |     ///
581 |     /// Time: O(n) for the number of items shifted
582 |     pub fn insert(&mut self, index: usize, value: A) {
583 |         if self.is_full() {
584 |             panic!("RingBuffer::insert: chunk size overflow");
585 |         }
586 |         if index > self.len() {
587 |             panic!("RingBuffer::insert: index out of bounds");
588 |         }
589 |         if index == 0 {
590 |             return self.push_front(value);
591 |         }
592 |         if index == self.len() {
593 |             return self.push_back(value);
594 |         }
595 |         let right_count = self.len() - index;
596 |         // Check which side has fewer elements to shift.
597 |         if right_count < index {
598 |             // Shift to the right.
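            // (Editorial note) This branch moves the elements at logical
            // indices index..len one slot to the right, walking back to front
            // so no slot is overwritten before it has been read; the `else`
            // branch below backs `origin` up one slot and moves the elements
            // before `index` left instead. Either way only the smaller side is
            // shifted, keeping `insert` at O(min(index, len - index)).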
599 |             let mut i = self.raw(self.len() - 1);
600 |             let target = self.raw(index);
601 |             while i != target {
602 |                 unsafe { self.force_write(i + 1, self.force_read(i)) };
603 |                 i -= 1;
604 |             }
605 |             unsafe { self.force_write(target + 1, self.force_read(target)) };
606 |             self.length += 1;
607 |         } else {
608 |             // Shift to the left.
609 |             self.origin -= 1;
610 |             self.length += 1;
611 |             for i in self.range().take(index) {
612 |                 unsafe { self.force_write(i, self.force_read(i + 1)) };
613 |             }
614 |         }
615 |         unsafe { self.force_write(self.raw(index), value) };
616 |     }
617 | 
618 |     /// Insert a new value into the buffer in sorted order.
619 |     ///
620 |     /// This assumes every element of the buffer is already in sorted order.
621 |     /// If not, the value will still be inserted but the ordering is not
622 |     /// guaranteed.
623 |     ///
624 |     /// Time: O(log n) to find the insert position, then O(n) for the number
625 |     /// of elements shifted.
626 |     pub fn insert_ordered(&mut self, value: A)
627 |     where
628 |         A: Ord,
629 |     {
630 |         if self.is_full() {
631 |             panic!("RingBuffer::insert_ordered: buffer is full");
632 |         }
633 |         match self.slice(..).binary_search(&value) {
634 |             Ok(index) => self.insert(index, value),
635 |             Err(index) => self.insert(index, value),
636 |         }
637 |     }
638 | 
639 |     /// Insert multiple values at index `index`, shifting all the following values
640 |     /// to the right.
641 |     ///
642 |     /// Panics if the index is out of bounds or the buffer doesn't have room
643 |     /// for all the values.
644 |     ///
645 |     /// Time: O(m+n) where m is the number of elements inserted and n is the number
646 |     /// of elements following the insertion index. Calling `insert`
647 |     /// repeatedly would be O(m*n).
648 |     pub fn insert_from<Iterable, I>(&mut self, index: usize, iter: Iterable)
649 |     where
650 |         Iterable: IntoIterator<Item = A, IntoIter = I>,
651 |         I: ExactSizeIterator<Item = A>,
652 |     {
653 |         let iter = iter.into_iter();
654 |         let insert_size = iter.len();
655 |         if self.len() + insert_size > Self::CAPACITY {
656 |             panic!(
657 |                 "RingBuffer::insert_from: buffer cannot fit {} elements",
658 |                 insert_size
659 |             );
660 |         }
661 |         if index > self.len() {
662 |             panic!("RingBuffer::insert_from: index out of bounds");
663 |         }
664 |         if index == self.len() {
665 |             self.extend(iter);
666 |             return;
667 |         }
668 |         let right_count = self.len() - index;
669 |         // Check which side has fewer elements to shift.
670 |         if right_count < index {
671 |             // Shift to the right.
672 |             let mut i = self.raw(self.len() - 1);
673 |             let target = self.raw(index);
674 |             while i != target {
675 |                 unsafe { self.force_write(i + insert_size, self.force_read(i)) };
676 |                 i -= 1;
677 |             }
678 |             unsafe { self.force_write(target + insert_size, self.force_read(target)) };
679 |             self.length += insert_size;
680 |         } else {
681 |             // Shift to the left.
682 |             self.origin -= insert_size;
683 |             self.length += insert_size;
684 |             for i in self.range().take(index) {
685 |                 unsafe { self.force_write(i, self.force_read(i + insert_size)) };
686 |             }
687 |         }
688 |         let mut index = self.raw(index);
689 |         // Panic safety: unless and until we fill it fully, there's a hole somewhere in the middle
690 |         // and the destructor would drop non-existent elements. Therefore we pretend to be empty
691 |         // for a while (and leak the elements instead in case something bad happens).
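        // (Editorial note) Concretely: `length` is parked at zero below, so if
        // the iterator panics mid-fill, `drop` sees an "empty" buffer and
        // drops nothing; the already-written elements leak, but the
        // uninitialised slots in the gap are never dropped.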
692 |         let mut inserted = 0;
693 |         let length = replace(&mut self.length, 0);
694 |         for value in iter.take(insert_size) {
695 |             unsafe { self.force_write(index, value) };
696 |             index += 1;
697 |             inserted += 1;
698 |         }
699 |         // A shortfall here would leave a hole of uninitialised slots in the middle.
700 |         assert_eq!(
701 |             inserted, insert_size,
702 |             "Iterator has fewer elements than advertised",
703 |         );
704 |         self.length = length;
705 |     }
706 | 
707 |     /// Remove the value at index `index`, shifting all the following values to
708 |     /// the left.
709 |     ///
710 |     /// Returns the removed value.
711 |     ///
712 |     /// Panics if the index is out of bounds.
713 |     ///
714 |     /// Time: O(n) for the number of items shifted
715 |     pub fn remove(&mut self, index: usize) -> A {
716 |         if index >= self.len() {
717 |             panic!("RingBuffer::remove: index out of bounds");
718 |         }
719 |         let value = unsafe { self.force_read(self.raw(index)) };
720 |         let right_count = self.len() - index;
721 |         // Check which side has fewer elements to shift.
722 |         if right_count < index {
723 |             // Shift from the right.
724 |             self.length -= 1;
725 |             let mut i = self.raw(index);
726 |             let target = self.raw(self.len());
727 |             while i != target {
728 |                 unsafe { self.force_write(i, self.force_read(i + 1)) };
729 |                 i += 1;
730 |             }
731 |         } else {
732 |             // Shift from the left.
733 |             let mut i = self.raw(index);
734 |             while i != self.origin {
735 |                 unsafe { self.force_write(i, self.force_read(i - 1)) };
736 |                 i -= 1;
737 |             }
738 |             self.origin += 1;
739 |             self.length -= 1;
740 |         }
741 |         value
742 |     }
743 | 
744 |     /// Construct an iterator that drains values from the front of the buffer.
745 |     pub fn drain(&mut self) -> Drain<'_, A, N> {
746 |         Drain { buffer: self }
747 |     }
748 | 
749 |     /// Discard the contents of the buffer.
750 |     ///
751 |     /// Time: O(n)
752 |     pub fn clear(&mut self) {
753 |         for i in self.range() {
754 |             unsafe { self.force_drop(i) };
755 |         }
756 |         self.origin = 0.into();
757 |         self.length = 0;
758 |     }
759 | }
760 | 
761 | impl<A, const N: usize> Default for RingBuffer<A, N> {
762 |     #[inline]
763 |     #[must_use]
764 |     fn default() -> Self {
765 |         Self::new()
766 |     }
767 | }
768 | 
769 | impl<A: Clone, const N: usize> Clone for RingBuffer<A, N> {
770 |     fn clone(&self) -> Self {
771 |         let mut out = Self::new();
772 |         out.origin = self.origin;
773 |         out.length = self.length;
774 |         let range = self.range();
775 |         // Panic safety. If we panic, we don't want to drop more than we have initialized.
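        // (Editorial note) `out.length` is bumped once per successfully cloned
        // element below, so a panicking `A::clone` leaves `out` dropping
        // exactly the slots that were initialised and no others.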
776 |         out.length = 0;
777 |         for index in range {
778 |             unsafe { out.force_write(index, (&*self.ptr(index)).clone()) };
779 |             out.length += 1;
780 |         }
781 |         out
782 |     }
783 | }
784 | 
785 | impl<A, const N: usize> Index<usize> for RingBuffer<A, N> {
786 |     type Output = A;
787 | 
788 |     #[must_use]
789 |     fn index(&self, index: usize) -> &Self::Output {
790 |         if index >= self.len() {
791 |             panic!(
792 |                 "RingBuffer::index: index out of bounds {} >= {}",
793 |                 index,
794 |                 self.len()
795 |             );
796 |         }
797 |         unsafe { &*self.ptr(self.raw(index)) }
798 |     }
799 | }
800 | 
801 | impl<A, const N: usize> IndexMut<usize> for RingBuffer<A, N> {
802 |     #[must_use]
803 |     fn index_mut(&mut self, index: usize) -> &mut Self::Output {
804 |         if index >= self.len() {
805 |             panic!(
806 |                 "RingBuffer::index_mut: index out of bounds {} >= {}",
807 |                 index,
808 |                 self.len()
809 |             );
810 |         }
811 |         unsafe { &mut *self.mut_ptr(self.raw(index)) }
812 |     }
813 | }
814 | 
815 | impl<A: PartialEq, const N: usize> PartialEq for RingBuffer<A, N> {
816 |     #[inline]
817 |     #[must_use]
818 |     fn eq(&self, other: &Self) -> bool {
819 |         self.len() == other.len() && self.iter().eq(other.iter())
820 |     }
821 | }
822 | 
823 | impl<A, PrimSlice, const N: usize> PartialEq<PrimSlice> for RingBuffer<A, N>
824 | where
825 |     PrimSlice: Borrow<[A]>,
826 |     A: PartialEq,
827 | {
828 |     #[inline]
829 |     #[must_use]
830 |     fn eq(&self, other: &PrimSlice) -> bool {
831 |         let other = other.borrow();
832 |         self.len() == other.len() && self.iter().eq(other.iter())
833 |     }
834 | }
835 | 
836 | impl<A, const N: usize> PartialEq<Slice<'_, A, N>> for RingBuffer<A, N>
837 | where
838 |     A: PartialEq,
839 | {
840 |     fn eq(&self, other: &Slice<'_, A, N>) -> bool {
841 |         self.len() == other.len() && self.iter().eq(other.iter())
842 |     }
843 | }
844 | 
845 | impl<A, const N: usize> PartialEq<SliceMut<'_, A, N>> for RingBuffer<A, N>
846 | where
847 |     A: PartialEq,
848 | {
849 |     fn eq(&self, other: &SliceMut<'_, A, N>) -> bool {
850 |         self.len() == other.len() && self.iter().eq(other.iter())
851 |     }
852 | }
853 | 
854 | impl<A: Eq, const N: usize> Eq for RingBuffer<A, N> {}
855 | 
856 | impl<A: PartialOrd, const N: usize> PartialOrd for RingBuffer<A, N> {
857 |     #[inline]
858 |     #[must_use]
859 |     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
860 |         self.iter().partial_cmp(other.iter())
861 |     }
862 | }
863 | 
864 | impl<A: Ord, const N: usize> Ord for RingBuffer<A, N> {
865 |     #[inline]
866 |     #[must_use]
867 |     fn cmp(&self, other: &Self) -> Ordering {
868 |         self.iter().cmp(other.iter())
869 |     }
870 | }
871 | 
872 | impl<A, const N: usize> Extend<A> for RingBuffer<A, N> {
873 |     #[inline]
874 |     fn extend<I: IntoIterator<Item = A>>(&mut self, iter: I) {
875 |         for item in iter {
876 |             self.push_back(item);
877 |         }
878 |     }
879 | }
880 | 
881 | impl<'a, A: Clone + 'a, const N: usize> Extend<&'a A> for RingBuffer<A, N> {
882 |     #[inline]
883 |     fn extend<I: IntoIterator<Item = &'a A>>(&mut self, iter: I) {
884 |         for item in iter {
885 |             self.push_back(item.clone());
886 |         }
887 |     }
888 | }
889 | 
890 | impl<A: Debug, const N: usize> Debug for RingBuffer<A, N> {
891 |     fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
892 |         f.write_str("RingBuffer")?;
893 |         f.debug_list().entries(self.iter()).finish()
894 |     }
895 | }
896 | 
897 | impl<A: Hash, const N: usize> Hash for RingBuffer<A, N> {
898 |     #[inline]
899 |     fn hash<H: Hasher>(&self, hasher: &mut H) {
900 |         for item in self {
901 |             item.hash(hasher)
902 |         }
903 |     }
904 | }
905 | 
906 | #[cfg(feature = "std")]
907 | impl<const N: usize> std::io::Write for RingBuffer<u8, N> {
908 |     fn write(&mut self, mut buf: &[u8]) -> std::io::Result<usize> {
909 |         let max_new = Self::CAPACITY - self.len();
910 |         if buf.len() > max_new {
911 |             buf = &buf[..max_new];
912 |         }
913 |         unsafe { self.copy_from_slice(buf, self.origin + self.len()) };
914 |         self.length += buf.len();
915 |         Ok(buf.len())
916 |     }
917 | 
918 |     #[inline]
919 |     fn flush(&mut self) -> std::io::Result<()> {
920 |         Ok(())
921 |     }
922 | }
923 | 
924 | #[cfg(feature = "std")]
925 | impl<const N: usize> std::io::Read for RingBuffer<u8, N> {
926 |     fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
927 |         let read_size = buf.len().min(self.len());
928 |         if read_size == 0 {
929 |             Ok(0)
930 |         } else {
931 |             for p in buf.iter_mut().take(read_size) {
932 |                 *p = self.pop_front().unwrap();
933 |             }
934 |             Ok(read_size)
935 |         }
936 |     }
937 | }
938 | 
939 | impl<A, const N: usize> FromIterator<A> for RingBuffer<A, N> {
940 |     #[must_use]
941 |     fn from_iter<I: IntoIterator<Item = A>>(iter: I) -> Self {
942 |         let mut buffer = RingBuffer::new();
943 |         buffer.extend(iter);
944 |         buffer
945 |     }
946 | }
947 | 
948 | impl<A, const N: usize> IntoIterator for RingBuffer<A, N> {
949 |     type Item = A;
950 |     type IntoIter = OwnedIter<A, N>;
951 | 
952 |     #[inline]
953 |     #[must_use]
954 |     fn into_iter(self) -> Self::IntoIter {
955 |         OwnedIter { buffer: self }
956 |     }
957 | }
958 | 
959 | impl<'a, A, const N: usize> IntoIterator for &'a RingBuffer<A, N> {
960 |     type Item = &'a A;
961 |     type IntoIter = Iter<'a, A, N>;
962 | 
963 |     #[inline]
964 |     #[must_use]
965 |     fn into_iter(self) -> Self::IntoIter {
966 |         self.iter()
967 |     }
968 | }
969 | 
970 | impl<'a, A, const N: usize> IntoIterator for &'a mut RingBuffer<A, N> {
971 |     type Item = &'a mut A;
972 |     type IntoIter = IterMut<'a, A, N>;
973 | 
974 |     #[inline]
975 |     #[must_use]
976 |     fn into_iter(self) -> Self::IntoIter {
977 |         self.iter_mut()
978 |     }
979 | }
980 | 
981 | // Tests
982 | 
983 | #[cfg(test)]
984 | mod test {
985 |     use super::*;
986 | 
987 |     #[test]
988 |     fn validity_invariant() {
989 |         assert!(Some(RingBuffer::<Box<()>, 64>::new()).is_some());
990 |     }
991 | 
992 |     #[test]
993 |     fn is_full() {
994 |         let mut chunk = RingBuffer::<_, 64>::new();
995 |         for i in 0..64 {
996 |             assert_eq!(false, chunk.is_full());
997 |             chunk.push_back(i);
998 |         }
999 |         assert_eq!(true, chunk.is_full());
1000 |     }
1001 | 
1002 |     #[test]
1003 |     fn ref_iter() {
1004 |         let chunk: RingBuffer<i32, 64> = (0..64).collect();
1005 |         let out_vec: Vec<&i32> = chunk.iter().collect();
1006 |         let should_vec_p: Vec<i32> = (0..64).collect();
1007 |         let should_vec: Vec<&i32> = should_vec_p.iter().collect();
1008 |         assert_eq!(should_vec, out_vec);
1009 |     }
1010 | 
1011 |     #[test]
1012 |     fn mut_ref_iter() {
1013 |         let mut chunk: RingBuffer<i32, 64> = (0..64).collect();
1014 |         let out_vec: Vec<&mut i32> = chunk.iter_mut().collect();
1015 |         let mut should_vec_p: Vec<i32> = (0..64).collect();
1016 |         let should_vec: Vec<&mut i32> = should_vec_p.iter_mut().collect();
1017 |         assert_eq!(should_vec, out_vec);
1018 |     }
1019 | 
1020 |     #[test]
1021 |     fn consuming_iter() {
1022 |         let chunk: RingBuffer<i32, 64> = (0..64).collect();
1023 |         let out_vec: Vec<i32> = chunk.into_iter().collect();
1024 |         let should_vec: Vec<i32> = (0..64).collect();
1025 |         assert_eq!(should_vec, out_vec);
1026 |     }
1027 | 
1028 |     #[test]
1029 |     fn draining_iter() {
1030 |         let mut chunk: RingBuffer<i32, 64> = (0..64).collect();
1031 |         let mut half: RingBuffer<i32, 64> = chunk.drain().take(16).collect();
1032 |         half.extend(chunk.drain().rev().take(16));
1033 |         let should: Vec<i32> = (16..48).collect();
1034 |         assert_eq!(chunk, should);
1035 |         let should: Vec<i32> = (0..16).chain((48..64).rev()).collect();
1036 |         assert_eq!(half, should);
1037 |     }
1038 | 
1039 |     #[cfg(feature = "std")]
1040 |     #[test]
1041 |     fn io_write() {
1042 |         use std::io::Write;
1043 |         let mut buffer: RingBuffer<u8, 64> = (0..32).collect();
1044 |         let to_write: Vec<u8> = (32..128).collect();
1045 |         assert_eq!(32, buffer.write(&to_write).unwrap());
1046 |         assert_eq!(buffer, (0..64).collect::<Vec<u8>>());
1047 |     }
1048 | 
1049 |     #[cfg(feature = "std")]
1050 |     #[test]
1051 |     fn io_read() {
1052 |         use std::io::Read;
1053 |         let mut buffer: RingBuffer<u8, 64> = (16..48).collect();
1054 |         let mut read_buf: Vec<u8> =
(0..16).collect(); 1055 | assert_eq!(16, buffer.read(&mut read_buf).unwrap()); 1056 | assert_eq!(read_buf, (16..32).collect::>()); 1057 | assert_eq!(buffer, (32..48).collect::>()); 1058 | assert_eq!(16, buffer.read(&mut read_buf).unwrap()); 1059 | assert_eq!(read_buf, (32..48).collect::>()); 1060 | assert_eq!(buffer, vec![]); 1061 | assert_eq!(0, buffer.read(&mut read_buf).unwrap()); 1062 | } 1063 | 1064 | #[test] 1065 | fn clone() { 1066 | let buffer: RingBuffer = (0..50).collect(); 1067 | assert_eq!(buffer, buffer.clone()); 1068 | } 1069 | 1070 | #[test] 1071 | fn failing() { 1072 | let mut buffer: RingBuffer = RingBuffer::new(); 1073 | buffer.push_front(0); 1074 | let mut add: RingBuffer = vec![1, 0, 0, 0, 0, 0].into_iter().collect(); 1075 | buffer.append(&mut add); 1076 | assert_eq!(1, buffer.remove(1)); 1077 | let expected = vec![0, 0, 0, 0, 0, 0]; 1078 | assert_eq!(buffer, expected); 1079 | } 1080 | 1081 | use crate::tests::DropTest; 1082 | use std::sync::atomic::{AtomicUsize, Ordering}; 1083 | 1084 | #[test] 1085 | fn dropping() { 1086 | let counter = AtomicUsize::new(0); 1087 | { 1088 | let mut chunk: RingBuffer, 64> = RingBuffer::new(); 1089 | for _i in 0..20 { 1090 | chunk.push_back(DropTest::new(&counter)) 1091 | } 1092 | for _i in 0..20 { 1093 | chunk.push_front(DropTest::new(&counter)) 1094 | } 1095 | assert_eq!(40, counter.load(Ordering::Relaxed)); 1096 | for _i in 0..10 { 1097 | chunk.pop_back(); 1098 | } 1099 | assert_eq!(30, counter.load(Ordering::Relaxed)); 1100 | } 1101 | assert_eq!(0, counter.load(Ordering::Relaxed)); 1102 | } 1103 | 1104 | #[test] 1105 | #[should_panic(expected = "assertion failed: Self::CAPACITY >= 1")] 1106 | fn unit_on_empty() { 1107 | let _ = RingBuffer::::unit(1); 1108 | } 1109 | 1110 | #[test] 1111 | #[should_panic(expected = "assertion failed: Self::CAPACITY >= 2")] 1112 | fn pair_on_empty() { 1113 | let _ = RingBuffer::::pair(1, 2); 1114 | } 1115 | } 1116 | -------------------------------------------------------------------------------- /src/ring_buffer/refpool.rs: -------------------------------------------------------------------------------- 1 | use core::mem::MaybeUninit; 2 | 3 | use ::refpool::{PoolClone, PoolDefault}; 4 | 5 | use crate::ring_buffer::index::RawIndex; 6 | use crate::RingBuffer; 7 | 8 | impl PoolDefault for RingBuffer { 9 | unsafe fn default_uninit(target: &mut MaybeUninit) { 10 | let ptr = target.as_mut_ptr(); 11 | let origin_ptr: *mut RawIndex = &mut (*ptr).origin; 12 | let length_ptr: *mut usize = &mut (*ptr).length; 13 | origin_ptr.write(0.into()); 14 | length_ptr.write(0); 15 | } 16 | } 17 | 18 | impl PoolClone for RingBuffer 19 | where 20 | A: Clone, 21 | { 22 | unsafe fn clone_uninit(&self, target: &mut MaybeUninit) { 23 | let ptr = target.as_mut_ptr(); 24 | let origin_ptr: *mut RawIndex = &mut (*ptr).origin; 25 | let length_ptr: *mut usize = &mut (*ptr).length; 26 | let data_ptr: *mut _ = &mut (*ptr).data; 27 | let data_ptr: *mut A = (*data_ptr).as_mut_ptr().cast(); 28 | origin_ptr.write(self.origin); 29 | length_ptr.write(self.length); 30 | for index in self.range() { 31 | data_ptr 32 | .add(index.to_usize()) 33 | .write((*self.ptr(index)).clone()); 34 | } 35 | } 36 | } 37 | 38 | #[cfg(test)] 39 | mod test { 40 | use super::*; 41 | use ::refpool::{Pool, PoolRef}; 42 | use std::iter::FromIterator; 43 | 44 | #[test] 45 | fn default_and_clone() { 46 | let pool: Pool> = Pool::new(16); 47 | let mut ref1 = PoolRef::default(&pool); 48 | { 49 | let chunk = PoolRef::make_mut(&pool, &mut ref1); 50 | chunk.push_back(1); 51 | 
chunk.push_back(2); 52 | chunk.push_back(3); 53 | } 54 | let ref2 = PoolRef::cloned(&pool, &ref1); 55 | let ref3 = PoolRef::clone_from(&pool, &RingBuffer::from_iter(1..=3)); 56 | assert_eq!(RingBuffer::::from_iter(1..=3), *ref1); 57 | assert_eq!(RingBuffer::::from_iter(1..=3), *ref2); 58 | assert_eq!(RingBuffer::::from_iter(1..=3), *ref3); 59 | assert_eq!(ref1, ref2); 60 | assert_eq!(ref1, ref3); 61 | assert_eq!(ref2, ref3); 62 | assert!(!PoolRef::ptr_eq(&ref1, &ref2)); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/ring_buffer/slice.rs: -------------------------------------------------------------------------------- 1 | // This Source Code Form is subject to the terms of the Mozilla Public 2 | // License, v. 2.0. If a copy of the MPL was not distributed with this 3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | use core::borrow::Borrow; 6 | use core::cmp::Ordering; 7 | use core::fmt::Debug; 8 | use core::fmt::Error; 9 | use core::fmt::Formatter; 10 | use core::hash::Hash; 11 | use core::hash::Hasher; 12 | use core::ops::IndexMut; 13 | use core::ops::{Bound, Index, Range, RangeBounds}; 14 | 15 | use super::{Iter, IterMut, RingBuffer}; 16 | 17 | use array_ops::{Array, ArrayMut, HasLength}; 18 | 19 | /// An indexable representation of a subset of a `RingBuffer`. 20 | pub struct Slice<'a, A, const N: usize> { 21 | pub(crate) buffer: &'a RingBuffer, 22 | pub(crate) range: Range, 23 | } 24 | 25 | impl<'a, A: 'a, const N: usize> HasLength for Slice<'a, A, N> { 26 | /// Get the length of the slice. 27 | #[inline] 28 | #[must_use] 29 | fn len(&self) -> usize { 30 | self.range.end - self.range.start 31 | } 32 | } 33 | 34 | impl<'a, A: 'a, const N: usize> Array for Slice<'a, A, N> { 35 | /// Get a reference to the value at a given index. 36 | #[inline] 37 | #[must_use] 38 | fn get(&self, index: usize) -> Option<&A> { 39 | if index >= self.len() { 40 | None 41 | } else { 42 | Some(unsafe { self.get_unchecked(index) }) 43 | } 44 | } 45 | } 46 | 47 | impl<'a, A: 'a, const N: usize> Slice<'a, A, N> { 48 | /// Get an unchecked reference to the value at the given index. 49 | /// 50 | /// # Safety 51 | /// 52 | /// You must ensure the index is not out of bounds. 53 | #[must_use] 54 | pub unsafe fn get_unchecked(&self, index: usize) -> &A { 55 | self.buffer.get_unchecked(self.range.start + index) 56 | } 57 | 58 | /// Get an iterator over references to the items in the slice in order. 59 | #[inline] 60 | #[must_use] 61 | pub fn iter(&self) -> Iter<'_, A, N> { 62 | Iter { 63 | buffer: self.buffer, 64 | left_index: self.buffer.origin + self.range.start, 65 | right_index: self.buffer.origin + self.range.start + self.len(), 66 | remaining: self.len(), 67 | } 68 | } 69 | 70 | /// Create a subslice of this slice. 71 | /// 72 | /// This consumes the slice. To create a subslice without consuming it, 73 | /// clone it first: `my_slice.clone().slice(1..2)`. 
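    // A minimal sketch of subslicing, assuming a buffer of 0..8; note that the
    // subslice range is relative to the slice, not the underlying buffer:
    //
    //     let buffer: RingBuffer<i32, 8> = (0..8).collect();
    //     let outer = buffer.slice(1..7);            // elements 1..=6
    //     let inner = outer.clone().slice(2..4);     // elements 3 and 4
    //     assert_eq!(2, inner.len());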
74 | #[must_use] 75 | pub fn slice>(self, range: R) -> Slice<'a, A, N> { 76 | let new_range = Range { 77 | start: match range.start_bound() { 78 | Bound::Unbounded => self.range.start, 79 | Bound::Included(index) => self.range.start + index, 80 | Bound::Excluded(_) => unimplemented!(), 81 | }, 82 | end: match range.end_bound() { 83 | Bound::Unbounded => self.range.end, 84 | Bound::Included(index) => self.range.start + index + 1, 85 | Bound::Excluded(index) => self.range.start + index, 86 | }, 87 | }; 88 | if new_range.start < self.range.start 89 | || new_range.end > self.range.end 90 | || new_range.start > new_range.end 91 | { 92 | panic!("Slice::slice: index out of bounds"); 93 | } 94 | Slice { 95 | buffer: self.buffer, 96 | range: new_range, 97 | } 98 | } 99 | 100 | /// Split the slice into two subslices at the given index. 101 | #[must_use] 102 | pub fn split_at(self, index: usize) -> (Slice<'a, A, N>, Slice<'a, A, N>) { 103 | if index > self.len() { 104 | panic!("Slice::split_at: index out of bounds"); 105 | } 106 | let index = self.range.start + index; 107 | ( 108 | Slice { 109 | buffer: self.buffer, 110 | range: Range { 111 | start: self.range.start, 112 | end: index, 113 | }, 114 | }, 115 | Slice { 116 | buffer: self.buffer, 117 | range: Range { 118 | start: index, 119 | end: self.range.end, 120 | }, 121 | }, 122 | ) 123 | } 124 | 125 | /// Construct a new `RingBuffer` by copying the elements in this slice. 126 | #[inline] 127 | #[must_use] 128 | pub fn to_owned(&self) -> RingBuffer 129 | where 130 | A: Clone, 131 | { 132 | self.iter().cloned().collect() 133 | } 134 | } 135 | 136 | impl<'a, A: 'a, const N: usize> From<&'a RingBuffer> for Slice<'a, A, N> { 137 | #[inline] 138 | #[must_use] 139 | fn from(buffer: &'a RingBuffer) -> Self { 140 | Slice { 141 | range: Range { 142 | start: 0, 143 | end: buffer.len(), 144 | }, 145 | buffer, 146 | } 147 | } 148 | } 149 | 150 | impl<'a, A: 'a, const N: usize> Clone for Slice<'a, A, N> { 151 | #[inline] 152 | #[must_use] 153 | fn clone(&self) -> Self { 154 | Slice { 155 | buffer: self.buffer, 156 | range: self.range.clone(), 157 | } 158 | } 159 | } 160 | 161 | impl<'a, A: 'a, const N: usize> Index for Slice<'a, A, N> { 162 | type Output = A; 163 | 164 | #[inline] 165 | #[must_use] 166 | fn index(&self, index: usize) -> &Self::Output { 167 | self.buffer.index(self.range.start + index) 168 | } 169 | } 170 | 171 | impl<'a, A: PartialEq + 'a, const N: usize> PartialEq for Slice<'a, A, N> { 172 | #[inline] 173 | #[must_use] 174 | fn eq(&self, other: &Self) -> bool { 175 | self.len() == other.len() && self.iter().eq(other.iter()) 176 | } 177 | } 178 | 179 | impl<'a, A: PartialEq + 'a, const N: usize> PartialEq> for Slice<'a, A, N> { 180 | #[inline] 181 | #[must_use] 182 | fn eq(&self, other: &SliceMut<'a, A, N>) -> bool { 183 | self.len() == other.len() && self.iter().eq(other.iter()) 184 | } 185 | } 186 | 187 | impl<'a, A: PartialEq + 'a, const N: usize> PartialEq> for Slice<'a, A, N> { 188 | #[inline] 189 | #[must_use] 190 | fn eq(&self, other: &RingBuffer) -> bool { 191 | self.len() == other.len() && self.iter().eq(other.iter()) 192 | } 193 | } 194 | 195 | impl<'a, A: PartialEq + 'a, S, const N: usize> PartialEq for Slice<'a, A, N> 196 | where 197 | S: Borrow<[A]>, 198 | { 199 | #[inline] 200 | #[must_use] 201 | fn eq(&self, other: &S) -> bool { 202 | let other = other.borrow(); 203 | self.len() == other.len() && self.iter().eq(other.iter()) 204 | } 205 | } 206 | 207 | impl<'a, A: Eq + 'a, const N: usize> Eq for Slice<'a, A, N> {} 208 | 209 | 
impl<'a, A: PartialOrd + 'a, const N: usize> PartialOrd for Slice<'a, A, N> { 210 | #[inline] 211 | #[must_use] 212 | fn partial_cmp(&self, other: &Self) -> Option { 213 | self.iter().partial_cmp(other.iter()) 214 | } 215 | } 216 | 217 | impl<'a, A: Ord + 'a, const N: usize> Ord for Slice<'a, A, N> { 218 | #[inline] 219 | #[must_use] 220 | fn cmp(&self, other: &Self) -> Ordering { 221 | self.iter().cmp(other.iter()) 222 | } 223 | } 224 | 225 | impl<'a, A: Debug + 'a, const N: usize> Debug for Slice<'a, A, N> { 226 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { 227 | f.write_str("RingBuffer")?; 228 | f.debug_list().entries(self.iter()).finish() 229 | } 230 | } 231 | 232 | impl<'a, A: Hash + 'a, const N: usize> Hash for Slice<'a, A, N> { 233 | #[inline] 234 | fn hash(&self, hasher: &mut H) { 235 | for item in self { 236 | item.hash(hasher) 237 | } 238 | } 239 | } 240 | 241 | impl<'a, A: 'a, const N: usize> IntoIterator for &'a Slice<'a, A, N> { 242 | type Item = &'a A; 243 | type IntoIter = Iter<'a, A, N>; 244 | 245 | #[inline] 246 | #[must_use] 247 | fn into_iter(self) -> Self::IntoIter { 248 | self.iter() 249 | } 250 | } 251 | 252 | // Mutable slice 253 | 254 | /// An indexable representation of a mutable subset of a `RingBuffer`. 255 | pub struct SliceMut<'a, A, const N: usize> { 256 | pub(crate) buffer: &'a mut RingBuffer, 257 | pub(crate) range: Range, 258 | } 259 | 260 | impl<'a, A: 'a, const N: usize> HasLength for SliceMut<'a, A, N> { 261 | /// Get the length of the slice. 262 | #[inline] 263 | #[must_use] 264 | fn len(&self) -> usize { 265 | self.range.end - self.range.start 266 | } 267 | } 268 | 269 | impl<'a, A: 'a, const N: usize> Array for SliceMut<'a, A, N> { 270 | /// Get a reference to the value at a given index. 271 | #[inline] 272 | #[must_use] 273 | fn get(&self, index: usize) -> Option<&A> { 274 | if index >= self.len() { 275 | None 276 | } else { 277 | Some(unsafe { self.get_unchecked(index) }) 278 | } 279 | } 280 | } 281 | 282 | impl<'a, A: 'a, const N: usize> ArrayMut for SliceMut<'a, A, N> { 283 | /// Get a mutable reference to the value at a given index. 284 | #[inline] 285 | #[must_use] 286 | fn get_mut(&mut self, index: usize) -> Option<&mut A> { 287 | if index >= self.len() { 288 | None 289 | } else { 290 | Some(unsafe { self.get_unchecked_mut(index) }) 291 | } 292 | } 293 | } 294 | 295 | impl<'a, A: 'a, const N: usize> SliceMut<'a, A, N> { 296 | /// Downgrade this slice into a non-mutable slice. 297 | #[inline] 298 | #[must_use] 299 | pub fn unmut(self) -> Slice<'a, A, N> { 300 | Slice { 301 | buffer: self.buffer, 302 | range: self.range, 303 | } 304 | } 305 | 306 | /// Get an unchecked reference to the value at the given index. 307 | /// 308 | /// # Safety 309 | /// 310 | /// You must ensure the index is not out of bounds. 311 | #[must_use] 312 | pub unsafe fn get_unchecked(&self, index: usize) -> &A { 313 | self.buffer.get_unchecked(self.range.start + index) 314 | } 315 | 316 | /// Get an unchecked mutable reference to the value at the given index. 317 | /// 318 | /// # Safety 319 | /// 320 | /// You must ensure the index is not out of bounds. 321 | #[must_use] 322 | pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A { 323 | self.buffer.get_unchecked_mut(self.range.start + index) 324 | } 325 | 326 | /// Get an iterator over references to the items in the slice in order. 
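    // A minimal sketch of `unmut` above: downgrade when read-only access is
    // enough, e.g.
    //
    //     let mut buffer: RingBuffer<i32, 8> = (0..8).collect();
    //     let read_only: Slice<'_, i32, 8> = buffer.slice_mut(..).unmut();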
327 | #[inline] 328 | #[must_use] 329 | pub fn iter(&self) -> Iter<'_, A, N> { 330 | Iter { 331 | buffer: self.buffer, 332 | left_index: self.buffer.origin + self.range.start, 333 | right_index: self.buffer.origin + self.range.start + self.len(), 334 | remaining: self.len(), 335 | } 336 | } 337 | 338 | /// Get an iterator over mutable references to the items in the slice in 339 | /// order. 340 | #[inline] 341 | #[must_use] 342 | pub fn iter_mut(&mut self) -> IterMut<'_, A, N> { 343 | IterMut::new_slice( 344 | self.buffer, 345 | self.buffer.origin + self.range.start, 346 | self.len(), 347 | ) 348 | } 349 | 350 | /// Create a subslice of this slice. 351 | /// 352 | /// This consumes the slice. Because the slice works like a mutable 353 | /// reference, you can only have one slice over a given subset of a 354 | /// `RingBuffer` at any one time, so that's just how it's got to be. 355 | #[must_use] 356 | pub fn slice>(self, range: R) -> SliceMut<'a, A, N> { 357 | let new_range = Range { 358 | start: match range.start_bound() { 359 | Bound::Unbounded => self.range.start, 360 | Bound::Included(index) => self.range.start + index, 361 | Bound::Excluded(_) => unimplemented!(), 362 | }, 363 | end: match range.end_bound() { 364 | Bound::Unbounded => self.range.end, 365 | Bound::Included(index) => self.range.start + index + 1, 366 | Bound::Excluded(index) => self.range.start + index, 367 | }, 368 | }; 369 | if new_range.start < self.range.start 370 | || new_range.end > self.range.end 371 | || new_range.start > new_range.end 372 | { 373 | panic!("Slice::slice: index out of bounds"); 374 | } 375 | SliceMut { 376 | buffer: self.buffer, 377 | range: new_range, 378 | } 379 | } 380 | 381 | /// Split the slice into two subslices at the given index. 382 | #[must_use] 383 | pub fn split_at(self, index: usize) -> (SliceMut<'a, A, N>, SliceMut<'a, A, N>) { 384 | if index > self.len() { 385 | panic!("SliceMut::split_at: index out of bounds"); 386 | } 387 | let index = self.range.start + index; 388 | let ptr: *mut RingBuffer = self.buffer; 389 | ( 390 | SliceMut { 391 | buffer: unsafe { &mut *ptr }, 392 | range: Range { 393 | start: self.range.start, 394 | end: index, 395 | }, 396 | }, 397 | SliceMut { 398 | buffer: unsafe { &mut *ptr }, 399 | range: Range { 400 | start: index, 401 | end: self.range.end, 402 | }, 403 | }, 404 | ) 405 | } 406 | 407 | /// Construct a new `RingBuffer` by copying the elements in this slice. 
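    // (Editorial note on `split_at` above) Both halves hold `&mut` access to
    // the same `RingBuffer` through a raw pointer; this is sound only because
    // their ranges are disjoint and every `SliceMut` operation stays inside
    // `self.range`. It is the same reasoning `slice::split_at_mut` relies on.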
408 | #[inline] 409 | #[must_use] 410 | pub fn to_owned(&self) -> RingBuffer 411 | where 412 | A: Clone, 413 | { 414 | self.iter().cloned().collect() 415 | } 416 | } 417 | 418 | impl<'a, A: 'a, const N: usize> From<&'a mut RingBuffer> for SliceMut<'a, A, N> { 419 | #[must_use] 420 | fn from(buffer: &'a mut RingBuffer) -> Self { 421 | SliceMut { 422 | range: Range { 423 | start: 0, 424 | end: buffer.len(), 425 | }, 426 | buffer, 427 | } 428 | } 429 | } 430 | 431 | impl<'a, A: 'a, const N: usize> Into> for SliceMut<'a, A, N> { 432 | #[inline] 433 | #[must_use] 434 | fn into(self) -> Slice<'a, A, N> { 435 | self.unmut() 436 | } 437 | } 438 | 439 | impl<'a, A: 'a, const N: usize> Index for SliceMut<'a, A, N> { 440 | type Output = A; 441 | 442 | #[inline] 443 | #[must_use] 444 | fn index(&self, index: usize) -> &Self::Output { 445 | self.buffer.index(self.range.start + index) 446 | } 447 | } 448 | 449 | impl<'a, A: 'a, const N: usize> IndexMut for SliceMut<'a, A, N> { 450 | #[inline] 451 | #[must_use] 452 | fn index_mut(&mut self, index: usize) -> &mut Self::Output { 453 | self.buffer.index_mut(self.range.start + index) 454 | } 455 | } 456 | 457 | impl<'a, A: PartialEq + 'a, const N: usize> PartialEq for SliceMut<'a, A, N> { 458 | #[inline] 459 | #[must_use] 460 | fn eq(&self, other: &Self) -> bool { 461 | self.len() == other.len() && self.iter().eq(other.iter()) 462 | } 463 | } 464 | 465 | impl<'a, A: PartialEq + 'a, const N: usize> PartialEq> for SliceMut<'a, A, N> { 466 | #[inline] 467 | #[must_use] 468 | fn eq(&self, other: &Slice<'a, A, N>) -> bool { 469 | self.len() == other.len() && self.iter().eq(other.iter()) 470 | } 471 | } 472 | 473 | impl<'a, A: PartialEq + 'a, const N: usize> PartialEq> for SliceMut<'a, A, N> { 474 | #[inline] 475 | #[must_use] 476 | fn eq(&self, other: &RingBuffer) -> bool { 477 | self.len() == other.len() && self.iter().eq(other.iter()) 478 | } 479 | } 480 | 481 | impl<'a, A: PartialEq + 'a, S, const N: usize> PartialEq for SliceMut<'a, A, N> 482 | where 483 | S: Borrow<[A]>, 484 | { 485 | #[inline] 486 | #[must_use] 487 | fn eq(&self, other: &S) -> bool { 488 | let other = other.borrow(); 489 | self.len() == other.len() && self.iter().eq(other.iter()) 490 | } 491 | } 492 | 493 | impl<'a, A: Eq + 'a, const N: usize> Eq for SliceMut<'a, A, N> {} 494 | 495 | impl<'a, A: PartialOrd + 'a, const N: usize> PartialOrd for SliceMut<'a, A, N> { 496 | #[inline] 497 | #[must_use] 498 | fn partial_cmp(&self, other: &Self) -> Option { 499 | self.iter().partial_cmp(other.iter()) 500 | } 501 | } 502 | 503 | impl<'a, A: Ord + 'a, const N: usize> Ord for SliceMut<'a, A, N> { 504 | #[inline] 505 | #[must_use] 506 | fn cmp(&self, other: &Self) -> Ordering { 507 | self.iter().cmp(other.iter()) 508 | } 509 | } 510 | 511 | impl<'a, A: Debug + 'a, const N: usize> Debug for SliceMut<'a, A, N> { 512 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { 513 | f.write_str("RingBuffer")?; 514 | f.debug_list().entries(self.iter()).finish() 515 | } 516 | } 517 | 518 | impl<'a, A: Hash + 'a, const N: usize> Hash for SliceMut<'a, A, N> { 519 | #[inline] 520 | fn hash(&self, hasher: &mut H) { 521 | for item in self { 522 | item.hash(hasher) 523 | } 524 | } 525 | } 526 | 527 | impl<'a, 'b, A: 'a, const N: usize> IntoIterator for &'a SliceMut<'a, A, N> { 528 | type Item = &'a A; 529 | type IntoIter = Iter<'a, A, N>; 530 | 531 | #[inline] 532 | #[must_use] 533 | fn into_iter(self) -> Self::IntoIter { 534 | self.iter() 535 | } 536 | } 537 | 538 | impl<'a, 'b, A: 'a, const N: usize> 
IntoIterator for &'a mut SliceMut<'a, A, N> { 539 | type Item = &'a mut A; 540 | type IntoIter = IterMut<'a, A, N>; 541 | 542 | #[inline] 543 | #[must_use] 544 | fn into_iter(self) -> Self::IntoIter { 545 | self.iter_mut() 546 | } 547 | } 548 | -------------------------------------------------------------------------------- /src/sized_chunk/iter.rs: -------------------------------------------------------------------------------- 1 | use core::iter::FusedIterator; 2 | 3 | use super::Chunk; 4 | 5 | /// A consuming iterator over the elements of a `Chunk`. 6 | pub struct Iter { 7 | pub(crate) chunk: Chunk, 8 | } 9 | 10 | impl Iterator for Iter { 11 | type Item = A; 12 | fn next(&mut self) -> Option { 13 | if self.chunk.is_empty() { 14 | None 15 | } else { 16 | Some(self.chunk.pop_front()) 17 | } 18 | } 19 | 20 | fn size_hint(&self) -> (usize, Option) { 21 | (self.chunk.len(), Some(self.chunk.len())) 22 | } 23 | } 24 | 25 | impl DoubleEndedIterator for Iter { 26 | fn next_back(&mut self) -> Option { 27 | if self.chunk.is_empty() { 28 | None 29 | } else { 30 | Some(self.chunk.pop_back()) 31 | } 32 | } 33 | } 34 | 35 | impl ExactSizeIterator for Iter {} 36 | 37 | impl FusedIterator for Iter {} 38 | 39 | /// A draining iterator over the elements of a `Chunk`. 40 | /// 41 | /// "Draining" means that as the iterator yields each element, it's removed from 42 | /// the `Chunk`. When the iterator terminates, the chunk will be empty. This is 43 | /// different from the consuming iterator `Iter` in that `Iter` will take 44 | /// ownership of the `Chunk` and discard it when you're done iterating, while 45 | /// `Drain` leaves you still owning the drained `Chunk`. 46 | pub struct Drain<'a, A, const N: usize> { 47 | pub(crate) chunk: &'a mut Chunk, 48 | } 49 | 50 | impl<'a, A, const N: usize> Iterator for Drain<'a, A, N> 51 | where 52 | A: 'a, 53 | { 54 | type Item = A; 55 | 56 | fn next(&mut self) -> Option { 57 | if self.chunk.is_empty() { 58 | None 59 | } else { 60 | Some(self.chunk.pop_front()) 61 | } 62 | } 63 | 64 | fn size_hint(&self) -> (usize, Option) { 65 | (self.chunk.len(), Some(self.chunk.len())) 66 | } 67 | } 68 | 69 | impl<'a, A, const N: usize> DoubleEndedIterator for Drain<'a, A, N> 70 | where 71 | A: 'a, 72 | { 73 | fn next_back(&mut self) -> Option { 74 | if self.chunk.is_empty() { 75 | None 76 | } else { 77 | Some(self.chunk.pop_back()) 78 | } 79 | } 80 | } 81 | 82 | impl<'a, A, const N: usize> ExactSizeIterator for Drain<'a, A, N> where A: 'a {} 83 | 84 | impl<'a, A, const N: usize> FusedIterator for Drain<'a, A, N> where A: 'a {} 85 | -------------------------------------------------------------------------------- /src/sized_chunk/refpool.rs: -------------------------------------------------------------------------------- 1 | use core::mem::MaybeUninit; 2 | 3 | use ::refpool::{PoolClone, PoolDefault}; 4 | 5 | use crate::Chunk; 6 | 7 | impl PoolDefault for Chunk { 8 | unsafe fn default_uninit(target: &mut MaybeUninit) { 9 | let ptr = target.as_mut_ptr(); 10 | let left_ptr: *mut usize = &mut (*ptr).left; 11 | let right_ptr: *mut usize = &mut (*ptr).right; 12 | left_ptr.write(0); 13 | right_ptr.write(0); 14 | } 15 | } 16 | 17 | impl PoolClone for Chunk 18 | where 19 | A: Clone, 20 | { 21 | unsafe fn clone_uninit(&self, target: &mut MaybeUninit) { 22 | let ptr = target.as_mut_ptr(); 23 | let left_ptr: *mut usize = &mut (*ptr).left; 24 | let right_ptr: *mut usize = &mut (*ptr).right; 25 | let data_ptr: *mut _ = &mut (*ptr).data; 26 | let data_ptr: *mut A = (*data_ptr).as_mut_ptr().cast(); 
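        // (Editorial note) The pool hands us uninitialised memory, so every
        // field must be written through raw pointers; a normal assignment
        // would first try to drop the uninitialised "previous" value.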
27 | left_ptr.write(self.left); 28 | right_ptr.write(self.right); 29 | for index in self.left..self.right { 30 | data_ptr.add(index).write((*self.ptr(index)).clone()); 31 | } 32 | } 33 | } 34 | 35 | #[cfg(test)] 36 | mod test { 37 | use super::*; 38 | use ::refpool::{Pool, PoolRef}; 39 | use std::iter::FromIterator; 40 | 41 | #[test] 42 | fn default_and_clone() { 43 | let pool: Pool> = Pool::new(16); 44 | let mut ref1 = PoolRef::default(&pool); 45 | { 46 | let chunk = PoolRef::make_mut(&pool, &mut ref1); 47 | chunk.push_back(1); 48 | chunk.push_back(2); 49 | chunk.push_back(3); 50 | } 51 | let ref2 = PoolRef::cloned(&pool, &ref1); 52 | let ref3 = PoolRef::clone_from(&pool, &Chunk::from_iter(1..=3)); 53 | assert_eq!(Chunk::::from_iter(1..=3), *ref1); 54 | assert_eq!(Chunk::::from_iter(1..=3), *ref2); 55 | assert_eq!(Chunk::::from_iter(1..=3), *ref3); 56 | assert_eq!(ref1, ref2); 57 | assert_eq!(ref1, ref3); 58 | assert_eq!(ref2, ref3); 59 | assert!(!PoolRef::ptr_eq(&ref1, &ref2)); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/sparse_chunk/iter.rs: -------------------------------------------------------------------------------- 1 | use bitmaps::{Bitmap, Bits, BitsImpl, Iter as BitmapIter}; 2 | 3 | use super::SparseChunk; 4 | 5 | /// An iterator over references to the elements of a `SparseChunk`. 6 | pub struct Iter<'a, A, const N: usize> 7 | where 8 | BitsImpl: Bits, 9 | { 10 | pub(crate) indices: BitmapIter<'a, N>, 11 | pub(crate) chunk: &'a SparseChunk, 12 | } 13 | 14 | impl<'a, A, const N: usize> Iterator for Iter<'a, A, N> 15 | where 16 | BitsImpl: Bits, 17 | { 18 | type Item = &'a A; 19 | 20 | fn next(&mut self) -> Option { 21 | self.indices.next().map(|index| &self.chunk.values()[index]) 22 | } 23 | 24 | fn size_hint(&self) -> (usize, Option) { 25 | (0, Some(SparseChunk::::CAPACITY)) 26 | } 27 | } 28 | 29 | /// An iterator over mutable references to the elements of a `SparseChunk`. 30 | pub struct IterMut<'a, A, const N: usize> 31 | where 32 | BitsImpl: Bits, 33 | { 34 | pub(crate) bitmap: Bitmap, 35 | pub(crate) chunk: &'a mut SparseChunk, 36 | } 37 | 38 | impl<'a, A, const N: usize> Iterator for IterMut<'a, A, N> 39 | where 40 | BitsImpl: Bits, 41 | { 42 | type Item = &'a mut A; 43 | 44 | fn next(&mut self) -> Option { 45 | if let Some(index) = self.bitmap.first_index() { 46 | self.bitmap.set(index, false); 47 | unsafe { 48 | let p: *mut A = &mut self.chunk.values_mut()[index]; 49 | Some(&mut *p) 50 | } 51 | } else { 52 | None 53 | } 54 | } 55 | 56 | fn size_hint(&self) -> (usize, Option) { 57 | (0, Some(SparseChunk::::CAPACITY)) 58 | } 59 | } 60 | 61 | /// A draining iterator over the elements of a `SparseChunk`. 62 | /// 63 | /// "Draining" means that as the iterator yields each element, it's removed from 64 | /// the `SparseChunk`. When the iterator terminates, the chunk will be empty. 65 | pub struct Drain 66 | where 67 | BitsImpl: Bits, 68 | { 69 | pub(crate) chunk: SparseChunk, 70 | } 71 | 72 | impl<'a, A, const N: usize> Iterator for Drain 73 | where 74 | BitsImpl: Bits, 75 | { 76 | type Item = A; 77 | 78 | fn next(&mut self) -> Option { 79 | self.chunk.pop() 80 | } 81 | 82 | fn size_hint(&self) -> (usize, Option) { 83 | let len = self.chunk.len(); 84 | (len, Some(len)) 85 | } 86 | } 87 | 88 | /// An iterator over `Option`s of references to the elements of a `SparseChunk`. 89 | /// 90 | /// Iterates over every index in the `SparseChunk`, from zero to its full capacity, 91 | /// returning an `Option<&A>` for each index. 
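// A minimal usage sketch for the option-iterator family below:
//
//     let chunk = SparseChunk::<i32, 4>::pair(0, 10, 2, 20);
//     let all: Vec<Option<i32>> =
//         chunk.option_iter().map(|v| v.copied()).collect();
//     assert_eq!(vec![Some(10), None, Some(20), None], all);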
92 | pub struct OptionIter<'a, A, const N: usize> 93 | where 94 | BitsImpl: Bits, 95 | { 96 | pub(crate) index: usize, 97 | pub(crate) chunk: &'a SparseChunk, 98 | } 99 | 100 | impl<'a, A, const N: usize> Iterator for OptionIter<'a, A, N> 101 | where 102 | BitsImpl: Bits, 103 | { 104 | type Item = Option<&'a A>; 105 | 106 | fn next(&mut self) -> Option { 107 | if self.index < N { 108 | let result = self.chunk.get(self.index); 109 | self.index += 1; 110 | Some(result) 111 | } else { 112 | None 113 | } 114 | } 115 | 116 | fn size_hint(&self) -> (usize, Option) { 117 | ( 118 | SparseChunk::::CAPACITY - self.index, 119 | Some(SparseChunk::::CAPACITY - self.index), 120 | ) 121 | } 122 | } 123 | 124 | /// An iterator over `Option`s of mutable references to the elements of a `SparseChunk`. 125 | /// 126 | /// Iterates over every index in the `SparseChunk`, from zero to its full capacity, 127 | /// returning an `Option<&mut A>` for each index. 128 | pub struct OptionIterMut<'a, A, const N: usize> 129 | where 130 | BitsImpl: Bits, 131 | { 132 | pub(crate) index: usize, 133 | pub(crate) chunk: &'a mut SparseChunk, 134 | } 135 | 136 | impl<'a, A, const N: usize> Iterator for OptionIterMut<'a, A, N> 137 | where 138 | BitsImpl: Bits, 139 | { 140 | type Item = Option<&'a mut A>; 141 | 142 | fn next(&mut self) -> Option { 143 | if self.index < N { 144 | let result = if self.chunk.map.get(self.index) { 145 | unsafe { 146 | let p: *mut A = &mut self.chunk.values_mut()[self.index]; 147 | Some(Some(&mut *p)) 148 | } 149 | } else { 150 | Some(None) 151 | }; 152 | self.index += 1; 153 | result 154 | } else { 155 | None 156 | } 157 | } 158 | 159 | fn size_hint(&self) -> (usize, Option) { 160 | ( 161 | SparseChunk::::CAPACITY - self.index, 162 | Some(SparseChunk::::CAPACITY - self.index), 163 | ) 164 | } 165 | } 166 | 167 | /// A draining iterator over `Option`s of the elements of a `SparseChunk`. 168 | /// 169 | /// Iterates over every index in the `SparseChunk`, from zero to its full capacity, 170 | /// returning an `Option` for each index. 
171 | pub struct OptionDrain 172 | where 173 | BitsImpl: Bits, 174 | { 175 | pub(crate) index: usize, 176 | pub(crate) chunk: SparseChunk, 177 | } 178 | 179 | impl<'a, A, const N: usize> Iterator for OptionDrain 180 | where 181 | BitsImpl: Bits, 182 | { 183 | type Item = Option; 184 | 185 | fn next(&mut self) -> Option { 186 | if self.index < N { 187 | let result = self.chunk.remove(self.index); 188 | self.index += 1; 189 | Some(result) 190 | } else { 191 | None 192 | } 193 | } 194 | 195 | fn size_hint(&self) -> (usize, Option) { 196 | ( 197 | SparseChunk::::CAPACITY - self.index, 198 | Some(SparseChunk::::CAPACITY - self.index), 199 | ) 200 | } 201 | } 202 | 203 | #[cfg(test)] 204 | mod test { 205 | use super::*; 206 | use std::iter::FromIterator; 207 | 208 | #[test] 209 | fn iter() { 210 | let vec: Vec> = 211 | Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); 212 | let chunk: SparseChunk = vec.iter().cloned().collect(); 213 | let vec: Vec = vec 214 | .iter() 215 | .cloned() 216 | .filter(|v| v.is_some()) 217 | .map(|v| v.unwrap()) 218 | .collect(); 219 | assert!(vec.iter().eq(chunk.iter())); 220 | } 221 | 222 | #[test] 223 | fn iter_mut() { 224 | let vec: Vec> = 225 | Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); 226 | let mut chunk: SparseChunk<_, 64> = vec.iter().cloned().collect(); 227 | let mut vec: Vec = vec 228 | .iter() 229 | .cloned() 230 | .filter(|v| v.is_some()) 231 | .map(|v| v.unwrap()) 232 | .collect(); 233 | assert!(vec.iter_mut().eq(chunk.iter_mut())); 234 | } 235 | 236 | #[test] 237 | fn drain() { 238 | let vec: Vec> = 239 | Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); 240 | let chunk: SparseChunk<_, 64> = vec.iter().cloned().collect(); 241 | let vec: Vec = vec 242 | .iter() 243 | .cloned() 244 | .filter(|v| v.is_some()) 245 | .map(|v| v.unwrap()) 246 | .collect(); 247 | assert!(vec.into_iter().eq(chunk.into_iter())); 248 | } 249 | 250 | #[test] 251 | fn option_iter() { 252 | let vec: Vec> = 253 | Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); 254 | let chunk: SparseChunk<_, 64> = vec.iter().cloned().collect(); 255 | assert!(vec 256 | .iter() 257 | .cloned() 258 | .eq(chunk.option_iter().map(|v| v.cloned()))); 259 | } 260 | 261 | #[test] 262 | fn option_iter_mut() { 263 | let vec: Vec> = 264 | Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); 265 | let mut chunk: SparseChunk<_, 64> = vec.iter().cloned().collect(); 266 | assert!(vec 267 | .iter() 268 | .cloned() 269 | .eq(chunk.option_iter_mut().map(|v| v.cloned()))); 270 | } 271 | 272 | #[test] 273 | fn option_drain() { 274 | let vec: Vec> = 275 | Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None })); 276 | let chunk: SparseChunk<_, 64> = vec.iter().cloned().collect(); 277 | assert!(vec.iter().cloned().eq(chunk.option_drain())); 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /src/sparse_chunk/mod.rs: -------------------------------------------------------------------------------- 1 | // This Source Code Form is subject to the terms of the Mozilla Public 2 | // License, v. 2.0. If a copy of the MPL was not distributed with this 3 | // file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | //! A fixed capacity sparse array. 6 | //! 7 | //! 
See [`SparseChunk`](struct.SparseChunk.html) 8 | 9 | use core::fmt::{Debug, Error, Formatter}; 10 | use core::iter::FromIterator; 11 | use core::mem::{self, MaybeUninit}; 12 | use core::ops::Index; 13 | use core::ops::IndexMut; 14 | use core::ptr; 15 | use core::slice::{from_raw_parts, from_raw_parts_mut}; 16 | 17 | #[cfg(feature = "std")] 18 | use std::collections::{BTreeMap, HashMap}; 19 | 20 | use bitmaps::{Bitmap, Bits, BitsImpl, Iter as BitmapIter}; 21 | 22 | mod iter; 23 | 24 | pub use self::iter::{Drain, Iter, IterMut, OptionDrain, OptionIter, OptionIterMut}; 25 | 26 | #[cfg(feature = "refpool")] 27 | mod refpool; 28 | 29 | /// A fixed capacity sparse array. 30 | /// 31 | /// An inline sparse array of up to `N` items of type `A`. You can think of it as an array 32 | /// of `Option`, where the discriminant (whether the value is `Some` or 33 | /// `None`) is kept in a bitmap instead of adjacent to the value. 34 | /// 35 | /// # Examples 36 | /// 37 | /// ```rust 38 | /// # use sized_chunks::SparseChunk; 39 | /// // Construct a chunk with a 20 item capacity 40 | /// let mut chunk = SparseChunk::::new(); 41 | /// // Set the 18th index to the value 5. 42 | /// chunk.insert(18, 5); 43 | /// // Set the 5th index to the value 23. 44 | /// chunk.insert(5, 23); 45 | /// 46 | /// assert_eq!(chunk.len(), 2); 47 | /// assert_eq!(chunk.get(5), Some(&23)); 48 | /// assert_eq!(chunk.get(6), None); 49 | /// assert_eq!(chunk.get(18), Some(&5)); 50 | /// ``` 51 | pub struct SparseChunk 52 | where 53 | BitsImpl: Bits, 54 | { 55 | map: Bitmap, 56 | data: MaybeUninit<[A; N]>, 57 | } 58 | 59 | impl Drop for SparseChunk 60 | where 61 | BitsImpl: Bits, 62 | { 63 | fn drop(&mut self) { 64 | if mem::needs_drop::() { 65 | let bits = self.map; 66 | for index in &bits { 67 | unsafe { ptr::drop_in_place(&mut self.values_mut()[index]) } 68 | } 69 | } 70 | } 71 | } 72 | 73 | impl Clone for SparseChunk 74 | where 75 | BitsImpl: Bits, 76 | { 77 | fn clone(&self) -> Self { 78 | let mut out = Self::new(); 79 | for index in &self.map { 80 | out.insert(index, self[index].clone()); 81 | } 82 | out 83 | } 84 | } 85 | 86 | impl SparseChunk 87 | where 88 | BitsImpl: Bits, 89 | { 90 | /// The maximum number of elements a `SparseChunk` can contain. 91 | pub const CAPACITY: usize = N; 92 | 93 | #[inline] 94 | fn values(&self) -> &[A] { 95 | unsafe { from_raw_parts(&self.data as *const _ as *const A, N) } 96 | } 97 | 98 | #[inline] 99 | fn values_mut(&mut self) -> &mut [A] { 100 | unsafe { from_raw_parts_mut(&mut self.data as *mut _ as *mut A, N) } 101 | } 102 | 103 | /// Copy the value at an index, discarding ownership of the copied value 104 | #[inline] 105 | unsafe fn force_read(index: usize, chunk: &Self) -> A { 106 | ptr::read(&chunk.values()[index as usize]) 107 | } 108 | 109 | /// Write a value at an index without trying to drop what's already there 110 | #[inline] 111 | unsafe fn force_write(index: usize, value: A, chunk: &mut Self) { 112 | ptr::write(&mut chunk.values_mut()[index as usize], value) 113 | } 114 | 115 | /// Construct a new empty chunk. 116 | pub fn new() -> Self { 117 | Self { 118 | map: Bitmap::default(), 119 | data: MaybeUninit::uninit(), 120 | } 121 | } 122 | 123 | /// Construct a new chunk with one item. 124 | pub fn unit(index: usize, value: A) -> Self { 125 | let mut chunk = Self::new(); 126 | chunk.insert(index, value); 127 | chunk 128 | } 129 | 130 | /// Construct a new chunk with two items. 
    pub fn pair(index1: usize, value1: A, index2: usize, value2: A) -> Self {
        let mut chunk = Self::new();
        chunk.insert(index1, value1);
        chunk.insert(index2, value2);
        chunk
    }

    /// Get the length of the chunk.
    #[inline]
    pub fn len(&self) -> usize {
        self.map.len()
    }

    /// Test if the chunk is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.map.len() == 0
    }

    /// Test if the chunk is at capacity.
    #[inline]
    pub fn is_full(&self) -> bool {
        self.len() == N
    }

    /// Insert a new value at a given index.
    ///
    /// Returns the previous value at that index, if any.
    pub fn insert(&mut self, index: usize, value: A) -> Option<A> {
        if index >= N {
            panic!("SparseChunk::insert: index out of bounds");
        }
        if self.map.set(index, true) {
            Some(mem::replace(&mut self.values_mut()[index], value))
        } else {
            unsafe { SparseChunk::force_write(index, value, self) };
            None
        }
    }

    /// Remove the value at a given index.
    ///
    /// Returns the value, or `None` if the index had no value.
    pub fn remove(&mut self, index: usize) -> Option<A> {
        if index >= N {
            panic!("SparseChunk::remove: index out of bounds");
        }
        if self.map.set(index, false) {
            Some(unsafe { SparseChunk::force_read(index, self) })
        } else {
            None
        }
    }

    /// Remove the first value present in the array.
    ///
    /// Returns the value that was removed, or `None` if the array was empty.
    pub fn pop(&mut self) -> Option<A> {
        self.first_index().and_then(|index| self.remove(index))
    }

    /// Get the value at a given index.
    pub fn get(&self, index: usize) -> Option<&A> {
        if index >= N {
            return None;
        }
        if self.map.get(index) {
            Some(unsafe { self.get_unchecked(index) })
        } else {
            None
        }
    }

    /// Get a mutable reference to the value at a given index.
    pub fn get_mut(&mut self, index: usize) -> Option<&mut A> {
        if index >= N {
            return None;
        }
        if self.map.get(index) {
            Some(unsafe { self.get_unchecked_mut(index) })
        } else {
            None
        }
    }

    /// Get an unchecked reference to the value at a given index.
    ///
    /// # Safety
    ///
    /// Uninhabited indices contain uninitialised data, so make sure you validate
    /// the index before using this method.
    pub unsafe fn get_unchecked(&self, index: usize) -> &A {
        self.values().get_unchecked(index)
    }

    /// Get an unchecked mutable reference to the value at a given index.
    ///
    /// # Safety
    ///
    /// Uninhabited indices contain uninitialised data, so make sure you validate
    /// the index before using this method.
    pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A {
        self.values_mut().get_unchecked_mut(index)
    }

    /// Make an iterator over the indices which contain values.
    pub fn indices(&self) -> BitmapIter<'_, N> {
        self.map.into_iter()
    }

    /// Find the first index which contains a value.
    pub fn first_index(&self) -> Option<usize> {
        self.map.first_index()
    }

    /// Make an iterator of references to the values contained in the array.
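    ///
    /// Values are visited in ascending index order. A small usage sketch
    /// (capacity and values are arbitrary):
    ///
    /// ```rust
    /// # use sized_chunks::SparseChunk;
    /// let chunk = SparseChunk::<i32, 8>::pair(2, 20, 5, 50);
    /// assert_eq!(vec![&20, &50], chunk.iter().collect::<Vec<_>>());
    /// ```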
    pub fn iter(&self) -> Iter<'_, A, N> {
        Iter {
            indices: self.indices(),
            chunk: self,
        }
    }

    /// Make an iterator of mutable references to the values contained in the
    /// array.
    pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
        IterMut {
            bitmap: self.map,
            chunk: self,
        }
    }

    /// Turn the chunk into an iterator over the values contained within it.
    pub fn drain(self) -> Drain<A, N> {
        Drain { chunk: self }
    }

    /// Make an iterator of pairs of indices and references to the values
    /// contained in the array.
    pub fn entries(&self) -> impl Iterator<Item = (usize, &A)> {
        self.indices().zip(self.iter())
    }

    /// Make an iterator of `Option`s of references to the values contained in the array.
    ///
    /// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
    /// returning an `Option<&A>` for each index.
    pub fn option_iter(&self) -> OptionIter<'_, A, N> {
        OptionIter {
            chunk: self,
            index: 0,
        }
    }

    /// Make an iterator of `Option`s of mutable references to the values contained in the array.
    ///
    /// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
    /// returning an `Option<&mut A>` for each index.
    pub fn option_iter_mut(&mut self) -> OptionIterMut<'_, A, N> {
        OptionIterMut {
            chunk: self,
            index: 0,
        }
    }

    /// Make a draining iterator of `Option`s of the values contained in the array.
    ///
    /// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
    /// returning an `Option<A>` for each index.
    pub fn option_drain(self) -> OptionDrain<A, N> {
        OptionDrain {
            chunk: self,
            index: 0,
        }
    }
}

impl<A, const N: usize> Default for SparseChunk<A, N>
where
    BitsImpl<N>: Bits,
{
    fn default() -> Self {
        Self::new()
    }
}

impl<A, const N: usize> Index<usize> for SparseChunk<A, N>
where
    BitsImpl<N>: Bits,
{
    type Output = A;

    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).unwrap()
    }
}

impl<A, const N: usize> IndexMut<usize> for SparseChunk<A, N>
where
    BitsImpl<N>: Bits,
{
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_mut(index).unwrap()
    }
}

impl<A, const N: usize> IntoIterator for SparseChunk<A, N>
where
    BitsImpl<N>: Bits,
{
    type Item = A;
    type IntoIter = Drain<A, N>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.drain()
    }
}

impl<A, const N: usize> FromIterator<Option<A>> for SparseChunk<A, N>
where
    BitsImpl<N>: Bits,
{
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Option<A>>,
    {
        let mut out = Self::new();
        for (index, value) in iter.into_iter().enumerate() {
            if let Some(value) = value {
                out.insert(index, value);
            }
        }
        out
    }
}
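// A minimal sketch of collecting into a `SparseChunk` via the impl above:
// each element's position in the input becomes its index, and only the
// `Some` values are inserted. (The `u8` element type and capacity of 8 are
// arbitrary choices for illustration.)
//
//     let chunk: SparseChunk<u8, 8> =
//         vec![None, Some(1), None, Some(3)].into_iter().collect();
//     assert_eq!(chunk.len(), 2);
//     assert_eq!(chunk.get(1), Some(&1));
//     assert_eq!(chunk.get(3), Some(&3));
//     assert_eq!(chunk.get(0), None);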
impl<A, const N: usize> PartialEq for SparseChunk<A, N>
where
    A: PartialEq,
    BitsImpl<N>: Bits,
{
    fn eq(&self, other: &Self) -> bool {
        if self.map != other.map {
            return false;
        }
        for index in self.indices() {
            if self.get(index) != other.get(index) {
                return false;
            }
        }
        true
    }
}

#[cfg(feature = "std")]
impl<A, const N: usize> PartialEq<BTreeMap<usize, A>> for SparseChunk<A, N>
where
    A: PartialEq,
    BitsImpl<N>: Bits,
{
    fn eq(&self, other: &BTreeMap<usize, A>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        for index in self.indices() {
            if self.get(index) != other.get(&index) {
                return false;
            }
        }
        true
    }
}

#[cfg(feature = "std")]
impl<A, const N: usize> PartialEq<HashMap<usize, A>> for SparseChunk<A, N>
where
    A: PartialEq,
    BitsImpl<N>: Bits,
{
    fn eq(&self, other: &HashMap<usize, A>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        for index in self.indices() {
            if self.get(index) != other.get(&index) {
                return false;
            }
        }
        true
    }
}

impl<A, const N: usize> Eq for SparseChunk<A, N>
where
    A: Eq,
    BitsImpl<N>: Bits,
{
}

impl<A, const N: usize> Debug for SparseChunk<A, N>
where
    A: Debug,
    BitsImpl<N>: Bits,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        f.write_str("SparseChunk")?;
        f.debug_map().entries(self.entries()).finish()
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn insert_remove_iterate() {
        let mut chunk: SparseChunk<_, 32> = SparseChunk::new();
        assert_eq!(None, chunk.insert(5, 5));
        assert_eq!(None, chunk.insert(1, 1));
        assert_eq!(None, chunk.insert(24, 42));
        assert_eq!(None, chunk.insert(22, 22));
        assert_eq!(Some(42), chunk.insert(24, 24));
        assert_eq!(None, chunk.insert(31, 31));
        assert_eq!(Some(24), chunk.remove(24));
        assert_eq!(4, chunk.len());
        let indices: Vec<_> = chunk.indices().collect();
        assert_eq!(vec![1, 5, 22, 31], indices);
        let values: Vec<_> = chunk.into_iter().collect();
        assert_eq!(vec![1, 5, 22, 31], values);
    }

    #[test]
    fn clone_chunk() {
        let mut chunk: SparseChunk<_, 32> = SparseChunk::new();
        assert_eq!(None, chunk.insert(5, 5));
        assert_eq!(None, chunk.insert(1, 1));
        assert_eq!(None, chunk.insert(24, 42));
        assert_eq!(None, chunk.insert(22, 22));
        let cloned = chunk.clone();
        let right_indices: Vec<_> = chunk.indices().collect();
        let left_indices: Vec<_> = cloned.indices().collect();
        let right: Vec<_> = chunk.into_iter().collect();
        let left: Vec<_> = cloned.into_iter().collect();
        assert_eq!(left, right);
        assert_eq!(left_indices, right_indices);
        assert_eq!(vec![1, 5, 22, 24], left_indices);
        assert_eq!(vec![1, 5, 22, 24], right_indices);
    }

    use crate::tests::DropTest;
    use std::sync::atomic::{AtomicUsize, Ordering};

    #[test]
    fn dropping() {
        let counter = AtomicUsize::new(0);
        {
            let mut chunk: SparseChunk<DropTest<'_>, 64> = SparseChunk::new();
            for i in 0..40 {
                chunk.insert(i, DropTest::new(&counter));
            }
            assert_eq!(40, counter.load(Ordering::Relaxed));
            for i in 0..20 {
                chunk.remove(i);
            }
            assert_eq!(20, counter.load(Ordering::Relaxed));
        }
        assert_eq!(0, counter.load(Ordering::Relaxed));
    }

    #[test]
    fn equality() {
        let mut c1 = SparseChunk::<usize, 32>::new();
        for i in 0..32 {
            c1.insert(i, i);
        }
        let mut c2 = c1.clone();
        assert_eq!(c1, c2);
        for i in 4..8 {
            c2.insert(i, 0);
        }
        assert_ne!(c1, c2);
        c2 = c1.clone();
        for i in 0..16 {
            c2.remove(i);
        }
        assert_ne!(c1, c2);
    }
}
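With the `std` feature enabled, the `PartialEq` impls above also let a chunk be compared directly against a map keyed by `usize`. A minimal sketch, assuming the crate is consumed as `sized_chunks` with default features; the element type and values are arbitrary:

```rust
use std::collections::BTreeMap;

use sized_chunks::SparseChunk;

fn main() {
    let chunk = SparseChunk::<u8, 8>::pair(2, 20, 5, 50);
    let mut map = BTreeMap::new();
    map.insert(2usize, 20u8);
    map.insert(5, 50);
    // Equal because the same indices hold the same values.
    assert_eq!(chunk, map);
}
```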
--------------------------------------------------------------------------------
/src/sparse_chunk/refpool.rs:
--------------------------------------------------------------------------------
use core::mem::MaybeUninit;

use bitmaps::{Bitmap, Bits, BitsImpl};

use ::refpool::{PoolClone, PoolDefault};

use crate::SparseChunk;

impl<A, const N: usize> PoolDefault for SparseChunk<A, N>
where
    BitsImpl<N>: Bits,
{
    unsafe fn default_uninit(target: &mut MaybeUninit<Self>) {
        let ptr = target.as_mut_ptr();
        let map_ptr: *mut Bitmap<N> = &mut (*ptr).map;
        map_ptr.write(Bitmap::new());
    }
}

impl<A, const N: usize> PoolClone for SparseChunk<A, N>
where
    A: Clone,
    BitsImpl<N>: Bits,
{
    unsafe fn clone_uninit(&self, target: &mut MaybeUninit<Self>) {
        let ptr = target.as_mut_ptr();
        let map_ptr: *mut Bitmap<N> = &mut (*ptr).map;
        let data_ptr: *mut _ = &mut (*ptr).data;
        let data_ptr: *mut A = (*data_ptr).as_mut_ptr().cast();
        map_ptr.write(self.map);
        for index in &self.map {
            data_ptr.add(index).write(self[index].clone());
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use ::refpool::{Pool, PoolRef};

    #[test]
    fn default_and_clone() {
        let pool: Pool<SparseChunk<usize, 64>> = Pool::new(16);
        let mut ref1 = PoolRef::default(&pool);
        {
            let chunk = PoolRef::make_mut(&pool, &mut ref1);
            chunk.insert(5, 13);
            chunk.insert(10, 37);
            chunk.insert(31, 337);
        }
        let ref2 = PoolRef::cloned(&pool, &ref1);
        assert_eq!(ref1, ref2);
        assert!(!PoolRef::ptr_eq(&ref1, &ref2));
    }
}
--------------------------------------------------------------------------------
/src/tests.rs:
--------------------------------------------------------------------------------
use std::sync::atomic::{AtomicUsize, Ordering};

pub(crate) struct DropTest<'a> {
    counter: &'a AtomicUsize,
}

impl<'a> DropTest<'a> {
    pub(crate) fn new(counter: &'a AtomicUsize) -> Self {
        counter.fetch_add(1, Ordering::Relaxed);
        DropTest { counter }
    }
}

impl<'a> Drop for DropTest<'a> {
    fn drop(&mut self) {
        self.counter.fetch_sub(1, Ordering::Relaxed);
    }
}
--------------------------------------------------------------------------------
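A sketch of how the `DropTest` helper keeps its counter balanced: `new` increments the shared counter and `Drop` decrements it, so any scope that constructs and then drops instances should return it to zero. This test is hypothetical (the name is illustrative, and since the type is `pub(crate)` it would have to live inside the crate):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

use crate::tests::DropTest; // only visible inside the crate

#[test]
fn drop_test_counter_is_balanced() {
    let counter = AtomicUsize::new(0);
    {
        // Each `new` bumps the shared counter by one.
        let _a = DropTest::new(&counter);
        let _b = DropTest::new(&counter);
        assert_eq!(2, counter.load(Ordering::Relaxed));
    }
    // Dropping both instances at the end of the block brings it back to zero.
    assert_eq!(0, counter.load(Ordering::Relaxed));
}
```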