├── .github └── workflows │ ├── cache-padded-updates.yml │ └── main.yml ├── .gitignore ├── .gitmodules ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches ├── single_thread_single_byte.rs ├── single_thread_two_bytes.rs ├── single_thread_with_chunks.rs └── two_threads.rs ├── performance-comparison ├── Cargo.toml ├── benches │ └── two_threads.rs └── src │ └── lib.rs ├── src ├── cache_padded.rs ├── chunks.rs └── lib.rs └── tests ├── chunks.rs ├── lib.rs └── write_and_read.rs /.github/workflows/cache-padded-updates.yml: -------------------------------------------------------------------------------- 1 | name: Check for changes in cache_padded.rs 2 | on: 3 | schedule: 4 | - cron: "11 11 * * Mon" 5 | jobs: 6 | check-cache-padded: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Clone Git repository 10 | uses: actions/checkout@v3 11 | with: 12 | submodules: true 13 | - name: Fetch newest stuff from crossbeam submodule 14 | working-directory: crossbeam 15 | run: | 16 | git fetch origin master 17 | - name: Check for changes in cache_padded.rs 18 | working-directory: crossbeam 19 | run: | 20 | git diff origin/master --exit-code -- crossbeam-utils/src/cache_padded.rs 21 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Run tests, check code 2 | on: [push, pull_request] 3 | env: 4 | CARGO_TERM_COLOR: always 5 | RUST_BACKTRACE: 1 6 | RUST_LOG: info 7 | jobs: 8 | tests: 9 | strategy: 10 | matrix: 11 | include: 12 | - os: macos-latest 13 | - os: windows-latest 14 | toolchain-suffix: -gnu 15 | - os: windows-latest 16 | toolchain-suffix: -msvc 17 | - os: ubuntu-latest 18 | runs-on: ${{ matrix.os }} 19 | steps: 20 | - name: Clone Git repository 21 | uses: actions/checkout@v3 22 | with: 23 | submodules: true 24 | - name: Install Rust 25 | uses: dtolnay/rust-toolchain@master 26 | with: 27 | toolchain: 
stable${{ matrix.toolchain-suffix }} 28 | - name: Run tests (all features) 29 | run: | 30 | cargo test --workspace --all-features 31 | - name: Run tests (no features) 32 | run: | 33 | cargo test --workspace --no-default-features 34 | - name: Test benchmarks 35 | run: | 36 | cargo test --benches 37 | 38 | msrv: 39 | strategy: 40 | matrix: 41 | rust-version: ["1.38"] 42 | runs-on: ubuntu-latest 43 | steps: 44 | - name: Clone Git repository 45 | uses: actions/checkout@v3 46 | with: 47 | submodules: true 48 | - name: Install Rust version ${{ matrix.rust-version }} 49 | uses: dtolnay/rust-toolchain@master 50 | with: 51 | toolchain: ${{ matrix.rust-version }} 52 | - name: Check whether it compiles 53 | run: | 54 | cargo check --all-features --verbose 55 | 56 | check-code: 57 | runs-on: ubuntu-latest 58 | steps: 59 | - name: Clone Git repository 60 | uses: actions/checkout@v3 61 | with: 62 | submodules: true 63 | - name: Install Rust 64 | uses: dtolnay/rust-toolchain@master 65 | with: 66 | toolchain: stable 67 | components: rustfmt, clippy 68 | - name: rustfmt 69 | run: | 70 | cargo fmt --all --check 71 | - name: clippy 72 | run: | 73 | cargo clippy --all-targets --all-features -- -D warnings 74 | - name: Build docs 75 | run: | 76 | cargo rustdoc --all-features -- -D warnings 77 | - name: Upload docs 78 | uses: actions/upload-artifact@v3 79 | with: 80 | name: docs 81 | path: target/doc/* 82 | - name: packaging 83 | run: | 84 | cargo package --verbose 85 | 86 | no_std: 87 | runs-on: ubuntu-latest 88 | steps: 89 | - name: Clone Git repository 90 | uses: actions/checkout@v3 91 | with: 92 | submodules: true 93 | - name: Install Rust 94 | uses: dtolnay/rust-toolchain@master 95 | with: 96 | toolchain: stable 97 | - name: Install cargo-nono 98 | # --locked is recommended, see https://github.com/hobofan/cargo-nono/pull/68 99 | run: | 100 | cargo install cargo-nono --locked 101 | - name: Check no_std compatibility 102 | run: | 103 | cargo nono check 104 | 105 | miri: 106 | 
runs-on: ubuntu-latest 107 | steps: 108 | - name: Clone Git repository 109 | uses: actions/checkout@v3 110 | with: 111 | submodules: true 112 | - name: Install Rust 113 | uses: dtolnay/rust-toolchain@master 114 | with: 115 | toolchain: nightly 116 | components: miri 117 | - name: Run Miri 118 | run: | 119 | cargo miri test 120 | - name: Run Miri again (with miri-preemption-rate=0) 121 | env: 122 | MIRIFLAGS: "-Zmiri-preemption-rate=0" 123 | # For now, this is only run on one test, see https://github.com/mgeier/rtrb/issues/114 124 | run: | 125 | cargo miri test no_race_with_is_abandoned 126 | 127 | thread-sanitizer: 128 | runs-on: ubuntu-latest 129 | steps: 130 | - name: Clone Git repository 131 | uses: actions/checkout@v3 132 | with: 133 | submodules: true 134 | - name: Install Rust 135 | uses: dtolnay/rust-toolchain@master 136 | with: 137 | toolchain: nightly 138 | components: rust-src 139 | - name: Run ThreadSanitizer 140 | env: 141 | RUSTFLAGS: "-Z sanitizer=thread" 142 | # "no_race_with_is_abandoned" is skipped because ThreadSanitizer 143 | # reports false positives when using standalone fences, 144 | # see https://github.com/google/sanitizers/issues/1415. 
145 | run: | 146 | cargo test --tests -Z build-std --target x86_64-unknown-linux-gnu -- --skip no_race_with_is_abandoned 147 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target/ 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "crossbeam"] 2 | path = crossbeam 3 | url = https://github.com/crossbeam-rs/crossbeam.git 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rtrb" 3 | version = "0.3.2" 4 | authors = [ 5 | "Stjepan Glavina ", 6 | "Matthias Geier ", 7 | ] 8 | repository = "https://github.com/mgeier/rtrb" 9 | description = "A realtime-safe single-producer single-consumer ring buffer" 10 | readme = "README.md" 11 | categories = ["concurrency", "data-structures", "no-std"] 12 | keywords = ["lock-free", "wait-free", "spsc", "queue"] 13 | license = "MIT OR Apache-2.0" 14 | edition = "2018" 15 | rust-version = "1.38" 16 | 17 | exclude = [ 18 | ".*", 19 | ] 20 | 21 | [features] 22 | default = ["std"] 23 | std = [] 24 | 25 | [dev-dependencies] 26 | rand = "0.8" 27 | criterion = "0.3" 28 | # TODO: This is only needed for the doctests of cache_padded.rs! Is there a way to avoid this? 
29 | crossbeam-utils = { version = "0.8", default-features = false } 30 | 31 | # aggressive optimization for benchmarks 32 | [profile.bench] 33 | lto = true 34 | opt-level = 3 35 | codegen-units = 1 36 | 37 | [lib] 38 | bench = false # Don't disturb criterion command line parsing 39 | 40 | [[bench]] 41 | name = "single_thread_single_byte" 42 | harness = false 43 | 44 | [[bench]] 45 | name = "single_thread_two_bytes" 46 | harness = false 47 | 48 | [[bench]] 49 | name = "single_thread_with_chunks" 50 | harness = false 51 | 52 | [[bench]] 53 | name = "two_threads" 54 | harness = false 55 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Real-Time Ring Buffer 2 | ===================== 3 | 4 | A wait-free single-producer single-consumer (SPSC) ring buffer for Rust. 5 | 6 | * Crate: https://crates.io/crates/rtrb 7 | * Documentation: https://docs.rs/rtrb 8 | 9 | This crate can be used without the standard library (`#![no_std]`) 10 | by disabling the `std` feature (which is enabled by default), 11 | but the [alloc](https://doc.rust-lang.org/alloc/) crate is needed nevertheless. 12 | 13 | 14 | Usage 15 | ----- 16 | 17 | Add this to your `Cargo.toml`: 18 | 19 | ```toml 20 | [dependencies] 21 | rtrb = "0.3" 22 | ``` 23 | 24 | 25 | Breaking Changes 26 | ---------------- 27 | 28 | For a list of breaking changes 29 | and for instructions how to upgrade between released versions, 30 | have a look at the [changelog](https://github.com/mgeier/rtrb/releases). 31 | 32 | 33 | Performance 34 | ----------- 35 | 36 | Measuring the performance of a data structure for inter-thread communication 37 | can be quite brittle and the results depend on many factors. 38 | A few performance comparisons between competing crates are shown in 39 | [issue #39](https://github.com/mgeier/rtrb/issues/39), 40 | but like all benchmarks, they are deeply flawed and to be taken with a grain of salt. 
41 | You should make your own measurements that are relevant to your usage patterns. 42 | Feel free to share your own results by commenting on that issue. 43 | 44 | 45 | Development 46 | ----------- 47 | 48 | Creating the HTML docs (which will be available in `target/doc/rtrb/index.html`): 49 | 50 | cargo doc 51 | 52 | Running the tests: 53 | 54 | cargo test 55 | 56 | Testing the benchmarks (without actually benchmarking): 57 | 58 | cargo test --benches 59 | 60 | Running the benchmarks (using the [criterion](https://docs.rs/criterion/) crate; 61 | results will be available in `target/criterion/report/index.html`): 62 | 63 | cargo bench 64 | 65 | Creating [flame graphs](https://github.com/flamegraph-rs/flamegraph) for the benchmarks; 66 | first a few preparations: 67 | 68 | cargo install flamegraph 69 | echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid 70 | export CARGO_PROFILE_BENCH_DEBUG=true 71 | 72 | Then, creating the flame graph (which will be saved to `flamegraph.svg`), 73 | providing a benchmark (e.g. `two_threads`), a desired runtime and optionally 74 | a benchmark function (e.g. `large`): 75 | 76 | cargo flamegraph --bench two_threads -- --bench --profile-time 10 large 77 | 78 | To measure code coverage, nightly Rust is required, as well as a few additional dependencies: 79 | 80 | rustup toolchain install nightly 81 | rustup component add llvm-tools-preview 82 | cargo install grcov 83 | 84 | Test coverage data can be obtained and analyzed with these commands: 85 | 86 | cargo clean 87 | RUSTFLAGS="-Z instrument-coverage" RUSTDOCFLAGS="-Z instrument-coverage -Z unstable-options --persist-doctests target/debug/doctestbins" LLVM_PROFILE_FILE="coverage/%p-%m.profraw" cargo +nightly test 88 | grcov coverage --source-dir . --binary-path target/debug --output-type html --output-path coverage 89 | 90 | The last command creates an HTML report in `coverage/index.html`. 
91 | 92 | Testing with Miri also needs nightly Rust: 93 | 94 | cargo +nightly miri test 95 | 96 | This Miri flag should also be tried: 97 | 98 | MIRIFLAGS="-Zmiri-preemption-rate=0" cargo +nightly miri test 99 | 100 | Running the tests with ThreadSanitizer requires nightly Rust as well: 101 | 102 | RUSTFLAGS="-Z sanitizer=thread" cargo +nightly test --tests -Z build-std --target x86_64-unknown-linux-gnu 103 | 104 | You might have to adapt the `--target` option to your system (see e.g. `rustup show`). 105 | 106 | 107 | Minimum Supported `rustc` Version 108 | --------------------------------- 109 | 110 | This crate's minimum supported `rustc` version (MSRV) is `1.38.0`. 111 | The MSRV is not expected to be updated frequently, but if it is, 112 | there will be (at least) a *minor* version bump. 113 | 114 | 115 | Origin Story 116 | ------------ 117 | 118 | The initial code has been ripped off of https://github.com/crossbeam-rs/crossbeam/pull/338, 119 | with permission of the PR author. 120 | 121 | It has been isolated from the rest of `crossbeam` with [git-filter-repo]: 122 | 123 | git-filter-repo --subdirectory-filter crossbeam-queue --path src/spsc.rs --path tests/spsc.rs --refs refs/heads/spsc 124 | 125 | [git-filter-repo]: https://github.com/newren/git-filter-repo 126 | 127 | 128 | Alternatives 129 | ------------ 130 | 131 | If you don't like this crate, no problem, there are several alternatives for you to choose from. 
132 | There are many varieties of ring buffers available, here we limit the selection 133 | to wait-free SPSC implementations: 134 | 135 | * [ach-spsc](https://crates.io/crates/ach-spsc) (using const generics) 136 | * [heapless](https://crates.io/crates/heapless) (for embedded systems, see `heapless::spsc`) 137 | * [jack](https://crates.io/crates/jack) (FFI bindings for JACK, see `jack::Ringbuffer`) 138 | * [magnetic](https://crates.io/crates/magnetic) (see `magnetic::spsc` module) 139 | * [npnc](https://crates.io/crates/npnc) (see `npnc::bounded::spsc` module) 140 | * [omango](https://crates.io/crates/omango) (see `omango::queue::spsc::bounded()`) 141 | * [ringbuf](https://crates.io/crates/ringbuf) (supports const generics and heap allocation) 142 | * [ringbuffer-spsc](https://crates.io/crates/ringbuffer-spsc) (using const generics) 143 | * [shmem-ipc](https://crates.io/crates/shmem-ipc) (see `shmem_ipc::sharedring` and `shmem_ipc::ringbuf` modules) 144 | 145 | There are also implementations in other languages: 146 | 147 | * [boost::lockfree::spsc_queue](https://www.boost.org/doc/libs/master/doc/html/boost/lockfree/spsc_queue.html) (C++) 148 | * [folly::ProducerConsumerQueue](https://github.com/facebook/folly/blob/main/folly/docs/ProducerConsumerQueue.md) (C++) 149 | * [JACK ring buffer](https://jackaudio.org/api/ringbuffer_8h.html) (C) 150 | * [PortAudio ring buffer](http://portaudio.com/docs/v19-doxydocs-dev/pa__ringbuffer_8h.html) (C) 151 | * [readerwriterqueue](https://github.com/cameron314/readerwriterqueue) (C++) 152 | * [ringbuf.js](https://github.com/padenot/ringbuf.js) (JavaScript, using `SharedArrayBuffer`) 153 | * [SPSCQueue](https://github.com/rigtorp/SPSCQueue) (C++) 154 | 155 | If you know more alternatives for this list, 156 | please [open an issue](https://github.com/mgeier/rtrb/issues). 
157 | 158 | 159 | License 160 | ------- 161 | 162 | Licensed under either of 163 | 164 | * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 165 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 166 | 167 | at your option. 168 | 169 | Note that this crate contains a copy of the file `cache_padded.rs` from 170 | https://github.com/crossbeam-rs/crossbeam. 171 | 172 | #### Contribution 173 | 174 | Unless you explicitly state otherwise, any contribution intentionally submitted 175 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 176 | dual licensed as above, without any additional terms or conditions. 177 | -------------------------------------------------------------------------------- /benches/single_thread_single_byte.rs: -------------------------------------------------------------------------------- 1 | //! Single-threaded benchmarks, pushing and popping a single byte using a single-element queue. 2 | //! 3 | //! This is *not* a typical use case but it should nevertheless be useful 4 | //! for comparing the overhead of different methods. 
5 | 6 | use criterion::{black_box, criterion_group, criterion_main}; 7 | use criterion::{AxisScale, PlotConfiguration}; 8 | 9 | use rtrb::RingBuffer; 10 | 11 | fn add_function(group: &mut criterion::BenchmarkGroup, id: impl Into, mut f: F) 12 | where 13 | F: FnMut(u8) -> u8, 14 | M: criterion::measurement::Measurement, 15 | { 16 | group.bench_function(id, |b| { 17 | let mut i = 0; 18 | b.iter(|| { 19 | assert_eq!(f(black_box(i)), black_box(i)); 20 | i = i.wrapping_add(1); 21 | }); 22 | }); 23 | } 24 | 25 | pub fn criterion_benchmark(criterion: &mut criterion::Criterion) { 26 | let mut group = criterion.benchmark_group("single-thread-single-byte"); 27 | group.throughput(criterion::Throughput::Bytes(1)); 28 | group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); 29 | 30 | let mut v = Vec::::with_capacity(1); 31 | add_function(&mut group, "0-vec", |i| { 32 | v.push(i); 33 | v.pop().unwrap() 34 | }); 35 | 36 | let (mut p, mut c) = RingBuffer::::new(1); 37 | 38 | add_function(&mut group, "1-push-pop", |i| { 39 | p.push(i).unwrap(); 40 | c.pop().unwrap() 41 | }); 42 | 43 | group.finish(); 44 | } 45 | 46 | criterion_group!(benches, criterion_benchmark); 47 | criterion_main!(benches); 48 | -------------------------------------------------------------------------------- /benches/single_thread_two_bytes.rs: -------------------------------------------------------------------------------- 1 | //! Single-threaded benchmarks, writing and reading two bytes using a three-element queue. 2 | //! 3 | //! This is *not* a typical use case but it should nevertheless be useful 4 | //! for comparing the overhead of different methods. 5 | //! Writing two elements to a three-element queue makes sure 6 | //! that there is a ring buffer wrap-around every second time. 
7 | 8 | use std::io::{Read, Write}; 9 | 10 | use criterion::{black_box, criterion_group, criterion_main}; 11 | use criterion::{AxisScale, PlotConfiguration}; 12 | 13 | use rtrb::{CopyToUninit, RingBuffer}; 14 | 15 | fn add_function(group: &mut criterion::BenchmarkGroup, id: impl Into, mut f: F) 16 | where 17 | F: FnMut(&[u8]) -> [u8; 2], 18 | M: criterion::measurement::Measurement, 19 | { 20 | group.bench_function(id, |b| { 21 | let mut i: u8 = 0; 22 | b.iter_batched( 23 | || { 24 | let mut data = [i, 0]; 25 | i = i.wrapping_add(1); 26 | data[1] = i; 27 | i = i.wrapping_add(1); 28 | data 29 | }, 30 | |data| { 31 | assert_eq!(f(black_box(&data)), black_box(data)); 32 | }, 33 | criterion::BatchSize::SmallInput, 34 | ); 35 | }); 36 | } 37 | 38 | pub fn criterion_benchmark(criterion: &mut criterion::Criterion) { 39 | let mut group = criterion.benchmark_group("single-thread-two-bytes"); 40 | group.throughput(criterion::Throughput::Bytes(2)); 41 | group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); 42 | 43 | let (mut p, mut c) = RingBuffer::::new(3); 44 | 45 | add_function(&mut group, "1-push-pop", |data| { 46 | let mut result = [0; 2]; 47 | for &i in data.iter() { 48 | p.push(i).unwrap(); 49 | } 50 | for i in &mut result { 51 | *i = c.pop().unwrap(); 52 | } 53 | result 54 | }); 55 | 56 | add_function(&mut group, "2-slice-read", |data| { 57 | let mut result = [0; 2]; 58 | for &i in data.iter() { 59 | p.push(i).unwrap(); 60 | } 61 | let chunk = c.read_chunk(data.len()).unwrap(); 62 | let (first, second) = chunk.as_slices(); 63 | let mid = first.len(); 64 | result[..mid].copy_from_slice(first); 65 | result[mid..].copy_from_slice(second); 66 | chunk.commit_all(); 67 | result 68 | }); 69 | 70 | add_function(&mut group, "2-slice-write", |data| { 71 | let mut result = [0; 2]; 72 | let mut chunk = p.write_chunk(data.len()).unwrap(); 73 | let (first, second) = chunk.as_mut_slices(); 74 | let mid = first.len(); 75 | 
first.copy_from_slice(&data[..mid]); 76 | second.copy_from_slice(&data[mid..]); 77 | chunk.commit_all(); 78 | for i in &mut result { 79 | *i = c.pop().unwrap(); 80 | } 81 | result 82 | }); 83 | 84 | add_function(&mut group, "2-slice-write-uninit", |data| { 85 | let mut result = [0; 2]; 86 | let mut chunk = p.write_chunk_uninit(data.len()).unwrap(); 87 | let (first, second) = chunk.as_mut_slices(); 88 | let mid = first.len(); 89 | data[..mid].copy_to_uninit(first); 90 | data[mid..].copy_to_uninit(second); 91 | unsafe { 92 | chunk.commit_all(); 93 | } 94 | for i in &mut result { 95 | *i = c.pop().unwrap(); 96 | } 97 | result 98 | }); 99 | 100 | add_function(&mut group, "3-iterate-read", |data| { 101 | let mut result = [0; 2]; 102 | for &i in data.iter() { 103 | p.push(i).unwrap(); 104 | } 105 | let chunk = c.read_chunk(data.len()).unwrap(); 106 | for (dst, src) in result.iter_mut().zip(chunk) { 107 | *dst = src; 108 | } 109 | result 110 | }); 111 | 112 | add_function(&mut group, "3-iterate-write", |data| { 113 | let mut result = [0; 2]; 114 | let chunk = p.write_chunk_uninit(data.len()).unwrap(); 115 | chunk.fill_from_iter(&mut data.iter().copied()); 116 | for i in &mut result { 117 | *i = c.pop().unwrap(); 118 | } 119 | result 120 | }); 121 | 122 | add_function(&mut group, "4-read", |data| { 123 | let mut result = [0; 2]; 124 | for &i in data.iter() { 125 | p.push(i).unwrap(); 126 | } 127 | let _ = c.read(&mut result).unwrap(); 128 | result 129 | }); 130 | 131 | add_function(&mut group, "4-write", |data| { 132 | let mut result = [0; 2]; 133 | let _ = p.write(data).unwrap(); 134 | for i in &mut result { 135 | *i = c.pop().unwrap(); 136 | } 137 | result 138 | }); 139 | 140 | group.finish(); 141 | } 142 | 143 | criterion_group!(benches, criterion_benchmark); 144 | criterion_main!(benches); 145 | -------------------------------------------------------------------------------- /benches/single_thread_with_chunks.rs: 
-------------------------------------------------------------------------------- 1 | //! Single-threaded benchmarks, writing and reading chunks. 2 | //! 3 | //! Single-threaded usage is *not* a typical use case! 4 | 5 | use std::io::{Read, Write}; 6 | 7 | use criterion::{black_box, criterion_group, criterion_main}; 8 | use criterion::{AxisScale, PlotConfiguration}; 9 | 10 | use rtrb::{CopyToUninit, RingBuffer}; 11 | 12 | const CHUNK_SIZE: usize = 64; 13 | 14 | fn add_function(group: &mut criterion::BenchmarkGroup, id: impl Into, mut f: F) 15 | where 16 | F: FnMut(&[u8]) -> [u8; CHUNK_SIZE], 17 | M: criterion::measurement::Measurement, 18 | { 19 | group.bench_function(id, |b| { 20 | let mut data = [0; CHUNK_SIZE]; 21 | let mut i: u8 = 0; 22 | for dst in data.iter_mut() { 23 | *dst = i; 24 | i = i.wrapping_add(1); 25 | } 26 | b.iter(|| { 27 | assert_eq!(f(black_box(&data)), data); 28 | }); 29 | }); 30 | } 31 | 32 | pub fn criterion_benchmark(criterion: &mut criterion::Criterion) { 33 | let mut group = criterion.benchmark_group("single-thread-with-chunks"); 34 | group.throughput(criterion::Throughput::Bytes(CHUNK_SIZE as u64)); 35 | group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); 36 | 37 | let (mut p, mut c) = RingBuffer::::new(CHUNK_SIZE + 1); 38 | 39 | add_function(&mut group, "2-slice-read", |data| { 40 | let mut result = [0; CHUNK_SIZE]; 41 | let _ = p.write(data).unwrap(); 42 | let chunk = c.read_chunk(data.len()).unwrap(); 43 | let (first, second) = chunk.as_slices(); 44 | let mid = first.len(); 45 | result[..mid].copy_from_slice(first); 46 | result[mid..].copy_from_slice(second); 47 | chunk.commit_all(); 48 | result 49 | }); 50 | 51 | add_function(&mut group, "2-slice-write", |data| { 52 | let mut result = [0; CHUNK_SIZE]; 53 | let mut chunk = p.write_chunk(data.len()).unwrap(); 54 | let (first, second) = chunk.as_mut_slices(); 55 | let mid = first.len(); 56 | first.copy_from_slice(&data[..mid]); 57 | 
second.copy_from_slice(&data[mid..]); 58 | chunk.commit_all(); 59 | let _ = c.read(&mut result).unwrap(); 60 | result 61 | }); 62 | 63 | add_function(&mut group, "2-slice-write-uninit", |data| { 64 | let mut result = [0; CHUNK_SIZE]; 65 | let mut chunk = p.write_chunk_uninit(data.len()).unwrap(); 66 | let (first, second) = chunk.as_mut_slices(); 67 | let mid = first.len(); 68 | data[..mid].copy_to_uninit(first); 69 | data[mid..].copy_to_uninit(second); 70 | unsafe { 71 | chunk.commit_all(); 72 | } 73 | let _ = c.read(&mut result).unwrap(); 74 | result 75 | }); 76 | 77 | add_function(&mut group, "3-iterate-read", |data| { 78 | let mut result = [0; CHUNK_SIZE]; 79 | let _ = p.write(data).unwrap(); 80 | let chunk = c.read_chunk(data.len()).unwrap(); 81 | for (dst, src) in result.iter_mut().zip(chunk) { 82 | *dst = src; 83 | } 84 | result 85 | }); 86 | 87 | add_function(&mut group, "3-iterate-write", |data| { 88 | let mut result = [0; CHUNK_SIZE]; 89 | let chunk = p.write_chunk_uninit(data.len()).unwrap(); 90 | chunk.fill_from_iter(&mut data.iter().copied()); 91 | let _ = c.read(&mut result).unwrap(); 92 | result 93 | }); 94 | 95 | add_function(&mut group, "4-write-read", |data| { 96 | let mut result = [0; CHUNK_SIZE]; 97 | let _ = p.write(data).unwrap(); 98 | let _ = c.read(&mut result).unwrap(); 99 | result 100 | }); 101 | 102 | group.finish(); 103 | } 104 | 105 | criterion_group!(benches, criterion_benchmark); 106 | criterion_main!(benches); 107 | -------------------------------------------------------------------------------- /benches/two_threads.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
create_two_threads_benchmark { 2 | ($($id:literal, $create:expr, $push:expr, $pop:expr);+) => { 3 | 4 | use std::convert::TryInto as _; 5 | use std::sync::{Arc, Barrier}; 6 | 7 | use criterion::{black_box, criterion_group, criterion_main}; 8 | 9 | fn help_with_type_inference(create: Create, push: Push, pop: Pop) -> (Create, Push, Pop) 10 | where 11 | Create: Fn(usize) -> (P, C), 12 | Push: Fn(&mut P, u8) -> bool, 13 | Pop: Fn(&mut C) -> Option, 14 | { 15 | (create, push, pop) 16 | } 17 | 18 | #[allow(unused)] 19 | fn criterion_benchmark(criterion: &mut criterion::Criterion) { 20 | $( 21 | let (create, push, pop) = help_with_type_inference($create, $push, $pop); 22 | // Just a quick check if the ring buffer works as expected: 23 | let (mut p, mut c) = create(2); 24 | assert!(pop(&mut c).is_none()); 25 | assert!(push(&mut p, 1)); 26 | assert!(push(&mut p, 2)); 27 | assert!(!push(&mut p, 3)); 28 | assert_eq!(pop(&mut c).unwrap(), 1); 29 | assert_eq!(pop(&mut c).unwrap(), 2); 30 | assert!(pop(&mut c).is_none()); 31 | )+ 32 | 33 | let mut group_large = criterion.benchmark_group("two-threads-large"); 34 | group_large.throughput(criterion::Throughput::Bytes(1)); 35 | $( 36 | group_large.bench_function($id, |b| { 37 | b.iter_custom(|iters| { 38 | let (create, push, pop) = help_with_type_inference($create, $push, $pop); 39 | // Queue is so long that there is no contention between threads. 
40 | let (mut p, mut c) = create((2 * iters).try_into().unwrap()); 41 | for i in 0..iters { 42 | push(&mut p, i as u8); 43 | } 44 | let barrier = Arc::new(Barrier::new(3)); 45 | let push_thread = { 46 | let barrier = Arc::clone(&barrier); 47 | std::thread::spawn(move || { 48 | barrier.wait(); 49 | let start_pushing = std::time::Instant::now(); 50 | for i in 0..iters { 51 | // NB: This conversion truncates: 52 | push(&mut p, i as u8); 53 | } 54 | let stop_pushing = std::time::Instant::now(); 55 | (start_pushing, stop_pushing) 56 | }) 57 | }; 58 | let trigger_thread = { 59 | let barrier = Arc::clone(&barrier); 60 | std::thread::spawn(move || { 61 | // Try to force other threads to go to sleep on barrier. 62 | std::thread::yield_now(); 63 | std::thread::yield_now(); 64 | std::thread::yield_now(); 65 | barrier.wait(); 66 | // Hopefully, the other two threads now wake up at the same time. 67 | }) 68 | }; 69 | barrier.wait(); 70 | let start_popping = std::time::Instant::now(); 71 | for _ in 0..iters { 72 | black_box(pop(&mut c)); 73 | } 74 | let stop_popping = std::time::Instant::now(); 75 | let (start_pushing, stop_pushing) = push_thread.join().unwrap(); 76 | trigger_thread.join().unwrap(); 77 | let total = stop_pushing 78 | .max(stop_popping) 79 | .duration_since(start_pushing.min(start_popping)); 80 | 81 | /* 82 | if start_pushing < start_popping { 83 | println!( 84 | "popping started {:?} after pushing", 85 | start_popping.duration_since(start_pushing) 86 | ); 87 | } else { 88 | println!( 89 | "pushing started {:?} after popping", 90 | start_pushing.duration_since(start_popping) 91 | ); 92 | } 93 | */ 94 | 95 | // The goal is that both threads are finished at around the same time. 96 | // This can be checked with the following output. 
97 | /* 98 | if stop_pushing < stop_popping { 99 | let diff = stop_popping.duration_since(stop_pushing); 100 | println!( 101 | "popping stopped {diff:?} after pushing ({:.1}% of total time)", 102 | (diff.as_secs_f64() / total.as_secs_f64()) * 100.0 103 | ); 104 | } else { 105 | let diff = stop_pushing.duration_since(stop_popping); 106 | println!( 107 | "pushing stopped {diff:?} after popping ({:.1}% of total time)", 108 | (diff.as_secs_f64() / total.as_secs_f64()) * 100.0 109 | ); 110 | } 111 | */ 112 | 113 | #[allow(clippy::let_and_return)] 114 | total 115 | }); 116 | }); 117 | )+ 118 | group_large.finish(); 119 | 120 | let mut group_small = criterion.benchmark_group("two-threads-small"); 121 | group_small.throughput(criterion::Throughput::Bytes(1)); 122 | $( 123 | group_small.bench_function($id, |b| { 124 | b.iter_custom(|iters| { 125 | let (create, push, pop) = help_with_type_inference($create, $push, $pop); 126 | // Queue is very short in order to force a lot of contention between threads. 127 | let (mut p, mut c) = create(2); 128 | let push_thread = { 129 | std::thread::spawn(move || { 130 | // The timing starts once both threads are ready. 131 | let start = std::time::Instant::now(); 132 | for i in 0..iters { 133 | while !push(&mut p, i as u8) { 134 | std::hint::spin_loop(); 135 | } 136 | } 137 | start 138 | }) 139 | }; 140 | // While the second thread is still starting up, this thread will busy-wait. 141 | for i in 0..iters { 142 | loop { 143 | if let Some(x) = pop(&mut c) { 144 | assert_eq!(x, i as u8); 145 | break; 146 | } 147 | std::hint::spin_loop(); 148 | } 149 | } 150 | // The timing stops once all items have been received. 
151 | let stop = std::time::Instant::now(); 152 | let start = push_thread.join().unwrap(); 153 | stop.duration_since(start) 154 | }); 155 | }); 156 | )+ 157 | group_small.finish(); 158 | } 159 | 160 | criterion_group!(benches, criterion_benchmark); 161 | criterion_main!(benches); 162 | 163 | }; 164 | } 165 | 166 | create_two_threads_benchmark!( 167 | "rtrb", 168 | rtrb::RingBuffer::new, 169 | |p, i| p.push(i).is_ok(), 170 | |c| c.pop().ok() 171 | ); 172 | -------------------------------------------------------------------------------- /performance-comparison/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ringbuffer-performance-comparison" 3 | version = "0.0.0" 4 | edition = "2021" 5 | 6 | publish = false 7 | 8 | [dependencies] 9 | concurrent-queue = "2.3" 10 | crossbeam-queue = "0.3" 11 | crossbeam-queue-pr338 = { git = "https://github.com/mgeier/crossbeam", branch = "spsc", package = "crossbeam-queue" } 12 | npnc = "0.2" 13 | omango = "0.2" 14 | ringbuf = "0.4" 15 | rtrb = { path = ".." 
} 16 | 17 | [dev-dependencies] 18 | criterion = "0.5" 19 | 20 | # aggressive optimization for benchmarks 21 | [profile.bench] 22 | lto = true 23 | opt-level = 3 24 | codegen-units = 1 25 | 26 | [lib] 27 | bench = false # Don't disturb criterion command line parsing 28 | 29 | [[bench]] 30 | name = "two_threads" 31 | harness = false 32 | -------------------------------------------------------------------------------- /performance-comparison/benches/two_threads.rs: -------------------------------------------------------------------------------- 1 | #[path = "../../benches/two_threads.rs"] 2 | #[macro_use] 3 | mod two_threads; 4 | 5 | use ringbuf::traits::Consumer as _; 6 | use ringbuf::traits::Producer as _; 7 | use ringbuf::traits::Split as _; 8 | 9 | create_two_threads_benchmark!( 10 | 11 | "1-npnc", 12 | |capacity| npnc::bounded::spsc::channel(capacity.next_power_of_two()), 13 | |p, i| p.produce(i).is_ok(), 14 | |c| c.consume().ok(); 15 | 16 | "2-crossbeam-queue-pr338", 17 | crossbeam_queue_pr338::spsc::new, 18 | |q, i| q.push(i).is_ok(), 19 | |q| q.pop().ok(); 20 | 21 | "3-rtrb", 22 | rtrb::RingBuffer::new, 23 | |p, i| p.push(i).is_ok(), 24 | |c| c.pop().ok(); 25 | 26 | "4-omango", 27 | |capacity| omango::queue::spsc::bounded(u32::try_from(capacity).unwrap()), 28 | |p, i| p.try_send(i).is_ok(), 29 | |c| c.try_recv().ok(); 30 | 31 | "5-ringbuf", 32 | |capacity| ringbuf::HeapRb::new(capacity).split(), 33 | |p, i| p.try_push(i).is_ok(), 34 | |c| c.try_pop(); 35 | 36 | "6-concurrent-queue", 37 | |capacity| { 38 | let q = std::sync::Arc::new(concurrent_queue::ConcurrentQueue::bounded(capacity)); 39 | (q.clone(), q) 40 | }, 41 | |q, i| q.push(i).is_ok(), 42 | |q| q.pop().ok(); 43 | 44 | "7-crossbeam-queue", 45 | |capacity| { 46 | let q = std::sync::Arc::new(crossbeam_queue::ArrayQueue::new(capacity)); 47 | (q.clone(), q) 48 | }, 49 | |q, i| q.push(i).is_ok(), 50 | |q| q.pop() 51 | 52 | ); 53 | 
-------------------------------------------------------------------------------- /performance-comparison/src/lib.rs: -------------------------------------------------------------------------------- 1 | // empty 2 | -------------------------------------------------------------------------------- /src/cache_padded.rs: -------------------------------------------------------------------------------- 1 | ../crossbeam/crossbeam-utils/src/cache_padded.rs -------------------------------------------------------------------------------- /src/chunks.rs: -------------------------------------------------------------------------------- 1 | //! Writing and reading multiple items at once into and from a [`RingBuffer`]. 2 | //! 3 | //! Multiple items at once can be moved from an iterator into the ring buffer by using 4 | //! [`Producer::write_chunk_uninit()`] followed by [`WriteChunkUninit::fill_from_iter()`]. 5 | //! Alternatively, mutable access to the (uninitialized) slots of the chunk can be obtained with 6 | //! [`WriteChunkUninit::as_mut_slices()`], which requires writing some `unsafe` code. 7 | //! To avoid that, [`Producer::write_chunk()`] can be used, 8 | //! which initializes all slots with their [`Default`] value 9 | //! and provides mutable access by means of [`WriteChunk::as_mut_slices()`]. 10 | //! 11 | //! Multiple items at once can be moved out of the ring buffer by using 12 | //! [`Consumer::read_chunk()`] and iterating over the returned [`ReadChunk`] 13 | //! (or by explicitly calling [`ReadChunk::into_iter()`]). 14 | //! Immutable access to the slots of the chunk can be obtained with [`ReadChunk::as_slices()`]. 15 | //! 16 | //! # Examples 17 | //! 18 | //! This example uses a single thread for simplicity, but in a real application, 19 | //! `producer` and `consumer` would of course live on different threads: 20 | //! 21 | //! ``` 22 | //! use rtrb::RingBuffer; 23 | //! 24 | //! let (mut producer, mut consumer) = RingBuffer::new(5); 25 | //! 26 | //! 
//! if let Ok(chunk) = producer.write_chunk_uninit(4) {
//!     chunk.fill_from_iter([10, 11, 12]);
//!     // Note that we requested 4 slots but we've only written to 3 of them!
//! } else {
//!     unreachable!();
//! }
//!
//! assert_eq!(producer.slots(), 2);
//! assert_eq!(consumer.slots(), 3);
//!
//! if let Ok(chunk) = consumer.read_chunk(2) {
//!     assert_eq!(chunk.into_iter().collect::<Vec<_>>(), [10, 11]);
//! } else {
//!     unreachable!();
//! }
//!
//! // One element is still in the queue:
//! assert_eq!(consumer.peek(), Ok(&12));
//!
//! let data = vec![20, 21, 22, 23];
//! // NB: write_chunk_uninit() could be used for possibly better performance:
//! if let Ok(mut chunk) = producer.write_chunk(4) {
//!     let (first, second) = chunk.as_mut_slices();
//!     let mid = first.len();
//!     first.copy_from_slice(&data[..mid]);
//!     second.copy_from_slice(&data[mid..]);
//!     chunk.commit_all();
//! } else {
//!     unreachable!();
//! }
//!
//! assert!(producer.is_full());
//! assert_eq!(consumer.slots(), 5);
//!
//! let mut v = Vec::<u8>::with_capacity(5);
//! if let Ok(chunk) = consumer.read_chunk(5) {
//!     let (first, second) = chunk.as_slices();
//!     v.extend(first);
//!     v.extend(second);
//!     chunk.commit_all();
//! } else {
//!     unreachable!();
//! }
//! assert_eq!(v, [12, 20, 21, 22, 23]);
//! assert!(consumer.is_empty());
//! ```
//!
//! The iterator API can be used to move items from one ring buffer to another:
//!
//! ```
//! use rtrb::{Consumer, Producer};
//!
//! fn move_items<T>(src: &mut Consumer<T>, dst: &mut Producer<T>) -> usize {
//!     let n = src.slots().min(dst.slots());
//!     dst.write_chunk_uninit(n).unwrap().fill_from_iter(src.read_chunk(n).unwrap())
//! }
//! ```
//!
//! ## Common Access Patterns
//!
//! The following examples show the [`Producer`] side;
//! similar patterns can of course be used with [`Consumer::read_chunk()`] as well.
//! Furthermore, the examples use [`Producer::write_chunk_uninit()`],
//! along with a bit of `unsafe` code.
//! To avoid this, you can use [`Producer::write_chunk()`] instead,
//! which requires the trait bound `T: Default` and will lead to a small runtime overhead.
//!
//! Copy a whole slice of items into the ring buffer, but only if space permits
//! (if not, the entire input slice is returned as an error):
//!
//! ```
//! use rtrb::{Producer, CopyToUninit};
//!
//! fn push_entire_slice<'a, T>(queue: &mut Producer<T>, slice: &'a [T]) -> Result<(), &'a [T]>
//! where
//!     T: Copy,
//! {
//!     if let Ok(mut chunk) = queue.write_chunk_uninit(slice.len()) {
//!         let (first, second) = chunk.as_mut_slices();
//!         let mid = first.len();
//!         slice[..mid].copy_to_uninit(first);
//!         slice[mid..].copy_to_uninit(second);
//!         // SAFETY: All slots have been initialized
//!         unsafe { chunk.commit_all() };
//!         Ok(())
//!     } else {
//!         Err(slice)
//!     }
//! }
//! ```
//!
//! Copy as many items as possible from a given slice, returning the number of copied items:
//!
//! ```
//! use rtrb::{Producer, CopyToUninit, chunks::ChunkError::TooFewSlots};
//!
//! fn push_partial_slice<T>(queue: &mut Producer<T>, slice: &[T]) -> usize
//! where
//!     T: Copy,
//! {
//!     let mut chunk = match queue.write_chunk_uninit(slice.len()) {
//!         Ok(chunk) => chunk,
//!         // Remaining slots are returned, this will always succeed:
//!         Err(TooFewSlots(n)) => queue.write_chunk_uninit(n).unwrap(),
//!     };
//!     let end = chunk.len();
//!     let (first, second) = chunk.as_mut_slices();
//!     let mid = first.len();
slice[..mid].copy_to_uninit(first); 135 | //! slice[mid..end].copy_to_uninit(second); 136 | //! // SAFETY: All slots have been initialized 137 | //! unsafe { chunk.commit_all() }; 138 | //! end 139 | //! } 140 | //! ``` 141 | //! 142 | //! Write as many slots as possible, given an iterator 143 | //! (and return the number of written slots): 144 | //! 145 | //! ``` 146 | //! use rtrb::{Producer, chunks::ChunkError::TooFewSlots}; 147 | //! 148 | //! fn push_from_iter(queue: &mut Producer, iter: I) -> usize 149 | //! where 150 | //! T: Default, 151 | //! I: IntoIterator, 152 | //! { 153 | //! let iter = iter.into_iter(); 154 | //! let n = match iter.size_hint() { 155 | //! (_, None) => queue.slots(), 156 | //! (_, Some(n)) => n, 157 | //! }; 158 | //! let chunk = match queue.write_chunk_uninit(n) { 159 | //! Ok(chunk) => chunk, 160 | //! // Remaining slots are returned, this will always succeed: 161 | //! Err(TooFewSlots(n)) => queue.write_chunk_uninit(n).unwrap(), 162 | //! }; 163 | //! chunk.fill_from_iter(iter) 164 | //! } 165 | //! ``` 166 | 167 | use core::fmt; 168 | use core::marker::PhantomData; 169 | use core::mem::MaybeUninit; 170 | use core::sync::atomic::Ordering; 171 | 172 | use crate::{Consumer, CopyToUninit, Producer}; 173 | 174 | // This is used in the documentation. 175 | #[allow(unused_imports)] 176 | use crate::RingBuffer; 177 | 178 | impl Producer { 179 | /// Returns `n` slots (initially containing their [`Default`] value) for writing. 180 | /// 181 | /// [`WriteChunk::as_mut_slices()`] provides mutable access to the slots. 182 | /// After writing to those slots, they explicitly have to be made available 183 | /// to be read by the [`Consumer`] by calling [`WriteChunk::commit()`] 184 | /// or [`WriteChunk::commit_all()`]. 185 | /// 186 | /// For an alternative that does not require the trait bound [`Default`], 187 | /// see [`Producer::write_chunk_uninit()`]. 
188 | /// 189 | /// If items are supposed to be moved from an iterator into the ring buffer, 190 | /// [`Producer::write_chunk_uninit()`] followed by [`WriteChunkUninit::fill_from_iter()`] 191 | /// can be used. 192 | /// 193 | /// # Errors 194 | /// 195 | /// If not enough slots are available, an error 196 | /// (containing the number of available slots) is returned. 197 | /// Use [`Producer::slots()`] to obtain the number of available slots beforehand. 198 | /// 199 | /// # Examples 200 | /// 201 | /// See the documentation of the [`chunks`](crate::chunks#examples) module. 202 | pub fn write_chunk(&mut self, n: usize) -> Result, ChunkError> 203 | where 204 | T: Default, 205 | { 206 | self.write_chunk_uninit(n).map(WriteChunk::from) 207 | } 208 | 209 | /// Returns `n` (uninitialized) slots for writing. 210 | /// 211 | /// [`WriteChunkUninit::as_mut_slices()`] provides mutable access 212 | /// to the uninitialized slots. 213 | /// After writing to those slots, they explicitly have to be made available 214 | /// to be read by the [`Consumer`] by calling [`WriteChunkUninit::commit()`] 215 | /// or [`WriteChunkUninit::commit_all()`]. 216 | /// 217 | /// Alternatively, [`WriteChunkUninit::fill_from_iter()`] can be used 218 | /// to move items from an iterator into the available slots. 219 | /// All moved items are automatically made available to be read by the [`Consumer`]. 220 | /// 221 | /// # Errors 222 | /// 223 | /// If not enough slots are available, an error 224 | /// (containing the number of available slots) is returned. 225 | /// Use [`Producer::slots()`] to obtain the number of available slots beforehand. 226 | /// 227 | /// # Safety 228 | /// 229 | /// This function itself is safe, as is [`WriteChunkUninit::fill_from_iter()`]. 
230 | /// However, when using [`WriteChunkUninit::as_mut_slices()`], 231 | /// the user has to make sure that the relevant slots have been initialized 232 | /// before calling [`WriteChunkUninit::commit()`] or [`WriteChunkUninit::commit_all()`]. 233 | /// 234 | /// For a safe alternative that provides mutable slices of [`Default`]-initialized slots, 235 | /// see [`Producer::write_chunk()`]. 236 | pub fn write_chunk_uninit(&mut self, n: usize) -> Result, ChunkError> { 237 | let tail = self.cached_tail.get(); 238 | 239 | // Check if the queue has *possibly* not enough slots. 240 | if self.buffer.capacity - self.buffer.distance(self.cached_head.get(), tail) < n { 241 | // Refresh the head ... 242 | let head = self.buffer.head.load(Ordering::Acquire); 243 | self.cached_head.set(head); 244 | 245 | // ... and check if there *really* are not enough slots. 246 | let slots = self.buffer.capacity - self.buffer.distance(head, tail); 247 | if slots < n { 248 | return Err(ChunkError::TooFewSlots(slots)); 249 | } 250 | } 251 | let tail = self.buffer.collapse_position(tail); 252 | let first_len = n.min(self.buffer.capacity - tail); 253 | Ok(WriteChunkUninit { 254 | // SAFETY: tail has been updated to a valid position. 255 | first_ptr: unsafe { self.buffer.data_ptr.add(tail) }, 256 | first_len, 257 | second_ptr: self.buffer.data_ptr, 258 | second_len: n - first_len, 259 | producer: self, 260 | }) 261 | } 262 | } 263 | 264 | impl Consumer { 265 | /// Returns `n` slots for reading. 266 | /// 267 | /// [`ReadChunk::as_slices()`] provides immutable access to the slots. 268 | /// After reading from those slots, they explicitly have to be made available 269 | /// to be written again by the [`Producer`] by calling [`ReadChunk::commit()`] 270 | /// or [`ReadChunk::commit_all()`]. 
271 | /// 272 | /// Alternatively, items can be moved out of the [`ReadChunk`] using iteration 273 | /// because it implements [`IntoIterator`] 274 | /// ([`ReadChunk::into_iter()`] can be used to explicitly turn it into an [`Iterator`]). 275 | /// All moved items are automatically made available to be written again by the [`Producer`]. 276 | /// 277 | /// # Errors 278 | /// 279 | /// If not enough slots are available, an error 280 | /// (containing the number of available slots) is returned. 281 | /// Use [`Consumer::slots()`] to obtain the number of available slots beforehand. 282 | /// 283 | /// # Examples 284 | /// 285 | /// See the documentation of the [`chunks`](crate::chunks#examples) module. 286 | pub fn read_chunk(&mut self, n: usize) -> Result, ChunkError> { 287 | let head = self.cached_head.get(); 288 | 289 | // Check if the queue has *possibly* not enough slots. 290 | if self.buffer.distance(head, self.cached_tail.get()) < n { 291 | // Refresh the tail ... 292 | let tail = self.buffer.tail.load(Ordering::Acquire); 293 | self.cached_tail.set(tail); 294 | 295 | // ... and check if there *really* are not enough slots. 296 | let slots = self.buffer.distance(head, tail); 297 | if slots < n { 298 | return Err(ChunkError::TooFewSlots(slots)); 299 | } 300 | } 301 | 302 | let head = self.buffer.collapse_position(head); 303 | let first_len = n.min(self.buffer.capacity - head); 304 | Ok(ReadChunk { 305 | // SAFETY: head has been updated to a valid position. 306 | first_ptr: unsafe { self.buffer.data_ptr.add(head) }, 307 | first_len, 308 | second_ptr: self.buffer.data_ptr, 309 | second_len: n - first_len, 310 | consumer: self, 311 | }) 312 | } 313 | } 314 | 315 | /// Structure for writing into multiple ([`Default`]-initialized) slots in one go. 316 | /// 317 | /// This is returned from [`Producer::write_chunk()`]. 
318 | /// 319 | /// To obtain uninitialized slots, use [`Producer::write_chunk_uninit()`] instead, 320 | /// which also allows moving items from an iterator into the ring buffer 321 | /// by means of [`WriteChunkUninit::fill_from_iter()`]. 322 | #[derive(Debug, PartialEq, Eq)] 323 | pub struct WriteChunk<'a, T>(Option>, PhantomData); 324 | 325 | impl Drop for WriteChunk<'_, T> { 326 | fn drop(&mut self) { 327 | // NB: If `commit()` or `commit_all()` has been called, `self.0` is `None`. 328 | if let Some(mut chunk) = self.0.take() { 329 | // No part of the chunk has been committed, all slots are dropped. 330 | // SAFETY: All slots have been initialized in From::from(). 331 | unsafe { chunk.drop_suffix(0) }; 332 | } 333 | } 334 | } 335 | 336 | impl<'a, T> From> for WriteChunk<'a, T> 337 | where 338 | T: Default, 339 | { 340 | /// Fills all slots with the [`Default`] value. 341 | fn from(chunk: WriteChunkUninit<'a, T>) -> Self { 342 | for i in 0..chunk.first_len { 343 | // SAFETY: i is in a valid range. 344 | unsafe { chunk.first_ptr.add(i).write(Default::default()) }; 345 | } 346 | for i in 0..chunk.second_len { 347 | // SAFETY: i is in a valid range. 348 | unsafe { chunk.second_ptr.add(i).write(Default::default()) }; 349 | } 350 | WriteChunk(Some(chunk), PhantomData) 351 | } 352 | } 353 | 354 | impl WriteChunk<'_, T> 355 | where 356 | T: Default, 357 | { 358 | /// Returns two slices for writing to the requested slots. 359 | /// 360 | /// All slots are initially filled with their [`Default`] value. 361 | /// 362 | /// The first slice can only be empty if `0` slots have been requested. 363 | /// If the first slice contains all requested slots, the second one is empty. 364 | /// 365 | /// After writing to the slots, they are *not* automatically made available 366 | /// to be read by the [`Consumer`]. 367 | /// This has to be explicitly done by calling [`commit()`](WriteChunk::commit) 368 | /// or [`commit_all()`](WriteChunk::commit_all). 
369 | /// If items are written but *not* committed afterwards, 370 | /// they will *not* become available for reading and 371 | /// they will be leaked (which is only relevant if `T` implements [`Drop`]). 372 | pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) { 373 | // self.0 is always Some(chunk). 374 | let chunk = self.0.as_ref().unwrap(); 375 | // SAFETY: The pointers and lengths have been computed correctly in write_chunk_uninit() 376 | // and all slots have been initialized in From::from(). 377 | unsafe { 378 | ( 379 | core::slice::from_raw_parts_mut(chunk.first_ptr, chunk.first_len), 380 | core::slice::from_raw_parts_mut(chunk.second_ptr, chunk.second_len), 381 | ) 382 | } 383 | } 384 | 385 | /// Makes the first `n` slots of the chunk available for reading. 386 | /// 387 | /// The rest of the chunk is dropped. 388 | /// 389 | /// # Panics 390 | /// 391 | /// Panics if `n` is greater than the number of slots in the chunk. 392 | pub fn commit(mut self, n: usize) { 393 | // self.0 is always Some(chunk). 394 | let mut chunk = self.0.take().unwrap(); 395 | // SAFETY: All slots have been initialized in From::from(). 396 | unsafe { 397 | // Slots at index `n` and higher are dropped ... 398 | chunk.drop_suffix(n); 399 | // ... everything below `n` is committed. 400 | chunk.commit(n); 401 | } 402 | // `self` is dropped here, with `self.0` being set to `None`. 403 | } 404 | 405 | /// Makes the whole chunk available for reading. 406 | pub fn commit_all(mut self) { 407 | // self.0 is always Some(chunk). 408 | let chunk = self.0.take().unwrap(); 409 | // SAFETY: All slots have been initialized in From::from(). 410 | unsafe { chunk.commit_all() }; 411 | // `self` is dropped here, with `self.0` being set to `None`. 412 | } 413 | 414 | /// Returns the number of slots in the chunk. 415 | #[must_use] 416 | pub fn len(&self) -> usize { 417 | // self.0 is always Some(chunk). 
418 | self.0.as_ref().unwrap().len() 419 | } 420 | 421 | /// Returns `true` if the chunk contains no slots. 422 | #[must_use] 423 | pub fn is_empty(&self) -> bool { 424 | // self.0 is always Some(chunk). 425 | self.0.as_ref().unwrap().is_empty() 426 | } 427 | } 428 | 429 | /// Structure for writing into multiple (uninitialized) slots in one go. 430 | /// 431 | /// This is returned from [`Producer::write_chunk_uninit()`]. 432 | #[derive(Debug, PartialEq, Eq)] 433 | pub struct WriteChunkUninit<'a, T> { 434 | first_ptr: *mut T, 435 | first_len: usize, 436 | second_ptr: *mut T, 437 | second_len: usize, 438 | producer: &'a Producer, 439 | } 440 | 441 | // SAFETY: WriteChunkUninit only exists while a unique reference to the Producer is held. 442 | // It is therefore safe to move it to another thread. 443 | unsafe impl Send for WriteChunkUninit<'_, T> {} 444 | 445 | impl WriteChunkUninit<'_, T> { 446 | /// Returns two slices for writing to the requested slots. 447 | /// 448 | /// The first slice can only be empty if `0` slots have been requested. 449 | /// If the first slice contains all requested slots, the second one is empty. 450 | /// 451 | /// The extension trait [`CopyToUninit`] can be used to safely copy data into those slices. 452 | /// 453 | /// After writing to the slots, they are *not* automatically made available 454 | /// to be read by the [`Consumer`]. 455 | /// This has to be explicitly done by calling [`commit()`](WriteChunkUninit::commit) 456 | /// or [`commit_all()`](WriteChunkUninit::commit_all). 457 | /// If items are written but *not* committed afterwards, 458 | /// they will *not* become available for reading and 459 | /// they will be leaked (which is only relevant if `T` implements [`Drop`]). 460 | pub fn as_mut_slices(&mut self) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { 461 | // SAFETY: The pointers and lengths have been computed correctly in write_chunk_uninit(). 
462 | unsafe { 463 | ( 464 | core::slice::from_raw_parts_mut(self.first_ptr.cast(), self.first_len), 465 | core::slice::from_raw_parts_mut(self.second_ptr.cast(), self.second_len), 466 | ) 467 | } 468 | } 469 | 470 | /// Makes the first `n` slots of the chunk available for reading. 471 | /// 472 | /// # Panics 473 | /// 474 | /// Panics if `n` is greater than the number of slots in the chunk. 475 | /// 476 | /// # Safety 477 | /// 478 | /// The caller must make sure that the first `n` elements have been initialized. 479 | pub unsafe fn commit(self, n: usize) { 480 | assert!(n <= self.len(), "cannot commit more than chunk size"); 481 | // SAFETY: Delegated to the caller. 482 | unsafe { self.commit_unchecked(n) }; 483 | } 484 | 485 | /// Makes the whole chunk available for reading. 486 | /// 487 | /// # Safety 488 | /// 489 | /// The caller must make sure that all elements have been initialized. 490 | pub unsafe fn commit_all(self) { 491 | let slots = self.len(); 492 | // SAFETY: Delegated to the caller. 493 | unsafe { self.commit_unchecked(slots) }; 494 | } 495 | 496 | unsafe fn commit_unchecked(self, n: usize) -> usize { 497 | let p = self.producer; 498 | let tail = p.buffer.increment(p.cached_tail.get(), n); 499 | p.buffer.tail.store(tail, Ordering::Release); 500 | p.cached_tail.set(tail); 501 | n 502 | } 503 | 504 | /// Moves items from an iterator into the (uninitialized) slots of the chunk. 505 | /// 506 | /// The number of moved items is returned. 507 | /// 508 | /// All moved items are automatically made availabe to be read by the [`Consumer`]. 
509 | /// 510 | /// # Examples 511 | /// 512 | /// If the iterator contains too few items, only a part of the chunk 513 | /// is made available for reading: 514 | /// 515 | /// ``` 516 | /// use rtrb::{RingBuffer, PopError}; 517 | /// 518 | /// let (mut p, mut c) = RingBuffer::new(4); 519 | /// 520 | /// if let Ok(chunk) = p.write_chunk_uninit(3) { 521 | /// assert_eq!(chunk.fill_from_iter([10, 20]), 2); 522 | /// } else { 523 | /// unreachable!(); 524 | /// } 525 | /// assert_eq!(p.slots(), 2); 526 | /// assert_eq!(c.pop(), Ok(10)); 527 | /// assert_eq!(c.pop(), Ok(20)); 528 | /// assert_eq!(c.pop(), Err(PopError::Empty)); 529 | /// ``` 530 | /// 531 | /// If the chunk size is too small, some items may remain in the iterator. 532 | /// To be able to keep using the iterator after the call, 533 | /// `&mut` (or [`Iterator::by_ref()`]) can be used. 534 | /// 535 | /// ``` 536 | /// use rtrb::{RingBuffer, PopError}; 537 | /// 538 | /// let (mut p, mut c) = RingBuffer::new(4); 539 | /// 540 | /// let mut it = vec![10, 20, 30].into_iter(); 541 | /// if let Ok(chunk) = p.write_chunk_uninit(2) { 542 | /// assert_eq!(chunk.fill_from_iter(&mut it), 2); 543 | /// } else { 544 | /// unreachable!(); 545 | /// } 546 | /// assert_eq!(c.pop(), Ok(10)); 547 | /// assert_eq!(c.pop(), Ok(20)); 548 | /// assert_eq!(c.pop(), Err(PopError::Empty)); 549 | /// assert_eq!(it.next(), Some(30)); 550 | /// ``` 551 | pub fn fill_from_iter(self, iter: I) -> usize 552 | where 553 | I: IntoIterator, 554 | { 555 | let mut iter = iter.into_iter(); 556 | let mut iterated = 0; 557 | 'outer: for &(ptr, len) in &[ 558 | (self.first_ptr, self.first_len), 559 | (self.second_ptr, self.second_len), 560 | ] { 561 | for i in 0..len { 562 | match iter.next() { 563 | Some(item) => { 564 | // SAFETY: It is allowed to write to this memory slot 565 | unsafe { ptr.add(i).write(item) }; 566 | iterated += 1; 567 | } 568 | None => break 'outer, 569 | } 570 | } 571 | } 572 | // SAFETY: iterated slots have been 
initialized above 573 | unsafe { self.commit_unchecked(iterated) } 574 | } 575 | 576 | /// Returns the number of slots in the chunk. 577 | #[must_use] 578 | pub fn len(&self) -> usize { 579 | self.first_len + self.second_len 580 | } 581 | 582 | /// Returns `true` if the chunk contains no slots. 583 | #[must_use] 584 | pub fn is_empty(&self) -> bool { 585 | self.first_len == 0 586 | } 587 | 588 | /// Drops all elements starting from index `n`. 589 | /// 590 | /// All of those slots must be initialized. 591 | unsafe fn drop_suffix(&mut self, n: usize) { 592 | // NB: If n >= self.len(), the loops are not entered. 593 | for i in n..self.first_len { 594 | // SAFETY: The caller must make sure that all slots are initialized. 595 | unsafe { self.first_ptr.add(i).drop_in_place() }; 596 | } 597 | for i in n.saturating_sub(self.first_len)..self.second_len { 598 | // SAFETY: The caller must make sure that all slots are initialized. 599 | unsafe { self.second_ptr.add(i).drop_in_place() }; 600 | } 601 | } 602 | } 603 | 604 | /// Structure for reading from multiple slots in one go. 605 | /// 606 | /// This is returned from [`Consumer::read_chunk()`]. 607 | #[derive(Debug, PartialEq, Eq)] 608 | pub struct ReadChunk<'a, T> { 609 | // Must be "mut" for drop_in_place() 610 | first_ptr: *mut T, 611 | first_len: usize, 612 | // Must be "mut" for drop_in_place() 613 | second_ptr: *mut T, 614 | second_len: usize, 615 | consumer: &'a Consumer, 616 | } 617 | 618 | // SAFETY: ReadChunk only exists while a unique reference to the Consumer is held. 619 | // It is therefore safe to move it to another thread. 620 | unsafe impl Send for ReadChunk<'_, T> {} 621 | 622 | impl ReadChunk<'_, T> { 623 | /// Returns two slices for reading from the requested slots. 624 | /// 625 | /// The first slice can only be empty if `0` slots have been requested. 626 | /// If the first slice contains all requested slots, the second one is empty. 
627 | /// 628 | /// The provided slots are *not* automatically made available 629 | /// to be written again by the [`Producer`]. 630 | /// This has to be explicitly done by calling [`commit()`](ReadChunk::commit) 631 | /// or [`commit_all()`](ReadChunk::commit_all). 632 | /// Note that this runs the destructor of the committed items (if `T` implements [`Drop`]). 633 | /// You can "peek" at the contained values by simply not calling any of the "commit" methods. 634 | #[must_use] 635 | pub fn as_slices(&self) -> (&[T], &[T]) { 636 | // SAFETY: The pointers and lengths have been computed correctly in read_chunk(). 637 | unsafe { 638 | ( 639 | core::slice::from_raw_parts(self.first_ptr, self.first_len), 640 | core::slice::from_raw_parts(self.second_ptr, self.second_len), 641 | ) 642 | } 643 | } 644 | 645 | /// Returns two mutable slices for reading from the requested slots. 646 | /// 647 | /// This has the same semantics as [`as_slices()`](ReadChunk::as_slices), 648 | /// except that it returns mutable slices and requires a mutable reference 649 | /// to the chunk. 650 | /// 651 | /// In the vast majority of cases, mutable access is not required when 652 | /// reading data and the immutable version should be preferred. However, 653 | /// there are some scenarios where it might be desirable to perform 654 | /// operations on the data in-place without copying it to a separate buffer 655 | /// (e.g. streaming decryption), in which case this version can be used. 656 | #[must_use] 657 | pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) { 658 | // SAFETY: The pointers and lengths have been computed correctly in read_chunk(). 659 | unsafe { 660 | ( 661 | core::slice::from_raw_parts_mut(self.first_ptr, self.first_len), 662 | core::slice::from_raw_parts_mut(self.second_ptr, self.second_len), 663 | ) 664 | } 665 | } 666 | 667 | /// Drops the first `n` slots of the chunk, making the space available for writing again. 
668 | /// 669 | /// # Panics 670 | /// 671 | /// Panics if `n` is greater than the number of slots in the chunk. 672 | /// 673 | /// # Examples 674 | /// 675 | /// The following example shows that items are dropped when "committed" 676 | /// (which is only relevant if `T` implements [`Drop`]). 677 | /// 678 | /// ``` 679 | /// use rtrb::RingBuffer; 680 | /// 681 | /// // Static variable to count all drop() invocations 682 | /// static mut DROP_COUNT: i32 = 0; 683 | /// #[derive(Debug)] 684 | /// struct Thing; 685 | /// impl Drop for Thing { 686 | /// fn drop(&mut self) { unsafe { DROP_COUNT += 1; } } 687 | /// } 688 | /// 689 | /// // Scope to limit lifetime of ring buffer 690 | /// { 691 | /// let (mut p, mut c) = RingBuffer::new(2); 692 | /// 693 | /// assert!(p.push(Thing).is_ok()); // 1 694 | /// assert!(p.push(Thing).is_ok()); // 2 695 | /// if let Ok(thing) = c.pop() { 696 | /// // "thing" has been *moved* out of the queue but not yet dropped 697 | /// assert_eq!(unsafe { DROP_COUNT }, 0); 698 | /// } else { 699 | /// unreachable!(); 700 | /// } 701 | /// // First Thing has been dropped when "thing" went out of scope: 702 | /// assert_eq!(unsafe { DROP_COUNT }, 1); 703 | /// assert!(p.push(Thing).is_ok()); // 3 704 | /// 705 | /// if let Ok(chunk) = c.read_chunk(2) { 706 | /// assert_eq!(chunk.len(), 2); 707 | /// assert_eq!(unsafe { DROP_COUNT }, 1); 708 | /// chunk.commit(1); // Drops only one of the two Things 709 | /// assert_eq!(unsafe { DROP_COUNT }, 2); 710 | /// } else { 711 | /// unreachable!(); 712 | /// } 713 | /// // The last Thing is still in the queue ... 714 | /// assert_eq!(unsafe { DROP_COUNT }, 2); 715 | /// } 716 | /// // ... and it is dropped when the ring buffer goes out of scope: 717 | /// assert_eq!(unsafe { DROP_COUNT }, 3); 718 | /// ``` 719 | pub fn commit(self, n: usize) { 720 | assert!(n <= self.len(), "cannot commit more than chunk size"); 721 | // SAFETY: self.len() initialized elements have been obtained in read_chunk(). 
722 | unsafe { self.commit_unchecked(n) }; 723 | } 724 | 725 | /// Drops all slots of the chunk, making the space available for writing again. 726 | pub fn commit_all(self) { 727 | let slots = self.len(); 728 | // SAFETY: self.len() initialized elements have been obtained in read_chunk(). 729 | unsafe { self.commit_unchecked(slots) }; 730 | } 731 | 732 | unsafe fn commit_unchecked(self, n: usize) -> usize { 733 | let first_len = self.first_len.min(n); 734 | for i in 0..first_len { 735 | // SAFETY: The caller must make sure that there are n initialized elements. 736 | unsafe { self.first_ptr.add(i).drop_in_place() }; 737 | } 738 | let second_len = self.second_len.min(n - first_len); 739 | for i in 0..second_len { 740 | // SAFETY: The caller must make sure that there are n initialized elements. 741 | unsafe { self.second_ptr.add(i).drop_in_place() }; 742 | } 743 | let c = self.consumer; 744 | let head = c.buffer.increment(c.cached_head.get(), n); 745 | c.buffer.head.store(head, Ordering::Release); 746 | c.cached_head.set(head); 747 | n 748 | } 749 | 750 | /// Returns the number of slots in the chunk. 751 | #[must_use] 752 | pub fn len(&self) -> usize { 753 | self.first_len + self.second_len 754 | } 755 | 756 | /// Returns `true` if the chunk contains no slots. 757 | #[must_use] 758 | pub fn is_empty(&self) -> bool { 759 | self.first_len == 0 760 | } 761 | } 762 | 763 | impl<'a, T> IntoIterator for ReadChunk<'a, T> { 764 | type Item = T; 765 | type IntoIter = ReadChunkIntoIter<'a, T>; 766 | 767 | /// Turns a [`ReadChunk`] into an iterator. 768 | /// 769 | /// When the iterator is dropped, all iterated slots are made available for writing again. 770 | /// Non-iterated items remain in the ring buffer. 771 | fn into_iter(self) -> Self::IntoIter { 772 | Self::IntoIter { 773 | chunk: self, 774 | iterated: 0, 775 | } 776 | } 777 | } 778 | 779 | /// An iterator that moves out of a [`ReadChunk`]. 
780 | /// 781 | /// This `struct` is created by the [`into_iter()`](ReadChunk::into_iter) method 782 | /// on [`ReadChunk`] (provided by the [`IntoIterator`] trait). 783 | /// 784 | /// When this `struct` is dropped, the iterated slots are made available for writing again. 785 | /// Non-iterated items remain in the ring buffer. 786 | #[derive(Debug)] 787 | pub struct ReadChunkIntoIter<'a, T> { 788 | chunk: ReadChunk<'a, T>, 789 | iterated: usize, 790 | } 791 | 792 | impl Drop for ReadChunkIntoIter<'_, T> { 793 | /// Makes all iterated slots available for writing again. 794 | /// 795 | /// Non-iterated items remain in the ring buffer and are *not* dropped. 796 | fn drop(&mut self) { 797 | let c = &self.chunk.consumer; 798 | let head = c.buffer.increment(c.cached_head.get(), self.iterated); 799 | c.buffer.head.store(head, Ordering::Release); 800 | c.cached_head.set(head); 801 | } 802 | } 803 | 804 | impl Iterator for ReadChunkIntoIter<'_, T> { 805 | type Item = T; 806 | 807 | #[inline] 808 | fn next(&mut self) -> Option { 809 | let ptr = if self.iterated < self.chunk.first_len { 810 | // SAFETY: first_len is valid. 811 | unsafe { self.chunk.first_ptr.add(self.iterated) } 812 | } else if self.iterated < self.chunk.first_len + self.chunk.second_len { 813 | // SAFETY: first_len and second_len are valid. 814 | unsafe { 815 | self.chunk 816 | .second_ptr 817 | .add(self.iterated - self.chunk.first_len) 818 | } 819 | } else { 820 | return None; 821 | }; 822 | self.iterated += 1; 823 | // SAFETY: ptr points to an initialized slot. 
824 | Some(unsafe { ptr.read() }) 825 | } 826 | 827 | #[inline] 828 | fn size_hint(&self) -> (usize, Option) { 829 | let remaining = self.chunk.first_len + self.chunk.second_len - self.iterated; 830 | (remaining, Some(remaining)) 831 | } 832 | } 833 | 834 | impl ExactSizeIterator for ReadChunkIntoIter<'_, T> {} 835 | 836 | impl core::iter::FusedIterator for ReadChunkIntoIter<'_, T> {} 837 | 838 | #[cfg(feature = "std")] 839 | impl std::io::Write for Producer { 840 | #[inline] 841 | fn write(&mut self, buf: &[u8]) -> std::io::Result { 842 | use ChunkError::TooFewSlots; 843 | let mut chunk = match self.write_chunk_uninit(buf.len()) { 844 | Ok(chunk) => chunk, 845 | Err(TooFewSlots(0)) => return Err(std::io::ErrorKind::WouldBlock.into()), 846 | Err(TooFewSlots(n)) => self.write_chunk_uninit(n).unwrap(), 847 | }; 848 | let end = chunk.len(); 849 | let (first, second) = chunk.as_mut_slices(); 850 | let mid = first.len(); 851 | // NB: If buf.is_empty(), chunk will be empty as well and the following are no-ops: 852 | buf[..mid].copy_to_uninit(first); 853 | buf[mid..end].copy_to_uninit(second); 854 | // SAFETY: All slots have been initialized 855 | unsafe { chunk.commit_all() }; 856 | Ok(end) 857 | } 858 | 859 | #[inline] 860 | fn flush(&mut self) -> std::io::Result<()> { 861 | // Nothing to do here. 
862 | Ok(()) 863 | } 864 | } 865 | 866 | #[cfg(feature = "std")] 867 | impl std::io::Read for Consumer { 868 | #[inline] 869 | fn read(&mut self, buf: &mut [u8]) -> std::io::Result { 870 | use ChunkError::TooFewSlots; 871 | let chunk = match self.read_chunk(buf.len()) { 872 | Ok(chunk) => chunk, 873 | Err(TooFewSlots(0)) => return Err(std::io::ErrorKind::WouldBlock.into()), 874 | Err(TooFewSlots(n)) => self.read_chunk(n).unwrap(), 875 | }; 876 | let (first, second) = chunk.as_slices(); 877 | let mid = first.len(); 878 | let end = chunk.len(); 879 | // NB: If buf.is_empty(), chunk will be empty as well and the following are no-ops: 880 | buf[..mid].copy_from_slice(first); 881 | buf[mid..end].copy_from_slice(second); 882 | chunk.commit_all(); 883 | Ok(end) 884 | } 885 | } 886 | 887 | /// Error type for [`Consumer::read_chunk()`], [`Producer::write_chunk()`] 888 | /// and [`Producer::write_chunk_uninit()`]. 889 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 890 | pub enum ChunkError { 891 | /// Fewer than the requested number of slots were available. 892 | /// 893 | /// Contains the number of slots that were available. 894 | TooFewSlots(usize), 895 | } 896 | 897 | #[cfg(feature = "std")] 898 | impl std::error::Error for ChunkError {} 899 | 900 | impl fmt::Display for ChunkError { 901 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 902 | match self { 903 | ChunkError::TooFewSlots(n) => { 904 | alloc::format!("only {} slots available in ring buffer", n).fmt(f) 905 | } 906 | } 907 | } 908 | } 909 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A realtime-safe single-producer single-consumer (SPSC) ring buffer. 2 | //! 3 | //! A [`RingBuffer`] consists of two parts: 4 | //! a [`Producer`] for writing into the ring buffer and 5 | //! a [`Consumer`] for reading from the ring buffer. 6 | //! 7 | //! 
A fixed-capacity buffer is allocated on construction. 8 | //! After that, no more memory is allocated (unless the type `T` does that internally). 9 | //! Reading from and writing into the ring buffer is *lock-free* and *wait-free*. 10 | //! All reading and writing functions return immediately. 11 | //! Attempts to write to a full buffer return an error; 12 | //! values inside the buffer are *not* overwritten. 13 | //! Attempts to read from an empty buffer return an error as well. 14 | //! Only a single thread can write into the ring buffer and a single thread 15 | //! (typically a different one) can read from the ring buffer. 16 | //! If the queue is empty, there is no way for the reading thread to wait 17 | //! for new data, other than trying repeatedly until reading succeeds. 18 | //! Similarly, if the queue is full, there is no way for the writing thread 19 | //! to wait for newly available space to write to, other than trying repeatedly. 20 | //! 21 | //! # Examples 22 | //! 23 | //! Moving single elements into and out of a queue with 24 | //! [`Producer::push()`] and [`Consumer::pop()`], respectively: 25 | //! 26 | //! ``` 27 | //! use rtrb::{RingBuffer, PushError, PopError}; 28 | //! 29 | //! let (mut producer, mut consumer) = RingBuffer::new(2); 30 | //! 31 | //! assert_eq!(producer.push(10), Ok(())); 32 | //! assert_eq!(producer.push(20), Ok(())); 33 | //! assert_eq!(producer.push(30), Err(PushError::Full(30))); 34 | //! 35 | //! std::thread::spawn(move || { 36 | //! assert_eq!(consumer.pop(), Ok(10)); 37 | //! assert_eq!(consumer.pop(), Ok(20)); 38 | //! assert_eq!(consumer.pop(), Err(PopError::Empty)); 39 | //! }).join().unwrap(); 40 | //! ``` 41 | //! 42 | //! See the documentation of the [`chunks#examples`] module 43 | //! for examples that write multiple items at once with 44 | //! [`Producer::write_chunk_uninit()`] and [`Producer::write_chunk()`] 45 | //! and read multiple items with [`Consumer::read_chunk()`]. 
46 | 47 | #![cfg_attr(not(feature = "std"), no_std)] 48 | #![warn(rust_2018_idioms)] 49 | #![deny(missing_docs, missing_debug_implementations)] 50 | #![deny(unsafe_op_in_unsafe_fn)] 51 | #![warn(clippy::undocumented_unsafe_blocks, clippy::unnecessary_safety_comment)] 52 | 53 | extern crate alloc; 54 | 55 | use alloc::sync::Arc; 56 | use alloc::vec::Vec; 57 | use core::cell::Cell; 58 | use core::fmt; 59 | use core::marker::PhantomData; 60 | use core::mem::{ManuallyDrop, MaybeUninit}; 61 | use core::sync::atomic::{AtomicUsize, Ordering}; 62 | 63 | #[allow(dead_code, clippy::undocumented_unsafe_blocks)] 64 | mod cache_padded; 65 | use cache_padded::CachePadded; 66 | 67 | pub mod chunks; 68 | 69 | // This is used in the documentation. 70 | #[allow(unused_imports)] 71 | use chunks::WriteChunkUninit; 72 | 73 | /// A bounded single-producer single-consumer (SPSC) queue. 74 | /// 75 | /// Elements can be written with a [`Producer`] and read with a [`Consumer`], 76 | /// both of which can be obtained with [`RingBuffer::new()`]. 77 | /// 78 | /// *See also the [crate-level documentation](crate).* 79 | #[derive(Debug)] 80 | pub struct RingBuffer { 81 | /// The head of the queue. 82 | /// 83 | /// This integer is in range `0 .. 2 * capacity`. 84 | head: CachePadded, 85 | 86 | /// The tail of the queue. 87 | /// 88 | /// This integer is in range `0 .. 2 * capacity`. 89 | tail: CachePadded, 90 | 91 | /// The buffer holding slots. 92 | data_ptr: *mut T, 93 | 94 | /// The queue capacity. 95 | capacity: usize, 96 | 97 | /// Indicates that dropping a `RingBuffer` may drop elements of type `T`. 98 | _marker: PhantomData, 99 | } 100 | 101 | impl RingBuffer { 102 | /// Creates a `RingBuffer` with the given `capacity` and returns [`Producer`] and [`Consumer`]. 
103 | /// 104 | /// # Examples 105 | /// 106 | /// ``` 107 | /// use rtrb::RingBuffer; 108 | /// 109 | /// let (producer, consumer) = RingBuffer::::new(100); 110 | /// ``` 111 | /// 112 | /// Specifying an explicit type with the [turbofish](https://turbo.fish/) 113 | /// is is only necessary if it cannot be deduced by the compiler. 114 | /// 115 | /// ``` 116 | /// use rtrb::RingBuffer; 117 | /// 118 | /// let (mut producer, consumer) = RingBuffer::new(100); 119 | /// assert_eq!(producer.push(0.0f32), Ok(())); 120 | /// ``` 121 | #[allow(clippy::new_ret_no_self)] 122 | #[must_use] 123 | pub fn new(capacity: usize) -> (Producer, Consumer) { 124 | let buffer = Arc::new(RingBuffer { 125 | head: CachePadded::new(AtomicUsize::new(0)), 126 | tail: CachePadded::new(AtomicUsize::new(0)), 127 | data_ptr: ManuallyDrop::new(Vec::with_capacity(capacity)).as_mut_ptr(), 128 | capacity, 129 | _marker: PhantomData, 130 | }); 131 | let p = Producer { 132 | buffer: buffer.clone(), 133 | cached_head: Cell::new(0), 134 | cached_tail: Cell::new(0), 135 | }; 136 | let c = Consumer { 137 | buffer, 138 | cached_head: Cell::new(0), 139 | cached_tail: Cell::new(0), 140 | }; 141 | (p, c) 142 | } 143 | 144 | /// Returns the capacity of the queue. 145 | /// 146 | /// # Examples 147 | /// 148 | /// ``` 149 | /// use rtrb::RingBuffer; 150 | /// 151 | /// let (producer, consumer) = RingBuffer::::new(100); 152 | /// assert_eq!(producer.buffer().capacity(), 100); 153 | /// assert_eq!(consumer.buffer().capacity(), 100); 154 | /// // Both producer and consumer of course refer to the same ring buffer: 155 | /// assert_eq!(producer.buffer(), consumer.buffer()); 156 | /// ``` 157 | pub fn capacity(&self) -> usize { 158 | self.capacity 159 | } 160 | 161 | /// Wraps a position from the range `0 .. 2 * capacity` to `0 .. capacity`. 
162 | fn collapse_position(&self, pos: usize) -> usize { 163 | debug_assert!(pos == 0 || pos < 2 * self.capacity); 164 | if pos < self.capacity { 165 | pos 166 | } else { 167 | pos - self.capacity 168 | } 169 | } 170 | 171 | /// Returns a pointer to the slot at position `pos`. 172 | /// 173 | /// If `pos == 0 && capacity == 0`, the returned pointer must not be dereferenced! 174 | unsafe fn slot_ptr(&self, pos: usize) -> *mut T { 175 | debug_assert!(pos == 0 || pos < 2 * self.capacity); 176 | let pos = self.collapse_position(pos); 177 | // SAFETY: The caller must ensure a valid pos. 178 | unsafe { self.data_ptr.add(pos) } 179 | } 180 | 181 | /// Increments a position by going `n` slots forward. 182 | fn increment(&self, pos: usize, n: usize) -> usize { 183 | debug_assert!(pos == 0 || pos < 2 * self.capacity); 184 | debug_assert!(n <= self.capacity); 185 | let threshold = 2 * self.capacity - n; 186 | if pos < threshold { 187 | pos + n 188 | } else { 189 | pos - threshold 190 | } 191 | } 192 | 193 | /// Increments a position by going one slot forward. 194 | /// 195 | /// This is more efficient than self.increment(..., 1). 196 | fn increment1(&self, pos: usize) -> usize { 197 | debug_assert_ne!(self.capacity, 0); 198 | debug_assert!(pos < 2 * self.capacity); 199 | if pos < 2 * self.capacity - 1 { 200 | pos + 1 201 | } else { 202 | 0 203 | } 204 | } 205 | 206 | /// Returns the distance between two positions. 207 | fn distance(&self, a: usize, b: usize) -> usize { 208 | debug_assert!(a == 0 || a < 2 * self.capacity); 209 | debug_assert!(b == 0 || b < 2 * self.capacity); 210 | if a <= b { 211 | b - a 212 | } else { 213 | 2 * self.capacity - a + b 214 | } 215 | } 216 | } 217 | 218 | impl Drop for RingBuffer { 219 | /// Drops all non-empty slots. 220 | fn drop(&mut self) { 221 | let mut head = self.head.load(Ordering::Relaxed); 222 | let tail = self.tail.load(Ordering::Relaxed); 223 | 224 | // Loop over all slots that hold a value and drop them. 
225 | while head != tail { 226 | // SAFETY: All slots between head and tail have been initialized. 227 | unsafe { self.slot_ptr(head).drop_in_place() }; 228 | head = self.increment1(head); 229 | } 230 | 231 | // Finally, deallocate the buffer, but don't run any destructors. 232 | // SAFETY: data_ptr and capacity are still valid from the original initialization. 233 | unsafe { Vec::from_raw_parts(self.data_ptr, 0, self.capacity) }; 234 | } 235 | } 236 | 237 | impl PartialEq for RingBuffer { 238 | /// This method tests for `self` and `other` values to be equal, and is used by `==`. 239 | /// 240 | /// # Examples 241 | /// 242 | /// ``` 243 | /// use rtrb::RingBuffer; 244 | /// 245 | /// let (p1, c1) = RingBuffer::::new(1000); 246 | /// assert_eq!(p1.buffer(), c1.buffer()); 247 | /// 248 | /// let (p2, c2) = RingBuffer::::new(1000); 249 | /// assert_ne!(p1.buffer(), p2.buffer()); 250 | /// ``` 251 | fn eq(&self, other: &Self) -> bool { 252 | core::ptr::eq(self, other) 253 | } 254 | } 255 | 256 | impl Eq for RingBuffer {} 257 | 258 | /// The producer side of a [`RingBuffer`]. 259 | /// 260 | /// Can be moved between threads, 261 | /// but references from different threads are not allowed 262 | /// (i.e. it is [`Send`] but not [`Sync`]). 263 | /// 264 | /// Can only be created with [`RingBuffer::new()`] 265 | /// (together with its counterpart, the [`Consumer`]). 266 | /// 267 | /// Individual elements can be moved into the ring buffer with [`Producer::push()`], 268 | /// multiple elements at once can be written with [`Producer::write_chunk()`] 269 | /// and [`Producer::write_chunk_uninit()`]. 270 | /// 271 | /// The number of free slots currently available for writing can be obtained with 272 | /// [`Producer::slots()`]. 273 | /// 274 | /// When the `Producer` is dropped, [`Consumer::is_abandoned()`] will return `true`. 275 | /// This can be used as a crude way to communicate to the receiving thread 276 | /// that no more data will be produced. 
277 | /// When the `Producer` is dropped after the [`Consumer`] has already been dropped, 278 | /// [`RingBuffer::drop()`] will be called, freeing the allocated memory. 279 | #[derive(Debug, PartialEq, Eq)] 280 | pub struct Producer { 281 | /// A reference to the ring buffer. 282 | buffer: Arc>, 283 | 284 | /// A copy of `buffer.head` for quick access. 285 | /// 286 | /// This value can be stale and sometimes needs to be resynchronized with `buffer.head`. 287 | cached_head: Cell, 288 | 289 | /// A copy of `buffer.tail` for quick access. 290 | /// 291 | /// This value is always in sync with `buffer.tail`. 292 | // NB: Caching the tail seems to have little effect on Intel CPUs, but it seems to 293 | // improve performance on AMD CPUs, see https://github.com/mgeier/rtrb/pull/132 294 | cached_tail: Cell, 295 | } 296 | 297 | // SAFETY: After moving a Producer to another thread, there is still only a single thread 298 | // that can access the producer side of the queue. 299 | unsafe impl Send for Producer {} 300 | 301 | impl Producer { 302 | /// Attempts to push an element into the queue. 303 | /// 304 | /// The element is *moved* into the ring buffer and its slot 305 | /// is made available to be read by the [`Consumer`]. 306 | /// 307 | /// # Errors 308 | /// 309 | /// If the queue is full, the element is returned back as an error. 310 | /// 311 | /// # Examples 312 | /// 313 | /// ``` 314 | /// use rtrb::{RingBuffer, PushError}; 315 | /// 316 | /// let (mut p, c) = RingBuffer::new(1); 317 | /// 318 | /// assert_eq!(p.push(10), Ok(())); 319 | /// assert_eq!(p.push(20), Err(PushError::Full(20))); 320 | /// ``` 321 | pub fn push(&mut self, value: T) -> Result<(), PushError> { 322 | if let Some(tail) = self.next_tail() { 323 | // SAFETY: tail points to an empty slot. 
324 | unsafe { self.buffer.slot_ptr(tail).write(value) }; 325 | let tail = self.buffer.increment1(tail); 326 | self.buffer.tail.store(tail, Ordering::Release); 327 | self.cached_tail.set(tail); 328 | Ok(()) 329 | } else { 330 | Err(PushError::Full(value)) 331 | } 332 | } 333 | 334 | /// Returns the number of slots available for writing. 335 | /// 336 | /// Since items can be concurrently consumed on another thread, the actual number 337 | /// of available slots may increase at any time (up to the [`RingBuffer::capacity()`]). 338 | /// 339 | /// To check for a single available slot, 340 | /// using [`Producer::is_full()`] is often quicker 341 | /// (because it might not have to check an atomic variable). 342 | /// 343 | /// # Examples 344 | /// 345 | /// ``` 346 | /// use rtrb::RingBuffer; 347 | /// 348 | /// let (p, c) = RingBuffer::::new(1024); 349 | /// 350 | /// assert_eq!(p.slots(), 1024); 351 | /// ``` 352 | pub fn slots(&self) -> usize { 353 | let head = self.buffer.head.load(Ordering::Acquire); 354 | self.cached_head.set(head); 355 | self.buffer.capacity - self.buffer.distance(head, self.cached_tail.get()) 356 | } 357 | 358 | /// Returns `true` if there are currently no slots available for writing. 359 | /// 360 | /// A full ring buffer might cease to be full at any time 361 | /// if the corresponding [`Consumer`] is consuming items in another thread. 362 | /// 363 | /// # Examples 364 | /// 365 | /// ``` 366 | /// use rtrb::RingBuffer; 367 | /// 368 | /// let (p, c) = RingBuffer::::new(1); 369 | /// 370 | /// assert!(!p.is_full()); 371 | /// ``` 372 | /// 373 | /// Since items can be concurrently consumed on another thread, the ring buffer 374 | /// might not be full for long: 375 | /// 376 | /// ``` 377 | /// # use rtrb::RingBuffer; 378 | /// # let (p, c) = RingBuffer::::new(1); 379 | /// if p.is_full() { 380 | /// // The buffer might be full, but it might as well not be 381 | /// // if an item was just consumed on another thread. 
382 | /// } 383 | /// ``` 384 | /// 385 | /// However, if it's not full, another thread cannot change that: 386 | /// 387 | /// ``` 388 | /// # use rtrb::RingBuffer; 389 | /// # let (p, c) = RingBuffer::::new(1); 390 | /// if !p.is_full() { 391 | /// // At least one slot is guaranteed to be available for writing. 392 | /// } 393 | /// ``` 394 | pub fn is_full(&self) -> bool { 395 | self.next_tail().is_none() 396 | } 397 | 398 | /// Returns `true` if the corresponding [`Consumer`] has been destroyed. 399 | /// 400 | /// Note that since Rust version 1.74.0, this is not synchronizing with the consumer thread 401 | /// anymore, see . 402 | /// In a future version of `rtrb`, the synchronizing behavior might be restored. 403 | /// 404 | /// # Examples 405 | /// 406 | /// ``` 407 | /// use rtrb::RingBuffer; 408 | /// 409 | /// let (mut p, c) = RingBuffer::new(7); 410 | /// assert!(!p.is_abandoned()); 411 | /// assert_eq!(p.push(10), Ok(())); 412 | /// drop(c); 413 | /// // The items that are still in the ring buffer are not accessible anymore. 414 | /// assert!(p.is_abandoned()); 415 | /// // Even though it's futile, items can still be written: 416 | /// assert_eq!(p.push(11), Ok(())); 417 | /// ``` 418 | /// 419 | /// Since the consumer can be concurrently dropped on another thread, 420 | /// the producer might become abandoned at any time: 421 | /// 422 | /// ``` 423 | /// # use rtrb::RingBuffer; 424 | /// # let (p, c) = RingBuffer::::new(1); 425 | /// if !p.is_abandoned() { 426 | /// // Right now, the consumer might still be alive, but it might as well not be 427 | /// // if another thread has just dropped it. 
428 | /// } 429 | /// ``` 430 | /// 431 | /// However, if it already is abandoned, it will stay that way: 432 | /// 433 | /// ``` 434 | /// # use rtrb::RingBuffer; 435 | /// # let (p, c) = RingBuffer::::new(1); 436 | /// if p.is_abandoned() { 437 | /// // This is needed since Rust 1.74.0, see https://github.com/mgeier/rtrb/issues/114: 438 | /// std::sync::atomic::fence(std::sync::atomic::Ordering::Acquire); 439 | /// // The consumer does definitely not exist anymore. 440 | /// } 441 | /// ``` 442 | pub fn is_abandoned(&self) -> bool { 443 | Arc::strong_count(&self.buffer) < 2 444 | } 445 | 446 | /// Returns a read-only reference to the ring buffer. 447 | pub fn buffer(&self) -> &RingBuffer { 448 | &self.buffer 449 | } 450 | 451 | /// Get the tail position for writing the next slot, if available. 452 | /// 453 | /// This is a strict subset of the functionality implemented in `write_chunk_uninit()`. 454 | /// For performance, this special case is immplemented separately. 455 | fn next_tail(&self) -> Option { 456 | let tail = self.cached_tail.get(); 457 | 458 | // Check if the queue is *possibly* full. 459 | if self.buffer.distance(self.cached_head.get(), tail) == self.buffer.capacity { 460 | // Refresh the head ... 461 | let head = self.buffer.head.load(Ordering::Acquire); 462 | self.cached_head.set(head); 463 | 464 | // ... and check if it's *really* full. 465 | if self.buffer.distance(head, tail) == self.buffer.capacity { 466 | return None; 467 | } 468 | } 469 | Some(tail) 470 | } 471 | } 472 | 473 | /// The consumer side of a [`RingBuffer`]. 474 | /// 475 | /// Can be moved between threads, 476 | /// but references from different threads are not allowed 477 | /// (i.e. it is [`Send`] but not [`Sync`]). 478 | /// 479 | /// Can only be created with [`RingBuffer::new()`] 480 | /// (together with its counterpart, the [`Producer`]). 
481 | /// 482 | /// Individual elements can be moved out of the ring buffer with [`Consumer::pop()`], 483 | /// multiple elements at once can be read with [`Consumer::read_chunk()`]. 484 | /// 485 | /// The number of slots currently available for reading can be obtained with 486 | /// [`Consumer::slots()`]. 487 | /// 488 | /// When the `Consumer` is dropped, [`Producer::is_abandoned()`] will return `true`. 489 | /// This can be used as a crude way to communicate to the sending thread 490 | /// that no more data will be consumed. 491 | /// When the `Consumer` is dropped after the [`Producer`] has already been dropped, 492 | /// [`RingBuffer::drop()`] will be called, freeing the allocated memory. 493 | #[derive(Debug, PartialEq, Eq)] 494 | pub struct Consumer { 495 | /// A reference to the ring buffer. 496 | buffer: Arc>, 497 | 498 | /// A copy of `buffer.head` for quick access. 499 | /// 500 | /// This value is always in sync with `buffer.head`. 501 | // NB: Caching the head seems to have little effect on Intel CPUs, but it seems to 502 | // improve performance on AMD CPUs, see https://github.com/mgeier/rtrb/pull/132 503 | cached_head: Cell, 504 | 505 | /// A copy of `buffer.tail` for quick access. 506 | /// 507 | /// This value can be stale and sometimes needs to be resynchronized with `buffer.tail`. 508 | cached_tail: Cell, 509 | } 510 | 511 | // SAFETY: After moving a Consumer to another thread, there is still only a single thread 512 | // that can access the consumer side of the queue. 513 | unsafe impl Send for Consumer {} 514 | 515 | impl Consumer { 516 | /// Attempts to pop an element from the queue. 517 | /// 518 | /// The element is *moved* out of the ring buffer and its slot 519 | /// is made available to be filled by the [`Producer`] again. 520 | /// 521 | /// # Errors 522 | /// 523 | /// If the queue is empty, an error is returned. 
524 | /// 525 | /// # Examples 526 | /// 527 | /// ``` 528 | /// use rtrb::{PopError, RingBuffer}; 529 | /// 530 | /// let (mut p, mut c) = RingBuffer::new(1); 531 | /// 532 | /// assert_eq!(p.push(10), Ok(())); 533 | /// assert_eq!(c.pop(), Ok(10)); 534 | /// assert_eq!(c.pop(), Err(PopError::Empty)); 535 | /// ``` 536 | /// 537 | /// To obtain an [`Option`](Option), use [`.ok()`](Result::ok) on the result. 538 | /// 539 | /// ``` 540 | /// # use rtrb::RingBuffer; 541 | /// # let (mut p, mut c) = RingBuffer::new(1); 542 | /// assert_eq!(p.push(20), Ok(())); 543 | /// assert_eq!(c.pop().ok(), Some(20)); 544 | /// ``` 545 | pub fn pop(&mut self) -> Result { 546 | if let Some(head) = self.next_head() { 547 | // SAFETY: head points to an initialized slot. 548 | let value = unsafe { self.buffer.slot_ptr(head).read() }; 549 | let head = self.buffer.increment1(head); 550 | self.buffer.head.store(head, Ordering::Release); 551 | self.cached_head.set(head); 552 | Ok(value) 553 | } else { 554 | Err(PopError::Empty) 555 | } 556 | } 557 | 558 | /// Attempts to read an element from the queue without removing it. 559 | /// 560 | /// # Errors 561 | /// 562 | /// If the queue is empty, an error is returned. 563 | /// 564 | /// # Examples 565 | /// 566 | /// ``` 567 | /// use rtrb::{PeekError, RingBuffer}; 568 | /// 569 | /// let (mut p, c) = RingBuffer::new(1); 570 | /// 571 | /// assert_eq!(c.peek(), Err(PeekError::Empty)); 572 | /// assert_eq!(p.push(10), Ok(())); 573 | /// assert_eq!(c.peek(), Ok(&10)); 574 | /// assert_eq!(c.peek(), Ok(&10)); 575 | /// ``` 576 | pub fn peek(&self) -> Result<&T, PeekError> { 577 | if let Some(head) = self.next_head() { 578 | // SAFETY: head points to an initialized slot. 579 | Ok(unsafe { &*self.buffer.slot_ptr(head) }) 580 | } else { 581 | Err(PeekError::Empty) 582 | } 583 | } 584 | 585 | /// Returns the number of slots available for reading. 
586 | /// 587 | /// Since items can be concurrently produced on another thread, the actual number 588 | /// of available slots may increase at any time (up to the [`RingBuffer::capacity()`]). 589 | /// 590 | /// To check for a single available slot, 591 | /// using [`Consumer::is_empty()`] is often quicker 592 | /// (because it might not have to check an atomic variable). 593 | /// 594 | /// # Examples 595 | /// 596 | /// ``` 597 | /// use rtrb::RingBuffer; 598 | /// 599 | /// let (p, c) = RingBuffer::::new(1024); 600 | /// 601 | /// assert_eq!(c.slots(), 0); 602 | /// ``` 603 | pub fn slots(&self) -> usize { 604 | let tail = self.buffer.tail.load(Ordering::Acquire); 605 | self.cached_tail.set(tail); 606 | self.buffer.distance(self.cached_head.get(), tail) 607 | } 608 | 609 | /// Returns `true` if there are currently no slots available for reading. 610 | /// 611 | /// An empty ring buffer might cease to be empty at any time 612 | /// if the corresponding [`Producer`] is producing items in another thread. 613 | /// 614 | /// # Examples 615 | /// 616 | /// ``` 617 | /// use rtrb::RingBuffer; 618 | /// 619 | /// let (p, c) = RingBuffer::::new(1); 620 | /// 621 | /// assert!(c.is_empty()); 622 | /// ``` 623 | /// 624 | /// Since items can be concurrently produced on another thread, the ring buffer 625 | /// might not be empty for long: 626 | /// 627 | /// ``` 628 | /// # use rtrb::RingBuffer; 629 | /// # let (p, c) = RingBuffer::::new(1); 630 | /// if c.is_empty() { 631 | /// // The buffer might be empty, but it might as well not be 632 | /// // if an item was just produced on another thread. 633 | /// } 634 | /// ``` 635 | /// 636 | /// However, if it's not empty, another thread cannot change that: 637 | /// 638 | /// ``` 639 | /// # use rtrb::RingBuffer; 640 | /// # let (p, c) = RingBuffer::::new(1); 641 | /// if !c.is_empty() { 642 | /// // At least one slot is guaranteed to be available for reading. 
643 | /// } 644 | /// ``` 645 | pub fn is_empty(&self) -> bool { 646 | self.next_head().is_none() 647 | } 648 | 649 | /// Returns `true` if the corresponding [`Producer`] has been destroyed. 650 | /// 651 | /// Note that since Rust version 1.74.0, this is not synchronizing with the producer thread 652 | /// anymore, see . 653 | /// In a future version of `rtrb`, the synchronizing behavior might be restored. 654 | /// 655 | /// # Examples 656 | /// 657 | /// ``` 658 | /// use rtrb::RingBuffer; 659 | /// 660 | /// let (mut p, mut c) = RingBuffer::new(7); 661 | /// assert!(!c.is_abandoned()); 662 | /// assert_eq!(p.push(10), Ok(())); 663 | /// drop(p); 664 | /// assert!(c.is_abandoned()); 665 | /// // The items that are left in the ring buffer can still be consumed: 666 | /// assert_eq!(c.pop(), Ok(10)); 667 | /// ``` 668 | /// 669 | /// Since the producer can be concurrently dropped on another thread, 670 | /// the consumer might become abandoned at any time: 671 | /// 672 | /// ``` 673 | /// # use rtrb::RingBuffer; 674 | /// # let (p, c) = RingBuffer::::new(1); 675 | /// if !c.is_abandoned() { 676 | /// // Right now, the producer might still be alive, but it might as well not be 677 | /// // if another thread has just dropped it. 678 | /// } 679 | /// ``` 680 | /// 681 | /// However, if it already is abandoned, it will stay that way: 682 | /// 683 | /// ``` 684 | /// # use rtrb::RingBuffer; 685 | /// # let (p, c) = RingBuffer::::new(1); 686 | /// if c.is_abandoned() { 687 | /// // This is needed since Rust 1.74.0, see https://github.com/mgeier/rtrb/issues/114: 688 | /// std::sync::atomic::fence(std::sync::atomic::Ordering::Acquire); 689 | /// // The producer does definitely not exist anymore. 690 | /// } 691 | /// ``` 692 | pub fn is_abandoned(&self) -> bool { 693 | Arc::strong_count(&self.buffer) < 2 694 | } 695 | 696 | /// Returns a read-only reference to the ring buffer. 
697 | pub fn buffer(&self) -> &RingBuffer { 698 | &self.buffer 699 | } 700 | 701 | /// Get the head position for reading the next slot, if available. 702 | /// 703 | /// This is a strict subset of the functionality implemented in `read_chunk()`. 704 | /// For performance, this special case is immplemented separately. 705 | fn next_head(&self) -> Option { 706 | let head = self.cached_head.get(); 707 | 708 | // Check if the queue is *possibly* empty. 709 | if head == self.cached_tail.get() { 710 | // Refresh the tail ... 711 | let tail = self.buffer.tail.load(Ordering::Acquire); 712 | self.cached_tail.set(tail); 713 | 714 | // ... and check if it's *really* empty. 715 | if head == tail { 716 | return None; 717 | } 718 | } 719 | Some(head) 720 | } 721 | } 722 | 723 | /// Extension trait used to provide a [`copy_to_uninit()`](CopyToUninit::copy_to_uninit) 724 | /// method on built-in slices. 725 | /// 726 | /// This can be used to safely copy data to the slices returned from 727 | /// [`WriteChunkUninit::as_mut_slices()`]. 728 | /// 729 | /// To use this, the trait has to be brought into scope, e.g. with: 730 | /// 731 | /// ``` 732 | /// use rtrb::CopyToUninit; 733 | /// ``` 734 | pub trait CopyToUninit { 735 | /// Copies contents to a possibly uninitialized slice. 736 | fn copy_to_uninit<'a>(&self, dst: &'a mut [MaybeUninit]) -> &'a mut [T]; 737 | } 738 | 739 | impl CopyToUninit for [T] { 740 | /// Copies contents to a possibly uninitialized slice. 741 | /// 742 | /// # Panics 743 | /// 744 | /// This function will panic if the two slices have different lengths. 745 | fn copy_to_uninit<'a>(&self, dst: &'a mut [MaybeUninit]) -> &'a mut [T] { 746 | assert_eq!( 747 | self.len(), 748 | dst.len(), 749 | "source slice length does not match destination slice length" 750 | ); 751 | let dst_ptr = dst.as_mut_ptr().cast(); 752 | // SAFETY: The lengths have been checked to be equal and 753 | // the mutable reference makes sure that there is no overlap. 
754 | unsafe { 755 | self.as_ptr().copy_to_nonoverlapping(dst_ptr, self.len()); 756 | core::slice::from_raw_parts_mut(dst_ptr, self.len()) 757 | } 758 | } 759 | } 760 | 761 | /// Error type for [`Consumer::pop()`]. 762 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 763 | pub enum PopError { 764 | /// The queue was empty. 765 | Empty, 766 | } 767 | 768 | #[cfg(feature = "std")] 769 | impl std::error::Error for PopError {} 770 | 771 | impl fmt::Display for PopError { 772 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 773 | match self { 774 | PopError::Empty => "empty ring buffer".fmt(f), 775 | } 776 | } 777 | } 778 | 779 | /// Error type for [`Consumer::peek()`]. 780 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 781 | pub enum PeekError { 782 | /// The queue was empty. 783 | Empty, 784 | } 785 | 786 | #[cfg(feature = "std")] 787 | impl std::error::Error for PeekError {} 788 | 789 | impl fmt::Display for PeekError { 790 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 791 | match self { 792 | PeekError::Empty => "empty ring buffer".fmt(f), 793 | } 794 | } 795 | } 796 | 797 | /// Error type for [`Producer::push()`]. 798 | #[derive(Copy, Clone, PartialEq, Eq)] 799 | pub enum PushError { 800 | /// The queue was full. 
801 | Full(T), 802 | } 803 | 804 | #[cfg(feature = "std")] 805 | impl std::error::Error for PushError {} 806 | 807 | impl fmt::Debug for PushError { 808 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 809 | match self { 810 | PushError::Full(_) => f.pad("Full(_)"), 811 | } 812 | } 813 | } 814 | 815 | impl fmt::Display for PushError { 816 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 817 | match self { 818 | PushError::Full(_) => "full ring buffer".fmt(f), 819 | } 820 | } 821 | } 822 | -------------------------------------------------------------------------------- /tests/chunks.rs: -------------------------------------------------------------------------------- 1 | use rtrb::{chunks::ChunkError, RingBuffer}; 2 | 3 | #[test] 4 | fn iterators() { 5 | let (mut p, mut c) = RingBuffer::new(3); 6 | if let Ok(chunk) = p.write_chunk_uninit(3) { 7 | chunk.fill_from_iter([10, 11]); 8 | } else { 9 | unreachable!(); 10 | } 11 | assert_eq!(c.slots(), 2); 12 | if let Ok(chunk) = c.read_chunk(2) { 13 | let mut iter = chunk.into_iter(); 14 | assert_eq!(iter.len(), 2); 15 | assert_eq!(iter.next().unwrap(), 10); 16 | assert_eq!(iter.len(), 1); 17 | } 18 | assert_eq!(c.slots(), 1); 19 | if let Ok(chunk) = p.write_chunk_uninit(p.slots()) { 20 | chunk.fill_from_iter([20, 21]); 21 | } else { 22 | unreachable!(); 23 | } 24 | if let Ok(chunk) = c.read_chunk(c.slots()) { 25 | let mut iter = chunk.into_iter(); 26 | assert_eq!(iter.len(), 3); 27 | assert_eq!(iter.next().unwrap(), 11); 28 | assert_eq!(iter.len(), 2); 29 | assert_eq!(iter.next().unwrap(), 20); 30 | assert_eq!(iter.len(), 1); 31 | assert_eq!(iter.next().unwrap(), 21); 32 | assert_eq!(iter.len(), 0); 33 | assert!(iter.next().is_none()); 34 | for _ in 0..1000 { 35 | // FusedIterator continues to yield None: 36 | assert!(iter.next().is_none()); 37 | } 38 | } 39 | } 40 | 41 | #[test] 42 | fn zero_capacity() { 43 | let (mut p, mut c) = RingBuffer::::new(0); 44 | 45 | 
assert_eq!(p.write_chunk(1).unwrap_err(), ChunkError::TooFewSlots(0)); 46 | assert_eq!(c.read_chunk(1).unwrap_err(), ChunkError::TooFewSlots(0)); 47 | 48 | if let Ok(mut chunk) = p.write_chunk(0) { 49 | assert_eq!(chunk.len(), 0); 50 | assert!(chunk.is_empty()); 51 | let (first, second) = chunk.as_mut_slices(); 52 | assert!(first.is_empty()); 53 | assert!(second.is_empty()); 54 | chunk.commit_all(); 55 | } else { 56 | unreachable!(); 57 | } 58 | 59 | if let Ok(chunk) = c.read_chunk(0) { 60 | assert_eq!(chunk.len(), 0); 61 | assert!(chunk.is_empty()); 62 | let (first, second) = chunk.as_slices(); 63 | assert!(first.is_empty()); 64 | assert!(second.is_empty()); 65 | chunk.commit_all(); 66 | } else { 67 | unreachable!(); 68 | } 69 | } 70 | 71 | #[test] 72 | fn single_capacity() { 73 | let (mut p, mut c) = RingBuffer::::new(1); 74 | 75 | if let Ok(mut chunk) = p.write_chunk(1) { 76 | assert_eq!(chunk.len(), 1); 77 | assert!(!chunk.is_empty()); 78 | let (first, second) = chunk.as_mut_slices(); 79 | first[0] = 2; 80 | assert!(second.is_empty()); 81 | chunk.commit_all(); 82 | } else { 83 | unreachable!(); 84 | } 85 | 86 | if let Ok(mut chunk) = c.read_chunk(1) { 87 | assert_eq!(chunk.len(), 1); 88 | assert!(!chunk.is_empty()); 89 | { 90 | let (first, second) = chunk.as_mut_slices(); 91 | assert_eq!(first[0], 2); 92 | first[0] *= 2; 93 | assert!(second.is_empty()); 94 | } 95 | { 96 | let (first, second) = chunk.as_slices(); 97 | assert_eq!(first[0], 4); 98 | assert!(second.is_empty()); 99 | } 100 | chunk.commit_all(); 101 | } else { 102 | unreachable!(); 103 | } 104 | } 105 | 106 | #[test] 107 | fn drop_write_chunk() { 108 | // Static variable to count all drop() invocations 109 | static mut DROP_COUNT: i32 = 0; 110 | 111 | #[derive(Default)] 112 | struct Thing; 113 | 114 | impl Drop for Thing { 115 | fn drop(&mut self) { 116 | unsafe { 117 | DROP_COUNT += 1; 118 | } 119 | } 120 | } 121 | 122 | { 123 | let (mut p, mut c) = RingBuffer::new(3); 124 | 125 | if let Ok(mut 
chunk) = p.write_chunk(3) { 126 | let (first, _second) = chunk.as_mut_slices(); 127 | assert_eq!(unsafe { DROP_COUNT }, 0); 128 | first[0] = Thing; 129 | // Overwriting drops the original Default element: 130 | assert_eq!(unsafe { DROP_COUNT }, 1); 131 | chunk.commit(1); 132 | // After committing, 2 (unwritten) slots are dropped 133 | assert_eq!(unsafe { DROP_COUNT }, 3); 134 | } else { 135 | unreachable!(); 136 | } 137 | 138 | let chunk = c.read_chunk(1).unwrap(); 139 | // Drop count is unchanged: 140 | assert_eq!(unsafe { DROP_COUNT }, 3); 141 | chunk.commit_all(); 142 | // The stored element is never read, but it is dropped: 143 | assert_eq!(unsafe { DROP_COUNT }, 4); 144 | 145 | let chunk = p.write_chunk(3).unwrap(); 146 | // Drop count is unchanged: 147 | assert_eq!(unsafe { DROP_COUNT }, 4); 148 | drop(chunk); 149 | // All three slots of the chunk are dropped: 150 | assert_eq!(unsafe { DROP_COUNT }, 7); 151 | } 152 | // RingBuffer was already empty, nothing is dropped: 153 | assert_eq!(unsafe { DROP_COUNT }, 7); 154 | } 155 | 156 | #[test] 157 | fn trait_impls() { 158 | let (mut p, mut c) = RingBuffer::::new(0); 159 | 160 | if let Ok(chunk) = p.write_chunk(0) { 161 | assert!(format!("{:?}", chunk).starts_with("WriteChunk")); 162 | } else { 163 | unreachable!(); 164 | } 165 | if let Ok(chunk) = p.write_chunk_uninit(0) { 166 | assert!(format!("{:?}", chunk).starts_with("WriteChunkUninit")); 167 | } else { 168 | unreachable!(); 169 | } 170 | if let Ok(chunk) = c.read_chunk(0) { 171 | assert!(format!("{:?}", chunk).starts_with("ReadChunk")); 172 | let iter = chunk.into_iter(); 173 | assert!(format!("{:?}", iter).starts_with("ReadChunkIntoIter")); 174 | } else { 175 | unreachable!(); 176 | } 177 | let e = c.read_chunk(100).unwrap_err(); 178 | assert_eq!(format!("{:?}", e), "TooFewSlots(0)"); 179 | assert_eq!(e.to_string(), "only 0 slots available in ring buffer"); 180 | } 181 | -------------------------------------------------------------------------------- 
/tests/lib.rs:
--------------------------------------------------------------------------------
use rtrb::RingBuffer;

#[test]
fn capacity() {
    for i in 0..10 {
        let (p, c) = RingBuffer::<i32>::new(i);
        assert_eq!(p.buffer().capacity(), i);
        assert_eq!(c.buffer().capacity(), i);
    }
}

#[test]
fn zero_capacity() {
    let (mut p, mut c) = RingBuffer::<i32>::new(0);

    assert_eq!(p.slots(), 0);
    assert_eq!(c.slots(), 0);

    assert!(p.is_full());
    assert!(c.is_empty());

    assert!(p.push(10).is_err());
    assert!(c.pop().is_err());
}

#[test]
fn zero_sized_type() {
    struct ZeroSized;
    assert_eq!(std::mem::size_of::<ZeroSized>(), 0);

    let (mut p, mut c) = RingBuffer::new(1);
    assert_eq!(p.buffer().capacity(), 1);
    assert_eq!(p.slots(), 1);
    assert_eq!(c.slots(), 0);
    assert!(p.push(ZeroSized).is_ok());
    assert_eq!(p.slots(), 0);
    assert_eq!(c.slots(), 1);
    assert!(p.push(ZeroSized).is_err());
    assert!(c.peek().is_ok());
    assert!(c.pop().is_ok());
    assert_eq!(c.slots(), 0);
    assert!(c.peek().is_err());
}

#[test]
fn parallel() {
    const COUNT: usize = if cfg!(miri) { 1_000 } else { 100_000 };
    let (mut p, mut c) = RingBuffer::new(3);
    let pop_thread = std::thread::spawn(move || {
        for i in 0..COUNT {
            loop {
                if let Ok(x) = c.pop() {
                    assert_eq!(x, i);
                    break;
                }
            }
        }
        assert!(c.pop().is_err());
    });
    let push_thread = std::thread::spawn(move || {
        for i in 0..COUNT {
            while p.push(i).is_err() {}
        }
    });
    push_thread.join().unwrap();
    pop_thread.join().unwrap();
}

#[test]
fn drops() {
    use rand::{thread_rng, Rng};
    use std::sync::atomic::{AtomicUsize, Ordering};

    const RUNS: usize = if cfg!(miri) { 10 } else { 100 };

    static DROPS: AtomicUsize = AtomicUsize::new(0);

#[derive(Debug, PartialEq)] 79 | struct DropCounter; 80 | 81 | impl Drop for DropCounter { 82 | fn drop(&mut self) { 83 | DROPS.fetch_add(1, Ordering::SeqCst); 84 | } 85 | } 86 | 87 | let mut rng = thread_rng(); 88 | 89 | for _ in 0..RUNS { 90 | let steps = rng.gen_range(0..if cfg!(miri) { 100 } else { 10_000 }); 91 | let additional = rng.gen_range(0..50); 92 | 93 | DROPS.store(0, Ordering::SeqCst); 94 | let (mut p, mut c) = RingBuffer::new(50); 95 | let pop_thread = std::thread::spawn(move || { 96 | for _ in 0..steps { 97 | while c.pop().is_err() {} 98 | } 99 | }); 100 | let push_thread = std::thread::spawn(move || { 101 | for _ in 0..steps { 102 | while p.push(DropCounter).is_err() { 103 | DROPS.fetch_sub(1, Ordering::SeqCst); 104 | } 105 | } 106 | p 107 | }); 108 | p = push_thread.join().unwrap(); 109 | pop_thread.join().unwrap(); 110 | 111 | for _ in 0..additional { 112 | p.push(DropCounter).unwrap(); 113 | } 114 | 115 | assert_eq!(DROPS.load(Ordering::SeqCst), steps); 116 | drop(p); 117 | assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); 118 | } 119 | } 120 | 121 | #[test] 122 | fn trait_impls() { 123 | let (mut p, mut c) = RingBuffer::::new(0); 124 | 125 | assert!(format!("{:?}", p.buffer()).starts_with("RingBuffer {")); 126 | assert!(format!("{:?}", p).starts_with("Producer {")); 127 | assert!(format!("{:?}", c).starts_with("Consumer {")); 128 | 129 | assert_eq!(format!("{:?}", p.push(42).unwrap_err()), "Full(_)"); 130 | assert_eq!(p.push(42).unwrap_err().to_string(), "full ring buffer"); 131 | assert_eq!(format!("{:?}", c.pop().unwrap_err()), "Empty"); 132 | assert_eq!(c.pop().unwrap_err().to_string(), "empty ring buffer"); 133 | assert_eq!(format!("{:?}", c.peek().unwrap_err()), "Empty"); 134 | assert_eq!(c.peek().unwrap_err().to_string(), "empty ring buffer"); 135 | 136 | let (another_p, another_c) = RingBuffer::::new(0); 137 | assert_ne!(p, another_p); 138 | assert_ne!(c, another_c); 139 | } 140 | 141 | #[test] 142 | fn 
no_race_with_is_abandoned() { 143 | static mut V: u32 = 0; 144 | // NB: We give Miri multiple chances to find probabilistic bugs, 145 | // see https://github.com/rust-lang/rust/issues/117485: 146 | for _ in 0..5 { 147 | let (p, c) = RingBuffer::::new(7); 148 | let t = std::thread::spawn(move || { 149 | unsafe { V = 10 }; 150 | drop(p); 151 | }); 152 | std::thread::yield_now(); 153 | if c.is_abandoned() { 154 | std::sync::atomic::fence(std::sync::atomic::Ordering::Acquire); 155 | unsafe { V = 20 }; 156 | } 157 | t.join().unwrap(); 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /tests/write_and_read.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "std")] 2 | 3 | use std::io::{Read, Write}; 4 | 5 | use rtrb::RingBuffer; 6 | 7 | #[test] 8 | fn write_and_read() { 9 | let (mut p, mut c) = RingBuffer::new(2); 10 | assert_eq!(p.write(&[10, 11]).unwrap(), 2); 11 | // Does nothing: 12 | assert!(p.flush().is_ok()); 13 | 14 | { 15 | let mut buf = [0]; 16 | assert_eq!(c.read(&mut buf).unwrap(), 1); 17 | assert_eq!(buf, [10]); 18 | } 19 | 20 | assert_eq!(p.write(&[12, 99]).unwrap(), 1); 21 | 22 | { 23 | let mut buf = [0, 0]; 24 | assert_eq!(c.read(&mut buf).unwrap(), 2); 25 | assert_eq!(buf, [11, 12]); 26 | } 27 | 28 | assert_eq!(p.write(&[13, 14]).unwrap(), 2); 29 | 30 | { 31 | let mut buf = [0]; 32 | assert_eq!(c.read(&mut buf).unwrap(), 1); 33 | assert_eq!(buf, [13]); 34 | } 35 | 36 | { 37 | let mut buf = [20, 21]; 38 | assert_eq!(c.read(&mut buf).unwrap(), 1); 39 | assert_eq!(buf, [14, 21]); 40 | } 41 | } 42 | 43 | #[test] 44 | fn write_empty_buf() { 45 | let (mut p, _c) = RingBuffer::new(2); 46 | assert_eq!(p.write(&[]).unwrap(), 0); 47 | } 48 | 49 | #[test] 50 | fn read_empty_buf() { 51 | let (mut p, mut c) = RingBuffer::new(2); 52 | assert_eq!(p.push(99), Ok(())); 53 | assert_eq!(c.read(&mut []).unwrap(), 0); 54 | } 55 | 56 | #[test] 57 | fn write_error() { 58 
| let (mut p, _c) = RingBuffer::new(1); 59 | assert_eq!(p.push(10), Ok(())); 60 | assert_eq!( 61 | p.write(&[99]).unwrap_err().kind(), 62 | std::io::ErrorKind::WouldBlock 63 | ); 64 | } 65 | 66 | #[test] 67 | fn read_error() { 68 | let (_p, mut c) = RingBuffer::new(1); 69 | let mut buf = [0]; 70 | assert_eq!( 71 | c.read(&mut buf).unwrap_err().kind(), 72 | std::io::ErrorKind::WouldBlock 73 | ); 74 | } 75 | --------------------------------------------------------------------------------