├── .gitignore ├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── Cargo.toml ├── benches └── benchmarks.rs ├── CHANGELOG.md ├── README.md ├── Cargo.lock ├── LICENSE └── src └── lib.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | open-pull-requests-limit: 10 8 | - package-ecosystem: github-actions 9 | directory: / 10 | schedule: 11 | interval: weekly 12 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "triple_buffer" 3 | # 4 | # Release checklist: 5 | # 6 | # - Cross-check all public API documentation, update as needed 7 | # - Update changelog 8 | # - Update version number in Cargo.toml, run some command to update lockfile 9 | # - Push branch, auto-merge when it passes CI 10 | # - Cargo publish 11 | # - Roll an annotated git tag 12 | # - Add a github release 13 | # 14 | version = "9.0.0" 15 | authors = ["Hadrien G. "] 16 | description = "An implementation of triple buffering, useful for sharing frequently updated data between threads" 17 | documentation = "https://docs.rs/triple_buffer/" 18 | repository = "https://github.com/HadrienG2/triple-buffer" 19 | readme = "README.md" 20 | keywords = [ "synchronization", "spsc", "multithreading", "non-blocking", "wait-free" ] 21 | categories = [ "algorithms", "asynchronous", "concurrency", "data-structures" ] 22 | license = "MPL-2.0" 23 | edition = "2021" 24 | rust-version = "1.74" 25 | 26 | [badges] 27 | maintenance = { status = "passively-maintained" } 28 | 29 | [features] 30 | # Adjust the test configuration (e.g. reduce problem sizes) so the tests can run 31 | # under Miri within a reasonable time budget. 
32 | miri = [] 33 | 34 | [dependencies] 35 | crossbeam-utils = { version = "0.8.11", default-features = false } 36 | 37 | [dev-dependencies] 38 | criterion = { version = "0.5", default-features = false } 39 | testbench = "1.0" 40 | 41 | [lib] 42 | bench = false 43 | 44 | [[bench]] 45 | name = "benchmarks" 46 | harness = false 47 | -------------------------------------------------------------------------------- /benches/benchmarks.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 2 | use triple_buffer::TripleBuffer; 3 | 4 | pub fn benchmark(c: &mut Criterion) { 5 | let (mut input, mut output) = TripleBuffer::::default().split(); 6 | 7 | { 8 | let mut uncontended = c.benchmark_group("uncontended"); 9 | uncontended.bench_function("read output", |b| b.iter(|| *output.output_buffer())); 10 | uncontended.bench_function("clean update", |b| { 11 | b.iter(|| { 12 | output.update(); 13 | }) 14 | }); 15 | uncontended.bench_function("clean receive", |b| b.iter(|| *output.read())); 16 | uncontended.bench_function("write input", |b| { 17 | b.iter(|| { 18 | *input.input_buffer_mut() = black_box(0); 19 | }) 20 | }); 21 | uncontended.bench_function("publish", |b| { 22 | b.iter(|| { 23 | input.publish(); 24 | }) 25 | }); 26 | uncontended.bench_function("guarded write + send", |b| { 27 | b.iter(|| { 28 | *input.input_buffer_publisher() = black_box(0); 29 | }) 30 | }); 31 | uncontended.bench_function("send", |b| b.iter(|| input.write(black_box(0)))); 32 | uncontended.bench_function("publish + dirty update", |b| { 33 | b.iter(|| { 34 | input.publish(); 35 | output.update(); 36 | }) 37 | }); 38 | uncontended.bench_function("transmit", |b| { 39 | b.iter(|| { 40 | input.write(black_box(0)); 41 | *output.read() 42 | }) 43 | }); 44 | uncontended.bench_function("guarded transmit", |b| { 45 | b.iter(|| { 46 | *input.input_buffer_publisher() = black_box(0); 47 | *output.read() 48 | }) 49 | }); 50 | } 51 | 52 | { 53 | let mut read_contended = c.benchmark_group("read contention"); 54 | testbench::run_under_contention( 55 | || black_box(*output.read()), 56 | || { 57 | read_contended.bench_function("write input", |b| { 58 | b.iter(|| { 59 | *input.input_buffer_mut() = black_box(0); 60 | }) 61 | }); 62 | read_contended.bench_function("publish", |b| { 63 | b.iter(|| { 64 | input.publish(); 65 | }) 66 | }); 67 | read_contended.bench_function("guarded write+send", |b| { 68 | b.iter(|| *input.input_buffer_publisher() = black_box(0)) 69 | }); 70 | 71 | read_contended.bench_function("send", |b| b.iter(|| input.write(black_box(0)))); 72 | }, 73 | ); 74 | } 75 | 76 | { 77 | let mut write_contended = c.benchmark_group("write contention"); 78 | testbench::run_under_contention( 79 | || input.write(black_box(0)), 80 | || { 81 | write_contended 82 | .bench_function("read output", |b| b.iter(|| *output.output_buffer())); 83 | write_contended.bench_function("update", |b| { 84 | b.iter(|| { 85 | output.update(); 86 | }) 87 | }); 88 | write_contended.bench_function("receive", |b| b.iter(|| *output.read())); 89 | }, 90 | ); 91 | } 92 | } 93 | 94 | criterion_group!(benches, benchmark); 95 | criterion_main!(benches); 96 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # There are two kinds of continuous integration jobs in this project: 2 | # 3 | # - Every code submission or master push 
passes continuous integration on the 4 | # minimal supported Rust version and the current stable Rust version. 5 | # - Two times a month, a scheduled job makes sure that the code remains 6 | # compatible and lint-free on upcoming Rust toolchains (beta and nightly). 7 | 8 | name: Continuous Integration 9 | 10 | on: 11 | push: 12 | pull_request: 13 | schedule: 14 | - cron: '0 0 11/15 * *' 15 | 16 | # Cancel existing jobs on new pushes to the same branch 17 | concurrency: 18 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 19 | cancel-in-progress: true 20 | 21 | env: 22 | CARGO_INCREMENTAL: 0 23 | RUSTFLAGS: -D warnings 24 | RUSTDOCFLAGS: -D warnings 25 | MINIMAL_RUST: 1.74.0 # Minimal Supported Rust Version 26 | 27 | # Workarounds for windows "spurious network error". 28 | # Try to remove them on the next MSRV bump. 29 | CARGO_NET_GIT_FETCH_WITH_CLI: true 30 | CARGO_HTTP_MULTIPLEXING: false 31 | 32 | jobs: 33 | # Workaround for github CI dropping env var expansion in matrix strategy 34 | matrix_vars: 35 | runs-on: ubuntu-latest 36 | outputs: 37 | MINIMAL_RUST: ${{ env.MINIMAL_RUST }} 38 | steps: 39 | - name: Forward env var to output 40 | run: echo "MINIMAL_RUST=${{ env.MINIMAL_RUST }}" >> $GITHUB_OUTPUT 41 | 42 | 43 | # Format doesn't depend on configuration, and lints don't depend on the 44 | # operating system since there's no OS-specific code path in this crate. 45 | # 46 | # We don't care about warnings on the minimum supported Rust version, only 47 | # about building and running correctly. 48 | format-lints: 49 | # Don't run CI twice when a PR is created from a branch internal to the repo 50 | if: github.event_name == 'push' || github.event_name == 'schedule' || github.event.pull_request.head.repo.full_name != github.repository 51 | 52 | runs-on: ubuntu-latest 53 | 54 | steps: 55 | - name: Checkout sources 56 | uses: actions/checkout@v6 57 | 58 | - name: Set up stable toolchain 59 | if: github.event_name != 'schedule' 60 | uses: actions-rust-lang/setup-rust-toolchain@v1 61 | with: 62 | components: rustfmt,clippy 63 | 64 | - name: Set up nightly toolchain 65 | if: github.event_name == 'schedule' 66 | uses: actions-rust-lang/setup-rust-toolchain@v1 67 | with: 68 | toolchain: nightly 69 | components: rustfmt,clippy 70 | 71 | - name: Check format 72 | run: cargo fmt --all --check 73 | 74 | - name: Check clippy lints 75 | run: cargo clippy --workspace --all-targets -- -D warnings 76 | 77 | - name: Install cargo-machete 78 | uses: taiki-e/install-action@cargo-machete 79 | 80 | - name: Look for unused dependencies with cargo-machete 81 | run: cargo machete 82 | 83 | - name: Check semver 84 | # Not guaranteed to run on nightly, so we use the separate job below 85 | if: github.event_name != 'schedule' 86 | uses: obi1kenobi/cargo-semver-checks-action@v2 87 | with: 88 | rust-toolchain: manual 89 | 90 | # Workaround for cargo-semver-checks not guaranteeing nightly support 91 | scheduled-semver-checks: 92 | if: github.event_name == 'schedule' 93 | 94 | runs-on: ubuntu-latest 95 | 96 | steps: 97 | - name: Checkout sources 98 | uses: actions/checkout@v6 99 | 100 | - name: Set up stable toolchain 101 | uses: actions-rust-lang/setup-rust-toolchain@v1 102 | with: 103 | components: rustfmt,clippy 104 | 105 | - name: Check semver (scheduled version) 106 | uses: obi1kenobi/cargo-semver-checks-action@v2 107 | with: 108 | rust-toolchain: manual 109 | 110 | 111 | # Run the tests on all supported OSes and Rust versions (main CI) 112 | test-contrib: 113 | # Don't run CI twice when a PR is 
created from a branch internal to the repo 114 | # Don't run in scheduled jobs, that's what test-scheduled is for 115 | if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository 116 | 117 | runs-on: ${{ matrix.os }} 118 | 119 | needs: matrix_vars 120 | 121 | strategy: 122 | matrix: 123 | os: 124 | - ubuntu-latest 125 | - windows-latest 126 | - macos-latest 127 | rust: 128 | - stable 129 | - ${{ needs.matrix_vars.outputs.MINIMAL_RUST }} 130 | 131 | steps: 132 | - name: Checkout sources 133 | uses: actions/checkout@v6 134 | 135 | - name: Set up toolchain 136 | uses: actions-rust-lang/setup-rust-toolchain@v1 137 | with: 138 | toolchain: ${{ matrix.rust }} 139 | 140 | - name: Run basic tests 141 | run: cargo test 142 | 143 | - name: Run concurrent tests 144 | # FIXME: macOS runners are too overloaded for concurrent testing 145 | if: runner.os != 'macOS' 146 | run: cargo test --release -- --ignored --test-threads=1 147 | 148 | - name: Check that benchmarks build 149 | run: cargo build --benches 150 | 151 | 152 | # Run the tests in Miri for more thorough runtime checks 153 | # 154 | # Since Miri is an abstract machine and this crate contains no OS-specific 155 | # code, testing on one single OS is sufficient. Per Miri documentation, Linux 156 | # gets the highest-quality support, so that's our obvious pick. 157 | miri: 158 | # Only run on "pull_request" event for external PRs. This is to avoid 159 | # duplicate builds for PRs created from internal branches. 160 | if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository || github.event_name == 'schedule' 161 | 162 | runs-on: ubuntu-latest 163 | 164 | steps: 165 | - name: Checkout sources 166 | uses: actions/checkout@v6 167 | 168 | - name: Set up toolchain 169 | uses: actions-rust-lang/setup-rust-toolchain@v1 170 | with: 171 | components: miri,rust-src 172 | toolchain: nightly 173 | 174 | - name: Run tests 175 | run: cargo +nightly miri test --features=miri -- --include-ignored 176 | 177 | # Check compatibility with newer Rust/deps versions (scheduled CI) 178 | # 179 | # FIXME: There should be a way to use conditional build matrices without 180 | # duplicating the whole job recipe... 181 | # 182 | test-scheduled: 183 | if: github.event_name == 'schedule' 184 | 185 | runs-on: ${{ matrix.os }} 186 | 187 | needs: matrix_vars 188 | 189 | strategy: 190 | matrix: 191 | os: 192 | - ubuntu-latest 193 | - windows-latest 194 | - macos-latest 195 | rust: 196 | - beta 197 | - nightly 198 | - ${{ needs.matrix_vars.outputs.MINIMAL_RUST }} 199 | 200 | steps: 201 | - name: Checkout sources 202 | uses: actions/checkout@v6 203 | 204 | - name: Set up toolchain 205 | uses: actions-rust-lang/setup-rust-toolchain@v1 206 | with: 207 | toolchain: ${{ matrix.rust }} 208 | 209 | - name: Run basic tests 210 | run: cargo test 211 | 212 | - name: Run concurrent tests 213 | # FIXME: macOS runners are too overloaded for concurrent testing 214 | if: runner.os != 'macOS' 215 | run: cargo test --release -- --ignored --test-threads=1 216 | 217 | - name: Check that benchmarks build 218 | run: cargo build --benches 219 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 
4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | 9 | ## [Unreleased] 10 | 11 | ### Changed 12 | 13 | - Turn `Input::input_buffer()` and `Output::output_buffer()` into read-only 14 | accessors and deprecate `Output::peek_output_buffer()`, moving forward with 15 | the plan set in issue #30 to eventually migrate towards an API naming 16 | convention that matches `std` and other Rust libraries. 17 | 18 | 19 | 20 | ## [8.1.1] - 2025-05-04 21 | 22 | ### Changed 23 | 24 | - Switched to edition 2021 since the current MSRV allows for it 25 | 26 | ### Fixed 27 | 28 | - Updated README to reflect current API 29 | - Commit lockfile to avoid surprise semver/MSRV breakage from deps 30 | 31 | 32 | ## [8.1.0] - 2025-02-02 33 | 34 | ### Added 35 | 36 | - Add `Input::input_buffer_publisher()` method to provide an RAII-based 37 | alternative to the low-level `input_buffer()`/`publish()` interface. Thanks 38 | @crop2000 ! 39 | 40 | ### Changed 41 | 42 | - Rename `Input::input_buffer()` to `Input::input_buffer_mut()`, keeping a 43 | deprecated alias for now, and do the same for `Output::output_buffer()`. This 44 | is the start of a gradual deprecation process whose end goal is to eventually 45 | follow the standard Rust accessor naming convention (`input_buffer(&self) -> 46 | &T`, `input_buffer_mut(&mut self) -> &mut T`, same thing on the output side). 47 | 48 | 49 | ## [8.0.0] - 2024-06-21 50 | 51 | ### Added 52 | 53 | - Add `Output::peek_output_buffer()` method to get read-only access to the 54 | output buffer from a shared reference to self. Thanks @tk70 ! 55 | 56 | ### Changed 57 | 58 | - Bumped MSRV to 1.74 owing to new dependency requirements. 59 | - Refactor CI workflow file to account for the latest GitHub CI oddities. 60 | 61 | 62 | ## [7.0.0] - 2023-10-22 63 | 64 | ### Changed 65 | 66 | - Bumped MSRV to 1.70 owing to new dependency requirements. 67 | 68 | 69 | ## [6.2.0] - 2022-06-27 70 | 71 | ### Added 72 | 73 | - A `triple_buffer()` shorthand is now available for the common 74 | `TripleBuffer::new().split()` pattern. 75 | 76 | ### Changed 77 | 78 | - The documentation example now features multi-threading to clarify ownership. 79 | 80 | 81 | ## [6.1.0] - 2022-10-05 82 | 83 | ### Added 84 | 85 | - `triple-buffer` is now usable in `no_std` contexts where an implementation of 86 | the `alloc` crate is available. 87 | 88 | 89 | ## [6.0.0] - 2021-12-18 90 | 91 | ### Changed 92 | 93 | - Latest dependency versions require Rust 1.46, we bump MSRV accordingly. 94 | - ...and since that's a breaking change, I'm also flushing the breaking change 95 | pipeline along the way: 96 | * TripleBuffer::new now takes a reference to its input. 97 | * The deprecated `raw` feature is now removed. 98 | 99 | 100 | ## [5.0.6] - 2021-01-16 101 | 102 | ### Added 103 | 104 | - As a result of the bugfix mentioned below, there is no performance motivation 105 | to gate `raw` features behind a feature flag, so those features are now 106 | available by default without a `raw_` prefix. Usage of the `raw_` prefix and 107 | the `raw` feature flag is deprecated and these may be removed in a future 108 | major release, but it doesn't harm to keep them indefinitely for now. 109 | 110 | ### Changed 111 | 112 | - Benchmarks now use `criterion`, and have been significantly cleaned up along 113 | the way. They are now more extensive and more reliable. 
114 | - Moved MSRV to Rust 1.36 because we now use crossbeam for testing, which 115 | requires that much. The crate itself should still support Rust 1.34 for now, 116 | but we cannot test that it continues doing so... 117 | 118 | ### Fixed 119 | 120 | - Removed a possibility of data race that was not observed on current hardware, 121 | but could be triggered by future hardware or compiler evolutions. See 122 | https://github.com/HadrienG2/triple-buffer/issues/14 . 123 | 124 | 125 | ## [5.0.5] - 2020-07-05 126 | 127 | ### Changed 128 | 129 | - Use only cache-padded instead of the full crossbeam-utils crate 130 | - Clean up CI config and cache Rust toolchain there 131 | 132 | 133 | ## [5.0.4] - 2020-02-10 134 | 135 | ### Added 136 | 137 | - Add a changelog to the repository. 138 | 139 | ### Changed 140 | 141 | - Deduplicate CI configuration some more. 142 | 143 | ### Fixed 144 | 145 | - Drop now-unnecessary manual `rustfmt` configuration. 146 | - Avoid false sharing of back-buffer information. 147 | 148 | 149 | ## [5.0.3] - 2020-02-07 150 | 151 | ### Changed 152 | 153 | - Clean up and deduplicate GitHub Actions configuration. 154 | - Tune down concurrent test speed to reduce CI false positives. 155 | 156 | 157 | ## [5.0.2] - 2020-01-29 158 | 159 | ### Changed 160 | 161 | - Move continuous integration to GitHub Actions. 162 | 163 | 164 | ## [5.0.1] - 2019-11-07 165 | 166 | ### Fixed 167 | 168 | - Update to current version of dependencies. 169 | 170 | 171 | ## [5.0.0] - 2019-04-12 172 | 173 | ### Changed 174 | 175 | - Bump travis CI configuration to Ubuntu Xenial. 176 | - Bump minimal supported Rust version to 1.34.0. 177 | 178 | ### Fixed 179 | 180 | - Don't use an `usize` for buffer indices where an `u8` will suffice. 181 | - Improve Rust API guidelines compliance. 182 | 183 | 184 | ## [4.0.1] - 2018-12-31 185 | 186 | ### Fixed 187 | 188 | - Display `raw` feature documentation on docs.rs. 189 | 190 | 191 | ## [4.0.0] - 2018-12-18 192 | 193 | ### Changed 194 | 195 | - Migrate to Rust 2018. 196 | - Bump minimal supported Rust version to 1.31.0. 197 | 198 | ### Fixed 199 | 200 | - Update to current version of dependencies. 201 | - Start using Clippy and integrate it into continuous integration. 202 | - Re-apply `rustfmt` coding style (was not in CI at the time...). 203 | 204 | 205 | ## [3.0.1] - 2018-08-27 206 | 207 | ### Fixed 208 | 209 | - Make `testbench` a dev-dependency, as it's only used for tests and benchmarks. 210 | 211 | 212 | ## [3.0.0] - 2018-08-27 213 | 214 | ### Changed 215 | 216 | - Buffers are now padded to the size of a cache line to reduce false sharing. 217 | - Bump minimal supported Rust version to 1.26.0. 218 | 219 | ### Fixed 220 | 221 | - Make `testbench` version requirement more explicit. 222 | 223 | 224 | ## [2.0.0] - 2018-02-11 225 | 226 | ### Changed 227 | 228 | - Switch license to MPLv2, which is a better match to Rust's static linking 229 | philosophy than LGPL. 230 | 231 | 232 | ## [1.1.1] - 2017-11-19 233 | 234 | ### Fixed 235 | 236 | - Fix my understanding of Cargo features & make the `raw` feature actually work. 237 | 238 | 239 | ## [1.1.0] - 2017-11-18 240 | 241 | ### Added 242 | 243 | - Allow in-place writes on the input and output side, at the cost of stronger 244 | synchronization barriers, through use of the `raw` Cargo feature. 245 | 246 | ### Fixed 247 | 248 | - Do not require a `Clone` bound on the inner data. 249 | 250 | 251 | ## [1.0.0] - 2017-11-10 252 | 253 | ### Changed 254 | 255 | - Simplify component naming convention, e.g. 
`TripleBufferInput` -> `Input`. 256 | 257 | 258 | ## [0.3.4] - 2017-06-25 259 | 260 | ### Changed 261 | 262 | - Use `testbench::RaceCell` as an improved form of data race detection in tests. 263 | 264 | ### Fixed 265 | 266 | - Do not require a `PartialEq` bound on the inner data. 267 | 268 | 269 | ## [0.3.3] - 2017-06-15 270 | 271 | ### Changed 272 | 273 | - Tune down concurrent test speed to reduce CI false positives. 274 | 275 | 276 | ## [0.3.2] - 2017-06-15 277 | 278 | ### Changed 279 | 280 | - Tune down concurrent test speed to reduce CI false positives. 281 | 282 | 283 | ## [0.3.1] - 2017-06-15 284 | 285 | ### Changed 286 | 287 | - Tune down concurrent test speed to reduce CI false positives. 288 | 289 | 290 | ## [0.3.0] - 2017-06-14 291 | 292 | ### Added 293 | 294 | - Introduce Travis CI continuous integration. 295 | 296 | ### Fixed 297 | 298 | - Use CI to clarify minimal supported Rust version (currently 1.12.0). 299 | 300 | 301 | ## [0.2.4] - 2017-04-04 302 | 303 | ### Changed 304 | 305 | - Use `testbench` crate for concurrent testing and benchmarking. 306 | 307 | 308 | ## [0.2.3] - 2017-03-24 309 | 310 | ### Changed 311 | 312 | - More detailed comparison with other synchronization primitives in README. 313 | 314 | ### Fixed 315 | 316 | - Adopt `rustfmt` coding style. 317 | 318 | 319 | ## [0.2.2] - 2017-03-20 320 | 321 | ### Changed 322 | 323 | - Reduce reliance on Acquire-Release synchronization. 324 | 325 | 326 | ## [0.2.1] - 2017-03-11 327 | 328 | ### Changed 329 | 330 | - Make README a bit more spambot-proof. 331 | 332 | 333 | ## [0.2.0] - 2017-03-11 334 | 335 | ### Added 336 | 337 | - First tagged release of triple-buffer. 338 | 339 | 340 | 341 | [Unreleased]: https://github.com/HadrienG2/triple-buffer/compare/v8.1.1...HEAD 342 | [8.1.1]: https://github.com/HadrienG2/triple-buffer/compare/v8.1.0...v8.1.1 343 | [8.1.0]: https://github.com/HadrienG2/triple-buffer/compare/v8.0.0...v8.1.0 344 | [8.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v7.0.0...v8.0.0 345 | [7.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v6.2.0...v7.0.0 346 | [6.2.0]: https://github.com/HadrienG2/triple-buffer/compare/v6.1.0...v6.2.0 347 | [6.1.0]: https://github.com/HadrienG2/triple-buffer/compare/v6.0.0...v6.1.0 348 | [6.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.6...v6.0.0 349 | [5.0.6]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.5...v5.0.6 350 | [5.0.5]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.4...v5.0.5 351 | [5.0.4]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.3...v5.0.4 352 | [5.0.3]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.2...v5.0.3 353 | [5.0.2]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.1...v5.0.2 354 | [5.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.0...v5.0.1 355 | [5.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v4.0.1...v5.0.0 356 | [4.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v4.0.0...v4.0.1 357 | [4.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v3.0.1...v4.0.0 358 | [3.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v3.0.0...v3.0.1 359 | [3.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v2.0.0...v3.0.0 360 | [2.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v1.1.1...v2.0.0 361 | [1.1.1]: https://github.com/HadrienG2/triple-buffer/compare/v1.1.0...v1.1.1 362 | [1.1.0]: https://github.com/HadrienG2/triple-buffer/compare/v1.0.0...v1.1.0 363 | [1.0.0]: 
https://github.com/HadrienG2/triple-buffer/compare/v0.3.4...v1.0.0 364 | [0.3.4]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.3...v0.3.4 365 | [0.3.3]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.2...v0.3.3 366 | [0.3.2]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.1...v0.3.2 367 | [0.3.1]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.0...v0.3.1 368 | [0.3.0]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.4...v0.3.0 369 | [0.2.4]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.3...v0.2.4 370 | [0.2.3]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.2...v0.2.3 371 | [0.2.2]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.1...v0.2.2 372 | [0.2.1]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.0...v0.2.1 373 | [0.2.0]: https://github.com/HadrienG2/triple-buffer/releases/tag/v0.2.0 374 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Triple buffering in Rust 2 | 3 | [![MPLv2 licensed](https://img.shields.io/badge/license-MPLv2-blue.svg)](./LICENSE) 4 | [![On crates.io](https://img.shields.io/crates/v/triple_buffer.svg)](https://crates.io/crates/triple_buffer) 5 | [![On docs.rs](https://docs.rs/triple_buffer/badge.svg)](https://docs.rs/triple_buffer/) 6 | [![Continuous Integration](https://img.shields.io/github/actions/workflow/status/HadrienG2/triple-buffer/ci.yml?branch=master)](https://github.com/HadrienG2/triple-buffer/actions?query=workflow%3A%22Continuous+Integration%22) 7 | ![Requires rustc 8 | 1.74.0+](https://img.shields.io/badge/rustc-1.74.0+-lightgray.svg) 9 | 10 | 11 | ## What is this? 12 | 13 | This is an implementation of triple buffering written in Rust. 
You may find it 14 | useful for the following class of thread synchronization problems: 15 | 16 | - There is one producer thread and one consumer thread 17 | - The producer wants to update a shared memory value periodically 18 | - The consumer wants to access the latest update from the producer at any time 19 | 20 | For many use cases, you can use the ergonomic write/read interface, where 21 | the producer moves values into the buffer and the consumer accesses the 22 | latest buffer by shared reference: 23 | 24 | ```rust 25 | // Create a triple buffer 26 | use triple_buffer::triple_buffer; 27 | let (mut buf_input, mut buf_output) = triple_buffer(&0); 28 | 29 | // The producer thread can move a value into the buffer at any time 30 | let producer = std::thread::spawn(move || buf_input.write(42)); 31 | 32 | // The consumer thread can read the latest value at any time 33 | let consumer = std::thread::spawn(move || { 34 | let latest = buf_output.read(); 35 | assert!(*latest == 42 || *latest == 0); 36 | }); 37 | 38 | // Wait for both threads to be done 39 | producer.join().unwrap(); 40 | consumer.join().unwrap(); 41 | ``` 42 | 43 | In situations where moving the original value away and being unable to 44 | modify it on the consumer's side is too costly, such as if creating a new 45 | value involves dynamic memory allocation, you can use a lower-level API 46 | which allows you to access the producer and consumer's buffers in place 47 | and to precisely control when updates are propagated: 48 | 49 | ```rust 50 | // Create and split a triple buffer 51 | use triple_buffer::triple_buffer; 52 | let (mut buf_input, mut buf_output) = triple_buffer(&String::with_capacity(42)); 53 | 54 | // --- PRODUCER SIDE --- 55 | 56 | // Mutate the input buffer in place 57 | { 58 | // Acquire a reference to the input buffer 59 | let input = buf_input.input_buffer_mut(); 60 | 61 | // In general, you don't know what's inside of the buffer, so you should 62 | // always reset the value before use (this is a type-specific process). 
63 | input.clear(); 64 | 65 | // Perform an in-place update 66 | input.push_str("Hello, "); 67 | } 68 | 69 | // Publish the above input buffer update 70 | buf_input.publish(); 71 | 72 | // --- CONSUMER SIDE --- 73 | 74 | // Manually fetch the buffer update from the consumer interface 75 | buf_output.update(); 76 | 77 | // Acquire a read-only reference to the output buffer 78 | let output = buf_output.output_buffer(); 79 | assert_eq!(*output, "Hello, "); 80 | 81 | // Or acquire a mutable reference if necessary 82 | let output_mut = buf_output.output_buffer_mut(); 83 | 84 | // Post-process the output value before use 85 | output_mut.push_str("world!"); 86 | ``` 87 | 88 | Finally, as a middle ground between the maximal ergonomics of the 89 | [`write()`](Input::write) API and the maximal control of the 90 | [`input_buffer_mut()`](Input::input_buffer_mut)/[`publish()`](Input::publish) 91 | API, you can also use the 92 | [`input_buffer_publisher()`](Input::input_buffer_publisher) RAII API on the 93 | producer side, which ensures that `publish()` is automatically called when 94 | the resulting input buffer handle goes out of scope: 95 | 96 | ```rust 97 | // Create and split a triple buffer 98 | use triple_buffer::triple_buffer; 99 | let (mut buf_input, _) = triple_buffer(&String::with_capacity(42)); 100 | 101 | // Mutate the input buffer in place and publish it 102 | { 103 | // Acquire a reference to the input buffer 104 | let mut input = buf_input.input_buffer_publisher(); 105 | 106 | // In general, you don't know what's inside of the buffer, so you should 107 | // always reset the value before use (this is a type-specific process). 108 | input.clear(); 109 | 110 | // Perform an in-place update 111 | input.push_str("Hello world!"); 112 | 113 | // Input buffer is automatically published at the end of the scope of 114 | // the "input" RAII guard 115 | } 116 | 117 | // From this point on, the consumer can see the updated version 118 | ``` 119 | 120 | 121 | ## Give me details! How does it compare to alternatives? 122 | 123 | Compared to a mutex: 124 | 125 | - Only works in single-producer, single-consumer scenarios 126 | - Is nonblocking, and more precisely bounded wait-free. Concurrent accesses will 127 | be slowed down by cache contention, but no deadlock, livelock, or thread 128 | scheduling induced slowdown is possible. 129 | - Allows the producer and consumer to work simultaneously 130 | - Uses a lot more memory (3x payload + 3x bytes vs 1x payload + 1 bool) 131 | - Does not allow in-place updates, as the producer and consumer do not access 132 | the same memory location 133 | - Should have faster reads and slower updates, especially if in-place updates 134 | are more efficient than writing a fresh copy of the data. 135 | * When the data hasn't been updated, the readout transaction of triple 136 | buffering only requires a memory read, no atomic operation, and it can be 137 | performed in parallel with any ongoing update. 138 | * When the data has been updated, the readout transaction requires an 139 | infallible atomic operation, which may or may not be faster than the 140 | fallible atomic operations used by most mutex implementations. 141 | * Unless your data cannot be updated in place and must always be fully 142 | rewritten, the ability provided by mutexes to update data in place should 143 | make updates a lot more efficient, dwarfing any performance difference 144 | originating from the synchronization protocol.
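To make the mutex comparison above more concrete, here is a minimal sketch (illustration only, not part of the crate or its test suite) that shares a `u64` counter between a writer and a reader thread, first through a `std::sync::Mutex`, then through a triple buffer. The mutex version funnels both sides through a single lock, while the triple buffer version trades extra memory for wait-free accesses on both sides:

```rust
use std::sync::{Arc, Mutex};
use std::thread;
use triple_buffer::triple_buffer;

// Mutex version: a single shared location that both sides must lock
let shared = Arc::new(Mutex::new(0u64));
let shared_for_writer = Arc::clone(&shared);
let mutex_writer = thread::spawn(move || {
    for i in 1..=1000u64 {
        // May block behind the reader, but can update the value in place
        *shared_for_writer.lock().unwrap() = i;
    }
});
let mutex_reader = thread::spawn(move || {
    let mut last = 0;
    while last < 1000 {
        // May block behind the writer
        last = *shared.lock().unwrap();
    }
});
mutex_writer.join().unwrap();
mutex_reader.join().unwrap();

// Triple buffer version: three buffers instead of one, but neither side
// ever waits for the other one
let (mut input, mut output) = triple_buffer(&0u64);
let tb_writer = thread::spawn(move || {
    for i in 1..=1000u64 {
        // Wait-free: silently overwrites values the reader did not get to see
        input.write(i);
    }
});
let tb_reader = thread::spawn(move || {
    let mut last = 0;
    while last < 1000 {
        // Wait-free: always observes the latest published value
        last = *output.read();
    }
});
tb_writer.join().unwrap();
tb_reader.join().unwrap();
```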
145 | 146 | Compared to the read-copy-update (RCU) primitive from the Linux kernel: 147 | 148 | - Only works in single-producer, single-consumer scenarios 149 | - Has higher dirty read overhead on relaxed-memory architectures (ARM, POWER...) 150 | - Does not require accounting for reader "grace periods": once the reader has 151 | gotten access to the latest value, the synchronization transaction is over 152 | - Does not use the compare-and-swap hardware primitive on update, which is 153 | inefficient by design as it forces its users to retry transactions in a loop. 154 | - Does not suffer from the ABA problem, allowing much simpler code 155 | - Allocates memory on initialization only, rather than on every update 156 | - May use more memory (3x payload + 3x bytes vs 1x pointer + a number of 157 | payloads and refcounts that depends on the readout and update pattern) 158 | - Should be slower if updates are rare, faster if updates are frequent 159 | * The RCU's happy reader path is slightly faster (no flag to check), but its 160 | update procedure is a lot more involved and costly. 161 | 162 | Compared to sending the updates on a message queue: 163 | 164 | - Only works in single-producer, single-consumer scenarios (queues can work in 165 | other scenarios, although the implementations are much less efficient) 166 | - Consumer only has access to the latest state, not the previous ones 167 | - Consumer does not *need* to get through every previous state 168 | - Is nonblocking AND uses bounded amounts of memory (with queues, it's a choice, 169 | unless you use one of those evil queues that silently drop data when full) 170 | - Can transmit information in a single move, rather than two 171 | - Should be faster for any compatible use case. 172 | * Queues force you to move data twice, once in, once out, which will incur a 173 | significant cost for any nontrivial data. If the inner data requires 174 | allocation, they force you to allocate for every transaction. By design, 175 | they force you to store and go through every update, which is not useful 176 | when you're only interested in the latest version of the data. 177 | 178 | In short, triple buffering is what you're after in scenarios where a shared 179 | memory location is updated frequently by a single writer, read by a single 180 | reader who only wants the latest version, and you can spare some RAM. 181 | 182 | - If you need multiple producers, look somewhere else 183 | - If you need multiple consumers, you may be interested in my related "SPMC 184 | buffer" work, which basically extends triple buffering to multiple consumers 185 | - If you can't tolerate the RAM overhead or want to update the data in place, 186 | try a Mutex instead (or possibly an RWLock) 187 | - If the shared value is updated very rarely (e.g. every second), try an RCU 188 | - If the consumer must get every update, try a message queue 189 | 190 | 191 | ## How do I know your unsafe lock-free code is working? 192 | 193 | By running the tests, of course! Which is unfortunately currently harder than 194 | I'd like it to be. 195 | 196 | First of all, we have sequential tests, which are very thorough but obviously 197 | do not check the lock-free/synchronization part. You run them as follows: 198 | 199 | $ cargo test 200 | 201 | Then we have concurrent tests where, for example, a reader thread continuously 202 | observes the values from a rate-limited writer thread, and makes sure that it 203 | can see every single update without any incorrect value slipping in the middle.
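As a heavily simplified sketch of that idea (illustration only; the real concurrent tests live in `src/lib.rs`, are ignored by default, and check stronger properties such as actually observing every update), the writer below is rate-limited with a sleep while the reader keeps polling the latest value and checks that the observed counter never moves backwards or out of range:

```rust
use std::thread;
use std::time::Duration;
use triple_buffer::triple_buffer;

let (mut input, mut output) = triple_buffer(&0u64);

// Rate-limited writer: publishes an increasing counter
let writer = thread::spawn(move || {
    for i in 1..=100u64 {
        input.write(i);
        thread::sleep(Duration::from_millis(1));
    }
});

// Reader: polls the latest value and checks that it never goes backwards
// nor falls outside of the range of published values
let reader = thread::spawn(move || {
    let mut previous = 0;
    while previous < 100 {
        let current = *output.read();
        assert!(current >= previous && current <= 100);
        previous = current;
    }
});

writer.join().unwrap();
reader.join().unwrap();
```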
204 | 205 | These tests are more important, but also harder to run because one must first 206 | check some assumptions: 207 | 208 | - The testing host must have at least 2 physical CPU cores to test all possible 209 | race conditions 210 | - No other code should be eating CPU in the background, including other tests. 211 | - As the proper writing rate is system-dependent, what is configured in this 212 | test may not be appropriate for your machine. 213 | - You must test in release mode, as compiler optimizations tend to create more 214 | opportunities for race conditions. 215 | 216 | Taking this and the relatively long run time (~10-20 s) into account, the 217 | concurrent tests are ignored by default. To run them, make sure nothing is 218 | eating CPU in the background and do: 219 | 220 | $ cargo test --release -- --ignored --nocapture --test-threads=1 221 | 222 | Finally, we have benchmarks, which allow you to test how well the code is 223 | performing on your machine. We are now using `criterion` for said benchmarks, 224 | which means that to run them, you can simply do: 225 | 226 | $ cargo install cargo-criterion 227 | $ cargo criterion 228 | 229 | These benchmarks exercise the worst-case scenario of `u8` payloads, where 230 | synchronization overhead dominates as the cost of reading and writing the 231 | actual data is only 1 cycle. In real-world use cases, you will spend more time 232 | updating buffers and less time synchronizing them. 233 | 234 | However, due to the artificial nature of microbenchmarking, the benchmarks must 235 | exercise two scenarios which are respectively overly optimistic and overly 236 | pessimistic: 237 | 238 | 1. In uncontended mode, the buffer input and output reside on the same CPU core, 239 | which underestimates the overhead of transferring modified cache lines from 240 | the L1 cache of the source CPU to that of the destination CPU. 241 | * This is not as bad as it sounds, because you will pay this overhead no 242 | matter what kind of thread synchronization primitive you use, so we're not 243 | hiding `triple-buffer` specific overhead here. All you need to do is to 244 | ensure that when comparing against another synchronization primitive, that 245 | primitive is benchmarked in a similar way. 246 | 2. In contended mode, the benchmarked half of the triple buffer is operating 247 | under maximal load from the other half, which is much busier than what is 248 | actually going to be observed in real-world workloads. 249 | * In this configuration, what you're essentially measuring is the performance 250 | of your CPU's cache line locking protocol and inter-CPU core data 251 | transfers under the shared data access pattern of `triple-buffer`. 252 | 253 | Therefore, consider these benchmarks' timings as orders of magnitude of the best 254 | and the worst that you can expect from `triple-buffer`, where actual performance 255 | will be somewhere in between these two numbers depending on your workload. 256 | 257 | On an Intel Core i3-3220 CPU @ 3.30GHz, typical results are as follows: 258 | 259 | * Clean read: 0.9 ns 260 | * Write: 6.9 ns 261 | * Write + dirty read: 19.6 ns 262 | * Dirty read (estimated): 12.7 ns 263 | * Contended write: 60.8 ns 264 | * Contended read: 59.2 ns 265 | 266 | 267 | ## License 268 | 269 | This crate is distributed under the terms of the MPLv2 license. See the LICENSE 270 | file for details. 271 | 272 | More relaxed licensing (Apache, MIT, BSD...) may also be negotiated, in 273 | exchange for a financial contribution.
Contact me for details at 274 | knights_of_ni AT gmx DOTCOM. 275 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 3 4 | 5 | [[package]] 6 | name = "aho-corasick" 7 | version = "1.1.3" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" 10 | dependencies = [ 11 | "memchr", 12 | ] 13 | 14 | [[package]] 15 | name = "anes" 16 | version = "0.1.6" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" 19 | 20 | [[package]] 21 | name = "anstyle" 22 | version = "1.0.10" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" 25 | 26 | [[package]] 27 | name = "autocfg" 28 | version = "1.4.0" 29 | source = "registry+https://github.com/rust-lang/crates.io-index" 30 | checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" 31 | 32 | [[package]] 33 | name = "cast" 34 | version = "0.3.0" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" 37 | 38 | [[package]] 39 | name = "cfg-if" 40 | version = "1.0.0" 41 | source = "registry+https://github.com/rust-lang/crates.io-index" 42 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 43 | 44 | [[package]] 45 | name = "ciborium" 46 | version = "0.2.2" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" 49 | dependencies = [ 50 | "ciborium-io", 51 | "ciborium-ll", 52 | "serde", 53 | ] 54 | 55 | [[package]] 56 | name = "ciborium-io" 57 | version = "0.2.2" 58 | source = "registry+https://github.com/rust-lang/crates.io-index" 59 | checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" 60 | 61 | [[package]] 62 | name = "ciborium-ll" 63 | version = "0.2.2" 64 | source = "registry+https://github.com/rust-lang/crates.io-index" 65 | checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" 66 | dependencies = [ 67 | "ciborium-io", 68 | "half", 69 | ] 70 | 71 | [[package]] 72 | name = "clap" 73 | version = "4.5.34" 74 | source = "registry+https://github.com/rust-lang/crates.io-index" 75 | checksum = "e958897981290da2a852763fe9cdb89cd36977a5d729023127095fa94d95e2ff" 76 | dependencies = [ 77 | "clap_builder", 78 | ] 79 | 80 | [[package]] 81 | name = "clap_builder" 82 | version = "4.5.34" 83 | source = "registry+https://github.com/rust-lang/crates.io-index" 84 | checksum = "83b0f35019843db2160b5bb19ae09b4e6411ac33fc6a712003c33e03090e2489" 85 | dependencies = [ 86 | "anstyle", 87 | "clap_lex", 88 | ] 89 | 90 | [[package]] 91 | name = "clap_lex" 92 | version = "0.7.4" 93 | source = "registry+https://github.com/rust-lang/crates.io-index" 94 | checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" 95 | 96 | [[package]] 97 | name = "criterion" 98 | version = "0.5.1" 99 | source = "registry+https://github.com/rust-lang/crates.io-index" 100 | checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" 101 | dependencies = [ 102 
| "anes", 103 | "cast", 104 | "ciborium", 105 | "clap", 106 | "criterion-plot", 107 | "is-terminal", 108 | "itertools", 109 | "num-traits", 110 | "once_cell", 111 | "oorandom", 112 | "regex", 113 | "serde", 114 | "serde_derive", 115 | "serde_json", 116 | "tinytemplate", 117 | "walkdir", 118 | ] 119 | 120 | [[package]] 121 | name = "criterion-plot" 122 | version = "0.5.0" 123 | source = "registry+https://github.com/rust-lang/crates.io-index" 124 | checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" 125 | dependencies = [ 126 | "cast", 127 | "itertools", 128 | ] 129 | 130 | [[package]] 131 | name = "crossbeam-utils" 132 | version = "0.8.21" 133 | source = "registry+https://github.com/rust-lang/crates.io-index" 134 | checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" 135 | 136 | [[package]] 137 | name = "crunchy" 138 | version = "0.2.3" 139 | source = "registry+https://github.com/rust-lang/crates.io-index" 140 | checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" 141 | 142 | [[package]] 143 | name = "either" 144 | version = "1.15.0" 145 | source = "registry+https://github.com/rust-lang/crates.io-index" 146 | checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" 147 | 148 | [[package]] 149 | name = "half" 150 | version = "2.4.1" 151 | source = "registry+https://github.com/rust-lang/crates.io-index" 152 | checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" 153 | dependencies = [ 154 | "cfg-if", 155 | "crunchy", 156 | ] 157 | 158 | [[package]] 159 | name = "hermit-abi" 160 | version = "0.5.0" 161 | source = "registry+https://github.com/rust-lang/crates.io-index" 162 | checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" 163 | 164 | [[package]] 165 | name = "is-terminal" 166 | version = "0.4.16" 167 | source = "registry+https://github.com/rust-lang/crates.io-index" 168 | checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" 169 | dependencies = [ 170 | "hermit-abi", 171 | "libc", 172 | "windows-sys", 173 | ] 174 | 175 | [[package]] 176 | name = "itertools" 177 | version = "0.10.5" 178 | source = "registry+https://github.com/rust-lang/crates.io-index" 179 | checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" 180 | dependencies = [ 181 | "either", 182 | ] 183 | 184 | [[package]] 185 | name = "itoa" 186 | version = "1.0.15" 187 | source = "registry+https://github.com/rust-lang/crates.io-index" 188 | checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" 189 | 190 | [[package]] 191 | name = "libc" 192 | version = "0.2.171" 193 | source = "registry+https://github.com/rust-lang/crates.io-index" 194 | checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" 195 | 196 | [[package]] 197 | name = "memchr" 198 | version = "2.7.4" 199 | source = "registry+https://github.com/rust-lang/crates.io-index" 200 | checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" 201 | 202 | [[package]] 203 | name = "num-traits" 204 | version = "0.2.19" 205 | source = "registry+https://github.com/rust-lang/crates.io-index" 206 | checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" 207 | dependencies = [ 208 | "autocfg", 209 | ] 210 | 211 | [[package]] 212 | name = "once_cell" 213 | version = "1.21.2" 214 | source = "registry+https://github.com/rust-lang/crates.io-index" 215 | checksum = 
"c2806eaa3524762875e21c3dcd057bc4b7bfa01ce4da8d46be1cd43649e1cc6b" 216 | 217 | [[package]] 218 | name = "oorandom" 219 | version = "11.1.5" 220 | source = "registry+https://github.com/rust-lang/crates.io-index" 221 | checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" 222 | 223 | [[package]] 224 | name = "proc-macro2" 225 | version = "1.0.94" 226 | source = "registry+https://github.com/rust-lang/crates.io-index" 227 | checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" 228 | dependencies = [ 229 | "unicode-ident", 230 | ] 231 | 232 | [[package]] 233 | name = "quote" 234 | version = "1.0.40" 235 | source = "registry+https://github.com/rust-lang/crates.io-index" 236 | checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" 237 | dependencies = [ 238 | "proc-macro2", 239 | ] 240 | 241 | [[package]] 242 | name = "regex" 243 | version = "1.11.1" 244 | source = "registry+https://github.com/rust-lang/crates.io-index" 245 | checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" 246 | dependencies = [ 247 | "aho-corasick", 248 | "memchr", 249 | "regex-automata", 250 | "regex-syntax", 251 | ] 252 | 253 | [[package]] 254 | name = "regex-automata" 255 | version = "0.4.9" 256 | source = "registry+https://github.com/rust-lang/crates.io-index" 257 | checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" 258 | dependencies = [ 259 | "aho-corasick", 260 | "memchr", 261 | "regex-syntax", 262 | ] 263 | 264 | [[package]] 265 | name = "regex-syntax" 266 | version = "0.8.5" 267 | source = "registry+https://github.com/rust-lang/crates.io-index" 268 | checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" 269 | 270 | [[package]] 271 | name = "ryu" 272 | version = "1.0.20" 273 | source = "registry+https://github.com/rust-lang/crates.io-index" 274 | checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" 275 | 276 | [[package]] 277 | name = "same-file" 278 | version = "1.0.6" 279 | source = "registry+https://github.com/rust-lang/crates.io-index" 280 | checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" 281 | dependencies = [ 282 | "winapi-util", 283 | ] 284 | 285 | [[package]] 286 | name = "serde" 287 | version = "1.0.219" 288 | source = "registry+https://github.com/rust-lang/crates.io-index" 289 | checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" 290 | dependencies = [ 291 | "serde_derive", 292 | ] 293 | 294 | [[package]] 295 | name = "serde_derive" 296 | version = "1.0.219" 297 | source = "registry+https://github.com/rust-lang/crates.io-index" 298 | checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" 299 | dependencies = [ 300 | "proc-macro2", 301 | "quote", 302 | "syn", 303 | ] 304 | 305 | [[package]] 306 | name = "serde_json" 307 | version = "1.0.140" 308 | source = "registry+https://github.com/rust-lang/crates.io-index" 309 | checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" 310 | dependencies = [ 311 | "itoa", 312 | "memchr", 313 | "ryu", 314 | "serde", 315 | ] 316 | 317 | [[package]] 318 | name = "syn" 319 | version = "2.0.100" 320 | source = "registry+https://github.com/rust-lang/crates.io-index" 321 | checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" 322 | dependencies = [ 323 | "proc-macro2", 324 | "quote", 325 | "unicode-ident", 326 | ] 327 | 328 | [[package]] 329 | name = "testbench" 330 | version = "1.0.1" 
331 | source = "registry+https://github.com/rust-lang/crates.io-index" 332 | checksum = "4e102f3aa6b7242c8029c115def70364a064de5a11b49fade2b4a3cdbc8aa28f" 333 | 334 | [[package]] 335 | name = "tinytemplate" 336 | version = "1.2.1" 337 | source = "registry+https://github.com/rust-lang/crates.io-index" 338 | checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" 339 | dependencies = [ 340 | "serde", 341 | "serde_json", 342 | ] 343 | 344 | [[package]] 345 | name = "triple_buffer" 346 | version = "9.0.0" 347 | dependencies = [ 348 | "criterion", 349 | "crossbeam-utils", 350 | "testbench", 351 | ] 352 | 353 | [[package]] 354 | name = "unicode-ident" 355 | version = "1.0.18" 356 | source = "registry+https://github.com/rust-lang/crates.io-index" 357 | checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" 358 | 359 | [[package]] 360 | name = "walkdir" 361 | version = "2.5.0" 362 | source = "registry+https://github.com/rust-lang/crates.io-index" 363 | checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" 364 | dependencies = [ 365 | "same-file", 366 | "winapi-util", 367 | ] 368 | 369 | [[package]] 370 | name = "winapi-util" 371 | version = "0.1.9" 372 | source = "registry+https://github.com/rust-lang/crates.io-index" 373 | checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" 374 | dependencies = [ 375 | "windows-sys", 376 | ] 377 | 378 | [[package]] 379 | name = "windows-sys" 380 | version = "0.59.0" 381 | source = "registry+https://github.com/rust-lang/crates.io-index" 382 | checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" 383 | dependencies = [ 384 | "windows-targets", 385 | ] 386 | 387 | [[package]] 388 | name = "windows-targets" 389 | version = "0.52.6" 390 | source = "registry+https://github.com/rust-lang/crates.io-index" 391 | checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" 392 | dependencies = [ 393 | "windows_aarch64_gnullvm", 394 | "windows_aarch64_msvc", 395 | "windows_i686_gnu", 396 | "windows_i686_gnullvm", 397 | "windows_i686_msvc", 398 | "windows_x86_64_gnu", 399 | "windows_x86_64_gnullvm", 400 | "windows_x86_64_msvc", 401 | ] 402 | 403 | [[package]] 404 | name = "windows_aarch64_gnullvm" 405 | version = "0.52.6" 406 | source = "registry+https://github.com/rust-lang/crates.io-index" 407 | checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" 408 | 409 | [[package]] 410 | name = "windows_aarch64_msvc" 411 | version = "0.52.6" 412 | source = "registry+https://github.com/rust-lang/crates.io-index" 413 | checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" 414 | 415 | [[package]] 416 | name = "windows_i686_gnu" 417 | version = "0.52.6" 418 | source = "registry+https://github.com/rust-lang/crates.io-index" 419 | checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" 420 | 421 | [[package]] 422 | name = "windows_i686_gnullvm" 423 | version = "0.52.6" 424 | source = "registry+https://github.com/rust-lang/crates.io-index" 425 | checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" 426 | 427 | [[package]] 428 | name = "windows_i686_msvc" 429 | version = "0.52.6" 430 | source = "registry+https://github.com/rust-lang/crates.io-index" 431 | checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" 432 | 433 | [[package]] 434 | name = "windows_x86_64_gnu" 435 | version = "0.52.6" 436 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 437 | checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" 438 | 439 | [[package]] 440 | name = "windows_x86_64_gnullvm" 441 | version = "0.52.6" 442 | source = "registry+https://github.com/rust-lang/crates.io-index" 443 | checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" 444 | 445 | [[package]] 446 | name = "windows_x86_64_msvc" 447 | version = "0.52.6" 448 | source = "registry+https://github.com/rust-lang/crates.io-index" 449 | checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" 450 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. "Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. 
"Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. 
Responsibilities 158 | ------------------- 159 | 160 | 3.1. Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. 
Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. 
* 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. Each version will be given a 331 | distinguishing version number. 332 | 333 | 10.2. Effect of New Versions 334 | 335 | You may distribute the Covered Software under the terms of the version 336 | of the License under which You originally received the Covered Software, 337 | or under the terms of any subsequent version published by the license 338 | steward. 339 | 340 | 10.3. Modified Versions 341 | 342 | If you create software not governed by this License, and you want to 343 | create a new license for such software, you may create and use a 344 | modified version of this License if you rename the license and remove 345 | any references to the name of the license steward (except to note that 346 | such modified license differs from this License). 347 | 348 | 10.4. 
Distributing Source Code Form that is Incompatible With Secondary 349 | Licenses 350 | 351 | If You choose to distribute Source Code Form that is Incompatible With 352 | Secondary Licenses under the terms of this version of the License, the 353 | notice described in Exhibit B of this License must be attached. 354 | 355 | Exhibit A - Source Code Form License Notice 356 | ------------------------------------------- 357 | 358 | This Source Code Form is subject to the terms of the Mozilla Public 359 | License, v. 2.0. If a copy of the MPL was not distributed with this 360 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 361 | 362 | If it is not possible or desirable to put the notice in a particular 363 | file, then You may include the notice in a location (such as a LICENSE 364 | file in a relevant directory) where a recipient would be likely to look 365 | for such a notice. 366 | 367 | You may add additional accurate notices of copyright ownership. 368 | 369 | Exhibit B - "Incompatible With Secondary Licenses" Notice 370 | --------------------------------------------------------- 371 | 372 | This Source Code Form is "Incompatible With Secondary Licenses", as 373 | defined by the Mozilla Public License, v. 2.0. -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! In this crate, we propose a Rust implementation of triple buffering. This is 2 | //! a non-blocking thread synchronization mechanism that can be used when a 3 | //! single producer thread is frequently updating a shared data block, and a 4 | //! single consumer thread wants to be able to read the latest available version 5 | //! of the shared data whenever it feels like it. 6 | //! 7 | //! # Examples 8 | //! 9 | //! For many use cases, you can use the ergonomic write/read interface, where 10 | //! the producer moves values into the buffer and the consumer accesses the 11 | //! latest buffer by shared reference: 12 | //! 13 | //! ``` 14 | //! // Create a triple buffer 15 | //! use triple_buffer::triple_buffer; 16 | //! let (mut buf_input, mut buf_output) = triple_buffer(&0); 17 | //! 18 | //! // The producer thread can move a value into the buffer at any time 19 | //! let producer = std::thread::spawn(move || buf_input.write(42)); 20 | //! 21 | //! // The consumer thread can read the latest value at any time 22 | //! let consumer = std::thread::spawn(move || { 23 | //! let latest = buf_output.read(); 24 | //! assert!(*latest == 42 || *latest == 0); 25 | //! }); 26 | //! 27 | //! # producer.join().unwrap(); 28 | //! # consumer.join().unwrap(); 29 | //! ``` 30 | //! 31 | //! In situations where moving the original value away and being unable to 32 | //! modify it on the consumer's side is too costly, such as if creating a new 33 | //! value involves dynamic memory allocation, you can use a lower-level API 34 | //! which allows you to access the producer and consumer's buffers in place 35 | //! and to precisely control when updates are propagated: 36 | //! 37 | //! ``` 38 | //! // Create and split a triple buffer 39 | //! use triple_buffer::triple_buffer; 40 | //! let (mut buf_input, mut buf_output) = triple_buffer(&String::with_capacity(42)); 41 | //! 42 | //! // --- PRODUCER SIDE --- 43 | //! 44 | //! // Mutate the input buffer in place 45 | //! { 46 | //! // Acquire a reference to the input buffer 47 | //! let input = buf_input.input_buffer_mut(); 48 | //! 49 | //! 
// In general, you don't know what's inside of the buffer, so you should 50 | //! // always reset the value before use (this is a type-specific process). 51 | //! input.clear(); 52 | //! 53 | //! // Perform an in-place update 54 | //! input.push_str("Hello, "); 55 | //! } 56 | //! 57 | //! // Publish the above input buffer update 58 | //! buf_input.publish(); 59 | //! 60 | //! // --- CONSUMER SIDE --- 61 | //! 62 | //! // Manually fetch the buffer update from the consumer interface 63 | //! buf_output.update(); 64 | //! 65 | //! // Acquire a read-only reference to the output buffer 66 | //! let output = buf_output.output_buffer(); 67 | //! assert_eq!(*output, "Hello, "); 68 | //! 69 | //! // Or acquire a mutable reference if necessary 70 | //! let output_mut = buf_output.output_buffer_mut(); 71 | //! 72 | //! // Post-process the output value before use 73 | //! output_mut.push_str("world!"); 74 | //! ``` 75 | //! 76 | //! Finally, as a middle ground before the maximal ergonomics of the 77 | //! [`write()`](Input::write) API and the maximal control of the 78 | //! [`input_buffer_mut()`](Input::input_buffer_mut)/[`publish()`](Input::publish) 79 | //! API, you can also use the 80 | //! [`input_buffer_publisher()`](Input::input_buffer_publisher) RAII API on the 81 | //! producer side, which ensures that `publish()` is automatically called when 82 | //! the resulting input buffer handle goes out of scope: 83 | //! 84 | //! ``` 85 | //! // Create and split a triple buffer 86 | //! use triple_buffer::triple_buffer; 87 | //! let (mut buf_input, _) = triple_buffer(&String::with_capacity(42)); 88 | //! 89 | //! // Mutate the input buffer in place and publish it 90 | //! { 91 | //! // Acquire a reference to the input buffer 92 | //! let mut input = buf_input.input_buffer_publisher(); 93 | //! 94 | //! // In general, you don't know what's inside of the buffer, so you should 95 | //! // always reset the value before use (this is a type-specific process). 96 | //! input.clear(); 97 | //! 98 | //! // Perform an in-place update 99 | //! input.push_str("Hello world!"); 100 | //! 101 | //! // Input buffer is automatically published at the end of the scope of 102 | //! // the "input" RAII guard 103 | //! } 104 | //! 105 | //! // From this point on, the consumer can see the updated version 106 | //! ``` 107 | 108 | #![cfg_attr(not(test), no_std)] 109 | #![deny(missing_debug_implementations, missing_docs)] 110 | 111 | extern crate alloc; 112 | 113 | use crossbeam_utils::CachePadded; 114 | 115 | use alloc::sync::Arc; 116 | use core::{ 117 | cell::UnsafeCell, 118 | fmt, 119 | ops::{Deref, DerefMut}, 120 | sync::atomic::{AtomicU8, Ordering}, 121 | }; 122 | 123 | /// A triple buffer, useful for nonblocking and thread-safe data sharing 124 | /// 125 | /// A triple buffer is a single-producer single-consumer nonblocking 126 | /// communication channel which behaves like a shared variable: the producer 127 | /// submits regular updates, and the consumer accesses the latest available 128 | /// value whenever it feels like it. 
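///
/// A minimal construction sketch (functionally equivalent to the
/// `triple_buffer()` shorthand defined below; the `i32` payload is purely
/// illustrative):
///
/// ```
/// use triple_buffer::TripleBuffer;
///
/// // Build the buffer from an initial value, then split it into the
/// // producer-side `Input` and consumer-side `Output` endpoints, which
/// // may be moved to different threads
/// let (mut input, mut output) = TripleBuffer::new(&0i32).split();
///
/// input.write(42);
/// assert_eq!(*output.read(), 42);
/// ```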
129 | #[derive(Debug)] 130 | pub struct TripleBuffer { 131 | /// Input object used by producers to send updates 132 | input: Input, 133 | 134 | /// Output object used by consumers to read the current value 135 | output: Output, 136 | } 137 | // 138 | impl TripleBuffer { 139 | /// Construct a triple buffer with a certain initial value 140 | pub fn new(initial: &T) -> Self { 141 | Self::new_impl(|| initial.clone()) 142 | } 143 | } 144 | // 145 | impl Default for TripleBuffer { 146 | /// Construct a triple buffer with a default-constructed value 147 | fn default() -> Self { 148 | Self::new_impl(T::default) 149 | } 150 | } 151 | // 152 | impl TripleBuffer { 153 | /// Construct a triple buffer, using a functor to generate initial values 154 | fn new_impl(mut generator: impl FnMut() -> T) -> Self { 155 | // Start with the shared state... 156 | let shared_state = Arc::new(SharedState::new(|_i| generator(), 0)); 157 | 158 | // ...then construct the input and output structs 159 | TripleBuffer { 160 | input: Input { 161 | shared: shared_state.clone(), 162 | input_idx: 1, 163 | }, 164 | output: Output { 165 | shared: shared_state, 166 | output_idx: 2, 167 | }, 168 | } 169 | } 170 | 171 | /// Extract input and output of the triple buffer 172 | // 173 | // NOTE: Although it would be nicer to directly return `Input` and `Output` 174 | // from `new()`, the `split()` design gives some API evolution 175 | // headroom towards future allocation-free modes of operation where 176 | // the SharedState is a static variable, or a stack-allocated variable 177 | // used through scoped threads or other unsafe thread synchronization. 178 | // 179 | // See https://github.com/HadrienG2/triple-buffer/issues/8 . 180 | // 181 | pub fn split(self) -> (Input, Output) { 182 | (self.input, self.output) 183 | } 184 | } 185 | // 186 | /// Shorthand for `TripleBuffer::new(initial).split()` 187 | pub fn triple_buffer(initial: &T) -> (Input, Output) { 188 | TripleBuffer::new(initial).split() 189 | } 190 | // 191 | // The Clone and PartialEq traits are used internally for testing and I don't 192 | // want to commit to supporting them publicly for now. 193 | // 194 | #[doc(hidden)] 195 | impl Clone for TripleBuffer { 196 | fn clone(&self) -> Self { 197 | // Clone the shared state. This is safe because at this layer of the 198 | // interface, one needs an Input/Output &mut to mutate the shared state. 199 | let shared_state = Arc::new(unsafe { (*self.input.shared).clone() }); 200 | 201 | // ...then the input and output structs 202 | TripleBuffer { 203 | input: Input { 204 | shared: shared_state.clone(), 205 | input_idx: self.input.input_idx, 206 | }, 207 | output: Output { 208 | shared: shared_state, 209 | output_idx: self.output.output_idx, 210 | }, 211 | } 212 | } 213 | } 214 | // 215 | #[doc(hidden)] 216 | impl PartialEq for TripleBuffer { 217 | fn eq(&self, other: &Self) -> bool { 218 | // Compare the shared states. This is safe because at this layer of the 219 | // interface, one needs an Input/Output &mut to mutate the shared state. 220 | let shared_states_equal = unsafe { (*self.input.shared).eq(&*other.input.shared) }; 221 | 222 | // Compare the rest of the triple buffer states 223 | shared_states_equal 224 | && (self.input.input_idx == other.input.input_idx) 225 | && (self.output.output_idx == other.output.output_idx) 226 | } 227 | } 228 | 229 | /// Producer interface to the triple buffer 230 | /// 231 | /// The producer of data can use this struct to submit updates to the triple 232 | /// buffer whenever he likes. 
These updates are nonblocking: a collision between 233 | /// the producer and the consumer will result in cache contention, but deadlocks 234 | /// and scheduling-induced slowdowns cannot happen. 235 | #[derive(Debug)] 236 | pub struct Input { 237 | /// Reference-counted shared state 238 | shared: Arc>, 239 | 240 | /// Index of the input buffer (which is private to the producer) 241 | input_idx: BufferIndex, 242 | } 243 | // 244 | // Public interface 245 | impl Input { 246 | /// Write a new value into the triple buffer 247 | pub fn write(&mut self, value: T) { 248 | // Update the input buffer 249 | *self.input_buffer_mut() = value; 250 | 251 | // Publish our update to the consumer 252 | self.publish(); 253 | } 254 | 255 | /// Check if the consumer has fetched our latest submission yet 256 | /// 257 | /// This method is only intended for diagnostics purposes. Please do not let 258 | /// it inform your decision of sending or not sending a value, as that would 259 | /// effectively be building a very poor spinlock-based double buffer 260 | /// implementation. If what you truly need is a double buffer, build 261 | /// yourself a proper blocking one instead of wasting CPU time. 262 | pub fn consumed(&self) -> bool { 263 | let back_info = self.shared.back_info.load(Ordering::Relaxed); 264 | back_info & BACK_DIRTY_BIT == 0 265 | } 266 | 267 | /// Query the current value of the input buffer 268 | /// 269 | /// This is a read-only version of 270 | /// [`input_buffer_mut()`](Input::input_buffer_mut). Please read the 271 | /// documentation of that method for more information on the precautions 272 | /// that need to be taken when accessing the input buffer in place. 273 | pub fn input_buffer(&self) -> &T { 274 | // This is safe because the synchronization protocol ensures that we 275 | // have exclusive access to this buffer. 276 | let input_ptr = self.shared.buffers[self.input_idx as usize].get(); 277 | unsafe { &*input_ptr } 278 | } 279 | 280 | /// Access the input buffer directly 281 | /// 282 | /// This advanced interface allows you to update the input buffer in place, 283 | /// so that you can avoid creating values of type T repeatedy just to push 284 | /// them into the triple buffer when doing so is expensive. 285 | /// 286 | /// However, by using it, you force yourself to take into account some 287 | /// implementation subtleties that you could otherwise ignore. 288 | /// 289 | /// First, the buffer does not contain the last value that you published 290 | /// (which is now available to the consumer thread). In fact, what you get 291 | /// may not match _any_ value that you sent in the past, but rather be a new 292 | /// value that was written in there by the consumer thread. All you can 293 | /// safely assume is that the buffer contains a valid value of type T, which 294 | /// you may need to "clean up" before use using a type-specific process 295 | /// (like calling the `clear()` method of a `Vec`/`String`). 296 | /// 297 | /// Second, we do not send updates automatically. You need to call 298 | /// [`publish()`](Input::publish) in order to propagate a buffer update to 299 | /// the consumer. If you would prefer this to be done automatically when the 300 | /// input buffer reference goes out of scope, consider using the 301 | /// [`input_buffer_publisher()`](Input::input_buffer_publisher) RAII 302 | /// interface instead. 
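///
/// A short usage sketch of this in-place workflow (the `Vec` payload is
/// purely illustrative):
///
/// ```
/// use triple_buffer::triple_buffer;
/// let (mut input, mut output) = triple_buffer(&Vec::new());
///
/// // The recycled input buffer may hold stale data, so reset it first...
/// let buffer = input.input_buffer_mut();
/// buffer.clear();
///
/// // ...then update it in place
/// buffer.push(1);
/// buffer.push(2);
///
/// // Nothing becomes visible to the consumer until publish() is called
/// input.publish();
/// assert_eq!(*output.read(), vec![1, 2]);
/// ```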
303 | pub fn input_buffer_mut(&mut self) -> &mut T { 304 | // This is safe because the synchronization protocol ensures that we 305 | // have exclusive access to this buffer. 306 | let input_ptr = self.shared.buffers[self.input_idx as usize].get(); 307 | unsafe { &mut *input_ptr } 308 | } 309 | 310 | /// Publish the current input buffer, checking for overwrites 311 | /// 312 | /// After updating the input buffer in-place using 313 | /// [`input_buffer_mut()`](Input::input_buffer_mut), you can use this method 314 | /// to publish your updates to the consumer. Beware that this will replace 315 | /// the current input buffer with another one that has totally unrelated 316 | /// contents. 317 | /// 318 | /// It will also tell you whether you overwrote a value which was not read 319 | /// by the consumer thread. 320 | pub fn publish(&mut self) -> bool { 321 | // Swap the input buffer and the back buffer, setting the dirty bit 322 | // 323 | // The ordering must be AcqRel, because... 324 | // 325 | // - Our accesses to the old buffer must not be reordered after this 326 | // operation (which mandates Release ordering), otherwise they could 327 | // race with the consumer accessing the freshly published buffer. 328 | // - Our accesses from the buffer must not be reordered before this 329 | // operation (which mandates Consume ordering, that is best 330 | // approximated by Acquire in Rust), otherwise they would race with 331 | // the consumer accessing the buffer as well before switching to 332 | // another buffer. 333 | // * This reordering may seem paradoxical, but could happen if the 334 | // compiler or CPU correctly speculated the new buffer's index 335 | // before that index is actually read, as well as on weird hardware 336 | // with incoherent caches like GPUs or old DEC Alpha where keeping 337 | // data in sync across cores requires manual action. 338 | // 339 | let former_back_info = self 340 | .shared 341 | .back_info 342 | .swap(self.input_idx | BACK_DIRTY_BIT, Ordering::AcqRel); 343 | 344 | // The old back buffer becomes our new input buffer 345 | self.input_idx = former_back_info & BACK_INDEX_MASK; 346 | 347 | // Tell whether we have overwritten unread data 348 | former_back_info & BACK_DIRTY_BIT != 0 349 | } 350 | 351 | /// Access the input buffer wrapped in the `InputPublishGuard` 352 | /// 353 | /// This is an RAII alternative to the [`input_buffer_mut()`]/[`publish()`] 354 | /// workflow where the [`publish()`] transaction happens automatically when 355 | /// the input buffer handle goes out of scope. 356 | /// 357 | /// Please check out the documentation of [`input_buffer_mut()`] and 358 | /// [`publish()`] to know more about the precautions that you need to take 359 | /// when using the lower-level in-place buffer access interface. 360 | /// 361 | /// [`input_buffer_mut()`]: Input::input_buffer_mut 362 | /// [`publish()`]: Input::publish 363 | pub fn input_buffer_publisher(&mut self) -> InputPublishGuard<'_, T> { 364 | InputPublishGuard { reference: self } 365 | } 366 | } 367 | 368 | /// RAII Guard to the buffer provided by an [`Input`]. 369 | /// 370 | /// The current buffer of the [`Input`] can be accessed through this guard via 371 | /// its [`Deref`] and [`DerefMut`] implementations. 372 | /// 373 | /// When the guard is dropped, [`Input::publish()`] will be called 374 | /// automatically. 375 | /// 376 | /// This structure is created by the [`Input::input_buffer_publisher()`] method. 
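///
/// A brief illustrative sketch (a scalar payload is used to keep it short):
///
/// ```
/// use triple_buffer::triple_buffer;
/// let (mut input, mut output) = triple_buffer(&0);
///
/// {
///     // Deref/DerefMut give in-place access to the input buffer...
///     let mut guard = input.input_buffer_publisher();
///     *guard = 42;
/// } // ...and publish() runs here, as the guard goes out of scope
///
/// assert_eq!(*output.read(), 42);
/// ```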
377 | pub struct InputPublishGuard<'a, T: 'a + Send> { 378 | reference: &'a mut Input, 379 | } 380 | 381 | impl Deref for InputPublishGuard<'_, T> { 382 | type Target = T; 383 | 384 | fn deref(&self) -> &T { 385 | self.reference.input_buffer() 386 | } 387 | } 388 | 389 | impl DerefMut for InputPublishGuard<'_, T> { 390 | fn deref_mut(&mut self) -> &mut T { 391 | self.reference.input_buffer_mut() 392 | } 393 | } 394 | 395 | impl Drop for InputPublishGuard<'_, T> { 396 | #[inline] 397 | fn drop(&mut self) { 398 | self.reference.publish(); 399 | } 400 | } 401 | 402 | impl fmt::Debug for InputPublishGuard<'_, T> { 403 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 404 | fmt::Debug::fmt(&**self, f) 405 | } 406 | } 407 | 408 | impl fmt::Display for InputPublishGuard<'_, T> { 409 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 410 | (**self).fmt(f) 411 | } 412 | } 413 | 414 | /// Consumer interface to the triple buffer 415 | /// 416 | /// The consumer of data can use this struct to access the latest published 417 | /// update from the producer whenever he likes. Readout is nonblocking: a 418 | /// collision between the producer and consumer will result in cache contention, 419 | /// but deadlocks and scheduling-induced slowdowns cannot happen. 420 | #[derive(Debug)] 421 | pub struct Output { 422 | /// Reference-counted shared state 423 | shared: Arc>, 424 | 425 | /// Index of the output buffer (which is private to the consumer) 426 | output_idx: BufferIndex, 427 | } 428 | // 429 | // Public interface 430 | impl Output { 431 | /// Access the latest value from the triple buffer 432 | pub fn read(&mut self) -> &T { 433 | // Fetch updates from the producer 434 | self.update(); 435 | 436 | // Give access to the output buffer 437 | self.output_buffer_mut() 438 | } 439 | 440 | /// Tell whether an updated value has been submitted by the producer 441 | /// 442 | /// This method is mainly intended for diagnostics purposes. Please do not 443 | /// let it inform your decision of reading a value or not, as that would 444 | /// effectively be building a very poor spinlock-based double buffer 445 | /// implementation. If what you truly need is a double buffer, build 446 | /// yourself a proper blocking one instead of wasting CPU time. 447 | pub fn updated(&self) -> bool { 448 | let back_info = self.shared.back_info.load(Ordering::Relaxed); 449 | back_info & BACK_DIRTY_BIT != 0 450 | } 451 | 452 | /// Query the current value of the output buffer 453 | /// 454 | /// This is a deprecated compatibility alias to 455 | /// [`output_buffer()`](Self::output_buffer). Please use that method 456 | /// instead, as `peek_output_buffer()` is scheduled for removal in the next 457 | /// major release of `triple-buffer`. 458 | #[deprecated = "Please use output_buffer() instead"] 459 | pub fn peek_output_buffer(&self) -> &T { 460 | self.output_buffer() 461 | } 462 | 463 | /// Query the current value of the output buffer 464 | /// 465 | /// This is a read-only version of 466 | /// [`output_buffer_mut()`](Output::output_buffer_mut). Please read the 467 | /// documentation of that method for more information on the precautions 468 | /// that need to be taken when accessing the output buffer in place. 469 | /// 470 | /// In particular, remember that this method does not update the output 471 | /// buffer automatically. You need to call [`update()`](Output::update) in 472 | /// order to fetch buffer updates from the producer. 
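///
/// A short sketch of the manual-update workflow (illustrative values only):
///
/// ```
/// use triple_buffer::triple_buffer;
/// let (mut input, mut output) = triple_buffer(&0);
///
/// input.write(42);
///
/// // output_buffer() alone still shows the old value...
/// assert_eq!(*output.output_buffer(), 0);
///
/// // ...until update() fetches the published buffer
/// output.update();
/// assert_eq!(*output.output_buffer(), 42);
/// ```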
473 | pub fn output_buffer(&self) -> &T { 474 | // This is safe because the synchronization protocol ensures that we 475 | // have exclusive access to this buffer. 476 | let output_ptr = self.shared.buffers[self.output_idx as usize].get(); 477 | unsafe { &*output_ptr } 478 | } 479 | 480 | /// Access the output buffer directly 481 | /// 482 | /// This advanced interface allows you to modify the contents of the output 483 | /// buffer, so that you can avoid copying the output value when this is an 484 | /// expensive process. One possible application, for example, is to 485 | /// post-process values from the producer before use. 486 | /// 487 | /// However, by using it, you force yourself to take into account some 488 | /// implementation subtleties that you could normally ignore. 489 | /// 490 | /// First, keep in mind that you can lose access to the current output 491 | /// buffer any time [`read()`] or [`update()`] is called, as it may be 492 | /// replaced by an updated buffer from the producer automatically. 493 | /// 494 | /// Second, to reduce the potential for the aforementioned usage error, this 495 | /// method does not update the output buffer automatically. You need to call 496 | /// [`update()`] in order to fetch buffer updates from the producer. 497 | /// 498 | /// [`read()`]: Output::read 499 | /// [`update()`]: Output::update 500 | pub fn output_buffer_mut(&mut self) -> &mut T { 501 | // This is safe because the synchronization protocol ensures that we 502 | // have exclusive access to this buffer. 503 | let output_ptr = self.shared.buffers[self.output_idx as usize].get(); 504 | unsafe { &mut *output_ptr } 505 | } 506 | 507 | /// Update the output buffer 508 | /// 509 | /// Check if the producer submitted a new data version, and if one is 510 | /// available, update our output buffer to use it. Return a flag that tells 511 | /// you whether such an update was carried out. 512 | /// 513 | /// Bear in mind that when this happens, you will lose any change that you 514 | /// performed to the output buffer via the 515 | /// [`output_buffer_mut()`](Output::output_buffer_mut) interface. 516 | pub fn update(&mut self) -> bool { 517 | // Check if an update is present in the back-buffer 518 | let updated = self.updated(); 519 | if updated { 520 | // Access the shared state 521 | let shared_state = &(*self.shared); 522 | 523 | // If so, exchange our output buffer with the back-buffer, thusly 524 | // acquiring exclusive access to the old back buffer while giving 525 | // the producer a new back-buffer to write to. 526 | // 527 | // The ordering must be AcqRel, because... 528 | // 529 | // - Our accesses to the previous buffer must not be reordered after 530 | // this operation (which mandates Release ordering), otherwise 531 | // they could race with the producer accessing the freshly 532 | // liberated buffer. 533 | // - Our accesses from the buffer must not be reordered before this 534 | // operation (which mandates Consume ordering, that is best 535 | // approximated by Acquire in Rust), otherwise they would race 536 | // with the producer writing into the buffer before publishing it. 537 | // * This reordering may seem paradoxical, but could happen if the 538 | // compiler or CPU correctly speculated the new buffer's index 539 | // before that index is actually read, as well as on weird hardware 540 | // like GPUs where CPU caches require manual synchronization. 
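// As a concrete illustration (values chosen purely for exposition): if
// our output_idx is 2 and back_info currently reads 0b101 (back-buffer 1,
// dirty bit set), the swap below stores 0b010 and returns 0b101, so buffer
// 1 becomes our new output buffer and buffer 2 becomes the clean back-buffer.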
541 | // 542 | let former_back_info = shared_state 543 | .back_info 544 | .swap(self.output_idx, Ordering::AcqRel); 545 | 546 | // Make the old back-buffer our new output buffer 547 | self.output_idx = former_back_info & BACK_INDEX_MASK; 548 | } 549 | 550 | // Tell whether an update was carried out 551 | updated 552 | } 553 | } 554 | 555 | /// Triple buffer shared state 556 | /// 557 | /// In a triple buffering communication protocol, the producer and consumer 558 | /// share the following storage: 559 | /// 560 | /// - Three memory buffers suitable for storing the data at hand 561 | /// - Information about the back-buffer: which buffer is the current back-buffer 562 | /// and whether an update was published since the last readout. 563 | #[derive(Debug)] 564 | struct SharedState { 565 | /// Data storage buffers 566 | buffers: [CachePadded>; 3], 567 | 568 | /// Information about the current back-buffer state 569 | back_info: CachePadded, 570 | } 571 | // 572 | #[doc(hidden)] 573 | impl SharedState { 574 | /// Given (a way to generate) buffer contents and the back info, build the shared state 575 | fn new(mut gen_buf_data: impl FnMut(usize) -> T, back_info: BackBufferInfo) -> Self { 576 | let mut make_buf = |i| -> CachePadded> { 577 | CachePadded::new(UnsafeCell::new(gen_buf_data(i))) 578 | }; 579 | Self { 580 | buffers: [make_buf(0), make_buf(1), make_buf(2)], 581 | back_info: CachePadded::new(AtomicBackBufferInfo::new(back_info)), 582 | } 583 | } 584 | } 585 | // 586 | #[doc(hidden)] 587 | impl SharedState { 588 | /// Cloning the shared state is unsafe because you must ensure that no one 589 | /// is concurrently accessing it, since &self is enough for writing. 590 | unsafe fn clone(&self) -> Self { 591 | Self::new( 592 | |i| (*self.buffers[i].get()).clone(), 593 | self.back_info.load(Ordering::Relaxed), 594 | ) 595 | } 596 | } 597 | // 598 | #[doc(hidden)] 599 | impl SharedState { 600 | /// Equality is unsafe for the same reason as cloning: you must ensure that 601 | /// no one is concurrently accessing the triple buffer to avoid data races. 602 | unsafe fn eq(&self, other: &Self) -> bool { 603 | // Check whether the contents of all buffers are equal... 604 | let buffers_equal = self 605 | .buffers 606 | .iter() 607 | .zip(other.buffers.iter()) 608 | .all(|tuple| -> bool { 609 | let (cell1, cell2) = tuple; 610 | *cell1.get() == *cell2.get() 611 | }); 612 | 613 | // ...then check whether the rest of the shared state is equal 614 | buffers_equal 615 | && (self.back_info.load(Ordering::Relaxed) == other.back_info.load(Ordering::Relaxed)) 616 | } 617 | } 618 | // 619 | unsafe impl Sync for SharedState {} 620 | 621 | // Index types used for triple buffering 622 | // 623 | // These types are used to index into triple buffers. In addition, the 624 | // BackBufferInfo type is actually a bitfield, whose third bit (numerical 625 | // value: 4) is set to 1 to indicate that the producer published an update into 626 | // the back-buffer, and reset to 0 when the consumer fetches the update. 
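//
// For instance (an illustrative value, not a constant used below): a
// back_info of 0b110 designates buffer 0b10 == 2 as the current back-buffer
// (0b110 & BACK_INDEX_MASK) and has the dirty bit set (0b110 & BACK_DIRTY_BIT
// != 0), i.e. an update is waiting to be fetched by the consumer.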
627 | // 628 | type BufferIndex = u8; 629 | type BackBufferInfo = BufferIndex; 630 | // 631 | type AtomicBackBufferInfo = AtomicU8; 632 | const BACK_INDEX_MASK: u8 = 0b11; // Mask used to extract back-buffer index 633 | const BACK_DIRTY_BIT: u8 = 0b100; // Bit set by producer to signal updates 634 | 635 | /// Unit tests 636 | #[cfg(test)] 637 | mod tests { 638 | use super::{BufferIndex, SharedState, TripleBuffer, BACK_DIRTY_BIT, BACK_INDEX_MASK}; 639 | use std::{fmt::Debug, ops::Deref, sync::atomic::Ordering, thread, time::Duration}; 640 | use testbench::race_cell::{RaceCell, Racey}; 641 | 642 | /// Check that triple buffers are properly initialized 643 | #[test] 644 | fn initial_state() { 645 | // Let's create a triple buffer 646 | let mut buf = TripleBuffer::new(&42); 647 | check_buf_state(&mut buf, false); 648 | assert_eq!(*buf.output.read(), 42); 649 | } 650 | 651 | /// Check that the shared state's unsafe equality operator works 652 | #[test] 653 | fn partial_eq_shared() { 654 | // Let's create some dummy shared state 655 | let dummy_state = SharedState::::new(|i| [111, 222, 333][i], 0b10); 656 | 657 | // Check that the dummy state is equal to itself 658 | assert!(unsafe { dummy_state.eq(&dummy_state) }); 659 | 660 | // Check that it's not equal to a state where buffer contents differ 661 | assert!(unsafe { !dummy_state.eq(&SharedState::::new(|i| [114, 222, 333][i], 0b10)) }); 662 | assert!(unsafe { !dummy_state.eq(&SharedState::::new(|i| [111, 225, 333][i], 0b10)) }); 663 | assert!(unsafe { !dummy_state.eq(&SharedState::::new(|i| [111, 222, 336][i], 0b10)) }); 664 | 665 | // Check that it's not equal to a state where the back info differs 666 | assert!(unsafe { 667 | !dummy_state.eq(&SharedState::::new( 668 | |i| [111, 222, 333][i], 669 | BACK_DIRTY_BIT & 0b10, 670 | )) 671 | }); 672 | assert!(unsafe { !dummy_state.eq(&SharedState::::new(|i| [111, 222, 333][i], 0b01)) }); 673 | } 674 | 675 | /// Check that TripleBuffer's PartialEq impl works 676 | #[test] 677 | fn partial_eq() { 678 | // Create a triple buffer 679 | let buf = TripleBuffer::new(&"test"); 680 | 681 | // Check that it is equal to itself 682 | assert_eq!(buf, buf); 683 | 684 | // Make another buffer with different contents. As buffer creation is 685 | // deterministic, this should only have an impact on the shared state, 686 | // but the buffers should nevertheless be considered different. 
687 | let buf2 = TripleBuffer::new(&"taste"); 688 | assert_eq!(buf.input.input_idx, buf2.input.input_idx); 689 | assert_eq!(buf.output.output_idx, buf2.output.output_idx); 690 | assert!(buf != buf2); 691 | 692 | // Check that changing either the input or output buffer index will 693 | // also lead two TripleBuffers to be considered different (this test 694 | // technically creates an invalid TripleBuffer state, but it's the only 695 | // way to check that the PartialEq impl is exhaustive) 696 | let mut buf3 = TripleBuffer::new(&"test"); 697 | assert_eq!(buf, buf3); 698 | let old_input_idx = buf3.input.input_idx; 699 | buf3.input.input_idx = buf3.output.output_idx; 700 | assert!(buf != buf3); 701 | buf3.input.input_idx = old_input_idx; 702 | buf3.output.output_idx = old_input_idx; 703 | assert!(buf != buf3); 704 | } 705 | 706 | /// Check that the shared state's unsafe clone operator works 707 | #[test] 708 | fn clone_shared() { 709 | // Let's create some dummy shared state 710 | let dummy_state = SharedState::::new(|i| [123, 231, 132][i], BACK_DIRTY_BIT & 0b01); 711 | 712 | // Now, try to clone it 713 | let dummy_state_copy = unsafe { dummy_state.clone() }; 714 | 715 | // Check that the contents of the original state did not change 716 | assert!(unsafe { 717 | dummy_state.eq(&SharedState::::new( 718 | |i| [123, 231, 132][i], 719 | BACK_DIRTY_BIT & 0b01, 720 | )) 721 | }); 722 | 723 | // Check that the contents of the original and final state are identical 724 | assert!(unsafe { dummy_state.eq(&dummy_state_copy) }); 725 | } 726 | 727 | /// Check that TripleBuffer's Clone impl works 728 | #[test] 729 | fn clone() { 730 | // Create a triple buffer 731 | let mut buf = TripleBuffer::new(&4.2); 732 | 733 | // Put it in a nontrivial state 734 | unsafe { 735 | *buf.input.shared.buffers[0].get() = 1.2; 736 | *buf.input.shared.buffers[1].get() = 3.4; 737 | *buf.input.shared.buffers[2].get() = 5.6; 738 | } 739 | buf.input 740 | .shared 741 | .back_info 742 | .store(BACK_DIRTY_BIT & 0b01, Ordering::Relaxed); 743 | buf.input.input_idx = 0b10; 744 | buf.output.output_idx = 0b00; 745 | 746 | // Now clone it 747 | let buf_clone = buf.clone(); 748 | 749 | // Check that the clone uses its own, separate shared data storage 750 | assert_eq!( 751 | as_ptr(&buf_clone.input.shared), 752 | as_ptr(&buf_clone.output.shared) 753 | ); 754 | assert_ne!(as_ptr(&buf_clone.input.shared), as_ptr(&buf.input.shared)); 755 | assert_ne!(as_ptr(&buf_clone.output.shared), as_ptr(&buf.output.shared)); 756 | 757 | // Check that it is identical from PartialEq's point of view 758 | assert_eq!(buf, buf_clone); 759 | 760 | // Check that the contents of the original buffer did not change 761 | unsafe { 762 | assert_eq!(*buf.input.shared.buffers[0].get(), 1.2); 763 | assert_eq!(*buf.input.shared.buffers[1].get(), 3.4); 764 | assert_eq!(*buf.input.shared.buffers[2].get(), 5.6); 765 | } 766 | assert_eq!( 767 | buf.input.shared.back_info.load(Ordering::Relaxed), 768 | BACK_DIRTY_BIT & 0b01 769 | ); 770 | assert_eq!(buf.input.input_idx, 0b10); 771 | assert_eq!(buf.output.output_idx, 0b00); 772 | } 773 | 774 | /// Check that the low-level publish/update primitives work 775 | #[test] 776 | fn swaps() { 777 | // Create a new buffer, and a way to track any changes to it 778 | let mut buf = TripleBuffer::new(&[123, 456]); 779 | let old_buf = buf.clone(); 780 | let old_input_idx = old_buf.input.input_idx; 781 | let old_shared = &old_buf.input.shared; 782 | let old_back_info = old_shared.back_info.load(Ordering::Relaxed); 783 | let old_back_idx = 
old_back_info & BACK_INDEX_MASK; 784 | let old_output_idx = old_buf.output.output_idx; 785 | 786 | // Check that updating from a clean state works 787 | assert!(!buf.output.update()); 788 | assert_eq!(buf, old_buf); 789 | check_buf_state(&mut buf, false); 790 | 791 | // Check that publishing from a clean state works 792 | assert!(!buf.input.publish()); 793 | let mut expected_buf = old_buf.clone(); 794 | expected_buf.input.input_idx = old_back_idx; 795 | expected_buf 796 | .input 797 | .shared 798 | .back_info 799 | .store(old_input_idx | BACK_DIRTY_BIT, Ordering::Relaxed); 800 | assert_eq!(buf, expected_buf); 801 | check_buf_state(&mut buf, true); 802 | 803 | // Check that overwriting a dirty state works 804 | assert!(buf.input.publish()); 805 | let mut expected_buf = old_buf.clone(); 806 | expected_buf.input.input_idx = old_input_idx; 807 | expected_buf 808 | .input 809 | .shared 810 | .back_info 811 | .store(old_back_idx | BACK_DIRTY_BIT, Ordering::Relaxed); 812 | assert_eq!(buf, expected_buf); 813 | check_buf_state(&mut buf, true); 814 | 815 | // Check that updating from a dirty state works 816 | assert!(buf.output.update()); 817 | expected_buf.output.output_idx = old_back_idx; 818 | expected_buf 819 | .output 820 | .shared 821 | .back_info 822 | .store(old_output_idx, Ordering::Relaxed); 823 | assert_eq!(buf, expected_buf); 824 | check_buf_state(&mut buf, false); 825 | } 826 | 827 | /// Check that writing to a triple buffer works 828 | #[test] 829 | fn vec_guarded_write() { 830 | let mut buf = TripleBuffer::new(&vec![]); 831 | 832 | // write new value, publish, read 833 | { 834 | let mut buffer = buf.input.input_buffer_publisher(); 835 | buffer.push(0); 836 | buffer.push(1); 837 | buffer.push(2); 838 | 839 | // not yet published 840 | let back_info = buffer.reference.shared.back_info.load(Ordering::Relaxed); 841 | let back_buffer_dirty = back_info & BACK_DIRTY_BIT != 0; 842 | assert!(!back_buffer_dirty); 843 | } 844 | check_buf_state(&mut buf, true); // after publish, before read 845 | assert_eq!(*buf.output.read(), vec![0, 1, 2]); 846 | check_buf_state(&mut buf, false); // after publish and read 847 | 848 | // write new value, publish, don't read 849 | { 850 | buf.input.input_buffer_publisher().push(3); 851 | } 852 | check_buf_state(&mut buf, true); 853 | 854 | // write new value, publish, read 855 | { 856 | buf.input.input_buffer_publisher().push(4); 857 | } 858 | assert_eq!(*buf.output.read(), vec![4]); 859 | check_buf_state(&mut buf, false); 860 | 861 | // overwrite existing value, publish, surprising read 862 | { 863 | buf.input.input_buffer_publisher().push(5); 864 | } 865 | assert_eq!(*buf.output.read(), vec![3, 5]); 866 | check_buf_state(&mut buf, false); 867 | 868 | // to avoid surprise, always clear before write 869 | { 870 | let mut buffer = buf.input.input_buffer_publisher(); 871 | buffer.clear(); 872 | buffer.push(6); 873 | } 874 | assert_eq!(*buf.output.read(), vec![6]); 875 | check_buf_state(&mut buf, false); 876 | } 877 | 878 | /// Check that (sequentially) writing to a triple buffer works 879 | #[test] 880 | fn sequential_write() { 881 | // Let's create a triple buffer 882 | let mut buf = TripleBuffer::new(&false); 883 | 884 | // Back up the initial buffer state 885 | let old_buf = buf.clone(); 886 | 887 | // Perform a write 888 | buf.input.write(true); 889 | 890 | // Check new implementation state 891 | { 892 | // Starting from the old buffer state... 893 | let mut expected_buf = old_buf.clone(); 894 | 895 | // ...write the new value in and swap... 
896 | *expected_buf.input.input_buffer_mut() = true; 897 | expected_buf.input.publish(); 898 | 899 | // Nothing else should have changed 900 | assert_eq!(buf, expected_buf); 901 | check_buf_state(&mut buf, true); 902 | } 903 | } 904 | 905 | /// Check that (sequentially) writing to a triple buffer works 906 | #[test] 907 | fn sequential_guarded_write() { 908 | // Let's create a triple buffer 909 | let mut buf = TripleBuffer::new(&false); 910 | 911 | // Back up the initial buffer state 912 | let old_buf = buf.clone(); 913 | 914 | // Perform a write 915 | *buf.input.input_buffer_publisher() = true; 916 | 917 | // Check new implementation state 918 | { 919 | // Starting from the old buffer state... 920 | let mut expected_buf = old_buf.clone(); 921 | 922 | // ...write the new value in and swap... 923 | *expected_buf.input.input_buffer_mut() = true; 924 | expected_buf.input.publish(); 925 | 926 | // Nothing else should have changed 927 | assert_eq!(buf, expected_buf); 928 | check_buf_state(&mut buf, true); 929 | } 930 | } 931 | 932 | /// Check that (sequentially) reading from a triple buffer works 933 | #[test] 934 | fn sequential_read() { 935 | // Let's create a triple buffer and write into it 936 | let mut buf = TripleBuffer::new(&1.0); 937 | buf.input.write(4.2); 938 | 939 | // Test readout from dirty (freshly written) triple buffer 940 | { 941 | // Back up the initial buffer state 942 | let old_buf = buf.clone(); 943 | 944 | // Read from the buffer 945 | let result = *buf.output.read(); 946 | 947 | // Output value should be correct 948 | assert_eq!(result, 4.2); 949 | 950 | // Result should be equivalent to carrying out an update 951 | let mut expected_buf = old_buf.clone(); 952 | assert!(expected_buf.output.update()); 953 | assert_eq!(buf, expected_buf); 954 | check_buf_state(&mut buf, false); 955 | } 956 | 957 | // Test readout from clean (unchanged) triple buffer 958 | { 959 | // Back up the initial buffer state 960 | let old_buf = buf.clone(); 961 | 962 | // Read from the buffer 963 | let result = *buf.output.read(); 964 | 965 | // Output value should be correct 966 | assert_eq!(result, 4.2); 967 | 968 | // Buffer state should be unchanged 969 | assert_eq!(buf, old_buf); 970 | check_buf_state(&mut buf, false); 971 | } 972 | } 973 | 974 | /// Check that (sequentially) reading from a triple buffer works 975 | #[test] 976 | fn sequential_guarded_read() { 977 | // Let's create a triple buffer and write into it 978 | let mut buf = TripleBuffer::new(&1.0); 979 | *buf.input.input_buffer_publisher() = 4.2; 980 | 981 | // Test readout from dirty (freshly written) triple buffer 982 | { 983 | // Back up the initial buffer state 984 | let old_buf: TripleBuffer = buf.clone(); 985 | 986 | // Read from the buffer 987 | let result = *buf.output.read(); 988 | 989 | // Output value should be correct 990 | assert_eq!(result, 4.2); 991 | 992 | // Result should be equivalent to carrying out an update 993 | let mut expected_buf = old_buf.clone(); 994 | assert!(expected_buf.output.update()); 995 | assert_eq!(buf, expected_buf); 996 | check_buf_state(&mut buf, false); 997 | } 998 | 999 | // Test readout from clean (unchanged) triple buffer 1000 | { 1001 | // Back up the initial buffer state 1002 | let old_buf = buf.clone(); 1003 | 1004 | // Read from the buffer 1005 | let result = *buf.output.read(); 1006 | 1007 | // Output value should be correct 1008 | assert_eq!(result, 4.2); 1009 | 1010 | // Buffer state should be unchanged 1011 | assert_eq!(buf, old_buf); 1012 | check_buf_state(&mut buf, false); 1013 | } 
1014 | } 1015 | 1016 | /// Check that contended concurrent reads and writes work 1017 | #[test] 1018 | #[ignore] 1019 | fn contended_concurrent_read_write() { 1020 | // We will stress the infrastructure by performing this many writes 1021 | // as a reader continuously reads the latest value 1022 | #[cfg(not(feature = "miri"))] 1023 | const TEST_WRITE_COUNT: usize = 100_000_000; 1024 | #[cfg(feature = "miri")] 1025 | const TEST_WRITE_COUNT: usize = 3_000; 1026 | 1027 | // This is the buffer that our reader and writer will share 1028 | let buf = TripleBuffer::new(&RaceCell::new(0)); 1029 | let (mut buf_input, mut buf_output) = buf.split(); 1030 | 1031 | // Concurrently run a writer which increments a shared value in a loop, 1032 | // and a reader which makes sure that no unexpected value slips in. 1033 | let mut last_value = 0usize; 1034 | testbench::concurrent_test_2( 1035 | move || { 1036 | for value in 1..=TEST_WRITE_COUNT { 1037 | buf_input.write(RaceCell::new(value)); 1038 | } 1039 | }, 1040 | move || { 1041 | while last_value < TEST_WRITE_COUNT { 1042 | let new_racey_value = buf_output.read().get(); 1043 | match new_racey_value { 1044 | Racey::Consistent(new_value) => { 1045 | assert!((new_value >= last_value) && (new_value <= TEST_WRITE_COUNT)); 1046 | last_value = new_value; 1047 | } 1048 | Racey::Inconsistent => { 1049 | panic!("Inconsistent state exposed by the buffer!"); 1050 | } 1051 | } 1052 | } 1053 | }, 1054 | ); 1055 | } 1056 | 1057 | /// Check that uncontended concurrent reads and writes work 1058 | /// 1059 | /// **WARNING:** This test unfortunately needs to have timing-dependent 1060 | /// behaviour to do its job. If it fails for you, try the following: 1061 | /// 1062 | /// - Close running applications in the background 1063 | /// - Re-run the tests with only one OS thread (--test-threads=1) 1064 | /// - Increase the writer sleep period 1065 | #[test] 1066 | #[ignore] 1067 | fn uncontended_concurrent_read_write() { 1068 | // We will stress the infrastructure by performing this many writes 1069 | // as a reader continuously reads the latest value 1070 | #[cfg(not(feature = "miri"))] 1071 | const TEST_WRITE_COUNT: usize = 625; 1072 | #[cfg(feature = "miri")] 1073 | const TEST_WRITE_COUNT: usize = 200; 1074 | 1075 | // This is the buffer that our reader and writer will share 1076 | let buf = TripleBuffer::new(&RaceCell::new(0)); 1077 | let (mut buf_input, mut buf_output) = buf.split(); 1078 | 1079 | // Concurrently run a writer which slowly increments a shared value, 1080 | // and a reader which checks that it can receive every update 1081 | let mut last_value = 0usize; 1082 | testbench::concurrent_test_2( 1083 | move || { 1084 | for value in 1..=TEST_WRITE_COUNT { 1085 | buf_input.write(RaceCell::new(value)); 1086 | thread::yield_now(); 1087 | thread::sleep(Duration::from_millis(32)); 1088 | } 1089 | }, 1090 | move || { 1091 | while last_value < TEST_WRITE_COUNT { 1092 | let new_racey_value = buf_output.read().get(); 1093 | match new_racey_value { 1094 | Racey::Consistent(new_value) => { 1095 | assert!((new_value >= last_value) && (new_value - last_value <= 1)); 1096 | last_value = new_value; 1097 | } 1098 | Racey::Inconsistent => { 1099 | panic!("Inconsistent state exposed by the buffer!"); 1100 | } 1101 | } 1102 | } 1103 | }, 1104 | ); 1105 | } 1106 | 1107 | /// Through the low-level API, the consumer is allowed to modify its 1108 | /// bufffer, which means that it will unknowingly send back data to the 1109 | /// producer. 
This creates new correctness requirements for the 1110 | /// synchronization protocol, which must be checked as well. 1111 | #[test] 1112 | #[ignore] 1113 | fn concurrent_bidirectional_exchange() { 1114 | // We will stress the infrastructure by performing this many writes 1115 | // as a reader continuously reads the latest value 1116 | #[cfg(not(feature = "miri"))] 1117 | const TEST_WRITE_COUNT: usize = 100_000_000; 1118 | #[cfg(feature = "miri")] 1119 | const TEST_WRITE_COUNT: usize = 3_000; 1120 | 1121 | // This is the buffer that our reader and writer will share 1122 | let buf = TripleBuffer::new(&RaceCell::new(0)); 1123 | let (mut buf_input, mut buf_output) = buf.split(); 1124 | 1125 | // Concurrently run a writer which increments a shared value in a loop, 1126 | // and a reader which makes sure that no unexpected value slips in. 1127 | testbench::concurrent_test_2( 1128 | move || { 1129 | for new_value in 1..=TEST_WRITE_COUNT { 1130 | match buf_input.input_buffer_mut().get() { 1131 | Racey::Consistent(curr_value) => { 1132 | assert!(curr_value <= new_value); 1133 | } 1134 | Racey::Inconsistent => { 1135 | panic!("Inconsistent state exposed by the buffer!"); 1136 | } 1137 | } 1138 | buf_input.write(RaceCell::new(new_value)); 1139 | } 1140 | }, 1141 | move || { 1142 | let mut last_value = 0usize; 1143 | while last_value < TEST_WRITE_COUNT { 1144 | match buf_output.output_buffer().get() { 1145 | Racey::Consistent(new_value) => { 1146 | assert!((new_value >= last_value) && (new_value <= TEST_WRITE_COUNT)); 1147 | last_value = new_value; 1148 | } 1149 | Racey::Inconsistent => { 1150 | panic!("Inconsistent state exposed by the buffer!"); 1151 | } 1152 | } 1153 | if buf_output.updated() { 1154 | buf_output.output_buffer_mut().set(last_value / 2); 1155 | buf_output.update(); 1156 | } 1157 | } 1158 | }, 1159 | ); 1160 | } 1161 | 1162 | /// Range check for triple buffer indexes 1163 | #[allow(unused_comparisons)] 1164 | fn index_in_range(idx: BufferIndex) -> bool { 1165 | (0..=2).contains(&idx) 1166 | } 1167 | 1168 | /// Get a pointer to the target of some reference (e.g. an &, an Arc...) 
fn as_ptr<P: Deref>(ref_like: &P) -> *const P::Target { 1170 | &(**ref_like) as *const _ 1171 | } 1172 | 1173 | /// Check the state of a buffer, and the effect of queries on it 1174 | fn check_buf_state<T>(buf: &mut TripleBuffer<T>, expected_dirty_bit: bool) 1175 | where 1176 | T: Clone + Debug + PartialEq + Send, 1177 | { 1178 | // Make a backup of the buffer's initial state 1179 | let initial_buf = buf.clone(); 1180 | 1181 | // Check that the input and output point to the same shared state 1182 | assert_eq!(as_ptr(&buf.input.shared), as_ptr(&buf.output.shared)); 1183 | 1184 | // Access the shared state and decode back-buffer information 1185 | let back_info = buf.input.shared.back_info.load(Ordering::Relaxed); 1186 | let back_idx = back_info & BACK_INDEX_MASK; 1187 | let back_buffer_dirty = back_info & BACK_DIRTY_BIT != 0; 1188 | 1189 | // Input-/output-/back-buffer indexes must be in range 1190 | assert!(index_in_range(buf.input.input_idx)); 1191 | assert!(index_in_range(buf.output.output_idx)); 1192 | assert!(index_in_range(back_idx)); 1193 | 1194 | // Input-/output-/back-buffer indexes must be distinct 1195 | assert!(buf.input.input_idx != buf.output.output_idx); 1196 | assert!(buf.input.input_idx != back_idx); 1197 | assert!(buf.output.output_idx != back_idx); 1198 | 1199 | // Back-buffer must have the expected dirty bit 1200 | assert_eq!(back_buffer_dirty, expected_dirty_bit); 1201 | 1202 | // Check that the "input buffer" query behaves as expected 1203 | assert_eq!( 1204 | as_ptr(&buf.input.input_buffer_mut()), 1205 | buf.input.shared.buffers[buf.input.input_idx as usize].get() 1206 | ); 1207 | assert_eq!(*buf, initial_buf); 1208 | 1209 | // Check that the "consumed" query behaves as expected 1210 | assert_eq!(!buf.input.consumed(), expected_dirty_bit); 1211 | assert_eq!(*buf, initial_buf); 1212 | 1213 | // Check that the output_buffer query works in the initial state 1214 | assert_eq!( 1215 | as_ptr(&buf.output.output_buffer()), 1216 | buf.output.shared.buffers[buf.output.output_idx as usize].get() 1217 | ); 1218 | assert_eq!(*buf, initial_buf); 1219 | 1220 | // Check that the "updated" query behaves as expected 1221 | assert_eq!(buf.output.updated(), expected_dirty_bit); 1222 | assert_eq!(*buf, initial_buf); 1223 | } 1224 | } 1225 | --------------------------------------------------------------------------------