├── .github └── workflows │ ├── doctests.yml │ ├── embedded-builds.yml │ ├── fmt.yml │ ├── full-test.yml │ └── tsan-test.yml ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── bbqtest ├── Cargo.toml └── src │ ├── benches.rs │ ├── framed.rs │ ├── lib.rs │ ├── multi_thread.rs │ ├── ring_around_the_senders.rs │ └── single_thread.rs ├── core ├── Cargo.toml └── src │ ├── bbbuffer.rs │ ├── framed.rs │ ├── lib.rs │ └── vusize.rs └── tsan-blacklist.txt /.github/workflows/doctests.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | 7 | name: Documentation Tests 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | include: 15 | - features: thumbv6 16 | nodefault: "--no-default-features" 17 | - features: "" 18 | nodefault: "" 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | - uses: dtolnay/rust-toolchain@stable 23 | - uses: actions-rs/cargo@v1 24 | with: 25 | command: test 26 | args: ${{ matrix.nodefault }} --features=${{ matrix.features }} --manifest-path core/Cargo.toml 27 | -------------------------------------------------------------------------------- /.github/workflows/embedded-builds.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | 7 | name: Embedded Builds 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | include: 15 | - features: "" 16 | target: thumbv7em-none-eabihf 17 | - features: thumbv6 18 | target: thumbv6m-none-eabi 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | - uses: dtolnay/rust-toolchain@stable 23 | with: 24 | targets: thumbv6m-none-eabi, thumbv7em-none-eabihf 25 | 26 | - run: cargo build --manifest-path core/Cargo.toml --no-default-features --features=${{ matrix.features }} --target=${{ matrix.target }} 
27 | -------------------------------------------------------------------------------- /.github/workflows/fmt.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | 7 | name: Formatting check 8 | 9 | jobs: 10 | fmt: 11 | name: Rustfmt 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | - uses: dtolnay/rust-toolchain@stable 16 | with: 17 | components: rustfmt 18 | - run: cargo fmt --all -- --check 19 | -------------------------------------------------------------------------------- /.github/workflows/full-test.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | 7 | name: Integration Tests 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | include: 15 | - build: "" 16 | - build: "--release" 17 | 18 | steps: 19 | - uses: actions/checkout@v4 20 | - uses: dtolnay/rust-toolchain@stable 21 | - run: cargo test ${{ matrix.build }} --features=short-potato --manifest-path bbqtest/Cargo.toml -- --nocapture 22 | -------------------------------------------------------------------------------- /.github/workflows/tsan-test.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | 7 | name: TSAN Integration Test 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | include: 15 | - build: "" 16 | - build: "--release" 17 | 18 | steps: 19 | - uses: actions/checkout@v4 20 | - uses: dtolnay/rust-toolchain@stable 21 | with: 22 | toolchain: nightly 23 | components: rust-src 24 | 25 | - run: cargo test ${{ matrix.build }} --features=short-potato --manifest-path bbqtest/Cargo.toml -Zbuild-std --target x86_64-unknown-linux-gnu -- --nocapture 26 | env: 27 
| RUSTFLAGS: "-Z sanitizer=thread" 28 | RUST_TEST_THREADS: 1 29 | TSAN_OPTIONS: "suppressions=${{ github.workspace }}/tsan-blacklist.txt" 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/target 2 | **/*.rs.bk 3 | **/Cargo.lock 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["bbqtest", "core"] 3 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019-2020 Anthony James Munns 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 
16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BBQueue 2 | 3 | [![Documentation](https://docs.rs/bbqueue/badge.svg)](https://docs.rs/bbqueue) 4 | 5 | BBQueue, short for "BipBuffer Queue", is a Single Producer Single Consumer, 6 | lockless, no_std, thread safe, queue, based on [BipBuffers]. For more info on 7 | the design of the lock-free algorithm used by bbqueue, see [this blog post]. 8 | 9 | For a 90 minute guided tour of BBQueue, you can also view this [guide on YouTube]. 10 | 11 | [guide on YouTube]: https://www.youtube.com/watch?v=ngTCf2cnGkY 12 | [BipBuffers]: https://www.codeproject.com/Articles/3479/%2FArticles%2F3479%2FThe-Bip-Buffer-The-Circular-Buffer-with-a-Twist 13 | [this blog post]: https://ferrous-systems.com/blog/lock-free-ring-buffer/ 14 | 15 | BBQueue is designed (primarily) to be a First-In, First-Out queue for use with DMA on embedded 16 | systems. 17 | 18 | While Circular/Ring Buffers allow you to send data between two threads (or from an interrupt to 19 | main code), you must push the data one piece at a time. With BBQueue, you instead are granted a 20 | block of contiguous memory, which can be filled (or emptied) by a DMA engine. 
21 | 22 | ## Local usage 23 | 24 | ```rust 25 | // Create a buffer with six elements 26 | let bb: BBBuffer<6> = BBBuffer::new(); 27 | let (mut prod, mut cons) = bb.try_split().unwrap(); 28 | 29 | // Request space for one byte 30 | let mut wgr = prod.grant_exact(1).unwrap(); 31 | 32 | // Set the data 33 | wgr[0] = 123; 34 | 35 | assert_eq!(wgr.len(), 1); 36 | 37 | // Make the data ready for consuming 38 | wgr.commit(1); 39 | 40 | // Read all available bytes 41 | let rgr = cons.read().unwrap(); 42 | 43 | assert_eq!(rgr[0], 123); 44 | 45 | // Release the space for later writes 46 | rgr.release(1); 47 | ``` 48 | 49 | ## Static usage 50 | 51 | ```rust, no_run 52 | use bbqueue::BBBuffer; 53 | 54 | // Create a buffer with six elements 55 | static BB: BBBuffer<6> = BBBuffer::new(); 56 | 57 | fn main() { 58 | // Split the bbqueue into producer and consumer halves. 59 | // These halves can be sent to different threads or to 60 | // an interrupt handler for thread safe SPSC usage 61 | let (mut prod, mut cons) = BB.try_split().unwrap(); 62 | 63 | // Request space for one byte 64 | let mut wgr = prod.grant_exact(1).unwrap(); 65 | 66 | // Set the data 67 | wgr[0] = 123; 68 | 69 | assert_eq!(wgr.len(), 1); 70 | 71 | // Make the data ready for consuming 72 | wgr.commit(1); 73 | 74 | // Read all available bytes 75 | let rgr = cons.read().unwrap(); 76 | 77 | assert_eq!(rgr[0], 123); 78 | 79 | // Release the space for later writes 80 | rgr.release(1); 81 | 82 | // The buffer cannot be split twice 83 | assert!(BB.try_split().is_err()); 84 | } 85 | ``` 86 | 87 | The `bbqueue` crate is located in `core/`, and tests are located in `bbqtest/`. 88 | 89 | ## Features 90 | 91 | By default BBQueue uses atomic operations which are available on most platforms. However on some 92 | (mostly embedded) platforms atomic support is limited and with the default features you will get 93 | a compiler error about missing atomic methods. 
94 | 95 | 96 | # License 97 | 98 | Licensed under either of 99 | 100 | - Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or 101 | http://www.apache.org/licenses/LICENSE-2.0) 102 | 103 | - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 104 | 105 | at your option. 106 | 107 | ## Contribution 108 | 109 | Unless you explicitly state otherwise, any contribution intentionally submitted 110 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 111 | dual licensed as above, without any additional terms or conditions. 112 | -------------------------------------------------------------------------------- /bbqtest/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bbqtest" 3 | version = "0.1.0" 4 | authors = ["James Munns "] 5 | edition = "2018" 6 | license = "MIT OR Apache-2.0" 7 | 8 | [dependencies] 9 | bounded-spsc-queue = { version = "0.4.0", optional = true } 10 | 11 | [dependencies.bbqueue] 12 | path = "../core" 13 | 14 | 15 | [dev-dependencies] 16 | rand = "0.8" 17 | criterion = "0.5" 18 | crossbeam-utils = "0.8" 19 | crossbeam = "0.8" 20 | heapless = "0.8" 21 | cfg-if = "1.0" 22 | 23 | [[bench]] 24 | name = "benches" 25 | harness = false 26 | path = "src/benches.rs" 27 | 28 | [features] 29 | travisci = ["verbose"] 30 | default = [] 31 | verbose = [] 32 | nightly = ["bounded-spsc-queue"] 33 | extra-verbose = [] 34 | short-potato = [] 35 | -------------------------------------------------------------------------------- /bbqtest/src/benches.rs: -------------------------------------------------------------------------------- 1 | use bbqueue::BBBuffer; 2 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 3 | use std::cmp::min; 4 | 5 | const DATA_SZ: usize = 128 * 1024 * 1024; 6 | 7 | pub fn criterion_benchmark(c: &mut Criterion) { 8 | let data = vec![0; DATA_SZ].into_boxed_slice(); 9 | 10 | 
c.bench_function("bbq 128/4096", |bench| bench.iter(|| chunky(&data, 128))); 11 | 12 | c.bench_function("bbq 256/4096", |bench| bench.iter(|| chunky(&data, 256))); 13 | 14 | c.bench_function("bbq 512/4096", |bench| bench.iter(|| chunky(&data, 512))); 15 | 16 | c.bench_function("bbq 1024/4096", |bench| bench.iter(|| chunky(&data, 1024))); 17 | 18 | c.bench_function("bbq 2048/4096", |bench| bench.iter(|| chunky(&data, 2048))); 19 | 20 | let buffy: BBBuffer<65536> = BBBuffer::new(); 21 | let (mut prod, mut cons) = buffy.try_split().unwrap(); 22 | 23 | c.bench_function("bbq 8192/65536", |bench| { 24 | let chunksz = 8192; 25 | 26 | bench.iter(|| { 27 | black_box(thread::scope(|sc| { 28 | sc.spawn(|_| { 29 | data.chunks(chunksz).for_each(|ch| loop { 30 | if let Ok(mut wgr) = prod.grant_exact(chunksz) { 31 | wgr.copy_from_slice(black_box(ch)); 32 | wgr.commit(chunksz); 33 | break; 34 | } 35 | }); 36 | }); 37 | 38 | sc.spawn(|_| { 39 | data.chunks(chunksz).for_each(|ch| { 40 | let mut st = 0; 41 | loop { 42 | if let Ok(rgr) = cons.read() { 43 | let len = min(chunksz - st, rgr.len()); 44 | assert_eq!(ch[st..st + len], rgr[..len]); 45 | rgr.release(len); 46 | 47 | st += len; 48 | 49 | if st == chunksz { 50 | break; 51 | } 52 | } 53 | } 54 | }); 55 | }); 56 | })) 57 | .unwrap(); 58 | }) 59 | }); 60 | 61 | use std::mem::MaybeUninit; 62 | 63 | c.bench_function("std channels 8192 unbounded", |bench| { 64 | bench.iter(|| { 65 | use std::sync::mpsc::{Receiver, Sender}; 66 | let (mut prod, mut cons): (Sender<[u8; 8192]>, Receiver<[u8; 8192]>) = 67 | std::sync::mpsc::channel(); 68 | let rdata = &data; 69 | 70 | thread::scope(|sc| { 71 | sc.spawn(move |_| { 72 | rdata.chunks(8192).for_each(|ch| { 73 | let mut x: MaybeUninit<[u8; 8192]> = MaybeUninit::uninit(); 74 | unsafe { 75 | x.as_mut_ptr() 76 | .copy_from_nonoverlapping(ch.as_ptr().cast::<[u8; 8192]>(), 1) 77 | }; 78 | prod.send(unsafe { x.assume_init() }).unwrap(); 79 | }); 80 | }); 81 | 82 | sc.spawn(move |_| { 83 | 
rdata.chunks(8192).for_each(|ch| { 84 | let x = cons.recv().unwrap(); 85 | assert_eq!(&x[..], &ch[..]); 86 | }); 87 | }); 88 | }) 89 | .unwrap(); 90 | }) 91 | }); 92 | 93 | c.bench_function("xbeam channels 8192/65536", |bench| { 94 | bench.iter(|| { 95 | use crossbeam::{bounded, Receiver, Sender}; 96 | let (mut prod, mut cons): (Sender<[u8; 8192]>, Receiver<[u8; 8192]>) = 97 | bounded(65536 / 8192); 98 | let rdata = &data; 99 | 100 | thread::scope(|sc| { 101 | sc.spawn(move |_| { 102 | rdata.chunks(8192).for_each(|ch| { 103 | let mut x: MaybeUninit<[u8; 8192]> = MaybeUninit::uninit(); 104 | unsafe { 105 | x.as_mut_ptr() 106 | .copy_from_nonoverlapping(ch.as_ptr().cast::<[u8; 8192]>(), 1) 107 | }; 108 | prod.send(unsafe { x.assume_init() }).unwrap(); 109 | }); 110 | }); 111 | 112 | sc.spawn(move |_| { 113 | rdata.chunks(8192).for_each(|ch| { 114 | let x = cons.recv().unwrap(); 115 | assert_eq!(&x[..], &ch[..]); 116 | }); 117 | }); 118 | }) 119 | .unwrap(); 120 | }) 121 | }); 122 | 123 | cfg_if::cfg_if! 
{ 124 | if #[cfg(feature = "nightly")] { 125 | c.bench_function("bounded queue 8192/65536", |bench| { 126 | 127 | bench.iter(|| { 128 | use bounded_spsc_queue::make; 129 | let (mut prod, mut cons) = make::<[u8; 8192]>(65536 / 8192); 130 | let rdata = &data; 131 | 132 | thread::scope(|sc| { 133 | sc.spawn(move |_| { 134 | rdata.chunks(8192).for_each(|ch| { 135 | let mut x: MaybeUninit<[u8; 8192]> = MaybeUninit::uninit(); 136 | unsafe { 137 | x.as_mut_ptr().copy_from_nonoverlapping(ch.as_ptr().cast::<[u8; 8192]>(), 1) 138 | }; 139 | prod.push(unsafe { x.assume_init() }); 140 | }); 141 | }); 142 | 143 | sc.spawn(move |_| { 144 | rdata.chunks(8192).for_each(|ch| { 145 | let x = cons.pop(); 146 | assert_eq!(&x[..], &ch[..]); 147 | }); 148 | }); 149 | }).unwrap(); 150 | }) 151 | }); 152 | } 153 | } 154 | 155 | use heapless::spsc::Queue; 156 | 157 | let mut queue: Queue<[u8; 8192], 8> = Queue::new(); 158 | let (mut prod, mut cons) = queue.split(); 159 | 160 | c.bench_function("heapless spsc::Queue 8192/65536", |bench| { 161 | let chunksz = 8192; 162 | 163 | bench.iter(|| { 164 | black_box(thread::scope(|sc| { 165 | sc.spawn(|_| { 166 | data.chunks(chunksz).for_each(|ch| { 167 | let mut x: MaybeUninit<[u8; 8192]> = MaybeUninit::uninit(); 168 | unsafe { 169 | x.as_mut_ptr() 170 | .copy_from_nonoverlapping(ch.as_ptr().cast::<[u8; 8192]>(), 1) 171 | }; 172 | let mut x = unsafe { x.assume_init() }; 173 | 174 | loop { 175 | match prod.enqueue(x) { 176 | Ok(_) => break, 177 | Err(y) => x = y, 178 | }; 179 | } 180 | }); 181 | }); 182 | 183 | sc.spawn(|_| { 184 | data.chunks(8192).for_each(|ch| loop { 185 | if let Some(x) = cons.dequeue() { 186 | assert_eq!(&x[..], &ch[..]); 187 | break; 188 | } 189 | }); 190 | }); 191 | })) 192 | .unwrap(); 193 | }) 194 | }); 195 | } 196 | 197 | use crossbeam_utils::thread; 198 | fn chunky(data: &[u8], chunksz: usize) { 199 | let buffy: BBBuffer<4096> = BBBuffer::new(); 200 | let (mut prod, mut cons) = buffy.try_split().unwrap(); 201 | 202 | 
thread::scope(|sc| { 203 | let pjh = sc.spawn(|_| { 204 | data.chunks(chunksz).for_each(|ch| loop { 205 | if let Ok(mut wgr) = prod.grant_exact(chunksz) { 206 | wgr.copy_from_slice(ch); 207 | wgr.commit(chunksz); 208 | break; 209 | } 210 | }); 211 | }); 212 | 213 | let cjh = sc.spawn(|_| { 214 | data.chunks(chunksz).for_each(|ch| { 215 | let mut st = 0; 216 | loop { 217 | if let Ok(rgr) = cons.read() { 218 | let len = min(chunksz - st, rgr.len()); 219 | assert_eq!(ch[st..st + len], rgr[..len]); 220 | rgr.release(len); 221 | 222 | st += len; 223 | 224 | if st == chunksz { 225 | break; 226 | } 227 | } 228 | } 229 | }); 230 | }); 231 | }) 232 | .unwrap(); 233 | } 234 | 235 | criterion_group!(benches, criterion_benchmark); 236 | criterion_main!(benches); 237 | -------------------------------------------------------------------------------- /bbqtest/src/framed.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use bbqueue::BBBuffer; 4 | 5 | #[test] 6 | fn frame_wrong_size() { 7 | let bb: BBBuffer<256> = BBBuffer::new(); 8 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 9 | 10 | // Create largeish grants 11 | let mut wgr = prod.grant(127).unwrap(); 12 | for (i, by) in wgr.iter_mut().enumerate() { 13 | *by = i as u8; 14 | } 15 | // Note: In debug mode, this hits a debug_assert 16 | wgr.commit(256); 17 | 18 | let rgr = cons.read().unwrap(); 19 | assert_eq!(rgr.len(), 127); 20 | for (i, by) in rgr.iter().enumerate() { 21 | assert_eq!((i as u8), *by); 22 | } 23 | rgr.release(); 24 | } 25 | 26 | #[test] 27 | fn full_size() { 28 | let bb: BBBuffer<256> = BBBuffer::new(); 29 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 30 | let mut ctr = 0; 31 | 32 | for _ in 0..10_000 { 33 | // Create largeish grants 34 | if let Ok(mut wgr) = prod.grant(127) { 35 | ctr += 1; 36 | for (i, by) in wgr.iter_mut().enumerate() { 37 | *by = i as u8; 38 | } 39 | wgr.commit(127); 40 | 41 | let rgr = 
cons.read().unwrap(); 42 | assert_eq!(rgr.len(), 127); 43 | for (i, by) in rgr.iter().enumerate() { 44 | assert_eq!((i as u8), *by); 45 | } 46 | rgr.release(); 47 | } else { 48 | // Create smallish grants 49 | let mut wgr = prod.grant(1).unwrap(); 50 | for (i, by) in wgr.iter_mut().enumerate() { 51 | *by = i as u8; 52 | } 53 | wgr.commit(1); 54 | 55 | let rgr = cons.read().unwrap(); 56 | assert_eq!(rgr.len(), 1); 57 | for (i, by) in rgr.iter().enumerate() { 58 | assert_eq!((i as u8), *by); 59 | } 60 | rgr.release(); 61 | }; 62 | } 63 | 64 | assert!(ctr > 1); 65 | } 66 | 67 | #[test] 68 | fn frame_overcommit() { 69 | let bb: BBBuffer<256> = BBBuffer::new(); 70 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 71 | 72 | // Create largeish grants 73 | let mut wgr = prod.grant(128).unwrap(); 74 | for (i, by) in wgr.iter_mut().enumerate() { 75 | *by = i as u8; 76 | } 77 | wgr.commit(255); 78 | 79 | let mut wgr = prod.grant(64).unwrap(); 80 | for (i, by) in wgr.iter_mut().enumerate() { 81 | *by = (i as u8) + 128; 82 | } 83 | wgr.commit(127); 84 | 85 | let rgr = cons.read().unwrap(); 86 | assert_eq!(rgr.len(), 128); 87 | rgr.release(); 88 | 89 | let rgr = cons.read().unwrap(); 90 | assert_eq!(rgr.len(), 64); 91 | rgr.release(); 92 | } 93 | 94 | #[test] 95 | fn frame_undercommit() { 96 | let bb: BBBuffer<512> = BBBuffer::new(); 97 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 98 | 99 | for _ in 0..100_000 { 100 | // Create largeish grants 101 | let mut wgr = prod.grant(128).unwrap(); 102 | for (i, by) in wgr.iter_mut().enumerate() { 103 | *by = i as u8; 104 | } 105 | wgr.commit(13); 106 | 107 | let mut wgr = prod.grant(64).unwrap(); 108 | for (i, by) in wgr.iter_mut().enumerate() { 109 | *by = (i as u8) + 128; 110 | } 111 | wgr.commit(7); 112 | 113 | let mut wgr = prod.grant(32).unwrap(); 114 | for (i, by) in wgr.iter_mut().enumerate() { 115 | *by = (i as u8) + 192; 116 | } 117 | wgr.commit(0); 118 | 119 | let rgr = cons.read().unwrap(); 120 | 
assert_eq!(rgr.len(), 13); 121 | rgr.release(); 122 | 123 | let rgr = cons.read().unwrap(); 124 | assert_eq!(rgr.len(), 7); 125 | rgr.release(); 126 | 127 | let rgr = cons.read().unwrap(); 128 | assert_eq!(rgr.len(), 0); 129 | rgr.release(); 130 | } 131 | } 132 | 133 | #[test] 134 | fn frame_auto_commit_release() { 135 | let bb: BBBuffer<256> = BBBuffer::new(); 136 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 137 | 138 | for _ in 0..100 { 139 | { 140 | let mut wgr = prod.grant(64).unwrap(); 141 | wgr.to_commit(64); 142 | for (i, by) in wgr.iter_mut().enumerate() { 143 | *by = i as u8; 144 | } 145 | // drop 146 | } 147 | 148 | { 149 | let mut rgr = cons.read().unwrap(); 150 | rgr.auto_release(true); 151 | let rgr = rgr; 152 | 153 | for (i, by) in rgr.iter().enumerate() { 154 | assert_eq!(*by, i as u8); 155 | } 156 | assert_eq!(rgr.len(), 64); 157 | // drop 158 | } 159 | } 160 | 161 | assert!(cons.read().is_none()); 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /bbqtest/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! NOTE: this crate is really just a shim for testing 2 | //! the other no-std crate. 3 | 4 | mod framed; 5 | mod multi_thread; 6 | mod ring_around_the_senders; 7 | mod single_thread; 8 | 9 | #[cfg(test)] 10 | mod tests { 11 | use bbqueue::{BBBuffer, Error as BBQError}; 12 | 13 | #[test] 14 | fn deref_deref_mut() { 15 | let bb: BBBuffer<6> = BBBuffer::new(); 16 | let (mut prod, mut cons) = bb.try_split().unwrap(); 17 | 18 | let mut wgr = prod.grant_exact(1).unwrap(); 19 | 20 | // deref_mut 21 | wgr[0] = 123; 22 | 23 | assert_eq!(wgr.len(), 1); 24 | 25 | wgr.commit(1); 26 | 27 | // deref 28 | let rgr = cons.read().unwrap(); 29 | 30 | assert_eq!(rgr[0], 123); 31 | 32 | rgr.release(1); 33 | } 34 | 35 | #[test] 36 | fn static_allocator() { 37 | // Check we can make multiple static items... 
38 | static BBQ1: BBBuffer<6> = BBBuffer::new(); 39 | static BBQ2: BBBuffer<6> = BBBuffer::new(); 40 | let (mut prod1, mut cons1) = BBQ1.try_split().unwrap(); 41 | let (mut _prod2, mut cons2) = BBQ2.try_split().unwrap(); 42 | 43 | // ... and they aren't the same 44 | let mut wgr1 = prod1.grant_exact(3).unwrap(); 45 | wgr1.copy_from_slice(&[1, 2, 3]); 46 | wgr1.commit(3); 47 | 48 | // no data here... 49 | assert!(cons2.read().is_err()); 50 | 51 | // ...data is here! 52 | let rgr1 = cons1.read().unwrap(); 53 | assert_eq!(&*rgr1, &[1, 2, 3]); 54 | } 55 | 56 | #[test] 57 | fn release() { 58 | // Check we can make multiple static items... 59 | static BBQ1: BBBuffer<6> = BBBuffer::new(); 60 | static BBQ2: BBBuffer<6> = BBBuffer::new(); 61 | let (prod1, cons1) = BBQ1.try_split().unwrap(); 62 | let (prod2, cons2) = BBQ2.try_split().unwrap(); 63 | 64 | // We cannot release with the wrong prod/cons 65 | let (prod2, cons2) = BBQ1.try_release(prod2, cons2).unwrap_err(); 66 | let (prod1, cons1) = BBQ2.try_release(prod1, cons1).unwrap_err(); 67 | 68 | // We cannot release with the wrong consumer... 
69 | let (prod1, cons2) = BBQ1.try_release(prod1, cons2).unwrap_err(); 70 | 71 | // ...or the wrong producer 72 | let (prod2, cons1) = BBQ1.try_release(prod2, cons1).unwrap_err(); 73 | 74 | // We cannot release with a write grant in progress 75 | let mut prod1 = prod1; 76 | let wgr1 = prod1.grant_exact(3).unwrap(); 77 | let (prod1, mut cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); 78 | 79 | // We cannot release with a read grant in progress 80 | wgr1.commit(3); 81 | let rgr1 = cons1.read().unwrap(); 82 | let (prod1, cons1) = BBQ1.try_release(prod1, cons1).unwrap_err(); 83 | 84 | // But we can when everything is resolved 85 | rgr1.release(3); 86 | assert!(BBQ1.try_release(prod1, cons1).is_ok()); 87 | assert!(BBQ2.try_release(prod2, cons2).is_ok()); 88 | 89 | // And we can re-split on-demand 90 | let _ = BBQ1.try_split().unwrap(); 91 | let _ = BBQ2.try_split().unwrap(); 92 | } 93 | 94 | #[test] 95 | fn direct_usage_sanity() { 96 | // Initialize 97 | let bb: BBBuffer<6> = BBBuffer::new(); 98 | let (mut prod, mut cons) = bb.try_split().unwrap(); 99 | assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); 100 | 101 | // Initial grant, shouldn't roll over 102 | let mut x = prod.grant_exact(4).unwrap(); 103 | 104 | // Still no data available yet 105 | assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); 106 | 107 | // Add full data from grant 108 | x.copy_from_slice(&[1, 2, 3, 4]); 109 | 110 | // Still no data available yet 111 | assert_eq!(cons.read(), Err(BBQError::InsufficientSize)); 112 | 113 | // Commit data 114 | x.commit(4); 115 | 116 | ::std::sync::atomic::fence(std::sync::atomic::Ordering::SeqCst); 117 | 118 | let a = cons.read().unwrap(); 119 | assert_eq!(&*a, &[1, 2, 3, 4]); 120 | 121 | // Release the first two bytes 122 | a.release(2); 123 | 124 | let r = cons.read().unwrap(); 125 | assert_eq!(&*r, &[3, 4]); 126 | r.release(0); 127 | 128 | // Grant two more 129 | let mut x = prod.grant_exact(2).unwrap(); 130 | let r = cons.read().unwrap(); 131 
| assert_eq!(&*r, &[3, 4]); 132 | r.release(0); 133 | 134 | // Add more data 135 | x.copy_from_slice(&[11, 12]); 136 | let r = cons.read().unwrap(); 137 | assert_eq!(&*r, &[3, 4]); 138 | r.release(0); 139 | 140 | // Commit 141 | x.commit(2); 142 | 143 | let a = cons.read().unwrap(); 144 | assert_eq!(&*a, &[3, 4, 11, 12]); 145 | 146 | a.release(2); 147 | let r = cons.read().unwrap(); 148 | assert_eq!(&*r, &[11, 12]); 149 | r.release(0); 150 | 151 | let mut x = prod.grant_exact(3).unwrap(); 152 | let r = cons.read().unwrap(); 153 | assert_eq!(&*r, &[11, 12]); 154 | r.release(0); 155 | 156 | x.copy_from_slice(&[21, 22, 23]); 157 | 158 | let r = cons.read().unwrap(); 159 | assert_eq!(&*r, &[11, 12]); 160 | r.release(0); 161 | x.commit(3); 162 | 163 | let a = cons.read().unwrap(); 164 | 165 | // NOTE: The data we just added isn't available yet, 166 | // since it has wrapped around 167 | assert_eq!(&*a, &[11, 12]); 168 | 169 | a.release(2); 170 | 171 | // And now we can see it 172 | let r = cons.read().unwrap(); 173 | assert_eq!(&*r, &[21, 22, 23]); 174 | r.release(0); 175 | 176 | // Ask for something way too big 177 | assert!(prod.grant_exact(10).is_err()); 178 | } 179 | 180 | #[test] 181 | fn zero_sized_grant() { 182 | let bb: BBBuffer<1000> = BBBuffer::new(); 183 | let (mut prod, mut _cons) = bb.try_split().unwrap(); 184 | 185 | let size = 1000; 186 | let grant = prod.grant_exact(size).unwrap(); 187 | grant.commit(size); 188 | 189 | let grant = prod.grant_exact(0).unwrap(); 190 | grant.commit(0); 191 | } 192 | 193 | #[test] 194 | fn frame_sanity() { 195 | let bb: BBBuffer<1000> = BBBuffer::new(); 196 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 197 | 198 | // One frame in, one frame out 199 | let mut wgrant = prod.grant(128).unwrap(); 200 | assert_eq!(wgrant.len(), 128); 201 | for (idx, i) in wgrant.iter_mut().enumerate() { 202 | *i = idx as u8; 203 | } 204 | wgrant.commit(128); 205 | 206 | let rgrant = cons.read().unwrap(); 207 | 
assert_eq!(rgrant.len(), 128); 208 | for (idx, i) in rgrant.iter().enumerate() { 209 | assert_eq!(*i, idx as u8); 210 | } 211 | rgrant.release(); 212 | 213 | // Three frames in, three frames out 214 | let mut state = 0; 215 | let states = [16usize, 32, 24]; 216 | 217 | for step in &states { 218 | let mut wgrant = prod.grant(*step).unwrap(); 219 | assert_eq!(wgrant.len(), *step); 220 | for (idx, i) in wgrant.iter_mut().enumerate() { 221 | *i = (idx + state) as u8; 222 | } 223 | wgrant.commit(*step); 224 | state += *step; 225 | } 226 | 227 | state = 0; 228 | 229 | for step in &states { 230 | let rgrant = cons.read().unwrap(); 231 | assert_eq!(rgrant.len(), *step); 232 | for (idx, i) in rgrant.iter().enumerate() { 233 | assert_eq!(*i, (idx + state) as u8); 234 | } 235 | rgrant.release(); 236 | state += *step; 237 | } 238 | } 239 | 240 | #[test] 241 | fn frame_wrap() { 242 | let bb: BBBuffer<22> = BBBuffer::new(); 243 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 244 | 245 | // 10 + 1 used 246 | let mut wgrant = prod.grant(10).unwrap(); 247 | assert_eq!(wgrant.len(), 10); 248 | for (idx, i) in wgrant.iter_mut().enumerate() { 249 | *i = idx as u8; 250 | } 251 | wgrant.commit(10); 252 | // 1 frame in queue 253 | 254 | // 20 + 2 used (assuming u64 test platform) 255 | let mut wgrant = prod.grant(10).unwrap(); 256 | assert_eq!(wgrant.len(), 10); 257 | for (idx, i) in wgrant.iter_mut().enumerate() { 258 | *i = idx as u8; 259 | } 260 | wgrant.commit(10); 261 | // 2 frames in queue 262 | 263 | let rgrant = cons.read().unwrap(); 264 | assert_eq!(rgrant.len(), 10); 265 | for (idx, i) in rgrant.iter().enumerate() { 266 | assert_eq!(*i, idx as u8); 267 | } 268 | rgrant.release(); 269 | // 1 frame in queue 270 | 271 | // No more room! 
272 | assert!(prod.grant(10).is_err()); 273 | 274 | let rgrant = cons.read().unwrap(); 275 | assert_eq!(rgrant.len(), 10); 276 | for (idx, i) in rgrant.iter().enumerate() { 277 | assert_eq!(*i, idx as u8); 278 | } 279 | rgrant.release(); 280 | // 0 frames in queue 281 | 282 | // 10 + 1 used (assuming u64 test platform) 283 | let mut wgrant = prod.grant(10).unwrap(); 284 | assert_eq!(wgrant.len(), 10); 285 | for (idx, i) in wgrant.iter_mut().enumerate() { 286 | *i = idx as u8; 287 | } 288 | wgrant.commit(10); 289 | // 1 frame in queue 290 | 291 | // No more room! 292 | assert!(prod.grant(10).is_err()); 293 | 294 | let rgrant = cons.read().unwrap(); 295 | assert_eq!(rgrant.len(), 10); 296 | for (idx, i) in rgrant.iter().enumerate() { 297 | assert_eq!(*i, idx as u8); 298 | } 299 | rgrant.release(); 300 | // 0 frames in queue 301 | 302 | // No more frames! 303 | assert!(cons.read().is_none()); 304 | } 305 | 306 | #[test] 307 | fn frame_big_little() { 308 | let bb: BBBuffer<65536> = BBBuffer::new(); 309 | let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 310 | 311 | // Create a frame that should take 3 bytes for the header 312 | assert!(prod.grant(65534).is_err()); 313 | 314 | let mut wgrant = prod.grant(65533).unwrap(); 315 | assert_eq!(wgrant.len(), 65533); 316 | for (idx, i) in wgrant.iter_mut().enumerate() { 317 | *i = idx as u8; 318 | } 319 | // Only commit 127 bytes, which fit into a header of 1 byte 320 | wgrant.commit(127); 321 | 322 | let rgrant = cons.read().unwrap(); 323 | assert_eq!(rgrant.len(), 127); 324 | for (idx, i) in rgrant.iter().enumerate() { 325 | assert_eq!(*i, idx as u8); 326 | } 327 | rgrant.release(); 328 | } 329 | 330 | #[test] 331 | fn split_sanity_check() { 332 | let bb: BBBuffer<10> = BBBuffer::new(); 333 | let (mut prod, mut cons) = bb.try_split().unwrap(); 334 | 335 | // Fill buffer 336 | let mut wgrant = prod.grant_exact(10).unwrap(); 337 | assert_eq!(wgrant.len(), 10); 338 | wgrant.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 
10]); 339 | wgrant.commit(10); 340 | 341 | let rgrant = cons.split_read().unwrap(); 342 | assert_eq!(rgrant.combined_len(), 10); 343 | assert_eq!( 344 | rgrant.bufs(), 345 | (&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10][..], &[][..]) 346 | ); 347 | // Release part of the buffer 348 | rgrant.release(6); 349 | 350 | // Almost fill buffer again => | 11 | 12 | 13 | 14 | 15 | x | 7 | 8 | 9 | 10 | 351 | let mut wgrant = prod.grant_exact(5).unwrap(); 352 | assert_eq!(wgrant.len(), 5); 353 | wgrant.copy_from_slice(&[11, 12, 13, 14, 15]); 354 | wgrant.commit(5); 355 | 356 | let rgrant = cons.split_read().unwrap(); 357 | assert_eq!(rgrant.combined_len(), 9); 358 | assert_eq!( 359 | rgrant.bufs(), 360 | (&[7, 8, 9, 10][..], &[11, 12, 13, 14, 15][..]) 361 | ); 362 | 363 | // Release part of the buffer => | x | x | x | 14 | 15 | x | x | x | x | x | 364 | rgrant.release(7); 365 | 366 | // Check that it is not possible to claim more space than what should be available 367 | assert!(prod.grant_exact(6).is_err()); 368 | 369 | // Fill buffer to the end => | x | x | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | 370 | let mut wgrant = prod.grant_exact(5).unwrap(); 371 | wgrant.copy_from_slice(&[21, 22, 23, 24, 25]); 372 | wgrant.commit(5); 373 | 374 | let rgrant = cons.split_read().unwrap(); 375 | assert_eq!(rgrant.combined_len(), 7); 376 | assert_eq!(rgrant.bufs(), (&[14, 15, 21, 22, 23, 24, 25][..], &[][..])); 377 | rgrant.release(0); 378 | 379 | // Fill buffer to the end => | 26 | 27 | x | 14 | 15 | 21 | 22 | 23 | 24 | 25 | 380 | let mut wgrant = prod.grant_exact(2).unwrap(); 381 | wgrant.copy_from_slice(&[26, 27]); 382 | wgrant.commit(2); 383 | 384 | // Fill buffer to the end => | x | 27 | x | x | x | x | x | x | x | x | 385 | let rgrant = cons.split_read().unwrap(); 386 | assert_eq!(rgrant.combined_len(), 9); 387 | assert_eq!( 388 | rgrant.bufs(), 389 | (&[14, 15, 21, 22, 23, 24, 25][..], &[26, 27][..]) 390 | ); 391 | rgrant.release(8); 392 | 393 | let rgrant = cons.split_read().unwrap(); 394 | 
assert_eq!(rgrant.combined_len(), 1); 395 | assert_eq!(rgrant.bufs(), (&[27][..], &[][..])); 396 | rgrant.release(1); 397 | } 398 | 399 | #[test] 400 | fn split_read_sanity_check() { 401 | let bb: BBBuffer<6> = BBBuffer::new(); 402 | let (mut prod, mut cons) = bb.try_split().unwrap(); 403 | 404 | const ITERS: usize = 100000; 405 | 406 | for i in 0..ITERS { 407 | let j = (i & 255) as u8; 408 | 409 | #[cfg(feature = "extra-verbose")] 410 | println!("==========================="); 411 | #[cfg(feature = "extra-verbose")] 412 | println!("INDEX: {:?}", j); 413 | #[cfg(feature = "extra-verbose")] 414 | println!("==========================="); 415 | 416 | #[cfg(feature = "extra-verbose")] 417 | println!("START: {:?}", bb); 418 | 419 | let mut wgr = prod.grant_exact(1).unwrap(); 420 | 421 | #[cfg(feature = "extra-verbose")] 422 | println!("GRANT: {:?}", bb); 423 | 424 | wgr[0] = j; 425 | 426 | #[cfg(feature = "extra-verbose")] 427 | println!("WRITE: {:?}", bb); 428 | 429 | wgr.commit(1); 430 | 431 | #[cfg(feature = "extra-verbose")] 432 | println!("COMIT: {:?}", bb); 433 | 434 | // This panicked before with Err(GrantInProgress), because SplitGrantR did not implement Drop 435 | let rgr = cons.split_read().unwrap(); 436 | drop(rgr); 437 | 438 | #[cfg(feature = "extra-verbose")] 439 | println!("READ : {:?}", bb); 440 | 441 | let rgr = cons.split_read().unwrap(); 442 | let (first, second) = rgr.bufs(); 443 | if first.len() == 1 { 444 | assert_eq!(first[0], j); 445 | } else if second.len() == 1 { 446 | assert_eq!(second[0], j); 447 | } else { 448 | assert!(false, "wrong len"); 449 | } 450 | 451 | #[cfg(feature = "extra-verbose")] 452 | println!("RELSE: {:?}", bb); 453 | 454 | rgr.release(1); 455 | 456 | #[cfg(feature = "extra-verbose")] 457 | println!("FINSH: {:?}", bb); 458 | } 459 | } 460 | } 461 | -------------------------------------------------------------------------------- /bbqtest/src/multi_thread.rs: 
-------------------------------------------------------------------------------- 1 | #[cfg_attr(not(feature = "verbose"), allow(unused_variables))] 2 | #[cfg(test)] 3 | mod tests { 4 | use bbqueue::{BBBuffer, Error}; 5 | use rand::prelude::*; 6 | use std::thread::spawn; 7 | use std::time::{Duration, Instant}; 8 | 9 | #[cfg(feature = "travisci")] 10 | const ITERS: usize = 10_000; 11 | #[cfg(not(feature = "travisci"))] 12 | const ITERS: usize = 10_000_000; 13 | 14 | const RPT_IVAL: usize = ITERS / 100; 15 | 16 | const QUEUE_SIZE: usize = 1024; 17 | 18 | const TIMEOUT_NODATA: Duration = Duration::from_millis(10_000); 19 | 20 | #[test] 21 | fn randomize_tx() { 22 | #[cfg(feature = "travisci")] 23 | #[cfg(feature = "verbose")] 24 | println!("Hello Travis!"); 25 | 26 | #[cfg(feature = "verbose")] 27 | println!("RTX: Generating Test Data..."); 28 | let gen_start = Instant::now(); 29 | let mut data = Vec::with_capacity(ITERS); 30 | (0..ITERS).for_each(|_| data.push(rand::random::())); 31 | let mut data_rx = data.clone(); 32 | 33 | let mut trng = thread_rng(); 34 | let mut chunks = vec![]; 35 | while !data.is_empty() { 36 | let chunk_sz = trng.gen_range(1..((QUEUE_SIZE / 2) - 1)); 37 | if chunk_sz > data.len() { 38 | continue; 39 | } 40 | 41 | // Note: This gives back data in chunks in reverse order. 
42 | // We later .rev()` this to fix it 43 | chunks.push(data.split_off(data.len() - chunk_sz)); 44 | } 45 | 46 | #[cfg(feature = "verbose")] 47 | println!("RTX: Generation complete: {:?}", gen_start.elapsed()); 48 | #[cfg(feature = "verbose")] 49 | println!("RTX: Running test..."); 50 | 51 | static BB: BBBuffer = BBBuffer::new(); 52 | let (mut tx, mut rx) = BB.try_split().unwrap(); 53 | 54 | let mut last_tx = Instant::now(); 55 | let mut last_rx = last_tx.clone(); 56 | let start_time = last_tx.clone(); 57 | 58 | let tx_thr = spawn(move || { 59 | let mut txd_ct = 0; 60 | let mut txd_ivl = 0; 61 | 62 | for (i, ch) in chunks.iter().rev().enumerate() { 63 | let mut semichunk = ch.to_owned(); 64 | // #[cfg(feature = "verbose")] println!("semi: {:?}", semichunk); 65 | 66 | while !semichunk.is_empty() { 67 | if last_tx.elapsed() > TIMEOUT_NODATA { 68 | panic!("tx timeout, iter {}", i); 69 | } 70 | 71 | 'sizer: for sz in (1..(semichunk.len() + 1)).rev() { 72 | if let Ok(mut gr) = tx.grant_exact(sz) { 73 | // how do you do this idiomatically? 
74 | (0..sz).for_each(|idx| { 75 | gr[idx] = semichunk.remove(0); 76 | }); 77 | gr.commit(sz); 78 | 79 | // Update tracking 80 | last_tx = Instant::now(); 81 | txd_ct += sz; 82 | if (txd_ct / RPT_IVAL) > txd_ivl { 83 | txd_ivl = txd_ct / RPT_IVAL; 84 | #[cfg(feature = "verbose")] 85 | println!("{:?} - rtxtx: {}", start_time.elapsed(), txd_ct); 86 | } 87 | 88 | break 'sizer; 89 | } 90 | } 91 | } 92 | } 93 | }); 94 | 95 | let rx_thr = spawn(move || { 96 | let mut rxd_ct = 0; 97 | let mut rxd_ivl = 0; 98 | 99 | for (_idx, i) in data_rx.drain(..).enumerate() { 100 | 'inner: loop { 101 | if last_rx.elapsed() > TIMEOUT_NODATA { 102 | panic!("rx timeout, iter {}", i); 103 | } 104 | let gr = match rx.read() { 105 | Ok(gr) => gr, 106 | Err(Error::InsufficientSize) => continue 'inner, 107 | Err(_) => panic!(), 108 | }; 109 | 110 | let act = gr[0] as u8; 111 | let exp = i; 112 | if act != exp { 113 | #[cfg(feature = "verbose")] 114 | println!("act: {:?}, exp: {:?}", act, exp); 115 | #[cfg(feature = "verbose")] 116 | println!("len: {:?}", gr.len()); 117 | #[cfg(feature = "verbose")] 118 | println!("{:?}", gr); 119 | panic!("RX Iter: {}, mod: {}", i, i % 6); 120 | } 121 | gr.release(1); 122 | 123 | // Update tracking 124 | last_rx = Instant::now(); 125 | rxd_ct += 1; 126 | if (rxd_ct / RPT_IVAL) > rxd_ivl { 127 | rxd_ivl = rxd_ct / RPT_IVAL; 128 | #[cfg(feature = "verbose")] 129 | println!("{:?} - rtxrx: {}", start_time.elapsed(), rxd_ct); 130 | } 131 | 132 | break 'inner; 133 | } 134 | } 135 | }); 136 | 137 | tx_thr.join().unwrap(); 138 | rx_thr.join().unwrap(); 139 | } 140 | 141 | #[test] 142 | fn sanity_check() { 143 | static BB: BBBuffer = BBBuffer::new(); 144 | let (mut tx, mut rx) = BB.try_split().unwrap(); 145 | 146 | let mut last_tx = Instant::now(); 147 | let mut last_rx = last_tx.clone(); 148 | let start_time = last_tx.clone(); 149 | 150 | let tx_thr = spawn(move || { 151 | let mut txd_ct = 0; 152 | let mut txd_ivl = 0; 153 | 154 | for i in 0..ITERS { 155 | 'inner: 
loop { 156 | if last_tx.elapsed() > TIMEOUT_NODATA { 157 | panic!("tx timeout, iter {}", i); 158 | } 159 | match tx.grant_exact(1) { 160 | Ok(mut gr) => { 161 | gr[0] = (i & 0xFF) as u8; 162 | gr.commit(1); 163 | 164 | // Update tracking 165 | last_tx = Instant::now(); 166 | txd_ct += 1; 167 | if (txd_ct / RPT_IVAL) > txd_ivl { 168 | txd_ivl = txd_ct / RPT_IVAL; 169 | #[cfg(feature = "verbose")] 170 | println!("{:?} - sctx: {}", start_time.elapsed(), txd_ct); 171 | } 172 | 173 | break 'inner; 174 | } 175 | Err(_) => {} 176 | } 177 | } 178 | } 179 | }); 180 | 181 | let rx_thr = spawn(move || { 182 | let mut rxd_ct = 0; 183 | let mut rxd_ivl = 0; 184 | 185 | let mut i = 0; 186 | 187 | while i < ITERS { 188 | if last_rx.elapsed() > TIMEOUT_NODATA { 189 | panic!("rx timeout, iter {}", i); 190 | } 191 | 192 | let gr = match rx.read() { 193 | Ok(gr) => gr, 194 | Err(Error::InsufficientSize) => continue, 195 | Err(_) => panic!(), 196 | }; 197 | 198 | for data in &*gr { 199 | let act = *data; 200 | let exp = (i & 0xFF) as u8; 201 | if act != exp { 202 | // #[cfg(feature = "verbose")] println!("baseptr: {}", panny); 203 | #[cfg(feature = "verbose")] 204 | println!("offendr: {:p}", &gr[0]); 205 | #[cfg(feature = "verbose")] 206 | println!("act: {:?}, exp: {:?}", act, exp); 207 | #[cfg(feature = "verbose")] 208 | println!("len: {:?}", gr.len()); 209 | #[cfg(feature = "verbose")] 210 | println!("{:?}", &gr); 211 | panic!("RX Iter: {}, mod: {}", i, i % 6); 212 | } 213 | 214 | i += 1; 215 | } 216 | 217 | let len = gr.len(); 218 | rxd_ct += len; 219 | gr.release(len); 220 | 221 | // Update tracking 222 | last_rx = Instant::now(); 223 | if (rxd_ct / RPT_IVAL) > rxd_ivl { 224 | rxd_ivl = rxd_ct / RPT_IVAL; 225 | #[cfg(feature = "verbose")] 226 | println!("{:?} - scrx: {}", start_time.elapsed(), rxd_ct); 227 | } 228 | } 229 | }); 230 | 231 | tx_thr.join().unwrap(); 232 | rx_thr.join().unwrap(); 233 | } 234 | 235 | #[test] 236 | fn sanity_check_grant_max() { 237 | static BB: BBBuffer 
= BBBuffer::new(); 238 | let (mut tx, mut rx) = BB.try_split().unwrap(); 239 | 240 | #[cfg(feature = "verbose")] 241 | println!("SCGM: Generating Test Data..."); 242 | let gen_start = Instant::now(); 243 | 244 | let mut data_tx = (0..ITERS).map(|i| (i & 0xFF) as u8).collect::>(); 245 | let mut data_rx = data_tx.clone(); 246 | 247 | #[cfg(feature = "verbose")] 248 | println!("SCGM: Generated Test Data in: {:?}", gen_start.elapsed()); 249 | #[cfg(feature = "verbose")] 250 | println!("SCGM: Starting Test..."); 251 | 252 | let mut last_tx = Instant::now(); 253 | let mut last_rx = last_tx.clone(); 254 | let start_time = last_tx.clone(); 255 | 256 | let tx_thr = spawn(move || { 257 | let mut txd_ct = 0; 258 | let mut txd_ivl = 0; 259 | 260 | let mut trng = thread_rng(); 261 | 262 | while !data_tx.is_empty() { 263 | 'inner: loop { 264 | if last_tx.elapsed() > TIMEOUT_NODATA { 265 | panic!("tx timeout"); 266 | } 267 | match tx.grant_max_remaining( 268 | trng.gen_range((QUEUE_SIZE / 3)..((2 * QUEUE_SIZE) / 3)), 269 | ) { 270 | Ok(mut gr) => { 271 | let sz = ::std::cmp::min(data_tx.len(), gr.len()); 272 | for i in 0..sz { 273 | gr[i] = data_tx.pop().unwrap(); 274 | } 275 | 276 | // Update tracking 277 | last_tx = Instant::now(); 278 | txd_ct += sz; 279 | if (txd_ct / RPT_IVAL) > txd_ivl { 280 | txd_ivl = txd_ct / RPT_IVAL; 281 | #[cfg(feature = "verbose")] 282 | println!("{:?} - scgmtx: {}", start_time.elapsed(), txd_ct); 283 | } 284 | 285 | let len = gr.len(); 286 | gr.commit(len); 287 | break 'inner; 288 | } 289 | Err(_) => {} 290 | } 291 | } 292 | } 293 | }); 294 | 295 | let rx_thr = spawn(move || { 296 | let mut rxd_ct = 0; 297 | let mut rxd_ivl = 0; 298 | 299 | while !data_rx.is_empty() { 300 | 'inner: loop { 301 | if last_rx.elapsed() > TIMEOUT_NODATA { 302 | panic!("rx timeout"); 303 | } 304 | let gr = match rx.read() { 305 | Ok(gr) => gr, 306 | Err(Error::InsufficientSize) => continue 'inner, 307 | Err(_) => panic!(), 308 | }; 309 | 310 | let act = gr[0]; 311 | let 
exp = data_rx.pop().unwrap(); 312 | if act != exp { 313 | #[cfg(feature = "verbose")] 314 | println!("offendr: {:p}", &gr[0]); 315 | #[cfg(feature = "verbose")] 316 | println!("act: {:?}, exp: {:?}", act, exp); 317 | #[cfg(feature = "verbose")] 318 | println!("len: {:?}", gr.len()); 319 | #[cfg(feature = "verbose")] 320 | println!("{:?}", gr); 321 | panic!("RX Iter: {}", rxd_ct); 322 | } 323 | gr.release(1); 324 | 325 | // Update tracking 326 | last_rx = Instant::now(); 327 | rxd_ct += 1; 328 | if (rxd_ct / RPT_IVAL) > rxd_ivl { 329 | rxd_ivl = rxd_ct / RPT_IVAL; 330 | #[cfg(feature = "verbose")] 331 | println!("{:?} - scgmrx: {}", start_time.elapsed(), rxd_ct); 332 | } 333 | 334 | break 'inner; 335 | } 336 | } 337 | }); 338 | 339 | tx_thr.join().unwrap(); 340 | rx_thr.join().unwrap(); 341 | } 342 | } 343 | -------------------------------------------------------------------------------- /bbqtest/src/ring_around_the_senders.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | use bbqueue::{BBBuffer, Consumer, GrantR, GrantW, Producer}; 5 | 6 | enum Potato<'a, const N: usize> { 7 | Tx((Producer<'a, N>, u8)), 8 | Rx((Consumer<'a, N>, u8)), 9 | TxG(GrantW<'a, N>), 10 | RxG(GrantR<'a, N>), 11 | Idle, 12 | Done, 13 | } 14 | 15 | #[cfg(not(feature = "short-potato"))] 16 | const TOTAL_RINGS: usize = 1_000_000; 17 | 18 | #[cfg(feature = "short-potato")] 19 | const TOTAL_RINGS: usize = 1_000; 20 | 21 | const TX_GRANTS_PER_RING: u8 = 3; 22 | const RX_GRANTS_PER_RING: u8 = 3; 23 | const BYTES_PER_GRANT: usize = 129; 24 | const BUFFER_SIZE: usize = 4096; 25 | 26 | impl<'a, const N: usize> Potato<'a, N> { 27 | fn work(self) -> (Self, Self) { 28 | match self { 29 | Self::Tx((mut prod, ct)) => { 30 | // If we are holding a producer, try to send three things before passing it on. 31 | if ct == 0 { 32 | // If we have exhausted our counts, pass on the sender. 
33 | (Self::Idle, Self::Tx((prod, TX_GRANTS_PER_RING))) 34 | } else { 35 | // If we get a grant, pass it on, otherwise keep trying 36 | if let Ok(wgr) = prod.grant_exact(BYTES_PER_GRANT) { 37 | (Self::Tx((prod, ct - 1)), Self::TxG(wgr)) 38 | } else { 39 | (Self::Tx((prod, ct)), Self::Idle) 40 | } 41 | } 42 | } 43 | Self::Rx((mut cons, ct)) => { 44 | // If we are holding a consumer, try to send three things before passing it on. 45 | if ct == 0 { 46 | // If we have exhausted our counts, pass on the sender. 47 | (Self::Idle, Self::Rx((cons, RX_GRANTS_PER_RING))) 48 | } else { 49 | // If we get a grant, pass it on, otherwise keep trying 50 | if let Ok(rgr) = cons.read() { 51 | (Self::Rx((cons, ct - 1)), Self::RxG(rgr)) 52 | } else { 53 | (Self::Rx((cons, ct)), Self::Idle) 54 | } 55 | } 56 | } 57 | Self::TxG(mut gr_w) => { 58 | gr_w.iter_mut() 59 | .take(BYTES_PER_GRANT) 60 | .enumerate() 61 | .for_each(|(i, by)| *by = i as u8); 62 | gr_w.commit(BYTES_PER_GRANT); 63 | (Self::Idle, Self::Idle) 64 | } 65 | Self::RxG(gr_r) => { 66 | gr_r.iter() 67 | .take(BYTES_PER_GRANT) 68 | .enumerate() 69 | .for_each(|(i, by)| assert_eq!(*by, i as u8)); 70 | gr_r.release(BYTES_PER_GRANT); 71 | (Self::Idle, Self::Idle) 72 | } 73 | Self::Idle => (Self::Idle, Self::Idle), 74 | Self::Done => (Self::Idle, Self::Done), 75 | } 76 | } 77 | } 78 | 79 | static BB: BBBuffer = BBBuffer::new(); 80 | 81 | use std::sync::mpsc::{channel, Receiver, Sender}; 82 | use std::thread::spawn; 83 | 84 | #[test] 85 | fn hello() { 86 | let (prod, cons) = BB.try_split().unwrap(); 87 | 88 | // create the channels 89 | let (tx_1_2, rx_1_2): ( 90 | Sender>, 91 | Receiver>, 92 | ) = channel(); 93 | let (tx_2_3, rx_2_3): ( 94 | Sender>, 95 | Receiver>, 96 | ) = channel(); 97 | let (tx_3_4, rx_3_4): ( 98 | Sender>, 99 | Receiver>, 100 | ) = channel(); 101 | let (tx_4_1, rx_4_1): ( 102 | Sender>, 103 | Receiver>, 104 | ) = channel(); 105 | 106 | tx_1_2.send(Potato::Tx((prod, 3))).unwrap(); 107 | 
tx_1_2.send(Potato::Rx((cons, 3))).unwrap(); 108 | 109 | let thread_1 = spawn(move || { 110 | let mut count = TOTAL_RINGS; 111 | let mut me: Potato<'static, BUFFER_SIZE> = Potato::Idle; 112 | 113 | loop { 114 | if let Potato::Idle = me { 115 | if let Ok(new) = rx_4_1.recv() { 116 | if let Potato::Tx(tx) = new { 117 | count -= 1; 118 | 119 | if (count % 100) == 0 { 120 | println!("count left: {}", count); 121 | } 122 | 123 | if count == 0 { 124 | me = Potato::Done; 125 | } else { 126 | me = Potato::Tx(tx); 127 | } 128 | } else { 129 | me = new; 130 | } 131 | } else { 132 | continue; 133 | } 134 | } 135 | let (new_me, send) = me.work(); 136 | 137 | let we_done = if let Potato::Done = &send { 138 | true 139 | } else { 140 | false 141 | }; 142 | 143 | let nop = if let Potato::Idle = &send { 144 | true 145 | } else { 146 | false 147 | }; 148 | 149 | if !nop { 150 | tx_1_2.send(send).unwrap(); 151 | } 152 | 153 | if we_done { 154 | println!("We good?"); 155 | return; 156 | } 157 | 158 | me = new_me; 159 | } 160 | }); 161 | 162 | let closure_2_3_4 = 163 | move |rx: Receiver>, 164 | tx: Sender>| { 165 | let mut me: Potato<'static, BUFFER_SIZE> = Potato::Idle; 166 | let mut count = 0; 167 | 168 | loop { 169 | if let Potato::Idle = me { 170 | if let Ok(new) = rx.try_recv() { 171 | if let Potato::Tx(_) = &new { 172 | count += 1; 173 | } 174 | me = new; 175 | } else { 176 | continue; 177 | } 178 | } 179 | let (new_me, send) = me.work(); 180 | 181 | let we_done = if let Potato::Done = &send { 182 | true 183 | } else { 184 | false 185 | }; 186 | 187 | let nop = if let Potato::Idle = &send { 188 | true 189 | } else { 190 | false 191 | }; 192 | 193 | if !nop { 194 | tx.send(send).ok(); 195 | } 196 | 197 | if we_done { 198 | assert_eq!(count, TOTAL_RINGS); 199 | println!("We good."); 200 | return; 201 | } 202 | 203 | me = new_me; 204 | } 205 | }; 206 | 207 | let thread_2 = spawn(move || closure_2_3_4(rx_1_2, tx_2_3)); 208 | let thread_3 = spawn(move || closure_2_3_4(rx_2_3, 
tx_3_4)); 209 | let thread_4 = spawn(move || closure_2_3_4(rx_3_4, tx_4_1)); 210 | 211 | thread_1.join().unwrap(); 212 | thread_2.join().unwrap(); 213 | thread_3.join().unwrap(); 214 | thread_4.join().unwrap(); 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /bbqtest/src/single_thread.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use bbqueue::BBBuffer; 4 | 5 | #[test] 6 | fn sanity_check() { 7 | let bb: BBBuffer<6> = BBBuffer::new(); 8 | let (mut prod, mut cons) = bb.try_split().unwrap(); 9 | 10 | const ITERS: usize = 100000; 11 | 12 | for i in 0..ITERS { 13 | let j = (i & 255) as u8; 14 | 15 | #[cfg(feature = "extra-verbose")] 16 | println!("==========================="); 17 | #[cfg(feature = "extra-verbose")] 18 | println!("INDEX: {:?}", j); 19 | #[cfg(feature = "extra-verbose")] 20 | println!("==========================="); 21 | 22 | #[cfg(feature = "extra-verbose")] 23 | println!("START: {:?}", bb); 24 | 25 | let mut wgr = prod.grant_exact(1).unwrap(); 26 | 27 | #[cfg(feature = "extra-verbose")] 28 | println!("GRANT: {:?}", bb); 29 | 30 | wgr[0] = j; 31 | 32 | #[cfg(feature = "extra-verbose")] 33 | println!("WRITE: {:?}", bb); 34 | 35 | wgr.commit(1); 36 | 37 | #[cfg(feature = "extra-verbose")] 38 | println!("COMIT: {:?}", bb); 39 | 40 | let rgr = cons.read().unwrap(); 41 | 42 | #[cfg(feature = "extra-verbose")] 43 | println!("READ : {:?}", bb); 44 | 45 | assert_eq!(rgr[0], j); 46 | 47 | #[cfg(feature = "extra-verbose")] 48 | println!("RELSE: {:?}", bb); 49 | 50 | rgr.release(1); 51 | 52 | #[cfg(feature = "extra-verbose")] 53 | println!("FINSH: {:?}", bb); 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bbqueue" 3 | version = "0.5.1" 4 | description = 
"A SPSC, lockless, no_std, thread safe, queue, based on BipBuffers" 5 | repository = "https://github.com/jamesmunns/bbqueue" 6 | authors = ["James Munns "] 7 | edition = "2018" 8 | readme = "../README.md" 9 | 10 | categories = [ 11 | "embedded", 12 | "no-std", 13 | "memory-management", 14 | ] 15 | license = "MIT OR Apache-2.0" 16 | 17 | [dependencies] 18 | cortex-m = { version = "0.7.0", optional = true } 19 | 20 | [dependencies.defmt] 21 | version = "0.3.0" 22 | optional = true 23 | 24 | [features] 25 | thumbv6 = ["cortex-m"] 26 | defmt_0_3 = ["defmt"] 27 | 28 | [package.metadata.docs.rs] 29 | all-features = true 30 | -------------------------------------------------------------------------------- /core/src/bbbuffer.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | framed::{FrameConsumer, FrameProducer}, 3 | Error, Result, 4 | }; 5 | use core::{ 6 | cell::UnsafeCell, 7 | cmp::min, 8 | marker::PhantomData, 9 | mem::{forget, transmute, MaybeUninit}, 10 | ops::{Deref, DerefMut}, 11 | ptr::NonNull, 12 | result::Result as CoreResult, 13 | slice::{from_raw_parts, from_raw_parts_mut}, 14 | sync::atomic::{ 15 | AtomicBool, AtomicUsize, 16 | Ordering::{AcqRel, Acquire, Release}, 17 | }, 18 | }; 19 | #[derive(Debug)] 20 | /// A backing structure for a BBQueue. Can be used to create either 21 | /// a BBQueue or a split Producer/Consumer pair 22 | pub struct BBBuffer { 23 | buf: UnsafeCell>, 24 | 25 | /// Where the next byte will be written 26 | write: AtomicUsize, 27 | 28 | /// Where the next byte will be read from 29 | read: AtomicUsize, 30 | 31 | /// Used in the inverted case to mark the end of the 32 | /// readable streak. Otherwise will == sizeof::(). 
33 | /// Writer is responsible for placing this at the correct 34 | /// place when entering an inverted condition, and Reader 35 | /// is responsible for moving it back to sizeof::() 36 | /// when exiting the inverted condition 37 | last: AtomicUsize, 38 | 39 | /// Used by the Writer to remember what bytes are currently 40 | /// allowed to be written to, but are not yet ready to be 41 | /// read from 42 | reserve: AtomicUsize, 43 | 44 | /// Is there an active read grant? 45 | read_in_progress: AtomicBool, 46 | 47 | /// Is there an active write grant? 48 | write_in_progress: AtomicBool, 49 | 50 | /// Have we already split? 51 | already_split: AtomicBool, 52 | } 53 | 54 | unsafe impl Sync for BBBuffer {} 55 | 56 | impl<'a, const N: usize> BBBuffer { 57 | /// Attempt to split the `BBBuffer` into `Consumer` and `Producer` halves to gain access to the 58 | /// buffer. If buffer has already been split, an error will be returned. 59 | /// 60 | /// NOTE: When splitting, the underlying buffer will be explicitly initialized 61 | /// to zero. This may take a measurable amount of time, depending on the size 62 | /// of the buffer. This is necessary to prevent undefined behavior. If the buffer 63 | /// is placed at `static` scope within the `.bss` region, the explicit initialization 64 | /// will be elided (as it is already performed as part of memory initialization) 65 | /// 66 | /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical section 67 | /// while splitting. 68 | /// 69 | /// ```rust 70 | /// # // bbqueue test shim! 71 | /// # fn bbqtest() { 72 | /// use bbqueue::BBBuffer; 73 | /// 74 | /// // Create and split a new buffer 75 | /// let buffer: BBBuffer<6> = BBBuffer::new(); 76 | /// let (prod, cons) = buffer.try_split().unwrap(); 77 | /// 78 | /// // Not possible to split twice 79 | /// assert!(buffer.try_split().is_err()); 80 | /// # // bbqueue test shim! 
81 | /// # } 82 | /// # 83 | /// # fn main() { 84 | /// # #[cfg(not(feature = "thumbv6"))] 85 | /// # bbqtest(); 86 | /// # } 87 | /// ``` 88 | pub fn try_split(&'a self) -> Result<(Producer<'a, N>, Consumer<'a, N>)> { 89 | if atomic::swap(&self.already_split, true, AcqRel) { 90 | return Err(Error::AlreadySplit); 91 | } 92 | 93 | unsafe { 94 | // Explicitly zero the data to avoid undefined behavior. 95 | // This is required, because we hand out references to the buffers, 96 | // which mean that creating them as references is technically UB for now 97 | let mu_ptr = self.buf.get(); 98 | (*mu_ptr).as_mut_ptr().write_bytes(0u8, 1); 99 | 100 | let nn1 = NonNull::new_unchecked(self as *const _ as *mut _); 101 | let nn2 = NonNull::new_unchecked(self as *const _ as *mut _); 102 | 103 | Ok(( 104 | Producer { 105 | bbq: nn1, 106 | pd: PhantomData, 107 | }, 108 | Consumer { 109 | bbq: nn2, 110 | pd: PhantomData, 111 | }, 112 | )) 113 | } 114 | } 115 | 116 | /// Attempt to split the `BBBuffer` into `FrameConsumer` and `FrameProducer` halves 117 | /// to gain access to the buffer. If buffer has already been split, an error 118 | /// will be returned. 119 | /// 120 | /// NOTE: When splitting, the underlying buffer will be explicitly initialized 121 | /// to zero. This may take a measurable amount of time, depending on the size 122 | /// of the buffer. This is necessary to prevent undefined behavior. If the buffer 123 | /// is placed at `static` scope within the `.bss` region, the explicit initialization 124 | /// will be elided (as it is already performed as part of memory initialization) 125 | /// 126 | /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical 127 | /// section while splitting. 
128 | pub fn try_split_framed(&'a self) -> Result<(FrameProducer<'a, N>, FrameConsumer<'a, N>)> { 129 | let (producer, consumer) = self.try_split()?; 130 | Ok((FrameProducer { producer }, FrameConsumer { consumer })) 131 | } 132 | 133 | /// Attempt to release the Producer and Consumer 134 | /// 135 | /// This re-initializes the buffer so it may be split in a different mode at a later 136 | /// time. There must be no read or write grants active, or an error will be returned. 137 | /// 138 | /// The `Producer` and `Consumer` must be from THIS `BBBuffer`, or an error will 139 | /// be returned. 140 | /// 141 | /// ```rust 142 | /// # // bbqueue test shim! 143 | /// # fn bbqtest() { 144 | /// use bbqueue::BBBuffer; 145 | /// 146 | /// // Create and split a new buffer 147 | /// let buffer: BBBuffer<6> = BBBuffer::new(); 148 | /// let (prod, cons) = buffer.try_split().unwrap(); 149 | /// 150 | /// // Not possible to split twice 151 | /// assert!(buffer.try_split().is_err()); 152 | /// 153 | /// // Release the producer and consumer 154 | /// assert!(buffer.try_release(prod, cons).is_ok()); 155 | /// 156 | /// // Split the buffer in framed mode 157 | /// let (fprod, fcons) = buffer.try_split_framed().unwrap(); 158 | /// # // bbqueue test shim! 159 | /// # } 160 | /// # 161 | /// # fn main() { 162 | /// # #[cfg(not(feature = "thumbv6"))] 163 | /// # bbqtest(); 164 | /// # } 165 | /// ``` 166 | pub fn try_release( 167 | &'a self, 168 | prod: Producer<'a, N>, 169 | cons: Consumer<'a, N>, 170 | ) -> CoreResult<(), (Producer<'a, N>, Consumer<'a, N>)> { 171 | // Note: Re-entrancy is not possible because we require ownership 172 | // of the producer and consumer, which are not cloneable. We also 173 | // can assume the buffer has been split, because 174 | 175 | // Are these our producers and consumers? 
176 | let our_prod = prod.bbq.as_ptr() as *const Self == self; 177 | let our_cons = cons.bbq.as_ptr() as *const Self == self; 178 | 179 | if !(our_prod && our_cons) { 180 | // Can't release, not our producer and consumer 181 | return Err((prod, cons)); 182 | } 183 | 184 | let wr_in_progress = self.write_in_progress.load(Acquire); 185 | let rd_in_progress = self.read_in_progress.load(Acquire); 186 | 187 | if wr_in_progress || rd_in_progress { 188 | // Can't release, active grant(s) in progress 189 | return Err((prod, cons)); 190 | } 191 | 192 | // Drop the producer and consumer halves 193 | // Clippy's hint are ignored because this is done intentionnaly to prevent any use after 194 | // the release fo the lock. 195 | #[allow(clippy::drop_non_drop)] 196 | drop(prod); 197 | #[allow(clippy::drop_non_drop)] 198 | drop(cons); 199 | 200 | // Re-initialize the buffer (not totally needed, but nice to do) 201 | self.write.store(0, Release); 202 | self.read.store(0, Release); 203 | self.reserve.store(0, Release); 204 | self.last.store(0, Release); 205 | 206 | // Mark the buffer as ready to be split 207 | self.already_split.store(false, Release); 208 | 209 | Ok(()) 210 | } 211 | 212 | /// Attempt to release the Producer and Consumer in Framed mode 213 | /// 214 | /// This re-initializes the buffer so it may be split in a different mode at a later 215 | /// time. There must be no read or write grants active, or an error will be returned. 216 | /// 217 | /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBBuffer`, or an error 218 | /// will be returned. 
219 | pub fn try_release_framed( 220 | &'a self, 221 | prod: FrameProducer<'a, N>, 222 | cons: FrameConsumer<'a, N>, 223 | ) -> CoreResult<(), (FrameProducer<'a, N>, FrameConsumer<'a, N>)> { 224 | self.try_release(prod.producer, cons.consumer) 225 | .map_err(|(producer, consumer)| { 226 | // Restore the wrapper types 227 | (FrameProducer { producer }, FrameConsumer { consumer }) 228 | }) 229 | } 230 | } 231 | 232 | impl BBBuffer { 233 | /// Create a new constant inner portion of a `BBBuffer`. 234 | /// 235 | /// NOTE: This is only necessary to use when creating a `BBBuffer` at static 236 | /// scope, and is generally never used directly. This process is necessary to 237 | /// work around current limitations in `const fn`, and will be replaced in 238 | /// the future. 239 | /// 240 | /// ```rust,no_run 241 | /// use bbqueue::BBBuffer; 242 | /// 243 | /// static BUF: BBBuffer<6> = BBBuffer::new(); 244 | /// 245 | /// fn main() { 246 | /// let (prod, cons) = BUF.try_split().unwrap(); 247 | /// } 248 | /// ``` 249 | pub const fn new() -> Self { 250 | Self { 251 | // This will not be initialized until we split the buffer 252 | buf: UnsafeCell::new(MaybeUninit::uninit()), 253 | 254 | // Owned by the writer 255 | write: AtomicUsize::new(0), 256 | 257 | // Owned by the reader 258 | read: AtomicUsize::new(0), 259 | 260 | // Cooperatively owned 261 | // 262 | // NOTE: This should generally be initialized as size_of::(), however 263 | // this would prevent the structure from being entirely zero-initialized, 264 | // and can cause the .data section to be much larger than necessary. By 265 | // forcing the `last` pointer to be zero initially, we place the structure 266 | // in an "inverted" condition, which will be resolved on the first commited 267 | // bytes that are written to the structure. 268 | // 269 | // When read == last == write, no bytes will be allowed to be read (good), but 270 | // write grants can be given out (also good). 
271 | last: AtomicUsize::new(0), 272 | 273 | // Owned by the Writer, "private" 274 | reserve: AtomicUsize::new(0), 275 | 276 | // Owned by the Reader, "private" 277 | read_in_progress: AtomicBool::new(false), 278 | 279 | // Owned by the Writer, "private" 280 | write_in_progress: AtomicBool::new(false), 281 | 282 | // We haven't split at the start 283 | already_split: AtomicBool::new(false), 284 | } 285 | } 286 | } 287 | 288 | /// `Producer` is the primary interface for pushing data into a `BBBuffer`. 289 | /// There are various methods for obtaining a grant to write to the buffer, with 290 | /// different potential tradeoffs. As all grants are required to be a contiguous 291 | /// range of data, different strategies are sometimes useful when making the decision 292 | /// between maximizing usage of the buffer, and ensuring a given grant is successful. 293 | /// 294 | /// As a short summary of currently possible grants: 295 | /// 296 | /// * `grant_exact(N)` 297 | /// * User will receive a grant `sz == N` (or receive an error) 298 | /// * This may cause a wraparound if a grant of size N is not available 299 | /// at the end of the ring. 300 | /// * If this grant caused a wraparound, the bytes that were "skipped" at the 301 | /// end of the ring will not be available until the reader reaches them, 302 | /// regardless of whether the grant commited any data or not. 303 | /// * Maximum possible waste due to skipping: `N - 1` bytes 304 | /// * `grant_max_remaining(N)` 305 | /// * User will receive a grant `0 < sz <= N` (or receive an error) 306 | /// * This will only cause a wrap to the beginning of the ring if exactly 307 | /// zero bytes are available at the end of the ring. 308 | /// * Maximum possible waste due to skipping: 0 bytes 309 | /// 310 | /// See [this github issue](https://github.com/jamesmunns/bbqueue/issues/38) for a 311 | /// discussion of grant methods that could be added in the future. 
312 | #[derive(Debug)] 313 | pub struct Producer<'a, const N: usize> { 314 | bbq: NonNull>, 315 | pd: PhantomData<&'a ()>, 316 | } 317 | 318 | unsafe impl<'a, const N: usize> Send for Producer<'a, N> {} 319 | 320 | impl<'a, const N: usize> Producer<'a, N> { 321 | /// Request a writable, contiguous section of memory of exactly 322 | /// `sz` bytes. If the buffer size requested is not available, 323 | /// an error will be returned. 324 | /// 325 | /// This method may cause the buffer to wrap around early if the 326 | /// requested space is not available at the end of the buffer, but 327 | /// is available at the beginning 328 | /// 329 | /// ```rust 330 | /// # // bbqueue test shim! 331 | /// # fn bbqtest() { 332 | /// use bbqueue::BBBuffer; 333 | /// 334 | /// // Create and split a new buffer of 6 elements 335 | /// let buffer: BBBuffer<6> = BBBuffer::new(); 336 | /// let (mut prod, cons) = buffer.try_split().unwrap(); 337 | /// 338 | /// // Successfully obtain and commit a grant of four bytes 339 | /// let mut grant = prod.grant_exact(4).unwrap(); 340 | /// assert_eq!(grant.buf().len(), 4); 341 | /// grant.commit(4); 342 | /// 343 | /// // Try to obtain a grant of three bytes 344 | /// assert!(prod.grant_exact(3).is_err()); 345 | /// # // bbqueue test shim! 346 | /// # } 347 | /// # 348 | /// # fn main() { 349 | /// # #[cfg(not(feature = "thumbv6"))] 350 | /// # bbqtest(); 351 | /// # } 352 | /// ``` 353 | pub fn grant_exact(&mut self, sz: usize) -> Result> { 354 | let inner = unsafe { &self.bbq.as_ref() }; 355 | 356 | if atomic::swap(&inner.write_in_progress, true, AcqRel) { 357 | return Err(Error::GrantInProgress); 358 | } 359 | 360 | // Writer component. 
Must never write to `read`, 361 | // be careful writing to `load` 362 | let write = inner.write.load(Acquire); 363 | let read = inner.read.load(Acquire); 364 | let max = N; 365 | let already_inverted = write < read; 366 | 367 | let start = if already_inverted { 368 | if (write + sz) < read { 369 | // Inverted, room is still available 370 | write 371 | } else { 372 | // Inverted, no room is available 373 | inner.write_in_progress.store(false, Release); 374 | return Err(Error::InsufficientSize); 375 | } 376 | } else { 377 | #[allow(clippy::collapsible_if)] 378 | if write + sz <= max { 379 | // Non inverted condition 380 | write 381 | } else { 382 | // Not inverted, but need to go inverted 383 | 384 | // NOTE: We check sz < read, NOT <=, because 385 | // write must never == read in an inverted condition, since 386 | // we will then not be able to tell if we are inverted or not 387 | if sz < read { 388 | // Invertible situation 389 | 0 390 | } else { 391 | // Not invertible, no space 392 | inner.write_in_progress.store(false, Release); 393 | return Err(Error::InsufficientSize); 394 | } 395 | } 396 | }; 397 | 398 | // Safe write, only viewed by this task 399 | inner.reserve.store(start + sz, Release); 400 | 401 | // This is sound, as UnsafeCell, MaybeUninit, and GenericArray 402 | // are all `#[repr(Transparent)] 403 | let start_of_buf_ptr = inner.buf.get().cast::(); 404 | let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(start), sz) }; 405 | 406 | Ok(GrantW { 407 | buf: grant_slice.into(), 408 | bbq: self.bbq, 409 | to_commit: 0, 410 | phatom: PhantomData, 411 | }) 412 | } 413 | 414 | /// Request a writable, contiguous section of memory of up to 415 | /// `sz` bytes. If a buffer of size `sz` is not available without 416 | /// wrapping, but some space (0 < available < sz) is available without 417 | /// wrapping, then a grant will be given for the remaining size at the 418 | /// end of the buffer. 
If no space is available for writing, an error 419 | /// will be returned. 420 | /// 421 | /// ``` 422 | /// # // bbqueue test shim! 423 | /// # fn bbqtest() { 424 | /// use bbqueue::BBBuffer; 425 | /// 426 | /// // Create and split a new buffer of 6 elements 427 | /// let buffer: BBBuffer<6> = BBBuffer::new(); 428 | /// let (mut prod, mut cons) = buffer.try_split().unwrap(); 429 | /// 430 | /// // Successfully obtain and commit a grant of four bytes 431 | /// let mut grant = prod.grant_max_remaining(4).unwrap(); 432 | /// assert_eq!(grant.buf().len(), 4); 433 | /// grant.commit(4); 434 | /// 435 | /// // Release the four initial commited bytes 436 | /// let mut grant = cons.read().unwrap(); 437 | /// assert_eq!(grant.buf().len(), 4); 438 | /// grant.release(4); 439 | /// 440 | /// // Try to obtain a grant of three bytes, get two bytes 441 | /// let mut grant = prod.grant_max_remaining(3).unwrap(); 442 | /// assert_eq!(grant.buf().len(), 2); 443 | /// grant.commit(2); 444 | /// # // bbqueue test shim! 445 | /// # } 446 | /// # 447 | /// # fn main() { 448 | /// # #[cfg(not(feature = "thumbv6"))] 449 | /// # bbqtest(); 450 | /// # } 451 | /// ``` 452 | pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result> { 453 | let inner = unsafe { &self.bbq.as_ref() }; 454 | 455 | if atomic::swap(&inner.write_in_progress, true, AcqRel) { 456 | return Err(Error::GrantInProgress); 457 | } 458 | 459 | // Writer component. 
Must never write to `read`, 460 | // be careful writing to `load` 461 | let write = inner.write.load(Acquire); 462 | let read = inner.read.load(Acquire); 463 | let max = N; 464 | 465 | let already_inverted = write < read; 466 | 467 | let start = if already_inverted { 468 | // In inverted case, read is always > write 469 | let remain = read - write - 1; 470 | 471 | if remain != 0 { 472 | sz = min(remain, sz); 473 | write 474 | } else { 475 | // Inverted, no room is available 476 | inner.write_in_progress.store(false, Release); 477 | return Err(Error::InsufficientSize); 478 | } 479 | } else { 480 | #[allow(clippy::collapsible_if)] 481 | if write != max { 482 | // Some (or all) room remaining in un-inverted case 483 | sz = min(max - write, sz); 484 | write 485 | } else { 486 | // Not inverted, but need to go inverted 487 | 488 | // NOTE: We check read > 1, NOT read >= 1, because 489 | // write must never == read in an inverted condition, since 490 | // we will then not be able to tell if we are inverted or not 491 | if read > 1 { 492 | sz = min(read - 1, sz); 493 | 0 494 | } else { 495 | // Not invertible, no space 496 | inner.write_in_progress.store(false, Release); 497 | return Err(Error::InsufficientSize); 498 | } 499 | } 500 | }; 501 | 502 | // Safe write, only viewed by this task 503 | inner.reserve.store(start + sz, Release); 504 | 505 | // This is sound, as UnsafeCell, MaybeUninit, and GenericArray 506 | // are all `#[repr(Transparent)] 507 | let start_of_buf_ptr = inner.buf.get().cast::(); 508 | let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(start), sz) }; 509 | 510 | Ok(GrantW { 511 | buf: grant_slice.into(), 512 | bbq: self.bbq, 513 | to_commit: 0, 514 | phatom: PhantomData, 515 | }) 516 | } 517 | } 518 | 519 | /// `Consumer` is the primary interface for reading data from a `BBBuffer`. 
520 | #[derive(Debug)] 521 | pub struct Consumer<'a, const N: usize> { 522 | bbq: NonNull>, 523 | pd: PhantomData<&'a ()>, 524 | } 525 | 526 | unsafe impl<'a, const N: usize> Send for Consumer<'a, N> {} 527 | 528 | impl<'a, const N: usize> Consumer<'a, N> { 529 | /// Obtains a contiguous slice of committed bytes. This slice may not 530 | /// contain ALL available bytes, if the writer has wrapped around. The 531 | /// remaining bytes will be available after all readable bytes are 532 | /// released 533 | /// 534 | /// ```rust 535 | /// # // bbqueue test shim! 536 | /// # fn bbqtest() { 537 | /// use bbqueue::BBBuffer; 538 | /// 539 | /// // Create and split a new buffer of 6 elements 540 | /// let buffer: BBBuffer<6> = BBBuffer::new(); 541 | /// let (mut prod, mut cons) = buffer.try_split().unwrap(); 542 | /// 543 | /// // Successfully obtain and commit a grant of four bytes 544 | /// let mut grant = prod.grant_max_remaining(4).unwrap(); 545 | /// assert_eq!(grant.buf().len(), 4); 546 | /// grant.commit(4); 547 | /// 548 | /// // Obtain a read grant 549 | /// let mut grant = cons.read().unwrap(); 550 | /// assert_eq!(grant.buf().len(), 4); 551 | /// # // bbqueue test shim! 552 | /// # } 553 | /// # 554 | /// # fn main() { 555 | /// # #[cfg(not(feature = "thumbv6"))] 556 | /// # bbqtest(); 557 | /// # } 558 | /// ``` 559 | pub fn read(&mut self) -> Result> { 560 | let inner = unsafe { &self.bbq.as_ref() }; 561 | 562 | if atomic::swap(&inner.read_in_progress, true, AcqRel) { 563 | return Err(Error::GrantInProgress); 564 | } 565 | 566 | let write = inner.write.load(Acquire); 567 | let last = inner.last.load(Acquire); 568 | let mut read = inner.read.load(Acquire); 569 | 570 | // Resolve the inverted case or end of read 571 | if (read == last) && (write < read) { 572 | read = 0; 573 | // This has some room for error, the other thread reads this 574 | // Impact to Grant: 575 | // Grant checks if read < write to see if inverted. 
If not inverted, but 576 | // no space left, Grant will initiate an inversion, but will not trigger it 577 | // Impact to Commit: 578 | // Commit does not check read, but if Grant has started an inversion, 579 | // grant could move Last to the prior write position 580 | // MOVING READ BACKWARDS! 581 | inner.read.store(0, Release); 582 | } 583 | 584 | let sz = if write < read { 585 | // Inverted, only believe last 586 | last 587 | } else { 588 | // Not inverted, only believe write 589 | write 590 | } - read; 591 | 592 | if sz == 0 { 593 | inner.read_in_progress.store(false, Release); 594 | return Err(Error::InsufficientSize); 595 | } 596 | 597 | // This is sound, as UnsafeCell, MaybeUninit, and GenericArray 598 | // are all `#[repr(Transparent)] 599 | let start_of_buf_ptr = inner.buf.get().cast::(); 600 | let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(read), sz) }; 601 | 602 | Ok(GrantR { 603 | buf: grant_slice.into(), 604 | bbq: self.bbq, 605 | to_release: 0, 606 | phatom: PhantomData, 607 | }) 608 | } 609 | 610 | /// Obtains two disjoint slices, which are each contiguous of committed bytes. 611 | /// Combined these contain all previously commited data. 612 | pub fn split_read(&mut self) -> Result> { 613 | let inner = unsafe { &self.bbq.as_ref() }; 614 | 615 | if atomic::swap(&inner.read_in_progress, true, AcqRel) { 616 | return Err(Error::GrantInProgress); 617 | } 618 | 619 | let write = inner.write.load(Acquire); 620 | let last = inner.last.load(Acquire); 621 | let mut read = inner.read.load(Acquire); 622 | 623 | // Resolve the inverted case or end of read 624 | if (read == last) && (write < read) { 625 | read = 0; 626 | // This has some room for error, the other thread reads this 627 | // Impact to Grant: 628 | // Grant checks if read < write to see if inverted. 
If not inverted, but 629 | // no space left, Grant will initiate an inversion, but will not trigger it 630 | // Impact to Commit: 631 | // Commit does not check read, but if Grant has started an inversion, 632 | // grant could move Last to the prior write position 633 | // MOVING READ BACKWARDS! 634 | inner.read.store(0, Release); 635 | } 636 | 637 | let (sz1, sz2) = if write < read { 638 | // Inverted, only believe last 639 | (last - read, write) 640 | } else { 641 | // Not inverted, only believe write 642 | (write - read, 0) 643 | }; 644 | 645 | if sz1 == 0 { 646 | inner.read_in_progress.store(false, Release); 647 | return Err(Error::InsufficientSize); 648 | } 649 | 650 | // This is sound, as UnsafeCell, MaybeUninit, and GenericArray 651 | // are all `#[repr(Transparent)] 652 | let start_of_buf_ptr = inner.buf.get().cast::(); 653 | let grant_slice1 = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(read), sz1) }; 654 | let grant_slice2 = unsafe { from_raw_parts_mut(start_of_buf_ptr, sz2) }; 655 | 656 | Ok(SplitGrantR { 657 | buf1: grant_slice1.into(), 658 | buf2: grant_slice2.into(), 659 | bbq: self.bbq, 660 | to_release: 0, 661 | phatom: PhantomData, 662 | }) 663 | } 664 | } 665 | 666 | impl BBBuffer { 667 | /// Returns the size of the backing storage. 668 | /// 669 | /// This is the maximum number of bytes that can be stored in this queue. 670 | /// 671 | /// ```rust 672 | /// # // bbqueue test shim! 673 | /// # fn bbqtest() { 674 | /// use bbqueue::BBBuffer; 675 | /// 676 | /// // Create a new buffer of 6 elements 677 | /// let buffer: BBBuffer<6> = BBBuffer::new(); 678 | /// assert_eq!(buffer.capacity(), 6); 679 | /// # // bbqueue test shim! 
680 | /// # } 681 | /// # 682 | /// # fn main() { 683 | /// # #[cfg(not(feature = "thumbv6"))] 684 | /// # bbqtest(); 685 | /// # } 686 | /// ``` 687 | pub const fn capacity(&self) -> usize { 688 | N 689 | } 690 | } 691 | 692 | /// A structure representing a contiguous region of memory that 693 | /// may be written to, and potentially "committed" to the queue. 694 | /// 695 | /// NOTE: If the grant is dropped without explicitly commiting 696 | /// the contents, or by setting a the number of bytes to 697 | /// automatically be committed with `to_commit()`, then no bytes 698 | /// will be comitted for writing. 699 | /// 700 | /// If the `thumbv6` feature is selected, dropping the grant 701 | /// without committing it takes a short critical section, 702 | #[derive(Debug, PartialEq)] 703 | pub struct GrantW<'a, const N: usize> { 704 | pub(crate) buf: NonNull<[u8]>, 705 | bbq: NonNull>, 706 | pub(crate) to_commit: usize, 707 | phatom: PhantomData<&'a mut [u8]>, 708 | } 709 | 710 | unsafe impl<'a, const N: usize> Send for GrantW<'a, N> {} 711 | 712 | /// A structure representing a contiguous region of memory that 713 | /// may be read from, and potentially "released" (or cleared) 714 | /// from the queue 715 | /// 716 | /// NOTE: If the grant is dropped without explicitly releasing 717 | /// the contents, or by setting the number of bytes to automatically 718 | /// be released with `to_release()`, then no bytes will be released 719 | /// as read. 
720 | /// 721 | /// 722 | /// If the `thumbv6` feature is selected, dropping the grant 723 | /// without releasing it takes a short critical section, 724 | #[derive(Debug, PartialEq)] 725 | pub struct GrantR<'a, const N: usize> { 726 | pub(crate) buf: NonNull<[u8]>, 727 | bbq: NonNull>, 728 | pub(crate) to_release: usize, 729 | phatom: PhantomData<&'a mut [u8]>, 730 | } 731 | 732 | /// A structure representing up to two contiguous regions of memory that 733 | /// may be read from, and potentially "released" (or cleared) 734 | /// from the queue 735 | #[derive(Debug, PartialEq)] 736 | pub struct SplitGrantR<'a, const N: usize> { 737 | pub(crate) buf1: NonNull<[u8]>, 738 | pub(crate) buf2: NonNull<[u8]>, 739 | bbq: NonNull>, 740 | pub(crate) to_release: usize, 741 | phatom: PhantomData<&'a mut [u8]>, 742 | } 743 | 744 | unsafe impl<'a, const N: usize> Send for GrantR<'a, N> {} 745 | 746 | unsafe impl<'a, const N: usize> Send for SplitGrantR<'a, N> {} 747 | 748 | impl<'a, const N: usize> GrantW<'a, N> { 749 | /// Finalizes a writable grant given by `grant()` or `grant_max()`. 750 | /// This makes the data available to be read via `read()`. This consumes 751 | /// the grant. 752 | /// 753 | /// If `used` is larger than the given grant, the maximum amount will 754 | /// be commited 755 | /// 756 | /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical 757 | /// section while committing. 758 | pub fn commit(mut self, used: usize) { 759 | self.commit_inner(used); 760 | forget(self); 761 | } 762 | 763 | /// Obtain access to the inner buffer for writing 764 | /// 765 | /// ```rust 766 | /// # // bbqueue test shim! 
767 | /// # fn bbqtest() { 768 | /// use bbqueue::BBBuffer; 769 | /// 770 | /// // Create and split a new buffer of 6 elements 771 | /// let buffer: BBBuffer<6> = BBBuffer::new(); 772 | /// let (mut prod, mut cons) = buffer.try_split().unwrap(); 773 | /// 774 | /// // Successfully obtain and commit a grant of four bytes 775 | /// let mut grant = prod.grant_max_remaining(4).unwrap(); 776 | /// grant.buf().copy_from_slice(&[1, 2, 3, 4]); 777 | /// grant.commit(4); 778 | /// # // bbqueue test shim! 779 | /// # } 780 | /// # 781 | /// # fn main() { 782 | /// # #[cfg(not(feature = "thumbv6"))] 783 | /// # bbqtest(); 784 | /// # } 785 | /// ``` 786 | pub fn buf(&mut self) -> &mut [u8] { 787 | unsafe { from_raw_parts_mut(self.buf.as_ptr() as *mut u8, self.buf.len()) } 788 | } 789 | 790 | /// Sometimes, it's not possible for the lifetimes to check out. For example, 791 | /// if you need to hand this buffer to a function that expects to receive a 792 | /// `&'static mut [u8]`, it is not possible for the inner reference to outlive the 793 | /// grant itself. 794 | /// 795 | /// # Safety 796 | /// 797 | /// You MUST guarantee that in no cases, the reference that is returned here outlives 798 | /// the grant itself. Once the grant has been released, referencing the data contained 799 | /// WILL cause undefined behavior. 800 | /// 801 | /// Additionally, you must ensure that a separate reference to this data is not created 802 | /// to this data, e.g. using `DerefMut` or the `buf()` method of this grant. 803 | pub unsafe fn as_static_mut_buf(&mut self) -> &'static mut [u8] { 804 | transmute::<&mut [u8], &'static mut [u8]>(self.buf()) 805 | } 806 | 807 | #[inline(always)] 808 | pub(crate) fn commit_inner(&mut self, used: usize) { 809 | let inner = unsafe { &self.bbq.as_ref() }; 810 | 811 | // If there is no grant in progress, return early. 
This 812 | // generally means we are dropping the grant within a 813 | // wrapper structure 814 | if !inner.write_in_progress.load(Acquire) { 815 | return; 816 | } 817 | 818 | // Writer component. Must never write to READ, 819 | // be careful writing to LAST 820 | 821 | // Saturate the grant commit 822 | let len = self.buf.len(); 823 | let used = min(len, used); 824 | 825 | let write = inner.write.load(Acquire); 826 | atomic::fetch_sub(&inner.reserve, len - used, AcqRel); 827 | 828 | let max = N; 829 | let last = inner.last.load(Acquire); 830 | let new_write = inner.reserve.load(Acquire); 831 | 832 | if (new_write < write) && (write != max) { 833 | // We have already wrapped, but we are skipping some bytes at the end of the ring. 834 | // Mark `last` where the write pointer used to be to hold the line here 835 | inner.last.store(write, Release); 836 | } else if new_write > last { 837 | // We're about to pass the last pointer, which was previously the artificial 838 | // end of the ring. Now that we've passed it, we can "unlock" the section 839 | // that was previously skipped. 840 | // 841 | // Since new_write is strictly larger than last, it is safe to move this as 842 | // the other thread will still be halted by the (about to be updated) write 843 | // value 844 | inner.last.store(max, Release); 845 | } 846 | // else: If new_write == last, either: 847 | // * last == max, so no need to write, OR 848 | // * If we write in the end chunk again, we'll update last to max next time 849 | // * If we write to the start chunk in a wrap, we'll update last when we 850 | // move write backwards 851 | 852 | // Write must be updated AFTER last, otherwise read could think it was 853 | // time to invert early! 854 | inner.write.store(new_write, Release); 855 | 856 | // Allow subsequent grants 857 | inner.write_in_progress.store(false, Release); 858 | } 859 | 860 | /// Configures the amount of bytes to be commited on drop. 
    pub fn to_commit(&mut self, amt: usize) {
        // Clamp to the grant length so a drop can never commit more bytes
        // than were actually granted.
        self.to_commit = self.buf.len().min(amt);
    }
}

impl<'a, const N: usize> GrantR<'a, N> {
    /// Release a sequence of bytes from the buffer, allowing the space
    /// to be used by later writes. This consumes the grant.
    ///
    /// If `used` is larger than the given grant, the full grant will
    /// be released.
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while releasing.
    pub fn release(mut self, used: usize) {
        // Saturate the grant release
        let used = min(self.buf.len(), used);

        self.release_inner(used);
        // Skip `Drop`, which would otherwise attempt a second release.
        forget(self);
    }

    // Truncate the grant to its first `len` bytes (used internally, e.g. by
    // the framed wrappers). The `swap` through a temporary moves the slice
    // out of `self` so it can be split without borrowing `self` twice.
    pub(crate) fn shrink(&mut self, len: usize) {
        let mut new_buf: &mut [u8] = &mut [];
        core::mem::swap(&mut self.buf_mut(), &mut new_buf);
        let (new, _) = new_buf.split_at_mut(len);
        self.buf = new.into();
    }

    /// Obtain access to the inner buffer for reading
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Obtain a read grant, and copy to a buffer
    /// let mut grant = cons.read().unwrap();
    /// let mut buf = [0u8; 4];
    /// buf.copy_from_slice(grant.buf());
    /// assert_eq!(&buf, &[1, 2, 3, 4]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn buf(&self) -> &[u8] {
        // Rebuild a shared slice from the stored raw pointer + length.
        unsafe { from_raw_parts(self.buf.as_ptr() as *const u8, self.buf.len()) }
    }

    /// Obtain mutable access to the read grant
    ///
    /// This is useful if you are performing in-place operations
    /// on an incoming packet, such as decryption
    pub fn buf_mut(&mut self) -> &mut [u8] {
        unsafe { from_raw_parts_mut(self.buf.as_ptr() as *mut u8, self.buf.len()) }
    }

    /// Sometimes, it's not possible for the lifetimes to check out. For example,
    /// if you need to hand this buffer to a function that expects to receive a
    /// `&'static [u8]`, it is not possible for the inner reference to outlive the
    /// grant itself.
    ///
    /// # Safety
    ///
    /// You MUST guarantee that in no cases, the reference that is returned here outlives
    /// the grant itself. Once the grant has been released, referencing the data contained
    /// WILL cause undefined behavior.
    ///
    /// Additionally, you must ensure that a separate reference to this data is not created
    /// to this data, e.g. using `Deref` or the `buf()` method of this grant.
    pub unsafe fn as_static_buf(&self) -> &'static [u8] {
        transmute::<&[u8], &'static [u8]>(self.buf())
    }

    #[inline(always)]
    pub(crate) fn release_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.read_in_progress.load(Acquire) {
            return;
        }

        // This should always be checked by the public interfaces
        debug_assert!(used <= self.buf.len());

        // This should be fine, purely incrementing
        let _ = atomic::fetch_add(&inner.read, used, Release);

        // Allow subsequent read grants
        inner.read_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be released on drop.
    pub fn to_release(&mut self, amt: usize) {
        // Clamp to the grant length so a drop can never release more bytes
        // than were actually granted.
        self.to_release = self.buf.len().min(amt);
    }
}

impl<'a, const N: usize> SplitGrantR<'a, N> {
    /// Release a sequence of bytes from the buffer, allowing the space
    /// to be used by later writes. This consumes the grant.
    ///
    /// If `used` is larger than the given grant, the full grant will
    /// be released.
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while releasing.
    pub fn release(mut self, used: usize) {
        // Saturate the grant release
        let used = min(self.combined_len(), used);

        self.release_inner(used);
        // Skip `Drop`, which would otherwise attempt a second release.
        forget(self);
    }

    /// Obtain access to both inner buffers for reading
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of six bytes filling the buffer
    /// let mut grant = prod.grant_max_remaining(6).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4, 5, 6]);
    /// grant.commit(6);
    ///
    /// // Obtain a read grant of all six bytes, but only release four bytes
    /// let mut grant = cons.read().unwrap();
    /// assert_eq!(grant.buf(), &[1, 2, 3, 4, 5, 6]);
    /// grant.release(4);
    ///
    /// // Successfully obtain and commit a grant of two bytes again at the start
    /// // of the buffer creating a split buffer
    /// let mut grant = prod.grant_max_remaining(2).unwrap();
    /// grant.buf().copy_from_slice(&[7, 8]);
    /// grant.commit(2);
    ///
    /// // Obtain a split read grant and release the buffer
    /// let mut grant = cons.split_read().unwrap();
    /// assert_eq!(grant.bufs(), ([5, 6].as_ref(), [7, 8].as_ref()));
    /// grant.release(4);
    ///
    /// # // bbqueue test shim!
1024 | /// # } 1025 | /// # 1026 | /// # fn main() { 1027 | /// # #[cfg(not(feature = "thumbv6"))] 1028 | /// # bbqtest(); 1029 | /// # } 1030 | /// ``` 1031 | pub fn bufs(&self) -> (&[u8], &[u8]) { 1032 | let buf1 = unsafe { from_raw_parts(self.buf1.as_ptr() as *const u8, self.buf1.len()) }; 1033 | let buf2 = unsafe { from_raw_parts(self.buf2.as_ptr() as *const u8, self.buf2.len()) }; 1034 | (buf1, buf2) 1035 | } 1036 | 1037 | /// Obtain mutable access to both parts of the read grant 1038 | /// 1039 | /// This is useful if you are performing in-place operations 1040 | /// on an incoming packet, such as decryption 1041 | pub fn bufs_mut(&mut self) -> (&mut [u8], &mut [u8]) { 1042 | let buf1 = unsafe { from_raw_parts_mut(self.buf1.as_ptr() as *mut u8, self.buf1.len()) }; 1043 | let buf2 = unsafe { from_raw_parts_mut(self.buf2.as_ptr() as *mut u8, self.buf2.len()) }; 1044 | (buf1, buf2) 1045 | } 1046 | 1047 | #[inline(always)] 1048 | pub(crate) fn release_inner(&mut self, used: usize) { 1049 | let inner = unsafe { &self.bbq.as_ref() }; 1050 | 1051 | // If there is no grant in progress, return early. This 1052 | // generally means we are dropping the grant within a 1053 | // wrapper structure 1054 | if !inner.read_in_progress.load(Acquire) { 1055 | return; 1056 | } 1057 | 1058 | // This should always be checked by the public interfaces 1059 | debug_assert!(used <= self.combined_len()); 1060 | 1061 | if used <= self.buf1.len() { 1062 | // This should be fine, purely incrementing 1063 | let _ = atomic::fetch_add(&inner.read, used, Release); 1064 | } else { 1065 | // Also release parts of the second buffer 1066 | inner.read.store(used - self.buf1.len(), Release); 1067 | } 1068 | 1069 | inner.read_in_progress.store(false, Release); 1070 | } 1071 | 1072 | /// Configures the amount of bytes to be released on drop. 
1073 | pub fn to_release(&mut self, amt: usize) { 1074 | self.to_release = self.combined_len().min(amt); 1075 | } 1076 | 1077 | /// The combined length of both buffers 1078 | pub fn combined_len(&self) -> usize { 1079 | self.buf1.len() + self.buf2.len() 1080 | } 1081 | } 1082 | 1083 | impl<'a, const N: usize> Drop for GrantW<'a, N> { 1084 | fn drop(&mut self) { 1085 | self.commit_inner(self.to_commit) 1086 | } 1087 | } 1088 | 1089 | impl<'a, const N: usize> Drop for GrantR<'a, N> { 1090 | fn drop(&mut self) { 1091 | self.release_inner(self.to_release) 1092 | } 1093 | } 1094 | 1095 | impl<'a, const N: usize> Drop for SplitGrantR<'a, N> { 1096 | fn drop(&mut self) { 1097 | self.release_inner(self.to_release) 1098 | } 1099 | } 1100 | 1101 | impl<'a, const N: usize> Deref for GrantW<'a, N> { 1102 | type Target = [u8]; 1103 | 1104 | fn deref(&self) -> &Self::Target { 1105 | unsafe { from_raw_parts_mut(self.buf.as_ptr() as *mut u8, self.buf.len()) } 1106 | } 1107 | } 1108 | 1109 | impl<'a, const N: usize> DerefMut for GrantW<'a, N> { 1110 | fn deref_mut(&mut self) -> &mut [u8] { 1111 | self.buf() 1112 | } 1113 | } 1114 | 1115 | impl<'a, const N: usize> Deref for GrantR<'a, N> { 1116 | type Target = [u8]; 1117 | 1118 | fn deref(&self) -> &Self::Target { 1119 | self.buf() 1120 | } 1121 | } 1122 | 1123 | impl<'a, const N: usize> DerefMut for GrantR<'a, N> { 1124 | fn deref_mut(&mut self) -> &mut [u8] { 1125 | self.buf_mut() 1126 | } 1127 | } 1128 | 1129 | #[cfg(feature = "thumbv6")] 1130 | mod atomic { 1131 | use core::sync::atomic::{ 1132 | AtomicBool, AtomicUsize, 1133 | Ordering::{self, Acquire, Release}, 1134 | }; 1135 | use cortex_m::interrupt::free; 1136 | 1137 | #[inline(always)] 1138 | pub fn fetch_add(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize { 1139 | free(|_| { 1140 | let prev = atomic.load(Acquire); 1141 | atomic.store(prev.wrapping_add(val), Release); 1142 | prev 1143 | }) 1144 | } 1145 | 1146 | #[inline(always)] 1147 | pub fn 
fetch_sub(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize { 1148 | free(|_| { 1149 | let prev = atomic.load(Acquire); 1150 | atomic.store(prev.wrapping_sub(val), Release); 1151 | prev 1152 | }) 1153 | } 1154 | 1155 | #[inline(always)] 1156 | pub fn swap(atomic: &AtomicBool, val: bool, _order: Ordering) -> bool { 1157 | free(|_| { 1158 | let prev = atomic.load(Acquire); 1159 | atomic.store(val, Release); 1160 | prev 1161 | }) 1162 | } 1163 | } 1164 | 1165 | #[cfg(not(feature = "thumbv6"))] 1166 | mod atomic { 1167 | use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 1168 | 1169 | #[inline(always)] 1170 | pub fn fetch_add(atomic: &AtomicUsize, val: usize, order: Ordering) -> usize { 1171 | atomic.fetch_add(val, order) 1172 | } 1173 | 1174 | #[inline(always)] 1175 | pub fn fetch_sub(atomic: &AtomicUsize, val: usize, order: Ordering) -> usize { 1176 | atomic.fetch_sub(val, order) 1177 | } 1178 | 1179 | #[inline(always)] 1180 | pub fn swap(atomic: &AtomicBool, val: bool, order: Ordering) -> bool { 1181 | atomic.swap(val, order) 1182 | } 1183 | } 1184 | -------------------------------------------------------------------------------- /core/src/framed.rs: -------------------------------------------------------------------------------- 1 | //! A Framed flavor of BBQueue, useful for variable length packets 2 | //! 3 | //! This module allows for a `Framed` mode of operation, 4 | //! where a size header is included in each grant, allowing for 5 | //! "chunks" of data to be passed through a BBQueue, rather than 6 | //! just a stream of bytes. This is convenient when receiving 7 | //! packets of variable sizes. 8 | //! 9 | //! ## Example 10 | //! 11 | //! ```rust 12 | //! # // bbqueue test shim! 13 | //! # fn bbqtest() { 14 | //! use bbqueue::BBBuffer; 15 | //! 16 | //! let bb: BBBuffer<1000> = BBBuffer::new(); 17 | //! let (mut prod, mut cons) = bb.try_split_framed().unwrap(); 18 | //! 19 | //! // One frame in, one frame out 20 | //! 
let mut wgrant = prod.grant(128).unwrap(); 21 | //! assert_eq!(wgrant.len(), 128); 22 | //! for (idx, i) in wgrant.iter_mut().enumerate() { 23 | //! *i = idx as u8; 24 | //! } 25 | //! wgrant.commit(128); 26 | //! 27 | //! let rgrant = cons.read().unwrap(); 28 | //! assert_eq!(rgrant.len(), 128); 29 | //! for (idx, i) in rgrant.iter().enumerate() { 30 | //! assert_eq!(*i, idx as u8); 31 | //! } 32 | //! rgrant.release(); 33 | //! # // bbqueue test shim! 34 | //! # } 35 | //! # 36 | //! # fn main() { 37 | //! # #[cfg(not(feature = "thumbv6"))] 38 | //! # bbqtest(); 39 | //! # } 40 | //! ``` 41 | //! 42 | //! ## Frame header 43 | //! 44 | //! An internal header is required for each frame stored 45 | //! inside of the `BBQueue`. This header is never exposed to end 46 | //! users of the bbqueue library. 47 | //! 48 | //! A variable sized integer is used for the header size, and the 49 | //! size of this header is based on the max size requested for the grant. 50 | //! This header size must be factored in when calculating an appropriate 51 | //! total size of your buffer. 52 | //! 53 | //! Even if a smaller portion of the grant is committed, the original 54 | //! requested grant size will be used for the header size calculation. 55 | //! 56 | //! For example, if you request a 128 byte grant, the header size will 57 | //! be two bytes. If you then only commit 32 bytes, two bytes will still 58 | //! be used for the header of that grant. 59 | //! 60 | //! | Grant Size (bytes) | Header size (bytes) | 61 | //! | :--- | :--- | 62 | //! | 1..(2^7) | 1 | 63 | //! | (2^7)..(2^14) | 2 | 64 | //! | (2^14)..(2^21) | 3 | 65 | //! | (2^21)..(2^28) | 4 | 66 | //! | (2^28)..(2^35) | 5 | 67 | //! | (2^35)..(2^42) | 6 | 68 | //! | (2^42)..(2^49) | 7 | 69 | //! | (2^49)..(2^56) | 8 | 70 | //! | (2^56)..(2^64) | 9 | 71 | //! 
72 | 73 | use crate::{Consumer, GrantR, GrantW, Producer}; 74 | 75 | use crate::{ 76 | vusize::{decode_usize, decoded_len, encode_usize_to_slice, encoded_len}, 77 | Result, 78 | }; 79 | 80 | use core::{ 81 | cmp::min, 82 | ops::{Deref, DerefMut}, 83 | }; 84 | 85 | /// A producer of Framed data 86 | pub struct FrameProducer<'a, const N: usize> { 87 | pub(crate) producer: Producer<'a, N>, 88 | } 89 | 90 | impl<'a, const N: usize> FrameProducer<'a, N> { 91 | /// Receive a grant for a frame with a maximum size of `max_sz` in bytes. 92 | /// 93 | /// This size does not include the size of the frame header. The exact size 94 | /// of the frame can be set on `commit`. 95 | pub fn grant(&mut self, max_sz: usize) -> Result<FrameGrantW<'a, N>> { 96 | let hdr_len = encoded_len(max_sz); 97 | Ok(FrameGrantW { 98 | grant_w: self.producer.grant_exact(max_sz + hdr_len)?, 99 | hdr_len: hdr_len as u8, 100 | }) 101 | } 102 | } 103 | 104 | /// A consumer of Framed data 105 | pub struct FrameConsumer<'a, const N: usize> { 106 | pub(crate) consumer: Consumer<'a, N>, 107 | } 108 | 109 | impl<'a, const N: usize> FrameConsumer<'a, N> { 110 | /// Obtain the next available frame, if any 111 | pub fn read(&mut self) -> Option<FrameGrantR<'a, N>> { 112 | // Get all available bytes. We never wrap a frame around, 113 | // so if a header is available, the whole frame will be. 114 | let mut grant_r = self.consumer.read().ok()?; 115 | 116 | // Additionally, we never commit less than a full frame with 117 | // a header, so if we have ANY data, we'll have a full header 118 | // and frame. `Consumer::read` will return an Error when 119 | // there are 0 bytes available. 
120 | 121 | // The header consists of a single usize, encoded in native 122 | // endianess order 123 | let frame_len = decode_usize(&grant_r); 124 | let hdr_len = decoded_len(grant_r[0]); 125 | let total_len = frame_len + hdr_len; 126 | let hdr_len = hdr_len as u8; 127 | 128 | debug_assert!(grant_r.len() >= total_len); 129 | 130 | // Reduce the grant down to the size of the frame with a header 131 | grant_r.shrink(total_len); 132 | 133 | Some(FrameGrantR { grant_r, hdr_len }) 134 | } 135 | } 136 | 137 | /// A write grant for a single frame 138 | /// 139 | /// NOTE: If the grant is dropped without explicitly commiting 140 | /// the contents without first calling `to_commit()`, then no 141 | /// frame will be comitted for writing. 142 | #[derive(Debug, PartialEq)] 143 | pub struct FrameGrantW<'a, const N: usize> { 144 | grant_w: GrantW<'a, N>, 145 | hdr_len: u8, 146 | } 147 | 148 | /// A read grant for a single frame 149 | /// 150 | /// NOTE: If the grant is dropped without explicitly releasing 151 | /// the contents, then no frame will be released. 152 | #[derive(Debug, PartialEq)] 153 | pub struct FrameGrantR<'a, const N: usize> { 154 | grant_r: GrantR<'a, N>, 155 | hdr_len: u8, 156 | } 157 | 158 | impl<'a, const N: usize> Deref for FrameGrantW<'a, N> { 159 | type Target = [u8]; 160 | 161 | fn deref(&self) -> &Self::Target { 162 | &self.grant_w[self.hdr_len.into()..] 163 | } 164 | } 165 | 166 | impl<'a, const N: usize> DerefMut for FrameGrantW<'a, N> { 167 | fn deref_mut(&mut self) -> &mut [u8] { 168 | &mut self.grant_w[self.hdr_len.into()..] 169 | } 170 | } 171 | 172 | impl<'a, const N: usize> Deref for FrameGrantR<'a, N> { 173 | type Target = [u8]; 174 | 175 | fn deref(&self) -> &Self::Target { 176 | &self.grant_r[self.hdr_len.into()..] 177 | } 178 | } 179 | 180 | impl<'a, const N: usize> DerefMut for FrameGrantR<'a, N> { 181 | fn deref_mut(&mut self) -> &mut [u8] { 182 | &mut self.grant_r[self.hdr_len.into()..] 
183 | } 184 | } 185 | 186 | impl<'a, const N: usize> FrameGrantW<'a, N> { 187 | /// Commit a frame to make it available to the Consumer half. 188 | /// 189 | /// `used` is the size of the payload, in bytes, not 190 | /// including the frame header 191 | pub fn commit(mut self, used: usize) { 192 | let total_len = self.set_header(used); 193 | 194 | // Commit the header + frame 195 | self.grant_w.commit(total_len); 196 | } 197 | 198 | /// Set the header and return the total size 199 | fn set_header(&mut self, used: usize) -> usize { 200 | // Saturate the commit size to the available frame size 201 | let grant_len = self.grant_w.len(); 202 | let hdr_len: usize = self.hdr_len.into(); 203 | let frame_len = min(used, grant_len - hdr_len); 204 | let total_len = frame_len + hdr_len; 205 | 206 | // Write the actual frame length to the header 207 | encode_usize_to_slice(frame_len, hdr_len, &mut self.grant_w[..hdr_len]); 208 | 209 | total_len 210 | } 211 | 212 | /// Configures the amount of bytes to be commited on drop. 
213 | pub fn to_commit(&mut self, amt: usize) { 214 | if amt == 0 { 215 | self.grant_w.to_commit(0); 216 | } else { 217 | let size = self.set_header(amt); 218 | self.grant_w.to_commit(size); 219 | } 220 | } 221 | } 222 | 223 | impl<'a, const N: usize> FrameGrantR<'a, N> { 224 | /// Release a frame to make the space available for future writing 225 | /// 226 | /// Note: The full frame is always released 227 | pub fn release(mut self) { 228 | // For a read grant, we have already shrunk the grant 229 | // size down to the correct size 230 | let len = self.grant_r.len(); 231 | self.grant_r.release_inner(len); 232 | } 233 | 234 | /// Set whether the read fram should be automatically released 235 | pub fn auto_release(&mut self, is_auto: bool) { 236 | self.grant_r 237 | .to_release(if is_auto { self.grant_r.len() } else { 0 }); 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /core/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # BBQueue 2 | //! 3 | //! BBQueue, short for "BipBuffer Queue", is a Single Producer Single Consumer, 4 | //! lockless, no_std, thread safe, queue, based on [BipBuffers]. For more info on 5 | //! the design of the lock-free algorithm used by bbqueue, see [this blog post]. 6 | //! 7 | //! For a 90 minute guided tour of BBQueue, you can also view this [guide on YouTube]. 8 | //! 9 | //! [guide on YouTube]: https://www.youtube.com/watch?v=ngTCf2cnGkY 10 | //! [BipBuffers]: https://www.codeproject.com/Articles/3479/%2FArticles%2F3479%2FThe-Bip-Buffer-The-Circular-Buffer-with-a-Twist 11 | //! [this blog post]: https://ferrous-systems.com/blog/lock-free-ring-buffer/ 12 | //! 13 | //! BBQueue is designed (primarily) to be a First-In, First-Out queue for use with DMA on embedded 14 | //! systems. 15 | //! 16 | //! While Circular/Ring Buffers allow you to send data between two threads (or from an interrupt to 17 | //! 
main code), you must push the data one piece at a time. With BBQueue, you instead are granted a 18 | //! block of contiguous memory, which can be filled (or emptied) by a DMA engine. 19 | //! 20 | //! ## Local usage 21 | //! 22 | //! ```rust, no_run 23 | //! # use bbqueue::BBBuffer; 24 | //! # 25 | //! // Create a buffer with six elements 26 | //! let bb: BBBuffer<6> = BBBuffer::new(); 27 | //! let (mut prod, mut cons) = bb.try_split().unwrap(); 28 | //! 29 | //! // Request space for one byte 30 | //! let mut wgr = prod.grant_exact(1).unwrap(); 31 | //! 32 | //! // Set the data 33 | //! wgr[0] = 123; 34 | //! 35 | //! assert_eq!(wgr.len(), 1); 36 | //! 37 | //! // Make the data ready for consuming 38 | //! wgr.commit(1); 39 | //! 40 | //! // Read all available bytes 41 | //! let rgr = cons.read().unwrap(); 42 | //! 43 | //! assert_eq!(rgr[0], 123); 44 | //! 45 | //! // Release the space for later writes 46 | //! rgr.release(1); 47 | //! ``` 48 | //! 49 | //! ## Static usage 50 | //! 51 | //! ```rust, no_run 52 | //! # use bbqueue::BBBuffer; 53 | //! # 54 | //! // Create a buffer with six elements 55 | //! static BB: BBBuffer<6> = BBBuffer::new(); 56 | //! 57 | //! fn main() { 58 | //! // Split the bbqueue into producer and consumer halves. 59 | //! // These halves can be sent to different threads or to 60 | //! // an interrupt handler for thread safe SPSC usage 61 | //! let (mut prod, mut cons) = BB.try_split().unwrap(); 62 | //! 63 | //! // Request space for one byte 64 | //! let mut wgr = prod.grant_exact(1).unwrap(); 65 | //! 66 | //! // Set the data 67 | //! wgr[0] = 123; 68 | //! 69 | //! assert_eq!(wgr.len(), 1); 70 | //! 71 | //! // Make the data ready for consuming 72 | //! wgr.commit(1); 73 | //! 74 | //! // Read all available bytes 75 | //! let rgr = cons.read().unwrap(); 76 | //! 77 | //! assert_eq!(rgr[0], 123); 78 | //! 79 | //! // Release the space for later writes 80 | //! rgr.release(1); 81 | //! 82 | //! // The buffer cannot be split twice 83 | //! 
assert!(BB.try_split().is_err()); 84 | //! } 85 | //! ``` 86 | //! 87 | //! ## Features 88 | //! 89 | //! By default BBQueue uses atomic operations which are available on most platforms. However on some 90 | //! (mostly embedded) platforms atomic support is limited and with the default features you will get 91 | //! a compiler error about missing atomic methods. 92 | //! 93 | //! This crate contains special support for Cortex-M0(+) targets with the `thumbv6` feature. By 94 | //! enabling the feature, unsupported atomic operations will be replaced with critical sections 95 | //! implemented by disabling interrupts. The critical sections are very short, a few instructions at 96 | //! most, so they should make no difference to most applications. 97 | 98 | #![no_std] 99 | #![deny(missing_docs)] 100 | #![deny(warnings)] 101 | 102 | mod bbbuffer; 103 | pub use bbbuffer::*; 104 | 105 | pub mod framed; 106 | mod vusize; 107 | 108 | use core::result::Result as CoreResult; 109 | 110 | /// Result type used by the `BBQueue` interfaces 111 | pub type Result<T> = CoreResult<T, Error>; 112 | 113 | /// Error type used by the `BBQueue` interfaces 114 | #[derive(Debug, PartialEq, Eq, Copy, Clone)] 115 | #[cfg_attr(feature = "defmt_0_3", derive(defmt::Format))] 116 | pub enum Error { 117 | /// The buffer does not contain sufficient size for the requested action 118 | InsufficientSize, 119 | 120 | /// Unable to produce another grant, a grant of this type is already in 121 | /// progress 122 | GrantInProgress, 123 | 124 | /// Unable to split the buffer, as it has already been split 125 | AlreadySplit, 126 | } 127 | -------------------------------------------------------------------------------- /core/src/vusize.rs: -------------------------------------------------------------------------------- 1 | //! Varints 2 | //! 3 | //! This implementation borrows heavily from the `vint64` crate. 4 | //! 5 | //! Below is an example of how prefix bits signal the length of the integer value 6 | //! 
which follows: 7 | //! 8 | //! | Prefix | Precision | Total Bytes | 9 | //! |------------|-----------|-------------| 10 | //! | `xxxxxxx1` | 7 bits | 1 byte | 11 | //! | `xxxxxx10` | 14 bits | 2 bytes | 12 | //! | `xxxxx100` | 21 bits | 3 bytes | 13 | //! | `xxxx1000` | 28 bits | 4 bytes | 14 | //! | `xxx10000` | 35 bits | 5 bytes | 15 | //! | `xx100000` | 42 bits | 6 bytes | 16 | //! | `x1000000` | 49 bits | 7 bytes | 17 | //! | `10000000` | 56 bits | 8 bytes | 18 | //! | `00000000` | 64 bits | 9 bytes | 19 | //! 20 | //! ## Note 21 | //! 22 | //! Although this scheme supports up to 64 bits, it will only ever allow encoding 23 | //! and decoding of up to `usize::max()` of the current platform. In practice, 24 | //! this is not an issue, as you cannot send data larger than the address space 25 | //! of your platform anyway. 26 | //! 27 | //! ## Important warning 28 | //! 29 | //! This implementation is NOT suitable for data that is passed between multiple 30 | //! platforms, particularly those of different pointer sizes. If you are interested 31 | //! in portably serializing/deserializing data, consider using the `vint64` crate. 32 | //! This implementation makes assumptions that data larger than the platform's 33 | //! `usize::max()` will never be encoded/decoded, which is not true when sending 34 | //! between 32-bit and 64-bit platforms. 35 | //! 36 | //! For bbqueue, the sender doing the encoding (the `Producer`) and the receiver 37 | //! doing the decoding (the `Consumer`) will always reside within the same application 38 | //! running on the same machine, meaning we CAN make these non-portable 39 | //! assumptions for the sake of performance/simplicity. 40 | //! 41 | //! Because `vusize` is an internal implementation detail of `BBQueue`, this does **NOT** 42 | //! affect portability when sending data from one machine to another. Here's a diagram 43 | //! explaining that: 44 | //! 45 | //! ```text 46 | //! interrupt sending bytes out 47 | //! 
over the serial port 48 | //! | 49 | //! application creating | 50 | //! data to send | 51 | //! | | 52 | //! v v 53 | //! [ embedded system ] [ PC system ] 54 | //! [ [bbq producer] => [bbq consumer] ] => [ ] 55 | //! [ ] [ ] 56 | //! ^ ^ 57 | //! | | 58 | //! `vusize` lives here | 59 | //! | 60 | //! bytes sent over a serial 61 | //! port, in order. Frame 62 | //! information is not sent over 63 | //! the wire. 64 | //! ``` 65 | 66 | const USIZE_SIZE: usize = core::mem::size_of::<usize>(); 67 | const USIZE_SIZE_PLUS_ONE: usize = USIZE_SIZE + 1; 68 | 69 | const fn max_size_header() -> u8 { 70 | // 64-bit: 0b0000_0000 71 | // 32-bit: 0b0001_0000 72 | // 16-bit: 0b0000_0100 73 | // 8-bit: 0b0000_0010 74 | ((1usize << USIZE_SIZE) & 0xFF) as u8 75 | } 76 | 77 | /// Get the length of an encoded `usize` for the given value in bytes. 78 | #[cfg(target_pointer_width = "64")] 79 | pub fn encoded_len(value: usize) -> usize { 80 | match value.leading_zeros() { 81 | 0..=7 => 9, 82 | 8..=14 => 8, 83 | 15..=21 => 7, 84 | 22..=28 => 6, 85 | 29..=35 => 5, 86 | 36..=42 => 4, 87 | 43..=49 => 3, 88 | 50..=56 => 2, 89 | 57..=64 => 1, 90 | _ => { 91 | // SAFETY: 92 | // 93 | // The `leading_zeros` intrinsic returns the number of bits that 94 | // contain a zero bit. The result will always be in the range of 95 | // 0..=64 for a 64 bit `usize`, so the above pattern is exhaustive, however 96 | // it is not exhaustive over the return type of `u32`. Because of 97 | // this, we mark the "uncovered" part of the match as unreachable 98 | // for performance reasons. 99 | #[allow(unsafe_code)] 100 | unsafe { 101 | core::hint::unreachable_unchecked() 102 | } 103 | } 104 | } 105 | } 106 | 107 | /// Get the length of an encoded `usize` for the given value in bytes. 
108 | #[cfg(target_pointer_width = "32")] 109 | pub fn encoded_len(value: usize) -> usize { 110 | match value.leading_zeros() { 111 | 0..=3 => 5, 112 | 4..=10 => 4, 113 | 11..=17 => 3, 114 | 18..=24 => 2, 115 | 25..=32 => 1, 116 | _ => { 117 | // SAFETY: 118 | // 119 | // The `leading_zeros` intrinsic returns the number of bits that 120 | // contain a zero bit. The result will always be in the range of 121 | // 0..=32 for a 32 bit `usize`, so the above pattern is exhaustive, however 122 | // it is not exhaustive over the return type of `u32`. Because of 123 | // this, we mark the "uncovered" part of the match as unreachable 124 | // for performance reasons. 125 | #[allow(unsafe_code)] 126 | unsafe { 127 | core::hint::unreachable_unchecked() 128 | } 129 | } 130 | } 131 | } 132 | 133 | /// Get the length of an encoded `usize` for the given value in bytes. 134 | #[cfg(target_pointer_width = "16")] 135 | pub fn encoded_len(value: usize) -> usize { 136 | match value.leading_zeros() { 137 | 0..=1 => 3, 138 | 2..=8 => 2, 139 | 9..=16 => 1, 140 | _ => { 141 | // SAFETY: 142 | // 143 | // The `leading_zeros` intrinsic returns the number of bits that 144 | // contain a zero bit. The result will always be in the range of 145 | // 0..=16 for a 16 bit `usize`, so the above pattern is exhaustive, however 146 | // it is not exhaustive over the return type of `u32`. Because of 147 | // this, we mark the "uncovered" part of the match as unreachable 148 | // for performance reasons. 149 | #[allow(unsafe_code)] 150 | unsafe { 151 | core::hint::unreachable_unchecked() 152 | } 153 | } 154 | } 155 | } 156 | 157 | /// Encode the given usize to the `slice`, using `length` bytes for encoding. 
158 | /// 159 | /// ## Safety 160 | /// 161 | /// * `slice.len()` must be >= `length` or this function will panic 162 | /// * `length` must be `>= encoded_len(value)` or the value will be truncated 163 | /// * `length` must be `<= size_of::<usize>() + 1` or the value will be truncated 164 | pub fn encode_usize_to_slice(value: usize, length: usize, slice: &mut [u8]) { 165 | debug_assert!( 166 | encoded_len(value) <= length, 167 | "Tried to encode to smaller than necessary length!", 168 | ); 169 | debug_assert!(length <= slice.len(), "Not enough space to encode!",); 170 | debug_assert!( 171 | length <= USIZE_SIZE_PLUS_ONE, 172 | "Tried to encode larger than platform supports!", 173 | ); 174 | 175 | let header_bytes = &mut slice[..length]; 176 | 177 | if length >= USIZE_SIZE_PLUS_ONE { 178 | // In the case where the number of bytes is larger than `usize`, 179 | // don't try to encode bits in the header byte, just create the header 180 | // and place all of the length bytes in subsequent bytes 181 | header_bytes[0] = max_size_header(); 182 | header_bytes[1..USIZE_SIZE_PLUS_ONE].copy_from_slice(&value.to_le_bytes()); 183 | } else { 184 | let encoded = (value << 1 | 1) << (length - 1); 185 | header_bytes.copy_from_slice(&encoded.to_le_bytes()[..length]); 186 | } 187 | } 188 | 189 | /// Determine the size of the encoded value (in bytes) based on the 190 | /// encoded header 191 | pub fn decoded_len(byte: u8) -> usize { 192 | byte.trailing_zeros() as usize + 1 193 | } 194 | 195 | /// Decode an encoded usize. 196 | /// 197 | /// Accepts a slice containing the encoded usize. 
198 | pub fn decode_usize(input: &[u8]) -> usize { 199 | let length = decoded_len(input[0]); 200 | 201 | debug_assert!(input.len() >= length, "Not enough data to decode!",); 202 | debug_assert!( 203 | length <= USIZE_SIZE_PLUS_ONE, 204 | "Tried to decode data too large for this platform!", 205 | ); 206 | 207 | let header_bytes = &input[..length]; 208 | 209 | let mut encoded = [0u8; USIZE_SIZE]; 210 | 211 | if length >= USIZE_SIZE_PLUS_ONE { 212 | // usize + 1 special case, see `encode_usize_to_slice()` for details 213 | encoded.copy_from_slice(&header_bytes[1..]); 214 | usize::from_le_bytes(encoded) 215 | } else { 216 | encoded[..length].copy_from_slice(header_bytes); 217 | usize::from_le_bytes(encoded) >> length 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /tsan-blacklist.txt: -------------------------------------------------------------------------------- 1 | # false positives in thread::spawn (?) 2 | race:*dealloc 3 | race:*drop_slow* 4 | race:__call_tls_dtors 5 | 6 | # false positives in scoped_threadpool (?) 7 | race:*drop* 8 | --------------------------------------------------------------------------------