├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .pre-commit-config.yaml ├── Cargo.toml ├── LICENSE ├── README.md └── src ├── buffer.rs ├── buffer ├── array.rs └── vec.rs ├── error.rs ├── lib.rs ├── loom.rs ├── notify.rs ├── queue.rs ├── synchronized.rs ├── synchronized ├── atomic_waker.rs ├── waker.rs └── waker_list.rs ├── utils.rs ├── write.rs ├── write ├── array.rs └── vec.rs ├── write_vectored.rs └── write_vectored ├── array.rs └── vec.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v3 14 | - name: rustfmt 15 | run: cargo fmt -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate,format_code_in_doc_comments=true" 16 | - uses: taiki-e/install-action@cargo-hack 17 | - name: clippy 18 | run: cargo hack clippy --feature-powerset -- -D warnings 19 | - name: test 20 | run: cargo test --all-features 21 | - name: install miri 22 | run: rustup install nightly && rustup +nightly component add miri 23 | - name: miri 24 | run: cargo +nightly miri test --all-features --many-seeds 25 | - name: loom 26 | run: cargo test --release --lib 27 | env: 28 | RUSTFLAGS: "--cfg loom" 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/crate-ci/typos 3 | rev: v1.12.0 4 | hooks: 5 | - id: typos 6 | args: [] 7 | - repo: local 8 | hooks: 9 | - id: fmt 10 | name: fmt 11 | entry: cargo fmt -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate,format_code_in_doc_comments=true" 12 | language: rust 13 | pass_filenames: false 14 | types_or: [rust, toml] 15 | - id: clippy 16 | name: clippy 17 | entry: cargo clippy --no-default-features -- -D warnings 18 | language: rust 19 | pass_filenames: false 20 | types_or: [rust, toml] 21 | - id: clippy 22 | name: clippy 23 | entry: cargo clippy --all-features --tests -- -D warnings 24 | language: rust 25 | pass_filenames: false 26 | types_or: [rust, toml] 27 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "swap-buffer-queue" 3 | version = "0.2.1" 4 | edition = "2021" 5 | exclude = [".*"] 6 | description = "A buffering MPSC queue." 
7 | homepage = "https://github.com/wyfo/swap-buffer-queue" 8 | readme = "README.md" 9 | keywords = [ 10 | "atomic", 11 | "lock-free", 12 | "no-std", 13 | "mpsc", 14 | "async", 15 | ] 16 | categories = [ 17 | "concurrency", 18 | "data-structures", 19 | "no-std", 20 | ] 21 | license = "MIT" 22 | repository = "https://github.com/wyfo/swap-buffer-queue" 23 | 24 | [features] 25 | default = ["std"] 26 | alloc = [] 27 | std = ["alloc"] 28 | stream = ["std", "dep:futures-core", "dep:futures-util"] 29 | write = [] 30 | 31 | [dependencies] 32 | crossbeam-utils = { version = "0.8", default-features = false } 33 | futures-core = { version = "0.3", default-features = false, optional = true } 34 | futures-util = { version = "0.3", default-features = false, optional = true } 35 | 36 | [dev-dependencies] 37 | tokio-test = "0.4" 38 | tokio = { version = "1", features = ["macros", "rt-multi-thread", "test-util", "time"] } 39 | 40 | [target.'cfg(loom)'.dev-dependencies] 41 | loom = { version = "0.7", features = ["futures"] } 42 | 43 | [lints.rust] 44 | unexpected_cfgs = { level = "warn", check-cfg = ['cfg(loom)'] } 45 | 46 | # see https://users.rust-lang.org/t/how-to-document-optional-features-in-api-docs/64577/3 47 | [package.metadata.docs.rs] 48 | all-features = true 49 | rustdoc-args = [ 50 | "--cfg", 51 | "docsrs", 52 | ] 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Joseph Perez 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # swap-buffer-queue 2 | 3 | [![License](https://img.shields.io/badge/license-MIT-blue.svg)]( 4 | https://github.com/wyfo/swap-buffer-queue/blob/main/LICENSE) 5 | [![Cargo](https://img.shields.io/crates/v/swap-buffer-queue.svg)]( 6 | https://crates.io/crates/swap-buffer-queue) 7 | [![Documentation](https://docs.rs/swap-buffer-queue/badge.svg)]( 8 | https://docs.rs/swap-buffer-queue) 9 | 10 | A buffering MPSC queue. 11 | 12 | This library is intended to be a (better, I hope) alternative to traditional MPSC queues in the context of a buffering consumer, by moving the buffering part directly into the queue. 
13 | 
14 | It is especially well suited for IO writing workflows, see [buffer implementations](#buffer-implementations).
15 | 
16 | The crate is `no_std` – some buffer implementations may require the `alloc` crate.
17 | 
18 | In addition to the low-level `Queue` implementation, a higher-level `SynchronizedQueue` is provided with both blocking and asynchronous methods. The synchronization feature requires `std`.
19 | 
20 | 
21 | ## Example
22 | 
23 | ```rust
24 | use std::ops::Deref;
25 | use swap_buffer_queue::{buffer::{IntoValueIter, VecBuffer}, Queue};
26 | 
27 | // Initialize the queue with a capacity
28 | let queue: Queue<VecBuffer<usize>> = Queue::with_capacity(42);
29 | // Enqueue some value
30 | queue.try_enqueue([0]).unwrap();
31 | // Multiple values can be enqueued at the same time
32 | // (optimized compared to enqueuing them one by one)
33 | queue.try_enqueue([1, 2]).unwrap();
34 | let mut values = vec![3, 4];
35 | queue
36 |     .try_enqueue(values.drain(..).into_value_iter())
37 |     .unwrap();
38 | // Dequeue a slice of the enqueued values
39 | let slice = queue.try_dequeue().unwrap();
40 | assert_eq!(slice.deref(), &[0, 1, 2, 3, 4]);
41 | // Enqueued values can also be retrieved by value
42 | assert_eq!(slice.into_iter().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
43 | ```
44 | 
45 | 
46 | ## Buffer implementations
47 | 
48 | In addition to the simple [`ArrayBuffer`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/buffer/struct.ArrayBuffer.html) and [`VecBuffer`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/buffer/struct.VecBuffer.html), this crate provides useful write-oriented implementations.
49 | 
50 | ### [`write`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/write/index.html)
51 | 
52 | [`WriteArrayBuffer`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/write/struct.WriteArrayBuffer.html) and
53 | [`WriteVecBuffer`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/write/struct.WriteVecBuffer.html) are well suited when objects have to be serialized with a known serialization size. Objects can then be serialized directly into the queue's buffer, avoiding an extra allocation.
54 | 
55 | ```rust
56 | use std::io::Write;
57 | use swap_buffer_queue::{Queue, write::{WriteBytesSlice, WriteVecBuffer}};
58 | 
59 | // Creates a WriteVecBuffer queue with a 2-byte header
60 | let queue: Queue<WriteVecBuffer<2>> = Queue::with_capacity((1 << 16) - 1);
61 | queue
62 |     .try_enqueue((256, |slice: &mut [u8]| { /* write the slice */ }))
63 |     .unwrap();
64 | queue
65 |     .try_enqueue((42, |slice: &mut [u8]| { /* write the slice */ }))
66 |     .unwrap();
67 | let mut slice = queue.try_dequeue().unwrap();
68 | // Adds a header with the length of the buffer
69 | let len = (slice.len() as u16).to_be_bytes();
70 | slice.header().copy_from_slice(&len);
71 | // Let's pretend we have a writer
72 | let mut writer: Vec<u8> = Default::default();
73 | assert_eq!(writer.write(slice.frame()).unwrap(), 300);
74 | ```
75 | 
76 | ### [`write_vectored`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/write_vectored/index.html)
77 | 
78 | [`WriteVectoredArrayBuffer`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/write_vectored/struct.WriteVectoredArrayBuffer.html) and
79 | [`WriteVectoredVecBuffer`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/write_vectored/struct.WriteVectoredVecBuffer.html) allow buffering a slice of [`IoSlice`](https://doc.rust-lang.org/std/io/struct.IoSlice.html)s, saving the cost of dequeuing io-slices one by one only to collect them afterwards.
80 | (Internally, two buffers are used: one for the values, and one for the io-slices.)
81 | 
82 | As a convenience, the total size of the buffered io-slices can be retrieved.
83 | 
84 | ```rust
85 | use std::io::Write;
86 | use swap_buffer_queue::{Queue, write_vectored::WriteVectoredVecBuffer};
87 | 
88 | // Creates a WriteVectoredVecBuffer queue
89 | let queue: Queue<WriteVectoredVecBuffer<Vec<u8>>> = Queue::with_capacity(100);
90 | queue.try_enqueue([vec![0; 256]]).unwrap();
91 | queue.try_enqueue([vec![42; 42]]).unwrap();
92 | let mut total_size = 0u16.to_be_bytes();
93 | let mut slice = queue.try_dequeue().unwrap();
94 | // Adds a header with the total size of the slices
95 | total_size.copy_from_slice(&(slice.total_size() as u16).to_be_bytes());
96 | let mut frame = slice.frame(.., Some(&total_size), None);
97 | // Let's pretend we have a writer
98 | let mut writer: Vec<u8> = Default::default();
99 | assert_eq!(writer.write_vectored(&mut frame).unwrap(), 300);
100 | ```
101 | 
102 | ## How it works
103 | 
104 | Internally, this queue uses two buffers: one is used for enqueuing while the other is being dequeued.
105 | 
106 | When [`Queue::try_enqueue`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/struct.Queue.html#method.try_enqueue) is called, it atomically reserves a slot in the current enqueuing buffer. The value is then inserted into that slot.
107 | 
108 | When [`Queue::try_dequeue`](https://docs.rs/swap-buffer-queue/latest/swap_buffer_queue/struct.Queue.html#method.try_dequeue) is called, both buffers are swapped atomically, so the dequeued buffer contains the previously enqueued values, while newly enqueued ones go to the other (empty) buffer.
109 | 
110 | As the two-phase enqueuing cannot be atomic, the queue can be in a transitory state where slots have been reserved but not yet written. In this rare case, dequeuing will fail and has to be retried.
111 | 
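To make this transitory state concrete, here is a minimal sketch of a consumer loop that handles it. It uses the same `Queue<VecBuffer<usize>>` type as the example above; the `drain` helper is purely illustrative:

```rust
use swap_buffer_queue::{buffer::VecBuffer, error::TryDequeueError, Queue};

// Dequeue everything currently buffered, retrying while a concurrent enqueuer
// has reserved a slot but has not finished writing it yet.
fn drain(queue: &Queue<VecBuffer<usize>>) -> Vec<usize> {
    loop {
        match queue.try_dequeue() {
            Ok(slice) => return slice.into_iter().collect(),
            // Transitory state: retry until the pending insertions are finished.
            Err(TryDequeueError::Pending) => std::thread::yield_now(),
            // Nothing to dequeue, or the queue is closed/concurrently dequeued.
            Err(_) => return Vec::new(),
        }
    }
}
```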
112 | ## Fairness
113 | 
114 | The `SynchronizedQueue` implementation is not fair, i.e. it doesn't ensure that the oldest blocked enqueuer will succeed when capacity becomes available again.
115 | 
116 | However, this issue is largely mitigated by the fact that all of the capacity becomes available at once, so all blocked enqueuers may succeed at the same time (especially with values of size one).
117 | 
118 | For the particular case of potentially big variable-sized values, it's still possible to combine the queue with a semaphore, e.g. `tokio::sync::Semaphore`, as sketched below. Performance will be impacted, but the algorithm is [fast enough](#performance) to afford it.
119 | 
120 | I'm still thinking about a way to include fairness directly in the algorithm, but it's not an easy thing to do.
121 | 
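For illustration, here is a minimal sketch of the semaphore idea above. It only assumes the `SynchronizedQueue` API shown in this README and tokio's FIFO-fair `Semaphore`; the `FairQueue` wrapper itself is hypothetical and not part of the crate:

```rust
use std::sync::Arc;

use swap_buffer_queue::{
    buffer::{IntoValueIter, VecBuffer},
    SynchronizedQueue,
};
use tokio::sync::Semaphore;

// Hypothetical wrapper: the fair semaphore hands out capacity in arrival order,
// so enqueuers are served FIFO even though the queue itself is not fair.
struct FairQueue {
    queue: SynchronizedQueue<VecBuffer<usize>>,
    capacity: Semaphore,
}

impl FairQueue {
    fn with_capacity(capacity: usize) -> Arc<Self> {
        Arc::new(Self {
            queue: SynchronizedQueue::with_capacity(capacity),
            capacity: Semaphore::new(capacity),
        })
    }

    // Assumes each batch is smaller than the total capacity.
    async fn enqueue(&self, values: Vec<usize>) {
        // Wait for enough capacity, in FIFO order.
        let permit = self.capacity.acquire_many(values.len() as u32).await.unwrap();
        // Keep the capacity reserved until the values are dequeued.
        permit.forget();
        // Capacity was reserved above, so insufficient capacity cannot happen here.
        self.queue.try_enqueue(values.into_value_iter()).unwrap();
    }

    // Single consumer, as the queue is MPSC.
    async fn dequeue(&self) -> Vec<usize> {
        let values: Vec<usize> = self
            .queue
            .dequeue_async()
            .await
            .unwrap()
            .into_iter()
            .collect();
        // Give the freed capacity back to blocked enqueuers.
        self.capacity.add_permits(values.len());
        values
    }
}
```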
122 | ## Unsafe
123 | 
124 | This library uses unsafe code, for three reasons:
125 | - buffers are wrapped in `UnsafeCell` to allow mutable access to the dequeued buffer;
126 | - buffer implementations may use unsafe code to allow insertion through a shared reference;
127 | - the `Buffer` trait requires an unsafe interface for its invariants, because it is public.
128 | 
129 | To ensure the safety of the algorithm, the library relies on:
130 | - tests (mostly doctests for now, but they need to be completed)
131 | - benchmarks
132 | - Miri (running the tests)
133 | 
134 | Loom is partially integrated for now, but loom tests are on the TODO list.
135 | 
136 | ## Performance
137 | 
138 | swap-buffer-queue is very performant – it's actually the fastest MPSC queue I know.
139 | 
140 | Here are the results of the [forked crossbeam benchmark](https://github.com/wyfo/crossbeam/tree/bench_sbq/crossbeam-channel/benchmarks):
141 | 
142 | | benchmark     | crossbeam | swap-buffer-queue |
143 | |---------------|-----------|-------------------|
144 | | bounded1_mpsc | 1.545s    | 1.763s            |
145 | | bounded1_spsc | 1.652s    | 1.000s            |
146 | | bounded_mpsc  | 0.362s    | 0.137s            |
147 | | bounded_seq   | 0.190s    | 0.114s            |
148 | | bounded_spsc  | 0.115s    | 0.092s            |
149 | 
150 | However, a large enough capacity is required to reach maximum performance; otherwise, high-contention scenarios may be penalized.
151 | This is because the algorithm puts all the contention on a single atomic integer (whereas crossbeam uses two).
--------------------------------------------------------------------------------
/src/buffer.rs:
--------------------------------------------------------------------------------
1 | //! [`Buffer`] definition and simple implementations.
2 | 
3 | use core::{
4 |     fmt,
5 |     iter::FusedIterator,
6 |     marker::PhantomData,
7 |     mem::ManuallyDrop,
8 |     ops::{Deref, DerefMut, Range},
9 |     ptr,
10 | };
11 | 
12 | use crate::queue::Queue;
13 | 
14 | mod array;
15 | #[cfg(feature = "alloc")]
16 | mod vec;
17 | 
18 | pub use array::ArrayBuffer;
19 | #[cfg(feature = "alloc")]
20 | pub use vec::VecBuffer;
21 | 
22 | /// [`Queue`] buffer. It is used together with [`InsertIntoBuffer`].
23 | ///
24 | /// # Safety
25 | /// [`Buffer::clear`] *clears* the inserted range from the buffer
26 | /// (see [`InsertIntoBuffer::insert_into`]), meaning new values can be inserted.
27 | pub unsafe trait Buffer: Default {
28 |     /// The slice type returned by the [`slice`](Buffer::slice) method.
29 |     type Slice<'a>
30 |     where
31 |         Self: 'a;
32 |     /// Returns the buffer's capacity.
33 |     fn capacity(&self) -> usize;
34 |     /// Returns a slice of the buffer.
35 |     ///
36 |     /// # Safety
37 |     /// Range **must** have been inserted (see [`InsertIntoBuffer::insert_into`]) before calling
38 |     /// this method.
39 |     unsafe fn slice(&mut self, range: Range<usize>) -> Self::Slice<'_>;
40 |     /// Clears the buffer.
41 |     ///
42 |     /// # Safety
43 |     /// Range **must** have been inserted (see [`InsertIntoBuffer::insert_into`]) before calling
44 |     /// this method.
45 |     ///
46 |     /// Calling this method *clears* the inserted values, meaning new values can be inserted.
47 |     unsafe fn clear(&mut self, range: Range<usize>);
48 | }
49 | 
50 | /// [`Buffer`] value.
51 | ///
52 | /// # Safety
53 | /// Range `index..index+value.size()` is considered inserted into the buffer after calling
54 | /// [`InsertIntoBuffer::insert_into`] (see [`Buffer::slice`]/[`Buffer::clear`]).
55 | pub unsafe trait InsertIntoBuffer<B: Buffer> {
56 |     /// Returns the size taken by a value in the buffer.
57 |     fn size(&self) -> usize;
58 |     /// Inserts the value into the buffer at the given index.
59 |     ///
60 |     /// # Safety
61 |     /// For every call to this method, the inserted range `index..index+self.size()` **must not**
62 |     /// overlap with a previously inserted one.
63 |     unsafe fn insert_into(self, buffer: &B, index: usize);
64 | }
65 | 
66 | /// [`Buffer`] kind where values are inserted one by one.
67 | ///
68 | /// # Safety
69 | /// `index` is considered inserted into the buffer after calling [`CellBuffer::insert`] (see [`Buffer::slice`]/[`Buffer::clear`]).
70 | pub(crate) unsafe trait CellBuffer<T>: Buffer {
71 |     /// Inserts a value into the buffer at the given index.
72 |     ///
73 |     /// # Safety
74 |     /// For every call to this method, `index` **must not** have previously been inserted.
75 | unsafe fn insert(&self, index: usize, value: T); 76 | } 77 | 78 | /// Wrapper to implement [`InsertIntoBuffer`] on iterators. 79 | pub struct ValueIter(pub I); 80 | 81 | /// Extension trait to instantiate [`ValueIter`]. 82 | pub trait IntoValueIter: Sized { 83 | /// Iterator type to be wrapped in [`ValueIter`]. 84 | type Iter; 85 | /// Wrap iterator into [`ValueIter`]. 86 | fn into_value_iter(self) -> ValueIter; 87 | } 88 | 89 | impl IntoValueIter for I 90 | where 91 | I: IntoIterator, 92 | I::IntoIter: ExactSizeIterator, 93 | { 94 | type Iter = I::IntoIter; 95 | fn into_value_iter(self) -> ValueIter { 96 | ValueIter(self.into_iter()) 97 | } 98 | } 99 | 100 | // SAFETY: `insert_into` does initialize the slice in the buffer 101 | unsafe impl InsertIntoBuffer for ValueIter 102 | where 103 | B: CellBuffer, 104 | I: Iterator + ExactSizeIterator, 105 | { 106 | #[inline] 107 | fn size(&self) -> usize { 108 | self.0.len() 109 | } 110 | 111 | #[inline] 112 | unsafe fn insert_into(mut self, buffer: &B, index: usize) { 113 | // don't loop on iterator, because `ExactSizeIterator` is not a sufficient guarantee 114 | // for unsafe code 115 | for i in index..(index + self.0.len()) { 116 | const ERROR: &str = "iterator exhausted before reaching its exact size"; 117 | // SAFETY: function contract encompass `CellBuffer::insert` one 118 | unsafe { buffer.insert(i, self.0.next().expect(ERROR)) }; 119 | } 120 | } 121 | } 122 | 123 | // SAFETY: `insert_into` does initialize the slice in the buffer 124 | unsafe impl InsertIntoBuffer for [T; N] 125 | where 126 | B: CellBuffer, 127 | { 128 | #[inline] 129 | fn size(&self) -> usize { 130 | N 131 | } 132 | 133 | #[inline] 134 | unsafe fn insert_into(self, buffer: &B, index: usize) { 135 | for (i, elt) in self.into_iter().enumerate() { 136 | // SAFETY: function contract encompass `CellBuffer::insert` one 137 | unsafe { buffer.insert(index + i, elt) }; 138 | } 139 | } 140 | } 141 | 142 | /// Resizable [`Buffer`]. 143 | pub trait Resize: Buffer { 144 | /// Resizes the buffer. 145 | fn resize(&mut self, capacity: usize); 146 | } 147 | 148 | /// [`Buffer`] whose values can be drained from. 149 | /// 150 | /// # Safety 151 | /// Calling [`Drain::remove`] remove the value inserted at index `index 152 | /// (see [`InsertIntoBuffer::insert_into`]) 153 | pub unsafe trait Drain: Buffer { 154 | /// Value to be removed from the buffer 155 | type Value; 156 | /// Removes a value from the buffer at a given index and return it. 157 | /// 158 | /// # Safety 159 | /// A value **must** have been inserted at this index (see [`InsertIntoBuffer::insert_into`]) 160 | /// before calling this method. 161 | unsafe fn remove(&mut self, index: usize) -> Self::Value; 162 | } 163 | 164 | /// [`Buffer`] slice returned by [`Queue::try_dequeue`] (see [`Buffer::Slice`]). 165 | /// 166 | /// Buffer is released when the slice is dropped, so the other buffer will be dequeued next, 167 | /// unless [`BufferSlice::requeue`]/[`BufferSlice::into_iter`] is called. 
168 | /// 169 | /// # Examples 170 | /// ``` 171 | /// # use std::ops::Deref; 172 | /// # use swap_buffer_queue::Queue; 173 | /// # use swap_buffer_queue::buffer::VecBuffer; 174 | /// let queue: Queue> = Queue::with_capacity(42); 175 | /// queue.try_enqueue([0]).unwrap(); 176 | /// queue.try_enqueue([1]).unwrap(); 177 | /// 178 | /// let slice = queue.try_dequeue().unwrap(); 179 | /// assert_eq!(slice.deref(), &[0, 1]); 180 | /// assert_eq!(slice.into_iter().collect::>(), vec![0, 1]); 181 | /// ``` 182 | pub struct BufferSlice<'a, B, N> 183 | where 184 | B: Buffer, 185 | { 186 | queue: &'a Queue, 187 | buffer_index: usize, 188 | range: Range, 189 | slice: B::Slice<'a>, 190 | } 191 | 192 | impl<'a, B, N> BufferSlice<'a, B, N> 193 | where 194 | B: Buffer, 195 | { 196 | #[inline] 197 | pub(crate) fn new( 198 | queue: &'a Queue, 199 | buffer_index: usize, 200 | range: Range, 201 | slice: B::Slice<'a>, 202 | ) -> Self { 203 | Self { 204 | queue, 205 | buffer_index, 206 | range, 207 | slice, 208 | } 209 | } 210 | 211 | /// Reinsert the buffer at the beginning queue. 212 | /// 213 | /// It will thus de dequeued again next. 214 | /// 215 | /// # Examples 216 | /// ``` 217 | /// # use std::ops::Deref; 218 | /// # use swap_buffer_queue::Queue; 219 | /// # use swap_buffer_queue::buffer::VecBuffer; 220 | /// let queue: Queue> = Queue::with_capacity(42); 221 | /// queue.try_enqueue([0]).unwrap(); 222 | /// queue.try_enqueue([1]).unwrap(); 223 | /// 224 | /// let slice = queue.try_dequeue().unwrap(); 225 | /// assert_eq!(slice.deref(), &[0, 1]); 226 | /// slice.requeue(); 227 | /// let slice = queue.try_dequeue().unwrap(); 228 | /// assert_eq!(slice.deref(), &[0, 1]); 229 | /// ``` 230 | #[inline] 231 | pub fn requeue(self) { 232 | let slice = ManuallyDrop::new(self); 233 | slice.queue.requeue(slice.buffer_index, slice.range.clone()); 234 | } 235 | } 236 | 237 | impl<'a, B, N> fmt::Debug for BufferSlice<'a, B, N> 238 | where 239 | B: Buffer, 240 | B::Slice<'a>: fmt::Debug, 241 | { 242 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 243 | f.debug_tuple("BufferSlice").field(&self.slice).finish() 244 | } 245 | } 246 | 247 | impl<'a, B, N> Deref for BufferSlice<'a, B, N> 248 | where 249 | B: Buffer, 250 | { 251 | type Target = B::Slice<'a>; 252 | 253 | #[inline] 254 | fn deref(&self) -> &Self::Target { 255 | &self.slice 256 | } 257 | } 258 | 259 | impl<'a, B, N> DerefMut for BufferSlice<'a, B, N> 260 | where 261 | B: Buffer, 262 | { 263 | #[inline] 264 | fn deref_mut(&mut self) -> &mut Self::Target { 265 | &mut self.slice 266 | } 267 | } 268 | 269 | impl<'a, B, N> Drop for BufferSlice<'a, B, N> 270 | where 271 | B: Buffer, 272 | { 273 | #[inline] 274 | fn drop(&mut self) { 275 | self.queue.release(self.buffer_index, self.range.clone()); 276 | } 277 | } 278 | 279 | impl<'a, B, N> IntoIterator for BufferSlice<'a, B, N> 280 | where 281 | B: Buffer + Drain, 282 | { 283 | type Item = B::Value; 284 | type IntoIter = BufferIter<'a, B, N>; 285 | 286 | #[inline] 287 | fn into_iter(self) -> Self::IntoIter { 288 | let slice = ManuallyDrop::new(self); 289 | BufferIter { 290 | queue: slice.queue, 291 | buffer_index: slice.buffer_index, 292 | range: slice.range.clone(), 293 | _phantom: PhantomData, 294 | } 295 | } 296 | } 297 | 298 | /// [`Buffer`] iterator returned by [`BufferSlice::into_iter`] (see [`Drain`]). 299 | /// 300 | /// Buffer is lazily drained, and requeued (see [`BufferSlice::requeue`]) if the iterator is dropped while non exhausted. 
301 | /// 302 | /// # Examples 303 | /// ``` 304 | /// # use std::ops::Deref; 305 | /// # use swap_buffer_queue::Queue; 306 | /// # use swap_buffer_queue::buffer::VecBuffer; 307 | /// let queue: Queue> = Queue::with_capacity(42); 308 | /// queue.try_enqueue([0]).unwrap(); 309 | /// queue.try_enqueue([1]).unwrap(); 310 | /// 311 | /// let mut iter = queue.try_dequeue().unwrap().into_iter(); 312 | /// assert_eq!(iter.next(), Some(0)); 313 | /// drop(iter); 314 | /// let mut iter = queue.try_dequeue().unwrap().into_iter(); 315 | /// assert_eq!(iter.next(), Some(1)); 316 | /// assert_eq!(iter.next(), None); 317 | /// ``` 318 | pub struct OwnedBufferIter 319 | where 320 | Q: AsRef>, 321 | B: Buffer, 322 | { 323 | queue: Q, 324 | buffer_index: usize, 325 | range: Range, 326 | _phantom: PhantomData>, 327 | } 328 | 329 | /// Alias of [`OwnedBufferIter`] with a queue reference. 330 | pub type BufferIter<'a, B, N> = OwnedBufferIter<&'a Queue, B, N>; 331 | 332 | impl<'a, B, N> BufferIter<'a, B, N> 333 | where 334 | B: Buffer, 335 | { 336 | /// Convert back a buffer iterator into a buffer slice. 337 | /// 338 | /// # Examples 339 | /// ``` 340 | /// # use std::ops::Deref; 341 | /// # use std::sync::Arc; 342 | /// # use swap_buffer_queue::Queue; 343 | /// # use swap_buffer_queue::buffer::VecBuffer; 344 | /// let queue: Arc>> = Arc::new(Queue::with_capacity(42)); 345 | /// queue.try_enqueue([0]).unwrap(); 346 | /// queue.try_enqueue([1]).unwrap(); 347 | /// 348 | /// let iter = queue.try_dequeue().unwrap().into_iter(); 349 | /// let slice = iter.into_slice(); 350 | /// assert_eq!(slice.deref(), &[0, 1]); 351 | /// ``` 352 | #[inline] 353 | pub fn into_slice(self) -> BufferSlice<'a, B, N> { 354 | let iter = ManuallyDrop::new(self); 355 | BufferSlice { 356 | queue: iter.queue, 357 | buffer_index: iter.buffer_index, 358 | range: iter.range.clone(), 359 | slice: iter.queue.get_slice(iter.buffer_index, iter.range.clone()), 360 | } 361 | } 362 | } 363 | 364 | impl OwnedBufferIter 365 | where 366 | Q: AsRef>, 367 | B: Buffer, 368 | { 369 | /// Returns a "owned" version of the buffer iterator using a "owned" version of the queue. 
370 | /// 371 | /// # Examples 372 | /// ``` 373 | /// # use std::ops::Deref; 374 | /// # use std::sync::Arc; 375 | /// # use swap_buffer_queue::Queue; 376 | /// # use swap_buffer_queue::buffer::VecBuffer; 377 | /// let queue: Arc>> = Arc::new(Queue::with_capacity(42)); 378 | /// queue.try_enqueue([0]).unwrap(); 379 | /// queue.try_enqueue([1]).unwrap(); 380 | /// 381 | /// let mut iter = queue 382 | /// .try_dequeue() 383 | /// .unwrap() 384 | /// .into_iter() 385 | /// .with_owned(queue.clone()); 386 | /// drop(queue); // iter is "owned", queue can be dropped 387 | /// assert_eq!(iter.next(), Some(0)); 388 | /// assert_eq!(iter.next(), Some(1)); 389 | /// assert_eq!(iter.next(), None); 390 | /// ``` 391 | #[inline] 392 | pub fn with_owned(self, queue: O) -> OwnedBufferIter 393 | where 394 | O: AsRef>, 395 | { 396 | let iter = ManuallyDrop::new(self); 397 | assert!( 398 | ptr::eq(iter.queue.as_ref(), queue.as_ref()), 399 | "new owner must reference the queue referenced by the iterator" 400 | ); 401 | OwnedBufferIter { 402 | queue, 403 | buffer_index: iter.buffer_index, 404 | range: iter.range.clone(), 405 | _phantom: PhantomData, 406 | } 407 | } 408 | } 409 | 410 | impl fmt::Debug for OwnedBufferIter 411 | where 412 | Q: AsRef>, 413 | B: Buffer, 414 | { 415 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 416 | f.debug_tuple("BufferIter").field(&self.range).finish() 417 | } 418 | } 419 | 420 | impl Drop for OwnedBufferIter 421 | where 422 | Q: AsRef>, 423 | B: Buffer, 424 | { 425 | #[inline] 426 | fn drop(&mut self) { 427 | self.queue 428 | .as_ref() 429 | .requeue(self.buffer_index, self.range.clone()); 430 | } 431 | } 432 | 433 | impl Iterator for OwnedBufferIter 434 | where 435 | Q: AsRef>, 436 | B: Buffer + Drain, 437 | { 438 | type Item = B::Value; 439 | 440 | #[inline] 441 | fn next(&mut self) -> Option { 442 | if self.range.is_empty() { 443 | return None; 444 | } 445 | let value = self 446 | .queue 447 | .as_ref() 448 | .remove(self.buffer_index, self.range.start); 449 | self.range.start += 1; 450 | Some(value) 451 | } 452 | 453 | #[inline] 454 | fn size_hint(&self) -> (usize, Option) { 455 | self.range.size_hint() 456 | } 457 | } 458 | 459 | impl ExactSizeIterator for OwnedBufferIter 460 | where 461 | Q: AsRef>, 462 | B: Buffer + Drain, 463 | { 464 | } 465 | 466 | impl FusedIterator for OwnedBufferIter 467 | where 468 | Q: AsRef>, 469 | B: Buffer + Drain, 470 | { 471 | } 472 | -------------------------------------------------------------------------------- /src/buffer/array.rs: -------------------------------------------------------------------------------- 1 | use core::{cell::Cell, mem::MaybeUninit, ops::Range}; 2 | 3 | use crate::{ 4 | buffer::{Buffer, CellBuffer, Drain}, 5 | utils::init_array, 6 | }; 7 | 8 | /// A simple array buffer. 
9 | pub struct ArrayBuffer<T, const N: usize>([Cell<MaybeUninit<T>>; N]);
10 | 
11 | impl<T, const N: usize> Default for ArrayBuffer<T, N> {
12 |     fn default() -> Self {
13 |         Self(init_array(|| Cell::new(MaybeUninit::uninit())))
14 |     }
15 | }
16 | 
17 | // SAFETY: `ArrayBuffer::clear` does clear the inserted range from the buffer
18 | unsafe impl<T, const N: usize> Buffer for ArrayBuffer<T, N> {
19 |     type Slice<'a> = &'a mut [T]
20 |     where
21 |         T: 'a;
22 | 
23 |     #[inline]
24 |     fn capacity(&self) -> usize {
25 |         self.0.len()
26 |     }
27 | 
28 |     #[inline]
29 |     unsafe fn slice(&mut self, range: Range<usize>) -> Self::Slice<'_> {
30 |         // SAFETY: [Cell<MaybeUninit<T>>] has the same layout as [T]
31 |         // and function contract guarantees that the range is initialized
32 |         unsafe { &mut *(&mut self.0[range] as *mut _ as *mut [T]) }
33 |     }
34 | 
35 |     #[inline]
36 |     unsafe fn clear(&mut self, range: Range<usize>) {
37 |         for index in range {
38 |             // SAFETY: function contract guarantees that the range is initialized
39 |             unsafe { self.remove(index) };
40 |         }
41 |     }
42 | }
43 | 
44 | // SAFETY: `insert` does initialize the index in the buffer
45 | unsafe impl<T, const N: usize> CellBuffer<T> for ArrayBuffer<T, N> {
46 |     unsafe fn insert(&self, index: usize, value: T) {
47 |         self.0[index].set(MaybeUninit::new(value));
48 |     }
49 | }
50 | 
51 | // SAFETY: `ArrayBuffer::remove` does remove the index from the buffer
52 | unsafe impl<T, const N: usize> Drain for ArrayBuffer<T, N> {
53 |     type Value = T;
54 |     #[inline]
55 |     unsafe fn remove(&mut self, index: usize) -> Self::Value {
56 |         // SAFETY: function contract guarantees that the index has been inserted and is then initialized
57 |         unsafe { self.0[index].replace(MaybeUninit::uninit()).assume_init() }
58 |     }
59 | }
60 | 
--------------------------------------------------------------------------------
/src/buffer/vec.rs:
--------------------------------------------------------------------------------
1 | use alloc::boxed::Box;
2 | use core::{cell::Cell, mem::MaybeUninit, ops::Range};
3 | 
4 | use crate::buffer::{Buffer, CellBuffer, Drain, Resize};
5 | 
6 | /// A simple vector buffer.
7 | pub struct VecBuffer(Box<[Cell>]>); 8 | 9 | impl Default for VecBuffer { 10 | fn default() -> Self { 11 | Self(Default::default()) 12 | } 13 | } 14 | 15 | // SAFETY: `VecBuffer::clear` does clear the inserted range from the buffer 16 | unsafe impl Buffer for VecBuffer { 17 | type Slice<'a> = &'a mut [T] 18 | where 19 | T: 'a; 20 | 21 | #[inline] 22 | fn capacity(&self) -> usize { 23 | self.0.len() 24 | } 25 | 26 | #[inline] 27 | unsafe fn slice(&mut self, range: Range) -> Self::Slice<'_> { 28 | // SAFETY: [Cell>] has the same layout as [T] 29 | // and function contract guarantees that the range is initialized 30 | unsafe { &mut *(&mut self.0[range] as *mut _ as *mut [T]) } 31 | } 32 | 33 | #[inline] 34 | unsafe fn clear(&mut self, range: Range) { 35 | for index in range { 36 | // SAFETY: function contract guarantees that the range is initialized 37 | unsafe { self.remove(index) }; 38 | } 39 | } 40 | } 41 | 42 | // SAFETY: `insert` does initialize the index in the buffer 43 | unsafe impl CellBuffer for VecBuffer { 44 | unsafe fn insert(&self, index: usize, value: T) { 45 | self.0[index].set(MaybeUninit::new(value)); 46 | } 47 | } 48 | 49 | impl Resize for VecBuffer { 50 | fn resize(&mut self, capacity: usize) { 51 | self.0 = (0..capacity) 52 | .map(|_| Cell::new(MaybeUninit::uninit())) 53 | .collect(); 54 | } 55 | } 56 | 57 | // SAFETY: `VecBuffer::remove` does remove the index from the buffer 58 | unsafe impl Drain for VecBuffer { 59 | type Value = T; 60 | #[inline] 61 | unsafe fn remove(&mut self, index: usize) -> Self::Value { 62 | // SAFETY: function contract guarantees that the index has been inserted and is then initialized 63 | unsafe { self.0[index].get_mut().assume_init_read() } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | //! Queue error types. 2 | 3 | use core::fmt; 4 | 5 | /// Error returned by [`Queue::try_enqueue`](crate::Queue::try_enqueue). 6 | /// 7 | /// The value whose enqueuing has failed is embedded within the error. 8 | #[derive(Copy, Clone, Eq, PartialEq)] 9 | pub enum TryEnqueueError { 10 | /// The queue doesn't have sufficient capacity to enqueue the give value. 11 | InsufficientCapacity(T), 12 | /// The queue is closed. 
13 | Closed(T), 14 | } 15 | 16 | impl TryEnqueueError { 17 | /// Returns the value whose enqueuing has failed 18 | pub fn into_inner(self) -> T { 19 | match self { 20 | Self::InsufficientCapacity(v) | Self::Closed(v) => v, 21 | } 22 | } 23 | } 24 | 25 | impl fmt::Debug for TryEnqueueError { 26 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 27 | match self { 28 | Self::InsufficientCapacity(_) => write!(f, "TryEnqueueError::InsufficientCapacity(_)"), 29 | Self::Closed(_) => write!(f, "TryEnqueueError::Closed(_)"), 30 | } 31 | } 32 | } 33 | 34 | impl fmt::Display for TryEnqueueError { 35 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 36 | let error = match self { 37 | Self::InsufficientCapacity(_) => "queue has insufficient capacity", 38 | Self::Closed(_) => "queue is closed", 39 | }; 40 | write!(f, "{error}") 41 | } 42 | } 43 | 44 | #[cfg(feature = "std")] 45 | impl std::error::Error for TryEnqueueError {} 46 | 47 | /// Error returned by [`SynchronizedQueue::enqueue`](crate::SynchronizedQueue::enqueue)/[`SynchronizedQueue::enqueue_async`](crate::SynchronizedQueue::enqueue_async) 48 | pub type EnqueueError = TryEnqueueError; 49 | 50 | /// Error returned by [`Queue::try_dequeue`](crate::Queue::try_dequeue). 51 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 52 | pub enum TryDequeueError { 53 | /// The queue is empty. 54 | Empty, 55 | /// There is a concurrent enqueuing that need to end before dequeuing. 56 | Pending, 57 | /// The queue is closed. 58 | Closed, 59 | /// The queue is concurrently dequeued. 60 | Conflict, 61 | } 62 | 63 | impl fmt::Display for TryDequeueError { 64 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 65 | let error = match self { 66 | Self::Empty => "queue is empty", 67 | Self::Pending => "waiting for concurrent enqueuing end", 68 | Self::Closed => "queue is closed", 69 | Self::Conflict => "queue is concurrently dequeued", 70 | }; 71 | write!(f, "{error}") 72 | } 73 | } 74 | 75 | #[cfg(feature = "std")] 76 | impl std::error::Error for TryDequeueError {} 77 | 78 | /// Error returned by [`SynchronizedQueue::dequeue`](crate::SynchronizedQueue::dequeue)/ 79 | /// [`SynchronizedQueue::dequeue_async`](crate::SynchronizedQueue::dequeue_async). 80 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 81 | pub enum DequeueError { 82 | /// The queue is closed. 83 | Closed, 84 | /// The queue is concurrently dequeued. 85 | Conflict, 86 | } 87 | 88 | impl fmt::Display for DequeueError { 89 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 90 | let error = match self { 91 | Self::Closed => "queue is closed", 92 | Self::Conflict => "queue is concurrently dequeued", 93 | }; 94 | write!(f, "{error}") 95 | } 96 | } 97 | 98 | #[cfg(feature = "std")] 99 | impl std::error::Error for DequeueError {} 100 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(clippy::dbg_macro)] 2 | #![forbid(clippy::semicolon_if_nothing_returned)] 3 | #![forbid(missing_docs)] 4 | #![forbid(unsafe_op_in_unsafe_fn)] 5 | #![forbid(clippy::undocumented_unsafe_blocks)] 6 | #![cfg_attr(docsrs, feature(doc_auto_cfg))] 7 | #![cfg_attr(not(feature = "std"), no_std)] 8 | //! # swap-buffer-queue 9 | //! A buffering MPSC queue. 10 | //! 11 | //! This library is intended to be a (better, I hope) alternative to traditional MPSC queues 12 | //! in the context of a buffering consumer, by moving the buffering part directly into the queue. 13 | //! 
14 | //! It is especially well suited for IO writing workflow, see [`mod@write`] and [`write_vectored`]. 15 | //! 16 | //! The crate is *no_std* (some buffer implementations may require `std`). 17 | //! 18 | //! In addition to the low level `Queue` implementation, a higher level `SynchronizedQueue` is 19 | //! provided with both blocking and asynchronous methods. 20 | //! 21 | //! # Examples 22 | //! 23 | //! ```rust 24 | //! # use std::ops::Deref; 25 | //! # use swap_buffer_queue::{buffer::{IntoValueIter, VecBuffer}, Queue}; 26 | //! // Initialize the queue with a capacity 27 | //! let queue: Queue> = Queue::with_capacity(42); 28 | //! // Enqueue some value 29 | //! queue.try_enqueue([0]).unwrap(); 30 | //! // Multiple values can be enqueued at the same time 31 | //! // (optimized compared to multiple enqueuing) 32 | //! queue.try_enqueue([1, 2]).unwrap(); 33 | //! let mut values = vec![3, 4]; 34 | //! queue 35 | //! .try_enqueue(values.drain(..).into_value_iter()) 36 | //! .unwrap(); 37 | //! // Dequeue a slice to the enqueued values 38 | //! let slice = queue.try_dequeue().unwrap(); 39 | //! assert_eq!(slice.deref(), &[0, 1, 2, 3, 4]); 40 | //! // Enqueued values can also be retrieved 41 | //! assert_eq!(slice.into_iter().collect::>(), vec![0, 1, 2, 3, 4]); 42 | //! ``` 43 | 44 | #[cfg(feature = "alloc")] 45 | extern crate alloc; 46 | 47 | pub mod buffer; 48 | pub mod error; 49 | mod loom; 50 | pub mod notify; 51 | mod queue; 52 | #[cfg(feature = "std")] 53 | mod synchronized; 54 | mod utils; 55 | #[cfg(feature = "write")] 56 | pub mod write; 57 | #[cfg(feature = "write")] 58 | #[cfg(feature = "std")] 59 | pub mod write_vectored; 60 | 61 | pub use queue::Queue; 62 | #[cfg(feature = "std")] 63 | pub use synchronized::{SynchronizedNotifier, SynchronizedQueue}; 64 | -------------------------------------------------------------------------------- /src/loom.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(all(loom, test)))] 2 | mod without_loom { 3 | #[cfg(not(feature = "std"))] 4 | pub(crate) use core::sync; 5 | pub(crate) use core::{cell, hint}; 6 | #[cfg(feature = "std")] 7 | pub(crate) use std::{sync, thread}; 8 | 9 | pub(crate) const SPIN_LIMIT: usize = 64; 10 | pub(crate) const BACKOFF_LIMIT: usize = 6; 11 | 12 | #[derive(Debug, Default)] 13 | pub(crate) struct LoomUnsafeCell(cell::UnsafeCell); 14 | 15 | impl LoomUnsafeCell { 16 | pub(crate) fn with(&self, f: impl FnOnce(*const T) -> R) -> R { 17 | f(self.0.get()) 18 | } 19 | 20 | pub(crate) fn with_mut(&self, f: impl FnOnce(*mut T) -> R) -> R { 21 | f(self.0.get()) 22 | } 23 | } 24 | 25 | #[cfg(feature = "std")] 26 | impl LoomUnsafeCell { 27 | pub(crate) fn new(data: T) -> Self { 28 | Self(cell::UnsafeCell::new(data)) 29 | } 30 | } 31 | } 32 | 33 | #[cfg(not(all(loom, test)))] 34 | pub(crate) use without_loom::*; 35 | 36 | #[cfg(all(loom, test))] 37 | mod with_loom { 38 | #[cfg(feature = "std")] 39 | pub(crate) use loom::thread; 40 | pub(crate) use loom::{cell, hint, sync}; 41 | 42 | pub(crate) const SPIN_LIMIT: usize = 1; 43 | pub(crate) const BACKOFF_LIMIT: usize = 1; 44 | pub(crate) use cell::UnsafeCell as LoomUnsafeCell; 45 | } 46 | 47 | #[cfg(all(loom, test))] 48 | pub(crate) use with_loom::*; 49 | -------------------------------------------------------------------------------- /src/notify.rs: -------------------------------------------------------------------------------- 1 | //! Tool for (a)synchronous implementation. 
2 | 3 | /// Notifier for waiting [`Queue`](crate::Queue) operations. 4 | pub trait Notify { 5 | /// Wake waiting dequeue operation. 6 | fn notify_dequeue(&self); 7 | /// Wake waiting enqueue operation. 8 | fn notify_enqueue(&self); 9 | } 10 | 11 | impl Notify for () { 12 | #[inline] 13 | fn notify_dequeue(&self) {} 14 | #[inline] 15 | fn notify_enqueue(&self) {} 16 | } 17 | -------------------------------------------------------------------------------- /src/queue.rs: -------------------------------------------------------------------------------- 1 | use core::{fmt, num::NonZeroUsize, ops::Range}; 2 | 3 | use crossbeam_utils::CachePadded; 4 | 5 | use crate::{ 6 | buffer::{Buffer, BufferSlice, Drain, InsertIntoBuffer, Resize}, 7 | error::{TryDequeueError, TryEnqueueError}, 8 | loom::{ 9 | hint, 10 | sync::atomic::{AtomicUsize, Ordering}, 11 | LoomUnsafeCell, BACKOFF_LIMIT, SPIN_LIMIT, 12 | }, 13 | notify::Notify, 14 | }; 15 | 16 | const CLOSED_FLAG: usize = (usize::MAX >> 1) + 1; 17 | const DEQUEUING_LOCKED: usize = usize::MAX; 18 | 19 | /// Atomic usize with the following (64bit) representation 20 | /// 64------------63---------------------1--------------0 21 | /// | closed flag | enqueuing capacity | buffer index | 22 | /// +-------------+----------------------+--------------+ 23 | /// *buffer index* bit is the index (0 or 1) of the enqueuing buffer 24 | /// *enqueuing capacity* is the remaining enqueuing capacity, starting at the capacity of the 25 | /// buffer, and decreasing until zero 26 | /// *closed flag* is a bit flag to mark the queue as closed 27 | #[derive(Copy, Clone)] 28 | #[repr(transparent)] 29 | struct EnqueuingCapacity(usize); 30 | 31 | impl EnqueuingCapacity { 32 | #[inline] 33 | fn new(buffer_index: usize, capacity: usize) -> Self { 34 | assert!(capacity << 1 < CLOSED_FLAG); 35 | Self(buffer_index | (capacity << 1)) 36 | } 37 | 38 | #[inline] // I've found compiler not inlining this function 39 | fn buffer_index(self) -> usize { 40 | self.0 & 1 41 | } 42 | 43 | #[inline] 44 | fn remaining_capacity(self) -> usize { 45 | (self.0 & !CLOSED_FLAG) >> 1 46 | } 47 | 48 | #[inline] 49 | fn is_closed(self) -> bool { 50 | self.0 & CLOSED_FLAG != 0 51 | } 52 | 53 | #[inline] 54 | fn try_reserve(self, size: NonZeroUsize) -> Option { 55 | self.0.checked_sub(size.get() << 1).map(Self) 56 | } 57 | 58 | #[inline] 59 | fn with_closed(self, enqueuing: Self) -> Self { 60 | Self(self.0 | (enqueuing.0 & CLOSED_FLAG)) 61 | } 62 | 63 | #[inline] 64 | fn from_atomic(atomic: usize) -> Self { 65 | Self(atomic) 66 | } 67 | 68 | #[inline] 69 | fn into_atomic(self) -> usize { 70 | self.0 71 | } 72 | 73 | #[inline] 74 | fn close(atomic: &AtomicUsize, ordering: Ordering) { 75 | atomic.fetch_or(CLOSED_FLAG, ordering); 76 | } 77 | 78 | #[inline] 79 | fn reopen(atomic: &AtomicUsize, ordering: Ordering) { 80 | atomic.fetch_and(!CLOSED_FLAG, ordering); 81 | } 82 | 83 | #[inline] 84 | fn check_overflow(capacity: usize) { 85 | assert!( 86 | capacity < usize::MAX >> 2, 87 | "capacity must be lower than `usize::MAX >> 2`" 88 | ); 89 | } 90 | } 91 | 92 | /// Atomic usize with the following (64bit) representation 93 | /// 64-------------------1--------------0 94 | /// | dequeuing length | buffer index | 95 | /// +--------------------+--------------+ 96 | /// *buffer index* bit is the index (0 or 1) of the dequeuing buffer 97 | /// *dequeueing length* is the length currently dequeued 98 | #[derive(Copy, Clone)] 99 | struct DequeuingLength(usize); 100 | 101 | impl DequeuingLength { 102 | #[inline] 103 | fn 
new(buffer_index: usize, length: usize) -> Self { 104 | Self(buffer_index | length << 1) 105 | } 106 | 107 | #[inline] 108 | fn buffer_index(self) -> usize { 109 | self.0 & 1 110 | } 111 | 112 | #[inline] 113 | fn buffer_len(self) -> usize { 114 | self.0 >> 1 115 | } 116 | 117 | #[inline] 118 | fn try_from_atomic(atomic: usize) -> Result { 119 | if atomic != DEQUEUING_LOCKED { 120 | Ok(Self(atomic)) 121 | } else { 122 | Err(TryDequeueError::Conflict) 123 | } 124 | } 125 | 126 | #[inline] 127 | fn into_atomic(self) -> usize { 128 | self.0 129 | } 130 | } 131 | 132 | /// A buffered MPSC "swap-buffer" queue. 133 | pub struct Queue 134 | where 135 | B: Buffer, 136 | { 137 | enqueuing_capacity: CachePadded, 138 | dequeuing_length: CachePadded, 139 | buffers: [LoomUnsafeCell; 2], 140 | buffers_length: [CachePadded; 2], 141 | capacity: AtomicUsize, 142 | notify: N, 143 | } 144 | 145 | // Needed for `BufferIter` 146 | impl AsRef> for Queue 147 | where 148 | B: Buffer, 149 | { 150 | fn as_ref(&self) -> &Queue { 151 | self 152 | } 153 | } 154 | 155 | // SAFETY: Buffer access is synchronized by the algorithm, but `Send` is required 156 | // because it is owned by the queue 157 | unsafe impl Send for Queue 158 | where 159 | B: Buffer + Send, 160 | N: Send, 161 | { 162 | } 163 | // SAFETY: Buffer access is synchronized by the algorithm, but `Send` is required 164 | // because it is owned by the queue 165 | unsafe impl Sync for Queue 166 | where 167 | B: Buffer + Send, 168 | N: Sync, 169 | { 170 | } 171 | 172 | impl Queue 173 | where 174 | B: Buffer, 175 | N: Default, 176 | { 177 | /// Create a new queue using buffer default. 178 | /// 179 | /// Buffer default may have a non-zero capacity, e.g. array buffer. 180 | /// 181 | /// # Examples 182 | /// ``` 183 | /// # use swap_buffer_queue::Queue; 184 | /// # use swap_buffer_queue::buffer::VecBuffer; 185 | /// let queue: Queue> = Queue::new(); 186 | /// ``` 187 | pub fn new() -> Self { 188 | let buffers: [LoomUnsafeCell; 2] = Default::default(); 189 | // https://github.com/tokio-rs/loom/issues/277#issuecomment-1633262296 190 | // SAFETY: exclusive reference to `buffers` 191 | let capacity = buffers[0].with_mut(|buf| unsafe { &*buf }.capacity()); 192 | EnqueuingCapacity::check_overflow(capacity); 193 | Self { 194 | enqueuing_capacity: AtomicUsize::new(EnqueuingCapacity::new(0, capacity).into_atomic()) 195 | .into(), 196 | dequeuing_length: AtomicUsize::new(DequeuingLength::new(1, 0).into_atomic()).into(), 197 | buffers, 198 | buffers_length: Default::default(), 199 | capacity: AtomicUsize::new(capacity), 200 | notify: Default::default(), 201 | } 202 | } 203 | } 204 | 205 | impl Queue 206 | where 207 | B: Buffer + Resize, 208 | N: Default, 209 | { 210 | /// Creates a new queue with the given capacity. 
211 | /// 212 | /// # Examples 213 | /// ``` 214 | /// # use swap_buffer_queue::Queue; 215 | /// # use swap_buffer_queue::buffer::VecBuffer; 216 | /// let queue: Queue> = Queue::with_capacity(42); 217 | /// ``` 218 | pub fn with_capacity(capacity: usize) -> Self { 219 | EnqueuingCapacity::check_overflow(capacity); 220 | let buffers: [LoomUnsafeCell; 2] = Default::default(); 221 | // https://github.com/tokio-rs/loom/issues/277#issuecomment-1633262296 222 | // SAFETY: exclusive reference to `buffers` 223 | buffers[0].with_mut(|buf| unsafe { &mut *buf }.resize(capacity)); 224 | // SAFETY: exclusive reference to `buffers` 225 | buffers[1].with_mut(|buf| unsafe { &mut *buf }.resize(capacity)); 226 | Self { 227 | enqueuing_capacity: AtomicUsize::new(EnqueuingCapacity::new(0, capacity).into_atomic()) 228 | .into(), 229 | dequeuing_length: AtomicUsize::new(DequeuingLength::new(1, 0).into_atomic()).into(), 230 | buffers, 231 | buffers_length: Default::default(), 232 | capacity: AtomicUsize::new(capacity), 233 | notify: Default::default(), 234 | } 235 | } 236 | } 237 | 238 | impl Queue 239 | where 240 | B: Buffer, 241 | { 242 | /// Returns queue's [`Notify`] implementor. 243 | /// 244 | /// # Examples 245 | /// ``` 246 | /// # use swap_buffer_queue::Queue; 247 | /// # use swap_buffer_queue::buffer::VecBuffer; 248 | /// use swap_buffer_queue::notify::Notify; 249 | /// 250 | /// let queue: Queue> = Queue::with_capacity(42); 251 | /// queue.notify().notify_dequeue(); 252 | /// ``` 253 | #[inline] 254 | pub fn notify(&self) -> &N { 255 | &self.notify 256 | } 257 | 258 | /// Returns the current enqueuing buffer capacity. 259 | /// 260 | /// # Examples 261 | /// ``` 262 | /// # use swap_buffer_queue::Queue; 263 | /// # use swap_buffer_queue::buffer::VecBuffer; 264 | /// let queue: Queue> = Queue::with_capacity(42); 265 | /// assert_eq!(queue.capacity(), 42); 266 | /// ``` 267 | #[inline] 268 | pub fn capacity(&self) -> usize { 269 | // cannot use `Buffer::capacity` because of data race 270 | self.capacity.load(Ordering::Relaxed) 271 | } 272 | 273 | /// Returns the current enqueuing buffer length. 274 | /// 275 | /// # Examples 276 | /// ``` 277 | /// # use swap_buffer_queue::Queue; 278 | /// # use swap_buffer_queue::buffer::VecBuffer; 279 | /// let queue: Queue> = Queue::with_capacity(42); 280 | /// assert_eq!(queue.len(), 0); 281 | /// queue.try_enqueue([0]).unwrap(); 282 | /// assert_eq!(queue.len(), 1); 283 | /// ``` 284 | #[inline] 285 | pub fn len(&self) -> usize { 286 | let enqueuing = 287 | EnqueuingCapacity::from_atomic(self.enqueuing_capacity.load(Ordering::Relaxed)); 288 | self.capacity() 289 | .saturating_sub(enqueuing.remaining_capacity()) 290 | } 291 | 292 | /// Returns `true` if the current enqueuing buffer is empty. 293 | /// 294 | /// # Examples 295 | /// ``` 296 | /// # use swap_buffer_queue::Queue; 297 | /// # use swap_buffer_queue::buffer::VecBuffer; 298 | /// let queue: Queue> = Queue::with_capacity(42); 299 | /// assert!(queue.is_empty()); 300 | /// ``` 301 | #[inline] 302 | pub fn is_empty(&self) -> bool { 303 | self.len() == 0 304 | } 305 | 306 | /// Returns `true` if the queue is closed. 
307 | /// 308 | /// # Examples 309 | /// ``` 310 | /// # use swap_buffer_queue::Queue; 311 | /// # use swap_buffer_queue::buffer::VecBuffer; 312 | /// let queue: Queue> = Queue::with_capacity(42); 313 | /// assert!(!queue.is_closed()); 314 | /// queue.close(); 315 | /// assert!(queue.is_closed()); 316 | /// ``` 317 | #[inline] 318 | pub fn is_closed(&self) -> bool { 319 | EnqueuingCapacity::from_atomic(self.enqueuing_capacity.load(Ordering::Relaxed)).is_closed() 320 | } 321 | 322 | /// Reopen a closed queue. 323 | /// 324 | /// Calling this method when the queue is not closed has no effect. 325 | /// 326 | /// # Examples 327 | /// ``` 328 | /// # use swap_buffer_queue::Queue; 329 | /// # use swap_buffer_queue::buffer::VecBuffer; 330 | /// let queue: Queue> = Queue::with_capacity(42); 331 | /// queue.close(); 332 | /// assert!(queue.is_closed()); 333 | /// queue.reopen(); 334 | /// assert!(!queue.is_closed()); 335 | /// ``` 336 | #[inline] 337 | pub fn reopen(&self) { 338 | EnqueuingCapacity::reopen(&self.enqueuing_capacity, Ordering::AcqRel); 339 | } 340 | 341 | #[inline] 342 | fn lock_dequeuing(&self) -> Result { 343 | // Protect from concurrent dequeuing by swapping the dequeuing length with a constant 344 | // marking dequeuing conflict. 345 | DequeuingLength::try_from_atomic( 346 | self.dequeuing_length 347 | .swap(DEQUEUING_LOCKED, Ordering::Relaxed), 348 | ) 349 | } 350 | 351 | #[allow(clippy::type_complexity)] 352 | const NO_RESIZE: Option (bool, usize)> = None; 353 | 354 | #[inline] 355 | fn try_dequeue_internal( 356 | &self, 357 | dequeuing: DequeuingLength, 358 | notify_enqueue: impl Fn(), 359 | resize: Option (bool, usize)>, 360 | ) -> Result, TryDequeueError> { 361 | // If dequeuing length is greater than zero, it means than previous dequeuing is still 362 | // ongoing, either because previous `try_dequeue` operation returns pending error, 363 | // or because requeuing (after partial draining for example). 364 | if let Some(len) = NonZeroUsize::new(dequeuing.buffer_len()) { 365 | return self 366 | .try_dequeue_spin(dequeuing.buffer_index(), len) 367 | .ok_or(TryDequeueError::Pending); 368 | } 369 | let next_buffer_index = dequeuing.buffer_index(); 370 | let (resized, inserted_length, next_capa) = 371 | self.buffers[next_buffer_index].with_mut(|next_buf| { 372 | // SAFETY: Dequeuing buffer can be accessed mutably 373 | let next_buffer = unsafe { &mut *next_buf }; 374 | // Resize buffer if needed. 375 | let (resized, inserted_length) = resize.map_or((false, 0), |f| f(next_buffer)); 376 | (resized, inserted_length, next_buffer.capacity()) 377 | }); 378 | if inserted_length > 0 { 379 | self.buffers_length[next_buffer_index].fetch_add(inserted_length, Ordering::Relaxed); 380 | } 381 | let mut enqueuing = 382 | EnqueuingCapacity::from_atomic(self.enqueuing_capacity.load(Ordering::Acquire)); 383 | debug_assert_ne!(dequeuing.buffer_index(), enqueuing.buffer_index()); 384 | let capacity = 385 | // SAFETY: Enqueuing buffer can be immutably accessed. 
386 | self.buffers[enqueuing.buffer_index()].with(|buf| unsafe { &*buf }.capacity()); 387 | // If buffer is empty and has not be resized, return an error (and store back dequeuing) 388 | if enqueuing.remaining_capacity() == capacity && !resized && inserted_length == 0 { 389 | self.dequeuing_length 390 | .store(dequeuing.into_atomic(), Ordering::Relaxed); 391 | return Err(if enqueuing.is_closed() { 392 | TryDequeueError::Closed 393 | } else { 394 | TryDequeueError::Empty 395 | }); 396 | } 397 | // Swap buffers: previous dequeuing buffer become the enqueuing one 398 | let next_enqueuing = EnqueuingCapacity::new(next_buffer_index, next_capa - inserted_length); 399 | let mut backoff = 0; 400 | while let Err(enq) = self.enqueuing_capacity.compare_exchange_weak( 401 | enqueuing.into_atomic(), 402 | next_enqueuing.with_closed(enqueuing).into_atomic(), 403 | Ordering::AcqRel, 404 | Ordering::Relaxed, 405 | ) { 406 | enqueuing = EnqueuingCapacity::from_atomic(enq); 407 | // Spin in case of concurrent modifications, except when the buffer is full ofc. 408 | if enqueuing.remaining_capacity() != 0 { 409 | for _ in 0..1 << backoff { 410 | hint::spin_loop(); 411 | } 412 | if backoff < BACKOFF_LIMIT { 413 | backoff += 1; 414 | } 415 | } 416 | } 417 | // Update the queue capacity if needed. 418 | if self.capacity() != next_capa { 419 | self.capacity.store(next_capa, Ordering::Relaxed); 420 | } 421 | // Notify enqueuers. 422 | notify_enqueue(); 423 | match NonZeroUsize::new(capacity - enqueuing.remaining_capacity()) { 424 | // Try to wait ongoing insertions and take ownership of the buffer, then return the 425 | // buffer slice 426 | Some(len) => self 427 | .try_dequeue_spin(enqueuing.buffer_index(), len) 428 | .ok_or(TryDequeueError::Pending), 429 | // If the enqueuing buffer was empty, but values has been inserted while resizing, 430 | // retry. 431 | None if inserted_length > 0 => self.try_dequeue_internal( 432 | DequeuingLength::new(enqueuing.buffer_index(), 0), 433 | notify_enqueue, 434 | Self::NO_RESIZE, 435 | ), 436 | // Otherwise, (empty enqueuing buffer, resized dequeuing one), acknowledge the swap and 437 | // return empty error 438 | None => { 439 | debug_assert!(resized); 440 | self.dequeuing_length.store( 441 | DequeuingLength::new(enqueuing.buffer_index(), 0).into_atomic(), 442 | Ordering::Relaxed, 443 | ); 444 | Err(TryDequeueError::Empty) 445 | } 446 | } 447 | } 448 | 449 | fn try_dequeue_spin( 450 | &self, 451 | buffer_index: usize, 452 | length: NonZeroUsize, 453 | ) -> Option> { 454 | for _ in 0..SPIN_LIMIT { 455 | // Buffers having been swapped, no more enqueuing can happen, we still need to wait 456 | // for ongoing one. They will be finished when the buffer length (updated after 457 | // enqueuing) is equal to the expected one. 458 | // Also, requeuing with potential draining can lead to have an expected length lower 459 | // than the effective buffer length. 460 | let buffer_len = self.buffers_length[buffer_index].load(Ordering::Acquire); 461 | if buffer_len >= length.get() { 462 | // Returns the slice (range can be shortened by draining + requeuing). 463 | let range = buffer_len - length.get()..buffer_len; 464 | let slice = self.buffers[buffer_index] 465 | // SAFETY: All enqueuings are done, and buffers having been swapped, this buffer 466 | // can now be accessed mutably. 467 | // SAFETY: All enqueuing are done, range has been inserted. 
468 | .with_mut(|buf| unsafe { (*buf).slice(range.clone()) }); 469 | return Some(BufferSlice::new(self, buffer_index, range, slice)); 470 | } 471 | hint::spin_loop(); 472 | } 473 | // If the enqueuing are still ongoing, just save the dequeuing state in order to retry. 474 | self.dequeuing_length.store( 475 | DequeuingLength::new(buffer_index, length.get()).into_atomic(), 476 | Ordering::Relaxed, 477 | ); 478 | None 479 | } 480 | 481 | pub(crate) fn release(&self, buffer_index: usize, range: Range) { 482 | // Clears the dequeuing buffer and its length, and release the dequeuing "lock". 483 | // SAFETY: Dequeued buffer pointed by buffer index can be accessed mutably 484 | // (see `Queue::try_dequeue_spin`). 485 | // SAFETY: Range comes from the dequeued slice, so it has been previously inserted. 486 | self.buffers[buffer_index].with_mut(|buf| unsafe { (*buf).clear(range) }); 487 | self.buffers_length[buffer_index].store(0, Ordering::Release); 488 | self.dequeuing_length.store( 489 | DequeuingLength::new(buffer_index, 0).into_atomic(), 490 | Ordering::Relaxed, 491 | ); 492 | } 493 | 494 | #[inline] 495 | pub(crate) fn get_slice(&self, buffer_index: usize, range: Range) -> B::Slice<'_> { 496 | self.buffers[buffer_index] 497 | // SAFETY: Dequeued buffer pointed by buffer index can be accessed mutably 498 | // (see `Queue::try_dequeue_spin`). 499 | // SAFETY: Range comes from the dequeued slice, so it has been previously inserted. 500 | .with_mut(|buf| unsafe { (*buf).slice(range.clone()) }) 501 | } 502 | 503 | #[inline] 504 | pub(crate) fn requeue(&self, buffer_index: usize, range: Range) { 505 | // Requeuing the buffer just means saving the dequeuing state (or release if there is 506 | // nothing to requeue). 507 | let length = range.end - range.start; 508 | if length > 0 { 509 | self.dequeuing_length.store( 510 | DequeuingLength::new(buffer_index, length).into_atomic(), 511 | Ordering::Relaxed, 512 | ); 513 | } else { 514 | self.release(buffer_index, range); 515 | } 516 | } 517 | } 518 | 519 | impl Queue 520 | where 521 | B: Buffer, 522 | N: Notify, 523 | { 524 | /// Tries enqueuing the given value into the queue. 525 | /// 526 | /// Enqueuing will fail if the queue has insufficient capacity, or if it is closed. In case of 527 | /// success, it will notify waiting dequeuing operations using [`Notify::notify_dequeue`]. 528 | /// 529 | /// Enqueuing a zero-sized value is a no-op. 
530 | /// 531 | /// # Examples 532 | /// ``` 533 | /// # use swap_buffer_queue::Queue; 534 | /// # use swap_buffer_queue::buffer::VecBuffer; 535 | /// # use swap_buffer_queue::error::TryEnqueueError; 536 | /// let queue: Queue> = Queue::with_capacity(1); 537 | /// queue.try_enqueue([0]).unwrap(); 538 | /// // queue is full 539 | /// assert_eq!( 540 | /// queue.try_enqueue([0]), 541 | /// Err(TryEnqueueError::InsufficientCapacity([0])) 542 | /// ); 543 | /// // let's close the queue 544 | /// queue.close(); 545 | /// assert_eq!(queue.try_enqueue([0]), Err(TryEnqueueError::Closed([0]))); 546 | /// ``` 547 | pub fn try_enqueue(&self, value: T) -> Result<(), TryEnqueueError> 548 | where 549 | T: InsertIntoBuffer, 550 | { 551 | // Compare-and-swap loop with backoff in order to mitigate contention on the atomic field 552 | let Some(value_size) = NonZeroUsize::new(value.size()) else { 553 | return Ok(()); 554 | }; 555 | let mut enqueuing = 556 | EnqueuingCapacity::from_atomic(self.enqueuing_capacity.load(Ordering::Acquire)); 557 | let mut backoff = None; 558 | loop { 559 | // Check if the queue is not closed and try to reserve a slice of the buffer. 560 | if enqueuing.is_closed() { 561 | return Err(TryEnqueueError::Closed(value)); 562 | } 563 | let Some(next_enq) = enqueuing.try_reserve(value_size) else { 564 | return Err(TryEnqueueError::InsufficientCapacity(value)); 565 | }; 566 | if let Some(ref mut backoff) = backoff { 567 | for _ in 0..1 << *backoff { 568 | hint::spin_loop(); 569 | } 570 | if *backoff < BACKOFF_LIMIT { 571 | *backoff += 1; 572 | } 573 | } 574 | match self.enqueuing_capacity.compare_exchange_weak( 575 | enqueuing.into_atomic(), 576 | next_enq.into_atomic(), 577 | Ordering::AcqRel, 578 | Ordering::Relaxed, 579 | ) { 580 | Ok(_) => break, 581 | Err(enq) => { 582 | enqueuing = EnqueuingCapacity::from_atomic(enq); 583 | // Spin in case of concurrent modification, except when the buffer index has 584 | // been modified, which may mean the conflict was due to dequeuing. 585 | backoff = (next_enq.buffer_index() == enqueuing.buffer_index()) 586 | .then(|| backoff.unwrap_or(0)); 587 | } 588 | } 589 | } 590 | // Insert the value into the buffer at the index given by subtracting the remaining 591 | // capacity from the buffer capacity. 592 | self.buffers[enqueuing.buffer_index()].with(|buf| { 593 | // SAFETY: As long as enqueuing is ongoing, i.e. a reserved slice has not been acknowledged 594 | // in the buffer length (see `BufferWithLength::insert`), buffer cannot be dequeued and can 595 | // thus be accessed immutably (see `Queue::try_dequeue_spin`). 596 | let buffer = unsafe { &*buf }; 597 | let index = buffer.capacity() - enqueuing.remaining_capacity(); 598 | // SAFETY: Compare-and-swap makes indexes not overlap, and the buffer is cleared before 599 | // reusing it for enqueuing (see `Queue::release`). 600 | unsafe { value.insert_into(buffer, index) }; 601 | }); 602 | self.buffers_length[enqueuing.buffer_index()].fetch_add(value_size.get(), Ordering::AcqRel); 603 | // Notify dequeuer. 604 | self.notify.notify_dequeue(); 605 | Ok(()) 606 | } 607 | 608 | /// Tries dequeuing a buffer with all enqueued values from the queue. 609 | /// 610 | /// This method swaps the current buffer with the other one, which is empty. All concurrent 611 | /// enqueuing must end before the current buffer is really dequeuable, so the queue may 612 | /// be in a transitory state where `try_dequeue` must be retried. In this state, after a spin 613 | /// loop, this method will return a [`TryDequeueError::Pending`] error. 
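///
/// A caller will typically just retry in that case; here is a minimal sketch (assuming that
/// spinning until the concurrent enqueuings complete is acceptable):
/// ```
/// # use std::ops::Deref;
/// # use swap_buffer_queue::Queue;
/// # use swap_buffer_queue::buffer::VecBuffer;
/// # use swap_buffer_queue::error::TryDequeueError;
/// # let queue: Queue<VecBuffer<usize>> = Queue::with_capacity(42);
/// # queue.try_enqueue([0]).unwrap();
/// let slice = loop {
///     match queue.try_dequeue() {
///         Ok(slice) => break slice,
///         // transitory state: concurrent enqueuings are still finishing, so retry
///         Err(TryDequeueError::Pending) => std::hint::spin_loop(),
///         Err(err) => panic!("unexpected error: {err:?}"),
///     }
/// };
/// assert_eq!(slice.deref(), &[0]);
/// ```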
614 | /// 615 | /// Dequeuing also fails if the queue is empty, or if it is closed. Moreover, as the algorithm 616 | /// is MPSC, dequeuing is protected against concurrent calls, failing with 617 | /// [`TryDequeueError::Conflict`] error. 618 | /// 619 | /// It returns a [`BufferSlice`], which holds, as its name may indicate, a reference to the 620 | /// dequeued buffer. That's why, the concurrent dequeuing protection is maintained for the 621 | /// lifetime of the buffer slice. 622 | /// 623 | /// # Examples 624 | /// ``` 625 | /// # use std::ops::Deref; 626 | /// # use swap_buffer_queue::Queue; 627 | /// # use swap_buffer_queue::buffer::VecBuffer; 628 | /// # use swap_buffer_queue::error::TryDequeueError; 629 | /// let queue: Queue> = Queue::with_capacity(42); 630 | /// queue.try_enqueue([0]).unwrap(); 631 | /// queue.try_enqueue([1]).unwrap(); 632 | /// { 633 | /// let slice = queue.try_dequeue().unwrap(); 634 | /// assert_eq!(slice.deref(), &[0, 1]); 635 | /// // dequeuing cannot be done concurrently (`slice` is still in scope) 636 | /// assert_eq!(queue.try_dequeue().unwrap_err(), TryDequeueError::Conflict); 637 | /// } 638 | /// // let's close the queue 639 | /// queue.try_enqueue([2]).unwrap(); 640 | /// queue.close(); 641 | /// // queue can be dequeued while closed when not empty 642 | /// { 643 | /// let slice = queue.try_dequeue().unwrap(); 644 | /// assert_eq!(slice.deref(), &[2]); 645 | /// } 646 | /// assert_eq!(queue.try_dequeue().unwrap_err(), TryDequeueError::Closed) 647 | /// ``` 648 | pub fn try_dequeue(&self) -> Result, TryDequeueError> { 649 | self.try_dequeue_internal( 650 | self.lock_dequeuing()?, 651 | || self.notify.notify_enqueue(), 652 | Self::NO_RESIZE, 653 | ) 654 | } 655 | 656 | /// Closes the queue. 657 | /// 658 | /// Closed queue can no more accept enqueuing, but it can be dequeued while not empty. 659 | /// Calling this method on a closed queue has no effect. 660 | /// See [`reopen`](Queue::reopen) to reopen a closed queue. 661 | /// # Examples 662 | /// ``` 663 | /// # use std::ops::Deref; 664 | /// # use swap_buffer_queue::Queue; 665 | /// # use swap_buffer_queue::buffer::VecBuffer; 666 | /// # use swap_buffer_queue::error::{TryDequeueError, TryEnqueueError}; 667 | /// let queue: Queue> = Queue::with_capacity(42); 668 | /// queue.try_enqueue([0]).unwrap(); 669 | /// queue.close(); 670 | /// assert!(queue.is_closed()); 671 | /// assert_eq!(queue.try_enqueue([1]), Err(TryEnqueueError::Closed([1]))); 672 | /// assert_eq!(queue.try_dequeue().unwrap().deref(), &[0]); 673 | /// assert_eq!(queue.try_dequeue().unwrap_err(), TryDequeueError::Closed); 674 | /// ``` 675 | pub fn close(&self) { 676 | EnqueuingCapacity::close(&self.enqueuing_capacity, Ordering::AcqRel); 677 | self.notify.notify_dequeue(); 678 | self.notify.notify_enqueue(); 679 | } 680 | } 681 | 682 | impl Queue 683 | where 684 | B: Buffer + Resize, 685 | N: Notify, 686 | { 687 | /// Tries dequeuing a buffer with all enqueued values from the queue, and resizes the next 688 | /// buffer to be used for enqueuing. 689 | /// 690 | /// This method is an extension of [`try_dequeue`](Queue::try_dequeue) method. In fact, 691 | /// before swapping the buffers, next one is empty and protected, so it can be resized, and 692 | /// it is also possible to add values in it before making it available for enqueuing. 693 | /// This can be used to make the queue [unbounded](Queue#an-amortized-unbounded-recipe). 
694 | /// 695 | /// It is worth to be noted that only one buffer is resized, so it can lead to asymmetric buffers. 696 | /// 697 | /// # Examples 698 | /// ``` 699 | /// # use std::ops::Deref; 700 | /// # use swap_buffer_queue::Queue; 701 | /// # use swap_buffer_queue::buffer::VecBuffer; 702 | /// # use swap_buffer_queue::error::TryEnqueueError; 703 | /// let queue: Queue> = Queue::with_capacity(1); 704 | /// queue.try_enqueue([0]).unwrap(); 705 | /// // queue is full 706 | /// assert_eq!( 707 | /// queue.try_enqueue([1]), 708 | /// Err(TryEnqueueError::InsufficientCapacity([1])) 709 | /// ); 710 | /// // dequeue and resize, inserting elements before the buffer is available 711 | /// { 712 | /// let slice = queue 713 | /// .try_dequeue_and_resize(3, Some(|| std::iter::once([42]))) 714 | /// .unwrap(); 715 | /// assert_eq!(slice.deref(), &[0]); 716 | /// } 717 | /// // capacity has been increased 718 | /// queue.try_enqueue([1]).unwrap(); 719 | /// queue.try_enqueue([2]).unwrap(); 720 | /// let slice = queue.try_dequeue().unwrap(); 721 | /// assert_eq!(slice.deref(), &[42, 1, 2]); 722 | /// ``` 723 | /// 724 | /// ## An amortized unbounded recipe 725 | /// 726 | /// ```rust 727 | /// # use std::ops::Deref; 728 | /// # use std::sync::Mutex; 729 | /// # use swap_buffer_queue::Queue; 730 | /// # use swap_buffer_queue::buffer::{BufferSlice, InsertIntoBuffer, VecBuffer}; 731 | /// # use swap_buffer_queue::error::{EnqueueError, TryDequeueError, TryEnqueueError}; 732 | /// # use swap_buffer_queue::notify::Notify; 733 | /// fn enqueue_unbounded( 734 | /// queue: &Queue>, 735 | /// overflow: &Mutex>, 736 | /// mut value: T, 737 | /// ) -> Result<(), EnqueueError<[T; 1]>> { 738 | /// // first, try to enqueue normally 739 | /// match queue.try_enqueue([value]) { 740 | /// Err(TryEnqueueError::InsufficientCapacity([v])) => value = v, 741 | /// res => return res, 742 | /// }; 743 | /// // if the enqueuing fails, lock the overflow 744 | /// let mut guard = overflow.lock().unwrap(); 745 | /// // retry to enqueue (we never know what happened during lock acquisition) 746 | /// match queue.try_enqueue([value]) { 747 | /// Err(TryEnqueueError::InsufficientCapacity([v])) => value = v, 748 | /// res => return res, 749 | /// }; 750 | /// // then push the values to the overflow vector 751 | /// guard.push([value]); 752 | /// drop(guard); 753 | /// // notify possible waiting dequeue 754 | /// queue.notify().notify_dequeue(); 755 | /// Ok(()) 756 | /// } 757 | /// 758 | /// fn try_dequeue_unbounded<'a, T>( 759 | /// queue: &'a Queue>, 760 | /// overflow: &Mutex>, 761 | /// ) -> Result, ()>, TryDequeueError> { 762 | /// // lock the overflow and use `try_dequeue_and_resize` to drain the overflow into the 763 | /// // queue 764 | /// let mut guard = overflow.lock().unwrap(); 765 | /// let vec = &mut guard; 766 | /// // `{ vec }` is a trick to get the correct FnOnce inference 767 | /// // https://stackoverflow.com/questions/74814588/why-does-rust-infer-fnmut-instead-of-fnonce-for-this-closure-even-though-inferr 768 | /// queue.try_dequeue_and_resize(queue.capacity() + vec.len(), Some(|| { vec }.drain(..))) 769 | /// } 770 | /// 771 | /// // queue is initialized with zero capacity 772 | /// let queue: Queue> = Queue::new(); 773 | /// let overflow = Mutex::new(Vec::new()); 774 | /// assert_eq!(queue.capacity(), 0); 775 | /// enqueue_unbounded(&queue, &overflow, 0).unwrap(); 776 | /// assert_eq!( 777 | /// try_dequeue_unbounded(&queue, &overflow).unwrap().deref(), 778 | /// &[0] 779 | /// ); 780 | /// 
enqueue_unbounded(&queue, &overflow, 1).unwrap(); 781 | /// enqueue_unbounded(&queue, &overflow, 2).unwrap(); 782 | /// assert_eq!( 783 | /// try_dequeue_unbounded(&queue, &overflow).unwrap().deref(), 784 | /// &[1, 2] 785 | /// ); 786 | /// ``` 787 | pub fn try_dequeue_and_resize( 788 | &self, 789 | capacity: impl Into>, 790 | insert: Option I>, 791 | ) -> Result, TryDequeueError> 792 | where 793 | I: IntoIterator, 794 | I::Item: InsertIntoBuffer, 795 | { 796 | self.try_dequeue_internal( 797 | self.lock_dequeuing()?, 798 | || self.notify.notify_enqueue(), 799 | Some(move |buffer_mut: &mut B| { 800 | let resized_capa = capacity 801 | .into() 802 | .filter(|capa| *capa != buffer_mut.capacity()); 803 | if let Some(capa) = resized_capa { 804 | EnqueuingCapacity::check_overflow(capa); 805 | buffer_mut.resize(capa); 806 | } 807 | let mut length = 0; 808 | if let Some(insert) = insert { 809 | for value in insert() { 810 | let Some(value_size) = NonZeroUsize::new(value.size()) else { 811 | continue; 812 | }; 813 | if value_size.get() > buffer_mut.capacity() { 814 | break; 815 | } 816 | // SAFETY: Ranges `length..length+value.size()` will obviously not overlap, 817 | // and the buffer is cleared before reusing it for enqueuing 818 | // (see `Queue::release`) 819 | unsafe { value.insert_into(buffer_mut, length) }; 820 | length += value_size.get(); 821 | } 822 | } 823 | (resized_capa.is_some(), length) 824 | }), 825 | ) 826 | } 827 | } 828 | 829 | impl Queue 830 | where 831 | B: Buffer + Drain, 832 | { 833 | pub(crate) fn remove(&self, buffer_index: usize, index: usize) -> B::Value { 834 | debug_assert_eq!( 835 | self.dequeuing_length.load(Ordering::Relaxed), 836 | DEQUEUING_LOCKED 837 | ); 838 | // SAFETY: Dequeued buffer pointed by buffer index can be accessed mutably 839 | // (see `Queue::try_dequeue_spin`). 840 | // SAFETY: Index comes from an iterator on the dequeued slice, so it has 841 | // been previously inserted, and can be removed. 842 | self.buffers[buffer_index].with_mut(|buf| unsafe { (*buf).remove(index) }) 843 | } 844 | } 845 | 846 | impl Default for Queue 847 | where 848 | B: Buffer, 849 | N: Default, 850 | { 851 | fn default() -> Self { 852 | Self::new() 853 | } 854 | } 855 | 856 | impl fmt::Debug for Queue 857 | where 858 | B: Buffer, 859 | N: fmt::Debug, 860 | { 861 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 862 | f.debug_struct("Queue") 863 | .field("capacity", &self.capacity()) 864 | .field("len", &self.len()) 865 | .field("notify", &self.notify) 866 | .finish() 867 | } 868 | } 869 | 870 | impl Drop for Queue 871 | where 872 | B: Buffer, 873 | { 874 | fn drop(&mut self) { 875 | self.lock_dequeuing() 876 | .and_then(|deq| self.try_dequeue_internal(deq, || (), Self::NO_RESIZE)) 877 | .ok(); 878 | } 879 | } 880 | -------------------------------------------------------------------------------- /src/synchronized.rs: -------------------------------------------------------------------------------- 1 | //! Synchronization primitives for [`Queue`]. 2 | //! 3 | //! It supports both synchronous and asynchronous API. [`SynchronizedQueue`] is just an alias 4 | //! for a [`Queue`] using [`SynchronizedNotifier`]. 5 | //! 6 | //! # Examples 7 | //! ```rust 8 | //! # use std::sync::Arc; 9 | //! # use swap_buffer_queue::SynchronizedQueue; 10 | //! # use swap_buffer_queue::buffer::VecBuffer; 11 | //! let queue: Arc>> = 12 | //! Arc::new(SynchronizedQueue::with_capacity(1)); 13 | //! let queue_clone = queue.clone(); 14 | //! std::thread::spawn(move || { 15 | //! 
queue_clone.enqueue([0]).unwrap(); 16 | //! queue_clone.enqueue([1]).unwrap(); 17 | //! }); 18 | //! assert_eq!(queue.dequeue().unwrap()[0], 0); 19 | //! assert_eq!(queue.dequeue().unwrap()[0], 1); 20 | //! ``` 21 | use std::{ 22 | fmt, 23 | future::poll_fn, 24 | iter, 25 | task::{Context, Poll}, 26 | time::{Duration, Instant}, 27 | }; 28 | 29 | use crate::{ 30 | buffer::{Buffer, BufferSlice, Drain, InsertIntoBuffer}, 31 | error::{DequeueError, EnqueueError, TryDequeueError, TryEnqueueError}, 32 | loom::{hint, thread, SPIN_LIMIT}, 33 | notify::Notify, 34 | synchronized::{atomic_waker::AtomicWaker, waker_list::WakerList}, 35 | Queue, 36 | }; 37 | 38 | mod atomic_waker; 39 | mod waker; 40 | mod waker_list; 41 | 42 | /// [`Queue`] with [`SynchronizedNotifier`] 43 | pub type SynchronizedQueue = Queue; 44 | 45 | /// Synchronized (a)synchronous [`Notify`] implementation. 46 | #[derive(Default)] 47 | pub struct SynchronizedNotifier { 48 | enqueuers: WakerList, 49 | dequeuer: AtomicWaker, 50 | } 51 | 52 | impl fmt::Debug for SynchronizedNotifier { 53 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 54 | f.debug_struct("SynchronizedNotifier").finish() 55 | } 56 | } 57 | 58 | impl Notify for SynchronizedNotifier { 59 | #[inline] 60 | fn notify_dequeue(&self) { 61 | self.dequeuer.wake(); 62 | } 63 | 64 | #[inline] 65 | fn notify_enqueue(&self) { 66 | self.enqueuers.wake(); 67 | } 68 | } 69 | 70 | impl SynchronizedQueue 71 | where 72 | B: Buffer, 73 | { 74 | #[inline] 75 | fn enqueue_sync( 76 | &self, 77 | mut value: T, 78 | deadline: Option, 79 | ) -> Result<(), TryEnqueueError> 80 | where 81 | T: InsertIntoBuffer, 82 | { 83 | loop { 84 | match try_enqueue(self, value, None) { 85 | Ok(res) => return res, 86 | Err(v) => value = v, 87 | }; 88 | if wait_until(deadline) { 89 | return self.try_enqueue(value); 90 | } 91 | } 92 | } 93 | 94 | /// Enqueues the given value inside the queue. 95 | /// 96 | /// This method extends [`try_enqueue`](Queue::try_enqueue) by waiting synchronously 97 | /// [`SynchronizedNotifier::notify_enqueue`] call, i.e. when a buffer is dequeued, in case of 98 | /// insufficient capacity. 
99 | /// 100 | /// # Examples 101 | /// ``` 102 | /// # use std::ops::Deref; 103 | /// # use std::sync::Arc; 104 | /// # use std::time::Duration; 105 | /// # use swap_buffer_queue::SynchronizedQueue; 106 | /// # use swap_buffer_queue::buffer::VecBuffer; 107 | /// # use swap_buffer_queue::error::{EnqueueError, TryEnqueueError}; 108 | /// let queue: Arc>> = 109 | /// Arc::new(SynchronizedQueue::with_capacity(1)); 110 | /// queue.try_enqueue([0]).unwrap(); 111 | /// assert_eq!( 112 | /// queue.try_enqueue([1]), 113 | /// Err(TryEnqueueError::InsufficientCapacity([1])) 114 | /// ); 115 | /// // queue is full, let's spawn an enqueuing task and dequeue 116 | /// let queue_clone = queue.clone(); 117 | /// let task = std::thread::spawn(move || queue_clone.enqueue([1])); 118 | /// std::thread::sleep(Duration::from_millis(1)); 119 | /// assert_eq!(queue.try_dequeue().unwrap().deref(), &[0]); 120 | /// // enqueuing task has succeeded 121 | /// task.join().unwrap().unwrap(); 122 | /// assert_eq!(queue.try_dequeue().unwrap().deref(), &[1]); 123 | /// // let's close the queue 124 | /// queue.try_enqueue([2]).unwrap(); 125 | /// let queue_clone = queue.clone(); 126 | /// let task = std::thread::spawn(move || queue_clone.enqueue([3])); 127 | /// std::thread::sleep(Duration::from_millis(1)); 128 | /// queue.close(); 129 | /// assert_eq!(task.join().unwrap(), Err(EnqueueError::Closed([3]))); 130 | /// ``` 131 | pub fn enqueue(&self, value: T) -> Result<(), EnqueueError> 132 | where 133 | T: InsertIntoBuffer, 134 | { 135 | self.enqueue_sync(value, None) 136 | } 137 | 138 | /// Tries enqueuing the given value inside the queue with a timeout. 139 | /// 140 | /// This method extends [`try_enqueue`](Queue::try_enqueue) by waiting synchronously (with a 141 | /// timeout) [`SynchronizedNotifier::notify_enqueue`] call, i.e. when a buffer is dequeued, in case of 142 | /// insufficient capacity. 143 | /// 144 | /// # Examples 145 | /// ``` 146 | /// # use std::ops::Deref; 147 | /// # use std::sync::Arc; 148 | /// # use std::time::Duration; 149 | /// # use swap_buffer_queue::SynchronizedQueue; 150 | /// # use swap_buffer_queue::buffer::VecBuffer; 151 | /// # use swap_buffer_queue::error::{EnqueueError, TryEnqueueError}; 152 | /// let queue: Arc>> = 153 | /// Arc::new(SynchronizedQueue::with_capacity(1)); 154 | /// queue.try_enqueue([0]).unwrap(); 155 | /// assert_eq!( 156 | /// queue.enqueue_timeout([1], Duration::from_millis(1)), 157 | /// Err(TryEnqueueError::InsufficientCapacity([1])) 158 | /// ); 159 | /// let queue_clone = queue.clone(); 160 | /// let task = std::thread::spawn(move || { 161 | /// std::thread::sleep(Duration::from_millis(1)); 162 | /// queue_clone.try_dequeue().unwrap(); 163 | /// }); 164 | /// queue.enqueue_timeout([1], Duration::from_secs(1)).unwrap(); 165 | /// ``` 166 | pub fn enqueue_timeout(&self, value: T, timeout: Duration) -> Result<(), TryEnqueueError> 167 | where 168 | T: InsertIntoBuffer, 169 | { 170 | self.enqueue_sync(value, Some(Instant::now() + timeout)) 171 | } 172 | 173 | /// Enqueues the given value inside the queue. 174 | /// 175 | /// This method extends [`try_enqueue`](Queue::try_enqueue) by waiting asynchronously 176 | /// [`SynchronizedNotifier::notify_enqueue`] call, i.e. when a buffer is dequeued, in case of 177 | /// insufficient capacity. 
178 | /// 179 | /// # Examples 180 | /// ``` 181 | /// # use std::ops::Deref; 182 | /// # use std::sync::Arc; 183 | /// # use swap_buffer_queue::SynchronizedQueue; 184 | /// # use swap_buffer_queue::buffer::VecBuffer; 185 | /// # use swap_buffer_queue::error::{EnqueueError, TryEnqueueError}; 186 | /// # tokio_test::block_on(async { 187 | /// let queue: Arc>> = 188 | /// Arc::new(SynchronizedQueue::with_capacity(1)); 189 | /// queue.try_enqueue([0]).unwrap(); 190 | /// assert_eq!( 191 | /// queue.try_enqueue([0]), 192 | /// Err(TryEnqueueError::InsufficientCapacity([0])) 193 | /// ); 194 | /// // queue is full, let's spawn an enqueuing task and dequeue 195 | /// let queue_clone = queue.clone(); 196 | /// let task = tokio::spawn(async move { queue_clone.enqueue_async([1]).await }); 197 | /// assert_eq!(queue.try_dequeue().unwrap().deref(), &[0]); 198 | /// // enqueuing task has succeeded 199 | /// task.await.unwrap().unwrap(); 200 | /// assert_eq!(queue.try_dequeue().unwrap().deref(), &[1]); 201 | /// // let's close the queue 202 | /// queue.try_enqueue([2]).unwrap(); 203 | /// let queue_clone = queue.clone(); 204 | /// let task = tokio::spawn(async move { queue_clone.enqueue_async([3]).await }); 205 | /// queue.close(); 206 | /// assert_eq!(task.await.unwrap(), Err(EnqueueError::Closed([3]))); 207 | /// # }) 208 | /// ``` 209 | pub async fn enqueue_async(&self, value: T) -> Result<(), EnqueueError> 210 | where 211 | T: InsertIntoBuffer, 212 | { 213 | let mut value = Some(value); 214 | poll_fn(|cx| { 215 | let v = value.take().unwrap(); 216 | match try_enqueue(self, v, Some(cx)) { 217 | Ok(res) => return Poll::Ready(res), 218 | Err(v) => value.replace(v), 219 | }; 220 | Poll::Pending 221 | }) 222 | .await 223 | } 224 | 225 | fn dequeue_sync( 226 | &self, 227 | deadline: Option, 228 | ) -> Result, TryDequeueError> { 229 | loop { 230 | if let Some(res) = try_dequeue(self, None) { 231 | return res; 232 | } 233 | if wait_until(deadline) { 234 | return self.try_dequeue(); 235 | } 236 | } 237 | } 238 | 239 | /// Dequeues a buffer with all enqueued values from the queue. 240 | /// 241 | /// This method extends [`try_dequeue`](Queue::try_dequeue) by waiting synchronously 242 | /// [`SynchronizedNotifier::notify_dequeue`] call, i.e. when a value is enqueued, in case of 243 | /// empty queue. 
244 | /// 245 | /// # Examples 246 | /// ``` 247 | /// # use std::ops::Deref; 248 | /// # use std::sync::Arc; 249 | /// # use swap_buffer_queue::SynchronizedQueue; 250 | /// # use swap_buffer_queue::buffer::VecBuffer; 251 | /// # use swap_buffer_queue::error::{DequeueError, TryDequeueError}; 252 | /// let queue: Arc>> = 253 | /// Arc::new(SynchronizedQueue::with_capacity(1)); 254 | /// assert_eq!(queue.try_dequeue().unwrap_err(), TryDequeueError::Empty); 255 | /// // queue is empty, let's spawn a dequeuing task and enqueue 256 | /// let queue_clone = queue.clone(); 257 | /// let task = std::thread::spawn(move || { 258 | /// Ok::<_, DequeueError>(queue_clone.dequeue()?.into_iter().collect::>()) 259 | /// }); 260 | /// queue.try_enqueue([0]).unwrap(); 261 | /// // dequeuing task has succeeded 262 | /// assert_eq!(task.join().unwrap().unwrap().deref(), &[0]); 263 | /// // let's close the queue 264 | /// let queue_clone = queue.clone(); 265 | /// let task = std::thread::spawn(move || { 266 | /// Ok::<_, DequeueError>(queue_clone.dequeue()?.into_iter().collect::>()) 267 | /// }); 268 | /// queue.close(); 269 | /// assert_eq!(task.join().unwrap().unwrap_err(), DequeueError::Closed); 270 | /// ``` 271 | pub fn dequeue(&self) -> Result, DequeueError> { 272 | self.dequeue_sync(None).map_err(dequeue_err) 273 | } 274 | 275 | /// Tries dequeuing a buffer with all enqueued values from the queue with a timeout. 276 | /// 277 | /// This method extends [`try_dequeue`](Queue::try_dequeue) by waiting synchronously, with a 278 | /// timeout, [`SynchronizedNotifier::notify_dequeue`] call, i.e. when a value is enqueued, in case of 279 | /// empty queue. 280 | /// 281 | /// # Examples 282 | /// ``` 283 | /// # use std::ops::Deref; 284 | /// # use std::sync::Arc; 285 | /// # use std::time::Duration; 286 | /// # use swap_buffer_queue::SynchronizedQueue; 287 | /// # use swap_buffer_queue::buffer::VecBuffer; 288 | /// # use swap_buffer_queue::error::{DequeueError, TryDequeueError}; 289 | /// let queue: Arc>> = 290 | /// Arc::new(SynchronizedQueue::with_capacity(1)); 291 | /// assert_eq!( 292 | /// queue.dequeue_timeout(Duration::from_millis(1)).unwrap_err(), 293 | /// TryDequeueError::Empty 294 | /// ); 295 | /// let queue_clone = queue.clone(); 296 | /// let task = std::thread::spawn(move || { 297 | /// std::thread::sleep(Duration::from_millis(1)); 298 | /// queue_clone.try_enqueue([0]).unwrap(); 299 | /// }); 300 | /// assert_eq!( 301 | /// queue 302 | /// .dequeue_timeout(Duration::from_secs(1)) 303 | /// .unwrap() 304 | /// .deref(), 305 | /// &[0] 306 | /// ); 307 | /// ``` 308 | pub fn dequeue_timeout( 309 | &self, 310 | timeout: Duration, 311 | ) -> Result, TryDequeueError> { 312 | self.dequeue_sync(Some(Instant::now() + timeout)) 313 | } 314 | 315 | /// Dequeues a buffer with all enqueued values from the queue. 316 | /// 317 | /// This method extends [`try_dequeue`](Queue::try_dequeue) by waiting asynchronously 318 | /// [`SynchronizedNotifier::notify_dequeue`] call, i.e. when a value is enqueued, in case of 319 | /// empty queue. 
320 | /// 321 | /// # Examples 322 | /// ``` 323 | /// # use std::ops::Deref; 324 | /// # use std::sync::Arc; 325 | /// # use swap_buffer_queue::SynchronizedQueue; 326 | /// # use swap_buffer_queue::buffer::VecBuffer; 327 | /// # use swap_buffer_queue::error::{DequeueError, TryDequeueError}; 328 | /// # tokio_test::block_on(async { 329 | /// let queue: Arc>> = 330 | /// Arc::new(SynchronizedQueue::with_capacity(1)); 331 | /// assert_eq!(queue.try_dequeue().unwrap_err(), TryDequeueError::Empty); 332 | /// // queue is empty, let's spawn a dequeuing task and enqueue 333 | /// let queue_clone = queue.clone(); 334 | /// let task = tokio::spawn(async move { 335 | /// Ok::<_, DequeueError>( 336 | /// queue_clone 337 | /// .dequeue_async() 338 | /// .await? 339 | /// .into_iter() 340 | /// .collect::>(), 341 | /// ) 342 | /// }); 343 | /// queue.try_enqueue([0]).unwrap(); 344 | /// // dequeuing task has succeeded 345 | /// assert_eq!(task.await.unwrap().unwrap().deref(), &[0]); 346 | /// // let's close the queue 347 | /// let queue_clone = queue.clone(); 348 | /// let task = tokio::spawn(async move { 349 | /// Ok::<_, DequeueError>( 350 | /// queue_clone 351 | /// .dequeue_async() 352 | /// .await? 353 | /// .into_iter() 354 | /// .collect::>(), 355 | /// ) 356 | /// }); 357 | /// queue.close(); 358 | /// assert_eq!(task.await.unwrap().unwrap_err(), DequeueError::Closed); 359 | /// # }) 360 | /// ``` 361 | pub async fn dequeue_async( 362 | &self, 363 | ) -> Result, DequeueError> { 364 | poll_fn(|cx| { 365 | if let Some(res) = try_dequeue(self, Some(cx)) { 366 | return Poll::Ready(res.map_err(dequeue_err)); 367 | } 368 | Poll::Pending 369 | }) 370 | .await 371 | } 372 | } 373 | 374 | impl SynchronizedQueue 375 | where 376 | B: Buffer + Drain, 377 | { 378 | /// Returns an iterator over the elements of the queue (see [`BufferIter`](crate::buffer::BufferIter)). 379 | /// 380 | /// # Examples 381 | /// ``` 382 | /// # use swap_buffer_queue::SynchronizedQueue; 383 | /// # use swap_buffer_queue::buffer::VecBuffer; 384 | /// let queue: SynchronizedQueue> = SynchronizedQueue::with_capacity(42); 385 | /// queue.try_enqueue([0]).unwrap(); 386 | /// queue.try_enqueue([1]).unwrap(); 387 | /// 388 | /// let mut iter = queue.iter(); 389 | /// assert_eq!(iter.next(), Some(0)); 390 | /// drop(iter); 391 | /// let mut iter = queue.iter(); 392 | /// assert_eq!(iter.next(), Some(1)); 393 | /// queue.close(); // close in order to stop the iterator 394 | /// assert_eq!(iter.next(), None); 395 | /// ``` 396 | pub fn iter(&self) -> impl Iterator + '_ { 397 | iter::repeat_with(|| self.dequeue()) 398 | .map_while(|res| res.ok()) 399 | .flatten() 400 | } 401 | 402 | #[cfg(feature = "stream")] 403 | /// Returns a stream over the elements of the queue (see [`BufferIter`](crate::buffer::BufferIter)). 
404 | /// 405 | /// # Examples 406 | /// ``` 407 | /// # use futures_util::StreamExt; 408 | /// # use swap_buffer_queue::SynchronizedQueue; 409 | /// # use swap_buffer_queue::buffer::VecBuffer; 410 | /// # tokio_test::block_on(async { 411 | /// let queue: SynchronizedQueue> = SynchronizedQueue::with_capacity(42); 412 | /// queue.try_enqueue([0]).unwrap(); 413 | /// queue.try_enqueue([1]).unwrap(); 414 | /// 415 | /// let mut stream = Box::pin(queue.stream()); 416 | /// assert_eq!(stream.next().await, Some(0)); 417 | /// drop(stream); 418 | /// let mut stream = Box::pin(queue.stream()); 419 | /// assert_eq!(stream.next().await, Some(1)); 420 | /// queue.close(); // close in order to stop the stream 421 | /// assert_eq!(stream.next().await, None); 422 | /// # }) 423 | /// ``` 424 | pub fn stream(&self) -> impl futures_core::Stream + '_ { 425 | use futures_util::{stream, StreamExt}; 426 | stream::repeat_with(|| stream::once(self.dequeue_async())) 427 | .flatten() 428 | .take_while(|res| { 429 | let is_ok = res.is_ok(); 430 | async move { is_ok } 431 | }) 432 | .flat_map(|res| stream::iter(res.unwrap())) 433 | } 434 | } 435 | 436 | #[inline] 437 | fn try_enqueue( 438 | queue: &SynchronizedQueue, 439 | mut value: T, 440 | cx: Option<&Context>, 441 | ) -> Result>, T> 442 | where 443 | B: Buffer, 444 | T: InsertIntoBuffer, 445 | { 446 | for _ in 0..SPIN_LIMIT { 447 | match queue.try_enqueue(value) { 448 | Err(TryEnqueueError::InsufficientCapacity(v)) if v.size() <= queue.capacity() => { 449 | value = v; 450 | } 451 | res => return Ok(res), 452 | }; 453 | hint::spin_loop(); 454 | } 455 | queue.notify().enqueuers.register(cx); 456 | match queue.try_enqueue(value) { 457 | Err(TryEnqueueError::InsufficientCapacity(v)) if v.size() <= queue.capacity() => Err(v), 458 | res => Ok(res), 459 | } 460 | } 461 | 462 | #[inline] 463 | fn try_dequeue<'a, B>( 464 | queue: &'a SynchronizedQueue, 465 | cx: Option<&Context>, 466 | ) -> Option, TryDequeueError>> 467 | where 468 | B: Buffer, 469 | { 470 | for _ in 0..SPIN_LIMIT { 471 | match queue.try_dequeue() { 472 | Err(TryDequeueError::Empty | TryDequeueError::Pending) => {} 473 | res => return Some(res), 474 | } 475 | hint::spin_loop(); 476 | } 477 | queue.notify().dequeuer.register(cx); 478 | match queue.try_dequeue() { 479 | Err(TryDequeueError::Empty | TryDequeueError::Pending) => None, 480 | res => Some(res), 481 | } 482 | } 483 | 484 | #[inline] 485 | fn dequeue_err(error: TryDequeueError) -> DequeueError { 486 | match error { 487 | TryDequeueError::Closed => DequeueError::Closed, 488 | TryDequeueError::Conflict => DequeueError::Conflict, 489 | _ => unreachable!(), 490 | } 491 | } 492 | 493 | #[inline] 494 | fn wait_until(deadline: Option) -> bool { 495 | match deadline.map(|d| d.checked_duration_since(Instant::now())) { 496 | #[cfg(not(all(loom, test)))] 497 | Some(Some(timeout)) => thread::park_timeout(timeout), 498 | #[cfg(all(loom, test))] 499 | Some(Some(_)) => panic!("loom doesn't support park_timeout"), 500 | Some(None) => return true, 501 | None => thread::park(), 502 | } 503 | false 504 | } 505 | -------------------------------------------------------------------------------- /src/synchronized/atomic_waker.rs: -------------------------------------------------------------------------------- 1 | use std::task; 2 | 3 | use crate::{ 4 | loom::{ 5 | hint, 6 | sync::atomic::{AtomicUsize, Ordering}, 7 | thread, LoomUnsafeCell, 8 | }, 9 | synchronized::waker::Waker, 10 | }; 11 | 12 | // I have to reimplement AtomicWaker because it doesn't use standard 
`Waker`, 13 | // see https://internals.rust-lang.org/t/thread-park-waker-a-waker-calling-thread-unpark/19114 14 | pub(super) struct AtomicWaker { 15 | state: AtomicUsize, 16 | waker: LoomUnsafeCell>, 17 | } 18 | 19 | const WAITING: usize = 0; 20 | const REGISTERING: usize = 0b01; 21 | const WAKING: usize = 0b10; 22 | 23 | impl AtomicWaker { 24 | pub fn new() -> Self { 25 | Self { 26 | state: AtomicUsize::new(WAITING), 27 | waker: LoomUnsafeCell::new(None), 28 | } 29 | } 30 | 31 | #[inline] 32 | pub(super) fn register(&self, cx: Option<&task::Context>) { 33 | match self 34 | .state 35 | .compare_exchange(WAITING, REGISTERING, Ordering::Acquire, Ordering::Acquire) 36 | .unwrap_or_else(|x| x) 37 | { 38 | WAITING => { 39 | // SAFETY: see `futures::task::AtomicWaker` implementation 40 | unsafe { 41 | self.waker.with_mut(|w| match &mut *w { 42 | Some(old_waker) if old_waker.will_wake(cx) => (), 43 | _ => *w = Some(Waker::new(cx)), 44 | }); 45 | let res = self.state.compare_exchange( 46 | REGISTERING, 47 | WAITING, 48 | Ordering::AcqRel, 49 | Ordering::Acquire, 50 | ); 51 | 52 | match res { 53 | Ok(_) => {} 54 | Err(actual) => { 55 | debug_assert_eq!(actual, REGISTERING | WAKING); 56 | let waker = self.waker.with_mut(|w| (*w).take()).unwrap(); 57 | self.state.swap(WAITING, Ordering::AcqRel); 58 | waker.wake(); 59 | } 60 | } 61 | } 62 | } 63 | WAKING => { 64 | match cx { 65 | Some(cx) => cx.waker().wake_by_ref(), 66 | None => thread::current().unpark(), 67 | } 68 | // SAFETY: see `AtomicWaker` implementation from tokio (it's needed by loom) 69 | hint::spin_loop(); 70 | } 71 | state => { 72 | debug_assert!(state == REGISTERING || state == REGISTERING | WAKING); 73 | } 74 | } 75 | } 76 | 77 | #[inline] 78 | pub(super) fn wake(&self) { 79 | match self.state.fetch_or(WAKING, Ordering::AcqRel) { 80 | WAITING => { 81 | // SAFETY: see `futures::task::AtomicWaker` implementation 82 | let waker = unsafe { self.waker.with_mut(|w| (*w).take()) }; 83 | self.state.fetch_and(!WAKING, Ordering::Release); 84 | if let Some(waker) = waker { 85 | waker.wake(); 86 | } 87 | } 88 | state => { 89 | debug_assert!( 90 | state == REGISTERING || state == REGISTERING | WAKING || state == WAKING 91 | ); 92 | } 93 | } 94 | } 95 | } 96 | 97 | impl Default for AtomicWaker { 98 | fn default() -> Self { 99 | Self::new() 100 | } 101 | } 102 | 103 | // SAFETY: see `futures::task::AtomicWaker` implementation 104 | unsafe impl Send for AtomicWaker {} 105 | 106 | // SAFETY: see `futures::task::AtomicWaker` implementation 107 | unsafe impl Sync for AtomicWaker {} 108 | 109 | #[cfg(all(test, loom))] 110 | mod tests { 111 | use std::{ 112 | future::poll_fn, 113 | sync::Arc, 114 | task::Poll::{Pending, Ready}, 115 | }; 116 | 117 | use loom::{ 118 | future::block_on, 119 | sync::atomic::{AtomicUsize, Ordering}, 120 | thread, 121 | }; 122 | 123 | use super::AtomicWaker; 124 | 125 | struct Chan { 126 | num: AtomicUsize, 127 | task: AtomicWaker, 128 | } 129 | #[test] 130 | fn basic_notification() { 131 | const NUM_NOTIFY: usize = 2; 132 | 133 | loom::model(|| { 134 | let chan = Arc::new(Chan { 135 | num: AtomicUsize::new(0), 136 | task: AtomicWaker::default(), 137 | }); 138 | 139 | for _ in 0..NUM_NOTIFY { 140 | let chan = chan.clone(); 141 | 142 | thread::spawn(move || { 143 | chan.num.fetch_add(1, Ordering::Relaxed); 144 | chan.task.wake(); 145 | }); 146 | } 147 | 148 | block_on(poll_fn(move |cx| { 149 | chan.task.register(Some(cx)); 150 | 151 | if NUM_NOTIFY == chan.num.load(Ordering::Relaxed) { 152 | return Ready(()); 153 | } 154 | 155 | 
Pending 156 | })); 157 | }); 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /src/synchronized/waker.rs: -------------------------------------------------------------------------------- 1 | use std::task; 2 | 3 | use crate::loom::thread; 4 | 5 | #[derive(Debug)] 6 | pub(super) enum Waker { 7 | Async(task::Waker), 8 | Sync(thread::Thread), 9 | } 10 | 11 | impl Waker { 12 | #[inline] 13 | pub(super) fn new(cx: Option<&task::Context>) -> Self { 14 | match cx { 15 | Some(cx) => Self::Async(cx.waker().clone()), 16 | None => Self::Sync(thread::current()), 17 | } 18 | } 19 | 20 | #[inline] 21 | pub(super) fn will_wake(&self, cx: Option<&task::Context>) -> bool { 22 | match (self, cx) { 23 | (Self::Async(w), Some(cx)) => w.will_wake(cx.waker()), 24 | _ => false, 25 | } 26 | } 27 | 28 | #[inline] 29 | pub(super) fn wake(self) { 30 | match self { 31 | Self::Async(waker) => waker.wake(), 32 | Self::Sync(thread) => thread.unpark(), 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/synchronized/waker_list.rs: -------------------------------------------------------------------------------- 1 | use std::task; 2 | 3 | use crossbeam_utils::CachePadded; 4 | 5 | use crate::{ 6 | loom::sync::{ 7 | atomic::{AtomicBool, Ordering}, 8 | Mutex, 9 | }, 10 | synchronized::waker::Waker, 11 | }; 12 | 13 | #[derive(Debug, Default)] 14 | pub(super) struct WakerList { 15 | wakers: Mutex>, 16 | non_empty: CachePadded, 17 | } 18 | 19 | impl WakerList { 20 | pub(super) fn register(&self, cx: Option<&task::Context>) { 21 | let waker = Waker::new(cx); 22 | let mut wakers = self.wakers.lock().unwrap(); 23 | if wakers.is_empty() { 24 | self.non_empty.store(true, Ordering::SeqCst); 25 | } 26 | wakers.push(waker); 27 | } 28 | 29 | #[inline] 30 | pub(super) fn wake(&self) { 31 | if self.non_empty.load(Ordering::Relaxed) 32 | && self 33 | .non_empty 34 | .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst) 35 | .is_ok() 36 | { 37 | self.wake_all(); 38 | } 39 | } 40 | 41 | // not inlined 42 | fn wake_all(&self) { 43 | for waker in self.wakers.lock().unwrap().drain(..) 
{ 44 | match waker { 45 | Waker::Async(waker) => waker.wake(), 46 | Waker::Sync(thread) => thread.unpark(), 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | mem, 3 | mem::MaybeUninit, 4 | ops::{Deref, DerefMut}, 5 | slice, 6 | }; 7 | 8 | pub(crate) fn init_array(default: impl Fn() -> T) -> [T; N] { 9 | // SAFETY: common MaybeUninit pattern, used in unstable `MaybeUninit::uninit_array` 10 | let mut array: [MaybeUninit; N] = unsafe { MaybeUninit::uninit().assume_init() }; 11 | for elem in &mut array { 12 | elem.write(default()); 13 | } 14 | // SAFETY: all elements have been initialized 15 | // I used `std::mem::transmute_copy` because `transmute` doesn't work here 16 | // see https://users.rust-lang.org/t/transmuting-a-generic-array/45645 17 | unsafe { mem::transmute_copy(&array) } 18 | } 19 | 20 | /// A hack for const-expression-sized array, as discussed here: 21 | /// https://users.rust-lang.org/t/is-slice-from-raw-parts-unsound-in-case-of-a-repr-c-struct-with-consecutive-arrays/88368 22 | #[repr(C)] 23 | pub(crate) struct ArrayWithHeaderAndTrailer< 24 | T, 25 | const HEADER_SIZE: usize, 26 | const N: usize, 27 | const TRAILER_SIZE: usize, 28 | > { 29 | header: [T; HEADER_SIZE], 30 | array: [T; N], 31 | trailer: [T; TRAILER_SIZE], 32 | } 33 | 34 | impl Deref 35 | for ArrayWithHeaderAndTrailer 36 | { 37 | type Target = [T]; 38 | fn deref(&self) -> &Self::Target { 39 | // SAFETY: see struct documentation 40 | unsafe { 41 | slice::from_raw_parts(self as *const _ as *const T, HEADER_SIZE + N + TRAILER_SIZE) 42 | } 43 | } 44 | } 45 | 46 | impl DerefMut 47 | for ArrayWithHeaderAndTrailer 48 | { 49 | fn deref_mut(&mut self) -> &mut Self::Target { 50 | // SAFETY: see struct documentation 51 | unsafe { 52 | slice::from_raw_parts_mut(self as *mut _ as *mut T, HEADER_SIZE + N + TRAILER_SIZE) 53 | } 54 | } 55 | } 56 | 57 | impl 58 | ArrayWithHeaderAndTrailer 59 | { 60 | pub(crate) fn new(default: impl Fn() -> T) -> Self { 61 | Self { 62 | header: init_array(&default), 63 | array: init_array(&default), 64 | trailer: init_array(&default), 65 | } 66 | } 67 | } 68 | 69 | impl Default 70 | for ArrayWithHeaderAndTrailer 71 | where 72 | T: Default + Clone, 73 | { 74 | fn default() -> Self { 75 | Self::new(T::default) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/write.rs: -------------------------------------------------------------------------------- 1 | //! [`Buffer`](`crate::buffer::Buffer`) implementations to be used with 2 | //! [`Write::write`](std::io::Write::write). 3 | //! 4 | //! [`WriteArrayBuffer`] and [`WriteVecBuffer`] are well suited when there are objects to be 5 | //! serialized with a known-serialization size. Indeed, objects can then be serialized directly on 6 | //! the queue's buffer, avoiding allocation. 7 | //! 8 | //! # Examples 9 | //! ```rust 10 | //! # use std::io::Write; 11 | //! # use swap_buffer_queue::{Queue, write::{WriteBytesSlice, WriteVecBuffer}}; 12 | //! // Creates a WriteVecBuffer queue with a 2-bytes header 13 | //! let queue: Queue> = Queue::with_capacity((1 << 16) - 1); 14 | //! queue 15 | //! .try_enqueue((256, |slice: &mut [u8]| { /* write the slice */ })) 16 | //! .unwrap(); 17 | //! queue 18 | //! .try_enqueue((42, |slice: &mut [u8]| { /* write the slice */ })) 19 | //! .unwrap(); 20 | //! 
let mut slice = queue.try_dequeue().unwrap(); 21 | //! // Adds a header with the len of the buffer 22 | //! let len = (slice.len() as u16).to_be_bytes(); 23 | //! slice.header().copy_from_slice(&len); 24 | //! // Let's pretend we have a writer 25 | //! let mut writer: Vec = Default::default(); 26 | //! assert_eq!(writer.write(slice.frame()).unwrap(), 300); 27 | //! ``` 28 | 29 | use core::ops::{Deref, DerefMut}; 30 | 31 | mod array; 32 | #[cfg(feature = "alloc")] 33 | mod vec; 34 | 35 | pub use array::WriteArrayBuffer; 36 | #[cfg(feature = "alloc")] 37 | pub use vec::WriteVecBuffer; 38 | 39 | /// A bytes slice with a `HEADER_SIZE`-bytes header and a `TRAILER_SIZE`-bytes trailer. 40 | /// 41 | /// It implements [`Deref`] and [`DerefMut`], targeting the *unframed* part of the slice, 42 | /// without the header and the trailer. The complete slice (with header and trailer) can be 43 | /// retrieved using [`frame`](BytesSlice::frame) or [`frame_mut`](BytesSlice::frame_mut) methods. 44 | /// 45 | /// # Examples 46 | /// 47 | /// ```rust 48 | /// # use std::ops::Deref; 49 | /// # use swap_buffer_queue::buffer::BufferSlice; 50 | /// # use swap_buffer_queue::Queue; 51 | /// # use swap_buffer_queue::write::{BytesSlice, WriteBytesSlice, WriteVecBuffer}; 52 | /// # let queue: Queue> = Queue::with_capacity(42); 53 | /// # queue.try_enqueue(&[2u8, 3, 4, 5] as &[_]).unwrap(); 54 | /// let mut slice: BufferSlice, _> /* = ... */; 55 | /// # slice = queue.try_dequeue().unwrap(); 56 | /// assert_eq!(slice.deref().deref(), &[2, 3, 4, 5]); 57 | /// slice.header().copy_from_slice(&[0, 1]); 58 | /// slice.trailer().copy_from_slice(&[6, 7, 8, 9]); 59 | /// assert_eq!(slice.frame(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) 60 | /// ``` 61 | #[derive(Debug)] 62 | pub struct BytesSlice<'a, const HEADER_SIZE: usize = 0, const TRAILER_SIZE: usize = 0>( 63 | pub(crate) &'a mut [u8], 64 | ); 65 | 66 | impl<'a, const HEADER_SIZE: usize, const TRAILER_SIZE: usize> 67 | BytesSlice<'a, HEADER_SIZE, TRAILER_SIZE> 68 | { 69 | #[inline] 70 | pub(crate) fn new(slice: &'a mut [u8]) -> Self { 71 | Self(slice) 72 | } 73 | 74 | /// Returns a mutable reference on the header part of the slice 75 | /// (see [examples](BytesSlice#examples)). 76 | #[inline] 77 | pub fn header(&mut self) -> &mut [u8] { 78 | &mut self.0[..HEADER_SIZE] 79 | } 80 | 81 | /// Returns a mutable reference on the trailer part of the slice 82 | /// (see [examples](BytesSlice#examples)). 83 | #[inline] 84 | pub fn trailer(&mut self) -> &mut [u8] { 85 | let len = self.0.len(); 86 | &mut self.0[len - TRAILER_SIZE..] 87 | } 88 | 89 | /// Returns the complete frame slice, with header and trailer 90 | /// (see [examples](BytesSlice#examples)). 91 | #[inline] 92 | pub fn frame(&self) -> &[u8] { 93 | self.0 94 | } 95 | 96 | /// Returns the complete mutable frame slice, with header and trailer 97 | /// (see [examples](BytesSlice#examples)). 
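///
/// A small sketch of patching bytes through the whole frame (reusing the same
/// `WriteVecBuffer` setup as the struct-level example above):
/// ```
/// # use swap_buffer_queue::Queue;
/// # use swap_buffer_queue::write::WriteVecBuffer;
/// # let queue: Queue<WriteVecBuffer<2, 4>> = Queue::with_capacity(42);
/// # queue.try_enqueue(&[2u8, 3, 4, 5] as &[_]).unwrap();
/// # let mut slice = queue.try_dequeue().unwrap();
/// // the first two bytes of the frame are the (still zeroed) header
/// slice.frame_mut()[0] = 0xFF;
/// assert_eq!(slice.frame()[0], 0xFF);
/// ```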
98 | #[inline] 99 | pub fn frame_mut(&mut self) -> &mut [u8] { 100 | self.0 101 | } 102 | } 103 | 104 | impl Deref 105 | for BytesSlice<'_, HEADER_SIZE, TRAILER_SIZE> 106 | { 107 | type Target = [u8]; 108 | 109 | #[inline] 110 | fn deref(&self) -> &Self::Target { 111 | &self.0[HEADER_SIZE..self.0.len() - TRAILER_SIZE] 112 | } 113 | } 114 | 115 | impl DerefMut 116 | for BytesSlice<'_, HEADER_SIZE, TRAILER_SIZE> 117 | { 118 | #[inline] 119 | fn deref_mut(&mut self) -> &mut Self::Target { 120 | let len = self.0.len(); 121 | &mut self.0[HEADER_SIZE..len - TRAILER_SIZE] 122 | } 123 | } 124 | 125 | /// Bytes slice writer, used by [`WriteArrayBuffer`] and [`WriteVecBuffer`]. 126 | pub trait WriteBytesSlice { 127 | /// Returns the size of the slice to be written. 128 | fn size(&self) -> usize; 129 | /// Writes the slice. 130 | fn write(self, slice: &mut [u8]); 131 | } 132 | 133 | impl WriteBytesSlice for &[u8] { 134 | #[inline] 135 | fn size(&self) -> usize { 136 | self.len() 137 | } 138 | #[inline] 139 | fn write(self, slice: &mut [u8]) { 140 | slice.copy_from_slice(self.as_ref()); 141 | } 142 | } 143 | 144 | impl WriteBytesSlice for (usize, F) 145 | where 146 | F: FnOnce(&mut [u8]), 147 | { 148 | #[inline] 149 | fn size(&self) -> usize { 150 | self.0 151 | } 152 | #[inline] 153 | fn write(self, slice: &mut [u8]) { 154 | self.1(slice); 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/write/array.rs: -------------------------------------------------------------------------------- 1 | use core::ops::Range; 2 | 3 | use crate::{ 4 | buffer::{Buffer, InsertIntoBuffer}, 5 | loom::{cell::Cell, LoomUnsafeCell}, 6 | utils::ArrayWithHeaderAndTrailer, 7 | write::{BytesSlice, WriteBytesSlice}, 8 | }; 9 | 10 | /// A `N`-bytes buffer with a `HEADER_SIZE`-bytes header and a `TRAILER_SIZE`-bytes trailer. 11 | /// 12 | /// The total size of the buffer is `N + HEADER_SIZE + TRAILER_SIZE`. This buffer is *no_std*. 
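///
/// # Examples
/// A minimal sketch (illustrative; it assumes `Queue::new`, the queue then starting with the
/// array's fixed capacity since this buffer cannot be resized):
/// ```
/// # use std::ops::Deref;
/// # use swap_buffer_queue::Queue;
/// # use swap_buffer_queue::write::WriteArrayBuffer;
/// let queue: Queue<WriteArrayBuffer<16, 2>> = Queue::new();
/// queue
///     .try_enqueue((4, |slice: &mut [u8]| slice.copy_from_slice(b"data")))
///     .unwrap();
/// let mut slice = queue.try_dequeue().unwrap();
/// slice.header().copy_from_slice(&4u16.to_be_bytes());
/// assert_eq!(slice.deref().deref(), b"data");
/// // 2-byte header + 4 payload bytes
/// assert_eq!(slice.frame().len(), 6);
/// ```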
13 | #[derive(Default)] 14 | pub struct WriteArrayBuffer< 15 | const N: usize, 16 | const HEADER_SIZE: usize = 0, 17 | const TRAILER_SIZE: usize = 0, 18 | >(ArrayWithHeaderAndTrailer, HEADER_SIZE, N, TRAILER_SIZE>); 19 | 20 | // SAFETY: Buffer values are `Copy` and already initialized 21 | unsafe impl Buffer 22 | for WriteArrayBuffer 23 | { 24 | type Slice<'a> = BytesSlice<'a, HEADER_SIZE, TRAILER_SIZE>; 25 | 26 | #[inline] 27 | fn capacity(&self) -> usize { 28 | N 29 | } 30 | 31 | #[inline] 32 | unsafe fn slice(&mut self, range: Range) -> Self::Slice<'_> { 33 | // SAFETY: [Cell] has the same layout as [u8] 34 | BytesSlice::new(unsafe { 35 | &mut *(&mut self.0[range.start..HEADER_SIZE + range.end + TRAILER_SIZE] as *mut _ 36 | as *mut [u8]) 37 | }) 38 | } 39 | 40 | #[inline] 41 | unsafe fn clear(&mut self, _range: Range) {} 42 | } 43 | 44 | // SAFETY: Buffer values are `Copy` and already initialized 45 | unsafe impl 46 | InsertIntoBuffer> for T 47 | where 48 | T: WriteBytesSlice, 49 | { 50 | fn size(&self) -> usize { 51 | WriteBytesSlice::size(self) 52 | } 53 | 54 | unsafe fn insert_into( 55 | self, 56 | buffer: &WriteArrayBuffer, 57 | index: usize, 58 | ) { 59 | let slice = 60 | &buffer.0[HEADER_SIZE + index..HEADER_SIZE + index + WriteBytesSlice::size(&self)]; 61 | // SAFETY: [Cell] has the same layout as UnsafeCell<[u8]> 62 | unsafe { 63 | (*(slice as *const _ as *const LoomUnsafeCell<[u8]>)).with_mut(|s| self.write(&mut *s)); 64 | }; 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/write/vec.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use core::ops::Range; 3 | 4 | use crate::{ 5 | buffer::{Buffer, InsertIntoBuffer, Resize}, 6 | loom::{cell::Cell, LoomUnsafeCell}, 7 | write::{BytesSlice, WriteBytesSlice}, 8 | }; 9 | 10 | /// A bytes buffer with a `HEADER_SIZE`-bytes header and a `TRAILER_SIZE`-bytes trailer. 
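///
/// # Examples
/// A minimal round-trip sketch (illustrative; the buffer capacity counts only the payload
/// bytes, not the header and trailer):
/// ```
/// # use std::ops::Deref;
/// # use swap_buffer_queue::Queue;
/// # use swap_buffer_queue::write::WriteVecBuffer;
/// let queue: Queue<WriteVecBuffer<2>> = Queue::with_capacity(8);
/// queue.try_enqueue(&[1u8, 2, 3] as &[_]).unwrap();
/// let slice = queue.try_dequeue().unwrap();
/// assert_eq!(slice.deref().deref(), &[1, 2, 3]);
/// // 2-byte header + 3 payload bytes
/// assert_eq!(slice.frame().len(), 5);
/// ```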
11 | #[derive(Default)] 12 | pub struct WriteVecBuffer( 13 | Box<[Cell]>, 14 | ); 15 | 16 | // SAFETY: Buffer values are `Copy` and already initialized 17 | unsafe impl Buffer 18 | for WriteVecBuffer 19 | { 20 | type Slice<'a> = BytesSlice<'a, HEADER_SIZE, TRAILER_SIZE>; 21 | 22 | #[inline] 23 | fn capacity(&self) -> usize { 24 | self.0.len().saturating_sub(HEADER_SIZE + TRAILER_SIZE) 25 | } 26 | 27 | #[inline] 28 | unsafe fn slice(&mut self, range: Range) -> Self::Slice<'_> { 29 | // SAFETY: [Cell] has the same layout as [u8] 30 | BytesSlice::new(unsafe { 31 | &mut *(&mut self.0[range.start..HEADER_SIZE + range.end + TRAILER_SIZE] as *mut _ 32 | as *mut [u8]) 33 | }) 34 | } 35 | 36 | #[inline] 37 | unsafe fn clear(&mut self, _range: Range) {} 38 | } 39 | 40 | // SAFETY: Buffer values are `Copy` and already initialized 41 | unsafe impl 42 | InsertIntoBuffer> for T 43 | where 44 | T: WriteBytesSlice, 45 | { 46 | #[inline] 47 | fn size(&self) -> usize { 48 | WriteBytesSlice::size(self) 49 | } 50 | 51 | #[inline] 52 | unsafe fn insert_into(self, buffer: &WriteVecBuffer, index: usize) { 53 | let slice = 54 | &buffer.0[HEADER_SIZE + index..HEADER_SIZE + index + WriteBytesSlice::size(&self)]; 55 | // SAFETY: [Cell] has the same layout as UnsafeCell<[u8]> 56 | unsafe { 57 | (*(slice as *const _ as *const LoomUnsafeCell<[u8]>)).with_mut(|s| self.write(&mut *s)); 58 | }; 59 | } 60 | } 61 | 62 | impl Resize 63 | for WriteVecBuffer 64 | { 65 | fn resize(&mut self, capacity: usize) { 66 | let full_capacity = HEADER_SIZE + capacity + TRAILER_SIZE; 67 | let buffer = alloc::vec![0u8; full_capacity].into_boxed_slice(); 68 | // SAFETY: [Cell] has the same layout as [u8] 69 | self.0 = unsafe { Box::from_raw(Box::into_raw(buffer) as *mut _) }; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/write_vectored.rs: -------------------------------------------------------------------------------- 1 | //! [`Buffer`](`crate::buffer::Buffer`) implementations to be used with [`Write::write_vectored`](std::io::Write::write_vectored). 2 | //! 3 | //! [`WriteVectoredArrayBuffer`] and [`WriteVectoredVecBuffer`] allows buffering a slice of 4 | //! [`IoSlice`], saving the cost of dequeuing io-slices one by one to collect them after. 5 | //! (Internally, two buffers are used: one of the values, and one for the io-slices) 6 | //! 7 | //! # Examples 8 | //! ```rust 9 | //! # use std::io::{IoSlice, Write}; 10 | //! # use swap_buffer_queue::{Queue, write_vectored::WriteVectoredVecBuffer}; 11 | //! // Creates a WriteVectoredVecBuffer queue 12 | //! let queue: Queue>> = Queue::with_capacity(100); 13 | //! queue.try_enqueue([vec![0; 256]]).unwrap(); 14 | //! queue.try_enqueue([vec![42; 42]]).unwrap(); 15 | //! let mut total_size = 0u16.to_be_bytes(); 16 | //! let mut slice = queue.try_dequeue().unwrap(); 17 | //! // Adds a header with the total size of the slices 18 | //! total_size.copy_from_slice(&(slice.total_size() as u16).to_be_bytes()); 19 | //! let mut frame = slice.frame(.., Some(&total_size), None); 20 | //! // Let's pretend we have a writer 21 | //! let mut writer: Vec = Default::default(); 22 | //! assert_eq!(writer.write_vectored(&mut frame).unwrap(), 300); 23 | //! 
``` 24 | 25 | use std::{ 26 | fmt, 27 | io::IoSlice, 28 | mem, 29 | ops::{Bound, Deref, DerefMut, RangeBounds}, 30 | }; 31 | 32 | mod array; 33 | mod vec; 34 | 35 | pub use array::WriteVectoredArrayBuffer; 36 | pub use vec::WriteVectoredVecBuffer; 37 | 38 | pub(crate) static EMPTY_SLICE: &[u8] = &[]; 39 | 40 | /// A *vectored* slice, i.e. a slice of [`IoSlice`]. 41 | /// 42 | /// The total size of all the buffered io-slices can be retrieved with [`total_size`](VectoredSlice::total_size) method. 43 | /// An header and a trailer can also be added to the slice using [`frame`](VectoredSlice::frame) 44 | /// method. 45 | /// 46 | /// # Examples 47 | /// 48 | /// ```rust 49 | /// # use std::io::IoSlice; 50 | /// # use std::ops::Deref; 51 | /// # use swap_buffer_queue::buffer::BufferSlice; 52 | /// # use swap_buffer_queue::Queue; 53 | /// # use swap_buffer_queue::write_vectored::{VectoredSlice, WriteVectoredVecBuffer}; 54 | /// # let queue: Queue> = Queue::with_capacity(42); 55 | /// # queue.try_enqueue([vec![2, 3, 4, 5]]).unwrap(); 56 | /// let header = vec![0, 1]; 57 | /// let trailer = vec![6, 7, 8, 9]; 58 | /// let mut slice: BufferSlice>, _> /* = ... */; 59 | /// # slice = queue.try_dequeue().unwrap(); 60 | /// fn to_vec<'a, 'b: 'a>(slices: &'a [IoSlice<'b>]) -> Vec<&'a [u8]> { 61 | /// slices.iter().map(Deref::deref).collect() 62 | /// } 63 | /// assert_eq!(to_vec(slice.deref().deref()), vec![&[2u8, 3, 4, 5]]); 64 | /// assert_eq!(slice.total_size(), 4); 65 | /// let frame = slice.frame(.., Some(&header), Some(&trailer)); 66 | /// assert_eq!( 67 | /// to_vec(frame.deref()), 68 | /// vec![&[0u8, 1] as &[u8], &[2, 3, 4, 5], &[6, 7, 8, 9]] 69 | /// ); 70 | /// ``` 71 | pub struct VectoredSlice<'a> { 72 | slices: &'a mut [IoSlice<'static>], 73 | total_size: usize, 74 | } 75 | 76 | impl<'a> fmt::Debug for VectoredSlice<'a> { 77 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 78 | f.debug_struct("VectoredSlice") 79 | .field("slices", &self.deref()) 80 | .field("total_size", &self.total_size) 81 | .finish() 82 | } 83 | } 84 | 85 | impl<'a> Deref for VectoredSlice<'a> { 86 | type Target = [IoSlice<'a>]; 87 | fn deref(&self) -> &Self::Target { 88 | &self.slices[1..self.slices.len() - 1] 89 | } 90 | } 91 | 92 | impl<'a> DerefMut for VectoredSlice<'a> { 93 | fn deref_mut(&mut self) -> &mut Self::Target { 94 | let slices_len = self.slices.len(); 95 | // SAFETY: slices in `self.slices[1..self.slices.len() - 1]` are never read 96 | // with their static lifetime (see `VectoredSlice::new`), only with `'a`, 97 | // so it's fine to mutate them with `'a` lifetime 98 | unsafe { mem::transmute(&mut self.slices[1..slices_len - 1]) } 99 | } 100 | } 101 | 102 | impl<'a> VectoredSlice<'a> { 103 | /// # Safety 104 | /// `slices` must not be read by the caller and have a lifetime greater than `'a` 105 | pub(crate) unsafe fn new(slices: &'a mut [IoSlice<'static>], total_size: usize) -> Self { 106 | Self { slices, total_size } 107 | } 108 | 109 | /// Returns the total size of all the buffered io-slices 110 | /// (see [examples](VectoredSlice#examples)). 111 | pub fn total_size(&self) -> usize { 112 | self.total_size 113 | } 114 | 115 | /// Returns the *framed* part of the vectored slice within the given range, with an optional 116 | /// header io-slice and an optional trailer io-slice 117 | /// (see [examples](VectoredSlice#examples)). 
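///
/// For instance, a frame can carry only a header (a sketch reusing the struct-level setup;
/// the omitted trailer slot is simply left out of the frame):
/// ```
/// # use swap_buffer_queue::Queue;
/// # use swap_buffer_queue::write_vectored::WriteVectoredVecBuffer;
/// # let queue: Queue<WriteVectoredVecBuffer<Vec<u8>>> = Queue::with_capacity(42);
/// # queue.try_enqueue([vec![2u8, 3, 4, 5]]).unwrap();
/// let header = vec![0u8, 1];
/// let mut slice = queue.try_dequeue().unwrap();
/// let frame = slice.frame(.., Some(&header), None);
/// // one io-slice for the header, plus one per enqueued value
/// assert_eq!(frame.len(), 2);
/// ```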
118 | pub fn frame( 119 | &mut self, 120 | range: impl RangeBounds, 121 | header: Option<&'a [u8]>, 122 | trailer: Option<&'a [u8]>, 123 | ) -> VectoredFrame<'a> { 124 | let mut start = match range.start_bound() { 125 | Bound::Included(&n) => n, 126 | Bound::Excluded(&n) => n + 1, 127 | Bound::Unbounded => 0, 128 | }; 129 | let mut end = match range.end_bound() { 130 | Bound::Included(&n) => n + 2, 131 | Bound::Excluded(&n) => n + 1, 132 | Bound::Unbounded => self.slices.len(), 133 | }; 134 | let header = if let Some(header) = header { 135 | // SAFETY: `self.slices[start..end]` will be transmuted right after to `[IoSlice<'a>] 136 | Some(mem::replace(&mut self.slices[start], unsafe { 137 | mem::transmute::(IoSlice::new(header)) 138 | })) 139 | } else { 140 | start += 1; 141 | None 142 | }; 143 | let trailer = if let Some(trailer) = trailer { 144 | // SAFETY: `self.slices[start..end]` will be transmuted right after to `[IoSlice<'a>] 145 | Some(mem::replace(&mut self.slices[end - 1], unsafe { 146 | mem::transmute::(IoSlice::new(trailer)) 147 | })) 148 | } else { 149 | end -= 1; 150 | None 151 | }; 152 | VectoredFrame { 153 | // SAFETY: `[self.slices[1..self.slices.len() - 1]` is safe to transmute to 154 | // `[IoSlice<'a>]` (see `VectoredSlice::new`), and `start == 0` 155 | // (respectively `end == self.slices.len()`) means that `self.slices[start]` 156 | // (respectively `self.slices[end]`) has `'a` lifetime because it's set from `header` 157 | // (respectively `trailer`) parameter above 158 | slices: unsafe { 159 | mem::transmute::<&mut [IoSlice], &mut [IoSlice]>(&mut self.slices[start..end]) 160 | }, 161 | header, 162 | trailer, 163 | } 164 | } 165 | } 166 | 167 | /// A *framed* part of a [`VectoredSlice`], with an [`IoSlice`] header and an [`IoSlice`] trailer 168 | /// (see [`VectoredSlice::frame`]). 169 | pub struct VectoredFrame<'a> { 170 | slices: &'a mut [IoSlice<'a>], 171 | header: Option>, 172 | trailer: Option>, 173 | } 174 | 175 | impl fmt::Debug for VectoredFrame<'_> { 176 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 177 | f.debug_tuple("VectoredFrame").field(&self.slices).finish() 178 | } 179 | } 180 | 181 | impl<'a> Deref for VectoredFrame<'a> { 182 | type Target = [IoSlice<'a>]; 183 | fn deref(&self) -> &Self::Target { 184 | self.slices 185 | } 186 | } 187 | 188 | impl<'a> DerefMut for VectoredFrame<'a> { 189 | fn deref_mut(&mut self) -> &mut Self::Target { 190 | self.slices 191 | } 192 | } 193 | 194 | impl<'a> Drop for VectoredFrame<'a> { 195 | fn drop(&mut self) { 196 | if let Some(header) = self.header { 197 | self.slices[0] = header; 198 | } 199 | if let Some(trailer) = self.trailer { 200 | self.slices[self.slices.len() - 1] = trailer; 201 | } 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /src/write_vectored/array.rs: -------------------------------------------------------------------------------- 1 | use std::{io::IoSlice, mem, mem::MaybeUninit, ops::Range}; 2 | 3 | use crate::{ 4 | buffer::{Buffer, CellBuffer, Drain}, 5 | loom::{ 6 | cell::Cell, 7 | sync::atomic::{AtomicUsize, Ordering}, 8 | }, 9 | utils::{init_array, ArrayWithHeaderAndTrailer}, 10 | write_vectored::{VectoredSlice, EMPTY_SLICE}, 11 | }; 12 | 13 | /// A buffer of [`IoSlice`] of size `N` 14 | /// 15 | /// The total size of the buffer is `N * mem::size_of::() + (N + 2) * mem::size_of::()`. 
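///
/// # Examples
/// A minimal sketch (illustrative; it assumes `Queue::new`, the queue then starting with the
/// array's fixed capacity of `N` values since this buffer cannot be resized):
/// ```
/// # use std::ops::Deref;
/// # use swap_buffer_queue::Queue;
/// # use swap_buffer_queue::write_vectored::WriteVectoredArrayBuffer;
/// let queue: Queue<WriteVectoredArrayBuffer<Vec<u8>, 4>> = Queue::new();
/// queue.try_enqueue([vec![0u8; 16]]).unwrap();
/// queue.try_enqueue([vec![1u8; 8]]).unwrap();
/// let slice = queue.try_dequeue().unwrap();
/// // total size in bytes of the two buffered io-slices
/// assert_eq!(slice.total_size(), 24);
/// assert_eq!(slice.deref().deref().len(), 2);
/// ```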
pub struct WriteVectoredArrayBuffer<T, const N: usize> {
    owned: [Cell<MaybeUninit<T>>; N],
    slices: ArrayWithHeaderAndTrailer<Cell<IoSlice<'static>>, 1, N, 1>,
    total_size: AtomicUsize,
}

impl<T, const N: usize> Default for WriteVectoredArrayBuffer<T, N> {
    fn default() -> Self {
        Self {
            owned: init_array(|| Cell::new(MaybeUninit::uninit())),
            slices: ArrayWithHeaderAndTrailer::new(|| Cell::new(IoSlice::new(EMPTY_SLICE))),
            total_size: Default::default(),
        }
    }
}

// SAFETY: `WriteVectoredArrayBuffer::clear` does clear the inserted range from the buffer
unsafe impl<T, const N: usize> Buffer for WriteVectoredArrayBuffer<T, N>
where
    T: AsRef<[u8]>,
{
    type Slice<'a> = VectoredSlice<'a>
    where
        T: 'a;

    #[inline]
    fn capacity(&self) -> usize {
        N
    }

    #[inline]
    unsafe fn slice(&mut self, range: Range<usize>) -> Self::Slice<'_> {
        // SAFETY: `[Cell<IoSlice>]` has the same layout as `[IoSlice]`,
        // and function contract guarantees that the range is initialized
        let slices = unsafe {
            &mut *(&mut self.slices[range.start..range.end + 2] as *mut _
                as *mut [IoSlice<'static>])
        };
        // SAFETY: slices are never read and live along their owner in the buffer, as they are
        // inserted and removed together
        unsafe { VectoredSlice::new(slices, self.total_size.load(Ordering::Acquire)) }
    }

    #[inline]
    unsafe fn clear(&mut self, range: Range<usize>) {
        *self.total_size.get_mut() = 0;
        for index in range {
            // SAFETY: function contract guarantees that the range is initialized
            unsafe { self.remove(index) };
        }
    }
}

// SAFETY: `insert` does initialize the index in the buffer
unsafe impl<T, const N: usize> CellBuffer<T> for WriteVectoredArrayBuffer<T, N>
where
    T: AsRef<[u8]>,
{
    unsafe fn insert(&self, index: usize, value: T) {
        // SAFETY: the slice is never read with its `'static` lifetime, it will only be used as a
        // reference with the same lifetime as the slice owner
        let slice = unsafe {
            mem::transmute::<IoSlice<'_>, IoSlice<'static>>(IoSlice::new(value.as_ref()))
        };
        self.slices[index + 1].set(slice);
        self.owned[index].set(MaybeUninit::new(value));
        self.total_size.fetch_add(slice.len(), Ordering::AcqRel);
    }
}

// SAFETY: `WriteVectoredArrayBuffer::remove` does remove the index from the buffer
unsafe impl<T, const N: usize> Drain for WriteVectoredArrayBuffer<T, N>
where
    T: AsRef<[u8]>,
{
    type Value = T;

    #[inline]
    unsafe fn remove(&mut self, index: usize) -> Self::Value {
        // SAFETY: function contract guarantees that the index has been inserted and is then initialized
        let value = unsafe {
            self.owned[index]
                .replace(MaybeUninit::uninit())
                .assume_init()
        };
        self.total_size
            .fetch_sub(value.as_ref().len(), Ordering::Release);
        value
    }
}

--------------------------------------------------------------------------------
/src/write_vectored/vec.rs:
--------------------------------------------------------------------------------
use std::{io::IoSlice, mem, mem::MaybeUninit, ops::Range};

use crate::{
    buffer::{Buffer, CellBuffer, Drain, Resize},
    loom::{
        cell::Cell,
        sync::atomic::{AtomicUsize, Ordering},
    },
    write_vectored::{VectoredSlice, EMPTY_SLICE},
};

/// A buffer of [`IoSlice`]
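///
/// # Examples
///
/// A minimal sketch of the intended write path (assuming the queue API used in the
/// [`VectoredSlice`] example; `Vec<u8>` serves as the writer because its `write_vectored`
/// implementation appends every buffer):
///
/// ```rust
/// use std::io::Write;
///
/// use swap_buffer_queue::{write_vectored::WriteVectoredVecBuffer, Queue};
///
/// let queue: Queue<WriteVectoredVecBuffer<Vec<u8>>> = Queue::with_capacity(42);
/// queue.try_enqueue([vec![2, 3, 4, 5]]).unwrap();
///
/// let header = vec![0, 1];
/// let trailer = vec![6, 7];
/// let mut slice = queue.try_dequeue().unwrap();
/// let frame = slice.frame(.., Some(&header), Some(&trailer));
///
/// // the frame dereferences to `[IoSlice]`, ready for vectored I/O
/// let mut writer: Vec<u8> = Vec::new();
/// writer.write_vectored(&frame).unwrap();
/// assert_eq!(writer, [0u8, 1, 2, 3, 4, 5, 6, 7]);
/// ```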
pub struct WriteVectoredVecBuffer<T> {
    owned: Box<[Cell<MaybeUninit<T>>]>,
    slices: Box<[Cell<IoSlice<'static>>]>,
    total_size: AtomicUsize,
}

impl<T> Default for WriteVectoredVecBuffer<T> {
    fn default() -> Self {
        Self {
            owned: Default::default(),
            slices: Default::default(),
            total_size: Default::default(),
        }
    }
}

// SAFETY: `WriteVectoredVecBuffer::clear` does clear the inserted range from the buffer
unsafe impl<T> Buffer for WriteVectoredVecBuffer<T>
where
    T: AsRef<[u8]>,
{
    type Slice<'a> = VectoredSlice<'a>
    where
        T: 'a;

    #[inline]
    fn capacity(&self) -> usize {
        self.owned.len()
    }

    #[inline]
    unsafe fn slice(&mut self, range: Range<usize>) -> Self::Slice<'_> {
        // SAFETY: `[Cell<IoSlice>]` has the same layout as `[IoSlice]`,
        // and function contract guarantees that the range is initialized
        let slices = unsafe {
            &mut *(&mut self.slices[range.start..range.end + 2] as *mut _
                as *mut [IoSlice<'static>])
        };
        // SAFETY: slices are never read and live along their owner in the buffer, as they are
        // inserted and removed together
        unsafe { VectoredSlice::new(slices, self.total_size.load(Ordering::Acquire)) }
    }

    #[inline]
    unsafe fn clear(&mut self, range: Range<usize>) {
        *self.total_size.get_mut() = 0;
        for index in range {
            // SAFETY: function contract guarantees that the range is initialized
            unsafe { self.remove(index) };
        }
    }
}

// SAFETY: `insert` does initialize the index in the buffer
unsafe impl<T> CellBuffer<T> for WriteVectoredVecBuffer<T>
where
    T: AsRef<[u8]>,
{
    unsafe fn insert(&self, index: usize, value: T) {
        // SAFETY: the slice is never read with its `'static` lifetime, it will only be used as a
        // reference with the same lifetime as the slice owner
        let slice = unsafe {
            mem::transmute::<IoSlice<'_>, IoSlice<'static>>(IoSlice::new(value.as_ref()))
        };
        self.slices[index + 1].set(slice);
        self.owned[index].set(MaybeUninit::new(value));
        self.total_size.fetch_add(slice.len(), Ordering::AcqRel);
    }
}

impl<T> Resize for WriteVectoredVecBuffer<T>
where
    T: AsRef<[u8]>,
{
    fn resize(&mut self, capacity: usize) {
        self.owned = (0..capacity)
            .map(|_| Cell::new(MaybeUninit::uninit()))
            .collect();
        self.slices = (0..capacity + 2)
            .map(|_| Cell::new(IoSlice::new(EMPTY_SLICE)))
            .collect();
    }
}

// SAFETY: `WriteVectoredVecBuffer::remove` does remove the index from the buffer
unsafe impl<T> Drain for WriteVectoredVecBuffer<T>
where
    T: AsRef<[u8]>,
{
    type Value = T;

    #[inline]
    unsafe fn remove(&mut self, index: usize) -> Self::Value {
        // SAFETY: function contract guarantees that the index has been inserted and is then initialized
        let value = unsafe {
            self.owned[index]
                .replace(MaybeUninit::uninit())
                .assume_init()
        };
        self.total_size
            .fetch_sub(value.as_ref().len(), Ordering::Release);
        value
    }
}

--------------------------------------------------------------------------------