├── .gitignore ├── .travis.yml ├── src ├── lib.rs ├── atomic_queue.rs └── atomic_ring.rs ├── Cargo.toml ├── LICENSE-MIT ├── README.md ├── benches └── bench_atomic_ring.rs ├── LICENSE-APACHE └── Cargo.lock /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | target 3 | atomicring.iml -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | 3 | rust: 4 | - nightly 5 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate parking_lot; 2 | 3 | pub use crate::atomic_queue::AtomicRingQueue; 4 | pub use crate::atomic_ring::AtomicRingBuffer; 5 | 6 | mod atomic_ring; 7 | mod atomic_queue; 8 | 9 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "atomicring" 3 | version = "1.2.9" 4 | authors = ["Crown Communications GmbH"] 5 | description = "AtomicRingBuffer is a constant-size almost lock-free concurrent ring buffer" 6 | license = "MIT OR Apache-2.0" 7 | repository = "https://github.com/eun-ice/atomicring" 8 | edition = "2021" 9 | 10 | [features] 11 | index_access = [] 12 | 13 | default = ["index_access"] 14 | 15 | [dependencies] 16 | parking_lot = "0.12.1" 17 | 18 | [dev-dependencies] 19 | mpmc = "0.1.5" 20 | crossbeam-queue = "0.3.11" 21 | serial_test = "3.0.0" 22 | 23 | 24 | [profile.bench] 25 | debug=true 26 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 Aron Wieck Crown Communications GmbH 2 | 3 | Permission is hereby granted, free of charge, 
to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AtomicRingBuffer / AtomicRingQueue 2 | 3 | [![Build Status](https://travis-ci.org/eun-ice/atomicring.svg?branch=master)](https://travis-ci.org/eun-ice/atomicring) 4 | [![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/eun-ice/atomicring) 5 | [![Cargo](https://img.shields.io/crates/v/atomicring.svg)](https://crates.io/crates/atomicring) 6 | [![Documentation](https://docs.rs/atomicring/badge.svg)](https://docs.rs/atomicring) 7 | 8 | A constant-size almost lock-free concurrent ring buffer 9 | 10 | Upsides 11 | 12 | - fast, try_push and try_pop are O(1) 13 | - scales well even during heavy concurrency 14 | - AtomicRingBuffer has only 4 words of memory overhead (AtomicRingQueue has 6 words of overhead) 15 | - blocking pop supported via AtomicRingQueue 16 | - no memory allocations after initial creation 17 | 18 | 19 | Downsides 20 | 21 | - growing/shrinking is not supported 22 | - maximum capacity of (usize >> 16) entries 23 | - capacity is rounded up to the next power of 2 24 | 25 | This queue should perform similar to [mpmc](https://github.com/brayniac/mpmc) but with a lower memory overhead. 26 | If memory overhead is not your main concern you should run benchmarks to decide which one to use. 
27 | 28 | ## Implementation details 29 | 30 | This implementation uses two atomics to store the read_index/write_index 31 | 32 | ```Text 33 | Read index atomic 34 | +63------------------------------------------------16+15-----8+7------0+ 35 | | read_index | r_done | r_pend | 36 | +----------------------------------------------------+--------+--------+ 37 | Write index atomic 38 | +63------------------------------------------------16+15-----8+7------0+ 39 | | write_index | w_done | w_pend | 40 | +----------------------------------------------------+--------+--------+ 41 | ``` 42 | 43 | - write_index/read_index (16bit on 32bit arch, 48bits on 64bit arch): current read/write position in the ring buffer (head and tail). 44 | - r_pend/w_pend (8bit): number of pending concurrent read/writes 45 | - r_done/w_done (8bit): number of completed read/writes. 46 | 47 | For reading, r_pend is incremented first, then the content of the ring buffer is read from memory. 48 | After reading is done, r_done is incremented. read_index is only incremented if r_done is equal to r_pend. 49 | 50 | For writing, first w_pend is incremented, then the content of the ring buffer is updated. 51 | After writing, w_done is incremented. If w_done is equal to w_pend then both are set to 0 and write_index is incremented. 52 | 53 | In rare cases this can result in a race where multiple threads increment r_pend in turn and r_done never quite reaches r_pend. 54 | If r_pend == 255 or w_pend == 255, a spinloop waits for it to drop below 255 before continuing. This rarely happens in practice, which is why this is called almost lock-free. 55 | 56 | 57 | ## Structs 58 | 59 | This package provides ```AtomicRingBuffer``` without blocking pop support and ```AtomicRingQueue``` with blocking pop support. 
60 | 61 | 62 | ## Dependencies 63 | 64 | This package depends on [parking_lot](https://github.com/Amanieu/parking_lot) for blocking support in AtomicRingQueue 65 | 66 | ## Usage 67 | 68 | To use AtomicRingBuffer, add this to your `Cargo.toml`: 69 | 70 | ```toml 71 | [dependencies] 72 | atomicring = "1.2.9" 73 | ``` 74 | 75 | 76 | And something like this to your code 77 | 78 | ```rust 79 | 80 | // create an AtomicRingBuffer with capacity of 1024 elements 81 | let ring = ::atomicring::AtomicRingBuffer::with_capacity(900); 82 | 83 | // try_pop removes an element of the buffer and returns None if the buffer is empty 84 | assert_eq!(None, ring.try_pop()); 85 | // push_overwrite adds an element to the buffer, overwriting the oldest element if the buffer is full: 86 | ring.push_overwrite(1); 87 | assert_eq!(Some(1), ring.try_pop()); 88 | assert_eq!(None, ring.try_pop()); 89 | ``` 90 | 91 | 92 | ## License 93 | 94 | Licensed under the terms of MIT license and the Apache License (Version 2.0). 95 | 96 | See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. 
97 | 98 | -------------------------------------------------------------------------------- /benches/bench_atomic_ring.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | extern crate atomicring; 3 | extern crate mpmc; 4 | extern crate test; 5 | 6 | use atomicring::AtomicRingBuffer; 7 | use mpmc::Queue; 8 | use crossbeam_queue::ArrayQueue; 9 | use test::Bencher; 10 | 11 | 12 | #[allow(dead_code)] 13 | #[derive(Default)] 14 | struct ZeroType {} 15 | 16 | unsafe impl Send for ZeroType {} 17 | 18 | unsafe impl Sync for ZeroType {} 19 | 20 | 21 | #[allow(dead_code)] 22 | #[derive(Default)] 23 | struct SmallType { 24 | index: usize 25 | } 26 | 27 | unsafe impl Send for SmallType {} 28 | 29 | unsafe impl Sync for SmallType {} 30 | 31 | impl SmallType { 32 | fn new(index: usize) -> SmallType { 33 | SmallType { index } 34 | } 35 | } 36 | 37 | #[allow(dead_code)] 38 | #[derive(Default)] 39 | struct MediumType { 40 | index: usize, 41 | some1: [usize; 31], 42 | } 43 | 44 | impl MediumType { 45 | fn new(index: usize) -> MediumType { 46 | MediumType { index, some1: [0; 31] } 47 | } 48 | } 49 | 50 | unsafe impl Send for MediumType {} 51 | 52 | unsafe impl Sync for MediumType {} 53 | 54 | 55 | #[derive(Default)] 56 | #[allow(dead_code)] 57 | struct LargeType { 58 | index: usize, 59 | some1: [usize; 31], 60 | some2: [usize; 32], 61 | some3: [usize; 32], 62 | some4: [usize; 32], 63 | } 64 | 65 | unsafe impl Send for LargeType {} 66 | 67 | unsafe impl Sync for LargeType {} 68 | 69 | 70 | impl LargeType { 71 | fn new(index: usize) -> LargeType { 72 | LargeType { index, some1: [0; 31], some2: [0; 32], some3: [0; 32], some4: [0; 32] } 73 | } 74 | } 75 | 76 | #[bench] 77 | fn bench_ring_singlethread_small(b: &mut Bencher) { 78 | let ring: AtomicRingBuffer<SmallType> = AtomicRingBuffer::with_capacity(10000); 79 | b.iter(|| { 80 | for i in 0..10000 { 81 | let _ = ring.try_push(SmallType::new(i)).ok().expect("!!"); 82 | } 83 | for i in 
0..10000 { 84 | assert_eq!(ring.try_pop().expect("!!").index, i); 85 | } 86 | }); 87 | } 88 | 89 | #[bench] 90 | fn bench_ring_singlethread_optionsmall(b: &mut Bencher) { 91 | let ring: AtomicRingBuffer<Option<SmallType>> = AtomicRingBuffer::with_capacity(10000); 92 | b.iter(|| { 93 | for i in 0..10000 { 94 | let _ = ring.try_push(Some(SmallType::new(i))).ok().expect("!!"); 95 | } 96 | for i in 0..10000 { 97 | assert_eq!(ring.try_pop().expect("!!").unwrap().index, i); 98 | } 99 | }); 100 | } 101 | 102 | 103 | #[bench] 104 | fn bench_ring_singlethread_medium(b: &mut Bencher) { 105 | let ring: AtomicRingBuffer<MediumType> = AtomicRingBuffer::with_capacity(10000); 106 | b.iter(|| { 107 | for i in 0..10000 { 108 | let _ = ring.try_push(MediumType::new(i)).ok().expect("!!"); 109 | } 110 | for i in 0..10000 { 111 | assert_eq!(ring.try_pop().expect("!!").index, i); 112 | } 113 | }); 114 | } 115 | 116 | 117 | #[bench] 118 | fn bench_ring_singlethread_large(b: &mut Bencher) { 119 | let ring: AtomicRingBuffer<LargeType> = AtomicRingBuffer::with_capacity(10000); 120 | b.iter(|| { 121 | for i in 0..10000 { 122 | let _ = ring.try_push(LargeType::new(i)).ok().expect("!!"); 123 | } 124 | for i in 0..10000 { 125 | assert_eq!(ring.try_pop().expect("!!").index, i); 126 | } 127 | }); 128 | } 129 | 130 | 131 | #[bench] 132 | fn bench_ring_inline_singlethread_small(b: &mut Bencher) { 133 | let ring: AtomicRingBuffer<SmallType> = AtomicRingBuffer::with_capacity(10000); 134 | b.iter(|| { 135 | for i in 0..10000 { 136 | ring.try_write(|w| { w.index = i }).ok().expect("!!"); 137 | } 138 | for i in 0..10000 { 139 | ring.try_read(|r| { assert_eq!(r.index, i) }).expect("!!"); 140 | } 141 | }); 142 | } 143 | 144 | #[bench] 145 | fn bench_ring_inline_singlethread_optionsmall(b: &mut Bencher) { 146 | let ring: AtomicRingBuffer<Option<SmallType>> = AtomicRingBuffer::with_capacity(10000); 147 | b.iter(|| { 148 | for i in 0..10000 { 149 | ring.try_write(|w| { *w = Some(SmallType::new(i)) }).ok().expect("!!"); 150 | } 151 | for i in 0..10000 { 152 | ring.try_read(|r| 
{ assert_eq!(r.as_mut().unwrap().index, i) }).expect("!!"); 153 | } 154 | }); 155 | } 156 | 157 | 158 | #[bench] 159 | fn bench_ring_inline_singlethread_medium(b: &mut Bencher) { 160 | let ring: AtomicRingBuffer<MediumType> = AtomicRingBuffer::with_capacity(10000); 161 | b.iter(|| { 162 | for i in 0..10000 { 163 | ring.try_write(|w| { w.index = i }).ok().expect("!!"); 164 | } 165 | for i in 0..10000 { 166 | ring.try_read(|r| { assert_eq!(r.index, i) }).expect("!!"); 167 | } 168 | }); 169 | } 170 | 171 | 172 | #[bench] 173 | fn bench_ring_inline_singlethread_large(b: &mut Bencher) { 174 | let ring: AtomicRingBuffer<LargeType> = AtomicRingBuffer::with_capacity(10000); 175 | b.iter(|| { 176 | for i in 0..10000 { 177 | ring.try_unsafe_write(|w| unsafe { ::std::ptr::write_unaligned(w, LargeType::new(i)) }).ok().expect("!!"); 178 | } 179 | for i in 0..10000 { 180 | ring.try_read(|r| { assert_eq!(r.index, i) }).expect("!!"); 181 | } 182 | }); 183 | } 184 | 185 | 186 | #[bench] 187 | fn bench_ring_unsafe_singlethread_small(b: &mut Bencher) { 188 | let ring: AtomicRingBuffer<SmallType> = AtomicRingBuffer::with_capacity(10000); 189 | b.iter(|| { 190 | for i in 0..10000 { 191 | ring.try_unsafe_write(|w| unsafe { ::std::ptr::write_unaligned(w, SmallType::new(i)) }).ok().expect("!!"); 192 | } 193 | for i in 0..10000 { 194 | ring.try_read(|r| { assert_eq!(r.index, i) }).expect("!!"); 195 | } 196 | }); 197 | } 198 | 199 | #[bench] 200 | fn bench_ring_unsafe_singlethread_optionsmall(b: &mut Bencher) { 201 | let ring: AtomicRingBuffer<Option<SmallType>> = AtomicRingBuffer::with_capacity(10000); 202 | b.iter(|| { 203 | for i in 0..10000 { 204 | ring.try_unsafe_write(|w| unsafe { ::std::ptr::write_unaligned(w, Some(SmallType::new(i))) }).ok().expect("!!"); 205 | } 206 | for i in 0..10000 { 207 | ring.try_read(|r| { assert_eq!(r.as_mut().unwrap().index, i) }).expect("!!"); 208 | } 209 | }); 210 | } 211 | 212 | 213 | #[bench] 214 | fn bench_ring_unsafe_singlethread_medium(b: &mut Bencher) { 215 | let ring: AtomicRingBuffer<MediumType> = 
AtomicRingBuffer::with_capacity(10000); 216 | b.iter(|| { 217 | for i in 0..10000 { 218 | ring.try_unsafe_write(|w| unsafe { ::std::ptr::write_unaligned(w, MediumType::new(i)) }).ok().expect("!!"); 219 | } 220 | for i in 0..10000 { 221 | ring.try_read(|r| { assert_eq!(r.index, i) }).expect("!!"); 222 | } 223 | }); 224 | } 225 | 226 | 227 | #[bench] 228 | fn bench_ring_unsafe_singlethread_large(b: &mut Bencher) { 229 | let ring: AtomicRingBuffer<LargeType> = AtomicRingBuffer::with_capacity(10000); 230 | b.iter(|| { 231 | for i in 0..10000 { 232 | ring.try_unsafe_write(|w| unsafe { ::std::ptr::write_unaligned(w, LargeType::new(i)) }).ok().expect("!!"); 233 | } 234 | for i in 0..10000 { 235 | ring.try_read(|r| { assert_eq!(r.index, i) }).expect("!!"); 236 | } 237 | }); 238 | } 239 | 240 | 241 | #[bench] 242 | fn bench_mpmc_singlethread_small(b: &mut Bencher) { 243 | let ring: Queue<SmallType> = Queue::with_capacity(10000); 244 | b.iter(|| { 245 | for i in 0..10000 { 246 | let _ = ring.push(SmallType::new(i)); 247 | } 248 | for i in 0..10000 { 249 | assert_eq!(ring.pop().unwrap().index, i); 250 | } 251 | }); 252 | } 253 | #[bench] 254 | fn bench_mpmc_singlethread_optionsmall(b: &mut Bencher) { 255 | let ring: Queue<Option<SmallType>> = Queue::with_capacity(10000); 256 | b.iter(|| { 257 | for i in 0..10000 { 258 | let _ = ring.push(Some(SmallType::new(i))); 259 | } 260 | for i in 0..10000 { 261 | assert_eq!(ring.pop().unwrap().unwrap().index, i); 262 | } 263 | }); 264 | } 265 | 266 | #[bench] 267 | fn bench_mpmc_singlethread_medium(b: &mut Bencher) { 268 | let ring: Queue<MediumType> = Queue::with_capacity(10000); 269 | b.iter(|| { 270 | for i in 0..10000 { 271 | let _ = ring.push(MediumType::new(i)); 272 | } 273 | for i in 0..10000 { 274 | assert_eq!(ring.pop().unwrap().index, i); 275 | } 276 | }); 277 | } 278 | 279 | 280 | #[bench] 281 | fn bench_mpmc_singlethread_large(b: &mut Bencher) { 282 | let ring: Queue<LargeType> = Queue::with_capacity(10000); 283 | b.iter(|| { 284 | for i in 0..10000 { 285 | let _ = 
ring.push(LargeType::new(i)); 286 | } 287 | for i in 0..10000 { 288 | assert_eq!(ring.pop().unwrap().index, i); 289 | } 290 | }); 291 | } 292 | 293 | 294 | 295 | 296 | 297 | 298 | #[bench] 299 | fn bench_crossbeam_singlethread_small(b: &mut Bencher) { 300 | let ring: ArrayQueue<SmallType> = ArrayQueue::new(10000); 301 | b.iter(|| { 302 | for i in 0..10000 { 303 | let _ = ring.push(SmallType::new(i)); 304 | } 305 | for i in 0..10000 { 306 | assert_eq!(ring.pop().unwrap().index, i); 307 | } 308 | }); 309 | } 310 | #[bench] 311 | fn bench_crossbeam_singlethread_optionsmall(b: &mut Bencher) { 312 | let ring: ArrayQueue<Option<SmallType>> = ArrayQueue::new(10000); 313 | b.iter(|| { 314 | for i in 0..10000 { 315 | let _ = ring.push(Some(SmallType::new(i))); 316 | } 317 | for i in 0..10000 { 318 | assert_eq!(ring.pop().unwrap().unwrap().index, i); 319 | } 320 | }); 321 | } 322 | 323 | #[bench] 324 | fn bench_crossbeam_singlethread_medium(b: &mut Bencher) { 325 | let ring: ArrayQueue<MediumType> = ArrayQueue::new(10000); 326 | b.iter(|| { 327 | for i in 0..10000 { 328 | let _ = ring.push(MediumType::new(i)); 329 | } 330 | for i in 0..10000 { 331 | assert_eq!(ring.pop().unwrap().index, i); 332 | } 333 | }); 334 | } 335 | 336 | 337 | #[bench] 338 | fn bench_crossbeam_singlethread_large(b: &mut Bencher) { 339 | let ring: ArrayQueue<LargeType> = ArrayQueue::new(10000); 340 | b.iter(|| { 341 | for i in 0..10000 { 342 | let _ = ring.push(LargeType::new(i)); 343 | } 344 | for i in 0..10000 { 345 | assert_eq!(ring.pop().unwrap().index, i); 346 | } 347 | }); 348 | } 349 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2018 Aron Wieck Crown Communications GmbH 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "atomicring" 7 | version = "1.2.9" 8 | dependencies = [ 9 | "crossbeam-queue", 10 | "mpmc", 11 | "parking_lot", 12 | "serial_test", 13 | ] 14 | 15 | [[package]] 16 | name = "autocfg" 17 | version = "1.1.0" 18 | source = "registry+https://github.com/rust-lang/crates.io-index" 19 | checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 20 | 21 | [[package]] 22 | name = "bitflags" 23 | version = "1.3.2" 24 | source = "registry+https://github.com/rust-lang/crates.io-index" 25 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 26 | 27 | [[package]] 28 | name = "cfg-if" 29 | version = "1.0.0" 30 | source = "registry+https://github.com/rust-lang/crates.io-index" 31 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 32 | 33 | [[package]] 34 | name = "crossbeam-queue" 35 | version = "0.3.11" 36 | source = "registry+https://github.com/rust-lang/crates.io-index" 37 | checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" 38 | dependencies = [ 39 | "crossbeam-utils", 40 | ] 41 | 42 | [[package]] 43 | name = "crossbeam-utils" 44 | version = "0.8.19" 45 | source = "registry+https://github.com/rust-lang/crates.io-index" 46 | checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" 47 | 48 | [[package]] 49 | name = "dashmap" 50 | version = "5.3.4" 51 | source = "registry+https://github.com/rust-lang/crates.io-index" 52 | checksum = "3495912c9c1ccf2e18976439f4443f3fee0fd61f424ff99fde6a66b15ecb448f" 53 | dependencies = [ 54 | "cfg-if", 55 | "hashbrown", 56 | "lock_api", 57 | "parking_lot_core", 58 | ] 59 | 60 | [[package]] 61 | name = "futures" 62 | version = "0.3.30" 63 | source = "registry+https://github.com/rust-lang/crates.io-index" 64 | checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" 65 | dependencies = [ 66 | "futures-channel", 67 | "futures-core", 68 | "futures-executor", 69 | 
"futures-io", 70 | "futures-sink", 71 | "futures-task", 72 | "futures-util", 73 | ] 74 | 75 | [[package]] 76 | name = "futures-channel" 77 | version = "0.3.30" 78 | source = "registry+https://github.com/rust-lang/crates.io-index" 79 | checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" 80 | dependencies = [ 81 | "futures-core", 82 | "futures-sink", 83 | ] 84 | 85 | [[package]] 86 | name = "futures-core" 87 | version = "0.3.30" 88 | source = "registry+https://github.com/rust-lang/crates.io-index" 89 | checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" 90 | 91 | [[package]] 92 | name = "futures-executor" 93 | version = "0.3.30" 94 | source = "registry+https://github.com/rust-lang/crates.io-index" 95 | checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" 96 | dependencies = [ 97 | "futures-core", 98 | "futures-task", 99 | "futures-util", 100 | ] 101 | 102 | [[package]] 103 | name = "futures-io" 104 | version = "0.3.30" 105 | source = "registry+https://github.com/rust-lang/crates.io-index" 106 | checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" 107 | 108 | [[package]] 109 | name = "futures-sink" 110 | version = "0.3.30" 111 | source = "registry+https://github.com/rust-lang/crates.io-index" 112 | checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" 113 | 114 | [[package]] 115 | name = "futures-task" 116 | version = "0.3.30" 117 | source = "registry+https://github.com/rust-lang/crates.io-index" 118 | checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" 119 | 120 | [[package]] 121 | name = "futures-util" 122 | version = "0.3.30" 123 | source = "registry+https://github.com/rust-lang/crates.io-index" 124 | checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" 125 | dependencies = [ 126 | "futures-channel", 127 | "futures-core", 128 | "futures-io", 129 | "futures-sink", 130 | "futures-task", 131 | 
"memchr", 132 | "pin-project-lite", 133 | "pin-utils", 134 | "slab", 135 | ] 136 | 137 | [[package]] 138 | name = "hashbrown" 139 | version = "0.12.3" 140 | source = "registry+https://github.com/rust-lang/crates.io-index" 141 | checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" 142 | 143 | [[package]] 144 | name = "lazy_static" 145 | version = "1.4.0" 146 | source = "registry+https://github.com/rust-lang/crates.io-index" 147 | checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" 148 | 149 | [[package]] 150 | name = "libc" 151 | version = "0.2.126" 152 | source = "registry+https://github.com/rust-lang/crates.io-index" 153 | checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" 154 | 155 | [[package]] 156 | name = "lock_api" 157 | version = "0.4.7" 158 | source = "registry+https://github.com/rust-lang/crates.io-index" 159 | checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" 160 | dependencies = [ 161 | "autocfg", 162 | "scopeguard", 163 | ] 164 | 165 | [[package]] 166 | name = "log" 167 | version = "0.4.21" 168 | source = "registry+https://github.com/rust-lang/crates.io-index" 169 | checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" 170 | 171 | [[package]] 172 | name = "memchr" 173 | version = "2.7.2" 174 | source = "registry+https://github.com/rust-lang/crates.io-index" 175 | checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" 176 | 177 | [[package]] 178 | name = "mpmc" 179 | version = "0.1.6" 180 | source = "registry+https://github.com/rust-lang/crates.io-index" 181 | checksum = "bf78b1242a953be96e01b5f8ed8ffdfc8055c0a2b779899b3835e5d27a69dced" 182 | 183 | [[package]] 184 | name = "parking_lot" 185 | version = "0.12.1" 186 | source = "registry+https://github.com/rust-lang/crates.io-index" 187 | checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" 188 | dependencies = [ 189 | "lock_api", 
190 | "parking_lot_core", 191 | ] 192 | 193 | [[package]] 194 | name = "parking_lot_core" 195 | version = "0.9.9" 196 | source = "registry+https://github.com/rust-lang/crates.io-index" 197 | checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" 198 | dependencies = [ 199 | "cfg-if", 200 | "libc", 201 | "redox_syscall", 202 | "smallvec", 203 | "windows-targets", 204 | ] 205 | 206 | [[package]] 207 | name = "pin-project-lite" 208 | version = "0.2.14" 209 | source = "registry+https://github.com/rust-lang/crates.io-index" 210 | checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" 211 | 212 | [[package]] 213 | name = "pin-utils" 214 | version = "0.1.0" 215 | source = "registry+https://github.com/rust-lang/crates.io-index" 216 | checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" 217 | 218 | [[package]] 219 | name = "proc-macro2" 220 | version = "1.0.81" 221 | source = "registry+https://github.com/rust-lang/crates.io-index" 222 | checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" 223 | dependencies = [ 224 | "unicode-ident", 225 | ] 226 | 227 | [[package]] 228 | name = "quote" 229 | version = "1.0.36" 230 | source = "registry+https://github.com/rust-lang/crates.io-index" 231 | checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" 232 | dependencies = [ 233 | "proc-macro2", 234 | ] 235 | 236 | [[package]] 237 | name = "redox_syscall" 238 | version = "0.4.1" 239 | source = "registry+https://github.com/rust-lang/crates.io-index" 240 | checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" 241 | dependencies = [ 242 | "bitflags", 243 | ] 244 | 245 | [[package]] 246 | name = "scopeguard" 247 | version = "1.1.0" 248 | source = "registry+https://github.com/rust-lang/crates.io-index" 249 | checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" 250 | 251 | [[package]] 252 | name = "serial_test" 253 | version = 
"3.0.0" 254 | source = "registry+https://github.com/rust-lang/crates.io-index" 255 | checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d" 256 | dependencies = [ 257 | "dashmap", 258 | "futures", 259 | "lazy_static", 260 | "log", 261 | "parking_lot", 262 | "serial_test_derive", 263 | ] 264 | 265 | [[package]] 266 | name = "serial_test_derive" 267 | version = "3.0.0" 268 | source = "registry+https://github.com/rust-lang/crates.io-index" 269 | checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" 270 | dependencies = [ 271 | "proc-macro2", 272 | "quote", 273 | "syn", 274 | ] 275 | 276 | [[package]] 277 | name = "slab" 278 | version = "0.4.9" 279 | source = "registry+https://github.com/rust-lang/crates.io-index" 280 | checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" 281 | dependencies = [ 282 | "autocfg", 283 | ] 284 | 285 | [[package]] 286 | name = "smallvec" 287 | version = "1.9.0" 288 | source = "registry+https://github.com/rust-lang/crates.io-index" 289 | checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" 290 | 291 | [[package]] 292 | name = "syn" 293 | version = "2.0.60" 294 | source = "registry+https://github.com/rust-lang/crates.io-index" 295 | checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" 296 | dependencies = [ 297 | "proc-macro2", 298 | "quote", 299 | "unicode-ident", 300 | ] 301 | 302 | [[package]] 303 | name = "unicode-ident" 304 | version = "1.0.1" 305 | source = "registry+https://github.com/rust-lang/crates.io-index" 306 | checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" 307 | 308 | [[package]] 309 | name = "windows-targets" 310 | version = "0.48.5" 311 | source = "registry+https://github.com/rust-lang/crates.io-index" 312 | checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" 313 | dependencies = [ 314 | "windows_aarch64_gnullvm", 315 | "windows_aarch64_msvc", 316 | 
"windows_i686_gnu", 317 | "windows_i686_msvc", 318 | "windows_x86_64_gnu", 319 | "windows_x86_64_gnullvm", 320 | "windows_x86_64_msvc", 321 | ] 322 | 323 | [[package]] 324 | name = "windows_aarch64_gnullvm" 325 | version = "0.48.5" 326 | source = "registry+https://github.com/rust-lang/crates.io-index" 327 | checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" 328 | 329 | [[package]] 330 | name = "windows_aarch64_msvc" 331 | version = "0.48.5" 332 | source = "registry+https://github.com/rust-lang/crates.io-index" 333 | checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" 334 | 335 | [[package]] 336 | name = "windows_i686_gnu" 337 | version = "0.48.5" 338 | source = "registry+https://github.com/rust-lang/crates.io-index" 339 | checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" 340 | 341 | [[package]] 342 | name = "windows_i686_msvc" 343 | version = "0.48.5" 344 | source = "registry+https://github.com/rust-lang/crates.io-index" 345 | checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" 346 | 347 | [[package]] 348 | name = "windows_x86_64_gnu" 349 | version = "0.48.5" 350 | source = "registry+https://github.com/rust-lang/crates.io-index" 351 | checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" 352 | 353 | [[package]] 354 | name = "windows_x86_64_gnullvm" 355 | version = "0.48.5" 356 | source = "registry+https://github.com/rust-lang/crates.io-index" 357 | checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" 358 | 359 | [[package]] 360 | name = "windows_x86_64_msvc" 361 | version = "0.48.5" 362 | source = "registry+https://github.com/rust-lang/crates.io-index" 363 | checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" 364 | -------------------------------------------------------------------------------- /src/atomic_queue.rs: 
-------------------------------------------------------------------------------- 1 | use std::fmt::{self, Debug}; 2 | use std::hint::spin_loop; 3 | use std::time::Duration; 4 | use std::time::Instant; 5 | 6 | use parking_lot::{Condvar, Mutex}; 7 | 8 | use crate::AtomicRingBuffer; 9 | 10 | ///A constant-size almost lock-free concurrent ring buffer with blocking poll support 11 | /// 12 | /// See AtomicRingQueue for implementation details 13 | /// 14 | /// # Examples 15 | /// 16 | ///``` 17 | /// // create an AtomicRingQueue with capacity of 1024 elements 18 | /// let ring = ::atomicring::AtomicRingQueue::with_capacity(900); 19 | /// 20 | /// // try_pop removes an element of the buffer and returns None if the buffer is empty 21 | /// assert_eq!(None, ring.try_pop()); 22 | /// // push_overwrite adds an element to the buffer, overwriting the oldest element if the buffer is full: 23 | /// ring.push_overwrite(10); 24 | /// assert_eq!(10, ring.pop()); 25 | /// assert_eq!(None, ring.try_pop()); 26 | ///``` 27 | pub struct AtomicRingQueue { 28 | mutex: Mutex<()>, 29 | condvar: Condvar, 30 | ring: AtomicRingBuffer, 31 | } 32 | 33 | /// If T is Send, AtomicRingQueue is Send + Sync 34 | unsafe impl Send for AtomicRingQueue {} 35 | 36 | /// Any particular `T` should never accessed concurrently, so T does not need to be Sync. 
37 | /// If T is Send, AtomicRingQueue is Send + Sync 38 | unsafe impl Sync for AtomicRingQueue {} 39 | 40 | impl Debug for AtomicRingQueue { 41 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 42 | write!(f, "AtomicRingQueue {:?}/{:?}", self.ring.len(), self.ring.capacity()) 43 | } 44 | } 45 | 46 | impl AtomicRingQueue { 47 | /// Constructs a new empty AtomicRingQueue with the specified capacity 48 | /// the capacity is rounded up to the next power of 2 49 | /// 50 | /// # Examples 51 | /// 52 | ///``` 53 | /// // create an AtomicRingQueue with capacity of 1024 elements 54 | /// let ring = ::atomicring::AtomicRingQueue::with_capacity(900); 55 | /// 56 | /// // try_pop removes an element of the buffer and returns None if the buffer is empty 57 | /// assert_eq!(None, ring.try_pop()); 58 | /// // push_overwrite adds an element to the buffer, overwriting the oldest element if the buffer is full: 59 | /// ring.push_overwrite(10); 60 | /// assert_eq!(10, ring.pop()); 61 | /// assert_eq!(None, ring.try_pop()); 62 | ///``` 63 | pub fn with_capacity(capacity: usize) -> AtomicRingQueue { 64 | AtomicRingQueue { 65 | mutex: Mutex::new(()), 66 | condvar: Condvar::new(), 67 | ring: AtomicRingBuffer::with_capacity(capacity), 68 | } 69 | } 70 | 71 | fn trigger(&self) { 72 | let _ = self.mutex.lock(); 73 | self.condvar.notify_one(); 74 | } 75 | 76 | /// Try to push an object to the atomic ring buffer. 77 | /// If the buffer has no capacity remaining, the pushed object will be returned to the caller as error. 78 | #[inline(always)] 79 | pub fn try_push(&self, content: T) -> Result<(), T> { 80 | let result = self.ring.try_push(content); 81 | if result.is_ok() { 82 | self.trigger(); 83 | } 84 | result 85 | } 86 | 87 | /// Pushes an object to the atomic ring buffer. 88 | /// If the buffer is full, another object will be popped to make room for the new object. 
89 | #[inline(always)] 90 | pub fn push_overwrite(&self, content: T) { 91 | self.ring.push_overwrite(content); 92 | self.trigger(); 93 | } 94 | 95 | /// Pop an object from the ring buffer, returns None if the buffer is empty 96 | #[inline] 97 | pub fn try_pop(&self) -> Option { 98 | self.ring.try_pop() 99 | } 100 | 101 | #[inline(always)] 102 | fn spinning_pop(&self) -> Option { 103 | for i in 0..10 { 104 | if let res @ Some(_) = self.ring.try_pop() { 105 | return res; 106 | } 107 | for _ in 0..i << 1 { 108 | spin_loop(); 109 | } 110 | } 111 | for _ in 0..10 { 112 | if let res @ Some(_) = self.ring.try_pop() { 113 | return res; 114 | } 115 | ::std::thread::yield_now(); 116 | } 117 | None 118 | } 119 | 120 | /// Pop an object from the ring buffer, waits indefinitely if the buf is empty 121 | #[inline] 122 | pub fn pop(&self) -> T { 123 | loop { 124 | if let Some(res) = self.spinning_pop() { 125 | return res; 126 | } 127 | { 128 | let mut lock = self.mutex.lock(); 129 | if let Some(res) = self.try_pop() { 130 | return res; 131 | } 132 | self.condvar.wait(&mut lock); 133 | } 134 | } 135 | } 136 | 137 | /// Pop an object from the ring buffer, waiting until the given instant if the buffer is empty. Returns None on timeout 138 | #[inline] 139 | pub fn pop_until(&self, deadline: Instant) -> Option { 140 | loop { 141 | if let res @ Some(_) = self.spinning_pop() { 142 | return res; 143 | } 144 | { 145 | let mut lock = self.mutex.lock(); 146 | if let res @ Some(_) = self.try_pop() { 147 | return res; 148 | } 149 | if self.condvar.wait_until(&mut lock, deadline).timed_out() { 150 | return None; 151 | } 152 | } 153 | } 154 | } 155 | 156 | /// Pop an object from the ring buffer, waiting until the given instant if the buffer is empty. 
Returns None on timeout 157 | #[inline] 158 | pub fn pop_for(&self, timeout: Duration) -> Option { 159 | self.pop_until(Instant::now() + timeout) 160 | } 161 | 162 | 163 | /// Returns the number of objects stored in the ring buffer that are not in process of being removed. 164 | #[inline] 165 | pub fn len(&self) -> usize { 166 | self.ring.len() 167 | } 168 | 169 | 170 | /// Returns the true if ring buffer is empty. Equivalent to `self.len() == 0` 171 | #[inline] 172 | pub fn is_empty(&self) -> bool { 173 | self.ring.is_empty() 174 | } 175 | 176 | /// Returns the maximum capacity of the ring buffer. 177 | /// Attention: In fact you can store one element less than the capacity given here 178 | #[inline(always)] 179 | pub fn capacity(&self) -> usize { 180 | self.ring.capacity() 181 | } 182 | 183 | /// Returns the remaining capacity of the ring buffer. 184 | /// This is equal to `self.cap() - self.len() - pending writes + pending reads`. 185 | #[inline] 186 | pub fn remaining_cap(&self) -> usize { 187 | self.ring.remaining_cap() 188 | } 189 | 190 | /// Pop everything from ring buffer and discard it. 
191 | #[inline] 192 | pub fn clear(&self) { 193 | self.ring.clear() 194 | } 195 | } 196 | 197 | 198 | #[cfg(test)] 199 | mod tests { 200 | #[test] 201 | pub fn test_pushpop() { 202 | let ring = super::AtomicRingQueue::with_capacity(900); 203 | assert_eq!(1024, ring.capacity()); 204 | assert_eq!(None, ring.try_pop()); 205 | ring.push_overwrite(1); 206 | assert_eq!(1, ring.pop()); 207 | assert_eq!(None, ring.try_pop()); 208 | 209 | for i in 0..5000 { 210 | ring.push_overwrite(i); 211 | assert_eq!(i, ring.pop()); 212 | assert_eq!(None, ring.try_pop()); 213 | } 214 | 215 | 216 | for i in 0..199999 { 217 | ring.push_overwrite(i); 218 | } 219 | assert_eq!(ring.capacity(), ring.len() + 1); 220 | assert_eq!(199999 - (ring.capacity() - 1), ring.pop()); 221 | assert_eq!(Ok(()), ring.try_push(199999)); 222 | 223 | for i in 200000 - (ring.capacity() - 1)..200000 { 224 | assert_eq!(i, ring.pop()); 225 | } 226 | } 227 | 228 | #[test] 229 | pub fn test_pushpop_large() { 230 | let ring = super::AtomicRingQueue::with_capacity(65535); 231 | 232 | 233 | assert_eq!(None, ring.try_pop()); 234 | ring.push_overwrite(1); 235 | assert_eq!(1, ring.pop()); 236 | 237 | for i in 0..200000 { 238 | ring.push_overwrite(i); 239 | assert_eq!(i, ring.pop()); 240 | } 241 | 242 | 243 | for i in 0..200000 { 244 | ring.push_overwrite(i); 245 | } 246 | assert_eq!(ring.capacity(), ring.len() + 1); 247 | 248 | for i in 200000 - (ring.capacity() - 1)..200000 { 249 | assert_eq!(i, ring.pop()); 250 | } 251 | } 252 | 253 | #[test] 254 | pub fn test_pushpop_large2() { 255 | let ring = super::AtomicRingQueue::with_capacity(65536); 256 | 257 | 258 | assert_eq!(None, ring.try_pop()); 259 | ring.push_overwrite(1); 260 | assert_eq!(1, ring.pop()); 261 | 262 | for i in 0..200000 { 263 | ring.push_overwrite(i); 264 | assert_eq!(i, ring.pop()); 265 | } 266 | 267 | 268 | for i in 0..200000 { 269 | ring.push_overwrite(i); 270 | } 271 | assert_eq!(ring.capacity(), ring.len() + 1); 272 | 273 | for i in 200000 - 
(ring.capacity() - 1)..200000 { 274 | assert_eq!(i, ring.pop()); 275 | } 276 | } 277 | 278 | 279 | #[test] 280 | pub fn test_pushpop_large2_zerotype() { 281 | #[derive(Eq, PartialEq, Debug)] 282 | struct ZeroType {} 283 | 284 | let ring = super::AtomicRingQueue::with_capacity(65536); 285 | 286 | 287 | assert_eq!(None, ring.try_pop()); 288 | ring.push_overwrite(ZeroType {}); 289 | assert_eq!(ZeroType {}, ring.pop()); 290 | 291 | for _i in 0..200000 { 292 | ring.push_overwrite(ZeroType {}); 293 | assert_eq!(ZeroType {}, ring.pop()); 294 | } 295 | 296 | 297 | for _i in 0..200000 { 298 | ring.push_overwrite(ZeroType {}); 299 | } 300 | assert_eq!(ring.capacity(), ring.len() + 1); 301 | 302 | for _i in 200000 - (ring.capacity() - 1)..200000 { 303 | assert_eq!(ZeroType {}, ring.pop()); 304 | } 305 | } 306 | 307 | 308 | #[test] 309 | pub fn test_threaded() { 310 | let cap = 65535; 311 | 312 | let buf: super::AtomicRingQueue = super::AtomicRingQueue::with_capacity(cap); 313 | for i in 0..cap { 314 | buf.try_push(i).expect("init"); 315 | } 316 | let arc = ::std::sync::Arc::new(buf); 317 | 318 | let mut handles = Vec::new(); 319 | let end = ::std::time::Instant::now() + ::std::time::Duration::from_millis(10000); 320 | for _thread_num in 0..100 { 321 | let buf = ::std::sync::Arc::clone(&arc); 322 | handles.push(::std::thread::spawn(move || { 323 | while ::std::time::Instant::now() < end { 324 | let a = buf.pop(); 325 | let b = buf.pop(); 326 | while let Err(_) = buf.try_push(a) {}; 327 | while let Err(_) = buf.try_push(b) {}; 328 | } 329 | })); 330 | } 331 | for (_idx, handle) in handles.into_iter().enumerate() { 332 | handle.join().expect("join"); 333 | } 334 | 335 | assert_eq!(arc.len(), cap); 336 | 337 | let mut expected: Vec = Vec::new(); 338 | let mut actual: Vec = Vec::new(); 339 | for i in 0..cap { 340 | expected.push(i); 341 | actual.push(arc.pop()); 342 | } 343 | actual.sort_by(|&a, b| a.partial_cmp(b).unwrap()); 344 | assert_eq!(actual, expected); 345 | } 346 | 347 | 
static DROP_COUNT: ::std::sync::atomic::AtomicUsize = ::std::sync::atomic::AtomicUsize::new(0); 348 | 349 | #[allow(dead_code)] 350 | #[derive(Debug)] 351 | struct TestType { 352 | some: usize 353 | } 354 | 355 | 356 | impl Drop for TestType { 357 | fn drop(&mut self) { 358 | DROP_COUNT.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed); 359 | } 360 | } 361 | 362 | #[test] 363 | pub fn test_dropcount() { 364 | DROP_COUNT.store(0, ::std::sync::atomic::Ordering::Relaxed); 365 | { 366 | let buf: super::AtomicRingQueue = super::AtomicRingQueue::with_capacity(1024); 367 | buf.try_push(TestType { some: 0 }).expect("push"); 368 | buf.try_push(TestType { some: 0 }).expect("push"); 369 | 370 | assert_eq!(0, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 371 | buf.pop(); 372 | assert_eq!(1, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 373 | } 374 | assert_eq!(2, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 375 | } 376 | } 377 | -------------------------------------------------------------------------------- /src/atomic_ring.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::hint::spin_loop; 3 | use std::marker::PhantomData; 4 | use std::mem; 5 | use std::ptr; 6 | use std::sync::atomic::{AtomicUsize, Ordering}; 7 | 8 | ///A constant-size almost lock-free concurrent ring buffer 9 | /// 10 | ///Upsides 11 | /// 12 | ///- fast, try_push and pop are O(1) 13 | ///- scales well even during heavy concurrency 14 | ///- only 4 words of memory overhead 15 | ///- no memory allocations after initial creation 16 | /// 17 | /// 18 | ///Downsides 19 | /// 20 | ///- growing/shrinking is not supported 21 | ///- no blocking poll support (see AtomicRingQueue for blocking poll support) 22 | ///- maximum capacity of (usize >> 16) entries 23 | ///- capacity is rounded up to the next power of 2 24 | /// 25 | ///This queue should perform similar to [mpmc](https://github.com/brayniac/mpmc) but with 
a lower memory overhead. 26 | ///If memory overhead is not your main concern you should run benchmarks to decide which one to use. 27 | /// 28 | ///## Implementation details 29 | /// 30 | ///This implementation uses two atomics to store the read_index/write_index 31 | /// 32 | ///```Text 33 | /// Read index atomic 34 | ///+63------------------------------------------------16+15-----8+7------0+ 35 | ///| read_index | r_done | r_pend | 36 | ///+----------------------------------------------------+--------+--------+ 37 | /// Write index atomic 38 | ///+63------------------------------------------------16+15-----8+7------0+ 39 | ///| write_index | w_done | w_pend | 40 | ///+----------------------------------------------------+--------+--------+ 41 | ///``` 42 | /// 43 | ///- write_index/read_index (16bit on 32bit arch, 48bits on 64bit arch): current read/write position in the ring buffer (head and tail). 44 | ///- r_pend/w_pend (8bit): number of pending concurrent read/writes 45 | ///- r_done/w_done (8bit): number of completed read/writes. 46 | /// 47 | ///For reading r_pend is incremented first, then the content of the ring buffer is read from memory. 48 | ///After reading is done r_done is incremented. read_index is only incremented if r_done is equal to r_pend. 49 | /// 50 | ///For writing first w_pend is incremented, then the content of the ring buffer is updated. 51 | ///After writing w_done is incremented. If w_done is equal to w_pend then both are set to 0 and write_index is incremented. 52 | /// 53 | ///In rare cases this can result in a race where multiple threads increment r_pend in turn and r_done never quite reaches r_pend. 54 | ///If r_pend == 255 or w_pend == 255 a spinloop waits it to be <255 to continue. This rarely happens in practice, that's why this is called almost lock-free. 
55 | /// 56 | /// 57 | /// 58 | /// 59 | ///## Usage 60 | /// 61 | ///To use AtomicRingBuffer, add this to your `Cargo.toml`: 62 | /// 63 | ///```toml 64 | ///[dependencies] 65 | ///atomicring = "1.2.9" 66 | ///``` 67 | /// 68 | /// 69 | ///And something like this to your code 70 | /// 71 | ///```rust 72 | /// 73 | ///// create an AtomicRingBuffer with capacity of 1023 elements (next power of two -1) 74 | ///let ring = ::atomicring::AtomicRingBuffer::with_capacity(900); 75 | /// 76 | ///// try_pop removes an element of the buffer and returns None if the buffer is empty 77 | ///assert_eq!(None, ring.try_pop()); 78 | ///// push_overwrite adds an element to the buffer, overwriting the oldest element if the buffer is full: 79 | ///ring.push_overwrite(1); 80 | ///assert_eq!(Some(1), ring.try_pop()); 81 | ///assert_eq!(None, ring.try_pop()); 82 | ///``` 83 | /// 84 | /// 85 | ///## License 86 | /// 87 | ///Licensed under the terms of MIT license and the Apache License (Version 2.0). 88 | /// 89 | ///See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. 90 | /// 91 | 92 | pub struct AtomicRingBuffer { 93 | read_counters: CounterStore, 94 | write_counters: CounterStore, 95 | mem: *mut [T], 96 | _marker: PhantomData, 97 | } 98 | 99 | /// If T is Send, AtomicRingBuffer is Send + Sync 100 | unsafe impl Send for AtomicRingBuffer {} 101 | 102 | /// Any particular `T` should never accessed concurrently, so T does not need to be Sync. 103 | /// If T is Send, AtomicRingBuffer is Send + Sync 104 | unsafe impl Sync for AtomicRingBuffer {} 105 | 106 | const MAXIMUM_IN_PROGRESS: u8 = 16; 107 | 108 | impl AtomicRingBuffer { 109 | /// Write an object from the ring buffer, passing an &mut pointer to a given function to write to during transaction. 
The cell will be initialized with Default::default() 110 | #[inline(always)] 111 | pub fn try_write(&self, writer: F) -> Result<(), ()> { 112 | self.try_unsafe_write(|cell| unsafe { 113 | if mem::size_of::() != 0 { 114 | ptr::write(cell, Default::default()); 115 | } 116 | writer(&mut (*cell)); 117 | }) 118 | } 119 | } 120 | 121 | impl AtomicRingBuffer { 122 | /// Constructs a new empty AtomicRingBuffer with the specified capacity 123 | /// the capacity is rounded up to the next power of 2 -1 124 | /// 125 | /// # Examples 126 | /// 127 | ///``` 128 | /// // create an AtomicRingBuffer with capacity of 1023 elements (next power of 2 from the given capacity) 129 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(900); 130 | /// 131 | /// assert_eq!(1023, ring.remaining_cap()); 132 | /// 133 | /// // try_pop removes an element of the buffer and returns None if the buffer is empty 134 | /// assert_eq!(None, ring.try_pop()); 135 | /// // push_overwrite adds an element to the buffer, overwriting the oldest element if the buffer is full: 136 | /// ring.push_overwrite(10); 137 | /// assert_eq!(Some(10), ring.try_pop()); 138 | /// assert_eq!(None, ring.try_pop()); 139 | ///``` 140 | /// 141 | pub fn with_capacity(capacity: usize) -> AtomicRingBuffer { 142 | if capacity > (::std::usize::MAX >> 16) + 1 { 143 | panic!("too large!"); 144 | } 145 | 146 | let cap = capacity.next_power_of_two(); 147 | 148 | /* allocate using a Vec */ 149 | let mut content: Vec = Vec::with_capacity(cap); 150 | unsafe { content.set_len(cap); } 151 | 152 | /* Zero memory content 153 | for i in content.iter_mut() { 154 | unsafe { ptr::write(i, mem::zeroed()); } 155 | } 156 | */ 157 | let mem = Box::into_raw(content.into_boxed_slice()); 158 | 159 | AtomicRingBuffer { 160 | mem, 161 | read_counters: CounterStore::new(), 162 | write_counters: CounterStore::new(), 163 | _marker: PhantomData, 164 | } 165 | } 166 | 167 | 168 | /// Try to push an object to the atomic ring buffer. 
169 | /// If the buffer has no capacity remaining, the pushed object will be returned to the caller as error. 170 | /// 171 | /// # Examples 172 | /// 173 | ///``` 174 | /// // create an AtomicRingBuffer with capacity of 3 elements 175 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 176 | /// assert_eq!(3, ring.remaining_cap()); 177 | /// 178 | /// // try_push adds an element to the buffer, if there is room 179 | /// assert_eq!(Ok(()), ring.try_push(10)); 180 | /// assert_eq!(Some(10), ring.try_pop()); 181 | /// assert_eq!(None, ring.try_pop()); 182 | /// 183 | /// for i in 0..3 { 184 | /// assert_eq!(Ok(()), ring.try_push(i)); 185 | /// } 186 | /// 187 | /// // try_push returns the element to the caller if the buffer is full 188 | /// assert_eq!(Err(3), ring.try_push(3)); 189 | /// 190 | /// // existing values remain in the ring buffer 191 | /// for i in 0..3 { 192 | /// assert_eq!(Some(i), ring.try_pop()); 193 | /// } 194 | ///``` 195 | #[inline(always)] 196 | pub fn try_push(&self, content: T) -> Result<(), T> { 197 | self.try_unsafe_write_or(content, |cell, content| { 198 | if mem::size_of::() != 0 { 199 | unsafe { ptr::write(cell, content); } 200 | } 201 | }, |content| { content }) 202 | } 203 | 204 | /// Write an object from the ring buffer, passing an uninitialized *mut pointer to a given fuction to write to during transaction. 205 | /// 206 | /// The content of the cell will *NOT* be initialized and has to be overwritten using ptr::write. 207 | /// 208 | /// The writer function is called once if there is room in the buffer 209 | /// 210 | /// Warning: Use ::std::ptr::write only if you know the type is not zero length. 
211 | /// 212 | /// # Examples 213 | /// 214 | ///``` 215 | /// // create an AtomicRingBuffer with capacity of 7 elements (next power of 2 -1 from the given capacity) 216 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(5); 217 | /// assert_eq!(7, ring.remaining_cap()); 218 | /// 219 | /// // try_unsafe_write adds an element to the buffer, if there is room 220 | /// assert_eq!(Ok(()), ring.try_unsafe_write(|cell| unsafe { ::std::ptr::write(cell, 10) } )); 221 | /// assert_eq!(Some(10), ring.try_pop()); 222 | /// assert_eq!(None, ring.try_pop()); 223 | /// 224 | /// for i in 0..7 { 225 | /// assert_eq!(Ok(()), ring.try_unsafe_write(|cell| unsafe { ::std::ptr::write(cell, i) } )); 226 | /// } 227 | /// 228 | /// // try_unsafe_write returns an error to the caller if the buffer is full 229 | /// assert_eq!(Err(()), ring.try_unsafe_write(|cell| unsafe { ::std::ptr::write(cell, 7) } )); 230 | /// 231 | /// // existing values remain in the ring buffer 232 | /// for i in 0..7 { 233 | /// assert_eq!(Some(i), ring.try_pop()); 234 | /// } 235 | ///``` 236 | #[inline(always)] 237 | pub fn try_unsafe_write(&self, writer: F) -> Result<(), ()> { 238 | self.try_unsafe_write_or((), |dst, _| { writer(dst) }, |_| {}) 239 | } 240 | 241 | 242 | /// Write an object from the ring buffer, passing an uninitialized *mut pointer and an arbitrary content parameter to a given fuction to write to during transaction. 243 | /// 244 | /// The content of the cell will *NOT* be initialized and has to be overwritten using ptr::write. 
245 | /// 246 | /// The writer function is called once if there is room in the buffer 247 | /// 248 | /// # Examples 249 | /// 250 | ///``` 251 | /// // create an AtomicRingBuffer with capacity of 7 elements (next power of 2 -1 from the given capacity) 252 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(5); 253 | /// assert_eq!(7, ring.remaining_cap()); 254 | /// 255 | /// // try_unsafe_write adds an element to the buffer, if there is room 256 | /// let writer = |cell, content| unsafe { ::std::ptr::write(cell, content) }; 257 | /// let error = |content| {content}; 258 | /// 259 | /// assert_eq!(Ok(()), ring.try_unsafe_write_or(10, writer, error)); 260 | /// assert_eq!(Some(10), ring.try_pop()); 261 | /// assert_eq!(None, ring.try_pop()); 262 | /// 263 | /// for i in 0..7 { 264 | /// assert_eq!(Ok(()), ring.try_unsafe_write_or(i, writer, error)); 265 | /// } 266 | /// 267 | /// assert_eq!(0, ring.remaining_cap()); 268 | /// 269 | /// // try_unsafe_write returns an error to the caller if the buffer is full 270 | /// assert_eq!(Err(7), ring.try_unsafe_write_or(7, writer, error)); 271 | /// 272 | /// // existing values remain in the ring buffer 273 | /// for i in 0..7 { 274 | /// assert_eq!(Some(i), ring.try_pop()); 275 | /// } 276 | /// 277 | ///``` 278 | #[inline(always)] 279 | pub fn try_unsafe_write_or OK, E: FnOnce(CNT) -> ERR>(&self, content: CNT, writer: W, err: E) -> Result { 280 | let cap_mask = self.cap_mask(); 281 | let error_condition = |to_write_index: usize, _: u8| { to_write_index.wrapping_add(1) & cap_mask == self.read_counters.load(Ordering::SeqCst).index() }; 282 | 283 | if let Ok((write_counters, to_write_index)) = self.write_counters.increment_in_progress(error_condition, cap_mask) { 284 | 285 | // write mem 286 | let ok = unsafe { 287 | let cell = self.cell(to_write_index); 288 | writer(cell, content) 289 | }; 290 | 291 | // Mark write as done 292 | self.write_counters.increment_done(write_counters, to_write_index, cap_mask); 293 | 
Ok(ok) 294 | } else { 295 | Err(err(content)) 296 | } 297 | } 298 | 299 | 300 | /// Pushes an object to the atomic ring buffer. 301 | /// If the buffer is full, another object will be popped and dropped to make room for the new object. 302 | /// 303 | /// Returns true if another element was overwritten 304 | /// 305 | /// # Examples 306 | /// 307 | ///``` 308 | /// // create an AtomicRingBuffer with capacity of 3 elements 309 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 310 | /// 311 | /// assert_eq!(3, ring.remaining_cap()); 312 | /// 313 | /// // push_overwrite adds an element to the buffer, overwriting an older element 314 | /// 315 | /// ring.push_overwrite(10); 316 | /// assert_eq!(Some(10), ring.try_pop()); 317 | /// assert_eq!(None, ring.try_pop()); 318 | /// 319 | /// for i in 0..3 { 320 | /// ring.push_overwrite(i); 321 | /// } 322 | /// // push_overwrite overwrites t first element 323 | /// assert_eq!(true, ring.push_overwrite(3)); 324 | /// 325 | /// // first value (0) was removed 326 | /// for i in 1..4 { 327 | /// assert_eq!(Some(i), ring.try_pop()); 328 | /// } 329 | /// 330 | ///``` 331 | #[inline(always)] 332 | pub fn push_overwrite(&self, content: T) -> bool { 333 | let mut cont = content; 334 | let mut dropped = false; 335 | loop { 336 | match self.try_push(cont) { 337 | Ok(_) => return dropped, 338 | Err(ret) => { 339 | dropped |= self.remove_if_full().is_some(); 340 | cont = ret; 341 | } 342 | } 343 | } 344 | } 345 | 346 | /// Tries to pop an object from the ring buffer. 
347 | /// If the buffer is empty this returns None, otherwise this returns Some(val) 348 | /// 349 | /// # Examples 350 | /// 351 | ///``` 352 | /// // create an AtomicRingBuffer with capacity of 3 elements 353 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 354 | /// 355 | /// assert_eq!(Ok(()), ring.try_push(10)); 356 | /// assert_eq!(Some(10), ring.try_pop()); 357 | /// assert_eq!(None, ring.try_pop()); 358 | /// 359 | ///``` 360 | #[inline] 361 | pub fn try_pop(&self) -> Option { 362 | self.try_read_nodrop(|cell| 363 | if ::std::mem::size_of::() != 0 { 364 | unsafe { ptr::read(cell) } 365 | } else { 366 | unsafe { ::std::mem::zeroed() } 367 | }) 368 | } 369 | 370 | /// Read an object from the ring buffer, passing an &mut pointer to a given function to read during transaction. 371 | /// 372 | /// The given function is called with a mutable reference to the cell, and then the content in the cell is dropped 373 | /// 374 | /// If you do not want the content to be dropped use try_read_nodrop(..) or just use try_pop() 375 | /// 376 | /// # Examples 377 | /// 378 | ///``` 379 | /// // create an AtomicRingBuffer with capacity of 3 elements 380 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 381 | /// 382 | /// assert_eq!(Ok(()), ring.try_push(10)); 383 | /// assert_eq!(Some(10), ring.try_read(|cell| unsafe { *cell })); 384 | /// assert_eq!(None, ring.try_read(|cell| unsafe { *cell })); 385 | /// 386 | ///``` 387 | #[inline] 388 | pub fn try_read U>(&self, reader: F) -> Option { 389 | self.try_read_nodrop(|cell| unsafe { 390 | let result = reader(&mut (*cell)); 391 | ptr::drop_in_place(cell); 392 | result 393 | }) 394 | } 395 | 396 | /// Read an object from the ring buffer, passing an &mut pointer to a given function to read during transaction. 397 | /// 398 | /// The given function is called with a mutable reference to the cell. 
The content in the cell is not dropped after reading, so the given function must take ownership of the content ideally using ptr::read(cell) 399 | /// 400 | /// Warning: Do not use ptr::read for zero length types 401 | /// 402 | /// # Examples 403 | /// 404 | ///``` 405 | /// // create an AtomicRingBuffer with capacity of 3 elements 406 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 407 | /// 408 | /// assert_eq!(Ok(()), ring.try_push(10)); 409 | /// assert_eq!(Some(10), ring.try_read_nodrop(|cell| unsafe { *cell })); 410 | /// assert_eq!(None, ring.try_read_nodrop(|cell| unsafe { *cell })); 411 | /// 412 | ///``` 413 | #[inline(always)] 414 | pub fn try_read_nodrop U>(&self, reader: F) -> Option { 415 | let cap_mask = self.cap_mask(); 416 | 417 | let error_condition = |to_read_index: usize, _: u8| { to_read_index == self.write_counters.load(Ordering::SeqCst).index() }; 418 | 419 | 420 | if let Ok((read_counters, to_read_index)) = self.read_counters.increment_in_progress(error_condition, cap_mask) { 421 | let popped = unsafe { 422 | let cell = self.cell(to_read_index); 423 | reader(&mut (*cell)) 424 | }; 425 | self.read_counters.increment_done(read_counters, to_read_index, cap_mask); 426 | Some(popped) 427 | } else { 428 | None 429 | } 430 | } 431 | 432 | 433 | /// Returns the number of objects stored in the ring buffer that are not in process of being removed. 
434 | /// 435 | /// # Examples 436 | /// 437 | ///``` 438 | /// // create an AtomicRingBuffer with capacity of 3 elements 439 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 440 | /// 441 | /// assert_eq!(0, ring.len()); 442 | /// assert_eq!(Ok(()), ring.try_push(10)); 443 | /// assert_eq!(1, ring.len()); 444 | /// assert_eq!(Some(10), ring.try_pop()); 445 | /// assert_eq!(None, ring.try_pop()); 446 | /// assert_eq!(0, ring.len()); 447 | /// 448 | ///``` 449 | #[inline] 450 | pub fn len(&self) -> usize { 451 | let read_counters = self.read_counters.load(Ordering::SeqCst); 452 | let write_counters = self.write_counters.load(Ordering::SeqCst); 453 | counter_len(read_counters, write_counters, self.capacity()) 454 | } 455 | 456 | 457 | /// Returns the true if ring buffer is empty. Equivalent to `self.len() == 0` 458 | /// 459 | /// # Examples 460 | /// 461 | ///``` 462 | /// // create an AtomicRingBuffer with capacity of 3 elements 463 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 464 | /// 465 | /// assert_eq!(true, ring.is_empty()); 466 | /// assert_eq!(Ok(()), ring.try_push(10)); 467 | /// assert_eq!(false, ring.is_empty()); 468 | /// assert_eq!(Some(10), ring.try_pop()); 469 | /// assert_eq!(None, ring.try_pop()); 470 | /// assert_eq!(true, ring.is_empty()); 471 | /// 472 | ///``` 473 | #[inline] 474 | pub fn is_empty(&self) -> bool { 475 | self.len() == 0 476 | } 477 | 478 | /// Returns the maximum capacity of the ring buffer 479 | /// 480 | /// # Examples 481 | /// 482 | ///``` 483 | /// // create an AtomicRingBuffer with capacity of 3 elements 484 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 485 | /// 486 | /// assert_eq!(true, ring.is_empty()); 487 | /// assert_eq!(Ok(()), ring.try_push(10)); 488 | /// assert_eq!(false, ring.is_empty()); 489 | /// assert_eq!(Some(10), ring.try_pop()); 490 | /// assert_eq!(None, ring.try_pop()); 491 | /// assert_eq!(true, ring.is_empty()); 492 | /// 493 | ///``` 494 | 
#[inline(always)] 495 | pub fn capacity(&self) -> usize { 496 | unsafe { (*self.mem).len() } 497 | } 498 | 499 | /// Returns the remaining capacity of the ring buffer. 500 | /// This is equal to `self.cap() - self.len() - pending writes + pending reads`. 501 | /// 502 | /// # Examples 503 | /// 504 | ///``` 505 | /// // create an AtomicRingBuffer with capacity of 3 elements 506 | /// let ring = ::atomicring::AtomicRingBuffer::with_capacity(3); 507 | /// 508 | /// assert_eq!(3, ring.remaining_cap()); 509 | /// assert_eq!(Ok(()), ring.try_push(10)); 510 | /// assert_eq!(2, ring.remaining_cap()); 511 | /// assert_eq!(Some(10), ring.try_pop()); 512 | /// assert_eq!(3, ring.remaining_cap()); 513 | /// 514 | ///``` 515 | #[inline] 516 | pub fn remaining_cap(&self) -> usize { 517 | let read_counters = self.read_counters.load(Ordering::SeqCst); 518 | let write_counters = self.write_counters.load(Ordering::SeqCst); 519 | let cap = self.capacity(); 520 | let read_index = read_counters.index(); 521 | let write_index = write_counters.index(); 522 | let len = if read_index <= write_index { write_index - read_index } else { write_index + cap - read_index }; 523 | //len is from read_index to write_index, but we have to substract write_in_process_count for a better remaining capacity approximation 524 | cap - 1 - len - write_counters.in_process_count() as usize 525 | } 526 | 527 | /// Pop everything from ring buffer and discard it. 528 | #[inline] 529 | pub fn clear(&self) { 530 | while let Some(_) = self.try_pop() {} 531 | } 532 | 533 | /// Returns the memory usage in bytes of the allocated region of the ring buffer. 534 | /// This does not include overhead. 
535 | pub fn memory_usage(&self) -> usize { 536 | unsafe { mem::size_of_val(&(*self.mem)) } 537 | } 538 | 539 | /// Returns a *mut T pointer to an indexed cell 540 | #[inline(always)] 541 | unsafe fn cell(&self, index: usize) -> *mut T { 542 | (*self.mem).get_unchecked_mut(index) 543 | //&mut (*self.mem)[index] 544 | } 545 | 546 | /// Returns the capacity mask 547 | #[inline(always)] 548 | fn cap_mask(&self) -> usize { 549 | self.capacity() - 1 550 | } 551 | 552 | 553 | /// pop one element, but only if ringbuffer is full, used by push_overwrite 554 | fn remove_if_full(&self) -> Option { 555 | let cap_mask = self.cap_mask(); 556 | 557 | 558 | let error_condition = |to_read_index: usize, read_in_process_count: u8| { read_in_process_count > 0 || to_read_index.wrapping_add(1) & cap_mask == self.write_counters.load(Ordering::Acquire).index() }; 559 | 560 | 561 | if let Ok((read_counters, to_read_index)) = self.read_counters.increment_in_progress(error_condition, cap_mask) { 562 | let popped = unsafe { 563 | // Read Memory 564 | if ::std::mem::size_of::() != 0 { 565 | ptr::read(self.cell(to_read_index)) 566 | } else { 567 | ::std::mem::zeroed() 568 | } 569 | }; 570 | 571 | self.read_counters.increment_done(read_counters, to_read_index, cap_mask); 572 | 573 | Some(popped) 574 | } else { 575 | None 576 | } 577 | } 578 | } 579 | 580 | impl Drop for AtomicRingBuffer { 581 | fn drop(&mut self) { 582 | // drop contents 583 | self.clear(); 584 | // drop memory box without dropping contents 585 | unsafe { Box::from_raw(self.mem).into_vec().set_len(0); } 586 | } 587 | } 588 | 589 | 590 | impl fmt::Debug for AtomicRingBuffer { 591 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 592 | if f.alternate() { 593 | let cap = self.capacity(); 594 | let read_counters = self.read_counters.load(Ordering::Relaxed); 595 | let write_counters = self.write_counters.load(Ordering::Relaxed); 596 | write!(f, "AtomicRingBuffer cap: {} len: {} read_index: {}, read_in_process_count: {}, 
read_done_count: {}, write_index: {}, write_in_process_count: {}, write_done_count: {}", cap, self.len(), 597 | read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), 598 | write_counters.index(), write_counters.in_process_count(), write_counters.done_count()) 599 | } else { 600 | write!(f, "AtomicRingBuffer cap: {} len: {}", self.capacity(), self.len()) 601 | } 602 | } 603 | } 604 | 605 | 606 | #[derive(Eq, PartialEq, Copy, Clone)] 607 | pub struct Counters(usize); 608 | 609 | impl Counters { 610 | #[inline(always)] 611 | const fn index(self) -> usize { 612 | self.0 >> 16 613 | } 614 | 615 | #[inline(always)] 616 | const fn in_process_count(self) -> u8 { 617 | self.0 as u8 618 | } 619 | 620 | #[inline(always)] 621 | const fn done_count(self) -> u8 { 622 | (self.0 >> 8) as u8 623 | } 624 | } 625 | 626 | impl From for Counters { 627 | fn from(val: usize) -> Counters { 628 | Counters(val) 629 | } 630 | } 631 | 632 | impl From for usize { 633 | fn from(val: Counters) -> usize { 634 | val.0 635 | } 636 | } 637 | 638 | fn counter_len(read_counters: Counters, write_counters: Counters, cap: usize) -> usize { 639 | let read_index = read_counters.index(); 640 | let write_index = write_counters.index(); 641 | let len = if read_index <= write_index { write_index - read_index } else { write_index + cap - read_index }; 642 | //len is from read_index to write_index, but we have to subtract read_in_process_count for a better approximation 643 | len - (read_counters.in_process_count() as usize) 644 | } 645 | 646 | 647 | #[cfg_attr(target_arch = "x86_64", repr(align(128)))] 648 | #[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))] 649 | struct CounterStore { 650 | counters: AtomicUsize, 651 | } 652 | 653 | impl CounterStore { 654 | pub const fn new() -> CounterStore { 655 | CounterStore { counters: AtomicUsize::new(0) } 656 | } 657 | #[inline(always)] 658 | pub fn load(&self, ordering: Ordering) -> Counters { 659 | 
Counters(self.counters.load(ordering)) 660 | } 661 | #[inline(always)] 662 | pub fn increment_in_progress(&self, error_condition: F, cap_mask: usize) -> Result<(Counters, usize), ()> 663 | where F: Fn(usize, u8) -> bool { 664 | 665 | // Mark write as in progress 666 | let mut counters = self.load(Ordering::Acquire); 667 | loop { 668 | let in_progress_count = counters.in_process_count(); 669 | if error_condition(counters.index(), in_progress_count) { 670 | return Err(()); 671 | } 672 | 673 | 674 | // spin wait on MAXIMUM_IN_PROGRESS simultanous in progress writes/reads 675 | if in_progress_count == MAXIMUM_IN_PROGRESS { 676 | spin_loop(); 677 | counters = self.load(Ordering::Acquire); 678 | continue; 679 | } 680 | let index = counters.index().wrapping_add(in_progress_count as usize) & cap_mask; 681 | 682 | // recheck error condition, e.g. full/empty 683 | if error_condition(index, in_progress_count) { 684 | return Err(()); 685 | } 686 | 687 | let new_counters = Counters(counters.0.wrapping_add(1)); 688 | match self.counters.compare_exchange_weak(counters.0, new_counters.0, Ordering::Acquire, Ordering::Relaxed) { 689 | Ok(_) => return Ok((new_counters, index)), 690 | Err(updated) => counters = Counters(updated) 691 | }; 692 | } 693 | } 694 | #[inline(always)] 695 | pub fn increment_done(&self, mut counters: Counters, index: usize, cap_mask: usize) { 696 | // ultra fast path: if we are first pending operation in line and nothing is done yet, and we do not need to roll over the index 697 | // we can just increment index, decrement in_progress_count, preserve done_count without checking 698 | // if counters.index() == index && counters.done_count() == 0 && index < cap_mask 699 | if (counters.0 & 0x00FF_FFFF_FFFF_FF00 == (index << 16)) && (index < cap_mask) { 700 | // even if other operations are in progress 701 | self.counters.fetch_add((1 << 16) - 1, Ordering::Release); 702 | return; 703 | } 704 | 705 | loop { 706 | let in_process_count = counters.in_process_count(); 
707 | let new_counters = Counters(if counters.done_count().wrapping_add(1) == in_process_count { 708 | // if the new done_count equals in_process_count count commit: 709 | // increment read_index and zero read_in_process_count and read_done_count 710 | (counters.index().wrapping_add(in_process_count as usize) & cap_mask) << 16 711 | } else if counters.index() == index { 712 | // fast path: if we are first pending operation in line, increment index, decrement in_progress_count, preserve done_count, even if other operations are pending 713 | counters.0.wrapping_add((1 << 16) - 1) & ((cap_mask << 16) | 0xFFFF) 714 | } else { 715 | // otherwise we just increment read_done_count 716 | counters.0.wrapping_add(1 << 8) 717 | }); 718 | 719 | 720 | match self.counters.compare_exchange_weak(counters.0, new_counters.0, Ordering::Release, Ordering::Relaxed) { 721 | Ok(_) => return, 722 | Err(updated) => counters = Counters(updated) 723 | }; 724 | } 725 | } 726 | } 727 | 728 | 729 | #[cfg(test)] 730 | mod tests { 731 | use serial_test::serial; 732 | 733 | #[test] 734 | pub fn test_increments() { 735 | let read_counter_store = super::CounterStore::new(); 736 | let write_counter_store = super::CounterStore::new(); 737 | // 16 elements 738 | let cap = 16; 739 | let cap_mask = 0xf; 740 | 741 | 742 | let error_condition = |_, _| { false }; 743 | for i in 0..8 { 744 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 745 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 746 | assert_eq!((0, (0 + i * 3) % 16, 0, 0, (0 + i * 3) % 16, 0, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 747 | 748 | write_counter_store.increment_in_progress(error_condition, cap_mask).expect(".."); 749 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 750 
| let write_counters = write_counter_store.load(super::Ordering::Relaxed); 751 | assert_eq!((0, (0 + i * 3) % 16, 0, 0, (0 + i * 3) % 16, 1, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 752 | 753 | write_counter_store.increment_in_progress(error_condition, cap_mask).expect(".."); 754 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 755 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 756 | assert_eq!((0, (0 + i * 3) % 16, 0, 0, (0 + i * 3) % 16, 2, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 757 | write_counter_store.increment_in_progress(error_condition, cap_mask).expect(".."); 758 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 759 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 760 | assert_eq!((0, (0 + i * 3) % 16, 0, 0, (0 + i * 3) % 16, 3, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 761 | write_counter_store.increment_done(write_counters, (0 + i * 3) % 16, cap_mask); 762 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 763 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 764 | assert_eq!((1, (0 + i * 3) % 16, 0, 0, (1 + i * 3) % 16, 2, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), 
write_counters.done_count())); 765 | write_counter_store.increment_done(write_counters, (2 + i * 3) % 16, cap_mask); 766 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 767 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 768 | assert_eq!((1, (0 + i * 3) % 16, 0, 0, (1 + i * 3) % 16, 2, 1), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 769 | write_counter_store.increment_done(write_counters, (1 + i * 3) % 16, cap_mask); 770 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 771 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 772 | assert_eq!((3, (0 + i * 3) % 16, 0, 0, (3 + i * 3) % 16, 0, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 773 | 774 | read_counter_store.increment_in_progress(error_condition, cap_mask).expect(".."); 775 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 776 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 777 | assert_eq!((2, (0 + i * 3) % 16, 1, 0, (3 + i * 3) % 16, 0, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 778 | read_counter_store.increment_in_progress(error_condition, cap_mask).expect(".."); 779 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 780 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 781 | assert_eq!((1, (0 + i * 3) % 16, 2, 0, (3 + i * 3) % 16, 0, 0), 
(super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 782 | read_counter_store.increment_in_progress(error_condition, cap_mask).expect(".."); 783 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 784 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 785 | assert_eq!((0, (0 + i * 3) % 16, 3, 0, (3 + i * 3) % 16, 0, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 786 | read_counter_store.increment_done(read_counters, (1 + i * 3) % 16, cap_mask); 787 | 788 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 789 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 790 | assert_eq!((0, (0 + i * 3) % 16, 3, 1, (3 + i * 3) % 16, 0, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 791 | read_counter_store.increment_done(read_counters, (0 + i * 3) % 16, cap_mask); 792 | let read_counters = read_counter_store.load(super::Ordering::Relaxed); 793 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 794 | assert_eq!((0, (1 + i * 3) % 16, 2, 1, (3 + i * 3) % 16, 0, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 795 | read_counter_store.increment_done(read_counters, (2 + i * 3) % 16, cap_mask); 796 | let read_counters = 
read_counter_store.load(super::Ordering::Relaxed); 797 | let write_counters = write_counter_store.load(super::Ordering::Relaxed); 798 | assert_eq!((0, (3 + i * 3) % 16, 0, 0, (3 + i * 3) % 16, 0, 0), (super::counter_len(read_counters, write_counters, cap), read_counters.index(), read_counters.in_process_count(), read_counters.done_count(), write_counters.index(), write_counters.in_process_count(), write_counters.done_count())); 799 | } 800 | } 801 | 802 | #[test] 803 | pub fn test_pushpop() { 804 | let ring = super::AtomicRingBuffer::with_capacity(900); 805 | assert_eq!(1024, ring.capacity()); 806 | assert_eq!(None, ring.try_pop()); 807 | ring.push_overwrite(1); 808 | assert_eq!(Some(1), ring.try_pop()); 809 | assert_eq!(None, ring.try_pop()); 810 | 811 | // Test push in an empty buffer 812 | for i in 0..5000 { 813 | ring.push_overwrite(i); 814 | assert_eq!(Some(i), ring.try_pop()); 815 | assert_eq!(None, ring.try_pop()); 816 | } 817 | 818 | 819 | // Test push in a full buffer 820 | for i in 0..199999 { 821 | ring.push_overwrite(i); 822 | } 823 | assert_eq!(ring.capacity(), ring.len() + 1); 824 | assert_eq!(199999 - (ring.capacity() - 1), ring.try_pop().unwrap()); 825 | assert_eq!(Ok(()), ring.try_push(199999)); 826 | 827 | for i in 200000 - (ring.capacity() - 1)..200000 { 828 | assert_eq!(i, ring.try_pop().unwrap()); 829 | } 830 | 831 | // Test push in an almost full buffer 832 | for i in 0..1023 { 833 | ring.try_push(i).expect("push") 834 | } 835 | assert_eq!(1024, ring.capacity()); 836 | assert_eq!(1023, ring.len()); 837 | for i in 0..1023 { 838 | assert_eq!(ring.try_pop(), Some(i)); 839 | ring.try_push(i).expect("push") 840 | } 841 | } 842 | 843 | #[test] 844 | pub fn test_pushpop_large() { 845 | let ring = super::AtomicRingBuffer::with_capacity(65535); 846 | 847 | 848 | assert_eq!(None, ring.try_pop()); 849 | ring.push_overwrite(1); 850 | assert_eq!(Some(1), ring.try_pop()); 851 | 852 | for i in 0..200000 { 853 | ring.push_overwrite(i); 854 | 
assert_eq!(Some(i), ring.try_pop()); 855 | } 856 | 857 | 858 | for i in 0..200000 { 859 | ring.push_overwrite(i); 860 | } 861 | assert_eq!(ring.capacity(), ring.len() + 1); 862 | 863 | for i in 200000 - (ring.capacity() - 1)..200000 { 864 | assert_eq!(i, ring.try_pop().unwrap()); 865 | } 866 | } 867 | 868 | #[test] 869 | pub fn test_pushpop_large2() { 870 | let ring = super::AtomicRingBuffer::with_capacity(65536); 871 | 872 | 873 | assert_eq!(None, ring.try_pop()); 874 | ring.push_overwrite(1); 875 | assert_eq!(Some(1), ring.try_pop()); 876 | 877 | for i in 0..200000 { 878 | ring.push_overwrite(i); 879 | assert_eq!(Some(i), ring.try_pop()); 880 | } 881 | 882 | 883 | for i in 0..200000 { 884 | ring.push_overwrite(i); 885 | } 886 | assert_eq!(ring.capacity(), ring.len() + 1); 887 | 888 | for i in 200000 - (ring.capacity() - 1)..200000 { 889 | assert_eq!(i, ring.try_pop().unwrap()); 890 | } 891 | } 892 | 893 | #[test] 894 | pub fn test_pushpop_large2_zerotype() { 895 | #[derive(Eq, PartialEq, Debug)] 896 | struct ZeroType {} 897 | 898 | let ring = super::AtomicRingBuffer::with_capacity(65536); 899 | 900 | assert_eq!(0, ring.memory_usage()); 901 | 902 | assert_eq!(None, ring.try_pop()); 903 | ring.push_overwrite(ZeroType {}); 904 | assert_eq!(Some(ZeroType {}), ring.try_pop()); 905 | 906 | for _i in 0..200000 { 907 | ring.push_overwrite(ZeroType {}); 908 | assert_eq!(Some(ZeroType {}), ring.try_pop()); 909 | } 910 | 911 | 912 | for _i in 0..200000 { 913 | ring.push_overwrite(ZeroType {}); 914 | } 915 | assert_eq!(ring.capacity(), ring.len() + 1); 916 | 917 | for _i in 200000 - (ring.capacity() - 1)..200000 { 918 | assert_eq!(ZeroType {}, ring.try_pop().unwrap()); 919 | } 920 | } 921 | 922 | 923 | #[test] 924 | pub fn test_threaded() { 925 | let cap = 65535; 926 | 927 | let buf: super::AtomicRingBuffer = super::AtomicRingBuffer::with_capacity(cap); 928 | for i in 0..cap { 929 | buf.try_push(i).expect("init"); 930 | } 931 | let arc = ::std::sync::Arc::new(buf); 932 | 933 
| let mut handles = Vec::new(); 934 | let end = ::std::time::Instant::now() + ::std::time::Duration::from_millis(10000); 935 | for _thread_num in 0..100 { 936 | let buf = ::std::sync::Arc::clone(&arc); 937 | handles.push(::std::thread::spawn(move || { 938 | while ::std::time::Instant::now() < end { 939 | let a = pop_wait(&buf); 940 | let b = pop_wait(&buf); 941 | //buf.try_push(a).expect("push"); 942 | //buf.try_push(b).expect("push"); 943 | while let Err(_) = buf.try_push(a) {}; 944 | while let Err(_) = buf.try_push(b) {}; 945 | } 946 | })); 947 | } 948 | for (_idx, handle) in handles.into_iter().enumerate() { 949 | handle.join().expect("join"); 950 | } 951 | 952 | assert_eq!(arc.len(), cap); 953 | 954 | let mut expected: Vec = Vec::new(); 955 | let mut actual: Vec = Vec::new(); 956 | for i in 0..cap { 957 | expected.push(i); 958 | actual.push(arc.try_pop().expect("check")); 959 | } 960 | actual.sort_by(|&a, b| a.partial_cmp(b).unwrap()); 961 | assert_eq!(actual, expected); 962 | } 963 | 964 | #[test] 965 | pub fn test_push_overwrite() { 966 | // create an AtomicRingBuffer with capacity of 8 elements (next power of 2 from the given capacity) 967 | let ring = super::AtomicRingBuffer::with_capacity(3); 968 | 969 | // push_overwrite adds an element to the buffer, overwriting an older element 970 | 971 | ring.push_overwrite(10); 972 | assert_eq!(Some(10), ring.try_pop()); 973 | assert_eq!(None, ring.try_pop()); 974 | 975 | for i in 0..3 { 976 | assert_eq!(Ok(()), ring.try_push(i)); 977 | } 978 | assert_eq!(3, ring.len()); 979 | 980 | ring.push_overwrite(3); 981 | 982 | assert_eq!(Some(1), ring.try_pop()); 983 | assert_eq!(Some(2), ring.try_pop()); 984 | assert_eq!(Some(3), ring.try_pop()); 985 | } 986 | 987 | static DROP_COUNT: ::std::sync::atomic::AtomicUsize = ::std::sync::atomic::AtomicUsize::new(0); 988 | 989 | #[allow(dead_code)] 990 | #[derive(Debug, Default)] 991 | struct TestType { 992 | some: usize, 993 | } 994 | 995 | 996 | impl Drop for TestType { 997 | fn 
drop(&mut self) { 998 | DROP_COUNT.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed); 999 | } 1000 | } 1001 | 1002 | #[test] 1003 | #[serial] 1004 | pub fn test_dropcount() { 1005 | DROP_COUNT.store(0, ::std::sync::atomic::Ordering::Relaxed); 1006 | { 1007 | let buf: super::AtomicRingBuffer = super::AtomicRingBuffer::with_capacity(1024); 1008 | buf.try_push(TestType { some: 0 }).expect("push"); 1009 | buf.try_push(TestType { some: 0 }).expect("push"); 1010 | 1011 | assert_eq!(0, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1012 | buf.try_pop(); 1013 | assert_eq!(1, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1014 | } 1015 | assert_eq!(2, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1016 | } 1017 | 1018 | #[test] 1019 | #[serial] 1020 | pub fn test_inline_dropcount() { 1021 | DROP_COUNT.store(0, ::std::sync::atomic::Ordering::Relaxed); 1022 | { 1023 | let buf: super::AtomicRingBuffer = super::AtomicRingBuffer::with_capacity(1024); 1024 | buf.try_write(|w| { w.some = 0 }).expect("push"); 1025 | buf.try_write(|w| { w.some = 0 }).expect("push"); 1026 | 1027 | assert_eq!(0, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1028 | buf.try_read(|_| {}); 1029 | assert_eq!(1, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1030 | } 1031 | assert_eq!(2, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1032 | } 1033 | 1034 | #[test] 1035 | #[serial] 1036 | pub fn test_unsafe_dropcount() { 1037 | DROP_COUNT.store(0, ::std::sync::atomic::Ordering::Relaxed); 1038 | { 1039 | let buf: super::AtomicRingBuffer = super::AtomicRingBuffer::with_capacity(1024); 1040 | buf.try_unsafe_write(|w| unsafe { ::std::ptr::write(w, TestType { some: 0 }) }).expect("push"); 1041 | buf.try_unsafe_write(|w| unsafe { ::std::ptr::write(w, TestType { some: 0 }) }).expect("push"); 1042 | 1043 | assert_eq!(0, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1044 | buf.try_read(|_| {}); 1045 | assert_eq!(1, 
DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1046 | } 1047 | assert_eq!(2, DROP_COUNT.load(::std::sync::atomic::Ordering::Relaxed)); 1048 | } 1049 | 1050 | fn pop_wait(buf: &::std::sync::Arc>) -> usize { 1051 | loop { 1052 | match buf.try_pop() { 1053 | None => continue, 1054 | Some(v) => return v, 1055 | } 1056 | } 1057 | } 1058 | } 1059 | 1060 | --------------------------------------------------------------------------------