├── .github └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── Readme.md ├── benches ├── mpmc_mt_read_write_bench.rs ├── mt_read_bench.rs ├── read_bench.rs ├── read_empty_bench.rs ├── spmc_mt_read_write_bench.rs └── write_bench.rs ├── doc ├── images │ ├── event_queue.png │ ├── st_read_bench.svg │ ├── st_write_bench.svg │ └── tracked_chunks.png ├── mpmc_benchmarks.md ├── principle-of-operation.md └── tests.md ├── src ├── chunk_state.rs ├── cursor.rs ├── dynamic_array │ ├── mod.rs │ └── test.rs ├── dynamic_chunk.rs ├── event_queue.rs ├── event_queue │ └── test.rs ├── event_reader.rs ├── lib.rs ├── mpmc │ ├── event_queue.rs │ ├── event_reader.rs │ └── mod.rs ├── spmc │ ├── event_queue.rs │ ├── event_reader.rs │ └── mod.rs ├── sync │ ├── build.rs │ ├── dev.rs │ └── mod.rs ├── tests │ ├── common.rs │ ├── loom_test.rs │ ├── mod.rs │ ├── mpmc.rs │ ├── spmc.rs │ └── utils.rs └── utils.rs └── tests ├── compile-tests.rs └── compile-tests └── compile-fail └── iter_lifetimes.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ master, dev ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Build 18 | run: RUSTFLAGS="--deny warnings" cargo build 19 | 20 | build-all-features: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v2 24 | - name: Build with all features 25 | run: RUSTFLAGS="--deny warnings" cargo build --all-features 26 | 27 | tests: 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v2 31 | - name: Run tests 32 | run: cargo test --all-features 33 | 34 | miri: 35 | runs-on: ubuntu-latest 36 | steps: 37 | - uses: actions/checkout@v2 38 | - uses: actions-rs/toolchain@v1 39 | with: 40 | toolchain: nightly 41 | components: miri 42 | - name: Miri tests 43 | run: cargo +nightly miri test 44 | 45 | benches: 46 | runs-on: ubuntu-latest 47 | steps: 48 | - uses: actions/checkout@v2 49 | - name: Build benches 50 | run: cargo build --benches 51 | 52 | doc: 53 | runs-on: ubuntu-latest 54 | steps: 55 | - uses: actions/checkout@v2 56 | - name: Build doc 57 | run: RUSTFLAGS="--deny warnings" cargo doc --lib 58 | 59 | loom: 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v2 63 | - name: Run loom tests 64 | run: RUSTFLAGS="--cfg loom" cargo test --lib tests::loom_test --release -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | /.idea 4 | /.vscode 5 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 0.4.2 4 | ### Changed 5 | - clear/truncate_front now dispose chunks not occupied by readers immediately! Which, at least partially, solves "emergency cleanup" problem. 6 | Now you don't have to have access to all readers! 7 | - Subscribe/unsubscribe now O(1). 8 | - EventQueue::total_capacity now O(1). 9 | 10 | ## 0.4.1 11 | ### Added 12 | - miri support 13 | ### Changed 14 | - Using spin mutex everywhere. Write performance improved x2 in non-heavy concurrent cases. 
15 | 16 | ## 0.4.0 17 | ### Added 18 | - `spmc` version 19 | ### Changed 20 | - `EventQueue::subscribe` -> `EventReader::new` 21 | 22 | ## 0.3.1 23 | ### Changed 24 | - Improved read performance on long runs. 25 | 26 | ## 0.3.0 27 | ### Security 28 | - `EventReader::iter` now return `LendingIterator`. This prevent references from outlive iterator. 29 | Since chunk with items, pointed by references returned by iterator, may die after iterator drop, 30 | it is not safe to keep them outside iterator. 31 | ### Changed 32 | - AUTO_CLEANUP -> CLEANUP{ ON_CHUNK_READ / ON_NEW_CHUNK / NEVER } 33 | 34 | ## 0.2.0 35 | ### Changed 36 | - Chunks now have dynamic size. Chunks grow in size, 37 | in order to find optimal chunk size. 38 | In ideal, we will work with just 2 same-sized chunks. 39 | ### Added 40 | - `double_buffering` feature. 41 | 42 | ## 0.1.0 43 | ### Added 44 | - Initial working implementation with fix-size chunks. -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rc_event_queue" 3 | authors = ["Andrey Diduh "] 4 | license = "MIT OR Apache-2.0" 5 | version = "0.4.3" 6 | edition = "2018" 7 | description = "VecDeque-like fast, unbounded, FIFO, concurent read-lock-free message queue." 8 | repository = "https://github.com/tower120/rc_event_queue" 9 | keywords = ["lock-free", "queue", "mpmc", "concurent", "message-queue"] 10 | categories = ["data-structures", "concurrency"] 11 | exclude = ["doc"] 12 | 13 | [features] 14 | double_buffering = [] 15 | 16 | [dependencies] 17 | #parking_lot = "0.11.2" 18 | spin = {version ="0.9.2", features = ["std"] } 19 | lock_api = "0.4.5" 20 | 21 | [dev-dependencies] 22 | itertools = "0.10.1" 23 | criterion = "0.3.3" 24 | rand = "0.8.4" 25 | compiletest_rs = { version = "0.7" } 26 | 27 | [target.'cfg(loom)'.dev-dependencies] 28 | loom = "0.5.2" 29 | 30 | [[bench]] 31 | name = "read_bench" 32 | harness = false 33 | 34 | [[bench]] 35 | name = "read_empty_bench" 36 | harness = false 37 | 38 | [[bench]] 39 | name = "write_bench" 40 | harness = false 41 | 42 | [[bench]] 43 | name = "mt_read_bench" 44 | harness = false 45 | 46 | [[bench]] 47 | name = "mpmc_mt_read_write_bench" 48 | harness = false 49 | 50 | [[bench]] 51 | name = "spmc_mt_read_write_bench" 52 | harness = false -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021 Andrey Diduh 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
-------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Andrey Diduh 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | [![crates.io](https://img.shields.io/crates/v/rc_event_queue.svg)](https://crates.io/crates/rc_event_queue) 2 | [![Docs](https://docs.rs/rc_event_queue/badge.svg)](https://docs.rs/rc_event_queue) 3 | [![CI](https://github.com/tower120/rc_event_queue/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/tower120/rc_event_queue/actions/workflows/ci.yml) 4 | 5 | > [!WARNING] 6 | > Project is deprecated in favor of [chute](https://github.com/tower120/chute/). 7 | > 8 | > [Chute](https://github.com/tower120/chute/) is a continuation of this project, featuring truly lock-free MPMC 9 | > writers that are superlinearly faster in highly concurrent scenarios. 10 | 11 | ## Reader counted event queue 12 | 13 | Fast, concurrent FIFO event queue _(or message queue)_. Multiple consumers receive every message. 14 | 15 | - mpmc _(multi-producer multi-consumer)_ - lock-free read, locked write. 16 | - spmc _(single-producer multi-consumer)_ - lock-free read, lock-free write. 17 | 18 | Write operations never block read operations. Performance consumer oriented. Mostly contiguous memory layout. 19 | Memory consumption does not grow with readers number. 20 | 21 | ### Performance 22 | 23 | Have VERY low CPU + memory overhead. Most of the time reader just do 1 atomic load per `iter()` call. That's all! 24 | 25 | #### Single-threaded. 26 | 27 | Read - close to `VecDeque`! Write: 28 | - `mpmc` - `push` 2x slower then `VecDeque`. `extend` with at least 4 items, close to `VecDeque`. 29 | - `spmc` - equal to `VecDeque`! 30 | 31 | #### Multi-threaded. 32 | 33 | Read - per thread performance degrades slowly, with each additional simultaneously reading thread. 34 | _(Also remember, since `rc_event_queue` is message queue, and each reader read ALL queue - 35 | adding more readers does not consume queue faster)_ 36 | 37 | Write - per thread performance degrades almost linearly, with each additional simultaneously writing thread. 38 | (Due to being locked). Not applicable to `spmc`. 39 | 40 | N.B. 
But if there is no heavy contention - performance very close to single-threaded case. 41 | 42 | [See mpmc benchmarks](doc/mpmc_benchmarks.md). 43 | 44 | ### Principle of operation 45 | 46 | See [doc/principle-of-operation.md](doc/principle-of-operation.md). 47 | 48 | Short version - `EventQueue` operates on the chunk basis. `EventQueue` does not touch `EventReader`s . `EventReader`s always 49 | "pull" from `EventQueue`. The only way `EventReader` interact with `EventQueue` - by increasing read counter 50 | when switching to next chunk during traverse. 51 | 52 | ### Usage 53 | 54 | [API doc](https://docs.rs/rc_event_queue/) 55 | 56 | ```rust 57 | use rc_event_queue::prelude::*; 58 | use rc_event_queue::mpmc::{EventQueue, EventReader}; 59 | 60 | let event = EventQueue::::new(); 61 | let mut reader1 = EventReader::new(event); 62 | let mut reader2 = EventReader::new(event); 63 | 64 | event.push(1); 65 | event.push(10); 66 | event.push(100); 67 | event.push(1000); 68 | 69 | fn sum (mut iter: impl LendingIterator) -> usize { 70 | let mut sum = 0; 71 | while let Some(item) = iter.next() { 72 | sum += item; 73 | } 74 | sum 75 | } 76 | 77 | assert!(sum(reader1.iter()) == 1111); 78 | assert!(sum(reader1.iter()) == 0); 79 | assert!(sum(reader2.iter()) == 1111); 80 | assert!(sum(reader2.iter()) == 0); 81 | 82 | event.extend(0..10); 83 | assert!(sum(reader1.iter()) == 55); 84 | assert!(sum(reader2.iter()) == 55); 85 | ``` 86 | 87 | clear: 88 | ```rust 89 | event.push(1); 90 | event.push(10); 91 | event.clear(); 92 | event.push(100); 93 | event.push(1000); 94 | 95 | assert!(sum(reader1.iter()) == 1100); 96 | assert!(sum(reader2.iter()) == 1100); 97 | ``` 98 | 99 | `clear`/`truncate_front` have peculiarities - chunks occupied by readers, will not be freed immediately. 100 | 101 | ### Emergency cut 102 | 103 | If any of the readers did not read for a long time - it can retain queue from cleanup. 104 | This means that queue capacity will grow. On long runs with unpredictable systems, you may want to periodically check `total_capacity`, 105 | and if it grows too much - you may want to force-cut/clear it. 106 | 107 | ```rust 108 | if event.total_capacity() > 100000{ 109 | // This will not free chunks occupied by readers, but will free the rest. 110 | // This should be enough, to prevent memory leak, in case if some readers 111 | // stop consume unexpectedly. 112 | event.truncate_front(1000); // leave some of the latest messages to read 113 | 114 | // If you set to Settings::MAX_CHUNK_SIZE to high value, 115 | // this will reduce chunk size. 116 | event.change_chunk_size(2048); 117 | 118 | // If you DO have access to all readers (you probably don't) - 119 | // this will move readers forward, and free the chunks occupied by readers. 120 | // Under normal conditions, this is not necessary, since readers will jump 121 | // forward to another chunk immediately on the next iter() call. 122 | for reader in readers{ 123 | reader.update_position(); 124 | // reader.iter(); // this have same effect as above 125 | } 126 | } 127 | ``` 128 | Even if some reader will stop read forever - you'll only lose/leak chunk directly occupied by reader. 129 | 130 | ### Optimisation 131 | 132 | #### CLEANUP 133 | 134 | Set `CLEANUP` to `Never` in `Settings`, in order to postpone chunks deallocations. 
135 | 136 | ```rust 137 | use rc_event_reader::mpmc::{EventQueue, EventReader, Settings}; 138 | 139 | struct S{} impl Settings for S{ 140 | const MIN_CHUNK_SIZE: u32 = 4; 141 | const MAX_CHUNK_SIZE: u32 = 4096; 142 | const CLEANUP: CleanupMode = CleanupMode::Never; 143 | } 144 | 145 | let event = EventQueue::::new(); 146 | let mut reader = event.subscribe(); 147 | 148 | event.extend(0..10); 149 | sum(reader.iter()); // With CLEANUP != Never, this would cause chunk deallocation 150 | 151 | ... 152 | 153 | event.cleanup(); // Free used chunks 154 | ``` 155 | #### double_buffering 156 | 157 | Use `double_buffering` feature. This will reuse biggest freed chunk. When `EventQueue` reach its optimal size - chunks will be just swapped, 158 | without alloc/dealloc. 159 | 160 | ### Soundness 161 | 162 | `EventQueue` covered with tests. [Miri](https://github.com/rust-lang/miri) tests. [Loom](https://github.com/tokio-rs/loom) tests. See [doc/tests.md](doc/tests.md) 163 | -------------------------------------------------------------------------------- /benches/mpmc_mt_read_write_bench.rs: -------------------------------------------------------------------------------- 1 | use rc_event_queue::mpmc::{ EventQueue, EventReader, Settings}; 2 | use rc_event_queue::{CleanupMode, LendingIterator}; 3 | use criterion::{Criterion, black_box, criterion_main, criterion_group, BenchmarkId}; 4 | use std::time::{Duration, Instant}; 5 | use std::thread; 6 | use std::sync::Arc; 7 | use std::sync::atomic::{AtomicBool, Ordering}; 8 | use std::pin::Pin; 9 | 10 | const QUEUE_SIZE: usize = 100000; 11 | 12 | struct S{} impl Settings for S{ 13 | const MIN_CHUNK_SIZE: u32 = 512; 14 | const MAX_CHUNK_SIZE: u32 = 512; 15 | const CLEANUP: CleanupMode = CleanupMode::Never; 16 | } 17 | type Event = EventQueue; 18 | type ArcEvent = Pin>; 19 | 20 | 21 | /// We test high-contention read-write case. 22 | fn bench_event_read_write(iters: u64, writer_fn: F) -> Duration 23 | where F: Fn(&ArcEvent, usize, usize) -> () + Send + 'static + Clone 24 | { 25 | let mut total = Duration::ZERO; 26 | 27 | let writers_thread_count = 2; 28 | let readers_thread_count = 4; 29 | 30 | 31 | for _ in 0..iters { 32 | let event = Event::new(); 33 | 34 | let mut readers = Vec::new(); 35 | for _ in 0..readers_thread_count{ 36 | readers.push(EventReader::new(&event)); 37 | } 38 | 39 | // write 40 | let mut writer_threads = Vec::new(); 41 | let writer_chunk = QUEUE_SIZE / writers_thread_count; 42 | for thread_id in 0..writers_thread_count{ 43 | let event = event.clone(); 44 | let writer_fn = writer_fn.clone(); 45 | let thread = Box::new(thread::spawn(move || { 46 | let from = thread_id*writer_chunk; 47 | let to = from+writer_chunk; 48 | writer_fn(&event, from, to); 49 | })); 50 | writer_threads.push(thread); 51 | } 52 | 53 | // read 54 | let readers_stop = Arc::new(AtomicBool::new(false)); 55 | let mut reader_threads = Vec::new(); 56 | for mut reader in readers{ 57 | let readers_stop = readers_stop.clone(); 58 | let thread = Box::new(thread::spawn(move || { 59 | let mut local_sum0: usize = 0; 60 | 61 | // do-while ensures that reader will try another round after stop, 62 | // to consume leftovers. Since iter's end/sentinel acquired at iter construction. 
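                // (Rust has no do-while; in the `loop` below the stop flag is sampled *before*
                // draining and checked *after*, so one more full drain is guaranteed once writers finish.)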
63 | loop{ 64 | let stop = readers_stop.load(Ordering::Acquire); 65 | let mut iter = reader.iter(); 66 | while let Some(i) = iter.next(){ 67 | local_sum0 += i; 68 | } 69 | if stop{ break; } 70 | } 71 | 72 | black_box(local_sum0); 73 | })); 74 | reader_threads.push(thread); 75 | } 76 | 77 | // await and measure 78 | let start = Instant::now(); 79 | 80 | for thread in writer_threads { 81 | thread.join().unwrap(); 82 | } 83 | readers_stop.store(true, Ordering::Release); 84 | for thread in reader_threads { 85 | thread.join().unwrap(); 86 | } 87 | 88 | total += start.elapsed(); 89 | } 90 | total 91 | } 92 | 93 | 94 | pub fn mt_read_write_event_benchmark(c: &mut Criterion) { 95 | let mut group = c.benchmark_group("mpmc mt read write"); 96 | for session_size in [4, 8, 16, 32, 128, 512 as usize]{ 97 | group.bench_with_input( 98 | BenchmarkId::new("mpmc::EventQueue extend", session_size), 99 | &session_size, 100 | |b, input| b.iter_custom(|iters| { 101 | let session_len = *input; 102 | let f = move |event: &ArcEvent, from: usize, to: usize|{ 103 | write_extend(session_len, event, from, to); 104 | }; 105 | bench_event_read_write(iters, f) 106 | })); 107 | } 108 | 109 | #[inline(always)] 110 | fn write_push(event: &ArcEvent, from: usize, to: usize){ 111 | for i in from..to{ 112 | event.push(black_box(i)); 113 | } 114 | } 115 | #[inline(always)] 116 | fn write_extend(session_len: usize, event: &ArcEvent, from: usize, to: usize){ 117 | let mut i = from; 118 | loop{ 119 | let session_from = i; 120 | let session_to = session_from + session_len; 121 | if session_to>=to{ 122 | return; 123 | } 124 | 125 | event.extend(black_box(session_from..session_to)); 126 | 127 | i = session_to; 128 | } 129 | } 130 | 131 | group.bench_function("mpmc::EventQueue push", |b|b.iter_custom(|iters| bench_event_read_write(iters, write_push))); 132 | } 133 | 134 | criterion_group!(benches, mt_read_write_event_benchmark); 135 | criterion_main!(benches); -------------------------------------------------------------------------------- /benches/mt_read_bench.rs: -------------------------------------------------------------------------------- 1 | //! mpmc and spmc are the same. 2 | //! 3 | //! Chunk size dependence test. 4 | 5 | use criterion::{Criterion, criterion_group, criterion_main, BenchmarkId, black_box, BenchmarkGroup}; 6 | use rc_event_queue::mpmc::{EventQueue, EventReader, Settings}; 7 | use rc_event_queue::prelude::*; 8 | use std::thread; 9 | use std::time::{Duration, Instant}; 10 | use criterion::measurement::WallTime; 11 | 12 | const QUEUE_SIZE: usize = 100000; 13 | 14 | fn read_bench( 15 | readers_start_offset_step: usize, 16 | read_session_size: usize, 17 | readers_thread_count: usize 18 | ) -> Duration { 19 | let event = EventQueue::::new(); 20 | 21 | let mut readers = Vec::new(); 22 | let mut queue_n = 0; 23 | for _ in 0..readers_thread_count { 24 | event.extend(queue_n.. 
queue_n+ readers_start_offset_step); 25 | readers.push(EventReader::new(&event)); 26 | queue_n += readers_start_offset_step; 27 | } 28 | event.extend(queue_n..QUEUE_SIZE); 29 | 30 | // read 31 | let mut threads = Vec::new(); 32 | for mut reader in readers{ 33 | let thread = Box::new(thread::spawn(move || { 34 | // simulate "read sessions" 35 | 'outer: loop{ 36 | let mut iter = reader.iter(); 37 | for _ in 0..read_session_size { 38 | let next = iter.next(); 39 | match next{ 40 | None => {break 'outer;} 41 | Some(i) => {black_box(i);} 42 | } 43 | } 44 | } 45 | })); 46 | threads.push(thread); 47 | } 48 | 49 | let start = Instant::now(); 50 | for thread in threads{ 51 | thread.join().unwrap(); 52 | } 53 | start.elapsed() 54 | } 55 | 56 | pub fn mt_read_event_benchmark(c: &mut Criterion) { 57 | fn bench(group: &mut BenchmarkGroup, id: &str, mut f: impl FnMut() -> Duration) { 58 | group.bench_function(id, |b| b.iter_custom(|iters| { 59 | let mut total = Duration::ZERO; 60 | for _ in 0..iters { 61 | total += f(); 62 | } 63 | total 64 | })); 65 | } 66 | 67 | let mut test_group = |name: &str, readers_start_offset_step: usize, read_session_size: usize, threads_count: usize|{ 68 | let mut group = c.benchmark_group(name); 69 | 70 | bench(&mut group, "chunk:32", ||{ 71 | struct S{} impl Settings for S{ 72 | const MIN_CHUNK_SIZE: u32 = 32; 73 | const MAX_CHUNK_SIZE: u32 = 32; 74 | const CLEANUP: CleanupMode = CleanupMode::Never; 75 | } 76 | read_bench::(readers_start_offset_step, read_session_size, threads_count) 77 | }); 78 | bench(&mut group, "chunk:128", ||{ 79 | struct S{} impl Settings for S{ 80 | const MIN_CHUNK_SIZE: u32 = 128; 81 | const MAX_CHUNK_SIZE: u32 = 128; 82 | const CLEANUP: CleanupMode = CleanupMode::Never; 83 | } 84 | read_bench::(readers_start_offset_step, read_session_size, threads_count) 85 | }); 86 | bench(&mut group, "chunk:512", ||{ 87 | struct S{} impl Settings for S{ 88 | const MIN_CHUNK_SIZE: u32 = 512; 89 | const MAX_CHUNK_SIZE: u32 = 512; 90 | const CLEANUP: CleanupMode = CleanupMode::Never; 91 | } 92 | read_bench::(readers_start_offset_step, read_session_size, threads_count) 93 | }); 94 | bench(&mut group, "chunk:2048", ||{ 95 | struct S{} impl Settings for S{ 96 | const MIN_CHUNK_SIZE: u32 = 2048; 97 | const MAX_CHUNK_SIZE: u32 = 2048; 98 | const CLEANUP: CleanupMode = CleanupMode::Never; 99 | } 100 | read_bench::(readers_start_offset_step, read_session_size, threads_count) 101 | }); 102 | }; 103 | 104 | // thread count dependency bench 105 | test_group("mt read 1 thread", 0, 8096, 1); 106 | test_group("mt read 2 threads", 0, 8096, 2); 107 | test_group("mt read 4 threads", 0, 8096, 4); 108 | test_group("mt read 8 threads", 0, 8096, 8); 109 | 110 | // read session size dependency bench 111 | /* test_group(0, 8, 8); 112 | test_group(1000, 8, 8); 113 | test_group(0, 64, 8); 114 | test_group(1000, 64, 8); 115 | test_group(0, 8096, 8); 116 | test_group(1000, 8096, 8);*/ 117 | } 118 | 119 | criterion_group!(benches, mt_read_event_benchmark); 120 | criterion_main!(benches); -------------------------------------------------------------------------------- /benches/read_bench.rs: -------------------------------------------------------------------------------- 1 | //! mpmc and spmc are the same. 2 | //! 
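//! Measures single-reader read throughput for several read-session sizes,
//! with full `Vec`/`VecDeque` scans as baselines.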
3 | use rc_event_queue::mpmc::{EventQueue, EventReader, Settings}; 4 | use rc_event_queue::prelude::*; 5 | use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId}; 6 | use std::time::{Instant, Duration}; 7 | use std::collections::VecDeque; 8 | 9 | const QUEUE_SIZE: usize = 100000; 10 | 11 | struct EventQueueSettings{} 12 | impl Settings for EventQueueSettings{ 13 | const MIN_CHUNK_SIZE: u32 = 512; 14 | const MAX_CHUNK_SIZE: u32 = 512; 15 | const CLEANUP: CleanupMode = CleanupMode::Never; 16 | } 17 | 18 | fn bench_event_reader(iters: u64, read_session_size: usize) -> Duration{ 19 | let mut total = Duration::ZERO; 20 | for _ in 0..iters { 21 | let event = EventQueue::::new(); 22 | let mut reader = EventReader::new(&event); 23 | for i in 0..QUEUE_SIZE { 24 | event.push(i); 25 | } 26 | 27 | let start = Instant::now(); 28 | 'outer: loop{ 29 | // simulate "read sessions" 30 | // Testing this, because constructing iterator _and switching chunk_ 31 | // is the only potentially "heavy" operations 32 | 33 | let mut iter = reader.iter(); 34 | for _ in 0..read_session_size { 35 | let next = iter.next(); 36 | match next{ 37 | None => {break 'outer;} 38 | Some(i) => {black_box(i);} 39 | } 40 | } 41 | } 42 | total += start.elapsed(); 43 | } 44 | total 45 | } 46 | 47 | fn bench_event_reader_whole(iters: u64) -> Duration{ 48 | let mut total = Duration::ZERO; 49 | for _ in 0..iters { 50 | let event = EventQueue::::new(); 51 | let mut reader = EventReader::new(&event); 52 | for i in 0..QUEUE_SIZE { 53 | event.push(i); 54 | } 55 | 56 | let start = Instant::now(); 57 | let mut iter = reader.iter(); 58 | while let Some(i) = iter.next(){ 59 | black_box(i); 60 | } 61 | total += start.elapsed(); 62 | } 63 | total 64 | } 65 | 66 | fn bench_vector_whole(iters: u64) -> Duration{ 67 | let mut total = Duration::ZERO; 68 | for _ in 0..iters { 69 | let mut vec = Vec::new(); 70 | for i in 0..QUEUE_SIZE { 71 | vec.push(i); 72 | } 73 | 74 | let start = Instant::now(); 75 | for i in vec.iter(){ 76 | black_box(i); 77 | } 78 | total += start.elapsed(); 79 | } 80 | total 81 | } 82 | 83 | fn bench_deque_whole(iters: u64) -> Duration{ 84 | let mut total = Duration::ZERO; 85 | for _ in 0..iters { 86 | let mut deque = VecDeque::new(); 87 | for i in 0..QUEUE_SIZE { 88 | deque.push_back(i); 89 | } 90 | 91 | let start = Instant::now(); 92 | for i in deque.iter(){ 93 | black_box(i); 94 | } 95 | total += start.elapsed(); 96 | } 97 | total 98 | } 99 | 100 | 101 | 102 | pub fn read_event_benchmark(c: &mut Criterion) { 103 | let mut group = c.benchmark_group("Read"); 104 | for read_session_size in [4, 8, 16, 32, 128, 512]{ 105 | group.bench_with_input( 106 | BenchmarkId::new("EventReader", read_session_size), 107 | &read_session_size, 108 | |b, input| b.iter_custom(|iters| { bench_event_reader(iters, *input) })); 109 | } 110 | group.bench_function("EventReader/Whole", |b|b.iter_custom(|iters| bench_event_reader_whole(iters))); 111 | group.bench_function("Vec", |b|b.iter_custom(|iters| bench_vector_whole(iters))); 112 | group.bench_function("Deque", |b|b.iter_custom(|iters| bench_deque_whole(iters))); 113 | } 114 | 115 | criterion_group!(benches, read_event_benchmark); 116 | criterion_main!(benches); -------------------------------------------------------------------------------- /benches/read_empty_bench.rs: -------------------------------------------------------------------------------- 1 | //! This is special benchmark, to measure empty queue iteration overhead. 2 | //! 
mpmc and spmc use the same code for readers. 3 | 4 | use rc_event_queue::mpmc::{EventQueue, EventReader, Settings}; 5 | use rc_event_queue::prelude::*; 6 | use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId}; 7 | use std::time::{Instant, Duration}; 8 | use std::collections::VecDeque; 9 | 10 | struct EventQueueSettings{} 11 | impl Settings for EventQueueSettings{ 12 | const MIN_CHUNK_SIZE: u32 = 512; 13 | const MAX_CHUNK_SIZE: u32 = 512; 14 | const CLEANUP: CleanupMode = CleanupMode::Never; 15 | } 16 | 17 | fn bench_event_reader(iters: u64) -> Duration{ 18 | let mut total = Duration::ZERO; 19 | for _ in 0..iters { 20 | let event = EventQueue::::new(); 21 | let mut reader = EventReader::new(&event); 22 | let start = Instant::now(); 23 | let mut iter = reader.iter(); 24 | while let Some(i) = iter.next(){ 25 | black_box(i); 26 | } 27 | total += start.elapsed(); 28 | } 29 | total 30 | } 31 | 32 | fn bench_vector(iters: u64) -> Duration{ 33 | let mut total = Duration::ZERO; 34 | for _ in 0..iters { 35 | let vec = Vec::::new(); 36 | 37 | let start = Instant::now(); 38 | for i in vec.iter(){ 39 | black_box(i); 40 | } 41 | total += start.elapsed(); 42 | } 43 | total 44 | } 45 | 46 | fn bench_deque(iters: u64) -> Duration{ 47 | let mut total = Duration::ZERO; 48 | for _ in 0..iters { 49 | let deque = VecDeque::::new(); 50 | 51 | let start = Instant::now(); 52 | for i in deque.iter(){ 53 | black_box(i); 54 | } 55 | total += start.elapsed(); 56 | } 57 | total 58 | } 59 | 60 | 61 | 62 | pub fn read_empty_event_benchmark(c: &mut Criterion) { 63 | let mut group = c.benchmark_group("Read empty"); 64 | group.bench_function("EventReader", |b|b.iter_custom(|iters| bench_event_reader(iters))); 65 | group.bench_function("Vec", |b|b.iter_custom(|iters| bench_vector(iters))); 66 | group.bench_function("Deque", |b|b.iter_custom(|iters| bench_deque(iters))); 67 | } 68 | 69 | criterion_group!(benches, read_empty_event_benchmark); 70 | criterion_main!(benches); -------------------------------------------------------------------------------- /benches/spmc_mt_read_write_bench.rs: -------------------------------------------------------------------------------- 1 | use rc_event_queue::spmc::{EventQueue, EventReader, Settings}; 2 | use criterion::{Criterion, black_box, criterion_main, criterion_group, BenchmarkId}; 3 | use std::time::{Duration, Instant}; 4 | use std::thread; 5 | use std::sync::Arc; 6 | use std::sync::atomic::{AtomicBool, Ordering}; 7 | use std::pin::Pin; 8 | use rc_event_queue::{CleanupMode, LendingIterator}; 9 | 10 | const QUEUE_SIZE: usize = 100000; 11 | 12 | struct S{} impl Settings for S{ 13 | const MIN_CHUNK_SIZE: u32 = 512; 14 | const MAX_CHUNK_SIZE: u32 = 512; 15 | const CLEANUP: CleanupMode = CleanupMode::Never; 16 | } 17 | type Event = EventQueue; 18 | 19 | 20 | /// We test high-contention read-write case. 
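/// A single writer thread fills the whole queue while several reader threads drain it concurrently.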
21 | fn bench_event_read_write(iters: u64, writer_fn: F) -> Duration 22 | where F: Fn(&mut Event, usize, usize) -> () + Send + 'static + Clone 23 | { 24 | let mut total = Duration::ZERO; 25 | let readers_thread_count = 4; 26 | 27 | for _ in 0..iters { 28 | let mut event = Event::new(); 29 | 30 | let mut readers = Vec::new(); 31 | for _ in 0..readers_thread_count{ 32 | readers.push(EventReader::new(&mut event)); 33 | } 34 | 35 | // write 36 | let writer_thread = { 37 | let writer_fn = writer_fn.clone(); 38 | Box::new(thread::spawn(move || { 39 | writer_fn(&mut event, 0, QUEUE_SIZE); 40 | })) 41 | }; 42 | 43 | // read 44 | let readers_stop = Arc::new(AtomicBool::new(false)); 45 | let mut reader_threads = Vec::new(); 46 | for mut reader in readers{ 47 | let readers_stop = readers_stop.clone(); 48 | let thread = Box::new(thread::spawn(move || { 49 | let mut local_sum0: usize = 0; 50 | 51 | // do-while ensures that reader will try another round after stop, 52 | // to consume leftovers. Since iter's end/sentinel acquired at iter construction. 53 | loop{ 54 | let stop = readers_stop.load(Ordering::Acquire); 55 | let mut iter = reader.iter(); 56 | while let Some(i) = iter.next(){ 57 | local_sum0 += i; 58 | } 59 | if stop{ break; } 60 | } 61 | 62 | black_box(local_sum0); 63 | })); 64 | reader_threads.push(thread); 65 | } 66 | 67 | // await and measure 68 | let start = Instant::now(); 69 | 70 | writer_thread.join().unwrap(); 71 | readers_stop.store(true, Ordering::Release); 72 | for thread in reader_threads { 73 | thread.join().unwrap(); 74 | } 75 | 76 | total += start.elapsed(); 77 | } 78 | total 79 | } 80 | 81 | 82 | pub fn mt_read_write_event_benchmark(c: &mut Criterion) { 83 | let mut group = c.benchmark_group("spmc mt read write"); 84 | for session_size in [4, 8, 16, 32, 128, 512 as usize]{ 85 | group.bench_with_input( 86 | BenchmarkId::new("spmc::EventQueue extend", session_size), 87 | &session_size, 88 | |b, input| b.iter_custom(|iters| { 89 | let session_len = *input; 90 | let f = move |event: &mut Event, from: usize, to: usize|{ 91 | write_extend(session_len, event, from, to); 92 | }; 93 | bench_event_read_write(iters, f) 94 | })); 95 | } 96 | 97 | #[inline(always)] 98 | fn write_push(event: &mut Event, from: usize, to: usize){ 99 | for i in from..to{ 100 | event.push(black_box(i)); 101 | } 102 | } 103 | #[inline(always)] 104 | fn write_extend(session_len: usize, event: &mut Event, from: usize, to: usize){ 105 | let mut i = from; 106 | loop{ 107 | let session_from = i; 108 | let session_to = session_from + session_len; 109 | if session_to>=to{ 110 | return; 111 | } 112 | 113 | event.extend(black_box(session_from..session_to)); 114 | 115 | i = session_to; 116 | } 117 | } 118 | 119 | group.bench_function("spmc::EventQueue push", |b|b.iter_custom(|iters| bench_event_read_write(iters, write_push))); 120 | } 121 | 122 | criterion_group!(benches, mt_read_write_event_benchmark); 123 | criterion_main!(benches); -------------------------------------------------------------------------------- /benches/write_bench.rs: -------------------------------------------------------------------------------- 1 | use criterion::{Criterion, BenchmarkId, black_box, criterion_main, criterion_group}; 2 | use std::time::{Duration, Instant}; 3 | use std::collections::VecDeque; 4 | use rc_event_queue::{CleanupMode, mpmc, spmc}; 5 | 6 | const QUEUE_SIZE: usize = 100000; 7 | 8 | macro_rules! 
event_queue_bench { 9 | ($mod_name:ident, $event_type:ty) => { 10 | #[allow(unused_mut)] 11 | mod $mod_name{ 12 | use std::time::{Duration, Instant}; 13 | use criterion::black_box; 14 | use crate::QUEUE_SIZE; 15 | 16 | pub fn bench_event_extend_session(iters: u64, session_len: usize) -> Duration{ 17 | let mut total = Duration::ZERO; 18 | let sessions_count = QUEUE_SIZE / session_len; 19 | for _ in 0..iters { 20 | let mut event = <$event_type>::new(); 21 | let start = Instant::now(); 22 | 23 | for session_id in 0..sessions_count{ 24 | let from = black_box(session_id) * session_len; 25 | let to = from + session_len; 26 | event.extend(from..to); 27 | } 28 | 29 | total += start.elapsed(); 30 | } 31 | total 32 | } 33 | 34 | pub fn bench_event_extend(iters: u64) -> Duration{ 35 | let mut total = Duration::ZERO; 36 | for _ in 0..iters { 37 | let mut event = <$event_type>::new(); 38 | let start = Instant::now(); 39 | 40 | event.extend(black_box(0..QUEUE_SIZE)); 41 | 42 | total += start.elapsed(); 43 | } 44 | total 45 | } 46 | 47 | pub fn bench_event_push(iters: u64) -> Duration{ 48 | let mut total = Duration::ZERO; 49 | for _ in 0..iters { 50 | let mut event = <$event_type>::new(); 51 | let start = Instant::now(); 52 | 53 | for i in 0..QUEUE_SIZE { 54 | event.push(black_box(i)); 55 | } 56 | 57 | total += start.elapsed(); 58 | } 59 | total 60 | } 61 | } 62 | } 63 | } 64 | 65 | struct MPMCEventQueueSettings{} 66 | impl mpmc::Settings for MPMCEventQueueSettings{ 67 | const MIN_CHUNK_SIZE: u32 = 512; 68 | const MAX_CHUNK_SIZE: u32 = 512; 69 | const CLEANUP: CleanupMode = CleanupMode::Never; 70 | } 71 | event_queue_bench!(mpmc_bench, crate::mpmc::EventQueue); 72 | 73 | struct SPMCEventQueueSettings{} 74 | impl spmc::Settings for SPMCEventQueueSettings{ 75 | const MIN_CHUNK_SIZE: u32 = 512; 76 | const MAX_CHUNK_SIZE: u32 = 512; 77 | const CLEANUP: CleanupMode = CleanupMode::Never; 78 | } 79 | event_queue_bench!(spmc_bench, crate::spmc::EventQueue); 80 | 81 | fn bench_vector_push(iters: u64) -> Duration{ 82 | let mut total = Duration::ZERO; 83 | for _ in 0..iters { 84 | let mut vec = Vec::new(); 85 | let start = Instant::now(); 86 | 87 | for i in 0..QUEUE_SIZE { 88 | vec.push(black_box(i)); 89 | } 90 | 91 | total += start.elapsed(); 92 | } 93 | total 94 | } 95 | 96 | fn bench_vector_extend(iters: u64) -> Duration{ 97 | let mut total = Duration::ZERO; 98 | for _ in 0..iters { 99 | let mut vec = Vec::new(); 100 | let start = Instant::now(); 101 | 102 | vec.extend(black_box(0..QUEUE_SIZE)); 103 | 104 | total += start.elapsed(); 105 | } 106 | total 107 | } 108 | 109 | 110 | fn bench_deque_push(iters: u64) -> Duration{ 111 | let mut total = Duration::ZERO; 112 | for _ in 0..iters { 113 | let mut vec = VecDeque::new(); 114 | let start = Instant::now(); 115 | 116 | for i in 0..QUEUE_SIZE { 117 | vec.push_back(black_box(i)); 118 | } 119 | 120 | total += start.elapsed(); 121 | } 122 | total 123 | } 124 | 125 | fn bench_deque_extend(iters: u64) -> Duration{ 126 | let mut total = Duration::ZERO; 127 | for _ in 0..iters { 128 | let mut vec = VecDeque::new(); 129 | let start = Instant::now(); 130 | 131 | vec.extend(black_box(0..QUEUE_SIZE)); 132 | 133 | total += start.elapsed(); 134 | } 135 | total 136 | } 137 | 138 | 139 | pub fn write_event_benchmark(c: &mut Criterion) { 140 | let mut group = c.benchmark_group("Write"); 141 | // -------------------------- mpmc --------------------------------------- 142 | for session_size in [1, 4, 8, 16, 32, 128, 512]{ 143 | group.bench_with_input( 144 | 
BenchmarkId::new("mpmc::EventQueue::extend session", session_size), 145 | &session_size, 146 | |b, input| b.iter_custom(|iters| { mpmc_bench::bench_event_extend_session(iters, *input) })); 147 | } 148 | group.bench_function("mpmc::EventQueue::extend", |b|b.iter_custom(|iters| mpmc_bench::bench_event_extend(iters))); 149 | group.bench_function("mpmc::EventQueue::push", |b|b.iter_custom(|iters| mpmc_bench::bench_event_push(iters))); 150 | 151 | // -------------------------- spmc --------------------------------------- 152 | for session_size in [1, 4, 8, 16, 32, 128, 512]{ 153 | group.bench_with_input( 154 | BenchmarkId::new("spmc::EventQueue::extend session", session_size), 155 | &session_size, 156 | |b, input| b.iter_custom(|iters| { spmc_bench::bench_event_extend_session(iters, *input) })); 157 | } 158 | group.bench_function("spmc::EventQueue::extend", |b|b.iter_custom(|iters| spmc_bench::bench_event_extend(iters))); 159 | group.bench_function("spmc::EventQueue::push", |b|b.iter_custom(|iters| spmc_bench::bench_event_push(iters))); 160 | 161 | // -------------------------- std --------------------------------------- 162 | group.bench_function("Vec::push", |b|b.iter_custom(|iters| bench_vector_push(iters))); 163 | group.bench_function("Vec::extend", |b|b.iter_custom(|iters| bench_vector_extend(iters))); 164 | group.bench_function("Deque::push", |b|b.iter_custom(|iters| bench_deque_push(iters))); 165 | group.bench_function("Deque::extend", |b|b.iter_custom(|iters| bench_deque_extend(iters))); 166 | } 167 | 168 | criterion_group!(benches, write_event_benchmark); 169 | criterion_main!(benches); -------------------------------------------------------------------------------- /doc/images/event_queue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tower120/rc_event_queue/943d8e4dff0d5f0ab04ec3f39c9a6ed38b025111/doc/images/event_queue.png -------------------------------------------------------------------------------- /doc/images/tracked_chunks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tower120/rc_event_queue/943d8e4dff0d5f0ab04ec3f39c9a6ed38b025111/doc/images/tracked_chunks.png -------------------------------------------------------------------------------- /doc/mpmc_benchmarks.md: -------------------------------------------------------------------------------- 1 | ### Bench machine 2 | ``` 3 | i7-4771 (4C/8T; 32Kb I/D L1) 4 | DDR3-1666 5 | Windows 10 6 | rustc 1.5.5 stable 7 | ``` 8 | 9 | **!! Obsolete. Data for v0.4.0 !!** 10 | 11 | Data from `mpmc` benchmarks in `/benches`. 12 | 13 | ## Single thread read performance 14 | 15 | ![](images/st_read_bench.svg) 16 | 17 | As you can see, on the long session it lies between `Vec` and `VecDeque`. On very short 18 | sessions - it is x2 slower then `VecDeque`. 19 | 20 | "Read session size" - is the number of items that reader consume per each `.iter()` call. 21 | 22 | ## Single thread write performance 23 | 24 | ![](images/st_write_bench.svg) 25 | 26 | Write to `EventQueue` is **not** lockless. Hence, `EventQueue::push` is x4 times slower, 27 | then `Vec::push` (which is not bad already). To overcome that - `EventQueue` has bulk 28 | insert - `EventQueue::extend`. On long write sessions - it closes to `Vec::extend`. 29 | 30 | "Write session size" - is the number of items that reader push per each `.extend()` call. 
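In code the difference is simply per-item `push` versus batched `extend`. A minimal sketch using the crate's public API (the batch size here is arbitrary):

```rust
use rc_event_queue::mpmc::EventQueue;

let event = EventQueue::<usize>::new();

// Worst case for the writer: the queue's write lock is taken per item.
for i in 0..512 {
    event.push(i);
}

// Bulk insert: one `extend` call per write session, which is what
// brings long write sessions close to `Vec::extend`.
event.extend(0..512);
```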
31 | 32 | ## Thread count read-performance dependency 33 | 34 | Read performance drop non-linear, with linear threads count grow. At some point it becomes 35 | memory-bandwidth bound. 36 | 37 | ``` 38 | readers_start_offset_step=0; read_session_size=8096; threads_count=2/chunk:512 39 | time: [177.93 us 178.44 us 179.08 us] 40 | readers_start_offset_step=0; read_session_size=8096; threads_count=4/chunk:512 41 | time: [237.34 us 238.26 us 239.25 us] 42 | readers_start_offset_step=0; read_session_size=8096; threads_count=8/chunk:512 43 | time: [338.97 us 341.73 us 344.28 us] 44 | ``` 45 | 46 | ## Chunk size dependency 47 | 48 | The bigger the chunk - the better. After some point (4096 - on benched machine) 49 | performance benefits become marginal. 50 | 51 | _Reducing chunk size and spreading reader positions over queue, in order to make each reader 52 | read its own chunk - did not show performance benefits._ 53 | 54 | ## Read session size dependency 55 | 56 | Getting iterator from `EventReader` and crossing inter-chunk boundary (rare case) is the only potential 57 | source of overhead. So, the more you read from the same iterator - the better. -------------------------------------------------------------------------------- /doc/principle-of-operation.md: -------------------------------------------------------------------------------- 1 | # Principle of operation 2 | 3 | EventQueue is multi-producer, multi-consumer FIFO queue. 4 | Performance-wise it is biased to the readers side. All reads are lockless and very fast. 5 | Writes happens under lock, and does not block reads. 6 | 7 | Single-threaded read performance is between `Vec` and `VecDeque` for best-case scenario; 1.5-2x slower then `VecDeque` - for worse. 8 | 9 | Single-threaded write performance 2-3x slower then `VecDeque`. But writing with `EventQueue::extend` can give you `VecDeque`-like performance. 10 | 11 | Memory-wise there is only fixed overhead. Each reader is just kind a pointer. 12 | 13 | 14 | ## The main idea 15 | 16 | ![](images/event_queue.png) 17 | 18 | EventQueue's storage is a single-linked list of chunks. 19 | In order to read from it, you need to register a Reader (Reader is kinda consuming forward iterator). 20 | As soon as all readers process the chunk - it is safe to free it. 21 | Thus - we only need to track the number of readers that completely read the chunk. 22 | 23 | Chunk's `read_count` atomic +1 operation happens only when `Reader` cross inter-chunk boundary. And that's basically 24 | all atomic stores for reader. 25 | 26 | One important thing to remember: is that both - writers and readers - go in one direction, they can not "jump". 27 | This means that if reader are in chunk 2 - chunks 0 and 1 are read. 28 | 29 | ## EventQueue 30 | 31 | ```rust 32 | struct EventQueue{ 33 | list: Mutex>, // this is writer lock 34 | readers_count: usize, 35 | } 36 | ``` 37 | 38 | ```rust 39 | struct Chunk{ 40 | storage: [T; CAPACITY], 41 | len: AtomicUsize, 42 | read_count: AtomicUsize, 43 | next: AtomicPtr, 44 | event: &Event, 45 | } 46 | ``` 47 | Synchronization between EventQueue and Readers happens on `Chunk::len` atomic Release/Acquire. 48 | When writer `push`, it locks `list`, write to chunk, then atomically increase `Chunk::len` (Release). 49 | 50 | Reader on start of the read, atomically load `Chunk::len` (Acquire). This guarantees that all memory writes, that happened 51 | prior to `len` Release will become visible on current thread (in analogue with spin lock). 
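A minimal model of this handshake (a simplified sketch, not the crate's actual code: single writer assumed, the write lock, bounds checks and chunk linking are omitted; `Chunk`, `push` and `read_from` are illustrative names):

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};

// Toy chunk: plain storage, published through an atomic `len`.
struct Chunk {
    storage: UnsafeCell<[usize; 8]>,
    len: AtomicUsize,
}

// Writer side (runs under the queue's write lock): plain write, then Release-publish.
unsafe fn push(chunk: &Chunk, value: usize) {
    let len = chunk.len.load(Ordering::Relaxed);
    (*chunk.storage.get())[len] = value;          // non-atomic write into the chunk
    chunk.len.store(len + 1, Ordering::Release);  // publishes the write above
}

// Reader side: one Acquire load per read session; every slot below `len`
// written before the matching Release store is guaranteed to be visible.
unsafe fn read_from(chunk: &Chunk, index: usize) -> Vec<usize> {
    let len = chunk.len.load(Ordering::Acquire);
    (*chunk.storage.get())[index..len].to_vec()
}
```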
52 | 53 | ## Reader 54 | 55 | ```rust 56 | struct Reader{ 57 | chunk: *const Chunk, 58 | index: usize 59 | } 60 | ``` 61 | In order to read, `Reader` need: 62 | 1) Atomic load `chunk` len. 63 | 2) Read and increment `index` until the end of chunk reached. 64 | 3) If chunk len == chunk capacity, do atomic load `Chunk::next` and switch chunk. Else - stop. 65 | 4) If we jumped to the next chunk - `fetch_add` `Chunk::read_count`. If `Chunk::read_count` == `EventQueue::readers_count` do `cleanup`. 66 | 67 | As you can see, most of the time, it works like `slice`. And touch only chunk's memory. 68 | 69 | `EventQueue::clenup` traverse event chunks, and dispose those, with read_count == readers_count. So, as soon as we increase 70 | `Chunk::read_count`, there is a chance that chunk will be disposed. This means, that we have either return item copies, 71 | or postpone increasing `Chunk::read_count` until we finish read session. Currently, `event_chunk_rs` do second. 72 | 73 | `Reader::irer()` serves in role of read session. On construction, it reads `Chunk::len`, on destruction updates `Chunk::read_count`. 74 | 75 | ## Clear 76 | 77 | EventQueue does not track readers, and readers does not signal/lock on read start/end. 78 | 79 | Clearing - means pointing readers to the end of the queue. To achieve that, `Event`- 80 | on `clear()` - stores current queue end position (*chunk + index) in `Event::start_position`. 81 | `Reader`, on read session start, reads `start_position` from `Event`, and move it's position forward, if needed. 82 | 83 | We add following fields to `EventQueue`: 84 | ```rust 85 | struct EventQueue{ 86 | ... 87 | start_position_chunk: *const Chunk, 88 | start_position_index: usize, 89 | } 90 | ``` 91 | This technique has one side effect - **it does not actually free memory immediately**. Readers need to advance first. 92 | So if you want to truncate queue, due to memory limits, you need to touch all associated readers, after `clear()`. 93 | 94 | 95 | ### `start_position` optimisation 96 | 97 | We add notion of `start_position_epoch`. Each time `start_position` updated - `start_position_epoch` increased. 98 | `start_position_epoch` fuses with `Chunk::len` - this way we have only one atomic load for reader. _So technically, instead 99 | of storing `start_position_epoch` in EventQueue, we store it duplicates in all chunks. So, on each `clear` we have to update 100 | all chunks values. But it's ok - since chunks number are low, and clear is relatively rare._ 101 | 102 | So in the end: 103 | 104 | ```rust 105 | struct Chunk{ 106 | ... 107 | len_and_epoch: AtomicUsize 108 | } 109 | ``` 110 | Also, Reader need its own start_position_epoch: 111 | ```rust 112 | struct Reader{ 113 | ... 114 | start_position_epoch: u32 115 | } 116 | ``` 117 | 118 | Reader, with one atomic load, read both chunk len, and current start_point_epoch. 119 | If start_point_epoch does not match current, we update Reader's position and mark chunks in-between as read. 120 | 121 | ## Dynamic chunk size 122 | 123 | Chunk sizes have `[N, N, N*2, N*2, N*4, N*4, ...]` pattern (`[4,4,8,8,16,16,..]`). 124 | If we would simply increase size on each next chunk - we would simply grow infinitely. 125 | If we have only one chunk left, and previous chunk had the same size - we found ideal size for our queue. 126 | 127 | ## Сhunk recycling 128 | 129 | With `feature="double_buffering"` enabled, the biggest freed chunk will be stored for further reuse. 130 | 131 | ## Tracking readers. Out-of-order chunks disposal. 
132 | 133 | ![](images/tracked_chunks.png) 134 | 135 | In order to be able to immediately dispose chunks on cleanup/truncate, we need to know which chunks 136 | are free from readers. To do that, we add "In" counter. Whenever "In" = "Out" - chunk is free of 137 | readers. 138 | 139 | On the EventQueue side: 140 | - Write lock prev.chunk_switch_mutex 141 | - If In == Out - it is safe to dispose chunk: 142 | - Change Next pointer to next chunk 143 | - free chunk 144 | - Release mutex 145 | 146 | _We lock only left-side chunk mutex (with In), because Reader cannot get Out+=1, without In+=1. 147 | In other words, it cannot get into chunk by omitting left-side lock and In+=1. And reader moves 148 | left to right only._ 149 | 150 | On the EventReader side, during chunk switch: 151 | - Read lock prev.chunk_switch_mutex 152 | - Read next chunk 153 | - Out+=1, Next.In+=1 154 | - Release mutex 155 | 156 | Queue remains lockless up to the clear/truncate call, due to read lock. 157 | 158 | ## Optimisation techniques 159 | 160 | _TODO: AUTO_CLEANUP=false_ 161 | 162 | _TODO: copy iter vs deferred read mark_ 163 | -------------------------------------------------------------------------------- /doc/tests.md: -------------------------------------------------------------------------------- 1 | ## Loom tests 2 | 3 | Take approx. 15 min 4 | 5 | Linux: 6 | 7 | ``` 8 | RUSTFLAGS="--cfg loom" cargo test --lib tests::loom_test --release 9 | ``` 10 | 11 | Windows (powershell): 12 | 13 | ``` 14 | $env:RUSTFLAGS="--cfg loom"; 15 | cargo test --lib tests::loom_test --release 16 | ``` 17 | 18 | 19 | -------------------------------------------------------------------------------- /src/chunk_state.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::AtomicU64; 2 | use crate::sync::Ordering; 3 | use crate::{StartPositionEpoch, utils}; 4 | 5 | pub struct AtomicPackedChunkState(AtomicU64); 6 | 7 | impl AtomicPackedChunkState{ 8 | #[inline(always)] 9 | pub fn new(packed_chunk_state: PackedChunkState) -> Self{ 10 | Self{0: AtomicU64::new(packed_chunk_state.into())} 11 | } 12 | #[inline(always)] 13 | pub fn load(&self, ordering : Ordering) -> PackedChunkState { 14 | PackedChunkState{0: self.0.load(ordering)} 15 | } 16 | #[inline(always)] 17 | pub fn store(&self, packed_chunk_state: PackedChunkState, ordering : Ordering){ 18 | self.0.store(packed_chunk_state.0, ordering); 19 | } 20 | } 21 | 22 | #[derive(Copy, Clone, PartialEq, Debug)] 23 | pub struct PackedChunkState(u64); 24 | impl PackedChunkState{ 25 | #[inline(always)] 26 | pub fn pack(chunk_state: ChunkState) -> Self{ 27 | Self( 28 | u64::from(chunk_state.len) 29 | | u64::from(chunk_state.has_next) << 32 30 | | u64::from(chunk_state.epoch.into_inner()) << 33 31 | ) 32 | } 33 | 34 | #[inline(always)] 35 | pub fn unpack(&self) -> ChunkState{ 36 | let len = self.len(); 37 | let has_next = self.has_next(); 38 | let epoch = self.epoch(); 39 | 40 | ChunkState{len, has_next, epoch} 41 | } 42 | 43 | #[inline(always)] 44 | pub fn len(&self) -> u32 { 45 | self.0 as u32 46 | } 47 | #[inline(always)] 48 | pub fn set_len(&mut self, new_len: u32) { 49 | const MASK: u64 = (1 << 32) - 1; // all first 32 bits 0. 
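        // Packed bit layout (see `pack` above):
        //   bits 0..32  - len
        //   bit  32     - has_next
        //   bits 33..   - epoch
        // `self.0 &= !MASK` clears the 32 len bits, then the new len is OR-ed in.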
50 | self.0 &= !MASK; 51 | self.0 |= new_len as u64; 52 | } 53 | 54 | #[inline(always)] 55 | pub fn has_next(&self) -> bool { 56 | utils::bittest_u64::<32>(self.0) 57 | } 58 | #[inline(always)] 59 | pub fn set_has_next(&mut self, new_flag: bool){ 60 | self.0 = utils::bitset_u64::<32>(self.0, new_flag); 61 | } 62 | 63 | #[inline(always)] 64 | pub fn epoch(&self) -> StartPositionEpoch { 65 | unsafe{ StartPositionEpoch::new_unchecked((self.0 >> 33) as u32) } 66 | } 67 | #[inline(always)] 68 | pub fn set_epoch(&mut self, epoch: StartPositionEpoch){ 69 | const MASK: u64 = (1 << 33) - 1; // all first 33 bits 0. 70 | self.0 &= MASK; 71 | self.0 |= u64::from(epoch.into_inner()) << 33; 72 | } 73 | } 74 | 75 | impl From for u64{ 76 | #[inline(always)] 77 | fn from(packed_chunk_state: PackedChunkState) -> u64 { 78 | packed_chunk_state.0 79 | } 80 | } 81 | 82 | #[derive(PartialEq, Debug, Clone)] 83 | pub struct ChunkState{ 84 | pub len : u32, 85 | pub epoch : StartPositionEpoch, 86 | pub has_next: bool 87 | } 88 | 89 | #[cfg(test)] 90 | mod test{ 91 | use rand::Rng; 92 | use crate::chunk_state::{ChunkState, PackedChunkState}; 93 | use crate::StartPositionEpoch; 94 | 95 | #[test] 96 | fn pack_unpack_fuzzy_test(){ 97 | let mut rng = rand::thread_rng(); 98 | 99 | let len = 100 | if cfg!(miri){ 1000 } else { 100000 }; 101 | for _ in 0..len{ 102 | let state = ChunkState{ 103 | len: rng.gen_range(0 .. u32::MAX), 104 | epoch: StartPositionEpoch::new(rng.gen_range(0 .. u32::MAX/2)), 105 | has_next: rng.gen_bool(0.5) 106 | }; 107 | 108 | let pack = PackedChunkState::pack(state.clone()); 109 | let unpacked = pack.unpack(); 110 | assert_eq!(state, unpacked); 111 | } 112 | } 113 | 114 | #[test] 115 | fn pack_unpack_data_test(){ 116 | let state = ChunkState{ 117 | len: 1356995898, 118 | epoch: StartPositionEpoch::new(1624221158), 119 | has_next: true 120 | }; 121 | 122 | let pack = PackedChunkState::pack(state.clone()); 123 | let unpacked = pack.unpack(); 124 | assert_eq!(state, unpacked); 125 | } 126 | 127 | #[test] 128 | fn setters_fuzzy_test(){ 129 | let mut rng = rand::thread_rng(); 130 | 131 | let len = 132 | if cfg!(miri){ 1000 } else { 100000 }; 133 | for _ in 0..len{ 134 | let state1 = ChunkState{ 135 | len: rng.gen_range(0 .. u32::MAX), 136 | epoch: StartPositionEpoch::new(rng.gen_range(0 .. u32::MAX/2)), 137 | has_next: rng.gen_bool(0.5) 138 | }; 139 | let state2 = ChunkState{ 140 | len: rng.gen_range(0 .. u32::MAX), 141 | epoch: StartPositionEpoch::new(rng.gen_range(0 .. u32::MAX/2)), 142 | has_next: rng.gen_bool(0.5) 143 | }; 144 | 145 | let mut pack1 = PackedChunkState::pack(state1.clone()); 146 | pack1.set_len(state2.len); 147 | pack1.set_has_next(state2.has_next); 148 | pack1.set_epoch(state2.epoch); 149 | 150 | let pack2 = PackedChunkState::pack(state2.clone()); 151 | assert_eq!(pack1, pack2); 152 | 153 | let unpacked1 = pack1.unpack(); 154 | let unpacked2 = pack2.unpack(); 155 | assert_eq!(unpacked1, unpacked2); 156 | } 157 | } 158 | } -------------------------------------------------------------------------------- /src/cursor.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | use crate::dynamic_chunk::DynamicChunk; 3 | use crate::event_queue::Settings; 4 | 5 | // TODO: Untested comparison!! 
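/// A position in the queue: a pointer to a chunk plus an in-chunk index.
/// Used both as a reader's current position and as the queue-wide start position.
/// Comparison orders by chunk id first, then by the in-chunk index.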
6 | pub(super) struct Cursor 7 | { 8 | // TODO: try hide 9 | /// Always valid 10 | pub chunk: *const DynamicChunk, 11 | /// in-chunk index 12 | pub index : usize 13 | } 14 | 15 | implCopy for Cursor {} 16 | implClone for Cursor { 17 | fn clone(&self) -> Self { 18 | Self{ chunk: self.chunk, index: self.index } 19 | } 20 | } 21 | 22 | 23 | impl Cursor { 24 | fn chunk_ref(&self) -> &DynamicChunk{ 25 | unsafe { &*self.chunk } 26 | } 27 | } 28 | 29 | 30 | impl PartialEq for Cursor { 31 | fn eq(&self, other: &Self) -> bool { 32 | self.chunk == other.chunk 33 | && self.index == other.index 34 | } 35 | } 36 | impl Eq for Cursor{} 37 | 38 | 39 | impl PartialOrd for Cursor { 40 | fn partial_cmp(&self, other: &Self) -> Option { 41 | Some(self.cmp(other)) 42 | } 43 | 44 | // TODO: Is this needed? Benchmark with/without specialized lt comparison 45 | fn lt(&self, other: &Self) -> bool { 46 | let self_chunk_id = self.chunk_ref().id(); 47 | let other_chunk_id = other.chunk_ref().id(); 48 | 49 | if self_chunk_id < other_chunk_id{ 50 | return true; 51 | } 52 | if self_chunk_id > other_chunk_id{ 53 | return false; 54 | } 55 | return self.index < other.index; 56 | } 57 | } 58 | impl Ord for Cursor { 59 | fn cmp(&self, other: &Self) -> Ordering { 60 | let self_chunk_id = self.chunk_ref().id(); 61 | let other_chunk_id = other.chunk_ref().id(); 62 | 63 | if self_chunk_id < other_chunk_id { 64 | return Ordering::Less; 65 | } 66 | if self_chunk_id > other_chunk_id { 67 | return Ordering::Greater; 68 | } 69 | self.index.cmp(&other.index) 70 | } 71 | } -------------------------------------------------------------------------------- /src/dynamic_array/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod test; 3 | 4 | use std::mem; 5 | use std::alloc::Layout; 6 | use std::borrow::Borrow; 7 | 8 | #[repr(C)] 9 | pub struct DynamicArray{ 10 | header: Header, 11 | array_len : usize, 12 | array: [T; 0], 13 | } 14 | 15 | impl DynamicArray{ 16 | #[inline] 17 | fn layout(len: usize) -> Layout{ 18 | unsafe{ 19 | let size = mem::size_of::() + len * mem::size_of::(); 20 | Layout::from_size_align_unchecked(size, mem::align_of::()) 21 | } 22 | } 23 | 24 | pub fn construct(header: Header, value: T, len: usize) -> *mut Self { 25 | unsafe{ 26 | let this = &mut *Self::construct_uninit(header, len); 27 | 28 | for item in this.slice_mut(){ 29 | std::ptr::copy_nonoverlapping(value.borrow(), item, 1); 30 | } 31 | 32 | std::mem::forget(value); 33 | 34 | this 35 | } 36 | } 37 | 38 | /// array is not initialized 39 | pub unsafe fn construct_uninit(header: Header, len: usize) -> *mut Self { 40 | let this = &mut *{ 41 | let allocation = std::alloc::alloc(Self::layout(len)); 42 | allocation as *mut Self 43 | }; 44 | 45 | std::ptr::write(&mut this.header, header); 46 | this.array_len = len; 47 | 48 | this 49 | } 50 | 51 | /// No checks at all! 52 | #[inline] 53 | pub unsafe fn write_at(&mut self, index: usize, value: T){ 54 | std::ptr::write(self.slice_mut().get_unchecked_mut(index), value); 55 | } 56 | 57 | /// Unsafe due to potential double-free, use-after-free 58 | pub unsafe fn destruct(this: *mut Self) { 59 | if std::mem::needs_drop::() { 60 | for item in (*this).slice_mut(){ 61 | std::ptr::drop_in_place(item); 62 | } 63 | } 64 | 65 | Self::destruct_uninit(this); 66 | } 67 | 68 | pub unsafe fn destruct_uninit(this: *mut Self) { 69 | if std::mem::needs_drop::
() { 70 | std::ptr::drop_in_place(&mut (*this).header); 71 | } 72 | 73 | std::alloc::dealloc(this as *mut u8,Self::layout((*this).array_len)); 74 | } 75 | 76 | #[inline] 77 | pub fn header(&self) -> &Header{ 78 | &self.header 79 | } 80 | 81 | #[inline] 82 | pub fn header_mut(&mut self) -> &mut Header{ 83 | &mut self.header 84 | } 85 | 86 | #[inline] 87 | pub fn slice(&self) -> &[T] { 88 | unsafe { 89 | std::slice::from_raw_parts(self.array.as_ptr(), self.array_len) 90 | } 91 | } 92 | 93 | #[inline] 94 | pub fn slice_mut(&mut self) -> &mut [T] { 95 | unsafe { 96 | std::slice::from_raw_parts_mut(self.array.as_mut_ptr(), self.array_len) 97 | } 98 | } 99 | 100 | #[inline] 101 | pub fn len(&self) -> usize{ 102 | self.array_len 103 | } 104 | } -------------------------------------------------------------------------------- /src/dynamic_array/test.rs: -------------------------------------------------------------------------------- 1 | use crate::dynamic_array::DynamicArray; 2 | use itertools::assert_equal; 3 | use std::cell::RefCell; 4 | 5 | struct Header{ 6 | i : u8, 7 | on_destroy : F 8 | } 9 | impl Drop for Header{ 10 | fn drop(&mut self) { 11 | (self.on_destroy)(self.i); 12 | } 13 | } 14 | 15 | struct Data{ 16 | i : usize, 17 | on_destroy : F 18 | } 19 | 20 | impl Drop for Data{ 21 | fn drop(&mut self) { 22 | (self.on_destroy)(self.i); 23 | } 24 | } 25 | 26 | 27 | #[test] 28 | fn uninit_test(){ 29 | let data_destruct_order = RefCell::new(Vec::new()); 30 | let on_destroy = |i:usize|{ 31 | data_destruct_order.borrow_mut().push(i); 32 | }; 33 | 34 | let header_destruct_counter = RefCell::new(0); 35 | let on_header_destroy = |_|{ 36 | *header_destruct_counter.borrow_mut() += 1; 37 | }; 38 | 39 | let fla = unsafe { 40 | &mut *DynamicArray::, Data<_>>::construct_uninit( 41 | Header { i: 100, on_destroy: on_header_destroy }, 42 | 8 43 | ) 44 | }; 45 | 46 | unsafe { fla.write_at(1, Data { i: 800, on_destroy }); } 47 | 48 | let array = fla.slice_mut(); 49 | assert_eq!(array.len(), 8); 50 | assert_eq!(array[1].i, 800); 51 | assert_eq!(fla.header.i, 100); 52 | 53 | assert_eq!(*header_destruct_counter.borrow(), 0); 54 | assert!(data_destruct_order.borrow().is_empty()); 55 | unsafe{ DynamicArray::destruct_uninit(fla); } 56 | assert!(data_destruct_order.borrow().is_empty()); 57 | assert_eq!(*header_destruct_counter.borrow(), 1); 58 | } 59 | 60 | #[test] 61 | fn default_test(){ 62 | let data_destruct_order = RefCell::new(Vec::new()); 63 | let on_destroy = |i:usize|{ 64 | data_destruct_order.borrow_mut().push(i); 65 | }; 66 | 67 | let header_destruct_counter = RefCell::new(0); 68 | let on_header_destroy = |_|{ 69 | *header_destruct_counter.borrow_mut() += 1; 70 | }; 71 | 72 | 73 | let fla = unsafe{ 74 | &mut *DynamicArray::, Data<_>>::construct( 75 | Header { i: 100, on_destroy: on_header_destroy }, 76 | Data { i: 0, on_destroy }, 77 | 4 78 | ) 79 | }; 80 | 81 | assert!(data_destruct_order.borrow().is_empty()); 82 | { 83 | let array = fla.slice_mut(); 84 | array[1] = Data{i : 800, on_destroy}; 85 | } 86 | assert_equal(&*data_destruct_order.borrow(), &[0 as usize]); 87 | data_destruct_order.borrow_mut().clear(); 88 | 89 | assert_eq!(fla.header.i, 100); 90 | assert_equal(fla.slice().iter().map(|data|data.i), [0, 800, 0, 0]); 91 | 92 | assert_eq!(*header_destruct_counter.borrow(), 0); 93 | unsafe{ DynamicArray::destruct(fla); } 94 | assert_equal(&*data_destruct_order.borrow(), &[0,800,0,0 as usize]); 95 | assert_eq!(*header_destruct_counter.borrow(), 1); 96 | } 97 | 98 | /*#[repr(transparent)] 99 | struct 
Node(DynamicArray); 100 | 101 | impl Node{ 102 | pub fn construct()-> &'static mut Self { 103 | let base = DynamicArray::::construct( 104 | Header { i: 20 }, 105 | 0, 106 | 10 107 | ); 108 | unsafe { &mut *(base as *mut _ as *mut Self) } 109 | } 110 | 111 | pub unsafe fn destruct(this: *mut Self){ 112 | DynamicArray::::destruct( 113 | this as *mut DynamicArray 114 | ); 115 | } 116 | }*/ -------------------------------------------------------------------------------- /src/dynamic_chunk.rs: -------------------------------------------------------------------------------- 1 | use crate::dynamic_array::DynamicArray; 2 | use crate::sync::{Ordering, AtomicPtr, AtomicUsize, SpinSharedMutex}; 3 | use crate::event_queue::{EventQueue, Settings}; 4 | use std::ptr::{null_mut, NonNull}; 5 | use std::ptr; 6 | use crate::chunk_state::{AtomicPackedChunkState, ChunkState, PackedChunkState}; 7 | use crate::StartPositionEpoch; 8 | 9 | /// Error, indicating insufficient capacity 10 | pub struct CapacityError{ 11 | pub value: V, 12 | } 13 | 14 | struct Header{ 15 | /// Just to compare chunks by age/sequence fast. Brings order. 16 | /// Will overflow after years... So just ignore that possibility. 17 | pub(super) id : usize, 18 | pub(super) next : AtomicPtr>, 19 | 20 | /// locked in reader next chunk and force_cleanup 21 | pub(super) chunk_switch_mutex : SpinSharedMutex<()>, 22 | /// Grow only. 23 | pub(super) readers_entered: AtomicUsize, 24 | 25 | /// When == readers count, it is safe to delete this chunk. 26 | /// Chunk read completely if reader consumed CHUNK_SIZE'ed element. 27 | /// Last chunk always exists. 28 | /// 29 | /// Grow only. 30 | pub(super) read_completely_times : AtomicUsize, 31 | 32 | // This needed to access Event from EventReader. 33 | // Never changes. 34 | pub(super) event : *const EventQueue, 35 | 36 | /// LenAndEpoch. Epoch same across all chunks. Epoch updated in all chunks at [EventQueue::clear] 37 | /// len fused with epoch for optimization purposes. 
This allow to get start_position_epoch without 38 | /// touching EventQueue and without additional atomic load(acquire) 39 | chunk_state: AtomicPackedChunkState, 40 | } 41 | 42 | #[repr(transparent)] 43 | pub struct DynamicChunk( 44 | DynamicArray< Header, T > 45 | ); 46 | 47 | impl DynamicChunk{ 48 | #[inline] 49 | pub fn id(&self) -> usize{ 50 | self.0.header().id 51 | } 52 | 53 | #[inline] 54 | pub fn next(&self, load_ordering: Ordering) -> *mut Self{ 55 | self.0.header().next.load(load_ordering) 56 | } 57 | 58 | #[inline] 59 | pub fn set_next(&mut self, ptr: *mut Self, store_ordering: Ordering) { 60 | self.0.header().next.store(ptr, store_ordering); 61 | 62 | // Relaxed because &mut self 63 | let mut chunk_state = self.0.header().chunk_state.load(Ordering::Relaxed); 64 | chunk_state.set_has_next(!ptr.is_null()); 65 | self.0.header().chunk_state.store(chunk_state, store_ordering); 66 | } 67 | 68 | #[inline] 69 | pub fn chunk_switch_mutex(&self) -> &SpinSharedMutex<()>{ 70 | &self.0.header().chunk_switch_mutex 71 | } 72 | 73 | #[inline] 74 | pub fn readers_entered(&self) -> &AtomicUsize{ 75 | &self.0.header().readers_entered 76 | } 77 | 78 | #[inline] 79 | pub fn read_completely_times(&self) -> &AtomicUsize{ 80 | &self.0.header().read_completely_times 81 | } 82 | 83 | #[inline] 84 | pub fn event(&self) -> &EventQueue{ 85 | unsafe { &*self.0.header().event } 86 | } 87 | 88 | pub fn construct( 89 | id: usize, 90 | epoch: StartPositionEpoch, 91 | event : *const EventQueue, 92 | len: usize 93 | ) -> *mut Self{ 94 | let header = Header{ 95 | id, 96 | next: AtomicPtr::new(null_mut()), 97 | chunk_switch_mutex: SpinSharedMutex::new(()), 98 | readers_entered: AtomicUsize::new(0), 99 | read_completely_times: AtomicUsize::new(0), 100 | event, 101 | chunk_state: AtomicPackedChunkState::new( 102 | PackedChunkState::pack( 103 | ChunkState{len: 0, has_next: false, epoch} 104 | ) 105 | ) 106 | }; 107 | unsafe{ 108 | let this = DynamicArray::, T>::construct_uninit( 109 | header, 110 | len 111 | ); 112 | 113 | // This is ok, due to transparent 114 | this as *mut _ as *mut Self 115 | } 116 | } 117 | 118 | /// Unsafe - because `this` state is unknown 119 | /// Reuse previously stored chunk. 120 | /// Should be used in deinitialize -> reinitialize cycle. 121 | pub unsafe fn from_recycled( 122 | mut recycled: DynamicChunkRecycled, 123 | id: usize, 124 | epoch: StartPositionEpoch, 125 | ) -> *mut Self { 126 | let header = recycled.chunk.as_mut().0.header_mut(); 127 | header.id = id; 128 | header.next = AtomicPtr::new(null_mut()); 129 | header.read_completely_times = AtomicUsize::new(0); 130 | header.chunk_state = AtomicPackedChunkState::new( 131 | PackedChunkState::pack( 132 | ChunkState{len: 0, has_next: false, epoch} 133 | ) 134 | ); 135 | 136 | let ptr = recycled.chunk.as_ptr(); 137 | std::mem::forget(recycled); 138 | ptr 139 | } 140 | 141 | // ---------------------------------------------------------------- 142 | // STORAGE 143 | // ---------------------------------------------------------------- 144 | #[inline] 145 | pub fn set_epoch(&mut self, epoch: StartPositionEpoch, load_ordering: Ordering, store_ordering: Ordering){ 146 | let mut chunk_state = self.chunk_state(load_ordering); 147 | chunk_state.set_epoch(epoch); 148 | 149 | self.0.header().chunk_state.store(chunk_state, store_ordering); 150 | } 151 | 152 | /// Needs additional synchronization, because several threads writing simultaneously may finish writes 153 | /// not in order, but len increases sequentially. 
This may cause items before len index being not fully written. 154 | #[inline(always)] 155 | pub fn try_push(&mut self, value: T, store_ordering: Ordering) -> Result<(), CapacityError>{ 156 | // Relaxed because updated only with &mut self 157 | let chunk_state = self.chunk_state(Ordering::Relaxed); 158 | let index = chunk_state.len(); 159 | if (index as usize) >= self.capacity() { 160 | return Result::Err(CapacityError{value}); 161 | } 162 | 163 | unsafe{ self.push_at(value, index, chunk_state, store_ordering); } 164 | 165 | return Result::Ok(()); 166 | } 167 | 168 | #[inline(always)] 169 | pub unsafe fn push_unchecked(&mut self, value: T, store_ordering: Ordering){ 170 | // Relaxed because updated only with &mut self 171 | let chunk_state = self.chunk_state(Ordering::Relaxed); 172 | let index = chunk_state.len(); 173 | 174 | self.push_at(value, index, chunk_state, store_ordering); 175 | } 176 | 177 | #[inline(always)] 178 | pub unsafe fn push_at(&mut self, value: T, index: u32, mut chunk_state: PackedChunkState, store_ordering: Ordering) { 179 | debug_assert!((index as usize) < self.capacity()); 180 | 181 | self.0.write_at(index as usize, value); 182 | 183 | chunk_state.set_len(index+1); 184 | 185 | self.0.header().chunk_state.store(chunk_state, store_ordering); 186 | } 187 | 188 | /// Append items from iterator, until have free space 189 | /// Returns Ok if everything fit, CapacityError() - if not 190 | #[inline] 191 | pub fn extend(&mut self, iter: &mut I, store_ordering: Ordering) -> Result<(), CapacityError<()>> 192 | where I:Iterator 193 | { 194 | let mut chunk_state = self.chunk_state(Ordering::Relaxed); 195 | let mut index = chunk_state.len() as usize; 196 | 197 | loop { 198 | if index == self.capacity(){ 199 | chunk_state.set_len(self.capacity() as u32); 200 | self.0.header().chunk_state.store(chunk_state, store_ordering); 201 | return Result::Err(CapacityError{value:()}); 202 | } 203 | 204 | match iter.next(){ 205 | None => { 206 | chunk_state.set_len(index as u32); 207 | self.0.header().chunk_state.store(chunk_state, store_ordering); 208 | return Result::Ok(()); 209 | } 210 | Some(value) => { 211 | unsafe{ 212 | self.0.write_at(index as usize, value); 213 | } 214 | } 215 | } 216 | 217 | index+=1; 218 | } 219 | } 220 | 221 | #[inline(always)] 222 | pub unsafe fn get_unchecked(&self, index: usize) -> &T{ 223 | self.0.slice().get_unchecked(index) 224 | } 225 | 226 | #[inline(always)] 227 | pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T{ 228 | self.0.slice_mut().get_unchecked_mut(index) 229 | } 230 | 231 | #[inline(always)] 232 | pub fn chunk_state(&self, ordering: Ordering) -> PackedChunkState { 233 | self.0.header().chunk_state.load(ordering) 234 | } 235 | 236 | #[inline(always)] 237 | pub fn capacity(&self) -> usize { 238 | self.0.len() 239 | } 240 | 241 | pub unsafe fn destruct(this: *mut Self){ 242 | std::mem::drop(Self::recycle(this)); 243 | } 244 | 245 | /// destruct all items. Can be stored for reuse. 246 | /// Should be called exactly once before reinitialization. 
247 | #[must_use] 248 | pub unsafe fn recycle(this: *mut Self) -> DynamicChunkRecycled{ 249 | if std::mem::needs_drop::() { 250 | // Relaxed because &mut self 251 | let len = (*this).chunk_state(Ordering::Relaxed).len() as usize; 252 | for i in 0..len { 253 | ptr::drop_in_place((*this).0.slice_mut().get_unchecked_mut(i)); 254 | } 255 | } 256 | DynamicChunkRecycled {chunk: NonNull::new_unchecked(this)} 257 | } 258 | } 259 | 260 | #[repr(transparent)] 261 | pub struct DynamicChunkRecycled{ 262 | chunk: ptr::NonNull> 263 | } 264 | impl DynamicChunkRecycled{ 265 | #[inline(always)] 266 | pub fn capacity(&self) -> usize { 267 | unsafe{ self.chunk.as_ref().capacity() } 268 | } 269 | } 270 | impl Drop for DynamicChunkRecycled{ 271 | fn drop(&mut self) { 272 | unsafe { 273 | DynamicArray::, T>::destruct_uninit( 274 | self.chunk.as_ptr() as *mut DynamicArray, T> 275 | ) 276 | } 277 | } 278 | } 279 | 280 | -------------------------------------------------------------------------------- /src/event_queue.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(loom))] 2 | #[cfg(test)] 3 | mod test; 4 | 5 | use crate::sync::{Ordering}; 6 | use crate::sync::{Mutex, Arc}; 7 | use crate::sync::{SpinMutex}; 8 | 9 | use std::ptr::{null_mut, null, NonNull}; 10 | use crate::event_reader::EventReader; 11 | use std::ops::ControlFlow; 12 | use std::ops::ControlFlow::{Continue, Break}; 13 | use std::marker::PhantomPinned; 14 | use std::pin::Pin; 15 | use crate::cursor::Cursor; 16 | use crate::dynamic_chunk::{DynamicChunk}; 17 | #[cfg(feature = "double_buffering")] 18 | use crate::dynamic_chunk::{DynamicChunkRecycled}; 19 | use crate::{StartPositionEpoch}; 20 | 21 | /// This way you can control when chunk's memory deallocation happens. 22 | /// _In addition, some operations may cause deallocations as well._ 23 | #[derive(PartialEq)] 24 | pub enum CleanupMode{ 25 | /// Cleanup will be called when chunk fully read. 26 | /// 27 | /// In this mode memory will be freed ASAP - right in the end of reader consumption session. 28 | /// 29 | /// !! Not allowed for spmc !! 30 | OnChunkRead, 31 | /// Cleanup will be called when new chunk created. 32 | OnNewChunk, 33 | /// Cleanup will never be called. You should call `EventQueue::cleanup` manually. 34 | Never 35 | } 36 | 37 | pub trait Settings{ 38 | const MIN_CHUNK_SIZE : u32; 39 | const MAX_CHUNK_SIZE : u32; 40 | const CLEANUP : CleanupMode; 41 | 42 | // for spmc/mpmc 43 | /// Lock on new chunk cleanup event. Will dead-lock if already locked. 44 | const LOCK_ON_NEW_CHUNK_CLEANUP: bool; 45 | /// Call cleanup on unsubscribe? 46 | const CLEANUP_IN_UNSUBSCRIBE: bool; 47 | } 48 | 49 | pub struct List{ 50 | first: *mut DynamicChunk, 51 | last : *mut DynamicChunk, 52 | chunk_id_counter: usize, 53 | total_capacity: usize, 54 | 55 | readers_count: u32, 56 | 57 | /// 0 - means no penult 58 | penult_chunk_size: u32, 59 | 60 | #[cfg(feature = "double_buffering")] 61 | /// Biggest freed chunk 62 | free_chunk: Option>, 63 | } 64 | 65 | pub struct EventQueue{ 66 | pub(crate) list : Mutex>, 67 | 68 | /// Separate lock from list::start_position_epoch, is safe, because start_point_epoch encoded in 69 | /// chunk's atomic len+epoch. 70 | // TODO: Make RWLock? Bench. 
71 | // TODO: Optioned 72 | pub(crate) start_position: SpinMutex>>, 73 | 74 | _pinned: PhantomPinned, 75 | } 76 | 77 | //unsafe impl Send for EventQueue{} 78 | //unsafe impl Sync for EventQueue{} 79 | 80 | impl EventQueue 81 | { 82 | pub fn with_capacity(new_capacity: u32) -> Pin>{ 83 | assert!(S::MIN_CHUNK_SIZE <= new_capacity && new_capacity <= S::MAX_CHUNK_SIZE); 84 | 85 | let this = Arc::new(Self{ 86 | list: Mutex::new(List{ 87 | first: null_mut(), 88 | last: null_mut(), 89 | chunk_id_counter: 0, 90 | readers_count:0, 91 | total_capacity:new_capacity as usize, 92 | penult_chunk_size : 0, 93 | 94 | #[cfg(feature = "double_buffering")] 95 | free_chunk: None, 96 | }), 97 | start_position: SpinMutex::new(None), 98 | _pinned: PhantomPinned, 99 | }); 100 | 101 | let node = DynamicChunk::::construct( 102 | 0, StartPositionEpoch::zero(), &*this, new_capacity as usize); 103 | 104 | { 105 | let mut list = this.list.lock(); 106 | list.first = node; 107 | list.last = node; 108 | } 109 | 110 | unsafe{ Pin::new_unchecked(this) } 111 | } 112 | 113 | #[inline] 114 | fn add_chunk_sized(&self, list: &mut List, size: usize) -> &mut DynamicChunk{ 115 | let node = unsafe{&mut *list.last}; 116 | let epoch = node.chunk_state(Ordering::Relaxed).epoch(); 117 | 118 | // make new node 119 | list.chunk_id_counter += 1; 120 | 121 | #[cfg(not(feature = "double_buffering"))] 122 | let new_node = DynamicChunk::::construct(list.chunk_id_counter, epoch, self, size); 123 | 124 | #[cfg(feature = "double_buffering")] 125 | let new_node = { 126 | let mut new_node: *mut DynamicChunk = null_mut(); 127 | 128 | if let Some(recycled_chunk) = &list.free_chunk { 129 | // Check if recycled_chunk have exact capacity. 130 | if recycled_chunk.capacity() == size { 131 | // unwrap_unchecked() 132 | new_node = 133 | match list.free_chunk.take() { 134 | Some(recycled_chunk) => { 135 | unsafe { DynamicChunk::from_recycled( 136 | recycled_chunk, 137 | list.chunk_id_counter, 138 | epoch) } 139 | }, None => unsafe { std::hint::unreachable_unchecked() }, 140 | } 141 | } else { 142 | // TODO: try free in cleanup somehow 143 | list.free_chunk = None; 144 | } 145 | } 146 | 147 | if new_node.is_null(){ 148 | new_node = DynamicChunk::::construct(list.chunk_id_counter, epoch, self, size); 149 | } 150 | new_node 151 | }; 152 | 153 | // connect 154 | node.set_next(new_node, Ordering::Release); 155 | list.last = new_node; 156 | list.penult_chunk_size = node.capacity() as u32; 157 | list.total_capacity += size; 158 | 159 | unsafe{&mut *new_node} 160 | } 161 | 162 | #[inline] 163 | fn on_new_chunk_cleanup(&self, list: &mut List){ 164 | if S::CLEANUP == CleanupMode::OnNewChunk{ 165 | // this should acts as compile-time-if. 166 | if S::LOCK_ON_NEW_CHUNK_CLEANUP{ 167 | let _lock = self.list.lock(); 168 | self.cleanup_impl(list); 169 | } else { 170 | self.cleanup_impl(list); 171 | } 172 | } 173 | } 174 | 175 | #[inline] 176 | fn add_chunk(&self, list: &mut List) -> &mut DynamicChunk{ 177 | let node = unsafe{&*list.last}; 178 | 179 | self.on_new_chunk_cleanup(list); 180 | 181 | // Size pattern 4,4,8,8,16,16 182 | let new_size: usize = { 183 | if list.penult_chunk_size as usize == node.capacity(){ 184 | std::cmp::min(node.capacity() * 2, S::MAX_CHUNK_SIZE as usize) 185 | } else { 186 | node.capacity() 187 | } 188 | }; 189 | 190 | self.add_chunk_sized(list, new_size) 191 | } 192 | 193 | // Have 10% better performance. Observable in spmc. 
194 | #[inline] 195 | pub fn push(&self, list: &mut List, value: T){ 196 | let mut node = unsafe{&mut *list.last}; 197 | 198 | // Relaxed because we update only under lock 199 | let chunk_state = node.chunk_state(Ordering::Relaxed); 200 | let mut storage_len = chunk_state.len(); 201 | 202 | if /*unlikely*/ storage_len == node.capacity() as u32{ 203 | node = self.add_chunk(&mut *list); 204 | storage_len = 0; 205 | } 206 | 207 | unsafe { node.push_at(value, storage_len, chunk_state, Ordering::Release); } 208 | } 209 | 210 | /* 211 | #[inline] 212 | pub fn push(&self, list: &mut List, value: T){ 213 | let node = unsafe{&mut *list.last}; 214 | 215 | if let Err(err) = node.try_push(value, Ordering::Release){ 216 | unsafe { 217 | self.add_chunk(&mut *list) 218 | .push_unchecked(err.value, Ordering::Release); 219 | } 220 | } 221 | } 222 | */ 223 | 224 | // Not an Extend trait, because Extend::extend(&mut self) 225 | #[inline] 226 | pub fn extend(&self, list: &mut List, iter: I) 227 | where I: IntoIterator 228 | { 229 | let mut node = unsafe{&mut *list.last}; 230 | 231 | let mut iter = iter.into_iter(); 232 | 233 | while node.extend(&mut iter, Ordering::Release).is_err(){ 234 | match iter.next() { 235 | None => {return;} 236 | Some(value) => { 237 | // add chunk and push value there 238 | node = self.add_chunk(&mut *list); 239 | unsafe{ node.push_unchecked(value, Ordering::Relaxed); } 240 | } 241 | }; 242 | } 243 | } 244 | 245 | /// EventReader will start receive events from NOW. 246 | /// It will not see events that was pushed BEFORE subscription. 247 | pub fn subscribe(&self, list: &mut List) -> EventReader{ 248 | if list.readers_count == 0{ 249 | // Keep alive. Decrements in unsubscribe 250 | unsafe { Arc::increment_strong_count(self); } 251 | } 252 | list.readers_count += 1; 253 | 254 | let last_chunk = unsafe{&*list.last}; 255 | let chunk_state = last_chunk.chunk_state(Ordering::Relaxed); 256 | 257 | // Enter chunk 258 | last_chunk.readers_entered().fetch_add(1, Ordering::AcqRel); 259 | 260 | EventReader{ 261 | position: Cursor{chunk: last_chunk, index: chunk_state.len() as usize}, 262 | start_position_epoch: chunk_state.epoch() 263 | } 264 | } 265 | 266 | // Called from EventReader Drop 267 | // 268 | // `this_ptr` instead of `&self`, because `&self` as reference should be valid during 269 | // function call. And we drop it sometimes.... through `Arc::decrement_strong_count`. 
270 | pub(crate) fn unsubscribe(this_ptr: NonNull, event_reader: &EventReader){ 271 | let this = unsafe { this_ptr.as_ref() }; 272 | let mut list = this.list.lock(); 273 | 274 | // Exit chunk 275 | unsafe{&*event_reader.position.chunk}.read_completely_times().fetch_add(1, Ordering::AcqRel); 276 | 277 | if S::CLEANUP_IN_UNSUBSCRIBE && S::CLEANUP != CleanupMode::Never{ 278 | if list.first as *const _ == event_reader.position.chunk { 279 | this.cleanup_impl(&mut *list); 280 | } 281 | } 282 | 283 | list.readers_count -= 1; 284 | if list.readers_count == 0{ 285 | drop(list); 286 | // Safe to self-destruct 287 | unsafe { Arc::decrement_strong_count(this_ptr.as_ptr()); } 288 | } 289 | } 290 | 291 | unsafe fn free_chunk( 292 | &self, 293 | chunk: *mut DynamicChunk, 294 | list: &mut List) 295 | { 296 | if let Some(start_position) = *self.start_position.as_mut_ptr(){ 297 | if start_position.chunk == chunk{ 298 | if LOCK_ON_WRITE_START_POSITION{ 299 | *self.start_position.lock() = None; 300 | } else { 301 | *self.start_position.as_mut_ptr() = None; 302 | } 303 | } 304 | } 305 | 306 | list.total_capacity -= (*chunk).capacity(); 307 | 308 | #[cfg(not(feature = "double_buffering"))] 309 | { 310 | DynamicChunk::destruct(chunk); 311 | std::mem::drop(list); // just for use 312 | } 313 | 314 | #[cfg(feature = "double_buffering")] 315 | { 316 | if let Some(free_chunk) = &list.free_chunk { 317 | if free_chunk.capacity() >= (*chunk).capacity() { 318 | // Discard - recycled chunk bigger then our 319 | DynamicChunk::destruct(chunk); 320 | return; 321 | } 322 | } 323 | // Replace free_chunk with our. 324 | list.free_chunk = Some(DynamicChunk::recycle(chunk)); 325 | } 326 | } 327 | 328 | fn cleanup_impl(&self, list: &mut List){ 329 | unsafe { 330 | // using _ptr version, because with &chunk - reference should be valid during whole 331 | // lambda function call. (according to miri and some rust borrowing rules). 332 | // And we actually drop that chunk. 333 | foreach_chunk_ptr_mut( 334 | list.first, 335 | list.last, 336 | Ordering::Relaxed, // we're under mutex 337 | |chunk_ptr| { 338 | // Do not lock prev_chunk.chunk_switch_mutex because we traverse in order. 339 | let chunk = &mut *chunk_ptr; 340 | let chunk_readers = chunk.readers_entered().load(Ordering::Acquire); 341 | let chunk_read_times = chunk.read_completely_times().load(Ordering::Acquire); 342 | // Cleanup only in order 343 | if chunk_readers != chunk_read_times { 344 | return Break(()); 345 | } 346 | 347 | let next_chunk_ptr = chunk.next(Ordering::Relaxed); 348 | debug_assert!(!next_chunk_ptr.is_null()); 349 | 350 | debug_assert!(std::ptr::eq(chunk, list.first)); 351 | // Do not lock start_position permanently, because reader will 352 | // never enter chunk before list.first 353 | self.free_chunk::(chunk, list); 354 | list.first = next_chunk_ptr; 355 | 356 | Continue(()) 357 | } 358 | ); 359 | } 360 | if list.first == list.last{ 361 | list.penult_chunk_size = 0; 362 | } 363 | } 364 | 365 | /// This will traverse up to the start_point - and will free all unoccupied chunks. (out-of-order cleanup) 366 | /// This one slower then cleanup_impl. 367 | fn force_cleanup_impl(&self, list: &mut List){ 368 | self.cleanup_impl(list); 369 | 370 | // Lock start_position permanently, due to out of order chunk destruction. 371 | // Reader can try enter in the chunk in the middle of force_cleanup execution. 
372 | let start_position = self.start_position.lock(); 373 | let terminal_chunk = match &*start_position{ 374 | None => { return; } 375 | Some(cursor) => {cursor.chunk} 376 | }; 377 | if list.first as *const _ == terminal_chunk{ 378 | return; 379 | } 380 | unsafe { 381 | // cleanup_impl dealt with first chunk before. Omit. 382 | let mut prev_chunk = list.first; 383 | // using _ptr version, because with &chunk - reference should be valid during whole 384 | // lambda function call. (according to miri and some rust borrowing rules). 385 | // And we actually drop that chunk. 386 | foreach_chunk_ptr_mut( 387 | (*list.first).next(Ordering::Relaxed), 388 | terminal_chunk, 389 | Ordering::Relaxed, // we're under mutex 390 | |chunk| { 391 | // We need to lock only `prev_chunk`, because it is impossible 392 | // to get in `chunk` omitting chunk.readers_entered+1 393 | let lock = (*prev_chunk).chunk_switch_mutex().write(); 394 | let chunk_readers = (*chunk).readers_entered().load(Ordering::Acquire); 395 | let chunk_read_times = (*chunk).read_completely_times().load(Ordering::Acquire); 396 | if chunk_readers != chunk_read_times { 397 | prev_chunk = chunk; 398 | return Continue(()); 399 | } 400 | 401 | let next_chunk_ptr = (*chunk).next(Ordering::Relaxed); 402 | debug_assert!(!next_chunk_ptr.is_null()); 403 | 404 | (*prev_chunk).set_next(next_chunk_ptr, Ordering::Release); 405 | drop(lock); 406 | 407 | self.free_chunk::(chunk, list); 408 | Continue(()) 409 | } 410 | ); 411 | } 412 | } 413 | 414 | pub fn cleanup(&self){ 415 | self.cleanup_impl(&mut *self.list.lock()); 416 | } 417 | 418 | #[inline] 419 | fn set_start_position( 420 | &self, 421 | list: &mut List, 422 | new_start_position: Cursor) 423 | { 424 | *self.start_position.lock() = Some(new_start_position); 425 | 426 | // update len_and_start_position_epoch in each chunk 427 | let first_chunk = unsafe{&mut *list.first}; 428 | let new_epoch = first_chunk.chunk_state(Ordering::Relaxed).epoch().increment(); 429 | unsafe { 430 | foreach_chunk_mut( 431 | first_chunk, 432 | null(), 433 | Ordering::Relaxed, // we're under mutex 434 | |chunk| { 435 | chunk.set_epoch(new_epoch, Ordering::Relaxed, Ordering::Release); 436 | Continue(()) 437 | } 438 | ); 439 | } 440 | } 441 | 442 | pub fn clear(&self, list: &mut List){ 443 | let last_chunk = unsafe{ &*list.last }; 444 | let last_chunk_len = last_chunk.chunk_state(Ordering::Relaxed).len() as usize; 445 | 446 | self.set_start_position(list, Cursor { 447 | chunk: last_chunk, 448 | index: last_chunk_len 449 | }); 450 | 451 | self.force_cleanup_impl(list); 452 | } 453 | 454 | pub fn truncate_front(&self, list: &mut List, len: usize) { 455 | // make chunks* array 456 | 457 | // TODO: subtract from total_capacity 458 | // TODO: use small_vec 459 | // TODO: loop if > 128? 460 | // there is no way we can have memory enough to hold > 2^64 bytes. 
461 | let mut chunks : [*const DynamicChunk; 128] = [null(); 128]; 462 | let chunks_count= 463 | unsafe { 464 | let mut i = 0; 465 | foreach_chunk( 466 | list.first, 467 | null(), 468 | Ordering::Relaxed, // we're under mutex 469 | |chunk| { 470 | chunks[i] = chunk; 471 | i+=1; 472 | Continue(()) 473 | } 474 | ); 475 | i 476 | }; 477 | 478 | let mut total_len = 0; 479 | for i in (0..chunks_count).rev(){ 480 | let chunk = unsafe{ &*chunks[i] }; 481 | let chunk_len = chunk.chunk_state(Ordering::Relaxed).len() as usize; 482 | total_len += chunk_len; 483 | if total_len >= len{ 484 | let new_start_position = Cursor { 485 | chunk: chunks[i], 486 | index: total_len - len 487 | }; 488 | // Do we actually need to truncate? 489 | if let Some(start_position) = unsafe{*self.start_position.as_mut_ptr()}{ 490 | if start_position >= new_start_position{ 491 | return; 492 | } 493 | } 494 | 495 | self.set_start_position(list, new_start_position); 496 | self.force_cleanup_impl(list); 497 | return; 498 | } 499 | } 500 | 501 | // len is bigger then total_len. 502 | // do nothing. 503 | } 504 | 505 | pub fn change_chunk_capacity(&self, list: &mut List, new_capacity: u32){ 506 | assert!(S::MIN_CHUNK_SIZE <= new_capacity && new_capacity <= S::MAX_CHUNK_SIZE); 507 | self.on_new_chunk_cleanup(list); 508 | self.add_chunk_sized(&mut *list, new_capacity as usize); 509 | } 510 | 511 | pub fn total_capacity(&self, list: &List) -> usize { 512 | list.total_capacity 513 | } 514 | 515 | pub fn chunk_capacity(&self, list: &List) -> usize { 516 | unsafe { (*list.last).capacity() } 517 | } 518 | 519 | /* 520 | // chunks_count can be atomic. But does that needed? 521 | pub fn chunks_count(&self) -> usize { 522 | let list = self.list.lock(); 523 | unsafe{ 524 | list.chunk_id_counter/*(*list.last).id*/ - (*list.first).id() + 1 525 | } 526 | }*/ 527 | } 528 | 529 | impl Drop for EventQueue{ 530 | fn drop(&mut self) { 531 | let list = self.list.get_mut(); 532 | debug_assert!(list.readers_count == 0); 533 | unsafe{ 534 | let mut node_ptr = list.first; 535 | while node_ptr != null_mut() { 536 | let node = &mut *node_ptr; 537 | node_ptr = node.next(Ordering::Relaxed); 538 | DynamicChunk::destruct(node); 539 | } 540 | } 541 | } 542 | } 543 | 544 | #[inline(always)] 545 | pub(super) unsafe fn foreach_chunk 546 | ( 547 | start_chunk_ptr : *const DynamicChunk, 548 | end_chunk_ptr : *const DynamicChunk, 549 | load_ordering : Ordering, 550 | mut func : F 551 | ) 552 | where F: FnMut(&DynamicChunk) -> ControlFlow<()> 553 | { 554 | foreach_chunk_mut( 555 | start_chunk_ptr as *mut _, 556 | end_chunk_ptr, 557 | load_ordering, 558 | |mut_chunk| func(mut_chunk) 559 | ); 560 | } 561 | 562 | /// end_chunk_ptr may be null 563 | #[inline(always)] 564 | pub(super) unsafe fn foreach_chunk_mut 565 | ( 566 | start_chunk_ptr : *mut DynamicChunk, 567 | end_chunk_ptr : *const DynamicChunk, 568 | load_ordering : Ordering, 569 | mut func : F 570 | ) 571 | where F: FnMut(&mut DynamicChunk) -> ControlFlow<()> 572 | { 573 | foreach_chunk_ptr_mut( 574 | start_chunk_ptr, 575 | end_chunk_ptr, 576 | load_ordering, 577 | |mut_chunk_ptr| func(&mut *mut_chunk_ptr) 578 | ); 579 | } 580 | 581 | /// end_chunk_ptr may be null 582 | #[inline(always)] 583 | pub(super) unsafe fn foreach_chunk_ptr_mut 584 | ( 585 | start_chunk_ptr : *mut DynamicChunk, 586 | end_chunk_ptr : *const DynamicChunk, 587 | load_ordering : Ordering, 588 | mut func : F 589 | ) 590 | where F: FnMut(*mut DynamicChunk) -> ControlFlow<()> 591 | { 592 | debug_assert!(!start_chunk_ptr.is_null()); 593 | 
debug_assert!( 594 | end_chunk_ptr.is_null() 595 | || 596 | std::ptr::eq((*start_chunk_ptr).event(), (*end_chunk_ptr).event()) 597 | ); 598 | debug_assert!( 599 | end_chunk_ptr.is_null() 600 | || 601 | (*start_chunk_ptr).id() <= (*end_chunk_ptr).id() 602 | ); 603 | 604 | let mut chunk_ptr = start_chunk_ptr; 605 | while !chunk_ptr.is_null(){ 606 | if chunk_ptr as *const _ == end_chunk_ptr { 607 | break; 608 | } 609 | 610 | // chunk can be dropped inside `func`, so fetch `next` beforehand 611 | let next_chunk_ptr = (*chunk_ptr).next(load_ordering); 612 | 613 | let proceed = func(chunk_ptr); 614 | if proceed == Break(()) { 615 | break; 616 | } 617 | 618 | chunk_ptr = next_chunk_ptr; 619 | } 620 | } -------------------------------------------------------------------------------- /src/event_queue/test.rs: -------------------------------------------------------------------------------- 1 | use crate::mpmc::{EventQueue, EventReader, Settings, DefaultSettings}; 2 | use crate::{CleanupMode, LendingIterator}; 3 | use std::ptr::null; 4 | use std::ops::ControlFlow::Continue; 5 | use std::ops::Deref; 6 | use itertools::assert_equal; 7 | use rand::Rng; 8 | use crate::event_queue::{foreach_chunk, List}; 9 | use crate::sync::Ordering; 10 | use crate::tests::utils::{consume_copies, skip}; 11 | 12 | struct S{} impl Settings for S{ 13 | const MIN_CHUNK_SIZE: u32 = 4; 14 | const MAX_CHUNK_SIZE: u32 = u32::MAX; 15 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 16 | } 17 | 18 | fn get_chunks_capacities(event_queue: &EventQueue) -> Vec { 19 | let list = &event_queue.0.list.lock(); 20 | let mut chunk_capacities = Vec::new(); 21 | unsafe{ 22 | foreach_chunk(list.first, null(), Ordering::Acquire, 23 | |chunk|{ 24 | chunk_capacities.push( chunk.capacity() ); 25 | Continue(()) 26 | }); 27 | } 28 | chunk_capacities 29 | } 30 | 31 | fn get_chunks_lens(event_queue: &EventQueue) -> Vec { 32 | let list = &event_queue.0.list.lock(); 33 | let mut chunk_lens = Vec::new(); 34 | unsafe{ 35 | foreach_chunk(list.first, null(), Ordering::Acquire, 36 | |chunk|{ 37 | chunk_lens.push( chunk.chunk_state(Ordering::Relaxed).len() as usize ); 38 | Continue(()) 39 | }); 40 | } 41 | chunk_lens 42 | } 43 | 44 | fn factual_capacity(event_queue: &EventQueue) -> usize { 45 | let list = &event_queue.0.list.lock(); 46 | let mut total = 0; 47 | unsafe { 48 | foreach_chunk( 49 | list.first, 50 | null(), 51 | Ordering::Relaxed, // we're under mutex 52 | |chunk| { 53 | total += chunk.capacity(); 54 | Continue(()) 55 | } 56 | ); 57 | } 58 | total 59 | } 60 | 61 | 62 | #[test] 63 | fn chunks_size_test(){ 64 | let event = EventQueue::::new(); 65 | event.extend(0..32); 66 | 67 | assert_equal(get_chunks_capacities(&event), [4,4,8,8,16]); 68 | } 69 | 70 | #[cfg(feature = "double_buffering")] 71 | #[test] 72 | fn double_buffering_test(){ 73 | let event = EventQueue::::new(); 74 | let mut reader = EventReader::new(&event); 75 | 76 | event.extend(0..24); 77 | assert_equal(get_chunks_capacities(&event), [4,4,8,8]); 78 | 79 | consume_copies(&mut reader.iter()); 80 | assert_eq!(event.0.list.lock().free_chunk.as_ref().unwrap().capacity(), 8); 81 | assert_equal(get_chunks_capacities(&event), [8]); 82 | 83 | event.extend(0..32); 84 | assert!(event.0.list.lock().free_chunk.is_none()); 85 | assert_equal(get_chunks_capacities(&event), [8, 8, 16, 16]); 86 | } 87 | 88 | #[test] 89 | fn resize_test(){ 90 | let event = EventQueue::::new(); 91 | let mut reader = EventReader::new(&event); 92 | 93 | event.extend(0..32); 94 | 
assert_equal(get_chunks_capacities(&event), [4,4,8,8,16]); 95 | 96 | event.change_chunk_capacity(6); 97 | assert_equal(get_chunks_capacities(&event), [4,4,8,8,16,6]); 98 | assert_equal(get_chunks_lens(&event), [4,4,8,8,8,0]); 99 | 100 | event.push(32); 101 | assert_equal(get_chunks_capacities(&event), [4,4,8,8,16,6]); 102 | assert_equal(get_chunks_lens(&event), [4,4,8,8,8,1]); 103 | 104 | consume_copies(&mut reader.iter()); 105 | assert_equal(get_chunks_capacities(&event), [6]); 106 | assert_equal(get_chunks_lens(&event), [1]); 107 | 108 | event.extend(0..6); 109 | assert_equal(get_chunks_capacities(&event), [6,6]); 110 | assert_equal(get_chunks_lens(&event), [6, 1]); 111 | } 112 | 113 | #[test] 114 | fn truncate_front_test(){ 115 | let event = EventQueue::::new(); 116 | let mut reader = EventReader::new(&event); 117 | 118 | event.extend(0..26); 119 | assert_equal(get_chunks_capacities(&event), [4,4,8,8,16]); 120 | 121 | // basic 122 | event.truncate_front(4); 123 | reader.update_position(); 124 | assert_equal(get_chunks_capacities(&event), [8,16]); 125 | assert_equal(consume_copies(&mut reader.iter()), [22, 23, 24, 25]); 126 | 127 | // more then queue 128 | event.extend(0..5); 129 | event.truncate_front(10); 130 | assert_equal(consume_copies(&mut reader.iter()), 0..5 as usize); 131 | 132 | // clear all queue 133 | event.extend(0..5); 134 | event.truncate_front(0); 135 | assert_equal(consume_copies(&mut reader.iter()), []); 136 | } 137 | 138 | #[test] 139 | fn force_cleanup_test(){ 140 | struct S{} impl Settings for S{ 141 | const MIN_CHUNK_SIZE: u32 = 4; 142 | const MAX_CHUNK_SIZE: u32 = 4; 143 | } 144 | // clear force cleanup effect 145 | { 146 | let event = EventQueue::::new(); 147 | let mut _reader = EventReader::new(&event); 148 | 149 | event.extend(0..16); 150 | assert_equal(get_chunks_capacities(&event), [4,4,4,4]); 151 | 152 | event.clear(); 153 | // first - because occupied by reader, last - because tip of the queue 154 | assert_equal(get_chunks_capacities(&event), [4, 4]); 155 | } 156 | // truncate force cleanup effect 157 | { 158 | let event = EventQueue::::new(); 159 | let mut _reader = EventReader::new(&event); 160 | 161 | event.extend(0..20); 162 | assert_equal(get_chunks_capacities(&event), [4,4,4,4,4]); 163 | 164 | event.truncate_front(6); 165 | // first + 2 last 166 | assert_equal(get_chunks_capacities(&event), [4,4,4]); 167 | } 168 | } 169 | 170 | #[test] 171 | fn capacity_test(){ 172 | let event = EventQueue::::new(); 173 | 174 | event.extend(0..26); 175 | assert_equal(get_chunks_capacities(&event), [4,4,8,8,16]); 176 | 177 | assert_eq!(event.chunk_capacity(), 16); 178 | assert_eq!(event.total_capacity(), get_chunks_capacities(&event).iter().sum()); 179 | } 180 | 181 | #[test] 182 | fn fuzzy_capacity_size_test(){ 183 | use rand::Rng; 184 | let mut rng = rand::thread_rng(); 185 | let size_bound = if cfg!(miri){ 1000 } else { 100000 }; 186 | let read_bound = if cfg!(miri){ 100 } else { 10000 }; 187 | for _ in 0..100{ 188 | let size = rng.gen_range(0..size_bound); 189 | let event = EventQueue::::new(); 190 | let mut reader = EventReader::new(&event); 191 | event.extend(0..size); 192 | { 193 | let mut iter = reader.iter(); 194 | for _ in 0..rng.gen_range(0..read_bound){ 195 | iter.next(); 196 | } 197 | } 198 | 199 | assert_eq!(event.total_capacity(), factual_capacity(&event)); 200 | } 201 | } 202 | 203 | #[test] 204 | #[allow(non_snake_case)] 205 | fn CleanupMode_OnNewChunk_test(){ 206 | struct S{} impl Settings for S{ 207 | const MIN_CHUNK_SIZE: u32 = 4; 208 | const 
MAX_CHUNK_SIZE: u32 = 4; 209 | const CLEANUP: CleanupMode = CleanupMode::OnNewChunk; 210 | } 211 | let event = EventQueue::::new(); 212 | let mut reader = EventReader::new(&event); 213 | 214 | event.extend(0..16); 215 | assert_equal(get_chunks_capacities(&event), [4,4,4,4]); 216 | 217 | // 8 - will stop reader on the very last element of 2nd chunk. And will not leave it. So use 9 218 | skip(&mut reader.iter(), 9); 219 | assert_equal(get_chunks_capacities(&event), [4,4,4,4]); 220 | 221 | event.push(100); 222 | assert_equal(get_chunks_capacities(&event), [4,4,4]); 223 | } 224 | 225 | #[test] 226 | #[allow(non_snake_case)] 227 | fn CleanupMode_Never_test(){ 228 | struct S{} impl Settings for S{ 229 | const MIN_CHUNK_SIZE: u32 = 4; 230 | const MAX_CHUNK_SIZE: u32 = 4; 231 | const CLEANUP: CleanupMode = CleanupMode::Never; 232 | } 233 | let event = EventQueue::::new(); 234 | let mut reader = EventReader::new(&event); 235 | 236 | event.extend(0..12); 237 | assert_equal(get_chunks_capacities(&event), [4,4,4]); 238 | 239 | skip(&mut reader.iter(), 5); 240 | assert_equal(get_chunks_capacities(&event), [4,4,4]); 241 | 242 | event.push(100); 243 | assert_equal(get_chunks_capacities(&event), [4,4,4,4]); 244 | 245 | consume_copies(&mut reader.iter()); 246 | assert_equal(get_chunks_capacities(&event), [4,4,4,4]); 247 | 248 | event.cleanup(); 249 | assert_equal(get_chunks_capacities(&event), [4]); 250 | } -------------------------------------------------------------------------------- /src/event_reader.rs: -------------------------------------------------------------------------------- 1 | // Chunk's read_completely_times updated on Iter::Drop 2 | // 3 | // Chunk's iteration synchronization occurs around [ChunkStorage::storage_len] acquire/release access 4 | // 5 | 6 | use crate::sync::Ordering; 7 | use std::ptr::{NonNull}; 8 | use crate::event_queue::{CleanupMode, EventQueue, foreach_chunk, Settings}; 9 | use std::ops::ControlFlow::{Continue}; 10 | use crate::cursor::Cursor; 11 | use crate::chunk_state::{PackedChunkState}; 12 | use crate::StartPositionEpoch; 13 | 14 | pub struct EventReader 15 | { 16 | pub(super) position: Cursor, 17 | pub(super) start_position_epoch: StartPositionEpoch, 18 | } 19 | 20 | unsafe impl Send for EventReader{} 21 | 22 | impl EventReader 23 | { 24 | // Have much better performance being non-inline. Occurs rarely. 25 | // This is the only reason this code - is a function. 26 | #[inline(never)] 27 | #[cold] 28 | fn do_update_start_position_and_get_chunk_state(&mut self) -> PackedChunkState { 29 | let event = unsafe{(*self.position.chunk).event()}; 30 | 31 | // fast forward 32 | { 33 | let start_position_lock = event.start_position.lock(); 34 | if let Some(start_position) = *start_position_lock{ 35 | if self.position < start_position { 36 | 37 | // 1. Enter new_position chunk 38 | let new_chunk = unsafe{&*start_position.chunk}; 39 | new_chunk.readers_entered().fetch_add(1, Ordering::AcqRel); 40 | 41 | // 2. Mark current chunk read 42 | let chunk = unsafe{&*self.position.chunk}; 43 | if /*constexpr*/ S::CLEANUP == CleanupMode::OnChunkRead { 44 | let event = chunk.event(); 45 | let readers_entered = chunk.readers_entered().load(Ordering::Acquire); 46 | 47 | // MORE or equal, just in case (this MT...). This check is somewhat opportunistic. 
48 | let prev_read = chunk.read_completely_times().fetch_add(1, Ordering::AcqRel); 49 | if prev_read+1 >= readers_entered{ 50 | drop(start_position_lock); 51 | event.cleanup(); 52 | } 53 | } else { 54 | chunk.read_completely_times().fetch_add(1, Ordering::AcqRel); 55 | } 56 | 57 | // 3. Change position 58 | self.position = start_position; 59 | } 60 | } 61 | } 62 | 63 | unsafe{&*self.position.chunk}.chunk_state(Ordering::Acquire) 64 | } 65 | 66 | // Returns len of actual self.position.chunk 67 | #[inline] 68 | fn update_start_position_and_get_chunk_state(&mut self) -> PackedChunkState { 69 | let chunk_state = unsafe{&*self.position.chunk}.chunk_state(Ordering::Acquire); 70 | let epoch = chunk_state.epoch(); 71 | 72 | if /*unlikely*/ epoch != self.start_position_epoch { 73 | self.start_position_epoch = epoch; 74 | self.do_update_start_position_and_get_chunk_state() 75 | } else { 76 | chunk_state 77 | } 78 | } 79 | 80 | // Do we actually need this as separate fn? Benchmark. 81 | #[inline] 82 | pub fn update_position(&mut self) { 83 | self.update_start_position_and_get_chunk_state(); 84 | } 85 | 86 | // TODO: copy_iter() ? 87 | 88 | #[inline] 89 | pub fn iter(&mut self) -> Iter{ 90 | Iter::new(self) 91 | } 92 | } 93 | 94 | impl Drop for EventReader{ 95 | fn drop(&mut self) { 96 | unsafe { 97 | EventQueue::::unsubscribe( 98 | NonNull::from((*self.position.chunk).event()), 99 | self 100 | ); 101 | } 102 | } 103 | } 104 | 105 | /// This should be rust GAT iterator. But it does not exists yet. 106 | pub trait LendingIterator{ 107 | type ItemValue; 108 | fn next(&mut self) -> Option<&Self::ItemValue>; 109 | } 110 | 111 | // Having separate chunk+index, allow us to postpone marking passed chunks as read, until the Iter destruction. 112 | // This allows to return &T instead of T 113 | pub struct Iter<'a, T, S: Settings> 114 | { 115 | position: Cursor, 116 | chunk_state : PackedChunkState, 117 | // &mut to ensure that only one Iter for Reader can exists 118 | event_reader : &'a mut EventReader, 119 | } 120 | 121 | impl<'a, T, S: Settings> Iter<'a, T, S>{ 122 | #[inline] 123 | fn new(event_reader: &'a mut EventReader) -> Self{ 124 | let chunk_state = event_reader.update_start_position_and_get_chunk_state(); 125 | Self{ 126 | position: event_reader.position, 127 | chunk_state, 128 | event_reader, 129 | } 130 | } 131 | } 132 | 133 | impl<'a, T, S: Settings> LendingIterator for Iter<'a, T, S>{ 134 | type ItemValue = T; 135 | 136 | #[inline] 137 | fn next(&mut self) -> Option<&Self::ItemValue> { 138 | if /*unlikely*/ self.position.index as u32 == self.chunk_state.len(){ 139 | // should try next chunk? 140 | if !self.chunk_state.has_next(){ 141 | return None; 142 | } 143 | 144 | // acquire next chunk 145 | let next_chunk = unsafe{ 146 | let chunk = &*self.position.chunk; 147 | let _lock = chunk.chunk_switch_mutex().read(); 148 | 149 | let next = chunk.next(Ordering::Acquire); 150 | debug_assert!(!next.is_null()); 151 | 152 | (*next).readers_entered().fetch_add(1, Ordering::AcqRel); 153 | &*next 154 | }; 155 | 156 | // switch chunk 157 | self.position.chunk = next_chunk; 158 | self.position.index = 0; 159 | self.chunk_state = next_chunk.chunk_state(Ordering::Acquire); 160 | 161 | // Maybe 0, when new chunk is created, but item still not pushed. 162 | // It is possible rework `push`/`extend` in the way that this situation will not exists. 163 | // But for now, just have this check here. 
164 | if self.chunk_state.len() == 0 { 165 | return None; 166 | } 167 | } 168 | 169 | let chunk = unsafe{&*self.position.chunk}; 170 | let value = unsafe { chunk.get_unchecked(self.position.index) }; 171 | self.position.index += 1; 172 | 173 | Some(value) 174 | } 175 | } 176 | 177 | impl<'a, T, S: Settings> Drop for Iter<'a, T, S>{ 178 | #[inline] 179 | fn drop(&mut self) { 180 | let try_cleanup = S::CLEANUP == CleanupMode::OnChunkRead; // should be const 181 | 182 | debug_assert!(self.position >= self.event_reader.position); 183 | let mut need_cleanup = false; 184 | 185 | let first_chunk = self.event_reader.position.chunk; 186 | let end_chunk = self.position.chunk; 187 | 188 | // 1. Mark passed chunks as read 189 | unsafe { 190 | // It is ok here to switch chunks without chunk_switch_mutex. 191 | // Chunk already held by in-out counter imbalance. 192 | foreach_chunk( 193 | first_chunk, 194 | end_chunk, 195 | Ordering::Acquire, 196 | |chunk| { 197 | debug_assert!( 198 | !chunk.next(Ordering::Acquire).is_null() 199 | ); 200 | let prev_read = chunk.read_completely_times().fetch_add(1, Ordering::AcqRel); 201 | 202 | if try_cleanup { 203 | // TODO: move out of loop and benchmark. 204 | if chunk as *const _ == first_chunk{ 205 | let read = prev_read+1; 206 | let chunk_readers = chunk.readers_entered().load(Ordering::Acquire); 207 | if read >= chunk_readers { 208 | need_cleanup = true; 209 | } 210 | } 211 | } 212 | 213 | Continue(()) 214 | } 215 | ); 216 | } 217 | 218 | // Cleanup (optional) 219 | if try_cleanup { 220 | if need_cleanup{ 221 | unsafe{&*end_chunk}.event().cleanup(); 222 | } 223 | } 224 | 225 | // 2. Update EventReader chunk+index 226 | self.event_reader.position = self.position; 227 | } 228 | } -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Concurrent FIFO event queue / message queue. Multi consumer. Each consumer receive all messages. 2 | //! Lock-free reading. Write under lock (for [mpmc] version). Write lock does not block read. 3 | //! 4 | //! Linked list of chunks (C++ std::deque -like). Each chunk have "read counter". 5 | //! When "read counter" reach readers count - it is safe to drop chunk. Chunk considered read, when 6 | //! Reader reach its end. See `doc/principle-of-operation.md`. 7 | //! 8 | //! `EventQueue` live, until `EventReader`s live. 9 | //! In order to completely drop `EventQueue` - drop all associated `EventReader`s. 10 | //! 11 | //! # Features 12 | //! 13 | //! * `double_buffering` : Reuse biggest freed chunk. 14 | 15 | mod sync; 16 | mod utils; 17 | mod cursor; 18 | mod event_queue; 19 | mod event_reader; 20 | mod chunk_state; 21 | #[allow(dead_code)] 22 | mod dynamic_array; 23 | 24 | // TODO: make double_buffering not a feature. 25 | #[allow(dead_code)] 26 | mod dynamic_chunk; 27 | 28 | /// Epoch of EventQueue::start_position 29 | type StartPositionEpoch = crate::utils::Epoch; 30 | 31 | pub use crate::event_queue::CleanupMode; 32 | pub use crate::event_reader::LendingIterator; 33 | 34 | pub mod prelude{ 35 | pub use crate::CleanupMode; 36 | pub use crate::LendingIterator; 37 | } 38 | 39 | pub mod mpmc; 40 | pub mod spmc; 41 | 42 | #[cfg(test)] 43 | mod tests; -------------------------------------------------------------------------------- /src/mpmc/event_queue.rs: -------------------------------------------------------------------------------- 1 | // This is canonical variant. 
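// A thin #[repr(transparent)] new-type over the base event_queue::EventQueue,
// instantiated with the `BS<S>` settings adapter. Each public method takes the
// `list` lock and forwards to the base implementation.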
2 | 3 | use std::pin::Pin; 4 | use crate::sync::Arc; 5 | use crate::event_queue::{EventQueue as BaseEventQueue}; 6 | use crate::mpmc::{BS, DefaultSettings, Settings}; 7 | 8 | #[repr(transparent)] 9 | pub struct EventQueue( 10 | pub(crate) BaseEventQueue> 11 | ); 12 | 13 | impl EventQueue{ 14 | #[inline] 15 | pub fn new() -> Pin> { 16 | Self::with_capacity(S::MIN_CHUNK_SIZE) 17 | } 18 | 19 | // Hide for a while. 20 | #[inline] 21 | fn with_capacity(new_capacity: u32) -> Pin> { 22 | let base = BaseEventQueue::>::with_capacity(new_capacity); 23 | unsafe { 24 | let base_ptr = Arc::into_raw(Pin::into_inner_unchecked(base)); 25 | Pin::new_unchecked( 26 | Arc::from_raw(base_ptr as *const Self) 27 | ) 28 | } 29 | } 30 | 31 | #[inline] 32 | pub fn push(&self, value: T){ 33 | let mut list = self.0.list.lock(); 34 | self.0.push(&mut list, value); 35 | } 36 | 37 | #[inline] 38 | pub fn extend(&self, iter: I) 39 | where I: IntoIterator 40 | { 41 | let mut list = self.0.list.lock(); 42 | self.0.extend(&mut list, iter); 43 | } 44 | 45 | /// Free all completely read chunks. 46 | /// 47 | /// Called automatically with [Settings::CLEANUP] != Never. 48 | #[inline] 49 | pub fn cleanup(&self){ 50 | self.0.cleanup(); 51 | } 52 | 53 | /// "Lazily move" all readers positions to the "end of the queue". From readers perspective, 54 | /// equivalent to conventional `clear`. 55 | /// 56 | /// Immediately free all chunks, **unoccupied** by readers. 57 | /// 58 | /// "End of the queue" - is the queue's end position at the moment of the `clear` call. 59 | /// 60 | /// "Lazy move" - means that reader actually change position and free occupied chunk, 61 | /// only when actual read starts. 62 | #[inline] 63 | pub fn clear(&self){ 64 | let mut list = self.0.list.lock(); 65 | self.0.clear(&mut list); 66 | } 67 | 68 | /// "Lazily move" all readers positions to the `len`-th element from the end of the queue. 69 | /// From readers perspective, equivalent to conventional `truncate` from the other side. 70 | /// 71 | /// Immediately free chunks, **unoccupied** by readers. 72 | /// 73 | /// "Lazy move" - means that reader actually change position and free occupied chunk, 74 | /// only when actual read starts. 75 | #[inline] 76 | pub fn truncate_front(&self, len: usize){ 77 | let mut list = self.0.list.lock(); 78 | self.0.truncate_front(&mut list, len); 79 | } 80 | 81 | /// Adds chunk with `new_capacity` capacity. All next writes will be on new chunk. 82 | /// 83 | /// If you configured [Settings::MAX_CHUNK_SIZE] to high value, use this, in conjunction 84 | /// with [clear](Self::clear) / [truncate_front](Self::truncate_front), to reduce 85 | /// memory pressure ASAP. 86 | /// 87 | /// Total capacity will be temporarily increased, until readers get to the new chunk. 88 | #[inline] 89 | pub fn change_chunk_capacity(&self, new_capacity: u32){ 90 | let mut list = self.0.list.lock(); 91 | self.0.change_chunk_capacity(&mut list, new_capacity); 92 | } 93 | 94 | /// Returns total chunks capacity. 
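// Note: after `clear`/`truncate_front`, chunks still occupied by readers are freed
// lazily, so `total_capacity()` below may stay elevated until every reader advances
// past them (see `change_chunk_capacity` above and `clear_test` in `src/tests/mpmc.rs`).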
95 | #[inline]
96 | pub fn total_capacity(&self) -> usize{
97 | let mut list = self.0.list.lock();
98 | self.0.total_capacity(&mut list)
99 | }
100 | 
101 | /// Returns the capacity of the last (active) chunk.
102 | #[inline]
103 | pub fn chunk_capacity(&self) -> usize{
104 | let mut list = self.0.list.lock();
105 | self.0.chunk_capacity(&mut list)
106 | }
107 | }
108 | 
109 | unsafe impl Send for EventQueue{}
110 | unsafe impl Sync for EventQueue{}
--------------------------------------------------------------------------------
/src/mpmc/event_reader.rs:
--------------------------------------------------------------------------------
1 | // New-type wrapper around the base EventReader, mostly to hide `BS`.
2 | 
3 | use crate::event_reader::{EventReader as BaseEventReader, LendingIterator};
4 | use crate::event_reader::Iter as BaseIter;
5 | use crate::mpmc::{BS, EventQueue, Settings};
6 | 
7 | pub struct EventReader<T, S: Settings>(BaseEventReader<T, BS<S>>);
8 | impl<T, S: Settings> EventReader<T, S>{
9 | #[inline]
10 | pub fn new(event_queue: &EventQueue<T, S>) -> Self {
11 | Self{0: event_queue.0.subscribe(&mut event_queue.0.list.lock())}
12 | }
13 | 
14 | /// Moves the cursor to the new position, if necessary.
15 | ///
16 | /// This moves the reader to the new position and marks all chunks between the current
17 | /// and the new position as "read".
18 | ///
19 | /// You need this only if you cleared/truncated the queue and now want to force-free memory.
20 | /// (A chunk is deleted once ALL readers have marked it as read.)
21 | ///
22 | /// Functionally, this is the same as calling `iter()` and dropping the result.
23 | #[inline]
24 | pub fn update_position(&mut self){
25 | self.0.update_position();
26 | }
27 | 
28 | /// This is a consuming iterator that returns references.
29 | /// The returned references must not outlive the iterator.
30 | ///
31 | /// Read counters of the affected chunks are updated in `Iter::drop`.
32 | #[inline]
33 | pub fn iter(&mut self) -> Iter<T, S>{
34 | Iter{ 0: self.0.iter() }
35 | }
36 | }
37 | 
38 | /// A consuming iterator.
39 | ///
40 | /// Returns references, whose lifetimes are bound to the `Iter`.
41 | ///
42 | /// On [drop] `cleanup` may be called. See [Settings::CLEANUP].
43 | pub struct Iter<'a, T, S: Settings> (BaseIter<'a, T, BS<S>>);
44 | impl <'a, T, S: Settings> LendingIterator for Iter<'a, T, S>{
45 | type ItemValue = T;
46 | 
47 | #[inline]
48 | fn next(&mut self) -> Option<&Self::ItemValue> {
49 | self.0.next()
50 | }
51 | }
--------------------------------------------------------------------------------
/src/mpmc/mod.rs:
--------------------------------------------------------------------------------
1 | //! Multi-producer multi-consumer.
2 | //!
3 | //! Lock-free reading. Write under lock.
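//!
//! A minimal usage sketch (single-threaded here for brevity; any number of producers
//! and consumers may be used, and every reader observes every pushed value):
//!
//! ```
//! use rc_event_queue::mpmc::{EventQueue, EventReader};
//! use rc_event_queue::prelude::*;
//!
//! let event = EventQueue::<usize>::new();
//! let mut reader = EventReader::new(&event);
//!
//! event.push(1);
//! event.extend(2..5);
//!
//! let mut iter = reader.iter();
//! while let Some(item) = iter.next() {
//!     // `item: &usize`; it must not outlive `iter`.
//!     let _ = *item;
//! }
//! ```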
4 | 
5 | mod event_queue;
6 | mod event_reader;
7 | 
8 | use crate::CleanupMode;
9 | use crate::event_queue::Settings as BaseSettings;
10 | use std::marker::PhantomData;
11 | 
12 | pub use event_queue::*;
13 | pub use event_reader::*;
14 | 
15 | pub trait Settings{
16 | const MIN_CHUNK_SIZE : u32 = 4;
17 | const MAX_CHUNK_SIZE : u32 = 4096;
18 | const CLEANUP: CleanupMode = CleanupMode::OnChunkRead;
19 | }
20 | 
21 | pub struct DefaultSettings{}
22 | impl Settings for DefaultSettings{}
23 | 
24 | /// mpmc::Settings -> event_queue::Settings
25 | pub(crate) struct BS<S: Settings>{
26 | _phantom: PhantomData<S>
27 | }
28 | impl<S: Settings> BaseSettings for BS<S>{
29 | const MIN_CHUNK_SIZE : u32 = S::MIN_CHUNK_SIZE;
30 | const MAX_CHUNK_SIZE : u32 = S::MAX_CHUNK_SIZE;
31 | const CLEANUP: CleanupMode = S::CLEANUP;
32 | const LOCK_ON_NEW_CHUNK_CLEANUP: bool = false;
33 | const CLEANUP_IN_UNSUBSCRIBE: bool = true;
34 | }
--------------------------------------------------------------------------------
/src/spmc/event_queue.rs:
--------------------------------------------------------------------------------
1 | use std::pin::Pin;
2 | use crate::sync::Arc;
3 | use crate::event_queue::{EventQueue as BaseEventQueue, List};
4 | use crate::spmc::{BS, DefaultSettings, Settings};
5 | use crate::CleanupMode;
6 | 
7 | /// See [mpmc](crate::mpmc::EventQueue) documentation.
8 | ///
9 | /// Only [cleanup](EventQueue::cleanup) and `unsubscribe` (on `EventReader::drop`) are synchronized.
10 | /// Everything else is overhead-free.
11 | ///
12 | /// Insert performance is in the `std::vec::Vec` league.
13 | pub struct EventQueue<T, S: Settings = DefaultSettings>(
14 | pub(crate) Arc<BaseEventQueue<T, BS<S>>>
15 | );
16 | 
17 | impl<T, S: Settings> EventQueue<T, S>{
18 | #[inline]
19 | pub fn new() -> Self {
20 | Self::with_capacity(S::MIN_CHUNK_SIZE)
21 | }
22 | 
23 | // Hide for a while.
24 | #[inline]
25 | fn with_capacity(new_capacity: u32) -> Self {
26 | assert!(S::CLEANUP!=CleanupMode::OnChunkRead, "CleanupMode::OnChunkRead is not a valid mode for spmc");
27 | let base = BaseEventQueue::<T, BS<S>>::with_capacity(new_capacity);
28 | unsafe {
29 | let base_arc = Pin::into_inner_unchecked(base);
30 | Self{0: base_arc}
31 | }
32 | }
33 | 
34 | // Access the list without taking the lock.
35 | #[inline]
36 | pub(crate) fn get_list(&self) -> &List<T, BS<S>> {
37 | unsafe{ &*self.0.list.data_ptr() }
38 | }
39 | // Ideally this would take `&mut self`... but that runs into self-reference problems later...
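// Note on soundness (informal): spmc assumes a single producer, and every mutating
// method below (`push`, `extend`, `clear`, `truncate_front`, `change_chunk_capacity`)
// takes `&mut self`, so this unsynchronized access is never reached by two writers at
// once; the reader-side paths that do touch the list are the synchronized ones listed
// in the type-level docs above (`cleanup` and unsubscribe on `EventReader::drop`).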
40 | #[inline] 41 | pub(crate) fn get_list_mut(&self) -> &mut List> { 42 | unsafe{ &mut *self.0.list.data_ptr() } 43 | } 44 | 45 | #[inline] 46 | pub fn push(&mut self, value: T){ 47 | let list = self.get_list_mut(); 48 | self.0.push(list, value); 49 | } 50 | 51 | #[inline] 52 | pub fn extend(&mut self, iter: I) 53 | where I: IntoIterator 54 | { 55 | self.0.extend(self.get_list_mut(), iter); 56 | } 57 | 58 | #[inline] 59 | pub fn cleanup(&mut self){ 60 | self.0.cleanup(); 61 | } 62 | 63 | #[inline] 64 | pub fn clear(&mut self){ 65 | self.0.clear(self.get_list_mut()); 66 | } 67 | 68 | #[inline] 69 | pub fn truncate_front(&mut self, len: usize){ 70 | self.0.truncate_front(self.get_list_mut(), len); 71 | } 72 | 73 | #[inline] 74 | pub fn change_chunk_capacity(&mut self, new_capacity: u32){ 75 | self.0.change_chunk_capacity(self.get_list_mut(), new_capacity); 76 | } 77 | 78 | #[inline] 79 | pub fn total_capacity(&self) -> usize{ 80 | self.0.total_capacity(self.get_list()) 81 | } 82 | 83 | #[inline] 84 | pub fn chunk_capacity(&self) -> usize{ 85 | self.0.chunk_capacity(self.get_list()) 86 | } 87 | } 88 | 89 | unsafe impl Send for EventQueue{} 90 | -------------------------------------------------------------------------------- /src/spmc/event_reader.rs: -------------------------------------------------------------------------------- 1 | // new-type EventReader, mostly to hide `BS` 2 | 3 | use crate::event_reader::{EventReader as BaseEventReader, LendingIterator}; 4 | use crate::event_reader::Iter as BaseIter; 5 | use crate::spmc::{BS, EventQueue, Settings}; 6 | 7 | /// See [mpmc](crate::mpmc::EventReader) documentation. 8 | pub struct EventReader(BaseEventReader>); 9 | impl EventReader{ 10 | #[inline] 11 | pub fn new(event_queue: &mut EventQueue) -> Self { 12 | Self{0: event_queue.0.subscribe(event_queue.get_list_mut())} 13 | } 14 | 15 | #[inline] 16 | pub fn update_position(&mut self){ 17 | self.0.update_position(); 18 | } 19 | 20 | #[inline] 21 | pub fn iter(&mut self) -> Iter{ 22 | Iter{ 0: self.0.iter() } 23 | } 24 | } 25 | 26 | /// See [mpmc](crate::mpmc::Iter) documentation. 27 | pub struct Iter<'a, T, S: Settings> (BaseIter<'a, T, BS>); 28 | impl <'a, T, S: Settings> LendingIterator for Iter<'a, T, S>{ 29 | type ItemValue = T; 30 | 31 | #[inline] 32 | fn next(&mut self) -> Option<&Self::ItemValue> { 33 | self.0.next() 34 | } 35 | } -------------------------------------------------------------------------------- /src/spmc/mod.rs: -------------------------------------------------------------------------------- 1 | //! Single-producer multi-consumer. 2 | //! 3 | //! Same as [mpmc](crate::mpmc), but writes without lock. 4 | //! 5 | //! [CleanupMode::OnChunkRead] is not available for spmc! _Since there is no more lock - reader can not 6 | //! 
safely call cleanup._ 7 | 8 | mod event_queue; 9 | mod event_reader; 10 | 11 | use std::marker::PhantomData; 12 | use crate::event_queue::Settings as BaseSettings; 13 | use crate::CleanupMode; 14 | 15 | pub use event_queue::*; 16 | pub use event_reader::*; 17 | 18 | pub trait Settings{ 19 | const MIN_CHUNK_SIZE : u32 = 4; 20 | const MAX_CHUNK_SIZE : u32 = 4096; 21 | const CLEANUP: CleanupMode = CleanupMode::OnNewChunk; 22 | } 23 | 24 | pub struct DefaultSettings{} 25 | impl Settings for DefaultSettings{} 26 | 27 | /// spmc::Settings -> event_queue::Settings 28 | pub(crate) struct BS{ 29 | _phantom: PhantomData 30 | } 31 | impl BaseSettings for BS{ 32 | const MIN_CHUNK_SIZE : u32 = S::MIN_CHUNK_SIZE; 33 | const MAX_CHUNK_SIZE : u32 = S::MAX_CHUNK_SIZE; 34 | const CLEANUP: CleanupMode = S::CLEANUP; 35 | const LOCK_ON_NEW_CHUNK_CLEANUP: bool = true; 36 | const CLEANUP_IN_UNSUBSCRIBE: bool = false; 37 | } -------------------------------------------------------------------------------- /src/sync/build.rs: -------------------------------------------------------------------------------- 1 | #[cfg(loom)] 2 | pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, AtomicU64, Ordering}; 3 | 4 | #[cfg(loom)] 5 | pub(crate) use loom::sync::Arc; 6 | 7 | #[cfg(loom)] 8 | #[derive(Debug)] 9 | pub struct Mutex(loom::sync::Mutex); 10 | 11 | #[cfg(loom)] 12 | impl Mutex{ 13 | pub fn new(data: T) -> Self { 14 | Self(loom::sync::Mutex::new(data)) 15 | } 16 | 17 | pub fn lock(&self) -> loom::sync::MutexGuard<'_, T> { 18 | self.0.lock().unwrap() 19 | } 20 | 21 | pub fn as_mut_ptr(&self) -> *mut T { 22 | self.data_ptr() 23 | } 24 | 25 | pub fn get_mut(&mut self) -> &mut T { 26 | // There is no way to get without lock in loom 27 | unsafe{ 28 | use std::ops::DerefMut; 29 | &mut *(self.0.lock().unwrap().deref_mut() as *mut T) 30 | } 31 | } 32 | 33 | pub fn data_ptr(&self) -> *mut T { 34 | // There is no way to get without lock in loom 35 | use std::ops::DerefMut; 36 | self.0.lock().unwrap().deref_mut() as *mut T 37 | } 38 | } 39 | 40 | #[cfg(loom)] 41 | pub type SpinMutex = Mutex; 42 | 43 | #[cfg(loom)] 44 | pub(crate) type SpinSharedMutex = loom::sync::RwLock; 45 | 46 | // ========================================================================================== 47 | 48 | #[cfg(not(loom))] 49 | pub(crate) use std::sync::atomic::{AtomicPtr, AtomicUsize, AtomicU64, Ordering}; 50 | 51 | #[cfg(not(loom))] 52 | pub(crate) use std::sync::Arc; 53 | 54 | #[cfg(not(loom))] 55 | //pub(crate) use parking_lot::{Mutex}; 56 | pub(crate) type Mutex = lock_api::Mutex, T>; 57 | 58 | #[cfg(not(loom))] 59 | pub(crate) use spin::mutex::{SpinMutex}; 60 | 61 | #[cfg(not(loom))] 62 | pub(crate) use spin::rwlock::RwLock as SpinSharedMutex; -------------------------------------------------------------------------------- /src/sync/dev.rs: -------------------------------------------------------------------------------- 1 | #[cfg(loom)] 2 | pub(crate) use loom::thread; 3 | 4 | #[cfg(loom)] 5 | pub(crate) use loom::sync::atomic::{AtomicBool}; 6 | 7 | #[cfg(loom)] 8 | pub(crate) type SpinMutexGuard<'a, T> = loom::sync::MutexGuard<'a, T>; 9 | 10 | #[cfg(loom)] 11 | pub(crate) use loom::sync::{MutexGuard}; 12 | 13 | // ========================================================================================== 14 | 15 | #[cfg(not(loom))] 16 | pub(crate) use std::thread; 17 | 18 | #[cfg(not(loom))] 19 | pub(crate) use std::sync::atomic::{AtomicBool}; 20 | 21 | #[cfg(not(loom))] 22 | pub(crate) use spin::mutex::{SpinMutexGuard}; 23 | 24 | 
#[cfg(not(loom))] 25 | //pub(crate) use parking_lot::{MutexGuard}; 26 | pub(crate) use spin::lock_api::{MutexGuard}; 27 | 28 | -------------------------------------------------------------------------------- /src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | mod build; 2 | pub(crate) use build::*; 3 | 4 | #[cfg(test)] 5 | #[allow(dead_code)] 6 | #[allow(unused_imports)] 7 | mod dev; 8 | #[cfg(test)] 9 | pub(crate) use dev::*; 10 | -------------------------------------------------------------------------------- /src/tests/common.rs: -------------------------------------------------------------------------------- 1 | use crate::mpmc::{EventQueue, EventReader, Settings}; 2 | use crate::sync::{thread}; 3 | use crate::tests::utils::consume_copies; 4 | 5 | pub(crate) fn mt_read_test_impl(threads_count: usize, len: usize) { 6 | let event = EventQueue::::new(); 7 | 8 | let mut readers = Vec::new(); 9 | for _ in 0..threads_count{ 10 | readers.push(EventReader::new(&event)); 11 | } 12 | 13 | let mut sum = 0; 14 | for i in 0..len{ 15 | event.push(i); 16 | sum += i; 17 | } 18 | 19 | // read 20 | let mut threads = Vec::new(); 21 | for mut reader in readers{ 22 | let thread = Box::new(thread::spawn(move || { 23 | // some work here 24 | let local_sum: usize = consume_copies(&mut reader.iter()).iter().sum(); 25 | assert!(local_sum == sum); 26 | })); 27 | threads.push(thread); 28 | } 29 | 30 | for thread in threads{ 31 | thread.join().unwrap(); 32 | } 33 | } -------------------------------------------------------------------------------- /src/tests/loom_test.rs: -------------------------------------------------------------------------------- 1 | use crate::mpmc::{DefaultSettings, EventQueue, EventReader, Settings}; 2 | use crate::prelude::*; 3 | use crate::sync::{Arc, thread, Mutex}; 4 | use super::common::*; 5 | use loom::sync::Condvar; 6 | use crate::event_reader::LendingIterator; 7 | 8 | #[test] 9 | fn loom_mt_read_test(){ 10 | loom::model(|| { 11 | struct S{} impl Settings for S{ 12 | const MIN_CHUNK_SIZE: u32 = 4; 13 | const MAX_CHUNK_SIZE: u32 = 4; 14 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 15 | } 16 | mt_read_test_impl::(3, 7); 17 | }); 18 | } 19 | 20 | #[test] 21 | fn loom_mt_write_read_test(){ 22 | // Use Condvar, instead of AtomicBool flag. 23 | // Not the same, but at least loom can handle it. 
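// (loom explores interleavings of its own synchronization primitives and works with a
// bounded preemption budget, so the unbounded `spin_loop` wait on an AtomicBool used
// by the non-loom tests is not something it can model well.)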
24 | loom::model(|| { 25 | let writer_chunk: usize = 3; 26 | let writers_thread_count: usize = 1; //should be 2 writers, instead of 1, but loom does not support >4 threads 27 | let readers_thread_count: usize = 2; 28 | 29 | struct S{} impl Settings for S{ 30 | const MIN_CHUNK_SIZE: u32 = 4; 31 | const MAX_CHUNK_SIZE: u32 = 4; 32 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 33 | } 34 | 35 | let event = EventQueue::<[usize;4], S>::new(); 36 | 37 | let mut readers = Vec::new(); 38 | for _ in 0..readers_thread_count{ 39 | readers.push(EventReader::new(&event)); 40 | } 41 | 42 | // etalon 43 | let sum0: usize = (0..writers_thread_count*writer_chunk).map(|i|i+0).sum(); 44 | let sum1: usize = (0..writers_thread_count*writer_chunk).map(|i|i+1).sum(); 45 | let sum2: usize = (0..writers_thread_count*writer_chunk).map(|i|i+2).sum(); 46 | let sum3: usize = (0..writers_thread_count*writer_chunk).map(|i|i+3).sum(); 47 | 48 | // write 49 | let mut writer_threads = Vec::new(); 50 | for thread_id in 0..writers_thread_count{ 51 | let event = event.clone(); 52 | let thread = Box::new(thread::spawn(move || { 53 | let from = thread_id*writer_chunk; 54 | let to = from+writer_chunk; 55 | 56 | for i in from..to{ 57 | event.push([i, i+1, i+2, i+3]); 58 | } 59 | })); 60 | writer_threads.push(thread); 61 | } 62 | 63 | let readers_stop = Arc::new( 64 | (Mutex::new(false), Condvar::new()) 65 | ); 66 | 67 | let mut reader_threads = Vec::new(); 68 | for mut reader in readers{ 69 | let readers_stop = readers_stop.clone(); 70 | let thread = Box::new(thread::spawn(move || { 71 | let mut local_sum0: usize = 0; 72 | let mut local_sum1: usize = 0; 73 | let mut local_sum2: usize = 0; 74 | let mut local_sum3: usize = 0; 75 | 76 | // do-while ensures that reader will try another round after stop, 77 | // to consume leftovers. Since iter's end/sentinel acquired at iter construction. 
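// The reader holds the stop mutex across iterations and re-checks `stopped` after each
// condvar wake-up; the final pass after `stopped == true` drains any leftovers pushed
// before the writers finished.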
78 | 79 | let (lock, cvar) = &*readers_stop; 80 | let mut stopped = lock.lock(); 81 | 82 | loop { 83 | let mut reader = reader.iter(); 84 | while let Some([i0, i1, i2, i3]) = reader.next(){ 85 | local_sum0 += i0; 86 | local_sum1 += i1; 87 | local_sum2 += i2; 88 | local_sum3 += i3; 89 | } 90 | 91 | if *stopped { break; } 92 | stopped = cvar.wait(stopped).unwrap(); 93 | } 94 | 95 | assert_eq!(local_sum0, sum0); 96 | assert_eq!(local_sum1, sum1); 97 | assert_eq!(local_sum2, sum2); 98 | assert_eq!(local_sum3, sum3); 99 | })); 100 | reader_threads.push(thread); 101 | } 102 | 103 | for thread in writer_threads { 104 | thread.join().unwrap(); 105 | } 106 | 107 | { 108 | let (lock, cvar) = &*readers_stop; 109 | let mut stopped = lock.lock(); 110 | *stopped = true; 111 | cvar.notify_all(); 112 | } 113 | 114 | for thread in reader_threads { 115 | thread.join().unwrap(); 116 | } 117 | }); 118 | } -------------------------------------------------------------------------------- /src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | #[allow(dead_code)] 2 | pub(crate) mod utils; 3 | 4 | mod common; 5 | 6 | #[cfg(not(loom))] 7 | mod mpmc; 8 | 9 | #[cfg(not(loom))] 10 | mod spmc; 11 | 12 | #[cfg(loom)] 13 | mod loom_test; -------------------------------------------------------------------------------- /src/tests/mpmc.rs: -------------------------------------------------------------------------------- 1 | use crate::mpmc::{DefaultSettings, EventQueue, EventReader, Settings}; 2 | use crate::event_queue::{CleanupMode}; 3 | use crate::sync::{AtomicUsize, Ordering, AtomicBool, Arc, thread}; 4 | use itertools::{Itertools, assert_equal}; 5 | use std::borrow::BorrowMut; 6 | use std::ops::Range; 7 | use crate::tests::utils::{consume_copies, consume_mapped}; 8 | use crate::event_reader::LendingIterator; 9 | use super::common::*; 10 | 11 | //#[derive(Clone, Eq, PartialEq, Hash)] 12 | struct Data{ 13 | id : usize, 14 | _name: String, 15 | on_destroy: F 16 | } 17 | 18 | impl Data{ 19 | fn from(i:usize, on_destroy: F) -> Self { 20 | Self{ 21 | id : i, 22 | _name: i.to_string(), 23 | on_destroy 24 | } 25 | } 26 | } 27 | 28 | impl Drop for Data{ 29 | fn drop(&mut self) { 30 | (self.on_destroy)(); 31 | } 32 | } 33 | 34 | #[test] 35 | #[allow(unused_assignments)] 36 | fn push_drop_test() { 37 | let destruct_counter = AtomicUsize::new(0); 38 | let destruct_counter_ref = &destruct_counter; 39 | let on_destroy = ||{destruct_counter_ref.fetch_add(1, Ordering::Relaxed);}; 40 | 41 | struct S{} impl Settings for S{ 42 | const MIN_CHUNK_SIZE: u32 = 4; 43 | const MAX_CHUNK_SIZE: u32 = 4; 44 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 45 | } 46 | 47 | let mut reader_option : Option<_> = None; 48 | { 49 | let chunk_list = EventQueue::<_, S>::new(); 50 | reader_option = Option::Some(EventReader::new(&chunk_list)); 51 | 52 | chunk_list.push(Data::from(0, on_destroy)); 53 | chunk_list.push(Data::from(1, on_destroy)); 54 | chunk_list.push(Data::from(2, on_destroy)); 55 | chunk_list.push(Data::from(3, on_destroy)); 56 | 57 | chunk_list.push(Data::from(4, on_destroy)); 58 | 59 | let reader = reader_option.as_mut().unwrap(); 60 | assert_equal( 61 | consume_mapped(&mut reader.iter(), |data| data.id), 62 | [0, 1, 2, 3, 4] 63 | ); 64 | 65 | // Only first chunk should be freed 66 | assert!(destruct_counter.load(Ordering::Relaxed) == 4); 67 | } 68 | assert!(destruct_counter.load(Ordering::Relaxed) == 4); 69 | reader_option = None; 70 | assert!(destruct_counter.load(Ordering::Relaxed) 
== 5); 71 | } 72 | 73 | #[test] 74 | fn read_on_full_chunk_test() { 75 | let destruct_counter = AtomicUsize::new(0); 76 | let destruct_counter_ref = &destruct_counter; 77 | let on_destroy = ||{destruct_counter_ref.fetch_add(1, Ordering::Relaxed);}; 78 | 79 | { 80 | struct S{} impl Settings for S{ 81 | const MIN_CHUNK_SIZE: u32 = 4; 82 | const MAX_CHUNK_SIZE: u32 = 4; 83 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 84 | } 85 | 86 | let chunk_list = EventQueue::<_, S>::new(); 87 | let mut reader = EventReader::new(&chunk_list); 88 | 89 | chunk_list.push(Data::from(0, on_destroy)); 90 | chunk_list.push(Data::from(1, on_destroy)); 91 | chunk_list.push(Data::from(2, on_destroy)); 92 | chunk_list.push(Data::from(3, on_destroy)); 93 | 94 | assert_equal( 95 | consume_mapped(&mut reader.iter(), |data| data.id), 96 | [0, 1, 2, 3] 97 | ); 98 | assert!(destruct_counter.load(Ordering::Relaxed) == 0); 99 | 100 | assert_equal( 101 | consume_mapped(&mut reader.iter(), |data| data.id), 102 | [] 103 | ); 104 | assert!(destruct_counter.load(Ordering::Relaxed) == 0); 105 | } 106 | assert!(destruct_counter.load(Ordering::Relaxed) == 4); 107 | } 108 | 109 | #[test] 110 | fn huge_push_test() { 111 | struct S{} impl Settings for S{ 112 | const MIN_CHUNK_SIZE: u32 = 4; 113 | const MAX_CHUNK_SIZE: u32 = 4; 114 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 115 | } 116 | 117 | let event = EventQueue::::new(); 118 | let mut reader = EventReader::new(&event); 119 | 120 | let len = 121 | if cfg!(miri){ 1000 } else { 100000 }; 122 | 123 | for i in 0..len{ 124 | event.push(i); 125 | } 126 | 127 | consume_copies(&mut reader.iter()); 128 | } 129 | 130 | #[test] 131 | fn extend_test() { 132 | struct S{} impl Settings for S{ 133 | const MIN_CHUNK_SIZE: u32 = 8; 134 | const MAX_CHUNK_SIZE: u32 = 8; 135 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 136 | } 137 | 138 | let event = EventQueue::::new(); 139 | let mut reader = EventReader::new(&event); 140 | 141 | let len = 142 | if cfg!(miri){ 1000 } else { 100000 }; 143 | let rng : Range = 0..len; 144 | 145 | event.extend(rng.clone()); 146 | 147 | assert_eq!( 148 | consume_copies(&mut reader.iter()).iter().sum::(), 149 | rng.sum() 150 | ); 151 | } 152 | 153 | #[test] 154 | fn clear_test() { 155 | struct S{} impl Settings for S{ 156 | const MIN_CHUNK_SIZE: u32 = 4; 157 | const MAX_CHUNK_SIZE: u32 = 4; 158 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 159 | } 160 | 161 | let event = EventQueue::::new(); 162 | let mut reader = EventReader::new(&event); 163 | 164 | event.push(0); 165 | event.push(1); 166 | event.push(2); 167 | event.push(3); 168 | 169 | event.clear(); 170 | assert!(reader.iter().next().is_none()); 171 | 172 | event.push(4); 173 | event.push(5); 174 | assert_equal( 175 | consume_copies(&mut reader.iter()), 176 | [4, 5 as usize] 177 | ); 178 | } 179 | 180 | #[test] 181 | #[cfg(any(not(miri), not(target_os = "windows")))] 182 | fn mt_push_truncate_test() { 183 | for _ in 0..if cfg!(miri){1} else {100}{ 184 | struct S{} impl Settings for S{ 185 | const MAX_CHUNK_SIZE: u32 = 256; 186 | } 187 | 188 | let event = EventQueue::::new(); 189 | 190 | let mut readers = Vec::new(); 191 | for _ in 0..2{ 192 | readers.push(EventReader::new(&event)); 193 | } 194 | 195 | let writer_thread = { 196 | let event = event.clone(); 197 | Box::new(thread::spawn(move || { 198 | for i in 0..10000{ 199 | event.push(i); 200 | } 201 | })) 202 | }; 203 | 204 | let stop_clear_flag = Arc::new(AtomicBool::new(false)); 205 | let clear_thread = { 206 | let 
event = event.clone(); 207 | let stop_clear_flag = stop_clear_flag.clone(); 208 | Box::new(thread::spawn(move || { 209 | let mut i = 0; 210 | loop { 211 | let stop = stop_clear_flag.load(Ordering::Acquire); 212 | if stop{ 213 | break; 214 | } 215 | 216 | if i == 1000{ 217 | event.truncate_front(100); 218 | i = 0; 219 | } 220 | i += 1; 221 | std::hint::spin_loop(); 222 | } 223 | })) 224 | }; 225 | 226 | // read 227 | let mut threads = Vec::new(); 228 | for mut reader in readers{ 229 | let thread = Box::new(thread::spawn(move || { 230 | // some work here 231 | let _local_sum: usize = consume_copies(&mut reader.iter()).iter().sum(); 232 | })); 233 | threads.push(thread); 234 | } 235 | 236 | writer_thread.join().unwrap(); 237 | 238 | stop_clear_flag.store(true, Ordering::Release); 239 | clear_thread.join().unwrap(); 240 | 241 | for thread in threads{ 242 | thread.join().unwrap(); 243 | } 244 | } 245 | } 246 | 247 | #[test] 248 | #[cfg(any(not(miri), not(target_os = "windows")))] 249 | fn mt_read_test() { 250 | for _ in 0..10{ 251 | struct S{} impl Settings for S{ 252 | const MIN_CHUNK_SIZE: u32 = 512; 253 | const MAX_CHUNK_SIZE: u32 = 512; 254 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 255 | } 256 | mt_read_test_impl::(4, if cfg!(miri){ 1000 } else { 1000000 }); 257 | } 258 | } 259 | 260 | #[test] 261 | #[cfg(any(not(miri), not(target_os = "windows")))] 262 | fn mt_write_read_test() { 263 | for _ in 0..if cfg!(miri){10} else {100} { 264 | let writer_chunk = if cfg!(miri){ 1000 } else { 10000 }; 265 | let writers_thread_count = 2; 266 | let readers_thread_count = 4; 267 | struct S{} impl Settings for S{ 268 | const MIN_CHUNK_SIZE: u32 = 32; 269 | const MAX_CHUNK_SIZE: u32 = 32; 270 | const CLEANUP: CleanupMode = DefaultSettings::CLEANUP; 271 | } 272 | 273 | let event = EventQueue::<[usize;4], S>::new(); 274 | 275 | let mut readers = Vec::new(); 276 | for _ in 0..readers_thread_count{ 277 | readers.push(EventReader::new(&event)); 278 | } 279 | 280 | // etalon 281 | let sum0: usize = (0..writers_thread_count*writer_chunk).map(|i|i+0).sum(); 282 | let sum1: usize = (0..writers_thread_count*writer_chunk).map(|i|i+1).sum(); 283 | let sum2: usize = (0..writers_thread_count*writer_chunk).map(|i|i+2).sum(); 284 | let sum3: usize = (0..writers_thread_count*writer_chunk).map(|i|i+3).sum(); 285 | 286 | // write 287 | let mut writer_threads = Vec::new(); 288 | for thread_id in 0..writers_thread_count{ 289 | let event = event.clone(); 290 | let thread = Box::new(thread::spawn(move || { 291 | let from = thread_id*writer_chunk; 292 | let to = from+writer_chunk; 293 | 294 | for i in from..to{ 295 | event.push([i, i+1, i+2, i+3]); 296 | } 297 | })); 298 | writer_threads.push(thread); 299 | } 300 | 301 | // read 302 | let readers_stop = Arc::new(AtomicBool::new(false)); 303 | let mut reader_threads = Vec::new(); 304 | for mut reader in readers{ 305 | let readers_stop = readers_stop.clone(); 306 | let thread = Box::new(thread::spawn(move || { 307 | let mut local_sum0: usize = 0; 308 | let mut local_sum1: usize = 0; 309 | let mut local_sum2: usize = 0; 310 | let mut local_sum3: usize = 0; 311 | 312 | // do-while ensures that reader will try another round after stop, 313 | // to consume leftovers. Since iter's end/sentinel acquired at iter construction. 
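// I.e. the stop flag is sampled *before* draining, so on the final pass (stop == true)
// the reader still consumes everything pushed before the flag was observed, then exits.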
314 | loop{ 315 | let stop = readers_stop.load(Ordering::Acquire); 316 | 317 | let mut reader = reader.iter(); 318 | while let Some([i0, i1, i2, i3]) = reader.next() { 319 | local_sum0 += i0; 320 | local_sum1 += i1; 321 | local_sum2 += i2; 322 | local_sum3 += i3; 323 | } 324 | 325 | if stop{ break; } 326 | std::hint::spin_loop(); 327 | } 328 | 329 | assert_eq!(local_sum0, sum0); 330 | assert_eq!(local_sum1, sum1); 331 | assert_eq!(local_sum2, sum2); 332 | assert_eq!(local_sum3, sum3); 333 | })); 334 | reader_threads.push(thread); 335 | } 336 | 337 | for thread in writer_threads { 338 | thread.join().unwrap(); 339 | } 340 | readers_stop.store(true, Ordering::Release); 341 | for thread in reader_threads { 342 | thread.join().unwrap(); 343 | } 344 | } 345 | } -------------------------------------------------------------------------------- /src/tests/spmc.rs: -------------------------------------------------------------------------------- 1 | use itertools::assert_equal; 2 | use crate::event_reader::LendingIterator; 3 | use crate::spmc::{EventReader, Settings}; 4 | use crate::spmc::EventQueue; 5 | use crate::sync::{Arc, AtomicBool, Ordering, thread}; 6 | use crate::tests::utils::consume_copies; 7 | 8 | #[test] 9 | fn basic_test(){ 10 | let mut event = EventQueue::::new(); 11 | let mut reader1 = EventReader::new(&mut event); 12 | 13 | event.push(1); 14 | event.extend(2..5); 15 | 16 | assert_equal( consume_copies(&mut reader1.iter()), [1,2,3,4 as usize]); 17 | } 18 | 19 | #[test] 20 | #[cfg(any(not(miri), not(target_os = "windows")))] 21 | fn mt_write_read_test() { 22 | for _ in 0..if cfg!(miri){10} else {100} { 23 | let queue_size = if cfg!(miri){ 1000 } else { 10000 }; 24 | let readers_thread_count = 4; 25 | struct S{} impl Settings for S{ 26 | const MIN_CHUNK_SIZE: u32 = 32; 27 | const MAX_CHUNK_SIZE: u32 = 32; 28 | } 29 | 30 | let mut event = EventQueue::<[usize;4], S>::new(); 31 | 32 | let mut readers = Vec::new(); 33 | for _ in 0..readers_thread_count{ 34 | readers.push(EventReader::new(&mut event)); 35 | } 36 | 37 | // etalon 38 | let sum0: usize = (0..queue_size).map(|i|i+0).sum(); 39 | let sum1: usize = (0..queue_size).map(|i|i+1).sum(); 40 | let sum2: usize = (0..queue_size).map(|i|i+2).sum(); 41 | let sum3: usize = (0..queue_size).map(|i|i+3).sum(); 42 | 43 | // write 44 | let writer_thread = Box::new(thread::spawn(move || { 45 | for i in 0..queue_size{ 46 | event.push([i, i+1, i+2, i+3]); 47 | } 48 | })); 49 | 50 | // read 51 | let readers_stop = Arc::new(AtomicBool::new(false)); 52 | let mut reader_threads = Vec::new(); 53 | for mut reader in readers{ 54 | let readers_stop = readers_stop.clone(); 55 | let thread = Box::new(thread::spawn(move || { 56 | let mut local_sum0: usize = 0; 57 | let mut local_sum1: usize = 0; 58 | let mut local_sum2: usize = 0; 59 | let mut local_sum3: usize = 0; 60 | 61 | // do-while ensures that reader will try another round after stop, 62 | // to consume leftovers. Since iter's end/sentinel acquired at iter construction. 
63 | loop{ 64 | let stop = readers_stop.load(Ordering::Acquire); 65 | 66 | let mut reader = reader.iter(); 67 | while let Some([i0, i1, i2, i3]) = reader.next() { 68 | local_sum0 += i0; 69 | local_sum1 += i1; 70 | local_sum2 += i2; 71 | local_sum3 += i3; 72 | } 73 | 74 | if stop{ break; } 75 | std::hint::spin_loop(); 76 | } 77 | 78 | assert_eq!(local_sum0, sum0); 79 | assert_eq!(local_sum1, sum1); 80 | assert_eq!(local_sum2, sum2); 81 | assert_eq!(local_sum3, sum3); 82 | })); 83 | reader_threads.push(thread); 84 | } 85 | 86 | writer_thread.join().unwrap(); 87 | readers_stop.store(true, Ordering::Release); 88 | for thread in reader_threads { 89 | thread.join().unwrap(); 90 | } 91 | } 92 | } -------------------------------------------------------------------------------- /src/tests/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::event_reader::LendingIterator; 2 | 3 | pub fn consume_copies(iter: &mut impl LendingIterator) -> Vec { 4 | consume_mapped(iter, |item| item.clone()) 5 | } 6 | 7 | pub fn consume_mapped(iter: &mut impl LendingIterator, f: F) -> Vec 8 | where F: Fn(&T) -> R 9 | { 10 | let mut v = Vec::new(); 11 | while let Some(item) = iter.next(){ 12 | v.push( f(item) ); 13 | } 14 | v 15 | } 16 | 17 | pub fn skip(iter: &mut impl LendingIterator, len : usize) { 18 | let mut i = 0; 19 | while let Some(_) = iter.next(){ 20 | i+=1; 21 | 22 | if i == len { 23 | break; 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Add}; 2 | 3 | #[inline(always)] 4 | #[allow(unreachable_code)] 5 | pub fn bittest_u64(value: u64) -> bool { 6 | #[cfg(not(miri))] 7 | unsafe { 8 | #[cfg(target_arch = "x86_64")] 9 | return core::arch::x86_64::_bittest64(&(value as i64), N as i64) != 0; 10 | } 11 | 12 | return value & (1 << N) != 0; 13 | } 14 | #[inline(always)] 15 | #[must_use] 16 | pub fn bitset_u64(mut value: u64, bit: bool) -> u64 { 17 | // should be const. Lets hope rust precalculate it. 
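// Intended semantics: return `value` with bit `N` forced to `bit`, i.e. roughly
// `(value & !(1u64 << N)) | ((bit as u64) << N)`.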
18 | let mask: u64 = 1< (T) 32 | where T : Copy + Add + PartialOrd + From + Into; 33 | 34 | impl Epoch 35 | where T : Copy + Add + PartialOrd + From + Into 36 | { 37 | #[inline(always)] 38 | pub fn zero() -> Self { 39 | Self{0: T::from(0)} 40 | } 41 | 42 | pub fn new(init: T) -> Self { 43 | assert!(init.into() <= MAX); 44 | Self{0: init} 45 | } 46 | 47 | #[inline(always)] 48 | pub unsafe fn new_unchecked(init: T) -> Self { 49 | Self{0: init} 50 | } 51 | 52 | /// +1 53 | #[must_use] 54 | #[inline] 55 | pub fn increment(&self) -> Self{ 56 | if self.0.into() == MAX{ 57 | Self::zero() 58 | } else { 59 | Self{0: self.0 + T::from(1)} 60 | } 61 | } 62 | 63 | #[inline(always)] 64 | pub fn into_inner(self) -> T{ 65 | self.0 66 | } 67 | } -------------------------------------------------------------------------------- /tests/compile-tests.rs: -------------------------------------------------------------------------------- 1 | extern crate compiletest_rs as compiletest; 2 | use std::env; 3 | use std::path::PathBuf; 4 | 5 | fn run_mode(mode: &'static str) { 6 | let mut config = compiletest::Config::default(); 7 | 8 | config.mode = mode.parse().expect("Invalid mode"); 9 | config.src_base = PathBuf::from(format!("tests/compile-tests/{}", mode)); 10 | config.target_rustcflags = Some("-L target/debug -L target/debug/deps".to_string()); 11 | config.clean_rmeta(); // If your tests import the parent crate, this helps with E0464 12 | 13 | compiletest::run_tests(&config); 14 | } 15 | 16 | // If this test fail - try `cargo clean` first. 17 | #[test] 18 | #[cfg(not(miri))] 19 | fn compile_test() { 20 | run_mode("compile-fail"); 21 | } -------------------------------------------------------------------------------- /tests/compile-tests/compile-fail/iter_lifetimes.rs: -------------------------------------------------------------------------------- 1 | extern crate rc_event_queue; 2 | 3 | use rc_event_queue::mpmc::{EventQueue, EventReader, Iter}; 4 | use rc_event_queue::prelude::*; 5 | 6 | fn main() { 7 | let event = EventQueue::::new(); 8 | let mut reader = EventReader::new(&event); 9 | 10 | event.extend(0..10); 11 | 12 | let v = 100; 13 | let mut i: &usize = &v; 14 | { 15 | let mut iter = reader.iter(); 16 | let item = iter.next().unwrap(); //~ ERROR `iter` does not live long enough 17 | i = item; 18 | } 19 | assert_eq!(*i, 100); 20 | } --------------------------------------------------------------------------------