├── .gitignore ├── .rustfmt.toml ├── .travis.yml ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── benches ├── bench_mutex.sh ├── mpmc_channel.rs ├── mutex.rs ├── semaphore.rs └── utils │ └── mod.rs ├── examples ├── cancellation.rs ├── philosophers.rs └── readme.md ├── readme.md ├── src ├── buffer │ ├── mod.rs │ ├── real_array.rs │ └── ring_buffer.rs ├── channel │ ├── channel_future.rs │ ├── error.rs │ ├── mod.rs │ ├── mpmc.rs │ ├── oneshot.rs │ ├── oneshot_broadcast.rs │ └── state_broadcast.rs ├── intrusive_double_linked_list.rs ├── intrusive_pairing_heap.rs ├── lib.rs ├── noop_lock.rs ├── sync │ ├── manual_reset_event.rs │ ├── mod.rs │ ├── mutex.rs │ └── semaphore.rs ├── timer │ ├── clock.rs │ ├── mod.rs │ └── timer.rs └── utils │ └── mod.rs └── tests ├── manual_reset_event.rs ├── mpmc_channel.rs ├── mutex.rs ├── oneshot_channel.rs ├── semaphore.rs ├── state_broadcast_channel.rs └── timer.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | **/*.rs.bk 3 | Cargo.lock 4 | _site 5 | .sass-cache 6 | /.idea 7 | .DS_Store -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 80 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | env: 5 | - RUST_BACKTRACE=1 6 | cache: 7 | directories: 8 | - /home/travis/.cargo 9 | before_cache: 10 | - cargo cache -r registry 11 | before_script: 12 | - rustup component add rustfmt 13 | - (test -x $HOME/.cargo/bin/cargo-cache || cargo install cargo-cache) 14 | script: 15 | - cargo fmt --all -- --check 16 | - cargo test --no-default-features 17 | - cargo test --no-default-features --features alloc 18 | - cargo test --all-targets --all-features 19 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "futures-intrusive" 3 | edition = "2018" 4 | version = "0.5.0" 5 | authors = ["Matthias Einwag "] 6 | license = "MIT OR Apache-2.0" 7 | repository = "https://github.com/Matthias247/futures-intrusive" 8 | homepage = "https://github.com/Matthias247/futures-intrusive" 9 | description = """ 10 | Futures based on intrusive data structures - for std and no-std environments. 
11 | """ 12 | 13 | [lib] 14 | name = "futures_intrusive" 15 | 16 | [features] 17 | alloc = ["futures-core/alloc"] 18 | std = ["alloc", "parking_lot"] 19 | default = ["std"] 20 | 21 | [dependencies] 22 | futures-core = { version = "^0.3", default-features = false } 23 | lock_api = "0.4.1" 24 | parking_lot = { version = "0.12.0", optional = true } 25 | 26 | [dev-dependencies] 27 | futures = { version = "0.3.0", default-features = true, features=["async-await"] } 28 | futures-test = { version = "0.3.0", default-features = true } 29 | pin-utils = "0.1.0" 30 | criterion = "0.3.0" 31 | crossbeam = "0.7" # For channel benchmarks 32 | lazy_static = "1.4.0" 33 | rand = "0.7" 34 | async-std = "1.4" # For benchmarks 35 | tokio = { version = "1.14", features = ["full"] } # For channel benchmarks 36 | signal-hook = "0.1.11" # For cancellation example 37 | 38 | [[bench]] 39 | name = "mpmc_channel" 40 | harness = false 41 | 42 | [[bench]] 43 | name = "mutex" 44 | harness = false 45 | 46 | [[bench]] 47 | name = "semaphore" 48 | harness = false 49 | 50 | [[example]] 51 | name = "cancellation" 52 | required-features = ["std"] 53 | 54 | [[example]] 55 | name = "philosophers" 56 | required-features = ["std"] 57 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright (c) 2019 Matthias Einwag 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 Matthias Einwag 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /benches/bench_mutex.sh: -------------------------------------------------------------------------------- 1 | # This is just a convenience script to filter the important facts out of the criterion report 2 | cargo bench --bench mutex | grep -E "cont|time" | grep -v -E "Warming|Analyzing|Benchmarking|Warning" -------------------------------------------------------------------------------- /benches/mpmc_channel.rs: -------------------------------------------------------------------------------- 1 | use criterion::{ 2 | criterion_group, criterion_main, Criterion, ParameterizedBenchmark, 3 | }; 4 | use futures::{ 5 | executor::block_on, future::join_all, join, sink::SinkExt, 6 | stream::StreamExt, FutureExt, 7 | }; 8 | use futures_intrusive::channel::{ 9 | shared::channel, shared::unbuffered_channel, LocalChannel, 10 | }; 11 | use std::time::Duration; 12 | 13 | /// Elements to transfer per producer 14 | const ELEMS_TO_SEND: usize = 1000; 15 | /// Buffer size for buffered channels 16 | const CHANNEL_BUFFER_SIZE: usize = 20; 17 | 18 | /// Benchmark for Crossbeam channels 19 | fn crossbeam_channel_variable_tx(producers: usize) { 20 | let elems_per_producer = ELEMS_TO_SEND / producers; 21 | let (tx, rx) = crossbeam::channel::bounded(CHANNEL_BUFFER_SIZE); 22 | 23 | for _i in 0..producers { 24 | let tx = tx.clone(); 25 | std::thread::spawn(move || { 26 | for _i in 0..elems_per_producer { 27 | tx.send(4).unwrap(); 28 | } 29 | }); 30 | } 31 | 32 | drop(tx); 33 | 34 | loop { 35 | let res = rx.recv(); 36 | if res.is_err() { 37 | break; 38 | } 39 | } 40 | } 41 | 42 | /// variable producers, single consumer 43 | fn futchan_bounded_variable_tx(producers: usize) { 44 | use futures::channel::mpsc::channel; 45 | let elems_per_producer = ELEMS_TO_SEND / producers; 46 | let (tx, mut rx) = channel(CHANNEL_BUFFER_SIZE); 47 | 48 | for _i in 0..producers { 49 | let mut tx = tx.clone(); 50 | std::thread::spawn(move || { 51 | block_on(async { 52 | for _i in 0..elems_per_producer { 53 | tx.send(4).await.unwrap(); 54 | } 55 | }); 56 | }); 57 | } 58 | 59 | drop(tx); 60 | 61 | block_on(async { 62 | loop { 63 
| let res = rx.next().await; 64 | if res.is_none() { 65 | break; 66 | } 67 | } 68 | }); 69 | } 70 | 71 | /// variable producers, single consumer 72 | fn tokiochan_bounded_variable_tx(producers: usize) { 73 | let elems_per_producer = ELEMS_TO_SEND / producers; 74 | let (tx, mut rx) = tokio::sync::mpsc::channel(CHANNEL_BUFFER_SIZE); 75 | 76 | for _i in 0..producers { 77 | let tx = tx.clone(); 78 | std::thread::spawn(move || { 79 | block_on(async { 80 | for _i in 0..elems_per_producer { 81 | tx.send(4).await.unwrap(); 82 | } 83 | }); 84 | }); 85 | } 86 | 87 | drop(tx); 88 | 89 | block_on(async { 90 | loop { 91 | let res = rx.recv().await; 92 | if res.is_none() { 93 | break; 94 | } 95 | } 96 | }); 97 | } 98 | 99 | macro_rules! intrusive_channel_variable_tx { 100 | ($producers: expr, $channel_constructor: expr) => { 101 | let elems_per_producer = ELEMS_TO_SEND / $producers; 102 | let (tx, rx) = $channel_constructor; 103 | 104 | for _i in 0..$producers { 105 | let tx = tx.clone(); 106 | 107 | std::thread::spawn(move || { 108 | block_on(async { 109 | for _i in 0..elems_per_producer { 110 | let r = tx.send(4).await; 111 | assert!(r.is_ok()); 112 | } 113 | }); 114 | }); 115 | } 116 | 117 | drop(tx); 118 | 119 | block_on(async { 120 | loop { 121 | let res = rx.receive().await; 122 | if res.is_none() { 123 | break; 124 | } 125 | } 126 | }); 127 | }; 128 | } 129 | 130 | /// variable producers, single consumer 131 | fn intrusivechan_bounded_variable_tx(producers: usize) { 132 | intrusive_channel_variable_tx!( 133 | producers, 134 | channel::<i32>(CHANNEL_BUFFER_SIZE) 135 | ); 136 | } 137 | 138 | /// variable producers, single consumer 139 | fn intrusivechan_unbuffered_variable_tx(producers: usize) { 140 | intrusive_channel_variable_tx!(producers, unbuffered_channel::<i32>()); 141 | } 142 | 143 | /// variable producers, single consumer 144 | fn futchan_bounded_variable_tx_single_thread(producers: usize) { 145 | let elems_per_producer = ELEMS_TO_SEND / producers; 146 | 147 | block_on(async { 148 | let (tx, mut rx) = futures::channel::mpsc::channel(CHANNEL_BUFFER_SIZE); 149 | let produce_done = join_all((0..producers).into_iter().map(|_| { 150 | let mut tx = tx.clone(); 151 | async move { 152 | for _i in 0..elems_per_producer { 153 | tx.send(4).await.unwrap(); 154 | } 155 | } 156 | .boxed() 157 | })); 158 | 159 | drop(tx); 160 | 161 | let consume_done = async { 162 | loop { 163 | let res = rx.next().await; 164 | if res.is_none() { 165 | break; 166 | } 167 | } 168 | }; 169 | 170 | join!(produce_done, consume_done); 171 | }); 172 | } 173 | 174 | /// variable producers, single consumer 175 | fn tokiochan_bounded_variable_tx_single_thread(producers: usize) { 176 | let elems_per_producer = ELEMS_TO_SEND / producers; 177 | 178 | block_on(async { 179 | let (tx, mut rx) = tokio::sync::mpsc::channel(CHANNEL_BUFFER_SIZE); 180 | let produce_done = join_all((0..producers).into_iter().map(|_| { 181 | let tx = tx.clone(); 182 | async move { 183 | for _i in 0..elems_per_producer { 184 | tx.send(4).await.unwrap(); 185 | } 186 | } 187 | .boxed() 188 | })); 189 | 190 | drop(tx); 191 | 192 | let consume_done = async { 193 | loop { 194 | let res = rx.recv().await; 195 | if res.is_none() { 196 | break; 197 | } 198 | } 199 | }; 200 | 201 | join!(produce_done, consume_done); 202 | }); 203 | } 204 | 205 | macro_rules!
intrusive_channel_variable_tx_single_thread { 206 | ($producers: expr, $channel_constructor: expr) => { 207 | let elems_per_producer = ELEMS_TO_SEND / $producers; 208 | 209 | block_on(async { 210 | let (tx, rx) = $channel_constructor; 211 | let produce_done = 212 | join_all((0..$producers).into_iter().map(|_| { 213 | let tx = tx.clone(); 214 | Box::pin(async move { 215 | for _i in 0..elems_per_producer { 216 | let r = tx.send(4).await; 217 | assert!(r.is_ok()); 218 | } 219 | }) 220 | })); 221 | 222 | drop(tx); 223 | 224 | let consume_done = async { 225 | loop { 226 | let res = rx.receive().await; 227 | if res.is_none() { 228 | break; 229 | } 230 | } 231 | }; 232 | 233 | join!(produce_done, consume_done); 234 | }); 235 | }; 236 | } 237 | 238 | /// variable producers, single consumer 239 | fn intrusivechan_bounded_variable_tx_single_thread(producers: usize) { 240 | intrusive_channel_variable_tx_single_thread!( 241 | producers, 242 | channel::<i32>(CHANNEL_BUFFER_SIZE) 243 | ); 244 | } 245 | 246 | /// variable producers, single consumer 247 | fn intrusivechan_unbuffered_variable_tx_single_thread(producers: usize) { 248 | intrusive_channel_variable_tx_single_thread!( 249 | producers, 250 | unbuffered_channel::<i32>() 251 | ); 252 | } 253 | 254 | /// variable producers, single consumer 255 | fn intrusive_local_chan_bounded_variable_tx_single_thread(producers: usize) { 256 | let elems_per_producer = ELEMS_TO_SEND / producers; 257 | 258 | block_on(async { 259 | let rx = LocalChannel::<i32, [i32; CHANNEL_BUFFER_SIZE]>::new(); 260 | let produce_done = join_all((0..producers).into_iter().map(|_| { 261 | Box::pin(async { 262 | for _i in 0..elems_per_producer { 263 | let r = rx.send(4).await; 264 | assert!(r.is_ok()); 265 | } 266 | }) 267 | })); 268 | 269 | let consume_done = async { 270 | let mut count = 0; 271 | let needed = elems_per_producer * producers; 272 | loop { 273 | let _ = rx.receive().await.unwrap(); 274 | // The channel doesn't automatically get closed when producers are 275 | // gone since producer and consumer are the same object type. 276 | // Therefore we need to count receives.
277 | count += 1; 278 | if count == needed { 279 | break; 280 | } 281 | } 282 | }; 283 | 284 | join!(produce_done, consume_done); 285 | }); 286 | } 287 | 288 | fn criterion_benchmark(c: &mut Criterion) { 289 | // Producer and consumer are running on the same thread 290 | c.bench( 291 | "Channels (Single Threaded)", 292 | ParameterizedBenchmark::new( 293 | "intrusive local channel with producers", 294 | |b, &&producers| { 295 | b.iter(|| { 296 | intrusive_local_chan_bounded_variable_tx_single_thread( 297 | producers, 298 | ) 299 | }) 300 | }, 301 | &[5, 20, 100], 302 | ) 303 | .with_function("intrusive channel with producers", |b, &&producers| { 304 | b.iter(|| { 305 | intrusivechan_bounded_variable_tx_single_thread(producers) 306 | }) 307 | }) 308 | .with_function( 309 | "intrusive unbuffered channel with producers", 310 | |b, &&producers| { 311 | b.iter(|| { 312 | intrusivechan_unbuffered_variable_tx_single_thread( 313 | producers, 314 | ) 315 | }) 316 | }, 317 | ) 318 | .with_function( 319 | "futures::channel::mpsc with producers", 320 | |b, &&producers| { 321 | b.iter(|| futchan_bounded_variable_tx_single_thread(producers)) 322 | }, 323 | ) 324 | .with_function( 325 | "tokio::sync::mpsc with producers", 326 | |b, &&producers| { 327 | b.iter(|| { 328 | tokiochan_bounded_variable_tx_single_thread(producers) 329 | }) 330 | }, 331 | ), 332 | ); 333 | 334 | // Producer and consumer run on different threads 335 | c.bench( 336 | "Channels (Thread per producer)", 337 | ParameterizedBenchmark::new( 338 | "crossbeam channel with producers", 339 | |b, &&producers| { 340 | b.iter(|| crossbeam_channel_variable_tx(producers)) 341 | }, 342 | &[5, 20, 100], 343 | ) 344 | .with_function("intrusive channel with producers", |b, &&producers| { 345 | b.iter(|| intrusivechan_bounded_variable_tx(producers)) 346 | }) 347 | .with_function( 348 | "intrusive unbuffered channel with producers", 349 | |b, &&producers| { 350 | b.iter(|| intrusivechan_unbuffered_variable_tx(producers)) 351 | }, 352 | ) 353 | .with_function( 354 | "futures::channel::mpsc with producers", 355 | |b, &&producers| b.iter(|| futchan_bounded_variable_tx(producers)), 356 | ) 357 | .with_function( 358 | "tokio::sync::mpsc with producers", 359 | |b, &&producers| { 360 | b.iter(|| tokiochan_bounded_variable_tx(producers)) 361 | }, 362 | ), 363 | ); 364 | } 365 | 366 | criterion_group! { 367 | name = benches; 368 | config = Criterion::default().measurement_time(Duration::from_secs(10)).nresamples(50); 369 | targets = criterion_benchmark 370 | } 371 | criterion_main!(benches); 372 | -------------------------------------------------------------------------------- /benches/mutex.rs: -------------------------------------------------------------------------------- 1 | //! Benchmarks for asynchronous Mutex implementations 2 | 3 | use async_std::{sync::Mutex as AsyncStdMutex, task}; 4 | use criterion::{criterion_group, criterion_main, Benchmark, Criterion}; 5 | use futures_intrusive::sync::{Mutex as IntrusiveMutex, Semaphore}; 6 | use tokio::sync::Mutex as TokioMutex; 7 | 8 | use std::future::Future; 9 | use std::sync::Arc; 10 | use std::time::Duration; 11 | 12 | mod utils; 13 | use utils::Yield; 14 | 15 | const ITERATIONS: usize = 300; 16 | const CONTENTION_THREADS: usize = 10; 17 | /// With a 25% chance the operation inside the async Mutex blocks, 18 | /// which is emulated by yielding `NR_YIELDS` times back to the executor.
19 | const YIELD_CHANCE: usize = 25; 20 | const NR_YIELDS: usize = 10; 21 | 22 | /// Extension trait to add support for `block_on` for runtimes which do not 23 | /// natively support it as a member function 24 | trait Block { 25 | fn block_on<F: Future<Output = ()>>(&self, f: F); 26 | } 27 | 28 | struct FakeAsyncStdRuntime; 29 | 30 | impl Block for FakeAsyncStdRuntime { 31 | fn block_on<F: Future<Output = ()>>(&self, f: F) { 32 | task::block_on(f); 33 | } 34 | } 35 | 36 | macro_rules! run_with_mutex { 37 | ( 38 | $mutex_constructor: expr, 39 | $nr_tasks: expr, 40 | $nr_iterations: expr, 41 | $spawn_fn: expr 42 | ) => { 43 | let m = Arc::new($mutex_constructor); 44 | let mut tasks = Vec::new(); 45 | let sem = Arc::new(Semaphore::new(false, 0)); 46 | 47 | for _ in 0..$nr_tasks { 48 | let m = m.clone(); 49 | let s = sem.clone(); 50 | tasks.push($spawn_fn(async move { 51 | for count in 0..$nr_iterations { 52 | let _ = m.lock().await; 53 | // Asynchronous mutexes are intended to guard over 54 | // operations which are potentially task-blocking and take 55 | // a certain amount of time to complete. In order to simulate 56 | // the behavior we yield a certain number of times back 57 | // to the executor. This is more consistent than e.g. using 58 | // a timer, and the overhead of yielding is the same for the 59 | // various Mutex implementations. 60 | 61 | if YIELD_CHANCE != 0 && (count % (100 / YIELD_CHANCE) == 0) { 62 | Yield::new(NR_YIELDS).await; 63 | } 64 | } 65 | s.release(1); 66 | })); 67 | } 68 | 69 | sem.acquire($nr_tasks).await; 70 | }; 71 | } 72 | 73 | macro_rules! contention { 74 | ( 75 | $b: ident, 76 | $rt_setup: expr, $spawn_fn: expr, 77 | $mutex_constructor: expr, $nr_iterations: expr 78 | ) => { 79 | #[allow(unused_mut)] // mut is only required for some runtimes 80 | let mut rt = $rt_setup; 81 | $b.iter(|| { 82 | rt.block_on(async { 83 | run_with_mutex!( 84 | $mutex_constructor, 85 | CONTENTION_THREADS, 86 | $nr_iterations, 87 | $spawn_fn 88 | ); 89 | }) 90 | }); 91 | }; 92 | } 93 | 94 | macro_rules! no_contention { 95 | ( 96 | $b: ident, 97 | $rt_setup: expr, $spawn_fn: expr, 98 | $mutex_constructor: expr, $nr_iterations: expr 99 | ) => { 100 | #[allow(unused_mut)] // mut is only required for some runtimes 101 | let mut rt = $rt_setup; 102 | $b.iter(|| { 103 | rt.block_on(async { 104 | run_with_mutex!( 105 | $mutex_constructor, 106 | 1, 107 | $nr_iterations, 108 | $spawn_fn 109 | ); 110 | }) 111 | }); 112 | }; 113 | } 114 | 115 | macro_rules!
benchmarks { 116 | ( 117 | $c: ident, 118 | $rt_name: literal, $rt_setup: expr, $spawn_fn: expr, 119 | $mutex_name: literal, $mutex_constructor: expr 120 | ) => { 121 | $c.bench( 122 | concat!($rt_name, "/", $mutex_name), 123 | Benchmark::new("contention", |b| { 124 | contention!( 125 | b, 126 | $rt_setup, 127 | $spawn_fn, 128 | $mutex_constructor, 129 | ITERATIONS 130 | ); 131 | }) 132 | .with_function("no_contention", |b| { 133 | no_contention!( 134 | b, 135 | $rt_setup, 136 | $spawn_fn, 137 | $mutex_constructor, 138 | ITERATIONS 139 | ); 140 | }), 141 | ); 142 | }; 143 | } 144 | 145 | fn tokio_rt_intrusive_fair_benchmarks(c: &mut Criterion) { 146 | benchmarks!( 147 | c, 148 | "tokio_rt", 149 | tokio::runtime::Runtime::new().unwrap(), 150 | tokio::spawn, 151 | "futures_intrusive(fair=true)", 152 | IntrusiveMutex::new((), true) 153 | ); 154 | } 155 | 156 | fn tokio_rt_intrusive_unfair_benchmarks(c: &mut Criterion) { 157 | benchmarks!( 158 | c, 159 | "tokio_rt", 160 | tokio::runtime::Runtime::new().unwrap(), 161 | tokio::spawn, 162 | "futures_intrusive(fair=false)", 163 | IntrusiveMutex::new((), false) 164 | ); 165 | } 166 | 167 | fn tokio_rt_async_std_benchmarks(c: &mut Criterion) { 168 | benchmarks!( 169 | c, 170 | "tokio_rt", 171 | tokio::runtime::Runtime::new().unwrap(), 172 | tokio::spawn, 173 | "async_std", 174 | AsyncStdMutex::new(()) 175 | ); 176 | } 177 | 178 | fn tokio_rt_tokio_benchmarks(c: &mut Criterion) { 179 | benchmarks!( 180 | c, 181 | "tokio_rt", 182 | tokio::runtime::Runtime::new().unwrap(), 183 | tokio::spawn, 184 | "tokio", 185 | TokioMutex::new(()) 186 | ); 187 | } 188 | 189 | fn async_std_intrusive_fair_benchmarks(c: &mut Criterion) { 190 | benchmarks!( 191 | c, 192 | "async_std_rt", 193 | FakeAsyncStdRuntime {}, 194 | task::spawn, 195 | "futures_intrusive(fair=true)", 196 | IntrusiveMutex::new((), true) 197 | ); 198 | } 199 | 200 | fn async_std_intrusive_unfair_benchmarks(c: &mut Criterion) { 201 | benchmarks!( 202 | c, 203 | "async_std_rt", 204 | FakeAsyncStdRuntime {}, 205 | task::spawn, 206 | "futures_intrusive(fair=false)", 207 | IntrusiveMutex::new((), false) 208 | ); 209 | } 210 | 211 | fn async_std_async_std_benchmarks(c: &mut Criterion) { 212 | benchmarks!( 213 | c, 214 | "async_std_rt", 215 | FakeAsyncStdRuntime {}, 216 | task::spawn, 217 | "async_std", 218 | AsyncStdMutex::new(()) 219 | ); 220 | } 221 | 222 | fn async_std_tokio_benchmarks(c: &mut Criterion) { 223 | benchmarks!( 224 | c, 225 | "async_std_rt", 226 | FakeAsyncStdRuntime {}, 227 | task::spawn, 228 | "tokio", 229 | TokioMutex::new(()) 230 | ); 231 | } 232 | 233 | criterion_group! { 234 | name = benches; 235 | config = Criterion::default().measurement_time(Duration::from_secs(10)); 236 | targets = 237 | // tokio 238 | tokio_rt_intrusive_fair_benchmarks, 239 | tokio_rt_intrusive_unfair_benchmarks, 240 | tokio_rt_async_std_benchmarks, 241 | tokio_rt_tokio_benchmarks, 242 | // async-std 243 | async_std_intrusive_fair_benchmarks, 244 | async_std_intrusive_unfair_benchmarks, 245 | async_std_async_std_benchmarks, 246 | async_std_tokio_benchmarks 247 | } 248 | criterion_main!(benches); 249 | -------------------------------------------------------------------------------- /benches/semaphore.rs: -------------------------------------------------------------------------------- 1 | //! 
Benchmarks for asynchronous Semaphore implementations 2 | 3 | use criterion::{criterion_group, criterion_main, Benchmark, Criterion}; 4 | use futures_intrusive::sync::{ 5 | Semaphore as IntrusiveSemaphore, 6 | SemaphoreReleaser as IntrusiveSemaphoreReleaser, 7 | }; 8 | use tokio::sync::{ 9 | Semaphore as TokioSemaphore, SemaphorePermit as TokioSemaphorePermit, 10 | }; 11 | 12 | use std::future::Future; 13 | use std::sync::Arc; 14 | use std::time::Duration; 15 | 16 | mod utils; 17 | use utils::Yield; 18 | 19 | /// How often each task should acquire the semaphore 20 | const NR_ACQUIRES: usize = 50; 21 | /// How many tasks are used 22 | const TASKS: usize = 200; 23 | 24 | /// The amount of available permits when we are testing strong contention 25 | const CONTENTION_PERMITS: usize = 100; 26 | /// The amount of available permits when testing light contention 27 | const NORMAL_PERMITS: usize = 180; 28 | /// The amount of available permits when testing no contention 29 | const UNCONTENDED_PERMITS: usize = TASKS; 30 | 31 | /// The number of yields we perform after the Semaphore was acquired 32 | const NR_YIELDS: usize = 4; 33 | 34 | /// Extension trait to add support for `block_on` for runtimes which do not 35 | /// natively support it as a member function 36 | trait Block { 37 | fn block_on<F: Future<Output = ()>>(&self, f: F); 38 | } 39 | 40 | fn create_intrusive_fair_semaphore(permits: usize) -> IntrusiveSemaphore { 41 | IntrusiveSemaphore::new(true, permits) 42 | } 43 | 44 | fn create_intrusive_unfair_semaphore(permits: usize) -> IntrusiveSemaphore { 45 | IntrusiveSemaphore::new(false, permits) 46 | } 47 | 48 | fn create_tokio_semaphore(permits: usize) -> TokioSemaphore { 49 | TokioSemaphore::new(permits) 50 | } 51 | 52 | async fn acquire_intrusive_semaphore( 53 | sem: &IntrusiveSemaphore, 54 | ) -> IntrusiveSemaphoreReleaser<'_> { 55 | sem.acquire(1).await 56 | } 57 | 58 | async fn acquire_tokio_semaphore( 59 | sem: &TokioSemaphore, 60 | ) -> TokioSemaphorePermit<'_> { 61 | sem.acquire().await.unwrap() 62 | } 63 | 64 | macro_rules! run_with_semaphore { 65 | ( 66 | $nr_tasks: expr, 67 | $nr_iterations: expr, 68 | $nr_permits: expr, 69 | $spawn_fn: expr, 70 | $create_semaphore_fn: ident, 71 | $acquire_fn: ident, 72 | ) => { 73 | let semaphore = Arc::new($create_semaphore_fn($nr_permits)); 74 | let mut tasks = Vec::new(); 75 | let sem = Arc::new(IntrusiveSemaphore::new(false, 0)); 76 | 77 | for _ in 0..$nr_tasks { 78 | let semaphore = semaphore.clone(); 79 | let s = sem.clone(); 80 | tasks.push($spawn_fn(async move { 81 | for _count in 0..$nr_iterations { 82 | let _releaser = $acquire_fn(&*semaphore).await; 83 | Yield::new(NR_YIELDS).await; 84 | } 85 | s.release(1); 86 | })); 87 | } 88 | 89 | sem.acquire($nr_tasks).await; 90 | }; 91 | } 92 | 93 | macro_rules! bench { 94 | ( 95 | $b: ident, 96 | $rt_setup: expr, 97 | $spawn_fn: expr, 98 | $nr_iterations: expr, 99 | $nr_permits: expr, 100 | $create_semaphore_fn: ident, 101 | $acquire_fn: ident, 102 | ) => { 103 | #[allow(unused_mut)] // mut is only required for some runtimes 104 | let mut rt = $rt_setup; 105 | $b.iter(|| { 106 | rt.block_on(async { 107 | run_with_semaphore!( 108 | TASKS, 109 | $nr_iterations, 110 | $nr_permits, 111 | $spawn_fn, 112 | $create_semaphore_fn, 113 | $acquire_fn, 114 | ); 115 | }) 116 | }); 117 | }; 118 | } 119 | 120 | macro_rules!
benchmarks { 121 | ( 122 | $c: ident, 123 | $rt_name: literal, 124 | $rt_setup: expr, 125 | $spawn_fn: expr, 126 | $semaphore_name: literal, 127 | $create_semaphore_fn: ident, 128 | $acquire_fn: ident, 129 | ) => { 130 | $c.bench( 131 | concat!($rt_name, "/", $semaphore_name), 132 | Benchmark::new("heavy contention", |b| { 133 | bench!( 134 | b, 135 | $rt_setup, 136 | $spawn_fn, 137 | NR_ACQUIRES, 138 | CONTENTION_PERMITS, 139 | $create_semaphore_fn, 140 | $acquire_fn, 141 | ); 142 | }) 143 | .with_function("normal contention", |b| { 144 | bench!( 145 | b, 146 | $rt_setup, 147 | $spawn_fn, 148 | NR_ACQUIRES, 149 | NORMAL_PERMITS, 150 | $create_semaphore_fn, 151 | $acquire_fn, 152 | ); 153 | }) 154 | .with_function("no contention", |b| { 155 | bench!( 156 | b, 157 | $rt_setup, 158 | $spawn_fn, 159 | NR_ACQUIRES, 160 | UNCONTENDED_PERMITS, 161 | $create_semaphore_fn, 162 | $acquire_fn, 163 | ); 164 | }), 165 | ); 166 | }; 167 | } 168 | 169 | fn tokio_rt_intrusive_fair_benchmarks(c: &mut Criterion) { 170 | benchmarks!( 171 | c, 172 | "tokio_rt", 173 | tokio::runtime::Runtime::new().unwrap(), 174 | tokio::spawn, 175 | "futures_intrusive(fair=true)", 176 | create_intrusive_fair_semaphore, 177 | acquire_intrusive_semaphore, 178 | ); 179 | } 180 | 181 | fn tokio_rt_intrusive_unfair_benchmarks(c: &mut Criterion) { 182 | benchmarks!( 183 | c, 184 | "tokio_rt", 185 | tokio::runtime::Runtime::new().unwrap(), 186 | tokio::spawn, 187 | "futures_intrusive(fair=false)", 188 | create_intrusive_unfair_semaphore, 189 | acquire_intrusive_semaphore, 190 | ); 191 | } 192 | 193 | fn tokio_rt_tokio_benchmarks(c: &mut Criterion) { 194 | benchmarks!( 195 | c, 196 | "tokio_rt", 197 | tokio::runtime::Runtime::new().unwrap(), 198 | tokio::spawn, 199 | "tokio", 200 | create_tokio_semaphore, 201 | acquire_tokio_semaphore, 202 | ); 203 | } 204 | 205 | criterion_group! { 206 | name = benches; 207 | config = Criterion::default().measurement_time(Duration::from_secs(10)); 208 | targets = 209 | tokio_rt_intrusive_fair_benchmarks, 210 | tokio_rt_intrusive_unfair_benchmarks, 211 | tokio_rt_tokio_benchmarks, 212 | } 213 | criterion_main!(benches); 214 | -------------------------------------------------------------------------------- /benches/utils/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | /// A Future which yields to the executor for a given amount of iterations 8 | /// and resolves after this 9 | pub struct Yield { 10 | iter: usize, 11 | } 12 | 13 | impl Yield { 14 | pub fn new(iter: usize) -> Yield { 15 | Yield { iter } 16 | } 17 | } 18 | 19 | impl Future for Yield { 20 | type Output = (); 21 | 22 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { 23 | if self.iter == 0 { 24 | Poll::Ready(()) 25 | } else { 26 | self.iter -= 1; 27 | cx.waker().wake_by_ref(); 28 | Poll::Pending 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /examples/cancellation.rs: -------------------------------------------------------------------------------- 1 | //! This example demonstrates the application of structured concurrency and 2 | //! gracefully cancellation in an async Rust application. 3 | //! An async [`ManualResetEvent`] as provided by `futures-intrusive` is a used 4 | //! as the main signalization mechanism for cooperative cancellation. 5 | //! 6 | //! Usage: cargo run --example cancellation 7 | //! 
After some seconds, press Ctrl+C and observe the results 8 | //! 9 | //! Structured concurrency is an application model where the lifetime of any 10 | //! concurrent operation is strictly contained within the lifetime of its 11 | //! parent operation. 12 | //! 13 | //! The concept is described in further detail within 14 | //! https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/ 15 | //! https://trio.discourse.group/t/structured-concurrency-kickoff/55 16 | //! 17 | //! The application of structured concurrency principles simplifies concurrent 18 | //! programs. It allows for easier reasoning about which concurrent tasks run 19 | //! at a given point of time, since no subtask will ever keep running after 20 | //! its parent task has finished. This makes it impossible for the subtask 21 | //! to wait on a certain condition that will no longer occur - or to modify the 22 | //! state of the program when we no longer expect it. 23 | //! 24 | //! One challenge for structured concurrency is the graceful cancellation of 25 | //! subtasks. Within Rust's `Future`s and `async/await` programming model it is 26 | //! generally easy to stop asynchronous subtasks: We can just `drop` their 27 | //! associated `Future`s, which will cancel those tasks. However this forceful 28 | //! cancellation comes with several downsides: 29 | //! - The subtasks can no longer perform any cleanup work that might be helpful. 30 | //! Only code inside their destructors can run if the tasks are cancelled. 31 | //! - The subtasks can't return any value. 32 | //! 33 | //! Therefore a cooperative and graceful cancellation is sometimes preferred. In 34 | //! this example we implement graceful cancellation in order to allow a sub-task 35 | //! to return its calculated values. 36 | //! 37 | //! Graceful cancellation is implemented in 3 steps: 38 | //! 1. Signalling the cancellation: One component signals the sub-tasks that they 39 | //! should stop their work as soon as it is convenient for them. The 40 | //! cancellation signal can either originate from a parent task, the sub task 41 | //! itself, or one of the sibling tasks. In order to distribute cancellation 42 | //! signals we utilize an async `ManualResetEvent` as a cancellation token. 43 | //! This data structure allows signalling an arbitrary number of tasks. 44 | //! The signal can be emitted by any component which has access to 45 | //! `ManualResetEvent`. 46 | //! 2. Detecting the signal inside sub-tasks and shutting down. In order to 47 | //! support graceful cancellation, subtasks need to detect the condition that 48 | //! they are supposed to shut down. In order to do this we use the futures-rs 49 | //! `select!` macro to wait in parallel for either the async calculation on 50 | //! the "normal path" to complete or for the cancellation to get signalled. 51 | //! Not all subtasks have to explicitly support this. Some of them just need 52 | //! to forward the cancellation token to their child tasks. When these finish 53 | //! early due to cancellation, the parent will also finish early. 54 | //! Child tasks can return an error result in order to indicate that they have 55 | //! returned due to the explicit cancellation. E.g. `Err(Cancelled)` could be 56 | //! returned to the parent. 57 | //! 3. The parent task waits for all sub-tasks to shut down, by waiting on 58 | //! their wait-handles (which in our case are `Future`s that can be awaited 59 | //! via `await` or various `join` functions). 60 | //! 61 | //!
After these steps have completed, all sub-tasks of a given parent have 62 | //! finished and the parent task can also finish. It can thereby return the 63 | //! results of the child tasks if required. 64 | //! 65 | //! The implementation is similar in spirit to cancellation in the Go programming 66 | //! language through the Context parameter (https://blog.golang.org/context). 67 | //! The main difference is that a `ManualResetEvent` is used for signalling 68 | //! cancellation instead of a `Channel` - and that we can check for the 69 | //! cancellation signal on every `await` of a `Future`. Checking for cancellation 70 | //! is not constrained to interaction with `Channel` types. 71 | //! E.g. we can easily wait on receiving data on a socket while in parallel 72 | //! waiting for cancellation. This is not directly possible in Go. 73 | //! 74 | //! It is also similar to the `CancellationToken` mechanism in .NET. There the 75 | //! `CancellationToken` also needs to get forwarded as a parameter. 76 | //! 77 | //! This example demonstrates the mechanisms via a distributed "FizzBuzz" checker. 78 | //! The "algorithm" uses a parent task which uses 2 child tasks for its work. 79 | //! When the user cancels the program, a graceful shutdown as described should 80 | //! be performed. This allows the user to retrieve the results of the algorithm. 81 | 82 | use futures::{executor::block_on, join, select}; 83 | use futures_intrusive::{ 84 | channel::LocalUnbufferedChannel, 85 | sync::{LocalManualResetEvent, ManualResetEvent}, 86 | timer::{StdClock, Timer, TimerService}, 87 | }; 88 | use lazy_static::lazy_static; 89 | use signal_hook; 90 | use std::{ 91 | sync::{ 92 | atomic::{AtomicBool, Ordering}, 93 | Arc, 94 | }, 95 | thread::{sleep, spawn}, 96 | time::Duration, 97 | }; 98 | 99 | /// The result of our search for FizzBuzz values 100 | #[derive(Debug, Default)] 101 | struct SearchResult { 102 | highest_fizz: Option<usize>, 103 | highest_buzz: Option<usize>, 104 | highest_fizzbuzz: Option<usize>, 105 | } 106 | 107 | /// This is our main async function that supports cooperative cancellation. 108 | /// The purpose of this function is to check values up to `max` for their 109 | /// fizzbuzzness and return the highest values in each category. 110 | /// 111 | /// The method can be cancelled by signalling the cancellation token. In this 112 | /// case the method will return its latest findings. 113 | /// This is in contrast to just cancelling a `Future` - which would not allow us 114 | /// to return any results. Cancellation tokens can be passed as `Arc` 115 | /// if multiple independent subtasks need to get cancelled, or as a plain reference 116 | /// if only subtasks of a single task need to get signalled. For tasks which run 117 | /// on a single-threaded executor `LocalManualResetEvent` provides an even 118 | /// lower-overhead solution which does not require any internal synchronization. 119 | async fn fizzbuzz_search( 120 | max: usize, 121 | cancellation_token: Arc<ManualResetEvent>, 122 | ) -> SearchResult { 123 | // We start two child-tasks: 124 | // - One produces values to check 125 | // - The other task will check the values and store the results in the 126 | // result data structure. 127 | // 128 | // Both tasks are connected via a channel. Since the tasks are running as 129 | // subtasks of the same task in a single-threaded executor, we can use an 130 | // extremely efficient LocalChannel for this.
131 | // 132 | // In order to make things a bit more interesting we do not utilize the same 133 | // cancellation signal for both tasks (which would also be a valid solution). 134 | // Instead we implement a sequential shutdown: 135 | // - When the main `cancellation_token` is signalled from the outside, 136 | // only the producer task will shut down. 137 | // - Before the producer task exits, it will signal another cancellation 138 | // token. That one will cause the checker task to shut down. 139 | let channel = LocalUnbufferedChannel::<usize>::new(); 140 | let checker_cancellation_token = LocalManualResetEvent::new(false); 141 | let producer_future = producer_task( 142 | max, 143 | &channel, 144 | &cancellation_token, 145 | &checker_cancellation_token, 146 | ); 147 | let checker_future = check_task(&channel, &checker_cancellation_token); 148 | 149 | // Here we wait for both tasks to complete. Waiting for all subtasks to 150 | // complete is one important part of structured concurrency. 151 | let results = join!(producer_future, checker_future); 152 | println!("All subtasks have completed"); 153 | 154 | // Since we waited for all subtasks to complete we can return the search 155 | // result. 156 | // If the async subtasks had been forcefully instead of cooperatively 157 | // cancelled the results would not have been available. 158 | results.1 159 | } 160 | 161 | /// The producing task produces all values that need to get checked for 162 | /// fizzbuzzness. 163 | /// The task will run until it either has generated all values to check or 164 | /// until the task gets cancelled. 165 | async fn producer_task( 166 | max: usize, 167 | channel: &LocalUnbufferedChannel<usize>, 168 | main_cancellation_token: &ManualResetEvent, 169 | consumer_cancellation_token: &LocalManualResetEvent, 170 | ) { 171 | for value in 1..max { 172 | select! { 173 | result = channel.send(value) => { 174 | if result.is_err() { 175 | unreachable!("This can not happen in this example"); 176 | } 177 | }, 178 | _ = main_cancellation_token.wait() => { 179 | // The operation was cancelled 180 | break; 181 | } 182 | }; 183 | } 184 | 185 | // No more values to check or we have been cancelled. 186 | // In this case we signal the `cancellation_token`, in order to let the 187 | // consumer shut down. 188 | // Alternatively we could have `.close()`d the channel here to signal the 189 | // consumer to shut down. However we mainly want to demonstrate the 190 | // cancellation concept here. 191 | println!("Goodbye from the producer. Now signalling the checker"); 192 | consumer_cancellation_token.set(); 193 | } 194 | 195 | /// The check task runs until it gets cancelled. That can happen either due 196 | /// to a cancellation being signalled, or due to the input channel getting 197 | /// closed. In a real application one of those strategies would be sufficient. 198 | /// Since this example focuses on cancellation and structured concurrency, this 199 | /// task will **always** get shut down via the cancellation token. 200 | /// 201 | /// It is important that this task runs to completion instead of getting 202 | /// forcefully cancelled. Otherwise no results would be available. 203 | async fn check_task( 204 | channel: &LocalUnbufferedChannel<usize>, 205 | cancellation_token: &LocalManualResetEvent, 206 | ) -> SearchResult { 207 | // Initialize the result with `None`s 208 | let mut result: SearchResult = Default::default(); 209 | 210 | loop { 211 | select!
{ 212 | value = channel.receive() => { 213 | if let Some(value) = value { 214 | // Received a value that needs to get checked for fizzbuzzness 215 | println!("Checking {} for fizzbuzzness", value); 216 | match (value % 3 == 0, value % 5 == 0) { 217 | (true, true) => result.highest_fizzbuzz = Some(value), 218 | (true, false) => result.highest_fizz = Some(value), 219 | (false, true) => result.highest_buzz = Some(value), 220 | _ => {}, 221 | } 222 | } else { 223 | unreachable!("this is not allowed in this example"); 224 | // Otherwise just doing the following here would be ok: 225 | // break; 226 | } 227 | }, 228 | _ = cancellation_token.wait() => { 229 | // The operation was cancelled 230 | break; 231 | } 232 | }; 233 | 234 | // Waits until the timer elapses or the task gets cancelled - whatever 235 | // comes first. This slows down our consumer, and introduces another 236 | // cancellation point. Since we use an unbuffered channel to accept 237 | // values to check from the producer, the producer is slowed down by 238 | // the same amount of time. 239 | select! { 240 | _ = get_timer().delay(Duration::from_millis(1000)) => {}, 241 | _ = cancellation_token.wait() => { 242 | // The operation was cancelled 243 | break; 244 | }, 245 | } 246 | } 247 | 248 | println!("Goodbye from the checker"); 249 | result 250 | } 251 | 252 | fn main() { 253 | // Spawn a background thread which advances the timer 254 | let timer_join_handle = spawn(move || { 255 | timer_thread(); 256 | }); 257 | 258 | // This is the asynchronous ManualResetEvent that will be used as a cancellation 259 | // token. When the cancellation is requested, the token will be set. Thereby 260 | // all tasks which are waiting for cancellation will get signalled and awoken. 261 | let cancellation_token = Arc::new(ManualResetEvent::new(false)); 262 | 263 | // This sets up a signal listener. When SIGINT (Ctrl+C) is signalled, 264 | // the Cancellation Token is set - which will cause the async task to run 265 | // to completion. Since setting the cancellation token is not signal safe, 266 | // we apply a workaround and set only an atomic variable in the signal handler. 267 | // A background thread regularly checks the signal and sets the event once 268 | // the signal has been observed. 269 | let cloned_token = cancellation_token.clone(); // Clone for the background thread 270 | std::thread::spawn(move || { 271 | let term = Arc::new(AtomicBool::new(false)); 272 | signal_hook::flag::register(signal_hook::SIGINT, Arc::clone(&term)) 273 | .unwrap(); 274 | while !term.load(Ordering::Relaxed) { 275 | std::thread::sleep(Duration::from_millis(100)); 276 | } 277 | println!("Starting cancellation"); 278 | cloned_token.set(); 279 | }); 280 | 281 | // Start our async task. This gets the cancellation token passed as argument 282 | let result = block_on(fizzbuzz_search(std::usize::MAX, cancellation_token)); 283 | // At this point in time, the task has finished - either due to running to 284 | // completion or due to being cancelled. The task can return results in both 285 | // situations. 286 | 287 | println!("Discovered these awesome results: {:?}", result); 288 | 289 | // Stop the timer thread 290 | STOP_TIMER.store(true, Ordering::Relaxed); 291 | timer_join_handle.join().unwrap(); 292 | } 293 | 294 | // Some setup for the asynchronously awaitable timer 295 | lazy_static!
{ 296 | static ref STD_CLOCK: StdClock = StdClock::new(); 297 | static ref TIMER_SERVICE: TimerService = TimerService::new(&*STD_CLOCK); 298 | static ref STOP_TIMER: AtomicBool = AtomicBool::new(false); 299 | } 300 | 301 | /// Returns a reference to the global timer 302 | fn get_timer() -> &'static dyn Timer { 303 | &*TIMER_SERVICE 304 | } 305 | 306 | /// A background thread that drives the async timer service 307 | fn timer_thread() { 308 | while !STOP_TIMER.load(Ordering::Relaxed) { 309 | sleep(Duration::from_millis(25)); 310 | TIMER_SERVICE.check_expirations(); 311 | } 312 | } 313 | -------------------------------------------------------------------------------- /examples/philosophers.rs: -------------------------------------------------------------------------------- 1 | //! The example in this file demonstrates a solution for the 2 | //! [Dining Philosophers Problem](https://en.wikipedia.org/wiki/Dining_philosophers_problem), 3 | //! which uses async tasks and futures_intrusive primitives in order to 4 | //! simulate philosophers. 5 | 6 | #![recursion_limit = "256"] 7 | 8 | use futures::{executor::block_on, join, select}; 9 | use futures_intrusive::{ 10 | sync::LocalMutex, 11 | timer::{StdClock, Timer, TimerService}, 12 | }; 13 | use lazy_static::lazy_static; 14 | use pin_utils::pin_mut; 15 | use std::sync::atomic::{AtomicBool, Ordering}; 16 | use std::thread::{sleep, spawn}; 17 | use std::time::Duration; 18 | 19 | /// We simulate the ownership of a fork through an asynchronously awaitable mutex. 20 | /// In order to acquire a fork, the philosopher acquires the Mutex. 21 | /// In order to release a fork, the philosopher releases the LockGuard. This 22 | /// happens automatically, when the LockGuard goes out of scope. 23 | /// Since all philosophers are subtasks of the same top-level `async` task, 24 | /// a lightweight non-thread-safe `LocalMutex` can be utilized. 25 | type Fork = LocalMutex<()>; 26 | 27 | // Some setup for the asynchronously awaitable timer 28 | lazy_static! { 29 | static ref STD_CLOCK: StdClock = StdClock::new(); 30 | static ref TIMER_SERVICE: TimerService = TimerService::new(&*STD_CLOCK); 31 | static ref STOP_TIMER: AtomicBool = AtomicBool::new(false); 32 | } 33 | 34 | /// Returns a reference to the global timer 35 | fn get_timer() -> &'static dyn Timer { 36 | &*TIMER_SERVICE 37 | } 38 | 39 | /// Returns a random delay duration between `min` and `(min + max_extra)` 40 | fn rand_delay(min: Duration, max_extra: Duration) -> Duration { 41 | let extra_ms = rand::random::<u64>() % (max_extra.as_millis() as u64); 42 | min + Duration::from_millis(extra_ms) 43 | } 44 | 45 | /// How often a philosopher should eat 46 | const TO_EAT: usize = 5; 47 | 48 | /// Simulates a single philosopher 49 | async fn philosopher_task<'a>( 50 | name: &'a str, 51 | left_fork: &'a Fork, 52 | right_fork: &'a Fork, 53 | ) { 54 | println!("{} is ready to go", name); 55 | let mut eaten: usize = 0; 56 | 57 | while eaten != TO_EAT { 58 | println!("{} is thinking", name); 59 | get_timer() 60 | .delay(rand_delay( 61 | Duration::from_millis(1000), 62 | Duration::from_millis(1000), 63 | )) 64 | .await; 65 | { 66 | println!("{} is starting to pick up forks", name); 67 | // Create futures for acquiring both forks 68 | let get_left_fork_future = left_fork.lock(); 69 | pin_mut!(get_left_fork_future); 70 | let get_right_fork_future = right_fork.lock(); 71 | pin_mut!(get_right_fork_future); 72 | 73 | // This sets up a timer.
If the philosopher can't obtain both forks 74 | // during that, they put back all acquired forks and start thinking 75 | // again. 76 | let abort_get_forks_future = 77 | get_timer().delay(Duration::from_millis(300)); 78 | pin_mut!(abort_get_forks_future); 79 | 80 | select! { 81 | _ = get_left_fork_future => { 82 | println!("{} got the left fork and tries to get the right fork", name); 83 | 84 | select! { 85 | _ = get_right_fork_future => { 86 | println!("{} got the right fork and starts eating", name); 87 | get_timer().delay( 88 | rand_delay(Duration::from_millis(1000), 89 | Duration::from_millis(200))).await; 90 | eaten += 1; 91 | println!("{} has finished eating [ate {} times]", name, eaten); 92 | }, 93 | _ = abort_get_forks_future => { 94 | println!("{} could not acquire the right fork", name); 95 | }, 96 | } 97 | }, 98 | _ = get_right_fork_future => { 99 | println!("{} got the right fork and tries to get the left fork", name); 100 | 101 | select! { 102 | _ = get_left_fork_future => { 103 | println!("{} got the left fork and starts eating", name); 104 | get_timer().delay( 105 | rand_delay(Duration::from_millis(1000), 106 | Duration::from_millis(200))).await; 107 | eaten += 1; 108 | println!("{} has finished eating [ate {} times]", name, eaten); 109 | }, 110 | _ = abort_get_forks_future => { 111 | println!("{} could not acquire the left fork", name); 112 | }, 113 | } 114 | }, 115 | _ = abort_get_forks_future => { 116 | println!("{} could not acquire any fork", name); 117 | }, 118 | } 119 | } 120 | } 121 | 122 | println!("{} has finished", name); 123 | } 124 | 125 | async fn simulate_philosophers() { 126 | // Create the forks for the philosophers 127 | let forks: [Fork; 5] = [ 128 | Fork::new((), true), 129 | Fork::new((), true), 130 | Fork::new((), true), 131 | Fork::new((), true), 132 | Fork::new((), true), 133 | ]; 134 | 135 | // Create a task for each philosopher 136 | let p1 = philosopher_task("A", &forks[4], &forks[0]); 137 | let p2 = philosopher_task("B", &forks[0], &forks[1]); 138 | let p3 = philosopher_task("C", &forks[1], &forks[2]); 139 | let p4 = philosopher_task("D", &forks[2], &forks[3]); 140 | let p5 = philosopher_task("E", &forks[3], &forks[4]); 141 | 142 | // Wait until all philosophers have finished eating 143 | join!(p1, p2, p3, p4, p5); 144 | } 145 | 146 | fn main() { 147 | // Spawn a background thread which advances the timer 148 | let join_handle = spawn(move || { 149 | timer_thread(); 150 | }); 151 | 152 | // And simulate the philosophers 153 | block_on(simulate_philosophers()); 154 | 155 | // Stop the timer thread 156 | STOP_TIMER.store(true, Ordering::Relaxed); 157 | join_handle.join().unwrap(); 158 | } 159 | 160 | fn timer_thread() { 161 | while !STOP_TIMER.load(Ordering::Relaxed) { 162 | sleep(Duration::from_millis(25)); 163 | TIMER_SERVICE.check_expirations(); 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /examples/readme.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | This folder contains examples for the usage of this library. 
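
The following examples are included:
- `cancellation.rs`: demonstrates how a long-running async task can be
  cancelled cooperatively, using a `ManualResetEvent` as a cancellation token.
- `philosophers.rs`: solves the dining philosophers problem with async tasks,
  `LocalMutex`-guarded forks, and the asynchronously awaitable timer.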
4 | 5 | Examples can be started in the following fashion: 6 | ``` 7 | cargo run --example name_of_example 8 | ``` -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | futures-intrusive 2 | ================= 3 | 4 | This crate provides a variety of `Futures`-based and `async/await` compatible 5 | types that are based on the idea of intrusive collections: 6 | - Channels in a variety of flavors: 7 | - Oneshot 8 | - Multi-Producer Multi-Consumer (MPMC) 9 | - State Broadcast 10 | - Synchronization Primitives: 11 | - Manual Reset Event 12 | - Mutex 13 | - Semaphore 14 | - A timer 15 | 16 | Please refer to the [documentation](https://docs.rs/futures-intrusive) for details. 17 | 18 | In addition to the documentation the examples provide a quick overview on how 19 | the primitives can be used. 20 | 21 | ## Usage 22 | 23 | Add this to your `Cargo.toml`: 24 | 25 | ```toml 26 | [dependencies] 27 | futures-intrusive = "^0.5" 28 | ``` 29 | 30 | In order to use the crate in a `no-std` environment, it needs to be compiled 31 | without default features: 32 | 33 | ```toml 34 | [dependencies] 35 | futures-intrusive = { version = "^0.5", default-features = false } 36 | ``` 37 | 38 | The crate defines a feature `alloc`, which can be used in order to re-enable 39 | `alloc` features. Also defined is `std`, which can be used in order to re-enable 40 | `std` features. 41 | 42 | ## Minimum Rust version 43 | 44 | The minimum required Rust version is 1.36, due to reliance on stable 45 | `Future`, `Context` and `Waker` types. 46 | 47 | ## License 48 | 49 | Licensed under either of 50 | 51 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 52 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 53 | 54 | at your option. -------------------------------------------------------------------------------- /src/buffer/mod.rs: -------------------------------------------------------------------------------- 1 | //! Buffer types 2 | 3 | mod real_array; 4 | pub use real_array::RealArray; 5 | 6 | mod ring_buffer; 7 | pub use ring_buffer::{ArrayBuf, RingBuf}; 8 | 9 | #[cfg(feature = "alloc")] 10 | pub use ring_buffer::FixedHeapBuf; 11 | #[cfg(feature = "alloc")] 12 | pub use ring_buffer::GrowingHeapBuf; 13 | -------------------------------------------------------------------------------- /src/buffer/real_array.rs: -------------------------------------------------------------------------------- 1 | /// A marker trait which may only be implemented for native array types, like 2 | /// `[T; 2]`. The library incorporates several components that are parameterized 3 | /// by array types, but currently Rust provides no safe mechanism to express 4 | /// that. 5 | /// 6 | /// In order to work around the limitations, these methods only accept arrays 7 | /// which implement the `RealArray` type. The library provides an implementation 8 | /// of `RealArray` for arrays up to length 64, as well as for all powers of 2 9 | /// up to 64k. 10 | /// 11 | /// In order to let the library accept arrays of bigger sizes, `RealArray` can 12 | /// be implemented by users via newtypes. 
A type as defined in the following 13 | /// example can be passed to the library: 14 | /// 15 | /// ``` 16 | /// use futures_intrusive::buffer::RealArray; 17 | /// use futures_intrusive::channel::LocalChannel; 18 | /// 19 | /// struct I32x384Array([i32; 384]); 20 | /// unsafe impl RealArray for I32x384Array { 21 | /// const LEN: usize = 384; 22 | /// } 23 | /// 24 | /// impl AsMut<[i32]> for I32x384Array { 25 | /// fn as_mut(&mut self) -> &mut [i32] { 26 | /// &mut self.0 27 | /// } 28 | /// } 29 | /// 30 | /// impl AsRef<[i32]> for I32x384Array { 31 | /// fn as_ref(&self) -> &[i32] { 32 | /// &self.0 33 | /// } 34 | /// } 35 | /// 36 | /// fn main() { 37 | /// let channel = LocalChannel::::new(); 38 | /// } 39 | /// 40 | /// ``` 41 | pub unsafe trait RealArray { 42 | /// The length of the array 43 | const LEN: usize; 44 | } 45 | 46 | macro_rules! real_array { 47 | ($($N:expr),+) => { 48 | $( 49 | unsafe impl RealArray for [T; $N] { 50 | const LEN: usize = $N; 51 | } 52 | )+ 53 | } 54 | } 55 | 56 | real_array!( 57 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 58 | 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 59 | 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60 | 59, 60, 61, 62, 63, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 61 | 32768, 65536 62 | ); 63 | -------------------------------------------------------------------------------- /src/buffer/ring_buffer.rs: -------------------------------------------------------------------------------- 1 | use super::RealArray; 2 | use core::marker::PhantomData; 3 | use core::mem::MaybeUninit; 4 | 5 | /// A Ring Buffer of items 6 | pub trait RingBuf { 7 | /// The type of stored items inside the Ring Buffer 8 | type Item; 9 | 10 | /// Creates a new instance of the Ring Buffer 11 | fn new() -> Self; 12 | /// Creates a new instance of the Ring Buffer with the given capacity. 13 | /// `RingBuf` implementations are allowed to ignore the `capacity` hint and 14 | /// utilize their default capacity. 15 | fn with_capacity(cap: usize) -> Self; 16 | 17 | /// The capacity of the buffer 18 | fn capacity(&self) -> usize; 19 | /// The amount of stored items in the buffer 20 | fn len(&self) -> usize; 21 | /// Returns true if no item is stored inside the buffer. 22 | fn is_empty(&self) -> bool { 23 | self.len() == 0 24 | } 25 | 26 | /// Returns true if there is enough space in the buffer to 27 | /// store another item. 28 | fn can_push(&self) -> bool; 29 | /// Stores the item at the end of the buffer. 30 | /// Panics if there is not enough free space. 31 | fn push(&mut self, item: Self::Item); 32 | /// Returns the oldest item inside the buffer. 33 | /// Panics if there is no available item. 34 | fn pop(&mut self) -> Self::Item; 35 | } 36 | 37 | /// An array-backed Ring Buffer 38 | /// 39 | /// `A` is the type of the backing array. The backing array must be a real 40 | /// array. In order to verify this it must satisfy the [`RealArray`] constraint. 
41 | /// In order to create a Ring Buffer backed by an array of 5 integer elements, 42 | /// the following code can be utilized: 43 | /// 44 | /// ``` 45 | /// use futures_intrusive::buffer::{ArrayBuf, RingBuf}; 46 | /// 47 | /// type Buffer5 = ArrayBuf; 48 | /// let buffer = Buffer5::new(); 49 | /// ``` 50 | pub struct ArrayBuf 51 | where 52 | A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, 53 | { 54 | buffer: MaybeUninit, 55 | size: usize, 56 | recv_idx: usize, 57 | send_idx: usize, 58 | _phantom: PhantomData, 59 | } 60 | 61 | impl core::fmt::Debug for ArrayBuf 62 | where 63 | A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, 64 | { 65 | fn fmt( 66 | &self, 67 | f: &mut core::fmt::Formatter, 68 | ) -> Result<(), core::fmt::Error> { 69 | f.debug_struct("ArrayBuf") 70 | .field("size", &self.size) 71 | .field("cap", &self.capacity()) 72 | .finish() 73 | } 74 | } 75 | 76 | impl ArrayBuf 77 | where 78 | A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, 79 | { 80 | fn next_idx(&mut self, last_idx: usize) -> usize { 81 | if last_idx + 1 == self.capacity() { 82 | return 0; 83 | } 84 | last_idx + 1 85 | } 86 | } 87 | 88 | impl RingBuf for ArrayBuf 89 | where 90 | A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, 91 | { 92 | type Item = T; 93 | 94 | fn new() -> Self { 95 | ArrayBuf { 96 | buffer: MaybeUninit::uninit(), 97 | send_idx: 0, 98 | recv_idx: 0, 99 | size: 0, 100 | _phantom: PhantomData, 101 | } 102 | } 103 | 104 | fn with_capacity(_cap: usize) -> Self { 105 | // The fixed size array backed Ring Buffer doesn't support an adjustable 106 | // capacity. Therefore only the default capacity is utilized. 107 | Self::new() 108 | } 109 | 110 | #[inline] 111 | fn capacity(&self) -> usize { 112 | A::LEN 113 | } 114 | 115 | #[inline] 116 | fn len(&self) -> usize { 117 | self.size 118 | } 119 | 120 | #[inline] 121 | fn can_push(&self) -> bool { 122 | self.len() != self.capacity() 123 | } 124 | 125 | #[inline] 126 | fn push(&mut self, value: Self::Item) { 127 | assert!(self.can_push()); 128 | // Safety: We asserted that there is available space for an item. 129 | // Therefore the memory address is valid. 130 | unsafe { 131 | let arr_ptr = self.buffer.as_mut_ptr() as *mut T; 132 | arr_ptr.add(self.send_idx).write(value); 133 | } 134 | self.send_idx = self.next_idx(self.send_idx); 135 | self.size += 1; 136 | } 137 | 138 | #[inline] 139 | fn pop(&mut self) -> Self::Item { 140 | assert!(self.size > 0); 141 | // Safety: We asserted that there is an element available, so it must 142 | // have been written before. 
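        // The value is moved out of the slot by `read`; `recv_idx` is advanced
        // and `size` decremented below, so the slot is neither read twice nor
        // dropped again in `Drop`.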
143 | let val = unsafe { 144 | let arr_ptr = self.buffer.as_mut_ptr() as *mut T; 145 | arr_ptr.add(self.recv_idx).read() 146 | }; 147 | self.recv_idx = self.next_idx(self.recv_idx); 148 | self.size -= 1; 149 | val 150 | } 151 | } 152 | 153 | impl Drop for ArrayBuf 154 | where 155 | A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, 156 | { 157 | fn drop(&mut self) { 158 | // Drop all elements which are still stored inside the buffer 159 | while self.size > 0 { 160 | // Safety: This drops only as many elements as have been written via 161 | // ptr::write and haven't read via ptr::read before 162 | unsafe { 163 | let arr_ptr = self.buffer.as_mut_ptr() as *mut T; 164 | arr_ptr.add(self.recv_idx).drop_in_place(); 165 | } 166 | self.recv_idx = self.next_idx(self.recv_idx); 167 | self.size -= 1; 168 | } 169 | } 170 | } 171 | 172 | #[cfg(feature = "alloc")] 173 | mod if_alloc { 174 | use super::*; 175 | use alloc::collections::VecDeque; 176 | 177 | /// A Ring Buffer which stores all items on the heap. 178 | /// 179 | /// The `FixedHeapBuf` will allocate its capacity ahead of time. This is good 180 | /// fit when you have a constant latency between two components. 181 | pub struct FixedHeapBuf { 182 | buffer: VecDeque, 183 | /// The capacity is stored extra, since VecDeque can allocate space for 184 | /// more elements than specified. 185 | cap: usize, 186 | } 187 | 188 | impl core::fmt::Debug for FixedHeapBuf { 189 | fn fmt( 190 | &self, 191 | f: &mut core::fmt::Formatter, 192 | ) -> Result<(), core::fmt::Error> { 193 | f.debug_struct("FixedHeapBuf") 194 | .field("size", &self.buffer.len()) 195 | .field("cap", &self.cap) 196 | .finish() 197 | } 198 | } 199 | 200 | impl RingBuf for FixedHeapBuf { 201 | type Item = T; 202 | 203 | fn new() -> Self { 204 | FixedHeapBuf { 205 | buffer: VecDeque::new(), 206 | cap: 0, 207 | } 208 | } 209 | 210 | fn with_capacity(cap: usize) -> Self { 211 | FixedHeapBuf { 212 | buffer: VecDeque::with_capacity(cap), 213 | cap, 214 | } 215 | } 216 | 217 | #[inline] 218 | fn capacity(&self) -> usize { 219 | self.cap 220 | } 221 | 222 | #[inline] 223 | fn len(&self) -> usize { 224 | self.buffer.len() 225 | } 226 | 227 | #[inline] 228 | fn can_push(&self) -> bool { 229 | self.buffer.len() != self.cap 230 | } 231 | 232 | #[inline] 233 | fn push(&mut self, value: Self::Item) { 234 | assert!(self.can_push()); 235 | self.buffer.push_back(value); 236 | } 237 | 238 | #[inline] 239 | fn pop(&mut self) -> Self::Item { 240 | assert!(self.buffer.len() > 0); 241 | self.buffer.pop_front().unwrap() 242 | } 243 | } 244 | 245 | /// A Ring Buffer which stores all items on the heap but grows dynamically. 246 | /// 247 | /// A `GrowingHeapBuf` does not allocate the capacity ahead of time, as 248 | /// opposed to the `FixedHeapBuf`. This makes it a good fit when you have 249 | /// unpredictable latency between two components, when you want to 250 | /// amortize your allocation costs or when you are using an external 251 | /// back-pressure mechanism. 252 | pub struct GrowingHeapBuf { 253 | buffer: VecDeque, 254 | /// The maximum number of elements in the buffer. 
255 | limit: usize, 256 | } 257 | 258 | impl core::fmt::Debug for GrowingHeapBuf { 259 | fn fmt( 260 | &self, 261 | f: &mut core::fmt::Formatter, 262 | ) -> Result<(), core::fmt::Error> { 263 | f.debug_struct("GrowingHeapBuf") 264 | .field("size", &self.buffer.len()) 265 | .field("limit", &self.limit) 266 | .finish() 267 | } 268 | } 269 | 270 | impl RingBuf for GrowingHeapBuf { 271 | type Item = T; 272 | 273 | fn new() -> Self { 274 | GrowingHeapBuf { 275 | buffer: VecDeque::new(), 276 | limit: 0, 277 | } 278 | } 279 | 280 | fn with_capacity(limit: usize) -> Self { 281 | GrowingHeapBuf { 282 | buffer: VecDeque::new(), 283 | limit, 284 | } 285 | } 286 | 287 | #[inline] 288 | fn capacity(&self) -> usize { 289 | self.limit 290 | } 291 | 292 | #[inline] 293 | fn len(&self) -> usize { 294 | self.buffer.len() 295 | } 296 | 297 | #[inline] 298 | fn can_push(&self) -> bool { 299 | self.buffer.len() != self.limit 300 | } 301 | 302 | #[inline] 303 | fn push(&mut self, value: Self::Item) { 304 | debug_assert!(self.can_push()); 305 | self.buffer.push_back(value); 306 | } 307 | 308 | #[inline] 309 | fn pop(&mut self) -> Self::Item { 310 | debug_assert!(self.buffer.len() > 0); 311 | self.buffer.pop_front().unwrap() 312 | } 313 | } 314 | } 315 | 316 | #[cfg(feature = "alloc")] 317 | pub use if_alloc::*; 318 | 319 | #[cfg(test)] 320 | #[cfg(feature = "alloc")] 321 | mod tests { 322 | use super::*; 323 | use crate::buffer::ring_buffer::if_alloc::FixedHeapBuf; 324 | 325 | fn test_ring_buf>(mut buf: Buf) { 326 | assert_eq!(5, buf.capacity()); 327 | assert_eq!(0, buf.len()); 328 | assert_eq!(true, buf.is_empty()); 329 | assert_eq!(true, buf.can_push()); 330 | 331 | buf.push(1); 332 | buf.push(2); 333 | buf.push(3); 334 | assert_eq!(5, buf.capacity()); 335 | assert_eq!(3, buf.len()); 336 | assert_eq!(false, buf.is_empty()); 337 | assert_eq!(true, buf.can_push()); 338 | 339 | assert_eq!(1, buf.pop()); 340 | assert_eq!(2, buf.pop()); 341 | assert_eq!(1, buf.len()); 342 | assert_eq!(false, buf.is_empty()); 343 | assert_eq!(3, buf.pop()); 344 | assert_eq!(0, buf.len()); 345 | assert_eq!(true, buf.is_empty()); 346 | 347 | for (i, val) in [4, 5, 6, 7, 8].iter().enumerate() { 348 | buf.push(*val); 349 | assert_eq!(i + 1, buf.len()); 350 | assert_eq!(i != 4, buf.can_push()); 351 | assert_eq!(false, buf.is_empty()); 352 | } 353 | 354 | for (i, val) in [4, 5, 6, 7, 8].iter().enumerate() { 355 | assert_eq!(*val, buf.pop()); 356 | assert_eq!(4 - i, buf.len()); 357 | assert_eq!(true, buf.can_push()); 358 | assert_eq!(i == 4, buf.is_empty()); 359 | } 360 | } 361 | 362 | #[test] 363 | fn test_array_ring_buf() { 364 | let buf = ArrayBuf::::new(); 365 | test_ring_buf(buf); 366 | } 367 | 368 | #[test] 369 | fn test_heap_ring_buf() { 370 | let buf = FixedHeapBuf::::with_capacity(5); 371 | test_ring_buf(buf); 372 | } 373 | 374 | #[test] 375 | fn test_growing_ring_buf() { 376 | let buf = GrowingHeapBuf::::with_capacity(5); 377 | test_ring_buf(buf); 378 | } 379 | } 380 | -------------------------------------------------------------------------------- /src/channel/error.rs: -------------------------------------------------------------------------------- 1 | /// The error which is returned when sending a value into a channel fails. 2 | /// 3 | /// The `send` operation can only fail if the channel has been closed, which 4 | /// would prevent the other actors to ever retrieve the value. 5 | /// 6 | /// The error recovers the value that has been sent. 
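/// A minimal sketch of getting the rejected value back (using a
/// `LocalOneshotChannel`, which rejects a second `send` attempt):
///
/// ```
/// use futures_intrusive::channel::LocalOneshotChannel;
///
/// let channel = LocalOneshotChannel::<i32>::new();
/// assert!(channel.send(5).is_ok());
/// // The channel is already fulfilled, so the second value is handed
/// // back to the caller inside the error.
/// let error = channel.send(7).unwrap_err();
/// assert_eq!(7, error.0);
/// ```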
7 | #[derive(PartialEq, Debug)]
8 | pub struct ChannelSendError<T>(pub T);
9 | 
10 | /// The error which is returned when trying to receive from a channel
11 | /// without waiting fails.
12 | #[derive(PartialEq, Debug, Copy, Clone)]
13 | pub enum TryReceiveError {
14 |     /// The channel is empty. No value is available for reception.
15 |     Empty,
16 |     /// The channel had been closed and no more values are available for reception.
17 |     Closed,
18 | }
19 | 
20 | impl TryReceiveError {
21 |     /// Returns whether the error is the `Empty` variant.
22 |     pub fn is_empty(self) -> bool {
23 |         match self {
24 |             Self::Empty => true,
25 |             _ => false,
26 |         }
27 |     }
28 | 
29 |     /// Returns whether the error is the `Closed` variant.
30 |     pub fn is_closed(self) -> bool {
31 |         match self {
32 |             Self::Closed => true,
33 |             _ => false,
34 |         }
35 |     }
36 | }
37 | 
38 | /// The error which is returned when trying to send on a channel
39 | /// without waiting fails.
40 | #[derive(PartialEq, Debug)]
41 | pub enum TrySendError<T> {
42 |     /// The channel is full.
43 |     Full(T),
44 |     /// The channel was closed.
45 |     Closed(T),
46 | }
47 | 
48 | impl<T> TrySendError<T> {
49 |     /// Converts the error into its inner value.
50 |     pub fn into_inner(self) -> T {
51 |         match self {
52 |             Self::Closed(inner) => inner,
53 |             Self::Full(inner) => inner,
54 |         }
55 |     }
56 | 
57 |     /// Returns whether the error is the `Full` variant.
58 |     pub fn is_full(&self) -> bool {
59 |         match self {
60 |             Self::Full(_) => true,
61 |             _ => false,
62 |         }
63 |     }
64 | 
65 |     /// Returns whether the error is the `Closed` variant.
66 |     pub fn is_closed(&self) -> bool {
67 |         match self {
68 |             Self::Closed(_) => true,
69 |             _ => false,
70 |         }
71 |     }
72 | }
73 | 
-------------------------------------------------------------------------------- /src/channel/mod.rs: -------------------------------------------------------------------------------- 
1 | //! Asynchronous channels.
2 | //!
3 | //! This module provides various channels that can be used to communicate between
4 | //! asynchronous tasks.
5 | 
6 | mod error;
7 | pub use self::error::{ChannelSendError, TryReceiveError, TrySendError};
8 | 
9 | mod channel_future;
10 | use channel_future::{
11 |     ChannelReceiveAccess, ChannelSendAccess, RecvPollState, RecvWaitQueueEntry,
12 |     SendPollState, SendWaitQueueEntry,
13 | };
14 | pub use channel_future::{
15 |     ChannelReceiveFuture, ChannelSendFuture, CloseStatus,
16 | };
17 | 
18 | mod oneshot;
19 | 
20 | pub use self::oneshot::{GenericOneshotChannel, LocalOneshotChannel};
21 | 
22 | #[cfg(feature = "std")]
23 | pub use self::oneshot::OneshotChannel;
24 | 
25 | mod oneshot_broadcast;
26 | 
27 | pub use self::oneshot_broadcast::{
28 |     GenericOneshotBroadcastChannel, LocalOneshotBroadcastChannel,
29 | };
30 | 
31 | #[cfg(feature = "std")]
32 | pub use self::oneshot_broadcast::OneshotBroadcastChannel;
33 | 
34 | mod state_broadcast;
35 | pub use state_broadcast::{
36 |     GenericStateBroadcastChannel, LocalStateBroadcastChannel, StateId,
37 |     StateReceiveFuture,
38 | };
39 | 
40 | #[cfg(feature = "std")]
41 | pub use self::state_broadcast::StateBroadcastChannel;
42 | 
43 | mod mpmc;
44 | 
45 | pub use self::mpmc::{
46 |     ChannelStream, GenericChannel, LocalChannel, LocalUnbufferedChannel,
47 | };
48 | 
49 | #[cfg(feature = "std")]
50 | pub use self::mpmc::{Channel, UnbufferedChannel};
51 | 
52 | #[cfg(feature = "alloc")]
53 | mod if_alloc {
54 | 
55 |     /// Channel implementations where Sender and Receiver sides are cloneable
56 |     /// and owned.
57 | /// The Futures produced by channels in this module don't require a lifetime 58 | /// parameter. 59 | pub mod shared { 60 | pub use super::super::channel_future::shared::*; 61 | pub use super::super::mpmc::shared::*; 62 | pub use super::super::oneshot::shared::*; 63 | pub use super::super::oneshot_broadcast::shared::*; 64 | pub use super::super::state_broadcast::shared::*; 65 | } 66 | } 67 | 68 | #[cfg(feature = "alloc")] 69 | pub use self::if_alloc::*; 70 | -------------------------------------------------------------------------------- /src/channel/oneshot.rs: -------------------------------------------------------------------------------- 1 | //! An asynchronously awaitable oneshot channel 2 | 3 | use super::{ 4 | ChannelReceiveAccess, ChannelReceiveFuture, ChannelSendError, CloseStatus, 5 | RecvPollState, RecvWaitQueueEntry, 6 | }; 7 | use crate::{ 8 | intrusive_double_linked_list::{LinkedList, ListNode}, 9 | utils::update_waker_ref, 10 | NoopLock, 11 | }; 12 | use core::marker::PhantomData; 13 | use futures_core::task::{Context, Poll}; 14 | use lock_api::{Mutex, RawMutex}; 15 | 16 | fn wake_waiters(waiters: &mut LinkedList) { 17 | // Remove all waiters from the waiting list in reverse order and wake them. 18 | // We reverse the waiter list, so that the oldest waker (which is 19 | // at the end of the list), gets woken first and has the best 20 | // chance to grab the channel value. 21 | waiters.reverse_drain(|waiter| { 22 | if let Some(handle) = waiter.task.take() { 23 | handle.wake(); 24 | } 25 | waiter.state = RecvPollState::Unregistered; 26 | }); 27 | } 28 | 29 | /// Internal state of the oneshot channel 30 | struct ChannelState { 31 | /// Whether the channel had been fulfilled before 32 | is_fulfilled: bool, 33 | /// The value which is stored inside the channel 34 | value: Option, 35 | /// The list of waiters, which are waiting for the channel to get fulfilled 36 | waiters: LinkedList, 37 | } 38 | 39 | impl ChannelState { 40 | fn new() -> ChannelState { 41 | ChannelState:: { 42 | is_fulfilled: false, 43 | value: None, 44 | waiters: LinkedList::new(), 45 | } 46 | } 47 | 48 | /// Writes a single value to the channel. 49 | /// If a value had been written to the channel before, the new value will be rejected. 50 | fn send(&mut self, value: T) -> Result<(), ChannelSendError> { 51 | if self.is_fulfilled { 52 | return Err(ChannelSendError(value)); 53 | } 54 | 55 | self.value = Some(value); 56 | self.is_fulfilled = true; 57 | 58 | // Wakeup all waiters 59 | wake_waiters(&mut self.waiters); 60 | 61 | Ok(()) 62 | } 63 | 64 | fn close(&mut self) -> CloseStatus { 65 | if self.is_fulfilled { 66 | return CloseStatus::AlreadyClosed; 67 | } 68 | self.is_fulfilled = true; 69 | 70 | // Wakeup all waiters 71 | wake_waiters(&mut self.waiters); 72 | 73 | CloseStatus::NewlyClosed 74 | } 75 | 76 | /// Tries to read the value from the channel. 77 | /// If the value isn't available yet, the ChannelReceiveFuture gets added to the 78 | /// wait queue at the channel, and will be signalled once ready. 79 | /// This function is only safe as long as the `wait_node`s address is guaranteed 80 | /// to be stable until it gets removed from the queue. 
81 | unsafe fn try_receive( 82 | &mut self, 83 | wait_node: &mut ListNode, 84 | cx: &mut Context<'_>, 85 | ) -> Poll> { 86 | match wait_node.state { 87 | RecvPollState::Unregistered => { 88 | let maybe_val = self.value.take(); 89 | match maybe_val { 90 | Some(v) => { 91 | // A value was available inside the channel and was fetched 92 | Poll::Ready(Some(v)) 93 | } 94 | None => { 95 | // Check if something was written into the channel before 96 | // or the channel was closed. 97 | if self.is_fulfilled { 98 | Poll::Ready(None) 99 | } else { 100 | // Added the task to the wait queue 101 | wait_node.task = Some(cx.waker().clone()); 102 | wait_node.state = RecvPollState::Registered; 103 | self.waiters.add_front(wait_node); 104 | Poll::Pending 105 | } 106 | } 107 | } 108 | } 109 | RecvPollState::Registered => { 110 | // Since the channel wakes up all waiters and moves their states 111 | // to unregistered there can't be any value in the channel in this state. 112 | // However the caller might have passed a different `Waker`. 113 | // In this case we need to update it. 114 | update_waker_ref(&mut wait_node.task, cx); 115 | Poll::Pending 116 | } 117 | RecvPollState::Notified => { 118 | unreachable!("Not possible for Oneshot"); 119 | } 120 | } 121 | } 122 | 123 | fn remove_waiter(&mut self, wait_node: &mut ListNode) { 124 | // ChannelReceiveFuture only needs to get removed if it had been added to 125 | // the wait queue of the channel. This has happened in the RecvPollState::Waiting case. 126 | if let RecvPollState::Registered = wait_node.state { 127 | // Safety: Due to the state, we know that the node must be part 128 | // of the waiter list 129 | if !unsafe { self.waiters.remove(wait_node) } { 130 | // Panic if the address isn't found. This can only happen if the contract was 131 | // violated, e.g. the RecvWaitQueueEntry got moved after the initial poll. 132 | panic!("Future could not be removed from wait queue"); 133 | } 134 | wait_node.state = RecvPollState::Unregistered; 135 | } 136 | } 137 | } 138 | 139 | /// A channel which can be used to exchange a single value between two 140 | /// concurrent tasks. 141 | /// 142 | /// Tasks can wait for the value to get delivered via `receive`. 143 | /// The returned Future will get fulfilled when a value is sent into the channel. 144 | /// 145 | /// The value can only be extracted by a single receiving task. Once the value 146 | /// has been retrieved from the Channel, the Channel is closed and subsequent 147 | /// receive calls will return `None`. 148 | pub struct GenericOneshotChannel { 149 | inner: Mutex>, 150 | } 151 | 152 | // The channel can be sent to other threads as long as it's not borrowed and the 153 | // value in it can be sent to other threads. 154 | unsafe impl Send 155 | for GenericOneshotChannel 156 | { 157 | } 158 | // The channel is thread-safe as long as a thread-safe mutex is used 159 | unsafe impl Sync 160 | for GenericOneshotChannel 161 | { 162 | } 163 | 164 | impl core::fmt::Debug 165 | for GenericOneshotChannel 166 | { 167 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 168 | f.debug_struct("GenericOneshotChannel").finish() 169 | } 170 | } 171 | 172 | impl GenericOneshotChannel { 173 | /// Creates a new OneshotChannel in the given state 174 | pub fn new() -> GenericOneshotChannel { 175 | GenericOneshotChannel { 176 | inner: Mutex::new(ChannelState::new()), 177 | } 178 | } 179 | 180 | /// Writes a single value to the channel. 181 | /// 182 | /// This will notify waiters about the availability of the value. 
183 | /// If a value had been written to the channel before, or if the 184 | /// channel is closed, the new value will be rejected and 185 | /// returned inside the error variant. 186 | pub fn send(&self, value: T) -> Result<(), ChannelSendError> { 187 | self.inner.lock().send(value) 188 | } 189 | 190 | /// Closes the channel. 191 | /// 192 | /// This will notify waiters about closure, by fulfilling pending `Future`s 193 | /// with `None`. 194 | /// `send(value)` attempts which follow this call will fail with a 195 | /// [`ChannelSendError`]. 196 | pub fn close(&self) -> CloseStatus { 197 | self.inner.lock().close() 198 | } 199 | 200 | /// Returns a future that gets fulfilled when a value is written to the channel 201 | /// or the channel is closed. 202 | pub fn receive(&self) -> ChannelReceiveFuture { 203 | ChannelReceiveFuture { 204 | channel: Some(self), 205 | wait_node: ListNode::new(RecvWaitQueueEntry::new()), 206 | _phantom: PhantomData, 207 | } 208 | } 209 | } 210 | 211 | impl ChannelReceiveAccess 212 | for GenericOneshotChannel 213 | { 214 | unsafe fn receive_or_register( 215 | &self, 216 | wait_node: &mut ListNode, 217 | cx: &mut Context<'_>, 218 | ) -> Poll> { 219 | self.inner.lock().try_receive(wait_node, cx) 220 | } 221 | 222 | fn remove_receive_waiter( 223 | &self, 224 | wait_node: &mut ListNode, 225 | ) { 226 | self.inner.lock().remove_waiter(wait_node) 227 | } 228 | } 229 | 230 | // Export a non thread-safe version using NoopLock 231 | 232 | /// A [`GenericOneshotChannel`] which is not thread-safe. 233 | pub type LocalOneshotChannel = GenericOneshotChannel; 234 | 235 | #[cfg(feature = "std")] 236 | mod if_std { 237 | use super::*; 238 | // Export a thread-safe version using parking_lot::RawMutex 239 | 240 | /// A [`GenericOneshotChannel`] implementation backed by [`parking_lot`]. 241 | pub type OneshotChannel = 242 | GenericOneshotChannel; 243 | } 244 | 245 | #[cfg(feature = "std")] 246 | pub use self::if_std::*; 247 | 248 | #[cfg(feature = "alloc")] 249 | mod if_alloc { 250 | use super::*; 251 | 252 | pub mod shared { 253 | use super::*; 254 | use crate::channel::shared::ChannelReceiveFuture; 255 | 256 | struct GenericOneshotChannelSharedState 257 | where 258 | MutexType: RawMutex, 259 | T: 'static, 260 | { 261 | channel: GenericOneshotChannel, 262 | } 263 | 264 | // Implement ChannelReceiveAccess trait for SharedChannelState, so that it can 265 | // be used for dynamic dispatch in futures. 266 | impl ChannelReceiveAccess 267 | for GenericOneshotChannelSharedState 268 | where 269 | MutexType: RawMutex, 270 | { 271 | unsafe fn receive_or_register( 272 | &self, 273 | wait_node: &mut ListNode, 274 | cx: &mut Context<'_>, 275 | ) -> Poll> { 276 | self.channel.receive_or_register(wait_node, cx) 277 | } 278 | 279 | fn remove_receive_waiter( 280 | &self, 281 | wait_node: &mut ListNode, 282 | ) { 283 | self.channel.remove_receive_waiter(wait_node) 284 | } 285 | } 286 | 287 | /// The sending side of a channel which can be used to exchange values 288 | /// between concurrent tasks. 289 | /// 290 | /// Values can be sent into the channel through `send`. 291 | pub struct GenericOneshotSender 292 | where 293 | MutexType: RawMutex, 294 | T: 'static, 295 | { 296 | inner: alloc::sync::Arc< 297 | GenericOneshotChannelSharedState, 298 | >, 299 | } 300 | 301 | /// The receiving side of a channel which can be used to exchange values 302 | /// between concurrent tasks. 303 | /// 304 | /// Tasks can receive values from the channel through the `receive` method. 
305 |     /// The returned Future will get resolved when a value is sent into the channel.
306 |     pub struct GenericOneshotReceiver<MutexType, T>
307 |     where
308 |         MutexType: RawMutex,
309 |         T: 'static,
310 |     {
311 |         inner: alloc::sync::Arc<
312 |             GenericOneshotChannelSharedState<MutexType, T>,
313 |         >,
314 |     }
315 | 
316 |     impl<MutexType, T> core::fmt::Debug for GenericOneshotSender<MutexType, T>
317 |     where
318 |         MutexType: RawMutex,
319 |     {
320 |         fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
321 |             f.debug_struct("OneshotSender").finish()
322 |         }
323 |     }
324 | 
325 |     impl<MutexType, T> core::fmt::Debug for GenericOneshotReceiver<MutexType, T>
326 |     where
327 |         MutexType: RawMutex,
328 |     {
329 |         fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
330 |             f.debug_struct("OneshotReceiver").finish()
331 |         }
332 |     }
333 | 
334 |     impl<MutexType, T> Drop for GenericOneshotSender<MutexType, T>
335 |     where
336 |         MutexType: RawMutex,
337 |     {
338 |         fn drop(&mut self) {
339 |             // Close the channel, before last sender gets destroyed
340 |             // TODO: We could potentially avoid this, if no receiver is left
341 |             self.inner.channel.close();
342 |         }
343 |     }
344 | 
345 |     impl<MutexType, T> Drop for GenericOneshotReceiver<MutexType, T>
346 |     where
347 |         MutexType: RawMutex,
348 |     {
349 |         fn drop(&mut self) {
350 |             // Close the channel, before last receiver gets destroyed
351 |             // TODO: We could potentially avoid this, if no sender is left
352 |             self.inner.channel.close();
353 |         }
354 |     }
355 | 
356 |     /// Creates a new oneshot channel which can be used to exchange values
357 |     /// of type `T` between concurrent tasks.
358 |     /// The ends of the Channel are represented through
359 |     /// the returned Sender and Receiver.
360 |     ///
361 |     /// As soon as either the sender or the receiver is closed, the channel
362 |     /// itself will be closed.
363 |     pub fn generic_oneshot_channel<MutexType, T>() -> (
364 |         GenericOneshotSender<MutexType, T>,
365 |         GenericOneshotReceiver<MutexType, T>,
366 |     )
367 |     where
368 |         MutexType: RawMutex,
369 |         T: Send,
370 |     {
371 |         let inner =
372 |             alloc::sync::Arc::new(GenericOneshotChannelSharedState {
373 |                 channel: GenericOneshotChannel::new(),
374 |             });
375 | 
376 |         let sender = GenericOneshotSender {
377 |             inner: inner.clone(),
378 |         };
379 |         let receiver = GenericOneshotReceiver { inner };
380 | 
381 |         (sender, receiver)
382 |     }
383 | 
384 |     impl<MutexType, T> GenericOneshotSender<MutexType, T>
385 |     where
386 |         MutexType: RawMutex + 'static,
387 |     {
388 |         /// Writes a single value to the channel.
389 |         ///
390 |         /// This will notify waiters about the availability of the value.
391 |         /// If a value had been written to the channel before, or if the
392 |         /// channel is closed, the new value will be rejected and
393 |         /// returned inside the error variant.
394 |         pub fn send(&self, value: T) -> Result<(), ChannelSendError<T>> {
395 |             self.inner.channel.send(value)
396 |         }
397 |     }
398 | 
399 |     impl<MutexType, T> GenericOneshotReceiver<MutexType, T>
400 |     where
401 |         MutexType: RawMutex + 'static,
402 |     {
403 |         /// Returns a future that gets fulfilled when a value is written to the channel.
404 |         /// If the channel gets closed, the future will resolve to `None`.
405 |         pub fn receive(&self) -> ChannelReceiveFuture<MutexType, T> {
406 |             ChannelReceiveFuture {
407 |                 channel: Some(self.inner.clone()),
408 |                 wait_node: ListNode::new(RecvWaitQueueEntry::new()),
409 |                 _phantom: PhantomData,
410 |             }
411 |         }
412 |     }
413 | 
414 |     // Export parking_lot based shared channels in std mode
415 |     #[cfg(feature = "std")]
416 |     mod if_std {
417 |         use super::*;
418 | 
419 |         /// A [`GenericOneshotSender`] implementation backed by [`parking_lot`].
420 | pub type OneshotSender = 421 | GenericOneshotSender; 422 | /// A [`GenericOneshotReceiver`] implementation backed by [`parking_lot`]. 423 | pub type OneshotReceiver = 424 | GenericOneshotReceiver; 425 | 426 | /// Creates a new oneshot channel. 427 | /// 428 | /// Refer to [`generic_oneshot_channel`] for details. 429 | /// 430 | /// Example for creating a channel to transmit an integer value: 431 | /// 432 | /// ``` 433 | /// # use futures_intrusive::channel::shared::oneshot_channel; 434 | /// let (sender, receiver) = oneshot_channel::(); 435 | /// ``` 436 | pub fn oneshot_channel() -> (OneshotSender, OneshotReceiver) 437 | where 438 | T: Send, 439 | { 440 | generic_oneshot_channel::() 441 | } 442 | } 443 | 444 | #[cfg(feature = "std")] 445 | pub use self::if_std::*; 446 | } 447 | } 448 | 449 | #[cfg(feature = "alloc")] 450 | pub use self::if_alloc::*; 451 | -------------------------------------------------------------------------------- /src/channel/oneshot_broadcast.rs: -------------------------------------------------------------------------------- 1 | //! An asynchronously awaitable oneshot channel which can be awaited by 2 | //! multiple consumers. 3 | 4 | use super::{ 5 | ChannelReceiveAccess, ChannelReceiveFuture, ChannelSendError, CloseStatus, 6 | RecvPollState, RecvWaitQueueEntry, 7 | }; 8 | use crate::{ 9 | intrusive_double_linked_list::{LinkedList, ListNode}, 10 | utils::update_waker_ref, 11 | NoopLock, 12 | }; 13 | use core::marker::PhantomData; 14 | use futures_core::task::{Context, Poll}; 15 | use lock_api::{Mutex, RawMutex}; 16 | 17 | fn wake_waiters(waiters: &mut LinkedList) { 18 | // Remove all waiters from the waiting list in reverse order and wake them. 19 | // We reverse the waiter list, so that the oldest waker (which is 20 | // at the end of the list), gets woken first and has the best 21 | // chance to grab the channel value. 22 | waiters.reverse_drain(|waiter| { 23 | if let Some(handle) = waiter.task.take() { 24 | handle.wake(); 25 | } 26 | waiter.state = RecvPollState::Unregistered; 27 | }); 28 | } 29 | 30 | /// Internal state of the oneshot channel 31 | struct ChannelState { 32 | /// Whether the channel had been fulfilled before 33 | is_fulfilled: bool, 34 | /// The value which is stored inside the channel 35 | value: Option, 36 | /// The list of waiters, which are waiting for the channel to get fulfilled 37 | waiters: LinkedList, 38 | } 39 | 40 | impl ChannelState 41 | where 42 | T: Clone, 43 | { 44 | fn new() -> ChannelState { 45 | ChannelState:: { 46 | is_fulfilled: false, 47 | value: None, 48 | waiters: LinkedList::new(), 49 | } 50 | } 51 | 52 | /// Writes a single value to the channel. 53 | /// If a value had been written to the channel before, the new value will be rejected. 54 | fn send(&mut self, value: T) -> Result<(), ChannelSendError> { 55 | if self.is_fulfilled { 56 | return Err(ChannelSendError(value)); 57 | } 58 | 59 | self.value = Some(value); 60 | self.is_fulfilled = true; 61 | 62 | // Wakeup all waiters 63 | wake_waiters(&mut self.waiters); 64 | 65 | Ok(()) 66 | } 67 | 68 | fn close(&mut self) -> CloseStatus { 69 | if self.is_fulfilled { 70 | return CloseStatus::AlreadyClosed; 71 | } 72 | self.is_fulfilled = true; 73 | 74 | // Wakeup all waiters 75 | wake_waiters(&mut self.waiters); 76 | 77 | CloseStatus::NewlyClosed 78 | } 79 | 80 | /// Tries to read the value from the channel. 81 | /// If the value isn't available yet, the ChannelReceiveFuture gets added to the 82 | /// wait queue at the channel, and will be signalled once ready. 
83 | /// This function is only safe as long as the `wait_node`s address is guaranteed 84 | /// to be stable until it gets removed from the queue. 85 | unsafe fn try_receive( 86 | &mut self, 87 | wait_node: &mut ListNode, 88 | cx: &mut Context<'_>, 89 | ) -> Poll> { 90 | match wait_node.state { 91 | RecvPollState::Unregistered => { 92 | match &self.value { 93 | Some(v) => { 94 | // A value was available inside the channel and was fetched. 95 | // TODO: If the same waiter asks again, they will always 96 | // get the same value, instead of `None`. Is that reasonable? 97 | Poll::Ready(Some(v.clone())) 98 | } 99 | None => { 100 | // Check if something was written into the channel before 101 | // or the channel was closed. 102 | if self.is_fulfilled { 103 | Poll::Ready(None) 104 | } else { 105 | // Added the task to the wait queue 106 | wait_node.task = Some(cx.waker().clone()); 107 | wait_node.state = RecvPollState::Registered; 108 | self.waiters.add_front(wait_node); 109 | Poll::Pending 110 | } 111 | } 112 | } 113 | } 114 | RecvPollState::Registered => { 115 | // Since the channel wakes up all waiters and moves their states 116 | // to unregistered there can't be any value in the channel in this state. 117 | // However the caller might have passed a different `Waker`. 118 | // In this case we need to update it. 119 | update_waker_ref(&mut wait_node.task, cx); 120 | Poll::Pending 121 | } 122 | RecvPollState::Notified => { 123 | unreachable!("Not possible for Oneshot Broadcast"); 124 | } 125 | } 126 | } 127 | 128 | fn remove_waiter(&mut self, wait_node: &mut ListNode) { 129 | // ChannelReceiveFuture only needs to get removed if it had been added to 130 | // the wait queue of the channel. This has happened in the RecvPollState::Waiting case. 131 | if let RecvPollState::Registered = wait_node.state { 132 | // Safety: Due to the state, we know that the node must be part 133 | // of the waiter list 134 | if !unsafe { self.waiters.remove(wait_node) } { 135 | // Panic if the address isn't found. This can only happen if the contract was 136 | // violated, e.g. the RecvWaitQueueEntry got moved after the initial poll. 137 | panic!("Future could not be removed from wait queue"); 138 | } 139 | wait_node.state = RecvPollState::Unregistered; 140 | } 141 | } 142 | } 143 | 144 | /// A channel which can be used to exchange a single value between two or more 145 | /// concurrent tasks. 146 | /// 147 | /// The value which gets sent will get stored inside the Channel, and can be 148 | /// retrieved by an arbitrary number of tasks afterwards. 149 | /// 150 | /// Tasks can wait for the value to get delivered via `receive`. 151 | /// The returned Future will get fulfilled when a value is sent into the channel. 152 | pub struct GenericOneshotBroadcastChannel { 153 | inner: Mutex>, 154 | } 155 | 156 | // The channel can be sent to other threads as long as it's not borrowed and the 157 | // value in it can be sent to other threads. 
158 | unsafe impl Send 159 | for GenericOneshotBroadcastChannel 160 | { 161 | } 162 | // The channel is thread-safe as long as a thread-safe mutex is used 163 | unsafe impl Sync 164 | for GenericOneshotBroadcastChannel 165 | { 166 | } 167 | 168 | impl core::fmt::Debug 169 | for GenericOneshotBroadcastChannel 170 | { 171 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 172 | f.debug_struct("GenericOneshotBroadcastChannel").finish() 173 | } 174 | } 175 | 176 | impl GenericOneshotBroadcastChannel 177 | where 178 | T: Clone, 179 | { 180 | /// Creates a new OneshotBroadcastChannel in the given state 181 | pub fn new() -> GenericOneshotBroadcastChannel { 182 | GenericOneshotBroadcastChannel { 183 | inner: Mutex::new(ChannelState::new()), 184 | } 185 | } 186 | 187 | /// Writes a single value to the channel. 188 | /// 189 | /// This will notify waiters about the availability of the value. 190 | /// If a value had been written to the channel before, or if the 191 | /// channel is closed, the new value will be rejected and 192 | /// returned inside the error variant. 193 | pub fn send(&self, value: T) -> Result<(), ChannelSendError> { 194 | self.inner.lock().send(value) 195 | } 196 | 197 | /// Closes the channel. 198 | /// 199 | /// This will notify waiters about closure, by fulfilling pending `Future`s 200 | /// with `None`. 201 | /// `send(value)` attempts which follow this call will fail with a 202 | /// [`ChannelSendError`]. 203 | pub fn close(&self) -> CloseStatus { 204 | self.inner.lock().close() 205 | } 206 | 207 | /// Returns a future that gets fulfilled when a value is written to the channel 208 | /// or the channel is closed. 209 | pub fn receive(&self) -> ChannelReceiveFuture { 210 | ChannelReceiveFuture { 211 | channel: Some(self), 212 | wait_node: ListNode::new(RecvWaitQueueEntry::new()), 213 | _phantom: PhantomData, 214 | } 215 | } 216 | } 217 | 218 | impl ChannelReceiveAccess 219 | for GenericOneshotBroadcastChannel 220 | where 221 | T: Clone, 222 | { 223 | unsafe fn receive_or_register( 224 | &self, 225 | wait_node: &mut ListNode, 226 | cx: &mut Context<'_>, 227 | ) -> Poll> { 228 | self.inner.lock().try_receive(wait_node, cx) 229 | } 230 | 231 | fn remove_receive_waiter( 232 | &self, 233 | wait_node: &mut ListNode, 234 | ) { 235 | self.inner.lock().remove_waiter(wait_node) 236 | } 237 | } 238 | 239 | // Export a non thread-safe version using NoopLock 240 | 241 | /// A [`GenericOneshotBroadcastChannel`] which is not thread-safe. 242 | pub type LocalOneshotBroadcastChannel = 243 | GenericOneshotBroadcastChannel; 244 | 245 | #[cfg(feature = "std")] 246 | mod if_std { 247 | use super::*; 248 | 249 | // Export a thread-safe version using parking_lot::RawMutex 250 | 251 | /// A [`GenericOneshotBroadcastChannel`] implementation backed by [`parking_lot`]. 252 | pub type OneshotBroadcastChannel = 253 | GenericOneshotBroadcastChannel; 254 | } 255 | 256 | #[cfg(feature = "std")] 257 | pub use self::if_std::*; 258 | 259 | #[cfg(feature = "alloc")] 260 | mod if_alloc { 261 | use super::*; 262 | 263 | pub mod shared { 264 | use super::*; 265 | use crate::channel::shared::ChannelReceiveFuture; 266 | 267 | struct GenericOneshotChannelSharedState 268 | where 269 | MutexType: RawMutex, 270 | T: 'static, 271 | { 272 | channel: GenericOneshotBroadcastChannel, 273 | } 274 | 275 | // Implement ChannelReceiveAccess trait for SharedChannelState, so that it can 276 | // be used for dynamic dispatch in futures. 
277 |     impl<MutexType, T> ChannelReceiveAccess<T>
278 |         for GenericOneshotChannelSharedState<MutexType, T>
279 |     where
280 |         MutexType: RawMutex,
281 |         T: Clone,
282 |     {
283 |         unsafe fn receive_or_register(
284 |             &self,
285 |             wait_node: &mut ListNode<RecvWaitQueueEntry>,
286 |             cx: &mut Context<'_>,
287 |         ) -> Poll<Option<T>> {
288 |             self.channel.receive_or_register(wait_node, cx)
289 |         }
290 | 
291 |         fn remove_receive_waiter(
292 |             &self,
293 |             wait_node: &mut ListNode<RecvWaitQueueEntry>,
294 |         ) {
295 |             self.channel.remove_receive_waiter(wait_node)
296 |         }
297 |     }
298 | 
299 |     /// The sending side of a channel which can be used to exchange values
300 |     /// between concurrent tasks.
301 |     ///
302 |     /// Values can be sent into the channel through `send`.
303 |     pub struct GenericOneshotBroadcastSender<MutexType, T>
304 |     where
305 |         MutexType: RawMutex,
306 |         T: Clone + 'static,
307 |     {
308 |         inner: alloc::sync::Arc<
309 |             GenericOneshotChannelSharedState<MutexType, T>,
310 |         >,
311 |     }
312 | 
313 |     /// The receiving side of a channel which can be used to exchange values
314 |     /// between concurrent tasks.
315 |     ///
316 |     /// Tasks can receive values from the channel through the `receive` method.
317 |     /// The returned Future will get resolved when a value is sent into the channel.
318 |     pub struct GenericOneshotBroadcastReceiver<MutexType, T>
319 |     where
320 |         MutexType: RawMutex,
321 |         T: Clone + 'static,
322 |     {
323 |         inner: alloc::sync::Arc<
324 |             GenericOneshotChannelSharedState<MutexType, T>,
325 |         >,
326 |     }
327 | 
328 |     // Manual `Clone` implementation, since #[derive(Clone)] also requires
329 |     // the Mutex to be `Clone`
330 |     impl<MutexType, T> Clone for GenericOneshotBroadcastReceiver<MutexType, T>
331 |     where
332 |         MutexType: RawMutex,
333 |         T: Clone + 'static,
334 |     {
335 |         fn clone(&self) -> Self {
336 |             Self {
337 |                 inner: self.inner.clone(),
338 |             }
339 |         }
340 |     }
341 | 
342 |     impl<MutexType, T> core::fmt::Debug
343 |         for GenericOneshotBroadcastSender<MutexType, T>
344 |     where
345 |         MutexType: RawMutex,
346 |         T: Clone,
347 |     {
348 |         fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
349 |             f.debug_struct("OneshotBroadcastSender").finish()
350 |         }
351 |     }
352 | 
353 |     impl<MutexType, T> core::fmt::Debug
354 |         for GenericOneshotBroadcastReceiver<MutexType, T>
355 |     where
356 |         MutexType: RawMutex,
357 |         T: Clone,
358 |     {
359 |         fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
360 |             f.debug_struct("OneshotBroadcastReceiver").finish()
361 |         }
362 |     }
363 | 
364 |     impl<MutexType, T> Drop for GenericOneshotBroadcastSender<MutexType, T>
365 |     where
366 |         MutexType: RawMutex,
367 |         T: Clone,
368 |     {
369 |         fn drop(&mut self) {
370 |             // Close the channel, before last sender gets destroyed
371 |             // TODO: We could potentially avoid this, if no receiver is left
372 |             self.inner.channel.close();
373 |         }
374 |     }
375 | 
376 |     impl<MutexType, T> Drop for GenericOneshotBroadcastReceiver<MutexType, T>
377 |     where
378 |         MutexType: RawMutex,
379 |         T: Clone,
380 |     {
381 |         fn drop(&mut self) {
382 |             // TODO: This is broken, since it will already close the channel if only one receiver is closed.
383 |             // We need to count receivers, as in the mpmc queue.
384 |             // Close the channel, before last receiver gets destroyed
385 |             // TODO: We could potentially avoid this, if no sender is left
386 |             self.inner.channel.close();
387 |         }
388 |     }
389 | 
390 |     /// Creates a new oneshot broadcast channel which can be used to exchange values
391 |     /// of type `T` between concurrent tasks.
392 |     /// The ends of the Channel are represented through
393 |     /// the returned `Sender` and `Receiver`. The `Receiver` can be cloned.
394 |     ///
395 |     /// As soon as either the sender or all receivers are closed, the channel
396 |     /// itself will be closed.
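    ///
    /// A small usage sketch (marked `ignore` since it additionally needs the
    /// `std` feature, so that `parking_lot::RawMutex` is available as the lock
    /// type):
    ///
    /// ```ignore
    /// use futures_intrusive::channel::shared::generic_oneshot_broadcast_channel;
    ///
    /// let (sender, receiver) =
    ///     generic_oneshot_broadcast_channel::<parking_lot::RawMutex, i32>();
    /// // Receivers can be cloned, so several tasks can await the same value.
    /// let second_receiver = receiver.clone();
    /// assert!(sender.send(42).is_ok());
    /// ```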
397 | pub fn generic_oneshot_broadcast_channel() -> ( 398 | GenericOneshotBroadcastSender, 399 | GenericOneshotBroadcastReceiver, 400 | ) 401 | where 402 | MutexType: RawMutex, 403 | T: Send + Clone, 404 | { 405 | let inner = 406 | alloc::sync::Arc::new(GenericOneshotChannelSharedState { 407 | channel: GenericOneshotBroadcastChannel::new(), 408 | }); 409 | 410 | let sender = GenericOneshotBroadcastSender { 411 | inner: inner.clone(), 412 | }; 413 | let receiver = GenericOneshotBroadcastReceiver { inner }; 414 | 415 | (sender, receiver) 416 | } 417 | 418 | impl GenericOneshotBroadcastSender 419 | where 420 | MutexType: RawMutex + 'static, 421 | T: Clone, 422 | { 423 | /// Writes a single value to the channel. 424 | /// 425 | /// This will notify waiters about the availability of the value. 426 | /// If a value had been written to the channel before, or if the 427 | /// channel is closed, the new value will be rejected and 428 | /// returned inside the error variant. 429 | pub fn send(&self, value: T) -> Result<(), ChannelSendError> { 430 | self.inner.channel.send(value) 431 | } 432 | } 433 | 434 | impl GenericOneshotBroadcastReceiver 435 | where 436 | MutexType: RawMutex + 'static, 437 | T: Clone, 438 | { 439 | /// Returns a future that gets fulfilled when a value is written to the channel. 440 | /// If the channels gets closed, the future will resolve to `None`. 441 | pub fn receive(&self) -> ChannelReceiveFuture { 442 | ChannelReceiveFuture { 443 | channel: Some(self.inner.clone()), 444 | wait_node: ListNode::new(RecvWaitQueueEntry::new()), 445 | _phantom: PhantomData, 446 | } 447 | } 448 | } 449 | 450 | // Export parking_lot based shared channels in std mode 451 | #[cfg(feature = "std")] 452 | mod if_std { 453 | use super::*; 454 | 455 | /// A [`GenericOneshotBroadcastSender`] implementation backed by [`parking_lot`]. 456 | pub type OneshotBroadcastSender = 457 | GenericOneshotBroadcastSender; 458 | /// A [`GenericOneshotBroadcastReceiver`] implementation backed by [`parking_lot`]. 459 | pub type OneshotBroadcastReceiver = 460 | GenericOneshotBroadcastReceiver; 461 | 462 | /// Creates a new oneshot broadcast channel. 463 | /// 464 | /// Refer to [`generic_oneshot_broadcast_channel`] for details. 465 | /// 466 | /// Example for creating a channel to transmit an integer value: 467 | /// 468 | /// ``` 469 | /// # use futures_intrusive::channel::shared::oneshot_broadcast_channel; 470 | /// let (sender, receiver) = oneshot_broadcast_channel::(); 471 | /// ``` 472 | pub fn oneshot_broadcast_channel( 473 | ) -> (OneshotBroadcastSender, OneshotBroadcastReceiver) 474 | where 475 | T: Send + Clone, 476 | { 477 | generic_oneshot_broadcast_channel::() 478 | } 479 | } 480 | 481 | #[cfg(feature = "std")] 482 | pub use self::if_std::*; 483 | } 484 | } 485 | 486 | #[cfg(feature = "alloc")] 487 | pub use self::if_alloc::*; 488 | -------------------------------------------------------------------------------- /src/intrusive_pairing_heap.rs: -------------------------------------------------------------------------------- 1 | //! Implements an intrusive priority queue based on a pairing heap. 2 | //! 3 | //! A [pairing heap] is a heap data structure (i.e. a tree whose nodes carry 4 | //! values, with the property that every node's value is lesser or equal to its 5 | //! children's) that supports the following operations: 6 | //! 7 | //! - finding a minimum element in `O(1)` 8 | //! - This is trivial: the heap property guarantees that the root is a 9 | //! minimum element. 10 | //! 
- insertion of a new node in `O(1)` 11 | //! - deletion in `O(log n)`, _amortized_ 12 | //! - However, note that any _individual_ deletion may take `O(N)` time. For 13 | //! example, if we take an empty heap and insert N elements, the tree will 14 | //! have a very degenerate (shallow) shape. Then, deleting the root will 15 | //! take `O(N)` time, but it will also reorganize the tree to make 16 | //! successive deletes cheaper. 17 | //! 18 | //! [pairing heap]: https://en.wikipedia.org/wiki/Pairing_heap 19 | 20 | use core::{ 21 | marker::PhantomPinned, 22 | mem, 23 | ops::{Deref, DerefMut, Drop}, 24 | ptr::NonNull, 25 | }; 26 | 27 | /// Compares `a` and `b` without unwinding. 28 | /// This is necessary to avoid reentrancy in the heap. 29 | fn safe_lesser(a: &T, b: &T) -> bool { 30 | struct DropBomb; 31 | impl Drop for DropBomb { 32 | fn drop(&mut self) { 33 | panic!("Panicked while comparing"); 34 | } 35 | } 36 | // If `T::cmp` panics, force a double-panic (and therefore an abort). 37 | let bomb = DropBomb; 38 | let ordering = a < b; 39 | mem::forget(bomb); 40 | ordering 41 | } 42 | 43 | /// A node which carries data of type `T` and is stored in an intrusive heap. 44 | /// 45 | /// Nodes will be compared based on `T`'s [`Ord`] impl. Those comparisons must 46 | /// not panic - otherwise, the program will abort. 47 | #[derive(Debug)] 48 | pub struct HeapNode { 49 | /// The parent. `None` if this is the root. 50 | parent: Option>>, 51 | /// The previous sibling. `None` if there is no previous sibling. 52 | prev: Option>>, 53 | /// The next sibling. `None` if there is no next sibling. 54 | next: Option>>, 55 | /// The first child. `None` if there are no children. 56 | first_child: Option>>, 57 | /// The data which is associated to this heap item. 58 | data: T, 59 | /// Prevents `HeapNode`s from being `Unpin`. They may never be moved, since 60 | /// the heap semantics require addresses to be stable. 61 | _pin: PhantomPinned, 62 | } 63 | 64 | impl HeapNode { 65 | /// Creates a new node with the associated data 66 | pub fn new(data: T) -> HeapNode { 67 | HeapNode:: { 68 | parent: None, 69 | prev: None, 70 | next: None, 71 | first_child: None, 72 | data, 73 | _pin: PhantomPinned, 74 | } 75 | } 76 | 77 | fn is_root(&self) -> bool { 78 | if self.parent.is_none() { 79 | debug_assert_eq!(self.prev, None); 80 | debug_assert_eq!(self.next, None); 81 | true 82 | } else { 83 | false 84 | } 85 | } 86 | } 87 | 88 | impl Deref for HeapNode { 89 | type Target = T; 90 | 91 | fn deref(&self) -> &T { 92 | &self.data 93 | } 94 | } 95 | 96 | impl DerefMut for HeapNode { 97 | fn deref_mut(&mut self) -> &mut T { 98 | &mut self.data 99 | } 100 | } 101 | 102 | /// Add a child to a node. 103 | unsafe fn add_child( 104 | mut parent: NonNull>, 105 | mut child: NonNull>, 106 | ) { 107 | // require parent <= child 108 | debug_assert!(!safe_lesser(&child.as_ref().data, &parent.as_ref().data)); 109 | if let Some(mut old_first_child) = parent.as_mut().first_child.take() { 110 | child.as_mut().next = Some(old_first_child); 111 | debug_assert_eq!(old_first_child.as_ref().prev, None); 112 | old_first_child.as_mut().prev = Some(child); 113 | } 114 | parent.as_mut().first_child = Some(child); 115 | child.as_mut().parent = Some(parent); 116 | } 117 | 118 | /// Merge two root heaps. Returns the new root. 119 | unsafe fn meld( 120 | left: NonNull>, 121 | right: NonNull>, 122 | ) -> NonNull> { 123 | debug_assert!(left.as_ref().is_root()); 124 | debug_assert!(right.as_ref().is_root()); 125 | // The lesser node should become the root. 
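    // Melding preserves the heap property: the root with the greater value
    // simply becomes a child of the root with the lesser value.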
126 | if safe_lesser(&left.as_ref().data, &right.as_ref().data) { 127 | add_child(left, right); 128 | left 129 | } else { 130 | add_child(right, left); 131 | right 132 | } 133 | } 134 | 135 | /// Merge two root heaps, where the left might be empty. Returns the new root. 136 | unsafe fn maybe_meld( 137 | left: Option>>, 138 | right: NonNull>, 139 | ) -> NonNull> { 140 | if let Some(left) = left { 141 | meld(left, right) 142 | } else { 143 | right 144 | } 145 | } 146 | 147 | /// Given the first child in a child list, traverse and find the last child. 148 | unsafe fn last_child( 149 | first_child: NonNull>, 150 | ) -> NonNull> { 151 | let mut cur = first_child; 152 | while let Some(next) = cur.as_ref().next { 153 | cur = next; 154 | } 155 | cur 156 | } 157 | 158 | /// Given a pointer to the last node in a child list, unlink it and return the 159 | /// previous node (which has become the last node in its list). 160 | /// 161 | /// That is, given a list `A <-> B <-> C`, `unlink_prev(C)` will return `B` and 162 | /// also unlink `C` to become `A <-> B C`. 163 | /// 164 | /// If the node was a lone child, returns `None`. 165 | /// 166 | /// Parent/child pointers are untouched. 167 | unsafe fn unlink_prev( 168 | mut node: NonNull>, 169 | ) -> Option>> { 170 | debug_assert_eq!(node.as_ref().next, None); 171 | let mut prev = node.as_mut().prev.take()?; 172 | debug_assert_eq!(prev.as_ref().next, Some(node)); 173 | prev.as_mut().next = None; 174 | Some(prev) 175 | } 176 | 177 | /// Merge together a child list. Each child in the child list is expected to 178 | /// have an equal `parent`. Returns the new merged root, whose `parent` will be unset. 179 | unsafe fn merge_children( 180 | first_child: NonNull>, 181 | ) -> NonNull> { 182 | let common_parent = first_child.as_ref().parent; 183 | debug_assert!(common_parent.is_some()); 184 | 185 | // Traverse the children right-to-left. This is important for the analysis 186 | // to work. Reading: "Pairing heaps: the forward variant", 187 | // https://arxiv.org/pdf/1709.01152.pdf 188 | let mut node = last_child(first_child); 189 | let mut current = None; 190 | // Loop invariant: `node` is the first unprocessed child, `current` 191 | // is the merged result of all processed children. 192 | loop { 193 | // All nodes in the list should have the same parent. 194 | let node_parent = node.as_mut().parent.take(); 195 | debug_assert_eq!(node_parent, common_parent); 196 | 197 | // Grab the last two unprocessed elements. 198 | let mut prev = if let Some(prev) = unlink_prev(node) { 199 | prev 200 | } else { 201 | // Odd case. 202 | return maybe_meld(current, node); 203 | }; 204 | 205 | // All nodes in the list should have the same parent. 206 | let prev_parent = prev.as_mut().parent.take(); 207 | debug_assert_eq!(prev_parent, common_parent); 208 | 209 | // Unlink `prev` from `prev.prev`. 210 | let prev_prev = unlink_prev(prev); 211 | 212 | // Meld the pair, then meld it into the accumulator. 213 | let cur = maybe_meld(current, meld(prev, node)); 214 | 215 | if let Some(prev_prev) = prev_prev { 216 | node = prev_prev; 217 | current = Some(cur); 218 | continue; 219 | } else { 220 | // Even case. 221 | return cur; 222 | } 223 | } 224 | } 225 | 226 | /// An intrusive min-heap of nodes, where each node carries associated data 227 | /// of type `T`. 
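///
/// A minimal usage sketch (marked `ignore` since this is an internal module;
/// the caller must guarantee that nodes are neither moved nor dropped while
/// they are linked into the heap):
///
/// ```ignore
/// let mut a = HeapNode::new(3u8);
/// let mut b = HeapNode::new(1u8);
/// let mut heap = PairingHeap::new();
/// unsafe {
///     heap.insert(&mut a);
///     heap.insert(&mut b);
///     // `b` holds the smaller value and therefore becomes the root.
///     assert_eq!(heap.peek_min(), Some((&mut b).into()));
///     // Nodes must be unlinked again before they go out of scope.
///     heap.remove(&mut b);
///     heap.remove(&mut a);
/// }
/// ```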
228 | #[derive(Debug)]
229 | pub struct PairingHeap<T> {
230 |     root: Option<NonNull<HeapNode<T>>>,
231 | }
232 | 
233 | impl<T: Ord> PairingHeap<T> {
234 |     /// Creates an empty heap
235 |     pub fn new() -> Self {
236 |         PairingHeap::<T> { root: None }
237 |     }
238 | 
239 |     /// Adds a node to the heap.
240 |     /// Safety: This function is only safe as long as `node` is guaranteed to
241 |     /// get removed from the heap before it gets moved or dropped.
242 |     /// In addition, `node` may not be added to any other heap before
243 |     /// it is removed from the current one.
244 |     pub unsafe fn insert(&mut self, node: &mut HeapNode<T>) {
245 |         // The node should not already be in a heap.
246 |         debug_assert!(node.is_root());
247 |         debug_assert_eq!(node.first_child, None);
248 | 
249 |         if let Some(root) = self.root {
250 |             self.root = Some(meld(root, node.into()));
251 |         } else {
252 |             self.root = Some(node.into());
253 |         }
254 |     }
255 | 
256 |     /// Returns the smallest element in the heap without removing it.
257 |     /// The function is only safe as long as valid pointers are stored inside
258 |     /// the heap.
259 |     /// The returned pointer is only guaranteed to be valid as long as the heap
260 |     /// is not mutated.
261 |     pub fn peek_min(&self) -> Option<NonNull<HeapNode<T>>> {
262 |         self.root
263 |     }
264 | 
265 |     /// Removes the given node from the heap.
266 |     /// The node must be a member of this heap, and not a member of any other
267 |     /// heap.
268 |     pub unsafe fn remove(&mut self, node: &mut HeapNode<T>) {
269 |         let parent = node.parent.take();
270 |         if let Some(mut parent) = parent {
271 |             // Unlink this node from its parent.
272 |             if let Some(mut prev) = node.prev {
273 |                 prev.as_mut().next = node.next;
274 |             } else {
275 |                 parent.as_mut().first_child = node.next;
276 |             }
277 |             if let Some(mut next) = node.next {
278 |                 next.as_mut().prev = node.prev;
279 |             }
280 |             node.next = None;
281 |             node.prev = None;
282 |         } else {
283 |             debug_assert_eq!(node.next, None);
284 |             debug_assert_eq!(node.prev, None);
285 |             debug_assert_eq!(self.root, Some(node.into()));
286 |             self.root = None;
287 |         }
288 |         if let Some(first_child) = node.first_child.take() {
289 |             // Merge together the children.
290 |             let children = merge_children(first_child);
291 |             // Add the children back into the parent.
292 |             if let Some(parent) = parent {
293 |                 // The heap property is preserved because we had `parent.data`
294 |                 // <= `node.data`, and `node.data` <= `child.data` for all
295 |                 // children.
296 | add_child(parent, children); 297 | } else { 298 | self.root = Some(children); 299 | } 300 | } 301 | } 302 | } 303 | 304 | #[cfg(all(test, feature = "std"))] 305 | mod tests { 306 | use super::{HeapNode, PairingHeap}; 307 | use core::ptr::NonNull; 308 | 309 | // Recursively check the provided node and all descendants for: 310 | // - pointer consistency: parent pointers and next/prev 311 | // - the heap property: `node.data <= child.data` for all children 312 | unsafe fn validate_heap_node( 313 | node: &HeapNode, 314 | parent: Option<&HeapNode>, 315 | ) { 316 | assert_eq!(node.parent, parent.map(NonNull::from)); 317 | if let Some(p) = parent { 318 | assert!(p.data <= node.data); 319 | } 320 | if let Some(prev) = node.prev { 321 | assert_eq!(prev.as_ref().next, Some(node.into())); 322 | } 323 | if let Some(next) = node.next { 324 | assert_eq!(next.as_ref().prev, Some(node.into())); 325 | } 326 | let mut child = node.first_child; 327 | while let Some(c) = child { 328 | validate_heap_node(c.as_ref(), Some(node)); 329 | child = c.as_ref().next; 330 | } 331 | } 332 | 333 | fn validate_heap(heap: &PairingHeap) { 334 | if let Some(root) = heap.root { 335 | // This is also sufficient to check that `heap.root` is indeed a 336 | // minimum element of the heap. 337 | unsafe { 338 | validate_heap_node(root.as_ref(), None); 339 | } 340 | } 341 | } 342 | 343 | #[test] 344 | fn insert_and_remove() { 345 | // This test exhaustively covers every possible schedule of inserting, 346 | // then removing, each of five different nodes from the heap. 347 | #[derive(Copy, Clone, Debug)] 348 | enum Action { 349 | Insert(u8), 350 | Remove(u8), 351 | } 352 | 353 | fn generate_schedules( 354 | current: &mut Vec, 355 | available: &mut Vec, 356 | f: fn(&[Action]), 357 | ) { 358 | for i in 0..available.len() { 359 | let action = available.swap_remove(i); 360 | current.push(action); 361 | f(current); 362 | if let Action::Insert(j) = action { 363 | available.push(Action::Remove(j)); 364 | } 365 | generate_schedules(current, available, f); 366 | if let Action::Insert(_) = action { 367 | available.pop(); 368 | } 369 | current.pop(); 370 | // the opposite of `swap_remove` 371 | available.push(action); 372 | let len = available.len(); 373 | available.swap(i, len - 1); 374 | } 375 | } 376 | let max = if cfg!(miri) { 377 | // Miri is really slow, make things easier. 378 | 3 379 | } else { 380 | // 5 runs in a reasonable amount of time but still exercises 381 | // interesting cases. 382 | 5 383 | }; 384 | generate_schedules( 385 | &mut vec![], 386 | &mut (0..max).map(Action::Insert).collect(), 387 | |schedule| unsafe { 388 | let mut nodes = [ 389 | HeapNode::new(0u8), 390 | HeapNode::new(1), 391 | HeapNode::new(2), 392 | HeapNode::new(3), 393 | HeapNode::new(4), 394 | ]; 395 | let mut heap = PairingHeap::new(); 396 | for action in schedule { 397 | match *action { 398 | Action::Insert(n) => { 399 | heap.insert(&mut nodes[n as usize]); 400 | validate_heap(&heap); 401 | } 402 | Action::Remove(n) => { 403 | heap.remove(&mut nodes[n as usize]); 404 | assert!(nodes[n as usize].is_root()); 405 | assert_eq!(nodes[n as usize].first_child, None); 406 | validate_heap(&heap); 407 | } 408 | } 409 | } 410 | }, 411 | ); 412 | } 413 | 414 | #[test] 415 | fn equal_values() { 416 | // Check that things behave properly in the presence of equal values. 
417 |         unsafe {
418 |             let mut nodes = [
419 |                 HeapNode::new(0u8),
420 |                 HeapNode::new(0),
421 |                 HeapNode::new(0),
422 |                 HeapNode::new(0),
423 |                 HeapNode::new(0),
424 |             ];
425 |             let mut heap = PairingHeap::new();
426 |             for node in &mut nodes {
427 |                 heap.insert(node);
428 |                 validate_heap(&heap);
429 |             }
430 |             for _ in 0..5 {
431 |                 heap.remove(heap.peek_min().unwrap().as_mut());
432 |                 validate_heap(&heap);
433 |             }
434 |             assert_eq!(heap.peek_min(), None);
435 |         }
436 |     }
437 | }
438 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Synchronization primitives and utilities based on intrusive collections.
2 | //!
3 | //! This crate provides a variety of `Futures`-based and `async/await` compatible
4 | //! types that are based on the idea of intrusive collections:
5 | //! - Channels in a variety of flavors:
6 | //!   - Oneshot
7 | //!   - Multi-Producer Multi-Consumer (MPMC)
8 | //!   - State Broadcast
9 | //! - Synchronization Primitives:
10 | //!   - Manual Reset Event
11 | //!   - Mutex
12 | //!   - Semaphore
13 | //! - A timer
14 | //!
15 | //! ## Intrusive collections?
16 | //!
17 | //! In an intrusive collection, the elements that want to get stored inside the
18 | //! collection provide the means to store themselves inside the collection.
19 | //! E.g. in an intrusive linked list, each element that gets stored inside the
20 | //! list contains a pointer field that points to the next list element. E.g.
21 | //!
22 | //! ```
23 | //! // The element which is intended to be stored inside an intrusive container
24 | //! struct ListElement {
25 | //!     data: u32,
26 | //!     next: *mut ListElement,
27 | //! }
28 | //!
29 | //! // The intrusive container
30 | //! struct List {
31 | //!     head: *mut ListElement,
32 | //! }
33 | //! ```
34 | //!
35 | //! The advantage here is that the intrusive collection (here: the list) requires
36 | //! only a fixed amount of memory. In this case it only needs a pointer to the
37 | //! first element.
38 | //!
39 | //! The list container itself has a fixed size of a single pointer independent
40 | //! of the number of stored elements.
41 | //!
42 | //! Intrusive lists are often used in low-level code like in operating system
43 | //! kernels. E.g. they can be used for storing elements that represent threads
44 | //! that are blocked and waiting on a queue. In that case the stored elements can
45 | //! be on the call stack of the caller of each blocked thread, since the
46 | //! call stack won't change as long as the thread is blocked.
47 | //!
48 | //! ### Application in Futures
49 | //!
50 | //! This library brings this idea into the world of Rust's `Future`s. Due to the
51 | //! addition of `Pin`ning, the address of a certain `Future` is not allowed to
52 | //! change between the first call to `poll()` and when the `Future` is dropped.
53 | //! This means the data inside the `Future` itself can be inserted into an
54 | //! intrusive container. If the call to `Future::poll()` is not immediately
55 | //! ready, some parts of the `Future` itself are registered in the type which
56 | //! yielded the `Future`. Each `Future` can store a `Waker`. When the original
57 | //! type becomes ready, it can iterate through the list of registered `Future`s,
58 | //! wake up associated tasks, and potentially remove them from its queue.
59 | //!
60 | //! The result is that the future-yielding type is not required to copy an
61 | //! arbitrary number of `Waker` objects into itself, and thereby does not require
62 | //! dynamic memory for this task.
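//!
//! For example, a single event can have many pending waiters without the event
//! ever allocating (a short sketch; the `futures` crate is only used here to
//! drive the example):
//!
//! ```
//! use futures::{executor::block_on, join};
//! use futures_intrusive::sync::LocalManualResetEvent;
//!
//! let event = LocalManualResetEvent::new(false);
//! // Each wait future embeds its own wait-queue node, so the event itself
//! // stores just a list head, independent of the number of waiters.
//! let (a, b, c) = (event.wait(), event.wait(), event.wait());
//! event.set();
//! block_on(async { join!(a, b, c); });
//! ```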
63 | //!
64 | //! When a `Future` gets destructed/dropped, it must make sure to remove itself
65 | //! from any collections that refer to it to avoid invalid memory accesses.
66 | //!
67 | //! This library implements common synchronization primitives for use in
68 | //! asynchronous code based on this concept.
69 | //!
70 | //! The implementation requires the usage of a fair chunk of `unsafe`
71 | //! annotations. However the provided user-level API is intended to be fully safe.
72 | //!
73 | //! ## Features of this library
74 | //!
75 | //! The following types are currently implemented:
76 | //! - Channels (oneshot and multi-producer-multi-consumer)
77 | //! - Synchronization primitives (async mutexes and events)
78 | //! - Timers
79 | //!
80 | //! ## Design goals for the library
81 | //!
82 | //! - Provide implementations of common synchronization primitives in a platform
83 | //!   independent fashion.
84 | //! - Support `no-std` environments. As many types as possible are also provided
85 | //!   for `no-std` environments. The library should boost the ability to use
86 | //!   async Rust code in environments like:
87 | //!   - Microcontrollers (RTOS and bare-metal)
88 | //!   - Kernels
89 | //!   - Drivers
90 | //! - Avoid dynamic memory allocations at runtime. After objects from this
91 | //!   library have been created, they should not require allocation of any
92 | //!   further memory at runtime. E.g. they should not need to allocate memory
93 | //!   for each call to an asynchronous function or each time a new task accesses
94 | //!   the same object in parallel.
95 | //! - Offer familiar APIs.
96 | //!   The library tries to mimic the APIs of existing Rust libraries like the
97 | //!   standard library and `futures-rs` as closely as possible.
98 | //!
99 | //! ## Non goals
100 | //!
101 | //! - Provide IO primitives (like sockets), or platform specific implementations.
102 | //! - Reach the highest possible performance in terms of throughput and latency.
103 | //!   While code in this library is optimized for performance, portability
104 | //!   and deterministic memory usage are more important goals.
105 | //! - Provide future wrappers for platform-specific APIs.
106 | //!
107 | //! ## Local, Non-local and shared flavors
108 | //!
109 | //! The library provides types in a variety of flavors:
110 | //!
111 | //! - A local flavor (e.g. [`channel::LocalChannel`])
112 | //! - A non-local flavor (e.g. [`channel::Channel`])
113 | //! - A shared flavor (e.g. [`channel::shared::Sender`])
114 | //! - A generic flavor (e.g. [`channel::GenericChannel`] and
115 | //!   [`channel::shared::GenericSender`])
116 | //!
117 | //! The difference between these types lies in their thread-safety. The non-local
118 | //! flavors of types can be accessed from multiple threads (and thereby also
119 | //! futures tasks) concurrently. This means they implement the `Sync` trait in
120 | //! addition to the `Send` trait.
121 | //! The local flavors only implement the `Send` trait.
122 | //!
123 | //! ### Local flavor
124 | //!
125 | //! The local flavors will require no internal synchronization (e.g. internal
126 | //! Mutexes) and can therefore be provided for all platforms (including `no-std`).
127 | //! Due to the lack of required synchronization, they are also very fast.
128 | //!
129 | //! It might seem counter-intuitive to provide synchronization primitives that
130 | //! only work within a single task. However there are a variety of applications
131 | //! where these can be used to coordinate sub-tasks (futures that are polled on
132 | //! a single task concurrently).
133 | //!
134 | //! The following example demonstrates this use-case:
135 | //!
136 | //! ```
137 | //! # use futures::join;
138 | //! # use futures_intrusive::sync::LocalManualResetEvent;
139 | //! async fn async_fn() {
140 | //!     let event = LocalManualResetEvent::new(false);
141 | //!     let task_a = async {
142 | //!         // Wait for the event
143 | //!         event.wait().await;
144 | //!         // Do something with the knowledge that task_b reached a certain state
145 | //!     };
146 | //!     let task_b = async {
147 | //!         // Some complex asynchronous workflow here
148 | //!         // ...
149 | //!         // Signal task_a
150 | //!         event.set();
151 | //!     };
152 | //!     join!(task_a, task_b);
153 | //! }
154 | //! ```
155 | //!
156 | //! ### Non-local flavor
157 | //!
158 | //! The non-local flavors can be used between arbitrary tasks and threads. They
159 | //! use internal synchronization for this in the form of an embedded
160 | //! [`parking_lot::Mutex`].
161 | //!
162 | //! The non-local flavors are only available in `alloc` environments.
163 | //!
164 | //! ### Shared flavor
165 | //!
166 | //! For some types a shared flavor is provided. Non-local flavors of types are
167 | //! `Sync`, but they still can only be shared by reference between various tasks.
168 | //!
169 | //! Shared flavors are also `Sync`, but the types additionally implement the
170 | //! `Clone` trait, which allows duplicating the object, and passing ownership of
171 | //! it to a different task. These types allow avoiding references (and thereby
172 | //! lifetimes) in some scenarios, which makes them more convenient to use. The
173 | //! types also return `Future`s which do not have an associated lifetime. This
174 | //! allows using those types as implementations of traits without the need for
175 | //! generic associated types (GATs).
176 | //!
177 | //! Due to the requirement of atomic reference counting, these types are
178 | //! currently only available for `alloc` environments.
179 | //!
180 | //! ### Generic flavor
181 | //!
182 | //! The generic flavors of provided types are parameterized around a
183 | //! [`lock_api::RawMutex`] type. These form the base for the non-local and shared
184 | //! flavors which simply parameterize the generic flavor in either a
185 | //! non-thread-safe or thread-safe fashion.
186 | //!
187 | //! Users can directly use the generic flavors to adapt the provided thread-safe
188 | //! types for use in `no-std` environments.
189 | //!
190 | //! E.g. by providing a custom [`lock_api::RawMutex`]
191 | //! implementation, the following platforms can be supported, as sketched below:
192 | //!
193 | //! - For RTOS platforms, RTOS-specific mutexes can be wrapped.
194 | //! - For kernel development, spinlock based mutexes can be created.
195 | //! - For embedded development, mutexes which just disable interrupts can be
196 | //!   utilized.
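//!
//! For instance, a minimal spinlock-backed flavor could look like the
//! following sketch (illustrative only - the memory orderings follow the
//! usual acquire/release pattern, and a real RTOS or kernel port would
//! rather wrap its native lock):
//!
//! ```
//! use core::sync::atomic::{AtomicBool, Ordering};
//! use lock_api::{GuardSend, RawMutex};
//!
//! pub struct RawSpinlock(AtomicBool);
//!
//! unsafe impl RawMutex for RawSpinlock {
//!     const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
//!     type GuardMarker = GuardSend;
//!
//!     fn lock(&self) {
//!         // Spin until the lock is acquired.
//!         while !self.try_lock() {
//!             core::hint::spin_loop();
//!         }
//!     }
//!
//!     fn try_lock(&self) -> bool {
//!         self.0
//!             .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
//!             .is_ok()
//!     }
//!
//!     unsafe fn unlock(&self) {
//!         self.0.store(false, Ordering::Release);
//!     }
//! }
//!
//! // All generic flavors can now be parameterized with the custom lock:
//! type SpinlockEvent =
//!     futures_intrusive::sync::GenericManualResetEvent<RawSpinlock>;
//! # let event = SpinlockEvent::new(false);
//! # event.set();
//! # assert!(event.is_set());
//! ```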
197 | //!
198 | //!
199 | //! ## Relation to types in other libraries
200 | //!
201 | //! Other libraries (e.g. `futures-rs` and `tokio`) provide many primitives that
202 | //! are comparable feature-wise to the types in this library.
203 | //!
204 | //! The most important differences are:
205 | //! - This library has a bigger focus on `no-std` environments, and does not
206 | //!   only try to provide an implementation for `alloc` or `std`.
207 | //! - The types in this library do not require dynamic memory allocation for
208 | //!
waking up an arbitrary number of tasks waiting on a particular 209 | //! `Future`. Other libraries typically require heap-allocated nodes of 210 | //! growing vectors for handling a varying number of tasks. 211 | //! - The `Future`s produced by this library are all `!Unpin`, which might make 212 | //! them less ergonomic to use. 213 | //! 214 | 215 | #![cfg_attr(not(feature = "std"), no_std)] 216 | #![warn(missing_docs, missing_debug_implementations)] 217 | #![deny(bare_trait_objects)] 218 | 219 | #[cfg(feature = "alloc")] 220 | extern crate alloc; 221 | 222 | mod noop_lock; 223 | use noop_lock::NoopLock; 224 | 225 | pub mod buffer; 226 | 227 | #[allow(dead_code)] 228 | mod intrusive_double_linked_list; 229 | mod intrusive_pairing_heap; 230 | 231 | pub mod channel; 232 | pub mod sync; 233 | pub mod timer; 234 | 235 | mod utils; 236 | -------------------------------------------------------------------------------- /src/noop_lock.rs: -------------------------------------------------------------------------------- 1 | //! An unsafe (non-thread-safe) lock, equivalent to UnsafeCell 2 | 3 | use core::marker::PhantomData; 4 | use lock_api::{GuardSend, RawMutex}; 5 | 6 | /// An unsafe (non-thread-safe) lock, equivalent to UnsafeCell 7 | #[derive(Debug)] 8 | pub struct NoopLock { 9 | /// Assigned in order to make the type !Sync 10 | _phantom: PhantomData<*mut ()>, 11 | } 12 | 13 | unsafe impl RawMutex for NoopLock { 14 | const INIT: NoopLock = NoopLock { 15 | _phantom: PhantomData, 16 | }; 17 | 18 | type GuardMarker = GuardSend; 19 | 20 | fn lock(&self) {} 21 | 22 | fn try_lock(&self) -> bool { 23 | true 24 | } 25 | 26 | unsafe fn unlock(&self) {} 27 | } 28 | -------------------------------------------------------------------------------- /src/sync/manual_reset_event.rs: -------------------------------------------------------------------------------- 1 | //! An asynchronously awaitable event for signalization between tasks 2 | 3 | use crate::{ 4 | intrusive_double_linked_list::{LinkedList, ListNode}, 5 | utils::update_waker_ref, 6 | NoopLock, 7 | }; 8 | use core::pin::Pin; 9 | use futures_core::{ 10 | future::{FusedFuture, Future}, 11 | task::{Context, Poll, Waker}, 12 | }; 13 | use lock_api::{Mutex, RawMutex}; 14 | 15 | /// Tracks how the future had interacted with the event 16 | #[derive(PartialEq)] 17 | enum PollState { 18 | /// The task has never interacted with the event. 19 | New, 20 | /// The task was added to the wait queue at the event. 21 | Waiting, 22 | /// The task has been polled to completion. 23 | Done, 24 | } 25 | 26 | /// Tracks the WaitForEventFuture waiting state. 27 | /// Access to this struct is synchronized through the mutex in the Event. 
28 | struct WaitQueueEntry {
29 |     /// The task handle of the waiting task
30 |     task: Option<Waker>,
31 |     /// Current polling state
32 |     state: PollState,
33 | }
34 | 
35 | impl WaitQueueEntry {
36 |     /// Creates a new WaitQueueEntry
37 |     fn new() -> WaitQueueEntry {
38 |         WaitQueueEntry {
39 |             task: None,
40 |             state: PollState::New,
41 |         }
42 |     }
43 | }
44 | 
45 | /// Internal state of the `ManualResetEvent` pair above
46 | struct EventState {
47 |     is_set: bool,
48 |     waiters: LinkedList<WaitQueueEntry>,
49 | }
50 | 
51 | impl EventState {
52 |     fn new(is_set: bool) -> EventState {
53 |         EventState {
54 |             is_set,
55 |             waiters: LinkedList::new(),
56 |         }
57 |     }
58 | 
59 |     fn reset(&mut self) {
60 |         self.is_set = false;
61 |     }
62 | 
63 |     fn set(&mut self) {
64 |         if !self.is_set {
65 |             self.is_set = true;
66 | 
67 |             // Wake up all waiters.
68 |             // This happens inside the lock to make cancellation reliable:
69 |             // if we were to access the waiters outside of the lock, the
70 |             // pointers might no longer be valid.
71 |             // Typically this shouldn't be an issue, since waking a task should
72 |             // only move it from the blocked into the ready state and not have
73 |             // further side effects.
74 | 
75 |             // Use a reverse iterator, so that the oldest waiter gets
76 |             // scheduled first.
77 |             self.waiters.reverse_drain(|waiter| {
78 |                 if let Some(handle) = waiter.task.take() {
79 |                     handle.wake();
80 |                 }
81 |                 waiter.state = PollState::Done;
82 |             });
83 |         }
84 |     }
85 | 
86 |     fn is_set(&self) -> bool {
87 |         self.is_set
88 |     }
89 | 
90 |     /// Checks if the event is set. If it is, this returns immediately.
91 |     /// If the event isn't set, the WaitForEventFuture gets added to the wait
92 |     /// queue at the event, and will be signalled once ready.
93 |     /// This function is only safe as long as the `wait_node`s address is
94 |     /// guaranteed to be stable until it gets removed from the queue.
95 |     unsafe fn try_wait(
96 |         &mut self,
97 |         wait_node: &mut ListNode<WaitQueueEntry>,
98 |         cx: &mut Context<'_>,
99 |     ) -> Poll<()> {
100 |         match wait_node.state {
101 |             PollState::New => {
102 |                 if self.is_set {
103 |                     // The event is already signaled
104 |                     wait_node.state = PollState::Done;
105 |                     Poll::Ready(())
106 |                 } else {
107 |                     // Add the task to the wait queue
108 |                     wait_node.task = Some(cx.waker().clone());
109 |                     wait_node.state = PollState::Waiting;
110 |                     self.waiters.add_front(wait_node);
111 |                     Poll::Pending
112 |                 }
113 |             }
114 |             PollState::Waiting => {
115 |                 // The WaitForEventFuture is already in the queue.
116 |                 // The event can't have been set, since this would change the
117 |                 // wait state inside the mutex. However the caller might have
118 |                 // passed a different `Waker`. In this case we need to update it.
119 |                 update_waker_ref(&mut wait_node.task, cx);
120 |                 Poll::Pending
121 |             }
122 |             PollState::Done => {
123 |                 // We have been woken up by the event.
124 |                 // This does not guarantee that the event is still set. It could
125 |                 // have been reset in the meantime.
126 |                 Poll::Ready(())
127 |             }
128 |         }
129 |     }
130 | 
131 |     fn remove_waiter(&mut self, wait_node: &mut ListNode<WaitQueueEntry>) {
132 |         // The WaitForEventFuture only needs to get removed if it has been added
133 |         // to the wait queue of the Event. This happened in the PollState::Waiting case.
134 |         if let PollState::Waiting = wait_node.state {
135 |             // Safety: Due to the state, we know that the node must be part
136 |             // of the waiter list
137 |             if !unsafe { self.waiters.remove(wait_node) } {
138 |                 // Panic if the address isn't found. This can only happen if the contract
139 |                 // was violated, e.g. the WaitQueueEntry got moved after the initial poll.
140 |                 panic!("Future could not be removed from wait queue");
141 |             }
142 |             wait_node.state = PollState::Done;
143 |         }
144 |     }
145 | }
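// State transitions of a `WaitQueueEntry` (a summary of `try_wait`, `set`
// and `remove_waiter` above):
//
//   New -----(event already set)---------------------> Done
//   New -----(event unset: enqueue + store waker)----> Waiting
//   Waiting -(set(): drain queue, wake task)---------> Done
//   Waiting -(future dropped: remove_waiter)---------> Done
//
// `Done` only means that the future was signalled or detached; the event
// may already have been `reset()` again by the time the task actually runs.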
146 | 
147 | /// A synchronization primitive which can be either in the set or reset state.
148 | ///
149 | /// Tasks can wait for the event to get set by obtaining a Future via `wait`.
150 | /// This Future will get fulfilled when the event has been set.
151 | pub struct GenericManualResetEvent<MutexType: RawMutex> {
152 |     inner: Mutex<MutexType, EventState>,
153 | }
154 | 
155 | // The Event can be sent to other threads as long as it's not borrowed
156 | unsafe impl<MutexType: RawMutex + Send> Send
157 |     for GenericManualResetEvent<MutexType>
158 | {
159 | }
160 | // The Event is thread-safe as long as the utilized Mutex is thread-safe
161 | unsafe impl<MutexType: RawMutex + Sync> Sync
162 |     for GenericManualResetEvent<MutexType>
163 | {
164 | }
165 | 
166 | impl<MutexType: RawMutex> core::fmt::Debug
167 |     for GenericManualResetEvent<MutexType>
168 | {
169 |     fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
170 |         f.debug_struct("ManualResetEvent").finish()
171 |     }
172 | }
173 | 
174 | impl<MutexType: RawMutex> GenericManualResetEvent<MutexType> {
175 |     /// Creates a new ManualResetEvent in the given state
176 |     pub fn new(is_set: bool) -> GenericManualResetEvent<MutexType> {
177 |         GenericManualResetEvent {
178 |             inner: Mutex::<MutexType, EventState>::new(EventState::new(is_set)),
179 |         }
180 |     }
181 | 
182 |     /// Sets the event.
183 |     ///
184 |     /// Setting the event will notify all pending waiters.
185 |     pub fn set(&self) {
186 |         self.inner.lock().set()
187 |     }
188 | 
189 |     /// Resets the event.
190 |     pub fn reset(&self) {
191 |         self.inner.lock().reset()
192 |     }
193 | 
194 |     /// Returns whether the event is set
195 |     pub fn is_set(&self) -> bool {
196 |         self.inner.lock().is_set()
197 |     }
198 | 
199 |     /// Returns a future that gets fulfilled when the event is set.
200 |     pub fn wait(&self) -> GenericWaitForEventFuture<MutexType> {
201 |         GenericWaitForEventFuture {
202 |             event: Some(self),
203 |             wait_node: ListNode::new(WaitQueueEntry::new()),
204 |         }
205 |     }
206 | 
207 |     unsafe fn try_wait(
208 |         &self,
209 |         wait_node: &mut ListNode<WaitQueueEntry>,
210 |         cx: &mut Context<'_>,
211 |     ) -> Poll<()> {
212 |         self.inner.lock().try_wait(wait_node, cx)
213 |     }
214 | 
215 |     fn remove_waiter(&self, wait_node: &mut ListNode<WaitQueueEntry>) {
216 |         self.inner.lock().remove_waiter(wait_node)
217 |     }
218 | }
219 | 
220 | /// A Future that is resolved once the corresponding ManualResetEvent has been set
221 | #[must_use = "futures do nothing unless polled"]
222 | pub struct GenericWaitForEventFuture<'a, MutexType: RawMutex> {
223 |     /// The ManualResetEvent that is associated with this WaitForEventFuture
224 |     event: Option<&'a GenericManualResetEvent<MutexType>>,
225 |     /// Node for waiting at the event
226 |     wait_node: ListNode<WaitQueueEntry>,
227 | }
228 | 
229 | // Safety: Futures can be sent between threads as long as the underlying
230 | // event is thread-safe (Sync), which allows polling, registering and
231 | // unregistering from a different thread.
232 | unsafe impl<'a, MutexType: RawMutex + Sync> Send
233 |     for GenericWaitForEventFuture<'a, MutexType>
234 | {
235 | }
236 | 
237 | impl<'a, MutexType: RawMutex> core::fmt::Debug
238 |     for GenericWaitForEventFuture<'a, MutexType>
239 | {
240 |     fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
241 |         f.debug_struct("GenericWaitForEventFuture").finish()
242 |     }
243 | }
244 | 
245 | impl<'a, MutexType: RawMutex> Future
246 |     for GenericWaitForEventFuture<'a, MutexType>
247 | {
248 |     type Output = ();
249 | 
250 |     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
251 |         // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs.
252 |         // However this didn't seem to work for some borrow checker reasons.
253 | 
254 |         // Safety: The next operations are safe, because Pin promises us that
255 |         // the address of the wait queue entry inside this future is stable,
256 |         // and we don't move any fields inside the future until it gets dropped.
257 |         let mut_self: &mut GenericWaitForEventFuture<MutexType> =
258 |             unsafe { Pin::get_unchecked_mut(self) };
259 | 
260 |         let event = mut_self
261 |             .event
262 |             .expect("polled WaitForEventFuture after completion");
263 | 
264 |         let poll_res = unsafe { event.try_wait(&mut mut_self.wait_node, cx) };
265 | 
266 |         if let Poll::Ready(()) = poll_res {
267 |             // The event was set
268 |             mut_self.event = None;
269 |         }
270 | 
271 |         poll_res
272 |     }
273 | }
274 | 
275 | impl<'a, MutexType: RawMutex> FusedFuture
276 |     for GenericWaitForEventFuture<'a, MutexType>
277 | {
278 |     fn is_terminated(&self) -> bool {
279 |         self.event.is_none()
280 |     }
281 | }
282 | 
283 | impl<'a, MutexType: RawMutex> Drop
284 |     for GenericWaitForEventFuture<'a, MutexType>
285 | {
286 |     fn drop(&mut self) {
287 |         // If this WaitForEventFuture has been polled and it was added to the
288 |         // wait queue at the event, it must be removed before dropping.
289 |         // Otherwise the event would access invalid memory.
290 |         if let Some(ev) = self.event {
291 |             ev.remove_waiter(&mut self.wait_node);
292 |         }
293 |     }
294 | }
295 | 
296 | // Export a non thread-safe version using NoopLock
297 | 
298 | /// A [`GenericManualResetEvent`] which is not thread-safe.
299 | pub type LocalManualResetEvent = GenericManualResetEvent<NoopLock>;
300 | /// A [`GenericWaitForEventFuture`] for [`LocalManualResetEvent`].
301 | pub type LocalWaitForEventFuture<'a> = GenericWaitForEventFuture<'a, NoopLock>;
302 | 
303 | #[cfg(feature = "std")]
304 | mod if_std {
305 |     use super::*;
306 | 
307 |     // Export a thread-safe version using parking_lot::RawMutex
308 | 
309 |     /// A [`GenericManualResetEvent`] implementation backed by [`parking_lot`].
310 |     pub type ManualResetEvent = GenericManualResetEvent<parking_lot::RawMutex>;
311 |     /// A [`GenericWaitForEventFuture`] for [`ManualResetEvent`].
312 |     pub type WaitForEventFuture<'a> =
313 |         GenericWaitForEventFuture<'a, parking_lot::RawMutex>;
314 | }
315 | 
316 | #[cfg(feature = "std")]
317 | pub use self::if_std::*;
--------------------------------------------------------------------------------
/src/sync/mod.rs:
--------------------------------------------------------------------------------
1 | //! Asynchronous synchronization primitives based on intrusive collections.
2 | //!
3 | //! This module provides various primitives for synchronizing concurrently
4 | //! executing futures.
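//!
//! As a short example, the async `Mutex` (a sketch; it assumes the `std`
//! feature and the two-argument constructor, whose second parameter enables
//! fair queuing of waiters):
//!
//! ```
//! # #[cfg(feature = "std")] {
//! use futures::executor::block_on;
//! use futures_intrusive::sync::Mutex;
//!
//! let mutex = Mutex::new(0i32, true);
//! block_on(async {
//!     // `lock()` returns a future; awaiting it yields a guard.
//!     let mut guard = mutex.lock().await;
//!     *guard += 1;
//! });
//! # }
//! ```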
5 | 
6 | mod manual_reset_event;
7 | 
8 | pub use self::manual_reset_event::{
9 |     GenericManualResetEvent, GenericWaitForEventFuture, LocalManualResetEvent,
10 |     LocalWaitForEventFuture,
11 | };
12 | 
13 | #[cfg(feature = "std")]
14 | pub use self::manual_reset_event::{ManualResetEvent, WaitForEventFuture};
15 | 
16 | mod mutex;
17 | 
18 | pub use self::mutex::{
19 |     GenericMutex, GenericMutexGuard, GenericMutexLockFuture, LocalMutex,
20 |     LocalMutexGuard, LocalMutexLockFuture,
21 | };
22 | 
23 | #[cfg(feature = "std")]
24 | pub use self::mutex::{Mutex, MutexGuard, MutexLockFuture};
25 | 
26 | mod semaphore;
27 | 
28 | pub use self::semaphore::{
29 |     GenericSemaphore, GenericSemaphoreAcquireFuture, GenericSemaphoreReleaser,
30 |     LocalSemaphore, LocalSemaphoreAcquireFuture, LocalSemaphoreReleaser,
31 | };
32 | 
33 | #[cfg(feature = "alloc")]
34 | pub use self::semaphore::{
35 |     GenericSharedSemaphore, GenericSharedSemaphoreAcquireFuture,
36 |     GenericSharedSemaphoreReleaser,
37 | };
38 | 
39 | #[cfg(feature = "std")]
40 | pub use self::semaphore::{
41 |     Semaphore, SemaphoreAcquireFuture, SemaphoreReleaser, SharedSemaphore,
42 |     SharedSemaphoreAcquireFuture, SharedSemaphoreReleaser,
43 | };
--------------------------------------------------------------------------------
/src/timer/clock.rs:
--------------------------------------------------------------------------------
1 | //! Monotonic clocks
2 | 
3 | use core::sync::atomic::{AtomicUsize, Ordering};
4 | 
5 | /// A monotonic source of time.
6 | ///
7 | /// Clocks must always return increasing timestamps.
8 | pub trait Clock: Sync {
9 |     /// Returns a timestamp in milliseconds which represents the current time
10 |     /// according to the clock.
11 |     ///
12 |     /// Clocks must only return timestamps that are greater than or equal to
13 |     /// what they returned on the last call to `now()`.
14 |     fn now(&self) -> u64;
15 | }
16 | 
17 | /// A [`Clock`] which can be set to arbitrary timestamps for testing purposes.
18 | ///
19 | /// It can be used in a test case as demonstrated in the following example:
20 | /// ```
21 | /// use futures_intrusive::timer::MockClock;
22 | /// # #[cfg(feature = "std")]
23 | /// # use futures_intrusive::timer::TimerService;
24 | ///
25 | /// static TEST_CLOCK: MockClock = MockClock::new();
26 | /// TEST_CLOCK.set_time(2300); // Set the current time
27 | /// # #[cfg(feature = "std")]
28 | /// let timer = TimerService::new(&TEST_CLOCK);
29 | /// ```
30 | pub struct MockClock {
31 |     now: core::sync::atomic::AtomicUsize,
32 | }
33 | 
34 | impl core::fmt::Debug for MockClock {
35 |     fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
36 |         let now = self.now();
37 |         f.debug_struct("MockClock").field("now", &now).finish()
38 |     }
39 | }
40 | 
41 | impl MockClock {
42 |     /// Creates a new instance of the [`MockClock`], which is initialized to
43 |     /// timestamp 0.
44 | pub const fn new() -> MockClock { 45 | MockClock { 46 | now: AtomicUsize::new(0), 47 | } 48 | } 49 | 50 | /// Sets the current timestamp inside to [`MockClock`] to the given value 51 | pub fn set_time(&self, timestamp: u64) { 52 | if timestamp > (core::usize::MAX as u64) { 53 | panic!("timestamps bigger than usize::MAX are not supported") 54 | } 55 | let to_set = timestamp as usize; 56 | self.now.store(to_set, Ordering::Release); 57 | } 58 | } 59 | 60 | impl Clock for MockClock { 61 | fn now(&self) -> u64 { 62 | self.now.load(Ordering::Relaxed) as u64 63 | } 64 | } 65 | 66 | #[cfg(feature = "std")] 67 | mod if_std { 68 | use super::*; 69 | use std::time::Instant; 70 | 71 | /// A Clock that makes use of the Standard libraries [`std::time::Instant`] 72 | /// functionality in order to generate monotonically increasing timestamps. 73 | pub struct StdClock { 74 | start: Instant, 75 | } 76 | 77 | impl core::fmt::Debug for StdClock { 78 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 79 | f.debug_struct("StdClock").finish() 80 | } 81 | } 82 | 83 | impl StdClock { 84 | /// Creates a new [`StdClock`] 85 | pub fn new() -> StdClock { 86 | StdClock { 87 | start: Instant::now(), 88 | } 89 | } 90 | } 91 | 92 | impl Clock for StdClock { 93 | fn now(&self) -> u64 { 94 | let elapsed = Instant::now() - self.start; 95 | elapsed.as_millis() as u64 96 | } 97 | } 98 | } 99 | 100 | #[cfg(feature = "std")] 101 | pub use self::if_std::*; 102 | -------------------------------------------------------------------------------- /src/timer/mod.rs: -------------------------------------------------------------------------------- 1 | //! Asynchronous timers. 2 | //! 3 | //! This module provides a timer implementation which returns awaitable 4 | //! `Future`s. 5 | //! The timer can work with a configurable clock source. In order to utilize 6 | //! the system clock, a global instance `StdClock` can be utilized. 7 | 8 | mod clock; 9 | pub use self::clock::{Clock, MockClock}; 10 | 11 | #[cfg(feature = "std")] 12 | pub use self::clock::StdClock; 13 | 14 | mod timer; 15 | 16 | pub use self::timer::{ 17 | GenericTimerService, LocalTimer, LocalTimerFuture, LocalTimerService, 18 | Timer, TimerFuture, 19 | }; 20 | 21 | #[cfg(feature = "std")] 22 | pub use self::timer::TimerService; 23 | -------------------------------------------------------------------------------- /src/timer/timer.rs: -------------------------------------------------------------------------------- 1 | //! An asynchronously awaitable timer 2 | 3 | use super::clock::Clock; 4 | use crate::{ 5 | intrusive_pairing_heap::{HeapNode, PairingHeap}, 6 | utils::update_waker_ref, 7 | NoopLock, 8 | }; 9 | use core::{pin::Pin, time::Duration}; 10 | use futures_core::{ 11 | future::{FusedFuture, Future}, 12 | task::{Context, Poll, Waker}, 13 | }; 14 | use lock_api::{Mutex, RawMutex}; 15 | 16 | /// Tracks how the future had interacted with the timer 17 | #[derive(PartialEq)] 18 | enum PollState { 19 | /// The task is not registered at the wait queue at the timer 20 | Unregistered, 21 | /// The task was added to the wait queue at the timer 22 | Registered, 23 | /// The timer has expired and was thereby removed from the wait queue at 24 | /// the timer. Having this extra state avoids to query the clock for an 25 | /// extra time. 26 | Expired, 27 | } 28 | 29 | /// Tracks the timer futures waiting state. 
30 | struct TimerQueueEntry { 31 | /// Timestamp when the timer expires 32 | expiry: u64, 33 | /// The task handle of the waiting task 34 | task: Option, 35 | /// Current polling state 36 | state: PollState, 37 | } 38 | 39 | impl TimerQueueEntry { 40 | /// Creates a new TimerQueueEntry 41 | fn new(expiry: u64) -> TimerQueueEntry { 42 | TimerQueueEntry { 43 | expiry, 44 | task: None, 45 | state: PollState::Unregistered, 46 | } 47 | } 48 | } 49 | 50 | impl PartialEq for TimerQueueEntry { 51 | fn eq(&self, other: &TimerQueueEntry) -> bool { 52 | // This is technically not correct. However for the usage in this module 53 | // we only need to compare timers by expiration. 54 | self.expiry == other.expiry 55 | } 56 | } 57 | 58 | impl Eq for TimerQueueEntry {} 59 | 60 | impl PartialOrd for TimerQueueEntry { 61 | fn partial_cmp( 62 | &self, 63 | other: &TimerQueueEntry, 64 | ) -> Option { 65 | // Compare timer queue entries by expiration time 66 | self.expiry.partial_cmp(&other.expiry) 67 | } 68 | } 69 | 70 | impl Ord for TimerQueueEntry { 71 | fn cmp(&self, other: &TimerQueueEntry) -> core::cmp::Ordering { 72 | self.expiry.cmp(&other.expiry) 73 | } 74 | } 75 | 76 | /// Internal state of the timer 77 | struct TimerState { 78 | /// The clock which is utilized 79 | clock: &'static dyn Clock, 80 | /// The heap of waiters, which are waiting for their timer to expire 81 | waiters: PairingHeap, 82 | } 83 | 84 | impl TimerState { 85 | fn new(clock: &'static dyn Clock) -> TimerState { 86 | TimerState { 87 | clock, 88 | waiters: PairingHeap::new(), 89 | } 90 | } 91 | 92 | /// Registers the timer future at the Timer. 93 | /// This function is only safe as long as the `wait_node`s address is guaranteed 94 | /// to be stable until it gets removed from the queue. 95 | unsafe fn try_wait( 96 | &mut self, 97 | wait_node: &mut HeapNode, 98 | cx: &mut Context<'_>, 99 | ) -> Poll<()> { 100 | match wait_node.state { 101 | PollState::Unregistered => { 102 | let now = self.clock.now(); 103 | if now >= wait_node.expiry { 104 | // The timer is already expired 105 | wait_node.state = PollState::Expired; 106 | Poll::Ready(()) 107 | } else { 108 | // Added the task to the wait queue 109 | wait_node.task = Some(cx.waker().clone()); 110 | wait_node.state = PollState::Registered; 111 | self.waiters.insert(wait_node); 112 | Poll::Pending 113 | } 114 | } 115 | PollState::Registered => { 116 | // Since the timer wakes up all waiters and moves their states to 117 | // Expired when the timer expired, it can't be expired here yet. 118 | // However the caller might have passed a different `Waker`. 119 | // In this case we need to update it. 120 | update_waker_ref(&mut wait_node.task, cx); 121 | Poll::Pending 122 | } 123 | PollState::Expired => Poll::Ready(()), 124 | } 125 | } 126 | 127 | fn remove_waiter(&mut self, wait_node: &mut HeapNode) { 128 | // TimerFuture only needs to get removed if it had been added to 129 | // the wait queue of the timer. This has happened in the PollState::Registered case. 130 | if let PollState::Registered = wait_node.state { 131 | // Safety: Due to the state, we know that the node must be part 132 | // of the waiter heap 133 | unsafe { self.waiters.remove(wait_node) }; 134 | wait_node.state = PollState::Unregistered; 135 | } 136 | } 137 | 138 | /// Returns a timestamp when the next timer expires. 139 | /// 140 | /// For thread-safe timers, the returned value is not precise and subject to 141 | /// race-conditions, since other threads can add timer in the meantime. 
142 |     fn next_expiration(&self) -> Option<u64> {
143 |         // Safety: We ensure that any node in the heap remains alive
144 |         unsafe { self.waiters.peek_min().map(|first| first.as_ref().expiry) }
145 |     }
146 | 
147 |     /// Checks whether any of the attached Futures is expired
148 |     fn check_expirations(&mut self) {
149 |         let now = self.clock.now();
150 |         while let Some(mut first) = self.waiters.peek_min() {
151 |             // Safety: We ensure that any node in the heap remains alive
152 |             unsafe {
153 |                 let entry = first.as_mut();
154 |                 let first_expiry = entry.expiry;
155 |                 if now >= first_expiry {
156 |                     // The timer is expired.
157 |                     entry.state = PollState::Expired;
158 |                     if let Some(task) = entry.task.take() {
159 |                         task.wake();
160 |                     }
161 |                 } else {
162 |                     // Remaining timers are not expired
163 |                     break;
164 |                 }
165 | 
166 |                 // Remove the expired timer
167 |                 self.waiters.remove(entry);
168 |             }
169 |         }
170 |     }
171 | }
172 | 
173 | /// Adapter trait that allows Futures to generically interact with timer
174 | /// implementations via dynamic dispatch.
175 | trait TimerAccess {
176 |     unsafe fn try_wait(
177 |         &self,
178 |         wait_node: &mut HeapNode<TimerQueueEntry>,
179 |         cx: &mut Context<'_>,
180 |     ) -> Poll<()>;
181 | 
182 |     fn remove_waiter(&self, wait_node: &mut HeapNode<TimerQueueEntry>);
183 | }
184 | 
185 | /// An asynchronously awaitable timer which is bound to a thread.
186 | ///
187 | /// The timer operates on millisecond precision and makes use of a configurable
188 | /// clock source.
189 | ///
190 | /// The timer allows waiting asynchronously either for a certain duration,
191 | /// or until the provided [`Clock`] reaches a certain timestamp.
192 | pub trait LocalTimer {
193 |     /// Returns a future that gets fulfilled after the given `Duration`
194 |     fn delay(&self, delay: Duration) -> LocalTimerFuture;
195 | 
196 |     /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches
197 |     /// the given timestamp.
198 |     fn deadline(&self, timestamp: u64) -> LocalTimerFuture;
199 | }
200 | 
201 | /// An asynchronously awaitable thread-safe timer.
202 | ///
203 | /// The timer operates on millisecond precision and makes use of a configurable
204 | /// clock source.
205 | ///
206 | /// The timer allows waiting asynchronously either for a certain duration,
207 | /// or until the provided [`Clock`] reaches a certain timestamp.
208 | pub trait Timer {
209 |     /// Returns a future that gets fulfilled after the given `Duration`
210 |     fn delay(&self, delay: Duration) -> TimerFuture;
211 | 
212 |     /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches
213 |     /// the given timestamp.
214 |     fn deadline(&self, timestamp: u64) -> TimerFuture;
215 | }
216 | 
217 | /// An asynchronously awaitable timer.
218 | ///
219 | /// The timer operates on millisecond precision and makes use of a configurable
220 | /// clock source.
221 | ///
222 | /// The timer allows waiting asynchronously either for a certain duration,
223 | /// or until the provided [`Clock`] reaches a certain timestamp.
224 | ///
225 | /// In order to unblock tasks that are waiting on the timer,
226 | /// [`check_expirations`](GenericTimerService::check_expirations)
227 | /// must be called at regular intervals on this timer service.
228 | ///
229 | /// The timer can either be running on a separate timer thread (in case a
230 | /// thread-safe timer type is utilized), or it can be integrated into an
231 | /// executor in order to minimize context switches.
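///
/// A minimal driving loop, sketched with the [`MockClock`](super::MockClock)
/// from this crate (the clock is advanced manually here; a real executor
/// would instead sleep until `next_expiration()` and then call
/// `check_expirations()`):
///
/// ```
/// use core::time::Duration;
/// use futures::executor::block_on;
/// use futures::{pin_mut, poll};
/// use futures_intrusive::timer::{LocalTimer, LocalTimerService, MockClock};
///
/// static CLOCK: MockClock = MockClock::new();
/// let timer = LocalTimerService::new(&CLOCK);
///
/// block_on(async {
///     let delay = timer.delay(Duration::from_millis(100));
///     pin_mut!(delay);
///     // The first poll registers the timer; it has not expired yet.
///     assert!(poll!(delay.as_mut()).is_pending());
///     assert_eq!(timer.next_expiration(), Some(100));
///
///     // Let the mock time pass and expire the attached timers.
///     CLOCK.set_time(100);
///     timer.check_expirations();
///     assert!(poll!(delay.as_mut()).is_ready());
/// });
/// ```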
232 | pub struct GenericTimerService<MutexType: RawMutex> {
233 |     inner: Mutex<MutexType, TimerState>,
234 | }
235 | 
236 | // The timer can be sent to other threads as long as it's not borrowed
237 | unsafe impl<MutexType: RawMutex + Send> Send
238 |     for GenericTimerService<MutexType>
239 | {
240 | }
241 | // The timer is thread-safe as long as it uses a thread-safe mutex
242 | unsafe impl<MutexType: RawMutex + Sync> Sync
243 |     for GenericTimerService<MutexType>
244 | {
245 | }
246 | 
247 | impl<MutexType: RawMutex> core::fmt::Debug for GenericTimerService<MutexType> {
248 |     fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
249 |         f.debug_struct("TimerService").finish()
250 |     }
251 | }
252 | 
253 | impl<MutexType: RawMutex> GenericTimerService<MutexType> {
254 |     /// Creates a new Timer in the given state.
255 |     ///
256 |     /// The Timer will query the provided [`Clock`] instance for the current
257 |     /// time whenever required.
258 |     ///
259 |     /// In order to create a clock which utilizes system time,
260 |     /// [`StdClock`](super::StdClock) can be utilized.
261 |     /// In order to simulate time for test purposes,
262 |     /// [`MockClock`](super::MockClock) can be utilized.
263 |     pub fn new(clock: &'static dyn Clock) -> GenericTimerService<MutexType> {
264 |         GenericTimerService::<MutexType> {
265 |             inner: Mutex::new(TimerState::new(clock)),
266 |         }
267 |     }
268 | 
269 |     /// Returns a timestamp when the next timer expires.
270 |     ///
271 |     /// For thread-safe timers, the returned value is not precise and subject
272 |     /// to race conditions, since other threads can add timers in the meantime.
273 |     ///
274 |     /// Therefore adding any timer to the [`GenericTimerService`] should also
275 |     /// make sure to wake up the executor which polls for timeouts, in order to
276 |     /// let it capture the latest change.
277 |     pub fn next_expiration(&self) -> Option<u64> {
278 |         self.inner.lock().next_expiration()
279 |     }
280 | 
281 |     /// Checks whether any of the attached [`TimerFuture`]s has expired.
282 |     /// In this case the associated task is woken up.
283 |     pub fn check_expirations(&self) {
284 |         self.inner.lock().check_expirations()
285 |     }
286 | 
287 |     /// Returns a deadline based on the current timestamp plus the given Duration
288 |     fn deadline_from_now(&self, duration: Duration) -> u64 {
289 |         let now = self.inner.lock().clock.now();
290 |         let duration_ms =
291 |             core::cmp::min(duration.as_millis(), core::u64::MAX as u128) as u64;
292 |         now.saturating_add(duration_ms)
293 |     }
294 | }
295 | 
296 | impl<MutexType: RawMutex> LocalTimer for GenericTimerService<MutexType> {
297 |     /// Returns a future that gets fulfilled after the given [`Duration`]
298 |     fn delay(&self, delay: Duration) -> LocalTimerFuture {
299 |         let deadline = self.deadline_from_now(delay);
300 |         LocalTimer::deadline(&*self, deadline)
301 |     }
302 | 
303 |     /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches
304 |     /// the given timestamp.
305 |     fn deadline(&self, timestamp: u64) -> LocalTimerFuture {
306 |         LocalTimerFuture {
307 |             timer: Some(self),
308 |             wait_node: HeapNode::new(TimerQueueEntry::new(timestamp)),
309 |         }
310 |     }
311 | }
312 | 
313 | impl<MutexType: RawMutex> Timer for GenericTimerService<MutexType>
314 | where
315 |     MutexType: Sync,
316 | {
317 |     /// Returns a future that gets fulfilled after the given [`Duration`]
318 |     fn delay(&self, delay: Duration) -> TimerFuture {
319 |         let deadline = self.deadline_from_now(delay);
320 |         Timer::deadline(&*self, deadline)
321 |     }
322 | 
323 |     /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches
324 |     /// the given timestamp.
325 | fn deadline(&self, timestamp: u64) -> TimerFuture { 326 | TimerFuture { 327 | timer_future: LocalTimerFuture { 328 | timer: Some(self), 329 | wait_node: HeapNode::new(TimerQueueEntry::new(timestamp)), 330 | }, 331 | } 332 | } 333 | } 334 | 335 | impl TimerAccess for GenericTimerService { 336 | unsafe fn try_wait( 337 | &self, 338 | wait_node: &mut HeapNode, 339 | cx: &mut Context<'_>, 340 | ) -> Poll<()> { 341 | self.inner.lock().try_wait(wait_node, cx) 342 | } 343 | 344 | fn remove_waiter(&self, wait_node: &mut HeapNode) { 345 | self.inner.lock().remove_waiter(wait_node) 346 | } 347 | } 348 | 349 | /// A Future that is resolved once the requested time has elapsed. 350 | #[must_use = "futures do nothing unless polled"] 351 | pub struct LocalTimerFuture<'a> { 352 | /// The Timer that is associated with this TimerFuture 353 | timer: Option<&'a dyn TimerAccess>, 354 | /// Node for waiting on the timer 355 | wait_node: HeapNode, 356 | } 357 | 358 | impl<'a> core::fmt::Debug for LocalTimerFuture<'a> { 359 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 360 | f.debug_struct("LocalTimerFuture").finish() 361 | } 362 | } 363 | 364 | impl<'a> Future for LocalTimerFuture<'a> { 365 | type Output = (); 366 | 367 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { 368 | // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. 369 | // However this didn't seem to work for some borrow checker reasons 370 | 371 | // Safety: The next operations are safe, because Pin promises us that 372 | // the address of the wait queue entry inside TimerFuture is stable, 373 | // and we don't move any fields inside the future until it gets dropped. 374 | let mut_self: &mut LocalTimerFuture = 375 | unsafe { Pin::get_unchecked_mut(self) }; 376 | 377 | let timer = 378 | mut_self.timer.expect("polled TimerFuture after completion"); 379 | 380 | let poll_res = unsafe { timer.try_wait(&mut mut_self.wait_node, cx) }; 381 | 382 | if poll_res.is_ready() { 383 | // A value was available 384 | mut_self.timer = None; 385 | } 386 | 387 | poll_res 388 | } 389 | } 390 | 391 | impl<'a> FusedFuture for LocalTimerFuture<'a> { 392 | fn is_terminated(&self) -> bool { 393 | self.timer.is_none() 394 | } 395 | } 396 | 397 | impl<'a> Drop for LocalTimerFuture<'a> { 398 | fn drop(&mut self) { 399 | // If this TimerFuture has been polled and it was added to the 400 | // wait queue at the timer, it must be removed before dropping. 401 | // Otherwise the timer would access invalid memory. 402 | if let Some(timer) = self.timer { 403 | timer.remove_waiter(&mut self.wait_node); 404 | } 405 | } 406 | } 407 | 408 | /// A Future that is resolved once the requested time has elapsed. 409 | #[must_use = "futures do nothing unless polled"] 410 | pub struct TimerFuture<'a> { 411 | /// The Timer that is associated with this TimerFuture 412 | timer_future: LocalTimerFuture<'a>, 413 | } 414 | 415 | // Safety: TimerFutures are only returned by GenericTimerService instances which 416 | // are thread-safe (RawMutex: Sync). 417 | unsafe impl<'a> Send for TimerFuture<'a> {} 418 | 419 | impl<'a> core::fmt::Debug for TimerFuture<'a> { 420 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 421 | f.debug_struct("TimerFuture").finish() 422 | } 423 | } 424 | 425 | impl<'a> Future for TimerFuture<'a> { 426 | type Output = (); 427 | 428 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { 429 | // Safety: TimerFuture is a pure wrapper around LocalTimerFuture. 
430 | // The inner value is never moved 431 | let inner_pin = unsafe { 432 | Pin::map_unchecked_mut(self, |fut| &mut fut.timer_future) 433 | }; 434 | inner_pin.poll(cx) 435 | } 436 | } 437 | 438 | impl<'a> FusedFuture for TimerFuture<'a> { 439 | fn is_terminated(&self) -> bool { 440 | self.timer_future.is_terminated() 441 | } 442 | } 443 | 444 | // Export a non thread-safe version using NoopLock 445 | 446 | /// A [`GenericTimerService`] implementation which is not thread-safe. 447 | pub type LocalTimerService = GenericTimerService; 448 | 449 | #[cfg(feature = "std")] 450 | mod if_std { 451 | use super::*; 452 | 453 | // Export a thread-safe version using parking_lot::RawMutex 454 | 455 | /// A [`GenericTimerService`] implementation backed by [`parking_lot`]. 456 | pub type TimerService = GenericTimerService; 457 | } 458 | 459 | #[cfg(feature = "std")] 460 | pub use self::if_std::*; 461 | -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | //! Utilities which are used within the library 2 | 3 | use core::task::{Context, Waker}; 4 | 5 | /// Updates a `Waker` which is stored inside a `Option` to the newest value 6 | /// which is delivered via a `Context`. 7 | pub fn update_waker_ref(waker_option: &mut Option, cx: &Context) { 8 | if waker_option 9 | .as_ref() 10 | .map_or(true, |stored_waker| !stored_waker.will_wake(cx.waker())) 11 | { 12 | *waker_option = Some(cx.waker().clone()); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /tests/manual_reset_event.rs: -------------------------------------------------------------------------------- 1 | use futures::future::{FusedFuture, Future}; 2 | use futures::task::Context; 3 | use futures_intrusive::sync::LocalManualResetEvent; 4 | use futures_test::task::{new_count_waker, panic_waker}; 5 | use pin_utils::pin_mut; 6 | 7 | macro_rules! 
gen_event_tests { 8 | ($mod_name:ident, $event_type:ident) => { 9 | mod $mod_name { 10 | use super::*; 11 | 12 | #[test] 13 | fn synchronous() { 14 | let event = $event_type::new(false); 15 | 16 | assert!(!event.is_set()); 17 | event.set(); 18 | assert!(event.is_set()); 19 | event.reset(); 20 | assert!(!event.is_set()); 21 | } 22 | 23 | #[test] 24 | fn immediately_ready_event() { 25 | let event = $event_type::new(true); 26 | let waker = &panic_waker(); 27 | let cx = &mut Context::from_waker(&waker); 28 | 29 | assert!(event.is_set()); 30 | 31 | let poll = event.wait(); 32 | pin_mut!(poll); 33 | assert!(!poll.as_mut().is_terminated()); 34 | 35 | assert!(poll.as_mut().poll(cx).is_ready()); 36 | assert!(poll.as_mut().is_terminated()); 37 | } 38 | 39 | #[test] 40 | fn cancel_mid_wait() { 41 | let event = $event_type::new(false); 42 | let (waker, count) = new_count_waker(); 43 | let cx = &mut Context::from_waker(&waker); 44 | 45 | { 46 | // Cancel a wait in between other waits 47 | // In order to arbitrarily drop a non movable future we have to box and pin it 48 | let mut poll1 = Box::pin(event.wait()); 49 | let mut poll2 = Box::pin(event.wait()); 50 | let mut poll3 = Box::pin(event.wait()); 51 | let mut poll4 = Box::pin(event.wait()); 52 | let mut poll5 = Box::pin(event.wait()); 53 | 54 | assert!(poll1.as_mut().poll(cx).is_pending()); 55 | assert!(poll2.as_mut().poll(cx).is_pending()); 56 | assert!(poll3.as_mut().poll(cx).is_pending()); 57 | assert!(poll4.as_mut().poll(cx).is_pending()); 58 | assert!(poll5.as_mut().poll(cx).is_pending()); 59 | assert!(!poll1.is_terminated()); 60 | assert!(!poll2.is_terminated()); 61 | assert!(!poll3.is_terminated()); 62 | assert!(!poll4.is_terminated()); 63 | assert!(!poll5.is_terminated()); 64 | 65 | // Cancel 2 futures. 
Only the remaining ones should get completed 66 | drop(poll2); 67 | drop(poll4); 68 | 69 | assert!(poll1.as_mut().poll(cx).is_pending()); 70 | assert!(poll3.as_mut().poll(cx).is_pending()); 71 | assert!(poll5.as_mut().poll(cx).is_pending()); 72 | 73 | assert_eq!(count, 0); 74 | event.set(); 75 | 76 | assert!(poll1.as_mut().poll(cx).is_ready()); 77 | assert!(poll3.as_mut().poll(cx).is_ready()); 78 | assert!(poll5.as_mut().poll(cx).is_ready()); 79 | assert!(poll1.is_terminated()); 80 | assert!(poll3.is_terminated()); 81 | assert!(poll5.is_terminated()); 82 | } 83 | 84 | assert_eq!(count, 3); 85 | } 86 | 87 | #[test] 88 | fn cancel_end_wait() { 89 | let event = $event_type::new(false); 90 | let (waker, count) = new_count_waker(); 91 | let cx = &mut Context::from_waker(&waker); 92 | 93 | let poll1 = event.wait(); 94 | let poll2 = event.wait(); 95 | let poll3 = event.wait(); 96 | let poll4 = event.wait(); 97 | 98 | pin_mut!(poll1); 99 | pin_mut!(poll2); 100 | pin_mut!(poll3); 101 | pin_mut!(poll4); 102 | 103 | assert!(poll1.as_mut().poll(cx).is_pending()); 104 | assert!(poll2.as_mut().poll(cx).is_pending()); 105 | 106 | // Start polling some wait handles which get cancelled 107 | // before new ones are attached 108 | { 109 | let poll5 = event.wait(); 110 | let poll6 = event.wait(); 111 | pin_mut!(poll5); 112 | pin_mut!(poll6); 113 | assert!(poll5.as_mut().poll(cx).is_pending()); 114 | assert!(poll6.as_mut().poll(cx).is_pending()); 115 | } 116 | 117 | assert!(poll3.as_mut().poll(cx).is_pending()); 118 | assert!(poll4.as_mut().poll(cx).is_pending()); 119 | 120 | event.set(); 121 | 122 | assert!(poll1.as_mut().poll(cx).is_ready()); 123 | assert!(poll2.as_mut().poll(cx).is_ready()); 124 | assert!(poll3.as_mut().poll(cx).is_ready()); 125 | assert!(poll4.as_mut().poll(cx).is_ready()); 126 | 127 | assert_eq!(count, 4); 128 | } 129 | 130 | #[test] 131 | fn poll_from_multiple_executors() { 132 | let (waker_1, count_1) = new_count_waker(); 133 | let (waker_2, count_2) = new_count_waker(); 134 | let event = $event_type::new(false); 135 | 136 | let cx_1 = &mut Context::from_waker(&waker_1); 137 | let cx_2 = &mut Context::from_waker(&waker_2); 138 | 139 | let fut = event.wait(); 140 | pin_mut!(fut); 141 | 142 | assert!(fut.as_mut().poll(cx_1).is_pending()); 143 | assert!(fut.as_mut().poll(cx_2).is_pending()); 144 | 145 | event.set(); 146 | assert!(event.is_set()); 147 | assert_eq!(count_1, 0); 148 | assert_eq!(count_2, 1); 149 | 150 | assert!(fut.as_mut().poll(cx_2).is_ready()); 151 | assert!(fut.as_mut().is_terminated()); 152 | } 153 | } 154 | }; 155 | } 156 | 157 | gen_event_tests!(local_manual_reset_event_tests, LocalManualResetEvent); 158 | 159 | #[cfg(feature = "std")] 160 | mod if_std { 161 | use super::*; 162 | use futures::executor::block_on; 163 | use futures_intrusive::sync::ManualResetEvent; 164 | use std::sync::Arc; 165 | use std::thread; 166 | use std::time; 167 | 168 | gen_event_tests!(manual_reset_event_tests, ManualResetEvent); 169 | 170 | fn is_send(_: &T) {} 171 | 172 | fn is_send_value(_: T) {} 173 | 174 | fn is_sync(_: &T) {} 175 | 176 | #[test] 177 | fn event_futures_are_send() { 178 | let event = ManualResetEvent::new(false); 179 | is_sync(&event); 180 | { 181 | let wait_fut = event.wait(); 182 | is_send(&wait_fut); 183 | pin_mut!(wait_fut); 184 | is_send(&wait_fut); 185 | } 186 | is_send_value(event); 187 | } 188 | 189 | #[test] 190 | fn multithreaded_smoke() { 191 | let event = Arc::new(ManualResetEvent::new(false)); 192 | 193 | let waiters: Vec> = [1..4] 194 | .iter() 195 | .map(|_| 
{ 196 | let ev = event.clone(); 197 | thread::spawn(move || { 198 | block_on(ev.wait()); 199 | time::Instant::now() 200 | }) 201 | }) 202 | .collect(); 203 | 204 | let start = time::Instant::now(); 205 | 206 | thread::sleep(time::Duration::from_millis(100)); 207 | event.set(); 208 | 209 | for waiter in waiters.into_iter() { 210 | let end_time = waiter.join().unwrap(); 211 | let diff = end_time - start; 212 | assert!(diff > time::Duration::from_millis(50)); 213 | } 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /tests/oneshot_channel.rs: -------------------------------------------------------------------------------- 1 | use futures::future::{FusedFuture, Future}; 2 | use futures::task::{Context, Poll}; 3 | use futures_intrusive::channel::{ChannelSendError, LocalOneshotChannel}; 4 | use futures_test::task::{new_count_waker, panic_waker}; 5 | use pin_utils::pin_mut; 6 | 7 | macro_rules! gen_oneshot_tests { 8 | ($mod_name:ident, $channel_type:ident) => { 9 | mod $mod_name { 10 | use super::*; 11 | 12 | fn assert_receive_done( 13 | cx: &mut Context, 14 | receive_fut: &mut core::pin::Pin<&mut FutureType>, 15 | value: Option, 16 | ) where 17 | FutureType: Future> + FusedFuture, 18 | T: PartialEq + core::fmt::Debug, 19 | { 20 | match receive_fut.as_mut().poll(cx) { 21 | Poll::Pending => panic!("future is not ready"), 22 | Poll::Ready(res) => { 23 | if res != value { 24 | panic!("Unexpected value {:?}", res); 25 | } 26 | } 27 | }; 28 | assert!(receive_fut.as_mut().is_terminated()); 29 | } 30 | 31 | #[test] 32 | fn send_on_closed_channel() { 33 | let channel = $channel_type::::new(); 34 | assert!(channel.close().is_newly_closed()); 35 | assert_eq!(Err(ChannelSendError(5)), channel.send(5)); 36 | } 37 | 38 | #[test] 39 | fn close_status() { 40 | let channel = $channel_type::::new(); 41 | 42 | assert!(channel.close().is_newly_closed()); 43 | assert!(channel.close().is_already_closed()); 44 | assert!(channel.close().is_already_closed()); 45 | } 46 | 47 | #[test] 48 | fn close_unblocks_receive() { 49 | let channel = $channel_type::::new(); 50 | let (waker, count) = new_count_waker(); 51 | let cx = &mut Context::from_waker(&waker); 52 | 53 | let fut = channel.receive(); 54 | pin_mut!(fut); 55 | assert!(fut.as_mut().poll(cx).is_pending()); 56 | let fut2 = channel.receive(); 57 | pin_mut!(fut2); 58 | assert!(fut2.as_mut().poll(cx).is_pending()); 59 | assert_eq!(count, 0); 60 | 61 | assert!(channel.close().is_newly_closed()); 62 | assert_eq!(count, 2); 63 | assert_receive_done(cx, &mut fut, None); 64 | assert_receive_done(cx, &mut fut2, None); 65 | } 66 | 67 | #[test] 68 | fn receive_after_send() { 69 | let channel = $channel_type::::new(); 70 | let waker = &panic_waker(); 71 | let cx = &mut Context::from_waker(&waker); 72 | 73 | channel.send(5).unwrap(); 74 | 75 | let receive_fut = channel.receive(); 76 | pin_mut!(receive_fut); 77 | assert!(!receive_fut.as_mut().is_terminated()); 78 | 79 | assert_receive_done(cx, &mut receive_fut, Some(5)); 80 | 81 | // A second receive attempt must yield None, since the 82 | // value was taken out of the channel 83 | let receive_fut2 = channel.receive(); 84 | pin_mut!(receive_fut2); 85 | assert_receive_done(cx, &mut receive_fut2, None); 86 | } 87 | 88 | #[test] 89 | fn send_after_receive() { 90 | let channel = $channel_type::::new(); 91 | let (waker, _) = new_count_waker(); 92 | let cx = &mut Context::from_waker(&waker); 93 | 94 | let receive_fut1 = channel.receive(); 95 | let receive_fut2 = channel.receive(); 96 | 
96 |                 pin_mut!(receive_fut1);
97 |                 pin_mut!(receive_fut2);
98 |                 assert!(!receive_fut1.as_mut().is_terminated());
99 |                 assert!(!receive_fut2.as_mut().is_terminated());
100 | 
101 |                 let poll_res1 = receive_fut1.as_mut().poll(cx);
102 |                 let poll_res2 = receive_fut2.as_mut().poll(cx);
103 |                 assert!(poll_res1.is_pending());
104 |                 assert!(poll_res2.is_pending());
105 | 
106 |                 channel.send(5).unwrap();
107 | 
108 |                 assert_receive_done(cx, &mut receive_fut1, Some(5));
109 |                 // receive_fut2 isn't terminated, since it hasn't been polled
110 |                 assert!(!receive_fut2.as_mut().is_terminated());
111 |                 // When it gets polled, it must evaluate to None
112 |                 assert_receive_done(cx, &mut receive_fut2, None);
113 |             }
114 | 
115 |             #[test]
116 |             fn second_send_rejects_value() {
117 |                 let channel = $channel_type::<i32>::new();
118 |                 let (waker, _) = new_count_waker();
119 |                 let cx = &mut Context::from_waker(&waker);
120 | 
121 |                 let receive_fut1 = channel.receive();
122 |                 pin_mut!(receive_fut1);
123 |                 assert!(!receive_fut1.as_mut().is_terminated());
124 |                 assert!(receive_fut1.as_mut().poll(cx).is_pending());
125 | 
126 |                 // First send
127 |                 channel.send(5).unwrap();
128 | 
129 |                 assert!(receive_fut1.as_mut().poll(cx).is_ready());
130 | 
131 |                 // Second send
132 |                 let send_res = channel.send(7);
133 |                 match send_res {
134 |                     Err(ChannelSendError(7)) => {} // expected
135 |                     _ => panic!("Second send should be rejected"),
136 |                 }
137 |             }
138 | 
139 |             #[test]
140 |             fn cancel_mid_wait() {
141 |                 let channel = $channel_type::new();
142 |                 let (waker, count) = new_count_waker();
143 |                 let cx = &mut Context::from_waker(&waker);
144 | 
145 |                 {
146 |                     // Cancel a wait in between other waits.
147 |                     // In order to arbitrarily drop a non-movable future, we have to box and pin it.
148 |                     let mut poll1 = Box::pin(channel.receive());
149 |                     let mut poll2 = Box::pin(channel.receive());
150 |                     let mut poll3 = Box::pin(channel.receive());
151 |                     let mut poll4 = Box::pin(channel.receive());
152 |                     let mut poll5 = Box::pin(channel.receive());
153 | 
154 |                     assert!(poll1.as_mut().poll(cx).is_pending());
155 |                     assert!(poll2.as_mut().poll(cx).is_pending());
156 |                     assert!(poll3.as_mut().poll(cx).is_pending());
157 |                     assert!(poll4.as_mut().poll(cx).is_pending());
158 |                     assert!(poll5.as_mut().poll(cx).is_pending());
159 |                     assert!(!poll1.is_terminated());
160 |                     assert!(!poll2.is_terminated());
161 |                     assert!(!poll3.is_terminated());
162 |                     assert!(!poll4.is_terminated());
163 |                     assert!(!poll5.is_terminated());
164 | 
165 |                     // Cancel 2 futures. Only the remaining ones should be completed.
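                    // Dropping a pending receive future deregisters its waiter
                    // from the channel, so it never receives a wakeup - which
                    // is what the `count` assertions below verify.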
166 |                     drop(poll2);
167 |                     drop(poll4);
168 | 
169 |                     assert!(poll1.as_mut().poll(cx).is_pending());
170 |                     assert!(poll3.as_mut().poll(cx).is_pending());
171 |                     assert!(poll5.as_mut().poll(cx).is_pending());
172 | 
173 |                     assert_eq!(count, 0);
174 |                     channel.send(7).unwrap();
175 |                     assert_eq!(count, 3);
176 | 
177 |                     assert!(poll1.as_mut().poll(cx).is_ready());
178 |                     assert!(poll3.as_mut().poll(cx).is_ready());
179 |                     assert!(poll5.as_mut().poll(cx).is_ready());
180 |                     assert!(poll1.is_terminated());
181 |                     assert!(poll3.is_terminated());
182 |                     assert!(poll5.is_terminated());
183 |                 }
184 | 
185 |                 assert_eq!(count, 3)
186 |             }
187 | 
188 |             #[test]
189 |             fn cancel_end_wait() {
190 |                 let channel = $channel_type::new();
191 |                 let (waker, count) = new_count_waker();
192 |                 let cx = &mut Context::from_waker(&waker);
193 | 
194 |                 let poll1 = channel.receive();
195 |                 let poll2 = channel.receive();
196 |                 let poll3 = channel.receive();
197 |                 let poll4 = channel.receive();
198 | 
199 |                 pin_mut!(poll1);
200 |                 pin_mut!(poll2);
201 |                 pin_mut!(poll3);
202 |                 pin_mut!(poll4);
203 | 
204 |                 assert!(poll1.as_mut().poll(cx).is_pending());
205 |                 assert!(poll2.as_mut().poll(cx).is_pending());
206 | 
207 |                 // Start polling some wait handles which get cancelled
208 |                 // before new ones are attached
209 |                 {
210 |                     let poll5 = channel.receive();
211 |                     let poll6 = channel.receive();
212 |                     pin_mut!(poll5);
213 |                     pin_mut!(poll6);
214 |                     assert!(poll5.as_mut().poll(cx).is_pending());
215 |                     assert!(poll6.as_mut().poll(cx).is_pending());
216 |                 }
217 | 
218 |                 assert!(poll3.as_mut().poll(cx).is_pending());
219 |                 assert!(poll4.as_mut().poll(cx).is_pending());
220 | 
221 |                 channel.send(99).unwrap();
222 | 
223 |                 assert!(poll1.as_mut().poll(cx).is_ready());
224 |                 assert!(poll2.as_mut().poll(cx).is_ready());
225 |                 assert!(poll3.as_mut().poll(cx).is_ready());
226 |                 assert!(poll4.as_mut().poll(cx).is_ready());
227 | 
228 |                 assert_eq!(count, 4)
229 |             }
230 | 
231 |             #[test]
232 |             fn poll_from_multiple_executors() {
233 |                 let (waker_1, count_1) = new_count_waker();
234 |                 let (waker_2, count_2) = new_count_waker();
235 |                 let channel = $channel_type::new();
236 | 
237 |                 let cx_1 = &mut Context::from_waker(&waker_1);
238 |                 let cx_2 = &mut Context::from_waker(&waker_2);
239 | 
240 |                 let fut = channel.receive();
241 |                 pin_mut!(fut);
242 |                 assert!(fut.as_mut().poll(cx_1).is_pending());
243 |                 assert!(fut.as_mut().poll(cx_2).is_pending());
244 | 
245 |                 channel.send(99).unwrap();
246 |                 assert_eq!(count_1, 0);
247 |                 assert_eq!(count_2, 1);
248 | 
249 |                 assert_receive_done(cx_2, &mut fut, Some(99));
250 |             }
251 |         }
252 |     };
253 | }
254 | 
255 | gen_oneshot_tests!(local_oneshot_channel_tests, LocalOneshotChannel);
256 | 
257 | #[cfg(feature = "std")]
258 | mod if_std {
259 |     use super::*;
260 |     use futures_intrusive::channel::shared::oneshot_channel;
261 |     use futures_intrusive::channel::OneshotChannel;
262 | 
263 |     gen_oneshot_tests!(oneshot_channel_tests, OneshotChannel);
264 | 
265 |     fn is_send<T: Send>(_: &T) {}
266 | 
267 |     fn is_send_value<T: Send>(_: T) {}
268 | 
269 |     fn is_sync<T: Sync>(_: &T) {}
270 | 
271 |     #[test]
272 |     fn channel_futures_are_send() {
273 |         let channel = OneshotChannel::<i32>::new();
274 |         is_sync(&channel);
275 |         {
276 |             let recv_fut = channel.receive();
277 |             is_send(&recv_fut);
278 |             pin_mut!(recv_fut);
279 |             is_send(&recv_fut);
280 |             let send_fut = channel.send(3);
281 |             is_send(&send_fut);
282 |             pin_mut!(send_fut);
283 |             is_send(&send_fut);
284 |         }
285 |         is_send_value(channel);
286 |     }
287 | 
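    // The shared channel variant hands out separate sender and receiver
    // halves; the handles must be Sync and the futures they return Send.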
288 |     #[test]
289 |     fn shared_channel_futures_are_send() {
290 |         let (sender, receiver) = oneshot_channel::<i32>();
291 |         is_sync(&sender);
292 |         is_sync(&receiver);
293 |         let recv_fut = receiver.receive();
294 |         is_send(&recv_fut);
295 |         pin_mut!(recv_fut);
296 |         is_send(&recv_fut);
297 |         let send_fut = sender.send(3);
298 |         is_send(&send_fut);
299 |         pin_mut!(send_fut);
300 |         is_send(&send_fut);
301 | 
302 |         is_send_value(sender);
303 |         is_send_value(receiver);
304 |     }
305 | 
306 |     #[test]
307 |     fn dropping_shared_channel_senders_closes_channel() {
308 |         let (waker, _) = new_count_waker();
309 |         let cx = &mut Context::from_waker(&waker);
310 | 
311 |         let (sender, receiver) = oneshot_channel::<i32>();
312 | 
313 |         let fut = receiver.receive();
314 |         pin_mut!(fut);
315 |         assert!(fut.as_mut().poll(cx).is_pending());
316 | 
317 |         drop(sender);
318 | 
319 |         match fut.as_mut().poll(cx) {
320 |             Poll::Ready(None) => {}
321 |             Poll::Ready(Some(_)) => panic!("Expected no value"),
322 |             Poll::Pending => panic!("Expected channel to be closed"),
323 |         }
324 |     }
325 | 
326 |     #[test]
327 |     fn dropping_shared_channel_receivers_closes_channel() {
328 |         let (sender, receiver) = oneshot_channel::<i32>();
329 |         drop(receiver);
330 | 
331 |         assert_eq!(Err(ChannelSendError(5)), sender.send(5));
332 |     }
333 | }
334 | 
--------------------------------------------------------------------------------
/tests/timer.rs:
--------------------------------------------------------------------------------
1 | use core::time::Duration;
2 | use futures::future::{FusedFuture, Future};
3 | use futures::task::Context;
4 | use futures_intrusive::timer::{LocalTimerService, MockClock};
5 | use futures_test::task::{new_count_waker, panic_waker};
6 | use pin_utils::pin_mut;
7 | 
8 | macro_rules! gen_timer_tests {
9 |     ($mod_name:ident, $timer_type:ident, $timer_trait_type:ident) => {
10 |         mod $mod_name {
11 |             use super::*;
12 |             use futures_intrusive::timer::$timer_trait_type;
13 | 
14 |             #[test]
15 |             fn start_and_expire_timers() {
16 |                 static TEST_CLOCK: MockClock = MockClock::new();
17 |                 TEST_CLOCK.set_time(200);
18 |                 let timer = $timer_type::new(&TEST_CLOCK);
19 |                 let (waker, count) = new_count_waker();
20 |                 let cx = &mut Context::from_waker(&waker);
21 |                 assert!(timer.next_expiration().is_none());
22 | 
23 |                 let fut = timer.deadline(999);
24 |                 pin_mut!(fut);
25 |                 assert!(fut.as_mut().poll(cx).is_pending());
26 |                 assert_eq!(Some(999), timer.next_expiration());
27 | 
28 |                 let fut2 = timer.delay(Duration::from_millis(300));
29 |                 pin_mut!(fut2);
30 |                 assert!(fut2.as_mut().poll(cx).is_pending());
31 |                 assert_eq!(Some(500), timer.next_expiration());
32 | 
33 |                 let fut3 = timer.delay(Duration::from_millis(500));
34 |                 pin_mut!(fut3);
35 |                 assert!(fut3.as_mut().poll(cx).is_pending());
36 |                 assert_eq!(Some(500), timer.next_expiration());
37 | 
38 |                 TEST_CLOCK.set_time(500);
39 |                 timer.check_expirations();
40 |                 assert_eq!(count, 1);
41 |                 assert!(fut.as_mut().poll(cx).is_pending());
42 |                 assert!(fut2.as_mut().poll(cx).is_ready());
43 |                 assert!(fut3.as_mut().poll(cx).is_pending());
44 |                 assert_eq!(Some(700), timer.next_expiration());
45 | 
46 |                 TEST_CLOCK.set_time(699);
47 |                 timer.check_expirations();
48 |                 assert_eq!(count, 1);
49 | 
50 |                 TEST_CLOCK.set_time(700);
51 |                 timer.check_expirations();
52 |                 assert_eq!(count, 2);
53 | 
54 |                 assert!(fut.as_mut().poll(cx).is_pending());
55 |                 assert!(fut3.as_mut().poll(cx).is_ready());
56 |                 assert_eq!(Some(999), timer.next_expiration());
57 | 
58 |                 TEST_CLOCK.set_time(1000);
59 |                 timer.check_expirations();
60 |                 assert_eq!(count, 3);
61 | 
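                // With the clock now at 1000, the deadline(999) timer has
                // expired as well, so the last future resolves.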
62 |                 assert!(fut.as_mut().poll(cx).is_ready());
63 |                 assert_eq!(None, timer.next_expiration());
64 |             }
65 | 
66 |             #[test]
67 |             fn immediately_ready_timer() {
68 |                 static TEST_CLOCK: MockClock = MockClock::new();
69 |                 TEST_CLOCK.set_time(400);
70 |                 let timer = $timer_type::new(&TEST_CLOCK);
71 |                 let waker = &panic_waker();
72 |                 let cx = &mut Context::from_waker(&waker);
73 | 
74 |                 let fut = timer.delay(Duration::from_millis(0));
75 |                 pin_mut!(fut);
76 |                 assert!(fut.as_mut().poll(cx).is_ready());
77 | 
78 |                 for ts in 389..=400 {
79 |                     let fut2 = timer.deadline(ts);
80 |                     pin_mut!(fut2);
81 |                     assert!(fut2.as_mut().poll(cx).is_ready());
82 |                 }
83 |             }
84 | 
85 |             #[test]
86 |             fn can_use_timer_as_trait_object() {
87 |                 static TEST_CLOCK: MockClock = MockClock::new();
88 |                 TEST_CLOCK.set_time(340);
89 |                 let timer = $timer_type::new(&TEST_CLOCK);
90 |                 let (waker, _count) = new_count_waker();
91 |                 let cx = &mut Context::from_waker(&waker);
92 | 
93 |                 let mut inner = |dyn_timer: &dyn $timer_trait_type| {
94 |                     let fut = dyn_timer.delay(Duration::from_millis(10));
95 |                     pin_mut!(fut);
96 |                     assert!(fut.as_mut().poll(cx).is_pending());
97 |                     TEST_CLOCK.set_time(350);
98 |                     timer.check_expirations();
99 | 
100 |                     assert!(fut.as_mut().poll(cx).is_ready());
101 |                 };
102 | 
103 |                 inner(&timer);
104 |             }
105 | 
106 |             #[test]
107 |             fn cancel_mid_wait() {
108 |                 static TEST_CLOCK: MockClock = MockClock::new();
109 |                 TEST_CLOCK.set_time(1300);
110 |                 let timer = $timer_type::new(&TEST_CLOCK);
111 |                 let (waker, count) = new_count_waker();
112 |                 let cx = &mut Context::from_waker(&waker);
113 | 
114 |                 {
115 |                     // Cancel a wait in between other waits.
116 |                     // In order to arbitrarily drop a non-movable future, we have to box and pin it.
117 |                     let mut poll1 = Box::pin(timer.deadline(1400));
118 |                     let mut poll2 = Box::pin(timer.deadline(1500));
119 |                     let mut poll3 = Box::pin(timer.deadline(1600));
120 |                     let mut poll4 = Box::pin(timer.deadline(1700));
121 |                     let mut poll5 = Box::pin(timer.deadline(1800));
122 | 
123 |                     assert!(poll1.as_mut().poll(cx).is_pending());
124 |                     assert!(poll2.as_mut().poll(cx).is_pending());
125 |                     assert!(poll3.as_mut().poll(cx).is_pending());
126 |                     assert!(poll4.as_mut().poll(cx).is_pending());
127 |                     assert!(poll5.as_mut().poll(cx).is_pending());
128 |                     assert!(!poll1.is_terminated());
129 |                     assert!(!poll2.is_terminated());
130 |                     assert!(!poll3.is_terminated());
131 |                     assert!(!poll4.is_terminated());
132 |                     assert!(!poll5.is_terminated());
133 | 
134 |                     // Cancel 2 futures. Only the remaining ones should be completed.
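                    // Dropped timer futures are deregistered from the timer,
                    // so advancing the clock wakes only the remaining three.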
135 |                     drop(poll2);
136 |                     drop(poll4);
137 | 
138 |                     assert!(poll1.as_mut().poll(cx).is_pending());
139 |                     assert!(poll3.as_mut().poll(cx).is_pending());
140 |                     assert!(poll5.as_mut().poll(cx).is_pending());
141 | 
142 |                     assert_eq!(count, 0);
143 |                     TEST_CLOCK.set_time(1800);
144 |                     timer.check_expirations();
145 | 
146 |                     assert!(poll1.as_mut().poll(cx).is_ready());
147 |                     assert!(poll3.as_mut().poll(cx).is_ready());
148 |                     assert!(poll5.as_mut().poll(cx).is_ready());
149 |                     assert!(poll1.is_terminated());
150 |                     assert!(poll3.is_terminated());
151 |                     assert!(poll5.is_terminated());
152 |                 }
153 | 
154 |                 assert_eq!(count, 3);
155 |             }
156 | 
157 |             #[test]
158 |             fn cancel_end_wait() {
159 |                 static TEST_CLOCK: MockClock = MockClock::new();
160 |                 TEST_CLOCK.set_time(2300);
161 |                 let timer = $timer_type::new(&TEST_CLOCK);
162 |                 let (waker, count) = new_count_waker();
163 |                 let cx = &mut Context::from_waker(&waker);
164 | 
165 |                 let poll1 = timer.deadline(2400);
166 |                 let poll2 = timer.deadline(2500);
167 |                 let poll3 = timer.deadline(2600);
168 |                 let poll4 = timer.deadline(2700);
169 | 
170 |                 pin_mut!(poll1);
171 |                 pin_mut!(poll2);
172 |                 pin_mut!(poll3);
173 |                 pin_mut!(poll4);
174 | 
175 |                 assert!(poll1.as_mut().poll(cx).is_pending());
176 |                 assert!(poll2.as_mut().poll(cx).is_pending());
177 | 
178 |                 // Start polling some wait handles which get cancelled
179 |                 // before new ones are attached
180 |                 {
181 |                     let poll5 = timer.deadline(2350);
182 |                     let poll6 = timer.deadline(2650);
183 |                     pin_mut!(poll5);
184 |                     pin_mut!(poll6);
185 |                     assert!(poll5.as_mut().poll(cx).is_pending());
186 |                     assert!(poll6.as_mut().poll(cx).is_pending());
187 |                 }
188 | 
189 |                 assert!(poll3.as_mut().poll(cx).is_pending());
190 |                 assert!(poll4.as_mut().poll(cx).is_pending());
191 | 
192 |                 TEST_CLOCK.set_time(2700);
193 |                 timer.check_expirations();
194 | 
195 |                 assert!(poll1.as_mut().poll(cx).is_ready());
196 |                 assert!(poll2.as_mut().poll(cx).is_ready());
197 |                 assert!(poll3.as_mut().poll(cx).is_ready());
198 |                 assert!(poll4.as_mut().poll(cx).is_ready());
199 | 
200 |                 assert_eq!(count, 4);
201 |             }
202 | 
203 |             #[test]
204 |             fn poll_from_multiple_executors() {
205 |                 static TEST_CLOCK: MockClock = MockClock::new();
206 |                 TEST_CLOCK.set_time(2300);
207 | 
208 |                 let timer = $timer_type::new(&TEST_CLOCK);
209 | 
210 |                 let (waker_1, count_1) = new_count_waker();
211 |                 let (waker_2, count_2) = new_count_waker();
212 |                 let cx_1 = &mut Context::from_waker(&waker_1);
213 |                 let cx_2 = &mut Context::from_waker(&waker_2);
214 | 
215 |                 let fut = timer.deadline(2400);
216 |                 pin_mut!(fut);
217 | 
218 |                 assert!(fut.as_mut().poll(cx_1).is_pending());
219 |                 assert!(fut.as_mut().poll(cx_2).is_pending());
220 | 
221 |                 TEST_CLOCK.set_time(2700);
222 |                 timer.check_expirations();
223 |                 assert_eq!(count_1, 0);
224 |                 assert_eq!(count_2, 1);
225 | 
226 |                 assert!(fut.as_mut().poll(cx_2).is_ready());
227 |                 assert!(fut.as_mut().is_terminated());
228 |             }
229 |         }
230 |     };
231 | }
232 | 
233 | gen_timer_tests!(local_timer_service_tests, LocalTimerService, LocalTimer);
234 | 
235 | #[cfg(feature = "std")]
236 | mod if_std {
237 |     use super::*;
238 |     use futures_intrusive::timer::{Timer, TimerService};
239 | 
240 |     gen_timer_tests!(timer_service_tests, TimerService, Timer);
241 | 
242 |     fn is_send<T: Send>(_: &T) {}
243 | 
244 |     fn is_send_value<T: Send>(_: T) {}
245 | 
246 |     fn is_sync<T: Sync>(_: &T) {}
247 | 
248 |     #[test]
249 |     fn timer_futures_are_send() {
250 |         static TEST_CLOCK: MockClock = MockClock::new();
251 |         TEST_CLOCK.set_time(2300);
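        // In contrast to LocalTimerService, TimerService is expected to be
        // Sync and its futures Send; the checks below verify exactly that.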
252 |         let timer = TimerService::new(&TEST_CLOCK);
253 |         is_sync(&timer);
254 |         {
255 |             let deadline = timer.deadline(2400);
256 |             is_send(&deadline);
257 |             pin_mut!(deadline);
258 |             is_send(&deadline);
259 |             let delay_fut = timer.delay(Duration::from_millis(1000));
260 |             is_send(&delay_fut);
261 |             pin_mut!(delay_fut);
262 |             is_send(&delay_fut);
263 |         }
264 |         is_send_value(timer);
265 |     }
266 | }
267 | 
--------------------------------------------------------------------------------