├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── benches └── contiguous_async_1.rs ├── src ├── lib.rs ├── receiver.rs ├── sender.rs └── state.rs └── tests └── ring_contiguous.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "async-spsc" 3 | version = "0.2.0" 4 | description = "Fast, easy-to-use, async-aware single-producer/single-consumer (SPSC) channel." 5 | authors = ["James Laver "] 6 | edition = "2018" 7 | 8 | [features] 9 | default = ["alloc", "async", "stream"] 10 | alloc = ["pages"] 11 | async = ["atomic-waker"] 12 | stream = ["async", "futures-core"] 13 | bench = ["criterion"] 14 | 15 | [dependencies.atomic-waker] 16 | version = "1.0.0" 17 | optional = true 18 | 19 | [dependencies.futures-core] 20 | version = "0.3.16" 21 | default-features = false 22 | optional = true 23 | 24 | [dependencies.criterion] 25 | version = "0.3" 26 | features = ["real_blackbox"] 27 | optional = true 28 | 29 | [dependencies.pages] 30 | version = "0.2.0" 31 | optional = true 32 | 33 | [dev-dependencies] 34 | dummy-waker = "1" 35 | futures-micro = "1.0.0-rc0" 36 | wookie = "0.3" 37 | 38 | [[bench]] 39 | name = "contiguous_async_1" 40 | harness = false 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | 204 | ---- LLVM Exceptions to the Apache 2.0 License ---- 205 | 206 | As an exception, if, as a result of your compiling your source code, portions 207 | of this Software are embedded into an Object form of such source code, you 208 | may redistribute such embedded portions in such Object form without complying 209 | with the conditions of Sections 4(a), 4(b) and 4(d) of the License. 210 | 211 | In addition, if you combine or link compiled forms of this Software with 212 | software that is licensed under the GPLv2 ("Combined Software") and if a 213 | court of competent jurisdiction determines that the patent provision (Section 214 | 3), the indemnity provision (Section 9) or other Section of the License 215 | conflicts with the conditions of the GPLv2, you may retroactively and 216 | prospectively choose to deem waived or otherwise exclude such Section(s) of 217 | the License, but only in their entirety and only with respect to the Combined 218 | Software. 
219 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # async-spsc 2 | 3 | Fast, easy-to-use, async-aware single-producer/single-consumer (SPSC) 4 | channel based around a ringbuffer. 5 | 6 | 7 | 8 | 9 | 10 | ## Status: alpha 11 | 12 | It works, it's fast, but it's still rough around the edges - missing APIs etc. 13 | 14 | We believe it to be correct, but you might not want to deploy to prod just yet. 15 | 16 | Git only. The crates.io release is a placeholder. 17 | 18 | TODO: 19 | 20 | * Batch APIs and streams support. 21 | * More tests. 22 | * More benchmarks. 23 | * More documentation. 24 | * Support use without a global allocator. 25 | 26 | Help welcome, I've already spent way more time on this than is healthy... 27 | 28 | ## Usage 29 | 30 | ``` 31 | use async_spsc::spsc; 32 | 33 | async fn async_example() { 34 | let (mut sender, mut receiver) = spsc::(2); 35 | assert!(sender.send(42).await.is_ok()); 36 | assert!(sender.send(420).await.is_ok()); 37 | assert_eq!(receiver.receive().await, Ok(42)); 38 | assert_eq!(receiver.receive().await, Ok(420)); 39 | assert!(sender.send(7).await.is_ok()); 40 | assert_eq!(receiver.receive().await, Ok(7)); 41 | } 42 | 43 | fn sync_example() { 44 | let (mut sender, mut receiver) = spsc::(2); 45 | assert!(sender.send(42).now().is_ok()); 46 | assert!(sender.send(420).now().is_ok()); 47 | assert!(sender.send(7).now().is_err()); // no space! 48 | 49 | assert_eq!(receiver.receive().now(), Ok(Some(42))); 50 | assert_eq!(receiver.receive().now(), Ok(Some(420))); 51 | assert!(receiver.receive().now().is_err()); // no message! 
52 | 53 | assert!(sender.send(7).now().is_err()); 54 | assert_eq!(receiver.receive().now(), Ok(Some(7))); 55 | } 56 | ``` 57 | 58 | ## Implementation Details 59 | 60 | This channel is significantly faster than multi-producer and multi-consumer channels 61 | because it does not require as much synchronisation. It is based around a single 62 | atomic into which we pack two closed flags and two positions. The position is stored 63 | modulo `2*capacity`, but indexed modulo `capacity`. The difference between the two 64 | positions will always be between 0 and `capacity`, both inclusive. 65 | 66 | The indices are packed into a single `AtomicUsize`. Each half 67 | additionally reserves a single bit for their `closed` flag. Thus, the 68 | maximum permissible length of a channel must fit into half a usize, 69 | minus two bits: 70 | 71 | | usize width (bits) | Maximum length (items) | 72 | |--------------------|------------------------| 73 | | 64 | 2^30 (over a billion) | 74 | | 32 | 2^14 (16384) | 75 | | 16 | 2^6 (64) | 76 | 77 | If you try to create a channel longer than this, you will cause a panic. 78 | 79 | ## Safety 80 | 81 | This library consists of low level concurrency and parallelism 82 | primitives. Some things simply could not be done without unsafe, 83 | others would not perform well without it. Thus, we use unsafe code. 84 | 85 | We take our use of unsafe seriously. A lot of care and attention has 86 | gone into the design of this library to ensure that we don't invoke 87 | UB. You are encouraged to audit the code and leave thoughtful feedback or 88 | submit improvements. 89 | 90 | Broadly speaking, we use unsafe for four main things: 91 | 92 | * Pin projection. It isn't optional, we can't use `pin-project-lite` 93 | at present and we won't use `pin-project`. 94 | * UnsafeCell access. Always appropriately synchronised. 95 | * Dealing with uninitialised memory. 
The alternative would be to wrap 96 | each item in an option, but that would consume more memory for most 97 | message types and be slower. 98 | * Allocating as a single contiguous allocation. This allows us to 99 | reduce 2 allocations to 1. 100 | 101 | ## Performance 102 | 103 | We're optimised for low contention scenarios - we expect there to be many more 104 | channels than threads on average and there can only be two ends for the channel. If 105 | you are in a potentially high contention scenario such as audio, you might want to 106 | look at one of the (non-async) ring buffers. As always, benchmark real code. 107 | 108 | The best performance will be available when we finish the batch APIs (as we can do a 109 | single atomic instead of an atomic per item). We need to figure out the best way of 110 | benchmarking these too. 111 | 112 | Here are some unscientific benchmark numbers for a capacity 1 channel. This is 113 | essentially the worst case scenario for this channel because it pays the overheads 114 | of supporting many items, but I can still compare it to 115 | [async-oneshot](https://github.com/irrustible/async-oneshot)'s performance figures 116 | and be disappointed: 117 | 118 | ``` 119 | contiguous_async_1/create_destroy 74.629 ns 120 | contiguous_async_1/send_now_closed 11.625 ns 121 | contiguous_async_1/send_now_empty 15.487 ns 122 | contiguous_async_1/send_now_full 6.8426 ns 123 | contiguous_async_1/receive_now_closed 3.4865 ns 124 | contiguous_async_1/receive_now_empty 5.6765 ns 125 | contiguous_async_1/receive_now_full 14.775 ns 126 | contiguous_async_1/send_closed 11.833 ns 127 | contiguous_async_1/send_empty 18.273 ns 128 | contiguous_async_1/send_full 24.604 ns 129 | contiguous_async_1/receive_closed 6.7487 ns 130 | contiguous_async_1/receive_empty 14.292 ns 131 | contiguous_async_1/receive_full 16.001 ns 132 | ``` 133 | 134 | ## Copyright and License 135 | 136 | Copyright (c) 2021 James Laver, async-spsc contributors. 
137 | 138 | [Licensed](LICENSE) under Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0), 139 | with LLVM Exceptions (https://spdx.org/licenses/LLVM-exception.html). 140 | 141 | Unless you explicitly state otherwise, any contribution intentionally submitted 142 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 143 | licensed as above, without any additional terms or conditions. 144 | 145 | -------------------------------------------------------------------------------- /benches/contiguous_async_1.rs: -------------------------------------------------------------------------------- 1 | #![cfg(all(feature="bench",feature="async"))] 2 | use criterion::*; 3 | use async_spsc::*; 4 | use wookie::dummy; 5 | 6 | pub fn create_destroy(c: &mut Criterion) { 7 | c.bench_function( 8 | "contiguous_async_1/create_destroy", 9 | |b| b.iter(|| spsc::(1)) 10 | ); 11 | } 12 | 13 | pub fn send_now_closed(c: &mut Criterion) { 14 | c.bench_function( 15 | "contiguous_async_1/send_now_closed", 16 | |b| b.iter_batched_ref( 17 | || spsc::(1).0, 18 | |ref mut s| s.send(420).now().unwrap_err(), 19 | BatchSize::SmallInput 20 | ) 21 | ); 22 | } 23 | 24 | pub fn send_now_empty(c: &mut Criterion) { 25 | c.bench_function( 26 | "contiguous_async_1/send_now_empty", 27 | |b| b.iter_batched_ref( 28 | || spsc::(1), 29 | |(ref mut s, _)| s.send(420).now().unwrap(), 30 | BatchSize::SmallInput 31 | ) 32 | ); 33 | } 34 | 35 | pub fn send_now_full(c: &mut Criterion) { 36 | c.bench_function( 37 | "contiguous_async_1/send_now_full", 38 | |b| b.iter_batched_ref( 39 | || { 40 | let (mut s, r) = spsc::(1); 41 | s.send(42).now().unwrap(); 42 | (s,r) 43 | }, 44 | |(ref mut s, _)| s.send(420).now().unwrap_err(), 45 | BatchSize::SmallInput 46 | ) 47 | ); 48 | } 49 | 50 | pub fn receive_now_closed(c: &mut Criterion) { 51 | c.bench_function( 52 | "contiguous_async_1/receive_now_closed", 53 | |b| b.iter_batched_ref( 54 | || spsc::(1).1, 55 | |ref mut r| 
r.receive().now().unwrap_err(), 56 | BatchSize::SmallInput 57 | ) 58 | ); 59 | } 60 | 61 | pub fn receive_now_empty(c: &mut Criterion) { 62 | c.bench_function( 63 | "contiguous_async_1/receive_now_empty", 64 | |b| b.iter_batched_ref( 65 | || spsc::(1), 66 | |(_, ref mut r)| r.receive().now().unwrap(), 67 | BatchSize::SmallInput 68 | ) 69 | ); 70 | } 71 | 72 | pub fn receive_now_full(c: &mut Criterion) { 73 | c.bench_function( 74 | "contiguous_async_1/receive_now_full", 75 | |b| b.iter_batched_ref( 76 | || { 77 | let (mut s, r) = spsc::(1); 78 | s.send(420).now().unwrap(); 79 | (s, r) 80 | }, 81 | |(_, ref mut r)| r.receive().now().unwrap(), 82 | BatchSize::SmallInput 83 | ) 84 | ); 85 | } 86 | 87 | pub fn send_closed(c: &mut Criterion) { 88 | c.bench_function( 89 | "contiguous_async_1/send_closed", 90 | |b| b.iter_batched_ref( 91 | || spsc::(1).0, 92 | |ref mut s| { 93 | dummy!(s2: s.send(420)); 94 | s2.poll() 95 | }, 96 | BatchSize::SmallInput 97 | ) 98 | ); 99 | } 100 | 101 | pub fn send_empty(c: &mut Criterion) { 102 | c.bench_function( 103 | "contiguous_async_1/send_empty", 104 | |b| b.iter_batched_ref( 105 | || spsc::(1), 106 | |(ref mut s, _)| { 107 | dummy!(s2: s.send(420)); 108 | s2.poll() 109 | }, 110 | BatchSize::SmallInput 111 | ) 112 | ); 113 | } 114 | 115 | pub fn send_full(c: &mut Criterion) { 116 | c.bench_function( 117 | "contiguous_async_1/send_full", 118 | |b| b.iter_batched_ref( 119 | || { 120 | let (mut s, r) = spsc::(1); 121 | s.send(42).now().unwrap(); 122 | (s,r) 123 | }, 124 | |(ref mut s, _)| { 125 | dummy!(s2: s.send(420)); 126 | s2.poll() 127 | }, 128 | BatchSize::SmallInput 129 | ) 130 | ); 131 | } 132 | 133 | pub fn receive_closed(c: &mut Criterion) { 134 | c.bench_function( 135 | "contiguous_async_1/receive_closed", 136 | |b| b.iter_batched_ref( 137 | || spsc::(1).1, 138 | |ref mut r| { 139 | dummy!(r2: r.receive()); 140 | r2.poll() 141 | }, 142 | BatchSize::SmallInput 143 | ) 144 | ); 145 | } 146 | 147 | pub fn receive_empty(c: &mut 
Criterion) { 148 | c.bench_function( 149 | "contiguous_async_1/receive_empty", 150 | |b| b.iter_batched_ref( 151 | || spsc::(1), 152 | |(_, ref mut r)| { 153 | dummy!(r2: r.receive()); 154 | r2.poll() 155 | }, 156 | BatchSize::SmallInput 157 | ) 158 | ); 159 | } 160 | 161 | pub fn receive_full(c: &mut Criterion) { 162 | c.bench_function( 163 | "contiguous_async_1/receive_full", 164 | |b| b.iter_batched_ref( 165 | || { 166 | let (mut s, r) = spsc::(1); 167 | s.send(420).now().unwrap(); 168 | (s, r) 169 | }, 170 | |(_, ref mut r)| { 171 | dummy!(r2: r.receive()); 172 | r2.poll() 173 | }, 174 | BatchSize::SmallInput 175 | ) 176 | ); 177 | } 178 | 179 | 180 | 181 | 182 | criterion_group!( 183 | benches, 184 | create_destroy, 185 | send_now_closed, 186 | send_now_empty, 187 | send_now_full, 188 | receive_now_closed, 189 | receive_now_empty, 190 | receive_now_full, 191 | send_closed, 192 | send_empty, 193 | send_full, 194 | receive_closed, 195 | receive_empty, 196 | receive_full, 197 | ); 198 | criterion_main!(benches); 199 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Fast, easy-to-use, async-aware single-producer/single-consumer 2 | //! (SPSC) channel based around a ringbuffer. 3 | //! 4 | //! # Examples 5 | //! 6 | //! ``` 7 | //! use async_spsc::spsc; 8 | //! 9 | //! async fn async_example() { 10 | //! let (mut sender, mut receiver) = spsc::(2); 11 | //! assert!(sender.send(42).await.is_ok()); 12 | //! assert!(sender.send(420).await.is_ok()); 13 | //! assert_eq!(receiver.receive().await, Ok(42)); 14 | //! assert_eq!(receiver.receive().await, Ok(420)); 15 | //! assert!(sender.send(7).await.is_ok()); 16 | //! assert_eq!(receiver.receive().await, Ok(7)); 17 | //! } 18 | //! 19 | //! fn sync_example() { 20 | //! let (mut sender, mut receiver) = spsc::(2); 21 | //! assert!(sender.send(42).now().is_ok()); 22 | //! 
assert!(sender.send(420).now().is_ok()); 23 | //! assert!(sender.send(7).now().is_err()); // no space! 24 | //! 25 | //! assert_eq!(receiver.receive().now(), Ok(Some(42))); 26 | //! assert_eq!(receiver.receive().now(), Ok(Some(420))); 27 | //! assert!(receiver.receive().now().is_err()); // no message! 28 | //! 29 | //! assert!(sender.send(7).now().is_err()); 30 | //! assert_eq!(receiver.receive().now(), Ok(Some(7))); 31 | //! } 32 | //! ``` 33 | #![no_std] 34 | 35 | #[cfg(feature="alloc")] 36 | extern crate alloc; 37 | 38 | use core::marker::PhantomData; 39 | use core::mem::MaybeUninit; 40 | use core::ptr::{NonNull, drop_in_place}; 41 | use core::sync::atomic::{AtomicUsize, Ordering}; 42 | #[cfg(feature="async")] 43 | use core::{future::Future, pin::Pin, task::{Context, Poll}}; 44 | 45 | #[cfg(feature="async")] 46 | use atomic_waker::AtomicWaker; 47 | 48 | #[cfg(feature="alloc")] 49 | use pages::*; 50 | 51 | mod state; 52 | use state::*; 53 | pub mod sender; 54 | pub use sender::*; 55 | pub mod receiver; 56 | pub use receiver::*; 57 | 58 | // Sender/Receiver operation-local flags 59 | const WAITING: u8 = 1; 60 | 61 | #[derive(Debug)] 62 | enum Holder<'a, 'b, T> { 63 | /// A pointer we do not own and will not attempt to free. 64 | BorrowedPtr(NonNull>, PhantomData<&'a ()>), 65 | /// A pointer to a page we manage. This is an owned object we are 66 | /// abusing, so we need to suppress its destructor and manually 67 | /// drop it only when both sides are done. 68 | #[cfg(feature="alloc")] 69 | Page(PageRef), 70 | // // A pointer produced from [`Box::leak`] that's potentially 71 | // // shared with other holders. 
72 | // #[cfg(feature="alloc")] 73 | // SharedBoxPtr(NonNull>), 74 | } 75 | 76 | impl<'a, 'b, T> Clone for Holder<'a, 'b, T> { 77 | fn clone(&self) -> Self { 78 | match self { 79 | Holder::BorrowedPtr(r, p) => Holder::BorrowedPtr(*r, *p), 80 | #[cfg(feature="alloc")] 81 | Holder::Page(r) => Holder::Page(*r), 82 | // #[cfg(feature="alloc")] 83 | // Holder::SharedBoxPtr(r) => Holder::SharedBoxPtr(*r), 84 | } 85 | } 86 | } 87 | 88 | impl<'a, 'b, T> Copy for Holder<'a, 'b, T> {} 89 | 90 | impl<'a, 'b, T> Holder<'a, 'b, T> { 91 | 92 | #[inline(always)] 93 | fn atomics(&self) -> *const Atomics { 94 | match self { 95 | Holder::BorrowedPtr(r, _) => &unsafe { r.as_ref() }.atomics, 96 | Holder::Page(p) => unsafe { p.header() } 97 | } 98 | } 99 | 100 | #[inline(always)] 101 | fn data(&mut self) -> *mut MaybeUninit { 102 | match self { 103 | Holder::BorrowedPtr(r, _) => unsafe { r.as_ref() }.data(), 104 | Holder::Page(p) => unsafe { p.data() }, 105 | } 106 | } 107 | 108 | // Safe only if we are the last referent to the spsc. 109 | unsafe fn cleanup(self, capacity: Half, state: State) { 110 | // whatever we are, we are going to drop the inflight items 111 | // and the wakers if there are any. 
112 | match self { 113 | Holder::BorrowedPtr(ptr, _) => 114 | ptr.as_ref().cleanup(capacity, state), 115 | #[cfg(feature="alloc")] 116 | Holder::Page(c) => { 117 | drop_in_flight(c.data(), capacity, state); 118 | PageRef::drop(c); 119 | } 120 | } 121 | } 122 | 123 | // // Safe only if we are the last active referent 124 | // pub(crate) unsafe fn recycle(self) { 125 | // (*self.inner.get()).reset(); 126 | // self.flags.store(0, orderings::STORE); 127 | // } 128 | } 129 | 130 | fn drop_in_flight(items: *mut MaybeUninit, capacity: Half, state: State) { 131 | // TODO: probably not optimal 132 | let front = state.front().position(); 133 | let mut back = state.back(); 134 | loop { 135 | let b = back.position(); 136 | if front == b { break; } 137 | let index = (b % capacity) as usize; 138 | unsafe { drop_in_place(items.add(index)); } 139 | back = back.advance(capacity, 1); 140 | } 141 | } 142 | 143 | /// Creates a new heap-backed [`Spsc`] that can store up to `capacity` 144 | /// in-flight messages at a time. 145 | pub fn spsc(capacity: Half) -> (Sender<'static, 'static, T>, Receiver<'static, 'static, T>) { 146 | // First we must check we can handle this capacity. 147 | assert!(capacity > 0); 148 | assert!(capacity <= MAX_CAPACITY); 149 | let page = PageRef::new(Atomics::default(), capacity); 150 | let holder = Holder::Page(page); 151 | (Sender::new(holder, State(0), capacity), Receiver::new(holder, State(0), capacity)) 152 | } 153 | 154 | #[derive(Debug)] 155 | pub struct Spsc<'a, T> { 156 | atomics: Atomics, 157 | ptr: NonNull>, 158 | capacity: Half, 159 | _phantom: PhantomData<&'a T> 160 | } 161 | 162 | impl<'a, T> Spsc<'a, T> { 163 | fn cleanup(&self, capacity: Half, state: State) { 164 | // Safe because we have exclusive access 165 | drop_in_flight(self.data(), capacity, state); 166 | // Avoid a potential memory leak. 
167 | self.atomics.drop_wakers(); 168 | } 169 | 170 | fn data(&self) -> *mut MaybeUninit { self.ptr.as_ptr() } 171 | } 172 | 173 | impl<'a, T> Spsc<'a, T> { 174 | // /// ## Safety 175 | // /// 176 | // /// * ptr must point to a len-sized array of appropriately aligned 177 | // /// and padded T which should already be initialised. 178 | // /// 179 | // /// Note: will panic if length is 0 or greater than can be 180 | // /// represented in two bits less than half a usize. 181 | // pub unsafe fn from_nonnull_len(ptr: NonNull>, len: Half) -> Self { 182 | // assert!(len > 0, "the spsc buffer must have a non-zero length"); 183 | // assert!(len <= MAX_CAPACITY, "the spsc buffer must have a length representable in two bits less than half a usize") 184 | // Spsc(); 185 | // Self::make(Slice::BorrowedPtrLen(ptr, len)) 186 | // } 187 | 188 | // // /// ## Safety 189 | // // /// 190 | // // /// * len must not be zero 191 | // // /// * ptr must point to a len-sized array of appropriately aligned 192 | // // /// and padded T which should already be initialised. 193 | // // /// 194 | // // /// Note: will panic if length is 0 or greater than can be 195 | // // /// represented in two bits less than half a usize. 
196 | // // pub unsafe fn from_raw_parts(ptr: *mut MaybeUninit, len: Half) -> Self { 197 | // // assert!(len > 0, "the spsc buffer must have a non-zero length"); 198 | // // assert!(len <= MAX_CAPACITY, "the spsc buffer must have a length representable in two bits less than half a usize"); 199 | // // Self::make(Slice::BorrowedPtrLen(NonNull::new_unchecked(ptr), len)) 200 | // // } 201 | 202 | } 203 | 204 | #[derive(Debug,Default)] 205 | pub struct Atomics { 206 | state: AtomicUsize, 207 | #[cfg(feature="async")] 208 | sender: AtomicWaker, 209 | #[cfg(feature="async")] 210 | receiver: AtomicWaker, 211 | } 212 | 213 | impl Atomics { 214 | fn drop_wakers(&self) { 215 | let _s = self.sender.take(); 216 | let _r = self.receiver.take(); 217 | } 218 | } 219 | 220 | #[derive(Debug,Eq,Hash,PartialEq)] 221 | pub struct SendError { 222 | pub kind: SendErrorKind, 223 | pub value: T, 224 | } 225 | #[derive(Debug,Eq,Hash,PartialEq)] 226 | pub enum SendErrorKind { 227 | Closed, 228 | Full, 229 | } 230 | 231 | #[derive(Debug,Eq,Hash,PartialEq)] 232 | pub struct Closed; 233 | -------------------------------------------------------------------------------- /src/receiver.rs: -------------------------------------------------------------------------------- 1 | use crate::*; 2 | use core::cell::Cell; 3 | 4 | // #[cfg(feature="stream")] 5 | // use futures_core::stream::Stream; 6 | 7 | pub struct Receiver<'a, 'b, T> { 8 | spsc: Option>, 9 | state: Cell, 10 | cap: Half, 11 | } 12 | 13 | impl<'a, 'b, T> Receiver<'a, 'b, T> { 14 | 15 | pub(super) fn new(spsc: Holder<'a, 'b, T>, state: State, cap: Half) -> Self { 16 | Receiver { spsc: Some(spsc), state: Cell::new(state), cap } 17 | } 18 | 19 | fn refresh_state(&mut self) -> State { 20 | let atomics = self.spsc.as_mut().unwrap().atomics(); 21 | let state = State(atomics.state.load(Ordering::Acquire)); 22 | self.state.set(state); 23 | state 24 | } 25 | 26 | fn update_state(&mut self, mask: Half) -> State { 27 | let atomics = 
self.spsc.as_mut().unwrap().atomics(); 28 | let mask = (mask as usize) << BITS; 29 | let state = State(atomics.state.fetch_xor(mask, Ordering::Acquire) ^ mask); 30 | self.state.set(state); 31 | state 32 | } 33 | 34 | /// Returns a disposable object which can receive a single message 35 | /// either synchronously via [`Receiving::now`] or asynchronously 36 | /// via the [`core::future::Future`] instance. 37 | pub fn receive<'c>(&'c mut self) -> Receiving<'a, 'b, 'c, T> { 38 | Receiving { receiver: Some(self) } 39 | } 40 | } 41 | 42 | 43 | 44 | unsafe impl<'a, 'b, T: Send> Send for Receiver<'a, 'b, T> {} 45 | unsafe impl<'a, 'b, T: Send> Sync for Receiver<'a, 'b, T> {} 46 | 47 | impl<'a, 'b, T> Drop for Receiver<'a, 'b, T> { 48 | fn drop(&mut self) { 49 | if let Some(spsc) = self.spsc.take() { 50 | // If we already know they've closed, clean up. 51 | let state = self.state.get(); 52 | if state.is_closed() { 53 | unsafe { spsc.cleanup(self.cap, state); } 54 | return; 55 | } 56 | // Mark ourselves closed 57 | let atomics = spsc.atomics(); 58 | let state2 = State(atomics.state.fetch_xor(R_CLOSE, Ordering::AcqRel)); 59 | if state2.is_closed() { 60 | // We were beaten to it. 61 | unsafe { spsc.cleanup(self.cap, state); } 62 | } else { 63 | // We should wake them 64 | atomics.sender.wake(); 65 | } 66 | } 67 | } 68 | } 69 | 70 | /// A single Receive operation that can be performed synchronously 71 | /// (with [`Receiving::now`]) or asynchronously (with the 72 | /// [`core::future::Future`] instance). 73 | pub struct Receiving<'a, 'b, 'c, T> { 74 | receiver: Option<&'c mut Receiver<'a, 'b, T>>, 75 | } 76 | 77 | impl<'a, 'b, 'c, T> Receiving<'a, 'b, 'c, T> { 78 | pub fn now(mut self) -> Result, Closed> { 79 | // Take our receiver, since we can't be called again. 80 | let receiver = self.receiver.take().unwrap(); 81 | if let Some(spsc) = receiver.spsc.as_mut() { 82 | let cap = receiver.cap; 83 | // We are going to first check our local cached state. 
If 84 | // it tells us there is space, we don't need to 85 | // synchronise to receive! 86 | let mut state = receiver.state.get(); 87 | // The Receiver is slightly different logic to the Sender 88 | // since if there are still messages in flight, we can 89 | // receive them even if the Sender closed. Thus if we hit 90 | // a close, having already taken our local receiver, 91 | // there's nothing to do in terms of cleanup. 92 | if state.is_empty() { 93 | if state.is_closed() { return Err(Closed); } 94 | // Hard luck, time to synchronise (and recheck) 95 | state = State(spsc.atomics().state.load(Ordering::Acquire)); 96 | receiver.state.set(state); 97 | if state.is_empty() { 98 | if state.is_closed() { return Err(Closed); } 99 | return Ok(None); 100 | } 101 | } 102 | // Still here? Fabulous, we have a message waiting for us. 103 | let back = state.back(); 104 | // This mouthful takes the value, leaving the slot uninitialised 105 | let value = unsafe { spsc.data().add(back.index(cap)).read().assume_init() }; 106 | // Now inform the Sender they can have this slot back. 107 | let b = back.advance(receiver.cap, 1); 108 | let mask = ((back.0 ^ b.0) as usize) << BITS; 109 | let atomics = spsc.atomics(); 110 | let state = State(atomics.state.fetch_xor(mask, Ordering::Acquire) ^ mask); 111 | receiver.state.set(state); 112 | // Now we attempt to wake the Sender if they are not 113 | // closed. There will probably be nothing here. 
114 | #[cfg(feature="async")] 115 | if !state.is_closed() { spsc.atomics().sender.wake(); } 116 | return Ok(Some(value)); 117 | } 118 | Err(Closed) 119 | } 120 | } 121 | 122 | #[cfg(feature="async")] 123 | impl<'a, 'b, 'c, T> Future for Receiving<'a, 'b, 'c, T> { 124 | type Output = Result; 125 | fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { 126 | let this = unsafe { Pin::get_unchecked_mut(self) }; 127 | let receiver = this.receiver.take().unwrap(); 128 | if let Some(spsc) = receiver.spsc.as_mut() { 129 | let cap = receiver.cap; 130 | let mut state = receiver.state.get(); 131 | // Try to find a message without hitting the atomic. 132 | if state.is_empty() { 133 | // If we're closed, we don't need to synchronise again. 134 | if state.is_closed() { return Poll::Ready(Err(Closed)); } 135 | // No? let's refresh the state then and check again 136 | state = State(spsc.atomics().state.load(Ordering::Acquire)); 137 | receiver.state.set(state); 138 | if state.is_empty() { 139 | if state.is_closed() { return Poll::Ready(Err(Closed)); } 140 | // Go into hibernation 141 | spsc.atomics().receiver.register(ctx.waker()); 142 | this.receiver.replace(receiver); 143 | return Poll::Pending; 144 | } 145 | } 146 | // Good news, we can receive a value. 147 | let back = state.back(); 148 | let value = unsafe { spsc.data().add(back.index(cap)).read().assume_init() }; 149 | // Now inform the other side we're done reading. 150 | let b = back.advance(cap, 1); 151 | let mask = ((back.0 ^ b.0) as usize) << BITS; 152 | let atomics = spsc.atomics(); 153 | let state = State(atomics.state.fetch_xor(mask, Ordering::Acquire) ^ mask); 154 | receiver.state.set(state); 155 | // Now we attempt to wake the Sender if they are not 156 | // closed. There will probably be nothing here. 
157 | if !state.is_closed() { atomics.sender.wake(); } 158 | return Poll::Ready(Ok(value)); 159 | } 160 | Poll::Ready(Err(Closed)) 161 | } 162 | } 163 | 164 | // pub struct Batch<'a, 'b, 'c, T> { 165 | // receiver: Option<&'a mut Receiver<'b, 'c, T>>, 166 | // state: State, 167 | // } 168 | 169 | // // impl<'a, 'b, 'c, T> Future for Batch<'a, 'b, 'c, T> { 170 | // // type Output = Result; 171 | // // fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { 172 | // // let this = unsafe { Pin::get_unchecked_mut(self) }; 173 | // // if let Some(sender) = self.sender.take() { 174 | // // if self.state.is_empty() { 175 | // // if state.is_closed() { return Poll::Ready(Err(Closed)); } 176 | // // if self.state == sender.state { 177 | // // } 178 | // // } 179 | // // } 180 | // // } 181 | // // } 182 | 183 | // impl<'a, 'b, 'c, T> Iterator for Batch<'a, 'b, 'c, T> { 184 | // type Item = T; 185 | // fn next(&mut self) -> Option { 186 | // if let Some(receiver) = self.receiver.as_mut() { 187 | // if let Some(spsc) = receiver.spsc { 188 | // // The batch only fetches items that are known to be 189 | // // available for reading already. 190 | // let state = self.state; 191 | // if state.is_empty() { return None; } 192 | // // Still here? Fabulous, we have a message waiting for us. 193 | // let back = state.back(); 194 | // let value = unsafe { 195 | // (&mut *spsc.buffer.get())[back.position()].as_mut_ptr().read() 196 | // }; 197 | // // Update our local version of the state. 
198 | // self.state = state.with_back(back.advance(receiver.cap, 1)); 199 | // return Some(value); 200 | // } 201 | // } 202 | // None 203 | // } 204 | // } 205 | 206 | // impl<'a, 'b, 'c, T> Drop for Batch<'a, 'b, 'c, T> { 207 | // fn drop(&mut self) { 208 | // if let Some(receiver) = self.receiver.take() { 209 | // if receiver.spsc.is_some() { 210 | // // We need to update the Receiver's state cache if the other side isn't closed 211 | // if self.state != receiver.state { 212 | // if self.state.is_closed() { 213 | // // no point updating the atomic, just update the receiver 214 | // receiver.state = self.state; 215 | // } else { 216 | // // apply our changes to the atomic and the receiver. 217 | // let mask = receiver.state.front().0 ^ self.state.front().0; 218 | // receiver.update_state(mask); 219 | // } 220 | // } 221 | // } 222 | // } 223 | // } 224 | // } 225 | -------------------------------------------------------------------------------- /src/sender.rs: -------------------------------------------------------------------------------- 1 | use crate::*; 2 | use core::cell::Cell; 3 | 4 | pub struct Sender<'a, 'b, T> { 5 | spsc: Option>, 6 | state: Cell, 7 | cap: Half, 8 | } 9 | 10 | impl<'a, 'b, T> Sender<'a, 'b, T> { 11 | 12 | pub(super) fn new(spsc: Holder<'a, 'b, T>, state: State, cap: Half) -> Self { 13 | Sender { spsc: Some(spsc), state: Cell::new(state), cap } 14 | } 15 | 16 | /// Indicates how many send slots are known to be available. 17 | /// 18 | /// Note: this checks our local cache of the state, so the true 19 | /// figure may be greater. We will find out when we next send. 20 | pub fn space(&self) -> Half { self.state.get().space(self.cap) } 21 | 22 | /// Indicates whether we believe there to be no space left to send. 23 | /// 24 | /// Note: this checks our local cache of the state, so the true 25 | /// figure may be greater. We will find out when we next send. 
26 | pub fn is_full(&self) -> bool { self.state.get().is_full(self.cap) } 27 | 28 | /// Indicates whether the channel is empty. 29 | pub fn is_empty(&self) -> bool { self.state.get().is_full(self.cap) } 30 | 31 | /// Indicates the capacity of the channel, the maximum number of 32 | /// messages that can be in flight at a time. 33 | pub fn capacity(&self) -> Half { self.cap } 34 | 35 | pub fn send<'c>(&'c mut self, value: T) -> Sending<'c, 'a, 'b, T> { 36 | Sending { sender: Some(self),value: Some(value), flags: 0 } 37 | } 38 | 39 | // pub fn batch<'c>(&'c mut self) -> Batch<'c, 'a, 'b, T> { 40 | // let state = self.state; 41 | // Batch { sender: Some(self), state } 42 | // } 43 | 44 | } 45 | impl<'a, 'b, T> Drop for Sender<'a, 'b, T> { 46 | fn drop(&mut self) { 47 | if let Some(spsc) = self.spsc.take() { 48 | let state = self.state.get(); 49 | if state.is_closed() { 50 | unsafe { spsc.cleanup(self.cap, state); } 51 | return; 52 | } 53 | let atomics = unsafe { &*spsc.atomics() }; 54 | let state = State(atomics.state.fetch_xor(S_CLOSE, Ordering::AcqRel)); 55 | if state.is_closed() { 56 | unsafe { spsc.cleanup(self.cap, state); } 57 | } else { 58 | atomics.receiver.wake(); 59 | } 60 | } 61 | } 62 | } 63 | 64 | unsafe impl<'a, 'b, T: Send> Send for Sender<'a, 'b, T> {} 65 | unsafe impl<'a, 'b, T: Send> Sync for Sender<'a, 'b, T> {} 66 | 67 | /// Sends a single message. 
68 | pub struct Sending<'a, 'b, 'c, T> { 69 | sender: Option<&'a mut Sender<'b, 'c, T>>, 70 | value: Option, 71 | flags: u8, 72 | } 73 | 74 | fn closed(value: T) -> Result<(), SendError> { 75 | Err(SendError { kind: SendErrorKind::Closed, value }) 76 | } 77 | 78 | fn full(value: T) -> Result<(), SendError> { 79 | Err(SendError { kind: SendErrorKind::Full, value }) 80 | } 81 | 82 | impl<'a, 'b, 'c, T> Sending<'a, 'b, 'c, T> { 83 | pub fn now(mut self) -> Result<(), SendError> { 84 | let sender = self.sender.take().unwrap(); 85 | let value = self.value.take().unwrap(); 86 | if let Some(spsc) = sender.spsc.as_mut() { 87 | let cap = sender.cap; 88 | let mut state = sender.state.get(); 89 | // We do nothing if we're closed. 90 | if state.is_closed() { return closed(value); } 91 | if state.is_full(cap) { 92 | // The Receiver may have cleared space since the cache 93 | // was last updated; refresh and recheck. 94 | state = State(unsafe { &*spsc.atomics() }.state.load(Ordering::Acquire)); 95 | sender.state.set(state); 96 | if state.is_closed() { return closed(value); } 97 | if state.is_full(cap) { return full(value); } 98 | } 99 | // Still here? Cool, we can write the value now. 100 | let s = state.front(); 101 | unsafe { spsc.data().add(s.index(cap)).write(MaybeUninit::new(value)) }; 102 | // Update the atomic with our advance. 103 | let mask = (s.0 ^ s.advance(cap, 1).0) as usize; 104 | let atomics = unsafe { &* spsc.atomics() }; 105 | let state2 = State(atomics.state.fetch_xor(mask, Ordering::Acquire) ^ mask); 106 | sender.state.set(state2); 107 | if state2.is_closed() { 108 | // Oh. Well we need our item back for the SendError. 109 | let value = unsafe { spsc.data().add(s.index(cap)).read().assume_init() }; 110 | // We already committed our advance. To avoid double 111 | // freeing, we have to wind back the sender's local 112 | // cache of the state in lieu of an atomic op. 
113 | sender.state.set(State((state.0 & BACK) | s.0 as usize)); 114 | return closed(value); 115 | } 116 | // Before we go, let the receiver know there's a message. 117 | #[cfg(feature="async")] 118 | atomics.receiver.wake(); 119 | return Ok(()); 120 | } 121 | closed(value) 122 | } 123 | } 124 | 125 | #[cfg(feature="async")] 126 | impl<'a, 'b, 'c, T> Future for Sending<'a, 'b, 'c, T> { 127 | type Output = Result<(), SendError>; 128 | fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { 129 | let this = unsafe { Pin::get_unchecked_mut(self) }; 130 | let sender = this.sender.take().unwrap(); 131 | let value = this.value.take().unwrap(); 132 | if let Some(spsc) = sender.spsc.as_mut() { 133 | let cap = sender.cap; 134 | let mut state = sender.state.get(); 135 | // First we must check we're not closed. 136 | if state.is_closed() { return Poll::Ready(closed(value)); } 137 | // Try to find space without hitting the atomic. 138 | if state.is_full(cap) { 139 | let atomics = unsafe { &*spsc.atomics() }; 140 | state = State(atomics.state.load(Ordering::Acquire)); 141 | sender.state.set(state); 142 | // We have to check again because of that refresh. 143 | if state.is_closed() { return Poll::Ready(closed(value)); } 144 | if state.is_full(cap) { 145 | // We'll have to wait. 146 | this.flags |= WAITING; 147 | atomics.sender.register(ctx.waker()); 148 | // We'll also have to put ourselves back. 149 | this.sender.replace(sender); 150 | this.value.replace(value); 151 | return Poll::Pending 152 | } 153 | } 154 | // Still here? Cool, we can write the value now. 155 | let s = state.front(); 156 | unsafe { spsc.data().add(s.index(cap)).write(MaybeUninit::new(value)) }; 157 | // Update the atomic with our advance. 158 | let mask = (s.0 ^ s.advance(cap, 1).0) as usize; 159 | let atomics = unsafe { &*spsc.atomics() }; 160 | let state2 = State(atomics.state.fetch_xor(mask, Ordering::Acquire) ^ mask); 161 | sender.state.set(state2); 162 | if state2.is_closed() { 163 | // Oh. 
Well we need our item back for the SendError. 164 | let value = unsafe { spsc.data().add(s.index(cap)).read().assume_init() }; 165 | // We already committed our advance. To avoid double 166 | // freeing, we have to wind back the sender's local 167 | // cache of the state in lieu of an atomic op. 168 | sender.state.set(State((state.0 & BACK) | s.0 as usize)); 169 | return Poll::Ready(closed(value)); 170 | } 171 | // Before we go, let the receiver know there's a message. 172 | atomics.receiver.wake(); 173 | return Poll::Ready(Ok(())); 174 | } 175 | Poll::Ready(closed(value)) 176 | } 177 | } 178 | 179 | impl<'a, 'b, 'c, T> Drop for Sending<'a, 'b, 'c, T> { 180 | fn drop(&mut self) { 181 | if let Some(sender) = self.sender.take() { 182 | if (self.flags & WAITING) != 0 { 183 | // We left a waker we should probably clear up 184 | sender.spsc.as_mut().map(|r| unsafe { &*r.atomics() }.sender.take()); 185 | } 186 | } 187 | } 188 | } 189 | 190 | // /// 191 | // pub struct Batch<'a, 'b, 'c, T> { 192 | // sender: Option<&'a mut Sender<'b, 'c, T>>, 193 | // state: State, 194 | // } 195 | 196 | // impl<'a, 'b, 'c, T> Batch<'a, 'b, 'c, T> { 197 | // pub fn space(&self) -> Half { 198 | // let sender = self.sender.as_ref().unwrap(); 199 | // self.state.space(sender.cap) 200 | // } 201 | // // pub fn push(&mut self, value: T) -> Result<(), SendError> { 202 | // // Ok(()) 203 | // // } 204 | // } 205 | 206 | // #[cfg(feature="async")] 207 | // impl<'a, 'b, 'c, T> Future for Batch<'a, 'b, 'c, T> { 208 | // type Output = Result<(), SendError<()>>; 209 | // fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { 210 | // todo!() 211 | // } 212 | // } 213 | 214 | // impl<'a, 'b, 'c, T> Drop for Batch<'a, 'b, 'c, T> { 215 | // fn drop(&mut self) { 216 | // if let Some(sender) = self.sender.take() { 217 | // let front = self.state.front(); 218 | // let sender_front = sender.state.front(); 219 | // // Check if we have to do anything. 
//             if front != sender_front {
//                 if let Some(spsc) = sender.spsc {
//                     // We have some uncommitted changes, we should propagate them.
//                     let mask = (front.0 ^ sender_front.0) as usize;
//                     let state = State(spsc.atomics.state.fetch_xor(mask, Ordering::AcqRel));
//                     if !state.is_closed() {
//                         // propagate the updated state to the sender.
//                         sender.state = State(state.0 ^ mask);
//                     }
//                 }
//             }
//         }
//     }
// }

// ---------------------------------------------------------------
// /src/state.rs
// ---------------------------------------------------------------

use core::convert::TryInto;

// Half of a usize: the shared state packs two of these (front and
// back) into a single atomic word.
#[cfg(target_pointer_width="64")] pub type Half = u32;
#[cfg(target_pointer_width="32")] pub type Half = u16;
#[cfg(target_pointer_width="16")] pub type Half = u8;

#[cfg(target_pointer_width="64")] pub const BITS: usize = 32;
#[cfg(target_pointer_width="32")] pub const BITS: usize = 16;
#[cfg(target_pointer_width="16")] pub const BITS: usize = 8;

/// The top bit of each half is its `closed` flag.
pub const HIGH_BIT: Half = 1 << (BITS - 1);
pub const S_CLOSE: usize = HIGH_BIT as usize;
pub const R_CLOSE: usize = S_CLOSE << BITS;
pub const ANY_CLOSE: usize = S_CLOSE | R_CLOSE;
pub const FRONT: usize = Half::MAX as usize;
pub const BACK: usize = !FRONT;
pub const MAX_CAPACITY: Half = (HIGH_BIT >> 1) - 1;

/// One half of the packed channel state: a position that wraps at
/// twice the capacity, plus a `closed` flag in the top bit.
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct HalfState(pub Half);

impl HalfState {

    /// The position with the `closed` flag stripped.
    // Consistency: use the named constant rather than re-deriving
    // `1 << (BITS - 1)` inline.
    #[inline(always)]
    pub fn position(self) -> Half { self.0 & !HIGH_BIT }

    /// The buffer index this half refers to. Positions wrap at twice
    /// the capacity, so indexing reduces them modulo the capacity.
    // Robustness: strip the `closed` flag before reducing. The raw
    // value would misindex for a closed half with a non-power-of-two
    // capacity. (Callers currently only index their own, never-closed
    // half, so this is backward-compatible.)
    pub fn index(self, capacity: Half) -> usize { (self.position() % capacity) as usize }

    /// Whether this half's `closed` flag is set.
    #[inline(always)]
    pub fn is_closed(self) -> bool { (self.0 & HIGH_BIT) != 0 }

    /// Advance the position by `by`, wrapping at twice the capacity.
    #[inline(always)]
    pub fn advance(self, cap: Half, by: Half) -> Self {
        HalfState((self.0 + by) % (2 * cap))
    }

    /// Set the `closed` flag.
    #[inline(always)]
    pub fn close(self) -> Self { HalfState(self.0 | HIGH_BIT) }
}

// The state is divided into two halves: front (updated by Sender)
// and back (updated by Receiver). Each half comprises an integer
// index into the slice and a 'closed' flag.
//
// Ring buffers have a small complication in general: how to
// distinguish full and empty. There are a few approaches the
// internet knows about:
//
// 1. Wrap at capacity when incrementing and waste a slot. Naive,
//    simple, no thanks.
// 2. Don't wrap when incrementing and wrap at capacity when
//    indexing. This allows use of the full integer to represent how
//    far ahead of the back the front is, though we only need one
//    extra value. A variation of this does not do overflow checking
//    but requires the buffer size to be a power of two.
// 3. Wrap at twice capacity when incrementing and at capacity when
//    indexing. This allows us to get away with only stealing one
//    bit. This is very handy for us since we're trying to use as
//    little space as possible so we can pack them into a single
//    atomic. This is the approach we chose.
61 | #[derive(Copy,Clone,Debug,Eq,PartialEq)] 62 | pub struct State(pub(crate) usize); 63 | 64 | impl State { 65 | #[inline(always)] 66 | pub fn front(self) -> HalfState { 67 | HalfState((self.0 & FRONT).try_into().unwrap()) 68 | } 69 | 70 | #[inline(always)] 71 | pub fn back(self) -> HalfState { 72 | HalfState((self.0 >> BITS).try_into().unwrap()) 73 | } 74 | 75 | #[inline(always)] 76 | pub fn with_front(self, front: HalfState) -> State { 77 | State((self.0 & BACK) | front.0 as usize) 78 | } 79 | 80 | #[inline(always)] 81 | pub fn with_back(self, back: HalfState) -> State { 82 | State((self.0 & FRONT) | ((back.0 as usize) << BITS) ) 83 | } 84 | 85 | #[inline(always)] 86 | pub fn is_closed(self) -> bool { (self.0 & ANY_CLOSE) != 0 } 87 | 88 | #[inline(always)] 89 | pub fn is_full(self, cap: Half) -> bool { self.len(cap) == cap } 90 | 91 | #[inline(always)] 92 | pub fn is_empty(self) -> bool { self.front().position() == self.back().position() } 93 | 94 | /// The number of slots available for writing. 95 | #[inline(always)] 96 | pub fn space(self, cap: Half) -> Half { cap - self.len(cap)} 97 | /// The number of slots available for reading. 
98 | 99 | #[inline(always)] 100 | pub fn len(self, cap: Half) -> Half { 101 | let f = self.front().position(); 102 | let b = self.back().position(); 103 | if f >= b { 104 | f - b 105 | } else { 106 | 2 * cap - b + f 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /tests/ring_contiguous.rs: -------------------------------------------------------------------------------- 1 | use async_spsc::*; 2 | use wookie::*; 3 | use core::task::*; 4 | 5 | // these helpers make the tests more readable 6 | 7 | fn closed(value: T) -> Result<(), SendError> { 8 | Err(SendError { kind: SendErrorKind::Closed, value}) 9 | } 10 | fn full(value: T) -> Result<(), SendError> { 11 | Err(SendError { kind: SendErrorKind::Full, value}) 12 | } 13 | 14 | // assert the number of times the waker for the executor has been 15 | // cloned, dropped, woken. 16 | macro_rules! cdw { 17 | ($pin:ident : $c:literal , $d:literal , $w:literal) => { 18 | assert_eq!($c, $pin.cloned()); 19 | assert_eq!($d, $pin.dropped()); 20 | assert_eq!($w, $pin.woken()); 21 | } 22 | } 23 | 24 | #[test] 25 | fn create_destroy() { 26 | spsc::(1); 27 | } 28 | 29 | #[test] 30 | fn ping_pong_sync_sync() { 31 | let (mut s, mut r) = spsc::(1); 32 | for _ in 0..10 { 33 | assert_eq!(Ok(None), r.receive().now()); 34 | assert_eq!(Ok(()), s.send(42).now()); 35 | assert_eq!(full(420), s.send(420).now()); 36 | assert_eq!(Ok(Some(42)), r.receive().now()); 37 | assert_eq!(Ok(None), r.receive().now()); 38 | assert_eq!(Ok(()), s.send(420).now()); 39 | assert_eq!(Ok(Some(420)), r.receive().now()); 40 | } 41 | } 42 | 43 | #[test] 44 | fn ping_pong_async_async() { 45 | unsafe { 46 | let (mut s, mut r) = spsc::(1); 47 | for _ in 0..10 { 48 | { 49 | wookie!(r2: r.receive()); 50 | assert_eq!(Poll::Pending, r2.poll()); 51 | r2.stats().assert(1, 0, 0); 52 | assert_eq!(Poll::Pending, r2.poll()); 53 | r2.stats().assert(2, 1, 0); 54 | { 55 | wookie!(s2: s.send(42)); 56 | 
assert_eq!(Poll::Ready(Ok(())), s2.poll()); 57 | s2.stats().assert(0, 0, 0); 58 | } 59 | r2.stats().assert(2, 2, 1); 60 | wookie!(s2: s.send(420)); 61 | assert_eq!(Poll::Pending, s2.poll()); 62 | r2.stats().assert(2, 2, 1); 63 | s2.stats().assert(1, 0, 0); 64 | assert_eq!(Poll::Ready(Ok(42)), r2.poll()); 65 | r2.stats().assert(2, 2, 1); 66 | s2.stats().assert(1, 1, 1); 67 | assert_eq!(Poll::Ready(Ok(())), s2.poll()); 68 | s2.stats().assert(1, 1, 1); 69 | } 70 | wookie!(r2: r.receive()); 71 | assert_eq!(Poll::Ready(Ok(420)), r2.poll()); 72 | r2.stats().assert(0, 0, 0) 73 | } 74 | } 75 | } 76 | 77 | #[test] 78 | fn ping_pong_sync_async() { 79 | unsafe { 80 | let (mut s, mut r) = spsc::(1); 81 | for _ in 0..10 { 82 | { 83 | wookie!(r2: r.receive()); 84 | assert_eq!(Poll::Pending, r2.poll()); 85 | r2.stats().assert(1, 0, 0); 86 | assert_eq!(Poll::Pending, r2.poll()); 87 | r2.stats().assert(2, 1, 0); 88 | assert_eq!(Ok(()), s.send(42).now()); 89 | r2.stats().assert(2, 2, 1); 90 | assert_eq!(full(420), s.send(420).now()); 91 | assert_eq!(Poll::Ready(Ok(42)), r2.poll()); 92 | } 93 | assert_eq!(Ok(()), s.send(420).now()); 94 | wookie!(r2: r.receive()); 95 | assert_eq!(Poll::Ready(Ok(420)), r2.poll()); 96 | r2.stats().assert(0, 0, 0); 97 | } 98 | } 99 | } 100 | 101 | #[test] 102 | fn ping_pong_async_sync() { 103 | unsafe { 104 | let (mut s, mut r) = spsc::(1); 105 | for _ in 0..10 { 106 | assert_eq!(Ok(None), r.receive().now()); 107 | { 108 | wookie!(s2: s.send(42)); 109 | assert_eq!(Poll::Ready(Ok(())), s2.poll()); 110 | s2.stats().assert(0, 0, 0); 111 | } 112 | { 113 | wookie!(s2: s.send(420)); 114 | assert_eq!(Poll::Pending, s2.poll()); 115 | s2.stats().assert(1, 0, 0); 116 | assert_eq!(Ok(Some(42)), r.receive().now()); 117 | s2.stats().assert(1, 1, 1); 118 | assert_eq!(Poll::Ready(Ok(())), s2.poll()); 119 | s2.stats().assert(1, 1, 1); 120 | assert_eq!(Ok(Some(420)), r.receive().now()); 121 | } 122 | } 123 | } 124 | } 125 | 126 | #[test] 127 | fn drop_send_now() { 128 | 
let (mut s, r) = spsc::(1); 129 | drop(r); 130 | assert_eq!(closed(42), s.send(42).now()); 131 | } 132 | 133 | #[test] 134 | fn drop_send() { 135 | unsafe { 136 | let (mut s, r) = spsc::(1); 137 | drop(r); 138 | wookie!(s2: s.send(42)); 139 | assert_eq!(Poll::Ready(closed(42)), s2.poll()); 140 | } 141 | } 142 | 143 | #[test] 144 | fn send_drop() { 145 | unsafe { 146 | let (mut s, r) = spsc::(1); 147 | { 148 | wookie!(s2: s.send(42)); 149 | assert_eq!(Poll::Ready(Ok(())), s2.poll()); 150 | cdw!(s2: 0, 0, 0); 151 | } 152 | wookie!(s2: s.send(42)); 153 | assert_eq!(Poll::Pending, s2.poll()); 154 | cdw!(s2: 1, 0, 0); 155 | drop(r); 156 | cdw!(s2: 1, 1, 1); 157 | assert_eq!(Poll::Ready(closed(42)), s2.poll()); 158 | cdw!(s2: 1, 1, 1); 159 | } 160 | } 161 | 162 | #[test] 163 | fn drop_receive_now() { 164 | let (s, mut r) = spsc::(1); 165 | drop(s); 166 | assert_eq!(Err(Closed), r.receive().now()); 167 | } 168 | 169 | #[test] 170 | fn drop_receive() { 171 | let (s, mut r) = spsc::(1); 172 | drop(s); 173 | wookie!(r2: r.receive()); 174 | assert_eq!(Poll::Ready(Err(Closed)), r2.poll()); 175 | } 176 | 177 | 178 | 179 | 180 | #[test] 181 | fn send_drop_receive_now() { 182 | let (mut s, mut r) = spsc::(1); 183 | assert_eq!(Ok(()), s.send(42).now()); 184 | drop(s); 185 | assert_eq!(Ok(Some(42)), r.receive().now()); 186 | } 187 | 188 | #[test] 189 | fn send_drop_receive() { 190 | let (mut s, mut r) = spsc::(1); 191 | assert_eq!(Ok(()), s.send(42).now()); 192 | drop(s); 193 | { 194 | wookie!(r2: r.receive()); 195 | assert_eq!(Poll::Ready(Ok(42)), r2.poll()); 196 | } 197 | wookie!(r2: r.receive()); 198 | assert_eq!(Poll::Ready(Err(Closed)), r2.poll()); 199 | } 200 | --------------------------------------------------------------------------------