├── .github └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── src ├── lib.rs ├── loom_exports.rs └── queue.rs └── tests ├── general.rs ├── may_leak.rs └── tests.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [ main ] 7 | 8 | env: 9 | RUSTFLAGS: -Dwarnings 10 | 11 | jobs: 12 | check: 13 | name: Check 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | rust: 19 | - stable 20 | - 1.64.0 21 | steps: 22 | - name: Checkout sources 23 | uses: actions/checkout@v4 24 | 25 | - name: Install toolchain 26 | uses: dtolnay/rust-toolchain@master 27 | with: 28 | toolchain: ${{ matrix.rust }} 29 | 30 | - name: Run cargo check 31 | run: cargo check 32 | 33 | test: 34 | name: Tests 35 | runs-on: ubuntu-latest 36 | steps: 37 | - name: Checkout sources 38 | uses: actions/checkout@v4 39 | 40 | - name: Install toolchain 41 | uses: dtolnay/rust-toolchain@stable 42 | 43 | - name: Run cargo test 44 | run: cargo test 45 | 46 | loom: 47 | name: Loom 48 | runs-on: ubuntu-latest 49 | steps: 50 | - name: Checkout sources 51 | uses: actions/checkout@v4 52 | 53 | - name: Install toolchain 54 | uses: dtolnay/rust-toolchain@stable 55 | 56 | - name: Run cargo test (Loom) 57 | run: cargo test --lib --release 58 | env: 59 | RUSTFLAGS: --cfg tachyonix_loom 60 | 61 | miri: 62 | name: Miri 63 | runs-on: ubuntu-latest 64 | steps: 65 | - name: Checkout sources 66 | uses: actions/checkout@v4 67 | 68 | - name: Install toolchain 69 | uses: dtolnay/rust-toolchain@nightly 70 | with: 71 | components: miri 72 | 73 | - name: Run cargo miri 74 | run: cargo miri test --tests 75 | env: 76 | MIRIFLAGS: -Zmiri-strict-provenance 77 | 78 | - name: Run cargo miri (ignore leaks) 79 | run: cargo miri test --tests 80 | env: 81 | RUSTFLAGS: --cfg tachyonix_ignore_leaks 82 | MIRIFLAGS: 
-Zmiri-strict-provenance -Zmiri-ignore-leaks 83 | 84 | lints: 85 | name: Lints 86 | runs-on: ubuntu-latest 87 | steps: 88 | - name: Checkout sources 89 | uses: actions/checkout@v4 90 | 91 | - name: Install toolchain 92 | uses: dtolnay/rust-toolchain@stable 93 | with: 94 | components: rustfmt, clippy 95 | 96 | - name: Run cargo fmt 97 | run: cargo fmt --all -- --check 98 | 99 | - name: Run cargo clippy 100 | run: cargo clippy 101 | 102 | docs: 103 | name: Docs 104 | runs-on: ubuntu-latest 105 | steps: 106 | - name: Checkout sources 107 | uses: actions/checkout@v4 108 | 109 | - name: Install toolchain 110 | uses: dtolnay/rust-toolchain@stable 111 | 112 | - name: Run cargo doc 113 | run: cargo doc --no-deps --document-private-items 114 | env: 115 | RUSTDOCFLAGS: -Dwarnings 116 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 0.3.1 (2024-09-08) 2 | 3 | * Roll back MSRV to 1.64 and make Loom a dev dependency ([#9],[#10]). 4 | 5 | [#9]: https://github.com/asynchronics/tachyonix/pull/9 6 | [#10]: https://github.com/asynchronics/tachyonix/pull/10 7 | 8 | # 0.3.0 (2024-05-15) 9 | 10 | * Make it possible to specify a deadline when sending or receiving ([#6]). 11 | * Increase the MSRV to work around breakage introduced by the new `--check-cfg` 12 | being enabled by default. 13 | 14 | *Note*: there are no API-breaking changes, the minor version was only increased 15 | due to the new MSRV. 16 | 17 | [#6]: https://github.com/asynchronics/tachyonix/pull/6 18 | 19 | # 0.2.1 (2023-07-16) 20 | 21 | - Use internally the newly spun-off `async-event` crate instead of the `event` 22 | module. 
23 | - Use `crossbeam-utils` instead of deprecated `cache-padded` crate. 24 | 25 | # 0.2.0 (2022-10-30) 26 | 27 | - Improve performance by always allocating new notifiers for blocked senders; 28 | this also makes it now possible to take `self` by reference in senders ([#3]). 29 | - Fix soundness issue when sender is forgotten ([#2]). 30 | 31 | [#2]: https://github.com/asynchronics/tachyonix/pull/2 32 | [#3]: https://github.com/asynchronics/tachyonix/pull/3 33 | 34 | 35 | # 0.1.1 (2022-10-16) 36 | 37 | - Support Rust 1.56. 38 | - Move benchmark to separate repo. 39 | 40 | 41 | # 0.1.0 (2022-10-12) 42 | 43 | Initial release 44 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tachyonix" 3 | # When incrementing version and releasing to crates.io: 4 | # - Update crate version in README.md 5 | # - Update CHANGELOG.md 6 | # - Update if necessary copyright notice in LICENSE-MIT 7 | # - Create a "vX.Y.Z" git tag 8 | version = "0.3.1" 9 | authors = ["Serge Barral "] 10 | edition = "2021" 11 | rust-version = "1.64" 12 | license = "MIT OR Apache-2.0" 13 | repository = "https://github.com/asynchronics/tachyonix" 14 | readme = "README.md" 15 | description = """ 16 | A very fast asynchronous, multi-producer, single-consumer bounded channel.
17 | """ 18 | categories = ["asynchronous", "concurrency"] 19 | keywords = ["async", "channel", "futures", "mpsc"] 20 | autotests = false 21 | 22 | [dependencies] 23 | async-event = "0.2" 24 | crossbeam-utils = "0.8" 25 | diatomic-waker = "0.2" 26 | futures-core = "0.3" 27 | pin-project-lite = "0.2" 28 | 29 | [dev-dependencies] 30 | futures-executor = { version = "0.3", default-features = false, features = ["thread-pool"] } 31 | futures-task = { version = "0.3", default-features = false, features = ["std"] } 32 | futures-util = { version = "0.3", default-features = false, features = ["std", "async-await"] } 33 | futures-time = "3.0" 34 | 35 | [target.'cfg(tachyonix_loom)'.dev-dependencies] 36 | loom = "0.7" 37 | waker-fn = "1.1" 38 | 39 | [lints.rust] 40 | unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tachyonix_loom)', 'cfg(tachyonix_ignore_leaks)'] } 41 | 42 | [[test]] 43 | name = "integration" 44 | path = "tests/tests.rs" 45 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2024 Serge Barral 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # tachyonix 2 | 3 | An asynchronous, multi-producer, single-consumer (MPSC) bounded channel 4 | that operates at [tachyonic][tachyon] speeds. 5 | 6 | This library is an offshoot of [Asynchronix][asynchronix], an ongoing effort at a 7 | high performance asynchronous computation framework for system simulation. 8 | 9 | No laws of physics were broken in the making of this library. 10 | 11 | [![Cargo](https://img.shields.io/crates/v/tachyonix.svg)](https://crates.io/crates/tachyonix) 12 | [![Documentation](https://docs.rs/tachyonix/badge.svg)](https://docs.rs/tachyonix) 13 | [![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/asynchronics/tachyonix#license) 14 | 15 | [tachyon]: https://en.wikipedia.org/wiki/Tachyon 16 | 17 | [asynchronix]: https://github.com/asynchronics/asynchronix 18 | 19 | 20 | ## Overview 21 | 22 | This is a no-frills `async` channel whose only claim to fame is to be extremely 23 | fast (see [benchmarks](#benchmarks)), without taking any shortcuts on 24 | correctness and implementation quality.
Its performance mainly results from its 25 | focus on the MPSC use-case and from a number of careful optimizations, among 26 | which: 27 | 28 | - aggressively **optimized notification primitives** for full-queue and 29 | empty-queue events (the latter is courtesy of 30 | [diatomic-waker][diatomic-waker], a fast, spinlock-free alternative to 31 | `atomic-waker`), 32 | - **no allocation** after channel creation except for blocked sender 33 | notifications, 34 | - **no spinlocks** whatsoever, and no mutex in the hot path (the only mutex is a 35 | `std::sync::mutex` used for blocked senders notifications), 36 | - underlying queue **optimized for single receiver**. 37 | 38 | [diatomic-waker]: https://github.com/asynchronics/diatomic-waker 39 | 40 | 41 | ## Usage 42 | 43 | Add this to your `Cargo.toml`: 44 | 45 | ```toml 46 | [dependencies] 47 | tachyonix = "0.3.1" 48 | ``` 49 | 50 | 51 | ## Example 52 | 53 | ```rust 54 | use tachyonix; 55 | use futures_executor::{block_on, ThreadPool}; 56 | 57 | let pool = ThreadPool::new().unwrap(); 58 | 59 | let (s, mut r) = tachyonix::channel(3); 60 | 61 | block_on( async move { 62 | pool.spawn_ok( async move { 63 | assert_eq!(s.send("Hello").await, Ok(())); 64 | }); 65 | 66 | assert_eq!(r.recv().await, Ok("Hello")); 67 | }); 68 | ``` 69 | 70 | 71 | ## Limitations 72 | 73 | The original raison d'être of this library was to provide a less idiosyncratic 74 | sibling to the channels developed for [Asynchronix][asynchronix] that could be 75 | easily benchmarked against other channel implementations. The experiment turned 76 | out better than anticipated so a slightly more fleshed out version was released 77 | for public consumption in the hope that others may find it useful. However, its 78 | API surface is intentionally kept small and it does not aspire to become much 79 | more than it is today. 
More importantly, it makes trade-offs that may or may not 80 | be acceptable depending on your use-case: 81 | 82 | * just like most other async channels except the MPSC channels in the `tokio` 83 | and `futures` crates, fairness for blocked senders is not enforced: while the 84 | first sender blocked on a full channel is indeed notified first, it may still 85 | be outrun if another sender happens to be scheduled before; if your 86 | application requires better fairness guarantees, you should use `tokio`'s or 87 | `futures`'s channels. 88 | * just like most other async channels except the MPSC channels in the `futures` 89 | crate, the effective capacity of the channel decreases with each "forgotten" 90 | blocked sender (*i.e.* blocked senders which, for some reason, were not polled 91 | to completion but were not dropped either) and the channel will eventually 92 | deadlock if the effective capacity drops to zero; if this can happen in your 93 | application, you should use `futures`'s channels. 94 | * just like most other async channels with the exception of `flume`, its 95 | low-level primitives rely on `unsafe` (see [dedicated section](#safety)), 96 | * zero-capacity channels (a.k.a. rendez-vous channels) are not supported. 97 | 98 | 99 | ## Safety 100 | 101 | Despite the focus on performance, implementation quality and correctness are the 102 | highest priority. 103 | 104 | The library comes with a decent battery of tests, in particular for all 105 | low-level (unsafe) concurrency primitives which are extensively tested with 106 | [Loom][loom], complemented with MIRI for integration tests. As amazing as they 107 | are, however, Loom and MIRI cannot formally prove the absence of data races so 108 | soundness issues _are_ possible. You should therefore exercise caution before 109 | using it in mission-critical software until it receives more testing in the 110 | wild.
111 | 112 | [loom]: https://github.com/tokio-rs/loom 113 | 114 | 115 | ## Benchmarks 116 | 117 | ### Benchmarks overview 118 | 119 | A custom [benchmarking suite][bench] was implemented that can test a number of 120 | popular MPSC and MPMC channels with several executors (Tokio, async-std, 121 | smolscale and Asynchronix). 122 | 123 | It contains at the moment 2 benchmarks: 124 | - *pinball*: an upgraded version of the classical ping-pong benchmark where 125 | messages ("balls") perform a random walk between 13 vertices ("pins") of a 126 | fully connected graph; it is parametrized by the total number of balls within 127 | the graph, 128 | - *funnel*: the most common MPSC benchmark where messages are sent in a tight 129 | loop from 13 senders to a unique receiver; it is parametrized by the channel 130 | capacity. 131 | 132 | Each benchmark executes 61 instances of an elementary bench rig, which ensures 133 | that all executor threads are busy at nearly all times. The *pinball* benchmark 134 | is a relatively good proxy for performance in situations where channel receivers 135 | are often starved but senders are never blocked (i.e. the channel capacity is 136 | always sufficient). 137 | 138 | Regardless of its popularity, the *funnel* benchmark is **less realistic and 139 | less objective** as it is sensitive not only to the absolute speed of enqueue, 140 | dequeue and notifications, but is also strongly affected by their relative speed 141 | and by other subtle details. Its extrapolation to real-life performance is 142 | rather debatable. 143 | 144 | More information about these benchmarks can be found in the [bench repo][bench]. 145 | 146 | [bench]: https://github.com/asynchronics/tachyobench/ 147 | 148 | 149 | ### Benchmark results 150 | 151 | > Keep in mind that raw speed is not everything: every channel makes design 152 | > choices and trade-offs (regarding *e.g.* unsafety, fairness, mpmc support, 153 | > ...) which can have a significant impact on performance.
Be sure to read the 154 | > sections about [limitations](#limitations) and [safety](#safety). 155 | 156 | The benchmarks were run on EC2 instances of comparable performance but different 157 | micro-architectures (Intel Ice Lake, AMD Zen 3, ARM Graviton 2). The reported 158 | performance is the mean number of messages per microsecond after averaging over 159 | 10 benchmark runs (higher is better). 160 | 161 | The reported results were obtained with Tokio, which in practice was found 162 | significantly faster than either async-std or smolscale. Asynchronix is faster 163 | yet, but less relevant as a baseline as it is not meant for general-purpose 164 | `async` programming. 165 | 166 | 167 | #### EC2 c6i.2xlarge 168 | 169 | ![Alt text](https://raw.githubusercontent.com/asynchronics/tachyobench/main/results/sha_f4642a4-rustc_1.64-tokio/c6i.2xlarge.png) 170 | 171 | 172 | #### EC2 c6a.2xlarge 173 | 174 | ![Alt text](https://raw.githubusercontent.com/asynchronics/tachyobench/main/results/sha_f4642a4-rustc_1.64-tokio/c6a.2xlarge.png) 175 | 176 | 177 | #### EC2 c6g.2xlarge 178 | 179 | ![Alt text](https://raw.githubusercontent.com/asynchronics/tachyobench/main/results/sha_f4642a4-rustc_1.64-tokio/c6g.2xlarge.png) 180 | 181 | 182 | ## License 183 | 184 | This software is licensed under the [Apache License, Version 185 | 2.0](LICENSE-APACHE) or the [MIT license](LICENSE-MIT), at your option. 186 | 187 | 188 | ## Contribution 189 | 190 | Unless you explicitly state otherwise, any contribution intentionally submitted 191 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 192 | dual licensed as above, without any additional terms or conditions. 193 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A very fast asynchronous, multi-producer, single-consumer (MPSC) bounded 2 | //! channel. 3 | //! 4 | //! 
This is a no-frills `async` bounded MPSC channel which only claim to fame is 5 | //! to be extremely fast, without taking any shortcuts on correctness 6 | //! and implementation quality. 7 | //! 8 | //! # Disconnection 9 | //! 10 | //! The channel is disconnected automatically once all [`Sender`]s are dropped 11 | //! or once the [`Receiver`] is dropped. It can also be disconnected manually by 12 | //! any `Sender` or `Receiver` handle. 13 | //! 14 | //! Disconnection is signaled by the `Result` of the sending or receiving 15 | //! operations. Once a channel is disconnected, all attempts to send a message 16 | //! will return an error. However, the receiver will still be able to receive 17 | //! messages already in the channel and will only get a disconnection error once 18 | //! all messages have been received. 19 | //! 20 | //! # Example 21 | //! 22 | //! ``` 23 | //! use tachyonix; 24 | //! use futures_executor::{block_on, ThreadPool}; 25 | //! 26 | //! let pool = ThreadPool::new().unwrap(); 27 | //! 28 | //! let (s, mut r) = tachyonix::channel(3); 29 | //! 30 | //! block_on( async move { 31 | //! pool.spawn_ok( async move { 32 | //! assert_eq!(s.send("Hello").await, Ok(())); 33 | //! }); 34 | //! 35 | //! assert_eq!(r.recv().await, Ok("Hello")); 36 | //! }); 37 | //! # std::thread::sleep(std::time::Duration::from_millis(100)); // MIRI bug workaround 38 | //! ``` 39 | //! 
40 | #![warn(missing_docs, missing_debug_implementations, unreachable_pub)] 41 | 42 | mod loom_exports; 43 | mod queue; 44 | 45 | use std::error; 46 | use std::fmt; 47 | use std::future::Future; 48 | use std::pin::Pin; 49 | use std::sync::atomic::{self, AtomicUsize, Ordering}; 50 | use std::sync::Arc; 51 | use std::task::Context; 52 | use std::task::Poll; 53 | 54 | use async_event::Event; 55 | use diatomic_waker::primitives::DiatomicWaker; 56 | use futures_core::Stream; 57 | use pin_project_lite::pin_project; 58 | 59 | use crate::queue::{PopError, PushError, Queue}; 60 | 61 | /// Shared channel data. 62 | struct Inner { 63 | /// Non-blocking internal queue. 64 | queue: Queue, 65 | /// Signalling primitive used to notify the receiver. 66 | receiver_signal: DiatomicWaker, 67 | /// Signalling primitive used to notify one or several senders. 68 | sender_signal: Event, 69 | /// Current count of live senders. 70 | sender_count: AtomicUsize, 71 | } 72 | 73 | impl Inner { 74 | fn new(capacity: usize, sender_count: usize) -> Self { 75 | Self { 76 | queue: Queue::new(capacity), 77 | receiver_signal: DiatomicWaker::new(), 78 | sender_signal: Event::new(), 79 | sender_count: AtomicUsize::new(sender_count), 80 | } 81 | } 82 | } 83 | 84 | /// The sending side of a channel. 85 | /// 86 | /// Multiple [`Sender`]s can be created via cloning. 87 | pub struct Sender { 88 | /// Shared data. 89 | inner: Arc>, 90 | } 91 | 92 | impl Sender { 93 | /// Attempts to send a message immediately. 94 | pub fn try_send(&self, message: T) -> Result<(), TrySendError> { 95 | match self.inner.queue.push(message) { 96 | Ok(()) => { 97 | self.inner.receiver_signal.notify(); 98 | Ok(()) 99 | } 100 | Err(PushError::Full(v)) => Err(TrySendError::Full(v)), 101 | Err(PushError::Closed(v)) => Err(TrySendError::Closed(v)), 102 | } 103 | } 104 | 105 | /// Sends a message asynchronously, if necessary waiting until enough 106 | /// capacity becomes available. 
107 | pub async fn send(&self, message: T) -> Result<(), SendError> { 108 | let mut message = Some(message); 109 | 110 | self.inner 111 | .sender_signal 112 | .wait_until(|| { 113 | match self.inner.queue.push(message.take().unwrap()) { 114 | Ok(()) => Some(()), 115 | Err(PushError::Full(m)) => { 116 | // Recycle the message. 117 | message = Some(m); 118 | 119 | None 120 | } 121 | Err(PushError::Closed(m)) => { 122 | // Keep the message so it can be returned in the error 123 | // field. 124 | message = Some(m); 125 | 126 | Some(()) 127 | } 128 | } 129 | }) 130 | .await; 131 | 132 | match message { 133 | Some(m) => Err(SendError(m)), 134 | None => { 135 | self.inner.receiver_signal.notify(); 136 | 137 | Ok(()) 138 | } 139 | } 140 | } 141 | 142 | /// Sends a message asynchronously, if necessary waiting until enough 143 | /// capacity becomes available or until the deadline elapses. 144 | /// 145 | /// The deadline is specified as a `Future` that is expected to resolves to 146 | /// `()` after some duration, such as a `tokio::time::Sleep` future. 147 | pub async fn send_timeout<'a, D>( 148 | &'a self, 149 | message: T, 150 | deadline: D, 151 | ) -> Result<(), SendTimeoutError> 152 | where 153 | D: Future + 'a, 154 | { 155 | let mut message = Some(message); 156 | 157 | let res = self 158 | .inner 159 | .sender_signal 160 | .wait_until_or_timeout( 161 | || { 162 | match self.inner.queue.push(message.take().unwrap()) { 163 | Ok(()) => Some(()), 164 | Err(PushError::Full(m)) => { 165 | // Recycle the message. 166 | message = Some(m); 167 | 168 | None 169 | } 170 | Err(PushError::Closed(m)) => { 171 | // Keep the message so it can be returned in the error 172 | // field. 
173 | message = Some(m); 174 | 175 | Some(()) 176 | } 177 | } 178 | }, 179 | deadline, 180 | ) 181 | .await; 182 | 183 | match (message, res) { 184 | (Some(m), Some(())) => Err(SendTimeoutError::Closed(m)), 185 | (Some(m), None) => Err(SendTimeoutError::Timeout(m)), 186 | _ => { 187 | self.inner.receiver_signal.notify(); 188 | 189 | Ok(()) 190 | } 191 | } 192 | } 193 | 194 | /// Closes the queue. 195 | /// 196 | /// This prevents any further messages from being sent on the channel. 197 | /// Messages that were already sent can still be received. 198 | pub fn close(&self) { 199 | self.inner.queue.close(); 200 | 201 | // Notify the receiver and all blocked senders that the channel is 202 | // closed. 203 | self.inner.receiver_signal.notify(); 204 | self.inner.sender_signal.notify_all(); 205 | } 206 | 207 | /// Checks if the channel is closed. 208 | /// 209 | /// This can happen either because the [`Receiver`] was dropped or because 210 | /// one of the [`Sender::close`] or [`Receiver::close`] method was called. 211 | pub fn is_closed(&self) -> bool { 212 | self.inner.queue.is_closed() 213 | } 214 | } 215 | 216 | impl Clone for Sender { 217 | fn clone(&self) -> Self { 218 | // Increase the sender reference count. 219 | // 220 | // Ordering: Relaxed ordering is sufficient here for the same reason it 221 | // is sufficient for an `Arc` reference count increment: synchronization 222 | // is only necessary when decrementing the counter since all what is 223 | // needed is to ensure that all operations until the drop handler is 224 | // called are visible once the reference count drops to 0. 225 | self.inner.sender_count.fetch_add(1, Ordering::Relaxed); 226 | 227 | Self { 228 | inner: self.inner.clone(), 229 | } 230 | } 231 | } 232 | 233 | impl Drop for Sender { 234 | fn drop(&mut self) { 235 | // Decrease the sender reference count. 
236 | // 237 | // Ordering: Release ordering is necessary for the same reason it is 238 | // necessary for an `Arc` reference count decrement: it ensures that all 239 | // operations performed by this sender before it was dropped will be 240 | // visible once the sender count drops to 0. 241 | if self.inner.sender_count.fetch_sub(1, Ordering::Release) == 1 242 | && !self.inner.queue.is_closed() 243 | { 244 | // Make sure that the notified receiver sees all operations 245 | // performed by all dropped senders. 246 | // 247 | // Ordering: Acquire is necessary to synchronize with the Release 248 | // decrement operations. Note that the fence synchronizes with _all_ 249 | // decrement operations since the chain of counter decrements forms 250 | // a Release sequence. 251 | atomic::fence(Ordering::Acquire); 252 | 253 | self.inner.queue.close(); 254 | 255 | // Notify the receiver that the channel is closed. 256 | self.inner.receiver_signal.notify(); 257 | } 258 | } 259 | } 260 | 261 | impl fmt::Debug for Sender { 262 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 263 | f.debug_struct("Sender").finish_non_exhaustive() 264 | } 265 | } 266 | 267 | /// The receiving side of a channel. 268 | /// 269 | /// The receiver can only be called from a single thread. 270 | pub struct Receiver { 271 | /// Shared data. 272 | inner: Arc>, 273 | } 274 | 275 | impl Receiver { 276 | /// Attempts to receive a message immediately. 277 | pub fn try_recv(&mut self) -> Result { 278 | // Safety: `Queue::pop` cannot be used concurrently from multiple 279 | // threads since `Receiver` does not implement `Clone` and requires 280 | // exclusive ownership. 
281 | match unsafe { self.inner.queue.pop() } { 282 | Ok(message) => { 283 | self.inner.sender_signal.notify_one(); 284 | Ok(message) 285 | } 286 | Err(PopError::Empty) => Err(TryRecvError::Empty), 287 | Err(PopError::Closed) => Err(TryRecvError::Closed), 288 | } 289 | } 290 | 291 | /// Receives a message asynchronously, if necessary waiting until one 292 | /// becomes available. 293 | pub async fn recv(&mut self) -> Result { 294 | // We could of course return the future directly from a plain method, 295 | // but the `async` signature makes the intent more explicit. 296 | RecvFuture { receiver: self }.await 297 | } 298 | 299 | /// Receives a message asynchronously, if necessary waiting until one 300 | /// becomes available or until the deadline elapses. 301 | /// 302 | /// The deadline is specified as a `Future` that is expected to resolves to 303 | /// `()` after some duration, such as a `tokio::time::Sleep` future. 304 | pub async fn recv_timeout(&mut self, deadline: D) -> Result 305 | where 306 | D: Future, 307 | { 308 | // We could of course return the future directly from a plain method, 309 | // but the `async` signature makes the intent more explicit. 310 | RecvTimeoutFuture { 311 | receiver: self, 312 | deadline, 313 | } 314 | .await 315 | } 316 | 317 | /// Closes the queue. 318 | /// 319 | /// This prevents any further messages from being sent on the channel. 320 | /// Messages that were already sent can still be received, however, which is 321 | /// why a call to this method should typically be followed by a loop 322 | /// receiving all remaining messages. 323 | /// 324 | /// For this reason, no counterpart to [`Sender::is_closed`] is exposed by 325 | /// the receiver as such method could easily be misused and lead to lost 326 | /// messages. Instead, messages should be received until a [`RecvError`], 327 | /// [`RecvTimeoutError::Closed`] or [`TryRecvError::Closed`] error is 328 | /// returned. 
329 | pub fn close(&self) { 330 | if !self.inner.queue.is_closed() { 331 | self.inner.queue.close(); 332 | 333 | // Notify all blocked senders that the channel is closed. 334 | self.inner.sender_signal.notify_all(); 335 | } 336 | } 337 | } 338 | 339 | impl Drop for Receiver { 340 | fn drop(&mut self) { 341 | self.inner.queue.close(); 342 | 343 | // Notify all blocked senders that the channel is closed. 344 | self.inner.sender_signal.notify_all(); 345 | } 346 | } 347 | 348 | impl fmt::Debug for Receiver { 349 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 350 | f.debug_struct("Receiver").finish_non_exhaustive() 351 | } 352 | } 353 | 354 | impl Stream for Receiver { 355 | type Item = T; 356 | 357 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 358 | // Safety: `Queue::pop`, `DiatomicWaker::register` and 359 | // `DiatomicWaker::unregister` cannot be used concurrently from multiple 360 | // threads since `Receiver` does not implement `Clone` and requires 361 | // exclusive ownership. 362 | unsafe { 363 | // Happy path: try to pop a message without registering the waker. 364 | match self.inner.queue.pop() { 365 | Ok(message) => { 366 | // Signal to one awaiting sender that one slot was freed. 367 | self.inner.sender_signal.notify_one(); 368 | 369 | return Poll::Ready(Some(message)); 370 | } 371 | Err(PopError::Closed) => { 372 | return Poll::Ready(None); 373 | } 374 | Err(PopError::Empty) => {} 375 | } 376 | 377 | // Slow path: we must register the waker to be notified when the 378 | // queue is populated again. It is thereafter necessary to check 379 | // again the predicate in case we raced with a sender. 380 | self.inner.receiver_signal.register(cx.waker()); 381 | 382 | match self.inner.queue.pop() { 383 | Ok(message) => { 384 | // Cancel the request for notification. 385 | self.inner.receiver_signal.unregister(); 386 | 387 | // Signal to one awaiting sender that one slot was freed. 
388 | self.inner.sender_signal.notify_one(); 389 | 390 | Poll::Ready(Some(message)) 391 | } 392 | Err(PopError::Closed) => { 393 | // Cancel the request for notification. 394 | self.inner.receiver_signal.unregister(); 395 | 396 | Poll::Ready(None) 397 | } 398 | Err(PopError::Empty) => Poll::Pending, 399 | } 400 | } 401 | } 402 | } 403 | 404 | /// The future returned by the `Receiver::recv` method. 405 | /// 406 | /// This is just a thin wrapper over the `Stream::poll_next` implementation. 407 | struct RecvFuture<'a, T> { 408 | receiver: &'a mut Receiver, 409 | } 410 | 411 | impl<'a, T> Future for RecvFuture<'a, T> { 412 | type Output = Result; 413 | 414 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 415 | match Pin::new(&mut self.receiver).poll_next(cx) { 416 | Poll::Ready(Some(v)) => Poll::Ready(Ok(v)), 417 | Poll::Ready(None) => Poll::Ready(Err(RecvError)), 418 | Poll::Pending => Poll::Pending, 419 | } 420 | } 421 | } 422 | 423 | pin_project! { 424 | /// The future returned by the `Receiver::recv_timeout` method. 425 | /// 426 | /// This is just a thin wrapper over the `Stream::poll_next` implementation 427 | /// which abandons if the deadline elapses. 
428 | struct RecvTimeoutFuture<'a, T, D> where D: Future { 429 | receiver: &'a mut Receiver, 430 | #[pin] 431 | deadline: D, 432 | } 433 | } 434 | 435 | impl<'a, T, D> Future for RecvTimeoutFuture<'a, T, D> 436 | where 437 | D: Future, 438 | { 439 | type Output = Result; 440 | 441 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 442 | let this = self.project(); 443 | let receiver = this.receiver; 444 | let deadline = this.deadline; 445 | 446 | match Pin::new(receiver).poll_next(cx) { 447 | Poll::Ready(Some(v)) => Poll::Ready(Ok(v)), 448 | Poll::Ready(None) => Poll::Ready(Err(RecvTimeoutError::Closed)), 449 | Poll::Pending => match deadline.poll(cx) { 450 | Poll::Pending => Poll::Pending, 451 | Poll::Ready(()) => Poll::Ready(Err(RecvTimeoutError::Timeout)), 452 | }, 453 | } 454 | } 455 | } 456 | 457 | /// Creates a new channel, returning the sending and receiving sides. 458 | /// 459 | /// # Panic 460 | /// 461 | /// The function will panic if the requested capacity is 0 or if it is greater 462 | /// than `usize::MAX/2 + 1`. 463 | pub fn channel(capacity: usize) -> (Sender, Receiver) { 464 | let inner = Arc::new(Inner::new(capacity, 1)); 465 | 466 | let sender = Sender { 467 | inner: inner.clone(), 468 | }; 469 | let receiver = Receiver { inner }; 470 | 471 | (sender, receiver) 472 | } 473 | 474 | /// An error returned when an attempt to send a message synchronously is 475 | /// unsuccessful. 476 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 477 | pub enum TrySendError { 478 | /// The queue is full. 479 | Full(T), 480 | /// The receiver has been dropped. 
481 | Closed(T), 482 | } 483 | 484 | impl error::Error for TrySendError {} 485 | 486 | impl fmt::Display for TrySendError { 487 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 488 | match self { 489 | TrySendError::Full(_) => "sending into a full channel".fmt(f), 490 | TrySendError::Closed(_) => "sending into a closed channel".fmt(f), 491 | } 492 | } 493 | } 494 | 495 | /// An error returned when an attempt to send a message asynchronously is 496 | /// unsuccessful. 497 | #[derive(Clone, Copy, Eq, PartialEq)] 498 | pub struct SendError(pub T); 499 | 500 | impl error::Error for SendError {} 501 | 502 | impl fmt::Debug for SendError { 503 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 504 | f.debug_struct("SendError").finish_non_exhaustive() 505 | } 506 | } 507 | 508 | impl fmt::Display for SendError { 509 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 510 | "sending into a closed channel".fmt(f) 511 | } 512 | } 513 | 514 | /// An error returned when an attempt to send a message asynchronously with a 515 | /// deadline is unsuccessful. 516 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 517 | pub enum SendTimeoutError { 518 | /// The deadline has elapsed. 519 | Timeout(T), 520 | /// The channel has been closed. 521 | Closed(T), 522 | } 523 | 524 | impl error::Error for SendTimeoutError {} 525 | 526 | impl fmt::Display for SendTimeoutError { 527 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 528 | match self { 529 | SendTimeoutError::Timeout(_) => "the deadline for sending has elapsed".fmt(f), 530 | SendTimeoutError::Closed(_) => "sending into a closed channel".fmt(f), 531 | } 532 | } 533 | } 534 | 535 | /// An error returned when an attempt to receive a message synchronously is 536 | /// unsuccessful. 537 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 538 | pub enum TryRecvError { 539 | /// The queue is empty. 540 | Empty, 541 | /// All senders have been dropped. 
542 | Closed, 543 | } 544 | 545 | impl error::Error for TryRecvError {} 546 | 547 | impl fmt::Display for TryRecvError { 548 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 549 | match self { 550 | TryRecvError::Empty => "receiving from an empty channel".fmt(f), 551 | TryRecvError::Closed => "receiving from a closed channel".fmt(f), 552 | } 553 | } 554 | } 555 | 556 | /// An error returned when an attempt to receive a message asynchronously is 557 | /// unsuccessful. 558 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 559 | pub struct RecvError; 560 | 561 | impl error::Error for RecvError {} 562 | 563 | impl fmt::Display for RecvError { 564 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 565 | "receiving from a closed channel".fmt(f) 566 | } 567 | } 568 | 569 | /// An error returned when an attempt to receive a message asynchronously with a 570 | /// deadline is unsuccessful. 571 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 572 | pub enum RecvTimeoutError { 573 | /// The deadline has elapsed. 574 | Timeout, 575 | /// All senders have been dropped. 
576 | Closed, 577 | } 578 | 579 | impl error::Error for RecvTimeoutError {} 580 | 581 | impl fmt::Display for RecvTimeoutError { 582 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 583 | match self { 584 | RecvTimeoutError::Timeout => "the deadline for receiving has elapsed".fmt(f), 585 | RecvTimeoutError::Closed => "receiving from a closed channel".fmt(f), 586 | } 587 | } 588 | } 589 | -------------------------------------------------------------------------------- /src/loom_exports.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(test, tachyonix_loom))] 2 | #[allow(unused_imports)] 3 | pub(crate) mod sync { 4 | pub(crate) use loom::sync::{Arc, Mutex}; 5 | 6 | pub(crate) mod atomic { 7 | pub(crate) use loom::sync::atomic::{AtomicBool, AtomicUsize}; 8 | } 9 | } 10 | #[cfg(not(all(test, tachyonix_loom)))] 11 | #[allow(unused_imports)] 12 | pub(crate) mod sync { 13 | pub(crate) use std::sync::{Arc, Mutex}; 14 | 15 | pub(crate) mod atomic { 16 | pub(crate) use std::sync::atomic::{AtomicBool, AtomicUsize}; 17 | } 18 | } 19 | 20 | #[cfg(all(test, tachyonix_loom))] 21 | pub(crate) mod cell { 22 | pub(crate) use loom::cell::UnsafeCell; 23 | } 24 | #[cfg(not(all(test, tachyonix_loom)))] 25 | pub(crate) mod cell { 26 | #[derive(Debug)] 27 | pub(crate) struct UnsafeCell(std::cell::UnsafeCell); 28 | 29 | #[allow(dead_code)] 30 | impl UnsafeCell { 31 | #[inline(always)] 32 | pub(crate) fn new(data: T) -> UnsafeCell { 33 | UnsafeCell(std::cell::UnsafeCell::new(data)) 34 | } 35 | #[inline(always)] 36 | pub(crate) fn with(&self, f: impl FnOnce(*const T) -> R) -> R { 37 | f(self.0.get()) 38 | } 39 | #[inline(always)] 40 | pub(crate) fn with_mut(&self, f: impl FnOnce(*mut T) -> R) -> R { 41 | f(self.0.get()) 42 | } 43 | } 44 | } 45 | 46 | #[allow(unused_macros)] 47 | macro_rules! 
debug_or_loom_assert { 48 | ($($arg:tt)*) => (if cfg!(any(debug_assertions, all(test, tachyonix_loom))) { assert!($($arg)*); }) 49 | } 50 | #[allow(unused_macros)] 51 | macro_rules! debug_or_loom_assert_eq { 52 | ($($arg:tt)*) => (if cfg!(any(debug_assertions, all(test, tachyonix_loom))) { assert_eq!($($arg)*); }) 53 | } 54 | #[allow(unused_imports)] 55 | pub(crate) use debug_or_loom_assert; 56 | #[allow(unused_imports)] 57 | pub(crate) use debug_or_loom_assert_eq; 58 | -------------------------------------------------------------------------------- /src/queue.rs: -------------------------------------------------------------------------------- 1 | //! A bounded MPSC queue, based on Dmitry Vyukov's MPMC queue. 2 | 3 | use std::cmp; 4 | use std::mem::MaybeUninit; 5 | use std::sync::atomic::Ordering; 6 | 7 | use crate::loom_exports::cell::UnsafeCell; 8 | use crate::loom_exports::debug_or_loom_assert_eq; 9 | use crate::loom_exports::sync::atomic::AtomicUsize; 10 | 11 | use crossbeam_utils::CachePadded; 12 | 13 | /// A queue slot containing a value and an associated stamp. 14 | struct Slot { 15 | stamp: AtomicUsize, 16 | value: UnsafeCell>, 17 | } 18 | 19 | /// An MPSC queue. 20 | /// 21 | /// The enqueue position, dequeue position and the slot stamps are all stored as 22 | /// `usize` and share the following layout: 23 | /// 24 | /// ```text 25 | /// 26 | /// | <- MSB LSB -> | 27 | /// | Sequence count | flag (1 bit) | Buffer index | 28 | /// 29 | /// ``` 30 | /// 31 | /// The purpose of the flag differs depending on the field: 32 | /// 33 | /// - enqueue position: if set, the flag signals that the queue has been closed 34 | /// by either the consumer or a producer, 35 | /// - dequeue position: the flag is not used (always 0), 36 | /// - slot stamp: the flag de-facto extends the mantissa of the buffer index, 37 | /// which makes it in particular possible to support queues with a capacity of 38 | /// 1 without special-casing. 
39 | /// 40 | pub(super) struct Queue { 41 | /// Buffer position of the slot to which the next value will be written. 42 | /// 43 | /// The position stores the buffer index in the least significant bits and a 44 | /// sequence counter in the most significant bits. 45 | enqueue_pos: CachePadded, 46 | 47 | /// Buffer position of the slot from which the next value will be read. 48 | /// 49 | /// This is only ever mutated from a single thread but it must be stored in 50 | /// an atomic or an `UnsafeCell` since it is shared between the consumers 51 | /// and the producer. The reason it is shared is that the drop handler of 52 | /// the last `Inner` owner (which may be a producer) needs access to the 53 | /// dequeue position. 54 | dequeue_pos: CachePadded>, 55 | 56 | /// Buffer holding the values and their stamps. 57 | buffer: Box<[Slot]>, 58 | 59 | /// Bit mask covering both the buffer index and the 1-bit flag. 60 | right_mask: usize, 61 | 62 | /// Bit mask for the 1-bit flag, used as closed-channel flag in the enqueue 63 | /// position. 64 | closed_channel_mask: usize, 65 | } 66 | 67 | impl Queue { 68 | /// Creates a new `Inner`. 69 | pub(super) fn new(capacity: usize) -> Queue { 70 | assert!(capacity >= 1, "the capacity must be 1 or greater"); 71 | 72 | assert!( 73 | capacity <= (1 << (usize::BITS - 1)), 74 | "the capacity may not exceed {}", 75 | 1usize << (usize::BITS - 1) 76 | ); 77 | 78 | // Allocate a buffer initialized with linearly increasing stamps. 
79 | let mut buffer = Vec::with_capacity(capacity); 80 | for i in 0..capacity { 81 | buffer.push(Slot { 82 | stamp: AtomicUsize::new(i), 83 | value: UnsafeCell::new(MaybeUninit::uninit()), 84 | }); 85 | } 86 | 87 | let closed_channel_mask = capacity.next_power_of_two(); 88 | let right_mask = (closed_channel_mask << 1).wrapping_sub(1); 89 | 90 | Queue { 91 | enqueue_pos: CachePadded::new(AtomicUsize::new(0)), 92 | dequeue_pos: CachePadded::new(UnsafeCell::new(0)), 93 | buffer: buffer.into(), 94 | right_mask, 95 | closed_channel_mask, 96 | } 97 | } 98 | 99 | /// Attempts to push an item in the queue. 100 | pub(super) fn push(&self, value: T) -> Result<(), PushError> { 101 | let mut enqueue_pos = self.enqueue_pos.load(Ordering::Relaxed); 102 | 103 | loop { 104 | if enqueue_pos & self.closed_channel_mask != 0 { 105 | return Err(PushError::Closed(value)); 106 | } 107 | 108 | let slot = &self.buffer[enqueue_pos & self.right_mask]; 109 | let stamp = slot.stamp.load(Ordering::Acquire); 110 | 111 | let stamp_delta = stamp.wrapping_sub(enqueue_pos) as isize; 112 | 113 | match stamp_delta.cmp(&0) { 114 | cmp::Ordering::Equal => { 115 | // The enqueue position matches the stamp: a push can be 116 | // attempted. 117 | 118 | // Try incrementing the enqueue position. 119 | match self.enqueue_pos.compare_exchange_weak( 120 | enqueue_pos, 121 | self.next_queue_pos(enqueue_pos), 122 | Ordering::Relaxed, 123 | Ordering::Relaxed, 124 | ) { 125 | Ok(_) => { 126 | // Write the value into the slot and update the stamp. 127 | unsafe { 128 | slot.value.with_mut(|v| *v = MaybeUninit::new(value)); 129 | } 130 | slot.stamp.store(stamp.wrapping_add(1), Ordering::Release); 131 | 132 | return Ok(()); 133 | } 134 | Err(pos) => { 135 | enqueue_pos = pos; 136 | } 137 | } 138 | } 139 | cmp::Ordering::Less => { 140 | // The sequence count of the stamp is smaller than that of the 141 | // enqueue position: the value it contains has not been popped 142 | // yet, so report a full queue. 
143 | return Err(PushError::Full(value)); 144 | } 145 | cmp::Ordering::Greater => { 146 | // The stamp is greater than the enqueue position: this means we 147 | // raced with a concurrent producer which has already (i) 148 | // incremented the enqueue position and (ii) written a value to 149 | // this slot. A retry is required. 150 | enqueue_pos = self.enqueue_pos.load(Ordering::Relaxed); 151 | } 152 | } 153 | } 154 | } 155 | 156 | /// Attempts to pop an item from the queue. 157 | /// 158 | /// # Safety 159 | /// 160 | /// This method may not be called concurrently from multiple threads. 161 | pub(super) unsafe fn pop(&self) -> Result { 162 | let dequeue_pos = self.dequeue_pos.with(|p| *p); 163 | let slot = &self.buffer[dequeue_pos & self.right_mask]; 164 | let stamp = slot.stamp.load(Ordering::Acquire); 165 | 166 | if dequeue_pos != stamp { 167 | // The stamp is ahead of the dequeue position by 1 increment: the 168 | // value can be popped. 169 | debug_or_loom_assert_eq!(stamp, dequeue_pos + 1); 170 | 171 | // Only this thread can access the dequeue position so there is no 172 | // need to increment the position atomically with a `fetch_add`. 173 | self.dequeue_pos 174 | .with_mut(|p| *p = self.next_queue_pos(dequeue_pos)); 175 | 176 | // Read the value from the slot and set the stamp to the value of 177 | // the dequeue position increased by one sequence increment. 178 | let value = slot.value.with(|v| v.read().assume_init()); 179 | slot.stamp 180 | .store(stamp.wrapping_add(self.right_mask), Ordering::Release); 181 | 182 | Ok(value) 183 | } else { 184 | // Check whether the queue was closed. Even if the closed flag is 185 | // set and the slot is empty, there might still be a producer that 186 | // started a push before the channel was closed but has not yet 187 | // updated the stamp. For this reason, before returning 188 | // `PopError::Closed` it is necessary to check as well that the 189 | // enqueue position matches the dequeue position. 
190 | // 191 | // Ordering: Relaxed ordering is enough since no value will be read. 192 | if self.enqueue_pos.load(Ordering::Relaxed) == (dequeue_pos | self.closed_channel_mask) 193 | { 194 | Err(PopError::Closed) 195 | } else { 196 | Err(PopError::Empty) 197 | } 198 | } 199 | } 200 | 201 | /// Closes the queue. 202 | pub(super) fn close(&self) { 203 | // Set the closed-channel flag. 204 | // 205 | // Ordering: Relaxed ordering is enough here since neither the producers 206 | // nor the consumer rely on this flag for synchronizing reads and 207 | // writes. 208 | self.enqueue_pos 209 | .fetch_or(self.closed_channel_mask, Ordering::Relaxed); 210 | } 211 | 212 | /// Checks if the channel has been closed. 213 | /// 214 | /// Note that even if the channel is closed, some messages may still be 215 | /// present in the queue so further calls to `pop` may still succeed. 216 | pub(super) fn is_closed(&self) -> bool { 217 | // Read the closed-channel flag. 218 | // 219 | // Ordering: Relaxed ordering is enough here since this is merely an 220 | // informational function and cannot lead to any unsafety. If the load 221 | // is stale, the worse that can happen is that the queue is seen as open 222 | // when it is in fact already closed, which is OK since the caller must 223 | // anyway be resilient to the case where the channel closes right after 224 | // `is_closed` returns `false`. 225 | self.enqueue_pos.load(Ordering::Relaxed) & self.closed_channel_mask != 0 226 | } 227 | 228 | /// Increment the queue position, incrementing the sequence count as well if 229 | /// the index wraps to 0. 230 | /// 231 | /// Precondition when used with enqueue positions: the closed-channel flag 232 | /// should be cleared. 233 | #[inline] 234 | fn next_queue_pos(&self, queue_pos: usize) -> usize { 235 | debug_or_loom_assert_eq!(queue_pos & self.closed_channel_mask, 0); 236 | 237 | // The queue position cannot wrap around: in the worst case it will 238 | // overflow the flag bit. 
239 | let new_queue_pos = queue_pos + 1; 240 | 241 | let new_index = new_queue_pos & self.right_mask; 242 | 243 | if new_index < self.buffer.len() { 244 | new_queue_pos 245 | } else { 246 | // The buffer index must wrap to 0 and the sequence count 247 | // must be incremented. 248 | let sequence_increment = self.right_mask + 1; 249 | let sequence_count = queue_pos & !self.right_mask; 250 | 251 | sequence_count.wrapping_add(sequence_increment) 252 | } 253 | } 254 | } 255 | impl Drop for Queue { 256 | fn drop(&mut self) { 257 | // Drop all values in the queue. 258 | // 259 | // Safety: single-thread access is guaranteed since the dropping thread 260 | // holds exclusive ownership. 261 | unsafe { while self.pop().is_ok() {} } 262 | } 263 | } 264 | 265 | unsafe impl Send for Queue {} 266 | unsafe impl Sync for Queue {} 267 | 268 | /// Error occurring when pushing into a queue is unsuccessful. 269 | #[derive(Debug, Eq, PartialEq)] 270 | pub(super) enum PushError { 271 | /// The queue is full. 272 | Full(T), 273 | /// The receiver has been dropped. 274 | Closed(T), 275 | } 276 | 277 | /// Error occurring when popping from a queue is unsuccessful. 278 | #[derive(Debug, Eq, PartialEq)] 279 | pub(super) enum PopError { 280 | /// The queue is empty. 281 | Empty, 282 | /// All senders have been dropped and the queue is empty. 283 | Closed, 284 | } 285 | 286 | #[cfg(all(test, any(not(miri), not(tachyonix_ignore_leaks))))] 287 | mod test_utils { 288 | use super::*; 289 | 290 | /// Queue producer. 291 | /// 292 | /// This is a safe queue producer proxy used for testing purposes only. 293 | pub(super) struct Producer { 294 | inner: crate::loom_exports::sync::Arc>, 295 | } 296 | impl Producer { 297 | /// Attempts to push an item into the queue. 298 | pub(super) fn push(&self, value: T) -> Result<(), PushError> { 299 | self.inner.push(value) 300 | } 301 | 302 | /// Closes the queue. 
303 | pub(super) fn close(&self) { 304 | self.inner.close(); 305 | } 306 | 307 | /// Checks if the queue is closed. 308 | #[cfg(not(tachyonix_loom))] 309 | pub(super) fn is_closed(&self) -> bool { 310 | self.inner.is_closed() 311 | } 312 | } 313 | impl Clone for Producer { 314 | fn clone(&self) -> Self { 315 | Self { 316 | inner: self.inner.clone(), 317 | } 318 | } 319 | } 320 | 321 | /// Queue consumer. 322 | /// 323 | /// This is a safe queue consumer proxy used for testing purposes only. 324 | pub(super) struct Consumer { 325 | inner: crate::loom_exports::sync::Arc>, 326 | } 327 | impl Consumer { 328 | /// Attempts to pop an item from the queue. 329 | pub(super) fn pop(&mut self) -> Result { 330 | // Safety: single-thread access is guaranteed since the consumer does 331 | // not implement `Clone` and `pop` requires exclusive ownership. 332 | unsafe { self.inner.pop() } 333 | } 334 | 335 | /// Closes the queue. 336 | pub(super) fn close(&self) { 337 | self.inner.close(); 338 | } 339 | } 340 | 341 | pub(super) fn queue(capacity: usize) -> (Producer, Consumer) { 342 | let inner = crate::loom_exports::sync::Arc::new(Queue::new(capacity)); 343 | 344 | let producer = Producer { 345 | inner: inner.clone(), 346 | }; 347 | let consumer = Consumer { 348 | inner: inner.clone(), 349 | }; 350 | 351 | (producer, consumer) 352 | } 353 | } 354 | 355 | /// Regular tests. 
356 | #[cfg(all(test, not(tachyonix_loom), any(not(miri), not(tachyonix_ignore_leaks))))] 357 | mod tests { 358 | use super::test_utils::*; 359 | use super::*; 360 | 361 | use std::thread; 362 | 363 | #[test] 364 | fn queue_closed_by_sender() { 365 | let (p, mut c) = queue(3); 366 | 367 | assert_eq!(c.pop(), Err(PopError::Empty)); 368 | 369 | p.push(42).unwrap(); 370 | p.close(); 371 | 372 | assert_eq!(c.pop(), Ok(42)); 373 | assert_eq!(c.pop(), Err(PopError::Closed)); 374 | } 375 | 376 | #[test] 377 | fn queue_closed_by_consumer() { 378 | let (p, mut c) = queue(3); 379 | 380 | assert_eq!(p.is_closed(), false); 381 | p.push(42).unwrap(); 382 | 383 | c.close(); 384 | 385 | assert_eq!(p.is_closed(), true); 386 | assert_eq!(p.push(13), Err(PushError::Closed(13))); 387 | 388 | assert_eq!(c.pop(), Ok(42)); 389 | assert_eq!(c.pop(), Err(PopError::Closed)); 390 | } 391 | 392 | fn queue_spsc(capacity: usize) { 393 | const COUNT: usize = if cfg!(miri) { 50 } else { 100_000 }; 394 | 395 | let (p, mut c) = queue(capacity); 396 | 397 | let th_pop = thread::spawn(move || { 398 | for i in 0..COUNT { 399 | loop { 400 | if let Ok(x) = c.pop() { 401 | assert_eq!(x, i); 402 | break; 403 | } 404 | } 405 | } 406 | assert!(c.pop().is_err()); 407 | }); 408 | 409 | let th_push = thread::spawn(move || { 410 | for i in 0..COUNT { 411 | while p.push(i).is_err() {} 412 | } 413 | }); 414 | 415 | th_pop.join().unwrap(); 416 | th_push.join().unwrap(); 417 | } 418 | 419 | #[test] 420 | fn queue_spsc_capacity_one() { 421 | queue_spsc(1); 422 | } 423 | #[test] 424 | fn queue_spsc_capacity_two() { 425 | queue_spsc(2); 426 | } 427 | #[test] 428 | fn queue_spsc_capacity_three() { 429 | queue_spsc(3); 430 | } 431 | 432 | fn queue_mpsc(capacity: usize) { 433 | const COUNT: usize = if cfg!(miri) { 20 } else { 25_000 }; 434 | const PRODUCER_THREADS: usize = 4; 435 | 436 | let (p, mut c) = queue(capacity); 437 | let mut push_count = Vec::::new(); 438 | push_count.resize_with(COUNT, Default::default); 439 
        // Spawn the producers; each pushes 0..COUNT, spinning on a full queue.
        let th_push: Vec<_> = (0..PRODUCER_THREADS)
            .map(|_| {
                let p = p.clone();

                thread::spawn(move || {
                    for i in 0..COUNT {
                        while p.push(i).is_err() {}
                    }
                })
            })
            .collect();

        // Pop every item, spinning on an empty queue, and tally each value.
        for _ in 0..COUNT * PRODUCER_THREADS {
            let n = loop {
                if let Ok(x) = c.pop() {
                    break x;
                }
            };
            push_count[n] += 1;
        }

        // Each value must have been received exactly once per producer.
        for c in push_count {
            assert_eq!(c, PRODUCER_THREADS);
        }

        for th in th_push {
            th.join().unwrap();
        }
    }

    #[test]
    fn queue_mpsc_capacity_one() {
        queue_mpsc(1);
    }
    #[test]
    fn queue_mpsc_capacity_two() {
        queue_mpsc(2);
    }
    #[test]
    fn queue_mpsc_capacity_three() {
        queue_mpsc(3);
    }
}

/// Loom tests.
#[cfg(all(test, tachyonix_loom))]
mod tests {
    use super::test_utils::*;
    use super::*;

    use loom::model::Builder;
    use loom::sync::atomic::AtomicUsize;
    use loom::sync::Arc;
    use loom::thread;

    // Model-check that every successful push is matched by exactly one pop.
    //
    // `max_push_per_thread`: pushes attempted by each producer thread;
    // `producer_thread_count`: number of producer threads;
    // `capacity`: queue capacity under test;
    // `preemption_bound`: Loom preemption bound used unless one was already
    // set (e.g. via the `LOOM_MAX_PREEMPTIONS` environment variable).
    fn loom_queue_push_pop(
        max_push_per_thread: usize,
        producer_thread_count: usize,
        capacity: usize,
        preemption_bound: usize,
    ) {
        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(preemption_bound);
        }

        builder.check(move || {
            let (producer, mut consumer) = queue(capacity);

            // Successful pushes across all producers, compared to pops below.
            let push_count = Arc::new(AtomicUsize::new(0));

            let producer_threads: Vec<_> = (0..producer_thread_count)
                .map(|_| {
                    let producer = producer.clone();
                    let push_count = push_count.clone();

                    thread::spawn(move || {
                        for i in 0..max_push_per_thread {
                            match producer.push(i) {
                                Ok(()) => {}
                                Err(PushError::Full(_)) => {
                                    // A push can fail only if there is not enough capacity.
                                    assert!(capacity < max_push_per_thread * producer_thread_count);

                                    break;
                                }
                                Err(PushError::Closed(_)) => panic!(),
                            }
                            push_count.fetch_add(1, Ordering::Relaxed);
                        }
                    })
                })
                .collect();

            // Drain concurrently with the producers until the queue reads empty.
            let mut pop_count = 0;
            while consumer.pop().is_ok() {
                pop_count += 1;
            }

            for th in producer_threads {
                th.join().unwrap();
            }

            // Drain whatever was pushed after the first drain observed "empty".
            while consumer.pop().is_ok() {
                pop_count += 1;
            }

            assert_eq!(push_count.load(Ordering::Relaxed), pop_count);
        });
    }

    #[test]
    fn loom_queue_push_pop_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 3, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_no_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 5, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_capacity_power_of_two_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(3, 2, 4, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_capacity_one_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 1, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_capacity_power_of_two_no_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 4, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_three_producers() {
        const DEFAULT_PREEMPTION_BOUND: usize = 2;
        loom_queue_push_pop(2, 3, 3, DEFAULT_PREEMPTION_BOUND);
    }

    // Model-check that dropping the queue drops all items still inside it:
    // after producer and consumer are gone, the only remaining `Arc` handle to
    // the test item must be the local one.
    #[test]
    fn loom_queue_drop_items() {
        const CAPACITY: usize = 3;
        const PRODUCER_THREAD_COUNT: usize = 3;
        const DEFAULT_PREEMPTION_BOUND: usize = 4;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, consumer) = queue(CAPACITY);
            let item = std::sync::Arc::new(()); // loom does not implement `strong_count()`

            let producer_threads: Vec<_> = (0..PRODUCER_THREAD_COUNT)
                .map(|_| {
                    thread::spawn({
                        let item = item.clone();
                        let producer = producer.clone();

                        move || {
                            producer.push(item).unwrap();
                        }
                    })
                })
                .collect();

            for th in producer_threads {
                th.join().unwrap();
            }
            drop(producer);
            drop(consumer);

            // All clones pushed into the queue must have been dropped with it.
            assert_eq!(std::sync::Arc::strong_count(&item), 1);
        });
    }

    // Model-check closing from the producer side: a concurrent `push` either
    // lands before the close (and is received) or fails with `Closed`.
    #[test]
    fn loom_queue_closed_by_producer() {
        const CAPACITY: usize = 3;
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, mut consumer) = queue(CAPACITY);

            let th_push_close = thread::spawn({
                let producer = producer.clone();

                move || {
                    producer.push(7).unwrap();
                    producer.close();
                }
            });

            let th_try_push = thread::spawn({
                let producer = producer.clone();

                move || match producer.push(13) {
                    Ok(()) => true,
                    Err(PushError::Closed(13)) => false,
                    _ => panic!(),
                }
            });

            // Drain until `Closed` is observed; the sum identifies which
            // messages made it through.
            let mut sum = 0;
            loop {
                match consumer.pop() {
                    Ok(v) => sum += v,
                    Err(PopError::Closed) => break,
                    Err(PopError::Empty) => {}
                };
                thread::yield_now();
            }

            th_push_close.join().unwrap();
            let try_push_success = th_try_push.join().unwrap();
            if try_push_success {
                assert_eq!(sum, 7 + 13);
            } else {
                assert_eq!(sum, 7);
            }
        });
    }

    // Model-check closing from the consumer side: each racing `push` either
    // succeeds before the close (and its value is drained) or reports `Closed`.
    #[test]
    fn loom_queue_closed_by_consumer() {
        const CAPACITY: usize = 3;
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, mut consumer) = queue(CAPACITY);

            let th_try_push1 = thread::spawn({
                let producer = producer.clone();

                move || match producer.push(7) {
                    Ok(()) => true,
                    Err(PushError::Closed(7)) => false,
                    _ => panic!(),
                }
            });

            let th_try_push2 = thread::spawn({
                let producer = producer.clone();

                move || match producer.push(13) {
                    Ok(()) => true,
                    Err(PushError::Closed(13)) => false,
                    _ => panic!(),
                }
            });

            let mut sum = 0;
            consumer.close();

            loop {
                match consumer.pop() {
                    Ok(v) => sum += v,
                    Err(PopError::Closed) => break,
                    Err(PopError::Empty) => {}
                };
                thread::yield_now();
            }

            // The drained sum must match exactly the set of successful pushes.
            let try_push1_success = th_try_push1.join().unwrap();
            let try_push2_success = th_try_push2.join().unwrap();
            match (try_push1_success, try_push2_success) {
                (true, true) => assert_eq!(sum, 7 + 13),
                (true, false) => assert_eq!(sum, 7),
                (false, true) => assert_eq!(sum, 13),
                (false, false) => {}
            }
        });
    }
}
--------------------------------------------------------------------------------
/tests/general.rs:
--------------------------------------------------------------------------------
//! Note: timer-based tests are disabled for MIRI.
2 | 3 | #[cfg(not(miri))] 4 | use std::future::Future; 5 | #[cfg(not(miri))] 6 | use std::task::{Context, Poll}; 7 | use std::thread; 8 | #[cfg(not(miri))] 9 | use std::time::Duration; 10 | 11 | use futures_executor::block_on; 12 | #[cfg(not(miri))] 13 | use futures_task::noop_waker; 14 | #[cfg(not(miri))] 15 | use futures_util::pin_mut; 16 | use tachyonix::{channel, RecvError, SendError, TryRecvError, TrySendError}; 17 | #[cfg(not(miri))] 18 | use tachyonix::{RecvTimeoutError, SendTimeoutError}; 19 | 20 | // Sleep for the provided number of milliseconds. 21 | #[cfg(not(miri))] 22 | fn sleep(millis: u64) { 23 | thread::sleep(Duration::from_millis(millis)); 24 | } 25 | #[cfg(not(miri))] 26 | async fn async_sleep(millis: u64) -> () { 27 | futures_time::task::sleep(futures_time::time::Duration::from_millis(millis)).await; 28 | } 29 | 30 | // Poll the future once and keep it alive for the specified number of 31 | // milliseconds. 32 | #[cfg(not(miri))] 33 | fn poll_once_and_keep_alive(f: F, millis: u64) -> Poll { 34 | pin_mut!(f); 35 | let waker = noop_waker(); 36 | let mut cx = Context::from_waker(&waker); 37 | 38 | let res = f.poll(&mut cx); 39 | 40 | // Delay the drop of the original (shadowed) future. 41 | sleep(millis); 42 | 43 | res 44 | } 45 | 46 | // Basic synchronous sending/receiving functionality. 
#[cfg(not(miri))]
#[test]
fn try_send_recv() {
    let (s, mut r) = channel(2);

    let th_send = thread::spawn(move || {
        sleep(100);
        assert_eq!(s.try_send(3), Ok(())); // t = t0 + 100
        assert_eq!(s.try_send(7), Ok(())); // t = t0 + 100
        assert_eq!(s.try_send(13), Err(TrySendError::Full(13))); // t = t0 + 100
        sleep(200);
        assert_eq!(s.try_send(42), Ok(())); // t = t0 + 300
        // `s` is dropped here, closing the channel.
    });

    sleep(200);
    assert_eq!(r.try_recv(), Ok(3)); // t = t0 + 200
    assert_eq!(r.try_recv(), Ok(7)); // t = t0 + 200
    assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); // t = t0 + 200
    sleep(200);
    assert_eq!(r.try_recv(), Ok(42)); // t = t0 + 400
    assert_eq!(r.try_recv(), Err(TryRecvError::Closed)); // t = t0 + 400

    th_send.join().unwrap();
}

// Basic asynchronous sending functionality.
#[cfg(not(miri))]
#[test]
fn async_send() {
    // Capacity 2 so the third send blocks until the receiver frees a slot.
    let (s, mut r) = channel(2);

    let th_send = thread::spawn(move || {
        block_on(s.send(3)).unwrap();
        block_on(s.send(7)).unwrap();
        block_on(s.send(13)).unwrap(); // blocked until t0 + 300
        sleep(200);
        block_on(s.send(42)).unwrap(); // t = t0 + 500
    });

    sleep(300);
    assert_eq!(r.try_recv(), Ok(3)); // t = t0 + 300
    assert_eq!(r.try_recv(), Ok(7)); // t = t0 + 300
    sleep(100);
    assert_eq!(r.try_recv(), Ok(13)); // t = t0 + 400
    sleep(200);
    assert_eq!(r.try_recv(), Ok(42)); // t = t0 + 600

    th_send.join().unwrap();
}

// Asynchronous sending with timeout.
#[cfg(not(miri))]
#[test]
fn async_send_timeout() {
    let (s, mut r) = channel(2);

    let th_send = thread::spawn(move || {
        block_on(async {
            s.send(1).await.unwrap(); // t = t0
            s.send(2).await.unwrap(); // t = t0
            s.send_timeout(3, async_sleep(200)).await.unwrap(); // blocked from t = t0 to t0 + 100
            assert_eq!(
                s.send_timeout(4, async_sleep(100)).await, // blocked from t = t0 + 100 to t0 + 200
                Err(SendTimeoutError::Timeout(4))
            );
            sleep(200);
            assert_eq!(
                s.send_timeout(5, async_sleep(200)).await, // t = t0 + 400
                Err(SendTimeoutError::Closed(5))
            );
        })
    });

    sleep(100);
    assert_eq!(r.try_recv(), Ok(1)); // t = t0 + 100
    sleep(200);
    assert_eq!(r.try_recv(), Ok(2)); // t = t0 + 300
    assert_eq!(r.try_recv(), Ok(3)); // t = t0 + 300
    assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); // t = t0 + 300
    // Dropping the receiver closes the channel, so the last `send_timeout`
    // above reports `Closed` rather than `Timeout`.
    drop(r);

    th_send.join().unwrap();
}

// Basic asynchronous receiving functionality.
#[cfg(not(miri))]
#[test]
fn async_recv() {
    let (s, mut r) = channel(100);

    let th_send = thread::spawn(move || {
        sleep(100);
        assert_eq!(s.try_send(3), Ok(())); // t = t0 + 100
        assert_eq!(s.try_send(7), Ok(())); // t = t0 + 100
        assert_eq!(s.try_send(42), Ok(())); // t = t0 + 100
        sleep(100);
    });

    assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); // t = t0
    assert_eq!(block_on(r.recv()), Ok(3)); // blocked from t0 to t0 + 100
    assert_eq!(block_on(r.recv()), Ok(7)); // t = t0 + 100
    assert_eq!(block_on(r.recv()), Ok(42)); // t = t0 + 100
    assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); // t = t0 + 100

    th_send.join().unwrap();
}

// Asynchronous receiving with timeout.
#[cfg(not(miri))]
#[test]
fn async_recv_timeout() {
    let (s, mut r) = channel(100);

    let th_send = thread::spawn(move || {
        s.try_send(1).unwrap(); // t = t0
        sleep(200);
        s.try_send(2).unwrap(); // t = t0 + 200
        sleep(300);
        s.try_send(3).unwrap(); // t = t0 + 500
        // `s` is dropped here, closing the channel.
    });

    block_on(async {
        sleep(100);
        assert_eq!(r.recv_timeout(async_sleep(200)).await, Ok(1)); // t = t0 + 100
        assert_eq!(r.recv_timeout(async_sleep(200)).await, Ok(2)); // blocked from t = t0 + 100 to t0 + 200
        assert_eq!(
            r.recv_timeout(async_sleep(200)).await,
            Err(RecvTimeoutError::Timeout)
        ); // blocked from t = t0 + 200 to t0 + 400
        sleep(200);
        assert_eq!(r.recv_timeout(async_sleep(200)).await, Ok(3)); // t = t0 + 600
        assert_eq!(
            r.recv_timeout(async_sleep(200)).await,
            Err(RecvTimeoutError::Closed)
        ); // t = t0 + 600
    });

    th_send.join().unwrap();
}

// Channel closed due to the receiver being dropped.
#[test]
fn send_after_close() {
    let (s, r) = channel(100);

    block_on(s.send(3)).unwrap();
    block_on(s.send(7)).unwrap();

    drop(r);

    // Both the async and the non-blocking send must report the closed channel
    // and hand the rejected message back.
    assert_eq!(block_on(s.send(13)), Err(SendError(13)));
    assert_eq!(s.try_send(42), Err(TrySendError::Closed(42)));
}

// Channel closed due to the receiver being dropped while a sender is blocked on
// a full channel.
#[cfg(not(miri))]
#[test]
fn blocked_send_after_close() {
    let (s1, r) = channel(2);
    let s2 = s1.clone();

    // Fill the channel so both sends below block.
    block_on(s1.send(3)).unwrap();
    block_on(s1.send(7)).unwrap();

    let th_send1 = thread::spawn(move || {
        assert_eq!(block_on(s1.send(13)), Err(SendError(13))); // blocked from t0 to t0 + 100
    });
    let th_send2 = thread::spawn(move || {
        assert_eq!(block_on(s2.send(42)), Err(SendError(42))); // blocked from t0 to t0 + 100
    });

    sleep(100);
    // Dropping the receiver must wake both blocked senders with `SendError`.
    drop(r); // t = t0 + 100

    th_send1.join().unwrap();
    th_send2.join().unwrap();
}

// Channel closed due to the senders being dropped.
#[test]
fn recv_after_close() {
    let (s1, mut r) = channel(100);
    let s2 = s1.clone();

    block_on(s1.send(3)).unwrap();
    block_on(s1.send(7)).unwrap();
    block_on(s2.send(13)).unwrap();

    drop(s1);
    drop(s2);

    // Buffered messages remain receivable after all senders are gone; only
    // then is the channel reported closed.
    assert_eq!(block_on(r.recv()), Ok(3));
    assert_eq!(block_on(r.recv()), Ok(7));
    assert_eq!(block_on(r.recv()), Ok(13));
    assert_eq!(block_on(r.recv()), Err(RecvError));
    assert_eq!(r.try_recv(), Err(TryRecvError::Closed));
}

// Channel closed due to the senders being dropped while the receiver is blocked
// on an empty channel.
#[cfg(not(miri))]
#[test]
fn blocked_recv_after_close() {
    let (s1, mut r) = channel(100);
    let s2 = s1.clone();

    block_on(s1.send(3)).unwrap();
    block_on(s1.send(7)).unwrap();
    block_on(s2.send(13)).unwrap();

    let th_recv = thread::spawn(move || {
        assert_eq!(block_on(r.recv()), Ok(3));
        assert_eq!(block_on(r.recv()), Ok(7));
        assert_eq!(block_on(r.recv()), Ok(13));
        // The fourth `recv` blocks on an empty channel until both senders are
        // dropped below, then resolves with `RecvError`.
        assert_eq!(block_on(r.recv()), Err(RecvError)); // blocked from t0 to t0 + 100
        assert_eq!(r.try_recv(), Err(TryRecvError::Closed));
    });

    sleep(100);
    drop(s1);
    drop(s2);

    th_recv.join().unwrap();
}

// Block two senders on a full channel, cancel the first sending operation and
// receive a message to unblock the second sender.
#[cfg(not(miri))]
#[test]
fn cancel_async_send() {
    let (s1, mut r) = channel(2);
    let s2 = s1.clone();

    // Fill the channel and block a sender, then cancel the sending operation at
    // t0 + 300.
    let th_send1 = thread::spawn(move || {
        block_on(s1.send(3)).unwrap();
        block_on(s1.send(7)).unwrap();
        assert_eq!(poll_once_and_keep_alive(s1.send(13), 300), Poll::Pending); // cancel at t0 + 300
    });

    // Block a second sender from t0 + 100, expect it to get re-scheduled when the
    // sending operation of the first blocked sender is cancelled.
    let th_send2 = thread::spawn(move || {
        sleep(100);
        block_on(s2.send(42)).unwrap(); // blocked from t0 + 100 to t0 + 300
    });

    // Receive a message at t0 + 200 to free one channel slot; receive the
    // remaining messages at t0 + 400. Message 13 is never delivered: its send
    // future is dropped (cancelled) before completing.
    let th_recv = thread::spawn(move || {
        sleep(200);
        assert_eq!(block_on(r.recv()), Ok(3)); // t = t0 + 200
        sleep(200);
        assert_eq!(r.try_recv(), Ok(7)); // t = t0 + 400
        assert_eq!(r.try_recv(), Ok(42)); // t = t0 + 400
    });

    th_send1.join().unwrap();
    th_send2.join().unwrap();
    th_recv.join().unwrap();
}

// Block two senders on a full channel, stop polling the first sender and
// receive two messages to unblock the second sender.
#[cfg(not(miri))]
#[test]
fn forget_async_send() {
    let (s1, mut r) = channel(2);
    let s2 = s1.clone();

    // Fill the channel and block a sender, then stop polling it for a long
    // time.
    let th_send1 = thread::spawn(move || {
        block_on(s1.send(3)).unwrap();
        block_on(s1.send(7)).unwrap();
        assert_eq!(poll_once_and_keep_alive(s1.send(13), 500), Poll::Pending);
    });

    // Block a second sender from t0 + 100, expect it to get re-scheduled when the
    // second message is received.
    let th_send2 = thread::spawn(move || {
        sleep(100);
        block_on(s2.send(42)).unwrap(); // blocked from t0 + 100 to t0 + 200
    });

    // Receive two messages at t0 + 200 to free both channel slots; receive one
    // more message at t0 + 300 to check that the second sender got
    // re-scheduled.
    let th_recv = thread::spawn(move || {
        sleep(200);
        assert_eq!(block_on(r.recv()), Ok(3)); // t = t0 + 200
        assert_eq!(block_on(r.recv()), Ok(7)); // t = t0 + 200
        sleep(100);
        assert_eq!(r.try_recv(), Ok(42)); // t = t0 + 300
    });

    th_send1.join().unwrap();
    th_send2.join().unwrap();
    th_recv.join().unwrap();
}

// SPSC stress test.
351 | #[test] 352 | fn spsc_stress() { 353 | const CAPACITY: usize = 3; 354 | const COUNT: usize = if cfg!(miri) { 50 } else { 1_000_000 }; 355 | 356 | let (s, mut r) = channel(CAPACITY); 357 | 358 | let th_send = thread::spawn(move || { 359 | block_on(async { 360 | for i in 0..COUNT { 361 | s.send(i).await.unwrap(); 362 | } 363 | }); 364 | }); 365 | let th_recv = thread::spawn(move || { 366 | block_on(async { 367 | for i in 0..COUNT { 368 | assert_eq!(r.recv().await, Ok(i)); 369 | } 370 | }); 371 | 372 | assert!(r.try_recv().is_err()); 373 | }); 374 | 375 | th_send.join().unwrap(); 376 | th_recv.join().unwrap(); 377 | } 378 | 379 | // MPSC stress test. 380 | #[test] 381 | fn mpsc_stress() { 382 | const CAPACITY: usize = 3; 383 | const COUNT: usize = if cfg!(miri) { 50 } else { 1_000_000 }; 384 | const THREADS: usize = 4; 385 | 386 | let (s, mut r) = channel(CAPACITY); 387 | 388 | let th_send = (0..THREADS).map(|_| { 389 | let s = s.clone(); 390 | 391 | thread::spawn(move || { 392 | block_on(async { 393 | for i in 0..COUNT { 394 | s.send(i).await.unwrap(); 395 | } 396 | }); 397 | }) 398 | }); 399 | let th_recv = thread::spawn(move || { 400 | let mut stats = Vec::new(); 401 | stats.resize(COUNT, 0); 402 | 403 | block_on(async { 404 | for _ in 0..COUNT * THREADS { 405 | let i = r.recv().await.unwrap(); 406 | stats[i] += 1; 407 | } 408 | }); 409 | 410 | assert!(r.try_recv().is_err()); 411 | 412 | for s in stats { 413 | assert_eq!(s, THREADS); 414 | } 415 | }); 416 | 417 | for th in th_send { 418 | th.join().unwrap() 419 | } 420 | th_recv.join().unwrap(); 421 | } 422 | -------------------------------------------------------------------------------- /tests/may_leak.rs: -------------------------------------------------------------------------------- 1 | use std::mem::{self, ManuallyDrop}; 2 | use std::pin::Pin; 3 | 4 | use futures_executor::block_on; 5 | use futures_util::poll; 6 | 7 | use tachyonix::channel; 8 | 9 | // Forget a send future, then drop the sender. 
//
// Mainly meant for MIRI. See the upstream tachyonix soundness issue
// (NOTE(review): the original `<https://…>` link was lost in extraction —
// restore it).
#[test]
fn forget_send_future_drop_sender() {
    let (s, mut r) = channel(1);

    // Boxing so that MIRI can identify invalidated memory access via use-after-free.
    let s = Box::new(s);

    // Fill the single-slot channel so the next send future stays pending.
    s.try_send(13).unwrap();

    let mut s_fut = ManuallyDrop::new(s.send(42));
    let mut s_fut = unsafe { Pin::new_unchecked(&mut *s_fut) }; // safe: the unpinned future is shadowed.
    assert!(block_on(async { poll!(s_fut.as_mut()) }).is_pending());

    // Leak the pending send future, then drop its sender.
    std::mem::forget(s_fut);

    drop(s);

    // The first message must still be retrievable.
    assert!(r.try_recv().is_ok());
}

// Forget a send future, then forget the sender.
//
// Mainly meant for MIRI. See the upstream tachyonix soundness issue
// (NOTE(review): the original `<https://…>` link was lost in extraction —
// restore it).
#[test]
fn forget_send_future_forget_sender() {
    let (s, mut r) = channel(1);

    let mut s = ManuallyDrop::new(s);
    // Fill the single-slot channel so the next send future stays pending.
    s.try_send(13).unwrap();

    let mut s_fut = ManuallyDrop::new(s.send(42));
    let mut s_fut = unsafe { Pin::new_unchecked(&mut *s_fut) }; // safe: the unpinned future is shadowed.
    assert!(block_on(async { poll!(s_fut.as_mut()) }).is_pending());

    mem::forget(s_fut);

    // Forget the sender and move in a new sender.
    // (Reassigning drops the old `ManuallyDrop` wrapper, which by design does
    // NOT drop the wrapped sender — i.e. the original sender is leaked.)
    s = ManuallyDrop::new(tachyonix::channel(1).0);
    let _ = s;

    assert!(r.try_recv().is_ok());
}

// Forget a send future, then reuse the sender.
//
// See the upstream tachyonix soundness issue (NOTE(review): the original
// `<https://…>` link was lost in extraction — restore it).
#[test]
fn forget_send_future_reuse_sender() {
    let (s, mut r) = channel(1);

    let s = ManuallyDrop::new(s);
    // Fill the single-slot channel so both send futures below stay pending.
    s.try_send(7).unwrap();

    let mut s_fut1 = ManuallyDrop::new(s.send(13));
    let mut s_fut1 = unsafe { Pin::new_unchecked(&mut *s_fut1) }; // safe: the unpinned future is shadowed.
    assert!(block_on(async { poll!(s_fut1.as_mut()) }).is_pending());

    mem::forget(s_fut1);

    // Reusing the sender after leaking a pending send future must still be
    // sound: a second pending send is created and dropped normally.
    assert!(block_on(async { poll!(Box::pin(s.send(42))) }).is_pending());

    assert!(r.try_recv().is_ok());
}
--------------------------------------------------------------------------------
/tests/tests.rs:
--------------------------------------------------------------------------------
// Temporary workaround until the `async_event_loom` flag can be whitelisted
// without a `build.rs` [1].
//
// [1]: (https://github.com/rust-lang/rust/issues/124800).
#![allow(unexpected_cfgs)]

/// Non-Loom tests that may not leak memory; on MIRI, enabled only if
/// `tachyonix_ignore_leaks` is not configured.
#[cfg(all(not(tachyonix_loom), any(not(miri), not(tachyonix_ignore_leaks))))]
mod general;
/// Non-Loom tests that may leak memory; on MIRI, enabled only if
/// `tachyonix_ignore_leaks` is configured.
#[cfg(all(not(tachyonix_loom), any(not(miri), tachyonix_ignore_leaks)))]
mod may_leak;
--------------------------------------------------------------------------------