├── .github
│   └── workflows
│       ├── checks.yml
│       ├── linux.yml
│       └── osx.yml
├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── benches
│   ├── async.rs
│   ├── common.rs
│   └── sync.rs
├── src
│   ├── backoff.rs
│   ├── error.rs
│   ├── future.rs
│   ├── internal.rs
│   ├── lib.rs
│   ├── mutex.rs
│   ├── pointer.rs
│   └── signal.rs
└── tests
    ├── async_test.rs
    ├── sync_test.rs
    └── utils
        └── mod.rs

/.github/workflows/checks.yml:
--------------------------------------------------------------------------------
 1 | name: Checks
 2 |
 3 | on:
 4 |   push:
 5 |   pull_request:
 6 |   schedule: [cron: "40 1 * * *"]
 7 |
 8 | jobs:
 9 |   clippy:
10 |     name: Clippy
11 |     runs-on: ubuntu-latest
12 |     steps:
13 |       - uses: actions/checkout@v3
14 |       - uses: actions-rs/toolchain@v1
15 |         with:
16 |           toolchain: nightly
17 |           components: clippy
18 |           override: true
19 |       - uses: actions-rs/clippy-check@v1
20 |         with:
21 |           token: ${{ secrets.GITHUB_TOKEN }}
22 |           args: --all-features
23 |
24 |   fmt:
25 |     name: Rustfmt
26 |     runs-on: ubuntu-latest
27 |     strategy:
28 |       matrix:
29 |         rust:
30 |           - stable
31 |     steps:
32 |       - uses: actions/checkout@v3
33 |       - uses: actions-rs/toolchain@v1
34 |         with:
35 |           profile: minimal
36 |           toolchain: ${{ matrix.rust }}
37 |           override: true
38 |       - uses: actions-rs/cargo@v1
39 |         with:
40 |           command: fmt
41 |           args: --all -- --check
42 |
--------------------------------------------------------------------------------
/.github/workflows/linux.yml:
--------------------------------------------------------------------------------
 1 | name: CI (Linux)
 2 |
 3 | on:
 4 |   push:
 5 |   pull_request:
 6 |   schedule: [cron: "40 1 * * *"]
 7 |
 8 | jobs:
 9 |   build_and_test:
10 |     strategy:
11 |       fail-fast: false
12 |       matrix:
13 |         version:
14 |           # - 1.57.0 # MSRV
15 |           - stable
16 |           - nightly
17 |
18 |     name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
19 |     runs-on: ubuntu-latest
20 |
21 |     steps:
22 |       - uses: actions/checkout@master
23 |
24 |       - name: Install ${{ matrix.version }}
25 |         uses: actions-rs/toolchain@v1
26 |         with:
27 |           toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
28 |           profile: minimal
29 |           components: rustfmt
30 |           override: true
31 |
32 |       - name: Generate Cargo.lock
33 |         uses: actions-rs/cargo@v1
34 |         with:
35 |           command: generate-lockfile
36 |
37 |       - name: Cache cargo registry
38 |         uses: actions/cache@v3
39 |         with:
40 |           path: ~/.cargo/registry
41 |           key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-registry-trimmed-${{ hashFiles('**/Cargo.lock') }}
42 |
43 |       - name: Cache cargo index
44 |         uses: actions/cache@v3
45 |         with:
46 |           path: ~/.cargo/git
47 |           key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
48 |
49 |       - name: Run tests
50 |         uses: actions-rs/cargo@v1
51 |         timeout-minutes: 40
52 |         with:
53 |           command: test
54 |           args: --all-features -- --nocapture
55 |
56 |       - name: Install cargo-cache
57 |         continue-on-error: true
58 |         run: |
59 |           cargo install cargo-cache --no-default-features --features ci-autoclean
60 |
61 |       - name: Clear the cargo caches
62 |         run: |
63 |           cargo-cache
--------------------------------------------------------------------------------
/.github/workflows/osx.yml:
--------------------------------------------------------------------------------
 1 | name: CI (OSX)
 2 |
 3 | on:
 4 |   push:
 5 |   pull_request:
 6 |   schedule: [cron: "40 1 * * *"]
 7 |
 8 | jobs:
 9 |   build_and_test:
10 |     strategy:
11 |       fail-fast: false
12 |       matrix:
13 |         version:
14 |           - stable
15 |           - nightly
16 |
17 |     name: ${{ matrix.version }} - aarch64-apple-darwin
18 |     runs-on: macOS-latest
19 |
20 |     steps:
21 |       - uses: actions/checkout@master
22 |       - name: Install ${{ matrix.version }}
23 |         uses: actions-rs/toolchain@v1
24 |         with:
25 |           toolchain: ${{ matrix.version }}-aarch64-apple-darwin
26 |           profile: minimal
27 |           components: rustfmt
28 |           override: true
29 |
30 |       - name: Generate Cargo.lock
31 |         uses: actions-rs/cargo@v1
32 |         with:
33 |           command: generate-lockfile
34 |
35 |       - name: Cache cargo registry
36 |         uses: actions/cache@v3
37 |         with:
38 |           path: ~/.cargo/registry
39 |           key: ${{ matrix.version }}-aarch64-apple-darwin-cargo-registry-trimmed-${{ hashFiles('**/Cargo.lock') }}
40 |
41 |       - name: Cache cargo index
42 |         uses: actions/cache@v3
43 |         with:
44 |           path: ~/.cargo/git
45 |           key: ${{ matrix.version }}-aarch64-apple-darwin-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
46 |
47 |       - name: Run tests
48 |         uses: actions-rs/cargo@v1
49 |         with:
50 |           command: test
51 |           args: --all-features
52 |
53 |       - name: Clear the cargo caches
54 |         run: |
55 |           cargo install cargo-cache --no-default-features --features ci-autoclean
56 |           cargo-cache
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /dev
3 | Cargo.lock
4 | rustfmt.toml
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
 1 | [package]
 2 | name = "kanal"
 3 | version = "0.1.1"
 4 | edition = "2021"
 5 | authors = ["Khashayar Fereidani"]
 6 | description = "The fast sync and async channel that Rust deserves"
 7 | repository = "https://github.com/fereidani/kanal"
 8 | documentation = "https://docs.rs/kanal"
 9 | keywords = ["channel", "mpsc", "mpmc", "async"]
10 | categories = ["concurrency", "data-structures", "asynchronous"]
11 | license = "MIT"
12 | readme = "README.md"
13 |
14 | [dependencies]
15 | cacheguard = "0.1"
16 | futures-core = { version = "0.3", optional = true }
17 | lock_api = "0.4"
18 |
19 | [dev-dependencies]
20 | anyhow = "1.0"
21 | criterion = "0.4"
22 | crossbeam = "0.8"
23 | tokio = { version = "1", features = ["rt-multi-thread", "test-util", "macros"] }
24 | futures = "0.3"
25 |
26 | [features]
27 | async = ["futures-core"]
28 | std-mutex = []
29 | default = ["async"]
30 |
31 | [[bench]]
32 | name = "sync"
33 | harness = false
34 |
35 | [[bench]]
36 | name = "async"
37 | harness = false
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | The MIT License (MIT)
 2 |
 3 | Copyright (c) 2022-2023 Khashayar Fereidani and other Kanal contributors
 4 |
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
  1 | # Kanal
  2 |
  3 | **The fast sync and async channel that Rust deserves!**
  4 |
  5 | [![Crates.io][crates-badge]][crates-url]
  6 | [![Documentation][doc-badge]][doc-url]
  7 | [![MIT licensed][mit-badge]][mit-url]
  8 |
  9 | [crates-badge]: https://img.shields.io/crates/v/kanal.svg?style=for-the-badge
 10 | [crates-url]: https://crates.io/crates/kanal
 11 | [mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg?style=for-the-badge
 12 | [mit-url]: https://github.com/fereidani/kanal/blob/master/LICENSE
 13 | [doc-badge]: https://img.shields.io/docsrs/kanal?style=for-the-badge
 14 | [doc-url]: https://docs.rs/kanal
 15 |
 16 | ## What is Kanal
 17 |
 18 | The Kanal library is a Rust implementation of channels inspired by the CSP (Communicating Sequential Processes) model. It aims to help programmers create efficient concurrent programs by providing multi-producer and multi-consumer channels with advanced features for fast communication. The library focuses on unifying message passing between synchronous and asynchronous parts of Rust code, offering a combination of synchronous and asynchronous APIs while maintaining high performance.
 19 |
 20 | ## Why is Kanal faster?
 21 |
 22 | 1. Kanal employs a highly optimized composite technique for transferring objects. When the data size is less than or equal to the pointer size, it encodes the data directly as the pointer address (see the sketch after this list). Conversely, when the data size exceeds the pointer size, the protocol employs a strategy similar to the one used by the Go programming language, using direct memory access to copy objects from the sender's stack or to write directly to the receiver's stack. This composite method eliminates both unnecessary pointer dereferences and heap allocations for bounded(0) channels.
 23 | 2. Kanal utilizes a specially tuned mutex for its channel locking mechanism, made possible by the predictable internal lock time of the channel. That said, it's possible to use the Rust standard mutex with the `std-mutex` feature, and Kanal still outperforms competitors with that feature enabled.
 24 | 3. Kanal takes advantage of Rust's high-performance compiler and the powerful LLVM backend, combining highly optimized memory access with carefully designed algorithms.
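
A minimal sketch of the size-based dispatch described in point 1 (illustrative only, not Kanal's actual internals; `transfer_strategy` is a made-up name):

```rust
use core::mem::size_of;

// Mirrors the rule described above: payloads no larger than a pointer can be
// encoded in the pointer-sized slot itself; larger payloads are copied
// directly between the sender's and receiver's stacks.
fn transfer_strategy<T>() -> &'static str {
    if size_of::<T>() <= size_of::<*mut T>() {
        "encode the value in the pointer-sized slot (no heap, no indirection)"
    } else {
        "exchange a raw pointer and copy stack-to-stack, Go-style"
    }
}

fn main() {
    // A usize always fits in a pointer; an 8-pointer-wide array never does.
    assert!(transfer_strategy::<usize>().starts_with("encode"));
    assert!(transfer_strategy::<[usize; 8]>().starts_with("exchange"));
}
```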
 25 |
 26 | ## Usage
 27 |
 28 | To use Kanal in your Rust project, add the following to your `Cargo.toml` file:
 29 |
 30 | ```toml
 31 | [dependencies]
 32 | kanal = "0.1"
 33 | ```
 34 |
 35 | Sync channel example:
 36 |
 37 | ```rust,ignore
 38 | // Initialize a bounded sync channel with a capacity for 8 messages
 39 | let (sender, receiver) = kanal::bounded(8);
 40 |
 41 | let s = sender.clone();
 42 | std::thread::spawn(move || {
 43 |     s.send("hello")?;
 44 |     anyhow::Ok(())
 45 | });
 46 |
 47 | // Receive the message sent from the spawned thread
 48 | let msg = receiver.recv()?;
 49 | println!("I got msg: {}", msg);
 50 |
 51 |
 52 | // Convert and use the channel in an async context to communicate between sync and async
 53 | tokio::spawn(async move {
 54 |     // Borrow the channel as an async channel (or convert it to async using to_async())
 55 |     sender.as_async().send("hello").await?;
 56 |     anyhow::Ok(())
 57 | });
 58 | ```
 59 |
 60 | Async channel example:
 61 |
 62 | ```rust,ignore
 63 | // Initialize a bounded channel with a capacity for 8 messages
 64 | let (sender, receiver) = kanal::bounded_async(8);
 65 |
 66 | sender.send("hello").await?;
 67 | sender.send("hello").await?;
 68 |
 69 | // Clone receiver and convert it to a sync receiver
 70 | let receiver_sync = receiver.clone().to_sync();
 71 |
 72 | tokio::spawn(async move {
 73 |     let msg = receiver.recv().await?;
 74 |     println!("I got msg: {}", msg);
 75 |     anyhow::Ok(())
 76 | });
 77 |
 78 | // Spawn a thread and use receiver in sync context
 79 | std::thread::spawn(move || {
 80 |     let msg = receiver_sync.recv()?;
 81 |     println!("I got msg in sync context: {}", msg);
 82 |     anyhow::Ok(())
 83 | });
 84 | ```
 85 |
 86 | ## Why use Kanal?
 87 |
 88 | - Kanal offers fast and efficient communication capabilities.
 89 | - Kanal simplifies communication within and between synchronous and asynchronous contexts, thanks to its flexible conversion APIs like `as_sync` and `as_async`.
 90 | - Kanal provides a clean and intuitive API, making it easier to work with compared to other Rust libraries.
 91 | - Similar to Golang, Kanal allows you to close channels with the `close()` method, enabling you to broadcast a close signal from any channel instance and close the channel for both senders and receivers.
 92 | - Kanal includes high-performance MPMC (Multiple Producers Multiple Consumers) and SPSC (Single Producer Single Consumer) channels in a single package.
 93 |
 94 | ### Benchmark Results
 95 |
 96 | Results are based on how many messages can be passed in each scenario per second.
 97 |
 98 | #### Test types:
 99 |
100 | 1. Seq is sequentially writing to and reading from a channel in the same thread.
101 | 2. SPSC is one sender and one receiver passing messages between them.
102 | 3. MPSC is multiple sender threads with only one receiver.
103 | 4. MPMC is multiple senders and multiple receivers communicating through the same channel.
104 |
105 | #### Message types:
106 |
107 | 1. `usize` tests transfer messages the size of a hardware pointer.
108 | 2. `big` tests transfer messages of 8x the size of a hardware pointer.
109 |
110 | N/A means that the test subject is unable to perform the test due to its limitations; some of the test subjects don't have implementations for size-0 channels, MPMC, or unbounded channels.
111 |
112 | Machine: `AMD Ryzen 9 9950X 16-Core Processor`
113 | Rust: `rustc 1.85.1 (4eb161250 2025-03-15)`
114 | Go: `go version go1.24.1 linux/amd64`
115 | OS (`uname -a`): `Linux 6.11.0-19-generic #19~24.04.1-Ubuntu SMP PREEMPT_DYNAMIC Mon Feb 17 11:51:52 UTC 2 x86_64`
116 | Date: Mar 19, 2025
117 |
118 | [Benchmark codes](https://github.com/fereidani/rust-channel-benchmarks)
119 |
120 | ![Benchmarks](https://i.imgur.com/VPwyam0.png)
121 |
122 | #### Why does async outperform sync in some tests?
123 |
124 | In certain tests, asynchronous communication may exhibit superior performance compared to synchronous communication. This can be attributed to the context-switching performance of libraries such as tokio, which, similar to Golang, utilize context-switching within the same thread to switch to the next coroutine when a message is ready on a channel. This approach is more efficient than communicating between separate threads. The same principle applies to asynchronous network applications, which generally exhibit better performance compared to synchronous implementations. As the channel size increases, one may observe improved performance in synchronous benchmarks, as the sending threads are able to push data directly to the channel queue without having to wait on blocking/suspending signals from receiving threads.
125 |
--------------------------------------------------------------------------------
/benches/async.rs:
--------------------------------------------------------------------------------
  1 | mod common;
  2 |
  3 | pub use common::*;
  4 | use criterion::*;
  5 | use std::{thread::available_parallelism, time::Duration};
  6 |
  7 | macro_rules! run_bench {
  8 |     ($b:expr, $tx:expr, $rx:expr, $writers:expr, $readers:expr) => {{
  9 |         let rt = tokio::runtime::Builder::new_multi_thread()
 10 |             .worker_threads(usize::from(available_parallelism().unwrap()))
 11 |             .enable_all()
 12 |             .build()
 13 |             .unwrap();
 14 |         let readers_dist = evenly_distribute(BENCH_MSG_COUNT, $readers);
 15 |         let writers_dist = evenly_distribute(BENCH_MSG_COUNT, $writers);
 16 |         $b.iter(|| {
 17 |             let mut handles = Vec::with_capacity($readers + $writers);
 18 |             for d in 0..$readers {
 19 |                 let rx = $rx.clone();
 20 |                 let iterations = readers_dist[d];
 21 |                 handles.push(rt.spawn(async move {
 22 |                     for _ in 0..iterations {
 23 |                         check_value(black_box(rx.recv().await.unwrap()));
 24 |                     }
 25 |                 }));
 26 |             }
 27 |             for d in 0..$writers {
 28 |                 let tx = $tx.clone();
 29 |                 let iterations = writers_dist[d];
 30 |                 handles.push(rt.spawn(async move {
 31 |                     for i in 0..iterations {
 32 |                         tx.send(i + 1).await.unwrap();
 33 |                     }
 34 |                 }));
 35 |             }
 36 |             for handle in handles {
 37 |                 rt.block_on(handle).unwrap();
 38 |             }
 39 |         });
 40 |     }};
 41 | }
 42 |
 43 | fn mpmc(c: &mut Criterion) {
 44 |     let mut g = c.benchmark_group("async::mpmc");
 45 |     g.throughput(Throughput::Elements(BENCH_MSG_COUNT as u64));
 46 |     g.sample_size(10).warm_up_time(Duration::from_secs(1));
 47 |     g.bench_function("b0", |b| {
 48 |         let (tx, rx) = kanal::bounded_async::<usize>(0);
 49 |         let core_count = usize::from(available_parallelism().unwrap());
 50 |         run_bench!(b, tx, rx, core_count, core_count);
 51 |     });
 52 |     g.bench_function("b0_contended", |b| {
 53 |         let (tx, rx) = kanal::bounded_async::<usize>(0);
 54 |         let core_count = usize::from(available_parallelism().unwrap());
 55 |         run_bench!(b, tx, rx, core_count * 64, core_count * 64);
 56 |     });
 57 |     g.bench_function("b1", |b| {
 58 |         let (tx, rx) = kanal::bounded_async::<usize>(1);
 59 |         let core_count = usize::from(available_parallelism().unwrap());
 60 |         run_bench!(b, tx, rx, core_count, core_count);
 61 |     });
 62 |     g.bench_function("bn", |b| {
 63 |         let (tx, rx) = kanal::unbounded_async();
 64 |         let core_count = usize::from(available_parallelism().unwrap());
 65 |         run_bench!(b, tx, rx, core_count, core_count);
 66 |     });
 67 |     g.finish();
 68 | }
 69 |
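// (Illustrative note, not part of the repository.) Since Cargo.toml declares
// these benchmark targets with `harness = false`, they run through Criterion's
// own harness; the usual way to invoke them is:
//
//     cargo bench --bench async
//     cargo bench --bench sync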
 70 | fn mpsc(c: &mut Criterion) {
 71 |     let mut g = c.benchmark_group("async::mpsc");
 72 |     g.throughput(Throughput::Elements(BENCH_MSG_COUNT as u64));
 73 |     g.sample_size(10).warm_up_time(Duration::from_secs(1));
 74 |     g.bench_function("b0", |b| {
 75 |         let (tx, rx) = kanal::bounded_async::<usize>(0);
 76 |         let core_count = usize::from(available_parallelism().unwrap());
 77 |         run_bench!(b, tx, rx, core_count, 1);
 78 |     });
 79 |     g.bench_function("b0_contended", |b| {
 80 |         let (tx, rx) = kanal::bounded_async::<usize>(0);
 81 |         let core_count = usize::from(available_parallelism().unwrap());
 82 |         run_bench!(b, tx, rx, core_count * 64, 1);
 83 |     });
 84 |     g.bench_function("b1", |b| {
 85 |         let (tx, rx) = kanal::bounded_async::<usize>(1);
 86 |         let core_count = usize::from(available_parallelism().unwrap());
 87 |         run_bench!(b, tx, rx, core_count, 1);
 88 |     });
 89 |     g.bench_function("bn", |b| {
 90 |         let (tx, rx) = kanal::unbounded_async();
 91 |         let core_count = usize::from(available_parallelism().unwrap());
 92 |         run_bench!(b, tx, rx, core_count, 1);
 93 |     });
 94 |     g.finish();
 95 | }
 96 |
 97 | fn spsc(c: &mut Criterion) {
 98 |     let mut g = c.benchmark_group("async::spsc");
 99 |     g.throughput(Throughput::Elements(BENCH_MSG_COUNT as u64));
100 |     g.sample_size(10).warm_up_time(Duration::from_secs(1));
101 |     g.bench_function("b0", |b| {
102 |         let (tx, rx) = kanal::bounded_async::<usize>(0);
103 |         run_bench!(b, tx, rx, 1, 1);
104 |     });
105 |     g.bench_function("b1", |b| {
106 |         let (tx, rx) = kanal::bounded_async::<usize>(1);
107 |         run_bench!(b, tx, rx, 1, 1);
108 |     });
109 |     g.finish();
110 | }
111 | criterion_group!(async_bench, mpmc, mpsc, spsc);
112 | criterion_main!(async_bench);
113 |
--------------------------------------------------------------------------------
/benches/common.rs:
--------------------------------------------------------------------------------
 1 | pub const BENCH_MSG_COUNT: usize = 1 << 20;
 2 |
 3 | pub fn check_value(value: usize) {
 4 |     if value == 0 {
 5 |         println!("Value should not be zero");
 6 |     }
 7 | }
 8 |
 9 | pub fn evenly_distribute(total: usize, parts: usize) -> Vec<usize> {
10 |     if parts == 0 {
11 |         return Vec::new();
12 |     }
13 |
14 |     let base_value = total / parts;
15 |     let remainder = total % parts;
16 |
17 |     (0..parts)
18 |         .map(|i| base_value + if i < remainder { 1 } else { 0 })
19 |         .collect()
20 | }
21 |
--------------------------------------------------------------------------------
/benches/sync.rs:
--------------------------------------------------------------------------------
  1 | mod common;
  2 |
  3 | pub use common::*;
  4 | use criterion::*;
  5 | use std::{thread::available_parallelism, time::Duration};
  6 |
  7 | macro_rules! run_bench {
  8 |     ($b:expr, $tx:expr, $rx:expr, $writers:expr, $readers:expr) => {
  9 |         use std::thread::spawn;
 10 |         let readers_dist = evenly_distribute(BENCH_MSG_COUNT, $readers);
 11 |         let writers_dist = evenly_distribute(BENCH_MSG_COUNT, $writers);
 12 |         $b.iter(|| {
 13 |             let mut handles = Vec::with_capacity($readers + $writers);
 14 |             for d in 0..$readers {
 15 |                 let rx = $rx.clone();
 16 |                 let iterations = readers_dist[d];
 17 |                 handles.push(spawn(move || {
 18 |                     for _ in 0..iterations {
 19 |                         check_value(black_box(rx.recv().unwrap()));
 20 |                     }
 21 |                 }));
 22 |             }
 23 |             for d in 0..$writers {
 24 |                 let tx = $tx.clone();
 25 |                 let iterations = writers_dist[d];
 26 |                 handles.push(spawn(move || {
 27 |                     for i in 0..iterations {
 28 |                         tx.send(i + 1).unwrap();
 29 |                     }
 30 |                 }));
 31 |             }
 32 |             for handle in handles {
 33 |                 handle.join().unwrap();
 34 |             }
 35 |         })
 36 |     };
 37 | }
 38 |
 39 | fn mpmc(c: &mut Criterion) {
 40 |     let mut g = c.benchmark_group("sync::mpmc");
 41 |     g.throughput(Throughput::Elements(BENCH_MSG_COUNT as u64));
 42 |     g.sample_size(10).warm_up_time(Duration::from_secs(1));
 43 |     g.bench_function("b0", |b| {
 44 |         let (tx, rx) = kanal::bounded::<usize>(0);
 45 |         let core_count = usize::from(available_parallelism().unwrap());
 46 |         run_bench!(b, tx, rx, core_count, core_count);
 47 |     });
 48 |     g.bench_function("b0_contended", |b| {
 49 |         let (tx, rx) = kanal::bounded::<usize>(0);
 50 |         let core_count = usize::from(available_parallelism().unwrap());
 51 |         run_bench!(b, tx, rx, core_count * 64, core_count * 64);
 52 |     });
 53 |     g.bench_function("b1", |b| {
 54 |         let (tx, rx) = kanal::bounded::<usize>(1);
 55 |         let core_count = usize::from(available_parallelism().unwrap());
 56 |         run_bench!(b, tx, rx, core_count, core_count);
 57 |     });
 58 |     g.bench_function("bn", |b| {
 59 |         let (tx, rx) = kanal::unbounded();
 60 |         let core_count = usize::from(available_parallelism().unwrap());
 61 |         run_bench!(b, tx, rx, core_count, core_count);
 62 |     });
 63 |     g.finish();
 64 | }
 65 |
 66 | fn mpsc(c: &mut Criterion) {
 67 |     let mut g = c.benchmark_group("sync::mpsc");
 68 |     g.throughput(Throughput::Elements(BENCH_MSG_COUNT as u64));
 69 |     g.sample_size(10).warm_up_time(Duration::from_secs(1));
 70 |     g.bench_function("b0", |b| {
 71 |         let (tx, rx) = kanal::bounded::<usize>(0);
 72 |         let core_count = usize::from(available_parallelism().unwrap());
 73 |         run_bench!(b, tx, rx, core_count, 1);
 74 |     });
 75 |     g.bench_function("b0_contended", |b| {
 76 |         let (tx, rx) = kanal::bounded::<usize>(0);
 77 |         let core_count = usize::from(available_parallelism().unwrap());
 78 |         run_bench!(b, tx, rx, core_count * 64, 1);
 79 |     });
 80 |     g.bench_function("b1", |b| {
 81 |         let (tx, rx) = kanal::bounded::<usize>(1);
 82 |         let core_count = usize::from(available_parallelism().unwrap());
 83 |         run_bench!(b, tx, rx, core_count, 1);
 84 |     });
 85 |     g.bench_function("bn", |b| {
 86 |         let (tx, rx) = kanal::unbounded();
 87 |         let core_count = usize::from(available_parallelism().unwrap());
 88 |         run_bench!(b, tx, rx, core_count, 1);
 89 |     });
 90 |     g.finish();
 91 | }
 92 |
 93 | fn spsc(c: &mut Criterion) {
 94 |     let mut g = c.benchmark_group("sync::spsc");
 95 |     g.throughput(Throughput::Elements(BENCH_MSG_COUNT as u64));
 96 |     g.sample_size(10).warm_up_time(Duration::from_secs(1));
 97 |     g.bench_function("b0", |b| {
 98 |         let (tx, rx) = kanal::bounded::<usize>(0);
 99 |         run_bench!(b, tx, rx, 1, 1);
100 |     });
101 |     g.bench_function("b1", |b| {
102 |         let (tx, rx) = kanal::bounded::<usize>(1);
103 |         run_bench!(b, tx, rx, 1, 1);
104 |     });
105 |     g.finish();
106 | }
107 | criterion_group!(sync_bench, mpmc, mpsc, spsc);
108 | criterion_main!(sync_bench);
109 |
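// (Illustrative addition, not part of the repository.) A quick sanity check
// for the `evenly_distribute` helper used by these benchmarks: the per-worker
// counts always sum back to the original total, with the remainder spread
// over the first workers.
#[cfg(test)]
mod distribute_tests {
    use super::evenly_distribute;

    #[test]
    fn splits_exactly() {
        // 10 messages over 3 workers: the remainder goes to the first worker.
        assert_eq!(evenly_distribute(10, 3), vec![4, 3, 3]);
        // The total is preserved for arbitrary inputs.
        assert_eq!(evenly_distribute(1 << 20, 7).iter().sum::<usize>(), 1 << 20);
        // Zero parts yields an empty distribution.
        assert!(evenly_distribute(5, 0).is_empty());
    }
}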
--------------------------------------------------------------------------------
/src/backoff.rs:
--------------------------------------------------------------------------------
  1 | /// This module provides various backoff strategies that can be used to reduce
  2 | /// the amount of busy waiting and improve the efficiency of concurrent systems.
  3 | ///
  4 | /// The main idea behind separating backoff into an independent module is that
  5 | /// it makes it easier to test and compare different backoff solutions.
  6 | use core::{
  7 |     num::NonZeroUsize,
  8 |     sync::atomic::{AtomicU32, AtomicU8, AtomicUsize, Ordering},
  9 |     time::Duration,
 10 | };
 11 |
 12 | use std::thread::available_parallelism;
 13 |
 14 | /// Puts the current thread to sleep for a specified duration.
 15 | #[inline(always)]
 16 | pub fn sleep(dur: Duration) {
 17 |     std::thread::sleep(dur)
 18 | }
 19 |
 20 | /// Emits a CPU instruction that signals the processor that it is in a spin
 21 | /// loop.
 22 | #[allow(dead_code)]
 23 | #[inline(always)]
 24 | pub fn spin_hint() {
 25 |     std::hint::spin_loop()
 26 | }
 27 |
 28 | /// Yields the thread to the scheduler.
 29 | #[allow(dead_code)]
 30 | #[inline(always)]
 31 | pub fn yield_os() {
 32 |     // On Unix systems, this function uses libc's sched_yield(), which cooperatively
 33 |     // gives up the remainder of the current timeslice to another thread. On Windows
 34 |     // systems, it uses SwitchToThread(), which does the same thing.
 35 |     std::thread::yield_now();
 36 | }
 37 |
 38 | /// Spins in a loop for a finite amount of time.
 39 | #[allow(dead_code)]
 40 | #[inline(always)]
 41 | pub fn spin_wait(count: usize) {
 42 |     for _ in 0..count {
 43 |         spin_hint();
 44 |     }
 45 | }
 46 |
 47 | /// Spins for a short pseudo-random duration.
 48 | /// This function is implemented using a simple 7-bit pseudo random number
 49 | /// generator based on an atomic fetch-and-add operation.
 50 | #[allow(dead_code)]
 51 | #[inline(always)]
 52 | pub fn spin_rand() {
 53 |     // This number will be added to the calculated pseudo-random number to avoid
 54 |     // short spins.
 55 |     const OFFSET: usize = 1 << 6;
 56 |     spin_wait((random_u7() as usize).wrapping_add(OFFSET));
 57 | }
 58 |
 59 | /// Generates a 7-bit pseudo-random number using an atomic fetch-and-add
 60 | /// operation and a linear congruential generator (LCG)-like algorithm.
 61 | /// This generator is only suited for the special use-case of spin_rand(), and
 62 | /// not recommended for use anywhere else.
 63 | #[allow(dead_code)]
 64 | #[inline(always)]
 65 | fn random_u7() -> u8 {
 66 |     static SEED: AtomicU8 = AtomicU8::new(13);
 67 |     const MULTIPLIER: u8 = 113;
 68 |     // Increment the seed atomically. Relaxed ordering is enough as we only need an
 69 |     // atomic operation on the SEED itself.
 70 |     let seed = SEED.fetch_add(1, Ordering::Relaxed);
 71 |     // Use a LCG-like algorithm to generate a random number from the seed.
 72 |     seed.wrapping_mul(MULTIPLIER) & 0x7F
 73 | }
 74 |
 75 | /// Generates a pseudo-random u32 number using an atomic fetch-and-add operation
 76 | /// and a LCG-like algorithm. This function is implemented using the same
 77 | /// algorithm as random_u7().
 78 | #[allow(dead_code)]
 79 | #[inline(always)]
 80 | fn random_u32() -> u32 {
 81 |     static SEED: AtomicU32 = AtomicU32::new(13);
 82 |     const MULTIPLIER: u32 = 1812433253;
 83 |     let seed = SEED.fetch_add(1, Ordering::Relaxed);
 84 |     seed.wrapping_mul(MULTIPLIER)
 85 | }
 86 |
 87 | /// Randomizes the input by up to 25%.
 88 | /// This function is used to introduce some randomness into backoff strategies. Note that `d` must be at least 4; otherwise `d >> 2` is zero and the modulo below panics.
 89 | #[allow(dead_code)]
 90 | #[inline(always)]
 91 | pub fn randomize(d: usize) -> usize {
 92 |     d - (d >> 3) + random_u32() as usize % (d >> 2)
 93 | }
 94 |
 95 | // Static atomic variable used to store the degree of parallelism.
 96 | // Initialized to 0, meaning that the parallelism degree has not been computed
 97 | // yet.
 98 | static PARALLELISM: AtomicUsize = AtomicUsize::new(0);
 99 |
100 | /// Retrieves the available degree of parallelism.
101 | /// If the degree of parallelism has not been computed yet, it computes and
102 | /// stores it in the PARALLELISM atomic variable. The degree of parallelism
103 | /// typically corresponds to the number of processor cores that can execute
104 | /// threads concurrently.
105 | #[inline(always)]
106 | pub fn get_parallelism() -> usize {
107 |     let mut p = PARALLELISM.load(Ordering::Relaxed);
108 |     // If the parallelism degree has not been computed yet.
109 |     if p == 0 {
110 |         // Try to get the degree of parallelism from available_parallelism.
111 |         // If it is not available, default to 1.
112 |         p = usize::from(available_parallelism().unwrap_or(NonZeroUsize::new(1).unwrap()));
113 |         PARALLELISM.store(p, Ordering::SeqCst);
114 |     }
115 |     // Return the computed degree of parallelism.
116 |     p
117 | }
118 |
119 | /// Spins until the specified condition becomes true.
120 | /// This function uses a combination of spinning, yielding, and sleeping to
121 | /// reduce busy waiting and improve the efficiency of concurrent systems.
122 | ///
123 | /// The function starts with a short spinning phase, followed by a longer
124 | /// spinning and yielding phase, then a longer spinning and yielding phase with
125 | /// the operating system's yield function, and finally a phase with zero-length
126 | /// sleeping and yielding.
127 | ///
128 | /// The function uses a geometric backoff strategy to increase the spin time
129 | /// between each phase. The spin time starts at 8 iterations and doubles after
130 | /// each unsuccessful iteration, up to a maximum of 2^30 iterations.
131 | ///
132 | /// The function also uses a simple randomization strategy to introduce some
133 | /// variation into the spin time.
134 | ///
135 | /// The function takes a closure that returns a boolean value indicating whether
136 | /// the condition has been met. The function returns when the condition is true.
137 | #[allow(dead_code)]
138 | #[allow(clippy::reversed_empty_ranges)]
139 | #[inline(always)]
140 | pub fn spin_cond<F: Fn() -> bool>(cond: F) {
141 |     if get_parallelism() == 1 {
142 |         // For environments with limited resources, such as small Virtual Private
143 |         // Servers (VPS) or single-core systems, active spinning may lead to inefficient
144 |         // CPU usage without performance benefits. This is due to the fact that there's
145 |         // only one thread of execution, making it impossible for another thread to make
146 |         // progress during the spin wait period.
147 |         while !cond() {
148 |             yield_os();
149 |         }
150 |         return;
151 |     }
152 |
153 |     const NO_YIELD: usize = 1;
154 |     const SPIN_YIELD: usize = 1;
155 |     const OS_YIELD: usize = 0;
156 |     const ZERO_SLEEP: usize = 2;
157 |     const SPINS: u32 = 8;
158 |     let mut spins: u32 = SPINS;
159 |
160 |     // Short spinning phase
161 |     for _ in 0..NO_YIELD {
162 |         for _ in 0..SPINS / 2 {
163 |             if cond() {
164 |                 return;
165 |             }
166 |             spin_hint();
167 |         }
168 |     }
169 |
170 |     // Longer spinning and yielding phase
171 |     loop {
172 |         for _ in 0..SPIN_YIELD {
173 |             spin_rand();
174 |
175 |             for _ in 0..spins {
176 |                 if cond() {
177 |                     return;
178 |                 }
179 |             }
180 |         }
181 |
182 |         // Longer spinning and yielding phase with OS yield
183 |         for _ in 0..OS_YIELD {
184 |             yield_os();
185 |
186 |             for _ in 0..spins {
187 |                 if cond() {
188 |                     return;
189 |                 }
190 |             }
191 |         }
192 |
193 |         // Phase with zero-length sleeping and yielding
194 |         for _ in 0..ZERO_SLEEP {
195 |             sleep(Duration::from_nanos(0));
196 |
197 |             for _ in 0..spins {
198 |                 if cond() {
199 |                     return;
200 |                 }
201 |             }
202 |         }
203 |
204 |         // Geometric backoff
205 |         if spins < (1 << 30) {
206 |             spins <<= 1;
207 |         }
208 |         // Backoff about 1ms
209 |         sleep(Duration::from_nanos(1 << 20));
210 |     }
211 | }
212 |
213 | macro_rules! return_if_some {
214 |     ($result:expr) => {{
215 |         let result = $result;
216 |         if result.is_some() {
217 |             return result;
218 |         }
219 |     }};
220 | }
221 |
222 | /// Spins on the given predicate, yielding the thread between checks, until the
223 | /// predicate returns `Some` or roughly `spin_micros` microseconds have passed.
224 | ///
225 | /// # Parameters
226 | /// - `predicate`: closure that is polled for a result.
227 | /// - `spin_micros`: approximate spin budget in microseconds.
228 | ///
229 | /// # Returns
230 | /// The predicate's result as soon as it becomes `Some`, otherwise `None` on
231 | /// timeout (or if computing the deadline would overflow).
232 | #[inline(always)]
233 | #[allow(dead_code)]
234 | pub(crate) fn spin_option_yield_only<T>(
235 |     predicate: impl Fn() -> Option<T>,
236 |     spin_micros: u64,
237 | ) -> Option<T> {
238 |     // exit early if predicate is already satisfied
239 |     return_if_some!(predicate());
240 |     let timeout = if let Some(timeout) =
241 |         std::time::Instant::now().checked_add(Duration::from_micros(spin_micros))
242 |     {
243 |         timeout
244 |     } else {
245 |         return None;
246 |     };
247 |
248 |     loop {
249 |         for _ in 0..32 {
250 |             yield_os();
251 |             return_if_some!(predicate());
252 |         }
253 |         if std::time::Instant::now() >= timeout {
254 |             return None;
255 |         }
256 |     }
257 | }
--------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
  1 | #![forbid(unsafe_code)]
  2 | use core::fmt;
  3 | use core::fmt::Debug;
  4 | /// Error type for channel send operations without timeout
  5 | #[derive(Debug, PartialEq, Eq)]
  6 | pub enum SendError {
  7 |     /// Indicates that the channel is closed on both sides with a
  8 |     /// call to `close()`
  9 |     Closed,
 10 |     /// Indicates that all receiver instances are dropped and the channel is
 11 |     /// closed from the receive side
 12 |     ReceiveClosed,
 13 | }
 14 | impl core::error::Error for SendError {}
 15 | impl fmt::Display for SendError {
 16 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 17 |         fmt::Display::fmt(
 18 |             match *self {
 19 |                 SendError::Closed => "send to a closed channel",
 20 |                 SendError::ReceiveClosed => "send to a half closed channel",
 21 |             },
 22 |             f,
 23 |         )
 24 |     }
 25 | }
 26 |
 27 | /// Error type for channel send operations with timeout
 28 | #[derive(Debug, PartialEq, Eq)]
 29 | pub enum SendErrorTimeout {
 30 |     /// Indicates that the channel is closed on both sides with a call to
 31 |     /// `close()`
 32 |     Closed,
 33 |     /// Indicates that all receiver instances are dropped and the channel is
 34 |     /// closed from the receive side
 35 |     ReceiveClosed,
 36 |     /// Indicates that channel operation reached timeout and is canceled
 37 |     Timeout,
 38 | }
 39 | impl core::error::Error for SendErrorTimeout {}
 40 | impl fmt::Display for SendErrorTimeout {
 41 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 42 |         fmt::Display::fmt(
 43 |             match *self {
 44 |                 SendErrorTimeout::Closed => "send to a closed channel",
 45 |                 SendErrorTimeout::ReceiveClosed => "send to a half closed channel",
 46 |                 SendErrorTimeout::Timeout => "send timeout",
 47 |             },
 48 |             f,
 49 |         )
 50 |     }
 51 | }
 52 |
 53 | /// Error type for channel receive operations without timeout
 54 | #[derive(Debug, PartialEq, Eq)]
 55 | pub enum ReceiveError {
 56 |     /// Indicates that the channel is closed on both sides with a call to
 57 |     /// `close()`
 58 |     Closed,
 59 |     /// Indicates that all sender instances are dropped and the channel is
 60 |     /// closed from the send side
 61 |     SendClosed,
 62 | }
 63 | impl core::error::Error for ReceiveError {}
 64 | impl fmt::Display for ReceiveError {
 65 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 66 |         fmt::Display::fmt(
 67 |             match *self {
 68 |                 ReceiveError::Closed => "receive from a closed channel",
 69 |                 ReceiveError::SendClosed => "receive from a half closed channel",
 70 |             },
 71 |             f,
 72 |         )
 73 |     }
 74 | }
 75 |
 76 | /// Error type for channel receive operations with timeout
 77 | #[derive(Debug, PartialEq, Eq)]
 78 | pub enum ReceiveErrorTimeout {
 79 |     /// Indicates that the channel is closed on both sides with a call to
 80 |     /// `close()`
 81 |     Closed,
 82 |     /// Indicates that all sender instances are dropped and the channel is
 83 |     /// closed from the send side
 84 |     SendClosed,
 85 |     /// Indicates that channel operation reached timeout and is canceled
 86 |     Timeout,
 87 | }
 88 | impl core::error::Error for ReceiveErrorTimeout {}
 89 | impl fmt::Display for ReceiveErrorTimeout {
 90 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 91 |         fmt::Display::fmt(
 92 |             match *self {
 93 |                 ReceiveErrorTimeout::Closed => "receive from a closed channel",
 94 |                 ReceiveErrorTimeout::SendClosed => "receive from a half closed channel",
 95 |                 ReceiveErrorTimeout::Timeout => "receive timeout",
 96 |             },
 97 |             f,
 98 |         )
 99 |     }
100 | }
101 |
102 | /// Error type for closing a channel when channel is already closed
103 | #[derive(Debug, PartialEq, Eq)]
104 | pub struct CloseError();
105 | impl core::error::Error for CloseError {}
106 | impl fmt::Display for CloseError {
107 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
108 |         fmt::Display::fmt("channel is already closed", f)
109 |     }
110 | }
111 |
--------------------------------------------------------------------------------
/src/future.rs:
--------------------------------------------------------------------------------
  1 | use crate::{
  2 |     internal::{acquire_internal, Internal},
  3 |     pointer::KanalPtr,
  4 |     signal::Signal,
  5 |     AsyncReceiver, ReceiveError, SendError,
  6 | };
  7 | use core::{
  8 |     fmt::Debug,
  9 |     marker::PhantomPinned,
 10 |     mem::{needs_drop, size_of, MaybeUninit},
 11 |     pin::Pin,
 12 |     task::Poll,
 13 | };
 14 | use futures_core::{FusedStream, Future, Stream};
 15 |
 16 | #[repr(u8)]
 17 | #[derive(PartialEq, Clone, Copy)]
 18 | pub(crate) enum FutureState {
 19 |     Zero,
 20 |     Waiting,
 21 |     Done,
 22 | }
 23 |
 24 | impl FutureState {
 25 |     #[inline(always)]
 26 |     fn is_waiting(&self) -> bool {
 27 |         *self == FutureState::Waiting
 28 |     }
 29 |
 30 |     #[inline(always)]
 31 |     fn is_done(&self) -> bool {
 32 |         *self == FutureState::Done
 33 |     }
 34 | }
 35 |
 36 | /// SendFuture is a future for sending an object to a channel asynchronously.
 37 | /// It must be polled to complete the send operation.
 38 | #[must_use = "futures do nothing unless you .await or poll them"]
 39 | pub struct SendFuture<'a, T> {
 40 |     state: FutureState,
 41 |     internal: &'a Internal<T>,
 42 |     sig: Signal<T>,
 43 |     data: MaybeUninit<T>,
 44 |     _pinned: PhantomPinned,
 45 | }
 46 |
 47 | impl<T> Debug for SendFuture<'_, T> {
 48 |     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
 49 |         write!(f, "SendFuture {{ .. }}")
 50 |     }
 51 | }
 52 |
 53 | impl<T> Drop for SendFuture<'_, T> {
 54 |     fn drop(&mut self) {
 55 |         if !self.state.is_done() {
 56 |             if self.state.is_waiting()
 57 |                 && !acquire_internal(self.internal).cancel_send_signal(&self.sig)
 58 |             {
 59 |                 // a receiver got signal ownership, should wait until the response
 60 |                 if self.sig.async_blocking_wait() {
 61 |                     // no need to drop, the data was moved to the receiver
 62 |                     return;
 63 |                 }
 64 |             }
 65 |             // signal is canceled, or in zero state; drop data locally
 66 |             if needs_drop::<T>() {
 67 |                 // Safety: data is not moved, it's safe to drop it
 68 |                 unsafe {
 69 |                     self.drop_local_data();
 70 |                 }
 71 |             }
 72 |         }
 73 |     }
 74 | }
 75 |
 76 | impl<'a, T> SendFuture<'a, T> {
 77 |     /// Creates a new SendFuture with the given internal channel and data.
 78 |     #[inline(always)]
 79 |     pub(crate) fn new(internal: &'a Internal<T>, data: T) -> Self {
 80 |         if size_of::<T>() > size_of::<*mut T>() {
 81 |             SendFuture {
 82 |                 state: FutureState::Zero,
 83 |                 internal,
 84 |                 sig: Signal::new_async(),
 85 |                 data: MaybeUninit::new(data),
 86 |                 _pinned: PhantomPinned,
 87 |             }
 88 |         } else {
 89 |             SendFuture {
 90 |                 state: FutureState::Zero,
 91 |                 internal,
 92 |                 sig: Signal::new_async_ptr(KanalPtr::new_owned(data)),
 93 |                 data: MaybeUninit::uninit(),
 94 |                 _pinned: PhantomPinned,
 95 |             }
 96 |         }
 97 |     }
 98 |     /// Safety: it's only safe to call this function once and only if send
 99 |     /// operation will finish after this call.
100 |     #[inline(always)]
101 |     unsafe fn read_local_data(&self) -> T {
102 |         if size_of::<T>() > size_of::<*mut T>() {
103 |             // data is larger than a pointer, so it was kept in the future's own
104 |             // buffer rather than in the signal's pointer-sized slot
105 |             core::ptr::read(self.data.as_ptr())
106 |         } else {
107 |             self.sig.assume_init()
108 |         }
109 |     }
110 |     /// Safety: it's only safe to call this function once and only if send
111 |     /// operation fails
112 |     #[inline(always)]
113 |     unsafe fn drop_local_data(&mut self) {
114 |         if size_of::<T>() > size_of::<*mut T>() {
115 |             self.data.assume_init_drop();
116 |         } else {
117 |             self.sig.load_and_drop();
118 |         }
119 |     }
120 | }
121 |
122 | impl<T> Future for SendFuture<'_, T> {
123 |     type Output = Result<(), SendError>;
124 |
125 |     #[inline(always)]
126 |     fn poll(self: Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> Poll<Self::Output> {
127 |         let this = unsafe { self.get_unchecked_mut() };
128 |
129 |         match this.state {
130 |             FutureState::Zero => {
131 |                 let mut internal = acquire_internal(this.internal);
132 |                 if internal.recv_count == 0 {
133 |                     let send_count = internal.send_count;
134 |                     drop(internal);
135 |                     this.state = FutureState::Done;
136 |                     if needs_drop::<T>() {
137 |                         // the data failed to move, drop it locally
138 |                         // Safety: the data is not moved, we are sure that it is initialized at
139 |                         // this point, so it's safe to drop it.
140 |                         unsafe {
141 |                             this.drop_local_data();
142 |                         }
143 |                     }
144 |                     return Poll::Ready(Err(if send_count == 0 {
145 |                         SendError::Closed
146 |                     } else {
147 |                         SendError::ReceiveClosed
148 |                     }));
149 |                 }
150 |                 if let Some(first) = internal.next_recv() {
151 |                     drop(internal);
152 |                     this.state = FutureState::Done;
153 |                     // Safety: data is initialized and available from the constructor
154 |                     unsafe { first.send(this.read_local_data()) }
155 |                     Poll::Ready(Ok(()))
156 |                 } else if internal.queue.len() < internal.capacity {
157 |                     this.state = FutureState::Done;
158 |                     // Safety: data is initialized and available from the constructor
159 |                     internal.queue.push_back(unsafe { this.read_local_data() });
160 |                     drop(internal);
161 |                     Poll::Ready(Ok(()))
162 |                 } else {
163 |                     this.state = FutureState::Waiting;
164 |                     // if T is smaller than register size, we already have data in pointer address
165 |                     // from initialization step
166 |                     if size_of::<T>() > size_of::<*mut T>() {
167 |                         this.sig
168 |                             .set_ptr(KanalPtr::new_unchecked(this.data.as_mut_ptr()));
169 |                     }
170 |                     this.sig.register_waker(cx.waker());
171 |                     // send directly to the waitlist
172 |                     internal.push_send(this.sig.get_terminator());
173 |                     drop(internal);
174 |                     Poll::Pending
175 |                 }
176 |             }
177 |             FutureState::Waiting => match this.sig.poll() {
178 |                 Poll::Ready(success) => {
179 |                     this.state = FutureState::Done;
180 |                     if success {
181 |                         Poll::Ready(Ok(()))
182 |                     } else {
183 |                         if needs_drop::<T>() {
184 |                             // the data failed to move, drop it locally
185 |                             // Safety: the data is not moved, we are sure that it is initialized at
186 |                             // this point, so it's safe to drop it.
187 |                             unsafe {
188 |                                 this.drop_local_data();
189 |                             }
190 |                         }
191 |                         Poll::Ready(Err(SendError::Closed))
192 |                     }
193 |                 }
194 |                 Poll::Pending => {
195 |                     if !this.sig.will_wake(cx.waker()) {
196 |                         // Waker is changed and we need to update waker in the waiting list
197 |                         if acquire_internal(this.internal).send_signal_exists(&this.sig) {
198 |                             // signal is not shared with other thread yet so it's safe to
199 |                             // update waker locally
200 |                             this.sig.register_waker(cx.waker());
201 |                             Poll::Pending
202 |                         } else {
203 |                             // signal is already shared, and data will be available shortly, so wait
204 |                             // synchronously and return the result. Note:
205 |                             // it's not safely possible to update the waker after the signal is shared,
206 |                             // but we know data will be ready shortly,
207 |                             // so we can wait synchronously and receive it.
208 |                             this.state = FutureState::Done;
209 |                             if this.sig.async_blocking_wait() {
210 |                                 Poll::Ready(Ok(()))
211 |                             } else {
212 |                                 // the data failed to move, drop it locally
213 |                                 // Safety: the data is not moved, we are sure that it is initialized
214 |                                 // at this point, so it's safe to
215 |                                 // drop it.
216 |                                 if needs_drop::<T>() {
217 |                                     unsafe {
218 |                                         this.drop_local_data();
219 |                                     }
220 |                                 }
221 |                                 Poll::Ready(Err(SendError::Closed))
222 |                             }
223 |                         }
224 |                     } else {
225 |                         Poll::Pending
226 |                     }
227 |                 }
228 |             },
229 |             _ => panic!("polled after result is already returned"),
230 |         }
231 |     }
232 | }
233 |
234 | /// ReceiveFuture is a future for receiving an object from a channel
235 | /// asynchronously. It must be polled to complete the receive operation.
236 | #[must_use = "futures do nothing unless you .await or poll them"]
237 | pub struct ReceiveFuture<'a, T> {
238 |     state: FutureState,
239 |     is_stream: bool,
240 |     internal: &'a Internal<T>,
241 |     sig: Signal<T>,
242 |     data: MaybeUninit<T>,
243 |     _pinned: PhantomPinned,
244 | }
245 |
246 | impl<T> Drop for ReceiveFuture<'_, T> {
247 |     fn drop(&mut self) {
248 |         if self.state.is_waiting() {
249 |             // try to cancel recv signal
250 |             if !acquire_internal(self.internal).cancel_recv_signal(&self.sig) {
251 |                 // a sender got signal ownership, receiver should wait until the response
252 |                 if self.sig.async_blocking_wait() {
253 |                     // got ownership of data that is not going to be used ever again, so drop it
254 |                     if needs_drop::<T>() {
255 |                         // Safety: data is not moved, it's safe to drop it
256 |                         unsafe {
257 |                             self.drop_local_data();
258 |                         }
259 |                     }
260 |                 }
261 |             }
262 |         }
263 |     }
264 | }
265 |
266 | impl<T> Debug for ReceiveFuture<'_, T> {
267 |     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
268 |         write!(f, "ReceiveFuture {{ .. }}")
269 |     }
270 | }
271 |
272 | impl<'a, T> ReceiveFuture<'a, T> {
273 |     #[inline(always)]
274 |     unsafe fn read_local_data(&self) -> T {
275 |         if size_of::<T>() > size_of::<*mut T>() {
276 |             // data is larger than a pointer, so it was kept in the future's own
277 |             // buffer rather than in the signal's pointer-sized slot
278 |             core::ptr::read(self.data.as_ptr())
279 |         } else {
280 |             self.sig.assume_init()
281 |         }
282 |     }
283 |
284 |     #[inline(always)]
285 |     unsafe fn drop_local_data(&mut self) {
286 |         if size_of::<T>() > size_of::<*mut T>() {
287 |             self.data.assume_init_drop();
288 |         } else {
289 |             self.sig.load_and_drop();
290 |         }
291 |     }
292 |
293 |     #[inline(always)]
294 |     pub(crate) fn new_ref(internal: &'a Internal<T>) -> Self {
295 |         Self {
296 |             state: FutureState::Zero,
297 |             sig: Signal::new_async(),
298 |             internal,
299 |             data: MaybeUninit::uninit(),
300 |             is_stream: false,
301 |             _pinned: PhantomPinned,
302 |         }
303 |     }
304 | }
305 |
306 | impl<T> Future for ReceiveFuture<'_, T> {
307 |     type Output = Result<T, ReceiveError>;
308 |
309 |     #[inline(always)]
310 |     fn poll(self: Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> Poll<Self::Output> {
311 |         let this = unsafe { self.get_unchecked_mut() };
312 |
313 |         loop {
314 |             return match this.state {
315 |                 FutureState::Zero => {
316 |                     let mut internal = acquire_internal(this.internal);
317 |                     if internal.recv_count == 0 {
318 |                         this.state = FutureState::Done;
319 |                         return Poll::Ready(Err(ReceiveError::Closed));
320 |                     }
321 |                     if let Some(v) = internal.queue.pop_front() {
322 |                         if let Some(t) = internal.next_send() {
323 |                             // if there is a sender take its data and push it into the queue
324 |                             unsafe { internal.queue.push_back(t.recv()) }
325 |                         }
326 |                         drop(internal);
327 |                         this.state = FutureState::Done;
328 |                         Poll::Ready(Ok(v))
329 |                     } else if let Some(t) = internal.next_send() {
330 |                         drop(internal);
331 |                         this.state = FutureState::Done;
332 |                         Poll::Ready(Ok(unsafe { t.recv() }))
333 |                     } else {
334 |                         if internal.send_count == 0 {
335 |                             this.state = FutureState::Done;
336 |                             return Poll::Ready(Err(ReceiveError::SendClosed));
337 |                         }
338 |                         this.state = FutureState::Waiting;
339 |                         if size_of::<T>() > size_of::<*mut T>() {
340 |                             // if type T is smaller than the register size, it does not need pointer
341 |                             // setup as data will be stored in the register address object
342 |                             this.sig
343 |                                 .set_ptr(KanalPtr::new_unchecked(this.data.as_mut_ptr()));
344 |                         }
345 |                         this.sig.register_waker(cx.waker());
346 |                         // no active waiter so push to the queue
347 |                         internal.push_recv(this.sig.get_terminator());
348 |                         drop(internal);
349 |                         Poll::Pending
350 |                     }
351 |                 }
352 |                 FutureState::Waiting => match this.sig.poll() {
353 |                     Poll::Ready(success) => {
354 |                         this.state = FutureState::Done;
355 |                         if success {
356 |                             Poll::Ready(Ok(unsafe { this.read_local_data() }))
357 |                         } else {
358 |                             Poll::Ready(Err(ReceiveError::Closed))
359 |                         }
360 |                     }
361 |                     Poll::Pending => {
362 |                         if !this.sig.will_wake(cx.waker()) {
363 |                             // the Waker is changed and we need to update waker in the waiting
364 |                             // list
365 |                             if acquire_internal(this.internal).recv_signal_exists(&this.sig) {
366 |                                 // signal is not shared with other thread yet so it's safe
367 |                                 // to update waker locally
368 |                                 this.sig.register_waker(cx.waker());
369 |                                 Poll::Pending
370 |                             } else {
371 |                                 // the signal is already shared, and data will be available shortly,
372 |                                 // so wait synchronously and return the result
373 |                                 // note: it's not possible safely to update waker after the signal
374 |                                 // is shared, but we know data will be ready shortly,
375 |                                 // we can wait synchronously and receive it.
376 |                                 this.state = FutureState::Done;
377 |                                 if this.sig.async_blocking_wait() {
378 |                                     Poll::Ready(Ok(unsafe { this.read_local_data() }))
379 |                                 } else {
380 |                                     Poll::Ready(Err(ReceiveError::Closed))
381 |                                 }
382 |                             }
383 |                         } else {
384 |                             Poll::Pending
385 |                         }
386 |                     }
387 |                 },
388 |                 _ => {
389 |                     if this.is_stream {
390 |                         this.state = FutureState::Zero;
391 |                         continue;
392 |                     }
393 |                     panic!("polled after result is already returned")
394 |                 }
395 |             };
396 |         }
397 |     }
398 | }
399 |
400 | /// ReceiveStream is a stream for receiving objects from a channel
401 | /// asynchronously.
402 | pub struct ReceiveStream<'a, T: 'a> {
403 |     future: Pin<Box<ReceiveFuture<'a, T>>>,
404 |     terminated: bool,
405 |     receiver: &'a AsyncReceiver<T>,
406 | }
407 |
408 | impl<T> Debug for ReceiveStream<'_, T> {
409 |     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
410 |         write!(f, "ReceiveStream {{ .. }}")
}}") 411 | } 412 | } 413 | 414 | impl Stream for ReceiveStream<'_, T> { 415 | type Item = T; 416 | 417 | fn poll_next( 418 | mut self: Pin<&mut Self>, 419 | cx: &mut core::task::Context<'_>, 420 | ) -> Poll> { 421 | if self.terminated { 422 | return Poll::Ready(None); 423 | } 424 | // Safety: future is pinned as stream is pinned to a location too 425 | match self.future.as_mut().poll(cx) { 426 | Poll::Ready(res) => match res { 427 | Ok(d) => Poll::Ready(Some(d)), 428 | Err(_) => { 429 | self.terminated = true; 430 | Poll::Ready(None) 431 | } 432 | }, 433 | Poll::Pending => Poll::Pending, 434 | } 435 | } 436 | } 437 | 438 | impl FusedStream for ReceiveStream<'_, T> { 439 | fn is_terminated(&self) -> bool { 440 | self.receiver.is_terminated() 441 | } 442 | } 443 | 444 | impl<'a, T> ReceiveStream<'a, T> { 445 | pub(crate) fn new_borrowed(receiver: &'a AsyncReceiver) -> Self { 446 | let mut future = receiver.recv(); 447 | future.is_stream = true; 448 | ReceiveStream { 449 | future: Box::pin(future), 450 | terminated: false, 451 | receiver, 452 | } 453 | } 454 | } 455 | -------------------------------------------------------------------------------- /src/internal.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = "std-mutex"))] 2 | use crate::mutex::{Mutex, MutexGuard}; 3 | use crate::signal::{Signal, SignalTerminator}; 4 | extern crate alloc; 5 | use alloc::{collections::VecDeque, sync::Arc}; 6 | #[cfg(feature = "std-mutex")] 7 | use std::sync::{Mutex, MutexGuard}; 8 | 9 | pub(crate) type Internal = Arc>>; 10 | 11 | /// Acquire mutex guard on channel internal for use in channel operations 12 | #[inline(always)] 13 | pub(crate) fn acquire_internal(internal: &'_ Internal) -> MutexGuard<'_, ChannelInternal> { 14 | #[cfg(not(feature = "std-mutex"))] 15 | return internal.lock(); 16 | #[cfg(feature = "std-mutex")] 17 | internal.lock().unwrap_or_else(|err| err.into_inner()) 18 | } 19 | 20 | /// Tries to acquire mutex guard on channel internal for use in channel 21 | /// operations 22 | #[inline(always)] 23 | pub(crate) fn try_acquire_internal( 24 | internal: &'_ Internal, 25 | ) -> Option>> { 26 | #[cfg(not(feature = "std-mutex"))] 27 | return internal.try_lock(); 28 | #[cfg(feature = "std-mutex")] 29 | internal.try_lock().ok() 30 | } 31 | 32 | /// Internal of the channel that holds queues, waitlists, and general state of 33 | /// the channel, it's shared among senders and receivers with an atomic 34 | /// counter and a mutex 35 | pub(crate) struct ChannelInternal { 36 | // KEEP THE ORDER 37 | /// Channel queue to save buffered objects 38 | pub(crate) queue: VecDeque, 39 | /// It's true if the signals in the waiting list are recv signals 40 | pub(crate) recv_blocking: bool, 41 | /// Receive and Send waitlist for when the channel queue is empty or zero 42 | /// capacity for recv or full for send. 43 | pub(crate) wait_list: VecDeque>, 44 | /// The capacity of the channel buffer 45 | pub(crate) capacity: usize, 46 | /// Count of alive receivers 47 | pub(crate) recv_count: u32, 48 | /// Count of alive senders 49 | pub(crate) send_count: u32, 50 | } 51 | 52 | // Safety: It is safe to implement `Send` for `ChannelInternal` if `T` is 53 | // `Send`. 
 54 | unsafe impl<T: Send> Send for ChannelInternal<T> {}
 55 |
 56 | impl<T> ChannelInternal<T> {
 57 |     /// Returns a channel internal with the required capacity
 58 |     #[inline(always)]
 59 |     pub(crate) fn new(bounded: bool, capacity: usize) -> Internal<T> {
 60 |         let mut abstract_capacity = capacity;
 61 |         if !bounded {
 62 |             // act like there is no limit
 63 |             abstract_capacity = usize::MAX;
 64 |         }
 65 |         let wait_list_size = if capacity == 0 { 8 } else { 4 };
 66 |         let ret = Self {
 67 |             queue: VecDeque::with_capacity(capacity),
 68 |             recv_blocking: false,
 69 |             wait_list: VecDeque::with_capacity(wait_list_size),
 70 |             recv_count: 1,
 71 |             send_count: 1,
 72 |             capacity: abstract_capacity,
 73 |         };
 74 |
 75 |         Arc::new(Mutex::from(ret))
 76 |     }
 77 |
 78 |     /// Terminates the remaining signals in the queue to notify listeners about
 79 |     /// the closing of the channel
 80 |     pub(crate) fn terminate_signals(&mut self) {
 81 |         for t in self.wait_list.iter() {
 82 |             // Safety: it's safe to terminate owned signal once
 83 |             unsafe { t.terminate() }
 84 |         }
 85 |         self.wait_list.clear();
 86 |     }
 87 |
 88 |     /// Returns next signal for sender from the waitlist
 89 |     #[inline(always)]
 90 |     pub(crate) fn next_send(&mut self) -> Option<SignalTerminator<T>> {
 91 |         if self.recv_blocking {
 92 |             return None;
 93 |         }
 94 |         match self.wait_list.pop_front() {
 95 |             Some(sig) => Some(sig),
 96 |             None => {
 97 |                 self.recv_blocking = true;
 98 |                 None
 99 |             }
100 |         }
101 |     }
102 |
103 |     /// Adds new sender signal to the waitlist
104 |     #[inline(always)]
105 |     pub(crate) fn push_send(&mut self, s: SignalTerminator<T>) {
106 |         self.wait_list.push_back(s);
107 |     }
108 |
109 |     /// Returns the next signal for the receiver in the waitlist
110 |     #[inline(always)]
111 |     pub(crate) fn next_recv(&mut self) -> Option<SignalTerminator<T>> {
112 |         if !self.recv_blocking {
113 |             return None;
114 |         }
115 |         match self.wait_list.pop_front() {
116 |             Some(sig) => Some(sig),
117 |             None => {
118 |                 self.recv_blocking = false;
119 |                 None
120 |             }
121 |         }
122 |     }
123 |
124 |     /// Adds new receiver signal to the waitlist
125 |     #[inline(always)]
126 |     pub(crate) fn push_recv(&mut self, s: SignalTerminator<T>) {
127 |         self.wait_list.push_back(s);
128 |     }
129 |
130 |     /// Tries to remove the send signal from the waitlist, returns true if the
131 |     /// operation was successful
132 |     pub(crate) fn cancel_send_signal(&mut self, sig: &Signal<T>) -> bool {
133 |         if !self.recv_blocking {
134 |             for (i, send) in self.wait_list.iter().enumerate() {
135 |                 if send.eq(sig) {
136 |                     self.wait_list.remove(i);
137 |                     return true;
138 |                 }
139 |             }
140 |         }
141 |         false
142 |     }
143 |
144 |     /// Tries to remove the receive signal from the waitlist, returns true if
145 |     /// the operation was successful
146 |     pub(crate) fn cancel_recv_signal(&mut self, sig: &Signal<T>) -> bool {
147 |         if self.recv_blocking {
148 |             for (i, recv) in self.wait_list.iter().enumerate() {
149 |                 if recv.eq(sig) {
150 |                     self.wait_list.remove(i);
151 |                     return true;
152 |                 }
153 |             }
154 |         }
155 |         false
156 |     }
157 |
158 |     /// checks if send signal exists in wait list
159 |     #[cfg(feature = "async")]
160 |     pub(crate) fn send_signal_exists(&self, sig: &Signal<T>) -> bool {
161 |         if !self.recv_blocking {
162 |             for signal in self.wait_list.iter() {
163 |                 if signal.eq(sig) {
164 |                     return true;
165 |                 }
166 |             }
167 |         }
168 |         false
169 |     }
170 |
171 |     /// checks if receive signal exists in wait list
172 |     #[cfg(feature = "async")]
173 |     pub(crate) fn recv_signal_exists(&self, sig: &Signal<T>) -> bool {
174 |         if self.recv_blocking {
175 |             for signal in self.wait_list.iter() {
176 |                 if signal.eq(sig) {
177 |                     return true;
178 |                 }
179 |             }
180 |         }
181 |         false
182 |     }
183 | }
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
  1 | #![doc = include_str!("../README.md")]
  2 | #![warn(missing_docs, missing_debug_implementations)]
  3 |
  4 | pub(crate) mod backoff;
  5 | pub(crate) mod internal;
  6 | #[cfg(not(feature = "std-mutex"))]
  7 | pub(crate) mod mutex;
  8 | pub(crate) mod pointer;
  9 |
 10 | mod error;
 11 | #[cfg(feature = "async")]
 12 | mod future;
 13 | mod signal;
 14 |
 15 | pub use error::*;
 16 | #[cfg(feature = "async")]
 17 | pub use future::*;
 18 |
 19 | #[cfg(feature = "async")]
 20 | use core::mem::transmute;
 21 | use core::{
 22 |     fmt,
 23 |     mem::{needs_drop, size_of, MaybeUninit},
 24 |     time::Duration,
 25 | };
 26 | use std::time::Instant;
 27 |
 28 | use internal::{acquire_internal, try_acquire_internal, ChannelInternal, Internal};
 29 | use pointer::KanalPtr;
 30 | use signal::*;
 31 |
 32 | /// Sending side of the channel with sync API. It's possible to convert it to
 33 | /// async [`AsyncSender`] with `as_async`, `to_async` or `clone_async` based on
 34 | /// software requirements.
 35 | #[cfg_attr(
 36 |     feature = "async",
 37 |     doc = r##"
 38 | # Examples
 39 |
 40 | ```
 41 | let (sender, _r) = kanal::bounded::<u64>(0);
 42 | let async_sender = sender.clone_async();
 43 | ```
 44 | "##
 45 | )]
 46 | #[repr(C)]
 47 | pub struct Sender<T> {
 48 |     internal: Internal<T>,
 49 | }
 50 |
 51 | /// Sending side of the channel with async API. It's possible to convert it to
 52 | /// sync [`Sender`] with `as_sync`, `to_sync` or `clone_sync` based on software
 53 | /// requirements.
 54 | ///
 55 | /// # Examples
 56 | ///
 57 | /// ```
 58 | /// let (sender, _r) = kanal::bounded_async::<u64>(0);
 59 | /// let sync_sender = sender.clone_sync();
 60 | /// ```
 61 | #[cfg(feature = "async")]
 62 | #[repr(C)]
 63 | pub struct AsyncSender<T> {
 64 |     internal: Internal<T>,
 65 | }
 66 |
 67 | impl<T> Drop for Sender<T> {
 68 |     fn drop(&mut self) {
 69 |         let mut internal = acquire_internal(&self.internal);
 70 |         if internal.send_count > 0 {
 71 |             internal.send_count -= 1;
 72 |             if internal.send_count == 0 && internal.recv_count != 0 {
 73 |                 internal.terminate_signals();
 74 |             }
 75 |         }
 76 |     }
 77 | }
 78 |
 79 | #[cfg(feature = "async")]
 80 | impl<T> Drop for AsyncSender<T> {
 81 |     fn drop(&mut self) {
 82 |         let mut internal = acquire_internal(&self.internal);
 83 |         if internal.send_count > 0 {
 84 |             internal.send_count -= 1;
 85 |             if internal.send_count == 0 && internal.recv_count != 0 {
 86 |                 internal.terminate_signals();
 87 |             }
 88 |         }
 89 |     }
 90 | }
 91 |
 92 | impl<T> Clone for Sender<T> {
 93 |     fn clone(&self) -> Self {
 94 |         let mut internal = acquire_internal(&self.internal);
 95 |         if internal.send_count > 0 {
 96 |             internal.send_count += 1;
 97 |         }
 98 |         drop(internal);
 99 |         Self {
100 |             internal: self.internal.clone(),
101 |         }
102 |     }
103 | }
104 |
105 | impl<T> fmt::Debug for Sender<T> {
106 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
107 |         write!(f, "Sender {{ .. }}")
108 |     }
109 | }
110 |
111 | #[cfg(feature = "async")]
112 | impl<T> Clone for AsyncSender<T> {
113 |     fn clone(&self) -> Self {
114 |         let mut internal = acquire_internal(&self.internal);
115 |         if internal.send_count > 0 {
116 |             internal.send_count += 1;
117 |         }
118 |         drop(internal);
119 |         Self {
120 |             internal: self.internal.clone(),
121 |         }
122 |     }
123 | }
124 |
125 | #[cfg(feature = "async")]
126 | impl<T> fmt::Debug for AsyncSender<T> {
127 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
128 |         write!(f, "AsyncSender {{ .. }}")
}}") 129 | } 130 | } 131 | 132 | macro_rules! shared_impl { 133 | () => { 134 | /// Returns whether the channel is bounded or not. 135 | /// 136 | /// # Examples 137 | /// 138 | /// ``` 139 | /// let (s, r) = kanal::bounded::<u64>(0); 140 | /// assert_eq!(s.is_bounded(),true); 141 | /// assert_eq!(r.is_bounded(),true); 142 | /// ``` 143 | /// ``` 144 | /// let (s, r) = kanal::unbounded::<u64>(); 145 | /// assert_eq!(s.is_bounded(),false); 146 | /// assert_eq!(r.is_bounded(),false); 147 | /// ``` 148 | pub fn is_bounded(&self) -> bool { 149 | acquire_internal(&self.internal).capacity != usize::MAX 150 | } 151 | /// Returns the length of the queue. 152 | /// 153 | /// # Examples 154 | /// 155 | /// ``` 156 | /// let (s, r) = kanal::unbounded::<u64>(); 157 | /// assert_eq!(s.len(),0); 158 | /// assert_eq!(r.len(),0); 159 | /// s.send(10); 160 | /// assert_eq!(s.len(),1); 161 | /// assert_eq!(r.len(),1); 162 | /// ``` 163 | pub fn len(&self) -> usize { 164 | acquire_internal(&self.internal).queue.len() 165 | } 166 | /// Returns whether the channel queue is empty or not. 167 | /// 168 | /// # Examples 169 | /// 170 | /// ``` 171 | /// let (s, r) = kanal::unbounded::<u64>(); 172 | /// assert_eq!(s.is_empty(),true); 173 | /// assert_eq!(r.is_empty(),true); 174 | /// ``` 175 | pub fn is_empty(&self) -> bool { 176 | acquire_internal(&self.internal).queue.is_empty() 177 | } 178 | /// Returns whether the channel queue is full or not. 179 | /// Full channels block on send and recv calls. 180 | /// It always returns true for zero-sized channels. 181 | /// 182 | /// # Examples 183 | /// 184 | /// ``` 185 | /// let (s, r) = kanal::bounded(1); 186 | /// s.send("Hi!").unwrap(); 187 | /// assert_eq!(s.is_full(),true); 188 | /// assert_eq!(r.is_full(),true); 189 | /// ``` 190 | pub fn is_full(&self) -> bool { 191 | let internal = acquire_internal(&self.internal); 192 | internal.capacity == internal.queue.len() 193 | } 194 | /// Returns the capacity of the channel (not the current queue length). 195 | /// For unbounded channels, it returns usize::MAX. 196 | /// 197 | /// # Examples 198 | /// 199 | /// ``` 200 | /// let (s, r) = kanal::bounded::<u64>(0); 201 | /// assert_eq!(s.capacity(),0); 202 | /// assert_eq!(r.capacity(),0); 203 | /// ``` 204 | /// ``` 205 | /// let (s, r) = kanal::unbounded::<u64>(); 206 | /// assert_eq!(s.capacity(),usize::MAX); 207 | /// assert_eq!(r.capacity(),usize::MAX); 208 | /// ``` 209 | pub fn capacity(&self) -> usize { 210 | acquire_internal(&self.internal).capacity 211 | } 212 | /// Returns the count of alive receiver instances of the channel. 213 | /// 214 | /// # Examples 215 | /// 216 | /// ``` 217 | /// let (s, r) = kanal::unbounded::<u64>(); 218 | /// let receiver_clone=r.clone(); 219 | /// assert_eq!(r.receiver_count(),2); 220 | /// ``` 221 | pub fn receiver_count(&self) -> u32 { 222 | acquire_internal(&self.internal).recv_count 223 | } 224 | /// Returns the count of alive sender instances of the channel. 225 | /// 226 | /// # Examples 227 | /// 228 | /// ``` 229 | /// let (s, r) = kanal::unbounded::<u64>(); 230 | /// let sender_clone=s.clone(); 231 | /// assert_eq!(r.sender_count(),2); 232 | /// ``` 233 | pub fn sender_count(&self) -> u32 { 234 | acquire_internal(&self.internal).send_count 235 | } 236 | /// Closes the channel completely on both sides and terminates waiting 237 | /// signals.
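The `shared_impl!` macro above generates the same introspection surface on every sender and receiver flavor. A minimal sketch of how those methods compose, using only calls shown in this file:

```rust
fn main() {
    let (s, r) = kanal::bounded::<u32>(2);
    assert!(s.is_bounded());
    assert_eq!(s.capacity(), 2);
    assert!(r.is_empty());
    s.send(7).unwrap(); // there is queue space, so this does not block
    assert_eq!(r.len(), 1);
    assert!(!r.is_full());
    assert_eq!(s.receiver_count(), 1);
    assert_eq!(r.sender_count(), 1);
    s.close().unwrap(); // closes both sides and clears the queue
    assert!(r.is_closed());
}
```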
238 | /// 239 | /// # Examples 240 | /// 241 | /// ``` 242 | /// let (s, r) = kanal::unbounded::<u64>(); 243 | /// // closes the channel on both sides; it has the same effect as r.close(); 244 | /// s.close().unwrap(); 245 | /// assert_eq!(r.is_closed(),true); 246 | /// assert_eq!(s.is_closed(),true); 247 | /// ``` 248 | pub fn close(&self) -> Result<(), CloseError> { 249 | let mut internal = acquire_internal(&self.internal); 250 | if internal.recv_count == 0 && internal.send_count == 0 { 251 | return Err(CloseError()); 252 | } 253 | internal.recv_count = 0; 254 | internal.send_count = 0; 255 | internal.terminate_signals(); 256 | internal.queue.clear(); 257 | Ok(()) 258 | } 259 | /// Returns whether the channel is closed on both the send and receive 260 | /// sides or not. 261 | /// 262 | /// # Examples 263 | /// 264 | /// ``` 265 | /// let (s, r) = kanal::unbounded::<u64>(); 266 | /// // closes the channel on both sides; it has the same effect as r.close(); 267 | /// s.close(); 268 | /// assert_eq!(r.is_closed(),true); 269 | /// assert_eq!(s.is_closed(),true); 270 | /// ``` 271 | pub fn is_closed(&self) -> bool { 272 | let internal = acquire_internal(&self.internal); 273 | internal.send_count == 0 && internal.recv_count == 0 274 | } 275 | }; 276 | } 277 | 278 | macro_rules! shared_send_impl { 279 | () => { 280 | /// Tries sending to the channel without waiting on the waitlist; if the 281 | /// send fails, the object will be dropped. It returns `Ok(true)` in 282 | /// case of a successful operation and `Ok(false)` for a failed one, or 283 | /// an error in case the channel is closed. Important note: this function 284 | /// is not lock-free, as it acquires a mutex guard of the channel 285 | /// internals for a short time. 286 | /// 287 | /// # Examples 288 | /// 289 | /// ``` 290 | /// # use std::thread::spawn; 291 | /// let (s, r) = kanal::bounded(0); 292 | /// let t=spawn( move || { 293 | /// loop{ 294 | /// if s.try_send(1).unwrap(){ 295 | /// break; 296 | /// } 297 | /// } 298 | /// }); 299 | /// assert_eq!(r.recv()?,1); 300 | /// # t.join(); 301 | /// # anyhow::Ok(()) 302 | /// ``` 303 | #[inline(always)] 304 | pub fn try_send(&self, data: T) -> Result<bool, SendError> { 305 | let mut internal = acquire_internal(&self.internal); 306 | if internal.recv_count == 0 { 307 | let send_count = internal.send_count; 308 | // Avoid wasting lock time on dropping failed send object 309 | drop(internal); 310 | if send_count == 0 { 311 | return Err(SendError::Closed); 312 | } 313 | return Err(SendError::ReceiveClosed); 314 | } 315 | if let Some(first) = internal.next_recv() { 316 | drop(internal); 317 | // Safety: it's safe to send to owned signal once 318 | unsafe { first.send(data) } 319 | return Ok(true); 320 | } else if internal.queue.len() < internal.capacity { 321 | internal.queue.push_back(data); 322 | return Ok(true); 323 | } 324 | Ok(false) 325 | } 326 | 327 | /// Tries sending to the channel without waiting on the waitlist; on 328 | /// failure the object stays inside the provided `Option`. It returns 329 | /// `Ok(true)` in case of a successful operation and `Ok(false)` for a 330 | /// failed one, or an error in case the channel is closed. Important 331 | /// note: this function is not lock-free, as it acquires a mutex guard 332 | /// of the channel internals for a short time.
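`try_send` never parks the caller, so a producer that must make progress typically pairs it with its own retry policy. A minimal sketch; the 50 µs sleep is an arbitrary backoff chosen for illustration:

```rust
use std::{thread, time::Duration};

fn main() {
    let (s, r) = kanal::bounded::<u64>(0);
    let producer = thread::spawn(move || {
        // Retry until a receiver is parked on the zero-capacity channel.
        while !s.try_send(42).unwrap() {
            thread::sleep(Duration::from_micros(50));
        }
    });
    assert_eq!(r.recv().unwrap(), 42);
    producer.join().unwrap();
}
```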
333 | /// 334 | /// # Examples 335 | /// 336 | /// ``` 337 | /// # use std::thread::spawn; 338 | /// let (s, r) = kanal::bounded(0); 339 | /// let t=spawn( move || { 340 | /// let mut opt=Some(1); 341 | /// loop{ 342 | /// if s.try_send_option(&mut opt).unwrap(){ 343 | /// break; 344 | /// } 345 | /// } 346 | /// }); 347 | /// assert_eq!(r.recv()?,1); 348 | /// # t.join(); 349 | /// # anyhow::Ok(()) 350 | /// ``` 351 | #[inline(always)] 352 | pub fn try_send_option(&self, data: &mut Option) -> Result { 353 | if data.is_none() { 354 | panic!("send data option is None"); 355 | } 356 | let mut internal = acquire_internal(&self.internal); 357 | if internal.recv_count == 0 { 358 | let send_count = internal.send_count; 359 | // Avoid wasting lock time on dropping failed send object 360 | drop(internal); 361 | if send_count == 0 { 362 | return Err(SendError::Closed); 363 | } 364 | return Err(SendError::ReceiveClosed); 365 | } 366 | if let Some(first) = internal.next_recv() { 367 | drop(internal); 368 | // Safety: it's safe to send to owned signal once 369 | unsafe { first.send(data.take().unwrap()) } 370 | return Ok(true); 371 | } else if internal.queue.len() < internal.capacity { 372 | internal.queue.push_back(data.take().unwrap()); 373 | return Ok(true); 374 | } 375 | Ok(false) 376 | } 377 | 378 | /// Tries sending to the channel without waiting on the waitlist or for 379 | /// the internal mutex, if send fails then the object will be dropped. 380 | /// It returns `Ok(true)` in case of a successful operation and 381 | /// `Ok(false)` for a failed one, or error in case that channel is 382 | /// closed. Do not use this function unless you know exactly what you 383 | /// are doing. 384 | /// 385 | /// # Examples 386 | /// 387 | /// ``` 388 | /// # use std::thread::spawn; 389 | /// let (s, r) = kanal::bounded(0); 390 | /// let t=spawn( move || { 391 | /// loop{ 392 | /// if s.try_send_realtime(1).unwrap(){ 393 | /// break; 394 | /// } 395 | /// } 396 | /// }); 397 | /// assert_eq!(r.recv()?,1); 398 | /// # t.join(); 399 | /// # anyhow::Ok(()) 400 | /// ``` 401 | #[inline(always)] 402 | pub fn try_send_realtime(&self, data: T) -> Result { 403 | if let Some(mut internal) = try_acquire_internal(&self.internal) { 404 | if internal.recv_count == 0 { 405 | let send_count = internal.send_count; 406 | // Avoid wasting lock time on dropping failed send object 407 | drop(internal); 408 | if send_count == 0 { 409 | return Err(SendError::Closed); 410 | } 411 | return Err(SendError::ReceiveClosed); 412 | } 413 | if let Some(first) = internal.next_recv() { 414 | drop(internal); 415 | // Safety: it's safe to send to owned signal once 416 | unsafe { first.send(data) } 417 | return Ok(true); 418 | } else if internal.queue.len() < internal.capacity { 419 | internal.queue.push_back(data); 420 | return Ok(true); 421 | } 422 | } 423 | Ok(false) 424 | } 425 | 426 | /// Tries sending to the channel without waiting on the waitlist or 427 | /// channel internal lock. It returns `Ok(true)` in case of a successful 428 | /// operation and `Ok(false)` for a failed one, or error in case that 429 | /// channel is closed. This function will `panic` on successful send 430 | /// attempt of `None` data. Do not use this function unless you know 431 | /// exactly what you are doing. 
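For payloads that are expensive to rebuild, the `Option` variant lets the caller keep ownership across failed attempts; the value is taken out only on the attempt that succeeds. A minimal sketch:

```rust
use std::{thread, time::Duration};

fn main() {
    let (s, r) = kanal::bounded::<String>(0);
    let producer = thread::spawn(move || {
        let mut payload = Some(String::from("expensive-to-rebuild"));
        // The value survives every failed attempt inside the Option...
        while !s.try_send_option(&mut payload).unwrap() {
            thread::sleep(Duration::from_micros(50));
        }
        // ...and is moved out only by the successful send.
        assert!(payload.is_none());
    });
    assert_eq!(r.recv().unwrap(), "expensive-to-rebuild");
    producer.join().unwrap();
}
```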
432 | /// 433 | /// # Examples 434 | /// 435 | /// ``` 436 | /// # use std::thread::spawn; 437 | /// let (s, r) = kanal::bounded(0); 438 | /// let t=spawn( move || { 439 | /// let mut opt=Some(1); 440 | /// loop{ 441 | /// if s.try_send_option_realtime(&mut opt).unwrap(){ 442 | /// break; 443 | /// } 444 | /// } 445 | /// }); 446 | /// assert_eq!(r.recv()?,1); 447 | /// # t.join(); 448 | /// # anyhow::Ok(()) 449 | /// ``` 450 | #[inline(always)] 451 | pub fn try_send_option_realtime(&self, data: &mut Option) -> Result { 452 | if data.is_none() { 453 | panic!("send data option is None"); 454 | } 455 | if let Some(mut internal) = try_acquire_internal(&self.internal) { 456 | if internal.recv_count == 0 { 457 | let send_count = internal.send_count; 458 | // Avoid wasting lock time on dropping failed send object 459 | drop(internal); 460 | if send_count == 0 { 461 | return Err(SendError::Closed); 462 | } 463 | return Err(SendError::ReceiveClosed); 464 | } 465 | if let Some(first) = internal.next_recv() { 466 | drop(internal); 467 | // Safety: it's safe to send to owned signal once 468 | unsafe { first.send(data.take().unwrap()) } 469 | return Ok(true); 470 | } else if internal.queue.len() < internal.capacity { 471 | internal.queue.push_back(data.take().unwrap()); 472 | return Ok(true); 473 | } 474 | } 475 | Ok(false) 476 | } 477 | 478 | /// Returns whether the receive side of the channel is closed or not. 479 | /// 480 | /// # Examples 481 | /// 482 | /// ``` 483 | /// let (s, r) = kanal::unbounded::(); 484 | /// drop(r); // drop receiver and disconnect the receive side from the channel 485 | /// assert_eq!(s.is_disconnected(),true); 486 | /// # anyhow::Ok(()) 487 | /// ``` 488 | pub fn is_disconnected(&self) -> bool { 489 | acquire_internal(&self.internal).recv_count == 0 490 | } 491 | }; 492 | } 493 | 494 | macro_rules! shared_recv_impl { 495 | () => { 496 | /// Tries receiving from the channel without waiting on the waitlist. 497 | /// It returns `Ok(Some(T))` in case of successful operation and 498 | /// `Ok(None)` for a failed one, or error in case that channel is 499 | /// closed. Important note: this function is not lock-free as it 500 | /// acquires a mutex guard of the channel internal for a short time. 
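The `_realtime` variants additionally give up when the internal mutex is contended (via `try_acquire_internal`), which makes them usable on paths that must never wait. A best-effort sketch; `push_sample` is a hypothetical helper, not part of the crate:

```rust
// Hypothetical helper: drop the sample rather than ever waiting here.
fn push_sample(s: &kanal::Sender<u64>, sample: u64) {
    // Ok(false) (full or lock contended) and Err (closed) are both ignored.
    let _ = s.try_send_realtime(sample);
}

fn main() {
    let (s, r) = kanal::bounded::<u64>(8);
    push_sample(&s, 1);
    assert_eq!(r.try_recv().unwrap(), Some(1));
}
```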
501 | /// 502 | /// # Examples 503 | /// 504 | /// ``` 505 | /// # use std::thread::spawn; 506 | /// # let (s, r) = kanal::bounded(0); 507 | /// # let t=spawn(move || { 508 | /// # s.send("Buddy")?; 509 | /// # anyhow::Ok(()) 510 | /// # }); 511 | /// loop { 512 | /// if let Some(name)=r.try_recv()?{ 513 | /// println!("Hello {}!",name); 514 | /// break; 515 | /// } 516 | /// } 517 | /// # t.join(); 518 | /// # anyhow::Ok(()) 519 | /// ``` 520 | #[inline(always)] 521 | pub fn try_recv(&self) -> Result<Option<T>, ReceiveError> { 522 | let mut internal = acquire_internal(&self.internal); 523 | if internal.recv_count == 0 { 524 | return Err(ReceiveError::Closed); 525 | } 526 | if let Some(v) = internal.queue.pop_front() { 527 | if let Some(p) = internal.next_send() { 528 | // if there is a sender, take its data and push it into the 529 | // queue. Safety: it's safe to receive from an owned 530 | // signal once 531 | unsafe { internal.queue.push_back(p.recv()) } 532 | } 533 | return Ok(Some(v)); 534 | } else if let Some(p) = internal.next_send() { 535 | // Safety: it's safe to receive from owned signal once 536 | drop(internal); 537 | return unsafe { Ok(Some(p.recv())) }; 538 | } 539 | if internal.send_count == 0 { 540 | return Err(ReceiveError::SendClosed); 541 | } 542 | Ok(None) 543 | // no message is ready and the channel is still open 544 | } 545 | /// Tries receiving from the channel without waiting on the waitlist or 546 | /// for the channel's internal lock. It returns `Ok(Some(T))` in case 547 | /// of a successful operation and `Ok(None)` for a failed one, or an 548 | /// error in case the channel is closed. Do not use this function unless 549 | /// you know exactly what you are doing. 550 | /// 551 | /// # Examples 552 | /// 553 | /// ``` 554 | /// # use std::thread::spawn; 555 | /// # let (s, r) = kanal::bounded(0); 556 | /// # let t=spawn(move || { 557 | /// # s.send("Buddy")?; 558 | /// # anyhow::Ok(()) 559 | /// # }); 560 | /// loop { 561 | /// if let Some(name)=r.try_recv_realtime()?{ 562 | /// println!("Hello {}!",name); 563 | /// break; 564 | /// } 565 | /// } 566 | /// # t.join(); 567 | /// # anyhow::Ok(()) 568 | /// ``` 569 | #[inline(always)] 570 | pub fn try_recv_realtime(&self) -> Result<Option<T>, ReceiveError> { 571 | if let Some(mut internal) = try_acquire_internal(&self.internal) { 572 | if internal.recv_count == 0 { 573 | return Err(ReceiveError::Closed); 574 | } 575 | if let Some(v) = internal.queue.pop_front() { 576 | if let Some(p) = internal.next_send() { 577 | // if there is a sender, take its data and push it into 578 | // the queue. Safety: it's safe to 579 | // receive from an owned signal once 580 | unsafe { internal.queue.push_back(p.recv()) } 581 | } 582 | return Ok(Some(v)); 583 | } else if let Some(p) = internal.next_send() { 584 | // Safety: it's safe to receive from owned signal once 585 | drop(internal); 586 | return unsafe { Ok(Some(p.recv())) }; 587 | } 588 | if internal.send_count == 0 { 589 | return Err(ReceiveError::SendClosed); 590 | } 591 | } 592 | Ok(None) 593 | } 594 | 595 | /// Drains all available messages from the channel into the provided vector and returns the number of received messages. 596 | /// 597 | /// The function is designed to be non-blocking, meaning it only processes messages that are readily available and returns 598 | /// immediately with whatever messages are present. It provides a count of received messages, which could be zero if no 599 | /// messages are available at the time of the call.
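A polling consumer built on `try_recv` has three outcomes to handle: a message, nothing ready yet, and a closed channel. A minimal sketch using the error variants named in the code above:

```rust
fn main() {
    let (s, r) = kanal::unbounded::<i32>();
    s.send(5).unwrap();
    drop(s); // close the send side; queued data stays readable
    loop {
        match r.try_recv() {
            Ok(Some(v)) => println!("got {v}"),
            Ok(None) => std::thread::yield_now(), // nothing ready yet
            Err(kanal::ReceiveError::SendClosed) => break, // drained, no senders left
            Err(e) => panic!("channel closed: {e:?}"),
        }
    }
}
```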
600 | /// 601 | /// When using this function, it’s a good idea to check if the returned count is zero to avoid busy-waiting in a loop. 602 | /// If blocking behavior is desired when the count is zero, you can use the `recv()` function if count is zero. For efficiency, 603 | /// reusing the same vector across multiple calls can help minimize memory allocations. Between uses, you can clear 604 | /// the vector with `vec.clear()` to prepare it for the next set of messages. 605 | /// 606 | /// # Examples 607 | /// 608 | /// ``` 609 | /// # use std::thread::spawn; 610 | /// # let (s, r) = kanal::bounded(1000); 611 | /// # let t=spawn(move || { 612 | /// # for i in 0..1000 { 613 | /// # s.send(i)?; 614 | /// # } 615 | /// # anyhow::Ok(()) 616 | /// # }); 617 | /// 618 | /// let mut buf = Vec::with_capacity(1000); 619 | /// loop { 620 | /// if let Ok(count) = r.drain_into(&mut buf) { 621 | /// if count == 0 { 622 | /// // count is 0, to avoid busy-wait using recv for 623 | /// // the first next message 624 | /// if let Ok(v) = r.recv() { 625 | /// buf.push(v); 626 | /// } else { 627 | /// break; 628 | /// } 629 | /// } 630 | /// // use buffer 631 | /// buf.iter().for_each(|v| println!("{}",v)); 632 | /// }else{ 633 | /// println!("Channel closed"); 634 | /// break; 635 | /// } 636 | /// buf.clear(); 637 | /// } 638 | /// # t.join(); 639 | /// # anyhow::Ok(()) 640 | /// ``` 641 | pub fn drain_into(&self, vec: &mut Vec) -> Result { 642 | let vec_initial_length = vec.len(); 643 | let remaining_cap = vec.capacity() - vec_initial_length; 644 | let mut internal = acquire_internal(&self.internal); 645 | if internal.recv_count == 0 { 646 | return Err(ReceiveError::Closed); 647 | } 648 | let required_cap = internal.queue.len() + { 649 | if internal.recv_blocking { 650 | 0 651 | } else { 652 | internal.wait_list.len() 653 | } 654 | }; 655 | if required_cap > remaining_cap { 656 | vec.reserve(vec_initial_length + required_cap - remaining_cap); 657 | } 658 | while let Some(v) = internal.queue.pop_front() { 659 | vec.push(v); 660 | } 661 | while let Some(p) = internal.next_send() { 662 | // Safety: it's safe to receive from owned signal once 663 | unsafe { vec.push(p.recv()) } 664 | } 665 | Ok(required_cap) 666 | } 667 | 668 | /// Returns, whether the send side of the channel, is closed or not. 669 | /// 670 | /// # Examples 671 | /// 672 | /// ``` 673 | /// let (s, r) = kanal::unbounded::(); 674 | /// drop(s); // drop sender and disconnect the send side from the channel 675 | /// assert_eq!(r.is_disconnected(),true); 676 | /// ``` 677 | pub fn is_disconnected(&self) -> bool { 678 | acquire_internal(&self.internal).send_count == 0 679 | } 680 | 681 | /// Returns, whether the channel receive side is terminated, and will 682 | /// not return any result in future recv calls. 
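Note that the count returned by `drain_into` is the `required_cap` computed above, i.e. the queued messages plus any senders parked on the waitlist, which should match how many elements were moved. A smaller sketch than the doc example:

```rust
fn main() {
    let (s, r) = kanal::unbounded::<u32>();
    for i in 0..4 {
        s.send(i).unwrap();
    }
    let mut buf = Vec::new();
    // Moves everything currently visible (queue plus parked senders) in
    // one locked pass and reports how many elements arrived.
    let n = r.drain_into(&mut buf).unwrap();
    assert_eq!(n, 4);
    assert_eq!(buf, vec![0, 1, 2, 3]);
}
```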
683 | /// 684 | /// # Examples 685 | /// 686 | /// ``` 687 | /// let (s, r) = kanal::unbounded::(); 688 | /// s.send(1).unwrap(); 689 | /// drop(s); // drop sender and disconnect the send side from the channel 690 | /// assert_eq!(r.is_disconnected(),true); 691 | /// // Also channel is closed from send side, it's not terminated as there is data in channel queue 692 | /// assert_eq!(r.is_terminated(),false); 693 | /// assert_eq!(r.recv().unwrap(),1); 694 | /// // Now channel receive side is terminated as there is no sender for channel and queue is empty 695 | /// assert_eq!(r.is_terminated(),true); 696 | /// ``` 697 | pub fn is_terminated(&self) -> bool { 698 | let internal = acquire_internal(&self.internal); 699 | internal.send_count == 0 && internal.queue.len() == 0 700 | } 701 | }; 702 | } 703 | 704 | impl Sender { 705 | /// Sends data to the channel. 706 | /// 707 | /// # Examples 708 | /// 709 | /// ``` 710 | /// # use std::thread::spawn; 711 | /// # let (s, r) = kanal::bounded(0); 712 | /// # spawn(move || { 713 | /// s.send("Hello").unwrap(); 714 | /// # anyhow::Ok(()) 715 | /// # }); 716 | /// # let name=r.recv()?; 717 | /// # println!("Hello {}!",name); 718 | /// # anyhow::Ok(()) 719 | /// ``` 720 | #[inline(always)] 721 | pub fn send(&self, data: T) -> Result<(), SendError> { 722 | let mut internal = acquire_internal(&self.internal); 723 | if internal.recv_count == 0 { 724 | let send_count = internal.send_count; 725 | // Avoid wasting lock time on dropping failed send object 726 | drop(internal); 727 | if send_count == 0 { 728 | return Err(SendError::Closed); 729 | } 730 | return Err(SendError::ReceiveClosed); 731 | } 732 | if let Some(first) = internal.next_recv() { 733 | drop(internal); 734 | // Safety: it's safe to send to owned signal once 735 | unsafe { first.send(data) } 736 | Ok(()) 737 | } else if internal.queue.len() < internal.capacity { 738 | // Safety: MaybeUninit is acting like a ManuallyDrop 739 | internal.queue.push_back(data); 740 | Ok(()) 741 | } else { 742 | let mut data = MaybeUninit::new(data); 743 | // send directly to the waitlist 744 | let sig = Signal::new_sync(KanalPtr::new_from(data.as_mut_ptr())); 745 | internal.push_send(sig.get_terminator()); 746 | drop(internal); 747 | if !sig.wait() { 748 | // Safety: data failed to move, sender should drop it if it 749 | // needs to 750 | if needs_drop::() { 751 | unsafe { data.assume_init_drop() } 752 | } 753 | return Err(SendError::Closed); 754 | } 755 | Ok(()) 756 | } 757 | // if the queue is not empty send the data 758 | } 759 | /// Sends data to the channel with a deadline, if send fails then the object 760 | /// will be dropped. you can use send_option_timeout if you like to keep 761 | /// the object in case of timeout. 
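From the caller's perspective, the three paths inside `send` (direct handoff to a waiting receiver, queue push, park on the waitlist) are invisible; only the blocking behavior differs. On a zero-capacity channel every `send` takes the waitlist path and rendezvouses with a receiver, which a small sketch can make observable:

```rust
use std::{thread, time::{Duration, Instant}};

fn main() {
    // Zero capacity: send() always blocks until a receiver shows up.
    let (s, r) = kanal::bounded::<&'static str>(0);
    let start = Instant::now();
    let t = thread::spawn(move || {
        thread::sleep(Duration::from_millis(50));
        r.recv().unwrap()
    });
    s.send("hello").unwrap(); // returns only once the receiver takes it
    assert!(start.elapsed() >= Duration::from_millis(50));
    assert_eq!(t.join().unwrap(), "hello");
}
```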
762 | /// 763 | /// # Examples 764 | /// 765 | /// ``` 766 | /// # use std::thread::spawn; 767 | /// # use std::time::Duration; 768 | /// # let (s, r) = kanal::bounded(0); 769 | /// # spawn(move || { 770 | /// s.send_timeout("Hello",Duration::from_millis(500)).unwrap(); 771 | /// # anyhow::Ok(()) 772 | /// # }); 773 | /// # let name=r.recv()?; 774 | /// # println!("Hello {}!",name); 775 | /// # anyhow::Ok(()) 776 | /// ``` 777 | #[inline(always)] 778 | pub fn send_timeout(&self, data: T, duration: Duration) -> Result<(), SendErrorTimeout> { 779 | let deadline = Instant::now().checked_add(duration).unwrap(); 780 | let mut internal = acquire_internal(&self.internal); 781 | if internal.recv_count == 0 { 782 | let send_count = internal.send_count; 783 | // Avoid wasting lock time on dropping failed send object 784 | drop(internal); 785 | if send_count == 0 { 786 | return Err(SendErrorTimeout::Closed); 787 | } 788 | return Err(SendErrorTimeout::ReceiveClosed); 789 | } 790 | if let Some(first) = internal.next_recv() { 791 | drop(internal); 792 | // Safety: it's safe to send to owned signal once 793 | unsafe { first.send(data) } 794 | Ok(()) 795 | } else if internal.queue.len() < internal.capacity { 796 | // Safety: MaybeUninit is used as a ManuallyDrop, and data in it is 797 | // valid. 798 | internal.queue.push_back(data); 799 | Ok(()) 800 | } else { 801 | let mut data = MaybeUninit::new(data); 802 | // send directly to the waitlist 803 | let sig = Signal::new_sync(KanalPtr::new_from(data.as_mut_ptr())); 804 | internal.push_send(sig.get_terminator()); 805 | drop(internal); 806 | if !sig.wait_timeout(deadline) { 807 | if sig.is_terminated() { 808 | // Safety: data failed to move, sender should drop it if it 809 | // needs to 810 | if needs_drop::() { 811 | unsafe { data.assume_init_drop() } 812 | } 813 | return Err(SendErrorTimeout::Closed); 814 | } 815 | { 816 | let mut internal = acquire_internal(&self.internal); 817 | if internal.cancel_send_signal(&sig) { 818 | return Err(SendErrorTimeout::Timeout); 819 | } 820 | } 821 | // removing receive failed to wait for the signal response 822 | if !sig.wait() { 823 | // Safety: data failed to move, sender should drop it if it 824 | // needs to 825 | if needs_drop::() { 826 | unsafe { data.assume_init_drop() } 827 | } 828 | return Err(SendErrorTimeout::Closed); 829 | } 830 | } 831 | Ok(()) 832 | } 833 | // if the queue is not empty send the data 834 | } 835 | 836 | /// Tries to send data from provided option with a deadline, it will panic 837 | /// on successful send for None option. 
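`send_timeout` turns the waitlist park into a bounded wait. A minimal sketch handling the `Timeout` variant used in the code above (no receiver ever becomes ready here):

```rust
use std::time::Duration;

fn main() {
    // Nothing receives on this zero-capacity channel, so the call parks
    // on the waitlist and then reports a timeout.
    let (s, _r) = kanal::bounded::<u8>(0);
    match s.send_timeout(1, Duration::from_millis(10)) {
        Err(kanal::SendErrorTimeout::Timeout) => println!("timed out as expected"),
        other => panic!("unexpected result: {other:?}"),
    }
}
```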
838 | /// 839 | /// # Examples 840 | /// 841 | /// ``` 842 | /// # use std::thread::spawn; 843 | /// # use std::time::Duration; 844 | /// # let (s, r) = kanal::bounded(0); 845 | /// # spawn(move || { 846 | /// let mut opt=Some("Hello"); 847 | /// s.send_option_timeout(&mut opt,Duration::from_millis(500)).unwrap(); 848 | /// # anyhow::Ok(()) 849 | /// # }); 850 | /// # let name=r.recv()?; 851 | /// # println!("Hello {}!",name); 852 | /// # anyhow::Ok(()) 853 | /// ``` 854 | #[inline(always)] 855 | pub fn send_option_timeout( 856 | &self, 857 | data: &mut Option, 858 | duration: Duration, 859 | ) -> Result<(), SendErrorTimeout> { 860 | if data.is_none() { 861 | panic!("send data option is None"); 862 | } 863 | let deadline = Instant::now().checked_add(duration).unwrap(); 864 | let mut internal = acquire_internal(&self.internal); 865 | if internal.recv_count == 0 { 866 | let send_count = internal.send_count; 867 | // Avoid wasting lock time on dropping failed send object 868 | drop(internal); 869 | if send_count == 0 { 870 | return Err(SendErrorTimeout::Closed); 871 | } 872 | return Err(SendErrorTimeout::ReceiveClosed); 873 | } 874 | if let Some(first) = internal.next_recv() { 875 | drop(internal); 876 | // Safety: it's safe to send to owned signal once 877 | unsafe { first.send(data.take().unwrap()) } 878 | Ok(()) 879 | } else if internal.queue.len() < internal.capacity { 880 | internal.queue.push_back(data.take().unwrap()); 881 | Ok(()) 882 | } else { 883 | // send directly to the waitlist 884 | let mut d = data.take().unwrap(); 885 | let sig = Signal::new_sync(KanalPtr::new_from(&mut d)); 886 | internal.push_send(sig.get_terminator()); 887 | drop(internal); 888 | if !sig.wait_timeout(deadline) { 889 | if sig.is_terminated() { 890 | *data = Some(d); 891 | return Err(SendErrorTimeout::Closed); 892 | } 893 | { 894 | let mut internal = acquire_internal(&self.internal); 895 | if internal.cancel_send_signal(&sig) { 896 | *data = Some(d); 897 | return Err(SendErrorTimeout::Timeout); 898 | } 899 | } 900 | // removing receive failed to wait for the signal response 901 | if !sig.wait() { 902 | *data = Some(d); 903 | return Err(SendErrorTimeout::Closed); 904 | } 905 | } 906 | Ok(()) 907 | } 908 | // if the queue is not empty send the data 909 | } 910 | shared_send_impl!(); 911 | /// Clones [`Sender`] as the async version of it and returns it 912 | #[cfg(feature = "async")] 913 | pub fn clone_async(&self) -> AsyncSender { 914 | let mut internal = acquire_internal(&self.internal); 915 | if internal.send_count > 0 { 916 | internal.send_count += 1; 917 | } 918 | drop(internal); 919 | AsyncSender:: { 920 | internal: self.internal.clone(), 921 | } 922 | } 923 | 924 | /// Converts [`Sender`] to [`AsyncSender`] and returns it 925 | /// # Examples 926 | /// 927 | /// ``` 928 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 929 | /// # use tokio::{spawn as co}; 930 | /// # use std::time::Duration; 931 | /// let (s, r) = kanal::bounded(0); 932 | /// co(async move { 933 | /// let s=s.to_async(); 934 | /// s.send("World").await; 935 | /// }); 936 | /// let name=r.recv()?; 937 | /// println!("Hello {}!",name); 938 | /// # anyhow::Ok(()) 939 | /// # }); 940 | /// ``` 941 | #[cfg(feature = "async")] 942 | pub fn to_async(self) -> AsyncSender { 943 | // Safety: structure of Sender and AsyncSender is same 944 | unsafe { transmute(self) } 945 | } 946 | 947 | /// Borrows [`Sender`] as [`AsyncSender`] and returns it 948 | /// # Examples 949 | /// 950 | /// ``` 951 | /// # 
tokio::runtime::Runtime::new().unwrap().block_on(async { 952 | /// # use tokio::{spawn as co}; 953 | /// # use std::time::Duration; 954 | /// let (s, r) = kanal::bounded(0); 955 | /// co(async move { 956 | /// s.as_async().send("World").await; 957 | /// }); 958 | /// let name=r.recv()?; 959 | /// println!("Hello {}!",name); 960 | /// # anyhow::Ok(()) 961 | /// # }); 962 | /// ``` 963 | #[cfg(feature = "async")] 964 | pub fn as_async(&self) -> &AsyncSender { 965 | // Safety: structure of Sender and AsyncSender is same 966 | unsafe { transmute(self) } 967 | } 968 | shared_impl!(); 969 | } 970 | 971 | #[cfg(feature = "async")] 972 | impl AsyncSender { 973 | /// Sends data asynchronously to the channel. 974 | /// 975 | /// # Examples 976 | /// 977 | /// ``` 978 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 979 | /// # let (s, r) = kanal::unbounded_async(); 980 | /// s.send(1).await?; 981 | /// assert_eq!(r.recv().await?,1); 982 | /// # anyhow::Ok(()) 983 | /// # }); 984 | /// ``` 985 | #[inline(always)] 986 | pub fn send(&'_ self, data: T) -> SendFuture<'_, T> { 987 | SendFuture::new(&self.internal, data) 988 | } 989 | shared_send_impl!(); 990 | /// Clones [`AsyncSender`] as [`Sender`] with sync api of it. 991 | /// 992 | /// # Examples 993 | /// 994 | /// ``` 995 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 996 | /// let (s, r) = kanal::unbounded_async(); 997 | /// let sync_sender=s.clone_sync(); 998 | /// // JUST FOR EXAMPLE IT IS WRONG TO USE SYNC INSTANCE IN ASYNC CONTEXT 999 | /// sync_sender.send(1)?; 1000 | /// assert_eq!(r.recv().await?,1); 1001 | /// # anyhow::Ok(()) 1002 | /// # }); 1003 | /// ``` 1004 | pub fn clone_sync(&self) -> Sender { 1005 | let mut internal = acquire_internal(&self.internal); 1006 | if internal.send_count > 0 { 1007 | internal.send_count += 1; 1008 | } 1009 | drop(internal); 1010 | Sender:: { 1011 | internal: self.internal.clone(), 1012 | } 1013 | } 1014 | 1015 | /// Converts [`AsyncSender`] to [`Sender`] and returns it. 1016 | /// 1017 | /// # Examples 1018 | /// 1019 | /// ``` 1020 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1021 | /// # use std::time::Duration; 1022 | /// let (s, r) = kanal::bounded_async(0); 1023 | /// // move to sync environment 1024 | /// std::thread::spawn(move || { 1025 | /// let s=s.to_sync(); 1026 | /// s.send("World")?; 1027 | /// anyhow::Ok(()) 1028 | /// }); 1029 | /// let name=r.recv().await?; 1030 | /// println!("Hello {}!",name); 1031 | /// # anyhow::Ok(()) 1032 | /// # }); 1033 | /// ``` 1034 | pub fn to_sync(self) -> Sender { 1035 | // Safety: structure of Sender and AsyncSender is same 1036 | unsafe { transmute(self) } 1037 | } 1038 | 1039 | /// Borrows [`AsyncSender`] as [`Sender`] and returns it. 1040 | /// 1041 | /// # Examples 1042 | /// 1043 | /// ``` 1044 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1045 | /// # use std::time::Duration; 1046 | /// let (s, r) = kanal::bounded_async(0); 1047 | /// // move to sync environment 1048 | /// std::thread::spawn(move || { 1049 | /// s.as_sync().send("World")?; 1050 | /// anyhow::Ok(()) 1051 | /// }); 1052 | /// let name=r.recv().await?; 1053 | /// println!("Hello {}!",name); 1054 | /// # anyhow::Ok(()) 1055 | /// # }); 1056 | /// ``` 1057 | pub fn as_sync(&self) -> &Sender { 1058 | // Safety: structure of Sender and AsyncSender is same 1059 | unsafe { transmute(self) } 1060 | } 1061 | 1062 | shared_impl!(); 1063 | } 1064 | 1065 | /// Receiving side of the channel in sync mode. 
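Because the sync and async handles share one `#[repr(C)]` layout, the conversions above are free, and a single channel can be driven from both worlds at once. A sketch bridging a sync producer thread into an async consumer (tokio is already a dev-dependency of this crate):

```rust
fn main() {
    let (s, r) = kanal::bounded::<u32>(16);
    // Sync producer thread using the plain blocking API.
    std::thread::spawn(move || {
        for i in 0..3 {
            s.send(i).unwrap();
        }
    });
    // Async consumer on the very same channel.
    tokio::runtime::Runtime::new().unwrap().block_on(async move {
        let r = r.to_async();
        for expected in 0..3 {
            assert_eq!(r.recv().await.unwrap(), expected);
        }
    });
}
```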
1066 | /// Receivers can be cloned and produce receivers to operate in both sync and 1067 | /// async modes. 1068 | #[cfg_attr( 1069 | feature = "async", 1070 | doc = r##" 1071 | # Examples 1072 | 1073 | ``` 1074 | let (_s, receiver) = kanal::bounded::(0); 1075 | let async_receiver=receiver.clone_async(); 1076 | ``` 1077 | "## 1078 | )] 1079 | #[repr(C)] 1080 | pub struct Receiver { 1081 | internal: Internal, 1082 | } 1083 | 1084 | impl fmt::Debug for Receiver { 1085 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1086 | write!(f, "Receiver {{ .. }}") 1087 | } 1088 | } 1089 | 1090 | /// [`AsyncReceiver`] is receiving side of the channel in async mode. 1091 | /// Receivers can be cloned and produce receivers to operate in both sync and 1092 | /// async modes. 1093 | /// 1094 | /// # Examples 1095 | /// 1096 | /// ``` 1097 | /// let (_s, receiver) = kanal::bounded_async::(0); 1098 | /// let sync_receiver=receiver.clone_sync(); 1099 | /// ``` 1100 | #[cfg(feature = "async")] 1101 | #[repr(C)] 1102 | pub struct AsyncReceiver { 1103 | internal: Internal, 1104 | } 1105 | 1106 | #[cfg(feature = "async")] 1107 | impl fmt::Debug for AsyncReceiver { 1108 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1109 | write!(f, "AsyncReceiver {{ .. }}") 1110 | } 1111 | } 1112 | 1113 | impl Receiver { 1114 | /// Receives data from the channel 1115 | #[inline(always)] 1116 | pub fn recv(&self) -> Result { 1117 | let mut internal = acquire_internal(&self.internal); 1118 | if internal.recv_count == 0 { 1119 | return Err(ReceiveError::Closed); 1120 | } 1121 | if let Some(v) = internal.queue.pop_front() { 1122 | if let Some(p) = internal.next_send() { 1123 | // if there is a sender take its data and push it into the queue 1124 | // Safety: it's safe to receive from owned signal once 1125 | unsafe { internal.queue.push_back(p.recv()) } 1126 | } 1127 | Ok(v) 1128 | } else if let Some(p) = internal.next_send() { 1129 | drop(internal); 1130 | // Safety: it's safe to receive from owned signal once 1131 | unsafe { Ok(p.recv()) } 1132 | } else { 1133 | if internal.send_count == 0 { 1134 | return Err(ReceiveError::SendClosed); 1135 | } 1136 | // no active waiter so push to the queue 1137 | let mut ret = MaybeUninit::::uninit(); 1138 | let sig = Signal::new_sync(KanalPtr::new_write_address_ptr(ret.as_mut_ptr())); 1139 | internal.push_recv(sig.get_terminator()); 1140 | drop(internal); 1141 | 1142 | if !sig.wait() { 1143 | return Err(ReceiveError::Closed); 1144 | } 1145 | 1146 | // Safety: it's safe to assume init as data is forgotten on another 1147 | // side 1148 | if size_of::() > size_of::<*mut T>() { 1149 | Ok(unsafe { ret.assume_init() }) 1150 | } else { 1151 | Ok(unsafe { sig.assume_init() }) 1152 | } 1153 | } 1154 | // if the queue is not empty send the data 1155 | } 1156 | /// Tries receiving from the channel within a duration 1157 | #[inline(always)] 1158 | pub fn recv_timeout(&self, duration: Duration) -> Result { 1159 | let deadline = Instant::now().checked_add(duration).unwrap(); 1160 | let mut internal = acquire_internal(&self.internal); 1161 | if internal.recv_count == 0 { 1162 | return Err(ReceiveErrorTimeout::Closed); 1163 | } 1164 | if let Some(v) = internal.queue.pop_front() { 1165 | if let Some(p) = internal.next_send() { 1166 | // if there is a sender take its data and push it into the queue 1167 | // Safety: it's safe to receive from owned signal once 1168 | unsafe { internal.queue.push_back(p.recv()) } 1169 | } 1170 | Ok(v) 1171 | } else if let Some(p) = internal.next_send() { 
1172 | drop(internal); 1173 | // Safety: it's safe to receive from owned signal once 1174 | unsafe { Ok(p.recv()) } 1175 | } else { 1176 | if Instant::now() > deadline { 1177 | return Err(ReceiveErrorTimeout::Timeout); 1178 | } 1179 | if internal.send_count == 0 { 1180 | return Err(ReceiveErrorTimeout::SendClosed); 1181 | } 1182 | // no active waiter so push to the queue 1183 | let mut ret = MaybeUninit::::uninit(); 1184 | let sig = Signal::new_sync(KanalPtr::new_write_address_ptr(ret.as_mut_ptr())); 1185 | internal.push_recv(sig.get_terminator()); 1186 | drop(internal); 1187 | if !sig.wait_timeout(deadline) { 1188 | if sig.is_terminated() { 1189 | return Err(ReceiveErrorTimeout::Closed); 1190 | } 1191 | { 1192 | let mut internal = acquire_internal(&self.internal); 1193 | if internal.cancel_recv_signal(&sig) { 1194 | return Err(ReceiveErrorTimeout::Timeout); 1195 | } 1196 | } 1197 | // removing receive failed to wait for the signal response 1198 | if !sig.wait() { 1199 | return Err(ReceiveErrorTimeout::Closed); 1200 | } 1201 | } 1202 | // Safety: it's safe to assume init as data is forgotten on another 1203 | // side 1204 | if size_of::() > size_of::<*mut T>() { 1205 | Ok(unsafe { ret.assume_init() }) 1206 | } else { 1207 | Ok(unsafe { sig.assume_init() }) 1208 | } 1209 | } 1210 | // if the queue is not empty send the data 1211 | } 1212 | 1213 | shared_recv_impl!(); 1214 | #[cfg(feature = "async")] 1215 | /// Clones receiver as the async version of it 1216 | pub fn clone_async(&self) -> AsyncReceiver { 1217 | let mut internal = acquire_internal(&self.internal); 1218 | if internal.recv_count > 0 { 1219 | internal.recv_count += 1; 1220 | } 1221 | drop(internal); 1222 | AsyncReceiver:: { 1223 | internal: self.internal.clone(), 1224 | } 1225 | } 1226 | 1227 | /// Converts [`Receiver`] to [`AsyncReceiver`] and returns it. 1228 | /// 1229 | /// # Examples 1230 | /// 1231 | /// ``` 1232 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1233 | /// # use tokio::{spawn as co}; 1234 | /// # use std::time::Duration; 1235 | /// let (s, r) = kanal::bounded(0); 1236 | /// co(async move { 1237 | /// let r=r.to_async(); 1238 | /// let name=r.recv().await?; 1239 | /// println!("Hello {}!",name); 1240 | /// anyhow::Ok(()) 1241 | /// }); 1242 | /// s.send("World")?; 1243 | /// # anyhow::Ok(()) 1244 | /// # }); 1245 | /// ``` 1246 | #[cfg(feature = "async")] 1247 | pub fn to_async(self) -> AsyncReceiver { 1248 | // Safety: structure of Receiver and AsyncReceiver is same 1249 | unsafe { transmute(self) } 1250 | } 1251 | 1252 | /// Borrows [`Receiver`] as [`AsyncReceiver`] and returns it. 
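`recv_timeout` behaves like `recv` while data is available and only pays the park-with-deadline cost when the queue is empty. A minimal sketch of both outcomes:

```rust
use std::time::Duration;

fn main() {
    let (s, r) = kanal::unbounded::<u64>();
    s.send(9).unwrap();
    // Data is queued, so this returns immediately.
    assert_eq!(r.recv_timeout(Duration::from_millis(5)).unwrap(), 9);
    // Queue is now empty: the call parks until the deadline, then fails.
    assert!(matches!(
        r.recv_timeout(Duration::from_millis(5)),
        Err(kanal::ReceiveErrorTimeout::Timeout)
    ));
}
```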
1253 | /// 1254 | /// # Examples 1255 | /// 1256 | /// ``` 1257 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1258 | /// # use tokio::{spawn as co}; 1259 | /// # use std::time::Duration; 1260 | /// let (s, r) = kanal::bounded(0); 1261 | /// co(async move { 1262 | /// let name=r.as_async().recv().await?; 1263 | /// println!("Hello {}!",name); 1264 | /// anyhow::Ok(()) 1265 | /// }); 1266 | /// s.send("World")?; 1267 | /// # anyhow::Ok(()) 1268 | /// # }); 1269 | /// ``` 1270 | #[cfg(feature = "async")] 1271 | pub fn as_async(&self) -> &AsyncReceiver { 1272 | // Safety: structure of Receiver and AsyncReceiver is same 1273 | unsafe { transmute(self) } 1274 | } 1275 | 1276 | shared_impl!(); 1277 | } 1278 | 1279 | impl Iterator for Receiver { 1280 | type Item = T; 1281 | 1282 | fn next(&mut self) -> Option { 1283 | self.recv().ok() 1284 | } 1285 | } 1286 | 1287 | #[cfg(feature = "async")] 1288 | impl AsyncReceiver { 1289 | /// Returns a [`ReceiveFuture`] to receive data from the channel 1290 | /// asynchronously. 1291 | /// 1292 | /// # Cancellation and Polling Considerations 1293 | /// 1294 | /// Due to current limitations in Rust's handling of future cancellation, if a 1295 | /// `ReceiveFuture` is dropped exactly at the time when new data is written to the 1296 | /// channel, it may result in the loss of the received value. This behavior although memory-safe stems from 1297 | /// the fact that Rust does not provide a built-in, correct mechanism for cancelling futures. 1298 | /// 1299 | /// Additionally, it is important to note that constructs such as `tokio::select!` are not correct to use 1300 | /// with kanal async channels. Kanal's design does not rely on the conventional `poll` mechanism to 1301 | /// read messages. Because of its internal optimizations, the future may complete without receiving the 1302 | /// final poll, which prevents proper handling of the message. 1303 | /// 1304 | /// As a result, once the `ReceiveFuture` is polled for the first time (which registers the request to 1305 | /// receive data), the programmer must commit to completing the polling process. This ensures that 1306 | /// messages are correctly delivered and avoids potential race conditions associated with cancellation. 1307 | /// 1308 | /// # Examples 1309 | /// 1310 | /// ``` 1311 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1312 | /// # use tokio::{spawn as co}; 1313 | /// # let (s, r) = kanal::bounded_async(0); 1314 | /// # co(async move { 1315 | /// # s.send("Buddy").await?; 1316 | /// # anyhow::Ok(()) 1317 | /// # }); 1318 | /// let name=r.recv().await?; 1319 | /// println!("Hello {}",name); 1320 | /// # anyhow::Ok(()) 1321 | /// # }); 1322 | /// ``` 1323 | #[inline(always)] 1324 | pub fn recv(&'_ self) -> ReceiveFuture<'_, T> { 1325 | ReceiveFuture::new_ref(&self.internal) 1326 | } 1327 | /// Creates a asynchronous stream for the channel to receive messages, 1328 | /// [`ReceiveStream`] borrows the [`AsyncReceiver`], after dropping it, 1329 | /// receiver will be available and usable again. 
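Given the cancellation caveat documented above, one safe pattern is to dedicate a task to the receive loop and let channel closure end it, rather than racing `recv()` inside `tokio::select!`. A sketch under that constraint:

```rust
#[tokio::main]
async fn main() {
    let (s, r) = kanal::bounded_async::<u32>(0);
    // Dedicated consumer task: each recv() future is polled to completion,
    // so the cancellation hazard described above never arises.
    let consumer = tokio::spawn(async move {
        while let Ok(v) = r.recv().await {
            println!("got {v}");
        }
    });
    s.send(1).await.unwrap();
    drop(s); // closing the send side ends the consumer loop
    consumer.await.unwrap();
}
```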
1330 | /// 1331 | /// # Examples 1332 | /// 1333 | /// ``` 1334 | /// # use tokio::{spawn as co}; 1335 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1336 | /// // import to be able to use stream.next() function 1337 | /// use futures::stream::StreamExt; 1338 | /// // import to be able to use stream.is_terminated() function 1339 | /// use futures::stream::FusedStream; 1340 | /// 1341 | /// let (s, r) = kanal::unbounded_async(); 1342 | /// co(async move { 1343 | /// for i in 0..100 { 1344 | /// s.send(i).await.unwrap(); 1345 | /// } 1346 | /// }); 1347 | /// let mut stream = r.stream(); 1348 | /// assert!(!stream.is_terminated()); 1349 | /// for i in 0..100 { 1350 | /// assert_eq!(stream.next().await, Some(i)); 1351 | /// } 1352 | /// // Stream will return None after it is terminated, and there is no other sender. 1353 | /// assert_eq!(stream.next().await, None); 1354 | /// assert!(stream.is_terminated()); 1355 | /// # }); 1356 | /// ``` 1357 | #[inline(always)] 1358 | pub fn stream(&'_ self) -> ReceiveStream<'_, T> { 1359 | ReceiveStream::new_borrowed(self) 1360 | } 1361 | shared_recv_impl!(); 1362 | /// Returns sync cloned version of the receiver. 1363 | /// 1364 | /// # Examples 1365 | /// 1366 | /// ``` 1367 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1368 | /// # use tokio::{spawn as co}; 1369 | /// let (s, r) = kanal::unbounded_async(); 1370 | /// s.send(1).await?; 1371 | /// let sync_receiver=r.clone_sync(); 1372 | /// // JUST FOR EXAMPLE IT IS WRONG TO USE SYNC INSTANCE IN ASYNC CONTEXT 1373 | /// assert_eq!(sync_receiver.recv()?,1); 1374 | /// # anyhow::Ok(()) 1375 | /// # }); 1376 | /// ``` 1377 | pub fn clone_sync(&self) -> Receiver { 1378 | let mut internal = acquire_internal(&self.internal); 1379 | if internal.recv_count > 0 { 1380 | internal.recv_count += 1; 1381 | } 1382 | drop(internal); 1383 | Receiver:: { 1384 | internal: self.internal.clone(), 1385 | } 1386 | } 1387 | 1388 | /// Converts [`AsyncReceiver`] to [`Receiver`] and returns it. 
1389 | /// 1390 | /// # Examples 1391 | /// 1392 | /// ``` 1393 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1394 | /// # use std::time::Duration; 1395 | /// let (s, r) = kanal::bounded_async(0); 1396 | /// // move to sync environment 1397 | /// std::thread::spawn(move || { 1398 | /// let r=r.to_sync(); 1399 | /// let name=r.recv()?; 1400 | /// println!("Hello {}!",name); 1401 | /// anyhow::Ok(()) 1402 | /// }); 1403 | /// s.send("World").await?; 1404 | /// # anyhow::Ok(()) 1405 | /// # }); 1406 | /// ``` 1407 | pub fn to_sync(self) -> Receiver { 1408 | // Safety: structure of Receiver and AsyncReceiver is same 1409 | unsafe { transmute(self) } 1410 | } 1411 | 1412 | /// Borrows [`AsyncReceiver`] as [`Receiver`] and returns it 1413 | /// # Examples 1414 | /// 1415 | /// ``` 1416 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1417 | /// # use std::time::Duration; 1418 | /// let (s, r) = kanal::bounded_async(0); 1419 | /// // move to sync environment 1420 | /// std::thread::spawn(move || { 1421 | /// let name=r.as_sync().recv()?; 1422 | /// println!("Hello {}!",name); 1423 | /// anyhow::Ok(()) 1424 | /// }); 1425 | /// s.send("World").await?; 1426 | /// # anyhow::Ok(()) 1427 | /// # }); 1428 | /// ``` 1429 | pub fn as_sync(&self) -> &Receiver { 1430 | // Safety: structure of Receiver and AsyncReceiver is same 1431 | unsafe { transmute(self) } 1432 | } 1433 | 1434 | shared_impl!(); 1435 | } 1436 | 1437 | impl Drop for Receiver { 1438 | fn drop(&mut self) { 1439 | let mut internal = acquire_internal(&self.internal); 1440 | if internal.recv_count > 0 { 1441 | internal.recv_count -= 1; 1442 | if internal.recv_count == 0 && internal.send_count != 0 { 1443 | internal.terminate_signals(); 1444 | } 1445 | } 1446 | } 1447 | } 1448 | 1449 | #[cfg(feature = "async")] 1450 | impl Drop for AsyncReceiver { 1451 | fn drop(&mut self) { 1452 | let mut internal = acquire_internal(&self.internal); 1453 | if internal.recv_count > 0 { 1454 | internal.recv_count -= 1; 1455 | if internal.recv_count == 0 && internal.send_count != 0 { 1456 | internal.terminate_signals(); 1457 | } 1458 | } 1459 | } 1460 | } 1461 | 1462 | impl Clone for Receiver { 1463 | fn clone(&self) -> Self { 1464 | let mut internal = acquire_internal(&self.internal); 1465 | if internal.recv_count > 0 { 1466 | internal.recv_count += 1; 1467 | } 1468 | drop(internal); 1469 | Self { 1470 | internal: self.internal.clone(), 1471 | } 1472 | } 1473 | } 1474 | 1475 | #[cfg(feature = "async")] 1476 | impl Clone for AsyncReceiver { 1477 | fn clone(&self) -> Self { 1478 | let mut internal = acquire_internal(&self.internal); 1479 | if internal.recv_count > 0 { 1480 | internal.recv_count += 1; 1481 | } 1482 | drop(internal); 1483 | Self { 1484 | internal: self.internal.clone(), 1485 | } 1486 | } 1487 | } 1488 | 1489 | /// Creates a new sync bounded channel with the requested buffer size, and 1490 | /// returns [`Sender`] and [`Receiver`] of the channel for type T, you can get 1491 | /// access to async API of [`AsyncSender`] and [`AsyncReceiver`] with `to_sync`, 1492 | /// `as_async` or `clone_sync` based on your requirements, by calling them on 1493 | /// sender or receiver. 
1494 | /// 1495 | /// # Examples 1496 | /// 1497 | /// ``` 1498 | /// use std::thread::spawn; 1499 | /// 1500 | /// let (s, r) = kanal::bounded(0); // for channel with zero size queue, this channel always block until successful send/recv 1501 | /// 1502 | /// // spawn 8 threads, that will send 100 numbers to channel reader 1503 | /// for i in 0..8{ 1504 | /// let s = s.clone(); 1505 | /// spawn(move || { 1506 | /// for i in 1..100{ 1507 | /// s.send(i); 1508 | /// } 1509 | /// }); 1510 | /// } 1511 | /// // drop local sender so the channel send side gets closed when all of the senders finished their jobs 1512 | /// drop(s); 1513 | /// 1514 | /// let first = r.recv().unwrap(); // receive first msg 1515 | /// let total: u32 = first+r.sum::(); // the receiver implements iterator so you can call sum to receive sum of rest of messages 1516 | /// assert_eq!(total, 39600); 1517 | /// ``` 1518 | pub fn bounded(size: usize) -> (Sender, Receiver) { 1519 | let internal = ChannelInternal::new(true, size); 1520 | ( 1521 | Sender { 1522 | internal: internal.clone(), 1523 | }, 1524 | Receiver { internal }, 1525 | ) 1526 | } 1527 | 1528 | /// Creates a new async bounded channel with the requested buffer size, and 1529 | /// returns [`AsyncSender`] and [`AsyncReceiver`] of the channel for type T, you 1530 | /// can get access to sync API of [`Sender`] and [`Receiver`] with `to_sync`, 1531 | /// `as_async` or `clone_sync` based on your requirements, by calling them on 1532 | /// async sender or receiver. 1533 | /// 1534 | /// # Examples 1535 | /// 1536 | /// ``` 1537 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1538 | /// use tokio::{spawn as co}; 1539 | /// 1540 | /// let (s, r) = kanal::bounded_async(0); 1541 | /// 1542 | /// co(async move { 1543 | /// s.send("hello!").await?; 1544 | /// anyhow::Ok(()) 1545 | /// }); 1546 | /// 1547 | /// assert_eq!(r.recv().await?, "hello!"); 1548 | /// anyhow::Ok(()) 1549 | /// # }); 1550 | /// ``` 1551 | #[cfg(feature = "async")] 1552 | pub fn bounded_async(size: usize) -> (AsyncSender, AsyncReceiver) { 1553 | let internal = ChannelInternal::new(true, size); 1554 | ( 1555 | AsyncSender { 1556 | internal: internal.clone(), 1557 | }, 1558 | AsyncReceiver { internal }, 1559 | ) 1560 | } 1561 | 1562 | const UNBOUNDED_STARTING_SIZE: usize = 32; 1563 | 1564 | /// Creates a new sync unbounded channel, and returns [`Sender`] and 1565 | /// [`Receiver`] of the channel for type T, you can get access to async API 1566 | /// of [`AsyncSender`] and [`AsyncReceiver`] with `to_sync`, `as_async` or 1567 | /// `clone_sync` based on your requirements, by calling them on sender or 1568 | /// receiver. 1569 | /// 1570 | /// # Warning 1571 | /// This unbounded channel does not shrink its queue. As a result, if the receive side is 1572 | /// exhausted or delayed, the internal queue may grow substantially. This behavior is intentional and considered as a warmup phase. 1573 | /// If such growth is undesirable, consider using a bounded channel with an appropriate queue size. 
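The asserted total in the examples above and below follows from the loop bounds: `1..100` yields the 99 integers 1 through 99, whose sum is 4950, and eight producers give 8 × 4950 = 39600. A quick check:

```rust
fn main() {
    let per_thread: u32 = (1..100).sum(); // 1 + 2 + ... + 99 = 4950
    assert_eq!(8 * per_thread, 39600);    // eight producer threads
}
```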
1574 | /// 1575 | /// # Examples 1576 | /// 1577 | /// ``` 1578 | /// use std::thread::spawn; 1579 | /// 1580 | /// let (s, r) = kanal::unbounded(); // for channel with unbounded size queue, this channel never blocks on send 1581 | /// 1582 | /// // spawn 8 threads, that will send 100 numbers to the channel reader 1583 | /// for i in 0..8{ 1584 | /// let s = s.clone(); 1585 | /// spawn(move || { 1586 | /// for i in 1..100{ 1587 | /// s.send(i); 1588 | /// } 1589 | /// }); 1590 | /// } 1591 | /// // drop local sender so the channel send side gets closed when all of the senders finished their jobs 1592 | /// drop(s); 1593 | /// 1594 | /// let first = r.recv().unwrap(); // receive first msg 1595 | /// let total: u32 = first+r.sum::(); // the receiver implements iterator so you can call sum to receive sum of rest of messages 1596 | /// assert_eq!(total, 39600); 1597 | /// ``` 1598 | pub fn unbounded() -> (Sender, Receiver) { 1599 | let internal = ChannelInternal::new(false, UNBOUNDED_STARTING_SIZE); 1600 | ( 1601 | Sender { 1602 | internal: internal.clone(), 1603 | }, 1604 | Receiver { internal }, 1605 | ) 1606 | } 1607 | 1608 | /// Creates a new async unbounded channel, and returns [`AsyncSender`] and 1609 | /// [`AsyncReceiver`] of the channel for type T, you can get access to sync API 1610 | /// of [`Sender`] and [`Receiver`] with `to_sync`, `as_async` or `clone_sync` 1611 | /// based on your requirements, by calling them on async sender or receiver. 1612 | /// 1613 | /// # Warning 1614 | /// This unbounded channel does not shrink its queue. As a result, if the receive side is 1615 | /// exhausted or delayed, the internal queue may grow substantially. This behavior is intentional and considered as a warmup phase. 1616 | /// If such growth is undesirable, consider using a bounded channel with an appropriate queue size. 
1617 | /// 1618 | /// # Examples 1619 | /// 1620 | /// ``` 1621 | /// # tokio::runtime::Runtime::new().unwrap().block_on(async { 1622 | /// use tokio::{spawn as co}; 1623 | /// 1624 | /// let (s, r) = kanal::unbounded_async(); 1625 | /// 1626 | /// co(async move { 1627 | /// s.send("hello!").await?; 1628 | /// anyhow::Ok(()) 1629 | /// }); 1630 | /// 1631 | /// assert_eq!(r.recv().await?, "hello!"); 1632 | /// anyhow::Ok(()) 1633 | /// # }); 1634 | /// ``` 1635 | #[cfg(feature = "async")] 1636 | pub fn unbounded_async() -> (AsyncSender, AsyncReceiver) { 1637 | let internal = ChannelInternal::new(false, UNBOUNDED_STARTING_SIZE); 1638 | ( 1639 | AsyncSender { 1640 | internal: internal.clone(), 1641 | }, 1642 | AsyncReceiver { internal }, 1643 | ) 1644 | } 1645 | -------------------------------------------------------------------------------- /src/mutex.rs: -------------------------------------------------------------------------------- 1 | use cacheguard::CacheGuard; 2 | use core::sync::atomic::{AtomicBool, Ordering}; 3 | use lock_api::{GuardSend, RawMutex}; 4 | 5 | use crate::backoff::*; 6 | pub struct RawMutexLock { 7 | locked: CacheGuard, 8 | } 9 | 10 | impl RawMutexLock { 11 | #[inline(never)] 12 | fn lock_no_inline(&self) { 13 | spin_cond(|| self.try_lock()); 14 | } 15 | } 16 | 17 | unsafe impl RawMutex for RawMutexLock { 18 | #[allow(clippy::declare_interior_mutable_const)] 19 | const INIT: RawMutexLock = RawMutexLock { 20 | locked: CacheGuard::new(AtomicBool::new(false)), 21 | }; 22 | type GuardMarker = GuardSend; 23 | #[inline(always)] 24 | fn lock(&self) { 25 | if self.try_lock() { 26 | return; 27 | } 28 | self.lock_no_inline(); 29 | } 30 | 31 | #[inline(always)] 32 | fn try_lock(&self) -> bool { 33 | self.locked 34 | .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) 35 | .is_ok() 36 | } 37 | 38 | #[inline(always)] 39 | unsafe fn unlock(&self) { 40 | self.locked.store(false, Ordering::Release); 41 | } 42 | } 43 | #[allow(dead_code)] 44 | pub type Mutex = lock_api::Mutex; 45 | #[cfg(not(feature = "std-mutex"))] 46 | pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutexLock, T>; 47 | -------------------------------------------------------------------------------- /src/pointer.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | cell::UnsafeCell, 3 | mem::{forget, size_of, zeroed, MaybeUninit}, 4 | ptr, 5 | }; 6 | 7 | /// Kanal Pointer is a structure to move data efficiently between sync and async 8 | /// context. This mod transfer data with two different ways between threads: 9 | /// 10 | /// 1. When data size T is bigger than pointer size: 11 | /// 12 | /// holds pointer to that data in another side stack, and copies memory from 13 | /// that pointer location. 14 | /// 15 | /// 2. When data size T is equal or less than pointer size: 16 | /// 17 | /// serialize data itself in pointer address, with this action KanalPtr 18 | /// removes one unnecessary memory load operation and improves speed. This 19 | /// structure is unsafe. KanalPtr should be pinned to memory location or be a 20 | /// member of pinned structure to work correctly. In particular, ZSTs should be 21 | /// treated carefully as their alignments can be larger than the alignment of 22 | /// pointers. 
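The choice between KanalPtr's two transfer modes is a compile-time size comparison. A standalone illustration of the same rule; this helper is illustrative only, not the crate's code, and the results assume a 64-bit target:

```rust
use std::mem::size_of;

// Mirrors the dispatch rule described above: payloads no larger than a
// pointer are serialized into the pointer slot itself; larger payloads
// are moved through a pointer to the other side's stack.
fn uses_inline_encoding<T>() -> bool {
    size_of::<T>() <= size_of::<*mut T>()
}

fn main() {
    assert!(uses_inline_encoding::<u64>()); // 8 bytes fit in the pointer slot
    assert!(uses_inline_encoding::<()>()); // ZST: nothing is copied at all
    assert!(!uses_inline_encoding::<[u8; 64]>()); // moved through a pointer
    assert!(!uses_inline_encoding::<String>()); // 24 bytes on 64-bit targets
}
```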
23 | pub(crate) struct KanalPtr(UnsafeCell>); 24 | 25 | impl Default for KanalPtr { 26 | fn default() -> Self { 27 | Self(UnsafeCell::new(MaybeUninit::uninit())) 28 | } 29 | } 30 | 31 | impl KanalPtr { 32 | /// Creates a KanalPtr from mut reference without forgetting or taking 33 | /// ownership Creator side should take care of forgetting the object 34 | /// after move action is completed. 35 | #[inline(always)] 36 | pub(crate) fn new_from(addr: *mut T) -> Self { 37 | if size_of::() > size_of::<*mut T>() { 38 | Self(UnsafeCell::new(MaybeUninit::new(addr))) 39 | } else { 40 | Self(UnsafeCell::new(unsafe { store_as_kanal_ptr(addr) })) 41 | } 42 | } 43 | /// Creates a KanalPtr from owned object, receiver or creator should take 44 | /// care of dropping data inside ptr. 45 | #[cfg(feature = "async")] 46 | #[inline(always)] 47 | pub(crate) fn new_owned(d: T) -> Self { 48 | if size_of::() > size_of::<*mut T>() { 49 | unreachable!("bug: data can't be stored when size of T is bigger than pointer size"); 50 | } else { 51 | let ret = Self(UnsafeCell::new(unsafe { store_as_kanal_ptr(&d) })); 52 | forget(d); 53 | ret 54 | } 55 | } 56 | /// Creates a KanalPtr only for write operation, so it does not load data 57 | /// inside addr in KanalPtr as it is unnecessary 58 | #[inline(always)] 59 | pub(crate) fn new_write_address_ptr(addr: *mut T) -> Self { 60 | if size_of::() > size_of::<*mut T>() { 61 | Self(UnsafeCell::new(MaybeUninit::new(addr))) 62 | } else { 63 | Self(UnsafeCell::new(MaybeUninit::uninit())) 64 | } 65 | } 66 | /// Creates a KanalPtr without checking or transforming the pointer to 67 | /// correct KanalPtr format, Caller should take uf being sure that 68 | /// provided address is in correct KanalPtr format 69 | #[cfg(feature = "async")] 70 | #[inline(always)] 71 | pub(crate) fn new_unchecked(addr: *mut T) -> Self { 72 | Self(UnsafeCell::new(MaybeUninit::new(addr))) 73 | } 74 | /// Reads data based on movement protocol of KanalPtr based on size of T 75 | #[inline(always)] 76 | pub(crate) unsafe fn read(&self) -> T { 77 | if size_of::() == 0 { 78 | zeroed() 79 | } else if size_of::() > size_of::<*mut T>() { 80 | ptr::read((*self.0.get()).assume_init()) 81 | } else { 82 | ptr::read((*self.0.get()).as_ptr() as *const T) 83 | } 84 | } 85 | /// Writes data based on movement protocol of KanalPtr based on size of T 86 | #[inline(always)] 87 | pub(crate) unsafe fn write(&self, d: T) { 88 | if size_of::() > size_of::<*mut T>() { 89 | ptr::write((*self.0.get()).assume_init(), d); 90 | } else { 91 | if size_of::() > 0 { 92 | *self.0.get() = store_as_kanal_ptr(&d); 93 | } 94 | forget(d); 95 | } 96 | } 97 | /// Writes data based on movement protocol of KanalPtr based on size of T 98 | #[inline(always)] 99 | #[allow(unused)] 100 | pub(crate) unsafe fn copy(&self, d: *const T) { 101 | if size_of::() > size_of::<*mut T>() { 102 | // Data can't be stored as pointer value, move it to pointer 103 | // location 104 | ptr::copy_nonoverlapping(d, (*self.0.get()).assume_init(), 1); 105 | } else if size_of::() > 0 { 106 | // Data size is less or equal to pointer size, serialize data as 107 | // pointer address 108 | *self.0.get() = store_as_kanal_ptr(d); 109 | } 110 | } 111 | } 112 | 113 | /// this function stores data inside ptr in correct protocol format of KanalPtr 114 | /// for T types that are smaller than pointer size 115 | #[inline(always)] 116 | unsafe fn store_as_kanal_ptr(ptr: *const T) -> MaybeUninit<*mut T> { 117 | let mut ret = MaybeUninit::uninit(); 118 | if size_of::() > 0 { 119 | 
113 | /// This function stores the data behind `ptr` in the correct KanalPtr
114 | /// protocol format, for types T that are no bigger than the pointer size
115 | #[inline(always)]
116 | unsafe fn store_as_kanal_ptr<T>(ptr: *const T) -> MaybeUninit<*mut T> {
117 |     let mut ret = MaybeUninit::uninit();
118 |     if size_of::<T>() > 0 {
119 |         ptr::copy_nonoverlapping(ptr, ret.as_mut_ptr() as *mut T, 1);
120 |     }
121 |     ret
122 | }
123 |
--------------------------------------------------------------------------------
/src/signal.rs:
--------------------------------------------------------------------------------
1 | use crate::{backoff, pointer::KanalPtr};
2 | use core::{
3 |     cell::UnsafeCell,
4 |     sync::atomic::{fence, AtomicU8, Ordering},
5 | };
6 | #[cfg(feature = "async")]
7 | use core::{
8 |     task::{Poll, Waker},
9 |     time::Duration,
10 | };
11 | use std::{thread::Thread, time::Instant};
12 |
13 | const UNLOCKED: u8 = 0;
14 | const TERMINATED: u8 = 1;
15 | const LOCKED: u8 = 2;
16 | const LOCKED_STARVATION: u8 = 3;
17 |
18 | /// `KanalWaker` is a structure that enables synchronization in both async and
19 | /// sync contexts.
20 | #[repr(u8)]
21 | pub(crate) enum KanalWaker {
22 |     #[cfg(feature = "async")]
23 |     None,
24 |     Sync(UnsafeCell<Option<Thread>>),
25 |     #[cfg(feature = "async")]
26 |     Async(Waker),
27 | }
28 |
29 | /// The `Signal` struct is responsible for communication between threads and
30 | /// coroutines for both reads and writes.
31 | pub struct Signal<T> {
32 |     state: AtomicU8,
33 |     ptr: KanalPtr<T>,
34 |     waker: KanalWaker,
35 | }
36 |
37 | impl<T> Signal<T> {
38 |     /// Returns a new async signal with an empty KanalPtr
39 |     #[inline(always)]
40 |     #[cfg(feature = "async")]
41 |     pub(crate) fn new_async() -> Self {
42 |         Self {
43 |             state: AtomicU8::new(LOCKED),
44 |             ptr: Default::default(),
45 |             waker: KanalWaker::None,
46 |         }
47 |     }
48 |
49 |     #[inline(always)]
50 |     #[cfg(feature = "async")]
51 |     pub(crate) fn poll(&self) -> Poll<bool> {
52 |         let v = self.state.load(Ordering::Relaxed);
53 |         if v < LOCKED {
54 |             fence(Ordering::Acquire);
55 |             Poll::Ready(v == UNLOCKED)
56 |         } else {
57 |             Poll::Pending
58 |         }
59 |     }
60 |
61 |     /// Returns a new async signal for the provided kanal pointer
62 |     #[inline(always)]
63 |     #[cfg(feature = "async")]
64 |     pub(crate) fn new_async_ptr(ptr: KanalPtr<T>) -> Self {
65 |         Self {
66 |             state: AtomicU8::new(LOCKED),
67 |             ptr,
68 |             waker: KanalWaker::None,
69 |         }
70 |     }
71 |
72 |     /// Returns a new sync signal for the provided kanal pointer
73 |     #[inline(always)]
74 |     pub(crate) fn new_sync(ptr: KanalPtr<T>) -> Self {
75 |         Self {
76 |             state: AtomicU8::new(LOCKED),
77 |             ptr,
78 |             waker: KanalWaker::Sync(None.into()),
79 |         }
80 |     }
81 |
82 |     /// Waits, blocking for a short time, for an async signal to finish
83 |     #[cfg(feature = "async")]
84 |     pub(crate) fn async_blocking_wait(&self) -> bool {
85 |         let v = self.state.load(Ordering::Relaxed);
86 |         if v < LOCKED {
87 |             fence(Ordering::Acquire);
88 |             return v == UNLOCKED;
89 |         }
90 |
91 |         for _ in 0..32 {
92 |             backoff::yield_os();
93 |             let v = self.state.load(Ordering::Relaxed);
94 |             if v < LOCKED {
95 |                 fence(Ordering::Acquire);
96 |                 return v == UNLOCKED;
97 |             }
98 |         }
99 |
100 |         // Usually this point is never reached, but there is no guarantee
101 |         let mut sleep_time: u64 = 1 << 10;
102 |         loop {
103 |             backoff::sleep(Duration::from_nanos(sleep_time));
104 |             let v = self.state.load(Ordering::Relaxed);
105 |             if v < LOCKED {
106 |                 fence(Ordering::Acquire);
107 |                 return v == UNLOCKED;
108 |             }
109 |             // increase sleep_time gradually, up to 262 microseconds
110 |             if sleep_time < (1 << 18) {
111 |                 sleep_time <<= 1;
112 |             }
113 |         }
114 |     }
115 |
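    // A sketch of the state encoding used by the checks in this impl (an
    // illustrative helper, not part of the original source): both finished
    // states (UNLOCKED = 0, TERMINATED = 1) sort below LOCKED = 2, so one
    // comparison answers "finished?" and one equality answers "succeeded?".
    #[allow(dead_code)]
    #[inline(always)]
    fn decode_state(v: u8) -> Option<bool> {
        if v < LOCKED {
            // Some(true): completed successfully; Some(false): terminated
            Some(v == UNLOCKED)
        } else {
            // still LOCKED or LOCKED_STARVATION: the waiter must keep waiting
            None
        }
    }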
116 |     /// Waits for the signal event in sync mode.
117 |     #[inline(always)]
118 |     pub(crate) fn wait(&self) -> bool {
119 |         if let Some(res) = backoff::spin_option_yield_only(
120 |             || {
121 |                 let v = self.state.load(Ordering::Relaxed);
122 |                 if v < LOCKED {
123 |                     fence(Ordering::Acquire);
124 |                     return Some(v == UNLOCKED);
125 |                 }
126 |                 None
127 |             },
128 |             25,
129 |         ) {
130 |             return res;
131 |         }
132 |         match &self.waker {
133 |             KanalWaker::Sync(waker) => {
134 |                 // waker is not shared as the state is not `LOCKED_STARVATION`
135 |                 unsafe {
136 |                     *waker.get() = Some(std::thread::current());
137 |                 }
138 |                 match self.state.compare_exchange(
139 |                     LOCKED,
140 |                     LOCKED_STARVATION,
141 |                     Ordering::Release,
142 |                     Ordering::Acquire,
143 |                 ) {
144 |                     Ok(_) => loop {
145 |                         std::thread::park();
146 |                         let v = self.state.load(Ordering::Relaxed);
147 |                         if v < LOCKED {
148 |                             fence(Ordering::Acquire);
149 |                             return v == UNLOCKED;
150 |                         }
151 |                     },
152 |                     Err(v) => v == UNLOCKED,
153 |                 }
154 |             }
155 |             #[cfg(feature = "async")]
156 |             KanalWaker::None | KanalWaker::Async(_) => unreachable!(),
157 |         }
158 |     }
159 |
160 |     /// Waits for the signal event in sync mode with a timeout
161 |     pub(crate) fn wait_timeout(&self, until: Instant) -> bool {
162 |         let v = self.state.load(Ordering::Relaxed);
163 |         if v < LOCKED {
164 |             fence(Ordering::Acquire);
165 |             return v == UNLOCKED;
166 |         }
167 |         match self.state.compare_exchange(
168 |             LOCKED,
169 |             LOCKED_STARVATION,
170 |             Ordering::Release,
171 |             Ordering::Acquire,
172 |         ) {
173 |             Ok(_) => loop {
174 |                 let v = self.state.load(Ordering::Relaxed);
175 |                 if v < LOCKED {
176 |                     fence(Ordering::Acquire);
177 |                     return v == UNLOCKED;
178 |                 }
179 |                 let now = Instant::now();
180 |                 if now >= until {
181 |                     return self.state.load(Ordering::Acquire) == UNLOCKED;
182 |                 }
183 |                 std::thread::park_timeout(until - now);
184 |             },
185 |             Err(v) => v == UNLOCKED,
186 |         }
187 |     }
188 |
189 |     /// Sets the pointer to the data for receiving or sending
190 |     #[inline(always)]
191 |     #[cfg(feature = "async")]
192 |     pub(crate) fn set_ptr(&mut self, ptr: KanalPtr<T>) {
193 |         self.ptr = ptr;
194 |     }
195 |
196 |     /// Registers the async waker in the Signal
197 |     #[inline(always)]
198 |     #[cfg(feature = "async")]
199 |     pub(crate) fn register_waker(&mut self, waker: &Waker) {
200 |         self.waker = KanalWaker::Async(waker.clone())
201 |     }
202 |
203 |     /// Returns true if the registered async waker would wake the same task
204 |     #[inline(always)]
205 |     #[cfg(feature = "async")]
206 |     pub(crate) fn will_wake(&self, waker: &Waker) -> bool {
207 |         match &self.waker {
208 |             KanalWaker::Async(w) => w.will_wake(waker),
209 |             KanalWaker::Sync(_) | KanalWaker::None => unreachable!(),
210 |         }
211 |     }
212 |
213 |     /// Returns true if the signal is terminated
214 |     pub(crate) fn is_terminated(&self) -> bool {
215 |         self.state.load(Ordering::Relaxed) == TERMINATED
216 |     }
217 |
218 |     /// Reads the kanal ptr and returns its value
219 |     pub(crate) unsafe fn assume_init(&self) -> T {
220 |         self.ptr.read()
221 |     }
222 |
223 |     /// Wakes the sleeping thread or coroutine
224 |     unsafe fn wake(this: *const Self, state: u8) {
225 |         match &(*this).waker {
226 |             KanalWaker::Sync(waker) => {
227 |                 if (*this)
228 |                     .state
229 |                     .compare_exchange(LOCKED, state, Ordering::Release, Ordering::Acquire)
230 |                     .is_err()
231 |                 {
232 |                     if let Some(thread) = (*waker.get()).as_ref() {
233 |                         let thread = thread.clone();
234 |                         (*this).state.store(state, Ordering::Release);
235 |                         thread.unpark();
236 |                     }
237 |                 }
238 |             }
239 |             #[cfg(feature = "async")]
240 |             KanalWaker::Async(w) => {
241 |                 let w = w.clone();
242 |                 (*this).state.store(state, Ordering::Release);
243 |                 w.wake();
244 |             }
245 |             #[cfg(feature = "async")]
246 |             KanalWaker::None => unreachable!(),
247 |         }
248 |     }
249 |
250 |     /// Sends the object to the receive signal
251 |     /// Safety: it is only safe to call this once, on receive signals
252 |     /// that are not terminated
253 |     pub(crate) unsafe fn send(this: *const Self, d: T) {
254 |         (*this).ptr.write(d);
255 |         Self::wake(this, UNLOCKED);
256 |     }
257 |
258 |     /// Sends the object to the receive signal by copying from the pointer
259 |     /// Safety: it is only safe to call this once, on receive signals
260 |     /// that are not terminated
261 |     #[allow(unused)]
262 |     pub(crate) unsafe fn send_copy(this: *const Self, d: *const T) {
263 |         (*this).ptr.copy(d);
264 |         Self::wake(this, UNLOCKED);
265 |     }
266 |
267 |     /// Receives an object from the send signal
268 |     /// Safety: it is only safe to call this once, on send signals that are
269 |     /// not terminated
270 |     pub(crate) unsafe fn recv(this: *const Self) -> T {
271 |         let r = (*this).ptr.read();
272 |         Self::wake(this, UNLOCKED);
273 |         r
274 |     }
275 |
276 |     /// Terminates the signal and notifies its waiter
277 |     /// Safety: it is only safe to call this once, on send/receive signals
278 |     /// that are not finished or terminated
279 |     pub(crate) unsafe fn terminate(this: *const Self) {
280 |         Self::wake(this, TERMINATED);
281 |     }
282 |
283 |     /// Loads the pointer data and drops it in place
284 |     /// Safety: it should only be used once, and only when the data in ptr is
285 |     /// valid and not moved.
286 |     #[cfg(feature = "async")]
287 |     pub(crate) unsafe fn load_and_drop(&self) {
288 |         _ = self.ptr.read();
289 |     }
290 |
291 |     /// Returns a signal terminator for the other side of the channel
292 |     pub(crate) fn get_terminator(&self) -> SignalTerminator<T> {
293 |         (self as *const Signal<T>).into()
294 |     }
295 | }
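// A sketch of the park/unpark handshake `wait` uses above: the waiter
// publishes its thread handle, then tries LOCKED -> LOCKED_STARVATION; the
// waking side CASes LOCKED -> final state and, only when that fails (the
// waiter is already starving), stores the state and unparks it. Illustrative
// test only; the real wake path lives in `Signal::wake`.
#[cfg(test)]
mod park_handshake_sketch {
    use std::sync::atomic::{AtomicU8, Ordering};
    use std::sync::Arc;

    #[test]
    fn handshake() {
        let state = Arc::new(AtomicU8::new(super::LOCKED));
        let waiter = {
            let state = state.clone();
            std::thread::spawn(move || {
                // waiter: move into the starvation state, then park until a
                // final state is published
                if state
                    .compare_exchange(
                        super::LOCKED,
                        super::LOCKED_STARVATION,
                        Ordering::Release,
                        Ordering::Acquire,
                    )
                    .is_ok()
                {
                    while state.load(Ordering::Acquire) >= super::LOCKED {
                        std::thread::park();
                    }
                }
                assert_eq!(state.load(Ordering::Acquire), super::UNLOCKED);
            })
        };
        // waker side: publish the final state, then unpark; the real code
        // reads the recorded thread handle instead of the JoinHandle
        std::thread::sleep(std::time::Duration::from_millis(50));
        state.store(super::UNLOCKED, Ordering::Release);
        waiter.thread().unpark();
        waiter.join().unwrap();
    }
}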
296 |
297 | pub(crate) struct SignalTerminator<T>(*const Signal<T>);
298 |
299 | impl<T> From<*const Signal<T>> for SignalTerminator<T> {
300 |     fn from(value: *const Signal<T>) -> Self {
301 |         Self(value)
302 |     }
303 | }
304 |
305 | impl<T> SignalTerminator<T> {
306 |     pub(crate) unsafe fn send(self, data: T) {
307 |         Signal::send(self.0, data)
308 |     }
309 |     #[allow(unused)]
310 |     pub(crate) unsafe fn send_copy(self, data: *const T) {
311 |         Signal::send_copy(self.0, data)
312 |     }
313 |     pub(crate) unsafe fn recv(self) -> T {
314 |         Signal::recv(self.0)
315 |     }
316 |     pub(crate) unsafe fn terminate(&self) {
317 |         Signal::terminate(self.0)
318 |     }
319 | }
320 |
321 | impl<T> PartialEq<Signal<T>> for SignalTerminator<T> {
322 |     fn eq(&self, other: &Signal<T>) -> bool {
323 |         self.0 == other as *const Signal<T>
324 |     }
325 | }
326 |
327 | // If the internal type is safe to send, SignalTerminator is safe to send.
328 | unsafe impl<T: Send> Send for SignalTerminator<T> {}
329 | // If the internal type is safe to send, Signal is safe to send.
330 | unsafe impl<T: Send> Send for Signal<T> {}
--------------------------------------------------------------------------------
/tests/async_test.rs:
--------------------------------------------------------------------------------
1 | mod utils;
2 | #[cfg(feature = "async")]
3 | mod asyncs {
4 |     use crate::utils::*;
5 |     use futures_core::FusedStream;
6 |     use kanal::{
7 |         bounded_async, unbounded_async, AsyncReceiver, AsyncSender, ReceiveError, SendError,
8 |     };
9 |     use std::{
10 |         sync::{
11 |             atomic::{AtomicUsize, Ordering},
12 |             Arc,
13 |         },
14 |         time::Duration,
15 |     };
16 |
17 |     fn new<T>(cap: Option<usize>) -> (AsyncSender<T>, AsyncReceiver<T>) {
18 |         match cap {
19 |             None => unbounded_async(),
20 |             Some(cap) => bounded_async(cap),
21 |         }
22 |     }
23 |
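    // A sketch of the rendezvous case handled by the helper above: with
    // capacity zero, `send` completes only when a matching `recv` arrives
    // (illustrative test, not part of the original suite).
    #[tokio::test]
    async fn rendezvous_sketch() {
        let (tx, rx) = new::<u64>(Some(0));
        let sender = tokio::spawn(async move { tx.send(7).await.unwrap() });
        assert_eq!(rx.recv().await.unwrap(), 7);
        sender.await.unwrap();
    }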
mpmc_dyn { 25 | ($pre:stmt,$new:expr,$cap:expr) => { 26 | let (tx, rx) = new($cap); 27 | let mut list = Vec::new(); 28 | for _ in 0..THREADS { 29 | let tx = tx.clone(); 30 | $pre 31 | let h = tokio::spawn(async move { 32 | for _i in 0..MESSAGES / THREADS { 33 | tx.send($new).await.unwrap(); 34 | } 35 | }); 36 | list.push(h); 37 | } 38 | 39 | for _ in 0..THREADS { 40 | let rx = rx.clone(); 41 | let h = tokio::spawn(async move { 42 | for _i in 0..MESSAGES / THREADS { 43 | rx.recv().await.unwrap(); 44 | } 45 | }); 46 | list.push(h); 47 | } 48 | 49 | for h in list { 50 | h.await.unwrap(); 51 | } 52 | }; 53 | } 54 | 55 | macro_rules! integrity_test { 56 | ($zero:expr,$ones:expr) => { 57 | let (tx, rx) = new(Some(0)); 58 | tokio::spawn(async move { 59 | for _ in 0..MESSAGES { 60 | tx.send($zero).await.unwrap(); 61 | tx.send($ones).await.unwrap(); 62 | } 63 | }); 64 | for _ in 0..MESSAGES { 65 | assert_eq!(rx.recv().await.unwrap(), $zero); 66 | assert_eq!(rx.recv().await.unwrap(), $ones); 67 | } 68 | }; 69 | } 70 | 71 | async fn mpsc(cap: Option) { 72 | let (tx, rx) = new(cap); 73 | let mut list = Vec::new(); 74 | 75 | for _ in 0..THREADS { 76 | let tx = tx.clone(); 77 | let h = tokio::spawn(async move { 78 | for _i in 0..MESSAGES / THREADS { 79 | tx.send(Box::new(1)).await.unwrap(); 80 | } 81 | }); 82 | list.push(h); 83 | } 84 | 85 | for _ in 0..MESSAGES { 86 | assert_eq!(rx.recv().await.unwrap(), Box::new(1)); 87 | } 88 | 89 | for h in list { 90 | h.await.unwrap(); 91 | } 92 | } 93 | 94 | async fn seq(cap: Option) { 95 | let (tx, rx) = new(cap); 96 | 97 | for _i in 0..MESSAGES { 98 | tx.send(Box::new(1)).await.unwrap(); 99 | } 100 | 101 | for _ in 0..MESSAGES { 102 | assert_eq!(rx.recv().await.unwrap(), Box::new(1)); 103 | } 104 | } 105 | 106 | async fn spsc(cap: Option) { 107 | let (tx, rx) = new(cap); 108 | 109 | tokio::spawn(async move { 110 | for _i in 0..MESSAGES { 111 | tx.send(Box::new(1)).await.unwrap(); 112 | } 113 | }); 114 | 115 | for _ in 0..MESSAGES { 116 | assert_eq!(rx.recv().await.unwrap(), Box::new(1)); 117 | } 118 | } 119 | 120 | async fn mpmc(cap: Option) { 121 | let (tx, rx) = new(cap); 122 | let mut list = Vec::new(); 123 | for _ in 0..THREADS { 124 | let tx = tx.clone(); 125 | let h = tokio::spawn(async move { 126 | for _i in 0..MESSAGES / THREADS { 127 | tx.send(Box::new(1)).await.unwrap(); 128 | } 129 | }); 130 | list.push(h); 131 | } 132 | 133 | for _ in 0..THREADS { 134 | let rx = rx.clone(); 135 | let h = tokio::spawn(async move { 136 | for _i in 0..MESSAGES / THREADS { 137 | rx.recv().await.unwrap(); 138 | } 139 | }); 140 | list.push(h); 141 | } 142 | 143 | for h in list { 144 | h.await.unwrap(); 145 | } 146 | } 147 | 148 | #[tokio::test] 149 | async fn integrity_u8() { 150 | integrity_test!(0u8, !0u8); 151 | } 152 | 153 | #[tokio::test] 154 | async fn integrity_u16() { 155 | integrity_test!(0u16, !0u16); 156 | } 157 | 158 | #[tokio::test] 159 | async fn integrity_u32() { 160 | integrity_test!(0u32, !0u32); 161 | } 162 | 163 | #[tokio::test] 164 | async fn integrity_usize() { 165 | integrity_test!(0u64, !0u64); 166 | } 167 | 168 | #[tokio::test] 169 | async fn integrity_big() { 170 | integrity_test!((0u64, 0u64, 0u64, 0u64), (!0u64, !0u64, !0u64, !0u64)); 171 | } 172 | 173 | #[tokio::test] 174 | async fn integrity_string() { 175 | integrity_test!("", "not empty"); 176 | } 177 | 178 | #[tokio::test] 179 | async fn integrity_padded_rust() { 180 | integrity_test!( 181 | Padded { 182 | a: false, 183 | b: 0x0, 184 | c: 0x0 185 | }, 186 | Padded { 187 | a: true, 188 | b: 
71 |     async fn mpsc(cap: Option<usize>) {
72 |         let (tx, rx) = new(cap);
73 |         let mut list = Vec::new();
74 |
75 |         for _ in 0..THREADS {
76 |             let tx = tx.clone();
77 |             let h = tokio::spawn(async move {
78 |                 for _i in 0..MESSAGES / THREADS {
79 |                     tx.send(Box::new(1)).await.unwrap();
80 |                 }
81 |             });
82 |             list.push(h);
83 |         }
84 |
85 |         for _ in 0..MESSAGES {
86 |             assert_eq!(rx.recv().await.unwrap(), Box::new(1));
87 |         }
88 |
89 |         for h in list {
90 |             h.await.unwrap();
91 |         }
92 |     }
93 |
94 |     async fn seq(cap: Option<usize>) {
95 |         let (tx, rx) = new(cap);
96 |
97 |         for _i in 0..MESSAGES {
98 |             tx.send(Box::new(1)).await.unwrap();
99 |         }
100 |
101 |         for _ in 0..MESSAGES {
102 |             assert_eq!(rx.recv().await.unwrap(), Box::new(1));
103 |         }
104 |     }
105 |
106 |     async fn spsc(cap: Option<usize>) {
107 |         let (tx, rx) = new(cap);
108 |
109 |         tokio::spawn(async move {
110 |             for _i in 0..MESSAGES {
111 |                 tx.send(Box::new(1)).await.unwrap();
112 |             }
113 |         });
114 |
115 |         for _ in 0..MESSAGES {
116 |             assert_eq!(rx.recv().await.unwrap(), Box::new(1));
117 |         }
118 |     }
119 |
120 |     async fn mpmc(cap: Option<usize>) {
121 |         let (tx, rx) = new(cap);
122 |         let mut list = Vec::new();
123 |         for _ in 0..THREADS {
124 |             let tx = tx.clone();
125 |             let h = tokio::spawn(async move {
126 |                 for _i in 0..MESSAGES / THREADS {
127 |                     tx.send(Box::new(1)).await.unwrap();
128 |                 }
129 |             });
130 |             list.push(h);
131 |         }
132 |
133 |         for _ in 0..THREADS {
134 |             let rx = rx.clone();
135 |             let h = tokio::spawn(async move {
136 |                 for _i in 0..MESSAGES / THREADS {
137 |                     rx.recv().await.unwrap();
138 |                 }
139 |             });
140 |             list.push(h);
141 |         }
142 |
143 |         for h in list {
144 |             h.await.unwrap();
145 |         }
146 |     }
147 |
148 |     #[tokio::test]
149 |     async fn integrity_u8() {
150 |         integrity_test!(0u8, !0u8);
151 |     }
152 |
153 |     #[tokio::test]
154 |     async fn integrity_u16() {
155 |         integrity_test!(0u16, !0u16);
156 |     }
157 |
158 |     #[tokio::test]
159 |     async fn integrity_u32() {
160 |         integrity_test!(0u32, !0u32);
161 |     }
162 |
163 |     #[tokio::test]
164 |     async fn integrity_u64() {
165 |         integrity_test!(0u64, !0u64);
166 |     }
167 |
168 |     #[tokio::test]
169 |     async fn integrity_big() {
170 |         integrity_test!((0u64, 0u64, 0u64, 0u64), (!0u64, !0u64, !0u64, !0u64));
171 |     }
172 |
173 |     #[tokio::test]
174 |     async fn integrity_string() {
175 |         integrity_test!("", "not empty");
176 |     }
177 |
178 |     #[tokio::test]
179 |     async fn integrity_padded_rust() {
180 |         integrity_test!(
181 |             Padded {
182 |                 a: false,
183 |                 b: 0x0,
184 |                 c: 0x0
185 |             },
186 |             Padded {
187 |                 a: true,
188 |                 b: 0xFF,
189 |                 c: 0xFFFFFFFF
190 |             }
191 |         );
192 |     }
193 |
194 |     #[tokio::test]
195 |     async fn integrity_padded_c() {
196 |         integrity_test!(
197 |             PaddedReprC {
198 |                 a: false,
199 |                 b: 0x0,
200 |                 c: 0x0
201 |             },
202 |             PaddedReprC {
203 |                 a: true,
204 |                 b: 0xFF,
205 |                 c: 0xFFFFFFFF
206 |             }
207 |         );
208 |     }
209 |
210 |     #[tokio::test]
211 |     async fn drop_test() {
212 |         let counter = Arc::new(AtomicUsize::new(0));
213 |         mpmc_dyn!(let counter = counter.clone(), DropTester::new(counter.clone(), 10), Some(1));
214 |         assert_eq!(counter.load(Ordering::SeqCst), MESSAGES);
215 |     }
216 |
217 |     #[tokio::test]
218 |     async fn drop_test_in_signal() {
219 |         let (s, r) = new(Some(10));
220 |
221 |         let counter = Arc::new(AtomicUsize::new(0));
222 |         let mut list = Vec::new();
223 |         for _ in 0..10 {
224 |             let counter = counter.clone();
225 |             let s = s.clone();
226 |             let c = tokio::spawn(async move {
227 |                 let _ = s.send(DropTester::new(counter, 1234)).await;
228 |             });
229 |             list.push(c);
230 |         }
231 |         r.close().unwrap();
232 |         for c in list {
233 |             c.await.unwrap();
234 |         }
235 |         assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
236 |     }
237 |
238 |     // Channel logic tests
239 |     #[tokio::test]
240 |     async fn recv_from_half_closed_queue() {
241 |         let (tx, rx) = new(Some(1));
242 |         tx.send(Box::new(1)).await.unwrap();
243 |         drop(tx);
244 |         // it's ok to receive data from the queue of a half-closed channel
245 |         assert_eq!(rx.recv().await.unwrap(), Box::new(1));
246 |     }
247 |
248 |     #[tokio::test]
249 |     async fn recv_from_half_closed_channel() {
250 |         let (tx, rx) = new::<u64>(Some(1));
251 |         drop(tx);
252 |         assert_eq!(rx.recv().await.err().unwrap(), ReceiveError::SendClosed);
253 |     }
254 |
255 |     #[tokio::test]
256 |     async fn recv_from_closed_channel() {
257 |         let (tx, rx) = new::<u64>(Some(1));
258 |         tx.close().unwrap();
259 |         assert_eq!(rx.recv().await.err().unwrap(), ReceiveError::Closed);
260 |     }
261 |
262 |     #[tokio::test]
263 |     async fn recv_from_closed_channel_queue() {
264 |         let (tx, rx) = new(Some(1));
265 |         tx.send(Box::new(1)).await.unwrap();
266 |         tx.close().unwrap();
267 |         // it's not possible to read data from the queue of a fully closed channel
268 |         assert_eq!(rx.recv().await.err().unwrap(), ReceiveError::Closed);
269 |     }
270 |
271 |     #[tokio::test]
272 |     async fn send_to_half_closed_channel() {
273 |         let (tx, rx) = new(Some(1));
274 |         drop(rx);
275 |         assert_eq!(
276 |             tx.send(Box::new(1)).await.err().unwrap(),
277 |             SendError::ReceiveClosed
278 |         );
279 |     }
280 |
281 |     #[tokio::test]
282 |     async fn send_to_closed_channel() {
283 |         let (tx, rx) = new(Some(1));
284 |         rx.close().unwrap();
285 |         assert_eq!(tx.send(Box::new(1)).await.err().unwrap(), SendError::Closed);
286 |     }
287 |
288 |     // Drop tests
289 |     #[tokio::test]
290 |     async fn recv_abort_test() {
291 |         let (_s, r) = new::<u64>(Some(10));
292 |
293 |         let mut list = Vec::new();
294 |         for _ in 0..10 {
295 |             let r = r.clone();
296 |             let c = tokio::spawn(async move {
297 |                 if r.recv().await.is_ok() {
298 |                     panic!("should not be ok");
299 |                 }
300 |             });
301 |             list.push(c);
302 |         }
303 |         tokio::time::sleep(Duration::from_millis(500)).await;
304 |         for c in list {
305 |             c.abort();
306 |         }
307 |         r.close().unwrap();
308 |     }
309 |
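    // The abort tests rely on `JoinHandle::abort` cancelling a task at its
    // next yield point, dropping the pending future and any value it still
    // owns. A minimal illustration of that contract (not kanal-specific):
    #[tokio::test]
    async fn abort_cancels_pending_task_sketch() {
        let task = tokio::spawn(async {
            tokio::time::sleep(Duration::from_secs(60)).await;
        });
        task.abort();
        assert!(task.await.unwrap_err().is_cancelled());
    }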
310 |     // Drop tests
311 |     #[tokio::test]
312 |     async fn send_abort_test() {
313 |         let (s, r) = new::<DropTester>(Some(0));
314 |         let counter = Arc::new(AtomicUsize::new(0));
315 |         let mut list = Vec::new();
316 |         for _ in 0..10 {
317 |             let s = s.clone();
318 |             let counter = counter.clone();
319 |             let c = tokio::spawn(async move {
320 |                 if s.send(DropTester::new(counter, 1234)).await.is_ok() {
321 |                     panic!("should not be ok");
322 |                 }
323 |             });
324 |             list.push(c);
325 |         }
326 |         tokio::time::sleep(Duration::from_millis(500)).await;
327 |         for c in list {
328 |             c.abort();
329 |         }
330 |         tokio::time::sleep(Duration::from_millis(500)).await;
331 |         assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
332 |         r.close().unwrap();
333 |     }
334 |
335 |     #[tokio::test]
336 |     async fn drop_test_in_queue() {
337 |         let (s, r) = new(Some(10));
338 |
339 |         let counter = Arc::new(AtomicUsize::new(0));
340 |         let mut list = Vec::new();
341 |         for _ in 0..10 {
342 |             let counter = counter.clone();
343 |             let s = s.clone();
344 |             let c = tokio::spawn(async move {
345 |                 let _ = s.send(DropTester::new(counter, 1234)).await;
346 |             });
347 |             list.push(c);
348 |         }
349 |         for c in list {
350 |             c.await.unwrap();
351 |         }
352 |         r.close().unwrap();
353 |         assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
354 |     }
355 |
356 |     #[tokio::test]
357 |     async fn drop_test_in_unused_signal() {
358 |         let (s, r) = new(Some(10));
359 |
360 |         let counter = Arc::new(AtomicUsize::new(0));
361 |         for _ in 0..10 {
362 |             let counter = counter.clone();
363 |             drop(s.send(DropTester::new(counter, 1234)));
364 |         }
365 |         r.close().unwrap();
366 |         assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
367 |     }
368 |
369 |     #[tokio::test]
370 |     async fn drop_test_send_to_closed() {
371 |         let (s, r) = new(Some(10));
372 |         r.close().unwrap();
373 |         let counter = Arc::new(AtomicUsize::new(0));
374 |         for _ in 0..10 {
375 |             let counter = counter.clone();
376 |             let _ = s.send(DropTester::new(counter, 1234)).await;
377 |         }
378 |         assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
379 |     }
380 |
381 |     #[tokio::test]
382 |     async fn drop_test_send_to_half_closed() {
383 |         let (s, r) = new(Some(10));
384 |         drop(r);
385 |         let counter = Arc::new(AtomicUsize::new(0));
386 |         for _ in 0..10 {
387 |             let counter = counter.clone();
388 |             let _ = s.send(DropTester::new(counter, 1234)).await;
389 |         }
390 |         assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
391 |     }
392 |
393 |     #[tokio::test]
394 |     async fn vec_test() {
395 |         mpmc_dyn!({}, vec![1, 2, 3], Some(1));
396 |     }
397 |
398 |     #[tokio::test]
399 |     async fn one_msg() {
400 |         let (s, r) = bounded_async::<u64>(1);
401 |         s.send(0).await.unwrap();
402 |         assert_eq!(r.recv().await.unwrap(), 0);
403 |     }
404 |
405 |     #[tokio::test]
406 |     async fn two_msg_0() {
407 |         two_msg(0).await;
408 |     }
409 |     #[tokio::test]
410 |     async fn two_msg_1() {
411 |         two_msg(1).await;
412 |     }
413 |     #[tokio::test]
414 |     async fn two_msg_2() {
415 |         two_msg(2).await;
416 |     }
417 |
418 |     #[tokio::test]
419 |     async fn mpsc_0() {
420 |         mpsc(Some(0)).await;
421 |     }
422 |
423 |     #[tokio::test]
424 |     async fn mpsc_n() {
425 |         mpsc(Some(MESSAGES)).await;
426 |     }
427 |
428 |     #[tokio::test]
429 |     async fn mpsc_u() {
430 |         mpsc(None).await;
431 |     }
432 |
433 |     #[tokio::test]
434 |     async fn mpmc_0() {
435 |         mpmc(Some(0)).await;
436 |     }
437 |
438 |     #[tokio::test]
439 |     async fn mpmc_n() {
440 |         mpmc(Some(MESSAGES)).await;
441 |     }
442 |
443 |     #[tokio::test]
444 |     async fn mpmc_u() {
445 |         mpmc(None).await;
446 |     }
447 |
448 |     #[tokio::test]
449 |     async fn spsc_0() {
450 |         spsc(Some(0)).await;
451 |     }
452 |
453 |     #[tokio::test]
454 |     async fn spsc_1() {
455 |         spsc(Some(1)).await;
456 |     }
457 |
458 |     #[tokio::test]
459 |     async fn spsc_n() {
460 |         spsc(Some(MESSAGES)).await;
461 |     }
462 |
463 |     #[tokio::test]
464 |     async fn spsc_u() {
465 |         spsc(None).await;
466 |     }
467 |
468 |     #[tokio::test]
469 |     async fn seq_n() {
470 |         seq(Some(MESSAGES)).await;
471 |     }
472 |
473 |     #[tokio::test]
474 |     async fn seq_u() {
475 |         seq(None).await;
476 |     }
477 |
478 |     #[tokio::test]
479 |     async fn stream() {
480 |         use futures::stream::StreamExt;
481 |         let (s, r) = new(Some(0));
482 |         tokio::spawn(async move {
483 |             for i in 0..MESSAGES {
484 |                 s.send(i).await.unwrap();
485 |             }
486 |         });
487 |         let mut stream = r.stream();
488 |
489 |         assert!(!stream.is_terminated());
490 |         for i in 0..MESSAGES {
491 |             assert_eq!(stream.next().await.unwrap(), i);
492 |         }
493 |         assert_eq!(stream.next().await, None);
494 |         assert!(stream.is_terminated());
495 |         assert_eq!(stream.next().await, None);
496 |     }
497 |
498 |     async fn two_msg(size: usize) {
499 |         let (s, r) = bounded_async::<u64>(size);
500 |         tokio::spawn(async move {
501 |             s.send(0).await.unwrap();
502 |             s.send(1).await.unwrap();
503 |         });
504 |         assert_eq!(r.recv().await.unwrap(), 0);
505 |         assert_eq!(r.recv().await.unwrap(), 1);
506 |     }
507 |
508 |     #[tokio::test]
509 |     async fn spsc_overaligned_zst() {
510 |         #[repr(align(1024))]
511 |         struct Foo;
512 |
513 |         let (tx, rx) = new(Some(0));
514 |
515 |         tokio::spawn(async move {
516 |             for _i in 0..MESSAGES {
517 |                 tx.send(Foo).await.unwrap();
518 |             }
519 |         });
520 |
521 |         for _ in 0..MESSAGES {
522 |             rx.recv().await.unwrap();
523 |         }
524 |     }
525 | }
--------------------------------------------------------------------------------
/tests/sync_test.rs:
--------------------------------------------------------------------------------
1 | mod utils;
2 | use utils::*;
3 |
4 | use kanal::{bounded, unbounded, ReceiveError, Receiver, SendError, Sender};
5 | use std::{
6 |     sync::{
7 |         atomic::{AtomicUsize, Ordering},
8 |         Arc,
9 |     },
10 |     thread,
11 |     time::Duration,
12 | };
13 |
14 | fn new<T>(cap: Option<usize>) -> (Sender<T>, Receiver<T>) {
15 |     match cap {
16 |         None => unbounded(),
17 |         Some(cap) => bounded(cap),
18 |     }
19 | }
20 |
21 | fn delay() {
22 |     std::thread::sleep(Duration::from_millis(10));
23 | }
24 |
25 | fn mpmc(cap: Option<usize>) {
26 |     let (tx, rx) = new(cap);
27 |
28 |     crossbeam::scope(|scope| {
29 |         for _ in 0..THREADS {
30 |             scope.spawn(|_| {
31 |                 for _i in 0..MESSAGES / THREADS {
32 |                     tx.send(Box::new(1)).unwrap();
33 |                 }
34 |             });
35 |         }
36 |
37 |         for _ in 0..THREADS {
38 |             scope.spawn(|_| {
39 |                 for _ in 0..MESSAGES / THREADS {
40 |                     assert_eq!(rx.recv().unwrap(), Box::new(1));
41 |                 }
42 |             });
43 |         }
44 |     })
45 |     .unwrap();
46 | }
47 |
48 | macro_rules! mpmc_dyn {
49 |     ($new:expr,$cap:expr) => {
50 |         let (tx, rx) = new($cap);
51 |
52 |         crossbeam::scope(|scope| {
53 |             for _ in 0..THREADS {
54 |                 scope.spawn(|_| {
55 |                     for _i in 0..MESSAGES / THREADS {
56 |                         tx.send($new).unwrap();
57 |                     }
58 |                 });
59 |             }
60 |
61 |             for _ in 0..THREADS {
62 |                 scope.spawn(|_| {
63 |                     for _ in 0..MESSAGES / THREADS {
64 |                         rx.recv().unwrap();
65 |                     }
66 |                 });
67 |             }
68 |         })
69 |         .unwrap();
70 |     };
71 | }
72 |
integrity_test { 74 | ($zero:expr,$ones:expr) => { 75 | let (tx, rx) = new(Some(0)); 76 | crossbeam::scope(|scope| { 77 | scope.spawn(|_| { 78 | for _ in 0..MESSAGES { 79 | tx.send($zero).unwrap(); 80 | tx.send($ones).unwrap(); 81 | } 82 | }); 83 | for _ in 0..MESSAGES { 84 | assert_eq!(rx.recv().unwrap(), $zero); 85 | assert_eq!(rx.recv().unwrap(), $ones); 86 | } 87 | }) 88 | .unwrap(); 89 | }; 90 | } 91 | 92 | fn mpsc(cap: Option) { 93 | let (tx, rx) = new(cap); 94 | 95 | crossbeam::scope(|scope| { 96 | for _ in 0..THREADS { 97 | scope.spawn(|_| { 98 | for _i in 0..MESSAGES / THREADS { 99 | tx.send(Box::new(1)).unwrap(); 100 | } 101 | }); 102 | } 103 | 104 | for _ in 0..MESSAGES { 105 | assert_eq!(rx.recv().unwrap(), Box::new(1)); 106 | } 107 | }) 108 | .unwrap(); 109 | } 110 | 111 | fn seq(cap: Option) { 112 | let (tx, rx) = new(cap); 113 | 114 | for _i in 0..MESSAGES { 115 | tx.send(Box::new(1)).unwrap(); 116 | } 117 | 118 | for _ in 0..MESSAGES { 119 | assert_eq!(rx.recv().unwrap(), Box::new(1)); 120 | } 121 | } 122 | 123 | fn spsc(cap: Option) { 124 | let (tx, rx) = new(cap); 125 | 126 | crossbeam::scope(|scope| { 127 | scope.spawn(|_| { 128 | for _i in 0..MESSAGES { 129 | tx.send(Box::new(1)).unwrap(); 130 | } 131 | }); 132 | 133 | for _ in 0..MESSAGES { 134 | assert_eq!(rx.recv().unwrap(), Box::new(1)); 135 | } 136 | }) 137 | .unwrap(); 138 | } 139 | 140 | #[test] 141 | fn spsc_delayed_receive() { 142 | let (tx, rx) = new(0.into()); 143 | crossbeam::scope(|scope| { 144 | scope.spawn(|_| { 145 | for _i in 0..10 { 146 | tx.send(Box::new(1)).unwrap(); 147 | } 148 | }); 149 | 150 | for _ in 0..10 { 151 | delay(); 152 | assert_eq!(rx.recv().unwrap(), Box::new(1)); 153 | } 154 | }) 155 | .unwrap(); 156 | } 157 | 158 | #[test] 159 | fn spsc_delayed_send() { 160 | let (tx, rx) = new(0.into()); 161 | crossbeam::scope(|scope| { 162 | scope.spawn(|_| { 163 | for _i in 0..10 { 164 | delay(); 165 | tx.send(Box::new(1)).unwrap(); 166 | } 167 | }); 168 | 169 | for _ in 0..10 { 170 | assert_eq!(rx.recv().unwrap(), Box::new(1)); 171 | } 172 | }) 173 | .unwrap(); 174 | } 175 | 176 | #[test] 177 | fn spsc_overaligned_zst() { 178 | #[repr(align(1024))] 179 | struct Foo; 180 | 181 | let (tx, rx) = new(0.into()); 182 | crossbeam::scope(|scope| { 183 | scope.spawn(|_| { 184 | for _i in 0..10 { 185 | delay(); 186 | tx.send(Foo).unwrap(); 187 | } 188 | }); 189 | 190 | for _ in 0..10 { 191 | rx.recv().unwrap(); 192 | } 193 | }) 194 | .unwrap(); 195 | } 196 | 197 | #[test] 198 | fn integrity_u8() { 199 | integrity_test!(0u8, !0u8); 200 | } 201 | 202 | #[test] 203 | fn integrity_u16() { 204 | integrity_test!(0u16, !0u16); 205 | } 206 | 207 | #[test] 208 | fn integrity_u32() { 209 | integrity_test!(0u32, !0u32); 210 | } 211 | 212 | #[test] 213 | fn integrity_u64() { 214 | integrity_test!(0u64, !0u64); 215 | } 216 | 217 | #[test] 218 | fn integrity_box_u64() { 219 | integrity_test!(Box::new(0u64), Box::new(!0u64)); 220 | } 221 | 222 | #[test] 223 | fn integrity_big() { 224 | integrity_test!((0u64, 0u64, 0u64, 0u64), (!0u64, !0u64, !0u64, !0u64)); 225 | } 226 | 227 | #[test] 228 | fn integrity_string() { 229 | integrity_test!("", "not empty"); 230 | } 231 | 232 | #[test] 233 | fn integrity_big_tail() { 234 | integrity_test!((0u64, 0u64, 0u64, 0u8), (!0u64, !0u64, !0u64, !0u8)); 235 | } 236 | 237 | #[test] 238 | fn integrity_padded_rust() { 239 | integrity_test!( 240 | Padded { 241 | a: false, 242 | b: 0x0, 243 | c: 0x0 244 | }, 245 | Padded { 246 | a: true, 247 | b: 0xFF, 248 | c: 0xFFFFFFFF 249 | } 250 | ); 251 
252 |
253 | #[test]
254 | fn integrity_padded_c() {
255 |     integrity_test!(
256 |         PaddedReprC {
257 |             a: false,
258 |             b: 0x0,
259 |             c: 0x0
260 |         },
261 |         PaddedReprC {
262 |             a: true,
263 |             b: 0xFF,
264 |             c: 0xFFFFFFFF
265 |         }
266 |     );
267 | }
268 |
269 | #[test]
270 | fn single_message() {
271 |     let (tx, rx) = new(Some(1));
272 |     tx.send(Box::new(1)).unwrap();
273 |     assert_eq!(rx.recv().unwrap(), Box::new(1));
274 | }
275 |
276 | // Channel logic tests
277 | #[test]
278 | fn recv_from_half_closed_queue() {
279 |     let (tx, rx) = new(Some(1));
280 |     tx.send(Box::new(1)).unwrap();
281 |     drop(tx);
282 |     // it's ok to receive data from the queue of a half-closed channel
283 |     assert_eq!(rx.recv().unwrap(), Box::new(1));
284 | }
285 |
286 | #[test]
287 | fn drain_into_test() {
288 |     const TEST_LENGTH: usize = 1000;
289 |     let (tx, rx) = new(Some(TEST_LENGTH));
290 |     for i in 0..TEST_LENGTH {
291 |         tx.send(Box::new(i)).unwrap();
292 |     }
293 |     let mut vec = Vec::new();
294 |     assert_eq!(rx.drain_into(&mut vec).unwrap(), TEST_LENGTH);
295 |     assert_eq!(vec.len(), TEST_LENGTH);
296 |     for (i, v) in vec.iter().enumerate() {
297 |         assert_eq!(**v, i);
298 |     }
299 | }
300 |
301 | #[test]
302 | fn drain_into_test_zero_sized() {
303 |     const TEST_LENGTH: usize = 100;
304 |     let (tx, rx) = new(None);
305 |     for _ in 0..TEST_LENGTH {
306 |         let tx = tx.clone();
307 |         thread::spawn(move || {
308 |             tx.send(0xff).unwrap();
309 |         });
310 |     }
311 |     std::thread::sleep(Duration::from_millis(1000));
312 |     let mut vec = Vec::new();
313 |     assert_eq!(rx.drain_into(&mut vec).unwrap(), TEST_LENGTH);
314 |     assert_eq!(vec.len(), TEST_LENGTH);
315 |     for v in vec.iter() {
316 |         assert_eq!(*v, 0xff);
317 |     }
318 | }
319 |
320 | #[test]
321 | fn recv_from_half_closed_channel() {
322 |     let (tx, rx) = new::<u64>(Some(1));
323 |     drop(tx);
324 |     assert_eq!(rx.recv().err().unwrap(), ReceiveError::SendClosed);
325 | }
326 |
327 | #[test]
328 | fn recv_from_closed_channel() {
329 |     let (tx, rx) = new::<u64>(Some(1));
330 |     tx.close().unwrap();
331 |     assert_eq!(rx.recv().err().unwrap(), ReceiveError::Closed);
332 | }
333 |
334 | #[test]
335 | fn recv_from_closed_channel_queue() {
336 |     let (tx, rx) = new(Some(1));
337 |     tx.send(Box::new(1)).unwrap();
338 |     tx.close().unwrap();
339 |     // it's not possible to read data from the queue of a fully closed channel
340 |     assert_eq!(rx.recv().err().unwrap(), ReceiveError::Closed);
341 | }
342 |
343 | #[test]
344 | fn send_to_half_closed_channel() {
345 |     let (tx, rx) = new(Some(1));
346 |     drop(rx);
347 |     assert_eq!(
348 |         tx.send(Box::new(1)).err().unwrap(),
349 |         SendError::ReceiveClosed
350 |     );
351 | }
352 |
353 | #[test]
354 | fn send_to_closed_channel() {
355 |     let (tx, rx) = new(Some(1));
356 |     rx.close().unwrap();
357 |     assert_eq!(tx.send(Box::new(1)).err().unwrap(), SendError::Closed);
358 | }
359 |
360 | // Channel drop tests
361 | #[test]
362 | fn drop_test() {
363 |     let counter = Arc::new(AtomicUsize::new(0));
364 |     mpmc_dyn!(DropTester::new(counter.clone(), 10), Some(1));
365 |     assert_eq!(counter.load(Ordering::SeqCst), MESSAGES);
366 | }
367 |
368 | #[test]
369 | fn drop_test_in_queue() {
370 |     let counter = Arc::new(AtomicUsize::new(0));
371 |     let (s, r) = new(Some(10));
372 |     for _ in 0..10 {
373 |         s.send(DropTester::new(counter.clone(), 1234)).unwrap();
374 |     }
375 |     r.close().unwrap();
376 |     assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
377 | }
378 |
379 | #[test]
380 | fn drop_test_send_to_closed() {
381 |     let counter = Arc::new(AtomicUsize::new(0));
382 |     let (s, r) = new(Some(10));
383 |     r.close().unwrap();
384 |     for _ in 0..10 {
385 |         // will fail
386 |         let _ = s.send(DropTester::new(counter.clone(), 1234));
387 |     }
388 |     assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
389 | }
390 |
391 | #[test]
392 | fn drop_test_send_to_half_closed() {
393 |     let counter = Arc::new(AtomicUsize::new(0));
394 |     let (s, r) = new(Some(10));
395 |     drop(r);
396 |     for _ in 0..10 {
397 |         // will fail
398 |         let _ = s.send(DropTester::new(counter.clone(), 1234));
399 |     }
400 |     assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
401 | }
402 |
403 | #[test]
404 | fn drop_test_in_signal() {
405 |     let (s, r) = new(Some(0));
406 |
407 |     crossbeam::scope(|scope| {
408 |         let counter = Arc::new(AtomicUsize::new(0));
409 |         let mut list = Vec::new();
410 |         for _ in 0..10 {
411 |             let counter = counter.clone();
412 |             let s = s.clone();
413 |             let t = scope.spawn(move |_| {
414 |                 let _ = s.send(DropTester::new(counter.clone(), 1234));
415 |             });
416 |             list.push(t);
417 |         }
418 |         std::thread::sleep(Duration::from_millis(1000));
419 |         r.close().unwrap();
420 |         for t in list {
421 |             t.join().unwrap();
422 |         }
423 |         assert_eq!(counter.load(Ordering::SeqCst), 10_usize);
424 |     })
425 |     .unwrap();
426 | }
427 |
428 | #[test]
429 | fn vec_test() {
430 |     mpmc_dyn!(vec![1, 2, 3], Some(1));
431 | }
432 |
433 | #[test]
434 | fn seq_n() {
435 |     seq(Some(MESSAGES));
436 | }
437 |
438 | #[test]
439 | fn seq_u() {
440 |     seq(None);
441 | }
442 |
443 | #[test]
444 | fn spsc_1() {
445 |     spsc(Some(1));
446 | }
447 | #[test]
448 | fn spsc_0() {
449 |     spsc(Some(0));
450 | }
451 | #[test]
452 | fn spsc_n() {
453 |     spsc(Some(MESSAGES));
454 | }
455 | #[test]
456 | fn spsc_u() {
457 |     spsc(None);
458 | }
459 |
460 | #[test]
461 | fn mpsc_1() {
462 |     mpsc(Some(1));
463 | }
464 | #[test]
465 | fn mpsc_0() {
466 |     mpsc(Some(0));
467 | }
468 | #[test]
469 | fn mpsc_n() {
470 |     mpsc(Some(MESSAGES));
471 | }
472 | #[test]
473 | fn mpsc_u() {
474 |     mpsc(None);
475 | }
476 |
477 | #[test]
478 | fn mpmc_1() {
479 |     mpmc(Some(1));
480 | }
481 | #[test]
482 | fn mpmc_0() {
483 |     mpmc(Some(0));
484 | }
485 | #[test]
486 | fn mpmc_n() {
487 |     mpmc(Some(MESSAGES));
488 | }
489 | #[test]
490 | fn mpmc_u() {
491 |     mpmc(None);
492 | }
493 |
--------------------------------------------------------------------------------
/tests/utils/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 |     sync::{
3 |         atomic::{AtomicUsize, Ordering},
4 |         Arc,
5 |     },
6 |     task::Wake,
7 |     thread::Thread,
8 | };
9 |
10 | #[cfg(not(miri))]
11 | #[allow(dead_code)]
12 | pub const MESSAGES: usize = 100000;
13 | #[cfg(miri)]
14 | #[allow(dead_code)]
15 | pub const MESSAGES: usize = 32;
16 | #[allow(dead_code)]
17 | pub const THREADS: usize = 8;
18 |
19 | #[derive(PartialEq, Eq, Debug)]
20 | pub struct Padded {
21 |     pub a: bool,
22 |     pub b: u8,
23 |     pub c: u32,
24 | }
25 |
26 | #[derive(PartialEq, Eq, Debug)]
27 | #[repr(C)]
28 | pub struct PaddedReprC {
29 |     pub a: bool,
30 |     pub b: u8,
31 |     pub c: u32,
32 | }
33 |
34 | pub struct DropTester {
35 |     i: usize,
36 |     dropped: bool,
37 |     counter: Arc<AtomicUsize>,
38 | }
39 |
40 | impl Drop for DropTester {
41 |     fn drop(&mut self) {
42 |         if self.dropped {
43 |             panic!("double dropped");
44 |         }
45 |         if self.i == 0 {
46 |             panic!("bug: i=0 is invalid value for drop tester");
47 |         }
48 |         self.dropped = true;
49 |         self.counter.fetch_add(1, Ordering::SeqCst);
50 |     }
51 | }
52 |
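// A sketch of the counting contract the channel drop tests rely on: each
// DropTester bumps the shared counter exactly once, at drop time
// (illustrative test, not part of the original module).
#[test]
fn drop_tester_counts_once_sketch() {
    let counter = Arc::new(AtomicUsize::new(0));
    drop(DropTester::new(counter.clone(), 1));
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}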
53 | impl DropTester {
54 |     #[allow(dead_code)]
55 |     pub fn new(counter: Arc<AtomicUsize>, i: usize) -> Self {
56 |         if i == 0 {
57 |             panic!("don't initialize DropTester with 0");
58 |         }
59 |         Self {
60 |             i,
61 |             dropped: false,
62 |             counter,
63 |         }
64 |     }
65 | }
66 |
67 | pub struct ThreadWaker(Thread);
68 |
69 | impl ThreadWaker {
70 |     #[allow(dead_code)]
71 |     pub fn new() -> Arc<Self> {
72 |         Self(std::thread::current()).into()
73 |     }
74 | }
75 |
76 | impl Wake for ThreadWaker {
77 |     fn wake(self: Arc<Self>) {
78 |         self.0.unpark()
79 |     }
80 | }
81 |
--------------------------------------------------------------------------------
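A minimal sketch of how `ThreadWaker` above can drive a future to completion
on the current thread; `block_on` is an illustrative name, not part of the
crate or its tests, and it uses only std APIs (`Waker::from(Arc<impl Wake>)`
and `pin!`):

use std::future::Future;
use std::pin::pin;
use std::task::{Context, Poll, Waker};

fn block_on<F: Future>(fut: F) -> F::Output {
    // ThreadWaker::new() captures the current thread; wake() unparks it
    let waker = Waker::from(ThreadWaker::new());
    let mut cx = Context::from_waker(&waker);
    let mut fut = pin!(fut);
    loop {
        match fut.as_mut().poll(&mut cx) {
            Poll::Ready(out) => return out,
            // park until the waker's unpark; spurious wakeups just re-poll
            Poll::Pending => std::thread::park(),
        }
    }
}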