├── .gitignore
├── Cargo.toml
├── src
│   ├── atomic_task.rs
│   ├── atomic_u64.rs
│   └── lib.rs
└── tests
    └── test_broadcast.rs

/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | Cargo.lock
3 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "futures-broadcast"
3 | version = "0.1.0"
4 | authors = ["Carl Lerche "]
5 | license = "MIT/Apache-2.0"
6 | readme = "README.md"
7 | keywords = ["futures", "async", "future"]
8 | repository = "https://github.com/carllerche/futures-broadcast"
9 | homepage = "https://github.com/carllerche/futures-broadcast"
10 | documentation = "https://docs.rs/futures-broadcast"
11 | description = """
12 | A futures aware, bounded, MPMC broadcast channel with back pressure support.
13 | """
14 | 
15 | [dependencies]
16 | futures = "0.1"
--------------------------------------------------------------------------------
/src/atomic_task.rs:
--------------------------------------------------------------------------------
1 | use futures::task::{self, Task};
2 | 
3 | use std::cell::UnsafeCell;
4 | use std::sync::atomic::AtomicUsize;
5 | use std::sync::atomic::Ordering::{Acquire, Release};
6 | 
7 | /// A coordinated `Task` handle enabling concurrent operations on a task.
8 | pub struct AtomicTask {
9 |     state: AtomicUsize,
10 |     task: UnsafeCell<Task>,
11 | }
12 | 
13 | /// Initial state, the `AtomicTask` is currently not being used.
14 | ///
15 | /// The value `2` is picked specifically because it is between the write lock &
16 | /// read lock values. Since the read lock is represented by an incrementing
17 | /// counter, this enables an atomic fetch_sub operation to be used for releasing
18 | /// a lock.
19 | const WAITING: usize = 2;
20 | 
21 | /// The `park` function has determined that the task is no longer current. This
22 | /// implies that `AtomicTask::park` is being called from a different task than
23 | /// is represented by the currently stored task. The write lock is obtained to
24 | /// update the task cell.
25 | const LOCKED_WRITE: usize = 0;
26 | 
27 | /// At least one call to `unpark` happened concurrently to `park` updating the
28 | /// task cell. This state is detected when `park` exits the mutation code and
29 | /// signals to `park` that it is responsible for notifying its own task.
30 | const LOCKED_WRITE_NOTIFIED: usize = 1;
31 | 
32 | 
33 | /// The `unpark` function has locked access to the task cell for notification.
34 | ///
35 | /// The constant is left here mostly for documentation reasons.
36 | #[allow(dead_code)]
37 | const LOCKED_READ: usize = 3;
38 | 
39 | impl AtomicTask {
40 |     /// Create an `AtomicTask` initialized with the given `Task`
41 |     pub fn new(task: Task) -> AtomicTask {
42 |         // Make sure that `Task` is `Sync`
43 |         fn is_sync<T: Sync>() {}
44 |         is_sync::<Task>();
45 | 
46 |         AtomicTask {
47 |             state: AtomicUsize::new(WAITING),
48 |             task: UnsafeCell::new(task),
49 |         }
50 |     }
51 | 
52 |     /// This function is unsafe as it requires the caller to ensure mutual
53 |     /// exclusion.
54 |     pub unsafe fn park(&self) {
55 |         if (*self.task.get()).is_current() {
56 |             // Nothing more to do here
57 |             return;
58 |         }
59 | 
60 |         // Get a new task handle
61 |         let task = task::park();
62 | 
63 |         match self.state.compare_and_swap(WAITING, LOCKED_WRITE, Acquire) {
64 |             WAITING => {
65 |                 // Lock acquired, update the task cell
66 |                 *self.task.get() = task;
67 | 
68 |                 // Release the lock. If the state transitioned to
69 |                 // `LOCKED_WRITE_NOTIFIED`, this means that an unpark has been
70 |                 // signaled, so unpark the task.
71 |                 if LOCKED_WRITE_NOTIFIED == self.state.swap(WAITING, Release) {
72 |                     (*self.task.get()).unpark();
73 |                 }
74 |             }
75 |             state => {
76 |                 debug_assert!(state != LOCKED_WRITE, "unexpected state LOCKED_WRITE");
77 |                 debug_assert!(state != LOCKED_WRITE_NOTIFIED, "unexpected state LOCKED_WRITE_NOTIFIED");
78 | 
79 |                 // Currently in a read locked state, this implies that `unpark`
80 |                 // is currently being called on the old task handle. So, we call
81 |                 // unpark on the new task handle
82 |                 task.unpark();
83 |             }
84 |         }
85 |     }
86 | 
87 |     pub fn unpark(&self) {
88 |         let mut curr = WAITING;
89 | 
90 |         loop {
91 |             if curr == LOCKED_WRITE {
92 |                 // Transition the state to LOCKED_WRITE_NOTIFIED
93 |                 let actual = self.state.compare_and_swap(LOCKED_WRITE, LOCKED_WRITE_NOTIFIED, Release);
94 | 
95 |                 if curr == actual {
96 |                     // Success, return
97 |                     return;
98 |                 }
99 | 
100 |                 // update current state variable and try again
101 |                 curr = actual;
102 | 
103 |             } else if curr == LOCKED_WRITE_NOTIFIED {
104 |                 // Currently in `LOCKED_WRITE_NOTIFIED` state, nothing else to do.
105 |                 return;
106 | 
107 |             } else {
108 |                 // Currently in a LOCKED_READ state, so attempt to increment the
109 |                 // lock count.
110 |                 let actual = self.state.compare_and_swap(curr, curr + 1, Acquire);
111 | 
112 |                 // Lock acquired
113 |                 if actual == curr {
114 |                     // Notify the task
115 |                     unsafe { (*self.task.get()).unpark() };
116 | 
117 |                     // Release the lock
118 |                     self.state.fetch_sub(1, Release);
119 | 
120 |                     // Done
121 |                     return;
122 |                 }
123 | 
124 |                 // update current state variable and try again
125 |                 curr = actual;
126 | 
127 |             }
128 |         }
129 |     }
130 | }
131 | 
132 | unsafe impl Send for AtomicTask {}
133 | unsafe impl Sync for AtomicTask {}
134 | 
--------------------------------------------------------------------------------
/src/atomic_u64.rs:
--------------------------------------------------------------------------------
1 | #![allow(dead_code)]
2 | 
3 | pub use self::imp::AtomicU64;
4 | 
5 | #[cfg(target_pointer_width = "64")]
6 | mod imp {
7 |     use std::sync::atomic::{AtomicUsize, Ordering};
8 | 
9 |     pub struct AtomicU64 {
10 |         v: AtomicUsize,
11 |     }
12 | 
13 |     impl AtomicU64 {
14 |         pub fn new(v: u64) -> AtomicU64 {
15 |             AtomicU64 { v: AtomicUsize::new(v as usize) }
16 |         }
17 | 
18 |         pub fn load(&self, order: Ordering) -> u64 {
19 |             self.v.load(order) as u64
20 |         }
21 | 
22 |         pub fn store(&self, val: u64, order: Ordering) {
23 |             self.v.store(val as usize, order);
24 |         }
25 | 
26 |         pub fn swap(&self, val: u64, order: Ordering) -> u64 {
27 |             self.v.swap(val as usize, order) as u64
28 |         }
29 | 
30 |         pub fn compare_and_swap(&self, old: u64, new: u64, order: Ordering) -> u64 {
31 |             self.v.compare_and_swap(old as usize, new as usize, order) as u64
32 |         }
33 | 
34 |         pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
35 |             self.v.fetch_add(val as usize, order) as u64
36 |         }
37 | 
38 |         pub fn fetch_sub(&self, val: u64, order: Ordering) -> u64 {
39 |             self.v.fetch_sub(val as usize, order) as u64
40 |         }
41 | 
42 |         pub fn fetch_and(&self, val: u64, order: Ordering) -> u64 {
43 |             self.v.fetch_and(val as usize, order) as u64
44 |         }
45 | 
46 |         pub fn fetch_or(&self, val: u64, order: Ordering) -> u64 {
47 |             self.v.fetch_or(val as usize, order) as u64
48 |         }
49 | 
50 |         pub fn fetch_xor(&self, val: u64, order: Ordering) -> u64 {
51 |             self.v.fetch_xor(val as usize, order) as u64
52 |         }
53 |     }
54 | }
55 | 
56 | #[cfg(not(target_pointer_width = 
"64"))] 57 | mod imp { 58 | #![allow(unused_variables)] // order is not used 59 | 60 | use std::sync::Mutex; 61 | use std::sync::atomic::Ordering; 62 | 63 | pub struct AtomicU64 { 64 | v: Mutex, 65 | } 66 | 67 | impl AtomicU64 { 68 | pub fn new(v: u64) -> AtomicU64 { 69 | AtomicU64 { v: Mutex::new(v) } 70 | } 71 | 72 | pub fn load(&self, order: Ordering) -> u64 { 73 | *self.v.lock().unwrap() 74 | } 75 | 76 | pub fn store(&self, val: u64, order: Ordering) { 77 | *self.v.lock().unwrap() = val 78 | } 79 | 80 | pub fn swap(&self, val: u64, order: Ordering) -> u64 { 81 | let mut lock = self.v.lock().unwrap(); 82 | let prev = *lock; 83 | *lock = val; 84 | prev 85 | } 86 | 87 | pub fn compare_and_swap(&self, old: u64, new: u64, order: Ordering) -> u64 { 88 | let mut lock = self.v.lock().unwrap(); 89 | let prev = *lock; 90 | 91 | if prev != old { 92 | return prev; 93 | } 94 | 95 | *lock = new; 96 | prev 97 | } 98 | 99 | pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 { 100 | let mut lock = self.v.lock().unwrap(); 101 | let prev = *lock; 102 | *lock = prev + val; 103 | prev 104 | } 105 | 106 | pub fn fetch_sub(&self, val: u64, order: Ordering) -> u64 { 107 | let mut lock = self.v.lock().unwrap(); 108 | let prev = *lock; 109 | *lock = prev - val; 110 | prev 111 | } 112 | 113 | pub fn fetch_and(&self, val: u64, order: Ordering) -> u64 { 114 | let mut lock = self.v.lock().unwrap(); 115 | let prev = *lock; 116 | *lock = prev & val; 117 | prev 118 | } 119 | 120 | pub fn fetch_or(&self, val: u64, order: Ordering) -> u64 { 121 | let mut lock = self.v.lock().unwrap(); 122 | let prev = *lock; 123 | *lock = prev | val; 124 | prev 125 | } 126 | 127 | pub fn fetch_xor(&self, val: u64, order: Ordering) -> u64 { 128 | let mut lock = self.v.lock().unwrap(); 129 | let prev = *lock; 130 | *lock = prev ^ val; 131 | prev 132 | } 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A multi-producer, multi-consumer, futures-aware broadcast channel with back 2 | //! pressure. 3 | //! 4 | //! A broadcast channel can be used as a communication primitive between tasks 5 | //! running on `futures-rs` executors. Unlike normal channels, each broadcast 6 | //! channel `Receiver` will receive every value sent by `Sender` handles. 7 | //! 8 | //! Broadcast channels are backed by a pre-allocated vector of slots. The 9 | //! capacity is specified at creation. 10 | //! 11 | //! # Producers 12 | //! 13 | //! `Sender` implements `Sink` and allows a task to send values to all 14 | //! outstanding consumers. If the underlying value buffer is full due to slow 15 | //! consumers, then the send operation will fail with `NotReady` and the task 16 | //! will be notified once there is additional capacity available. 17 | //! 18 | //! **Note**: A limitation of the broadcast channel is that the capacity 19 | //! specified at creation time also limits the number of outstanding producer 20 | //! handles. In other words, if a broadcast channel is created with a capacity 21 | //! of `16`, then there can only be at most 16 `Sender` handles in existence at 22 | //! the same time. 23 | //! 24 | //! # Consumers 25 | //! 26 | //! `Receiver` implements `Stream` and allows a task to read values out of the 27 | //! channel. `Receiver` also has a `recv` function which returns a reference to 28 | //! the sent value. This allows all consumers to observe the value without 29 | //! requiring cloning. 
30 | //!
31 | //! If there is no new value to read from the channel, the task will be notified
32 | //! once the next value is sent.
33 | //!
34 | //! **Note**: Since all consumers observe all values sent, any single consumer
35 | //! could cause the channel's capacity to fill up. As such, users of the
36 | //! broadcast channel must take care to respond swiftly to notifications in
37 | //! order to avoid blocking producers.
38 | 
39 | // The broadcast channel is based on an MPMC array queue. It is similar to the
40 | // bounded MPMC queue described by 1024cores [1]. However, it differs in that
41 | // all consumers must see all values. This is done by having each entry in the
42 | // buffer track the number of remaining consumers. When a value is pushed into
43 | // the channel, the current number of outstanding consumers is used to
44 | // initialize the "remaining consumers" value. Whenever a consumer sees the
45 | // value, this number is decremented. When it reaches zero, the entry is freed
46 | // and made available again to the producers.
47 | //
48 | // Similar to the dynamic mpsc channel [2], each sender gets one "guaranteed"
49 | // slot in order to hold a message. This allows `Sender` to know for a fact that
50 | // a send will succeed *before* starting to do the actual work. If this
51 | // dedicated slot is in use, then the `Sender` is unable to send a value and
52 | // `NotReady` will be returned on a send attempt.
53 | //
54 | // The channel is implemented without using locks. Each entry manages its own
55 | // state with a single atomic variable. A single atomic variable is used to
56 | // manage the number of outstanding receiver handles as well as the index of the
57 | // next available entry.
58 | //
59 | // # Operations
60 | //
61 | // The steps for sending a value are roughly:
62 | //
63 | // 1) Claim a slot by fetching and incrementing the next available entry
64 | //    variable. This also returns the number of outstanding receive handles.
65 | // 2) If the entry is occupied, use the `WaitingTx` node to hold the value until
66 | //    the entry becomes available.
67 | // 3) Store the value in the entry.
68 | // 4) Toggle the entry state to indicate that a value is present.
69 | // 5) Notify any waiting consumers.
70 | //
71 | // The steps for receiving a value are roughly:
72 | //
73 | // 1) Read the state of the entry at the receiver's current position.
74 | // 2) If there is no value, wait for one to become available.
75 | // 3) Observe the value (via a reference to the value in the buffer).
76 | // 4) Decrement the atomic tracking the number of remaining consumers to observe
77 | //    the value.
78 | // 5) If this is the last consumer, continue, otherwise return.
79 | // 6) Drop the value from the buffer.
80 | // 7) If a producer is waiting for the slot, immediately let it place the value
81 | //    stored in its waiter node into the newly available entry. The entry is
82 | //    now in use with the value.
83 | // 8) If no producer is waiting, then toggle the entry state to indicate that
84 | //    the entry is now available.
85 | //
86 | // The specific details are a bit more complicated and are documented inline.
87 | //
88 | // # Entry state
89 | //
90 | // Each entry maintains an atomic variable which is used to track the state of
91 | // the entry and is used to handle memory ordering. The data tracked by the entry
92 | // state consists of:
93 | //
94 | // * Pointer to the next consumer waiter node
95 | // * Sequence value, this is either 0 or 1.
96 | // * Toggle flag, which has a different meaning if it is the consumer or the 97 | // producer which flips it. 98 | // 99 | // This state is stored in a single AtomicPtr, leveraging the fact that the 2 100 | // least significant bits in a pointer are never used due to pointer alignment. 101 | // 102 | // The sequence value manages the state of the entry. It is similar to the 103 | // `sequence` value used in the 1024cores queue, however it can be limited to 1 104 | // bit. This is acceptable due to the fact that consumers track their own 105 | // positions (vs using an atomic in the 1024cores queue) and producers cannot 106 | // claim an entry until all consumers have "released" that entry, which prevents 107 | // race conditions where producers "lap" consumers. 108 | // 109 | // In the case of slow consumers, it is possible for both a producer and 110 | // multiple consumers to wait on the slot. The producer gets blocked on the slow 111 | // consumer and fast consumers are blocked waiting for the producer to send a 112 | // value. In such a case, the fast consumer will encounter an entry that has a 113 | // value and must be able to determine that it is a value that it has already 114 | // seen and not a new one. This is handled by the `sequence` component of the 115 | // entry state. 116 | // 117 | // The sequence bit is essentially `(position / channel-capacity) & 1`, where 118 | // position is a monotonically increasing integer. Aka, it is the least 119 | // significant bit representing the number of times a sender or receiver has 120 | // cycled the buffer. Given that it is impossible for handles to lap each other 121 | // more than once, this is sufficient amount of state to disambiguate the above 122 | // scenario. 123 | // 124 | // The `toggle` flag implies different things depending on if the sender or 125 | // receiver set it. When a sender encounters an occupied entry, it will store 126 | // its `WaitingTx` node in the entry's `waiting_tx` variable. It will then 127 | // attempt to flip `toggle` from `false` to `true. If the compare-and-swap 128 | // operation is successful, then the sender has entered the waiting state, if 129 | // the compare-and-swap fails, then the entry is available to the sender. When a 130 | // receiver is done with a value, it will release the entry by flipping `toggle` 131 | // from `false` to `true. If the compare-and-swap fails, then a sender is 132 | // waiting on the entry, and the receiver must unpark it. 133 | // 134 | // # References 135 | // 136 | // [1] http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue 137 | // [2] https://github.com/carllerche/futures-mpsc 138 | 139 | #[macro_use] 140 | extern crate futures; 141 | 142 | mod atomic_task; 143 | mod atomic_u64; 144 | 145 | pub use std::sync::mpsc::SendError; 146 | 147 | use atomic_task::AtomicTask; 148 | use atomic_u64::AtomicU64; 149 | 150 | use futures::{Stream, Sink, Poll, StartSend, Async, AsyncSink}; 151 | use futures::task; 152 | 153 | use std::{ops, mem, ptr, u32, usize}; 154 | use std::cell::UnsafeCell; 155 | use std::sync::Arc; 156 | use std::sync::atomic::{AtomicUsize, AtomicPtr}; 157 | use std::sync::atomic::Ordering::{self, Acquire, Release, AcqRel, Relaxed}; 158 | 159 | /// The broadcast end of a channel which is used to send values. 160 | /// 161 | /// This is created by the `channel` function. 
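///
/// Illustrative usage (an editorial sketch based on the `Sink` implementation
/// below and the blocking `wait()` adapter from `futures` 0.1):
///
/// ```no_run
/// extern crate futures;
/// extern crate futures_broadcast as broadcast;
///
/// use futures::{Future, Sink};
///
/// let (tx, _rx) = broadcast::channel::<String>(8);
///
/// // `send` consumes the `Sender` and yields it back once the value has been
/// // accepted, applying back pressure when the buffer is full.
/// let tx = tx.send("hello".to_string()).wait().unwrap();
/// let _tx = tx.send("world".to_string()).wait().unwrap();
/// ```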
162 | pub struct Sender { 163 | // Channel state shared between the sender and receiver 164 | inner: Arc>, 165 | 166 | // Handle to the waiter node associated with this `Sender`. This is lazily 167 | // allocated, so if the `Sender` never blocks, the node is never allocated. 168 | // Once it is allocated, it is reused for the lifetime of the `Sender` 169 | // handle. 170 | // 171 | // The handle holds the waiting task as well as the sent value that caused 172 | // the sender to block. 173 | waiter: Option>>, 174 | } 175 | 176 | /// The receiving end of a channel. 177 | /// 178 | /// Each `Receiver` will receive every value sent by every `Sender`. 179 | /// 180 | /// This is created by the `channel` function. 181 | pub struct Receiver { 182 | // Channel state shared between the sender and receiver 183 | inner: Arc>, 184 | 185 | // The current receiver's position. Usually, when the `Receiver` is created, 186 | // this is initialized to the head of the channel. After the receiver reads 187 | // a value, `pos` is incremented. 188 | pos: usize, 189 | 190 | // Handle to the waiter node associated with this `Receiver`. This is lazily 191 | // allocated, so if the `Receiver` never blocks, the node is never 192 | // allocated. Once it is allocated, it is reused for the lifetime of the 193 | // `Receiver` handle. 194 | // 195 | // The handle holds the waiting task. 196 | waiter: Option>, 197 | } 198 | 199 | /// A reference to the received value still located in the channel buffer. 200 | /// 201 | /// When the `RecvGuard` is dropped, the value will be released. This type 202 | /// derefs to the value. 203 | pub struct RecvGuard<'a, T: 'a> { 204 | // Reference to the receiver handle. This is a mutable reference to ensure 205 | // exclusive access to the receiver. 206 | recv: &'a mut Receiver, 207 | 208 | // Reference to the value 209 | value: &'a T, 210 | } 211 | 212 | struct Inner { 213 | // Pre-allocated buffer of entries 214 | buffer: Vec>, 215 | 216 | // Buffer access mask This is `capacity - 1` and allows mapping a position 217 | // in the channel to an index. 218 | mask: usize, 219 | 220 | // Sequence value mask 221 | seq_mask: usize, 222 | 223 | // Used by publishers and when receivers are cloned. 224 | // 225 | // This is an atomic variable containing the number of outstanding receiver 226 | // handles as well as the entry position for the next value sent. 227 | pub_state: PubCell, 228 | 229 | // Number of outstanding senders 230 | num_tx: AtomicUsize, 231 | } 232 | 233 | // Contains the `PubState` 234 | // 235 | // Currently, this is coordinated with a `Mutex`, however there are a bunch of 236 | // other strategies that could be used. 237 | struct PubCell { 238 | pub_state: AtomicU64, 239 | } 240 | 241 | #[derive(Debug, Clone, Copy, Eq, PartialEq)] 242 | struct PubState { 243 | // Current producer position 244 | pos: u32, 245 | 246 | // Number of outstanding receiver handles 247 | num_rx: u32, 248 | } 249 | 250 | struct Entry { 251 | // Value being published 252 | value: UnsafeCell>, 253 | 254 | // Stores the entry state. This is a combination of the necessary flags to 255 | // track the state as well as a pointer to the head of the waiting stack. 256 | state: StateCell, 257 | 258 | // Number of remaining receivers to observe the value. 259 | // 260 | // This value can actually go "negative" (though it will wrap) in some racy 261 | // cases. 262 | remaining: AtomicUsize, 263 | 264 | // Pointer to a waiting TX node. 
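    //
    // When a sender finds this entry still occupied, it parks itself here. The
    // receiver that finally releases the entry takes the node, moves the queued
    // value directly into the entry, and then unparks the sender (see
    // `Entry::release`).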
265 | waiting_tx: UnsafeCell>>>, 266 | } 267 | 268 | // Used to track a waiter. Node in a linked-list. 269 | struct WaitingRx { 270 | // Parked task 271 | task: AtomicTask, 272 | // Next waiter 273 | next: AtomicPtr, 274 | } 275 | 276 | struct WaitingTx { 277 | // Parked task 278 | task: AtomicTask, 279 | 280 | // Queued (value, num-rx) 281 | value: UnsafeCell, usize)>>, 282 | 283 | // True if parked 284 | parked: AtomicUsize, 285 | } 286 | 287 | const READY: usize = 0; 288 | const PARKED: usize = 1; 289 | const CLOSED: usize = 2; 290 | 291 | struct StateCell { 292 | state: AtomicUsize, 293 | } 294 | 295 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 296 | struct State { 297 | // Pointer to the head of the waiting RXs 298 | waiting_rx: *mut WaitingRx, 299 | 300 | // The least significant bit of number of types the channel buffer has been 301 | // cycled. 302 | sequence: usize, 303 | 304 | // TX and RX halves race to toggle this flag. If TX wins, then the slot is 305 | // made available to an RX, if an RX wins, then the RX is blocked. 306 | toggled: bool, 307 | } 308 | 309 | /// Returns a channel 310 | pub fn channel(capacity: usize) -> (Sender, Receiver) { 311 | let inner = Arc::new(Inner::with_capacity(capacity)); 312 | 313 | let tx = Sender { 314 | inner: inner.clone(), 315 | waiter: None, 316 | }; 317 | 318 | let rx = Receiver { 319 | inner: inner, 320 | pos: 0, 321 | waiter: None, 322 | }; 323 | 324 | (tx, rx) 325 | } 326 | 327 | // ===== impl Sender ===== 328 | 329 | impl Sender { 330 | /// Returns `Ready` if the channel currently has capacity 331 | pub fn poll_ready(&mut self) -> Async<()> { 332 | if self.is_parked() { 333 | Async::NotReady 334 | } else { 335 | Async::Ready(()) 336 | } 337 | } 338 | 339 | /// Try to clone the `Sender`. This will fail if there are too many 340 | /// outstanding senders. 
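    ///
    /// Illustrative sketch (editorial addition): the channel capacity also caps
    /// the number of live `Sender` handles, so cloning beyond it fails.
    ///
    /// ```no_run
    /// extern crate futures_broadcast as broadcast;
    ///
    /// let (tx, _rx) = broadcast::channel::<u32>(2);
    /// let _tx2 = tx.try_clone().expect("second handle fits the capacity");
    /// assert!(tx.try_clone().is_err()); // a third handle would exceed it
    /// ```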
341 | pub fn try_clone(&self) -> Result { 342 | let mut curr = self.inner.num_tx.load(Relaxed); 343 | 344 | loop { 345 | if curr == self.inner.buffer.len() { 346 | return Err(()); 347 | } 348 | 349 | let actual = self.inner.num_tx.compare_and_swap(curr, curr + 1, Relaxed); 350 | 351 | if actual == curr { 352 | return Ok(Sender { 353 | inner: self.inner.clone(), 354 | waiter: None, 355 | }); 356 | } 357 | 358 | curr = actual; 359 | } 360 | } 361 | 362 | fn is_parked(&self) -> bool { 363 | match self.waiter { 364 | Some(ref w) => w.parked.load(Acquire) == PARKED, 365 | None => false, 366 | } 367 | } 368 | } 369 | 370 | impl Sink for Sender { 371 | type SinkItem = T; 372 | type SinkError = SendError; 373 | 374 | fn start_send(&mut self, item: T) -> StartSend> { 375 | if self.is_parked() { 376 | return Ok(AsyncSink::NotReady(item)); 377 | } 378 | 379 | match self.inner.send(Some(item), &mut self.waiter, true) { 380 | Err(SendError(e)) => Err(SendError(e.unwrap())), 381 | Ok(_) => Ok(AsyncSink::Ready), 382 | } 383 | } 384 | 385 | fn poll_complete(&mut self) -> Poll<(), SendError> { 386 | Ok(Async::Ready(())) 387 | } 388 | } 389 | 390 | fn pos_to_sequence(pos: usize, mask: usize) -> usize { 391 | if pos & mask == 0 { 392 | 0 393 | } else { 394 | 1 395 | } 396 | } 397 | 398 | fn tx_waiter(cell: &mut Option>>, 399 | item: Option, 400 | num_rx: usize, 401 | park: bool) -> Arc> 402 | { 403 | if let Some(ref w) = *cell { 404 | unsafe { 405 | if park { 406 | w.task.park(); 407 | } 408 | 409 | (*w.value.get()) = Some((item, num_rx)); 410 | } 411 | 412 | w.parked.store(PARKED, Release); 413 | 414 | return w.clone(); 415 | } 416 | 417 | let w = Arc::new(WaitingTx { 418 | task: AtomicTask::new(task::park()), 419 | value: UnsafeCell::new(Some((item, num_rx))), 420 | parked: AtomicUsize::new(PARKED), 421 | }); 422 | 423 | *cell = Some(w.clone()); 424 | w 425 | } 426 | 427 | impl Drop for Sender { 428 | fn drop(&mut self) { 429 | let prev = self.inner.num_tx.fetch_sub(1, AcqRel); 430 | 431 | if prev == 1 { 432 | if let Some(ref w) = self.waiter { 433 | match w.parked.compare_and_swap(PARKED, CLOSED, AcqRel) { 434 | PARKED | CLOSED => return, 435 | _ => {} 436 | } 437 | } 438 | 439 | // Send a "closed" message 440 | let _ = self.inner.send(None, &mut self.waiter, false); 441 | } 442 | } 443 | } 444 | 445 | // ===== impl Receiver ===== 446 | 447 | impl Receiver { 448 | /// Returns a new receiver positioned at the head of the channel 449 | pub fn new_receiver(&self) -> Receiver { 450 | let pos = self.inner.pub_state.inc_rx(); 451 | 452 | Receiver { 453 | inner: self.inner.clone(), 454 | pos: pos, 455 | waiter: None, 456 | } 457 | } 458 | 459 | pub fn recv(&mut self) -> Poll>, ()> { 460 | let mask = self.inner.mask; 461 | let seq_mask = self.inner.seq_mask; 462 | let pos = self.pos; 463 | 464 | // A little bit of misdirection to make the borrow checker happy. 465 | let value = { 466 | // Get the entry at the current position 467 | let entry = &self.inner.buffer[pos & mask]; 468 | let mut entry_state = entry.state.load(Acquire); 469 | 470 | let seq = pos_to_sequence(pos, seq_mask); 471 | 472 | if seq != entry_state.sequence { 473 | // No value present, attempt to wait 474 | // 475 | // First, get a waiter. `rx_waiter` ensures that the 476 | // `Receiver`'s wait node references the current task. 477 | let waiter = rx_waiter(&mut self.waiter); 478 | 479 | // Transmute the waiter to an unsafe pointer. 
This ptr will 480 | // be stored in the state slot 481 | let node_ptr: *mut WaitingRx = unsafe { mem::transmute(waiter) }; 482 | 483 | loop { 484 | // Update the next pointer. 485 | // 486 | // Relaxed ordering is used as the waiter node is not 487 | // currently shared with other threads. 488 | unsafe { 489 | (*node_ptr).next.store(entry_state.waiting_rx, Relaxed); 490 | } 491 | 492 | let next = State { 493 | waiting_rx: node_ptr, 494 | .. entry_state 495 | }; 496 | 497 | // Attempt to CAS the new state 498 | let actual = entry.state.compare_and_swap(entry_state, next, AcqRel); 499 | 500 | if actual == entry_state { 501 | // The wait has successfully been registered, so return 502 | // with NotReady 503 | return Ok(Async::NotReady); 504 | } 505 | 506 | // The CAS failed, maybe the value is not ready. This is why 507 | // Acq is used in the CAS. 508 | if seq == actual.sequence { 509 | // The Arc must be cleaned up... 510 | let _: Arc = unsafe { mem::transmute(node_ptr) }; 511 | break; 512 | } 513 | 514 | // The CAS failed for another reason, update the state and 515 | // try again. 516 | entry_state = actual; 517 | } 518 | } 519 | 520 | // Read the value 521 | unsafe { 522 | match *entry.value.get() { 523 | Some(ref v) => v, 524 | None => { 525 | return Ok(Async::Ready(None)); 526 | } 527 | } 528 | } 529 | }; 530 | 531 | // The slot is ready to be read 532 | Ok(Async::Ready(Some(RecvGuard { 533 | recv: self, 534 | value: value, 535 | }))) 536 | } 537 | } 538 | 539 | impl Drop for Receiver { 540 | fn drop(&mut self) { 541 | let pos = self.inner.pub_state.dec_rx(); 542 | let mask = self.inner.mask; 543 | 544 | while self.pos < pos { 545 | let entry = &self.inner.buffer[self.pos & mask]; 546 | 547 | // Decrement the receiver 548 | if 1 == entry.remaining.fetch_sub(1, AcqRel) { 549 | // Last remaining receiver, release the entry 550 | entry.release(&self.inner); 551 | } 552 | 553 | self.pos += 1; 554 | } 555 | } 556 | } 557 | 558 | // ===== impl RecvGuard ===== 559 | 560 | impl<'a, T> ops::Deref for RecvGuard<'a, T> { 561 | type Target = T; 562 | 563 | fn deref(&self) -> &T { 564 | &*self.value 565 | } 566 | } 567 | 568 | impl<'a, T> Drop for RecvGuard<'a, T> { 569 | fn drop(&mut self) { 570 | // It's critical to not use the `value` pointer in here. 571 | 572 | let mask = self.recv.inner.mask; 573 | let pos = self.recv.pos; 574 | 575 | // Get the entry at the current position 576 | let entry = &self.recv.inner.buffer[pos & mask]; 577 | 578 | // Decrement the remaining receivers 579 | // 580 | // Why is this AcqRel? 581 | let prev = entry.remaining.fetch_sub(1, AcqRel); 582 | 583 | if 1 == prev { 584 | entry.release(&self.recv.inner); 585 | } 586 | 587 | // Increment the position 588 | self.recv.pos += 1; 589 | } 590 | } 591 | 592 | fn rx_waiter(cell: &mut Option>) -> Arc { 593 | if let Some(ref w) = *cell { 594 | // Concurrent calls to `AtomicTask::park()` are guaranteed by having a 595 | // &mut reference to the cell. 
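        //
        // Safety: `AtomicTask::park` requires that the caller provide mutual
        // exclusion; the `&mut` borrow flowing through `Receiver::recv` into
        // this cell ensures no other `park` call can run concurrently.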
596 | unsafe { w.task.park() }; 597 | 598 | return w.clone(); 599 | } 600 | 601 | let w = Arc::new(WaitingRx { 602 | task: AtomicTask::new(task::park()), 603 | next: AtomicPtr::new(ptr::null_mut()), 604 | }); 605 | 606 | *cell = Some(w.clone()); 607 | w 608 | } 609 | 610 | impl Stream for Receiver { 611 | type Item = T; 612 | type Error = (); 613 | 614 | fn poll(&mut self) -> Poll, ()> { 615 | match try_ready!(self.recv()) { 616 | Some(val) => Ok(Async::Ready(Some(val.clone()))), 617 | None => Ok(Async::Ready(None)), 618 | } 619 | } 620 | } 621 | 622 | // ===== impl Inner ===== 623 | 624 | impl Inner { 625 | fn with_capacity(capacity: usize) -> Inner { 626 | // TODO: Capacity is capped at a smaller number than usize::MAX 627 | // 628 | let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 { 629 | if capacity < 2 { 630 | 2 631 | } else { 632 | // use next power of 2 as capacity 633 | capacity.next_power_of_two() 634 | } 635 | } else { 636 | capacity 637 | }; 638 | 639 | // Initialize the ring as a vec of vaccant entries 640 | let mut buffer = Vec::with_capacity(capacity); 641 | 642 | for _ in 0..capacity { 643 | buffer.push(Entry { 644 | state: StateCell::new(), 645 | value: UnsafeCell::new(None), 646 | remaining: AtomicUsize::new(0), 647 | waiting_tx: UnsafeCell::new(None), 648 | }); 649 | } 650 | 651 | let mask = capacity - 1; 652 | 653 | Inner { 654 | buffer: buffer, 655 | mask: mask, 656 | seq_mask: (mask << 1) & (!mask), 657 | pub_state: PubCell::new(), 658 | num_tx: AtomicUsize::new(1), 659 | } 660 | } 661 | 662 | 663 | fn send(&self, mut item: Option, 664 | waiter: &mut Option>>, 665 | park: bool) 666 | -> Result<(), SendError>> 667 | { 668 | let mask = self.mask; 669 | let seq_mask = self.seq_mask; 670 | 671 | // Claim a lot, this does not need to be guarded like in the original 672 | // MPMC queue because the size restrictions on the channel and the 673 | // `is_parked` is a sufficient guard. 674 | let pub_state = match self.pub_state.claim_slot() { 675 | Some(v) => v, 676 | None => return Err(SendError(item)), 677 | }; 678 | 679 | // Get the sequence value for the given position 680 | let seq = pos_to_sequence(pub_state.pos(), seq_mask); 681 | 682 | // Get a handle to the entry 683 | let entry = &self.buffer[pub_state.pos() & mask]; 684 | 685 | // Load the current entry state 686 | let mut entry_state = entry.state.load(Acquire); 687 | 688 | loop { 689 | debug_assert!(seq != entry_state.sequence); 690 | 691 | if entry_state.toggled { 692 | // The subscriber has released this slot and the publisher may 693 | // write a value to it. 694 | unsafe { 695 | // Set the value 696 | (*entry.value.get()) = item; 697 | } 698 | 699 | // Set the number of remaining subscribers to observe 700 | // 701 | // This is done with a `fetch_add` to work concurrently with 702 | // receivers that are in the process of dropping. 703 | // 704 | // The `Relaxed` ordering is sufficient here as all receivers with 705 | // `Acquire` this memory when loading the entry state. 706 | let prev = entry.remaining.fetch_add(pub_state.num_rx(), Relaxed); 707 | 708 | if 0 == prev.wrapping_add(pub_state.num_rx()) { 709 | // The receiving end is disconnected, so return the value 710 | let item = unsafe { (*entry.value.get()).take() }; 711 | return Err(SendError(item)); 712 | } 713 | 714 | // A CAS loop for updating the entry state. 715 | loop { 716 | let next = State { 717 | waiting_rx: ptr::null_mut(), 718 | sequence: seq, 719 | toggled: false, 720 | .. 
entry_state 721 | }; 722 | 723 | let actual = entry.state.compare_and_swap(entry_state, next, AcqRel); 724 | 725 | if actual == entry_state { 726 | break; 727 | } 728 | 729 | entry_state = actual; 730 | } 731 | 732 | // Notify waiters 733 | entry_state.notify_rx(); 734 | 735 | return Ok(()); 736 | 737 | } else { 738 | // The slot is occupied, so store the item in the waiting_tx 739 | // slot 740 | 741 | let mut waiter = tx_waiter(waiter, item, pub_state.num_rx(), park); 742 | 743 | // Store the waiter in the slot 744 | unsafe { 745 | // The slot should be empty... 746 | debug_assert!((*entry.waiting_tx.get()).is_none()); 747 | 748 | // Store the waiter 749 | (*entry.waiting_tx.get()) = Some(waiter); 750 | }; 751 | 752 | let next = State { 753 | // Set the toggle, indicating that the TX is waiting 754 | toggled: true, 755 | .. entry_state 756 | }; 757 | 758 | // Attempt to CAS 759 | let actual = entry.state.compare_and_swap(entry_state, next, AcqRel); 760 | 761 | if entry_state == actual { 762 | return Ok(()); 763 | } 764 | 765 | // The CAS failed, remove the waiting node, re-acquire the item, 766 | // and try the process again 767 | waiter = unsafe { (*entry.waiting_tx.get()).take().unwrap() }; 768 | let (i, _) = unsafe { (*waiter.value.get()).take().unwrap() }; 769 | item = i; 770 | 771 | // Clear parked flag 772 | waiter.parked.store(READY, Relaxed); 773 | 774 | entry_state = actual; 775 | } 776 | } 777 | } 778 | } 779 | 780 | #[cfg(debug_assertions)] 781 | impl Drop for Inner { 782 | fn drop(&mut self) { 783 | // At this point, since all `Sender` and `Receiver` handles have dropped 784 | // and cleaned up after themselves, all unsafe pointers are null. 785 | for entry in &self.buffer { 786 | let entry_state = entry.state.load(Acquire); 787 | 788 | debug_assert!(entry_state.waiting_rx.is_null()); 789 | 790 | unsafe { 791 | debug_assert!((*entry.waiting_tx.get()).is_none()); 792 | } 793 | } 794 | } 795 | } 796 | 797 | unsafe impl Send for Sender {} 798 | unsafe impl Sync for Sender {} 799 | 800 | unsafe impl Send for Inner {} 801 | unsafe impl Sync for Inner {} 802 | 803 | // ===== impl PubCell ===== 804 | 805 | impl PubCell { 806 | fn new() -> PubCell { 807 | let v = PubState::new().as_u64(); 808 | 809 | PubCell { pub_state: AtomicU64::new(v) } 810 | } 811 | 812 | // Loads the state 813 | fn load(&self, ordering: Ordering) -> PubState { 814 | let val = self.pub_state.load(ordering); 815 | PubState::load(val) 816 | } 817 | 818 | fn compare_and_swap(&self, current: PubState, new: PubState, ordering: Ordering) -> PubState { 819 | let val = self.pub_state.compare_and_swap(current.as_u64(), new.as_u64(), ordering); 820 | PubState::load(val) 821 | } 822 | 823 | /// Claim the next publish spot and return the new `PubState` 824 | /// 825 | /// Returns `None` if there are no more receivers. 826 | /// 827 | /// Uses `Relaxed` ordering 828 | fn claim_slot(&self) -> Option { 829 | let mut curr = self.load(Relaxed); 830 | 831 | loop { 832 | if curr.num_rx == 0 { 833 | return None; 834 | } 835 | 836 | let next = PubState { 837 | pos: curr.pos.wrapping_add(1), 838 | .. curr 839 | }; 840 | 841 | let actual = self.compare_and_swap(curr, next, Relaxed); 842 | 843 | if curr == actual { 844 | return Some(curr); 845 | } 846 | 847 | curr = actual 848 | } 849 | } 850 | 851 | /// Atomically increment the number of outstanding RX handles and return the 852 | /// current "head" position. 
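    /// The returned position is where a newly cloned `Receiver` (see
    /// `Receiver::new_receiver`) begins reading, so it only observes values
    /// sent after the clone.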
853 | /// 854 | /// Uses `Relaxed` ordering 855 | fn inc_rx(&self) -> usize { 856 | let mut curr = self.load(Relaxed); 857 | 858 | loop { 859 | if curr.num_rx == u32::MAX { 860 | // TODO: return this as an error 861 | panic!(); 862 | } 863 | 864 | let next = PubState { 865 | num_rx: curr.num_rx + 1, 866 | .. curr 867 | }; 868 | 869 | let actual = self.compare_and_swap(curr, next, Relaxed); 870 | 871 | if curr == actual { 872 | return next.pos(); 873 | } 874 | 875 | curr = actual; 876 | } 877 | } 878 | 879 | /// Decrement the number of outstanding receivers 880 | fn dec_rx(&self) -> usize { 881 | let mut curr = self.load(Relaxed); 882 | 883 | loop { 884 | let next = PubState { 885 | num_rx: curr.num_rx - 1, 886 | .. curr 887 | }; 888 | 889 | let actual = self.compare_and_swap(curr, next, Relaxed); 890 | 891 | if curr == actual { 892 | return curr.pos(); 893 | } 894 | 895 | curr = actual; 896 | } 897 | } 898 | } 899 | 900 | // ===== impl PubState ===== 901 | 902 | impl PubState { 903 | /// Return a new `PubState` with default values 904 | fn new() -> PubState { 905 | PubState { 906 | pos: 0, 907 | num_rx: 1, 908 | } 909 | } 910 | 911 | fn pos(&self) -> usize { 912 | self.pos as usize 913 | } 914 | 915 | fn num_rx(&self) -> usize { 916 | self.num_rx as usize 917 | } 918 | 919 | /// Load a `PubState` from its u64 representation 920 | fn load(val: u64) -> PubState { 921 | PubState { 922 | pos: (val >> 32) as u32, 923 | num_rx: (val & (u32::MAX as u64)) as u32, 924 | } 925 | } 926 | 927 | /// Return the u64 representation for this `PubState` 928 | fn as_u64(&self) -> u64 { 929 | ((self.pos as u64) << 32) | (self.num_rx as u64) 930 | } 931 | } 932 | 933 | // ===== impl Entry ===== 934 | 935 | impl Entry { 936 | fn release(&self, inner: &Inner) { 937 | // The acquire is done here to ensure that the memory ordering has been 938 | // established for the entry value. 939 | let mut entry_state = self.state.load(Acquire); 940 | 941 | // Remove the value 942 | unsafe { (*self.value.get()) = None }; 943 | 944 | loop { 945 | if entry_state.toggled { 946 | let waiter = unsafe { 947 | // A TX is waiting, take the waiter.. 948 | let waiter = (*self.waiting_tx.get()).take().unwrap(); 949 | 950 | // Get the item 951 | let (item, num_rx) = (*waiter.value.get()).take().unwrap(); 952 | 953 | // Store the item in the entry 954 | (*self.value.get()) = item; 955 | 956 | // Reset the number of remaining receivers to observe 957 | // the value. 958 | // 959 | // A `store` is safe here as it is not possible for another 960 | // RX to concurrenly modify the value. 961 | // 962 | // `Relaxed` ordering is used as the entry state gates 963 | // reads. 964 | self.remaining.store(num_rx, Relaxed); 965 | 966 | waiter 967 | }; 968 | 969 | let next = State { 970 | waiting_rx: ptr::null_mut(), 971 | sequence: (entry_state.sequence + 1) % 2, 972 | toggled: false, 973 | .. entry_state 974 | }; 975 | 976 | // At this point, this is the only thread that will attempt 977 | // to mutate the state slot, so there is no need for a CAS. 
978 | let prev = self.state.swap(next, Release); 979 | debug_assert!(entry_state == prev); 980 | 981 | // Unpark the TX waiter 982 | match waiter.parked.compare_and_swap(PARKED, READY, AcqRel) { 983 | CLOSED => { 984 | let mut waiter = Some(waiter); 985 | let _ = inner.send(None, &mut waiter, false); 986 | } 987 | PARKED => { 988 | waiter.task.unpark(); 989 | } 990 | _ => unreachable!(), 991 | } 992 | 993 | // Unpark any RX waiting on the slot 994 | entry_state.notify_rx(); 995 | 996 | break; 997 | } 998 | 999 | let next = State { 1000 | toggled: true, 1001 | .. entry_state 1002 | }; 1003 | 1004 | let actual = self.state.compare_and_swap(entry_state, next, Release); 1005 | 1006 | if entry_state == actual { 1007 | break; 1008 | } 1009 | 1010 | entry_state = actual; 1011 | } 1012 | } 1013 | } 1014 | 1015 | // ===== impl StateCell ===== 1016 | 1017 | impl StateCell { 1018 | fn new() -> StateCell { 1019 | let val = State::new().as_usize(); 1020 | 1021 | StateCell { 1022 | state: AtomicUsize::new(val), 1023 | } 1024 | } 1025 | 1026 | fn load(&self, ordering: Ordering) -> State { 1027 | let val = self.state.load(ordering); 1028 | State::load(val) 1029 | } 1030 | 1031 | fn swap(&self, next: State, ordering: Ordering) -> State { 1032 | let val = self.state.swap(next.as_usize(), ordering); 1033 | State::load(val) 1034 | } 1035 | 1036 | fn compare_and_swap(&self, current: State, next: State, ordering: Ordering) -> State { 1037 | let val = self.state.compare_and_swap(current.as_usize(), next.as_usize(), ordering); 1038 | State::load(val) 1039 | } 1040 | } 1041 | 1042 | // ===== impl State ===== 1043 | 1044 | impl State { 1045 | /// Return a new `State` value 1046 | fn new() -> State { 1047 | State { 1048 | waiting_rx: ptr::null_mut(), 1049 | sequence: 1, 1050 | toggled: true, 1051 | } 1052 | } 1053 | 1054 | /// Load a `State` value from its `usize` representation 1055 | fn load(val: usize) -> State { 1056 | State { 1057 | waiting_rx: (val & !3) as *mut WaitingRx, 1058 | sequence: val & 1, 1059 | toggled: (val & 2) == 2, 1060 | } 1061 | } 1062 | 1063 | /// Return the `usize` representation of this `State` 1064 | fn as_usize(&self) -> usize { 1065 | let mut val = self.waiting_rx as usize | self.sequence; 1066 | 1067 | if self.toggled { 1068 | val |= 2; 1069 | } 1070 | 1071 | val 1072 | } 1073 | 1074 | fn notify_rx(&self) { 1075 | let mut ptr = self.waiting_rx; 1076 | 1077 | while !ptr.is_null() { 1078 | unsafe { 1079 | let node: Arc = mem::transmute(ptr); 1080 | 1081 | ptr = node.next.swap(ptr::null_mut(), Release); 1082 | 1083 | // Unpark the task 1084 | node.task.unpark(); 1085 | } 1086 | } 1087 | } 1088 | } 1089 | -------------------------------------------------------------------------------- /tests/test_broadcast.rs: -------------------------------------------------------------------------------- 1 | extern crate futures; 2 | extern crate futures_broadcast as broadcast; 3 | 4 | use futures::{Future, Stream, Sink}; 5 | 6 | use std::time::Duration; 7 | use std::thread; 8 | use std::sync::mpsc; 9 | 10 | fn is_send() {} 11 | fn is_sync() {} 12 | 13 | #[test] 14 | fn bounds() { 15 | is_send::>(); 16 | is_send::>(); 17 | is_sync::>(); 18 | is_sync::>(); 19 | } 20 | 21 | #[test] 22 | fn send_recv() { 23 | let (tx, rx) = broadcast::channel::(16); 24 | 25 | let mut rx1 = rx.new_receiver().wait(); 26 | let mut rx2 = rx.wait(); 27 | 28 | let tx = tx.send(1).wait().unwrap(); 29 | let _tx = tx.send(2).wait().unwrap(); 30 | 31 | assert_eq!(rx1.next().unwrap(), Ok(1)); 32 | assert_eq!(rx1.next().unwrap(), 
Ok(2)); 33 | 34 | assert_eq!(rx2.next().unwrap(), Ok(1)); 35 | assert_eq!(rx2.next().unwrap(), Ok(2)); 36 | } 37 | 38 | #[test] 39 | fn receiver_wait() { 40 | let (mut tx1, rx1) = broadcast::channel::(1); 41 | let (tx2, rx2) = mpsc::channel(); 42 | 43 | { 44 | thread::spawn(move || { 45 | let mut rx = rx1.wait(); 46 | 47 | for _ in 0..5 { 48 | tx2.send(rx.next().unwrap().unwrap()).unwrap(); 49 | } 50 | }); 51 | } 52 | 53 | for i in 0..5 { 54 | thread::sleep(Duration::from_millis(50)); 55 | 56 | tx1 = tx1.send(i).wait().unwrap(); 57 | assert_eq!(rx2.recv().unwrap(), i); 58 | } 59 | 60 | drop(tx1); 61 | } 62 | 63 | #[test] 64 | fn wrapping_receiver_wait() { 65 | let (mut pu, su) = broadcast::channel::(2); 66 | 67 | let (tx1, rx1) = mpsc::channel(); 68 | let (tx2, rx2) = mpsc::channel(); 69 | 70 | // Fast subscriber 71 | { 72 | let su = su.new_receiver(); 73 | 74 | thread::spawn(move || { 75 | let mut su = su.wait(); 76 | 77 | for _ in 0..4 { 78 | tx1.send(su.next().unwrap().unwrap()).unwrap(); 79 | } 80 | }); 81 | } 82 | 83 | // Slow subscriber 84 | { 85 | thread::spawn(move || { 86 | let mut su = su.wait(); 87 | 88 | for _ in 0..4 { 89 | thread::sleep(Duration::from_millis(50)); 90 | tx2.send(su.next().unwrap().unwrap()).unwrap(); 91 | } 92 | }); 93 | } 94 | 95 | for i in 0..2 { 96 | for j in 0..2 { 97 | thread::sleep(Duration::from_millis(10)); 98 | 99 | pu = pu.send(i * 2 + j).wait().unwrap(); 100 | } 101 | 102 | for rx in &[&rx1, &rx2] { 103 | for j in 0..2 { 104 | assert_eq!(i * 2 + j, rx.recv().unwrap()); 105 | } 106 | } 107 | } 108 | 109 | for rx in &[rx1, rx2] { 110 | assert!(rx.recv().is_err()); 111 | } 112 | 113 | drop(pu); 114 | } 115 | 116 | #[test] 117 | fn single_tx_wait() { 118 | const N: i32 = 25; 119 | 120 | let (mut pu, su) = broadcast::channel::(2); 121 | 122 | let (tx, rx) = mpsc::channel(); 123 | 124 | // Slow subscriber 125 | thread::spawn(move || { 126 | let mut su = su.wait(); 127 | 128 | for _ in 0..N { 129 | thread::sleep(Duration::from_millis(10)); 130 | tx.send(su.next().unwrap().unwrap()).unwrap(); 131 | } 132 | }); 133 | 134 | // Fast producer 135 | for i in 0..N { 136 | pu = pu.send(i).wait().unwrap(); 137 | } 138 | 139 | for i in 0..N { 140 | assert_eq!(i, rx.recv().unwrap()); 141 | } 142 | 143 | assert!(rx.recv().is_err()); 144 | } 145 | 146 | #[test] 147 | fn rx_drops_without_consuming_all() { 148 | const N: i32 = 25; 149 | 150 | for _ in 0..N { 151 | thread::sleep(Duration::from_millis(100)); 152 | let (mut pu, su) = broadcast::channel::(8); 153 | 154 | // Slow RX 155 | thread::spawn(move || { 156 | let mut su = su.wait(); 157 | let _ = su.next(); 158 | 159 | thread::sleep(Duration::from_millis(7)); 160 | drop(su); 161 | }); 162 | 163 | let mut err = false; 164 | 165 | for i in 0..16 { 166 | match pu.send(i).wait() { 167 | Ok(p) => pu = p, 168 | Err(_) => { 169 | err = true; 170 | break; 171 | } 172 | } 173 | } 174 | 175 | assert!(err); 176 | } 177 | } 178 | 179 | #[test] 180 | fn test_publisher_closed_immediately() { 181 | let (_, su) = broadcast::channel::(16); 182 | 183 | let mut su = su.wait(); 184 | assert!(su.next().is_none()); 185 | } 186 | --------------------------------------------------------------------------------