├── .gitignore ├── Cargo.toml ├── README.md ├── src ├── containers │ ├── atomic_cell.rs │ ├── atomic_cell_array.rs │ ├── mod.rs │ ├── mpmc_queue.rs │ ├── mpsc_queue.rs │ ├── scratch.rs │ └── storage.rs ├── handle │ ├── bounded.rs │ ├── core.rs │ ├── ids.rs │ ├── inner.rs │ ├── mod.rs │ └── resizing.rs ├── lib.rs ├── primitives │ ├── append_list.rs │ ├── atomic_cell.rs │ ├── atomic_ext.rs │ ├── index_allocator.rs │ ├── invariant.rs │ ├── mod.rs │ └── prepend_list.rs └── sync │ ├── mod.rs │ ├── mpmc_queue.rs │ └── mpsc_queue.rs └── tests ├── mpmc.rs └── mpsc.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lockless" 3 | version = "0.2.0" 4 | authors = ["Diggory Blake "] 5 | description = "Composable, lock-free, allocation-light data structures" 6 | repository = "https://github.com/Diggsey/lockless" 7 | license = "MIT OR Apache-2.0" 8 | categories = ["concurrency", "asynchronous"] 9 | 10 | [dependencies] 11 | parking_lot = "0.3.6" 12 | futures = "0.1.9" 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Lockless 2 | 3 | This is an attempt to build useful high-level lock-free data structures, 4 | by designing simple, composable primitives and incrementally building complexity. 5 | 6 | Most of the data structures built upon these primitives are designed 7 | to perform zero allocation during their primary function. Allocation is only performed 8 | during setup, or when cloning handles to the data structures. 9 | 10 | This allocation-light design is the primary differentiator from other crates providing 11 | lock-free algorithms, and it means that none of the containers provided here 12 | are unbounded. 13 | 14 | 15 | ## Modules 16 | 17 | ### `primitives` 18 | 19 | This module contains simple, low-level building blocks, which can be used in isolation. 20 | 21 | Currently, this contains: 22 | - `AppendList` - an append-only list, which can be concurrently iterated. 23 | - `AtomicCell` - a `Cell`-like type, supporting only an atomic `swap` operation. 24 | - `AtomicExt` - a set of extension methods to simplify working with atomics. 25 | - `IndexAllocator` - a type which can assign IDs from a contiguous slab. 26 | - `PrependList` - a prepend-only list, which also supports an atomic `swap` operation. 27 | 28 | ### `handle` 29 | 30 | This module creates an abstraction for shared ownership of data, where each owner is 31 | automatically assigned a unique ID. 32 | 33 | There are multiple implementations: 34 | - `BoundedIdHandle` is completely lock and allocation free, but has a predefined 35 | limit on the number of concurrent owners. Exceeding this limit will cause a panic. 36 | - `ResizingIdHandle` wraps the data structure in a `parking_lot::RwLock`. Normal 37 | usage is still lock and allocation free, but exceeding the maximum number of 38 | concurrent owners will cause a write-lock to be taken, and the data structure 39 | to be automatically resized to accommodate the additional owners. 40 | 41 | ### `containers` 42 | 43 | This module contains medium-level data structures, often based on the `IdHandle` 44 | abstraction.
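For a quick taste of the API, here is a minimal sketch adapted from the crate's own smoke tests, using the bounded `AtomicCell` container (the accessor limit of 2 is arbitrary):

```rust
extern crate lockless;

use std::thread;
use lockless::containers::atomic_cell::BoundedAtomicCell;

fn main() {
    // Room for two concurrent accessors: the original handle and one clone.
    // Exceeding the limit of a bounded container panics.
    let mut cell = BoundedAtomicCell::new(2, 0usize);
    let mut cell2 = cell.clone();

    let t = thread::spawn(move || {
        // Atomically exchange the stored value; no allocation on this path.
        cell2.swap(1)
    });

    let seen_by_thread = t.join().unwrap();
    let seen_by_main = cell.swap(2);
    println!("thread saw {}, main saw {}", seen_by_thread, seen_by_main);
}
```

Note that the `clone` is where the per-accessor allocation happens; the `swap` calls themselves are allocation-free.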
45 | 46 | Currently, this contains: 47 | - `Storage` - provides storage for values larger than a `usize`, for use by other 48 | containers. 49 | - `Scratch` - provides a scratch space which each accessor can work in before its 50 | changes are atomically made visible to other accessors. 51 | - `AtomicCell` - an alternative to the primitive `AtomicCell` making slightly different 52 | trade-offs. It is slightly slower (approximately 15% slower in benchmarks), but can be 53 | composed into other data structures based around the `IdHandle` abstraction. 54 | - `AtomicCellArray` - functionally equivalent to a `Vec>`, but much 55 | more memory efficient. 56 | - `MpscQueue` - a multiple-producer, single-consumer queue. This queue does not attempt 57 | to be fair, so it's possible for one producer to starve the others. The queue also 58 | does not provide a mechanism to "wake up" senders/receivers when it's possible to 59 | continue, and so it must be polled. 60 | - `MpmcQueue` - an experimental multiple-producer, multiple-consumer queue. No fairness 61 | guarantees, and no wake-up mechanism. 62 | 63 | ### `sync` 64 | 65 | This module contains high-level data structures which are compatible with futures-rs. 66 | 67 | Currently, this contains: 68 | - `MpscQueue` - a multiple-producer, single-consumer queue. This queue is fair, 69 | so a single producer cannot starve other producers. 70 | - `MpmcQueue` - a multiple-producer, multiple-consumer queue. This queue is fair 71 | for producers, so a single producer cannot starve other producers, and prior 72 | to being closed, it is also fair for receivers. Once closed, any receiver can 73 | empty the queue. 74 | 75 | 76 | ## Contributing 77 | 78 | 1. Fork it! 79 | 2. Create your feature branch: `git checkout -b my-new-feature` 80 | 3. Commit your changes: `git commit -am 'Add some feature'` 81 | 4. Push to the branch: `git push origin my-new-feature` 82 | 5. 
Submit a pull request :D 83 | 84 | 85 | ## License 86 | 87 | MIT OR Apache-2.0 88 | -------------------------------------------------------------------------------- /src/containers/atomic_cell.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::marker::PhantomData; 3 | 4 | use handle::{Handle, IdHandle, ResizingHandle, BoundedHandle, HandleInner, Like}; 5 | use primitives::index_allocator::IndexAllocator; 6 | use containers::storage::{Storage, Place}; 7 | use containers::scratch::Scratch; 8 | 9 | #[derive(Debug)] 10 | pub struct AtomicCellInner>(AtomicUsize, PhantomData); 11 | 12 | impl> AtomicCellInner { 13 | pub fn new(value: T) -> Self { 14 | AtomicCellInner(AtomicUsize::new(value.into()), PhantomData) 15 | } 16 | pub unsafe fn swap(&self, value: &mut T) { 17 | let value = value.borrow_mut(); 18 | *value = self.0.swap(*value, Ordering::AcqRel); 19 | } 20 | } 21 | 22 | define_id!(AtomicCellId); 23 | 24 | pub struct AtomicCellWrapper { 25 | storage: Storage, 26 | scratch: Scratch>, 27 | inner: AtomicCellInner>, 28 | id_alloc: IndexAllocator 29 | } 30 | 31 | impl AtomicCellWrapper { 32 | pub fn new>(id_limit: usize, value: T) -> H { 33 | assert!(id_limit > 0); 34 | let mut storage = Storage::with_capacity(id_limit + 1); 35 | let scratch = Scratch::new(storage.none_storing_iter(id_limit));; 36 | let inner = AtomicCellInner::new(storage.store(Some(value))); 37 | let id_alloc = IndexAllocator::new(id_limit); 38 | 39 | Handle::new(AtomicCellWrapper { 40 | storage: storage, 41 | scratch: scratch, 42 | inner: inner, 43 | id_alloc: id_alloc, 44 | }) 45 | } 46 | 47 | pub unsafe fn swap(&self, id: &mut AtomicCellId, value: T) -> T { 48 | let place = self.scratch.get_mut(id); 49 | self.storage.replace(place, Some(value)); 50 | self.inner.swap(place); 51 | self.storage.replace(place, None).expect("Some(value) in container") 52 | } 53 | } 54 | 55 | impl HandleInner for AtomicCellWrapper { 56 | type IdAllocator = IndexAllocator; 57 | fn id_allocator(&self) -> &IndexAllocator { 58 | &self.id_alloc 59 | } 60 | fn raise_id_limit(&mut self, new_limit: usize) { 61 | let old_limit = self.id_limit(); 62 | assert!(new_limit > old_limit); 63 | let extra = new_limit - old_limit; 64 | self.storage.reserve(extra); 65 | self.scratch.extend(self.storage.none_storing_iter(extra)); 66 | self.id_alloc.resize(new_limit); 67 | } 68 | } 69 | 70 | #[derive(Debug)] 71 | pub struct AtomicCell>>(IdHandle); 72 | 73 | impl>> AtomicCell { 74 | pub fn new(max_accessors: usize, value: T) -> Self { 75 | AtomicCell(IdHandle::new(&AtomicCellWrapper::new(max_accessors, value))) 76 | } 77 | 78 | pub fn swap(&mut self, value: T) -> T { 79 | self.0.with_mut(move |inner, id| unsafe { inner.swap(id, value) }) 80 | } 81 | } 82 | 83 | impl>> Clone for AtomicCell { 84 | fn clone(&self) -> Self { 85 | AtomicCell(self.0.clone()) 86 | } 87 | } 88 | 89 | pub type ResizingAtomicCell = AtomicCell>>; 90 | pub type BoundedAtomicCell = AtomicCell>>; 91 | -------------------------------------------------------------------------------- /src/containers/atomic_cell_array.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::marker::PhantomData; 3 | 4 | use handle::{HandleInner, Handle, IdHandle, ResizingHandle, BoundedHandle, Like}; 5 | use primitives::index_allocator::IndexAllocator; 6 | use containers::storage::{Storage, Place}; 7 | use 
containers::scratch::Scratch; 8 | 9 | #[derive(Debug)] 10 | pub struct AtomicCellArrayInner>(Vec, PhantomData); 11 | 12 | impl> AtomicCellArrayInner { 13 | pub fn push(&mut self, value: T) { 14 | self.0.push(AtomicUsize::new(value.into())); 15 | } 16 | 17 | pub fn reserve(&mut self, extra: usize) { 18 | self.0.reserve_exact(extra); 19 | } 20 | 21 | pub fn insert(&mut self, index: usize, value: T) { 22 | self.0.insert(index, AtomicUsize::new(value.into())); 23 | } 24 | 25 | pub fn new>(iter: I) -> Self { 26 | let mut result = AtomicCellArrayInner(Vec::new(), PhantomData); 27 | result.extend(iter); 28 | result 29 | } 30 | 31 | pub fn extend>(&mut self, iter: I) { 32 | let iter = iter.into_iter(); 33 | self.reserve(iter.size_hint().0); 34 | for value in iter { 35 | self.push(value); 36 | } 37 | } 38 | 39 | pub unsafe fn swap(&self, index: usize, value: &mut T) { 40 | let value = value.borrow_mut(); 41 | *value = self.0[index].swap(*value, Ordering::AcqRel) 42 | } 43 | 44 | pub fn len(&self) -> usize { 45 | self.0.len() 46 | } 47 | } 48 | 49 | define_id!(AtomicCellArrayId); 50 | 51 | #[derive(Debug)] 52 | pub struct AtomicCellArrayWrapper { 53 | storage: Storage, 54 | scratch: Scratch>, 55 | inner: AtomicCellArrayInner>, 56 | id_alloc: IndexAllocator 57 | } 58 | 59 | impl AtomicCellArrayWrapper { 60 | pub fn new, I: IntoIterator>(id_limit: usize, values: I) -> H { 61 | assert!(id_limit > 0); 62 | let mut storage = Storage::with_capacity(id_limit + 1); 63 | let scratch = Scratch::new(storage.none_storing_iter(id_limit)); 64 | let inner = AtomicCellArrayInner::new(values.into_iter().map(|v| storage.store(Some(v)))); 65 | let id_alloc = IndexAllocator::new(id_limit); 66 | 67 | Handle::new(AtomicCellArrayWrapper { 68 | storage: storage, 69 | scratch: scratch, 70 | inner: inner, 71 | id_alloc: id_alloc, 72 | }) 73 | } 74 | 75 | pub fn push(&mut self, value: T) { 76 | let place = self.storage.store(Some(value)); 77 | self.inner.push(place); 78 | } 79 | 80 | pub fn insert(&mut self, index: usize, value: T) { 81 | let place = self.storage.store(Some(value)); 82 | self.inner.insert(index, place); 83 | } 84 | 85 | pub unsafe fn swap(&self, id: &mut AtomicCellArrayId, index: usize, value: T) -> T { 86 | let place = self.scratch.get_mut(id); 87 | self.storage.replace(place, Some(value)); 88 | self.inner.swap(index, place); 89 | self.storage.replace(place, None).expect("Some(value) in container") 90 | } 91 | 92 | pub fn len(&self) -> usize { 93 | self.inner.len() 94 | } 95 | } 96 | 97 | impl HandleInner for AtomicCellArrayWrapper { 98 | type IdAllocator = IndexAllocator; 99 | fn id_allocator(&self) -> &IndexAllocator { 100 | &self.id_alloc 101 | } 102 | fn raise_id_limit(&mut self, new_limit: usize) { 103 | let old_limit = self.id_limit(); 104 | assert!(new_limit > old_limit); 105 | let extra = new_limit - self.id_limit(); 106 | self.storage.reserve(extra); 107 | self.scratch.extend(self.storage.none_storing_iter(extra)); 108 | self.id_alloc.resize(new_limit); 109 | } 110 | } 111 | 112 | #[derive(Debug)] 113 | pub struct AtomicCellArray>>(IdHandle); 114 | 115 | impl>> AtomicCellArray { 116 | pub fn new>(max_accessors: usize, values: I) -> Self { 117 | AtomicCellArray(IdHandle::new(&AtomicCellArrayWrapper::new(max_accessors, values))) 118 | } 119 | 120 | pub fn swap(&mut self, index: usize, value: T) -> T { 121 | self.0.with_mut(move |inner, id| unsafe { inner.swap(id, index, value) }) 122 | } 123 | 124 | pub fn len(&self) -> usize { 125 | self.0.with(|inner| inner.len()) 126 | } 127 | } 128 | 129 | impl>> 
Clone for AtomicCellArray { 130 | fn clone(&self) -> Self { 131 | AtomicCellArray(self.0.clone()) 132 | } 133 | } 134 | 135 | pub type ResizingAtomicCellArray = AtomicCellArray>>; 136 | pub type BoundedAtomicCellArray = AtomicCellArray>>; 137 | -------------------------------------------------------------------------------- /src/containers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod atomic_cell; 2 | pub mod atomic_cell_array; 3 | pub mod mpsc_queue; 4 | pub mod mpmc_queue; 5 | pub mod storage; 6 | pub mod scratch; 7 | -------------------------------------------------------------------------------- /src/containers/mpmc_queue.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::marker::PhantomData; 3 | 4 | use handle::{HandleInner, Handle, IdHandle, ResizingHandle, BoundedHandle, Like}; 5 | use primitives::atomic_ext::AtomicExt; 6 | use primitives::index_allocator::IndexAllocator; 7 | use containers::storage::{Place, Storage}; 8 | use containers::scratch::Scratch; 9 | 10 | // Pointers are only wrapped to 2*Capacity to distinguish full from empty states, so must wrap before indexing! 11 | // ___________________ 12 | // |___|_X_|_X_|___|___| 13 | // ^ ^ 14 | // H T 15 | // 16 | // (H == T) => Empty 17 | // (H != T) && (H%C == T%C) => Full 18 | // 19 | // 20 | // Each cell on the ring stores an access count in the high bits: 21 | // ____________________________ 22 | // | access count | value index | 23 | // |____BITS/4____|__REMAINING__| 24 | // 25 | // An odd access count indicates that the cell contains a value, 26 | // while an even access count indicates that the cell is empty. 27 | // All access counts are initialized to zero. 28 | // The access count is used to prevent a form of the ABA problem, 29 | // where a producer tries to store into a cell which is no longer 30 | // the tail of the queue, and happens to have the same value index. 31 | 32 | const TAG_BITS: usize = ::POINTER_BITS/4; 33 | const VALUE_MASK: usize = !0 >> TAG_BITS; 34 | const TAG_MASK: usize = !VALUE_MASK; 35 | const TAG_BIT: usize = 1 << (::POINTER_BITS - TAG_BITS); 36 | const WRAP_THRESHOLD: usize = !0 ^ (!0 >> 1); 37 | 38 | #[derive(Debug)] 39 | pub struct MpmcQueueInner> { 40 | // If a value in the buffer has the EMPTY_BIT set, the 41 | // corresponding "value slot" is empty. 
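// Each element of `ring` packs an access-count tag (whose parity marks the cell
// as empty or full) together with a value index, following the layout described
// in the block comment above.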
42 | ring: Vec, 43 | // Pair of pointers into the ring buffer 44 | head: AtomicUsize, 45 | tail: AtomicUsize, 46 | phantom: PhantomData, 47 | } 48 | 49 | fn next_cell(mut index: usize, size2: usize) -> usize { 50 | index += 1; 51 | if index >= WRAP_THRESHOLD { 52 | index = index % size2; 53 | } 54 | index 55 | } 56 | 57 | fn wraps_around(start: usize, end: usize, size: usize) -> bool { 58 | let size2 = size*2; 59 | (end % size) < (start % size) || ((start + size) % size2 == (end % size2)) 60 | } 61 | 62 | fn rotate_slice(slice: &mut [T], places: usize) { 63 | slice.reverse(); 64 | let (a, b) = slice.split_at_mut(places); 65 | a.reverse(); 66 | b.reverse(); 67 | } 68 | 69 | impl> MpmcQueueInner { 70 | pub fn new>(iter: I) -> Self { 71 | MpmcQueueInner { 72 | ring: iter.into_iter().map(Into::into).map(AtomicUsize::new).collect(), 73 | head: AtomicUsize::new(0), 74 | tail: AtomicUsize::new(0), 75 | phantom: PhantomData 76 | } 77 | } 78 | 79 | pub fn extend>(&mut self, iter: I) where I::IntoIter: ExactSizeIterator { 80 | let iter = iter.into_iter(); 81 | let size = self.ring.len(); 82 | let extra = iter.len(); 83 | self.ring.reserve_exact(extra); 84 | self.ring.extend(iter.map(Into::into).map(AtomicUsize::new)); 85 | 86 | // If the queue wraps around the buffer, shift the elements 87 | // along such that the start section of the queue is moved to the 88 | // new end of the buffer. 89 | let head = self.head.get_mut(); 90 | let tail = self.tail.get_mut(); 91 | if wraps_around(*head, *tail, size) { 92 | rotate_slice(&mut self.ring[*head..], extra); 93 | *head += extra; 94 | } 95 | } 96 | 97 | pub fn len(&self) -> usize { 98 | self.ring.len() 99 | } 100 | 101 | pub unsafe fn push(&self, value: &mut T) -> bool { 102 | let size = self.ring.len(); 103 | let size2 = size*2; 104 | 105 | let index = value.borrow_mut(); 106 | 107 | loop { 108 | match self.tail.try_update_indirect(|tail| { 109 | let head = self.head.load(Ordering::Acquire); 110 | // If not full 111 | if (tail % size2) != (head + size) % size2 { 112 | // Try updating cell at tail position 113 | Ok(&self.ring[tail % size]) 114 | } else { 115 | // We observed a full queue, so stop trying 116 | Err(false) 117 | } 118 | }, |tail, cell| { 119 | // If cell at tail is empty 120 | if cell & TAG_BIT == 0 { 121 | // Swap in our index, and mark as full 122 | Ok((cell & TAG_MASK).wrapping_add(TAG_BIT) | *index) 123 | } else { 124 | // Cell is full, another thread is midway through an insertion 125 | // Try to assist the stalled thread 126 | let _ = self.tail.compare_exchange(tail, next_cell(tail, size2), Ordering::AcqRel, Ordering::Acquire); 127 | // Retry the insertion now that we've helped the other thread to progress 128 | Err(true) 129 | } 130 | }) { 131 | Ok((tail, prev_cell, _)) => { 132 | // Update the tail pointer if necessary 133 | let _ = self.tail.compare_exchange(tail, next_cell(tail, size2), Ordering::AcqRel, Ordering::Acquire); 134 | *index = prev_cell & VALUE_MASK; 135 | return true; 136 | } 137 | Err(false) => return false, 138 | Err(true) => {}, 139 | } 140 | } 141 | } 142 | 143 | pub unsafe fn pop(&self, value: &mut T) -> bool { 144 | let size = self.ring.len(); 145 | let size2 = size*2; 146 | 147 | let index = value.borrow_mut(); 148 | 149 | loop { 150 | match self.head.try_update_indirect(|head| { 151 | let tail = self.tail.load(Ordering::Acquire); 152 | // If not empty 153 | if head % size2 != tail % size2 { 154 | // Try updating cell at head position 155 | Ok(&self.ring[head % size]) 156 | } else { 157 | // We observed an empty 
queue, so stop trying 158 | Err(false) 159 | } 160 | }, |head, cell| { 161 | // If cell at head is full 162 | if cell & TAG_BIT != 0 { 163 | // Swap in our index, and mark as empty 164 | Ok((cell & TAG_MASK).wrapping_add(TAG_BIT) | *index) 165 | } else { 166 | // Cell is empty, another thread is midway through a removal 167 | // Try to assist the stalled thread 168 | let _ = self.head.compare_exchange(head, next_cell(head, size2), Ordering::AcqRel, Ordering::Acquire); 169 | // Retry the insertion now that we've helped the other thread to progress 170 | Err(true) 171 | } 172 | }) { 173 | Ok((head, prev_cell, _)) => { 174 | // Update the tail pointer if necessary 175 | let _ = self.head.compare_exchange(head, next_cell(head, size2), Ordering::AcqRel, Ordering::Acquire); 176 | *index = prev_cell & VALUE_MASK; 177 | return true; 178 | } 179 | Err(false) => return false, 180 | Err(true) => {}, 181 | } 182 | } 183 | } 184 | } 185 | 186 | define_id!(MpmcQueueAccessorId); 187 | 188 | pub struct MpmcQueueWrapper { 189 | storage: Storage, 190 | scratch: Scratch>, 191 | inner: MpmcQueueInner>, 192 | id_alloc: IndexAllocator 193 | } 194 | 195 | impl MpmcQueueWrapper { 196 | pub fn new>(id_limit: usize, size: usize) -> H { 197 | assert!(id_limit > 0); 198 | let mut storage = Storage::with_capacity(id_limit + size); 199 | let scratch = Scratch::new(storage.none_storing_iter(id_limit)); 200 | let inner = MpmcQueueInner::new(storage.none_storing_iter(size)); 201 | let id_alloc = IndexAllocator::new(id_limit); 202 | 203 | Handle::new(MpmcQueueWrapper { 204 | storage: storage, 205 | scratch: scratch, 206 | inner: inner, 207 | id_alloc: id_alloc, 208 | }) 209 | } 210 | 211 | pub unsafe fn push(&self, id: &mut MpmcQueueAccessorId, value: T) -> Result<(), T> { 212 | let place = self.scratch.get_mut(id); 213 | self.storage.replace(place, Some(value)); 214 | if self.inner.push(place) { 215 | Ok(()) 216 | } else { 217 | Err(self.storage.replace(place, None).expect("Some(value) in container")) 218 | } 219 | } 220 | 221 | pub unsafe fn pop(&self, id: &mut MpmcQueueAccessorId) -> Result { 222 | let place = self.scratch.get_mut(id); 223 | if self.inner.pop(place) { 224 | Ok(self.storage.replace(place, None).expect("Some(value) in container")) 225 | } else { 226 | Err(()) 227 | } 228 | } 229 | } 230 | 231 | impl HandleInner for MpmcQueueWrapper { 232 | type IdAllocator = IndexAllocator; 233 | fn id_allocator(&self) -> &IndexAllocator { 234 | &self.id_alloc 235 | } 236 | fn raise_id_limit(&mut self, new_limit: usize) { 237 | let old_limit = self.id_limit(); 238 | assert!(new_limit > old_limit); 239 | let extra = new_limit - old_limit; 240 | self.storage.reserve(extra); 241 | self.scratch.extend(self.storage.none_storing_iter(extra)); 242 | self.id_alloc.resize(new_limit); 243 | } 244 | } 245 | 246 | #[derive(Debug)] 247 | pub struct MpmcQueueReceiver>>(IdHandle); 248 | 249 | impl>> MpmcQueueReceiver { 250 | pub fn receive(&mut self) -> Result { 251 | self.0.with_mut(|inner, id| unsafe { inner.pop(id) }) 252 | } 253 | 254 | pub fn try_clone(&self) -> Option { 255 | self.0.try_clone().map(MpmcQueueReceiver) 256 | } 257 | } 258 | 259 | impl>> Clone for MpmcQueueReceiver { 260 | fn clone(&self) -> Self { 261 | MpmcQueueReceiver(self.0.clone()) 262 | } 263 | } 264 | 265 | pub type ResizingMpmcQueueReceiver = MpmcQueueReceiver>>; 266 | pub type BoundedMpmcQueueReceiver = MpmcQueueReceiver>>; 267 | 268 | #[derive(Debug)] 269 | pub struct MpmcQueueSender>>(IdHandle); 270 | 271 | impl>> MpmcQueueSender { 272 | pub fn send(&mut 
self, value: T) -> Result<(), T> { 273 | self.0.with_mut(|inner, id| unsafe { inner.push(id, value) }) 274 | } 275 | pub fn try_clone(&self) -> Option { 276 | self.0.try_clone().map(MpmcQueueSender) 277 | } 278 | } 279 | 280 | impl>> Clone for MpmcQueueSender { 281 | fn clone(&self) -> Self { 282 | MpmcQueueSender(self.0.clone()) 283 | } 284 | } 285 | 286 | pub type ResizingMpmcQueueSender = MpmcQueueSender>>; 287 | pub type BoundedMpmcQueueSender = MpmcQueueSender>>; 288 | 289 | pub fn new>>(max_accessors: usize, size: usize) -> (MpmcQueueSender, MpmcQueueReceiver) { 290 | let inner = MpmcQueueWrapper::new(max_accessors, size); 291 | (MpmcQueueSender(IdHandle::new(&inner)), MpmcQueueReceiver(IdHandle::new(&inner))) 292 | } 293 | -------------------------------------------------------------------------------- /src/containers/mpsc_queue.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::marker::PhantomData; 3 | 4 | use handle::{HandleInner, Handle, IdHandle, ResizingHandle, BoundedHandle, Like}; 5 | use primitives::atomic_ext::AtomicExt; 6 | use primitives::index_allocator::IndexAllocator; 7 | use containers::storage::{Place, Storage}; 8 | use containers::scratch::Scratch; 9 | 10 | // Pointers are not wrapped until they reach WRAP_THRESHOLD, at 11 | // which point they are wrapped modulo RING_SIZE*2. This allows 12 | // accessors to be confident whether the pointers have changed 13 | // since they were read, preventing the ABA problem, whilst also 14 | // distinguishing between an empty queue and a full queue: 15 | // ___________________ 16 | // |___|_X_|_X_|___|___| 17 | // ^ ^ 18 | // H T 19 | // 20 | // (H == T) => Empty 21 | // (H != T) && (H%C == T%C) => Full 22 | // 23 | // 24 | // Each cell on the ring stores an access count in the high bits: 25 | // ____________________________ 26 | // | access count | value index | 27 | // |____BITS/4____|__REMAINING__| 28 | // 29 | // An odd access count indicates that the cell contains a value, 30 | // while an even access count indicates that the cell is empty. 31 | // All access counts are initialized to zero. 32 | // The access count is used to prevent a form of the ABA problem, 33 | // where a producer tries to store into a cell which is no longer 34 | // the tail of the queue, and happens to have the same value index. 35 | 36 | // Number of bits in access count 37 | const TAG_BITS: usize = ::POINTER_BITS/4; 38 | // Mask to extract the value index 39 | const VALUE_MASK: usize = !0 >> TAG_BITS; 40 | // Mask to extract the tag 41 | const TAG_MASK: usize = !VALUE_MASK; 42 | // Lowest bit of tag 43 | const TAG_BIT: usize = 1 << (::POINTER_BITS - TAG_BITS); 44 | // Threshold at which to wrap the head/tail pointers 45 | const WRAP_THRESHOLD: usize = !0 ^ (!0 >> 1); 46 | 47 | // The raw queue implementation can only store things that 48 | // look like a `usize`. The values must also be less than 49 | // or equal to VALUE_MASK, to allow room for the tag bits. 50 | #[derive(Debug)] 51 | pub struct MpscQueueInner> { 52 | // Circular buffer storing the values. 
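// Each element packs an access-count tag in the high TAG_BITS bits and a
// value index in the low bits, following the layout described above.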
53 | ring: Vec, 54 | // Pair of pointers into the ring buffer 55 | head: AtomicUsize, 56 | tail: AtomicUsize, 57 | // Pretend we actually store instances of T 58 | phantom: PhantomData, 59 | } 60 | 61 | // Advance a pointer by one cell, wrapping if necessary 62 | fn next_cell(mut index: usize, size2: usize) -> usize { 63 | index += 1; 64 | if index >= WRAP_THRESHOLD { 65 | index = index % size2; 66 | } 67 | index 68 | } 69 | 70 | // Determine if we can just add empty elements to the end of the ring-buffer. 71 | // If the "live section" wraps around, then we can't. 72 | fn wraps_around(start: usize, end: usize, size: usize) -> bool { 73 | let size2 = size*2; 74 | // If the end is before the start, or they're equal but the queue is full, 75 | // then we will need to do some additional shuffling after extending the 76 | // queue. 77 | (end % size) < (start % size) || ((start + size) % size2 == (end % size2)) 78 | } 79 | 80 | // In-place rotation algorithm (shifts to the right) 81 | fn rotate_slice(slice: &mut [T], places: usize) { 82 | // Rotation can be implemented by reversing the slice, 83 | // splitting the slice in two, and then reversing the 84 | // two sub-slices. 85 | slice.reverse(); 86 | let (a, b) = slice.split_at_mut(places); 87 | a.reverse(); 88 | b.reverse(); 89 | } 90 | 91 | fn validate_value(v: usize) -> usize { 92 | assert!(v <= VALUE_MASK, "Value index outside allowed range!"); 93 | v 94 | } 95 | 96 | impl> MpscQueueInner { 97 | // Constructor takes an iterator to "fill" the buffer with an initial set of 98 | // values (even empty cells have a value index...) 99 | pub fn new>(iter: I) -> Self { 100 | MpscQueueInner { 101 | ring: iter.into_iter() 102 | .map(Into::into) 103 | .map(validate_value) 104 | .map(AtomicUsize::new) 105 | .collect(), 106 | head: AtomicUsize::new(0), 107 | tail: AtomicUsize::new(0), 108 | phantom: PhantomData 109 | } 110 | } 111 | 112 | pub fn extend>(&mut self, iter: I) where I::IntoIter: ExactSizeIterator { 113 | let iter = iter.into_iter(); 114 | let size = self.ring.len(); 115 | // Size of the iterator tells us how much the queue is being extended 116 | let extra = iter.len(); 117 | self.ring.reserve_exact(extra); 118 | self.ring.extend(iter.map(Into::into).map(validate_value).map(AtomicUsize::new)); 119 | 120 | // If the queue wraps around the buffer, shift the elements 121 | // along such that the start section of the queue is moved to the 122 | // new end of the buffer. 123 | let head = self.head.get_mut(); 124 | let tail = self.tail.get_mut(); 125 | if wraps_around(*head, *tail, size) { 126 | rotate_slice(&mut self.ring[*head..], extra); 127 | *head += extra; 128 | } 129 | } 130 | 131 | // This is the length of the buffer, not the number of "live" elements 132 | pub fn len(&self) -> usize { 133 | self.ring.len() 134 | } 135 | 136 | // Swap a value onto the tail of the queue. If the queue is observed to 137 | // be full, there are no side effects and `false` is returned. 
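// As a concrete illustration, on a 64-bit target (POINTER_BITS == 64):
//   TAG_BITS       == 16
//   TAG_BIT        == 1 << 48
//   VALUE_MASK     == 0x0000_FFFF_FFFF_FFFF (low 48 bits: the value index)
//   TAG_MASK       == 0xFFFF_0000_0000_0000 (high 16 bits: the access count)
//   WRAP_THRESHOLD == 1 << 63
// Adding TAG_BIT to a cell bumps the access count by one, flipping it between
// "empty" (even) and "full" (odd) without touching the value index it holds.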
138 | pub unsafe fn push(&self, value: &mut T) -> bool { 139 | let index = value.borrow_mut(); 140 | let size = self.ring.len(); 141 | let size2 = size*2; 142 | 143 | validate_value(*index); 144 | 145 | loop { 146 | // Uppdate the cell pointed to by the tail 147 | // `try_update_indirect` takes two functions: 148 | // 149 | // deref 150 | // Takes the tail pointer as input, and returns 151 | // `Ok(&cell_to_update)` or `Err(should_retry)` 152 | // 153 | // update 154 | // Takes tail pointer, and the cell's previous value, 155 | // and returns `Ok(new_value)` or `Err(should_retry)` 156 | // 157 | // The function ensures that the tail pointer did not 158 | // get updated while the previous value in the cell 159 | // was being read. 160 | match self.tail.try_update_indirect(|tail| { 161 | // deref 162 | 163 | let head = self.head.load(Ordering::Acquire); 164 | // If not full 165 | if (tail % size2) != (head + size) % size2 { 166 | // Try updating cell at tail position 167 | Ok(&self.ring[tail % size]) 168 | } else { 169 | // We observed a full queue, so stop trying 170 | Err(false) 171 | } 172 | }, |tail, cell| { 173 | // update 174 | 175 | // If cell at tail is empty 176 | if cell & TAG_BIT == 0 { 177 | // Swap in our index, and mark as full 178 | Ok((cell & TAG_MASK).wrapping_add(TAG_BIT) | *index) 179 | } else { 180 | // Cell is full, another thread is midway through an insertion 181 | // Try to assist the stalled thread, by advancing the tail pointer for them. 182 | let _ = self.tail.compare_exchange(tail, next_cell(tail, size2), Ordering::AcqRel, Ordering::Acquire); 183 | // Retry the insertion now that we've helped the other thread to progress 184 | Err(true) 185 | } 186 | }) { 187 | Ok((tail, prev_cell, _)) => { 188 | // Update the tail pointer if necessary 189 | let _ = self.tail.compare_exchange(tail, next_cell(tail, size2), Ordering::AcqRel, Ordering::Acquire); 190 | *index = prev_cell & VALUE_MASK; 191 | return true; 192 | } 193 | Err(false) => return false, 194 | Err(true) => {}, 195 | } 196 | } 197 | } 198 | 199 | pub unsafe fn pop R>(&self, receiver: F) -> Result { 200 | let size = self.ring.len(); 201 | let size2 = size*2; 202 | let head = self.head.load(Ordering::Acquire); 203 | let tail = self.tail.load(Ordering::Acquire); 204 | 205 | // If the queue is empty 206 | if head % size2 == tail % size2 { 207 | Err(()) 208 | } else { 209 | let cell = self.ring[head % size].fetch_add(TAG_BIT, Ordering::AcqRel); 210 | assert!(cell & TAG_BIT != 0, "Producer advanced without adding an item!"); 211 | let result = T::virtual_borrow(cell & VALUE_MASK, receiver); 212 | self.head.store((head+1) % size2, Ordering::Release); 213 | Ok(result) 214 | } 215 | } 216 | } 217 | 218 | define_id!(MpscQueueSenderId); 219 | 220 | pub struct MpscQueueWrapper { 221 | storage: Storage, 222 | scratch: Scratch>, 223 | inner: MpscQueueInner>, 224 | id_alloc: IndexAllocator 225 | } 226 | 227 | impl MpscQueueWrapper { 228 | pub fn new>(id_limit: usize, size: usize) -> H { 229 | assert!(id_limit > 0); 230 | let mut storage = Storage::with_capacity(id_limit + size); 231 | let scratch = Scratch::new(storage.none_storing_iter(id_limit)); 232 | let inner = MpscQueueInner::new(storage.none_storing_iter(size)); 233 | let id_alloc = IndexAllocator::new(id_limit); 234 | 235 | Handle::new(MpscQueueWrapper { 236 | storage: storage, 237 | scratch: scratch, 238 | inner: inner, 239 | id_alloc: id_alloc, 240 | }) 241 | } 242 | 243 | pub unsafe fn push(&self, id: &mut MpscQueueSenderId, value: T) -> Result<(), T> { 244 | let 
place = self.scratch.get_mut(id); 245 | self.storage.replace(place, Some(value)); 246 | if self.inner.push(place) { 247 | Ok(()) 248 | } else { 249 | Err(self.storage.replace(place, None).expect("Some(value) in container")) 250 | } 251 | } 252 | 253 | pub unsafe fn pop(&self) -> Result { 254 | self.inner.pop(|place| self.storage.replace(place, None).expect("Some(value) in container")) 255 | } 256 | } 257 | 258 | impl HandleInner for MpscQueueWrapper { 259 | type IdAllocator = IndexAllocator; 260 | fn id_allocator(&self) -> &IndexAllocator { 261 | &self.id_alloc 262 | } 263 | fn raise_id_limit(&mut self, new_limit: usize) { 264 | let old_limit = self.id_limit(); 265 | assert!(new_limit > old_limit); 266 | let extra = new_limit - old_limit; 267 | self.storage.reserve(extra); 268 | self.scratch.extend(self.storage.none_storing_iter(extra)); 269 | self.id_alloc.resize(new_limit); 270 | } 271 | } 272 | 273 | #[derive(Debug)] 274 | pub struct MpscQueueReceiver>>(H); 275 | 276 | impl>> MpscQueueReceiver { 277 | pub fn new(max_senders: usize, size: usize) -> Self { 278 | MpscQueueReceiver(MpscQueueWrapper::new(max_senders, size)) 279 | } 280 | 281 | pub fn receive(&mut self) -> Result { 282 | // This is safe because we guarantee that we are unique 283 | self.0.with(|inner| unsafe { inner.pop() }) 284 | } 285 | } 286 | 287 | pub type ResizingMpscQueueReceiver = MpscQueueReceiver>>; 288 | pub type BoundedMpscQueueReceiver = MpscQueueReceiver>>; 289 | 290 | #[derive(Debug)] 291 | pub struct MpscQueueSender>>(IdHandle); 292 | 293 | impl>> MpscQueueSender { 294 | pub fn new(receiver: &MpscQueueReceiver) -> Self { 295 | MpscQueueSender(IdHandle::new(&receiver.0)) 296 | } 297 | pub fn try_new(receiver: &MpscQueueReceiver) -> Option { 298 | IdHandle::try_new(&receiver.0).map(MpscQueueSender) 299 | } 300 | 301 | pub fn send(&mut self, value: T) -> Result<(), T> { 302 | self.0.with_mut(|inner, id| unsafe { inner.push(id, value) }) 303 | } 304 | pub fn try_clone(&self) -> Option { 305 | self.0.try_clone().map(MpscQueueSender) 306 | } 307 | } 308 | 309 | impl>> Clone for MpscQueueSender { 310 | fn clone(&self) -> Self { 311 | MpscQueueSender(self.0.clone()) 312 | } 313 | } 314 | 315 | pub type ResizingMpscQueueSender = MpscQueueSender>>; 316 | pub type BoundedMpscQueueSender = MpscQueueSender>>; 317 | -------------------------------------------------------------------------------- /src/containers/scratch.rs: -------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | use std::marker::PhantomData; 3 | 4 | use handle::Like; 5 | use primitives::invariant::Invariant; 6 | 7 | #[derive(Debug)] 8 | pub struct Scratch, U>(Vec>, Invariant); 9 | unsafe impl, U: Send> Sync for Scratch {} 10 | 11 | impl, U> Scratch { 12 | pub fn store(&mut self, value: U) { 13 | self.0.push(UnsafeCell::new(value)); 14 | } 15 | pub unsafe fn get_mut(&self, id: &mut T) -> &mut U { 16 | &mut *(&self.0[*id.borrow()]).get() 17 | } 18 | pub fn new>(iter: I) -> Self { 19 | let mut result = Scratch(Vec::new(), PhantomData); 20 | result.extend(iter); 21 | result 22 | } 23 | pub fn extend>(&mut self, iter: I) { 24 | self.0.extend(iter.into_iter().map(UnsafeCell::new)); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/containers/storage.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | use std::cell::UnsafeCell; 3 | use std::mem; 4 | use std::borrow::{Borrow, 
BorrowMut}; 5 | 6 | #[derive(Debug)] 7 | pub struct Place(usize, PhantomData); 8 | 9 | impl From for Place { 10 | fn from(idx: usize) -> Self { 11 | Place(idx, PhantomData) 12 | } 13 | } 14 | 15 | impl From> for usize { 16 | fn from(place: Place) -> Self { 17 | place.0 18 | } 19 | } 20 | 21 | impl Borrow for Place { 22 | fn borrow(&self) -> &usize { 23 | &self.0 24 | } 25 | } 26 | 27 | impl BorrowMut for Place { 28 | fn borrow_mut(&mut self) -> &mut usize { 29 | &mut self.0 30 | } 31 | } 32 | 33 | #[derive(Debug)] 34 | pub struct Storage(Vec>>); 35 | unsafe impl Sync for Storage {} 36 | 37 | impl Storage { 38 | pub fn store(&mut self, value: Option) -> Place { 39 | let result = self.0.len().into(); 40 | self.0.push(UnsafeCell::new(value)); 41 | result 42 | } 43 | pub unsafe fn replace(&self, place: &mut Place, value: Option) -> Option { 44 | mem::replace(&mut *(&self.0[place.0]).get(), value) 45 | } 46 | pub fn with_capacity(capacity: usize) -> Self { 47 | Storage(Vec::with_capacity(capacity)) 48 | } 49 | pub fn new() -> Self { 50 | Self::with_capacity(0) 51 | } 52 | pub fn reserve(&mut self, extra: usize) { 53 | self.0.reserve_exact(extra); 54 | } 55 | pub fn none_storing_iter(&mut self, n: usize) -> NoneStoringIter { 56 | NoneStoringIter { 57 | storage: self, 58 | n: n, 59 | } 60 | } 61 | } 62 | 63 | pub struct NoneStoringIter<'a, T: 'a> { 64 | storage: &'a mut Storage, 65 | n: usize, 66 | } 67 | 68 | impl<'a, T: 'a> Iterator for NoneStoringIter<'a, T> { 69 | type Item = Place; 70 | 71 | fn next(&mut self) -> Option> { 72 | if self.n == 0 { 73 | None 74 | } else { 75 | self.n -= 1; 76 | Some(self.storage.store(None)) 77 | } 78 | } 79 | 80 | fn size_hint(&self) -> (usize, Option) { 81 | (self.n, Some(self.n)) 82 | } 83 | } 84 | 85 | impl<'a, T: 'a> ExactSizeIterator for NoneStoringIter<'a, T> {} 86 | -------------------------------------------------------------------------------- /src/handle/bounded.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use super::{HandleInner, Handle, IdHandle, IdAllocator}; 4 | 5 | /// Implementation of Handle which panics if it runs out of IDs 6 | #[derive(Debug)] 7 | pub struct BoundedHandle { 8 | inner: Arc 9 | } 10 | 11 | unsafe impl Handle for BoundedHandle { 12 | type HandleInner = H; 13 | 14 | fn try_allocate_id(&self) -> Option where Self::HandleInner: HandleInner { 15 | self.inner.id_allocator().try_allocate_id() 16 | } 17 | 18 | fn free_id(&self, id: IdType) where Self::HandleInner: HandleInner { 19 | self.inner.id_allocator().free_id(id) 20 | } 21 | 22 | fn with R>(&self, f: F) -> R { 23 | f(&self.inner) 24 | } 25 | 26 | fn new(inner: Self::HandleInner) -> Self { 27 | BoundedHandle { 28 | inner: Arc::new(inner) 29 | } 30 | } 31 | 32 | fn id_limit(&self) -> usize where Self::HandleInner: HandleInner { 33 | self.inner.id_allocator().id_limit() 34 | } 35 | } 36 | 37 | impl Clone for BoundedHandle { 38 | fn clone(&self) -> Self { 39 | BoundedHandle { 40 | inner: self.inner.clone() 41 | } 42 | } 43 | } 44 | 45 | 46 | pub type BoundedIdHandle = IdHandle>; 47 | -------------------------------------------------------------------------------- /src/handle/core.rs: -------------------------------------------------------------------------------- 1 | use super::inner::HandleInner; 2 | 3 | /// This encapsulates the pattern of wrapping a fixed-size lock-free data-structure inside an 4 | /// RwLock, and automatically resizing it when the number of concurrent handles increases. 
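// Rough usage sketch (`MyInner` stands in for any type implementing `HandleInner`):
//
//     let handle: ResizingHandle<MyInner> = Handle::new(MyInner::new(4));
//     let mut id_handle = IdHandle::new(&handle); // allocates this owner's ID
//     id_handle.with_mut(|inner, id| { /* operate on `inner` using `id` */ });
//     // Dropping `id_handle` returns the ID via `Handle::free_id`.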
5 | 6 | pub unsafe trait Handle: Sized + Clone { 7 | type HandleInner; 8 | 9 | fn try_allocate_id(&self) -> Option where Self::HandleInner: HandleInner; 10 | fn free_id(&self, id: IdType) where Self::HandleInner: HandleInner; 11 | fn with R>(&self, f: F) -> R; 12 | fn new(inner: Self::HandleInner) -> Self; 13 | fn id_limit(&self) -> usize where Self::HandleInner: HandleInner; 14 | } 15 | 16 | // `ResizingHandle`s will automatically allocate themselves an ID, which 17 | // may require resizing the data structure. 18 | #[derive(Debug)] 19 | pub struct IdHandle where H::HandleInner: HandleInner { 20 | id: Option, 21 | handle: H 22 | } 23 | 24 | impl IdHandle where H::HandleInner: HandleInner { 25 | pub fn try_clone(&self) -> Option { 26 | Self::try_new(&self.handle) 27 | } 28 | 29 | pub fn new(handle: &H) -> Self { 30 | Self::try_new(handle).expect("Failed to allocate an ID") 31 | } 32 | pub fn try_new(handle: &H) -> Option { 33 | if let Some(id) = handle.try_allocate_id() { 34 | Some(IdHandle { 35 | id: Some(id), 36 | handle: handle.clone() 37 | }) 38 | } else { 39 | None 40 | } 41 | } 42 | pub fn id(&self) -> &IdType { 43 | self.id.as_ref().expect("Some(id)") 44 | } 45 | pub fn with R>(&self, f: F) -> R { 46 | self.handle.with(f) 47 | } 48 | pub fn with_mut R>(&mut self, f: F) -> R { 49 | let id = self.id.as_mut().expect("Some(id)"); 50 | self.handle.with(move |v| f(v, id)) 51 | } 52 | } 53 | 54 | impl Clone for IdHandle where H::HandleInner: HandleInner { 55 | fn clone(&self) -> Self { 56 | Self::new(&self.handle) 57 | } 58 | } 59 | 60 | impl Drop for IdHandle where H::HandleInner: HandleInner { 61 | fn drop(&mut self) { 62 | self.handle.free_id(self.id.take().expect("Some(id)")) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/handle/ids.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::{Borrow, BorrowMut}; 2 | 3 | use primitives::index_allocator::IndexAllocator; 4 | 5 | pub trait Like: Into + From + Borrow + BorrowMut { 6 | fn virtual_borrow R>(value: T, f: F) -> R { 7 | let mut v = value.into(); 8 | let result = f(&mut v); 9 | let _: T = v.into(); 10 | result 11 | } 12 | } 13 | impl + From + Borrow + BorrowMut> Like for U {} 14 | 15 | macro_rules! 
define_id { 16 | ($name:ident) => { 17 | #[derive(Debug)] 18 | pub struct $name(usize); 19 | 20 | impl From for $name { 21 | fn from(value: usize) -> Self { $name(value) } 22 | } 23 | impl From<$name> for usize { 24 | fn from(value: $name) -> Self { value.0 } 25 | } 26 | impl ::std::borrow::Borrow for $name { 27 | fn borrow(&self) -> &usize { &self.0 } 28 | } 29 | impl ::std::borrow::BorrowMut for $name { 30 | fn borrow_mut(&mut self) -> &mut usize { &mut self.0 } 31 | } 32 | } 33 | } 34 | 35 | pub trait IdAllocator { 36 | fn new(limit: usize) -> Self; 37 | fn try_allocate_id(&self) -> Option; 38 | fn free_id(&self, id: IdType); 39 | fn id_limit(&self) -> usize; 40 | } 41 | 42 | impl> IdAllocator for IndexAllocator { 43 | fn new(limit: usize) -> Self { 44 | IndexAllocator::new(limit) 45 | } 46 | fn try_allocate_id(&self) -> Option { 47 | self.try_allocate().map(Into::into) 48 | } 49 | fn free_id(&self, id: IdType) { 50 | self.free(id.into()) 51 | } 52 | fn id_limit(&self) -> usize { 53 | self.len() 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/handle/inner.rs: -------------------------------------------------------------------------------- 1 | use super::ids::IdAllocator; 2 | 3 | pub trait HandleInner { 4 | type IdAllocator: IdAllocator; 5 | 6 | fn raise_id_limit(&mut self, new_limit: usize); 7 | fn id_allocator(&self) -> &Self::IdAllocator; 8 | fn id_limit(&self) -> usize { 9 | self.id_allocator().id_limit() 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/handle/mod.rs: -------------------------------------------------------------------------------- 1 | mod core; 2 | #[macro_use] 3 | mod ids; 4 | mod inner; 5 | mod resizing; 6 | mod bounded; 7 | 8 | pub use self::core::*; 9 | pub use self::resizing::*; 10 | pub use self::bounded::*; 11 | pub use self::ids::*; 12 | pub use self::inner::*; 13 | -------------------------------------------------------------------------------- /src/handle/resizing.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use parking_lot::RwLock; 3 | 4 | use super::{Handle, IdHandle, IdAllocator, HandleInner}; 5 | 6 | /// Implementation of Handle which resizes the data structure as needed 7 | #[derive(Debug)] 8 | pub struct ResizingHandle { 9 | inner: Arc> 10 | } 11 | 12 | unsafe impl Handle for ResizingHandle { 13 | type HandleInner = H; 14 | 15 | fn try_allocate_id(&self) -> Option where Self::HandleInner: HandleInner { 16 | let prev_limit = { 17 | // Optimistically try getting a fresh ID 18 | let guard = self.inner.read(); 19 | let id_alloc = guard.id_allocator(); 20 | if let Some(id) = id_alloc.try_allocate_id() { 21 | return Some(id); 22 | } 23 | id_alloc.id_limit() 24 | }; 25 | { 26 | // Try again, in case another thread already resized the contents 27 | let mut guard = self.inner.write(); 28 | let (mut maybe_id, new_limit) = { 29 | let id_alloc = guard.id_allocator(); 30 | (id_alloc.try_allocate_id(), id_alloc.id_limit()) 31 | }; 32 | 33 | // Even if we get an ID, if the container is still the same size, 34 | // resize it anyway, to avoid this slower path from happening 35 | // repeatedly. 
36 | if prev_limit == new_limit || maybe_id.is_none() { 37 | guard.raise_id_limit(new_limit*2); 38 | if maybe_id.is_none() { 39 | maybe_id = guard.id_allocator().try_allocate_id(); 40 | } 41 | } 42 | 43 | maybe_id 44 | } 45 | } 46 | 47 | fn free_id(&self, id: IdType) where Self::HandleInner: HandleInner { 48 | self.inner.read().id_allocator().free_id(id) 49 | } 50 | 51 | fn with R>(&self, f: F) -> R { 52 | f(&self.inner.read()) 53 | } 54 | 55 | fn new(inner: Self::HandleInner) -> Self { 56 | ResizingHandle { 57 | inner: Arc::new(RwLock::new(inner)) 58 | } 59 | } 60 | 61 | fn id_limit(&self) -> usize where Self::HandleInner: HandleInner { 62 | self.inner.read().id_allocator().id_limit() 63 | } 64 | } 65 | 66 | impl Clone for ResizingHandle { 67 | fn clone(&self) -> Self { 68 | ResizingHandle { 69 | inner: self.inner.clone() 70 | } 71 | } 72 | } 73 | 74 | 75 | pub type ResizingIdHandle = IdHandle>; 76 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(test, feature(test))] 2 | 3 | extern crate parking_lot; 4 | extern crate futures; 5 | 6 | pub mod primitives; 7 | #[macro_use] 8 | pub mod handle; 9 | pub mod containers; 10 | pub mod sync; 11 | 12 | #[cfg(target_pointer_width = "16")] const POINTER_BITS: usize = 16; 13 | #[cfg(target_pointer_width = "32")] const POINTER_BITS: usize = 32; 14 | #[cfg(target_pointer_width = "64")] const POINTER_BITS: usize = 64; 15 | #[cfg(target_pointer_width = "128")] const POINTER_BITS: usize = 128; 16 | 17 | #[cfg(test)] 18 | mod tests { 19 | extern crate test; 20 | 21 | use self::test::Bencher; 22 | use std::thread; 23 | use std::sync::{Mutex, Arc}; 24 | 25 | fn atomic_cell_smoke() { 26 | use primitives::atomic_cell::AtomicCell; 27 | 28 | let results = Arc::new(Mutex::new(Vec::new())); 29 | let results0 = results.clone(); 30 | let results1 = results.clone(); 31 | let cell = AtomicCell::new(0); 32 | let cell0 = cell.clone(); 33 | let cell1 = cell.clone(); 34 | let thread0 = thread::spawn(move || { 35 | let mut result = Vec::new(); 36 | for i in 1..1000000 { 37 | result.push(cell0.swap(i)); 38 | } 39 | results0.lock().unwrap().extend(result.into_iter()); 40 | }); 41 | let thread1 = thread::spawn(move || { 42 | let mut result = Vec::new(); 43 | for i in 1000000..2000000 { 44 | result.push(cell1.swap(i)); 45 | } 46 | results1.lock().unwrap().extend(result.into_iter()); 47 | }); 48 | thread0.join().unwrap(); 49 | thread1.join().unwrap(); 50 | let mut v = results.lock().unwrap(); 51 | v.push(cell.swap(0)); 52 | v.sort(); 53 | 54 | assert_eq!(v.len(), 2000000); 55 | for (a, &b) in v.iter().enumerate() { 56 | assert_eq!(a, b); 57 | } 58 | } 59 | 60 | fn bounded_atomic_cell_smoke() { 61 | use containers::atomic_cell::BoundedAtomicCell; 62 | 63 | let results = Arc::new(Mutex::new(Vec::new())); 64 | let results0 = results.clone(); 65 | let results1 = results.clone(); 66 | let mut cell = BoundedAtomicCell::new(5, 0); 67 | let mut cell0 = cell.clone(); 68 | let mut cell1 = cell.clone(); 69 | let thread0 = thread::spawn(move || { 70 | let mut result = Vec::new(); 71 | for i in 1..1000000 { 72 | result.push(cell0.swap(i)); 73 | } 74 | results0.lock().unwrap().extend(result.into_iter()); 75 | }); 76 | let thread1 = thread::spawn(move || { 77 | let mut result = Vec::new(); 78 | for i in 1000000..2000000 { 79 | result.push(cell1.swap(i)); 80 | } 81 | results1.lock().unwrap().extend(result.into_iter()); 82 | }); 83 | 
thread0.join().unwrap(); 84 | thread1.join().unwrap(); 85 | let mut v = results.lock().unwrap(); 86 | v.push(cell.swap(0)); 87 | v.sort(); 88 | 89 | assert_eq!(v.len(), 2000000); 90 | for (a, &b) in v.iter().enumerate() { 91 | assert_eq!(a, b); 92 | } 93 | } 94 | 95 | fn resizing_atomic_cell_smoke() { 96 | use containers::atomic_cell::ResizingAtomicCell; 97 | 98 | let results = Arc::new(Mutex::new(Vec::new())); 99 | let results0 = results.clone(); 100 | let results1 = results.clone(); 101 | let mut cell = ResizingAtomicCell::new(5, 0); 102 | let mut cell0 = cell.clone(); 103 | let mut cell1 = cell.clone(); 104 | let thread0 = thread::spawn(move || { 105 | let mut result = Vec::new(); 106 | for i in 1..1000000 { 107 | result.push(cell0.swap(i)); 108 | } 109 | results0.lock().unwrap().extend(result.into_iter()); 110 | }); 111 | let thread1 = thread::spawn(move || { 112 | let mut result = Vec::new(); 113 | for i in 1000000..2000000 { 114 | result.push(cell1.swap(i)); 115 | } 116 | results1.lock().unwrap().extend(result.into_iter()); 117 | }); 118 | thread0.join().unwrap(); 119 | thread1.join().unwrap(); 120 | let mut v = results.lock().unwrap(); 121 | v.push(cell.swap(0)); 122 | v.sort(); 123 | 124 | assert_eq!(v.len(), 2000000); 125 | for (a, &b) in v.iter().enumerate() { 126 | assert_eq!(a, b); 127 | } 128 | } 129 | 130 | fn bounded_mpsc_queue_smoke() { 131 | use containers::mpsc_queue::{BoundedMpscQueueSender, BoundedMpscQueueReceiver}; 132 | use std::time::Duration; 133 | 134 | let sender_count = 20; 135 | let rapid_fire = 10; 136 | let iterations = 10; 137 | let mut threads = Vec::with_capacity(sender_count); 138 | let mut receiver = BoundedMpscQueueReceiver::new(sender_count, 50); 139 | for i in 0..sender_count { 140 | let mut sender = BoundedMpscQueueSender::new(&receiver); 141 | threads.push(thread::spawn(move || { 142 | for _ in 0..iterations { 143 | for _ in 0..rapid_fire { 144 | while sender.send(i).is_err() { 145 | thread::sleep(Duration::from_millis(1)); 146 | } 147 | } 148 | thread::sleep(Duration::from_millis(1)); 149 | } 150 | })); 151 | } 152 | let expected = rapid_fire*iterations*sender_count; 153 | let mut results = vec![0; sender_count]; 154 | 155 | for _ in 0..expected { 156 | loop { 157 | if let Ok(v) = receiver.receive() { 158 | results[v] += 1; 159 | break; 160 | } 161 | } 162 | } 163 | 164 | for t in threads { 165 | let _ = t.join(); 166 | } 167 | 168 | for r in results { 169 | assert!(r == rapid_fire*iterations); 170 | } 171 | } 172 | 173 | fn bounded_mpmc_queue_smoke() { 174 | use containers::mpmc_queue::{BoundedMpmcQueueSender, BoundedMpmcQueueReceiver, self}; 175 | use std::time::Duration; 176 | 177 | let sender_count = 20; 178 | let rapid_fire = 10; 179 | let iterations = 1000; 180 | let mut threads = Vec::with_capacity(sender_count); 181 | let (sender, receiver): (BoundedMpmcQueueSender<_>, BoundedMpmcQueueReceiver<_>) = mpmc_queue::new(sender_count*2+2, 50); 182 | let results = Arc::new(Mutex::new(vec![0; sender_count])); 183 | for i in 0..sender_count { 184 | let mut sender = sender.clone(); 185 | let mut receiver = receiver.clone(); 186 | let results = results.clone(); 187 | threads.push(thread::spawn(move || { 188 | for _ in 0..iterations { 189 | for _ in 0..rapid_fire { 190 | while sender.send(i).is_err() { 191 | thread::sleep(Duration::from_millis(1)); 192 | } 193 | } 194 | thread::sleep(Duration::from_millis(1)); 195 | } 196 | })); 197 | threads.push(thread::spawn(move || { 198 | let mut tmp = vec![0; sender_count]; 199 | for _ in 0..iterations { 200 | 
for _ in 0..rapid_fire { 201 | loop { 202 | if let Ok(i) = receiver.receive() { 203 | tmp[i] += 1; 204 | break; 205 | } 206 | thread::sleep(Duration::from_millis(1)); 207 | } 208 | } 209 | thread::sleep(Duration::from_millis(1)); 210 | } 211 | 212 | let mut r = results.lock().unwrap(); 213 | for index in 0..sender_count { 214 | (*r)[index] += tmp[index]; 215 | } 216 | })); 217 | } 218 | 219 | for t in threads { 220 | let _ = t.join(); 221 | } 222 | 223 | for &r in &*results.lock().unwrap() { 224 | assert!(r == rapid_fire*iterations); 225 | } 226 | } 227 | 228 | #[bench] 229 | fn atomic_cell_bench(b: &mut Bencher) { 230 | b.iter(|| atomic_cell_smoke()) 231 | } 232 | 233 | #[bench] 234 | fn bounded_atomic_cell_bench(b: &mut Bencher) { 235 | b.iter(|| bounded_atomic_cell_smoke()) 236 | } 237 | 238 | #[bench] 239 | fn resizing_atomic_cell_bench(b: &mut Bencher) { 240 | b.iter(|| resizing_atomic_cell_smoke()) 241 | } 242 | 243 | #[bench] 244 | fn bounded_mpsc_queue_bench(b: &mut Bencher) { 245 | b.iter(|| bounded_mpsc_queue_smoke()) 246 | } 247 | 248 | #[bench] 249 | fn bounded_mpmc_queue_bench(b: &mut Bencher) { 250 | b.iter(|| bounded_mpmc_queue_smoke()) 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /src/primitives/append_list.rs: -------------------------------------------------------------------------------- 1 | /// AppendList is a low-level primitive supporting two safe operations: 2 | /// `push`, which appends a node to the list, and `iter` which iterates the list 3 | /// The list cannot be shrunk whilst in use. 4 | 5 | use std::sync::atomic::{AtomicPtr, Ordering}; 6 | use std::{ptr, mem}; 7 | 8 | type NodePtr = Option>>; 9 | 10 | #[derive(Debug)] 11 | struct Node { 12 | value: T, 13 | next: AppendList 14 | } 15 | 16 | #[derive(Debug)] 17 | pub struct AppendList(AtomicPtr>); 18 | 19 | impl AppendList { 20 | fn into_raw(ptr: NodePtr) -> *mut Node { 21 | match ptr { 22 | Some(b) => Box::into_raw(b), 23 | None => ptr::null_mut() 24 | } 25 | } 26 | unsafe fn from_raw(ptr: *mut Node) -> NodePtr { 27 | if ptr == ptr::null_mut() { 28 | None 29 | } else { 30 | Some(Box::from_raw(ptr)) 31 | } 32 | } 33 | 34 | fn new_internal(ptr: NodePtr) -> Self { 35 | AppendList(AtomicPtr::new(Self::into_raw(ptr))) 36 | } 37 | 38 | pub fn new() -> Self { 39 | Self::new_internal(None) 40 | } 41 | 42 | pub fn append(&self, value: T) { 43 | self.append_list(AppendList::new_internal(Some(Box::new(Node { 44 | value: value, 45 | next: AppendList::new() 46 | })))); 47 | } 48 | 49 | unsafe fn append_ptr(&self, p: *mut Node) { 50 | loop { 51 | match self.0.compare_exchange_weak(ptr::null_mut(), p, Ordering::AcqRel, Ordering::Acquire) { 52 | Ok(_) => return, 53 | Err(head) => if !head.is_null() { 54 | return (*head).next.append_ptr(p); 55 | } 56 | } 57 | } 58 | } 59 | 60 | pub fn append_list(&self, other: AppendList) { 61 | let p = other.0.load(Ordering::Acquire); 62 | mem::forget(other); 63 | unsafe { self.append_ptr(p) }; 64 | } 65 | 66 | pub fn iter(&self) -> AppendListIterator { 67 | AppendListIterator(&self.0) 68 | } 69 | } 70 | 71 | impl<'a, T> IntoIterator for &'a AppendList { 72 | type Item = &'a T; 73 | type IntoIter = AppendListIterator<'a, T>; 74 | 75 | fn into_iter(self) -> AppendListIterator<'a, T> { 76 | self.iter() 77 | } 78 | } 79 | 80 | impl Drop for AppendList { 81 | fn drop(&mut self) { 82 | unsafe { Self::from_raw(mem::replace(self.0.get_mut(), ptr::null_mut())) }; 83 | } 84 | } 85 | 86 | #[derive(Debug)] 87 | pub struct AppendListIterator<'a, T: 
'a>(&'a AtomicPtr>); 88 | 89 | impl<'a, T: 'a> Iterator for AppendListIterator<'a, T> { 90 | type Item = &'a T; 91 | 92 | fn next(&mut self) -> Option<&'a T> { 93 | let p = self.0.load(Ordering::Acquire); 94 | if p.is_null() { 95 | None 96 | } else { 97 | unsafe { 98 | self.0 = &(*p).next.0; 99 | Some(&(*p).value) 100 | } 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/primitives/atomic_cell.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicPtr, Ordering}; 2 | use std::cell::Cell; 3 | use std::sync::Arc; 4 | use std::ptr; 5 | use std::mem; 6 | 7 | 8 | // Helper functions for allocation in stable rust 9 | fn allocate() -> *mut T { 10 | let mut v = Vec::with_capacity(1); 11 | let result = v.as_mut_ptr(); 12 | mem::forget(v); 13 | result 14 | } 15 | 16 | unsafe fn free(ptr: *mut T) { 17 | Vec::from_raw_parts(ptr, 0, 1); 18 | } 19 | 20 | // Shared part of an AtomicCell 21 | #[derive(Debug)] 22 | struct Inner { 23 | value: AtomicPtr, 24 | } 25 | 26 | impl Inner { 27 | fn new(value: T) -> Inner { 28 | let p = allocate(); 29 | unsafe { 30 | ptr::write(p, value); 31 | } 32 | Inner { 33 | value: AtomicPtr::new(p) 34 | } 35 | } 36 | fn unwrap(mut self) -> T { 37 | let p = mem::replace(self.value.get_mut(), ptr::null_mut()); 38 | unsafe { 39 | let result = ptr::read(p); 40 | free(p); 41 | mem::forget(self); 42 | result 43 | } 44 | } 45 | fn get_mut(&mut self) -> &mut T { 46 | unsafe { &mut **self.value.get_mut() } 47 | } 48 | } 49 | 50 | impl Drop for Inner { 51 | fn drop(&mut self) { 52 | let p = mem::replace(self.value.get_mut(), ptr::null_mut()); 53 | unsafe { 54 | ptr::read(p); 55 | free(p); 56 | } 57 | } 58 | } 59 | 60 | // Manages heap space for a T - may or may not contain an initialized T 61 | #[derive(Debug)] 62 | struct StorageCell(Cell<*mut T>); 63 | 64 | impl StorageCell { 65 | fn new() -> Self { 66 | StorageCell(Cell::new(allocate())) 67 | } 68 | fn write(&self, value: T) { 69 | unsafe { 70 | ptr::write(self.0.get(), value); 71 | } 72 | } 73 | unsafe fn read(&self) -> T { 74 | ptr::read(self.0.get()) 75 | } 76 | fn get(&self) -> *mut T { 77 | self.0.get() 78 | } 79 | unsafe fn set(&self, p: *mut T) { 80 | self.0.set(p) 81 | } 82 | } 83 | 84 | impl Drop for StorageCell { 85 | fn drop(&mut self) { 86 | unsafe { 87 | free(self.get()) 88 | } 89 | } 90 | } 91 | 92 | unsafe impl Send for StorageCell {} 93 | 94 | /// Lock-free concurrent cell supporting an atomic "swap" operation 95 | #[derive(Debug)] 96 | pub struct AtomicCell { 97 | space: StorageCell, 98 | inner: Arc> 99 | } 100 | 101 | impl AtomicCell { 102 | pub fn new(value: T) -> Self { 103 | AtomicCell { 104 | space: StorageCell::new(), 105 | inner: Arc::new(Inner::new(value)) 106 | } 107 | } 108 | pub fn swap(&self, value: T) -> T { 109 | unsafe { 110 | // Store the value into the space we own 111 | self.space.write(value); 112 | // Swap our space with the shared space atomically 113 | self.space.set(self.inner.value.swap(self.space.get(), Ordering::AcqRel)); 114 | // Retrieve the value from the returned space 115 | self.space.read() 116 | } 117 | } 118 | pub fn try_unwrap(self) -> Result { 119 | match Arc::try_unwrap(self.inner) { 120 | Ok(inner) => Ok(inner.unwrap()), 121 | Err(inner) => Err(AtomicCell { space: self.space, inner: inner }) 122 | } 123 | } 124 | pub fn get_mut(&mut self) -> Option<&mut T> { 125 | Arc::get_mut(&mut self.inner).map(|inner| inner.get_mut()) 126 | } 127 | } 128 | 129 | 
impl Clone for AtomicCell { 130 | fn clone(&self) -> Self { 131 | AtomicCell { 132 | space: StorageCell::new(), 133 | inner: self.inner.clone() 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/primitives/atomic_ext.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{ 2 | AtomicUsize, 3 | AtomicIsize, 4 | AtomicPtr, 5 | Ordering 6 | }; 7 | 8 | pub trait AtomicExt { 9 | type Value: Copy + Eq; 10 | 11 | fn try_update Result>(&self, mut f: F) -> Result<(Self::Value, Self::Value), E> { 12 | let mut prev = self.load_impl(Ordering::Acquire); 13 | loop { 14 | match f(prev) { 15 | Ok(next) => match self.compare_exchange_weak_impl(prev, next, Ordering::AcqRel, Ordering::Acquire) { 16 | Ok(_) => return Ok((prev, next)), 17 | Err(new_prev) => prev = new_prev, 18 | }, 19 | Err(e) => return Err(e) 20 | } 21 | } 22 | } 23 | 24 | // Updates a second atomic referred to by this one. 25 | // Calling code must be careful to tag pointers to avoid the ABA problem. 26 | fn try_update_indirect< 27 | 'a, 28 | A: AtomicExt + 'a, 29 | E, 30 | F: FnMut(Self::Value) -> Result<&'a A, E>, 31 | G: FnMut(Self::Value, A::Value) -> Result 32 | >(&self, mut deref: F, mut update: G) -> Result<(Self::Value, A::Value, A::Value), E> { 33 | let mut prev_ptr = self.load_impl(Ordering::Acquire); 34 | loop { 35 | match deref(prev_ptr) { 36 | Ok(target) => { 37 | let prev = target.load_impl(Ordering::Acquire); 38 | let prev_ptr2 = self.load_impl(Ordering::Acquire); 39 | 40 | if prev_ptr2 == prev_ptr { 41 | match update(prev_ptr, prev) { 42 | Ok(next) => loop { 43 | match target.compare_exchange_weak_impl(prev, next, Ordering::AcqRel, Ordering::Acquire) { 44 | Ok(_) => return Ok((prev_ptr, prev, next)), 45 | Err(new_prev) => if prev != new_prev { 46 | prev_ptr = self.load_impl(Ordering::Acquire); 47 | break; 48 | } 49 | } 50 | }, 51 | Err(e) => return Err(e) 52 | } 53 | } else { 54 | prev_ptr = prev_ptr2; 55 | } 56 | }, 57 | Err(e) => return Err(e) 58 | } 59 | } 60 | } 61 | 62 | fn load_impl(&self, ordering: Ordering) -> Self::Value; 63 | fn compare_exchange_weak_impl( 64 | &self, 65 | current: Self::Value, 66 | new: Self::Value, 67 | success: Ordering, 68 | failure: Ordering 69 | ) -> Result; 70 | } 71 | 72 | macro_rules! atomic_ext_defaults { 73 | () => { 74 | fn load_impl(&self, ordering: Ordering) -> Self::Value { 75 | self.load(ordering) 76 | } 77 | fn compare_exchange_weak_impl( 78 | &self, 79 | current: Self::Value, 80 | new: Self::Value, 81 | success: Ordering, 82 | failure: Ordering 83 | ) -> Result { 84 | self.compare_exchange_weak(current, new, success, failure) 85 | } 86 | } 87 | } 88 | 89 | impl AtomicExt for AtomicUsize { 90 | type Value = usize; 91 | atomic_ext_defaults!(); 92 | } 93 | impl AtomicExt for AtomicIsize { 94 | type Value = isize; 95 | atomic_ext_defaults!(); 96 | } 97 | impl AtomicExt for AtomicPtr { 98 | type Value = *mut T; 99 | atomic_ext_defaults!(); 100 | } 101 | -------------------------------------------------------------------------------- /src/primitives/index_allocator.rs: -------------------------------------------------------------------------------- 1 | /// This lock-free data structure allows concurrent allocation of IDs from a contiguous slab. 
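///
/// A minimal usage sketch (illustrative, added for clarity; the slab size of 4 is
/// arbitrary):
///
/// ```
/// use lockless::primitives::index_allocator::IndexAllocator;
///
/// let alloc = IndexAllocator::new(4);
/// let id = alloc.try_allocate().expect("slab has a free slot");
/// assert!(id < alloc.len());
/// alloc.free(id);
/// // The freed slot becomes available to later `try_allocate` calls.
/// ```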
2 | 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | use primitives::atomic_ext::AtomicExt; 5 | 6 | #[derive(Debug)] 7 | pub struct IndexAllocator { 8 | mask: Vec, 9 | used: AtomicUsize, 10 | len: usize 11 | } 12 | 13 | fn div_up(a: usize, b: usize) -> usize { 14 | (a + b - 1) / b 15 | } 16 | 17 | fn word_bits() -> usize { 18 | 0usize.trailing_zeros() as usize 19 | } 20 | 21 | impl IndexAllocator { 22 | pub fn new(len: usize) -> Self { 23 | let mut result = IndexAllocator { 24 | mask: Vec::new(), 25 | used: AtomicUsize::new(0), 26 | len: 0, 27 | }; 28 | result.resize(len); 29 | result 30 | } 31 | pub fn try_allocate(&self) -> Option { 32 | let word_bits = word_bits(); 33 | // If we can reserve space 34 | if self.used.try_update(|prev| { 35 | if prev < self.len { 36 | Ok(prev + 1) 37 | } else { 38 | Err(()) 39 | } 40 | }).is_ok() { 41 | loop { 42 | for (index, m) in self.mask.iter().enumerate() { 43 | if let Ok((prev, next)) = m.try_update(|prev| { 44 | match (!prev).trailing_zeros() as usize { 45 | ::POINTER_BITS => Err(()), 46 | other => Ok(prev | (1 << other)) 47 | } 48 | }) { 49 | return Some(index*word_bits + (next & !prev).trailing_zeros() as usize); 50 | } 51 | } 52 | } 53 | } else { 54 | None 55 | } 56 | } 57 | pub fn free(&self, id: usize) { 58 | let word_bits = word_bits(); 59 | assert!(id < self.len); 60 | let (index, offset) = (id / word_bits, id % word_bits); 61 | let bit_mask = 1 << offset; 62 | let prev = self.mask[index].fetch_and(!bit_mask, Ordering::Relaxed); 63 | assert!(prev & bit_mask != 0, "Double-free of index!") 64 | } 65 | pub fn resize(&mut self, new_len: usize) { 66 | assert!(new_len >= self.len); 67 | let word_bits = word_bits(); 68 | let mask_words = self.mask.len(); 69 | 70 | // Clear previous unused bits 71 | let unused_bits = mask_words*word_bits - self.len; 72 | if unused_bits > 0 { 73 | *self.mask[mask_words-1].get_mut() &= usize::max_value() >> unused_bits; 74 | } 75 | 76 | let new_mask_words = div_up(new_len, word_bits); 77 | self.mask.reserve_exact(new_mask_words - mask_words); 78 | for _ in mask_words..new_mask_words { 79 | self.mask.push(AtomicUsize::new(0)) 80 | } 81 | 82 | // Set new unused bits 83 | let new_unused_bits = new_mask_words*word_bits - new_len; 84 | if new_unused_bits > 0 { 85 | *self.mask[new_mask_words-1].get_mut() |= !(usize::max_value() >> new_unused_bits); 86 | } 87 | 88 | self.len = new_len; 89 | } 90 | pub fn len(&self) -> usize { 91 | self.len 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/primitives/invariant.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | pub struct InvariantInner(*mut T); 4 | unsafe impl Send for InvariantInner {} 5 | unsafe impl Sync for InvariantInner {} 6 | 7 | pub type Invariant = PhantomData>; 8 | -------------------------------------------------------------------------------- /src/primitives/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod atomic_cell; 2 | pub mod prepend_list; 3 | pub mod append_list; 4 | pub mod atomic_ext; 5 | pub mod index_allocator; 6 | pub mod invariant; 7 | -------------------------------------------------------------------------------- /src/primitives/prepend_list.rs: -------------------------------------------------------------------------------- 1 | /// PrependList is a low-level primitive supporting two safe operations: 2 | /// `push`, which prepends a node to the list, and `swap` 
which replaces the list with another 3 | 4 | use std::sync::atomic::{AtomicPtr, Ordering}; 5 | use std::{ptr, mem}; 6 | 7 | pub type NodePtr = Option>>; 8 | 9 | #[derive(Debug)] 10 | pub struct Node { 11 | pub value: T, 12 | pub next: NodePtr 13 | } 14 | 15 | #[derive(Debug)] 16 | pub struct PrependList(AtomicPtr>); 17 | 18 | fn replace_forget(dest: &mut T, value: T) { 19 | mem::forget(mem::replace(dest, value)) 20 | } 21 | 22 | impl PrependList { 23 | fn into_raw(ptr: NodePtr) -> *mut Node { 24 | match ptr { 25 | Some(b) => Box::into_raw(b), 26 | None => ptr::null_mut() 27 | } 28 | } 29 | unsafe fn from_raw(ptr: *mut Node) -> NodePtr { 30 | if ptr == ptr::null_mut() { 31 | None 32 | } else { 33 | Some(Box::from_raw(ptr)) 34 | } 35 | } 36 | 37 | pub fn new(ptr: NodePtr) -> Self { 38 | PrependList(AtomicPtr::new(Self::into_raw(ptr))) 39 | } 40 | pub fn swap(&self, ptr: NodePtr) -> NodePtr { 41 | unsafe { 42 | Self::from_raw(self.0.swap(Self::into_raw(ptr), Ordering::AcqRel)) 43 | } 44 | } 45 | pub fn push(&self, mut node: Box>) { 46 | let mut current = self.0.load(Ordering::Relaxed); 47 | loop { 48 | replace_forget(&mut node.next, unsafe { Self::from_raw(current) }); 49 | match self.0.compare_exchange_weak(current, &mut *node, Ordering::AcqRel, Ordering::Relaxed) { 50 | Ok(_) => { 51 | mem::forget(node); 52 | return 53 | }, 54 | Err(p) => current = p 55 | } 56 | } 57 | } 58 | } 59 | 60 | impl Drop for PrependList { 61 | fn drop(&mut self) { 62 | unsafe { Self::from_raw(self.0.swap(ptr::null_mut(), Ordering::Relaxed)) }; 63 | } 64 | } -------------------------------------------------------------------------------- /src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod mpsc_queue; 2 | pub mod mpmc_queue; -------------------------------------------------------------------------------- /src/sync/mpmc_queue.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::borrow::Borrow; 3 | use std::mem; 4 | use futures::task::{self, Task}; 5 | use futures::{StartSend, AsyncSink, Async, Sink, Stream, Poll}; 6 | 7 | use handle::{Handle, IdHandle, ResizingHandle, BoundedHandle, HandleInner}; 8 | use primitives::index_allocator::IndexAllocator; 9 | use containers::atomic_cell_array::AtomicCellArrayInner; 10 | use containers::mpmc_queue; 11 | use containers::storage::{Place, Storage}; 12 | use containers::scratch::Scratch; 13 | 14 | 15 | const CLOSE_FLAG: usize = !0 ^ (!0 >> 1); 16 | const MSG_COUNT_MASK: usize = !CLOSE_FLAG; 17 | const MSG_COUNT_ZERO: usize = CLOSE_FLAG >> 1; 18 | 19 | 20 | define_id!(MpmcQueueAccessorId); 21 | 22 | #[derive(Debug)] 23 | pub struct MpmcQueueWrapper { 24 | // Stores the tasks of all accessors 25 | task_storage: Storage, 26 | task_scratch: Scratch>, 27 | task_array: AtomicCellArrayInner>, 28 | // Stores the message count + MSG_COUNT_ZERO in the low bits 29 | // May go "negative" (less than MSG_COUNT_ZERO) if there are outstanding pop operations 30 | // If high bit is set, the queue is closed 31 | // These are upper and lower bounds 32 | msg_count_upper: AtomicUsize, 33 | msg_count_lower: AtomicUsize, 34 | // Message queue 35 | msg_storage: Storage, 36 | msg_scratch: Scratch>, 37 | msg_queue: mpmc_queue::MpmcQueueInner>, 38 | // Parked thread queue 39 | parked_sender_queue: mpmc_queue::MpmcQueueInner, 40 | parked_receiver_queue: mpmc_queue::MpmcQueueInner, 41 | pending_receive_flags: Scratch, 42 | // Buffer size 43 | buffer_size: 
usize, 44 | // Sender count 45 | sender_count: AtomicUsize, 46 | receiver_count: AtomicUsize, 47 | // Id allocator 48 | id_alloc: IndexAllocator, 49 | } 50 | 51 | #[derive(Debug)] 52 | pub struct SendError(T); 53 | 54 | impl HandleInner for MpmcQueueWrapper { 55 | type IdAllocator = IndexAllocator; 56 | 57 | fn id_allocator(&self) -> &IndexAllocator { 58 | &self.id_alloc 59 | } 60 | 61 | fn raise_id_limit(&mut self, new_limit: usize) { 62 | let old_limit = self.id_limit(); 63 | assert!(new_limit > old_limit); 64 | let extra = new_limit - old_limit; 65 | 66 | // Reserve space for additional task handles 67 | self.task_storage.reserve(extra*2); 68 | self.task_scratch.extend(self.task_storage.none_storing_iter(extra)); 69 | self.task_array.extend(self.task_storage.none_storing_iter(extra)); 70 | 71 | // Reserve space for additional messages 72 | self.msg_storage.reserve(extra*2); 73 | self.msg_scratch.extend(self.msg_storage.none_storing_iter(extra)); 74 | self.msg_queue.extend(self.msg_storage.none_storing_iter(extra)); 75 | 76 | // Reserve space for additional parked tasks 77 | self.parked_sender_queue.extend((0..extra).map(|_| 0)); 78 | self.parked_receiver_queue.extend((0..extra).map(|_| 0)); 79 | self.pending_receive_flags.extend((0..extra).map(|_| 0)); 80 | 81 | // Extra IDs 82 | self.id_alloc.resize(new_limit); 83 | } 84 | } 85 | 86 | impl MpmcQueueWrapper { 87 | pub fn new>(max_accessors: usize, size: usize) -> H { 88 | assert!(max_accessors > 0); 89 | // Need capacity for both scratch space and task array 90 | let mut task_storage = Storage::with_capacity(max_accessors*2); 91 | let task_scratch = Scratch::new(task_storage.none_storing_iter(max_accessors)); 92 | let task_array = AtomicCellArrayInner::new(task_storage.none_storing_iter(max_accessors)); 93 | 94 | // Need capacity for both scratch space and message queue 95 | let mut msg_storage = Storage::with_capacity(max_accessors*2 + size); 96 | let msg_scratch = Scratch::new(msg_storage.none_storing_iter(max_accessors)); 97 | let msg_queue = mpmc_queue::MpmcQueueInner::new(msg_storage.none_storing_iter(max_accessors + size)); 98 | 99 | // Needs space for every sender 100 | let parked_sender_queue = mpmc_queue::MpmcQueueInner::new((0..max_accessors).map(|_| 0)); 101 | let parked_receiver_queue = mpmc_queue::MpmcQueueInner::new((0..max_accessors).map(|_| 0)); 102 | let pending_receive_flags = Scratch::new((0..max_accessors).map(|_| 0)); 103 | 104 | let id_alloc = IndexAllocator::new(max_accessors); 105 | 106 | Handle::new(MpmcQueueWrapper { 107 | task_storage: task_storage, 108 | task_scratch: task_scratch, 109 | task_array: task_array, 110 | msg_count_upper: AtomicUsize::new(MSG_COUNT_ZERO), 111 | msg_count_lower: AtomicUsize::new(MSG_COUNT_ZERO), 112 | msg_storage: msg_storage, 113 | msg_scratch: msg_scratch, 114 | msg_queue: msg_queue, 115 | parked_sender_queue: parked_sender_queue, 116 | parked_receiver_queue: parked_receiver_queue, 117 | pending_receive_flags: pending_receive_flags, 118 | buffer_size: size, 119 | sender_count: AtomicUsize::new(0), 120 | receiver_count: AtomicUsize::new(0), 121 | id_alloc: id_alloc, 122 | }) 123 | } 124 | 125 | unsafe fn inc_msg_count_upper(&self, id: &mut MpmcQueueAccessorId) -> Result { 126 | // Try increasing the message count 127 | let prev = self.msg_count_upper.fetch_add(1, Ordering::AcqRel); 128 | if prev & CLOSE_FLAG != 0 { 129 | self.msg_count_lower.fetch_sub(1, Ordering::AcqRel); 130 | Err(()) 131 | } else if (prev & MSG_COUNT_MASK) < self.buffer_size + MSG_COUNT_ZERO { 132 | Ok(false) 
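// Still below the buffer limit: the sender does not need to stay parked, so the
// caller (`push`) will immediately unpark itself again.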
133 | } else { 134 | let mut index = *(*id).borrow(); 135 | while !self.parked_sender_queue.push(&mut index) {} 136 | // Make sure queue was not closed 137 | if self.msg_count_upper.load(Ordering::Acquire) & CLOSE_FLAG == 0 { 138 | // Queue was not closed, park was successful 139 | Ok(true) 140 | } else { 141 | Ok(false) 142 | } 143 | } 144 | } 145 | 146 | unsafe fn inc_msg_count_lower(&self, id: &mut MpmcQueueAccessorId) { 147 | let prev = self.msg_count_lower.fetch_add(1, Ordering::AcqRel); 148 | 149 | // Wake a receiver if necessary 150 | if (prev & CLOSE_FLAG == 0) && (prev & MSG_COUNT_MASK < MSG_COUNT_ZERO) { 151 | let mut index = 0; 152 | while !self.parked_receiver_queue.pop(&mut index) {} 153 | self.wake_task(id, index); 154 | } 155 | } 156 | 157 | unsafe fn dec_msg_count_lower(&self, id: &mut MpmcQueueAccessorId) -> bool { 158 | let prev = self.msg_count_lower.fetch_sub(1, Ordering::AcqRel); 159 | if prev & CLOSE_FLAG != 0 { 160 | self.msg_count_lower.fetch_add(1, Ordering::AcqRel); 161 | true 162 | } else if (prev & MSG_COUNT_MASK) > MSG_COUNT_ZERO { 163 | true 164 | } else { 165 | let mut index = *(*id).borrow(); 166 | while !self.parked_receiver_queue.push(&mut index) {} 167 | // Make sure queue was not closed 168 | if self.msg_count_upper.load(Ordering::Acquire) & CLOSE_FLAG == 0 { 169 | // Queue was not closed, park was successful 170 | false 171 | } else { 172 | true 173 | } 174 | } 175 | } 176 | 177 | unsafe fn dec_msg_count_upper(&self, id: &mut MpmcQueueAccessorId) { 178 | let prev = self.msg_count_upper.fetch_sub(1, Ordering::AcqRel); 179 | 180 | // Wake a sender if necessary 181 | let mut index = 0; 182 | if (prev & CLOSE_FLAG == 0) && (prev & MSG_COUNT_MASK > self.buffer_size + MSG_COUNT_ZERO) { 183 | while !self.parked_sender_queue.pop(&mut index) {} 184 | self.wake_task(id, index); 185 | } 186 | } 187 | 188 | // Wake another parked task 189 | unsafe fn wake_task(&self, id: &mut MpmcQueueAccessorId, task_id: usize) { 190 | let place = self.task_scratch.get_mut(id); 191 | self.task_array.swap(task_id, place); 192 | if let Some(task) = self.task_storage.replace(place, None) { 193 | task.unpark(); 194 | } 195 | } 196 | 197 | // Returns true if was already parked 198 | unsafe fn park_self(&self, id: &mut MpmcQueueAccessorId) -> bool { 199 | let task_id = *(*id).borrow(); 200 | let place = self.task_scratch.get_mut(id); 201 | self.task_storage.replace(place, Some(task::park())); 202 | self.task_array.swap(task_id, place); 203 | self.task_storage.replace(place, None).is_some() 204 | } 205 | 206 | // Undo a previous `park_self` operation 207 | unsafe fn unpark_self(&self, id: &mut MpmcQueueAccessorId) { 208 | let task_id = *(*id).borrow(); 209 | let place = self.task_scratch.get_mut(id); 210 | self.task_array.swap(task_id, place); 211 | self.task_storage.replace(place, None); 212 | } 213 | 214 | pub unsafe fn push(&self, id: &mut MpmcQueueAccessorId, value: T) -> StartSend> { 215 | // Check if we're currently parked, while updating our task handle at the same time 216 | // ~fancy~ :P 217 | if self.park_self(id) { 218 | return Ok(AsyncSink::NotReady(value)); 219 | } 220 | 221 | // Increasing the message count may fail if we've been closed 222 | match self.inc_msg_count_upper(id) { 223 | Ok(true) => {}, 224 | Ok(false) => { self.unpark_self(id); }, 225 | Err(()) => return Err(SendError(value)) 226 | } 227 | 228 | // Put our value into the scratch space 229 | let place = self.msg_scratch.get_mut(id); 230 | self.msg_storage.replace(place, Some(value)); 231 | // We know this will 
succeed eventually, because we managed to increment the message count 232 | while !self.msg_queue.push(place) {} 233 | 234 | // Increase the message count lower bound 235 | self.inc_msg_count_lower(id); 236 | 237 | // All done 238 | Ok(AsyncSink::Ready) 239 | } 240 | 241 | // Must not be called while there are any remaining receivers 242 | unsafe fn drain(&self, id: &mut MpmcQueueAccessorId) { 243 | while self.pop_inner(id).is_some() {} 244 | } 245 | 246 | unsafe fn pop_inner(&self, id: &mut MpmcQueueAccessorId) -> Option { 247 | // Get a place to exchange for the result 248 | let place = self.msg_scratch.get_mut(id); 249 | if self.msg_queue.pop(place) { 250 | // Decrease the message count upper bound 251 | self.dec_msg_count_upper(id); 252 | let result = self.msg_storage.replace(place, None).expect("Some(value)"); 253 | Some(result) 254 | } else { 255 | None 256 | } 257 | } 258 | 259 | pub unsafe fn pop(&self, id: &mut MpmcQueueAccessorId) -> Async> { 260 | // Check if we're currently parked, while updating our task handle at the same time 261 | // ~fancy~ :P 262 | if self.park_self(id) { 263 | return Async::NotReady; 264 | } 265 | 266 | // If a poll hasn't been started yet 267 | if mem::replace(self.pending_receive_flags.get_mut(id), 0) == 0 { 268 | // Decreasing the message count returns true if a message is ready 269 | if !self.dec_msg_count_lower(id) { 270 | *self.pending_receive_flags.get_mut(id) = 1; 271 | return Async::NotReady; 272 | } 273 | } 274 | // A poll has been started, and can be completed immediately 275 | self.unpark_self(id); 276 | 277 | // This may fail if no messages are available 278 | Async::Ready(self.pop_inner(id)) 279 | } 280 | 281 | pub unsafe fn close(&self, id: &mut MpmcQueueAccessorId) { 282 | // Mark ourselves as closed 283 | self.msg_count_upper.fetch_or(CLOSE_FLAG, Ordering::AcqRel); 284 | self.msg_count_lower.fetch_or(CLOSE_FLAG, Ordering::AcqRel); 285 | 286 | // Wake any waiting tasks 287 | let mut index = 0; 288 | while self.parked_sender_queue.pop(&mut index) { 289 | self.wake_task(id, index); 290 | } 291 | while self.parked_receiver_queue.pop(&mut index) { 292 | self.wake_task(id, index); 293 | } 294 | } 295 | 296 | pub unsafe fn inc_sender_count(&self) { 297 | self.sender_count.fetch_add(1, Ordering::AcqRel); 298 | } 299 | 300 | pub unsafe fn dec_sender_count(&self, id: &mut MpmcQueueAccessorId) { 301 | if self.sender_count.fetch_sub(1, Ordering::AcqRel) == 1 { 302 | self.close(id); 303 | } 304 | } 305 | 306 | pub unsafe fn inc_receiver_count(&self) { 307 | self.receiver_count.fetch_add(1, Ordering::AcqRel); 308 | } 309 | 310 | pub unsafe fn dec_receiver_count(&self, id: &mut MpmcQueueAccessorId) { 311 | if mem::replace(self.pending_receive_flags.get_mut(id), 0) == 1 { 312 | // Give up our place in the queue to someone else 313 | self.inc_msg_count_lower(id); 314 | } 315 | if self.receiver_count.fetch_sub(1, Ordering::AcqRel) == 1 { 316 | self.close(id); 317 | self.drain(id); 318 | } 319 | } 320 | } 321 | 322 | #[derive(Debug)] 323 | pub struct MpmcQueueReceiver>>(IdHandle); 324 | 325 | impl>> MpmcQueueReceiver { 326 | fn inc_receiver_count(self) -> Self { 327 | self.0.with(|inner| unsafe { inner.inc_receiver_count() }); 328 | self 329 | } 330 | pub fn try_clone(&self) -> Option { 331 | self.0.try_clone().map(|inner| MpmcQueueReceiver(inner).inc_receiver_count()) 332 | } 333 | pub fn close(&mut self) { 334 | self.0.with_mut(|inner, id| unsafe { inner.close(id) }) 335 | } 336 | } 337 | 338 | impl>> Stream for MpmcQueueReceiver { 339 | type Item = T; 
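// No error type is needed: `poll` below always returns `Ok`, and shutdown is
// signalled by `Async::Ready(None)` instead.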
340 | type Error = (); 341 | 342 | fn poll(&mut self) -> Poll, ()> { 343 | // This is safe because we guarantee that we are unique 344 | Ok(self.0.with_mut(|inner, id| unsafe { inner.pop(id) })) 345 | } 346 | } 347 | 348 | impl>> Clone for MpmcQueueReceiver { 349 | fn clone(&self) -> Self { 350 | MpmcQueueReceiver(self.0.clone()).inc_receiver_count() 351 | } 352 | } 353 | 354 | impl>> Drop for MpmcQueueReceiver { 355 | fn drop(&mut self) { 356 | self.0.with_mut(|inner, id| unsafe { inner.dec_receiver_count(id) }) 357 | } 358 | } 359 | 360 | pub type ResizingMpmcQueueReceiver = MpmcQueueReceiver>>; 361 | pub type BoundedMpmcQueueReceiver = MpmcQueueReceiver>>; 362 | 363 | #[derive(Debug)] 364 | pub struct MpmcQueueSender>>(IdHandle); 365 | 366 | impl>> MpmcQueueSender { 367 | pub fn try_clone(&self) -> Option { 368 | self.0.try_clone().map(|inner| MpmcQueueSender(inner).inc_sender_count()) 369 | } 370 | fn inc_sender_count(self) -> Self { 371 | self.0.with(|inner| unsafe { inner.inc_sender_count() }); 372 | self 373 | } 374 | } 375 | 376 | impl>> Clone for MpmcQueueSender { 377 | fn clone(&self) -> Self { 378 | MpmcQueueSender(self.0.clone()).inc_sender_count() 379 | } 380 | } 381 | 382 | impl>> Sink for MpmcQueueSender { 383 | type SinkItem = T; 384 | type SinkError = SendError; 385 | 386 | fn start_send(&mut self, msg: T) -> StartSend> { 387 | self.0.with_mut(|inner, id| unsafe { inner.push(id, msg) }) 388 | } 389 | 390 | fn poll_complete(&mut self) -> Poll<(), SendError> { 391 | Ok(Async::Ready(())) 392 | } 393 | } 394 | 395 | impl>> Drop for MpmcQueueSender { 396 | fn drop(&mut self) { 397 | // Wake up the receiver 398 | self.0.with_mut(|inner, id| unsafe { inner.dec_sender_count(id) }) 399 | } 400 | } 401 | 402 | pub type ResizingMpmcQueueSender = MpmcQueueSender>>; 403 | pub type BoundedMpmcQueueSender = MpmcQueueSender>>; 404 | 405 | pub fn new>>(max_accessors: usize, size: usize) -> (MpmcQueueSender, MpmcQueueReceiver) { 406 | let inner = MpmcQueueWrapper::new(max_accessors, size); 407 | (MpmcQueueSender(IdHandle::new(&inner)).inc_sender_count(), MpmcQueueReceiver(IdHandle::new(&inner)).inc_receiver_count()) 408 | } 409 | -------------------------------------------------------------------------------- /src/sync/mpsc_queue.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::borrow::Borrow; 3 | use futures::task::{self, Task}; 4 | use futures::{StartSend, AsyncSink, Async, Sink, Stream, Poll}; 5 | 6 | use handle::{Handle, IdHandle, ResizingHandle, BoundedHandle, HandleInner, Like}; 7 | use primitives::atomic_ext::AtomicExt; 8 | use primitives::index_allocator::IndexAllocator; 9 | use containers::atomic_cell_array::AtomicCellArrayInner; 10 | use containers::mpsc_queue; 11 | use containers::storage::{Place, Storage}; 12 | use containers::scratch::Scratch; 13 | 14 | 15 | const CLOSE_FLAG: usize = !0 ^ (!0 >> 1); 16 | const MSG_COUNT_MASK: usize = !CLOSE_FLAG; 17 | 18 | 19 | define_id!(MpscQueueSenderId); 20 | define_id!(MpscQueueAccessorId); 21 | 22 | fn with_sender_id R>(sender_id: &mut MpscQueueSenderId, f: F) -> R { 23 | >::virtual_borrow(*Borrow::::borrow(sender_id) + 1, f) 24 | } 25 | unsafe fn with_receiver_id R>(f: F) -> R { 26 | >::virtual_borrow(0, f) 27 | } 28 | fn sender_index(sender_id: &mut MpscQueueSenderId) -> usize { 29 | with_sender_id(sender_id, |id| *(*id).borrow()) 30 | } 31 | fn receiver_index() -> usize { 32 | unsafe { with_receiver_id(|id| *(*id).borrow()) } 33 | } 34 
| 35 | #[derive(Debug)] 36 | pub struct MpscQueueWrapper { 37 | // Stores the tasks of all senders and the receiver 38 | // Index 0 contains the receiver task 39 | // Index 1..(n+1) contains tasks for senders 0..n 40 | task_storage: Storage, 41 | task_scratch: Scratch>, 42 | task_array: AtomicCellArrayInner>, 43 | // Stores the message count in the low bits 44 | // If the high bit is set, the queue is closed 45 | msg_count: AtomicUsize, 46 | // Message queue 47 | msg_storage: Storage, 48 | msg_scratch: Scratch>, 49 | msg_queue: mpsc_queue::MpscQueueInner>, 50 | // Parked thread queue 51 | parked_queue: mpsc_queue::MpscQueueInner, 52 | // Buffer size 53 | buffer_size: usize, 54 | // Sender count 55 | sender_count: AtomicUsize, 56 | // Id allocator 57 | id_alloc: IndexAllocator, 58 | } 59 | 60 | #[derive(Debug)] 61 | pub struct SendError(T); 62 | 63 | impl HandleInner for MpscQueueWrapper { 64 | type IdAllocator = IndexAllocator; 65 | 66 | fn id_allocator(&self) -> &IndexAllocator { 67 | &self.id_alloc 68 | } 69 | 70 | fn raise_id_limit(&mut self, new_limit: usize) { 71 | let old_limit = self.id_limit(); 72 | assert!(new_limit > old_limit); 73 | let extra = new_limit - old_limit; 74 | 75 | // Reserve space for additional task handles 76 | self.task_storage.reserve(extra*2); 77 | self.task_scratch.extend(self.task_storage.none_storing_iter(extra)); 78 | self.task_array.extend(self.task_storage.none_storing_iter(extra)); 79 | 80 | // Reserve space for additional messages 81 | self.msg_storage.reserve(extra*2); 82 | self.msg_scratch.extend(self.msg_storage.none_storing_iter(extra)); 83 | self.msg_queue.extend(self.msg_storage.none_storing_iter(extra)); 84 | 85 | // Reserve space for additional parked tasks 86 | self.parked_queue.extend((0..extra).map(|_| 0)); 87 | 88 | // Extra IDs 89 | self.id_alloc.resize(new_limit); 90 | } 91 | } 92 | 93 | impl MpscQueueWrapper { 94 | pub fn new>(max_senders: usize, size: usize) -> H { 95 | assert!(max_senders > 0); 96 | // Need capacity for both scratch space and task array 97 | let mut task_storage = Storage::with_capacity(max_senders*2+2); 98 | let task_scratch = Scratch::new(task_storage.none_storing_iter(max_senders+1)); 99 | let task_array = AtomicCellArrayInner::new(task_storage.none_storing_iter(max_senders+1)); 100 | 101 | // Need capacity for both scratch space and message queue 102 | let mut msg_storage = Storage::with_capacity(max_senders*2 + size); 103 | let msg_scratch = Scratch::new(msg_storage.none_storing_iter(max_senders)); 104 | let msg_queue = mpsc_queue::MpscQueueInner::new(msg_storage.none_storing_iter(max_senders + size)); 105 | 106 | // Needs space for every sender 107 | let parked_queue = mpsc_queue::MpscQueueInner::new((0..max_senders).map(|_| 0)); 108 | 109 | let id_alloc = IndexAllocator::new(max_senders); 110 | 111 | Handle::new(MpscQueueWrapper { 112 | task_storage: task_storage, 113 | task_scratch: task_scratch, 114 | task_array: task_array, 115 | msg_count: AtomicUsize::new(0), 116 | msg_storage: msg_storage, 117 | msg_scratch: msg_scratch, 118 | msg_queue: msg_queue, 119 | parked_queue: parked_queue, 120 | buffer_size: size, 121 | sender_count: AtomicUsize::new(0), 122 | id_alloc: id_alloc, 123 | }) 124 | } 125 | 126 | unsafe fn inc_msg_count(&self, id: &mut MpscQueueSenderId) -> Result { 127 | // Try increasing the message count 128 | match self.msg_count.try_update(|prev| { 129 | if prev & CLOSE_FLAG == 0 { 130 | Ok(prev+1) 131 | } else { 132 | Err(()) 133 | } 134 | }) { 135 | Ok((prev, _)) => { 136 | if (prev & 
MSG_COUNT_MASK) >= self.buffer_size { 137 | let mut index = sender_index(id); 138 | while !self.parked_queue.push(&mut index) {} 139 | 140 | // Make sure queue was not closed 141 | if self.msg_count.load(Ordering::Acquire) & CLOSE_FLAG == 0 { 142 | // Queue was not closed, park was successful 143 | Ok(true) 144 | } else { 145 | Ok(false) 146 | } 147 | } else { 148 | Ok(false) 149 | } 150 | }, 151 | Err(()) => Err(()) 152 | } 153 | } 154 | 155 | unsafe fn dec_msg_count(&self) { 156 | // Decrease the message count 157 | self.msg_count.fetch_sub(1, Ordering::AcqRel); 158 | 159 | // Wake a task if necessary 160 | let _ = self.parked_queue.pop(|task_id| { 161 | with_receiver_id(|id| self.wake_task(id, *task_id)); 162 | }); 163 | } 164 | 165 | // Wake another parked task 166 | unsafe fn wake_task(&self, id: &mut MpscQueueAccessorId, task_id: usize) { 167 | let place = self.task_scratch.get_mut(id); 168 | self.task_array.swap(task_id, place); 169 | if let Some(task) = self.task_storage.replace(place, None) { 170 | task.unpark(); 171 | } 172 | } 173 | 174 | // Returns true if was already parked 175 | unsafe fn park_self(&self, id: &mut MpscQueueAccessorId) -> bool { 176 | let task_id = *(*id).borrow(); 177 | let place = self.task_scratch.get_mut(id); 178 | self.task_storage.replace(place, Some(task::park())); 179 | self.task_array.swap(task_id, place); 180 | self.task_storage.replace(place, None).is_some() 181 | } 182 | 183 | // Undo a previous `park_self` operation 184 | unsafe fn unpark_self(&self, id: &mut MpscQueueAccessorId) { 185 | let task_id = *(*id).borrow(); 186 | let place = self.task_scratch.get_mut(id); 187 | self.task_array.swap(task_id, place); 188 | self.task_storage.replace(place, None); 189 | } 190 | 191 | pub unsafe fn push(&self, id: &mut MpscQueueSenderId, value: T) -> StartSend> { 192 | // Check if we're currently parked, while updating our task handle at the same time 193 | // ~fancy~ :P 194 | if with_sender_id(id, |accessor_id| self.park_self(accessor_id)) { 195 | return Ok(AsyncSink::NotReady(value)); 196 | } 197 | 198 | // Increasing the message count may fail if we've been closed 199 | match self.inc_msg_count(id) { 200 | Ok(true) => {}, 201 | Ok(false) => { with_sender_id(id, |accessor_id| self.unpark_self(accessor_id)); }, 202 | Err(()) => return Err(SendError(value)) 203 | } 204 | 205 | // Put our value into the scratch space 206 | let place = self.msg_scratch.get_mut(id); 207 | self.msg_storage.replace(place, Some(value)); 208 | // We know this will succeed eventually, because we managed to increment the message count 209 | while !self.msg_queue.push(place) {} 210 | 211 | // Wake the receiver if necessary 212 | with_sender_id(id, |accessor_id| self.wake_task(accessor_id, receiver_index())); 213 | 214 | // All done 215 | Ok(AsyncSink::Ready) 216 | } 217 | 218 | unsafe fn pop_inner(&self) -> Result { 219 | self.msg_queue.pop(|place| { 220 | self.msg_storage.replace(place, None).expect("Some(value)") 221 | }) 222 | } 223 | 224 | unsafe fn drain(&self) { 225 | while self.pop_inner().is_ok() { 226 | self.dec_msg_count(); 227 | } 228 | } 229 | 230 | pub unsafe fn pop(&self) -> Async> { 231 | match self.pop_inner() { 232 | Ok(value) => { 233 | self.dec_msg_count(); 234 | Async::Ready(Some(value)) 235 | }, 236 | Err(()) => { 237 | // Park ourselves 238 | with_receiver_id(|id| self.park_self(id)); 239 | let finished = self.msg_count.load(Ordering::Acquire) == CLOSE_FLAG || self.sender_count.load(Ordering::Acquire) == 0; 240 | // Check that queue is still empty 241 | match 
self.pop_inner() { 242 | Ok(value) => { 243 | // Queue became non-empty 244 | // Take an item and return 245 | with_receiver_id(|id| self.unpark_self(id)); 246 | self.dec_msg_count(); 247 | Async::Ready(Some(value)) 248 | }, 249 | Err(()) => { 250 | // Queue is still empty, if it's closed and there are no remaining message 251 | // then we're done! 252 | if finished { 253 | Async::Ready(None) 254 | } else { 255 | Async::NotReady 256 | } 257 | } 258 | } 259 | } 260 | } 261 | } 262 | 263 | pub unsafe fn close(&self) { 264 | // Mark ourselves as closed 265 | self.msg_count.fetch_or(CLOSE_FLAG, Ordering::AcqRel); 266 | 267 | // Wake any waiting tasks 268 | while let Ok(task_index) = self.parked_queue.pop(|index| *index) { 269 | with_receiver_id(|id| self.wake_task(id, task_index)); 270 | } 271 | } 272 | 273 | pub unsafe fn inc_sender_count(&self) { 274 | self.sender_count.fetch_add(1, Ordering::AcqRel); 275 | } 276 | 277 | pub unsafe fn dec_sender_count(&self, id: &mut MpscQueueSenderId) { 278 | if self.sender_count.fetch_sub(1, Ordering::AcqRel) == 1 { 279 | with_sender_id(id, |accessor_id| self.wake_task(accessor_id, receiver_index())); 280 | } 281 | } 282 | } 283 | 284 | #[derive(Debug)] 285 | pub struct MpscQueueReceiver>>(H); 286 | 287 | impl>> MpscQueueReceiver { 288 | pub fn new(max_senders: usize, size: usize) -> Self { 289 | MpscQueueReceiver(MpscQueueWrapper::new(max_senders, size)) 290 | } 291 | 292 | pub fn close(&mut self) { 293 | // This is safe because we guarantee that we are unique 294 | self.0.with(|inner| unsafe { inner.close() }) 295 | } 296 | } 297 | 298 | impl>> Stream for MpscQueueReceiver { 299 | type Item = T; 300 | type Error = (); 301 | 302 | fn poll(&mut self) -> Poll, ()> { 303 | // This is safe because we guarantee that we are unique 304 | Ok(self.0.with(|inner| unsafe { inner.pop() })) 305 | } 306 | } 307 | 308 | impl>> Drop for MpscQueueReceiver { 309 | fn drop(&mut self) { 310 | // Drain the channel of all pending messages 311 | self.0.with(|inner| unsafe { 312 | inner.close(); 313 | inner.drain(); 314 | }) 315 | } 316 | } 317 | 318 | pub type ResizingMpscQueueReceiver = MpscQueueReceiver>>; 319 | pub type BoundedMpscQueueReceiver = MpscQueueReceiver>>; 320 | 321 | #[derive(Debug)] 322 | pub struct MpscQueueSender>>(IdHandle); 323 | 324 | impl>> MpscQueueSender { 325 | pub fn new(receiver: &MpscQueueReceiver) -> Self { 326 | MpscQueueSender(IdHandle::new(&receiver.0)).inc_sender_count() 327 | } 328 | pub fn try_new(receiver: &MpscQueueReceiver) -> Option { 329 | IdHandle::try_new(&receiver.0).map(|inner| MpscQueueSender(inner).inc_sender_count()) 330 | } 331 | pub fn try_clone(&self) -> Option { 332 | self.0.try_clone().map(|inner| MpscQueueSender(inner).inc_sender_count()) 333 | } 334 | fn inc_sender_count(self) -> Self { 335 | self.0.with(|inner| unsafe { inner.inc_sender_count() }); 336 | self 337 | } 338 | } 339 | 340 | impl>> Clone for MpscQueueSender { 341 | fn clone(&self) -> Self { 342 | MpscQueueSender(self.0.clone()).inc_sender_count() 343 | } 344 | } 345 | 346 | impl>> Sink for MpscQueueSender { 347 | type SinkItem = T; 348 | type SinkError = SendError; 349 | 350 | fn start_send(&mut self, msg: T) -> StartSend> { 351 | self.0.with_mut(|inner, id| unsafe { inner.push(id, msg) }) 352 | } 353 | 354 | fn poll_complete(&mut self) -> Poll<(), SendError> { 355 | Ok(Async::Ready(())) 356 | } 357 | } 358 | 359 | impl>> Drop for MpscQueueSender { 360 | fn drop(&mut self) { 361 | // Wake up the receiver 362 | self.0.with_mut(|inner, id| unsafe { 
inner.dec_sender_count(id) }) 363 | } 364 | } 365 | 366 | pub type ResizingMpscQueueSender = MpscQueueSender>>; 367 | pub type BoundedMpscQueueSender = MpscQueueSender>>; 368 | -------------------------------------------------------------------------------- /tests/mpmc.rs: -------------------------------------------------------------------------------- 1 | // Copied from futures, to test our version of an MPSC queue 2 | 3 | extern crate futures; 4 | extern crate lockless; 5 | 6 | use futures::{Future, Stream, Sink, Async, AsyncSink}; 7 | use futures::future::lazy; 8 | 9 | use std::time::Duration; 10 | use std::thread; 11 | use std::sync::{Arc, Mutex}; 12 | use std::sync::atomic::{AtomicUsize, Ordering}; 13 | 14 | use lockless::sync::mpmc_queue::{ResizingMpmcQueueReceiver, ResizingMpmcQueueSender, self}; 15 | 16 | fn is_send() {} 17 | 18 | fn channel(size: usize) -> (ResizingMpmcQueueSender, ResizingMpmcQueueReceiver) { 19 | mpmc_queue::new(9, size) 20 | } 21 | 22 | #[test] 23 | fn bounds() { 24 | is_send::>(); 25 | is_send::>(); 26 | } 27 | 28 | #[test] 29 | fn send_recv() { 30 | let (tx, rx) = channel::(16); 31 | let mut rx = rx.wait(); 32 | 33 | tx.send(1).wait().unwrap(); 34 | 35 | assert_eq!(rx.next().unwrap(), Ok(1)); 36 | } 37 | 38 | #[test] 39 | fn send_recv_no_buffer() { 40 | let (mut tx, mut rx) = channel::(0); 41 | 42 | // Run on a task context 43 | lazy(move || { 44 | assert!(tx.poll_complete().unwrap().is_ready()); 45 | 46 | // Send first message 47 | 48 | let res = tx.start_send(1).unwrap(); 49 | assert!(is_ready(&res)); 50 | 51 | // Send second message 52 | let res = tx.start_send(2).unwrap(); 53 | assert!(!is_ready(&res)); 54 | 55 | // Take the value 56 | assert_eq!(rx.poll().unwrap(), Async::Ready(Some(1))); 57 | 58 | let res = tx.start_send(2).unwrap(); 59 | assert!(is_ready(&res)); 60 | 61 | // Take the value 62 | assert_eq!(rx.poll().unwrap(), Async::Ready(Some(2))); 63 | 64 | Ok::<(), ()>(()) 65 | }).wait().unwrap(); 66 | } 67 | 68 | #[test] 69 | fn send_shared_recv() { 70 | let (tx1, rx) = channel::(16); 71 | let tx2 = tx1.clone(); 72 | let mut rx = rx.wait(); 73 | 74 | tx1.send(1).wait().unwrap(); 75 | assert_eq!(rx.next().unwrap(), Ok(1)); 76 | 77 | tx2.send(2).wait().unwrap(); 78 | assert_eq!(rx.next().unwrap(), Ok(2)); 79 | } 80 | 81 | #[test] 82 | fn send_shared_send() { 83 | let (tx, rx1) = channel::(16); 84 | let rx2 = rx1.clone(); 85 | let mut rx1 = rx1.wait(); 86 | let mut rx2 = rx2.wait(); 87 | 88 | let tx = tx.send(1).wait().unwrap(); 89 | assert_eq!(rx1.next().unwrap(), Ok(1)); 90 | 91 | tx.send(2).wait().unwrap(); 92 | assert_eq!(rx2.next().unwrap(), Ok(2)); 93 | } 94 | 95 | #[test] 96 | fn send_recv_threads() { 97 | let (tx, rx) = channel::(16); 98 | let mut rx = rx.wait(); 99 | 100 | thread::spawn(move|| { 101 | tx.send(1).wait().unwrap(); 102 | }); 103 | 104 | assert_eq!(rx.next().unwrap(), Ok(1)); 105 | } 106 | 107 | #[test] 108 | fn send_recv_threads_no_capacity() { 109 | let (mut tx, rx) = channel::(0); 110 | let mut rx = rx.wait(); 111 | 112 | let t = thread::spawn(move|| { 113 | tx = tx.send(1).wait().unwrap(); 114 | tx = tx.send(2).wait().unwrap(); 115 | }); 116 | 117 | thread::sleep(Duration::from_millis(100)); 118 | assert_eq!(rx.next().unwrap(), Ok(1)); 119 | 120 | thread::sleep(Duration::from_millis(100)); 121 | assert_eq!(rx.next().unwrap(), Ok(2)); 122 | 123 | t.join().unwrap(); 124 | } 125 | 126 | #[test] 127 | fn recv_close_gets_none() { 128 | let (tx, mut rx) = channel::(10); 129 | 130 | // Run on a task context 131 | lazy(move || { 132 | 
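// Close the receiver before polling: the stream should report end-of-stream
// (`Async::Ready(None)`) even though a sender handle is still alive at this point.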
rx.close(); 133 | 134 | assert_eq!(rx.poll(), Ok(Async::Ready(None))); 135 | 136 | drop(tx); 137 | 138 | Ok::<(), ()>(()) 139 | }).wait().unwrap(); 140 | } 141 | 142 | 143 | #[test] 144 | fn tx_close_gets_none() { 145 | let (_, mut rx) = channel::(10); 146 | 147 | // Run on a task context 148 | lazy(move || { 149 | assert_eq!(rx.poll(), Ok(Async::Ready(None))); 150 | assert_eq!(rx.poll(), Ok(Async::Ready(None))); 151 | 152 | Ok::<(), ()>(()) 153 | }).wait().unwrap(); 154 | } 155 | 156 | #[test] 157 | fn stress_shared_bounded_hard() { 158 | const AMT: u32 = 10000; 159 | const NTHREADS: u32 = 8; 160 | let (tx, rx) = channel::(0); 161 | 162 | let mut ts = Vec::new(); 163 | for _ in 0..NTHREADS { 164 | let mut rx = rx.clone().wait(); 165 | 166 | ts.push(thread::spawn(move|| { 167 | for _ in 0..AMT { 168 | assert_eq!(rx.next().unwrap(), Ok(1)); 169 | } 170 | })); 171 | } 172 | 173 | for _ in 0..NTHREADS { 174 | let mut tx = tx.clone(); 175 | 176 | thread::spawn(move|| { 177 | for _ in 0..AMT { 178 | tx = tx.send(1).wait().unwrap(); 179 | } 180 | }); 181 | } 182 | 183 | drop(tx); 184 | 185 | for t in ts { 186 | t.join().ok().unwrap(); 187 | } 188 | 189 | if rx.wait().next().is_some() { 190 | panic!(); 191 | } 192 | } 193 | 194 | #[test] 195 | fn stress_receiver_multi_task_bounded_hard() { 196 | const AMT: usize = 10_000; 197 | const NTHREADS: u32 = 2; 198 | 199 | let (mut tx, rx) = channel::(0); 200 | let rx = Arc::new(Mutex::new(Some(rx))); 201 | let n = Arc::new(AtomicUsize::new(0)); 202 | 203 | let mut th = vec![]; 204 | 205 | for _ in 0..NTHREADS { 206 | let rx = rx.clone(); 207 | let n = n.clone(); 208 | 209 | let t = thread::spawn(move || { 210 | let mut i = 0; 211 | 212 | loop { 213 | i += 1; 214 | let mut lock = rx.lock().ok().unwrap(); 215 | 216 | match lock.take() { 217 | Some(mut rx) => { 218 | if i % 5 == 0 { 219 | let (item, rest) = rx.into_future().wait().ok().unwrap(); 220 | 221 | if item.is_none() { 222 | break; 223 | } 224 | 225 | n.fetch_add(1, Ordering::Relaxed); 226 | *lock = Some(rest); 227 | } else { 228 | // Just poll 229 | let n = n.clone(); 230 | let r = lazy(move || { 231 | let r = match rx.poll().unwrap() { 232 | Async::Ready(Some(_)) => { 233 | n.fetch_add(1, Ordering::Relaxed); 234 | *lock = Some(rx); 235 | false 236 | } 237 | Async::Ready(None) => { 238 | true 239 | } 240 | Async::NotReady => { 241 | *lock = Some(rx); 242 | false 243 | } 244 | }; 245 | 246 | Ok::(r) 247 | }).wait().unwrap(); 248 | 249 | if r { 250 | break; 251 | } 252 | } 253 | } 254 | None => break, 255 | } 256 | } 257 | }); 258 | 259 | th.push(t); 260 | } 261 | 262 | for i in 0..AMT { 263 | tx = tx.send(i).wait().unwrap(); 264 | } 265 | 266 | drop(tx); 267 | 268 | for t in th { 269 | t.join().unwrap(); 270 | } 271 | 272 | assert_eq!(AMT, n.load(Ordering::Relaxed)); 273 | } 274 | 275 | fn is_ready(res: &AsyncSink) -> bool { 276 | match *res { 277 | AsyncSink::Ready => true, 278 | _ => false, 279 | } 280 | } -------------------------------------------------------------------------------- /tests/mpsc.rs: -------------------------------------------------------------------------------- 1 | // Copied from futures, to test our version of an MPSC queue 2 | 3 | extern crate futures; 4 | extern crate lockless; 5 | 6 | use futures::{Future, Stream, Sink, Async, AsyncSink}; 7 | use futures::future::lazy; 8 | 9 | use std::time::Duration; 10 | use std::thread; 11 | use std::sync::{Arc, Mutex}; 12 | use std::sync::atomic::{AtomicUsize, Ordering}; 13 | 14 | use 
lockless::sync::mpsc_queue::{ResizingMpscQueueReceiver, ResizingMpscQueueSender}; 15 | 16 | fn is_send() {} 17 | 18 | fn channel(size: usize) -> (ResizingMpscQueueSender, ResizingMpscQueueReceiver) { 19 | let rx = ResizingMpscQueueReceiver::new(8, size); 20 | let tx = ResizingMpscQueueSender::new(&rx); 21 | (tx, rx) 22 | } 23 | 24 | #[test] 25 | fn bounds() { 26 | is_send::>(); 27 | is_send::>(); 28 | } 29 | 30 | #[test] 31 | fn send_recv() { 32 | let (tx, rx) = channel::(16); 33 | let mut rx = rx.wait(); 34 | 35 | tx.send(1).wait().unwrap(); 36 | 37 | assert_eq!(rx.next().unwrap(), Ok(1)); 38 | } 39 | 40 | #[test] 41 | fn send_recv_no_buffer() { 42 | let (mut tx, mut rx) = channel::(0); 43 | 44 | // Run on a task context 45 | lazy(move || { 46 | assert!(tx.poll_complete().unwrap().is_ready()); 47 | 48 | // Send first message 49 | 50 | let res = tx.start_send(1).unwrap(); 51 | assert!(is_ready(&res)); 52 | 53 | // Send second message 54 | let res = tx.start_send(2).unwrap(); 55 | assert!(!is_ready(&res)); 56 | 57 | // Take the value 58 | assert_eq!(rx.poll().unwrap(), Async::Ready(Some(1))); 59 | 60 | let res = tx.start_send(2).unwrap(); 61 | assert!(is_ready(&res)); 62 | 63 | // Take the value 64 | assert_eq!(rx.poll().unwrap(), Async::Ready(Some(2))); 65 | 66 | Ok::<(), ()>(()) 67 | }).wait().unwrap(); 68 | } 69 | 70 | #[test] 71 | fn send_shared_recv() { 72 | let (tx1, rx) = channel::(16); 73 | let tx2 = tx1.clone(); 74 | let mut rx = rx.wait(); 75 | 76 | tx1.send(1).wait().unwrap(); 77 | assert_eq!(rx.next().unwrap(), Ok(1)); 78 | 79 | tx2.send(2).wait().unwrap(); 80 | assert_eq!(rx.next().unwrap(), Ok(2)); 81 | } 82 | 83 | #[test] 84 | fn send_recv_threads() { 85 | let (tx, rx) = channel::(16); 86 | let mut rx = rx.wait(); 87 | 88 | thread::spawn(move|| { 89 | tx.send(1).wait().unwrap(); 90 | }); 91 | 92 | assert_eq!(rx.next().unwrap(), Ok(1)); 93 | } 94 | 95 | #[test] 96 | fn send_recv_threads_no_capacity() { 97 | let (mut tx, rx) = channel::(0); 98 | let mut rx = rx.wait(); 99 | 100 | let t = thread::spawn(move|| { 101 | tx = tx.send(1).wait().unwrap(); 102 | tx = tx.send(2).wait().unwrap(); 103 | }); 104 | 105 | thread::sleep(Duration::from_millis(100)); 106 | assert_eq!(rx.next().unwrap(), Ok(1)); 107 | 108 | thread::sleep(Duration::from_millis(100)); 109 | assert_eq!(rx.next().unwrap(), Ok(2)); 110 | 111 | t.join().unwrap(); 112 | } 113 | 114 | #[test] 115 | fn recv_close_gets_none() { 116 | let (tx, mut rx) = channel::(10); 117 | 118 | // Run on a task context 119 | lazy(move || { 120 | rx.close(); 121 | 122 | assert_eq!(rx.poll(), Ok(Async::Ready(None))); 123 | 124 | drop(tx); 125 | 126 | Ok::<(), ()>(()) 127 | }).wait().unwrap(); 128 | } 129 | 130 | 131 | #[test] 132 | fn tx_close_gets_none() { 133 | let (_, mut rx) = channel::(10); 134 | 135 | // Run on a task context 136 | lazy(move || { 137 | assert_eq!(rx.poll(), Ok(Async::Ready(None))); 138 | assert_eq!(rx.poll(), Ok(Async::Ready(None))); 139 | 140 | Ok::<(), ()>(()) 141 | }).wait().unwrap(); 142 | } 143 | 144 | #[test] 145 | fn stress_shared_bounded_hard() { 146 | const AMT: u32 = 10000; 147 | const NTHREADS: u32 = 8; 148 | let (tx, rx) = channel::(0); 149 | let mut rx = rx.wait(); 150 | 151 | let t = thread::spawn(move|| { 152 | for _ in 0..AMT * NTHREADS { 153 | assert_eq!(rx.next().unwrap(), Ok(1)); 154 | } 155 | 156 | if rx.next().is_some() { 157 | panic!(); 158 | } 159 | }); 160 | 161 | for _ in 0..NTHREADS { 162 | let mut tx = tx.clone(); 163 | 164 | thread::spawn(move|| { 165 | for _ in 0..AMT { 166 | tx = 
tx.send(1).wait().unwrap(); 167 | } 168 | }); 169 | } 170 | 171 | drop(tx); 172 | 173 | t.join().ok().unwrap(); 174 | } 175 | 176 | #[test] 177 | fn stress_receiver_multi_task_bounded_hard() { 178 | const AMT: usize = 10_000; 179 | const NTHREADS: u32 = 2; 180 | 181 | let (mut tx, rx) = channel::(0); 182 | let rx = Arc::new(Mutex::new(Some(rx))); 183 | let n = Arc::new(AtomicUsize::new(0)); 184 | 185 | let mut th = vec![]; 186 | 187 | for _ in 0..NTHREADS { 188 | let rx = rx.clone(); 189 | let n = n.clone(); 190 | 191 | let t = thread::spawn(move || { 192 | let mut i = 0; 193 | 194 | loop { 195 | i += 1; 196 | let mut lock = rx.lock().ok().unwrap(); 197 | 198 | match lock.take() { 199 | Some(mut rx) => { 200 | if i % 5 == 0 { 201 | let (item, rest) = rx.into_future().wait().ok().unwrap(); 202 | 203 | if item.is_none() { 204 | break; 205 | } 206 | 207 | n.fetch_add(1, Ordering::Relaxed); 208 | *lock = Some(rest); 209 | } else { 210 | // Just poll 211 | let n = n.clone(); 212 | let r = lazy(move || { 213 | let r = match rx.poll().unwrap() { 214 | Async::Ready(Some(_)) => { 215 | n.fetch_add(1, Ordering::Relaxed); 216 | *lock = Some(rx); 217 | false 218 | } 219 | Async::Ready(None) => { 220 | true 221 | } 222 | Async::NotReady => { 223 | *lock = Some(rx); 224 | false 225 | } 226 | }; 227 | 228 | Ok::(r) 229 | }).wait().unwrap(); 230 | 231 | if r { 232 | break; 233 | } 234 | } 235 | } 236 | None => break, 237 | } 238 | } 239 | }); 240 | 241 | th.push(t); 242 | } 243 | 244 | for i in 0..AMT { 245 | tx = tx.send(i).wait().unwrap(); 246 | } 247 | 248 | drop(tx); 249 | 250 | for t in th { 251 | t.join().unwrap(); 252 | } 253 | 254 | assert_eq!(AMT, n.load(Ordering::Relaxed)); 255 | } 256 | 257 | fn is_ready(res: &AsyncSink) -> bool { 258 | match *res { 259 | AsyncSink::Ready => true, 260 | _ => false, 261 | } 262 | } --------------------------------------------------------------------------------
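For reference, here is a minimal end-to-end sketch of the futures-based MPSC queue, modelled on `tests/mpsc.rs`. The capacities (8 senders, a 16-message buffer) and the `u32` payload are illustrative choices rather than requirements, and the `main` wrapper exists only to keep the snippet self-contained:

```rust
extern crate futures;
extern crate lockless;

use futures::{Future, Sink, Stream};
use lockless::sync::mpsc_queue::{ResizingMpscQueueReceiver, ResizingMpscQueueSender};

fn main() {
    // The receiver owns the shared state; senders are created from it and can be cloned.
    let rx: ResizingMpscQueueReceiver<u32> = ResizingMpscQueueReceiver::new(8, 16);
    let tx: ResizingMpscQueueSender<u32> = ResizingMpscQueueSender::new(&rx);

    // `send` consumes the sender and hands it back once the message has been accepted.
    let tx = tx.send(42).wait().unwrap();
    // Dropping the last sender lets the receiver observe end-of-stream.
    drop(tx);

    // Blocking iteration over the stream.
    let mut rx = rx.wait();
    assert_eq!(rx.next().unwrap(), Ok(42));
    assert!(rx.next().is_none());
}
```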