├── .github
│   └── workflows
│       └── rust.yml
├── .gitignore
├── Cargo.toml
├── README.md
├── examples
│   └── basic.rs
├── rustfmt.toml
├── src
│   ├── lib.rs
│   ├── storage.rs
│   └── util.rs
└── tests
    ├── bounds.rs
    └── would_write.rs

--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
name: Rust

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
    - name: Build
      run: cargo build --verbose
    - name: Run tests
      run: cargo test --verbose

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Created by .ignore support plugin (hsz.mobi)
### Rust template
# Generated by Cargo
# will have compiled files and executables
/target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock
Cargo.lock
.idea/
*.iml

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "shrev"
version = "1.1.3"
authors = ["Simon Rönnberg", "torkleyy"]
repository = "https://github.com/rustgd/shrev-rs.git"
homepage = "https://github.com/rustgd/shrev-rs.git"
edition = "2018"

license = "MIT OR Apache-2.0"
readme = "README.md"
documentation = "https://docs.rs/shrev"
description = "Event channel, meant to be used with `specs`."

categories = ["game-engines"]
keywords = ["ecs", "specs", "events"]

[dependencies]

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# `shrev`

[![crates.io badge](https://img.shields.io/crates/v/shrev.svg)](https://crates.io/crates/shrev) [![docs badge](https://docs.rs/shrev/badge.svg)](https://docs.rs/shrev)

A pull-based event channel, with events stored in a ring buffer,
meant to be used as a resource in [`specs`].

[`specs`]: https://github.com/slide-rs/specs

## Example usage

```rust
extern crate shrev;

use shrev::EventChannel;

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TestEvent {
    data: u32,
}

fn main() {
    let mut channel = EventChannel::new();

    channel.drain_vec_write(&mut vec![TestEvent { data: 1 }, TestEvent { data: 2 }]);

    let mut reader_id = channel.register_reader();

    // Should be empty, because the reader was created after the write
    assert_eq!(
        Vec::<TestEvent>::default(),
        channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
    );

    // Should have data, as a second write was done
    channel.single_write(TestEvent { data: 5 });

    assert_eq!(
        vec![TestEvent { data: 5 }],
        channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
    );

    // We can also just send in an iterator.
    channel.iter_write(
        [TestEvent { data: 8 }, TestEvent { data: 9 }]
            .iter()
            .cloned(),
    );

    assert_eq!(
        vec![TestEvent { data: 8 }, TestEvent { data: 9 }],
        channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
    );
}
```
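
As long as a `ReaderId` exists, the channel keeps every event that reader has
not yet seen, growing its buffer as needed. Drop readers you no longer use;
a minimal sketch:

```rust
use shrev::EventChannel;

fn main() {
    let mut channel = EventChannel::<u32>::new();
    let reader = channel.register_reader();

    // While `reader` is alive, unread events are kept around and the
    // internal ring buffer grows to hold all of them.
    channel.iter_write(0..1000);

    // Dropping the reader cancels the subscription, so the channel is
    // free to overwrite old events instead of growing further.
    drop(reader);
}
```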

## License

`shrev-rs` is free and open source software distributed under the terms of both
the MIT License and the Apache License 2.0.

Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.

--------------------------------------------------------------------------------
/examples/basic.rs:
--------------------------------------------------------------------------------
extern crate shrev;

use shrev::EventChannel;

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TestEvent {
    data: u32,
}

fn main() {
    let mut channel = EventChannel::new();

    let mut reader1 = channel.register_reader();
    let mut reader2 = channel.register_reader();

    channel.single_write(TestEvent { data: 1 });

    // Prints one event
    println!("reader1 read: {:#?}", collect(channel.read(&mut reader1)));
    channel.single_write(TestEvent { data: 32 });

    // Prints two events
    println!("reader2 read: {:#?}", collect(channel.read(&mut reader2)));
    // Prints no events
    println!("reader2 read: {:#?}", collect(channel.read(&mut reader2)));
}

/// Collects an iterator into a `Vec`
fn collect<'a>(events: impl Iterator<Item = &'a TestEvent>) -> Vec<&'a TestEvent> {
    events.collect()
}

--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
hard_tabs = false
merge_imports = true
reorder_impl_items = true
use_field_init_shorthand = true
use_try_shorthand = true
format_doc_comments = true
wrap_comments = true
edition = "2018"
version = "Two"

--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
//! Event channel, pull based, that uses a ring buffer for internal
//! storage, to make it possible to do immutable reads.
//!
//! See the examples directory for examples.

#![warn(missing_docs)]

pub use crate::storage::{ReaderId, StorageIterator as EventIterator};

use crate::storage::RingBuffer;

mod storage;
mod util;

/// Marker trait for data to use with the `EventChannel`.
///
/// Has an implementation for all types where its bounds are satisfied.
pub trait Event: Send + Sync + 'static {}

impl<T> Event for T where T: Send + Sync + 'static {}

const DEFAULT_CAPACITY: usize = 64;

/// The `EventChannel`, which is the central component of `shrev`.
///
/// ## How it works
///
/// This channel has a ring buffer, which it allocates with an initial capacity.
/// Once allocated, it writes new events into the buffer, wrapping around when
/// it reaches the "end" of the buffer.
///
/// However, before an event gets written into the buffer, the channel checks if
/// all readers have read the event which is about to be overwritten. In case
/// the answer is "No", it will grow the buffer so no events get overwritten.
///
/// Readers are stored in the `EventChannel` itself, because we need to access
/// their position in a write, so we can check what's described above. Thus, you
/// only get a `ReaderId` as a handle.
///
/// ## What do I use it for?
///
/// The `EventChannel` is basically a single producer, multiple consumer
/// ("SPMC") channel. That is, a `write` to the channel requires mutable access,
/// while reading can be done with just an immutable reference. All readers
/// (consumers) will always get all the events since their last read (or since
/// they were created, if they haven't read yet).
///
/// ## Examples
///
/// ```
/// use std::mem::drop;
///
/// use shrev::{EventChannel, ReaderId};
///
/// // The buffer will initially be 16 events big
/// let mut channel = EventChannel::with_capacity(16);
///
/// // This write has basically no effect; no reader can possibly observe it
/// channel.single_write(42i32);
///
/// let mut first_reader = channel.register_reader();
///
/// // What's interesting here is that we don't check the readers' positions _yet_
/// // That is because the size of 16 allows us to write 16 events before we need to perform
/// // such a check.
/// channel.iter_write(0..4);
///
/// // Now, we read 4 events (0, 1, 2, 3)
/// // Notice how we borrow the ID mutably; this is because logically we modify the reader,
/// // and we shall not read with the same ID concurrently
/// let _events = channel.read(&mut first_reader);
///
/// // Let's create a second reader; this one will not receive any of the previous events
/// let mut second_reader = channel.register_reader();
///
/// // No event returned
/// let _events = channel.read(&mut second_reader);
///
/// channel.iter_write(4..6);
///
/// // Both now get the same two events
/// let _events = channel.read(&mut first_reader);
/// let _events = channel.read(&mut second_reader);
///
/// // We no longer need our second reader, so we drop it
/// // This is important, since otherwise the buffer would keep growing if our reader doesn't read
/// // any events
/// drop(second_reader);
/// ```
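///
/// The buffer grows instead of dropping unread events, so a lagging reader
/// never misses anything (at the cost of memory). A short sketch of that
/// guarantee:
///
/// ```
/// use shrev::EventChannel;
///
/// let mut channel = EventChannel::with_capacity(2);
/// let mut reader = channel.register_reader();
///
/// // Writing far more events than the initial capacity of 2 is fine;
/// // the buffer grows so the reader still observes every event.
/// channel.iter_write(0..100);
/// assert_eq!(channel.read(&mut reader).count(), 100);
/// ```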
#[derive(Debug)]
pub struct EventChannel<E> {
    storage: RingBuffer<E>,
}

impl<E> Default for EventChannel<E>
where
    E: Event,
{
    fn default() -> Self {
        EventChannel::with_capacity(DEFAULT_CAPACITY)
    }
}

impl<E> EventChannel<E>
where
    E: Event,
{
    /// Create a new `EventChannel` with a default size of 64.
    pub fn new() -> Self {
        Default::default()
    }

    /// Create a new `EventChannel` with the given starting capacity.
    pub fn with_capacity(size: usize) -> Self {
        Self {
            storage: RingBuffer::new(size),
        }
    }

    /// Returns `true` if any reader would observe an additional event.
    ///
    /// This can be used to skip calls to `iter_write` in case the event
    /// construction is expensive.
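    ///
    /// For example, a costly event can be skipped entirely when nobody
    /// would observe it:
    ///
    /// ```
    /// use shrev::EventChannel;
    ///
    /// let mut channel = EventChannel::<String>::new();
    ///
    /// // No reader is registered, so the string is never built.
    /// if channel.would_write() {
    ///     channel.single_write("some expensive event".repeat(1000));
    /// }
    /// ```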
    pub fn would_write(&mut self) -> bool {
        self.storage.would_write()
    }

    /// Register a new reader.
    ///
    /// To be able to read events, a reader id is required. This is because
    /// otherwise the channel wouldn't know where in the ring buffer the
    /// reader has read to earlier. This information is stored in the channel,
    /// associated with the returned `ReaderId`.
    ///
    /// A newly created `ReaderId` will only receive the events written after
    /// its creation.
    ///
    /// Once you no longer perform `read`s with your `ReaderId`, you should
    /// drop it so the channel can safely overwrite events not read by it.
    pub fn register_reader(&mut self) -> ReaderId<E> {
        self.storage.new_reader_id()
    }

    /// Write a slice of events into storage.
    #[deprecated(note = "please use `iter_write` instead")]
    pub fn slice_write(&mut self, events: &[E])
    where
        E: Clone,
    {
        self.storage.iter_write(events.iter().cloned());
    }

    /// Write an iterator of events into storage.
    pub fn iter_write<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = E>,
        I::IntoIter: ExactSizeIterator,
    {
        self.storage.iter_write(iter);
    }

    /// Drain a vector of events into storage.
    pub fn drain_vec_write(&mut self, events: &mut Vec<E>) {
        self.storage.drain_vec_write(events);
    }

    /// Write a single event into storage.
    pub fn single_write(&mut self, event: E) {
        self.storage.single_write(event);
    }

    /// Read any events that have been written to storage since the last read
    /// with `reader_id` (or the creation of the `ReaderId`, if it hasn't read
    /// yet).
    ///
    /// Note that this will advance the position of the reader regardless of
    /// what you do with the iterator. In other words, calling `read`
    /// without iterating the result won't preserve the events returned. You
    /// need to iterate all the events as soon as you get them from this
    /// method. This behavior is equivalent to e.g. `Vec::drain`.
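    ///
    /// A short sketch of that behavior:
    ///
    /// ```
    /// use shrev::EventChannel;
    ///
    /// let mut channel = EventChannel::new();
    /// let mut reader_id = channel.register_reader();
    ///
    /// channel.single_write(42i32);
    ///
    /// // The returned iterator is dropped without being used, but the
    /// // reader has still advanced past the event ...
    /// let _ = channel.read(&mut reader_id);
    ///
    /// // ... so the second call returns nothing.
    /// assert_eq!(channel.read(&mut reader_id).count(), 0);
    /// ```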
    pub fn read(&self, reader_id: &mut ReaderId<E>) -> EventIterator<E> {
        self.storage.read(reader_id)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Debug, Clone, PartialEq)]
    struct Test {
        pub id: u32,
    }

    #[test]
    fn test_grow() {
        let mut channel = EventChannel::with_capacity(10);

        let mut reader0 = channel.register_reader();
        let mut reader1 = channel.register_reader();

        channel.iter_write(vec![1, 2, 3, 4, 5, 6, 7, 8]);

        let data = channel.read(&mut reader0).cloned().collect::<Vec<_>>();
        assert_eq!(data, vec![1, 2, 3, 4, 5, 6, 7, 8]);

        channel.iter_write(vec![9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]);

        let data = channel.read(&mut reader0).cloned().collect::<Vec<_>>();
        assert_eq!(
            data,
            vec![9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
        );

        for i in 23..10_000 {
            channel.single_write(i);
        }

        let data = channel.read(&mut reader1).cloned().collect::<Vec<_>>();
        assert_eq!(data, (1..10_000).collect::<Vec<_>>());
    }

    #[test]
    fn test_read_write() {
        let mut channel = EventChannel::with_capacity(14);

        let mut reader_id = channel.register_reader();
        let mut reader_id_extra = channel.register_reader();

        channel.single_write(Test { id: 1 });
        assert_eq!(
            vec![Test { id: 1 }],
            channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
        channel.single_write(Test { id: 2 });
        assert_eq!(
            vec![Test { id: 2 }],
            channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );

        assert_eq!(
            vec![Test { id: 1 }, Test { id: 2 }],
            channel
                .read(&mut reader_id_extra)
                .cloned()
                .collect::<Vec<_>>()
        );

        channel.single_write(Test { id: 3 });
        assert_eq!(
            vec![Test { id: 3 }],
            channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
        assert_eq!(
            vec![Test { id: 3 }],
            channel
                .read(&mut reader_id_extra)
                .cloned()
                .collect::<Vec<_>>()
        );
    }

    // There was previously a case where the tests worked but the example didn't, so
    // the example was added as a test case.
    #[test]
    fn test_example() {
        let mut channel = EventChannel::new();

        channel.drain_vec_write(&mut vec![TestEvent { data: 1 }, TestEvent { data: 2 }]);

        let mut reader_id = channel.register_reader();

        // Should be empty, because the reader was created after the write
        assert_eq!(
            Vec::<TestEvent>::default(),
            channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );

        // Should have data, as a second write was done
        channel.single_write(TestEvent { data: 5 });

        assert_eq!(
            vec![TestEvent { data: 5 }],
            channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );

        // We can also just send in an iterator.
        channel.iter_write(
            [TestEvent { data: 8 }, TestEvent { data: 9 }]
                .iter()
                .cloned(),
        );

        assert_eq!(
            vec![TestEvent { data: 8 }, TestEvent { data: 9 }],
            channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
    }
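
    // `drain_vec_write` moves the events out of the given vector rather
    // than cloning them; this checks both sides of that move.
    #[test]
    fn test_drain_vec_write_empties_vec() {
        let mut channel = EventChannel::new();
        let mut reader_id = channel.register_reader();

        let mut events = vec![Test { id: 1 }, Test { id: 2 }];
        channel.drain_vec_write(&mut events);

        // The source vector is left empty...
        assert!(events.is_empty());
        // ...and the events end up in the channel.
        assert_eq!(
            vec![Test { id: 1 }, Test { id: 2 }],
            channel.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
    }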

    #[derive(Clone, Debug, PartialEq, Eq)]
    pub struct TestEvent {
        data: u32,
    }
}

--------------------------------------------------------------------------------
/src/storage.rs:
--------------------------------------------------------------------------------
//! Ring buffer implementation that does immutable reads.

use std::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    num::Wrapping,
    ops::{Add, AddAssign, Sub, SubAssign},
    ptr,
    sync::mpsc::{self, Receiver, Sender},
};

use crate::util::{InstanceId, NoSharedAccess, Reference};
use std::fmt::Debug;

#[derive(Clone, Copy, Debug)]
struct CircularIndex {
    index: usize,
    size: usize,
}

impl CircularIndex {
    fn new(index: usize, size: usize) -> Self {
        CircularIndex { index, size }
    }

    fn at_end(size: usize) -> Self {
        CircularIndex {
            index: size - 1,
            size,
        }
    }

    /// A magic value (`!0`) marking the index as exhausted.
    /// It gets set when calling `step` and the end has been reached.
    fn magic(size: usize) -> Self {
        CircularIndex::new(!0, size)
    }

    fn is_magic(&self) -> bool {
        self.index == !0
    }

    fn step(&mut self, inclusive_end: usize) -> Option<usize> {
        match self.index {
            x if x == !0 => None,
            x if x == inclusive_end => {
                let r = Some(x);
                self.index = !0;

                r
            }
            x => {
                let r = Some(x);
                *self += 1;

                r
            }
        }
    }

    fn step_back(&mut self, inclusive_end: &mut usize) -> Option<usize> {
        match self.index {
            x if x == !0 => None,
            x if x == *inclusive_end => {
                let r = Some(x);
                self.index = !0;

                r
            }
            _ => {
                let r = Some(*inclusive_end);
                if *inclusive_end == 0 {
                    *inclusive_end = self.size;
                }

                *inclusive_end -= 1;

                r
            }
        }
    }
}

impl Add<usize> for CircularIndex {
    type Output = usize;

    fn add(self, rhs: usize) -> usize {
        (self.index + rhs) % self.size
    }
}

impl AddAssign<usize> for CircularIndex {
    fn add_assign(&mut self, rhs: usize) {
        self.index = *self + rhs;
    }
}

impl Sub<usize> for CircularIndex {
    type Output = usize;

    fn sub(self, rhs: usize) -> usize {
        (self.size - rhs + self.index) % self.size
    }
}

impl SubAssign<usize> for CircularIndex {
    fn sub_assign(&mut self, rhs: usize) {
        self.index = *self - rhs;
    }
}

struct Data<T> {
    data: Vec<T>,
    uninitialized: usize,
}

impl<T> Data<T> {
    fn new(size: usize) -> Self {
        let mut data = Data {
            data: vec![],
            uninitialized: 0,
        };

        unsafe {
            data.grow(0, size);
        }

        debug_assert_eq!(data.uninitialized, size, "Bug in shrev");

        data
    }

    unsafe fn get(&self, index: usize) -> &T {
        self.data.get_unchecked(index)
    }

    unsafe fn put(&mut self, cursor: usize, elem: T) {
        if self.uninitialized > 0 {
            // There is no element stored under `cursor`
            // -> do not drop anything!
            ptr::write(self.data.get_unchecked_mut(cursor) as *mut T, elem);
            self.uninitialized -= 1;
        } else {
            // We can safely drop this, it's initialized.
            *self.data.get_unchecked_mut(cursor) = elem;
        }
    }

    /// `cursor` is the first position that gets moved to the back;
    /// free memory will be created between `cursor - 1` and `cursor`.
    unsafe fn grow(&mut self, cursor: usize, by: usize) {
        assert!(by >= self.data.len());

        // Calculate how many elements we need to move
        let to_move = self.data.len() - cursor;

        // Reserve space and set the new length
        self.data.reserve_exact(by);
        let new = self.data.len() + by;
        self.data.set_len(new);

        // Move the elements after the cursor to the end of the buffer.
        // Since we grew the buffer at least by the old length,
        // the elements are non-overlapping.
        let src = self.data.as_ptr().offset(cursor as isize);
        let dst = self.data.as_mut_ptr().offset((cursor + by) as isize);
        ptr::copy_nonoverlapping(src, dst, to_move);

        self.uninitialized += by;
    }

    /// Called when dropping the ring buffer.
    unsafe fn clean(&mut self, cursor: usize) {
        let mut cursor = CircularIndex::new(cursor, self.data.len());
        let end = cursor - 1;

        while let Some(i) = cursor.step(end) {
            if self.uninitialized > 0 {
                self.uninitialized -= 1;
            } else {
                ptr::drop_in_place(self.data.get_unchecked_mut(i) as *mut T);
            }
        }

        self.data.set_len(0);
    }

    fn num_initialized(&self) -> usize {
        self.data.len() - self.uninitialized
    }
}

impl<T> Debug for Data<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Data")
            .field("num_initialized", &self.num_initialized())
            .field("num_uninitialized", &self.uninitialized)
            .finish()
    }
}

#[derive(Copy, Clone, Debug)]
struct Reader {
    generation: usize,
    last_index: usize,
}

impl Reader {
    fn set_inactive(&mut self) {
        self.last_index = !0;
    }

    fn active(&self) -> bool {
        self.last_index != !0
    }

    fn distance_from(&self, last: CircularIndex, current_gen: usize) -> usize {
        let this = CircularIndex {
            index: self.last_index,
            size: last.size,
        };

        // Zero distance is ambiguous: with the same generation the reader
        // has seen everything (a full buffer may be overwritten); otherwise
        // it is a whole lap behind and nothing may be overwritten.
        match this - last.index {
            0 if self.generation == current_gen => last.size,
            x => x,
        }
    }

    fn needs_shift(&self, last_index: usize, current_gen: usize) -> bool {
        self.last_index > last_index
            || (self.last_index == last_index && self.generation != current_gen)
    }
}

/// A reader ID which represents a subscription to the events pushed to the
/// `EventChannel`.
///
/// For each reader ID, the last read event is tracked; this way, the buffer
/// gets grown whenever it would overwrite an event which was not yet observed
/// by every `ReaderId`.
///
/// Dropping a `ReaderId` effectively cancels the subscription to those events.
///
/// Note that as long as a `ReaderId` exists, it is crucial to use it to read
/// the events; otherwise the buffer of the `EventChannel` **will** keep
/// growing.
pub struct ReaderId<T> {
    id: usize,
    marker: PhantomData<&'static [T]>,
    reference: Reference,
    // stupid way to make this `Sync`
    drop_notifier: NoSharedAccess<Sender<usize>>,
}

impl<T> fmt::Debug for ReaderId<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ReaderId")
            .field("id", &self.id)
            .field("marker", &self.marker)
            .field("reference", &self.reference)
            .field("drop_notifier", &self.drop_notifier)
            .finish()
    }
}

impl<T> Drop for ReaderId<T> {
    fn drop(&mut self) {
        let _ = self.drop_notifier.get_mut().send(self.id);
    }
}

#[derive(Default)]
struct ReaderMeta {
    /// Free ids
    free: Vec<usize>,
    readers: Vec<UnsafeCell<Reader>>,
}

impl ReaderMeta {
    fn new() -> Self {
        Default::default()
    }

    fn reader<T>(&self, id: &mut ReaderId<T>) -> Option<&mut Reader> {
        self.readers.get(id.id).map(|r| unsafe { &mut *r.get() })
    }

    fn reader_exclusive(&mut self, id: usize) -> &mut Reader {
        unsafe { &mut *self.readers[id].get() }
    }

    fn has_reader(&mut self) -> bool {
        self.readers
            .iter()
            .map(|r| unsafe { &mut *r.get() })
            .any(|r| r.active())
    }

    fn alloc(&mut self, last_index: usize, generation: usize) -> usize {
        match self.free.pop() {
            Some(id) => {
                self.reader_exclusive(id).last_index = last_index;
                self.reader_exclusive(id).generation = generation;

                id
            }
            None => {
                let id = self.readers.len();
                self.readers.push(UnsafeCell::new(Reader {
                    generation,
                    last_index,
                }));

                id
            }
        }
    }

    fn remove(&mut self, id: usize) {
        self.reader_exclusive(id).set_inactive();
        self.free.push(id);
    }

    // This needs to be mutable since `readers` might be borrowed in `reader`!
    fn nearest_index(&mut self, last: CircularIndex, current_gen: usize) -> Option<&Reader> {
        self.readers
            .iter()
            .map(|reader| unsafe { &*reader.get() })
            .filter(|reader| reader.active())
            .min_by_key(|reader| reader.distance_from(last, current_gen))
    }

    fn shift(&mut self, last_index: usize, current_gen: usize, grow_by: usize) {
        for reader in &mut self.readers {
            let reader = unsafe { &mut *reader.get() } as &mut Reader;
            if !reader.active() {
                continue;
            }

            if reader.needs_shift(last_index, current_gen) {
                reader.last_index += grow_by;
            }
        }
    }
}

unsafe impl Send for ReaderMeta {}
unsafe impl Sync for ReaderMeta {}

/// Ring buffer, holding data of type `T`.
pub struct RingBuffer<T> {
    available: usize,
    last_index: CircularIndex,
    data: Data<T>,
    free_rx: NoSharedAccess<Receiver<usize>>,
    free_tx: NoSharedAccess<Sender<usize>>,
    generation: Wrapping<usize>,
    instance_id: InstanceId,
    meta: ReaderMeta,
}

impl<T: 'static> RingBuffer<T> {
    /// Create a new ring buffer with the given initial size.
    pub fn new(size: usize) -> Self {
        assert!(size > 1);

        let (free_tx, free_rx) = mpsc::channel();
        let free_tx = NoSharedAccess::new(free_tx);
        let free_rx = NoSharedAccess::new(free_rx);

        RingBuffer {
            available: size,
            last_index: CircularIndex::at_end(size),
            data: Data::new(size),
            free_rx,
            free_tx,
            generation: Wrapping(0),
            instance_id: InstanceId::new("`ReaderId` was not allocated by this `EventChannel`"),
            meta: ReaderMeta::new(),
        }
    }

    /// Iterates over all elements of `iter` and pushes them to the buffer.
    pub fn iter_write<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = T>,
        I::IntoIter: ExactSizeIterator,
    {
        let iter = iter.into_iter();
        let len = iter.len();
        if len > 0 {
            self.ensure_additional(len);
            for element in iter {
                unsafe {
                    self.data.put(self.last_index + 1, element);
                }
                self.last_index += 1;
            }
            self.available -= len;
            self.generation += Wrapping(1);
        }
    }

    /// Removes all elements from a `Vec` and pushes them to the ring buffer.
    pub fn drain_vec_write(&mut self, data: &mut Vec<T>) {
        self.iter_write(data.drain(..));
    }

    /// Checks if any reader would observe an additional event.
    pub fn would_write(&mut self) -> bool {
        self.maintain();

        self.meta.has_reader()
    }

    /// Ensures that `num` elements can be inserted.
    /// Does nothing if there's enough space, grows the buffer otherwise.
    #[inline(always)]
    pub fn ensure_additional(&mut self, num: usize) {
        if self.available >= num {
            return;
        }

        self.ensure_additional_slow(num);
    }

    #[inline(never)]
    fn ensure_additional_slow(&mut self, num: usize) {
        self.maintain();
        let left: usize = match self.meta.nearest_index(self.last_index, self.generation.0) {
            None => {
                // No active readers; the whole buffer may be overwritten.
                self.available = self.last_index.size;

                return;
            }
            Some(reader) => {
                let left = reader.distance_from(self.last_index, self.generation.0);

                self.available = left;

                if left >= num {
                    return;
                } else {
                    left
                }
            }
        };
        let grow_by = num - left;
        let min_target_size = self.last_index.size + grow_by;

        // Make sure size' = 2^n * size, e.g. a full buffer of size 3 that
        // needs room for 4 more events grows to 12.
        let mut size = 2 * self.last_index.size;
        while size < min_target_size {
            size *= 2;
        }

        // Calculate adjusted growth
        let grow_by = size - self.last_index.size;

        // Insert the additional elements
        unsafe {
            self.data.grow(self.last_index + 1, grow_by);
        }
        self.last_index.size = size;

        self.meta
            .shift(self.last_index.index, self.generation.0, grow_by);
        self.available = grow_by + left;
    }

    fn maintain(&mut self) {
        while let Ok(id) = self.free_rx.get_mut().try_recv() {
            self.meta.remove(id);
        }
    }

    /// Write a single data point into the ring buffer.
    pub fn single_write(&mut self, element: T) {
        use std::iter::once;

        self.iter_write(once(element));
    }

    /// Create a new reader id for this ring buffer.
    pub fn new_reader_id(&mut self) -> ReaderId<T> {
        self.maintain();
        let last_index = self.last_index.index;
        let generation = self.generation.0;
        let id = self.meta.alloc(last_index, generation);

        ReaderId {
            id,
            marker: PhantomData,
            reference: self.instance_id.reference(),
            drop_notifier: NoSharedAccess::new(self.free_tx.get_mut().clone()),
        }
    }

    /// Read data from the ring buffer, starting where the last read ended, and
    /// up to where the last element was written.
    pub fn read(&self, reader_id: &mut ReaderId<T>) -> StorageIterator<T> {
        // Check if `reader_id` was actually created for this buffer.
        // This is very important as `reader_id` is a token allowing memory access,
        // and without this check a race could be caused by duplicate IDs.
        self.instance_id.assert_eq(&reader_id.reference);

        let (last_read_index, gen) = {
            let reader = self.meta.reader(reader_id).unwrap_or_else(|| {
                panic!(
                    "ReaderId not registered: {}\n\
                     This usually means that this ReaderId \
                     was created by a different storage",
                    reader_id.id
                )
            });
            let old = reader.last_index;
            reader.last_index = self.last_index.index;
            let old_gen = reader.generation;
            reader.generation = self.generation.0;

            (old, old_gen)
        };
        let mut index = CircularIndex::new(last_read_index, self.last_index.size);
        index += 1;
        if gen == self.generation.0 {
            // Same generation as the last read: there is nothing new.
            index = CircularIndex::magic(index.size);
        }

        StorageIterator {
            data: &self.data,
            end: self.last_index.index,
            index,
        }
    }
}

impl<T> Debug for RingBuffer<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("RingBuffer")
            .field("available", &self.available)
            .field("instance_id", &self.instance_id)
            .field("data", &self.data)
            .field("last_index", &self.last_index)
            .finish()
    }
}

impl<T> Drop for RingBuffer<T> {
    fn drop(&mut self) {
        unsafe {
            self.data.clean(self.last_index + 1);
        }
    }
}

/// Iterator over a slice of data in `RingBuffer`.
#[derive(Debug)]
pub struct StorageIterator<'a, T: 'a> {
    data: &'a Data<T>,
    /// Inclusive end
    end: usize,
    index: CircularIndex,
}

impl<'a, T> Iterator for StorageIterator<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        self.index
            .step(self.end)
            .map(|i| unsafe { self.data.get(i) })
    }

    // Needed to fulfill the contract of `ExactSizeIterator`
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();

        (len, Some(len))
    }
}

impl<'a, T> DoubleEndedIterator for StorageIterator<'a, T> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.index
            .step_back(&mut self.end)
            .map(|i| unsafe { self.data.get(i) })
    }
}

impl<'a, T> ExactSizeIterator for StorageIterator<'a, T> {
    fn len(&self) -> usize {
        match self.index.is_magic() {
            true => 0,
            false => (CircularIndex::new(self.end, self.index.size) - self.index.index) + 1,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Debug, Clone, PartialEq)]
    struct Test {
        pub id: u32,
    }

    #[derive(Debug, Clone, PartialEq)]
    struct Test2 {
        pub id: u32,
    }

    #[test]
    fn test_size() {
        let mut buffer = RingBuffer::<i32>::new(4);

        buffer.single_write(55);

        let mut reader = buffer.new_reader_id();

        buffer.iter_write(0..16);
        assert_eq!(buffer.read(&mut reader).len(), 16);

        buffer.iter_write(0..6);
        assert_eq!(buffer.read(&mut reader).len(), 6);
    }

    #[test]
    fn test_circular() {
        let mut buffer = RingBuffer::<i32>::new(4);

        buffer.single_write(55);

        let mut reader = buffer.new_reader_id();

        buffer.iter_write(0..4);
        assert_eq!(
            buffer.read(&mut reader).cloned().collect::<Vec<_>>(),
            vec![0, 1, 2, 3]
        );
    }

    #[test]
    fn test_empty_write() {
        let mut buffer = RingBuffer::<Test>::new(10);
        buffer.drain_vec_write(&mut vec![]);
        assert_eq!(buffer.data.num_initialized(), 0);
    }
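
    // `CircularIndex` does modular arithmetic over the buffer size; this
    // exercises the wrap-around behavior its `Add`/`Sub` impls rely on.
    #[test]
    fn test_circular_index_wraps() {
        let mut index = CircularIndex::new(3, 4);

        // Stepping past the last slot wraps back to slot 0.
        index += 1;
        assert_eq!(index.index, 0);

        // `Add` and `Sub` also wrap around the size.
        assert_eq!(index + 2, 2);
        assert_eq!(index - 1, 3);
    }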

    #[test]
    fn test_too_large_write() {
        let mut buffer = RingBuffer::<Test>::new(10);
        // Events just go off into the void if there's no reader registered,
        // so register one to force the buffer to hold all events.
        let _reader = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(15));
        assert_eq!(buffer.data.num_initialized(), 15);
    }

    #[test]
    fn test_empty_read() {
        let mut buffer = RingBuffer::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        let data = buffer.read(&mut reader_id);
        assert_eq!(Vec::<Test>::default(), data.cloned().collect::<Vec<_>>())
    }

    #[test]
    fn test_empty_read_rev() {
        let mut buffer = RingBuffer::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        let data = buffer.read(&mut reader_id);
        assert_eq!(
            Vec::<Test>::default(),
            data.rev().cloned().collect::<Vec<_>>()
        )
    }

    #[test]
    fn test_empty_read_write_before_id() {
        let mut buffer = RingBuffer::<Test>::new(10);
        buffer.drain_vec_write(&mut events(2));
        let mut reader_id = buffer.new_reader_id();
        let data = buffer.read(&mut reader_id);
        assert_eq!(Vec::<Test>::default(), data.cloned().collect::<Vec<_>>())
    }

    #[test]
    fn test_empty_read_write_before_id_rev() {
        let mut buffer = RingBuffer::<Test>::new(10);
        buffer.drain_vec_write(&mut events(2));
        let mut reader_id = buffer.new_reader_id();
        let data = buffer.read(&mut reader_id);
        assert_eq!(
            Vec::<Test>::default(),
            data.rev().cloned().collect::<Vec<_>>()
        )
    }

    #[test]
    fn test_read() {
        let mut buffer = RingBuffer::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(2));
        assert_eq!(
            vec![Test { id: 0 }, Test { id: 1 }],
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );

        assert_eq!(
            Vec::<Test>::new(),
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_read_rev() {
        let mut buffer = RingBuffer::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(2));
        assert_eq!(
            vec![Test { id: 1 }, Test { id: 0 }],
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );

        assert_eq!(
            Vec::<Test>::new(),
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_read_wrap_around_rev() {
        let mut buffer = RingBuffer::<Test>::new(4);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(2));
        // buffer should now be [--> 0, 1, -, -]
        assert_eq!(
            vec![Test { id: 1 }, Test { id: 0 }],
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );
        buffer.drain_vec_write(&mut events(3));
        // buffer should now be [2, 1, --> 0, 1]
        assert_eq!(
            vec![Test { id: 2 }, Test { id: 1 }, Test { id: 0 }],
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );

        assert_eq!(
            Vec::<Test>::new(),
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_read_wrap_around_overflow_rev() {
        let mut buffer = RingBuffer::<Test>::new(5);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(2));
        assert_eq!(
            vec![Test { id: 1 }, Test { id: 0 }],
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );
        buffer.drain_vec_write(&mut events(6));
        // Writing 6 events into a buffer of size 5 forces it to grow.
        assert_eq!(
            events(6).into_iter().rev().collect::<Vec<_>>(),
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );

        assert_eq!(
            Vec::<Test>::new(),
            buffer
                .read(&mut reader_id)
                .rev()
                .cloned()
                .collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_write_overflow() {
        let mut buffer = RingBuffer::<Test>::new(3);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(4));
        let data = buffer.read(&mut reader_id);
        assert_eq!(
            vec![
                Test { id: 0 },
                Test { id: 1 },
                Test { id: 2 },
                Test { id: 3 },
            ],
            data.cloned().collect::<Vec<_>>()
        );
    }
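
    // `read` verifies that the `ReaderId` was created by this very buffer;
    // using it with a different buffer must panic instead of granting
    // access to unrelated memory.
    #[test]
    #[should_panic(expected = "`ReaderId` was not allocated by this `EventChannel`")]
    fn test_reader_id_from_other_buffer() {
        let mut buffer_a = RingBuffer::<Test>::new(4);
        let buffer_b = RingBuffer::<Test>::new(4);

        let mut reader = buffer_a.new_reader_id();

        // Panics: the instance check in `read` fails.
        let _ = buffer_b.read(&mut reader);
    }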

    /// If you're getting a compilation error here this test has failed!
    #[test]
    fn test_send_sync() {
        trait SendSync: Send + Sync {
            fn is_send_sync() -> bool;
        }

        impl<T> SendSync for T
        where
            T: Send + Sync,
        {
            fn is_send_sync() -> bool {
                true
            }
        }

        assert!(RingBuffer::<Test>::is_send_sync());
        assert!(ReaderId::<Test>::is_send_sync());
    }

    #[test]
    fn test_reader_reuse() {
        let mut buffer = RingBuffer::<Test>::new(3);
        {
            let _reader_id = buffer.new_reader_id();
        }
        let _reader_id = buffer.new_reader_id();
        assert_eq!(_reader_id.id, 0);
        assert_eq!(buffer.meta.readers.len(), 1);
    }

    #[test]
    fn test_prevent_excess_growth() {
        let mut buffer = RingBuffer::<Test>::new(3);
        let mut reader_id = buffer.new_reader_id();
        println!("Initial buffer state: {:#?}", buffer);
        println!("--- first write ---");
        buffer.drain_vec_write(&mut events(2));
        println!("--- second write ---");
        buffer.drain_vec_write(&mut events(2));
        println!("--- writes complete ---");
        // We wrote 0, 1, 0, 1; if the buffer grew correctly we'll get all of these back.
        assert_eq!(
            vec![
                Test { id: 0 },
                Test { id: 1 },
                Test { id: 0 },
                Test { id: 1 },
            ],
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );

        buffer.drain_vec_write(&mut events(4));
        // After writing 4 more events the buffer should have no reason to grow beyond 6
        // (2 * 3).
        assert_eq!(buffer.data.num_initialized(), 6);
        assert_eq!(
            vec![
                Test { id: 0 },
                Test { id: 1 },
                Test { id: 2 },
                Test { id: 3 },
            ],
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
    }
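
    // `StorageIterator` implements `ExactSizeIterator`, so `len` and
    // `size_hint` report the exact number of unread events up front.
    #[test]
    fn test_len_matches_unread_events() {
        let mut buffer = RingBuffer::<Test>::new(4);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(3));

        let iter = buffer.read(&mut reader_id);
        assert_eq!(iter.len(), 3);
        assert_eq!(iter.size_hint(), (3, Some(3)));
    }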

    #[test]
    fn test_write_slice() {
        let mut buffer = RingBuffer::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        buffer.iter_write(events(2));
        let data = buffer.read(&mut reader_id);
        assert_eq!(
            vec![Test { id: 0 }, Test { id: 1 }],
            data.cloned().collect::<Vec<_>>()
        );
    }

    #[test]
    fn iter_write_empty() {
        let mut buffer = RingBuffer::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        buffer.iter_write(Vec::new());
        let mut data = buffer.read(&mut reader_id);
        assert_eq!(None, data.next());
    }

    fn events(n: u32) -> Vec<Test> {
        (0..n).map(|i| Test { id: i }).collect::<Vec<_>>()
    }
}

--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
use std::{
    fmt::{self, Debug, Formatter},
    sync::Arc,
};

/// A unique ID that can be used to assert two objects refer to the same
/// common object.
///
/// Example:
///
/// We have an allocator type which allocates `Foo`s. Some operations could
/// cause bugs if two `Foo`s from different allocators are used; `InstanceId`
/// can assert that both are from the same allocator by comparing their
/// `InstanceId`.
#[derive(Debug)]
pub struct InstanceId {
    inner: Arc<u8>,
    msg: &'static str,
}

impl InstanceId {
    /// Creates a new, unique instance id.
    pub fn new(msg: &'static str) -> Self {
        InstanceId {
            inner: Arc::default(),
            msg,
        }
    }

    /// Returns the unique `usize` representation which is used for all the
    /// assertions.
    #[inline]
    pub fn as_usize(&self) -> usize {
        self.inner.as_ref() as *const _ as usize
    }

    /// Check if `self` and `reference` are equal, panic otherwise.
    #[inline]
    pub fn assert_eq(&self, reference: &Reference) {
        assert_eq!(self, reference, "{}", self.msg);
    }

    /// Creates a "reference" of this instance id. This is essentially like
    /// cloning, but `InstanceId`s don't implement `Clone` on purpose since
    /// values carrying it should never be cloned.
    #[inline]
    pub fn reference(&self) -> Reference {
        Reference {
            inner: self.inner.clone(),
        }
    }
}

impl PartialEq<Reference> for InstanceId {
    #[inline]
    fn eq(&self, reference: &Reference) -> bool {
        self.as_usize() == reference.as_usize()
    }
}

/// A reference to an `InstanceId`.
#[derive(Debug, Default)]
pub struct Reference {
    inner: Arc<u8>,
}

impl Reference {
    /// Returns the `usize` representation of the referenced instance id which
    /// is used for all the assertions.
    #[inline]
    pub fn as_usize(&self) -> usize {
        self.inner.as_ref() as *const _ as usize
    }
}

impl Eq for Reference {}

impl PartialEq for Reference {
    fn eq(&self, other: &Reference) -> bool {
        self.as_usize() == other.as_usize()
    }
}

/// A struct which implements `Sync` for non-`Sync` types by only allowing
/// mutable access.
///
/// > Is this safe?
///
/// Yes. The `Sync` marker trait guarantees that `&T` is safe to send to another
/// thread. The reason this is not implemented for some types is that they have
/// interior mutability, i.e. you can change their state with an immutable
/// borrow. That's often not thread safe.
///
/// `NoSharedAccess` implements `Sync` for the only purpose of making the
/// containing struct `Sync`; there's no use in using `NoSharedAccess` in
/// isolation since it doesn't add anything new; it simply disallows immutable
/// access.
pub struct NoSharedAccess<T>(T);

impl<T> NoSharedAccess<T> {
    pub fn new(t: T) -> Self {
        NoSharedAccess(t)
    }

    pub fn get_mut(&mut self) -> &mut T {
        &mut self.0
    }
}

impl<T> Debug for NoSharedAccess<T>
where
    T: Debug,
{
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

unsafe impl<T> Sync for NoSharedAccess<T> where for<'a> &'a mut NoSharedAccess<T>: Send {}
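
// A small compile-time check of the reasoning above: `mpsc::Sender` is a
// classic non-`Sync` type (at least on older toolchains), yet wrapping it
// in `NoSharedAccess` makes it `Sync`, because `&mut Sender<T>` is `Send`.
#[cfg(test)]
mod tests {
    use super::NoSharedAccess;
    use std::sync::mpsc::Sender;

    fn assert_sync<T: Sync>() {}

    #[test]
    fn no_shared_access_is_sync() {
        assert_sync::<NoSharedAccess<Sender<u32>>>();
    }
}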

--------------------------------------------------------------------------------
/tests/bounds.rs:
--------------------------------------------------------------------------------
use shrev::*;

fn is_sync<T: Sync>() {}
fn is_send<T: Send>() {}

#[test]
fn event_channel_bounds() {
    is_send::<EventChannel<u32>>();
    is_sync::<EventChannel<u32>>();
}

#[test]
fn reader_id_bounds() {
    is_send::<ReaderId<u32>>();
    is_sync::<ReaderId<u32>>();
}

#[test]
fn event_iterator_bounds() {
    is_send::<EventIterator<'static, u32>>();
    is_sync::<EventIterator<'static, u32>>();
}

--------------------------------------------------------------------------------
/tests/would_write.rs:
--------------------------------------------------------------------------------
#![allow(unused)]

use shrev::*;

type EventChannel = shrev::EventChannel<u32>;

#[test]
fn naive_negative() {
    let mut channel = EventChannel::new();

    assert!(!channel.would_write());
}

#[test]
fn naive_positive() {
    let mut channel = EventChannel::new();
    let r = channel.register_reader();

    assert!(channel.would_write());
}

#[test]
fn all_dropped() {
    let mut channel = EventChannel::new();

    {
        let r1 = channel.register_reader();
        let r2 = channel.register_reader();
        let r3 = channel.register_reader();
    }

    assert!(!channel.would_write());
}

#[test]
fn recreated() {
    let mut channel = EventChannel::new();

    {
        let r1 = channel.register_reader();
        let r2 = channel.register_reader();
        let r3 = channel.register_reader();
    }
    let r1 = channel.register_reader();
    let r2 = channel.register_reader();

    assert!(channel.would_write());
}

--------------------------------------------------------------------------------