├── .gitignore ├── Cargo.toml ├── README.md ├── examples └── mio.rs └── src ├── fdbuf.rs ├── lib.rs └── ringbuf.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | 3 | name = "fdringbuf" 4 | version = "0.1.0" 5 | authors = ["David Henningsson "] 6 | 7 | description = "Deprecated - use the shmem-ipc crate instead" 8 | repository = "https://github.com/diwic/fdringbuf-rs" 9 | keywords = ["ringbuffer", "fd", "IPC", "filedescriptor", "eventfd"] 10 | license = "Apache-2.0/MIT" 11 | maintenance = { status = "deprecated" } 12 | 13 | [dependencies] 14 | libc = "*" 15 | log = "*" 16 | 17 | [dev-dependencies.nix] 18 | version = "*" 19 | # Nix on crates.io is currently broken w r t eventfd 20 | git = "https://github.com/carllerche/nix-rust" 21 | 22 | [dev-dependencies.mio] 23 | version = "*" 24 | 25 | [features] 26 | default = ["nix/eventfd"] 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Deprecation notice ## 2 | 3 | This crate has been superseded by the shmem-ipc crate, see 4 | https://github.com/diwic/shmem-ipc 5 | 6 | It contains essentially the same ringbuffer implementation and provides 7 | more building blocks for setting the ringbuffer up between untrusted processes. 8 | 9 | ### Ringbuffer with fd signalling - fast IPC without memory copies! 10 | 11 | This is an attempt to make the most efficient, fast, and flexible 12 | [IPC](http://en.wikipedia.org/wiki/Inter-process_communication) on the planet! 13 | One use case could be streaming audio/video between applications, where 14 | you need high performance and low latency. 
15 | 16 | It's efficient: 17 | * The code gives you direct access to the ringbuffer memory (no memory copies). 18 | * No busy waiting (well, unless you want to). 19 | 20 | It's fast: 21 | * No syscalls unless necessary - as long as the buffer is not completely 22 | full or completely empty, there's no need to sleep or signal a wakeup. 23 | * Just two atomic operations per read and per write. 24 | 25 | It's flexible: 26 | * By using file descriptors for signalling, you can wait on several fds at the same time. 27 | I.e., you can have the same thread waiting for more than one buffer, if you wish. 28 | * You can decide what type of fd you want - eventfd, pipes, or something else. 29 | (If you don't know where to start, use eventfd for best performance.) 30 | 31 | It's usable: 32 | * No allocations - suitable for real-time usage. 33 | * While primarily designed for Linux, there's no mandatory dependency that 34 | makes it Linux only (except for some benchmarks that only run under Linux). 35 | 36 | Limitations: 37 | * The ringbuffer capacity cannot be changed after creation, and it works only on `Copy` types. 38 | * The ringbuffer is single producer and single consumer, but the producer and 39 | the consumer can be different threads, and even different processes (if the 40 | ringbuffer points to shared memory). 41 | 42 | 43 | Other options 44 | ------------- 45 | 46 | Just sending a few bytes in every message, so the number of memory copies doesn't matter much? 47 | Then a `std::io::pipe` might be better. 48 | 49 | Want something more flexible, with a message bus daemon, signals, RPC, etc, and can live with 50 | some extra overhead? Try the [D-Bus](https://github.com/diwic/dbus-rs). 51 | 52 | Usage 53 | ----- 54 | 55 | First, you need to allocate memory for your buffer in the way you prefer. 56 | Use `ringbuf::channel_bufsize` to figure out how much memory the buffer needs. 
57 | 58 | Second, decide if you want a `ringbuf::channel` or a `fdbuf::channel` - you probably 59 | want the `fdbuf`, but in case you want to implement the signalling yourself (or just 60 | waste power busy waiting), you can use the `ringbuf` instead. 61 | 62 | The sender side can call the `send` method which takes a closure as argument. You will get 63 | a mutable slice to fill with your data. Note that since this is a ringbuffer that avoids 64 | memory copies, the closure might need to be called twice to fill it completely. 65 | The same applies to the receiver side, which calls the `recv` method. Your closure needs to 66 | return how many items the closure has read (for `recv`) or written (for `send`). 67 | 68 | If the buffer is empty (and only then), the receiver side will be woken up when data can be read from the 69 | buffer. Similarly, if the buffer is full (and only then), the sender side will be woken up when more data 70 | can be written. When woken up (and only then), call the `wait_clear` method to reset the wakeup. 71 | See the fdbuf benchmark for an example of how to receive, send and wait accordingly. 72 | 73 | Finally, a hint: [mio](https://github.com/carllerche/mio)'s event loop might be useful here, since it is 74 | based on fd waiting, too. 
75 | 76 | License 77 | ------- 78 | 79 | Apache-2.0/MIT 80 | 81 | -------------------------------------------------------------------------------- /examples/mio.rs: -------------------------------------------------------------------------------- 1 | extern crate nix; 2 | extern crate mio; 3 | extern crate fdringbuf; 4 | use fdringbuf::fdbuf; 5 | 6 | #[derive(Copy, Clone, Debug)] 7 | enum Protocol { 8 | Error, 9 | Hello(i32), 10 | Data(u8), 11 | Goodbye, 12 | } 13 | 14 | 15 | fn send_data(i: i32, mut s: fdbuf::Sender) { 16 | use std::thread; 17 | 18 | s.send_foreach(1, |_| Protocol::Hello(i)).unwrap(); 19 | thread::sleep_ms(100); 20 | s.send_foreach(6, |j| Protocol::Data(j as u8)).unwrap(); 21 | 22 | thread::sleep_ms(100); 23 | s.send_foreach(1, |_| Protocol::Goodbye).unwrap(); 24 | } 25 | 26 | struct MyReceiver(Vec>>, Vec, i32); 27 | 28 | impl mio::Handler for MyReceiver { 29 | type Timeout = (); 30 | type Message = (); 31 | 32 | fn ready(&mut self, eloop: &mut mio::EventLoop, token: mio::Token, _: mio::EventSet) { 33 | let mut goodbye = false; 34 | { 35 | let r = &mut self.0[token.0]; 36 | r.recv(|d| { 37 | for dd in d { 38 | println!("Receiving {:?} from thread {}", dd, token.0); 39 | if let &Protocol::Goodbye = dd { goodbye = true; } 40 | } 41 | (d.len(), false) 42 | }).unwrap(); 43 | } 44 | if goodbye { 45 | self.2 += 1; 46 | if self.2 == self.0.len() as i32 { eloop.shutdown() } 47 | } 48 | } 49 | } 50 | 51 | fn main() { 52 | let mut eloop = mio::EventLoop::new().unwrap(); 53 | 54 | let mut rvec = MyReceiver(vec![], vec![], 0); 55 | for i in 0..8 { 56 | use self::nix::sys::eventfd::*; 57 | let (efd1, efd2) = (eventfd(0, EFD_CLOEXEC).unwrap(), eventfd(0, EFD_CLOEXEC).unwrap()); 58 | let pipe1 = fdbuf::Pipe { reader: efd1, writer: efd1 }; 59 | let pipe2 = fdbuf::Pipe { reader: efd2, writer: efd2 }; 60 | let buf = vec![0u8; fdringbuf::channel_bufsize::(64)]; 61 | 62 | let (s, r) = fdbuf::channel(buf, pipe1, pipe2); 63 | std::thread::spawn(move || { send_data(i, s) 
}); 64 | rvec.0.push(r); 65 | for fd in [efd1, efd2].into_iter() { 66 | let mioio = mio::Io::from_raw_fd(*fd); 67 | eloop.register(&mioio, mio::Token(i as usize)).unwrap(); 68 | rvec.1.push(mioio); 69 | } 70 | } 71 | 72 | eloop.run(&mut rvec).unwrap(); 73 | } 74 | -------------------------------------------------------------------------------- /src/fdbuf.rs: -------------------------------------------------------------------------------- 1 | //! Ringbuffer with signalling via fd:s. 2 | //! You can use it with std::os::Pipe, but do try nix-rust's eventfds 3 | //! for slightly better performance! 4 | //! You will typically integrate with mio so you can wait for many fds at once, 5 | //! hence there are no functions that actually wait, just functions that give out 6 | //! the Fd to wait for. 7 | 8 | use std::os::unix::io::RawFd; 9 | use std::io; 10 | use std::ops::DerefMut; 11 | 12 | 13 | pub struct Sender { 14 | inner: ::ringbuf::Sender, 15 | signal_fd: RawFd, 16 | wait_fd: RawFd, 17 | } 18 | 19 | pub struct Receiver { 20 | inner: ::ringbuf::Receiver, 21 | signal_fd: RawFd, 22 | wait_fd: RawFd, 23 | } 24 | 25 | /*unsafe impl<'a, T: Copy> Send for Sender<'a, T> {} 26 | unsafe impl<'a, T: Copy> Send for Receiver<'a, T> {} 27 | */ 28 | 29 | fn write_fd(fd: RawFd) -> io::Result<()> { 30 | let e = unsafe { ::libc::write(fd, &1u64 as *const _ as *const ::libc::c_void, ::std::mem::size_of::() as ::libc::size_t) }; 31 | trace!("write {} to fd {}", e, fd); 32 | if e == -1 { return Err(io::Error::last_os_error()) } 33 | assert!(e > 0); 34 | Ok(()) 35 | } 36 | 37 | fn flush_fd(fd: RawFd) -> io::Result<()> { 38 | type Arr = [u64; 32]; 39 | let b: Arr = unsafe { ::std::mem::uninitialized() }; 40 | let e = unsafe { ::libc::read(fd, b.as_ptr() as *mut ::libc::c_void, ::std::mem::size_of::() as ::libc::size_t) }; 41 | trace!("read {} from fd {}", e, fd); 42 | if e == -1 { return Err(io::Error::last_os_error()) } 43 | assert!(e > 0); 44 | Ok(()) 45 | } 46 | 47 | impl Sender { 48 | 49 
| /// Returns number of items that can be written to the buffer (until it's full). 50 | /// f: This closure returns a tuple of (items written, please call me again). 51 | /// The pointer sent to the closure is an "out" parameter and contains 52 | /// garbage data on entering the closure. The usize parameter is the number of items that 53 | /// can be filled. 54 | pub fn send (usize, bool)>(&mut self, mut f: F) -> io::Result { 55 | let mut r = 0; 56 | let mut last; 57 | let mut was_empty = false; 58 | loop { 59 | let mut repeat = false; 60 | let (ll, wempty) = self.inner.send(|buf, s| { 61 | let (rr, rep) = f(buf, s); 62 | repeat = rep; 63 | r += rr; 64 | rr 65 | }); 66 | last = ll; 67 | was_empty |= wempty; 68 | if !repeat { break; } 69 | } 70 | if r > 0 && was_empty { try!(write_fd(self.signal_fd)) }; 71 | Ok(last) 72 | } 73 | 74 | /// "Safe" version of send. Will call your closure up to "count" times 75 | /// and depend on RVO to avoid memory copies. 76 | /// Will not block in case the buffer gets full. 77 | /// 78 | /// Returns number of items that can be written to the buffer (0 means the buffer is full). 79 | pub fn send_foreach T>(&mut self, count: usize, mut f: F) -> io::Result { 80 | let mut w = 0; 81 | let (mut free_items, mut was_empty) = self.inner.send_foreach(count, |_| { w += 1; f(w - 1) }); 82 | if free_items > 0 && w < count { 83 | let (freeitems, wempty) = self.inner.send_foreach(count - w, |_| { w += 1; f(w - 1) }); 84 | was_empty |= wempty; 85 | free_items = freeitems; 86 | } 87 | 88 | if count > 0 && was_empty { try!(write_fd(self.signal_fd)) }; 89 | Ok(free_items) 90 | } 91 | 92 | 93 | /// Returns fd to wait for, and number of items that can be written 94 | /// You should only wait for this fd if the number is zero. 95 | /// The Fd will not change during the lifetime of the sender. 
96 | pub fn wait_status(&self) -> (RawFd, usize) { 97 | (self.wait_fd, self.inner.write_count()) 98 | } 99 | 100 | /// Call this after woken up by the waitfd, or you'll just wake up again. 101 | /// Note: Might deadlock if you call it when not woken up by the waitfd. 102 | pub fn wait_clear(&mut self) -> io::Result<()> { 103 | flush_fd(self.wait_fd) 104 | } 105 | 106 | } 107 | 108 | impl Receiver { 109 | 110 | /// Returns remaining items that can be read. 111 | /// The second item is true if the buffer was full but read from 112 | /// (this can be used to signal remote side that more data can be written). 113 | /// f: This closure returns a tuple of (items written, please call me again). 114 | pub fn recv (usize, bool)>(&mut self, mut f: F) -> io::Result { 115 | let mut r = 0; 116 | let mut last; 117 | let mut was_full = false; 118 | loop { 119 | let mut repeat = false; 120 | let (ll, wfull) = self.inner.recv(|buf| { 121 | let (rr, rep) = f(buf); 122 | repeat = rep; 123 | r += rr; 124 | rr 125 | }); 126 | last = ll; 127 | was_full |= wfull; 128 | if !repeat { break; } 129 | } 130 | 131 | if r > 0 && was_full { try!(write_fd(self.signal_fd)) }; 132 | Ok(last) 133 | } 134 | 135 | /// Returns fd to wait for, and number of items that can be read 136 | /// You should only wait for this fd if the number is zero. 137 | /// The Fd will not change during the lifetime of the sender. 138 | pub fn wait_status(&self) -> (RawFd, usize) { 139 | (self.wait_fd, self.inner.read_count()) 140 | } 141 | 142 | /// Call this after woken up by the waitfd, or you'll just wake up again. 143 | /// Note: Might deadlock if you call it when not woken up by the waitfd. 144 | pub fn wait_clear(&mut self) -> io::Result<()> { 145 | flush_fd(self.wait_fd) 146 | } 147 | 148 | } 149 | 150 | #[derive(Debug, Copy, Clone)] 151 | pub struct Pipe { 152 | pub reader: RawFd, 153 | pub writer: RawFd, 154 | } 155 | 156 | /// Creates a channel with fd signalling. 
157 | /// Does not take ownership of the fds - they will not be closed 158 | /// when Sender and Receiver goes out of scope. 159 | pub fn channel>(mem: U, empty: Pipe, full: Pipe) -> 160 | (Sender, Receiver) { 161 | let (s, r) = ::ringbuf::channel(mem); 162 | (Sender { inner: s, signal_fd: empty.writer, wait_fd: full.reader }, 163 | Receiver { inner: r, signal_fd: full.writer, wait_fd: empty.reader }) 164 | } 165 | 166 | #[cfg(test)] 167 | mod tests { 168 | extern crate test; 169 | extern crate nix; 170 | use self::nix::sys::epoll::*; 171 | use std::os::unix::io::RawFd; 172 | use super::Pipe; 173 | 174 | fn make_pipe() -> Pipe { 175 | let a = Pipe { reader: -1, writer: -1 }; 176 | assert_eq!(0, unsafe { ::libc::pipe(::std::mem::transmute(&a)) }); 177 | assert!(a.reader > 2); 178 | assert!(a.writer > 2); 179 | a 180 | } 181 | 182 | fn make_epoll(fd: RawFd) -> RawFd { 183 | let sleep = epoll_create().unwrap(); 184 | let event = EpollEvent { data: 0, events: EPOLLIN }; 185 | epoll_ctl(sleep, EpollOp::EpollCtlAdd, fd, &event).unwrap(); 186 | sleep 187 | } 188 | 189 | fn wait_epoll(fd: RawFd) { 190 | let mut events = [EpollEvent { data: 0, events: EPOLLIN }]; 191 | assert_eq!(1, epoll_wait(fd, &mut events, 5000).unwrap()); 192 | } 193 | 194 | #[bench] 195 | fn pipe_send400_recv300_bufsize1024_u32(b: &mut test::Bencher) { 196 | let (pipe1, pipe2) = (make_pipe(), make_pipe()); 197 | run400_300_1024_bench(b, pipe1, pipe2); 198 | unsafe { 199 | ::libc::close(pipe1.reader); 200 | ::libc::close(pipe1.writer); 201 | ::libc::close(pipe2.reader); 202 | ::libc::close(pipe2.writer); 203 | } 204 | } 205 | 206 | #[bench] 207 | fn eventfd_send400_recv300_bufsize1024_u32(b: &mut test::Bencher) { 208 | use self::nix::sys::eventfd::*; 209 | let (efd1, efd2) = (eventfd(0, EFD_CLOEXEC).unwrap(), eventfd(0, EFD_CLOEXEC).unwrap()); 210 | let pipe1 = Pipe { reader: efd1, writer: efd1 }; 211 | let pipe2 = Pipe { reader: efd2, writer: efd2 }; 212 | run400_300_1024_bench(b, pipe1, pipe2); 213 | 
unsafe { 214 | ::libc::close(efd1); 215 | ::libc::close(efd2); 216 | } 217 | } 218 | 219 | fn run400_300_1024_bench(b: &mut test::Bencher, pipe1: Pipe, pipe2: Pipe) { 220 | let q = vec![0u8; ::ringbuf::channel_bufsize::(1024)]; 221 | let (mut s, mut r) = super::channel::(q, pipe1, pipe2); 222 | 223 | let guard = ::std::thread::spawn(move || { 224 | let mut sum = 0; 225 | let mut quit = false; 226 | let waitfd = make_epoll(r.wait_status().0); 227 | loop { 228 | let can_recv = r.recv(|d| { 229 | let mut cc = 0; 230 | for z in d.iter().take(300) { cc += 1; if *z == -1 { quit = true; return (cc, false); } sum += *z as u64 }; 231 | (cc, true) 232 | }).unwrap(); 233 | if quit { break; } 234 | if can_recv == 0 { 235 | debug!("Recv wait"); 236 | wait_epoll(waitfd); 237 | r.wait_clear().unwrap(); 238 | }; 239 | }; 240 | unsafe { ::libc::close(waitfd) }; 241 | sum 242 | }); 243 | 244 | let mut total1 = 0; 245 | let waitfd = make_epoll(s.wait_status().0); 246 | b.iter(|| { 247 | let mut c = 0; 248 | let can_send = s.send_foreach(400, |_| { c += 1; total1 += c as u64; c }).unwrap(); 249 | if can_send == 0 { 250 | debug!("Send wait"); 251 | wait_epoll(waitfd); 252 | s.wait_clear().unwrap(); 253 | }; 254 | }); 255 | s.send_foreach(1, |_| -1).unwrap(); 256 | unsafe { ::libc::close(waitfd) }; 257 | let total2 = guard.join().unwrap(); 258 | assert_eq!(total1, total2); 259 | } 260 | 261 | } 262 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(test, feature(test))] 2 | 3 | extern crate libc; 4 | 5 | #[macro_use] 6 | extern crate log; 7 | 8 | pub mod ringbuf; 9 | 10 | pub mod fdbuf; 11 | 12 | /// Use this utility function to figure out how big u8 buffer you need to allocate for a ringbuf or fdbuf. 
13 | pub fn channel_bufsize(capacity: usize) -> usize { ringbuf::channel_bufsize::(capacity) } 14 | -------------------------------------------------------------------------------- /src/ringbuf.rs: -------------------------------------------------------------------------------- 1 | //! This is a fast ringbuffer that tries to avoid memory copies as much as possible. 2 | //! There can be one producer and one consumer, but they can be in different threads 3 | //! i e, they are Send but not Clone. 4 | 5 | use std::sync::Arc; 6 | use std::sync::atomic::{AtomicUsize, Ordering}; 7 | use std::mem::size_of; 8 | use std::ops::DerefMut; 9 | 10 | #[allow(raw_pointer_derive)] 11 | #[derive(Copy, Clone)] 12 | struct Buf { 13 | data: *mut T, 14 | count_ptr: *const AtomicUsize, 15 | length: usize, 16 | } 17 | 18 | unsafe impl Send for Buf {} 19 | 20 | pub struct Sender { 21 | buf: Buf, 22 | index: usize, 23 | _owner: Arc, 24 | } 25 | 26 | pub struct Receiver { 27 | buf: Buf, 28 | index: usize, 29 | _owner: Arc, 30 | } 31 | 32 | /// Use this utility function to figure out how big buffer you need to allocate. 
33 | pub fn channel_bufsize(capacity: usize) -> usize { capacity * size_of::() + size_of::() } 34 | 35 | 36 | /// Create a channel (without signaling) 37 | /// Non-allocating - expects a pre-allocated buffer 38 | pub fn channel>(buffer: U) -> (Sender, Receiver) { 39 | 40 | let mut mem = buffer; 41 | let b = { 42 | let slice: &mut [u8] = &mut mem; 43 | assert!(slice.len() >= size_of::() + size_of::(), "Buffer too small"); 44 | 45 | Buf { 46 | count_ptr: slice.as_ptr() as *const AtomicUsize, 47 | data: unsafe { slice.as_ptr().offset(size_of::() as isize) } as *mut T, 48 | length: (slice.len() - size_of::()) / size_of::(), 49 | } 50 | }; 51 | b.count().store(0, Ordering::Relaxed); 52 | 53 | let o = Arc::new(mem); 54 | let s = Sender { buf: b, index: 0, _owner: o.clone() }; 55 | let r = Receiver { buf: b, index: 0, _owner: o }; 56 | (s, r) 57 | } 58 | 59 | impl Buf { 60 | #[inline] 61 | fn count(&self) -> &AtomicUsize { unsafe { &*self.count_ptr }} 62 | 63 | #[inline] 64 | fn slice(&mut self) -> &mut [T] { 65 | unsafe { ::std::slice::from_raw_parts_mut(self.data, self.length) } 66 | } 67 | } 68 | 69 | impl Sender { 70 | 71 | /// Lowest level "send" function 72 | /// 73 | /// Returns (free items, was empty) 74 | /// The first item is number of items that can be written to the buffer (until it's full). 75 | /// The second item is true if the buffer was empty but was written to 76 | /// (this can be used to signal remote side that more data can be read). 77 | /// f: This closure returns number of items written to the buffer. 78 | /// 79 | /// The pointer sent to the closure is an "out" parameter and contains 80 | /// garbage data on entering the closure. (This cannot safely be a &mut [T] because 81 | /// the closure might then read from uninitialized memory, even though it shouldn't) 82 | /// 83 | /// Since this is a ringbuffer, there might be more items to write even if you 84 | /// completely fill up during the closure. 
85 | pub fn send usize>(&mut self, f: F) -> (usize, bool) { 86 | use std::cmp; 87 | 88 | let cb = self.buf.count().load(Ordering::SeqCst); 89 | let l = self.buf.length; 90 | 91 | let n = { 92 | let data = self.buf.slice(); 93 | let end = self.index + cmp::min(l - self.index, l - cb); 94 | let slice = &mut data[self.index .. end]; 95 | 96 | let n = if slice.len() == 0 { 0 } else { f(slice.as_mut_ptr(), slice.len()) }; 97 | 98 | assert!(n <= slice.len()); 99 | n 100 | }; 101 | 102 | let c = self.buf.count().fetch_add(n, Ordering::SeqCst); 103 | self.index = (self.index + n) % l; 104 | trace!("Send: cb = {}, c = {}, l = {}, n = {}", cb, c, l, n); 105 | (l - c - n, c == 0 && n > 0) 106 | } 107 | 108 | /// "Safe" version of send. Will call your closure up to "count" times 109 | /// and depend on RVO to avoid memory copies. 110 | /// 111 | /// Returns (free items, was empty) like send does 112 | pub fn send_foreach T>(&mut self, count: usize, mut f: F) -> (usize, bool) { 113 | use std::ptr; 114 | 115 | let mut i = 0; 116 | self.send(|p, c| { 117 | while i < c && i < count { 118 | unsafe { ptr::write(p.offset(i as isize), f(i)) }; 119 | i += 1; 120 | }; 121 | i 122 | }) 123 | } 124 | 125 | /// Returns number of items that can be written 126 | pub fn write_count(&self) -> usize { self.buf.length - self.buf.count().load(Ordering::Relaxed) } 127 | } 128 | 129 | impl Receiver { 130 | /// Returns (remaining items, was full) 131 | /// The second item is true if the buffer was full but was read from 132 | /// (this can be used to signal remote side that more data can be written). 133 | /// f: This closure returns number of items that can be dropped from buffer. 134 | /// Since this is a ringbuffer, there might be more items to read even if you 135 | /// read it all during the closure. 
136 | pub fn recv usize>(&mut self, f: F) -> (usize, bool) { 137 | use std::cmp; 138 | 139 | let cb = self.buf.count().load(Ordering::SeqCst); 140 | let l = self.buf.length; 141 | let n = { 142 | let data: &[T] = self.buf.slice(); 143 | let slice = &data[self.index .. cmp::min(self.index + cb, l)]; 144 | 145 | let n = if slice.len() == 0 { 0 } else { f(slice) }; 146 | assert!(n <= slice.len()); 147 | n 148 | }; 149 | 150 | let c = self.buf.count().fetch_sub(n, Ordering::SeqCst); 151 | self.index = (self.index + n) % l; 152 | trace!("Recv: cb = {}, c = {}, l = {}, n = {}", cb, c, l, n); 153 | return (c - n, c >= l && n > 0) 154 | } 155 | 156 | /// Returns number of items that can be read 157 | pub fn read_count(&self) -> usize { self.buf.count().load(Ordering::Relaxed) } 158 | } 159 | 160 | #[cfg(test)] 161 | mod tests { 162 | extern crate test; 163 | 164 | #[test] 165 | fn owner() { 166 | let mut v = vec![20; 30]; 167 | let v2: &mut[u8] = &mut *v; 168 | let (_, _) = super::channel::(v2); 169 | } 170 | 171 | #[test] 172 | fn simple_test() { 173 | let (mut s, mut r) = super::channel(vec![10; 20]); 174 | // is it empty? 
175 | r.recv(|_| panic!()); 176 | s.send(|d, _| { unsafe { *d = 5u16 }; 1 }); 177 | r.recv(|d| { assert_eq!(d.len(), 1); 178 | assert_eq!(d[0], 5); 0 }); 179 | r.recv(|d| { assert_eq!(d.len(), 1); 180 | assert_eq!(d[0], 5); 1 }); 181 | r.recv(|_| panic!()); 182 | 183 | let mut i = 6; 184 | s.send_foreach(2, |_| { i += 1; i } ); 185 | r.recv(|d| { assert_eq!(d.len(), 2); 186 | assert_eq!(d[0], 7); 187 | assert_eq!(d[1], 8); 188 | 2 189 | }); 190 | } 191 | 192 | #[test] 193 | fn full_buf_test() { 194 | let q: Vec = vec![66; super::channel_bufsize::(3)]; 195 | let (mut s, mut r) = super::channel(q); 196 | s.send(|dd, l| { assert_eq!(l, 3); 197 | let d = unsafe { ::std::slice::from_raw_parts_mut(dd, l) }; 198 | d[0] = 5u16; d[1] = 8; d[2] = 9; 199 | 2 200 | }); 201 | let mut called = false; 202 | s.send_foreach(2, |i| { 203 | assert_eq!(called, false); 204 | assert_eq!(i, 0); 205 | called = true; 206 | 10 207 | }); 208 | s.send(|_, _| panic!()); 209 | r.recv(|d| { assert_eq!(d.len(), 3); 0 }); 210 | s.send(|_, _| panic!()); 211 | r.recv(|d| { assert_eq!(d.len(), 3); 212 | assert_eq!(d[0], 5); 213 | assert_eq!(d[1], 8); 214 | assert_eq!(d[2], 10); 1 }); 215 | s.send(|d, l| { assert_eq!(l, 1); unsafe { *d = 1 }; 1 }); 216 | s.send(|_, _| panic!()); 217 | r.recv(|d| { assert_eq!(d.len(), 2); 218 | assert_eq!(d[0], 8); 219 | assert_eq!(d[1], 10); 2 }); 220 | r.recv(|d| { assert_eq!(d.len(), 1); 221 | assert_eq!(d[0], 1); 1 222 | }); 223 | } 224 | 225 | #[bench] 226 | fn buf_send400_recv300_bufsize1024_u32(b: &mut test::Bencher) { 227 | let q = vec![0u8; super::channel_bufsize::(1024)]; 228 | let (mut s, mut r) = super::channel::(q); 229 | let (mut total1, mut total2) = (0u64, 0u64); 230 | b.iter(|| { 231 | s.send(|dd, l| { 232 | let d = unsafe { ::std::slice::from_raw_parts_mut(dd, l) }; 233 | let mut c = 0; 234 | for z in d.iter_mut().take(400) { *z = c; total1 += c as u64; c += 1; }; 235 | c as usize 236 | }); 237 | r.recv(|d| { 238 | for z in d.iter().take(300) { total2 
+= *z as u64 }; 239 | ::std::cmp::min(300, d.len()) 240 | }); 241 | }); 242 | 243 | r.recv(|d| { for z in d.iter() { total2 += *z as u64 }; d.len() }); 244 | r.recv(|d| { for z in d.iter() { total2 += *z as u64 }; d.len() }); 245 | 246 | assert_eq!(total1, total2); 247 | } 248 | 249 | #[bench] 250 | fn buf_send_foreach400_recv300_bufsize1024_u32(b: &mut test::Bencher) { 251 | let q = vec![0u8; super::channel_bufsize::(1024)]; 252 | let (mut s, mut r) = super::channel::(q); 253 | let (mut total1, mut total2) = (0u64, 0u64); 254 | b.iter(|| { 255 | let mut c = 0; 256 | s.send_foreach(400, |_| { c += 1; total1 += c as u64; c }); 257 | r.recv(|d| { 258 | for z in d.iter().take(300) { total2 += *z as u64 }; 259 | ::std::cmp::min(300, d.len()) 260 | }); 261 | }); 262 | 263 | r.recv(|d| { for z in d.iter() { total2 += *z as u64 }; d.len() }); 264 | r.recv(|d| { for z in d.iter() { total2 += *z as u64 }; d.len() }); 265 | 266 | assert_eq!(total1, total2); 267 | } 268 | 269 | } 270 | 271 | --------------------------------------------------------------------------------