├── .gitignore ├── Cargo.toml ├── README.md ├── benches └── sharedring1.rs ├── examples ├── client.rs └── server.rs ├── lines.svg ├── rustfmt.toml └── src ├── lib.rs ├── mem.rs ├── ringbuf.rs └── sharedring.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shmem-ipc" 3 | version = "0.3.0" 4 | authors = ["David Henningsson "] 5 | edition = "2018" 6 | readme = "README.md" 7 | license = "Apache-2.0/MIT" 8 | keywords = ["IPC", "memfd", "shmem", "memory"] 9 | documentation = "https://docs.rs/shmem-ipc" 10 | repository = "https://github.com/diwic/shmem-ipc/" 11 | homepage = "https://github.com/diwic/shmem-ipc/" 12 | categories = ["memory-management", "network-programming"] 13 | description = "Untrusted IPC with maximum performance and minimum latency on Linux" 14 | 15 | [dependencies] 16 | memfd = "0.4.0" 17 | memmap2 = "0.5.8" 18 | # Zerocopy depends on proc-macro2 and syn, so having "thiserror" as a 19 | # dependency does not add too much compilation time. 20 | thiserror = "1" 21 | zerocopy = "0.3" 22 | libc = "0.2.85" 23 | byteorder = "1.4" 24 | 25 | [dev-dependencies] 26 | dbus = "0.9.2" 27 | dbus-crossroads = "0.3" 28 | criterion = { version = "0.3", features = ["html_reports"] } 29 | 30 | [[bench]] 31 | name = "sharedring1" 32 | harness = false 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![crates.io](https://img.shields.io/crates/v/shmem-ipc.svg)](https://crates.io/crates/shmem-ipc) 2 | [![API documentation](https://docs.rs/shmem-ipc/badge.svg)](https://docs.rs/shmem-ipc) 3 | [![license](https://img.shields.io/crates/l/shmem-ipc.svg)](https://crates.io/crates/shmem-ipc) 4 | 5 | Untrusted IPC with maximum performance and minimum latency. On Rust, on Linux. 6 | 7 | When is this Rust crate useful? 8 | -------------------------- 9 | 10 | * Performance or latency is crucial, and 11 | * you run Linux. 12 | 13 | A typical use case could be audio/video streaming. 14 | 15 | Don't need maximum performance and minimum latency, and want a higher level protocol 16 | with serialization and lots of bells and whistles built-in? Try [D-Bus](https://docs.rs/dbus/). 17 | 18 | Also, a [unix socket](https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html) 19 | is easier to set up and is not that much slower (see benchmark below). 20 | 21 | As for Linux, this crate uses memfd sealing to ensure safety between untrusted processes, 22 | and ringbuffer signaling is done using eventfd for best performance. 23 | These two features are Linux only. 24 | 25 | Getting started 26 | --------------- 27 | 28 | You probably want to start in the `sharedring` module, which sets up a ringbuffer 29 | between untrusted processes (it's a wait-free/lock-free, bounded, SPSC queue). 30 | Another useful function is `mem::write_once` for a scenario where 31 | you write data once and make it available for reading afterwards. The `mem` and `ringbuf` 32 | modules contain building blocks that might be useful in other use cases. 33 | 34 | The downside of using memfd based shared memory is that you need to set it up 35 | by transferring file descriptors, using some other way of communication. 
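For example, the side that creates the ringbuffer ends up with a capacity and three file
descriptors that the other side needs in order to attach. A minimal sketch, mirroring the
client/server example in this repository (error handling and the actual descriptor
transfer are omitted):

```rust
use shmem_ipc::sharedring::{Receiver, Sender};

// Process A creates the ringbuffer...
let r: Receiver<f64> = Receiver::new(500000)?;
let memfd = r.memfd().as_file().try_clone()?;
let empty = r.empty_signal().try_clone()?;
let full = r.full_signal().try_clone()?;
// ...and hands the capacity and the three file descriptors over to process B,
// which attaches to the same ringbuffer:
let mut s: Sender<f64> = Sender::open(500000, memfd, empty, full)?;
```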
36 | Using [D-Bus](https://docs.rs/dbus/) would be the standard way of transferring the file descriptors -
37 | it's also possible using [unix sockets](https://crates.io/crates/uds).
38 |
39 | There is also a client/server example in the `examples` directory that can help you get started.
40 | Enjoy!
41 |
42 | Benchmark
43 | ---------
44 |
45 | [![Sharedring vs unix sockets vs D-Bus](https://github.com/diwic/shmem-ipc/blob/master/lines.svg)](https://github.com/diwic/shmem-ipc/blob/master/lines.svg)
46 | Notice the log scale: for a 64K packet, sharedring is about three times faster than unix sockets,
47 | and 60 times faster than D-Bus. (D-Bus is a higher level protocol, so that comparison
48 | is to some degree comparing apples and oranges.)
49 |
50 | License
51 | -------
52 |
53 | The code is Apache 2.0 / MIT dual licensed. Any code submitted in Pull Requests, discussions or
54 | issues is assumed to have this license, unless explicitly stated otherwise.
55 |
--------------------------------------------------------------------------------
/benches/sharedring1.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 | use criterion::*;
3 | use shmem_ipc::sharedring::{Sender, Receiver};
4 |
5 | fn setup_one(chunks: usize) -> (Sender, Receiver) {
6 |     let s: Sender = Sender::new(chunks).unwrap();
7 |     let memfd = s.memfd().as_file().try_clone().unwrap();
8 |     let e = s.empty_signal().try_clone().unwrap();
9 |     let f = s.full_signal().try_clone().unwrap();
10 |     let r: Receiver = Receiver::open(chunks, memfd, e, f).unwrap();
11 |     (s, r)
12 | }
13 |
14 | fn sum(x: &[u64]) -> u64 {
15 |     let mut r = 0u64;
16 |     for y in x { r = r.wrapping_add(*y) };
17 |     r
18 | }
19 |
20 | fn bench_one(c: &mut BenchmarkGroup, init: &[u64]) {
21 |     let (mut s, mut r) = setup_one::(2 * init.len());
22 |     let initsum = sum(init);
23 |     c.bench_with_input(BenchmarkId::new("Sharedring", init.len()*8), &(), |b,_| b.iter(|| {
24 |         let mut x = 0;
25 |         let mut ss: u64 = 0;
26 |         while x < init.len() {
27 |             let mut z = 0;
28 |             unsafe {
29 |                 s.send_trusted(|p| {
30 |                     z = std::cmp::min(p.len(), init.len() - x);
31 |                     let part = &init[x..(x+z)];
32 |                     p[0..z].copy_from_slice(part);
33 |                     z
34 |                 }).unwrap();
35 |                 r.receive_trusted(|p| {
36 |                     assert_eq!(p.len(), z);
37 |                     ss = ss.wrapping_add(sum(p));
38 |                     z
39 |                 }).unwrap();
40 |             }
41 |             x += z;
42 |         }
43 |         assert_eq!(x, init.len());
44 |         assert_eq!(initsum, ss);
45 |     }));
46 | }
47 |
48 | fn bench_one_unixsocket(c: &mut BenchmarkGroup, init: &[u64]) {
49 |     use std::os::unix::net;
50 |     use std::io::{Read, Write};
51 |     use zerocopy::AsBytes;
52 |     let (mut sender, mut receiver) = net::UnixStream::pair().unwrap();
53 |     sender.set_nonblocking(true).unwrap();
54 |     let mut rbuf: Vec<u64> = init.into();
55 |     let initsum = sum(init);
56 |     c.bench_with_input(BenchmarkId::new("Unix socket", init.len()*8), &(), |b,_| b.iter(|| {
57 |         let mut x = 0;
58 |         let mut ss: u64 = 0;
59 |         while x < init.len() {
60 |             let z = sender.write(black_box(&init[x..]).as_bytes()).unwrap();
61 |             assert_eq!(z % 8, 0);
62 |             let z2 = z/8;
63 |             receiver.read_exact(rbuf[0..z2].as_bytes_mut()).unwrap();
64 |             ss = ss.wrapping_add(sum(&rbuf[0..z2]));
65 |             x += z2;
66 |         }
67 |         assert_eq!(x, init.len());
68 |         assert_eq!(initsum, ss);
69 |     }));
70 | }
71 |
72 | fn bench_one_dbus(c: &mut BenchmarkGroup, init: &[u64]) {
73 |     let initsum = sum(init);
74 |     let initlen = init.len();
75 |     let done = std::rc::Rc::new(std::cell::Cell::new(false));
76 |
77 |     use dbus::blocking::LocalConnection;
78 |     use dbus::message::MatchRule;
79 |     use dbus::Message;
80 |     use dbus::channel::Sender;
81 |     let conn = LocalConnection::new_session().unwrap();
82 |     let done2 = done.clone();
83 |     conn.add_match(MatchRule::new_signal("sharedring.benchmark", "Benchmark"), move |_: (), _, msg| {
84 |         let data: &[u64] = msg.read1().unwrap();
85 |         assert_eq!(data.len(), initlen);
86 |         assert_eq!(initsum, sum(data));
87 |         done2.set(true);
88 |         true
89 |     }).unwrap();
90 |
91 |     c.bench_with_input(BenchmarkId::new("D-Bus", init.len()*8), &(), |b,_| b.iter(|| {
92 |         done.set(false);
93 |         let msg = Message::new_signal("/benchmark", "sharedring.benchmark", "Benchmark").unwrap().append1(init);
94 |         conn.send(msg).unwrap();
95 |         while !done.get() { conn.process(Duration::from_millis(1000)).unwrap(); }
96 |     }));
97 | }
98 |
99 | pub fn criterion_benchmark(c: &mut Criterion) {
100 |     let mut v = vec![5, 6, 7, 8];
101 |     let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
102 |     let mut group = c.benchmark_group("Sharedring vs unix sockets");
103 |     group.plot_config(plot_config);
104 |     // If we're in a hurry
105 |     group.warm_up_time(Duration::from_millis(500));
106 |     group.sample_size(40);
107 |     group.measurement_time(Duration::from_millis(2500));
108 |
109 |     loop {
110 |         bench_one(&mut group, &v);
111 |         bench_one_unixsocket(&mut group, &v);
112 |         bench_one_dbus(&mut group, &v);
113 |         if v.len() > 1024*1024 { return; }
114 |         v.extend_from_slice(&v.clone());
115 |     }
116 | }
117 |
118 | criterion_group!(benches, criterion_benchmark);
119 | criterion_main!(benches);
120 |
--------------------------------------------------------------------------------
/examples/client.rs:
--------------------------------------------------------------------------------
1 | //! Sends a lot of f64 values over shared memory to the server every second.
2 |
3 | use dbus::blocking::{Connection, Proxy};
4 | use std::error::Error;
5 | use std::thread::sleep;
6 | use shmem_ipc::sharedring::Sender;
7 | use std::time::Duration;
8 | use std::fs::File;
9 |
10 | fn main() -> Result<(), Box<dyn Error>> {
11 |     // Setup a D-Bus connection and call the Setup method of the server.
12 |     let c = Connection::new_session()?;
13 |     let proxy = Proxy::new("com.example.shmemtest", "/shmemtest", Duration::from_millis(3000), &c);
14 |     let (capacity, memfd, empty_signal, full_signal): (u64, File, File, File) =
15 |         proxy.method_call("com.example.shmemtest", "Setup", ())?;
16 |
17 |     // Setup the ringbuffer.
18 |     let mut r = Sender::open(capacity as usize, memfd, empty_signal, full_signal)?;
19 |     let mut items = 100000;
20 |     loop {
21 |         let item = 1.0f64 / (items as f64);
22 |         r.send_raw(|p: *mut f64, mut count| unsafe {
23 |             // We now have room for `count` f64 values, but due to the Rust aliasing rules
24 |             // and the untrusted process restrictions, we cannot turn the area into a
25 |             // Rust slice, so we write the data through the raw pointer directly.
26 |             if items < count { count = items };
27 |             for i in 0..count {
28 |                 *p.offset(i as isize) = item;
29 |             }
30 |             println!("Sending {} items of {}, in total {}", count, item, (count as f64) * item);
31 |             count
32 |         }).unwrap();
33 |         items += 100000;
34 |         sleep(Duration::from_millis(1000));
35 |     }
36 | }
37 |
--------------------------------------------------------------------------------
/examples/server.rs:
--------------------------------------------------------------------------------
1 | //! This is an example service that sums all f64 coming in through shared memory.
2 | //!
3 | //! For the initial setup, the service advertises a setup function over D-Bus.
4 |
5 | use dbus::channel::MatchingReceiver;
6 | use dbus::channel::Sender;
7 | use dbus::Path;
8 | use dbus::Message;
9 | use std::sync::Mutex;
10 | use std::sync::Arc;
11 | use std::thread;
12 | use dbus::MethodErr;
13 | use dbus::blocking::Connection;
14 | use std::fs::File;
15 | use dbus_crossroads::{Crossroads};
16 | use std::error::Error;
17 | use shmem_ipc::sharedring::Receiver;
18 |
19 | const CAPACITY: usize = 500000;
20 |
21 | #[derive(Default)]
22 | struct State {
23 |     sum: Arc<Mutex<f64>>,
24 | }
25 |
26 | impl State {
27 |     fn add_receiver(&mut self) -> Result<(u64, File, File, File), Box<dyn Error>> {
28 |         // Create a receiver in shared memory.
29 |         let mut r = Receiver::new(CAPACITY as usize)?;
30 |         let m = r.memfd().as_file().try_clone()?;
31 |         let e = r.empty_signal().try_clone()?;
32 |         let f = r.full_signal().try_clone()?;
33 |         // In this example, we spawn a thread for every ringbuffer.
34 |         // More complex real-world scenarios might multiplex using non-blocking frameworks,
35 |         // as well as having a mechanism to detect when a client is gone.
36 |         let sum = self.sum.clone();
37 |         thread::spawn(move || {
38 |             loop {
39 |                 r.block_until_readable().unwrap();
40 |                 let mut s = 0.0f64;
41 |                 r.receive_raw(|ptr: *const f64, count| unsafe {
42 |                     // We now have `count` f64 values available, but due to the Rust aliasing rules
43 |                     // and the untrusted process restrictions, we cannot turn the area into a
44 |                     // Rust slice, so we read the data from the raw pointer directly.
45 |                     for i in 0..count {
46 |                         s += *ptr.offset(i as isize);
47 |                     }
48 |                     *sum.lock().unwrap() += s;
49 |                     count
50 |                 }).unwrap();
51 |             }
52 |         });
53 |         Ok((CAPACITY as u64, m, e, f))
54 |     }
55 | }
56 |
57 | fn main() -> Result<(), Box<dyn Error>> {
58 |     let c = Connection::new_session()?;
59 |     c.request_name("com.example.shmemtest", false, true, false)?;
60 |     let mut cr = Crossroads::new();
61 |     let iface_token = cr.register("com.example.shmemtest", |b| {
62 |         b.method("Setup", (), ("capacity", "memfd", "empty_signal", "full_signal"), |_, state: &mut State, _: ()| {
63 |             state.add_receiver().map_err(|e| {
64 |                 println!("{}, {:?}", e, e.source());
65 |                 MethodErr::failed("failed to setup shared memory")
66 |             })
67 |         });
68 |         b.signal::<(f64,), _>("Sum", ("sum",));
69 |     });
70 |     cr.insert("/shmemtest", &[iface_token], State::default());
71 |     let acr = Arc::new(Mutex::new(cr));
72 |     let acr_clone = acr.clone();
73 |     c.start_receive(dbus::message::MatchRule::new_method_call(), Box::new(move |msg, conn| {
74 |         acr_clone.lock().unwrap().handle_message(msg, conn).unwrap();
75 |         true
76 |     }));
77 |
78 |     loop {
79 |         c.process(std::time::Duration::from_millis(1000))?;
80 |         let mut cr = acr.lock().unwrap();
81 |         let state: &mut State = cr.data_mut(&Path::from("/shmemtest")).unwrap();
82 |         let mut sum = state.sum.lock().unwrap();
83 |         if *sum != 0.0 {
84 |             println!("Sum: {}", sum);
85 |             c.send(Message::new_signal("/shmemtest", "com.example.shmemtest", "Sum").unwrap().append1(*sum)).unwrap();
86 |             *sum = 0.0;
87 |         }
88 |     }
89 | }
90 |
--------------------------------------------------------------------------------
/lines.svg:
--------------------------------------------------------------------------------
[Gnuplot-generated SVG of the benchmark results: "Sharedring vs unix sockets: Comparison". Log-scale axes, x = Input, y = Average time (ms); one curve each for D-Bus, Sharedring and Unix socket. Raw SVG vector data omitted.]
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | max_width = 128
2 | fn_single_line = true
3 | where_single_line = true
4 | unstable_features = true
5 | fn_args_layout = "Compressed"
6 | use_small_heuristics = "Max"
7 |
8 | # I don't like rustfmt myself but if you really have to use it,
9 | # the options above seem to be the ones that make the code
10 | # the least ugly. :-\
11 |
12 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Communication between processes using shared memory.
2 | //!
3 | //! This crate uses memfd sealing to ensure safety between untrusted processes,
4 | //! and therefore, it works only on Linux.
5 | //!
6 | //! You might want to start in the `sharedring` module, which sets up a lock-free ringbuffer
7 | //! between untrusted processes. Another useful function is `mem::write_once` for a scenario where
8 | //! you write data once and make it available for reading afterwards. The `mem` and `ringbuf`
9 | //! modules contain building blocks that might be useful in other use cases.
10 | //!
11 | //! There is also a client/server example in the `examples` directory that can help you get started.
12 | //! Enjoy!
13 | 14 | pub mod mem; 15 | 16 | pub mod ringbuf; 17 | 18 | pub mod sharedring; 19 | 20 | /// Enumeration of errors possible in this library 21 | #[derive(thiserror::Error, Debug)] 22 | pub enum Error { 23 | #[error("Memfd errors {0:?}")] 24 | Memfd(#[from] mem::mfd::Error), 25 | #[error("OS errors {0:?}")] 26 | Io(#[from] std::io::Error), 27 | #[error("Ringbuffer errors {0:?}")] 28 | Ringbuf(#[from] ringbuf::Error), 29 | } 30 | -------------------------------------------------------------------------------- /src/mem.rs: -------------------------------------------------------------------------------- 1 | //! Functions for creating memory maps from memfds. 2 | 3 | /// Reexport the memmap2 crate 4 | pub mod mmap { 5 | pub use memmap2::*; 6 | } 7 | 8 | /// Reexport the memfd crate 9 | pub mod mfd { 10 | pub use memfd::*; 11 | } 12 | 13 | use super::Error; 14 | 15 | fn verify_seal(memfd: &mfd::Memfd, seal: mfd::FileSeal) -> Result<(), Error> { 16 | let seals = memfd.seals()?; 17 | if seals.contains(&seal) { 18 | return Ok(()); 19 | } 20 | // Try to add the seal. 21 | memfd.add_seal(seal)?; 22 | Ok(()) 23 | } 24 | 25 | /// Creates a memory map of a memfd. The memfd is sealed to be read only. 26 | pub fn read_memfd(memfd: &mfd::Memfd) -> Result { 27 | // The file can be truncated; no safe memory mapping. 28 | verify_seal(&memfd, mfd::FileSeal::SealShrink)?; 29 | // The file can be written to; no safe references. 30 | verify_seal(&memfd, mfd::FileSeal::SealWrite)?; 31 | 32 | let r = unsafe { mmap::MmapOptions::new().map_copy_read_only(memfd.as_file()) }?; 33 | Ok(r) 34 | } 35 | 36 | /// Creates a raw memory map of a memfd, suitable for IPC. It must be writable. 37 | pub fn raw_memfd(memfd: &mfd::Memfd, len: usize) -> Result { 38 | // The file can be truncated; no safe memory mapping. 39 | verify_seal(&memfd, mfd::FileSeal::SealShrink)?; 40 | 41 | // If the file has been sealed as read-only, the below will fail. 42 | // If the file later is trying to be sealed as read-only, that call will fail and 43 | // our mapping will remain. 44 | 45 | Ok(mmap::MmapOptions::new().len(len).map_raw(memfd.as_file())?) 46 | } 47 | 48 | /// Creates a shared memory area that can be written once and read many times. 49 | /// 50 | /// The memfd is created, memory mapped and the closure can fill in the data. 51 | /// After the data is filled in, the memfd is sealed to be read only. 52 | /// 53 | /// # Example 54 | /// ```rust 55 | /// use shmem_ipc::mem::{write_once, read_memfd}; 56 | /// // Create a 4 MB memory area 57 | /// let memfd = write_once(1024*1024*4, "write_then_read_test", |x| { 58 | /// // Fill it with data 59 | /// for (i, j) in x.iter_mut().enumerate() { *j = i as u8; } 60 | /// }).unwrap(); 61 | /// /* ... send the memfd to another process somehow ... */ 62 | /// let map = read_memfd(&memfd).unwrap(); 63 | /// // Read the data 64 | /// for (i, j) in map.iter().enumerate() { assert_eq!(i as u8, *j); } 65 | /// ``` 66 | pub fn write_once(size: u64, name: &str, f: F) -> Result { 67 | let opts = memfd::MemfdOptions::new().allow_sealing(true).close_on_exec(true); 68 | let mut h = mfd::SealsHashSet::new(); 69 | h.insert(mfd::FileSeal::SealGrow); 70 | h.insert(mfd::FileSeal::SealShrink); 71 | h.insert(mfd::FileSeal::SealSeal); 72 | h.insert(mfd::FileSeal::SealWrite); 73 | 74 | write_once_custom(size, name, opts, &h, f) 75 | } 76 | 77 | /// Like "write_once", but allows for customisation of the memfd_options and seals added after writing. 
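///
/// # Example
///
/// A sketch that seals only against growing and shrinking (so the area stays writable),
/// with otherwise default options; the memfd name is just an illustrative label:
///
/// ```ignore
/// use shmem_ipc::mem::{mfd, write_once_custom};
/// let opts = mfd::MemfdOptions::new().allow_sealing(true).close_on_exec(true);
/// let mut seals = mfd::SealsHashSet::new();
/// seals.insert(mfd::FileSeal::SealGrow);
/// seals.insert(mfd::FileSeal::SealShrink);
/// let memfd = write_once_custom(4096, "custom_seal_example", opts, &seals, |x| {
///     x[0] = 1; // fill in data before the seals are added
/// }).unwrap();
/// ```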
78 | pub fn write_once_custom( 79 | size: u64, name: &str, memfd_options: memfd::MemfdOptions, seals: &mfd::SealsHashSet, f: F, 80 | ) -> Result { 81 | let memfd = memfd_options.create(name)?; 82 | // Sets the memory to zeroes. 83 | memfd.as_file().set_len(size)?; 84 | // We're the sole owner of the file descriptor, it's safe to create a mutable reference to the data. 85 | let mut m = unsafe { mmap::MmapMut::map_mut(memfd.as_file())? }; 86 | f(&mut m); 87 | drop(m); 88 | if !seals.is_empty() { 89 | memfd.add_seals(seals)?; 90 | } 91 | Ok(memfd) 92 | } 93 | 94 | #[cfg(test)] 95 | mod tests { 96 | use super::*; 97 | #[test] 98 | fn create_mmap() -> Result<(), Error> { 99 | let opts = mfd::MemfdOptions::default().allow_sealing(true); 100 | let memfd = opts.create("test-ro")?; 101 | memfd.as_file().set_len(16384)?; 102 | 103 | let mmap = read_memfd(&memfd)?; 104 | assert!(memfd.seals()?.contains(&mfd::FileSeal::SealShrink)); 105 | assert!(memfd.seals()?.contains(&mfd::FileSeal::SealWrite)); 106 | assert_eq!(mmap.len(), 16384); 107 | // The memfd is now read-only, cannot create a writable one. 108 | assert!(raw_memfd(&memfd, 16384).is_err()); 109 | Ok(()) 110 | } 111 | 112 | #[test] 113 | fn create_mmap_raw() -> Result<(), Error> { 114 | let opts = mfd::MemfdOptions::default().allow_sealing(true); 115 | let memfd = opts.create("test-raw")?; 116 | memfd.as_file().set_len(16384)?; 117 | let mmap_raw = raw_memfd(&memfd, 16384)?; 118 | assert_eq!(mmap_raw.len(), 16384); 119 | // The memfd now has a writable mapping, cannot create a read-only one. 120 | assert!(read_memfd(&memfd).is_err()); 121 | Ok(()) 122 | } 123 | 124 | #[test] 125 | fn write_then_read() -> Result<(), Error> { 126 | let m = write_once(4096, "write_then_read_test", |x| { 127 | assert_eq!(x.len(), 4096); 128 | assert_eq!(x[5], 0); 129 | x[2049] = 100; 130 | })?; 131 | let m2 = read_memfd(&m)?; 132 | assert_eq!(m2[2049], 100); 133 | assert_eq!(m2[465], 0); 134 | Ok(()) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/ringbuf.rs: -------------------------------------------------------------------------------- 1 | //! This is a fast ringbuffer that tries to avoid memory copies as much as possible. 2 | //! There can be one producer and one consumer, but they can be in different threads 3 | //! i e, they are Send but not Clone. 
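//!
//! A minimal single-process sketch, mirroring the tests in this module (the shared-memory
//! setup between processes is handled by the `sharedring` module):
//!
//! ```ignore
//! use shmem_ipc::ringbuf::{channel, channel_bufsize};
//! // Room for 3 u16 items plus the cache-line sized header.
//! let mut buf = vec![0u8; channel_bufsize::<u16>(3)];
//! let (mut sender, mut receiver) = channel(&mut buf);
//! sender.send(|ptr, _count| { unsafe { *ptr = 5u16 }; 1 }).unwrap();
//! receiver.recv(|ptr, count| { assert_eq!(count, 1); assert_eq!(unsafe { *ptr }, 5); 1 }).unwrap();
//! ```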
4 | 5 | use std::mem::size_of; 6 | use std::sync::atomic::{AtomicUsize, Ordering}; 7 | use std::{cmp, ptr}; 8 | 9 | /// Enumeration of errors possible in this library 10 | #[derive(thiserror::Error, Debug)] 11 | pub enum Error { 12 | #[error("Buffer too small")] 13 | BufTooSmall, 14 | #[error("Buffer too big")] 15 | BufTooBig, 16 | #[error("Buffer unaligned")] 17 | BufUnaligned, 18 | #[error("Buffer corrupt or uninitialized")] 19 | BufCorrupt, 20 | #[error("Callback read more items than existed in the buffer")] 21 | CallbackReadTooMuch, 22 | #[error("Callback wrote more items than available in the buffer")] 23 | CallbackWroteTooMuch, 24 | } 25 | 26 | #[derive(Copy, Clone)] 27 | struct Buf { 28 | data: *mut T, 29 | count_ptr: *const AtomicUsize, 30 | length: usize, 31 | } 32 | 33 | unsafe impl Send for Buf {} 34 | unsafe impl Sync for Buf {} 35 | 36 | pub struct Sender { 37 | buf: Buf, 38 | index: usize, 39 | } 40 | 41 | pub struct Receiver { 42 | buf: Buf, 43 | index: usize, 44 | } 45 | 46 | #[derive(Copy, Clone, Debug)] 47 | pub struct Status { 48 | /// Number of remaining items that can be immediately read/written 49 | pub remaining: usize, 50 | /// True if we should signal the remote side to wake up 51 | pub signal: bool, 52 | } 53 | 54 | const CACHE_LINE_SIZE: usize = 64; 55 | 56 | /// Use this utility function to figure out how big buffer you need to allocate. 57 | pub fn channel_bufsize(capacity: usize) -> usize { capacity * size_of::() + CACHE_LINE_SIZE } 58 | 59 | /// Initializes a ring buffer. 60 | /// 61 | /// # Panics 62 | /// 63 | /// In case the buffer is too small or too big. 64 | pub fn channel(buffer: &mut [u8]) -> (Sender, Receiver) { 65 | let b = unsafe { Buf::attach(buffer.as_mut_ptr(), buffer.len(), true).unwrap() }; 66 | (Sender { buf: b, index: 0 }, Receiver { buf: b, index: 0 }) 67 | } 68 | 69 | impl Buf { 70 | #[inline] 71 | fn count(&self) -> &AtomicUsize { unsafe { &*self.count_ptr } } 72 | 73 | #[inline] 74 | fn load_count(&self) -> Result { 75 | let x = self.count().load(Ordering::Acquire); 76 | if x > self.length { 77 | Err(Error::BufCorrupt) 78 | } else { 79 | Ok(x) 80 | } 81 | } 82 | 83 | unsafe fn attach(data: *mut u8, length: usize, init: bool) -> Result { 84 | use Error::*; 85 | if length < CACHE_LINE_SIZE + size_of::() { 86 | Err(BufTooSmall)? 87 | } 88 | if length >= isize::MAX as usize { 89 | Err(BufTooBig)? 90 | } 91 | let r = Self { 92 | count_ptr: data as *mut _ as *const AtomicUsize, 93 | data: data.offset(CACHE_LINE_SIZE as isize) as _, 94 | length: (length - CACHE_LINE_SIZE) / size_of::(), 95 | }; 96 | if (r.count_ptr as usize) % std::mem::align_of::() != 0 { 97 | Err(BufUnaligned)? 98 | } 99 | if (r.data as usize) % std::mem::align_of::() != 0 { 100 | Err(BufUnaligned)? 101 | } 102 | if init { 103 | r.count().store(0, Ordering::Release); 104 | } else { 105 | r.load_count()?; 106 | } 107 | Ok(r) 108 | } 109 | } 110 | 111 | impl Sender { 112 | /// Assume a ringbuf is set up at the location. 113 | /// 114 | /// A buffer where the first 64 bytes are zero is okay. 115 | /// 116 | /// # Safety 117 | /// 118 | /// You must ensure that "data" points to a readable and writable memory area of "length" bytes. 119 | pub unsafe fn attach(data: *mut u8, length: usize) -> Result { 120 | Ok(Self { buf: Buf::attach(data, length, false)?, index: 0 }) 121 | } 122 | 123 | /// Lowest level "send" function 124 | /// 125 | /// The closure will be called only if the buffer is not full, and needs to returns the number 126 | /// of items written to the buffer. 
127 | /// 128 | /// The pointer sent to the closure is an "out" parameter to the first item, and the second 129 | /// parameter is the number of items that can be written to the buffer 130 | /// 131 | /// Since this is a ringbuffer, there might be more items to write even if you 132 | /// completely fill up during the closure. 133 | pub fn send usize>(&mut self, f: F) -> Result { 134 | let cb = self.buf.load_count()?; 135 | let l = self.buf.length; 136 | 137 | let n = { 138 | let end = self.index + cmp::min(l - self.index, l - cb); 139 | let slice_start = unsafe { self.buf.data.offset(self.index as isize) }; 140 | let slice_len = end - self.index; 141 | 142 | let n = if slice_len == 0 { 0 } else { f(slice_start, slice_len) }; 143 | if n > slice_len { 144 | Err(Error::CallbackWroteTooMuch)? 145 | } 146 | assert!(n <= slice_len); 147 | n 148 | }; 149 | 150 | let c = self.buf.count().fetch_add(n, Ordering::AcqRel); 151 | self.index = (self.index + n) % l; 152 | // dbg!("Send: cb = {}, c = {}, l = {}, n = {}", cb, c, l, n); 153 | Ok(Status { remaining: l - c - n, signal: c == 0 && n > 0 }) 154 | } 155 | 156 | /// "Safe" version of send. Will call your closure up to "count" times 157 | /// and depend on optimisation to avoid memory copies. 158 | /// 159 | /// # Panics 160 | /// 161 | /// Panics in case the buffer is corrupt. 162 | pub fn send_foreach T>(&mut self, mut count: usize, mut f: F) -> Status { 163 | loop { 164 | let status = self 165 | .send(|p, c| { 166 | let mut j = 0; 167 | while j < c && count > 0 { 168 | unsafe { ptr::write(p.offset(j as isize), f()) }; 169 | j += 1; 170 | count -= 1; 171 | } 172 | j 173 | }) 174 | .unwrap(); 175 | if status.remaining == 0 || count == 0 { 176 | return status; 177 | } 178 | } 179 | } 180 | 181 | /// Returns number of items that can be written 182 | pub fn write_count(&self) -> Result { Ok(self.buf.length - self.buf.load_count()?) } 183 | } 184 | 185 | impl Receiver { 186 | /// Lowest level receive function 187 | /// 188 | /// The closure will be called with a pointer to the first item and the number of items, 189 | /// and should return the number of items that can be dropped from the buffer. 190 | /// 191 | /// Since this is a ringbuffer, there might be more items to read even if you 192 | /// read it all during the closure. 193 | pub fn recv usize>(&mut self, f: F) -> Result { 194 | let cb = self.buf.load_count()?; 195 | let l = self.buf.length; 196 | let n = { 197 | let data_start = unsafe { self.buf.data.offset(self.index as isize) }; 198 | let data_len = cmp::min(self.index + cb, l) - self.index; 199 | 200 | let n = if data_len == 0 { 0 } else { f(data_start, data_len) }; 201 | if n > data_len { 202 | Err(Error::CallbackReadTooMuch)? 203 | } 204 | n 205 | }; 206 | 207 | let c = self.buf.count().fetch_sub(n, Ordering::AcqRel); 208 | self.index = (self.index + n) % l; 209 | // dbg!("Recv: cb = {}, c = {}, l = {}, n = {}", cb, c, l, n); 210 | return Ok(Status { remaining: c - n, signal: c >= l && n > 0 }); 211 | } 212 | 213 | /// "Safe" version of recv. Will call your closure up to "count" times 214 | /// and depend on optimisation to avoid memory copies. 215 | /// 216 | /// # Panics 217 | /// 218 | /// Panics in case the buffer is corrupt. 
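    ///
    /// A minimal sketch, assuming a `Receiver<u16>` named `r` with items queued:
    ///
    /// ```ignore
    /// let mut total = 0u32;
    /// r.recv_foreach(2, |item| total += item as u32);
    /// ```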
219 | pub fn recv_foreach(&mut self, mut count: usize, mut f: F) -> Status { 220 | loop { 221 | let status = self 222 | .recv(|p, c| { 223 | let mut j = 0; 224 | while j < c && count > 0 { 225 | f(unsafe { ptr::read(p.offset(j as isize)) }); 226 | count -= 1; 227 | j += 1; 228 | } 229 | j 230 | }) 231 | .unwrap(); 232 | if status.remaining == 0 || count == 0 { 233 | return status; 234 | } 235 | } 236 | } 237 | 238 | /// Returns number of items that can be read 239 | pub fn read_count(&self) -> Result { self.buf.load_count() } 240 | 241 | /// Assume a ringbuf is set up at the location. 242 | /// 243 | /// A buffer where the first 64 bytes are zero is okay. 244 | /// 245 | /// # Safety 246 | /// 247 | /// You must ensure that "data" points to a readable and writable memory area of "length" bytes. 248 | pub unsafe fn attach(data: *mut u8, length: usize) -> Result { 249 | Ok(Self { buf: Buf::attach(data, length, false)?, index: 0 }) 250 | } 251 | } 252 | 253 | #[cfg(test)] 254 | mod tests { 255 | 256 | #[test] 257 | fn simple_test() { 258 | let mut v = vec![10; 100]; 259 | let (mut s, mut r) = super::channel(&mut v); 260 | // is it empty? 261 | r.recv(|_, _| panic!()).unwrap(); 262 | s.send(|d, l| { 263 | assert!(l > 0); 264 | unsafe { *d = 5u16 }; 265 | 1 266 | }) 267 | .unwrap(); 268 | r.recv(|d, l| { 269 | assert_eq!(l, 1); 270 | assert_eq!(unsafe { *d }, 5); 271 | 0 272 | }) 273 | .unwrap(); 274 | r.recv(|d, l| { 275 | assert_eq!(l, 1); 276 | assert_eq!(unsafe { *d }, 5); 277 | 1 278 | }) 279 | .unwrap(); 280 | r.recv(|_, _| panic!()).unwrap(); 281 | 282 | let mut i = 6; 283 | s.send_foreach(2, || { 284 | i += 1; 285 | i 286 | }); 287 | r.recv(|d, l| { 288 | assert_eq!(l, 2); 289 | let x = unsafe { std::ptr::read(d as *const [u16; 2]) }; 290 | assert_eq!(x, [7, 8]); 291 | 2 292 | }) 293 | .unwrap(); 294 | } 295 | 296 | #[test] 297 | fn full_buf_test() { 298 | assert_eq!(super::channel_bufsize::(3), 64 + 3 * 2); 299 | let mut q: Vec = vec![66; super::channel_bufsize::(3)]; 300 | let (mut s, mut r): (super::Sender, super::Receiver) = super::channel(&mut q); 301 | s.send(|dd, l| { 302 | assert_eq!(l, 3); 303 | unsafe { 304 | std::ptr::write(dd as *mut [u16; 3], [5, 8, 9]); 305 | } 306 | 2 307 | }) 308 | .unwrap(); 309 | let mut called = false; 310 | s.send_foreach(2, || { 311 | assert_eq!(called, false); 312 | called = true; 313 | 10 314 | }); 315 | s.send(|_, _| panic!()).unwrap(); 316 | r.recv(|_, l| { 317 | assert_eq!(l, 3); 318 | 0 319 | }) 320 | .unwrap(); 321 | s.send(|_, _| panic!()).unwrap(); 322 | r.recv(|d, l| { 323 | assert_eq!(l, 3); 324 | assert_eq!([5, 8, 10], unsafe { std::ptr::read(d as *const [u16; 3]) }); 325 | 1 326 | }) 327 | .unwrap(); 328 | s.send(|d, l| { 329 | assert_eq!(l, 1); 330 | unsafe { *d = 1 }; 331 | 1 332 | }) 333 | .unwrap(); 334 | s.send(|_, _| panic!()).unwrap(); 335 | r.recv(|d, l| { 336 | assert_eq!(l, 2); 337 | assert_eq!([8, 10], unsafe { std::ptr::read(d as *const [u16; 2]) }); 338 | 2 339 | }) 340 | .unwrap(); 341 | let mut called = false; 342 | r.recv_foreach(56, |d| { 343 | assert_eq!(called, false); 344 | called = true; 345 | assert_eq!(d, 1); 346 | }); 347 | } 348 | } 349 | -------------------------------------------------------------------------------- /src/sharedring.rs: -------------------------------------------------------------------------------- 1 | //! Creates shared memory ring buffers to be used between untrusted processes. 2 | //! 3 | //! 
The information to be transferred between processes through other means (pipes or D-Bus) is: 4 | //! * capacity 5 | //! * memfd file descriptor 6 | //! * empty signal file descriptor 7 | //! * full signal file descriptor 8 | 9 | use super::Error; 10 | use crate::mem::mfd::{HugetlbSize, MemfdOptions}; 11 | use crate::ringbuf::Status; 12 | use std::fs::File; 13 | use std::io::{Read, Write}; 14 | use std::os::unix::io::FromRawFd; 15 | use std::slice::from_raw_parts; 16 | use std::slice::from_raw_parts_mut; 17 | 18 | struct Inner { 19 | mmap: memmap2::MmapRaw, 20 | memfd: memfd::Memfd, 21 | empty_signal: File, 22 | full_signal: File, 23 | } 24 | 25 | fn page_size() -> usize { unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize } } 26 | 27 | fn round_to_page_size(capacity: usize) -> usize { 28 | let bytes = crate::ringbuf::channel_bufsize::(capacity); 29 | let ps = page_size(); 30 | let m = bytes % ps; 31 | if m == 0 { 32 | bytes 33 | } else { 34 | bytes + ps - m 35 | } 36 | } 37 | 38 | fn eventfd() -> Result { 39 | let x = unsafe { libc::eventfd(0, libc::EFD_CLOEXEC) }; 40 | if x == -1 { 41 | Err(std::io::Error::last_os_error()) 42 | } else { 43 | Ok(unsafe { File::from_raw_fd(x) }) 44 | } 45 | } 46 | 47 | impl Inner { 48 | fn new(capacity: usize, tlbsize: Option) -> Result { 49 | let bytes = round_to_page_size::(capacity); 50 | let mut opts = MemfdOptions::default().allow_sealing(true).close_on_exec(true); 51 | if tlbsize.is_some() { 52 | opts = opts.hugetlb(tlbsize); 53 | } 54 | 55 | let memfd = opts.create(std::any::type_name::())?; 56 | if tlbsize.is_none() { 57 | // hugetlb does not need/allow to set_len 58 | memfd.as_file().set_len(bytes as u64)?; 59 | } 60 | 61 | let empty_signal = eventfd()?; 62 | let full_signal = eventfd()?; 63 | let mmap = crate::mem::raw_memfd(&memfd, bytes)?; 64 | Ok(Self { mmap, memfd, empty_signal, full_signal }) 65 | } 66 | 67 | fn mlock(&mut self) -> Result<(), Error> { Ok(self.mmap.lock()?) } 68 | 69 | fn open(capacity: usize, file: File, empty_signal: File, full_signal: File) -> Result { 70 | let bytes = round_to_page_size::(capacity); 71 | let memfd = memfd::Memfd::try_from_file(file).map_err(|_| std::io::Error::last_os_error())?; 72 | let mmap = crate::mem::raw_memfd(&memfd, bytes)?; 73 | if mmap.len() < bytes { 74 | Err(crate::ringbuf::Error::BufTooSmall)? 75 | }; 76 | Ok(Self { mmap, memfd, empty_signal, full_signal }) 77 | } 78 | } 79 | 80 | pub struct Sender(Inner, crate::ringbuf::Sender); 81 | 82 | impl Sender { 83 | /// Sets up a new ringbuffer and returns the sender half. 84 | pub fn new(capacity: usize) -> Result { 85 | let inner = Inner::new::(capacity, None)?; 86 | let ringbuf = unsafe { crate::ringbuf::Sender::attach(inner.mmap.as_mut_ptr(), inner.mmap.len())? }; 87 | Ok(Self(inner, ringbuf)) 88 | } 89 | 90 | /// Create a new ringbuffer with hugetlb support and returns the sender half. 91 | /// Supports linux version 4.16+ only 92 | pub fn with_hugetlb(capacity: usize, tlbsize: HugetlbSize) -> Result { 93 | let inner = Inner::new::(capacity, Some(tlbsize))?; 94 | let ringbuf = unsafe { crate::ringbuf::Sender::attach(inner.mmap.as_mut_ptr(), inner.mmap.len())? }; 95 | Ok(Self(inner, ringbuf)) 96 | } 97 | 98 | /// mlock the backing memory to avoid it being put into swap 99 | pub fn mlock(&mut self) -> Result<(), Error> { self.0.mlock() } 100 | 101 | /// Attaches to a ringbuffer set up by the receiving side. 
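    ///
    /// A sketch of the sending side, assuming the capacity and the three file descriptors
    /// have already been received from the process that created the ringbuffer (for
    /// instance over D-Bus, as in the `examples` directory):
    ///
    /// ```ignore
    /// let mut s: Sender<f64> = Sender::open(capacity, memfd, empty_signal, full_signal)?;
    /// s.send_raw(|ptr: *mut f64, count| unsafe {
    ///     // Write at most `count` items through the raw pointer and
    ///     // return the number of items actually written.
    ///     *ptr = 1.0;
    ///     1
    /// })?;
    /// ```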
102 | pub fn open(capacity: usize, memfd: File, empty_signal: File, full_signal: File) -> Result { 103 | let inner = Inner::open::(capacity, memfd, empty_signal, full_signal)?; 104 | let ringbuf = unsafe { crate::ringbuf::Sender::attach(inner.mmap.as_mut_ptr(), inner.mmap.len())? }; 105 | Ok(Self(inner, ringbuf)) 106 | } 107 | 108 | /// Low-level access to the ringbuffer. 109 | /// 110 | /// Note that writing directly using these methods will not trigger a signal for the receiving side 111 | /// to wake up. 112 | pub fn sender_mut(&mut self) -> &mut crate::ringbuf::Sender { &mut self.1 } 113 | 114 | /// The file descriptor for the shared memory area 115 | pub fn memfd(&self) -> &memfd::Memfd { &self.0.memfd } 116 | /// The file descriptor written to when the receiving side should wake up 117 | pub fn empty_signal(&self) -> &File { &self.0.empty_signal } 118 | /// The file descriptor to register notification for in your favorite non-blocking framework (tokio, async-std etc). 119 | /// 120 | /// It is written to by the receiving side when the buffer is no longer full. 121 | pub fn full_signal(&self) -> &File { &self.0.full_signal } 122 | 123 | /// Sends one or more items through the ringbuffer. 124 | /// 125 | /// Because this is a ringbuffer between untrusted processes we can never create references to 126 | /// the data, so we have to resort to raw pointers. 127 | /// The closure receives a (ptr, count) pair which can be written to using e g `std::ptr::write`, 128 | /// and returns the number of items written to that memory area. 129 | /// If the buffer is full, the closure is not called. If there is more data that could be written 130 | /// (e g in another part of the ringbuffer), that is indicated in the returned `Status` struct. 131 | pub fn send_raw usize>(&mut self, f: F) -> Result { 132 | let status = self.sender_mut().send(f)?; 133 | if status.signal { 134 | self.empty_signal().write(&1u64.to_ne_bytes())?; 135 | } 136 | Ok(status) 137 | } 138 | 139 | /// Sends one or more items through the ringbuffer. 140 | /// 141 | /// The closure receives a slice to which it can write data and returns the number of items 142 | /// written. 143 | /// If the buffer is full, the closure is not called. If there is more data that could be written 144 | /// (e g in another part of the ringbuffer), that is indicated in the returned `Status` struct. 145 | /// 146 | /// # Safety 147 | /// 148 | /// Caller must ensure that no one can read or write the data area, except for 149 | /// at most one Sender (this one) and at most one Receiver, both set up correctly. 150 | pub unsafe fn send_trusted usize>(&mut self, f: F) -> Result { 151 | self.send_raw(|p, count| f(from_raw_parts_mut(p, count))) 152 | } 153 | 154 | /// For blocking scenarios, blocks until the channel is writable. 155 | pub fn block_until_writable(&mut self) -> Result { 156 | loop { 157 | let s = self.sender_mut().write_count()?; 158 | if s > 0 { 159 | return Ok(Status { remaining: s, signal: false }); 160 | }; 161 | let mut b = [0u8; 8]; 162 | self.full_signal().read(&mut b)?; 163 | } 164 | } 165 | } 166 | 167 | pub struct Receiver(Inner, crate::ringbuf::Receiver); 168 | 169 | impl Receiver { 170 | /// Sets up a new ringbuffer and returns the receiver half. 171 | pub fn new(capacity: usize) -> Result { 172 | let inner = Inner::new::(capacity, None)?; 173 | let ringbuf = unsafe { crate::ringbuf::Receiver::attach(inner.mmap.as_mut_ptr(), inner.mmap.len())? 
}; 174 | Ok(Self(inner, ringbuf)) 175 | } 176 | 177 | /// Create a new ringbuffer with hugetlb support and returns the receiver half. 178 | /// Supports linux version 4.16+ only 179 | pub fn with_hugetlb(capacity: usize, tlbsize: HugetlbSize) -> Result { 180 | let inner = Inner::new::(capacity, Some(tlbsize))?; 181 | let ringbuf = unsafe { crate::ringbuf::Receiver::attach(inner.mmap.as_mut_ptr(), inner.mmap.len())? }; 182 | Ok(Self(inner, ringbuf)) 183 | } 184 | 185 | /// Attaches to a ringbuffer set up by the sending side. 186 | pub fn open(capacity: usize, memfd: File, empty_signal: File, full_signal: File) -> Result { 187 | let inner = Inner::open::(capacity, memfd, empty_signal, full_signal)?; 188 | let ringbuf = unsafe { crate::ringbuf::Receiver::attach(inner.mmap.as_mut_ptr(), inner.mmap.len())? }; 189 | Ok(Self(inner, ringbuf)) 190 | } 191 | 192 | /// mlock the backing memory to avoid it being put into swap 193 | pub fn mlock(&mut self) -> Result<(), Error> { self.0.mlock() } 194 | 195 | /// Low-level access to the ringbuffer. 196 | /// 197 | /// Note that reading directly using these methods will not trigger a signal for the sending side 198 | /// to wake up. 199 | pub fn receiver_mut(&mut self) -> &mut crate::ringbuf::Receiver { &mut self.1 } 200 | /// The file descriptor for the shared memory area 201 | pub fn memfd(&self) -> &memfd::Memfd { &self.0.memfd } 202 | /// The file descriptor to register notification for in your favorite non-blocking framework (tokio, async-std etc). 203 | /// 204 | /// It is written to by the sending side when the buffer is no longer empty. 205 | pub fn empty_signal(&self) -> &File { &self.0.empty_signal } 206 | /// The file descriptor written to when the sending side should wake up 207 | pub fn full_signal(&self) -> &File { &self.0.full_signal } 208 | 209 | /// Receives data from the ringbuffer. 210 | /// 211 | /// Because this is a ringbuffer between untrusted processes we can never create references to 212 | /// the data, so we have to resort to raw pointers. 213 | /// The closure receives a (ptr, count) pair which can be read from using e g `std::ptr::read`, 214 | /// and returns the number of items that can be dropped from the ringbuffer. 215 | /// If the buffer is empty, the closure is not called. If there is more data that could be read 216 | /// (e g in another part of the ringbuffer), that is indicated in the returned `Status` struct. 217 | pub fn receive_raw usize>(&mut self, f: F) -> Result { 218 | let status = self.receiver_mut().recv(f)?; 219 | if status.signal { 220 | self.full_signal().write(&1u64.to_ne_bytes())?; 221 | } 222 | Ok(status) 223 | } 224 | 225 | /// Receives data from the ringbuffer. 226 | /// 227 | /// The closure receives a slice of data and returns the number of items that can be dropped 228 | /// from the ringbuffer. 229 | /// If the buffer is empty, the closure is not called. If there is more data that could be read 230 | /// (e g in another part of the ringbuffer), that is indicated in the returned `Status` struct. 231 | /// 232 | /// # Safety 233 | /// 234 | /// Caller must ensure that no one can read or write the data area, except for 235 | /// at most one Receiver (this one) and at most one Sender, both set up correctly. 236 | pub unsafe fn receive_trusted usize>(&mut self, f: F) -> Result { 237 | self.receive_raw(|p, count| f(from_raw_parts(p, count))) 238 | } 239 | 240 | /// For blocking scenarios, blocks until the channel is readable. 
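    ///
    /// A sketch of a blocking receive loop, as in the server example (assumes a
    /// `Receiver<f64>` named `r`):
    ///
    /// ```ignore
    /// loop {
    ///     r.block_until_readable()?;
    ///     r.receive_raw(|ptr: *const f64, count| unsafe {
    ///         let mut s = 0.0;
    ///         for i in 0..count { s += *ptr.offset(i as isize); }
    ///         println!("sum of this batch: {}", s);
    ///         count // drop everything we just read
    ///     })?;
    /// }
    /// ```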
241 | pub fn block_until_readable(&mut self) -> Result { 242 | loop { 243 | let s = self.receiver_mut().read_count()?; 244 | if s > 0 { 245 | return Ok(Status { remaining: s, signal: false }); 246 | }; 247 | let mut b = [0u8; 8]; 248 | self.empty_signal().read(&mut b)?; 249 | } 250 | } 251 | } 252 | 253 | #[test] 254 | fn simple() { 255 | let mut s: Sender = Sender::new(1000).unwrap(); 256 | assert!(s.sender_mut().write_count().unwrap() >= 1000); 257 | let memfd = s.memfd().as_file().try_clone().unwrap(); 258 | let e = s.empty_signal().try_clone().unwrap(); 259 | let f = s.full_signal().try_clone().unwrap(); 260 | let mut r: Receiver = Receiver::open(1000, memfd, e, f).unwrap(); 261 | assert_eq!(r.receiver_mut().read_count().unwrap(), 0); 262 | } 263 | --------------------------------------------------------------------------------