├── .github └── workflows │ └── test.yml ├── .gitignore ├── Cargo.toml ├── README.md ├── benches ├── bench_rootedrc.rs └── bench_rootedrefcell.rs ├── examples ├── shadow.rs └── shadow_hierarchy.rs ├── maint ├── checks │ ├── build_bench.sh │ ├── clippy.sh │ ├── fmt.sh │ ├── miri.sh │ └── test.sh └── hooks │ ├── pre-commit │ └── pre-push └── src ├── lib.rs ├── rc.rs └── refcell.rs /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # Syntax reference: 2 | # https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions 3 | 4 | name: Tests 5 | permissions: read-all 6 | 7 | defaults: 8 | run: 9 | shell: bash 10 | 11 | on: 12 | push: 13 | branches: [main] 14 | pull_request: 15 | types: [opened, synchronize] 16 | 17 | jobs: 18 | lint: 19 | runs-on: ubuntu-latest 20 | container: rust:1.60.0 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v3 24 | with: 25 | persist-credentials: false 26 | # Run on PR head instead of merge result. Running on the merge 27 | # result can give confusing results, and we require PR to be up to 28 | # date with target branch before merging, anyway. 
29 | # See https://github.com/shadow/shadow/issues/2166 30 | ref: ${{ github.event.pull_request.head.sha }} 31 | 32 | - name: fmt 33 | run: | 34 | rustup component add rustfmt 35 | ./maint/checks/fmt.sh 36 | 37 | - name: clippy 38 | run: | 39 | rustup component add clippy 40 | ./maint/checks/clippy.sh 41 | 42 | test: 43 | runs-on: ubuntu-latest 44 | container: rust:1.60.0 45 | steps: 46 | - name: Checkout 47 | uses: actions/checkout@v3 48 | with: 49 | persist-credentials: false 50 | ref: ${{ github.event.pull_request.head.sha }} 51 | 52 | - name: test 53 | run: ./maint/checks/test.sh 54 | 55 | miri: 56 | runs-on: ubuntu-latest 57 | container: rust:1.60.0 58 | steps: 59 | - name: Checkout 60 | uses: actions/checkout@v3 61 | with: 62 | persist-credentials: false 63 | ref: ${{ github.event.pull_request.head.sha }} 64 | 65 | - name: miri 66 | run: | 67 | rustup toolchain install nightly-2022-08-16 68 | rustup default nightly-2022-08-16 69 | rustup component add miri 70 | ./maint/checks/miri.sh 71 | 72 | bench: 73 | runs-on: ubuntu-latest 74 | container: rust:1.60.0 75 | steps: 76 | - name: Checkout 77 | uses: actions/checkout@v3 78 | with: 79 | persist-credentials: false 80 | ref: ${{ github.event.pull_request.head.sha }} 81 | 82 | - name: bench 83 | run: cargo bench 84 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "objgraph" 3 | version = "0.0.1" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | log="0.4.17" 10 | once_cell="1.13.0" 11 | rand="0.8.5" 12 | 13 | [[example]] 14 | name="shadow" 15 | 
path="examples/shadow.rs" 16 | 17 | [[example]] 18 | name="shadow-hierarchy" 19 | path="examples/shadow_hierarchy.rs" 20 | 21 | [dev-dependencies] 22 | atomic_refcell = "0.1" 23 | criterion = "0.3" 24 | core_affinity = "0.5.10" 25 | parking_lot = "0.12.0" 26 | 27 | [[bench]] 28 | name = "bench_rootedrc" 29 | harness = false 30 | 31 | [[bench]] 32 | name = "bench_rootedrefcell" 33 | harness = false 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This is a proof of concept for safe, efficient object graphs in Rust. It is 2 | inspired by the concurrency model used in 3 | [Shadow's](https://github.com/shadow/shadow) C implementation, and is intended 4 | as a potential path toward migrating Shadow's C code to Rust without first 5 | having to extensively refactor and/or introduce a lot of atomic operations 6 | (which are intrinsically moderately expensive, and can result in additional 7 | cache misses, and prevent the compiler from reordering some code). 8 | 9 | Shadow simulates a network of Hosts, each of which has a lock associated with 10 | it. Inside the Hosts are a graph of ref-counted objects. They are meant to only 11 | be accessed with the corresponding Host lock held, and do *not* take additional 12 | locks when manipulating the reference counts. 13 | 14 | Hosts are sent across Worker threads over the course of a simulation. 15 | 16 | Translating this model to Rust, we can't simply use `Rc` for the reference counts, 17 | since the Hosts would then not be `Send`. 18 | 19 | We could use `Arc`, but this would introduce a lot of new costly atomic operations. 20 | 21 | Here we encode Shadow's original safety model into Rust's type system. Shadow's 22 | host lock is replaced with a `crate::Root`, which is `!Sync`. 
Instances of 23 | `crate::rc::RootedRc` and `crate::refcell::RootedRefCell` are associated with a 24 | `Root`, and require the caller to prove they hold a reference to that `Root`; 25 | this allows them to avoid having to perform any additional atomic operations. 26 | 27 | It's not clear to me yet whether the performance gains are generally worth the 28 | extra complexity vs. just using more "mainstream" `Send` and `Sync` equivalents. 29 | In the case of shadow, and maybe other projects being ported from C, the idea is 30 | to allow porting C code to Rust code in a relatively straightforward way without 31 | having to worry too much about "death by a thousand cuts" performance 32 | degradation from introducing many new atomic operations. Once we have used this 33 | technique to migrate most of shadow's code to Rust, the plan will be to compare 34 | macro benchmarks with this crate's internals replaced by the more mainstream 35 | thread-safe equivalents. 36 | 37 | ## Performance And Send/Sync 38 | 39 | `RootedRc` is roughly half the cost of `Arc`, and about the same as `Rc`. From fastest to slowest: 40 | 41 | | benchmark | time | Send | Sync | 42 | | -------- | ------ | -- | -- | 43 | | **clone and drop/RootedRc** | 15.634 ns | Send where T: Sync + Send | Sync where T: Sync + Send | 44 | | clone and drop/Rc | 16.527 ns | !Send | !Sync | 45 | | clone and drop/Arc | 31.171 ns | Send where T: Sync + Send | Sync where T: Sync + Send | 46 | 47 | `RootedRefCell` is slightly slower than `RefCell`, as expected, but significantly faster 48 | than the next fastest thread-safe equivalent, `AtomicRefCell`. 
49 | 50 | From fastest to slowest: 51 | 52 | | benchmark | time | Send | Sync | 53 | | -------- | ------ | -- | -- | 54 | | borrow_mut/RefCell | 1.5223 ns | Send where T: Send | !Sync | 55 | | **borrow_mut/RootedRefCell** | 1.8411 ns | Send where T: Send | Sync where T: Send | 56 | | borrow_mut/AtomicRefCell | 6.6425 ns | Send where T: Send | Sync where T: Send | 57 | | borrow_mut/parking_lot::Mutex | 10.848 ns | Send where T: Send | Sync where T: Send | 58 | | borrow_mut/Mutex | 12.666 ns | Send where T: Send | Sync where T: Send | 59 | 60 | Benchmark sources are in `benches` and can be run with `cargo bench`. 61 | 62 | ## Usage and testing 63 | 64 | There are some examples of intended usage in the `examples` directory. 65 | 66 | See `maint/checks` for scripts to run tests, examples, miri, etc. 67 | 68 | `cargo bench` runs the included benchmarks. 69 | 70 | ## Status 71 | 72 | This is currently a sketch for discussion and analysis. It needs more review 73 | and testing to validate soundness. 
-------------------------------------------------------------------------------- /benches/bench_rootedrc.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; 2 | use objgraph::{rc::RootedRc, Root}; 3 | use std::{rc::Rc, sync::Arc}; 4 | 5 | #[inline(never)] 6 | fn rootedrc_clone_and_drop(root: &Root, x: RootedRc<()>) { 7 | x.clone(root).safely_drop(root); 8 | x.safely_drop(root); 9 | } 10 | 11 | #[inline(never)] 12 | fn arc_clone_and_drop(x: Arc<()>) { 13 | #[allow(clippy::redundant_clone)] 14 | let _ = x.clone(); 15 | } 16 | 17 | #[inline(never)] 18 | fn rc_clone_and_drop(x: Rc) -> i32 { 19 | #[allow(clippy::redundant_clone)] 20 | *x.clone() 21 | } 22 | 23 | fn criterion_benchmark(c: &mut Criterion) { 24 | { 25 | let mut group = c.benchmark_group("clone and drop"); 26 | group.bench_function("RootedRc", |b| { 27 | b.iter_batched( 28 | || { 29 | let root = Root::new(); 30 | let rc = RootedRc::new(&root, ()); 31 | (root, rc) 32 | }, 33 | |(root, rc)| rootedrc_clone_and_drop(&root, rc), 34 | BatchSize::SmallInput, 35 | ); 36 | }); 37 | group.bench_function("Arc", |b| { 38 | b.iter_batched(|| Arc::new(()), arc_clone_and_drop, BatchSize::SmallInput); 39 | }); 40 | group.bench_function("Rc", |b| { 41 | b.iter_batched(|| Rc::new(1), rc_clone_and_drop, BatchSize::SmallInput); 42 | }); 43 | } 44 | 45 | { 46 | let mut group = c.benchmark_group("cross-core clone"); 47 | const N: usize = 10000; 48 | group.bench_function("RootedRc", |b| { 49 | b.iter_batched( 50 | || { 51 | std::thread::spawn(|| { 52 | let mut core_ids = core_affinity::get_core_ids().unwrap(); 53 | // Exclude Current core from tests. 
54 | let setup_core_id = core_ids.pop().unwrap(); 55 | core_affinity::set_for_current(setup_core_id); 56 | let root = Root::new(); 57 | let mut v = Vec::new(); 58 | for _ in 0..black_box(N) { 59 | v.push(RootedRc::new(&root, ())); 60 | // No atomic operation here, but for consistency with Arc benchmark. 61 | let t = v.last().unwrap().clone(&root); 62 | t.safely_drop(&root); 63 | } 64 | (root, core_ids, v) 65 | }) 66 | .join() 67 | .unwrap() 68 | }, 69 | |(root, core_ids, v)| { 70 | std::thread::spawn(move || { 71 | for core_id in core_ids { 72 | core_affinity::set_for_current(core_id); 73 | for rc in &v { 74 | let v = rc.clone(&root); 75 | v.safely_drop(&root); 76 | } 77 | } 78 | // Safely drop contents of v 79 | for rc in v { 80 | rc.safely_drop(&root); 81 | } 82 | }) 83 | .join() 84 | .unwrap() 85 | }, 86 | BatchSize::SmallInput, 87 | ); 88 | }); 89 | group.bench_function("Arc", |b| { 90 | b.iter_batched( 91 | || { 92 | std::thread::spawn(|| { 93 | let mut core_ids = core_affinity::get_core_ids().unwrap(); 94 | // Exclude Current core from tests. 95 | let setup_core_id = core_ids.pop().unwrap(); 96 | core_affinity::set_for_current(setup_core_id); 97 | let mut v = Vec::new(); 98 | for _ in 0..black_box(N) { 99 | v.push(Arc::new(())); 100 | // Force an atomic operation on this core. 
101 | let _ = v.last().unwrap().clone(); 102 | } 103 | (core_ids, v) 104 | }) 105 | .join() 106 | .unwrap() 107 | }, 108 | |(core_ids, v)| { 109 | std::thread::spawn(move || { 110 | for core_id in core_ids { 111 | core_affinity::set_for_current(core_id); 112 | for rc in &v { 113 | let _ = rc.clone(); 114 | } 115 | } 116 | }) 117 | .join() 118 | .unwrap() 119 | }, 120 | BatchSize::SmallInput, 121 | ); 122 | }); 123 | } 124 | 125 | /* 126 | { 127 | let _lock = root.lock(); 128 | let mut group = c.benchmark_group("drop"); 129 | group.bench_function("RootedRc", |b| { 130 | b.iter_batched( 131 | || RootedRc::<(), _>::new(root.tag(), ()), 132 | |x| drop(x), 133 | BatchSize::SmallInput, 134 | ); 135 | }); 136 | group.bench_function("Arc", |b| { 137 | b.iter_batched(|| Arc::new(()), |x| drop(x), BatchSize::SmallInput); 138 | }); 139 | group.bench_function("Rc", |b| { 140 | b.iter_batched(|| Rc::new(()), |x| drop(x), BatchSize::SmallInput); 141 | }); 142 | } 143 | */ 144 | } 145 | 146 | criterion_group!(benches, criterion_benchmark); 147 | criterion_main!(benches); 148 | -------------------------------------------------------------------------------- /benches/bench_rootedrefcell.rs: -------------------------------------------------------------------------------- 1 | use std::{cell::RefCell, sync::Mutex}; 2 | 3 | use atomic_refcell::AtomicRefCell; 4 | use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; 5 | use objgraph::{refcell::RootedRefCell, Root}; 6 | 7 | #[inline(never)] 8 | fn rootedrefcell_borrow_mut(root: &Root, x: &RootedRefCell) { 9 | *x.borrow_mut(root) += 1; 10 | } 11 | 12 | #[inline(never)] 13 | fn mutex_borrow_mut(x: &Mutex) { 14 | *x.lock().unwrap() += 1; 15 | } 16 | 17 | #[inline(never)] 18 | fn parking_lot_mutex_borrow_mut(x: &parking_lot::Mutex) { 19 | *x.lock() += 1; 20 | } 21 | 22 | #[inline(never)] 23 | fn atomicrefcell_borrow_mut(x: &AtomicRefCell) { 24 | *x.borrow_mut() += 1; 25 | } 26 | 27 | #[inline(never)] 28 | fn 
refcell_borrow_mut(x: &RefCell) { 29 | *x.borrow_mut() += 1; 30 | } 31 | 32 | fn criterion_benchmark(c: &mut Criterion) { 33 | { 34 | let mut group = c.benchmark_group("borrow_mut"); 35 | group.bench_function("RootedRefCell", |b| { 36 | b.iter_batched_ref( 37 | || { 38 | let root = Root::new(); 39 | let x = RootedRefCell::new(&root, 0); 40 | (root, x) 41 | }, 42 | |(root, x)| rootedrefcell_borrow_mut(root, x), 43 | BatchSize::SmallInput, 44 | ); 45 | }); 46 | group.bench_function("Mutex", |b| { 47 | b.iter_batched_ref( 48 | || Mutex::new(0), 49 | |x| mutex_borrow_mut(x), 50 | BatchSize::SmallInput, 51 | ); 52 | }); 53 | group.bench_function("parking_lot::Mutex", |b| { 54 | b.iter_batched_ref( 55 | || parking_lot::Mutex::new(0), 56 | |x| parking_lot_mutex_borrow_mut(x), 57 | BatchSize::SmallInput, 58 | ); 59 | }); 60 | group.bench_function("AtomicRefCell", |b| { 61 | b.iter_batched_ref( 62 | || AtomicRefCell::new(0), 63 | |x| atomicrefcell_borrow_mut(x), 64 | BatchSize::SmallInput, 65 | ); 66 | }); 67 | group.bench_function("RefCell", |b| { 68 | b.iter_batched_ref( 69 | || RefCell::new(0), 70 | |x| refcell_borrow_mut(x), 71 | BatchSize::SmallInput, 72 | ); 73 | }); 74 | } 75 | } 76 | 77 | criterion_group!(benches, criterion_benchmark); 78 | criterion_main!(benches); 79 | -------------------------------------------------------------------------------- /examples/shadow.rs: -------------------------------------------------------------------------------- 1 | /// Sketch of how shared ownership of Descriptors might look in 2 | /// the [shadow](https://github.com/shadow/shadow) simulator. 
3 | use objgraph::{rc::RootedRc, Root}; 4 | use std::{collections::HashMap, thread}; 5 | 6 | struct Host { 7 | processes: HashMap, 8 | root: Root, 9 | } 10 | 11 | impl Drop for Host { 12 | fn drop(&mut self) { 13 | for (_, p) in self.processes.drain() { 14 | p.safely_drop(&self.root); 15 | } 16 | } 17 | } 18 | 19 | struct Process { 20 | descriptors: HashMap>, 21 | } 22 | 23 | impl Process { 24 | pub fn safely_drop(self, root: &Root) { 25 | for (_, d) in self.descriptors { 26 | d.safely_drop(root) 27 | } 28 | } 29 | } 30 | 31 | struct Descriptor { 32 | open: bool, 33 | } 34 | 35 | pub fn main() { 36 | let mut hosts = HashMap::::new(); 37 | 38 | // host1 has 2 processes, which have a shared Descriptor. 39 | // (Maybe one was forked from the other) 40 | let mut host1 = Host { 41 | processes: HashMap::new(), 42 | root: Root::new(), 43 | }; 44 | { 45 | let descriptor = RootedRc::new(&host1.root, Descriptor { open: true }); 46 | 47 | // Process 0 has a reference to the descriptor. 48 | host1.processes.insert( 49 | 0, 50 | Process { 51 | descriptors: HashMap::new(), 52 | }, 53 | ); 54 | host1 55 | .processes 56 | .get_mut(&0) 57 | .unwrap() 58 | .descriptors 59 | .insert(0, descriptor.clone(&host1.root)); 60 | 61 | // So does Process 1. 62 | host1.processes.insert( 63 | 1, 64 | Process { 65 | descriptors: HashMap::new(), 66 | }, 67 | ); 68 | host1 69 | .processes 70 | .get_mut(&1) 71 | .unwrap() 72 | .descriptors 73 | .insert(0, descriptor.clone(&host1.root)); 74 | 75 | descriptor.safely_drop(&host1.root); 76 | } 77 | hosts.insert(0, host1); 78 | 79 | // Process hosts in a worker thread 80 | let worker = thread::spawn(move || { 81 | for (host_id, host) in &mut hosts { 82 | // Dup a file descriptor. We clone RootedRc without any additional 83 | // atomic operations; it's protected by the host lock. 
84 | let descriptor = host.processes[&0].descriptors[&0].clone(&host.root); 85 | host.processes 86 | .get_mut(&0) 87 | .unwrap() 88 | .descriptors 89 | .insert(2, descriptor); 90 | 91 | // Iterate 92 | for (pid, process) in &host.processes { 93 | for (fid, descriptor) in &process.descriptors { 94 | println!( 95 | "host_id:{} pid:{} fid:{} open:{}", 96 | host_id, pid, fid, descriptor.open 97 | ); 98 | } 99 | } 100 | } 101 | hosts 102 | }); 103 | 104 | // Wait for worker to finish and get hosts back. 105 | let _hosts = worker.join().unwrap(); 106 | println!("worker done as expected"); 107 | } 108 | 109 | // For `cargo test --examples` 110 | #[test] 111 | fn test() { 112 | main(); 113 | } 114 | -------------------------------------------------------------------------------- /examples/shadow_hierarchy.rs: -------------------------------------------------------------------------------- 1 | /// Prototyping / examples for how this crate may be used in the 2 | /// [shadow](https://github.com/shadow/shadow) simulator. 3 | 4 | mod v1 { 5 | use objgraph::{refcell::RootedRefCell, Root}; 6 | 7 | /// Everything related to a single host, stored "flat". 
8 | struct HostObjs { 9 | root: Root, 10 | host: RootedRefCell, 11 | processes: RootedRefCell>>, 12 | threads: RootedRefCell>>, 13 | } 14 | 15 | struct Host {} 16 | impl Host { 17 | pub fn run(&mut self, objs: &HostObjs, pid: usize, tid: usize) { 18 | let processes_guard = objs.processes.borrow(&objs.root); 19 | let mut process_guard = processes_guard.get(pid).unwrap().borrow_mut(&objs.root); 20 | 21 | // Host bookkeeping 22 | 23 | process_guard.run(objs, self, tid); 24 | 25 | // Host bookkeeping 26 | } 27 | } 28 | 29 | struct Process {} 30 | impl Process { 31 | pub fn run(&mut self, objs: &HostObjs, host: &mut Host, tid: usize) { 32 | let threads_guard = objs.threads.borrow(&objs.root); 33 | let mut thread_guard = threads_guard.get(tid).unwrap().borrow_mut(&objs.root); 34 | 35 | // Process bookkeeping 36 | 37 | thread_guard.run(objs, host, self); 38 | 39 | // Process bookkeeping 40 | } 41 | } 42 | 43 | struct Thread {} 44 | impl Thread { 45 | pub fn run(&mut self, _objs: &HostObjs, _host: &mut Host, _process: &mut Process) { 46 | // Do stuff. run, invoke syscall handlers, etc. 47 | } 48 | } 49 | 50 | pub fn main() { 51 | // Create "the world" 52 | let objs = { 53 | let root = Root::new(); 54 | let host = RootedRefCell::new(&root, Host {}); 55 | let processes = RootedRefCell::new( 56 | &root, 57 | Vec::from([ 58 | RootedRefCell::new(&root, Process {}), 59 | RootedRefCell::new(&root, Process {}), 60 | ]), 61 | ); 62 | let threads = RootedRefCell::new( 63 | &root, 64 | Vec::from([ 65 | RootedRefCell::new(&root, Thread {}), 66 | RootedRefCell::new(&root, Thread {}), 67 | ]), 68 | ); 69 | HostObjs { 70 | root, 71 | host, 72 | processes, 73 | threads, 74 | } 75 | }; 76 | 77 | // Run thread tid=0 in process pid=0 78 | let mut host_guard = objs.host.borrow_mut(&objs.root); 79 | host_guard.run(&objs, 0, 0); 80 | // This works ok, but when we have a reference to any single thread or process, 81 | // we have to immutably borrow the whole list of threads or processes as well. 
82 | // 83 | // If we needed to mutate those lists, we'd need to 84 | } 85 | } 86 | 87 | /// Similar to above, but wrap individual processes and threads in a RootedRc, 88 | /// allowing us to decouple their lifetimes from the "owning" objects. 89 | /// 90 | /// This also allows us to nest the objects within each-other, though we need to 91 | /// be careful to ensure the RootedRc's are dropped explicitly to prevent leaks 92 | /// (or panics in debug builds). 93 | mod v2 { 94 | use objgraph::{rc::RootedRc, refcell::RootedRefCell, Root}; 95 | 96 | /// Everything related to a single host, stored "flat". 97 | struct HostObjs { 98 | root: Root, 99 | host: RootedRefCell, 100 | } 101 | impl Drop for HostObjs { 102 | fn drop(&mut self) { 103 | self.host.borrow_mut(&self.root).shutdown(&self.root); 104 | } 105 | } 106 | 107 | struct Host { 108 | processes: RootedRefCell>>>, 109 | } 110 | impl Host { 111 | pub fn run(&mut self, objs: &HostObjs, pid: usize, tid: usize) { 112 | let process = self 113 | .processes 114 | .borrow(&objs.root) 115 | .get(pid) 116 | .unwrap() 117 | .clone(&objs.root); 118 | let mut process_guard = process.borrow_mut(&objs.root); 119 | 120 | // Host bookkeeping 121 | 122 | process_guard.run(objs, self, tid); 123 | drop(process_guard); 124 | process.safely_drop(&objs.root) 125 | 126 | // Host bookkeeping 127 | } 128 | 129 | pub fn shutdown(&mut self, root: &Root) { 130 | let mut processes = self.processes.borrow_mut(root); 131 | for process in processes.drain(..) 
{ 132 | process.borrow_mut(root).shutdown(root); 133 | process.safely_drop(root); 134 | } 135 | } 136 | } 137 | 138 | struct Process { 139 | threads: RootedRefCell>>>, 140 | } 141 | impl Process { 142 | pub fn run(&mut self, objs: &HostObjs, host: &mut Host, tid: usize) { 143 | let thread = self 144 | .threads 145 | .borrow(&objs.root) 146 | .get(tid) 147 | .unwrap() 148 | .clone(&objs.root); 149 | let mut thread_guard = thread.borrow_mut(&objs.root); 150 | 151 | // Process bookkeeping 152 | 153 | thread_guard.run(objs, host, self); 154 | drop(thread_guard); 155 | thread.safely_drop(&objs.root); 156 | 157 | // Process bookkeeping 158 | } 159 | 160 | pub fn shutdown(&mut self, root: &Root) { 161 | let mut threads = self.threads.borrow_mut(root); 162 | for thread in threads.drain(..) { 163 | thread.safely_drop(root) 164 | } 165 | } 166 | } 167 | 168 | struct Thread {} 169 | impl Thread { 170 | pub fn run(&mut self, _objs: &HostObjs, _host: &mut Host, _process: &mut Process) { 171 | // Do stuff. run, invoke syscall handlers, etc. 
172 | } 173 | } 174 | 175 | pub fn main() { 176 | // Create "the world" 177 | let objs = { 178 | let root = Root::new(); 179 | let threads = RootedRefCell::new( 180 | &root, 181 | Vec::from([ 182 | RootedRc::new(&root, RootedRefCell::new(&root, Thread {})), 183 | RootedRc::new(&root, RootedRefCell::new(&root, Thread {})), 184 | ]), 185 | ); 186 | let processes = RootedRefCell::new( 187 | &root, 188 | Vec::from([RootedRc::new( 189 | &root, 190 | RootedRefCell::new(&root, Process { threads }), 191 | )]), 192 | ); 193 | let host = RootedRefCell::new(&root, Host { processes }); 194 | HostObjs { root, host } 195 | }; 196 | 197 | // Run thread tid=0 in process pid=0 198 | let mut host_guard = objs.host.borrow_mut(&objs.root); 199 | host_guard.run(&objs, 0, 0); 200 | } 201 | } 202 | 203 | pub fn main() { 204 | v1::main(); 205 | v2::main(); 206 | } 207 | 208 | // For `cargo test --examples` 209 | #[test] 210 | fn test() { 211 | main(); 212 | } 213 | -------------------------------------------------------------------------------- /maint/checks/build_bench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | cargo bench --no-run -------------------------------------------------------------------------------- /maint/checks/clippy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | cargo clippy --all-features --all-targets -- -D warnings -------------------------------------------------------------------------------- /maint/checks/fmt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | cargo fmt --all -- --check 6 | -------------------------------------------------------------------------------- /maint/checks/miri.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo 
pipefail 4 | 5 | # One of the tests is designed to leak memory, so we need to tell miri to ignore leaks altogether. 6 | RUST_BACKTRACE=1 MIRIFLAGS=-Zmiri-ignore-leaks cargo miri test 7 | RUST_BACKTRACE=1 cargo miri test --examples -------------------------------------------------------------------------------- /maint/checks/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | RUST_BACKTRACE=1 cargo test 6 | RUST_BACKTRACE=1 cargo test --examples -------------------------------------------------------------------------------- /maint/hooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | maint/checks/fmt.sh 6 | maint/checks/build_bench.sh 7 | maint/checks/clippy.sh 8 | maint/checks/miri.sh 9 | maint/checks/test.sh -------------------------------------------------------------------------------- /maint/hooks/pre-push: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sporksmith/objgraph/315923da710a55e3c35e5941caf9610ef79ce489/maint/hooks/pre-push -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // https://github.com/rust-lang/rfcs/blob/master/text/2585-unsafe-block-in-unsafe-fn.md 2 | #![deny(unsafe_op_in_unsafe_fn)] 3 | 4 | use std::{ 5 | marker::PhantomData, 6 | sync::atomic::{AtomicU32, Ordering}, 7 | }; 8 | 9 | use once_cell::sync::OnceCell; 10 | 11 | /// Every object root is assigned a Tag, which we ensure is globally unique. 12 | /// Each Tag value uniquely identifies a Root. 
13 | #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] 14 | struct Tag { 15 | prefix: TagPrefixType, 16 | suffix: TagSuffixType, 17 | } 18 | 19 | /// Larger sizes here reduce the chance of collision, which could lead to 20 | /// silently missing bugs in some cases. Note though that there would both 21 | /// have to be a collision, and the code would need to incorrectly try to 22 | /// access data using the wrong root lock. 23 | /// 24 | /// Increasing the size introduces some runtime overhead for storing, copying, 25 | /// and comparing tag values. 26 | type TagPrefixType = u32; 27 | 28 | /// Larger sizes here support a greater number of tags within a given prefix. 29 | /// 30 | /// Increasing the size introduces some runtime overhead for storing, copying, 31 | /// and comparing tag values. 32 | type TagSuffixType = u32; 33 | type TagSuffixAtomicType = AtomicU32; 34 | 35 | impl Tag { 36 | pub fn new() -> Self { 37 | // Every instance of this module uses a random prefix for tags. This is to 38 | // handle both the case where this module is used from multiple processes that 39 | // share memory, and to handle the case where multiple instances of this module 40 | // end up within a single process. 41 | static TAG_PREFIX: OnceCell = OnceCell::new(); 42 | let prefix = *TAG_PREFIX.get_or_init(rand::prelude::random); 43 | 44 | static NEXT_TAG_SUFFIX: TagSuffixAtomicType = TagSuffixAtomicType::new(0); 45 | let suffix: TagSuffixType = NEXT_TAG_SUFFIX.fetch_add(1, Ordering::Relaxed); 46 | 47 | // Detect overflow 48 | assert!(suffix != TagSuffixType::MAX); 49 | 50 | Self { prefix, suffix } 51 | } 52 | } 53 | 54 | /// Root of an "object graph". Locking a `Root` allows inexpensive access 55 | /// to associated `RootedRc`s and `RootedRefCell`s. 56 | pub struct Root { 57 | tag: Tag, 58 | 59 | // RootedRc and RootedRefCell rely on `Root` being `!Sync`. They take 60 | // immutable/shared references to Self to prove that no other thread 61 | // currently has access. 
62 | _notsync: std::marker::PhantomData>, 63 | } 64 | 65 | impl Root { 66 | pub fn new() -> Self { 67 | let tag = Tag::new(); 68 | Self { 69 | tag, 70 | _notsync: PhantomData, 71 | } 72 | } 73 | 74 | /// This root's globally unique tag. 75 | fn tag(&self) -> Tag { 76 | self.tag 77 | } 78 | } 79 | 80 | impl Default for Root { 81 | fn default() -> Self { 82 | Self::new() 83 | } 84 | } 85 | 86 | pub mod rc; 87 | pub mod refcell; 88 | -------------------------------------------------------------------------------- /src/rc.rs: -------------------------------------------------------------------------------- 1 | use crate::{Root, Tag}; 2 | use std::cell::Cell; 3 | 4 | struct RootedRcInternal { 5 | val: T, 6 | strong_count: Cell, 7 | } 8 | 9 | impl RootedRcInternal { 10 | pub fn new(val: T) -> Self { 11 | Self { 12 | val, 13 | strong_count: Cell::new(1), 14 | } 15 | } 16 | 17 | pub fn inc_strong(&self) { 18 | self.strong_count.set(self.strong_count.get() + 1) 19 | } 20 | 21 | pub fn dec_strong(&self) { 22 | self.strong_count.set(self.strong_count.get() - 1) 23 | } 24 | } 25 | 26 | /// Analagous to `std::rc::Rc`. In particular like `Rc` and unlike 27 | /// `std::sync::Arc`, it doesn't perform any atomic operations internally, 28 | /// making it relatively inexpensive 29 | /// 30 | /// Unlike `Rc`, this type `Send` and `Sync` if `T` is. This is safe because 31 | /// the owner is required to prove ownership of the associated `Root` lock 32 | /// to perform any sensitive operations. 33 | /// 34 | /// Instances must be destroyed using the `safely_drop` method, which validates 35 | /// that the lock is held before manipulating reference counts, etc. 36 | /// Failing to call `safely_drop` results in a `panic` in debug builds, 37 | /// or leaking the object in release builds. 38 | pub struct RootedRc { 39 | tag: Tag, 40 | internal: *mut RootedRcInternal, 41 | } 42 | 43 | impl RootedRc { 44 | /// Creates a new object associated with `root`. 
45 | pub fn new(root: &Root, val: T) -> Self { 46 | Self { 47 | tag: root.tag(), 48 | internal: Box::into_raw(Box::new(RootedRcInternal::new(val))), 49 | } 50 | } 51 | 52 | /// Like Clone::clone, but requires that the corresponding Root is locked. 53 | /// 54 | /// Intentionally named clone to shadow Self::deref()::clone(). 55 | /// 56 | /// Panics if `guard` did not originate from the associated `Root`. 57 | pub fn clone(&self, root: &Root) -> Self { 58 | assert_eq!( 59 | root.tag, self.tag, 60 | "Tried using a lock for {:?} instead of {:?}", 61 | root.tag, self.tag 62 | ); 63 | // SAFETY: We've verified that the lock is held by inspection of the 64 | // lock itself. We hold a reference to the guard, guaranteeing that the 65 | // lock is held while `unchecked_clone` runs. 66 | unsafe { self.unchecked_clone() } 67 | } 68 | 69 | /// # Safety 70 | /// 71 | /// There must be no other threads accessing this object, or clones of this object. 72 | unsafe fn unchecked_clone(&self) -> Self { 73 | // SAFETY: Pointer should be valid by construction. Caller is 74 | // responsible for ensuring no parallel access. 75 | let internal = unsafe { self.internal.as_ref().unwrap() }; 76 | internal.inc_strong(); 77 | Self { 78 | tag: self.tag, 79 | internal: self.internal, 80 | } 81 | } 82 | 83 | /// Safely drop this object, dropping the internal value if no other 84 | /// references to it remain. 85 | /// 86 | /// Instances that are dropped *without* calling this method cannot be 87 | /// safely cleaned up. In debug builds this will result in a `panic`. 88 | /// Otherwise the underlying reference count will simply not be decremented, 89 | /// ultimately resulting in the enclosed value never being dropped. 90 | pub fn safely_drop(mut self, root: &Root) { 91 | assert_eq!( 92 | root.tag, self.tag, 93 | "Tried using a lock for {:?} instead of {:?}", 94 | root.tag, self.tag 95 | ); 96 | let drop_internal = { 97 | // SAFETY: pointer points to valid data by construction. 
98 | let internal = unsafe { self.internal.as_ref() }.unwrap(); 99 | internal.dec_strong(); 100 | internal.strong_count.get() == 0 101 | }; 102 | if drop_internal { 103 | // SAFETY: There are no remaining strong references to 104 | // self.internal, and we know that no other threads could be 105 | // manipulating the reference count in parallel since we have the 106 | // root lock. 107 | unsafe { Box::from_raw(self.internal) }; 108 | } 109 | self.internal = std::ptr::null_mut(); 110 | } 111 | } 112 | 113 | impl Drop for RootedRc { 114 | fn drop(&mut self) { 115 | if !self.internal.is_null() { 116 | log::error!("Dropped without calling `safely_drop`"); 117 | 118 | // We *can* continue without violating Rust safety properties; the 119 | // underlying object will just be leaked, since the ref count will 120 | // never reach zero. 121 | // 122 | // If we're not already panicking, it's useful to panic here to make 123 | // the leak more visible. 124 | // 125 | // If we are already panicking though, that may already explain how 126 | // a call to `safely_drop` got skipped, and panicking again would 127 | // just obscure the original panic. 128 | #[cfg(debug_assertions)] 129 | if !std::thread::panicking() { 130 | panic!("Dropped without calling `safely_drop`"); 131 | } 132 | } 133 | } 134 | } 135 | 136 | // SAFETY: Normally the inner `Rc` would inhibit this type from being `Send` and 137 | // `Sync`. However, RootedRc ensures that `Rc`'s reference count can only be 138 | // accessed when the root is locked by the current thread, effectively 139 | // synchronizing the reference count. 
// NOTE(review): the generic parameters on these impls were stripped by the
// extraction ("<T>" read as an HTML tag). `T: Send + Sync` is the natural
// bound for a shared-ownership handle — confirm against the upstream source.
unsafe impl<T: Send + Sync> Send for RootedRc<T> {}
unsafe impl<T: Send + Sync> Sync for RootedRc<T> {}

impl<T> std::ops::Deref for RootedRc<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The internal pointer is valid by construction. `safely_drop`
        // nulls it, but that method consumes `self`, so a nulled pointer can
        // never be observed here.
        &unsafe { self.internal.as_ref() }.unwrap().val
    }
}

#[cfg(test)]
mod test_rooted_rc {
    use std::{sync::Arc, thread};

    use crate::Root;

    use super::*;

    #[test]
    fn construct_and_drop() {
        let root = Root::new();
        let rc = RootedRc::new(&root, 0);
        rc.safely_drop(&root)
    }

    #[test]
    #[should_panic]
    fn drop_without_lock_panics() {
        let root = Root::new();
        let _ = RootedRc::new(&root, 0);
    }

    #[test]
    fn send_to_worker_thread() {
        let root = Root::new();
        let rc = RootedRc::new(&root, 0);
        thread::spawn(move || {
            // Can access immutably
            let _ = *rc + 2;
            // Need to explicitly drop, since it mutates refcount.
            rc.safely_drop(&root);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn send_to_worker_thread_and_retrieve() {
        let root = Root::new();
        let root = thread::spawn(move || {
            let rc = RootedRc::new(&root, 0);
            rc.safely_drop(&root);
            root
        })
        .join()
        .unwrap();
        let rc = RootedRc::new(&root, 0);
        rc.safely_drop(&root);
    }

    #[test]
    fn clone_to_worker_thread() {
        let root = Root::new();
        let rc = RootedRc::new(&root, 0);

        // Create a clone of rc that we'll pass to worker thread.
        let rc_thread = rc.clone(&root);

        // Worker takes ownership of rc_thread and root;
        // Returns ownership of root.
        let root = thread::spawn(move || {
            let _ = *rc_thread;
            rc_thread.safely_drop(&root);
            root
        })
        .join()
        .unwrap();

        // Take the lock to drop rc
        rc.safely_drop(&root);
    }

    #[test]
    fn threads_contend_over_lock() {
        let root = Arc::new(std::sync::Mutex::new(Root::new()));
        let rc = RootedRc::new(&root.lock().unwrap(), 0);

        let threads: Vec<_> = (0..100)
            .map(|_| {
                // Create a clone of rc that we'll pass to worker thread.
                let rc = rc.clone(&root.lock().unwrap());
                let root = root.clone();

                thread::spawn(move || {
                    let rootlock = root.lock().unwrap();
                    let rc2 = rc.clone(&rootlock);
                    rc.safely_drop(&rootlock);
                    rc2.safely_drop(&rootlock);
                })
            })
            .collect();

        for handle in threads {
            handle.join().unwrap();
        }

        rc.safely_drop(&root.lock().unwrap());
    }
}

// ---------------------------------------------------------------------------
// src/refcell.rs
// ---------------------------------------------------------------------------

use crate::{Root, Tag};
use std::cell::{Cell, UnsafeCell};

/// Analogous to `std::cell::RefCell`. In particular like `RefCell` and unlike
/// `std::sync::Mutex`, it doesn't perform any atomic operations internally,
/// making it relatively inexpensive.
///
/// Unlike `RefCell`, this type is `Send` and `Sync` if `T` is `Send`. This is
/// safe because the owner is required to prove ownership of the associated
/// `Root` lock to perform any sensitive operations.
pub struct RootedRefCell<T> {
    // Identifies the `Root` whose lock guards this cell.
    tag: Tag,
    val: UnsafeCell<T>,
    // Number of outstanding shared borrows.
    // NOTE(review): the Cell's type parameter was stripped by the extraction;
    // an unsigned counter (`u32`) is assumed — confirm against upstream.
    reader_count: Cell<u32>,
    // Whether a mutable borrow is outstanding.
    writer: Cell<bool>,
}

impl<T> RootedRefCell<T> {
    /// Create a RootedRefCell associated with `root`.
20 | pub fn new(root: &Root, val: T) -> Self { 21 | Self { 22 | tag: root.tag(), 23 | val: UnsafeCell::new(val), 24 | reader_count: Cell::new(0), 25 | writer: Cell::new(false), 26 | } 27 | } 28 | 29 | /// Borrow a reference. Panics if `root_guard` is for the wrong `Root`, or 30 | /// if this object is alread mutably borrowed. 31 | pub fn borrow<'a>( 32 | &'a self, 33 | // This 'a statically enforces that the root lock can't be dropped 34 | // while the returned guard is still outstanding. i.e. it is part 35 | // of the safety proof for making Self Send and Sync. 36 | root: &'a Root, 37 | ) -> RootedRefCellRef<'a, T> { 38 | // Prove that the lock is held for this tag. 39 | assert_eq!( 40 | root.tag, self.tag, 41 | "Expected {:?} Got {:?}", 42 | self.tag, root.tag 43 | ); 44 | 45 | assert!(!self.writer.get()); 46 | 47 | self.reader_count.set(self.reader_count.get() + 1); 48 | 49 | // Borrow from the guard to ensure the lock can't be dropped. 50 | RootedRefCellRef { guard: self } 51 | } 52 | 53 | /// Borrow a mutable reference. Panics if `root_guard` is for the wrong 54 | /// `Root`, or if this object is already borrowed. 55 | pub fn borrow_mut<'a>( 56 | &'a self, 57 | // 'a required here for safety, as for `borrow`. 58 | root: &'a Root, 59 | ) -> RootedRefCellRefMut<'a, T> { 60 | // Prove that the lock is held for this tag. 
61 | assert_eq!( 62 | root.tag, self.tag, 63 | "Expected {:?} Got {:?}", 64 | self.tag, root.tag 65 | ); 66 | 67 | assert!(!self.writer.get()); 68 | assert!(self.reader_count.get() == 0); 69 | 70 | self.writer.set(true); 71 | 72 | RootedRefCellRefMut { guard: self } 73 | } 74 | 75 | pub fn into_inner(self) -> T { 76 | self.val.into_inner() 77 | } 78 | } 79 | 80 | unsafe impl Send for RootedRefCell {} 81 | unsafe impl Sync for RootedRefCell {} 82 | 83 | pub struct RootedRefCellRef<'a, T> { 84 | guard: &'a RootedRefCell, 85 | } 86 | 87 | impl<'a, T> std::ops::Deref for RootedRefCellRef<'a, T> { 88 | type Target = T; 89 | 90 | fn deref(&self) -> &Self::Target { 91 | unsafe { self.guard.val.get().as_ref().unwrap() } 92 | } 93 | } 94 | 95 | impl<'a, T> Drop for RootedRefCellRef<'a, T> { 96 | fn drop(&mut self) { 97 | self.guard 98 | .reader_count 99 | .set(self.guard.reader_count.get() - 1); 100 | } 101 | } 102 | 103 | pub struct RootedRefCellRefMut<'a, T> { 104 | guard: &'a RootedRefCell, 105 | } 106 | 107 | impl<'a, T> std::ops::Deref for RootedRefCellRefMut<'a, T> { 108 | type Target = T; 109 | 110 | fn deref(&self) -> &Self::Target { 111 | unsafe { self.guard.val.get().as_ref().unwrap() } 112 | } 113 | } 114 | 115 | impl<'a, T> std::ops::DerefMut for RootedRefCellRefMut<'a, T> { 116 | fn deref_mut(&mut self) -> &mut Self::Target { 117 | unsafe { self.guard.val.get().as_mut().unwrap() } 118 | } 119 | } 120 | 121 | impl<'a, T> Drop for RootedRefCellRefMut<'a, T> { 122 | fn drop(&mut self) { 123 | self.guard.writer.set(false); 124 | } 125 | } 126 | 127 | #[cfg(test)] 128 | mod test_rooted_refcell { 129 | use std::thread; 130 | 131 | use super::*; 132 | 133 | use crate::rc::RootedRc; 134 | use crate::Root; 135 | 136 | #[test] 137 | fn construct_and_drop() { 138 | let root = Root::new(); 139 | let _ = RootedRefCell::new(&root, 0); 140 | } 141 | 142 | #[test] 143 | fn share_with_worker_thread() { 144 | let root = Root::new(); 145 | let rc = RootedRc::new(&root, 
RootedRefCell::new(&root, 0)); 146 | let root = { 147 | let rc = { rc.clone(&root) }; 148 | thread::spawn(move || { 149 | let mut borrow = rc.borrow_mut(&root); 150 | *borrow = 3; 151 | // Drop rc with lock still held. 152 | drop(borrow); 153 | rc.safely_drop(&root); 154 | root 155 | }) 156 | .join() 157 | .unwrap() 158 | }; 159 | let borrow = rc.borrow(&root); 160 | assert_eq!(*borrow, 3); 161 | drop(borrow); 162 | rc.safely_drop(&root); 163 | } 164 | } 165 | --------------------------------------------------------------------------------