├── src ├── manually_traced │ ├── anyhow.rs │ ├── parking_lot.rs │ ├── stdalloc.rs │ ├── stdlib.rs │ ├── indexmap.rs │ ├── arrayvec.rs │ └── mod.rs ├── hash_map.rs ├── epsilon │ ├── alloc │ │ └── arena.rs │ ├── alloc.rs │ ├── handle.rs │ └── layout.rs ├── prelude.rs ├── allocator.rs ├── cell.rs ├── errors.rs ├── vec │ └── cell.rs ├── macros.rs ├── array │ └── repr.rs ├── array.rs ├── serde │ └── hack.rs ├── hash_map │ └── indexmap.rs └── serde.rs ├── .gitignore ├── lefthook.yml ├── libs ├── derive │ ├── Cargo.toml │ ├── tests │ │ ├── deserialize.rs │ │ └── basic.rs │ └── src │ │ └── lib.rs ├── context │ ├── Cargo.toml │ └── src │ │ ├── state │ │ ├── mod.rs │ │ └── nosync.rs │ │ ├── utils.rs │ │ └── lib.rs └── simple │ ├── tests │ ├── errors.rs │ ├── trait_objects.rs │ └── arrays.rs │ ├── Cargo.toml │ ├── examples │ ├── binary_trees.rs │ └── binary_trees_parallel.rs │ └── src │ └── alloc.rs ├── LICENSE.md ├── bench ├── binary_trees.py ├── binary_trees.lua ├── binary_trees.go └── BinaryTree.java ├── tests └── epsilon.rs ├── Cargo.toml ├── run_examples.py └── README.md /src/manually_traced/anyhow.rs: -------------------------------------------------------------------------------- 1 | crate::impl_nulltrace_for_static!(anyhow::Error); 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Rust 2 | /target/ 3 | **/*.rs.bk 4 | Cargo.lock 5 | 6 | # IntelliJ 7 | .idea 8 | *.iml 9 | 10 | # Bench 11 | bench/*.class 12 | perf.data 13 | -------------------------------------------------------------------------------- /lefthook.yml: -------------------------------------------------------------------------------- 1 | skip_output: 2 | - meta 3 | - success 4 | - summary 5 | pre-commit: 6 | commands: 7 | rustfmt: 8 | tags: formatter 9 | glob: "*.rs" 10 | run: cargo fmt --check -- {staged_files} 11 | 12 | -------------------------------------------------------------------------------- /src/manually_traced/parking_lot.rs: -------------------------------------------------------------------------------- 1 | //! Support for parking-lot types 2 | use parking_lot::{Mutex, RwLock}; 3 | 4 | use zerogc::{NullTrace, Trace}; 5 | use zerogc_derive::unsafe_gc_impl; 6 | 7 | unsafe_trace_lock!(Mutex, target = T; |lock| lock.get_mut(), |lock| lock.lock()); 8 | unsafe_trace_lock!(RwLock, target = T; |lock| lock.get_mut(), |lock| lock.write()); 9 | -------------------------------------------------------------------------------- /src/hash_map.rs: -------------------------------------------------------------------------------- 1 | //! A garbage collected HashMap implementation 2 | //! 3 | //! Right now, the only implementation 4 | //! is [GcIndexMap]. It is a garbage collected 5 | //! version of [indexmap::IndexMap](https://docs.rs/indexmap/1.7.0/indexmap/map/struct.IndexMap.html). 6 | //! 7 | //! In the future, unordered maps may be possible 8 | //! (although they'll likely require much more work). 9 | pub mod indexmap; 10 | 11 | /// The default hasher for garbage collected maps. 
12 | pub type DefaultHasher = ahash::RandomState; 13 | 14 | pub use self::indexmap::GcIndexMap; 15 | -------------------------------------------------------------------------------- /src/epsilon/alloc/arena.rs: -------------------------------------------------------------------------------- 1 | use std::alloc::Layout; 2 | use std::ptr::NonNull; 3 | 4 | use bumpalo::Bump; 5 | 6 | use super::EpsilonAlloc; 7 | 8 | pub struct BumpEpsilonAlloc(Bump); 9 | impl EpsilonAlloc for BumpEpsilonAlloc { 10 | #[inline] 11 | fn new() -> Self { 12 | BumpEpsilonAlloc(Bump::new()) 13 | } 14 | #[inline] 15 | fn alloc_layout(&self, layout: Layout) -> NonNull { 16 | self.0.alloc_layout(layout) 17 | } 18 | #[inline] 19 | unsafe fn free_alloc(&self, _target: NonNull, _layout: Layout) {} 20 | const NEEDS_EXPLICIT_FREE: bool = false; 21 | } 22 | -------------------------------------------------------------------------------- /src/prelude.rs: -------------------------------------------------------------------------------- 1 | //! The prelude for `zergogc`, 2 | //! containing a set of commonly used 3 | //! types and macros. 4 | //! 5 | //! This should really contain everything a garbage 6 | //! collected program needs to use the API. 7 | 8 | // macros 9 | pub use crate::{freeze_context, safepoint, safepoint_recurse, unfreeze_context}; 10 | // Basic collector types 11 | pub use crate::{Gc, GcContext, GcHandle, GcSimpleAlloc, GcSystem, GcVisitor, HandleCollectorId}; 12 | // Traits for user code to implement 13 | pub use crate::{GcRebrand, GcSafe, NullTrace, Trace, TraceImmutable, TrustedDrop}; 14 | // TODO: Should this trait be auto-imported??? 15 | pub use crate::array::{GcArray, GcString}; 16 | pub use crate::cell::GcCell; 17 | pub use crate::vec::GcVec; 18 | pub use crate::AssumeNotTraced; 19 | pub use crate::CollectorId; 20 | -------------------------------------------------------------------------------- /libs/derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zerogc-derive" 3 | description = "Procedural derive for zerogc's garbage collection" 4 | version.workspace = true 5 | authors.workspace = true 6 | repository.workspace = true 7 | license.workspace = true 8 | edition.workspace = true 9 | readme = "../../README.md" 10 | 11 | [lib] 12 | proc-macro = true 13 | 14 | [dev-dependencies] 15 | zerogc = { version = "0.2.0-alpha.7", path = "../..", features = ["serde1"] } 16 | serde = { version = "1" } 17 | 18 | [dependencies] 19 | # Proc macros 20 | syn = { version = "1.0.55", features = ["full", "extra-traits", "visit", "fold"] } 21 | quote = "1.0.8" 22 | darling = "0.13" 23 | proc-macro2 = "1" 24 | # Utilities for argument parsing 25 | proc-macro-kwargs = "0.1.1" 26 | # Misc 27 | indexmap = "1" 28 | itertools = "0.10.1" 29 | 30 | [features] 31 | # Indicates that zerogc was compiled with support for serde, 32 | __serde-internal = [] 33 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Nicholas Schlabach 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to 
whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /libs/context/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zerogc-context" 3 | description = "Handles the context of a zerogc collector." 4 | version.workspace = true 5 | authors.workspace = true 6 | repository.workspace = true 7 | edition.workspace = true 8 | readme = "../../README.md" 9 | license = "MIT" 10 | 11 | [dependencies] 12 | zerogc = { path = "../..", version = "0.2.0-alpha.6" } 13 | zerogc-derive = { path = "../derive", version = "0.2.0-alpha.6" } 14 | once_cell = { version = "1.5", optional = true } 15 | # Concurrency 16 | parking_lot = { version = "0.11", optional = true } 17 | crossbeam-utils = { version = "0.8", optional = true } 18 | # Logging 19 | slog = "2.7" 20 | 21 | [features] 22 | default = [ 23 | "sync", # Support thread-safety by default 24 | "std" 25 | ] 26 | # Use the standard library (required for `sync`) 27 | std = [] 28 | # This will allow multiple threads to access the garbage collector 29 | # by creating a separate context for each. 30 | # 31 | # Thread safe collectors can have increased overhead 32 | # by requiring communication between threads. 
33 | sync = [ 34 | "parking_lot", 35 | "crossbeam-utils", 36 | "std" 37 | ] 38 | 39 | -------------------------------------------------------------------------------- /src/epsilon/alloc.rs: -------------------------------------------------------------------------------- 1 | use std::alloc::Layout; 2 | use std::ptr::NonNull; 3 | 4 | #[cfg(feature = "epsilon-arena-alloc")] 5 | mod arena; 6 | 7 | pub trait EpsilonAlloc { 8 | fn new() -> Self; 9 | fn alloc_layout(&self, layout: Layout) -> NonNull; 10 | unsafe fn free_alloc(&self, target: NonNull, layout: Layout); 11 | const NEEDS_EXPLICIT_FREE: bool; 12 | } 13 | 14 | #[cfg(feature = "epsilon-arena-alloc")] 15 | pub type Default = arena::BumpEpsilonAlloc; 16 | #[cfg(not(feature = "epsilon-arena-alloc"))] 17 | pub type Default = StdEpsilonAlloc; 18 | 19 | pub struct StdEpsilonAlloc; 20 | impl EpsilonAlloc for StdEpsilonAlloc { 21 | #[inline] 22 | fn new() -> Self { 23 | StdEpsilonAlloc 24 | } 25 | 26 | #[inline] 27 | fn alloc_layout(&self, layout: Layout) -> NonNull { 28 | const EMPTY: &[u8] = b""; 29 | if layout.size() == 0 { 30 | return NonNull::from(EMPTY).cast(); 31 | } 32 | // SAFETY: We checked for layout.size() == 0 33 | NonNull::new(unsafe { std::alloc::alloc(layout) }) 34 | .unwrap_or_else(|| std::alloc::handle_alloc_error(layout)) 35 | } 36 | 37 | #[inline] 38 | unsafe fn free_alloc(&self, target: NonNull, layout: Layout) { 39 | if layout.size() == 0 { 40 | return; // We returned our dummy empty alloc 41 | } 42 | std::alloc::dealloc(target.as_ptr(), layout) 43 | } 44 | 45 | const NEEDS_EXPLICIT_FREE: bool = true; 46 | } 47 | -------------------------------------------------------------------------------- /bench/binary_trees.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | def bottom_up_tree(depth): 5 | if depth > 0: 6 | depth -= 1 7 | return bottom_up_tree(depth), bottom_up_tree(depth) 8 | else: 9 | return None 10 | 11 | 12 | def item_check(tree): 13 | if tree is not None: 14 | first, second = tree 15 | return 1 + item_check(first) + item_check(second) 16 | else: 17 | return 1 18 | 19 | 20 | def run(n, min_depth, max_depth): 21 | stretch_depth = max_depth + 1 22 | stretch_tree = bottom_up_tree(stretch_depth) 23 | print("stretch tree of depth {}\t check: {}".format( 24 | stretch_depth, item_check(stretch_tree) 25 | )) 26 | del stretch_tree 27 | 28 | long_lived_tree = bottom_up_tree(max_depth) 29 | 30 | for depth in range(min_depth, max_depth, 2): 31 | iterations = 1 << (max_depth - depth + min_depth) 32 | check = 0 33 | for i in range(iterations): 34 | check += item_check(bottom_up_tree(depth)) 35 | print("{}\t trees of depth {}\t check: {}".format(iterations, depth, check)) 36 | 37 | print("long lived tree of depth {}\t check: {}\n".format( 38 | max_depth, item_check(long_lived_tree) 39 | )) 40 | 41 | 42 | if __name__ == "__main__": 43 | try: 44 | n = int(sys.argv[1]) 45 | except IndexError: 46 | n = 10 47 | min_depth = 4 48 | max_depth = min_depth + 2 49 | if max_depth < n: 50 | max_depth = n 51 | run(n, min_depth, max_depth) 52 | 53 | -------------------------------------------------------------------------------- /libs/derive/tests/deserialize.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use zerogc_derive::{GcDeserialize, NullTrace, Trace}; 4 | 5 | use serde::Deserialize; 6 | use zerogc::epsilon::EpsilonCollectorId; 7 | use zerogc::prelude::*; 8 | use zerogc::SimpleAllocCollectorId; 9 | 10 
| #[derive(Trace, GcDeserialize)] 11 | #[zerogc(collector_ids(EpsilonCollectorId))] 12 | struct BasicDeserialize<'gc> { 13 | test: Gc<'gc, String, EpsilonCollectorId>, 14 | } 15 | 16 | #[derive(Trace, GcDeserialize)] 17 | #[zerogc(collector_ids(Id))] 18 | struct DeserializeParameterized<'gc, T: GcSafe<'gc, Id>, Id: SimpleAllocCollectorId> { 19 | test: Gc<'gc, Vec, Id>, 20 | } 21 | 22 | #[derive(NullTrace, GcDeserialize, Deserialize)] 23 | #[zerogc(serde(delegate))] 24 | #[allow(unused)] 25 | struct DelegatingDeserialize { 26 | foo: String, 27 | bar: i32, 28 | doesnt: DoesntImplGcDeserialize, 29 | } 30 | 31 | #[derive(Trace, GcDeserialize)] 32 | #[allow(unused)] 33 | #[zerogc(collector_ids(Id))] 34 | struct DeserializeWith<'gc, Id: CollectorId> { 35 | foo: String, 36 | #[zerogc(serde(delegate))] 37 | doesnt_gc_deser: DoesntImplGcDeserialize, 38 | #[zerogc(serde(deserialize_with = "but_its_a_unit", bound(deserialize = "")))] 39 | doesnt_deser_at_all: DoesntDeserAtAll, 40 | marker: PhantomData<&'gc Id>, 41 | deser: Gc<'gc, String, Id>, 42 | } 43 | 44 | #[derive(NullTrace, serde::Deserialize)] 45 | #[allow(unused)] 46 | struct DoesntImplGcDeserialize { 47 | foo: String, 48 | } 49 | 50 | fn but_its_a_unit<'de, D: serde::Deserializer<'de>>( 51 | _deser: D, 52 | ) -> Result { 53 | Ok(DoesntDeserAtAll {}) 54 | } 55 | 56 | #[derive(NullTrace)] 57 | struct DoesntDeserAtAll {} 58 | -------------------------------------------------------------------------------- /bench/binary_trees.lua: -------------------------------------------------------------------------------- 1 | -- The Computer Language Benchmarks Game 2 | -- https://salsa.debian.org/benchmarksgame-team/benchmarksgame/ 3 | -- contributed by Mike Pall 4 | -- modified by Sebastian Engel to be parallel, derived from mandelbrot-lua-6 5 | 6 | -- called with the following arguments on the command line; 7 | -- 1: Initial depth of the tree 8 | -- 2: number of children to spawn (defaults to 6, which works well on 4-way) 9 | -- If this is a child, then there will be additional parameters; 10 | -- 3: current tree depth 11 | -- 4: chunk start 12 | -- 5: chunk end 13 | 14 | 15 | local write = io.write 16 | 17 | local function BottomUpTree(depth) 18 | if depth > 0 then 19 | depth = depth - 1 20 | local left, right = BottomUpTree(depth), BottomUpTree(depth) 21 | return { left, right } 22 | else 23 | return { } 24 | end 25 | end 26 | 27 | local function ItemCheck(tree) 28 | if tree[1] then 29 | return 1 + ItemCheck(tree[1]) + ItemCheck(tree[2]) 30 | else 31 | return 1 32 | end 33 | end 34 | 35 | local N = tonumber(arg and arg[1]) or 10 36 | local mindepth = 4 37 | local maxdepth = mindepth + 2 38 | if maxdepth < N then maxdepth = N end 39 | 40 | do 41 | local stretchdepth = maxdepth + 1 42 | local stretchtree = BottomUpTree(stretchdepth) 43 | write(string.format("stretch tree of depth %d\t check: %d\n", 44 | stretchdepth, ItemCheck(stretchtree))) 45 | end 46 | 47 | local longlivedtree = BottomUpTree(maxdepth) 48 | 49 | for depth=mindepth,maxdepth,2 do 50 | local iterations = 2 ^ (maxdepth - depth + mindepth) 51 | local check = 0 52 | for i=1,iterations do 53 | check = check + ItemCheck(BottomUpTree(depth)) 54 | end 55 | write(string.format("%d\t trees of depth %d\t check: %d\n", iterations, depth, check)) 56 | end 57 | 58 | write(string.format("long lived tree of depth %d\t check: %d\n", 59 | maxdepth, ItemCheck(longlivedtree))) 60 | 61 | -------------------------------------------------------------------------------- /bench/binary_trees.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strconv" 7 | ) 8 | 9 | type Tree struct { 10 | first, second *Tree 11 | } 12 | 13 | func bottomUpTree(depth int) *Tree { 14 | if depth > 0 { 15 | depth -= 1 16 | return &Tree{bottomUpTree(depth), bottomUpTree(depth)} 17 | } else { 18 | return nil 19 | } 20 | } 21 | 22 | func (tree *Tree) itemCheck() int { 23 | if tree == nil { 24 | return 1 25 | } else { 26 | return 1 + tree.first.itemCheck() + tree.second.itemCheck() 27 | } 28 | } 29 | 30 | func run(n, minDepth, maxDepth int) { 31 | { 32 | stretchDepth := maxDepth + 1 33 | stretchTree := bottomUpTree(stretchDepth) 34 | fmt.Printf( 35 | "stretch tree of depth %v\t check: %v\n", 36 | stretchDepth, stretchTree.itemCheck(), 37 | ) 38 | } 39 | longLivedTree := bottomUpTree(maxDepth) 40 | for depth := minDepth; depth < maxDepth; depth += 2 { 41 | iterations := 1 << (maxDepth - depth + minDepth) 42 | check := 0 43 | for i := 0; i < iterations; i++ { 44 | check += bottomUpTree(depth).itemCheck() 45 | } 46 | fmt.Printf( 47 | "%v\t trees of depth %v\t check: %v\n", 48 | iterations, depth, check, 49 | ) 50 | } 51 | fmt.Printf( 52 | "long lived tree of depth %v\t check: %v\n", 53 | maxDepth, longLivedTree.itemCheck(), 54 | ) 55 | } 56 | 57 | func main() { 58 | n := 10 59 | if len(os.Args) >= 2 { 60 | i, err := strconv.Atoi(os.Args[1]) 61 | if err != nil { 62 | panic( 63 | fmt.Errorf("Invalid int %g", err), 64 | ) 65 | } 66 | n = i 67 | } 68 | minDepth := 4 69 | maxDepth := minDepth + 2 70 | if maxDepth < n { 71 | maxDepth = n 72 | } 73 | run(n, minDepth, maxDepth) 74 | } -------------------------------------------------------------------------------- /libs/simple/tests/errors.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use zerogc_derive::Trace; 4 | 5 | use zerogc::array::GcString; 6 | use zerogc::prelude::*; 7 | use zerogc_simple::{CollectorId, Gc, SimpleCollector, SimpleCollectorContext as GcContext}; 8 | 9 | #[derive(Debug, thiserror::Error, Trace)] 10 | #[zerogc(collector_ids(CollectorId))] 11 | pub enum OurError<'gc> { 12 | #[error("Bad gc string: {0}")] 13 | BadGcString(GcString<'gc, CollectorId>), 14 | #[error("Bad gc int: {0}")] 15 | BadGcInt(Gc<'gc, i32>), 16 | #[error("Bad non-gc string: {0}")] 17 | BadOtherString(String), 18 | } 19 | 20 | fn implicitly_alloc<'gc>(ctx: &'gc GcContext, val: i32) -> Result> { 21 | match val { 22 | 0 => Err(OurError::BadGcString(ctx.alloc_str("gc foo"))), 23 | 1 => Err(OurError::BadOtherString(String::from("boxed foo"))), 24 | 2 => Err(OurError::BadGcInt(ctx.alloc(15))), 25 | _ => Ok(String::from("sensible result")), 26 | } 27 | } 28 | 29 | fn into_anyhow<'gc>(ctx: &'gc GcContext, val: i32) -> Result { 30 | let s = implicitly_alloc(ctx, val).map_err(|e| ctx.alloc_error(e))?; 31 | Ok(format!("Result: {}", s)) 32 | } 33 | 34 | #[test] 35 | fn test_errors() { 36 | let collector = SimpleCollector::create(); 37 | let ctx = collector.create_context(); 38 | fn display_anyhow(e: anyhow::Error) -> String { 39 | format!("{}", e) 40 | } 41 | assert_eq!( 42 | into_anyhow(&ctx, 0).map_err(display_anyhow), 43 | Err("Bad gc string: gc foo".into()) 44 | ); 45 | assert_eq!( 46 | into_anyhow(&ctx, 1).map_err(display_anyhow), 47 | Err("Bad non-gc string: boxed foo".into()) 48 | ); 49 | assert_eq!( 50 | into_anyhow(&ctx, 2).map_err(display_anyhow), 51 | Err("Bad gc int: 15".into()) 52 | ); 53 | assert_eq!( 54 | 
into_anyhow(&ctx, 3).map_err(display_anyhow), 55 | Ok("Result: sensible result".into()) 56 | ); 57 | } 58 | -------------------------------------------------------------------------------- /tests/epsilon.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use zerogc_derive::Trace; 4 | 5 | use zerogc::epsilon::{self, EpsilonCollectorId, EpsilonContext, EpsilonSystem}; 6 | use zerogc::{safepoint_recurse, Gc, GcArray, GcSimpleAlloc}; 7 | 8 | #[derive(Trace)] 9 | #[zerogc(collector_ids(EpsilonCollectorId))] 10 | pub struct Test<'gc> { 11 | val: i32, 12 | rec: Option, EpsilonCollectorId>>, 13 | } 14 | 15 | fn recurse<'gc>(ctx: &'gc EpsilonContext, val: i32, test: Gc<'gc, Test, EpsilonCollectorId>) { 16 | let res = ctx.alloc(Test { 17 | val: 52, 18 | rec: Some(test), 19 | }); 20 | assert_eq!(res.rec.unwrap().val, val); 21 | } 22 | 23 | #[test] 24 | fn simple() { 25 | let leaking = EpsilonSystem::leak(); 26 | let ctx = leaking.new_context(); 27 | assert_eq!(*ctx.alloc(14i32).value(), 14); 28 | assert_eq!(ctx.alloc_slice_copy(b"foo").as_slice(), b"foo"); 29 | assert_eq!(ctx.alloc(Test { val: 42, rec: None }).val, 42); 30 | } 31 | 32 | #[test] 33 | fn recursive() { 34 | let leaking = EpsilonSystem::leak(); 35 | let mut ctx = leaking.new_context(); 36 | let first = ctx.alloc(Test { val: 18, rec: None }); 37 | safepoint_recurse!(ctx, first, |ctx, root| recurse(ctx, 18, root)); 38 | } 39 | 40 | #[test] 41 | fn static_alloc() { 42 | fn recurse<'gc, T: ?Sized + PartialEq + Debug>( 43 | _ctx: &'gc EpsilonContext, 44 | expected: &T, 45 | test: Gc<'gc, T, EpsilonCollectorId>, 46 | ) { 47 | assert_eq!(test.value(), expected); 48 | } 49 | fn recurse_array<'gc, T: PartialEq + Debug>( 50 | _ctx: &'gc EpsilonContext, 51 | expected: &[T], 52 | test: GcArray<'gc, T, EpsilonCollectorId>, 53 | ) { 54 | assert_eq!(test.as_slice(), expected); 55 | } 56 | const BAR: &i32 = &12; 57 | let sys = EpsilonSystem::leak(); 58 | let ctx = sys.new_context(); 59 | recurse(&ctx, BAR, epsilon::gc(BAR)); 60 | const FOO: &[u8] = b"Do you wanna build a snowman?"; 61 | let array: GcArray = epsilon::gc_array(FOO); 62 | recurse_array(&ctx, &*FOO, array); 63 | } 64 | -------------------------------------------------------------------------------- /bench/BinaryTree.java: -------------------------------------------------------------------------------- 1 | public class BinaryTree { 2 | private final BinaryTree first, second; 3 | public BinaryTree(BinaryTree first, BinaryTree second) { 4 | this.first = first; 5 | this.second = second; 6 | } 7 | public static BinaryTree bottomUpTree(int depth) { 8 | if (depth > 0) { 9 | depth -= 1; 10 | return new BinaryTree(bottomUpTree(depth), bottomUpTree(depth)); 11 | } else { 12 | return null; 13 | } 14 | } 15 | public static int itemCheck(BinaryTree tree) { 16 | if (tree != null) { 17 | return 1 + itemCheck(tree.first) + itemCheck(tree.second); 18 | } else { 19 | return 1; 20 | } 21 | } 22 | public static void run(int n, int minDepth, int maxDepth) { 23 | { 24 | int stretchDepth = maxDepth + 1; 25 | BinaryTree stretchTree = bottomUpTree(stretchDepth); 26 | System.out.println("stretch tree of depth " + stretchDepth + 27 | "\t check: " + itemCheck(stretchTree) 28 | ); 29 | } 30 | BinaryTree longLivedTree = bottomUpTree(maxDepth); 31 | for (int depth = minDepth; depth < maxDepth; depth += 2) { 32 | int iterations = 1 << (maxDepth - depth + minDepth); 33 | int check = 0; 34 | for (int i = 0; i < iterations; i += 1) { 35 | check += 
itemCheck(bottomUpTree(depth)); 36 | } 37 | System.out.println(Integer.toString(iterations) + "\t trees of depth " + 38 | depth + "\t check: " + check); 39 | } 40 | System.out.println("long lived tree of depth " + maxDepth + 41 | "\t check: " + itemCheck(longLivedTree)); 42 | } 43 | public static void main(String[] args) { 44 | int n; 45 | if (args.length >= 1) { 46 | n = Integer.parseInt(args[0]); 47 | } else { 48 | n = 10; 49 | } 50 | int min_depth = 4; 51 | int max_depth = min_depth + 2; 52 | if (max_depth < n) max_depth = n; 53 | BinaryTree.run(n, min_depth, max_depth); 54 | } 55 | } -------------------------------------------------------------------------------- /src/manually_traced/stdalloc.rs: -------------------------------------------------------------------------------- 1 | //! Implementations for types in the standard `alloc` crate 2 | //! 3 | //! These can be used in `#![no_std]` crates without requiring 4 | //! the entire standard library. 5 | #[cfg(not(feature = "std"))] 6 | use alloc::boxed::Box; 7 | use alloc::rc::Rc; 8 | #[cfg(not(feature = "std"))] 9 | use alloc::string::String; 10 | use alloc::sync::Arc; 11 | #[cfg(not(feature = "std"))] 12 | use alloc::vec::Vec; 13 | 14 | use crate::prelude::*; 15 | 16 | use zerogc_derive::unsafe_gc_impl; 17 | 18 | unsafe_gc_impl! { 19 | target => Vec, 20 | params => [T], 21 | null_trace => { where T: NullTrace }, 22 | NEEDS_TRACE => T::NEEDS_TRACE, 23 | NEEDS_DROP => true, // Internal memory 24 | collector_id => *, 25 | trace_template => |self, visitor| { 26 | // Delegate to slice 27 | visitor.#trace_func::<[T]>(#b**self as #b [T]) 28 | }, 29 | deserialize => unstable_horrible_hack, 30 | } 31 | unsafe_gc_impl! { 32 | target => Box, 33 | params => [T], 34 | null_trace => { where T: NullTrace }, 35 | NEEDS_TRACE => T::NEEDS_TRACE, 36 | NEEDS_DROP => true, // Internal memory 37 | collector_id => *, 38 | trace_template => |self, visitor| { 39 | visitor.#trace_func::(#b **self) 40 | }, 41 | deserialize => unstable_horrible_hack, 42 | } 43 | // We can only trace `Rc` and `Arc` if the inner type implements `TraceImmutable` 44 | unsafe_gc_impl! { 45 | target => Rc, 46 | params => [T: TraceImmutable], 47 | null_trace => { where T: NullTrace }, 48 | NEEDS_TRACE => T::NEEDS_TRACE, 49 | NEEDS_DROP => true, // Internal memory 50 | collector_id => *, 51 | trace_template => |self, visitor| { 52 | // We must always visit immutable, since we have shared references 53 | visitor.trace_immutable::(&**self) 54 | }, 55 | } 56 | unsafe_gc_impl! { 57 | target => Arc, 58 | params => [T: TraceImmutable], 59 | null_trace => { where T: NullTrace }, 60 | NEEDS_TRACE => T::NEEDS_TRACE, 61 | NEEDS_DROP => true, // Internal memory 62 | collector_id => *, 63 | trace_template => |self, visitor| { 64 | // We must always visit immutable, since we have shared references 65 | visitor.trace_immutable::(&**self) 66 | }, 67 | } 68 | // String is a primitive with no internal references 69 | unsafe_trace_primitive!(String); 70 | -------------------------------------------------------------------------------- /src/manually_traced/stdlib.rs: -------------------------------------------------------------------------------- 1 | //! Tracing implementations for the standard library 2 | //! 3 | //! Types that are in `libcore` and are `#![no_std]` should go in the core module, 4 | //! but anything that requires the rest of the stdlib (including collections and allocations), 5 | //! should go in this module. 
6 | use std::collections::{HashMap, HashSet}; 7 | 8 | use zerogc_derive::unsafe_gc_impl; 9 | 10 | use crate::prelude::*; 11 | 12 | unsafe_gc_impl! { 13 | target => HashMap, 14 | params => [K: TraceImmutable, V, S: 'static], 15 | bounds => { 16 | /* 17 | * We require S: 'static so that we know S: NullTrace 18 | */ 19 | Trace => { where K: TraceImmutable, V: Trace, S: 'static }, 20 | TraceImmutable => { where K: TraceImmutable, V: TraceImmutable, S: 'static }, 21 | TrustedDrop => { where K: TrustedDrop, V: TrustedDrop, S: 'static }, 22 | GcSafe => { where K: TraceImmutable + GcSafe<'gc, Id>, V: GcSafe<'gc, Id>, S: 'static }, 23 | }, 24 | null_trace => { where K: NullTrace, V: NullTrace, S: NullTrace }, 25 | NEEDS_TRACE => K::NEEDS_TRACE || V::NEEDS_TRACE, 26 | NEEDS_DROP => true, // Internal memory 27 | collector_id => *, 28 | trace_template => |self, visitor| { 29 | for (key, value) in self.#iter() { 30 | visitor.trace_immutable::(key)?; 31 | visitor.#trace_func::(value)?; 32 | } 33 | // NOTE: Because S: 'static, we can assume S: NullTrace 34 | Ok(()) 35 | }, 36 | } 37 | 38 | unsafe_gc_impl! { 39 | target => HashSet, 40 | params => [T: TraceImmutable, S: 'static], 41 | bounds => { 42 | /* 43 | * We require S: 'static so that we know S: NullTrace 44 | */ 45 | Trace => { where T: TraceImmutable, S: 'static }, 46 | TraceImmutable => { where T: TraceImmutable, S: 'static }, 47 | TrustedDrop => { where T: TrustedDrop, S: 'static }, 48 | GcSafe => { where T: TraceImmutable + GcSafe<'gc, Id>, S: 'static }, 49 | }, 50 | null_trace => { where T: NullTrace, S: 'static }, 51 | NEEDS_TRACE => T::NEEDS_TRACE, 52 | NEEDS_DROP => true, // Internal memory 53 | collector_id => *, 54 | trace_template => |self, visitor| { 55 | for val in self.iter() { 56 | visitor.trace_immutable::(val)?; 57 | } 58 | // NOTE: Because S: 'static, we can assume S: NullTrace 59 | Ok(()) 60 | }, 61 | } 62 | -------------------------------------------------------------------------------- /src/manually_traced/indexmap.rs: -------------------------------------------------------------------------------- 1 | use indexmap::{IndexMap, IndexSet}; 2 | 3 | use crate::prelude::*; 4 | 5 | use zerogc_derive::unsafe_gc_impl; 6 | 7 | unsafe_gc_impl! { 8 | target => IndexMap, 9 | params => [K, V, S: 'static], 10 | bounds => { 11 | Trace => { where K: Trace, V: Trace, S: 'static }, 12 | TraceImmutable => { where K: TraceImmutable, V: TraceImmutable, S: 'static }, 13 | TrustedDrop => { where K: TrustedDrop, V: TrustedDrop, S: 'static }, 14 | GcSafe => { where K: GcSafe<'gc, Id>, V: GcSafe<'gc, Id>, S: 'static }, 15 | GcRebrand => { where K: GcRebrand<'new_gc, Id>, V: GcRebrand<'new_gc, Id>, K::Branded: Sized, V::Branded: Sized } 16 | }, 17 | branded_type => IndexMap, 18 | null_trace => { where K: NullTrace, V: NullTrace }, 19 | NEEDS_TRACE => K::NEEDS_TRACE || V::NEEDS_TRACE, 20 | NEEDS_DROP => true, // Internal memory 21 | collector_id => *, 22 | trace_mut => |self, visitor| { 23 | for idx in 0..self.len() { 24 | let (key, value) = self.get_index_mut(idx).unwrap(); 25 | visitor.trace::(key)?; 26 | visitor.trace::(value)?; 27 | } 28 | // NOTE: S: 'static implies S: NullTrace 29 | Ok(()) 30 | }, 31 | trace_immutable => |self, visitor| { 32 | for (key, value) in self.iter() { 33 | visitor.trace_immutable::(key)?; 34 | visitor.trace_immutable::(value)?; 35 | } 36 | // NOTE: S: 'static implies S: NullTrace 37 | Ok(()) 38 | }, 39 | } 40 | 41 | unsafe_gc_impl! 
{ 42 | target => IndexSet, 43 | params => [T: TraceImmutable, S: 'static], 44 | null_trace => { where T: NullTrace }, 45 | bounds => { 46 | Trace => { where T: TraceImmutable, S: 'static }, 47 | TraceImmutable => { where T: TraceImmutable, S: 'static }, 48 | TrustedDrop => { where T: TrustedDrop, S: 'static }, 49 | GcSafe => { where T: GcSafe<'gc, Id>, S: 'static }, 50 | GcRebrand => { where T: GcRebrand<'new_gc, Id>, S: 'static, T::Branded: Sized + TraceImmutable }, 51 | }, 52 | branded_type => IndexSet, 53 | NEEDS_TRACE => T::NEEDS_TRACE, 54 | NEEDS_DROP => true, // Internal memory 55 | collector_id => *, 56 | trace_template => |self, visitor| { 57 | for val in self.iter() { 58 | visitor.trace_immutable::(val)?; 59 | } 60 | Ok(()) 61 | }, 62 | } 63 | -------------------------------------------------------------------------------- /libs/simple/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zerogc-simple" 3 | description = "Lightweight mark/sweep collector for zerogc." 4 | version.workspace = true 5 | authors.workspace = true 6 | repository.workspace =true 7 | license.workspace = true 8 | edition.workspace = true 9 | readme = "../../README.md" 10 | 11 | [dependencies] 12 | inherent = "1" 13 | zerogc = { path = "../..", version = "0.2.0-alpha.6" } 14 | once_cell = { version = "1.5", optional = true } 15 | # Shared impl 16 | zerogc-context = { path = "../context", version = "0.2.0-alpha.6", default-features = false } 17 | zerogc-derive = { path = "../derive", version = "0.2.0-alpha.6" } 18 | # Concurrency 19 | parking_lot = { version = "0.11", optional = true } 20 | # Logging 21 | slog = "2.7" 22 | 23 | [features] 24 | default = [ 25 | "small-object-arenas", # Without this, allocating small objects is slow 26 | "sync", # Thread-safety by default 27 | "multiple-collectors", # By default, allow multiple collectors 28 | ] 29 | # Use very fast dedicated arenas for small objects. 30 | # This makes allocation much faster 31 | # Time spent in malloc (even in heavy workloads) drops to near zero 32 | # This can also improve memory significantly by avoiding per-object overheads 33 | # 34 | # However, it increases code complexity and is more 35 | # agressive (memory wise) then delegating all work to std::alloc 36 | # TODO: Return unused memory to the operating systems 37 | # TODO: Thread-local caching (make arenas fast again) 38 | small-object-arenas = ["once_cell"] 39 | # Use recursion to implicitly track the grey stack 40 | # This risks stack overflow at a possible performance gain 41 | # See commit 9a9634d68a4933d 42 | implicit-grey-stack = [] 43 | # Allow multiple threads to access the garbage collector 44 | # by creating a seperate context for each. 45 | # 46 | # This can increase overhead by requiring communication between threads. 47 | sync = ["zerogc-context/sync", "parking_lot"] 48 | # Allow multiple collectors to exist at once 49 | # Otherwise, there's a single global collector (useful in VMs) 50 | # 51 | # Even if multiple collectors are enabled, pointers from 52 | # one collector can't be safely mixed with other collectors. 
53 | multiple-collectors = [] 54 | 55 | [[test]] 56 | name = "errors" 57 | required-features = ["sync"] 58 | 59 | [dev-dependencies] 60 | # Used for examples :) 61 | zerogc-derive = { path = "../derive" } 62 | # Used for binary_trees parallel example 63 | rayon = "1.3" 64 | slog-term = "2.6" 65 | # Used to test the 'error' type 66 | anyhow = "1" 67 | thiserror = "1" 68 | zerogc = { path = "../..", features = ["errors"] } 69 | 70 | -------------------------------------------------------------------------------- /src/epsilon/handle.rs: -------------------------------------------------------------------------------- 1 | use std::ptr::NonNull; 2 | use std::rc::Rc; 3 | 4 | use crate::prelude::*; 5 | 6 | use super::{EpsilonCollectorId, EpsilonContext, EpsilonSystem, State}; 7 | 8 | pub struct GcHandle> { 9 | /// The reference to the state, 10 | /// which keeps our data alive 11 | state: Rc, 12 | ptr: *const T, 13 | } 14 | impl> Clone for GcHandle { 15 | fn clone(&self) -> Self { 16 | GcHandle { 17 | state: Rc::clone(&self.state), 18 | ptr: self.ptr, 19 | } 20 | } 21 | } 22 | unsafe impl> zerogc::GcHandle for GcHandle { 23 | type System = EpsilonSystem; 24 | type Id = EpsilonCollectorId; 25 | 26 | #[inline] 27 | fn use_critical(&self, func: impl FnOnce(&T) -> R) -> R { 28 | func(unsafe { &*self.ptr }) 29 | } 30 | 31 | fn bind_to<'new_gc>( 32 | &self, 33 | context: &'new_gc EpsilonContext, 34 | ) -> Gc<'new_gc, T::Branded, Self::Id> 35 | where 36 | T: GcRebrand<'new_gc, Self::Id>, 37 | { 38 | // TODO: Does the simple collector assert the ids are equal? 39 | assert_eq!( 40 | context.state.as_ptr() as *const State, 41 | &*self.state as *const State 42 | ); 43 | unsafe { 44 | Gc::from_raw(NonNull::new_unchecked(std::mem::transmute_copy::< 45 | *const T, 46 | *const T::Branded, 47 | >(&self.ptr) 48 | as *mut T::Branded)) 49 | } 50 | } 51 | } 52 | zerogc_derive::unsafe_gc_impl!( 53 | target => GcHandle, 54 | params => [T: GcSafe<'static, EpsilonCollectorId>], 55 | bounds => { 56 | Trace => { where T: ?Sized }, 57 | TraceImmutable => { where T: ?Sized }, 58 | TrustedDrop => { where T: ?Sized }, 59 | GcSafe => { where T: ?Sized }, 60 | }, 61 | null_trace => { where T: ?Sized }, 62 | NEEDS_DROP => true, 63 | NEEDS_TRACE => false, 64 | branded_type => Self, 65 | trace_template => |self, visitor| { Ok(()) } 66 | ); 67 | 68 | unsafe impl HandleCollectorId for EpsilonCollectorId { 69 | type Handle = GcHandle where T: GcSafe<'static, Self> + ?Sized; 70 | 71 | fn create_handle<'gc, T>(_gc: Gc<'gc, T, Self>) -> Self::Handle 72 | where 73 | T: GcSafe<'gc, Self> + GcRebrand<'static, Self> + ?Sized, 74 | { 75 | unimplemented!("epsilon collector can't convert Gc -> GcContext") 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /libs/simple/tests/trait_objects.rs: -------------------------------------------------------------------------------- 1 | use core::cell::Cell; 2 | 3 | use zerogc::{safepoint, trait_object_trace, DynTrace, GcSimpleAlloc, Trace}; 4 | 5 | use slog::Logger; 6 | use zerogc_simple::{CollectorId as SimpleCollectorId, Gc, GcConfig, SimpleCollector}; 7 | 8 | fn test_collector() -> SimpleCollector { 9 | let mut config = GcConfig::default(); 10 | config.always_force_collect = true; // Force collections for predictability 11 | SimpleCollector::with_config(config, Logger::root(::slog::Discard, ::slog::o!())) 12 | } 13 | 14 | trait Foo<'gc>: DynTrace<'gc, SimpleCollectorId> { 15 | fn method(&self) -> i32; 16 | fn validate(&self); 17 | } 18 | 
trait_object_trace!( 19 | impl<'gc,> Trace for dyn Foo<'gc>; 20 | Branded<'new_gc> => (dyn Foo<'new_gc> + 'new_gc), 21 | collector_id => SimpleCollectorId, 22 | gc_lifetime => 'gc 23 | ); 24 | 25 | fn foo<'gc, T: ?Sized + Trace + Foo<'gc>>(t: &T) -> i32 { 26 | assert_eq!(t.method(), 12); 27 | t.method() * 2 28 | } 29 | fn bar<'gc>(gc: Gc<'gc, dyn Foo<'gc> + 'gc>) -> i32 { 30 | foo(gc.value()) 31 | } 32 | #[derive(Trace)] 33 | #[zerogc(collector_ids(SimpleCollectorId), unsafe_skip_drop)] 34 | struct Bar<'gc> { 35 | inner: Option>>, 36 | val: Gc<'gc, i32>, 37 | } 38 | impl<'gc> Foo<'gc> for Bar<'gc> { 39 | fn method(&self) -> i32 { 40 | *self.val 41 | } 42 | fn validate(&self) { 43 | assert_eq!(*self.val, 12); 44 | assert_eq!(*self.inner.unwrap().val, 4); 45 | } 46 | } 47 | impl<'gc> Drop for Bar<'gc> { 48 | fn drop(&mut self) { 49 | BAR_DROP_COUNT.with(|val| { 50 | val.set(val.get() + 1); 51 | }) 52 | } 53 | } 54 | 55 | thread_local! { 56 | static BAR_DROP_COUNT: Cell = const { Cell::new(0) }; 57 | } 58 | #[test] 59 | fn foo_bar() { 60 | let collector = test_collector(); 61 | let mut context = collector.into_context(); 62 | let val = context.alloc(12); 63 | let inner = context.alloc(Bar { 64 | inner: None, 65 | val: context.alloc(4), 66 | }); 67 | let gc: Gc<'_, dyn Foo<'_>> = context.alloc(Bar { 68 | inner: Some(inner), 69 | val, 70 | }); 71 | assert_eq!(bar(gc), 24); 72 | // Should be traced correctly 73 | let gc = safepoint!(context, gc); 74 | assert_eq!( 75 | BAR_DROP_COUNT.with(Cell::get), 76 | 0, 77 | "Expected Bar to be retained" 78 | ); 79 | gc.validate(); 80 | // Trace inner, should end up dropping Bar 81 | safepoint!(context, ()); 82 | assert_eq!( 83 | BAR_DROP_COUNT.with(Cell::get), 84 | 2, 85 | "Expected Bar to be dropped" 86 | ); 87 | } 88 | -------------------------------------------------------------------------------- /src/allocator.rs: -------------------------------------------------------------------------------- 1 | //! Emulate the `core::alloc::Allocator` API 2 | //! 3 | //! Constructing a `GcAllocWrapper` is `unsafe`, 4 | //! because it is the caller's responsibility to ensure 5 | //! the returned pointers are appropriately traced. 6 | //! 7 | //! If there are any interior pointers, 8 | //! those must also be traced as well. 9 | 10 | use core::alloc::{AllocError, Allocator, Layout}; 11 | use core::ptr::NonNull; 12 | 13 | use crate::GcSimpleAlloc; 14 | 15 | /// A wrapper for a `GcContext` that implements [core::alloc::Allocator] 16 | /// by allocating `GcArray` 17 | /// 18 | /// ## Safety 19 | /// Using this allocator api comes with two major caveats: 20 | /// 1. All pointers that are in-use must be traced by re-interpreting them as the relavent `GcArray` 21 | /// 2. The `Trace` implementation must support relocating pointers. 22 | /// 23 | /// NOTE: Item number two may be considerably more difficult. 
24 | /// For example, the 'hashbrown::raw::RawTable' api supports accessing the raw pointers, 25 | /// but doesn't support changing or reloacting it.....x 26 | pub struct GcAllocWrapper<'gc, C: GcSimpleAlloc>(&'gc C); 27 | 28 | unsafe impl<'gc, C: GcSimpleAlloc> Allocator for GcAllocWrapper<'gc, C> { 29 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 30 | unsafe { 31 | let ptr: *mut u8 = match layout.align() { 32 | 1 => self.0.alloc_uninit_slice::(layout.size()), 33 | 2 => self 34 | .0 35 | .alloc_uninit_slice::((layout.size() + 1) / 2) 36 | .cast(), 37 | 4 => self 38 | .0 39 | .alloc_uninit_slice::((layout.size() + 3) / 4) 40 | .cast(), 41 | 8 => self 42 | .0 43 | .alloc_uninit_slice::((layout.size() + 7) / 8) 44 | .cast(), 45 | _ => return Err(AllocError), 46 | }; 47 | Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut( 48 | ptr, 49 | layout.size(), 50 | ))) 51 | } 52 | } 53 | #[inline] 54 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 55 | /* 56 | * with garbage collection, deallocation is a nop 57 | * 58 | * If we're in debug mode we will write 59 | * 0xDEADBEAF to the memory to be extra sure. 60 | */ 61 | if cfg!(debug_assertions) { 62 | const SRC: [u8; 4] = (0xDEAD_BEAFu32).to_ne_bytes(); 63 | ptr.as_ptr() 64 | .copy_from_nonoverlapping(SRC.as_ptr(), layout.size().min(4)); 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zerogc" 3 | description = "Zero overhead tracing garbage collection for rust" 4 | version.workspace = true 5 | authors.workspace = true 6 | repository.workspace = true 7 | license.workspace = true 8 | edition.workspace = true 9 | readme = "README.md" 10 | 11 | [dependencies] 12 | scopeguard = "1.1" 13 | inherent = "1" 14 | # Manually included tracing support for third party libraries 15 | # Providing support for these important libraries, 16 | # gives zerogc batteries included support. 17 | indexmap = { version = "1.6", optional = true } 18 | parking_lot = { version = "0.11", optional = true } 19 | arrayvec = { version = "0.7", optional = true } 20 | anyhow = { version = "1", optional = true } 21 | # Serde support (optional) 22 | serde = { version = "1", optional = true, features = ["derive"] } 23 | # Used for macros 24 | zerogc-derive = { path = "libs/derive", version = "0.2.0-alpha.6" } 25 | # Used for the "epsilon" no-op collector 26 | bumpalo = { version = "3", optional = true } 27 | # Used for our custom hashmap 28 | ahash = { version = "0.7.0", default-features = false, optional = true } 29 | 30 | [dependencies.hashbrown] 31 | # Hashbrown is used for our custom hashmap implementation 32 | # We also implement Trace regardless 33 | version = "0.11" 34 | optional = true 35 | features = ["raw", "nightly"] 36 | 37 | [dev-dependencies] 38 | serde_json = "1" 39 | # Used to test custom hash function 40 | # support for IndexMap 41 | fnv = "1" 42 | 43 | [workspace] 44 | resolver = "2" 45 | members = ["libs/*"] 46 | 47 | [workspace.package] 48 | version = "0.2.0-alpha.7" 49 | authors = ["Techcable "] 50 | repository = "https://github.com/DuckLogic/zerogc" 51 | license = "MIT" 52 | edition = "2018" 53 | 54 | [features] 55 | default = ["std", "epsilon", "epsilon-arena-alloc"] 56 | # Depend on the standard library (optional) 57 | # 58 | # This implements tracing for most standard library types. 
59 | std = ["alloc"] 60 | # Depend on `extern crate alloc` in addition to the Rust `core` 61 | # This is implied by using the standard library (feature="std") 62 | # 63 | # This implements `Trace` for `Box` and collections like `Vec` 64 | alloc = [] 65 | # Emulate the `core::alloc::Allocator` api 66 | # 67 | # NOTE: This doesn't *necessarily* require the 'alloc' 68 | # feature (because the API itself is in 'core') 69 | allocator-api = [] 70 | # Our custom hashmap implementation 71 | hashmap-impl = ["allocator-api", "hashbrown", "ahash"] 72 | # Support a 'GcError' type that implements 'std::error::Error' 73 | # by wrapping a 'GcHandle' 74 | errors = [] 75 | # Serde support 76 | serde1 = ["serde", "zerogc-derive/__serde-internal", "arrayvec/serde", "indexmap/serde-1", "hashbrown/serde"] 77 | # Support the "epsilon" no-op collector 78 | epsilon = [] 79 | # Configure the "epsilon" collector use arena allocation 80 | # (on by default) 81 | epsilon-arena-alloc = ["epsilon", "bumpalo"] 82 | -------------------------------------------------------------------------------- /src/manually_traced/arrayvec.rs: -------------------------------------------------------------------------------- 1 | use arrayvec::{ArrayString, ArrayVec}; 2 | 3 | use crate::{GcRebrand, NullTrace}; 4 | 5 | use zerogc_derive::unsafe_gc_impl; 6 | 7 | unsafe_gc_impl!( 8 | target => ArrayString, 9 | params => [const SIZE: usize], 10 | null_trace => always, 11 | NEEDS_TRACE => false, 12 | NEEDS_DROP => false, 13 | branded_type => ArrayString, 14 | trace_template => |self, visitor| { Ok(()) }, 15 | deserialize => delegate 16 | ); 17 | 18 | unsafe_gc_impl!( 19 | target => ArrayVec, 20 | params => [T, const SIZE: usize], 21 | null_trace => { where T: NullTrace }, 22 | NEEDS_TRACE => ::NEEDS_TRACE, 23 | NEEDS_DROP => ::NEEDS_DROP, 24 | bounds => { 25 | GcRebrand => { where T: GcRebrand<'new_gc, Id>, T::Branded: Sized }, 26 | }, 27 | branded_type => ArrayVec, 28 | trace_template => |self, visitor| { 29 | for val in self.#iter() { 30 | visitor.#trace_func(val)?; 31 | } 32 | Ok(()) 33 | }, 34 | deserialize => |ctx, deserializer| { 35 | use core::marker::PhantomData; 36 | use crate::CollectorId; 37 | use crate::serde::{GcDeserialize, GcDeserializeSeed}; 38 | use serde::de::{Visitor, Error, SeqAccess}; 39 | struct ArrayVecVisitor< 40 | 'gc, 'de, Id: CollectorId, 41 | T: GcDeserialize<'gc, 'de, Id>, 42 | const SIZE: usize 43 | > { 44 | ctx: &'gc Id::Context, 45 | marker: PhantomData ArrayVec> 46 | } 47 | impl< 48 | 'gc, 'de, Id: CollectorId, 49 | T: GcDeserialize<'gc, 'de, Id>, 50 | const SIZE: usize 51 | > Visitor<'de> for ArrayVecVisitor<'gc, 'de, Id, T, SIZE> { 52 | type Value = ArrayVec; 53 | fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 54 | write!(f, "a array with size <= {}", SIZE) 55 | } 56 | #[inline] 57 | fn visit_seq(self, mut access: A) -> Result 58 | where A: SeqAccess<'de>, { 59 | let mut values = Self::Value::new(); 60 | while let Some(value) = access.next_element_seed( 61 | GcDeserializeSeed::new(self.ctx) 62 | )? 
{ 63 | match values.try_push(value) { 64 | Ok(()) => {}, 65 | Err(_) => { 66 | return Err(A::Error::invalid_length(SIZE + 1, &self)) 67 | } 68 | } 69 | } 70 | Ok(values) 71 | } 72 | } 73 | let visitor: ArrayVecVisitor = ArrayVecVisitor { ctx, marker: PhantomData }; 74 | deserializer.deserialize_seq(visitor) 75 | } 76 | ); 77 | -------------------------------------------------------------------------------- /libs/simple/examples/binary_trees.rs: -------------------------------------------------------------------------------- 1 | #![feature( 2 | arbitrary_self_types, // Unfortunately this is required for methods on Gc refs 3 | )] 4 | use zerogc::prelude::*; 5 | use zerogc_derive::Trace; 6 | use zerogc_simple::{ 7 | CollectorId as SimpleCollectorId, Gc, SimpleCollector, SimpleCollectorContext, 8 | }; 9 | 10 | use slog::{o, Drain, Logger}; 11 | 12 | #[derive(Trace)] 13 | #[zerogc(collector_ids(SimpleCollectorId))] 14 | struct Tree<'gc> { 15 | #[zerogc(mutable(public))] 16 | children: GcCell>, Gc<'gc, Tree<'gc>>)>>, 17 | } 18 | 19 | fn item_check(tree: &Tree) -> i32 { 20 | if let Some((left, right)) = tree.children.get() { 21 | 1 + item_check(&right) + item_check(&left) 22 | } else { 23 | 1 24 | } 25 | } 26 | 27 | fn bottom_up_tree<'gc>(collector: &'gc SimpleCollectorContext, depth: i32) -> Gc<'gc, Tree<'gc>> { 28 | let tree = collector.alloc(Tree { 29 | children: GcCell::new(None), 30 | }); 31 | if depth > 0 { 32 | let right = bottom_up_tree(collector, depth - 1); 33 | let left = bottom_up_tree(collector, depth - 1); 34 | tree.set_children(Some((left, right))); 35 | } 36 | tree 37 | } 38 | 39 | fn inner(gc: &mut SimpleCollectorContext, depth: i32, iterations: u32) -> String { 40 | let chk: i32 = (0..iterations) 41 | .into_iter() 42 | .map(|_| { 43 | safepoint_recurse!(gc, |gc| { 44 | let a = bottom_up_tree(&gc, depth); 45 | item_check(&a) 46 | }) 47 | }) 48 | .sum(); 49 | format!("{}\t trees of depth {}\t check: {}", iterations, depth, chk) 50 | } 51 | 52 | fn main() { 53 | let n = std::env::args() 54 | .nth(1) 55 | .and_then(|n| n.parse().ok()) 56 | .unwrap_or(10); 57 | let min_depth = 4; 58 | let max_depth = if min_depth + 2 > n { min_depth + 2 } else { n }; 59 | 60 | let plain = slog_term::PlainSyncDecorator::new(std::io::stdout()); 61 | let logger = Logger::root( 62 | slog_term::FullFormat::new(plain).build().fuse(), 63 | o!("bench" => file!()), 64 | ); 65 | let collector = SimpleCollector::with_logger(logger); 66 | let mut gc = collector.into_context(); 67 | { 68 | let depth = max_depth + 1; 69 | let tree = bottom_up_tree(&gc, depth); 70 | println!( 71 | "stretch tree of depth {}\t check: {}", 72 | depth, 73 | item_check(&tree) 74 | ); 75 | } 76 | safepoint!(gc, ()); 77 | 78 | let long_lived_tree = bottom_up_tree(&gc, max_depth); 79 | 80 | let (long_lived_tree, ()) = safepoint_recurse!(gc, long_lived_tree, |gc, _long_lived_tree| { 81 | (min_depth / 2..max_depth / 2 + 1) 82 | .into_iter() 83 | .for_each(|half_depth| { 84 | let depth = half_depth * 2; 85 | let iterations = 1 << ((max_depth - depth + min_depth) as u32); 86 | let message = 87 | safepoint_recurse!(gc, |new_gc| { inner(&mut new_gc, depth, iterations) }); 88 | println!("{}", message); 89 | }) 90 | }); 91 | 92 | println!( 93 | "long lived tree of depth {}\t check: {}", 94 | max_depth, 95 | item_check(&long_lived_tree) 96 | ); 97 | } 98 | -------------------------------------------------------------------------------- /run_examples.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env python3 2 | import sys 3 | 4 | from subprocess import run, CalledProcessError 5 | 6 | try: 7 | import click 8 | except ImportError: 9 | print("Unable to import required dependency: click", file=sys.stderr) 10 | sys.exit(2) 11 | raise AssertionError 12 | 13 | from click import ClickException 14 | 15 | if sys.version_info[:2] < (3, 9): 16 | print("Unsupported python version:", '.'.join(sys.version_info[:3]), file=sys.stderr) 17 | print("Requires at least Python 3.9") 18 | sys.exit(2) 19 | raise AssertionError 20 | 21 | from dataclasses import dataclass 22 | 23 | @dataclass 24 | class ExampleData: 25 | package: str 26 | args: list[str] 27 | 28 | EXAMPLES = { 29 | "binary_trees": ExampleData(package='zerogc-simple', args=['12']), 30 | "binary_trees_parallel": ExampleData(package='zerogc-simple', args=['12']), 31 | } 32 | 33 | def normalize_example(name): 34 | return name.replace('-', '_') 35 | 36 | def print_seperator(): 37 | print() 38 | print('-' * 8) 39 | print() 40 | 41 | @click.command() 42 | @click.option('--all', is_flag=True, help="Run **all** the examples") 43 | @click.option('--example', '-e', 'examples', multiple=True, help="The name of the example to run") 44 | @click.option('--list', '-l', 'list_examples', is_flag=True, help="List available examples") 45 | @click.option('--release', is_flag=True, help="Compile code in release mode") 46 | def run_examples(list_examples: bool, examples: list[str], all: bool, release: bool): 47 | if all: 48 | if examples: 49 | raise ClickException("Should not specify explicit examples along with `-all`") 50 | else: 51 | examples = sorted(EXAMPLES.keys()) 52 | if list_examples and examples: 53 | raise ClickException("Should not specify any examples along with '--list'") 54 | if not examples: 55 | # Imply '--list' if nothing else is specified 56 | list_examples = True 57 | if list_examples: 58 | click.echo("Listing available examples: [Type --help for more info]") 59 | for example in EXAMPLES.keys(): 60 | click.echo(f" {example}") 61 | sys.exit() 62 | # Normalize all names 63 | examples = list(map(normalize_example, examples)) 64 | extra_cargo_args = [] 65 | if release: 66 | extra_cargo_args += ['--release'] 67 | for example_name in examples: 68 | if example_name not in EXAMPLES: 69 | raise ClickException("Invalid example name: {example_name}") 70 | for example_name in examples: 71 | example = EXAMPLES[example_name] 72 | print(f"Compiling example: {example_name}") 73 | try: 74 | run(["cargo", "build", "--example", example_name, '-p', example.package, *extra_cargo_args], check=True) 75 | except CalledProcessError as e: 76 | raise ClickException(f"Failed to compile {example_name}") 77 | print_seperator() 78 | for index, example_name in enumerate(examples): 79 | example = EXAMPLES[example_name] 80 | print("Running example: {example_name}") 81 | try: 82 | run(["cargo", "run", "--example", example_name, '-p', example.package, *extra_cargo_args, '--', *example.args], check=True) 83 | except CalledProcessError: 84 | raise ClickException(f"Failed to run example: {example_name}") 85 | if index + 1 != len(examples): 86 | print_seperator() 87 | 88 | if __name__ == "__main__": 89 | run_examples() 90 | -------------------------------------------------------------------------------- /libs/simple/examples/binary_trees_parallel.rs: -------------------------------------------------------------------------------- 1 | #![feature( 2 | arbitrary_self_types, // Unfortunately this is required for methods on Gc refs 3 | )] 4 | use zerogc::prelude::*; 5 | use 
zerogc_derive::Trace; 6 | use zerogc_simple::{ 7 | CollectorId as SimpleCollectorId, Gc, SimpleCollector, SimpleCollectorContext, 8 | }; 9 | 10 | use rayon::prelude::*; 11 | use slog::{o, Drain, Logger}; 12 | 13 | #[derive(Trace)] 14 | #[zerogc(collector_ids(SimpleCollectorId))] 15 | struct Tree<'gc> { 16 | #[zerogc(mutable(public))] 17 | children: GcCell>, Gc<'gc, Tree<'gc>>)>>, 18 | } 19 | 20 | fn item_check(tree: &Tree) -> i32 { 21 | if let Some((left, right)) = tree.children.get() { 22 | 1 + item_check(&right) + item_check(&left) 23 | } else { 24 | 1 25 | } 26 | } 27 | 28 | fn bottom_up_tree<'gc>(collector: &'gc SimpleCollectorContext, depth: i32) -> Gc<'gc, Tree<'gc>> { 29 | let tree = collector.alloc(Tree { 30 | children: GcCell::new(None), 31 | }); 32 | if depth > 0 { 33 | let right = bottom_up_tree(collector, depth - 1); 34 | let left = bottom_up_tree(collector, depth - 1); 35 | tree.set_children(Some((left, right))); 36 | } 37 | tree 38 | } 39 | 40 | fn inner(collector: &SimpleCollector, depth: i32, iterations: u32) -> String { 41 | let chk: i32 = (0..iterations) 42 | .into_par_iter() 43 | .map(|_| { 44 | let mut gc = collector.create_context(); 45 | safepoint_recurse!(gc, |gc| { 46 | let a = bottom_up_tree(&gc, depth); 47 | item_check(&a) 48 | }) 49 | }) 50 | .sum(); 51 | format!("{}\t trees of depth {}\t check: {}", iterations, depth, chk) 52 | } 53 | 54 | fn main() { 55 | let n = std::env::args() 56 | .nth(1) 57 | .and_then(|n| n.parse().ok()) 58 | .unwrap_or(10); 59 | let min_depth = 4; 60 | let max_depth = if min_depth + 2 > n { min_depth + 2 } else { n }; 61 | 62 | let plain = slog_term::PlainSyncDecorator::new(std::io::stdout()); 63 | let logger = Logger::root( 64 | slog_term::FullFormat::new(plain).build().fuse(), 65 | o!("bench" => file!()), 66 | ); 67 | let collector = SimpleCollector::with_logger(logger); 68 | let mut gc = collector.create_context(); 69 | { 70 | let depth = max_depth + 1; 71 | let tree = bottom_up_tree(&gc, depth); 72 | println!( 73 | "stretch tree of depth {}\t check: {}", 74 | depth, 75 | item_check(&tree) 76 | ); 77 | } 78 | safepoint!(gc, ()); 79 | 80 | let long_lived_tree = bottom_up_tree(&gc, max_depth); 81 | let long_lived_tree = long_lived_tree.create_handle(); 82 | let frozen = freeze_context!(gc); 83 | 84 | (min_depth / 2..max_depth / 2 + 1) 85 | .into_par_iter() 86 | .for_each(|half_depth| { 87 | let depth = half_depth * 2; 88 | let iterations = 1 << ((max_depth - depth + min_depth) as u32); 89 | // NOTE: We're relying on inner to do safe points internally 90 | let message = inner(&collector, depth, iterations); 91 | println!("{}", message); 92 | }); 93 | let new_context = unfreeze_context!(frozen); 94 | let long_lived_tree = long_lived_tree.bind_to(&new_context); 95 | 96 | println!( 97 | "long lived tree of depth {}\t check: {}", 98 | max_depth, 99 | item_check(&long_lived_tree) 100 | ); 101 | } 102 | -------------------------------------------------------------------------------- /libs/simple/tests/arrays.rs: -------------------------------------------------------------------------------- 1 | use slog::Logger; 2 | 3 | use zerogc::safepoint; 4 | use zerogc::GcSimpleAlloc; 5 | use zerogc_derive::Trace; 6 | 7 | use zerogc_simple::{ 8 | CollectorId as SimpleCollectorId, Gc, GcArray, GcConfig, GcVec, SimpleCollector, 9 | }; 10 | 11 | fn test_collector() -> SimpleCollector { 12 | let mut config = GcConfig::default(); 13 | config.always_force_collect = true; // Force collections for predictability 14 | SimpleCollector::with_config(config, 
Logger::root(::slog::Discard, ::slog::o!())) 15 | } 16 | 17 | #[derive(Trace, Copy, Clone, Debug)] 18 | #[zerogc(copy, collector_ids(SimpleCollectorId))] 19 | struct Dummy<'gc> { 20 | val: usize, 21 | inner: Option>>, 22 | } 23 | 24 | #[test] 25 | fn array() { 26 | let collector = test_collector(); 27 | let mut context = collector.into_context(); 28 | let array1 = context.alloc_slice_fill_copy(5, 12u32); 29 | assert_eq!(*array1.as_slice(), *vec![12u32; 5]); 30 | safepoint!(context, ()); 31 | const TEXT: &[u8] = b"all cows eat grass"; 32 | let array_text = context.alloc_slice_copy(TEXT); 33 | let array_none: GcArray> = context.alloc_slice_none(12); 34 | for val in array_none.as_slice() { 35 | assert_eq!(*val, None); 36 | } 37 | let array_text = safepoint!(context, array_text); 38 | assert_eq!(array_text.as_slice(), TEXT); 39 | let mut nested_trace = Vec::new(); 40 | let mut last = None; 41 | for i in 0..16 { 42 | let obj = context.alloc(Dummy { 43 | val: i, 44 | inner: last, 45 | }); 46 | nested_trace.push(obj); 47 | last = Some(obj); 48 | } 49 | let nested_trace = context.alloc_slice_copy(nested_trace.as_slice()); 50 | let nested_trace: GcArray> = safepoint!(context, nested_trace); 51 | for (idx, val) in nested_trace.as_slice().iter().enumerate() { 52 | assert_eq!(val.val, idx, "Invalid val: {:?}", val); 53 | if let Some(last) = val.inner { 54 | assert_eq!(last.val, idx - 1); 55 | } 56 | } 57 | } 58 | 59 | #[test] 60 | fn vec() { 61 | let collector = test_collector(); 62 | let mut context = collector.into_context(); 63 | let mut vec1 = context.alloc_vec(); 64 | for _ in 0..5 { 65 | vec1.push(12u32); 66 | } 67 | assert_eq!(*vec1.as_slice(), *vec![12u32; 5]); 68 | drop(vec1); 69 | safepoint!(context, ()); 70 | const TEXT: &[u8] = b"all cows eat grass"; 71 | let mut vec_text = context.alloc_vec(); 72 | vec_text.extend_from_slice(TEXT); 73 | let mut vec_none: GcVec> = context.alloc_vec_with_capacity(12); 74 | for _ in 0..12 { 75 | vec_none.push(None); 76 | } 77 | for val in vec_none.iter() { 78 | assert_eq!(*val, None); 79 | } 80 | drop(vec_none); 81 | let vec_text: GcVec = 82 | GcVec::copy_from_slice(safepoint!(context, vec_text).as_slice(), &context); 83 | assert_eq!(vec_text.as_slice(), TEXT); 84 | let mut nested_trace: GcVec> = context.alloc_vec_with_capacity(3); 85 | let mut last = None; 86 | for i in 0..16 { 87 | let obj = context.alloc(Dummy { 88 | val: i, 89 | inner: last, 90 | }); 91 | nested_trace.push(obj); 92 | last = Some(obj); 93 | } 94 | drop(vec_text); 95 | let nested_trace: GcVec> = GcVec::from_vec( 96 | safepoint!(context, nested_trace).into_iter().collect(), 97 | &context, 98 | ); 99 | for (idx, val) in nested_trace.iter().enumerate() { 100 | assert_eq!(val.val, idx, "Invalid val: {:?}", val); 101 | if let Some(last) = val.inner { 102 | assert_eq!(last.val, idx - 1); 103 | } 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /libs/context/src/state/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::collector::RawCollectorImpl; 2 | use crate::{CollectorRef, ContextState, ShadowStack}; 3 | 4 | use core::fmt::Debug; 5 | use core::mem::ManuallyDrop; 6 | 7 | use alloc::boxed::Box; 8 | 9 | pub mod nosync; 10 | /// The internal state of the collector 11 | /// 12 | /// Has a thread-safe and thread-unsafe implementation. 
13 | 14 | #[cfg(feature = "sync")] 15 | pub mod sync; 16 | 17 | /// Manages coordination of garbage collections 18 | pub unsafe trait CollectionManager: self::sealed::Sealed 19 | where 20 | C: RawCollectorImpl, 21 | { 22 | type Context: RawContext; 23 | fn new() -> Self; 24 | fn is_collecting(&self) -> bool; 25 | fn should_trigger_collection(&self) -> bool; 26 | /// Freeze this context 27 | /// 28 | /// ## Safety 29 | /// See [GcContext::free_context] 30 | unsafe fn freeze_context(&self, context: &Self::Context); 31 | /// Unfreeze the context 32 | /// 33 | /// ## Safety 34 | /// See [GcContext::unfreeze_context] 35 | unsafe fn unfreeze_context(&self, context: &Self::Context); 36 | 37 | // 38 | // Extension methods on collector 39 | // 40 | 41 | /// Attempt to prevent garbage collection for the duration of the closure 42 | /// 43 | /// This method is **OPTIONAL** and will panic if unimplemented. 44 | fn prevent_collection(collector: &C, func: impl FnOnce() -> R) -> R; 45 | 46 | /// Free the specified context 47 | /// 48 | /// ## Safety 49 | /// - Assumes the specified pointer is valid 50 | /// - Assumes there are no more outstanding borrows to values in the context 51 | unsafe fn free_context(collector: &C, context: *mut Self::Context); 52 | } 53 | 54 | /// The underlying state of a context 55 | /// 56 | /// Each context is bound to one and only one thread, 57 | /// even if the collector supports multi-threading. 58 | pub unsafe trait RawContext: Debug + self::sealed::Sealed 59 | where 60 | C: RawCollectorImpl, 61 | { 62 | unsafe fn register_new(collector: &CollectorRef) -> ManuallyDrop>; 63 | /// Trigger a safepoint for this context. 64 | /// 65 | /// This implicitly attempts a collection, 66 | /// potentially blocking until completion.. 67 | /// 68 | /// Undefined behavior if mutated during collection 69 | /// 70 | /// ## Safety 71 | /// See [GcContext::unchecked_safepoint] 72 | unsafe fn trigger_safepoint(&self); 73 | /// Borrow a reference to the shadow stack, 74 | /// assuming this context is valid (not active). 75 | /// 76 | /// A context is valid if it is either frozen 77 | /// or paused at a safepoint. 78 | /// 79 | /// ## Safety 80 | /// The context must be "inactive", 81 | /// either frozen or paused at a safepoint. 82 | #[inline] 83 | unsafe fn assume_valid_shadow_stack(&self) -> &ShadowStack { 84 | match self.state() { 85 | ContextState::Active => unreachable!("active context: {:?}", self), 86 | ContextState::SafePoint { .. } | ContextState::Frozen { .. } => {} 87 | } 88 | &*self.shadow_stack_ptr() 89 | } 90 | /// Get a pointer to the shadow stack 91 | fn shadow_stack_ptr(&self) -> *mut ShadowStack; 92 | /// Get a reference to the collector as a [CollectorRef] 93 | /// 94 | /// ## Safety 95 | /// Assumes the underlying collector is still valid. 96 | unsafe fn collector_ref(&self) -> &'_ CollectorRef; 97 | /// Get a reference to the collector, 98 | /// assuming that it's valid 99 | /// 100 | /// ## Safety 101 | /// Assumes that the underlying collector 102 | /// is still valid. 103 | #[inline] 104 | unsafe fn collector(&self) -> &C { 105 | self.collector_ref().as_raw() 106 | } 107 | fn state(&self) -> ContextState; 108 | } 109 | 110 | mod sealed { 111 | pub trait Sealed {} 112 | } 113 | -------------------------------------------------------------------------------- /src/cell.rs: -------------------------------------------------------------------------------- 1 | //! Implements a [GcCell] to allow mutating values 2 | //! inside garbage collected objects. 3 | //! 4 | //! 
Normally garbage collected objects are immutable, 5 | //! since their references are shared. It's typical 6 | //! for collectors to want to trigger a write barrier 7 | //! before writing to a field. All interior mutability 8 | //! requires triggering appropriate write barriers, 9 | //! which is unsafe. 10 | //! 11 | //! The `zerogc_derive` crate can generate setters 12 | //! for fields that are wrapped in a [GcCell]. 13 | //! Just mark the field with `#[zerogc(mutable(public))]` 14 | //! and it'll generate a safe wrapper. 15 | use core::cell::Cell; 16 | 17 | use zerogc_derive::unsafe_gc_impl; 18 | 19 | use crate::{CollectorId, GcDirectBarrier, GcRebrand, GcSafe, NullTrace, Trace}; 20 | 21 | /// A `Cell` pointing to a garbage collected object. 22 | /// 23 | /// This only supports mutating `NullTrace` types, 24 | /// becuase garbage collected pointers need write barriers. 25 | #[derive(Default, Clone, Debug)] 26 | #[repr(transparent)] 27 | pub struct GcCell(Cell); 28 | impl GcCell { 29 | /// Create a new cell 30 | #[inline] 31 | pub fn new(value: T) -> Self { 32 | GcCell(Cell::new(value)) 33 | } 34 | /// Get a mutable reference to this cell's value 35 | /// 36 | /// This is safe because the `&mut self` 37 | /// guarentees exclusive access to the cell. 38 | #[inline] 39 | pub fn get_mut(&mut self) -> &mut T { 40 | self.0.get_mut() 41 | } 42 | /// Get a pointer to this cell's conent 43 | #[inline] 44 | pub fn as_ptr(&self) -> *mut T { 45 | self.0.as_ptr() 46 | } 47 | /// Get the current value of this cell 48 | #[inline] 49 | pub fn get(&self) -> T { 50 | self.0.get() 51 | } 52 | } 53 | impl GcCell { 54 | /// Change the interior of this type 55 | /// to the specified type 56 | /// 57 | /// The type must be `NullTrace` because 58 | /// garbage collected 59 | /// types need write barriers. 60 | #[inline] 61 | pub fn set(&self, value: T) { 62 | self.0.set(value) 63 | } 64 | } 65 | unsafe impl<'gc, OwningRef, Value> GcDirectBarrier<'gc, OwningRef> for GcCell 66 | where 67 | Value: GcDirectBarrier<'gc, OwningRef> + Copy, 68 | { 69 | #[inline] 70 | unsafe fn write_barrier(&self, owner: &OwningRef, field_offset: usize) { 71 | // NOTE: We are direct write because `Value` is stored inline 72 | self.get().write_barrier(owner, field_offset) 73 | } 74 | } 75 | unsafe_gc_impl!( 76 | target => GcCell, 77 | params => [T: Trace + Copy], 78 | NEEDS_TRACE => T::NEEDS_TRACE, 79 | // T is Copy, so it doesn't need to be dropped 80 | NEEDS_DROP => false, 81 | bounds => { 82 | GcSafe => { where T: GcSafe<'gc, Id> + Copy }, 83 | Trace => { where T: Trace + Copy }, 84 | // NOTE: TraceImmutable requires a 'NullTrace' for interior mutability 85 | TraceImmutable => { where T: NullTrace + Copy }, 86 | GcRebrand => { where T: Trace + Copy + GcRebrand<'new_gc, Id>, Id: CollectorId,T::Branded: Copy + Trace } 87 | }, 88 | branded_type => GcCell, 89 | null_trace => { where T: Copy + NullTrace }, 90 | trace_mut => |self, visitor| { 91 | /* 92 | * GcCell can only support mutating types that are `NullTrace`, 93 | * because garbage collected types need write barriers. 94 | * 95 | * However, this is already enforced by the bounds of `GcCell::set`, 96 | * so we don't need to verify here. 97 | * In other words is possible to safely trace a `GcCell` 98 | * with a garbage collected type, as long as it is never mutated. 
99 | */ 100 | visitor.trace(self.get_mut()) 101 | }, 102 | trace_immutable => |self, visitor| { 103 | /* 104 | * See Trace documentation on the safety of mutation 105 | * 106 | * We require `NullTrace` in order to `set` our internals. 107 | */ 108 | let mut value = self.get(); 109 | visitor.trace(&mut value)?; 110 | self.set(value); 111 | Ok(()) 112 | } 113 | ); 114 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | //! Adds a `GcError` type, 2 | //! which implements `std::error::Error + 'static` 3 | //! for garbage collected error types. 4 | //! 5 | //! This allows for easy compatibility with [`anyhow`](https://docs.rs/anyhow/1.0.43/anyhow/) 6 | //! even if you want to use garbage collected data in your errors 7 | //! (which would otherwise require a `'gc` lifetime). 8 | //! 9 | //! The implementation doesn't require any unsafe code. 10 | //! It's simply a thin wrapper around [GcHandle] that uses 11 | //! [GcHandle::use_critical] section to block 12 | //! garbage collection during formatting calls... 13 | 14 | use std::error::Error as StdError; 15 | use std::fmt::{self, Debug, Display, Formatter}; 16 | 17 | use crate::prelude::*; 18 | use crate::DynTrace; 19 | 20 | /// A garbage collected [`std::error::Error`] type 21 | /// 22 | /// This is automatically implemented for all types that 23 | /// 1. Implement [`std::error::Error`] 24 | /// 2. Implement [GcSafe] 25 | /// 3. Implement [`GcRebrand<'static, Id>`](`crate::GcRebrand`) 26 | /// 4. It must be [Sync] 27 | /// 5. Have no other lifetimes besides `'gc` 28 | /// 29 | /// The fifth point is rather subtle. 30 | /// Another way of saying it is that `T: 'gc` and `T::Branded: 'static`. 31 | pub trait GcErrorType<'gc, Id: CollectorId>: 32 | StdError + Sync + GcSafe<'gc, Id> + 'gc + GcRebrand<'static, Id> + self::sealed::Sealed 33 | where 34 | >::Branded: 'static, 35 | { 36 | } 37 | impl<'gc, Id: CollectorId, T: StdError + 'gc + Sync + GcSafe<'gc, Id> + GcRebrand<'static, Id>> 38 | GcErrorType<'gc, Id> for T 39 | where 40 | >::Branded: 'static, 41 | { 42 | } 43 | impl<'gc, Id: CollectorId, T: StdError + 'gc + Sync + GcSafe<'gc, Id> + GcRebrand<'static, Id>> 44 | self::sealed::Sealed for T 45 | where 46 | >::Branded: 'static, 47 | { 48 | } 49 | 50 | /// A super-trait of [GcErrorType] 51 | /// that is suitable for dynamic dispatch. 52 | /// 53 | /// This can only actually implemented 54 | /// if the type is a `GcErrorType` at runtime. 55 | /// 56 | /// This is an implementation detail 57 | #[doc(hidden)] 58 | pub trait DynGcErrorType<'gc, Id: CollectorId>: 59 | Sync + StdError + DynTrace<'gc, Id> + self::sealed::Sealed 60 | { 61 | } 62 | impl<'gc, T: GcErrorType<'gc, Id>, Id: CollectorId> DynGcErrorType<'gc, Id> for T where 63 | >::Branded: 'static 64 | { 65 | } 66 | 67 | crate::trait_object_trace!( 68 | impl<'gc, Id> Trace for dyn DynGcErrorType<'gc, Id> { where Id: CollectorId }; 69 | Branded<'new_gc> => (dyn DynGcErrorType<'new_gc, Id> + 'new_gc), 70 | collector_id => Id, 71 | gc_lifetime => 'gc 72 | ); 73 | 74 | /// A wrapper around a dynamically dispatched, 75 | /// [std::error::Error] with garbage collected data. 76 | /// 77 | /// The internal `'gc` lifetime has been erased, 78 | /// by wrapping it in a [GcHandle]. 
79 | /// Because the internal lifetime has been erased, 80 | /// the type can be safely used as a `'static` error type. 81 | /// 82 | /// This is analogous to [`anyhow::Error`](https://docs.rs/anyhow/1.0.43/anyhow/struct.Error.html) 83 | /// but only for garbage collected data. 84 | pub struct GcError { 85 | handle: Box>>, 86 | } 87 | impl GcError { 88 | /// Allocate a new dynamically dispatched [GcError] 89 | /// by converting from a specified `Gc` object. 90 | /// 91 | /// An easier, simpler, and recommended alternative 92 | /// is [GcSimpleAlloc::alloc_error]. 93 | #[cold] 94 | pub fn from_gc_allocated<'gc, T: GcErrorType<'gc, Id> + 'gc>(gc: Gc<'gc, T, Id>) -> Self 95 | where 96 | >::Branded: 'static, 97 | { 98 | let dynamic = gc as Gc<'gc, dyn DynGcErrorType<'gc, Id>, Id>; 99 | GcError { 100 | handle: Box::new(Gc::create_handle(&dynamic)), 101 | } 102 | } 103 | } 104 | impl Display for GcError { 105 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 106 | self.handle.use_critical(|err| Display::fmt(&err, f)) 107 | } 108 | } 109 | impl Debug for GcError { 110 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 111 | self.handle.use_critical(|err| Debug::fmt(&err, f)) 112 | } 113 | } 114 | impl StdError for GcError { 115 | /* 116 | * TODO: We can't give 'source' and 'backtrace' 117 | * because they borrow for `'self` 118 | * and it is possible a moving garbage 119 | * collector could relocate 120 | * internal data 121 | */ 122 | } 123 | 124 | mod sealed { 125 | pub trait Sealed {} 126 | } 127 | -------------------------------------------------------------------------------- /libs/context/src/utils.rs: -------------------------------------------------------------------------------- 1 | //! Utilities for the context library 2 | //! 3 | //! Also used by some collector implementations. 4 | #[cfg(not(feature = "sync"))] 5 | use core::cell::Cell; 6 | use core::fmt::{self, Debug, Display, Formatter}; 7 | use core::mem; 8 | 9 | /// Get the offset of the specified field within a structure 10 | #[macro_export] 11 | macro_rules! field_offset { 12 | ($target:ty, $($field:ident).+) => {{ 13 | const OFFSET: usize = { 14 | let uninit = core::mem::MaybeUninit::<$target>::uninit(); 15 | unsafe { ((core::ptr::addr_of!((*uninit.as_ptr())$(.$field)*)) as *const u8) 16 | .offset_from(uninit.as_ptr() as *const u8) as usize } 17 | }; 18 | OFFSET 19 | }}; 20 | } 21 | 22 | /// Transmute between two types, 23 | /// without verifying that their sizes are the same 24 | /// 25 | /// ## Safety 26 | /// This function has undefined behavior if `T` and `U` 27 | /// have different sizes. 28 | /// 29 | /// It also has undefined behavior whenever [mem::transmute] has 30 | /// undefined behavior.
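/// A hypothetical sketch of the kind of call this enables: `mem::transmute`
/// rejects casts between two independent generic parameters because it cannot
/// prove their sizes are equal, while this helper defers that check to the
/// runtime assertion below (the `rebrand` helper is purely illustrative, not
/// part of this crate):
/// ```ignore
/// unsafe fn rebrand<A, B>(val: A) -> B {
///     // Caller must guarantee `A` and `B` really have the same size/layout
///     transmute_mismatched::<A, B>(val)
/// }
/// ```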
31 | #[inline] 32 | pub unsafe fn transmute_mismatched(src: T) -> U { 33 | // NOTE: This assert has zero cost when monomorphized 34 | assert_eq!(mem::size_of::(), mem::size_of::()); 35 | let d = mem::ManuallyDrop::new(src); 36 | mem::transmute_copy::(&*d) 37 | } 38 | 39 | #[cfg(feature = "sync")] 40 | pub type AtomicCell = ::crossbeam_utils::atomic::AtomicCell; 41 | /// Fallback `AtomicCell` implementation when we actually 42 | /// don't care about thread safety 43 | #[cfg(not(feature = "sync"))] 44 | #[derive(Default)] 45 | pub struct AtomicCell(Cell); 46 | #[cfg(not(feature = "sync"))] 47 | impl AtomicCell { 48 | pub const fn new(value: T) -> Self { 49 | AtomicCell(Cell::new(value)) 50 | } 51 | pub fn store(&self, value: T) { 52 | self.0.set(value) 53 | } 54 | pub fn load(&self) -> T { 55 | self.0.get() 56 | } 57 | pub fn compare_exchange(&self, expected: T, updated: T) -> Result 58 | where 59 | T: PartialEq, 60 | { 61 | let existing = self.0.get(); 62 | if existing == expected { 63 | self.0.set(updated); 64 | Ok(existing) 65 | } else { 66 | Err(existing) 67 | } 68 | } 69 | } 70 | 71 | #[derive(Clone)] 72 | pub enum ThreadId { 73 | #[allow(unused)] 74 | Nop, 75 | #[cfg(feature = "std")] 76 | Enabled { 77 | id: std::thread::ThreadId, 78 | name: Option, 79 | }, 80 | } 81 | impl ThreadId { 82 | #[cfg(feature = "std")] 83 | pub fn current() -> ThreadId { 84 | // NOTE: It's okay: `sync` requires std 85 | let thread = std::thread::current(); 86 | ThreadId::Enabled { 87 | id: thread.id(), 88 | name: thread.name().map(String::from), 89 | } 90 | } 91 | #[cfg(not(feature = "std"))] 92 | #[inline] 93 | pub fn current() -> ThreadId { 94 | ThreadId::Nop 95 | } 96 | } 97 | impl slog::Value for ThreadId { 98 | #[cfg(not(feature = "std"))] 99 | fn serialize( 100 | &self, 101 | _record: &slog::Record, 102 | _key: &'static str, 103 | _serializer: &mut dyn slog::Serializer, 104 | ) -> slog::Result<()> { 105 | Ok(()) // Nop 106 | } 107 | #[cfg(feature = "std")] 108 | fn serialize( 109 | &self, 110 | _record: &slog::Record, 111 | key: &'static str, 112 | serializer: &mut dyn slog::Serializer, 113 | ) -> slog::Result<()> { 114 | let (id, name) = match *self { 115 | ThreadId::Nop => return Ok(()), 116 | ThreadId::Enabled { ref id, ref name } => (id, name), 117 | }; 118 | match *name { 119 | Some(ref name) => serializer.emit_arguments(key, &format_args!("{}: {:?}", *name, id)), 120 | None => serializer.emit_arguments(key, &format_args!("{:?}", id)), 121 | } 122 | } 123 | } 124 | impl Debug for ThreadId { 125 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 126 | match *self { 127 | ThreadId::Nop => f.write_str("ThreadId(??)"), 128 | #[cfg(feature = "std")] 129 | ThreadId::Enabled { id, name: None } => { 130 | write!(f, "{:?}", id) 131 | } 132 | #[cfg(feature = "std")] 133 | ThreadId::Enabled { 134 | id, 135 | name: Some(ref name), 136 | } => f.debug_tuple("ThreadId").field(&id).field(name).finish(), 137 | } 138 | } 139 | } 140 | /// The size of memory in bytes 141 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 142 | pub struct MemorySize { 143 | pub bytes: usize, 144 | } 145 | impl Display for MemorySize { 146 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 147 | if f.alternate() { 148 | write!(f, "{}", self.bytes) 149 | } else { 150 | // Write approximation 151 | let bytes = self.bytes; 152 | let (amount, suffix) = if bytes > 1024 * 1024 * 1024 { 153 | (1024 * 1024 * 1024, "GB") 154 | } else if bytes > 1024 * 1024 { 155 | (1024 * 1024, "MB") 156 | } else if bytes > 1024 { 157 | (1024, "KB") 158 
| } else { 159 | (1, "") 160 | }; 161 | write!(f, "{:.2}{}", bytes as f64 / amount as f64, suffix) 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /libs/derive/tests/basic.rs: -------------------------------------------------------------------------------- 1 | #![feature( 2 | arbitrary_self_types, // Used for `zerogc(mutable)` 3 | )] 4 | use zerogc::{ 5 | epsilon::{self, EpsilonCollectorId}, 6 | CollectorId, Gc, GcSafe, NullTrace, Trace, 7 | }; 8 | 9 | use std::fmt::Debug; 10 | use std::marker::PhantomData; 11 | use zerogc::cell::GcCell; 12 | 13 | #[derive(Trace)] 14 | #[zerogc(collector_ids(EpsilonCollectorId))] 15 | pub struct SpecificCollector<'gc> { 16 | gc: Gc<'gc, i32, EpsilonCollectorId>, 17 | rec: Gc<'gc, SpecificCollector<'gc>, EpsilonCollectorId>, 18 | #[zerogc(mutable)] 19 | cell: GcCell, EpsilonCollectorId>>, 20 | } 21 | 22 | #[derive(Trace)] 23 | #[zerogc(collector_ids(Id))] 24 | pub struct Basic<'gc, Id: CollectorId> { 25 | parent: Option, Id>>, 26 | children: Vec, Id>>, 27 | value: String, 28 | #[zerogc(mutable)] 29 | cell: GcCell, Id>>>, 30 | } 31 | 32 | #[derive(Copy, Clone, Trace)] 33 | #[zerogc(copy, collector_ids(Id))] 34 | pub struct BasicCopy<'gc, Id: CollectorId> { 35 | test: i32, 36 | value: i32, 37 | basic: Option, Id>>, 38 | } 39 | 40 | #[derive(Copy, Clone, Trace)] 41 | #[zerogc(copy, collector_ids(Id))] 42 | pub enum BasicEnum<'gc, Id: CollectorId> { 43 | Unit, 44 | Tuple(i32), 45 | First { 46 | all: i32, 47 | }, 48 | Second { 49 | you: Gc<'gc, String, Id>, 50 | need: bool, 51 | }, 52 | Third { 53 | is: (), 54 | love: Gc<'gc, BasicEnum<'gc, Id>, Id>, 55 | }, 56 | Fifth(Gc<'gc, BasicEnum<'gc, Id>, Id>, Gc<'gc, Basic<'gc, Id>, Id>), 57 | } 58 | 59 | /// A testing type that has no destructor 60 | /// 61 | /// It doesn't implement copy, but shouldn't need 62 | /// to be dropped. 63 | /// 64 | /// We shouldn't need to annotate with `unsafe_skip_drop`. 
65 | /// The dummy destructor shouldn't be generated because of `#[zerogc(nop_trace)]` 66 | #[derive(NullTrace)] 67 | #[allow(unused)] 68 | struct NoDestructorNullTrace { 69 | val: i32, 70 | s: &'static str, 71 | f: f32, 72 | } 73 | 74 | fn assert_copy() {} 75 | fn assert_null_trace() {} 76 | fn check_id<'gc, Id: CollectorId>() { 77 | assert_copy::>(); 78 | assert_copy::, Id>>(); 79 | assert_copy::, Id>>(); 80 | assert_copy::, Id>>>(); 81 | 82 | assert!( as Trace>::NEEDS_DROP); 83 | } 84 | 85 | #[derive(NullTrace)] 86 | #[allow(unused)] 87 | struct NopTrace { 88 | s: String, 89 | i: i32, 90 | wow: Box, 91 | } 92 | 93 | #[derive(Trace)] 94 | #[zerogc(unsafe_skip_drop, collector_ids(EpsilonCollectorId))] 95 | #[allow(unused)] 96 | struct UnsafeSkipped<'gc> { 97 | s: &'static str, 98 | i: i32, 99 | #[zerogc(unsafe_skip_trace)] 100 | wow: Gc<'gc, i32, EpsilonCollectorId>, 101 | #[zerogc(unsafe_skip_trace)] 102 | not_impld: NotImplTrace, 103 | } 104 | 105 | /// A type that doesn't implement `Trace` 106 | struct NotImplTrace; 107 | 108 | #[derive(Trace)] 109 | #[zerogc(ignore_lifetimes("'a"), immutable, collector_ids(EpsilonCollectorId))] 110 | #[allow(unused)] 111 | struct LifetimeTrace<'a: 'gc, 'gc, T: GcSafe<'gc, EpsilonCollectorId> + 'a> { 112 | s: String, 113 | i: i32, 114 | wow: Box, 115 | other: &'a u32, 116 | generic: Box, 117 | marker: PhantomData<&'gc ()>, 118 | } 119 | 120 | #[derive(Trace)] 121 | #[zerogc(copy, collector_ids(Id), ignore_params(T))] 122 | struct IgnoredParam<'gc, T: Debug + 'gc, Id: CollectorId> { 123 | gc: Gc<'gc, IgnoredParam<'gc, T, Id>, Id>, 124 | param: PhantomData T>, 125 | } 126 | 127 | #[test] 128 | fn basic<'gc>() { 129 | let _b = Basic:: { 130 | value: String::new(), 131 | parent: None, 132 | children: vec![], 133 | cell: GcCell::new(None), 134 | }; 135 | assert!( as Trace>::NEEDS_TRACE); 136 | assert!( as Trace>::NEEDS_TRACE); 137 | assert!( as Trace>::NEEDS_DROP); 138 | assert!(! as Trace>::NEEDS_DROP); 139 | assert_copy::>(); 140 | assert_null_trace::(); 141 | assert!(!::NEEDS_TRACE); 142 | 143 | check_id::(); 144 | 145 | // We explicitly skipped the only trace field 146 | assert!(! as Trace>::NEEDS_TRACE); 147 | /* 148 | * We (unsafely) claimed drop-safety (w/ `unsafe_skip_drop`), 149 | * so we shouldn't generate a destructor 150 | * 151 | * Trace::NEEDS_DROP should already be false (since we have no Drop fields), 152 | * however in this case `std::mem::needs_drop` is false since we have no dummy drop impl. 153 | */ 154 | assert!(! as Trace>::NEEDS_DROP); 155 | assert!(!std::mem::needs_drop::>()); 156 | 157 | /* 158 | * Ensure that `NoDestructorNullTrace` doesn't need to be dropped 159 | * 160 | * The `nop_trace` automatically implies `unsafe_skip_drop` (safely) 161 | */ 162 | assert_null_trace::(); 163 | assert!(!::NEEDS_DROP); 164 | assert!(!std::mem::needs_drop::()); 165 | } 166 | -------------------------------------------------------------------------------- /src/vec/cell.rs: -------------------------------------------------------------------------------- 1 | //! The implementation of [GcVecCell] 2 | #[cfg(all(not(feature = "std"), feature = "alloc"))] 3 | use alloc::vec::Vec; 4 | use core::cell::RefCell; 5 | 6 | use inherent::inherent; 7 | 8 | use crate::prelude::*; 9 | use crate::vec::raw::{IGcVec, ReallocFailedError}; 10 | use crate::SimpleAllocCollectorId; 11 | 12 | /// A garbage collected vector, 13 | /// wrapped in a [RefCell] for interior mutability. 14 | /// 15 | /// Essentially a `Gc>>`. 
However, 16 | /// this can't be done directly because `RefCell` normally requires 17 | /// `T: NullTrace` (because of the possibility of write barriers). 18 | #[derive(Trace)] 19 | #[zerogc(collector_ids(Id), copy)] 20 | pub struct GcVecCell<'gc, T: GcSafe<'gc, Id>, Id: CollectorId> { 21 | inner: Gc<'gc, RefCell>, Id>, 22 | } 23 | impl<'gc, T: GcSafe<'gc, Id>, Id: CollectorId> GcVecCell<'gc, T, Id> { 24 | /// Immutably borrow the wrapped [GcVec]. 25 | /// 26 | /// The returned borrow is dynamically tracked, 27 | /// and guarded by the returned 28 | /// [core::cell::Ref] object. 29 | /// 30 | /// All immutable accesses through the [IGcVec] interface 31 | /// implicitly call this method (and thus carry the same risk of panics). 32 | /// 33 | /// ## Panics 34 | /// Panics if this vector has an outstanding mutable borrow. 35 | #[inline] 36 | pub fn borrow(&self) -> core::cell::Ref<'_, GcVec<'gc, T, Id>> { 37 | self.inner.borrow() 38 | } 39 | /// Mutably (and exclusively) borrow the wrapped [GcVec]. 40 | /// 41 | /// The returned borrow is dynamically tracked, 42 | /// and guarded by the returned 43 | /// [core::cell::RefMut] object. 44 | /// 45 | /// All mutable accesses through the [IGcVec] interface 46 | /// implicitly call this method (and thus carry the same risk of panics). 47 | /// 48 | /// ## Panics 49 | /// Panics if this vector has any other outstanding borrows. 50 | #[inline] 51 | pub fn borrow_mut(&self) -> core::cell::RefMut<'_, GcVec<'gc, T, Id>> { 52 | self.inner.borrow_mut() 53 | } 54 | /// Immutably borrow a slice of this vector's contents. 55 | /// 56 | /// Implicitly calls [GcVecCell::borrow], 57 | /// and caries the same risk of panics. 58 | #[inline] 59 | pub fn borrow_slice(&self) -> core::cell::Ref<'_, [T]> { 60 | core::cell::Ref::map(self.borrow(), |v| v.as_slice()) 61 | } 62 | /// Mutably borrow a slice of this vector's contents. 63 | /// 64 | /// Implicitly calls [GcVecCell::borrow_mut], 65 | /// and caries the same risk of panics. 
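/// A minimal usage sketch (hypothetical, not from the original docs; it assumes
/// a live allocation context `ctx` and may need adjustment):
/// ```ignore
/// let cell = GcVecCell::copy_from_slice(&[1u32, 2, 3], &ctx);
/// cell.borrow_mut_slice()[0] = 10; // exclusive borrow, released at end of statement
/// assert_eq!(cell.borrow_slice()[0], 10); // shared borrow afterwards
/// ```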
66 | #[inline] 67 | pub fn borrow_mut_slice(&self) -> core::cell::RefMut<'_, [T]> { 68 | core::cell::RefMut::map(self.borrow_mut(), |v| v.as_mut_slice()) 69 | } 70 | } 71 | impl<'gc, T: GcSafe<'gc, Id>, Id: CollectorId> Copy for GcVecCell<'gc, T, Id> {} 72 | impl<'gc, T: GcSafe<'gc, Id>, Id: CollectorId> Clone for GcVecCell<'gc, T, Id> { 73 | #[inline] 74 | fn clone(&self) -> Self { 75 | *self 76 | } 77 | } 78 | /// Because vectors are associated with a [GcContext](`crate::GcContext`), 79 | /// they contain thread local data (and thus must be `!Send` 80 | impl<'gc, T: GcSafe<'gc, Id>, Id: CollectorId> !Send for GcVecCell<'gc, T, Id> {} 81 | #[inherent] 82 | unsafe impl<'gc, T: GcSafe<'gc, Id>, Id: SimpleAllocCollectorId> IGcVec<'gc, T> 83 | for GcVecCell<'gc, T, Id> 84 | { 85 | type Id = Id; 86 | 87 | #[inline] 88 | pub fn with_capacity_in(capacity: usize, ctx: &'gc ::Context) -> Self { 89 | GcVecCell { 90 | inner: ctx.alloc(RefCell::new(GcVec::with_capacity_in(capacity, ctx))), 91 | } 92 | } 93 | 94 | #[inline] 95 | pub fn len(&self) -> usize { 96 | self.inner.borrow().len() 97 | } 98 | 99 | #[inline] 100 | pub unsafe fn set_len(&mut self, len: usize) { 101 | self.inner.borrow_mut().set_len(len); 102 | } 103 | 104 | #[inline] 105 | pub fn capacity(&self) -> usize { 106 | self.inner.borrow().capacity() 107 | } 108 | 109 | #[inline] 110 | pub fn reserve_in_place(&mut self, additional: usize) -> Result<(), ReallocFailedError> { 111 | self.inner.borrow_mut().reserve_in_place(additional) 112 | } 113 | 114 | #[inline] 115 | pub unsafe fn as_ptr(&self) -> *const T { 116 | self.inner.borrow().as_ptr() 117 | } 118 | 119 | #[inline] 120 | pub fn context(&self) -> &'gc ::Context { 121 | self.inner.borrow().context() 122 | } 123 | 124 | // Default methods: 125 | pub unsafe fn as_mut_ptr(&mut self) -> *mut T; 126 | pub fn replace(&mut self, index: usize, val: T) -> T; 127 | pub fn set(&mut self, index: usize, val: T); 128 | pub fn extend_from_slice(&mut self, src: &[T]) 129 | where 130 | T: Copy; 131 | pub fn push(&mut self, val: T); 132 | pub fn pop(&mut self) -> Option; 133 | pub fn swap_remove(&mut self, index: usize) -> T; 134 | pub fn reserve(&mut self, additional: usize); 135 | pub fn is_empty(&self) -> bool; 136 | pub fn new_in(ctx: &'gc ::Context) -> Self; 137 | pub fn copy_from_slice(src: &[T], ctx: &'gc ::Context) -> Self 138 | where 139 | T: Copy; 140 | #[cfg(feature = "alloc")] 141 | pub fn from_vec(src: Vec, ctx: &'gc ::Context) -> Self; 142 | pub fn get(&mut self, index: usize) -> Option 143 | where 144 | T: Copy; 145 | pub unsafe fn as_slice_unchecked(&self) -> &[T]; 146 | } 147 | impl<'gc, T: GcSafe<'gc, Id>, Id: CollectorId> Extend for GcVecCell<'gc, T, Id> { 148 | fn extend>(&mut self, iter: A) { 149 | self.inner.borrow_mut().extend(iter); 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | /// Implement [Trace](`crate::Trace`) for a dynamically dispatched trait object 2 | /// 3 | /// This requires that the trait object extends [DynTrace](`crate::DynTrace`). 
4 | /// 5 | /// ## Example 6 | /// ``` 7 | /// # use zerogc::{Trace, DynTrace, trait_object_trace}; 8 | /// # use zerogc::epsilon::{self, EpsilonCollectorId, Gc}; 9 | /// # type OurSpecificId = EpsilonCollectorId; 10 | /// trait Foo<'gc>: DynTrace<'gc, OurSpecificId> { 11 | /// fn method(&self) -> i32; 12 | /// } 13 | /// trait_object_trace!( 14 | /// impl<'gc,> Trace for dyn Foo<'gc>; 15 | /// Branded<'new_gc> => (dyn Foo<'new_gc> + 'new_gc), 16 | /// collector_id => OurSpecificId, 17 | /// gc_lifetime => 'gc 18 | /// ); 19 | /// fn foo<'gc, T: ?Sized + Trace + Foo<'gc>>(t: &T) -> i32 { 20 | /// assert_eq!(t.method(), 12); 21 | /// t.method() * 2 22 | /// } 23 | /// fn bar<'gc>(gc: Gc<'gc, dyn Foo<'gc> + 'gc>) -> i32 { 24 | /// foo(gc.value()) 25 | /// } 26 | /// #[derive(Trace)] 27 | /// # #[zerogc(collector_ids(EpsilonCollectorId))] 28 | /// struct Bar<'gc> { 29 | /// val: Gc<'gc, i32> 30 | /// } 31 | /// impl<'gc> Foo<'gc> for Bar<'gc> { 32 | /// fn method(&self) -> i32 { 33 | /// *self.val 34 | /// } 35 | /// } 36 | /// let val = epsilon::leaked(12); 37 | /// let gc: Gc<'_, Bar<'_>> = epsilon::leaked(Bar { val }); 38 | /// assert_eq!(bar(gc as Gc<'_, dyn Foo>), 24); 39 | /// ``` 40 | /// 41 | /// ## Safety 42 | /// This macro is completely safe. 43 | #[macro_export] 44 | macro_rules! trait_object_trace { 45 | (impl $(<$($lt:lifetime,)* $($param:ident),*>)? Trace for dyn $target:path $({ where $($where_clause:tt)* })?; 46 | Branded<$branded_lt:lifetime> => $branded:ty, 47 | collector_id => $collector_id:path, 48 | gc_lifetime => $gc_lt:lifetime) => { 49 | unsafe impl$(<$($lt,)* $($param),*>)? $crate::TrustedDrop for (dyn $target + $gc_lt) where Self: $crate::DynTrace<$gc_lt, $collector_id>, $($($where_clause)*)? {} 50 | unsafe impl$(<$($lt,)* $($param),*>)? $crate::GcSafe<$gc_lt, $collector_id> for (dyn $target + $gc_lt) where Self: $crate::DynTrace<$gc_lt, $collector_id>, $($($where_clause)*)? { 51 | #[inline] 52 | unsafe fn trace_inside_gc(gc: &mut $crate::Gc<$gc_lt, Self, $collector_id>, visitor: &mut V) -> Result<(), V::Err> 53 | where V: $crate::GcVisitor { 54 | visitor.trace_trait_object(gc) 55 | } 56 | 57 | } 58 | unsafe impl$(<$($lt,)* $($param),*>)? $crate::Trace for (dyn $target + $gc_lt) where Self: $crate::DynTrace::<$gc_lt, $collector_id>, $($($where_clause)*)? { 59 | /* 60 | * Insufficient compile-time information to know whether we need to be traced. 61 | * 62 | * Therefore, we make a conservative estimate 63 | */ 64 | const NEEDS_TRACE: bool = true; 65 | // Likewise for `NEEDS_DROP` 66 | const NEEDS_DROP: bool = true; 67 | 68 | fn trace(&mut self, _visitor: &mut V) -> Result<(), V::Err> { 69 | unimplemented!("Unable to use DynTrace outside of a Gc") 70 | } 71 | } 72 | unsafe impl<$branded_lt, $($($lt,)* $($param,)*)?> $crate::GcRebrand<$branded_lt, $collector_id> for (dyn $target + $gc_lt) $(where $($where_clause)*)? { 73 | type Branded = $branded; 74 | } 75 | } 76 | } 77 | 78 | /// Implement [Trace](`crate::Trace`) and [TraceImmutable](`crate::TraceImmutable`) as a no-op, 79 | /// based on the fact that a type implements [NullTrace](crate::NullTrace) 80 | /// 81 | /// ## Safety 82 | /// Because this verifies that `Self: NullTrace`, it is known there 83 | /// are no values inside that need to be traced (allowing a no-op trace) 84 | /// 85 | /// In other words, the unsafety is delegated to NullTrace 86 | #[macro_export] 87 | macro_rules! impl_trace_for_nulltrace { 88 | (impl $(<$($lt:lifetime,)* $($param:ident),*>)? Trace for $target:path $(where $($where_clause:tt)*)?) 
=> { 89 | unsafe impl$(<$($lt,)* $($param),*>)? $crate::Trace for $target $(where $($where_clause)*)? { 90 | const NEEDS_TRACE: bool = false; 91 | // Since we don't need a dummy-drop, we can just delegate to std 92 | const NEEDS_DROP: bool = core::mem::needs_drop::(); 93 | 94 | #[inline] 95 | fn trace(&mut self, _visitor: &mut V) -> Result<(), V::Err> { 96 | ::verify_null_trace(); 97 | Ok(()) 98 | } 99 | } 100 | unsafe impl$(<$($lt,)* $($param),*>)? $crate::TraceImmutable for $target $(where $($where_clause)*)? { 101 | #[inline] 102 | fn trace_immutable(&self, _visitor: &mut V) -> Result<(), V::Err> { 103 | Ok(()) 104 | } 105 | } 106 | } 107 | } 108 | 109 | /// Implement [NullTrace](`crate::NullTrace`) for a type that lives for `'static` 110 | /// 111 | /// ## Safety 112 | /// Because the type is `'static`, it can't possibly 113 | /// have any `Gc` pointers inside it. 114 | /// 115 | /// Therefore this macro is safe. 116 | #[macro_export] 117 | macro_rules! impl_nulltrace_for_static { 118 | ($target:path) => ($crate::impl_nulltrace_for_static!($target, params => []);); 119 | ($target:path, params => [$($param:tt)*] $(where $($where_tk:tt)*)?) => { 120 | zerogc_derive::unsafe_gc_impl!( 121 | target => $target, 122 | params => [$($param)*], 123 | bounds => { 124 | GcSafe => { where Self: 'static, $($($where_tk)*)* }, 125 | Trace => { where Self: 'static, $($($where_tk)*)* }, 126 | TraceImmutable => { where Self: 'static, $($($where_tk)*)* }, 127 | GcRebrand => { where Self: 'static, $($($where_tk)*)* }, 128 | TrustedDrop => { where Self: 'static, $($($where_tk)*)* } 129 | }, 130 | null_trace => { where Self: 'static, $($($where_tk)*)* }, 131 | branded_type => Self, 132 | NEEDS_TRACE => false, 133 | NEEDS_DROP => core::mem::needs_drop::(), 134 | trace_template => |self, visitor| { Ok(()) }, 135 | collector_id => *, 136 | ); 137 | }; 138 | } 139 | -------------------------------------------------------------------------------- /src/array/repr.rs: -------------------------------------------------------------------------------- 1 | //! Defines the underlying representation of a [GcArray](`crate::array::GcArray`) pointer. 2 | //! 3 | //! 4 | //! Two possible implementations are also available: 5 | //! 1. FatArrayPtr - Represents arrays as a fat pointer 6 | //! 2. ThinArraPtr - Represents arrays as a thin pointer, 7 | //! with the length stored indirectly in the object header. 8 | #![allow( 9 | clippy::len_without_is_empty, // This is really an internal interface... 10 | )] 11 | use core::ffi::c_void; 12 | use core::marker::PhantomData; 13 | use core::ptr::NonNull; 14 | 15 | use crate::CollectorId; 16 | 17 | /// The type of [GcArrayPtr] impl 18 | #[derive(Copy, Clone, Debug)] 19 | pub enum ArrayPtrKind { 20 | /// A `FatArrayRepr`, which can be transmuted <-> to `&[T]` 21 | Fat, 22 | /// A `ThinArrayRepr`, which can be transmuted <-> to `NonNull` 23 | Thin, 24 | } 25 | 26 | /// The raw, untyped representation of a GcArray pointer. 27 | /// 28 | /// NOTE: This is only for customizing the *pointer* 29 | /// representation. The in-memory layout of the array and its 30 | /// header can be controlled separately from the pointer. 31 | /// 32 | /// This trait is sealed, and there are only two possible 33 | /// implementations: 34 | /// 1. fat pointers 35 | /// 2. thin pointers 36 | /// 37 | /// This needs to be untyped, 38 | /// because we expect the type to be [covariant](https://doc.rust-lang.org/nomicon/subtyping.html#variance). 
39 | /// If we were to use `Id::ArrayPtr` the borrow checker would infer the type 40 | /// `T` to be invariant. Instead, we just treat it as a `NonNull`, 41 | /// and add an extra `PhantomData`. Variance problem solved :p 42 | /// 43 | /// ## Safety 44 | /// If the length is stored inline in the array (like a fat pointer), 45 | /// then the length and never change. 46 | /// 47 | /// If the length is *not* stored inline, then it must 48 | /// be retrieved from the corresponding [CollectorId]. 49 | /// 50 | /// The underlying 'repr' is responsible 51 | /// for dropping memory as appropriate. 52 | pub unsafe trait GcArrayPtr: Copy + sealed::Sealed { 53 | /// The repr's collector 54 | type Id: CollectorId; 55 | /// The "kind" of the array pointer (whether fat or thin) 56 | /// 57 | /// This is necessary to correctly 58 | /// transmute in a const-fn context 59 | const UNCHECKED_KIND: ArrayPtrKind; 60 | /// Construct an array representation from a combination 61 | /// of a pointer and length. 62 | /// 63 | /// This is the garbage collected equivalent of [std::slice::from_raw_parts] 64 | /// 65 | /// ## Safety 66 | /// The combination of pointer + length must be valid. 67 | /// 68 | /// The pointer must be the correct type (the details are erased at runtime). 69 | unsafe fn from_raw_parts(ptr: NonNull, len: usize) -> Self; 70 | /// Get a raw pointer to this array's elements. 71 | /// 72 | /// The pointer is untyped. 73 | fn as_raw_ptr(&self) -> *mut c_void; 74 | /// Get the length of this array, 75 | /// or `None` if it's not stored in the pointer (it's a thin pointer). 76 | /// 77 | /// If this type is a fat pointer it will return 78 | /// `Some`. 79 | /// If this is a thin pointer, then it must return `None`. 80 | /// 81 | /// NOTE: Despite the fact that `as_raw_ptr` returns `c_void`, 82 | /// the length is in terms of the (erased) runtime type `T`, 83 | /// not in terms of bytes. 84 | fn len(&self) -> Option; 85 | } 86 | 87 | /// Represents an array as a fat pointer. 88 | /// 89 | /// ## Safety 90 | /// This pointer is stored as a `NonNull<[c_void]>` 91 | /// 92 | /// Transmuting back and forth is safe if and only if 93 | /// it is cast to a `T` first. 94 | #[repr(transparent)] 95 | pub struct FatArrayPtr { 96 | /// NOTE: The length of this slice is in terms of `T`, 97 | /// not in terms of bytes. 
98 | /// 99 | /// It is (probably) an under-estimation 100 | slice: NonNull<[c_void]>, 101 | marker: PhantomData, 102 | } 103 | impl self::sealed::Sealed for FatArrayPtr {} 104 | impl FatArrayPtr { 105 | /// Get the length of this fat array (stored inline) 106 | #[inline] 107 | pub const fn len(&self) -> usize { 108 | unsafe { (*self.slice.as_ptr()).len() } 109 | } 110 | } 111 | impl Copy for FatArrayPtr {} 112 | impl Clone for FatArrayPtr { 113 | #[inline] 114 | fn clone(&self) -> Self { 115 | *self 116 | } 117 | } 118 | 119 | unsafe impl GcArrayPtr for FatArrayPtr { 120 | type Id = Id; 121 | const UNCHECKED_KIND: ArrayPtrKind = ArrayPtrKind::Fat; 122 | 123 | #[inline] 124 | unsafe fn from_raw_parts(ptr: NonNull, len: usize) -> Self { 125 | FatArrayPtr { 126 | slice: NonNull::new_unchecked(core::ptr::slice_from_raw_parts( 127 | ptr.as_ptr() as *const T, 128 | len, 129 | ) as *mut [T] as *mut [c_void]), 130 | marker: PhantomData, 131 | } 132 | } 133 | 134 | #[inline] 135 | fn as_raw_ptr(&self) -> *mut c_void { 136 | self.slice.as_ptr() as *mut c_void 137 | } 138 | 139 | #[inline] 140 | fn len(&self) -> Option { 141 | Some(self.len()) // delegates to inherent impl 142 | } 143 | } 144 | 145 | /// Represents an array as a thin pointer, 146 | /// storing the length indirectly in the object's header. 147 | /// 148 | /// ## Safety 149 | /// This type has the same layout as `NonNull`. 150 | /// This representation can be relied upon if and only 151 | /// if is cast to `NonNull` first. 152 | #[repr(transparent)] 153 | pub struct ThinArrayPtr { 154 | elements: NonNull, 155 | marker: PhantomData, 156 | } 157 | impl Copy for ThinArrayPtr {} 158 | impl Clone for ThinArrayPtr { 159 | #[inline] 160 | fn clone(&self) -> Self { 161 | *self 162 | } 163 | } 164 | impl self::sealed::Sealed for ThinArrayPtr {} 165 | unsafe impl GcArrayPtr for ThinArrayPtr { 166 | type Id = Id; 167 | const UNCHECKED_KIND: ArrayPtrKind = ArrayPtrKind::Thin; 168 | #[inline] 169 | unsafe fn from_raw_parts(ptr: NonNull, _len: usize) -> Self { 170 | ThinArrayPtr { 171 | elements: ptr.cast(), 172 | marker: PhantomData, 173 | } 174 | } 175 | #[inline] 176 | fn as_raw_ptr(&self) -> *mut c_void { 177 | self.elements.as_ptr() 178 | } 179 | #[inline] 180 | fn len(&self) -> Option { 181 | None 182 | } 183 | } 184 | 185 | mod sealed { 186 | pub trait Sealed {} 187 | } 188 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ZeroGc 2 | ======= 3 | [WIP] Zero overhead tracing garbage collection for rust. 4 | 5 | ## Features 6 | 1. Easy to use, since `Gc` is `Copy` and coerces to a reference. 7 | 2. Absolutely zero overhead when modifying pointers, since `Gc` is `Copy`. 8 | 3. Implementation agnostic API 9 | 4. Unsafe code has complete freedom to manipulate garbage collected pointers, and it doesn't need to understand the distinction 10 | 5. Uses rust's lifetime system to ensure all roots are known at explicit safepoints, without any runtime overhead. 11 | 6. Collection can only happen with an explicit `safepoint` call and has no overhead between these calls, 12 | 7. API supports moving objects (allowing copying/generational GCs) 13 | 14 | Instead of requiring compiler support to track GC roots (like Java/Go), 15 | it uses a shadow stack to keep track of GC roots. 16 | Collections can only happen at explicit safepoint. 
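The sketch below shows roughly what this looks like with the `zerogc-simple` implementation. It is adapted from the examples and tests in this repository, so the setup (config, logger) and the `bottom_up_tree`/`item_check` helpers from the binary-trees example are illustrative rather than a complete program:

```rust,ignore
use zerogc::safepoint;
use zerogc_simple::{GcConfig, SimpleCollector};

let collector = SimpleCollector::with_config(
    GcConfig::default(),
    slog::Logger::root(slog::Discard, slog::o!()),
);
let mut ctx = collector.into_context();
let tree = bottom_up_tree(&ctx, 4); // allocate a root (see the binary-trees example)
// The only place a collection can run. The root is handed to the safepoint
// and rebound to the new lifetime, so it remains valid afterwards.
let tree = safepoint!(ctx, tree);
println!("check: {}", item_check(&tree));
```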
17 | 18 | It uses Rust's lifetime system to ensure that freed garbage 19 | isn't accessed after a collection. Allocated objects are tied 20 | to the lifetime of the garbage collector. 21 | A safepoint (potential collection) is treated as a mutation by 22 | the borrow checker. Without zerogc (using a Vec or typed-arena), 23 | this would invalidate all previously allocated pointers. However, 24 | any 'roots' given to the safepoint are rebound to the new lifetime 25 | (via dark magic). 26 | 27 | This API aims to be implementation-agnostic, 28 | simply defining the `Trace` trait and the interface for safepoints. 29 | 30 | Right now the only implementation is `zerogc-simple`, 31 | which is a basic mark-sweep collector. 32 | It's relatively fast and lightweight, making it a good default. 33 | It uses fast arena allocation for small objects (optional; on by default) and 34 | falls back to the system allocator for everything else. 35 | 36 | In spite of the mark/sweep collector's simplicity, 37 | it's reasonably fast and is able to compete with production-quality 38 | collectors like Go/Java. 39 | 40 | The library is mostly undocumented, since I expect it to change significantly in the future. 41 | See the [binary-trees](libs/simple/examples/binary_trees.rs) example for a basic sample. 42 | 43 | ## Status 44 | This is extremely experimental. It's really uncharted territory 45 | in the way it uses the Rust borrow checker. It seems to be sound, 46 | but I have no way of really knowing for sure. 47 | 48 | The simple mark/sweep collector passes basic tests, 49 | but it still relies on a lot of unsafe code internally. 50 | 51 | Eventually I plan to use this in a language VM, 52 | so it needs to be very flexible! I want to support both simple 53 | and complex collectors with the same API. 54 | 55 | There was previously a copying collector (which worked; see commit 511be539228e7142), 56 | but I removed it due to high memory usage. 57 | 58 | ## Motivation 59 | I was originally inspired to create a safe abstraction for garbage collection by [rust gc](https://github.com/Manishearth/rust-gc), 60 | but I wanted it to have zero runtime overhead and pointers that are `Copy`. 61 | 62 | The main problem that rust-gc solves is finding the 'roots' of garbage collection. 63 | His collector uses runtime tracking to maintain a reference to every GC object. 64 | 65 | I'm familiar with some JIT implementations, and I know they tend to use safepoints 66 | to explicitly control where garbage collections can happen. 67 | Normally this is an unsafe operation: all live references on the stack must 68 | be given to the safepoint, or they risk being used after they are freed. 69 | Eventually I realized I could use the borrow checker to restrict 70 | the usage of roots around safepoints. I was inspired in part by the way 71 | [indexing](https://github.com/bluss/indexing) uses lifetimes to enforce 72 | the validity of indexes. 73 | 74 | Our collector only has runtime overhead at specific safepoints, 75 | when the shadow-stack is being updated. 76 | 77 | Not only is this faster than runtime tracking of every single pointer or conservative collection, but it is more flexible. 78 | It paves the way for any combination of generational, incremental, and copying garbage collection. 79 | 80 | ### Prior Art 81 | Since the original [rust gc](https://github.com/Manishearth/rust-gc), others 82 | have attempted to make zero-overhead collectors. 83 | I was not aware of this when I started this project. 84 | 1.
[shifgrethor](https://github.com/withoutboats/shifgrethor) 85 | - This is **extremely** impressive. It appears to be the only other collector 86 | where `Gc: Copy` and coerces to a reference. 87 | - However, the collectors have to support pinning of roots (which we do not) 88 | - See this [blog post series for more](https://boats.gitlab.io/blog/post/shifgrethor-i/) 89 | 2. [cell-gc](https://github.com/jorendorff/cell-gc) 90 | - Unfortunately this has a rather clunky interface to Rust code, 91 | so it wouldn't be suitable here. 92 | - They implement a basic List VM as a proof of concept. 93 | 3. [gc-arena](https://github.com/kyren/gc-arena) 94 | - Seems like a similar idea, implemented a little later. 95 | - Works around the awkwardness of lifetimes by using a future-like API. 96 | - Like our collector, safepoints are explicitly requested by the user 97 | - However, instead of attempting to rebind lifetimes, 98 | they attempt to use futures to build state machines 99 | 100 | ## Disadvantages 101 | 1. The garbage collector can only run in response to an explicit `safepoint!`, not memory pressure. 102 | - This is a fundamental design limitation. 103 | - One can think of this as a feature, since garbage collection can be restricted to specific times and places. 104 | - The user must be liberal about inserting safepoints 105 | - Generally, high-level languages which use GCs automatically insert calls to safepoints 106 | - Their safepoints tend to be lower-overhead than ours, so you need to balance the cost/benefit 107 | 2. Implementing `GcSafe` for a type prevents it from having an explicit `Drop` implementation. 108 | - This is needed to ensure that the destructors don't do bad things, since we don't want to deal with finalizers. 109 | - Of course unsafe code isn't bound by this restriction, since it's assumed to behave properly (and there is an opt-out if you know you're safe). 110 | 111 | ### Implementation Flaws 112 | None of these are fundamental design flaws. They can all be fixed (and should be). 113 | 1. Currently, unable to return a garbage collected type from a method that uses a safepoint 114 | - This is *not* a fundamental limitation of the design. I have plans to fix the API to support this. 115 | - Unfortunately, this is almost crippling for many use cases, but can be worked around with an 'unchecked_safepoint' 116 | 2. Currently, unable to use short lifetimes in garbage collected code. 117 | - This is because `Gc<'gc, T>` currently requires `T: 'gc` 118 | - It is possible to relax this restriction, but it may make things more awkward to use... 119 | 3. Restriction on borrowing possibly aliased vectors of GC memory 120 | - It is difficult to enforce Rust's mutability rules with possibly shared GC memory (see issue #30) 121 | 4. Implementation isn't generational (yet) 122 | 5. Awkward to use from a generic context, because the API hasn't yet taken full advantage of generic associated types. -------------------------------------------------------------------------------- /src/manually_traced/mod.rs: -------------------------------------------------------------------------------- 1 | //! Includes manual `GarbageCollected` implementations for various types. 2 | //! 3 | //! Since unsafe code and third-party libraries can't have an automatically derived `GarbageCollected` implementation, 4 | //! we need to manually provide them here. 5 | //! This is done for all stdlib types and some feature-gated external libraries.
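//!
//! For example, a third-party type that contains no interior GC pointers could
//! (hypothetically) be handled with the `unsafe_trace_primitive!` macro defined
//! below; the type named here is purely illustrative, not a real dependency:
//!
//! ````no_test
//! unsafe_trace_primitive!(some_crate::OpaqueHandle);
//! ````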
6 | #![doc(hidden)] // This is unstable 7 | 8 | /// Unsafely implement `GarbageCollected` for the specified type, 9 | /// by acquiring a 'lock' in order to trace the underlying value. 10 | /// 11 | /// This is good for interior mutability types like `RefCell` and `Mutex` where you need to acquire a lock, 12 | /// in order to safely view the interior. 13 | /// Usually `unsafe_trace_deref!` is sufficient since it also lets you run 14 | /// arbitrary code in order to 'convert' the macro to the necessary type, 15 | /// and the only restriction is that the interior can be directly traced. 16 | /// 17 | /// However, that isn't sufficient if you need to hold RAII guards (like `Ref` or `MutexGuard`s) 18 | /// on the values you're tracing in addition to just accessing them. 19 | /// For example, for `RefCell` you'd call `borrow` in order to acquire a `Ref` to the interior. 20 | /// Although tracing garbage collection is already unsafe, 21 | /// it's always completely undefined behavior to bypass the locking of `Mutex` and `RefCell`, 22 | /// even if it's just for a 'little bit' since it may cause mutable references to alias. 23 | /// It is currently the most powerful of the unsafe implementation macros, 24 | /// since it lets you not only run an arbitrary expression like `unsafe_trace_deref!`, 25 | /// but also acquire and hold a RAII guard object. 26 | /// 27 | /// This macro is usually only useful for types like `Mutex` and `RefCell` who use raw pointers internally, 28 | /// since raw pointers can't be automatically traced without additional type information. 29 | /// Otherwise, it's best to use an automatically derived implementation since that's always safe. 30 | /// However, using this macro is always better than a manual implementation, since it makes your intent clearer. 31 | /// 32 | /// ## Usage 33 | /// ````no_test 34 | /// // You can use an arbitrary expression to acquire a lock's guard 35 | /// unsafe_trace_lock!(RefCell, target = T, |cell| cell.borrow()); 36 | /// unsafe_trace_lock!(Mutex, target = T, |lock| lock.lock().unwrap()); 37 | /// unsafe_trace_lock!(RwLock, target = T, |lock| lock.lock().unwrap()); 38 | /// ```` 39 | /// 40 | /// ## Safety 41 | /// Always prefer automatically derived implementations where possible, 42 | /// since they're just as fast and can never cause undefined behavior. 43 | /// This is basically an _unsafe automatically derived_ implementation, 44 | /// to be used only when a safe automatically derived implementation isn't possible (like with `Vec`). 45 | /// 46 | /// Undefined behavior if there could be additional garbage collected objects that are not reachable 47 | /// by dereferencing the specified lock, since the macro only traces the item the lock dereferences to. 48 | /// This usually isn't the case with most locks and would be somewhat rare, 49 | /// but it's still a possibility that causes the macro to be unsafe. 50 | #[macro_export] 51 | macro_rules! 
unsafe_trace_lock { 52 | ($target:ident, target = $target_type:ident; |$get_mut:ident| $get_mut_expr:expr, |$lock:ident| $acquire_guard:expr) => { 53 | unsafe_gc_impl!( 54 | target => $target<$target_type>, 55 | params => [$target_type], 56 | null_trace => { where $target_type: NullTrace }, 57 | NEEDS_TRACE => $target_type::NEEDS_TRACE, 58 | NEEDS_DROP => $target_type::NEEDS_DROP /* if our inner type needs a drop */ 59 | || core::mem::needs_drop::<$target<()>>(), // Or we have unconditional drop (std-mutex) 60 | trace_mut => |self, visitor| { 61 | let $get_mut = self; 62 | let value: &mut $target_type = $get_mut_expr; 63 | visitor.trace::<$target_type>(value) 64 | }, 65 | trace_immutable => |self, visitor| { 66 | if !::NEEDS_TRACE { return Ok(()) }; 67 | // We can immutably visit a lock by acquiring it 68 | let $lock = self; 69 | #[allow(unused_mut)] 70 | let mut guard = $acquire_guard; 71 | let guard_value = &mut *guard; 72 | visitor.trace(guard_value) 73 | } 74 | ); 75 | }; 76 | } 77 | 78 | /// Unsafely implement `GarbageCollected` for the specified type, 79 | /// by assuming it's a 'primitive' and never needs to be traced. 80 | /// 81 | /// The fact that this type is a 'primitive' means it's assumed to have no type parameters, 82 | /// and that there's nothing . 83 | /// This macro is only useful for unsafe types like `String` who use raw pointers internally, 84 | /// since raw pointers can't be automatically traced without additional type information. 85 | /// 86 | /// ## Safety 87 | /// Always prefer automatically derived implementations where possible, 88 | /// since they're just as fast and can never cause undefined behavior. 89 | /// This is basically an _unsafe automatically derived_ implementation, 90 | /// to be used only when a safe automatically derived implementation isn't possible (like with `String`). 91 | /// 92 | /// Undefined behavior only if there are garbage collected pointers in the type's interior, 93 | /// since the implementation assumes there's nothing to trace in the first place. 94 | /// 95 | /// This delegates to `unsafe_gc_impl!` to provide the (GcRebrand)[`crate::GcRebrand`] implementation, 96 | /// but that will never cause undefined behavior unless you 97 | /// already have garbage collected pointers inside 98 | /// (which are already undefined behavior for tracing). 99 | #[macro_export] 100 | macro_rules! unsafe_trace_primitive { 101 | ($target:ty) => (unsafe_trace_primitive!($target; @ deserialize => delegate);); 102 | ($target:ty; @ $(deserialize => $strategy:ident)?) => { 103 | unsafe_gc_impl! { 104 | target => $target, 105 | params => [], 106 | null_trace => always, 107 | NEEDS_TRACE => false, 108 | NEEDS_DROP => core::mem::needs_drop::<$target>(), 109 | collector_id => *, 110 | trace_template => |self, visitor| { /* nop */ Ok(()) }, 111 | $(deserialize => $strategy)* 112 | } 113 | unsafe impl<'gc, OwningRef> $crate::GcDirectBarrier<'gc, OwningRef> for $target { 114 | #[inline(always)] 115 | unsafe fn write_barrier( 116 | &self, _owner: &OwningRef, _field_offset: usize, 117 | ) { 118 | /* 119 | * TODO: We don't have any GC fields, 120 | * so what does it mean to have a write barrier? 
121 | */ 122 | /* NOP */ 123 | } 124 | } 125 | }; 126 | } 127 | 128 | #[cfg(feature = "anyhow")] 129 | mod anyhow; 130 | #[cfg(feature = "arrayvec")] 131 | mod arrayvec; 132 | mod core; 133 | #[cfg(feature = "indexmap")] 134 | mod indexmap; 135 | #[cfg(feature = "parking_lot")] 136 | mod parking_lot; 137 | #[cfg(any(feature = "alloc", feature = "std"))] 138 | mod stdalloc; 139 | #[cfg(feature = "std")] 140 | mod stdlib; 141 | -------------------------------------------------------------------------------- /libs/context/src/state/nosync.rs: -------------------------------------------------------------------------------- 1 | //! A simpler implementation of (GcContext)[`::zerogc::GcContext`] 2 | //! that doesn't support multiple threads/contexts. 3 | //! 4 | //! In exchange, there is no locking :) 5 | //! 6 | //! Also, there is `#![no_std]` support 7 | 8 | use core::cell::{Cell, RefCell, UnsafeCell}; 9 | use core::fmt::{self, Debug, Formatter}; 10 | use core::marker::PhantomData; 11 | use core::mem::ManuallyDrop; 12 | 13 | use alloc::boxed::Box; 14 | 15 | use slog::{o, trace, FnValue, Logger}; 16 | 17 | use crate::collector::RawCollectorImpl; 18 | use crate::{CollectorRef, ContextState, ShadowStack}; 19 | 20 | /// Manages coordination of garbage collections 21 | /// 22 | /// This is factored out of the main code mainly due to 23 | /// differences from single-threaded collection 24 | pub struct CollectionManager { 25 | /// Implicit collector ref 26 | _marker: PhantomData>, 27 | /// Access to the internal state 28 | state: RefCell, 29 | /// Whether a collection is currently in progress 30 | /// 31 | /// Used for debugging only 32 | collecting: Cell, 33 | /// Sanity check to ensure there's only one context 34 | has_existing_context: Cell, 35 | } 36 | impl super::sealed::Sealed for CollectionManager {} 37 | unsafe impl super::CollectionManager for CollectionManager 38 | where 39 | C: RawCollectorImpl>, 40 | { 41 | type Context = RawContext; 42 | 43 | fn new() -> Self { 44 | assert!(!C::SYNC); 45 | CollectionManager { 46 | _marker: PhantomData, 47 | state: RefCell::new(CollectorState::new()), 48 | collecting: Cell::new(false), 49 | has_existing_context: Cell::new(false), 50 | } 51 | } 52 | #[inline] 53 | fn is_collecting(&self) -> bool { 54 | self.collecting.get() 55 | } 56 | #[inline] 57 | fn should_trigger_collection(&self) -> bool { 58 | /* 59 | * Unlike the sync context manager, we can assume 60 | * there is only a single thread. 61 | * Therefore we don't need to check for other threads 62 | * having a collection in progress when we're at a safepoint. 
63 | * 64 | * If we were having a collection, control flow is already 65 | * taken over by the collector ;) 66 | */ 67 | false 68 | } 69 | unsafe fn freeze_context(&self, context: &RawContext) { 70 | debug_assert_eq!(context.state.get(), ContextState::Active); 71 | unimplemented!("Freezing single-threaded contexts") 72 | } 73 | unsafe fn unfreeze_context(&self, _context: &RawContext) { 74 | // We can't freeze, so we sure can't unfreeze 75 | unreachable!("Can't unfreeze a single-threaded context") 76 | } 77 | 78 | fn prevent_collection(_collector: &C, _func: impl FnOnce() -> R) -> R { 79 | unimplemented!("Preventing collections for non-sync collectors") 80 | } 81 | 82 | #[inline] 83 | unsafe fn free_context(_collector: &C, _context: *mut Self::Context) { 84 | assert!(!C::SYNC); 85 | // No extra work to do - automatic Drop handles everything 86 | } 87 | } 88 | pub struct RawContext { 89 | /// Since we're the only context, we should (logically) 90 | /// be the only owner. 91 | /// 92 | /// This is still an Arc for easier use alongside the 93 | /// thread-safe implementation 94 | pub(crate) collector: CollectorRef, 95 | // NOTE: We are Send, not Sync 96 | pub(super) shadow_stack: UnsafeCell>, 97 | // TODO: Does the collector access this async? 98 | pub(super) state: Cell, 99 | logger: Logger, 100 | } 101 | impl Debug for RawContext { 102 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 103 | f.debug_struct("RawContext") 104 | .field("collector", &format_args!("{:p}", &self.collector)) 105 | .field( 106 | "shadow_stacks", 107 | // We're assuming this is valid.... 108 | unsafe { &*self.shadow_stack.get() }, 109 | ) 110 | .field("state", &self.state.get()) 111 | .finish() 112 | } 113 | } 114 | impl super::sealed::Sealed for RawContext {} 115 | unsafe impl super::RawContext for RawContext 116 | where 117 | C: RawCollectorImpl>, 118 | { 119 | unsafe fn register_new(collector: &CollectorRef) -> ManuallyDrop> { 120 | assert!(!C::SYNC); 121 | // NOTE: Nosync collector must have only **ONE** context 122 | assert!( 123 | !collector 124 | .as_raw() 125 | .manager() 126 | .has_existing_context 127 | .replace(true), 128 | "Already created a context for the collector!" 129 | ); 130 | // Assume ownership 131 | let collector = collector.clone_internal(); 132 | let logger = collector.as_raw().logger().new(o!()); 133 | let context = ManuallyDrop::new(Box::new(RawContext { 134 | logger: logger.clone(), 135 | collector, 136 | shadow_stack: UnsafeCell::new(ShadowStack { 137 | last: core::ptr::null_mut(), 138 | }), 139 | state: Cell::new(ContextState::Active), 140 | })); 141 | trace!( 142 | logger, "Initializing context"; 143 | "ptr" => format_args!("{:p}", &**context), 144 | ); 145 | context 146 | } 147 | #[cold] 148 | #[inline(never)] 149 | unsafe fn trigger_safepoint(&self) { 150 | /* 151 | * Begin a collection. 152 | * 153 | * Since we are the only collector we don't have 154 | * to worry about awaiting other threads stopping 155 | * at a safepoint. 156 | * This simplifies the implementation considerably. 
157 | */ 158 | assert!(!self.collector.as_raw().manager().collecting.get()); 159 | self.collector.as_raw().manager().collecting.set(true); 160 | let collection_id = self 161 | .collector 162 | .as_raw() 163 | .manager() 164 | .state 165 | .borrow_mut() 166 | .next_pending_id(); 167 | trace!( 168 | self.logger, 169 | "Creating collector"; 170 | "id" => collection_id, 171 | "ctx_ptr" => format_args!("{:?}", self) 172 | ); 173 | let shadow_stack = &*self.shadow_stack.get(); 174 | let ptr = self as *const RawContext as *mut RawContext; 175 | // Change our state to mark we are now waiting at a safepoint 176 | assert_eq!( 177 | self.state 178 | .replace(ContextState::SafePoint { collection_id }), 179 | ContextState::Active 180 | ); 181 | trace!( 182 | self.logger, "Beginning collection"; 183 | "ptr" => ?ptr, 184 | "shadow_stack" => FnValue(|_| alloc::format!("{:?}", shadow_stack.as_vec())), 185 | "state" => ?self.state, 186 | "collection_id" => collection_id, 187 | "original_size" => %self.collector.as_raw().allocated_size(), 188 | ); 189 | self.collector.as_raw().perform_raw_collection(&[ptr]); 190 | assert_eq!( 191 | self.state.replace(ContextState::Active), 192 | ContextState::SafePoint { collection_id } 193 | ); 194 | assert!(self.collector.as_raw().manager().collecting.replace(false)); 195 | } 196 | 197 | #[inline] 198 | fn shadow_stack_ptr(&self) -> *mut ShadowStack { 199 | self.shadow_stack.get() 200 | } 201 | 202 | #[inline] 203 | unsafe fn collector_ref(&self) -> &CollectorRef { 204 | &self.collector 205 | } 206 | 207 | #[inline] 208 | fn state(&self) -> ContextState { 209 | self.state.get() 210 | } 211 | } 212 | 213 | // Pending collections 214 | 215 | /// Keeps track of a pending collection (if any) 216 | /// 217 | /// This must be held under a write lock for a collection to happen. 218 | /// This must be held under a read lock to prevent collections. 219 | pub struct CollectorState { 220 | next_pending_id: u64, 221 | } 222 | #[allow(clippy::new_without_default)] 223 | impl CollectorState { 224 | pub fn new() -> Self { 225 | CollectorState { next_pending_id: 0 } 226 | } 227 | fn next_pending_id(&mut self) -> u64 { 228 | let id = self.next_pending_id; 229 | self.next_pending_id = id.checked_add(1).expect("Overflow"); 230 | id 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /src/array.rs: -------------------------------------------------------------------------------- 1 | //! Defines the interface to garbage collected arrays. 2 | use core::cmp::Ordering; 3 | use core::fmt::{self, Debug, Display, Formatter}; 4 | use core::hash::{Hash, Hasher}; 5 | use core::marker::PhantomData; 6 | use core::ops::{Deref, Index}; 7 | use core::ptr::NonNull; 8 | use core::slice::SliceIndex; 9 | use core::str; 10 | 11 | use crate::{CollectorId, Gc, GcRebrand, GcSafe}; 12 | use zerogc_derive::{unsafe_gc_impl, Trace}; 13 | 14 | use self::repr::GcArrayPtr; 15 | 16 | pub mod repr; 17 | 18 | /// A garbage collected string. 19 | /// 20 | /// This is a transparent wrapper around `GcArray`, 21 | /// with the additional invariant that it's utf8 encoded. 22 | /// 23 | /// ## Safety 24 | /// The bytes can be assumed to be UTF8 encoded, 25 | /// just like with a `str`. 
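/// 
/// For ordinary construction, prefer the checked [GcString::from_utf8].
/// A minimal usage sketch (the `epsilon::gc_array` helper is the one used in
/// this file's test module; other collectors would allocate the byte array
/// through `GcSimpleAlloc` instead):
/// ```ignore
/// let bytes = epsilon::gc_array(b"hello" as &[u8]);
/// let s = GcString::from_utf8(bytes).expect("valid UTF-8");
/// assert_eq!(&*s, "hello"); // Deref<Target = str>
/// ```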
26 | /// 27 | /// Assuming the bytes are utf8 encoded, 28 | /// this can be transmuted back and forth from `GcArray` 29 | #[repr(transparent)] 30 | #[derive(Trace, Eq, PartialEq, Hash, Clone, Copy)] 31 | #[zerogc(copy, collector_ids(Id))] 32 | pub struct GcString<'gc, Id: CollectorId> { 33 | bytes: GcArray<'gc, u8, Id>, 34 | } 35 | impl<'gc, Id: CollectorId> GcString<'gc, Id> { 36 | /// Convert an array of UTF8 bytes into a string. 37 | /// 38 | /// Returns an error if the bytes aren't valid UTF8, 39 | /// just like [core::str::from_utf8]. 40 | #[inline] 41 | pub fn from_utf8(bytes: GcArray<'gc, u8, Id>) -> Result { 42 | core::str::from_utf8(bytes.as_slice())?; 43 | // SAFETY: Validated with from_utf8 call 44 | Ok(unsafe { Self::from_utf8_unchecked(bytes) }) 45 | } 46 | /// Convert an array of UTF8 bytes into a string, 47 | /// without checking for validity. 48 | /// 49 | /// ## Safety 50 | /// Undefined behavior if the bytes aren't valid 51 | /// UTF8, just like with [core::str::from_utf8_unchecked] 52 | #[inline] 53 | pub const unsafe fn from_utf8_unchecked(bytes: GcArray<'gc, u8, Id>) -> Self { 54 | GcString { bytes } 55 | } 56 | /// Retrieve this string as a raw array of bytes 57 | #[inline] 58 | pub const fn as_bytes(&self) -> GcArray<'gc, u8, Id> { 59 | self.bytes 60 | } 61 | /// Convert this string into a slice of bytes 62 | #[inline] 63 | pub fn as_str(&self) -> &'gc str { 64 | unsafe { str::from_utf8_unchecked(self.as_bytes().as_slice()) } 65 | } 66 | } 67 | impl<'gc, Id: CollectorId> Deref for GcString<'gc, Id> { 68 | type Target = str; 69 | #[inline] 70 | fn deref(&self) -> &'_ str { 71 | self.as_str() 72 | } 73 | } 74 | impl<'gc, Id: CollectorId> Debug for GcString<'gc, Id> { 75 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 76 | Debug::fmt(self.as_str(), f) 77 | } 78 | } 79 | impl<'gc, Id: CollectorId> Display for GcString<'gc, Id> { 80 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 81 | Display::fmt(self.as_str(), f) 82 | } 83 | } 84 | 85 | /// A garbage collected array. 86 | /// 87 | /// The length is immutable and cannot change 88 | /// once it has been allocated. 89 | /// 90 | /// ## Safety 91 | /// This is a 'repr(transparent)' wrapper arround 92 | /// [GcArrayRepr]. 93 | #[repr(transparent)] 94 | pub struct GcArray<'gc, T, Id: CollectorId> { 95 | ptr: Id::ArrayPtr, 96 | marker: PhantomData>, 97 | } 98 | impl<'gc, T, Id: CollectorId> GcArray<'gc, T, Id> { 99 | /// Convert this array into a slice 100 | #[inline] 101 | pub fn as_slice(&self) -> &'gc [T] { 102 | unsafe { core::slice::from_raw_parts(self.as_raw_ptr(), self.len()) } 103 | } 104 | /// Load a raw pointer to the array's value 105 | #[inline] 106 | pub fn as_raw_ptr(&self) -> *mut T { 107 | self.ptr.as_raw_ptr() as *mut T 108 | } 109 | /// Get the underlying 'Id::ArrayPtr' for this array 110 | /// 111 | /// ## Safety 112 | /// Must not interpret the underlying pointer as the 113 | /// incorrect type. 
114 | #[inline] 115 | pub const unsafe fn as_internal_ptr_repr(&self) -> &'_ Id::ArrayPtr { 116 | &self.ptr 117 | } 118 | /// Load the length of the array 119 | #[inline] 120 | pub fn len(&self) -> usize { 121 | match self.ptr.len() { 122 | Some(len) => len, 123 | None => Id::resolve_array_len(self), 124 | } 125 | } 126 | /// Check if the array is empty 127 | #[inline] 128 | pub fn is_empty(&self) -> bool { 129 | self.len() == 0 130 | } 131 | /// Resolve the [CollectorId] 132 | #[inline] 133 | pub fn collector_id(&self) -> &'_ Id { 134 | Id::resolve_array_id(self) 135 | } 136 | /// Create an array from the specified raw pointer and length 137 | /// 138 | /// ## Safety 139 | /// Pointer and length must be valid, and point to a garbage collected 140 | /// value allocated from the corresponding [CollectorId] 141 | #[inline] 142 | pub unsafe fn from_raw_ptr(ptr: NonNull, len: usize) -> Self { 143 | GcArray { 144 | ptr: Id::ArrayPtr::from_raw_parts(ptr, len), 145 | marker: PhantomData, 146 | } 147 | } 148 | } 149 | /// If the underlying type is `Sync`, it's safe 150 | /// to share garbage collected references between threads. 151 | /// 152 | /// The safety of the collector itself depends on whether [CollectorId] is Sync. 153 | /// If it is, the whole garbage collection implementation should be as well. 154 | unsafe impl<'gc, T, Id> Sync for GcArray<'gc, T, Id> 155 | where 156 | T: Sync, 157 | Id: CollectorId + Sync, 158 | { 159 | } 160 | unsafe impl<'gc, T, Id> Send for GcArray<'gc, T, Id> 161 | where 162 | T: Sync, 163 | Id: CollectorId + Sync, 164 | { 165 | } 166 | impl<'gc, T, I, Id: CollectorId> Index for GcArray<'gc, T, Id> 167 | where 168 | I: SliceIndex<[T]>, 169 | { 170 | type Output = I::Output; 171 | #[inline] 172 | fn index(&self, idx: I) -> &I::Output { 173 | &self.as_slice()[idx] 174 | } 175 | } 176 | impl<'gc, T, Id: CollectorId> Deref for GcArray<'gc, T, Id> { 177 | type Target = [T]; 178 | 179 | #[inline] 180 | fn deref(&self) -> &Self::Target { 181 | self.as_slice() 182 | } 183 | } 184 | impl<'gc, T, Id: CollectorId> Copy for GcArray<'gc, T, Id> {} 185 | impl<'gc, T, Id: CollectorId> Clone for GcArray<'gc, T, Id> { 186 | #[inline] 187 | fn clone(&self) -> Self { 188 | *self 189 | } 190 | } 191 | impl<'gc, T: Debug, Id: CollectorId> Debug for GcArray<'gc, T, Id> { 192 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 193 | f.debug_list().entries(self.iter()).finish() 194 | } 195 | } 196 | impl<'gc, T: PartialEq, Id: CollectorId> PartialEq for GcArray<'gc, T, Id> { 197 | #[inline] 198 | fn eq(&self, other: &Self) -> bool { 199 | self.as_slice() == other.as_slice() 200 | } 201 | } 202 | impl<'gc, T: PartialEq, Id: CollectorId> PartialEq<[T]> for GcArray<'gc, T, Id> { 203 | #[inline] 204 | fn eq(&self, other: &[T]) -> bool { 205 | self.as_slice() == other 206 | } 207 | } 208 | impl<'gc, T: PartialOrd, Id: CollectorId> PartialOrd for GcArray<'gc, T, Id> { 209 | #[inline] 210 | fn partial_cmp(&self, other: &Self) -> Option { 211 | self.as_slice().partial_cmp(other.as_slice()) 212 | } 213 | } 214 | impl<'gc, T: PartialOrd, Id: CollectorId> PartialOrd<[T]> for GcArray<'gc, T, Id> { 215 | #[inline] 216 | fn partial_cmp(&self, other: &[T]) -> Option { 217 | self.as_slice().partial_cmp(other) 218 | } 219 | } 220 | impl<'gc, T: Ord, Id: CollectorId> Ord for GcArray<'gc, T, Id> { 221 | #[inline] 222 | fn cmp(&self, other: &Self) -> Ordering { 223 | self.as_slice().cmp(other) 224 | } 225 | } 226 | impl<'gc, T: Eq, Id: CollectorId> Eq for GcArray<'gc, T, Id> {} 227 | impl<'gc, T: Hash, Id: 
CollectorId> Hash for GcArray<'gc, T, Id> { 228 | #[inline] 229 | fn hash(&self, hasher: &mut H) { 230 | T::hash_slice(self.as_slice(), hasher) 231 | } 232 | } 233 | impl<'gc, T, Id: CollectorId> IntoIterator for GcArray<'gc, T, Id> 234 | where 235 | T: 'gc, 236 | { 237 | type Item = &'gc T; 238 | 239 | type IntoIter = core::slice::Iter<'gc, T>; 240 | 241 | #[inline] 242 | fn into_iter(self) -> Self::IntoIter { 243 | self.as_slice().iter() 244 | } 245 | } 246 | impl<'array, 'gc, T, Id: CollectorId> IntoIterator for &'array GcArray<'gc, T, Id> 247 | where 248 | T: 'array, 249 | { 250 | type Item = &'array T; 251 | 252 | type IntoIter = core::slice::Iter<'array, T>; 253 | 254 | #[inline] 255 | fn into_iter(self) -> Self::IntoIter { 256 | self.as_slice().iter() 257 | } 258 | } 259 | // Need to implement by hand, because [T] is not GcRebrand 260 | unsafe_gc_impl!( 261 | target => GcArray<'gc, T, Id>, 262 | params => ['gc, T: GcSafe<'gc, Id>, Id: CollectorId], 263 | bounds => { 264 | TraceImmutable => never, 265 | GcRebrand => { where T: GcRebrand<'new_gc, Id>, >::Branded: Sized + GcSafe<'new_gc, Id> }, 266 | }, 267 | null_trace => never, 268 | branded_type => GcArray<'new_gc, >::Branded, Id>, 269 | NEEDS_TRACE => true, 270 | NEEDS_DROP => false, 271 | trace_mut => |self, visitor| { 272 | unsafe { visitor.trace_array(self) } 273 | }, 274 | collector_id => Id, 275 | visit_inside_gc => |gc, visitor| { 276 | visitor.trace_gc(gc) 277 | } 278 | ); 279 | 280 | #[cfg(test)] 281 | mod test { 282 | use crate::epsilon::{self}; 283 | use crate::{CollectorId, GcArray}; 284 | #[test] 285 | fn test_covariance<'a>() { 286 | fn covariant<'a, T, Id: CollectorId>(s: GcArray<'static, T, Id>) -> GcArray<'a, T, Id> { 287 | s as _ 288 | } 289 | const SRC: &[u32] = &[1, 2, 5]; 290 | let s: epsilon::GcArray<'static, u32> = epsilon::gc_array(SRC); 291 | let k: epsilon::GcArray<'a, u32> = covariant(s); 292 | assert_eq!(k.as_slice(), SRC); 293 | } 294 | } 295 | -------------------------------------------------------------------------------- /libs/context/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature( 2 | negative_impls, // !Send is much cleaner than `PhantomData` 3 | ptr_metadata 4 | )] 5 | #![allow( 6 | clippy::missing_safety_doc, // Entirely internal code 7 | )] 8 | #![cfg_attr(not(feature = "std"), no_std)] 9 | //! The implementation of (GcContext)[`::zerogc::GcContext`] that is 10 | //! shared among both thread-safe and thread-unsafe code. 11 | 12 | /* 13 | * NOTE: Allocation is still needed for internals 14 | * 15 | * Uses: 16 | * 1. `Box` for each handle 17 | * 2. `Vec` for listing buckets of handles 18 | * 3. `Arc` and `Box` for boxing context state 19 | * 20 | * TODO: Should we drop these uses entirely? 21 | */ 22 | extern crate alloc; 23 | 24 | use core::fmt::{self, Debug, Formatter}; 25 | use core::mem::ManuallyDrop; 26 | 27 | use alloc::boxed::Box; 28 | use alloc::vec::Vec; 29 | 30 | use zerogc::prelude::*; 31 | 32 | pub mod state; 33 | 34 | #[macro_use] 35 | pub mod utils; 36 | pub mod collector; 37 | pub mod handle; 38 | 39 | use crate::collector::RawCollectorImpl; 40 | 41 | pub use crate::collector::{CollectorId, CollectorRef, WeakCollectorRef}; 42 | pub use crate::state::{CollectionManager, RawContext}; 43 | 44 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 45 | pub enum ContextState { 46 | /// The context is active. 
47 | /// 48 | /// Its contents are potentially being mutated, 49 | /// so the `shadow_stack` doesn't necessarily 50 | /// reflect the actual set of thread roots. 51 | /// 52 | /// New objects could be allocated that are not 53 | /// actually being tracked in the `shadow_stack`. 54 | Active, 55 | /// The context is waiting at a safepoint 56 | /// for a collection to complete. 57 | /// 58 | /// The mutating thread is blocked for the 59 | /// duration of the safepoint (until collection completes). 60 | /// 61 | /// Therefore, its `shadow_stack` is guarenteed to reflect 62 | /// the actual set of thread roots. 63 | SafePoint { 64 | /// The id of the collection we are waiting for 65 | collection_id: u64, 66 | }, 67 | /// The context is frozen. 68 | /// Allocation or mutation can't happen 69 | /// but the mutator thread isn't actually blocked. 70 | /// 71 | /// Unlike a safepoint, this is explicitly unfrozen at the 72 | /// user's discretion. 73 | /// 74 | /// Because no allocation or mutation can happen, 75 | /// its shadow_stack stack is guarenteed to 76 | /// accurately reflect the roots of the context. 77 | #[cfg_attr(not(feature = "sync"), allow(unused))] 78 | // TODO: Implement frozen for simple contexts? 79 | Frozen, 80 | } 81 | impl ContextState { 82 | #[cfg_attr(not(feature = "sync"), allow(unused))] // TODO: Implement frozen for simple contexts? 83 | fn is_frozen(&self) -> bool { 84 | matches!(*self, ContextState::Frozen) 85 | } 86 | } 87 | 88 | /* 89 | * These form a stack of contexts, 90 | * which all share owns a pointer to the RawContext, 91 | * The raw context is implicitly bound to a single thread 92 | * and manages the state of all the contexts. 93 | * 94 | * https://llvm.org/docs/GarbageCollection.html#the-shadow-stack-gc 95 | * Essentially these objects maintain a shadow stack 96 | * 97 | * The pointer to the RawContext must be Arc, since the 98 | * collector maintains a weak reference to it. 99 | * I use double indirection with a `Rc` because I want 100 | * `recurse_context` to avoid the cost of atomic operations. 101 | * 102 | * SimpleCollectorContexts mirror the application stack. 103 | * They can be stack allocated inside `recurse_context`. 104 | * All we would need to do is internally track ownership of the original 105 | * context. The sub-collector in `recurse_context` is very clearly 106 | * restricted to the lifetime of the closure 107 | * which is a subset of the parent's lifetime. 108 | * 109 | * We still couldn't be Send, since we use interior mutablity 110 | * inside of RawContext that is not thread-safe. 
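 *
 * Rough picture (sketch): each `recurse_context` call pushes one
 * `ShadowStackLink` onto an intrusive singly-linked list, so at a safepoint
 * the collector can walk `last -> prev -> ... -> NULL` to recover every root:
 *
 *   shadow_stack.last --> [innermost roots] --> [caller roots] --> NULL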
111 | */ 112 | // TODO: Rename to remove 'Simple' from name 113 | pub struct CollectorContext { 114 | raw: *mut C::RawContext, 115 | /// Whether we are the root context 116 | /// 117 | /// Only the root actually owns the `Arc` 118 | /// and is responsible for dropping it 119 | root: bool, 120 | } 121 | impl CollectorContext { 122 | pub(crate) unsafe fn register_root(collector: &CollectorRef) -> Self { 123 | CollectorContext { 124 | raw: Box::into_raw(ManuallyDrop::into_inner(C::RawContext::register_new( 125 | collector, 126 | ))), 127 | root: true, // We are responsible for unregistering 128 | } 129 | } 130 | #[inline] 131 | pub fn collector(&self) -> &C { 132 | unsafe { (*self.raw).collector() } 133 | } 134 | #[inline(always)] 135 | unsafe fn with_shadow_stack( 136 | &self, 137 | value: *mut &mut T, 138 | func: impl FnOnce() -> R, 139 | ) -> R { 140 | let old_link = (*(*self.raw).shadow_stack_ptr()).last; 141 | let new_link = ShadowStackLink { 142 | element: C::as_dyn_trace_pointer(value), 143 | prev: old_link, 144 | }; 145 | (*(*self.raw).shadow_stack_ptr()).last = &new_link; 146 | let result = func(); 147 | debug_assert_eq!((*(*self.raw).shadow_stack_ptr()).last, &new_link); 148 | (*(*self.raw).shadow_stack_ptr()).last = new_link.prev; 149 | result 150 | } 151 | #[cold] 152 | unsafe fn trigger_basic_safepoint(&self, element: &mut &mut T) { 153 | self.with_shadow_stack(element, || { 154 | (*self.raw).trigger_safepoint(); 155 | }) 156 | } 157 | } 158 | impl Drop for CollectorContext { 159 | #[inline] 160 | fn drop(&mut self) { 161 | if self.root { 162 | unsafe { 163 | C::Manager::free_context(self.collector(), self.raw); 164 | } 165 | } 166 | } 167 | } 168 | unsafe impl GcContext for CollectorContext { 169 | type System = CollectorRef; 170 | type Id = CollectorId; 171 | 172 | #[inline] 173 | unsafe fn unchecked_safepoint(&self, value: &mut &mut T) { 174 | debug_assert_eq!((*self.raw).state(), ContextState::Active); 175 | if (*self.raw).collector().should_collect() { 176 | self.trigger_basic_safepoint(value); 177 | } 178 | debug_assert_eq!((*self.raw).state(), ContextState::Active); 179 | } 180 | 181 | unsafe fn freeze(&mut self) { 182 | (*self.raw).collector().manager().freeze_context(&*self.raw); 183 | } 184 | 185 | unsafe fn unfreeze(&mut self) { 186 | (*self.raw) 187 | .collector() 188 | .manager() 189 | .unfreeze_context(&*self.raw); 190 | } 191 | 192 | #[inline] 193 | unsafe fn recurse_context(&self, value: &mut &mut T, func: F) -> R 194 | where 195 | T: Trace, 196 | F: for<'gc> FnOnce(&'gc mut Self, &'gc mut T) -> R, 197 | { 198 | debug_assert_eq!((*self.raw).state(), ContextState::Active); 199 | self.with_shadow_stack(value, || { 200 | let mut sub_context = ManuallyDrop::new(CollectorContext { 201 | /* 202 | * safe to copy because we wont drop it 203 | * Lifetime is guarenteed to be restricted to 204 | * the closure. 205 | */ 206 | raw: self.raw, 207 | root: false, /* don't drop our pointer!!! */ 208 | }); 209 | let result = func(&mut *sub_context, value); 210 | debug_assert!(!sub_context.root); 211 | // No need to run drop code on context..... 212 | core::mem::forget(sub_context); 213 | debug_assert_eq!((*self.raw).state(), ContextState::Active); 214 | result 215 | }) 216 | } 217 | 218 | #[inline] 219 | fn system(&self) -> &'_ Self::System { 220 | unsafe { (&*self.raw).collector_ref() } 221 | } 222 | 223 | #[inline] 224 | fn id(&self) -> Self::Id { 225 | unsafe { (&*self.raw).collector() }.id() 226 | } 227 | } 228 | 229 | /// It's not safe for a context to be sent across threads. 
230 | /// 231 | /// We use (thread-unsafe) interior mutability to maintain the 232 | /// shadow stack. Since we could potentially be cloned via `safepoint_recurse!`, 233 | /// implementing `Send` would allow another thread to obtain a 234 | /// reference to our internal `&RefCell`. Further mutation/access 235 | /// would be undefined..... 236 | impl !Send for CollectorContext {} 237 | 238 | // 239 | // Root tracking 240 | // 241 | 242 | #[repr(C)] 243 | #[derive(Debug)] 244 | pub(crate) struct ShadowStackLink { 245 | pub element: T, 246 | /// The previous link in the chain, 247 | /// or NULL if there isn't any 248 | pub prev: *const ShadowStackLink, 249 | } 250 | 251 | impl Debug for ShadowStack { 252 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 253 | f.debug_struct("ShadowStack") 254 | .field("last", &format_args!("{:p}", self.last)) 255 | .finish() 256 | } 257 | } 258 | #[derive(Clone)] 259 | pub struct ShadowStack { 260 | /// The last element in the shadow stack, 261 | /// or NULL if it's empty 262 | pub(crate) last: *const ShadowStackLink, 263 | } 264 | impl ShadowStack { 265 | unsafe fn as_vec(&self) -> Vec { 266 | let mut result: Vec<_> = self.reverse_iter().collect(); 267 | result.reverse(); 268 | result 269 | } 270 | #[inline] 271 | pub unsafe fn reverse_iter(&self) -> impl Iterator + '_ { 272 | core::iter::successors(self.last.as_ref(), |link| link.prev.as_ref()) 273 | .map(|link| link.element) 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /src/serde/hack.rs: -------------------------------------------------------------------------------- 1 | //! A horrible hack to pass `GcContext` back and forth to serde using thread locals. 2 | use std::any::TypeId; 3 | use std::cell::{Cell, RefCell, UnsafeCell}; 4 | use std::collections::hash_map::Entry; 5 | use std::collections::HashMap; 6 | use std::ffi::c_void; 7 | use std::marker::PhantomData; 8 | use std::ptr::NonNull; 9 | 10 | use crate::prelude::*; 11 | use crate::serde::GcDeserialize; 12 | use serde::{Deserialize, Deserializer}; 13 | 14 | struct ContextHackState { 15 | current_ctx: UnsafeCell>, 16 | /// The number of active references to the context. 17 | /// 18 | /// If this is zero, then `state` should be `None`, 19 | /// otherwise it should be `Some` 20 | active_refs: Cell, 21 | } 22 | impl ContextHackState { 23 | const fn uninit() -> ContextHackState { 24 | ContextHackState { 25 | current_ctx: UnsafeCell::new(None), 26 | active_refs: Cell::new(0), 27 | } 28 | } 29 | #[inline] 30 | unsafe fn get_unchecked(&self) -> Option<&ContextHack> { 31 | if self.active_refs.get() == 0 { 32 | None 33 | } else { 34 | Some((&*self.current_ctx.get()).as_ref().unwrap_unchecked()) 35 | } 36 | } 37 | unsafe fn lock_unchecked<'gc, C: GcContext>(&self) -> ContextHackGuard<'gc, C> { 38 | self.active_refs.set(self.active_refs.get() + 1); 39 | debug_assert_eq!( 40 | TypeId::of::(), 41 | self.get_unchecked().unwrap().collector_type_id 42 | ); 43 | ContextHackGuard { 44 | state: NonNull::from(self), 45 | marker: PhantomData, 46 | } 47 | } 48 | #[inline] 49 | unsafe fn release_lock(&self) -> bool { 50 | debug_assert!(self.active_refs.get() > 0); 51 | match self.active_refs.get() { 52 | 1 => { 53 | self::unregister_context(self); 54 | true 55 | } 56 | 0 => std::hint::unreachable_unchecked(), 57 | _ => { 58 | self.active_refs.set(self.active_refs.get() - 1); 59 | false 60 | } 61 | } 62 | } 63 | } 64 | /// A hack to store a dynamically typed [GcContext] in a thread-local. 
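/// 
/// Expected pairing (a sketch; `ctx` is some concrete [GcContext] and `Id` its
/// collector id, with lifetimes handled as described on [set_context]):
/// ```ignore
/// let _guard = unsafe { set_context(&ctx) };  // install the thread-local
/// let value: T = unsafe { unchecked_deserialize_hack::<_, Id, T>(deserializer)? };
/// // dropping the guard releases the thread-local slot again
/// ```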
65 | struct ContextHack { 66 | collector_type_id: TypeId, 67 | ptr: NonNull, 68 | } 69 | impl ContextHack { 70 | /// Cast this context into the specified type. 71 | /// 72 | /// Returns `None` if the id doesn't match 73 | #[inline] 74 | pub fn cast_as(&self) -> Option<&'_ Ctx> { 75 | if TypeId::of::() == self.collector_type_id { 76 | Some(unsafe { &*(self.ptr.as_ptr() as *mut Ctx) }) 77 | } else { 78 | None 79 | } 80 | } 81 | } 82 | thread_local! { 83 | static PRIMARY_DE_CONTEXT: ContextHackState = ContextHackState::uninit(); 84 | static OTHER_DE_CONTEXT: RefCell>> = RefCell::new(HashMap::new()); 85 | } 86 | 87 | pub struct ContextHackGuard<'gc, C: GcContext> { 88 | state: NonNull, 89 | marker: PhantomData<&'gc C>, 90 | } 91 | impl<'gc, C: GcContext> ContextHackGuard<'gc, C> { 92 | /// Get the guard's underlying context. 93 | /// 94 | /// ## Safety 95 | /// Undefined behavior if this guard is dropped 96 | /// and then subsequently mutated. 97 | /// 98 | /// Undefined behavior if a safepoint occurs (although the immutable reference should prevent that) 99 | #[inline] 100 | pub unsafe fn get_unchecked(&self) -> &'gc C { 101 | &*self 102 | .state 103 | .as_ref() 104 | .get_unchecked() 105 | .unwrap_unchecked() 106 | .ptr 107 | .cast::() 108 | .as_ptr() 109 | } 110 | } 111 | impl<'gc, C: GcContext> Drop for ContextHackGuard<'gc, C> { 112 | #[inline] 113 | fn drop(&mut self) { 114 | unsafe { 115 | self.state.as_ref().release_lock(); 116 | } 117 | } 118 | } 119 | /// Temporarily places the specified gc context 120 | /// in a thread-local, to allow deserialization with serde. 121 | /// 122 | /// Panics if another context of the same type is 123 | /// already in the process of being deserialized. 124 | /// 125 | /// ## Safety 126 | /// Undefined behavior if the context is mutated or ever used with the wrong 127 | /// lifetime (although this is technically part of the [current_ctx] contract) 128 | #[track_caller] 129 | pub unsafe fn set_context(ctx: &C) -> ContextHackGuard<'_, C> { 130 | let guard = PRIMARY_DE_CONTEXT.with(|state| match state.get_unchecked() { 131 | Some(other) => { 132 | if let Some(other) = other.cast_as::() { 133 | assert_eq!(other.id(), ctx.id(), "Multiple collectors of the same type"); 134 | Some(state.lock_unchecked()) 135 | } else { 136 | None 137 | } 138 | } 139 | None => { 140 | state.current_ctx.get().write(Some(ContextHack { 141 | collector_type_id: TypeId::of::(), 142 | ptr: NonNull::from(ctx).cast(), 143 | })); 144 | Some(state.lock_unchecked()) 145 | } 146 | }); 147 | guard.unwrap_or_else(|| _fallback_set_context(ctx)) 148 | } 149 | #[cold] 150 | unsafe fn _fallback_set_context(ctx: &C) -> ContextHackGuard<'_, C> { 151 | OTHER_DE_CONTEXT.with(|map| { 152 | let mut map = map.borrow_mut(); 153 | match map.entry(TypeId::of::()) { 154 | Entry::Occupied(occupied) => { 155 | let other = occupied.get(); 156 | assert_eq!( 157 | other.get_unchecked().unwrap().cast_as::().unwrap().id(), 158 | ctx.id(), 159 | "Multiple collectors of the same type" 160 | ); 161 | other.lock_unchecked() 162 | } 163 | Entry::Vacant(entry) => { 164 | let res = entry.insert(Box::new(ContextHackState { 165 | active_refs: Cell::new(0), 166 | current_ctx: UnsafeCell::new(Some(ContextHack { 167 | collector_type_id: TypeId::of::(), 168 | ptr: NonNull::from(ctx).cast(), 169 | })), 170 | })); 171 | res.lock_unchecked() 172 | } 173 | } 174 | }) 175 | } 176 | #[cold] 177 | unsafe fn unregister_context(state: &ContextHackState) { 178 | let expected_ctx = state.get_unchecked().unwrap_unchecked(); 179 | 
assert_eq!(state.active_refs.get(), 1); 180 | let needs_fallback_free = PRIMARY_DE_CONTEXT.with(|ctx| { 181 | if let Some(actual) = ctx.get_unchecked() { 182 | if actual.collector_type_id == expected_ctx.collector_type_id { 183 | debug_assert_eq!(actual.ptr.as_ptr(), expected_ctx as *const _ as *mut _); 184 | ctx.active_refs.set(0); 185 | ctx.current_ctx.get().write(None); 186 | return false; // don't search the fallback HashMap. We're freed the old fashioned way 187 | } 188 | } 189 | true // need to fallback to search HashMap 190 | }); 191 | if needs_fallback_free { 192 | OTHER_DE_CONTEXT.with(|map| { 193 | let mut map = map.borrow_mut(); 194 | let actual_state = map 195 | .remove(&expected_ctx.collector_type_id) 196 | .unwrap_or_else(|| unreachable!("Can't find collector in serde::hack state")); 197 | debug_assert_eq!( 198 | expected_ctx.collector_type_id, 199 | actual_state.get_unchecked().unwrap().collector_type_id 200 | ); 201 | debug_assert_eq!( 202 | actual_state.get_unchecked().unwrap() as *const _, 203 | expected_ctx as *const _ 204 | ); 205 | actual_state.as_ref().active_refs.set(0); 206 | drop(actual_state); 207 | }) 208 | } 209 | } 210 | 211 | /// Get the current context for deserialization 212 | /// 213 | /// ## Safety 214 | /// The inferred lifetime must be correct. 215 | #[track_caller] 216 | pub unsafe fn current_ctx<'gc, Id: CollectorId>() -> ContextHackGuard<'gc, Id::Context> { 217 | PRIMARY_DE_CONTEXT 218 | .with(|state| match state.get_unchecked() { 219 | Some(hack) if hack.collector_type_id == TypeId::of::() => { 220 | Some(state.lock_unchecked()) 221 | } 222 | _ => None, 223 | }) 224 | .unwrap_or_else(|| _fallback_current_ctx::<'gc, Id>()) 225 | } 226 | #[cold] 227 | #[track_caller] 228 | unsafe fn _fallback_current_ctx<'gc, Id: CollectorId>() -> ContextHackGuard<'gc, Id::Context> { 229 | OTHER_DE_CONTEXT.with(|map| { 230 | let map = map.borrow(); 231 | let state = map.get(&TypeId::of::()).unwrap_or_else(|| { 232 | unreachable!( 233 | "Can't find collector for {} in serde::hack state", 234 | std::any::type_name::() 235 | ) 236 | }); 237 | state.lock_unchecked() 238 | }) 239 | } 240 | 241 | /// Wrapper function to deserialize the specified value via the "hack", 242 | /// getting the current context via [current_ctx] 243 | /// 244 | /// ## Safety 245 | /// The contract of [current_de_ctx] must be upheld. 
246 | /// In other words, the current context must've been set by [set_context] and have the appropriate lifetime 247 | #[track_caller] 248 | pub unsafe fn unchecked_deserialize_hack< 249 | 'gc, 250 | 'de, 251 | D: Deserializer<'de>, 252 | Id: CollectorId, 253 | T: GcDeserialize<'gc, 'de, Id>, 254 | >( 255 | deserializer: D, 256 | ) -> Result { 257 | let guard = current_ctx::<'gc, Id>(); 258 | T::deserialize_gc(guard.get_unchecked(), deserializer) 259 | } 260 | 261 | #[repr(transparent)] 262 | #[derive(Eq, Hash, Debug, PartialEq, Clone)] 263 | pub struct DeserializeHackWrapper(T, PhantomData); 264 | 265 | impl<'gc, 'de, Id: CollectorId, T: GcDeserialize<'gc, 'de, Id>> Deserialize<'de> 266 | for DeserializeHackWrapper 267 | { 268 | fn deserialize(deserializer: D) -> Result 269 | where 270 | D: Deserializer<'de>, 271 | { 272 | let guard = unsafe { current_ctx::<'gc, Id>() }; 273 | Ok(DeserializeHackWrapper( 274 | T::deserialize_gc(unsafe { guard.get_unchecked() }, deserializer)?, 275 | PhantomData, 276 | )) 277 | } 278 | } 279 | 280 | /// Transmute between two types whose sizes may not be equal at compile time 281 | /// 282 | /// ## Safety 283 | /// All the usual cavets of [std::mem::transmute] apply. 284 | /// 285 | /// However, the sizes aren't verified 286 | /// to be the same size at compile time. 287 | /// 288 | /// It is undefined behavior to invoke this function with types whose sizes 289 | /// don't match at runtime. 290 | /// However, in the current implementation, this will cause a panic. 291 | #[inline] 292 | pub unsafe fn transmute_mismatched(src: T) -> U { 293 | assert_eq!( 294 | std::mem::size_of::(), 295 | std::mem::size_of::(), 296 | "UB: Mismatched sizes for {} and {}", 297 | std::any::type_name::(), 298 | std::any::type_name::() 299 | ); 300 | let src = std::mem::ManuallyDrop::new(src); 301 | std::mem::transmute_copy::(&*src) 302 | } 303 | -------------------------------------------------------------------------------- /src/epsilon/layout.rs: -------------------------------------------------------------------------------- 1 | use std::alloc::Layout; 2 | use std::cell::Cell; 3 | use std::ffi::c_void; 4 | use std::marker::PhantomData; 5 | use std::ptr::NonNull; 6 | 7 | use crate::vec::raw::{GcRawVec, IGcVec}; 8 | use crate::{GcArray, GcRebrand, GcSafe, GcSimpleAlloc}; 9 | 10 | use super::{EpsilonCollectorId, EpsilonContext}; 11 | 12 | /// The header of an object in the epsilon collector. 13 | /// 14 | /// Not all objects need headers. 15 | /// If they are `Copy` and statically sized they can be elided. 16 | /// They are also unnecessary for statically allocated objects. 17 | pub struct EpsilonHeader { 18 | /// This object's `TypeInfo`, or `None` if it doesn't need any. 19 | pub type_info: &'static TypeInfo, 20 | /// The next allocated object, or `None` if this is the final object. 21 | pub next: Option>, 22 | } 23 | /* 24 | * We are Send + Sync because once we are allocated 25 | * `next` and `type_info` cannot change 26 | */ 27 | unsafe impl Send for EpsilonHeader {} 28 | unsafe impl Sync for EpsilonHeader {} 29 | impl EpsilonHeader { 30 | pub const LAYOUT: Layout = Layout::new::(); 31 | /// Assume the specified object has a header, 32 | /// and retrieve it if so. 33 | /// 34 | /// ## Safety 35 | /// Undefined behavior if the object doesn't have a header. 36 | /// Undefined behavior if the object isn't allocated in the epsilon collector. 
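/// 
/// (Layout sketch: the header is stored immediately before the value, so this
/// simply extends `EpsilonHeader::LAYOUT` with the value's own layout and walks
/// the pointer back by the resulting offset.)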
37 | #[inline] 38 | pub unsafe fn assume_header(header: *const T) -> *const EpsilonHeader { 39 | let (_, offset) = Self::LAYOUT 40 | .extend(Layout::for_value(&*header)) 41 | .unwrap_unchecked(); 42 | (header as *const c_void).sub(offset).cast() 43 | } 44 | #[inline] 45 | #[track_caller] 46 | pub unsafe fn determine_layout(&self) -> Layout { 47 | let tp = self.type_info; 48 | match tp.layout { 49 | LayoutInfo::Fixed(fixed) => fixed, 50 | LayoutInfo::Array { element_layout } | LayoutInfo::Vec { element_layout } => { 51 | let array_header = EpsilonArrayHeader::from_common_header(self); 52 | let len = (*array_header).len; 53 | element_layout.repeat(len).unwrap_unchecked().0 54 | } 55 | } 56 | } 57 | } 58 | #[repr(C)] 59 | pub struct EpsilonArrayHeader { 60 | pub len: usize, 61 | pub common_header: EpsilonHeader, 62 | } 63 | impl EpsilonArrayHeader { 64 | const COMMON_OFFSET: usize = std::mem::size_of::() - std::mem::size_of::(); 65 | #[inline] 66 | pub unsafe fn from_common_header(header: *const EpsilonHeader) -> *const Self { 67 | (header as *const c_void).sub(Self::COMMON_OFFSET).cast() 68 | } 69 | } 70 | #[repr(C)] 71 | pub struct EpsilonVecHeader { 72 | pub capacity: usize, 73 | // NOTE: Suffix must be transmutable to `EpsilonArrayHeader` 74 | pub len: Cell, 75 | pub common_header: EpsilonHeader, 76 | } 77 | impl EpsilonVecHeader { 78 | const COMMON_OFFSET: usize = std::mem::size_of::() - std::mem::size_of::(); 79 | } 80 | pub enum LayoutInfo { 81 | Fixed(Layout), 82 | /// A variable sized array 83 | Array { 84 | element_layout: Layout, 85 | }, 86 | /// A variable sized vector 87 | Vec { 88 | element_layout: Layout, 89 | }, 90 | } 91 | impl LayoutInfo { 92 | #[inline] 93 | pub const fn align(&self) -> usize { 94 | match *self { 95 | LayoutInfo::Fixed(layout) 96 | | LayoutInfo::Array { 97 | element_layout: layout, 98 | } 99 | | LayoutInfo::Vec { 100 | element_layout: layout, 101 | } => layout.align(), 102 | } 103 | } 104 | #[inline] 105 | pub fn common_header_offset(&self) -> usize { 106 | match *self { 107 | LayoutInfo::Fixed(_) => 0, 108 | LayoutInfo::Array { .. } => EpsilonArrayHeader::COMMON_OFFSET, 109 | LayoutInfo::Vec { .. 
} => EpsilonVecHeader::COMMON_OFFSET, 110 | } 111 | } 112 | } 113 | pub struct TypeInfo { 114 | /// The function to drop this object, or `None` if the object doesn't need to be dropped 115 | pub drop_func: Option, 116 | pub layout: LayoutInfo, 117 | } 118 | impl TypeInfo { 119 | #[inline] 120 | pub const fn may_ignore(&self) -> bool { 121 | // NOTE: We don't care about `size` 122 | self.drop_func.is_none() && self.layout.align() <= std::mem::align_of::() 123 | } 124 | #[inline] 125 | pub const fn of() -> &'static TypeInfo { 126 | ::TYPE_INFO 127 | } 128 | #[inline] 129 | pub const fn of_array() -> &'static TypeInfo { 130 | <[T] as StaticTypeInfo>::TYPE_INFO 131 | } 132 | #[inline] 133 | pub const fn of_vec() -> &'static TypeInfo { 134 | // For now, vectors and arrays share type info 135 | ::VEC_INFO.as_ref().unwrap() 136 | } 137 | } 138 | trait StaticTypeInfo { 139 | const TYPE_INFO: &'static TypeInfo; 140 | const VEC_INFO: &'static Option; 141 | } 142 | impl StaticTypeInfo for T { 143 | const TYPE_INFO: &'static TypeInfo = &TypeInfo { 144 | drop_func: if std::mem::needs_drop::() { 145 | Some(unsafe { 146 | std::mem::transmute::( 147 | std::ptr::drop_in_place::, 148 | ) 149 | }) 150 | } else { 151 | None 152 | }, 153 | layout: LayoutInfo::Fixed(Layout::new::()), 154 | }; 155 | const VEC_INFO: &'static Option = &Some(TypeInfo { 156 | drop_func: if std::mem::needs_drop::() { 157 | Some(drop_array::) 158 | } else { 159 | None 160 | }, 161 | layout: LayoutInfo::Vec { 162 | element_layout: Layout::new::(), 163 | }, 164 | }); 165 | } 166 | impl StaticTypeInfo for [T] { 167 | const TYPE_INFO: &'static TypeInfo = &TypeInfo { 168 | drop_func: if std::mem::needs_drop::() { 169 | Some(drop_array::) 170 | } else { 171 | None 172 | }, 173 | layout: LayoutInfo::Array { 174 | element_layout: Layout::new::(), 175 | }, 176 | }; 177 | const VEC_INFO: &'static Option = &None; 178 | } 179 | /// Drop an array or vector of the specified type 180 | unsafe fn drop_array(ptr: *mut c_void) { 181 | let header = EpsilonArrayHeader::from_common_header(EpsilonHeader::assume_header( 182 | ptr as *const _ as *const T, 183 | )); 184 | let len = (*header).len; 185 | std::ptr::drop_in_place(std::ptr::slice_from_raw_parts_mut(ptr as *mut T, len)); 186 | } 187 | 188 | /// The raw representation of a vector in the "epsilon" collector 189 | /* 190 | * Implementation note: Length and capacity are stored implicitly in the [`EpsilonVecHeader`] 191 | */ 192 | pub struct EpsilonRawVec<'gc, T> { 193 | header: NonNull, 194 | context: &'gc EpsilonContext, 195 | marker: PhantomData>, 196 | } 197 | impl<'gc, T> Copy for EpsilonRawVec<'gc, T> {} 198 | impl<'gc, T> Clone for EpsilonRawVec<'gc, T> { 199 | #[inline] 200 | fn clone(&self) -> Self { 201 | *self 202 | } 203 | } 204 | impl<'gc, T> EpsilonRawVec<'gc, T> { 205 | #[inline] 206 | pub(super) unsafe fn from_raw_parts( 207 | header: NonNull, 208 | context: &'gc EpsilonContext, 209 | ) -> Self { 210 | EpsilonRawVec { 211 | header, 212 | context, 213 | marker: PhantomData, 214 | } 215 | } 216 | #[inline] 217 | fn header(&self) -> *const EpsilonVecHeader { 218 | self.header.as_ptr() as *const EpsilonVecHeader 219 | } 220 | } 221 | zerogc_derive::unsafe_gc_impl!( 222 | target => EpsilonRawVec<'gc, T>, 223 | params => ['gc, T: GcSafe<'gc, EpsilonCollectorId>], 224 | bounds => { 225 | TraceImmutable => never, 226 | GcRebrand => { where T: GcRebrand<'new_gc, EpsilonCollectorId>, T::Branded: Sized } 227 | }, 228 | branded_type => EpsilonRawVec<'new_gc, T::Branded>, 229 | collector_id => 
EpsilonCollectorId, 230 | NEEDS_TRACE => true, // meh 231 | NEEDS_DROP => T::NEEDS_DROP, 232 | null_trace => never, 233 | trace_mut => |self, visitor| { 234 | unsafe { visitor.trace_vec(self) } 235 | }, 236 | ); 237 | #[inherent::inherent] 238 | unsafe impl<'gc, T: GcSafe<'gc, EpsilonCollectorId>> GcRawVec<'gc, T> for EpsilonRawVec<'gc, T> { 239 | #[inline] 240 | #[allow(dead_code)] 241 | unsafe fn steal_as_array_unchecked(mut self) -> GcArray<'gc, T, EpsilonCollectorId> { 242 | // Set capacity to zero, so no one else gets any funny ideas! 243 | self.header.as_mut().capacity = 0; 244 | GcArray::from_raw_ptr(NonNull::new_unchecked(self.as_mut_ptr()), self.len()) 245 | } 246 | pub fn iter(&self) -> zerogc::vec::raw::RawVecIter<'gc, T, Self> 247 | where 248 | T: Copy; 249 | } 250 | #[inherent::inherent] 251 | unsafe impl<'gc, T: GcSafe<'gc, EpsilonCollectorId>> IGcVec<'gc, T> for EpsilonRawVec<'gc, T> { 252 | type Id = EpsilonCollectorId; 253 | 254 | #[inline] 255 | pub fn with_capacity_in(capacity: usize, ctx: &'gc EpsilonContext) -> Self { 256 | ctx.alloc_raw_vec_with_capacity::(capacity) 257 | } 258 | 259 | #[inline] 260 | pub fn len(&self) -> usize { 261 | unsafe { (*self.header()).len.get() } 262 | } 263 | 264 | #[inline] 265 | pub unsafe fn set_len(&mut self, len: usize) { 266 | (*self.header()).len.set(len) 267 | } 268 | 269 | #[inline] 270 | pub fn capacity(&self) -> usize { 271 | unsafe { (*self.header()).capacity } 272 | } 273 | 274 | #[inline] 275 | pub fn reserve_in_place( 276 | &mut self, 277 | _additional: usize, 278 | ) -> Result<(), crate::vec::raw::ReallocFailedError> { 279 | Err(crate::vec::raw::ReallocFailedError::Unsupported) 280 | } 281 | 282 | #[inline] 283 | pub unsafe fn as_ptr(&self) -> *const T { 284 | const LAYOUT: Layout = Layout::new::(); 285 | let offset = LAYOUT.size() + LAYOUT.padding_needed_for(core::mem::align_of::()); 286 | (self.header() as *const u8).add(offset) as *const T 287 | } 288 | 289 | #[inline] 290 | pub fn context(&self) -> &'gc EpsilonContext { 291 | self.context 292 | } 293 | 294 | // Default methods: 295 | pub unsafe fn as_mut_ptr(&mut self) -> *mut T; 296 | pub fn replace(&mut self, index: usize, val: T) -> T; 297 | pub fn set(&mut self, index: usize, val: T); 298 | pub fn extend_from_slice(&mut self, src: &[T]) 299 | where 300 | T: Copy; 301 | pub fn push(&mut self, val: T); 302 | pub fn pop(&mut self) -> Option; 303 | pub fn swap_remove(&mut self, index: usize) -> T; 304 | pub fn reserve(&mut self, additional: usize); 305 | pub fn is_empty(&self) -> bool; 306 | pub fn new_in(ctx: &'gc EpsilonContext) -> Self; 307 | pub fn copy_from_slice(src: &[T], ctx: &'gc EpsilonContext) -> Self 308 | where 309 | T: Copy; 310 | pub fn from_vec(src: Vec, ctx: &'gc EpsilonContext) -> Self; 311 | pub fn get(&mut self, index: usize) -> Option 312 | where 313 | T: Copy; 314 | pub unsafe fn as_slice_unchecked(&self) -> &[T]; 315 | } 316 | impl<'gc, T: GcSafe<'gc, EpsilonCollectorId>> Extend for EpsilonRawVec<'gc, T> { 317 | #[inline] 318 | fn extend>(&mut self, iter: E) { 319 | let iter = iter.into_iter(); 320 | self.reserve(iter.size_hint().1.unwrap_or(0)); 321 | for val in iter { 322 | self.push(val); 323 | } 324 | } 325 | } 326 | -------------------------------------------------------------------------------- /libs/derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature( 2 | proc_macro_tracked_env, // Used for `DEBUG_DERIVE` 3 | proc_macro_span, // Used for source file ids 4 | proc_macro_diagnostic, 
// Used for warnings 5 | )] 6 | extern crate proc_macro; 7 | 8 | use crate::derive::TraceDeriveKind; 9 | use darling::{FromDeriveInput, FromMeta}; 10 | use proc_macro2::{Span, TokenStream}; 11 | use quote::{quote, ToTokens}; 12 | use std::fmt::Display; 13 | use std::io::Write; 14 | use syn::parse::Parse; 15 | use syn::punctuated::Punctuated; 16 | use syn::{ 17 | parse_macro_input, parse_quote, DeriveInput, GenericParam, Generics, Token, Type, WhereClause, 18 | }; 19 | 20 | mod derive; 21 | mod macros; 22 | 23 | /// Magic const that expands to either `::zerogc` or `crate::` 24 | /// depending on whether we are currently bootstrapping (compiling `zerogc` itself) 25 | /// 26 | /// This is equivalent to `$crate` for regular macros 27 | pub(crate) fn zerogc_crate() -> TokenStream { 28 | /* 29 | * TODO: A way to detect $crate 30 | * Checking environment variables 31 | * doesn't work well because doctests 32 | * and integration tests 33 | * will falsely believe they should access 34 | * `crate` instead of `zerogc` 35 | * 36 | * Instead, we re-export `extern crate self as zerogc` 37 | * at the start of the main crate 38 | */ 39 | quote!(::zerogc) 40 | } 41 | 42 | /// Sort the parameters so that lifetime parameters come before 43 | /// type parameters, and type parameters come before const paramaters 44 | pub(crate) fn sort_params(generics: &mut Generics) { 45 | #[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Debug)] 46 | enum ParamOrder { 47 | Lifetime, 48 | Type, 49 | Const, 50 | } 51 | let mut pairs = std::mem::take(&mut generics.params) 52 | .into_pairs() 53 | .collect::>(); 54 | use syn::punctuated::Pair; 55 | pairs.sort_by_key(|pair| match pair.value() { 56 | syn::GenericParam::Lifetime(_) => ParamOrder::Lifetime, 57 | syn::GenericParam::Type(_) => ParamOrder::Type, 58 | syn::GenericParam::Const(_) => ParamOrder::Const, 59 | }); 60 | /* 61 | * NOTE: The `Pair::End` can only come at the end. 62 | * Now that we've sorted, it's possible the old ending 63 | * could be in the first position or some other position 64 | * before the end. 65 | * 66 | * If that's the case, then add punctuation to the end. 
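 *
 * Concrete example (illustrative): for `<T, 'a>`, the lifetime sorts to the
 * front, so the old `Pair::End` (`'a`, which had no trailing comma) is no
 * longer last; it is converted to `Pair::Punctuated`, yielding `<'a, T,>`.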
67 | */ 68 | if let Some(old_ending_index) = pairs.iter().position(|p| p.punct().is_none()) { 69 | if old_ending_index != pairs.len() - 1 { 70 | let value = pairs.remove(old_ending_index).into_value(); 71 | pairs.insert( 72 | old_ending_index, 73 | Pair::Punctuated(value, Default::default()), 74 | ); 75 | } 76 | } 77 | generics.params = pairs.into_iter().collect(); 78 | } 79 | 80 | pub(crate) fn emit_warning(msg: impl ToString, span: Span) { 81 | let mut d = proc_macro::Diagnostic::new(proc_macro::Level::Warning, msg.to_string()); 82 | d.set_spans(span.unwrap()); 83 | d.emit(); 84 | } 85 | 86 | pub(crate) fn move_bounds_to_where_clause(mut generics: Generics) -> Generics { 87 | let where_clause = generics.where_clause.get_or_insert_with(|| WhereClause { 88 | where_token: Default::default(), 89 | predicates: Default::default(), 90 | }); 91 | for param in &mut generics.params { 92 | match *param { 93 | GenericParam::Lifetime(ref mut lt) => { 94 | let target = <.lifetime; 95 | let bounds = <.bounds; 96 | if !bounds.is_empty() { 97 | where_clause.predicates.push(parse_quote!(#target: #bounds)); 98 | } 99 | lt.colon_token = None; 100 | lt.bounds.clear(); 101 | } 102 | GenericParam::Type(ref mut tp) => { 103 | let bounds = &tp.bounds; 104 | let target = &tp.ident; 105 | if !bounds.is_empty() { 106 | where_clause.predicates.push(parse_quote!(#target: #bounds)); 107 | } 108 | tp.eq_token = None; 109 | tp.colon_token = None; 110 | tp.default = None; 111 | tp.bounds.clear(); 112 | } 113 | GenericParam::Const(ref mut c) => { 114 | c.eq_token = None; 115 | c.default = None; 116 | } 117 | } 118 | } 119 | if generics 120 | .where_clause 121 | .as_ref() 122 | .map_or(false, |clause| clause.predicates.is_empty()) 123 | { 124 | generics.where_clause = None; 125 | } 126 | generics 127 | } 128 | 129 | #[proc_macro] 130 | pub fn unsafe_gc_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { 131 | let parsed = parse_macro_input!(input as macros::MacroInput); 132 | let res = parsed 133 | .expand_output() 134 | .unwrap_or_else(|e| e.to_compile_error()); 135 | let tp = match parsed.target_type { 136 | Type::Path(ref p) => Some(&p.path.segments.last().unwrap().ident), 137 | _ => None, 138 | }; 139 | let span_loc = span_file_loc(Span::call_site()); 140 | let f = if let Some(tp) = tp { 141 | format!("unsafe_gc_impl!(target={}, ...) @ {}", tp, span_loc) 142 | } else { 143 | format!("unsafe_gc_impl! 
@ {}", span_loc) 144 | }; 145 | debug_derive( 146 | "unsafe_gc_impl!", 147 | &tp.map_or_else(|| span_loc.to_string(), |tp| tp.to_string()), 148 | &f, 149 | &res, 150 | ); 151 | res.into() 152 | } 153 | 154 | #[proc_macro_derive(NullTrace, attributes(zerogc))] 155 | pub fn derive_null_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { 156 | let input = parse_macro_input!(input as DeriveInput); 157 | let res = From::from( 158 | impl_derive_trace(&input, TraceDeriveKind::NullTrace).unwrap_or_else(|e| e.write_errors()), 159 | ); 160 | debug_derive( 161 | "derive(NullTrace)", 162 | &input.ident.to_string(), 163 | &format_args!("#[derive(NullTrace) for {}", input.ident), 164 | &res, 165 | ); 166 | res 167 | } 168 | 169 | #[proc_macro_derive(Trace, attributes(zerogc))] 170 | pub fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { 171 | let input = parse_macro_input!(input as DeriveInput); 172 | let res = From::from( 173 | impl_derive_trace(&input, TraceDeriveKind::Regular).unwrap_or_else(|e| e.write_errors()), 174 | ); 175 | debug_derive( 176 | "derive(Trace)", 177 | &input.ident.to_string(), 178 | &format_args!("#[derive(Trace) for {}", input.ident), 179 | &res, 180 | ); 181 | res 182 | } 183 | 184 | pub(crate) const DESERIALIZE_ENABLED: bool = cfg!(feature = "__serde-internal"); 185 | 186 | #[proc_macro_derive(GcDeserialize, attributes(zerogc))] 187 | pub fn gc_deserialize(input: proc_macro::TokenStream) -> proc_macro::TokenStream { 188 | let input = parse_macro_input!(input as DeriveInput); 189 | let res = From::from( 190 | impl_derive_trace(&input, TraceDeriveKind::Deserialize) 191 | .unwrap_or_else(|e| e.write_errors()), 192 | ); 193 | debug_derive( 194 | "derive(GcDeserialize)", 195 | &input.ident.to_string(), 196 | &format_args!("#[derive(GcDeserialize) for {}", input.ident), 197 | &res, 198 | ); 199 | res 200 | } 201 | 202 | fn impl_derive_trace( 203 | input: &DeriveInput, 204 | kind: TraceDeriveKind, 205 | ) -> Result { 206 | let mut input = derive::TraceDeriveInput::from_derive_input(input)?; 207 | input.normalize(kind)?; 208 | input.expand(kind) 209 | } 210 | 211 | /// A list like `#[zerogc(a, b, c)] parsed as a `Punctuated`, 212 | #[derive(Clone, Debug)] 213 | pub(crate) struct MetaList(pub Punctuated); 214 | impl Default for MetaList { 215 | fn default() -> Self { 216 | MetaList(Default::default()) 217 | } 218 | } 219 | impl FromMeta for MetaList { 220 | fn from_list(items: &[syn::NestedMeta]) -> darling::Result { 221 | let mut res: Punctuated = Default::default(); 222 | for item in items { 223 | res.push(syn::parse2(item.to_token_stream())?); 224 | } 225 | Ok(MetaList(res)) 226 | } 227 | } 228 | 229 | #[derive(Debug, Clone)] 230 | pub(crate) struct FromLitStr(pub T); 231 | impl Parse for FromLitStr { 232 | fn parse(input: syn::parse::ParseStream) -> syn::Result { 233 | let s = input.parse::()?; 234 | Ok(FromLitStr(s.parse()?)) 235 | } 236 | } 237 | 238 | pub(crate) fn is_explicitly_unsized(param: &syn::TypeParam) -> bool { 239 | param.bounds.iter().any(|bound| { 240 | matches!(bound, syn::TypeParamBound::Trait(syn::TraitBound { 241 | ref path, modifier: syn::TraitBoundModifier::Maybe(_), .. 
242 | }) if path.is_ident("Sized")) 243 | }) 244 | } 245 | 246 | fn span_file_loc(span: Span) -> String { 247 | /* 248 | * Source file identifiers in the form `:` 249 | */ 250 | let internal = span.unwrap(); 251 | let sf = internal.source_file(); 252 | let path = sf.path(); 253 | let file_name = if sf.is_real() { path.file_name() } else { None } 254 | .map(std::ffi::OsStr::to_string_lossy) 255 | .map(String::from) 256 | .unwrap_or_else(|| String::from("")); 257 | let lineno = internal.start().line(); 258 | format!("{}:{}", file_name, lineno) 259 | } 260 | 261 | fn debug_derive(key: &str, target: &dyn ToString, message: &dyn Display, value: &dyn Display) { 262 | let target = target.to_string(); 263 | // TODO: Use proc_macro::tracked_env::var 264 | match ::proc_macro::tracked_env::var("DEBUG_DERIVE") { 265 | Ok(ref var) if var == "*" || var == "1" || var.is_empty() => {} 266 | Ok(ref var) if var == "0" => { 267 | return; /* disabled */ 268 | } 269 | Ok(var) => { 270 | let target_parts = std::iter::once(key) 271 | .chain(target.split(':')) 272 | .collect::>(); 273 | for pattern in var.split_terminator(',') { 274 | let pattern_parts = pattern.split(':').collect::>(); 275 | if pattern_parts.len() > target_parts.len() { 276 | continue; 277 | } 278 | for (&pattern_part, &target_part) in pattern_parts 279 | .iter() 280 | .chain(std::iter::repeat(&"*")) 281 | .zip(&target_parts) 282 | { 283 | if pattern_part == "*" { 284 | continue; // Wildcard matches anything: Keep checking 285 | } 286 | if pattern_part != target_part { 287 | return; // Pattern mismatch 288 | } 289 | } 290 | } 291 | // Fallthrough -> enable this debug 292 | } 293 | _ => return, 294 | } 295 | eprintln!("{}:", message); 296 | use std::process::{Command, Stdio}; 297 | let original_input = format!("{}", value); 298 | let cmd_res = Command::new("rustfmt") 299 | .stdin(Stdio::piped()) 300 | .stdout(Stdio::piped()) 301 | .stderr(Stdio::piped()) 302 | .spawn() 303 | .and_then(|mut child| { 304 | let mut stdin = child.stdin.take().unwrap(); 305 | stdin.write_all(original_input.as_bytes())?; 306 | drop(stdin); 307 | child.wait_with_output() 308 | }); 309 | match cmd_res { 310 | Ok(output) if output.status.success() => { 311 | let formatted = String::from_utf8(output.stdout).unwrap(); 312 | for line in formatted.lines() { 313 | eprintln!(" {}", line); 314 | } 315 | } 316 | // Fallthrough on failure 317 | Ok(output) => { 318 | eprintln!( 319 | "Rustfmt error [code={}]:", 320 | output 321 | .status 322 | .code() 323 | .map_or_else(|| String::from("?"), |i| format!("{}", i)) 324 | ); 325 | let err_msg = String::from_utf8(output.stderr).unwrap(); 326 | for line in err_msg.lines() { 327 | eprintln!(" {}", line); 328 | } 329 | eprintln!("Original input: [[[["); 330 | for line in original_input.lines() { 331 | eprintln!("{}", line); 332 | } 333 | eprintln!("]]]]"); 334 | } 335 | Err(e) => { 336 | eprintln!("Failed to run rustfmt: {}", e) 337 | } 338 | } 339 | } 340 | -------------------------------------------------------------------------------- /src/hash_map/indexmap.rs: -------------------------------------------------------------------------------- 1 | //! Contains the implementation of [GcIndexMap] 2 | 3 | use core::borrow::Borrow; 4 | use core::hash::{BuildHasher, Hash, Hasher}; 5 | use core::mem; 6 | 7 | use hashbrown::raw::RawTable; 8 | 9 | use zerogc_derive::{unsafe_gc_impl, NullTrace, Trace}; 10 | 11 | use crate::prelude::*; 12 | use crate::SimpleAllocCollectorId; 13 | 14 | /// A garbage collected hashmap that preserves insertion order. 
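/// 
/// A minimal usage sketch (assuming `ctx` is a context whose [CollectorId]
/// implements `SimpleAllocCollectorId`, e.g. the simple collector):
/// ```ignore
/// let mut map: GcIndexMap<'_, u32, u32, _> = GcIndexMap::new_in(ctx);
/// map.insert(1u32, 10u32);
/// assert_eq!(map.get(&1), Some(&10));
/// assert_eq!(map.swap_remove(&1), Some(10)); // swap_remove does not preserve order
/// ```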
15 | /// 16 | /// This is based off [indexmap::IndexMap](https://docs.rs/indexmap/1.7.0/indexmap/map/struct.IndexMap.html), 17 | /// both in API design and in implementation. 18 | /// 19 | /// Like a [GcVec], there can only be one owner at a time, 20 | /// simplifying mutability checking. 21 | pub struct GcIndexMap< 22 | 'gc, 23 | K: GcSafe<'gc, Id>, 24 | V: GcSafe<'gc, Id>, 25 | Id: SimpleAllocCollectorId, 26 | S: BuildHasher = super::DefaultHasher, 27 | > { 28 | /// indices mapping from the entry hash to its index 29 | /// 30 | /// NOTE: This uses `std::alloc` instead of the garbage collector to allocate memory. 31 | /// This is necessary because of the possibility of relocating pointers..... 32 | /// 33 | /// The unfortunate downside is that allocating from `std::alloc` is slightly 34 | /// slower than allocating from a typical gc (which often uses bump-pointer allocation). 35 | indices: RawTable, 36 | /// an ordered, garbage collected vector of entries, 37 | /// in the original insertion order. 38 | entries: GcVec<'gc, Bucket, Id>, 39 | /// The hasher used to hash elements 40 | hasher: S, 41 | } 42 | unsafe impl<'gc, K: GcSafe<'gc, Id>, V: GcSafe<'gc, Id>, Id: SimpleAllocCollectorId, S: BuildHasher> 43 | crate::ImplicitWriteBarrier for GcIndexMap<'gc, K, V, Id, S> 44 | { 45 | } 46 | impl<'gc, K: GcSafe<'gc, Id>, V: GcSafe<'gc, Id>, Id: SimpleAllocCollectorId, S: BuildHasher> 47 | GcIndexMap<'gc, K, V, Id, S> 48 | { 49 | /// Allocate a new hashmap inside the specified collector 50 | #[inline] 51 | pub fn new_in(ctx: &'gc Id::Context) -> Self 52 | where 53 | S: Default, 54 | { 55 | Self::with_capacity_in(0, ctx) 56 | } 57 | /// Allocate a new hashmap with the specified capacity, 58 | /// inside of the specified collector 59 | #[inline] 60 | pub fn with_capacity_in(capacity: usize, ctx: &'gc Id::Context) -> Self 61 | where 62 | S: Default, 63 | { 64 | Self::with_capacity_and_hasher_in(capacity, Default::default(), ctx) 65 | } 66 | /// Allocate a new hashmap with the specified capacity and hasher, 67 | /// inside of the specified collector 68 | #[inline] 69 | pub fn with_capacity_and_hasher_in(capacity: usize, hasher: S, ctx: &'gc Id::Context) -> Self { 70 | GcIndexMap { 71 | indices: RawTable::with_capacity(capacity), 72 | entries: GcVec::with_capacity_in(capacity, ctx), 73 | hasher, 74 | } 75 | } 76 | 77 | /// Allocate a new hashmap with the specified hasher, 78 | /// inside the specified collector 79 | #[inline] 80 | pub fn with_hasher_in(hasher: S, ctx: &'gc Id::Context) -> Self { 81 | Self::with_capacity_and_hasher_in(0, hasher, ctx) 82 | } 83 | /// Return the number of entries in the map 84 | #[inline] 85 | pub fn len(&self) -> usize { 86 | self.entries.len() 87 | } 88 | /// Check if the map is empty. 89 | #[inline] 90 | pub fn is_empty(&self) -> bool { 91 | self.len() == 0 92 | } 93 | /// Return a reference to the value associated with the specified key, 94 | /// or `None` if it isn't present in the map. 95 | pub fn get(&self, key: &Q) -> Option<&V> 96 | where 97 | K: Borrow, 98 | Q: Hash + Eq, 99 | { 100 | self.get_index_of(key) 101 | .map(|index| &self.entries[index].value) 102 | } 103 | /// Return a mutable reference to the value associated with the specified key, 104 | /// or `None` if it isn't present in the map. 
105 | pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> 106 | where 107 | K: Borrow, 108 | Q: Hash + Eq, 109 | { 110 | self.get_index_of(key) 111 | .map(move |index| &mut self.entries[index].value) 112 | } 113 | /// Remove the entry associated with 'key' and return its value. 114 | /// 115 | /// NOTE: This is equivalent to `swap_remove` and does *not* preserver ordering. 116 | pub fn remove(&mut self, key: &Q) -> Option 117 | where 118 | K: Borrow, 119 | Q: Hash + Eq, 120 | { 121 | self.swap_remove(key) 122 | } 123 | 124 | /// Remove the value associated with the specified key. 125 | /// 126 | /// This does **not** preserve ordering. 127 | /// It is similar to [Vec::swap_remove], 128 | /// or more specifically [IndexMap::swap_remove](https://docs.rs/indexmap/1.7.0/indexmap/map/struct.IndexMap.html#method.swap_remove). 129 | pub fn swap_remove(&mut self, key: &Q) -> Option 130 | where 131 | K: Borrow, 132 | Q: Hash + Eq, 133 | { 134 | let hash = self.hash(key); 135 | self.indices 136 | .remove_entry(hash.get(), equivalent(key, &self.entries)) 137 | .map(|index| { 138 | let entry = self.entries.swap_remove(index); 139 | /*hash_builder 140 | * correct the index that points to the moved entry 141 | * It was at 'self.len()', now it's at 142 | */ 143 | if let Some(entry) = self.entries.get(index) { 144 | let last = self.entries.len(); 145 | *self 146 | .indices 147 | .get_mut(entry.hash.get(), move |&i| i == last) 148 | .expect("index not found") = index; 149 | } 150 | entry.value 151 | }) 152 | } 153 | /// Returns 154 | /// Insert a key value pair into the map, returning the previous value (if any( 155 | /// 156 | /// If the key already exists, this replaces the existing pair 157 | /// and returns the previous value. 158 | #[inline] 159 | pub fn insert(&mut self, key: K, value: V) -> Option 160 | where 161 | K: Hash + Eq, 162 | { 163 | self.insert_full(key, value).1 164 | } 165 | /// Insert a key value pair into the map, implicitly looking up its index. 166 | /// 167 | /// If the key already exists, this replaces the existing pair 168 | /// and returns the previous value. 169 | /// 170 | /// If the key doesn't already exist, 171 | /// this returns a new entry. 172 | pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) 173 | where 174 | K: Hash + Eq, 175 | { 176 | let hash = self.hash(&key); 177 | match self 178 | .indices 179 | .get(hash.get(), equivalent(&key, &*self.entries)) 180 | { 181 | Some(&i) => (i, Some(mem::replace(&mut self.entries[i].value, value))), 182 | None => (self.push(hash, key, value), None), 183 | } 184 | } 185 | /// Return the index of the item with the specified key. 186 | pub fn get_index_of(&self, key: &Q) -> Option 187 | where 188 | Q: Hash + Eq, 189 | K: Borrow, 190 | { 191 | if self.is_empty() { 192 | None 193 | } else { 194 | let hash = self.hash(key); 195 | self.indices 196 | .get(hash.get(), equivalent(key, &*self.entries)) 197 | .copied() 198 | } 199 | } 200 | fn hash(&self, value: &Q) -> HashValue { 201 | let mut h = self.hasher.build_hasher(); 202 | value.hash(&mut h); 203 | HashValue(h.finish() as usize) 204 | } 205 | /// Append a new key-value pair, *without* checking whether it already exists. 
205 |     /// Append a new key-value pair, *without* checking whether it already exists.
206 |     ///
207 |     /// Return the pair's new index
208 |     fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
209 |         let i = self.entries.len();
210 |         self.indices.insert(hash.get(), i, get_hash(&self.entries));
211 |         self.entries.push(Bucket { key, value, hash });
212 |         i
213 |     }
214 |     /// Iterate over the entries in the map (in order)
215 |     #[inline]
216 |     pub fn iter(&self) -> Iter<'_, K, V> {
217 |         Iter(self.entries.iter())
218 |     }
219 |     /// Mutably iterate over the entries in the map (in order)
220 |     #[inline]
221 |     pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
222 |         IterMut(self.entries.iter_mut())
223 |     }
224 |     /// Iterate over the keys in the map (in order)
225 |     #[inline]
226 |     pub fn keys(&self) -> Keys<'_, K, V> {
227 |         Keys(self.entries.iter())
228 |     }
229 |     /// Iterate over the values in the map (in order)
230 |     #[inline]
231 |     pub fn values(&self) -> Values<'_, K, V> {
232 |         Values(self.entries.iter())
233 |     }
234 |     /// Mutably iterate over the values in the map (in order)
235 |     #[inline]
236 |     pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
237 |         ValuesMut(self.entries.iter_mut())
238 |     }
239 |     /// Return the context implicitly associated with this map
240 |     ///
241 |     /// See also: [GcVec::context]
242 |     #[inline]
243 |     pub fn context(&self) -> &'gc Id::Context {
244 |         self.entries.context()
245 |     }
246 | }
247 | macro_rules! define_iterator {
248 |     (struct $name:ident {
249 |         const NAME = $item_name:literal;
250 |         type Item = $item:ty;
251 |         type Wrapped = $wrapped:ident;
252 |         map => |$bucket:ident| $map:expr
253 |     }) => {
254 |         #[doc = concat!("An iterator over the ", $item_name, " of a [GcIndexMap]")]
255 |         pub struct $name<'a, K: 'a, V: 'a>(core::slice::$wrapped<'a, Bucket<K, V>>);
256 |         impl<'a, K: 'a, V: 'a> Iterator for $name<'a, K, V> {
257 |             type Item = $item;
258 |             #[inline]
259 |             fn next(&mut self) -> Option<Self::Item> {
260 |                 self.0.next().map(|$bucket| $map)
261 |             }
262 |             #[inline]
263 |             fn size_hint(&self) -> (usize, Option<usize>) {
264 |                 self.0.size_hint()
265 |             }
266 |         }
267 |         impl<'a, K, V> DoubleEndedIterator for $name<'a, K, V> {
268 |             #[inline]
269 |             fn next_back(&mut self) -> Option<Self::Item> {
270 |                 self.0.next_back().map(|$bucket| $map)
271 |             }
272 |         }
273 |         impl<'a, K, V> core::iter::ExactSizeIterator for $name<'a, K, V> {}
274 |         impl<'a, K, V> core::iter::FusedIterator for $name<'a, K, V> {}
275 |     };
276 | }
277 |
278 | define_iterator!(struct Iter {
279 |     const NAME = "entries";
280 |     type Item = (&'a K, &'a V);
281 |     type Wrapped = Iter;
282 |     map => |bucket| (&bucket.key, &bucket.value)
283 | });
284 | define_iterator!(struct Keys {
285 |     const NAME = "keys";
286 |     type Item = &'a K;
287 |     type Wrapped = Iter;
288 |     map => |bucket| &bucket.key
289 | });
290 | define_iterator!(struct Values {
291 |     const NAME = "values";
292 |     type Item = &'a V;
293 |     type Wrapped = Iter;
294 |     map => |bucket| &bucket.value
295 | });
296 | define_iterator!(struct ValuesMut {
297 |     const NAME = "mutable values";
298 |     type Item = &'a mut V;
299 |     type Wrapped = IterMut;
300 |     map => |bucket| &mut bucket.value
301 | });
302 | define_iterator!(struct IterMut {
303 |     const NAME = "mutable entries";
304 |     type Item = (&'a K, &'a mut V);
305 |     type Wrapped = IterMut;
306 |     map => |bucket| (&bucket.key, &mut bucket.value)
307 | });
308 |
309 | unsafe_gc_impl!(
310 |     target => GcIndexMap<'gc, K, V, Id, S>,
311 |     params => ['gc, K: GcSafe<'gc, Id>, V: GcSafe<'gc, Id>, Id: SimpleAllocCollectorId, S: BuildHasher],
312 |     bounds => {
313 |         GcSafe => { where S: 'static },
314 |         Trace => {
where S: 'static }, 315 | TraceImmutable => never, 316 | TrustedDrop => { where K: TrustedDrop, V: TrustedDrop, S: 'static }, 317 | GcRebrand => { 318 | where K: GcRebrand<'new_gc, Id>, V: GcRebrand<'new_gc, Id>, S: 'static, K::Branded: Sized, V::Branded: Sized } 319 | }, 320 | branded_type => GcIndexMap<'new_gc, K::Branded, V::Branded, Id, S>, 321 | NEEDS_TRACE => true, 322 | NEEDS_DROP => core::mem::needs_drop::(), 323 | null_trace => never, 324 | trace_template => |self, visitor| { 325 | for entry in self.entries.#iter() { 326 | visitor.#trace_func(entry)?; 327 | } 328 | Ok(()) 329 | }, 330 | collector_id => Id 331 | ); 332 | 333 | #[inline] 334 | fn equivalent<'a, K, V, Q: ?Sized>( 335 | key: &'a Q, 336 | entries: &'a [Bucket], 337 | ) -> impl Fn(&usize) -> bool + 'a 338 | where 339 | Q: Hash + Eq, 340 | K: Borrow, 341 | { 342 | move |&other_index| entries[other_index].key.borrow() == key 343 | } 344 | 345 | #[inline] 346 | fn get_hash(entries: &[Bucket]) -> impl Fn(&usize) -> u64 + '_ { 347 | move |&i| entries[i].hash.get() 348 | } 349 | 350 | #[derive(Copy, Clone, Debug, PartialEq, NullTrace)] 351 | struct HashValue(usize); 352 | impl HashValue { 353 | #[inline(always)] 354 | fn get(self) -> u64 { 355 | self.0 as u64 356 | } 357 | } 358 | 359 | #[derive(Copy, Clone, Debug, Trace)] 360 | #[zerogc(unsafe_skip_drop)] 361 | struct Bucket { 362 | hash: HashValue, 363 | key: K, 364 | value: V, 365 | } 366 | -------------------------------------------------------------------------------- /src/serde.rs: -------------------------------------------------------------------------------- 1 | //! Support for deserializing garbage collected types 2 | //! 3 | //! As long as you aren't worried about cycles, serialization is easy. 4 | //! Just do `#[derive(Serialize)]` on your type. 5 | //! 6 | //! Deserialization is much harder, because allocating a [Gc] requires 7 | //! access to [GcSimpleAlloc] and [serde::de::DeserializeSeed] can't be automatically derived. 8 | //! 9 | //! As a workaround, zerogc introduces a `GcDeserialize` type, 10 | //! indicating an implementation of [serde::Deserialize] 11 | //! that requires a [GcContext]. 12 | use std::collections::{HashMap, HashSet}; 13 | use std::hash::{BuildHasher, Hash}; 14 | use std::marker::PhantomData; 15 | 16 | use serde::de::{self, DeserializeSeed, Deserializer, MapAccess, SeqAccess, Visitor}; 17 | use serde::ser::SerializeSeq; 18 | use serde::Serialize; 19 | 20 | #[cfg(feature = "indexmap")] 21 | use indexmap::{IndexMap, IndexSet}; 22 | 23 | use crate::array::{GcArray, GcString}; 24 | use crate::prelude::*; 25 | 26 | #[doc(hidden)] 27 | #[macro_use] 28 | pub mod hack; 29 | 30 | /// An implementation of [serde::Deserialize] that requires a [GcContext] for allocation. 31 | /// 32 | /// The type must be [GcSafe], so that it can actually be allocated. 33 | pub trait GcDeserialize<'gc, 'de, Id: CollectorId>: GcSafe<'gc, Id> + Sized { 34 | /// Deserialize the value given the specified context 35 | fn deserialize_gc>( 36 | ctx: &'gc Id::Context, 37 | deserializer: D, 38 | ) -> Result; 39 | } 40 | 41 | /// A garbage collected type that can be deserialized without borrowing any data. 
42 | /// 43 | /// [GcDeserialize] is to [`serde::de::Deserialize`] 44 | /// as [GcDeserializeOwned] is to [`serde::de::DeserializeOwned`] 45 | pub trait GcDeserializeOwned<'gc, Id: CollectorId>: for<'de> GcDeserialize<'gc, 'de, Id> {} 46 | impl<'gc, Id, T> GcDeserializeOwned<'gc, Id> for T 47 | where 48 | Id: CollectorId, 49 | T: for<'de> GcDeserialize<'gc, 'de, Id>, 50 | { 51 | } 52 | 53 | impl<'gc, 'de, Id: CollectorId, T: GcDeserialize<'gc, 'de, Id>> GcDeserialize<'gc, 'de, Id> 54 | for Gc<'gc, T, Id> 55 | where 56 | Id::Context: GcSimpleAlloc, 57 | { 58 | #[inline] 59 | fn deserialize_gc>( 60 | ctx: &'gc Id::Context, 61 | deserializer: D, 62 | ) -> Result { 63 | Ok(ctx.alloc(T::deserialize_gc(ctx, deserializer)?)) 64 | } 65 | } 66 | 67 | impl<'gc, 'de, Id: CollectorId, T: GcDeserialize<'gc, 'de, Id>> GcDeserialize<'gc, 'de, Id> 68 | for GcArray<'gc, T, Id> 69 | where 70 | Id::Context: GcSimpleAlloc, 71 | { 72 | fn deserialize_gc>( 73 | ctx: &'gc Id::Context, 74 | deserializer: D, 75 | ) -> Result { 76 | Ok(ctx.alloc_array_from_vec(Vec::::deserialize_gc(ctx, deserializer)?)) 77 | } 78 | } 79 | 80 | impl<'gc, 'de, Id: CollectorId> GcDeserialize<'gc, 'de, Id> for GcString<'gc, Id> 81 | where 82 | Id::Context: GcSimpleAlloc, 83 | { 84 | fn deserialize_gc>( 85 | ctx: &'gc Id::Context, 86 | deserializer: D, 87 | ) -> Result { 88 | struct GcStrVisitor<'gc, A: GcSimpleAlloc> { 89 | ctx: &'gc A, 90 | } 91 | impl<'de, 'gc, A: GcSimpleAlloc> de::Visitor<'de> for GcStrVisitor<'gc, A> { 92 | type Value = GcString<'gc, A::Id>; 93 | fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 94 | f.write_str("a string") 95 | } 96 | fn visit_str(self, v: &str) -> Result 97 | where 98 | E: de::Error, 99 | { 100 | Ok(self.ctx.alloc_str(v)) 101 | } 102 | } 103 | deserializer.deserialize_str(GcStrVisitor { ctx }) 104 | } 105 | } 106 | 107 | impl<'gc, T: Serialize, Id: CollectorId> Serialize for Gc<'gc, T, Id> { 108 | fn serialize(&self, serializer: S) -> Result 109 | where 110 | S: serde::Serializer, 111 | { 112 | self.value().serialize(serializer) 113 | } 114 | } 115 | 116 | impl<'gc, T: Serialize, Id: CollectorId> Serialize for GcArray<'gc, T, Id> { 117 | fn serialize(&self, serializer: S) -> Result 118 | where 119 | S: serde::Serializer, 120 | { 121 | let mut seq = serializer.serialize_seq(Some(self.len()))?; 122 | for val in self.as_slice().iter() { 123 | seq.serialize_element(val)?; 124 | } 125 | seq.end() 126 | } 127 | } 128 | 129 | impl<'gc, Id: CollectorId> Serialize for GcString<'gc, Id> { 130 | fn serialize(&self, serializer: S) -> Result 131 | where 132 | S: serde::Serializer, 133 | { 134 | serializer.serialize_str(self.as_str()) 135 | } 136 | } 137 | 138 | impl<'gc, 'de, T, Id: CollectorId> GcDeserialize<'gc, 'de, Id> for PhantomData { 139 | fn deserialize_gc>( 140 | _ctx: &'gc Id::Context, 141 | _deserializer: D, 142 | ) -> Result { 143 | Ok(PhantomData) 144 | } 145 | } 146 | 147 | impl Serialize for GcCell { 148 | fn serialize(&self, serializer: S) -> Result 149 | where 150 | S: serde::Serializer, 151 | { 152 | self.get().serialize(serializer) 153 | } 154 | } 155 | 156 | impl<'gc, 'de, T, Id> GcDeserialize<'gc, 'de, Id> for GcCell 157 | where 158 | T: Copy + GcDeserialize<'gc, 'de, Id>, 159 | Id: CollectorId, 160 | { 161 | fn deserialize_gc>( 162 | ctx: &'gc Id::Context, 163 | deser: D, 164 | ) -> Result { 165 | Ok(GcCell::new(T::deserialize_gc(ctx, deser)?)) 166 | } 167 | } 168 | 169 | impl<'gc, 'de, Id: CollectorId> GcDeserialize<'gc, 'de, Id> for () { 170 | fn deserialize_gc>( 
171 | _ctx: &'gc Id::Context, 172 | deserializer: D, 173 | ) -> Result { 174 | struct UnitVisitor; 175 | impl<'de> Visitor<'de> for UnitVisitor { 176 | type Value = (); 177 | fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { 178 | formatter.write_str("a unit tuple") 179 | } 180 | fn visit_unit(self) -> Result 181 | where 182 | E: de::Error, 183 | { 184 | Ok(()) 185 | } 186 | } 187 | deserializer.deserialize_unit(UnitVisitor) 188 | } 189 | } 190 | 191 | /// Implement [GcDeserialize] for a type by delegating to its [serde::Deserialize] implementation. 192 | /// 193 | /// This should only be used for types that can never have gc pointers inside of them (or if you don't care to support that). 194 | #[macro_export] 195 | macro_rules! impl_delegating_deserialize { 196 | (impl GcDeserialize for $target:path) => ( 197 | $crate::impl_delegating_deserialize!(impl <'gc, 'de, Id> GcDeserialize<'gc, 'de, Id> for $target where Id: zerogc::CollectorId); 198 | ); 199 | (impl $(<$($lt:lifetime,)* $($param:ident),*>)? GcDeserialize<$gc:lifetime, $de:lifetime, $id:ident> for $target:path $(where $($where_clause:tt)*)?) => { 200 | impl$(<$($lt,)* $($param),*>)? $crate::serde::GcDeserialize<$gc, $de, $id> for $target 201 | where Self: Deserialize<$de> + $(, $($where_clause)*)?{ 202 | fn deserialize_gc>(_ctx: &$gc <$id as $crate::CollectorId>::Context, deserializer: D) -> Result>::Error> { 203 | >::deserialize(deserializer) 204 | } 205 | } 206 | }; 207 | } 208 | 209 | /// An implementation of [serde::de::DeserializeSeed] that wraps [GcDeserialize] 210 | pub struct GcDeserializeSeed<'gc, 'de, Id: CollectorId, T: GcDeserialize<'gc, 'de, Id>> { 211 | context: &'gc Id::Context, 212 | marker: PhantomData T>, 213 | } 214 | impl<'de, 'gc, Id: CollectorId, T: GcDeserialize<'gc, 'de, Id>> GcDeserializeSeed<'gc, 'de, Id, T> { 215 | /// Create a new wrapper for the specified context 216 | #[inline] 217 | pub fn new(context: &'gc Id::Context) -> Self { 218 | GcDeserializeSeed { 219 | context, 220 | marker: PhantomData, 221 | } 222 | } 223 | } 224 | impl<'de, 'gc, Id: CollectorId, T: GcDeserialize<'gc, 'de, Id>> DeserializeSeed<'de> 225 | for GcDeserializeSeed<'gc, 'de, Id, T> 226 | { 227 | type Value = T; 228 | 229 | fn deserialize(self, deserializer: D) -> Result 230 | where 231 | D: Deserializer<'de>, 232 | { 233 | T::deserialize_gc(self.context, deserializer) 234 | } 235 | } 236 | 237 | macro_rules! impl_for_map { 238 | ($target:ident $(where $($bounds:tt)*)?) 
=> { 239 | impl<'gc, 'de, Id: CollectorId, 240 | K: Eq + Hash + GcDeserialize<'gc, 'de, Id>, 241 | V: GcDeserialize<'gc, 'de, Id>, 242 | S: BuildHasher + Default 243 | > GcDeserialize<'gc, 'de, Id> for $target $(where $($bounds)*)* { 244 | fn deserialize_gc>(ctx: &'gc Id::Context, deserializer: D) -> Result { 245 | struct MapVisitor< 246 | 'gc, 'de, Id: CollectorId, 247 | K: GcDeserialize<'gc, 'de, Id>, 248 | V: GcDeserialize<'gc, 'de, Id>, 249 | S: BuildHasher + Default 250 | > { 251 | ctx: &'gc Id::Context, 252 | marker: PhantomData<(&'de S, K, V)> 253 | } 254 | impl<'gc, 'de, Id: CollectorId, 255 | K: Eq + Hash + GcDeserialize<'gc, 'de, Id>, 256 | V: GcDeserialize<'gc, 'de, Id>, 257 | S: BuildHasher + Default 258 | > Visitor<'de> for MapVisitor<'gc, 'de, Id, K, V, S> { 259 | type Value = $target; 260 | fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 261 | f.write_str(concat!("a ", stringify!($target))) 262 | } 263 | #[inline] 264 | fn visit_map(self, mut access: A) -> Result 265 | where A: MapAccess<'de>, { 266 | let mut values = $target::::with_capacity_and_hasher( 267 | access.size_hint().unwrap_or(0).min(1024), 268 | S::default() 269 | ); 270 | while let Some((key, value)) = access.next_entry_seed( 271 | GcDeserializeSeed::new(self.ctx), 272 | GcDeserializeSeed::new(self.ctx) 273 | )? { 274 | values.insert(key, value); 275 | } 276 | 277 | Ok(values) 278 | } 279 | } 280 | let visitor: MapVisitor = MapVisitor { ctx, marker: PhantomData }; 281 | deserializer.deserialize_map(visitor) 282 | } 283 | } 284 | }; 285 | } 286 | 287 | macro_rules! impl_for_set { 288 | ($target:ident $(where $($bounds:tt)*)?) => { 289 | impl<'gc, 'de, Id: CollectorId, 290 | T: Eq + Hash + GcDeserialize<'gc, 'de, Id>, 291 | S: BuildHasher + Default 292 | > GcDeserialize<'gc, 'de, Id> for $target $(where $($bounds)*)* { 293 | fn deserialize_gc>(ctx: &'gc Id::Context, deserializer: D) -> Result { 294 | struct SetVisitor< 295 | 'gc, 'de, Id: CollectorId, 296 | T: GcDeserialize<'gc, 'de, Id>, 297 | S: BuildHasher + Default 298 | > { 299 | ctx: &'gc Id::Context, 300 | marker: PhantomData T> 301 | } 302 | impl<'gc, 'de, Id: CollectorId, 303 | T: Eq + Hash + GcDeserialize<'gc, 'de, Id>, 304 | S: BuildHasher + Default 305 | > Visitor<'de> for SetVisitor<'gc, 'de, Id, T, S> { 306 | type Value = $target; 307 | fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 308 | f.write_str(concat!("a ", stringify!($target))) 309 | } 310 | #[inline] 311 | fn visit_seq(self, mut access: A) -> Result 312 | where A: SeqAccess<'de>, { 313 | let mut values = $target::::with_capacity_and_hasher( 314 | access.size_hint().unwrap_or(0).min(1024), 315 | S::default() 316 | ); 317 | while let Some(value) = access.next_element_seed( 318 | GcDeserializeSeed::new(self.ctx) 319 | )? 
{ 320 | values.insert(value); 321 | } 322 | 323 | Ok(values) 324 | } 325 | } 326 | let visitor: SetVisitor = SetVisitor { ctx, marker: PhantomData }; 327 | deserializer.deserialize_seq(visitor) 328 | } 329 | } 330 | }; 331 | } 332 | 333 | impl_for_map!(HashMap where K: TraceImmutable, S: 'static); 334 | impl_for_set!(HashSet where T: TraceImmutable, S: 'static); 335 | #[cfg(feature = "indexmap")] 336 | impl_for_map!(IndexMap where K: GcSafe<'gc, Id>, S: 'static); 337 | #[cfg(feature = "indexmap")] 338 | impl_for_set!(IndexSet where T: TraceImmutable, S: 'static); 339 | 340 | #[cfg(test)] 341 | mod test { 342 | use super::*; 343 | use crate::epsilon::{EpsilonCollectorId, EpsilonSystem}; 344 | #[test] 345 | #[cfg(feature = "indexmap")] 346 | fn indexmap() { 347 | let system = EpsilonSystem::leak(); 348 | let ctx = system.new_context(); 349 | const INPUT: &str = r##"{"foo": "bar", "eats": "turds"}"##; 350 | let mut deser = serde_json::Deserializer::from_str(INPUT); 351 | let s = |s: &'static str| String::from(s); 352 | assert_eq!( 353 | > as GcDeserialize< 354 | EpsilonCollectorId, 355 | >>::deserialize_gc(&ctx, &mut deser) 356 | .unwrap(), 357 | indexmap::indexmap!( 358 | s("foo") => ctx.alloc(s("bar")), 359 | s("eats") => ctx.alloc(s("turds")) 360 | ) 361 | ); 362 | let mut deser = serde_json::Deserializer::from_str(INPUT); 363 | assert_eq!( 364 | , fnv::FnvBuildHasher> as GcDeserialize>::deserialize_gc(&ctx, &mut deser).unwrap(), 365 | indexmap::indexmap!( 366 | s("foo") => ctx.alloc(s("bar")), 367 | s("eats") => ctx.alloc(s("turds")) 368 | ) 369 | ); 370 | } 371 | #[test] 372 | fn gc() { 373 | let system = EpsilonSystem::leak(); 374 | let ctx = system.new_context(); 375 | let mut deser = serde_json::Deserializer::from_str(r#"128"#); 376 | assert_eq!( 377 | as GcDeserialize>::deserialize_gc( 378 | &ctx, &mut deser 379 | ) 380 | .unwrap(), 381 | ctx.alloc(128) 382 | ); 383 | } 384 | } 385 | -------------------------------------------------------------------------------- /libs/simple/src/alloc.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::vec_box)] // We must Box for a stable address 2 | use std::alloc::Layout; 3 | use std::mem; 4 | use std::mem::MaybeUninit; 5 | use std::ptr::NonNull; 6 | 7 | #[cfg(feature = "sync")] 8 | use once_cell::sync::OnceCell; 9 | #[cfg(not(feature = "sync"))] 10 | use once_cell::unsync::OnceCell; 11 | #[cfg(feature = "sync")] 12 | use parking_lot::Mutex; 13 | #[cfg(not(feature = "sync"))] 14 | use std::cell::RefCell; 15 | 16 | use zerogc_context::utils::AtomicCell; 17 | 18 | const DEBUG_INTERNAL_ALLOCATOR: bool = cfg!(zerogc_simple_debug_alloc); 19 | #[allow(clippy::assertions_on_constants)] // See rust-lang/clippy#7597 20 | mod debug { 21 | pub const PADDING: u32 = 0xDEADBEAF; 22 | pub const UNINIT: u32 = 0xCAFEBABE; 23 | pub const PADDING_TIMES: usize = 16; 24 | pub const PADDING_BYTES: usize = PADDING_TIMES * 4; 25 | pub unsafe fn pad_memory_block(ptr: *mut u8, size: usize) { 26 | assert!(super::DEBUG_INTERNAL_ALLOCATOR); 27 | let start = ptr.sub(PADDING_BYTES); 28 | for i in 0..PADDING_TIMES { 29 | (start as *mut u32).add(i).write(PADDING); 30 | } 31 | let end = ptr.add(size); 32 | for i in 0..PADDING_TIMES { 33 | (end as *mut u32).add(i).write(PADDING); 34 | } 35 | } 36 | pub unsafe fn mark_memory_uninit(ptr: *mut u8, size: usize) { 37 | assert!(super::DEBUG_INTERNAL_ALLOCATOR); 38 | let (blocks, leftover) = (size / 4, size % 4); 39 | for i in 0..blocks { 40 | (ptr as *mut u32).add(i).write(UNINIT); 
41 | } 42 | let leftover_ptr = ptr.add(blocks * 4); 43 | debug_assert_eq!(leftover_ptr.wrapping_add(leftover), ptr.add(size)); 44 | for i in 0..leftover { 45 | leftover_ptr.add(i).write(0xF0); 46 | } 47 | } 48 | pub unsafe fn assert_padded(ptr: *mut u8, size: usize) { 49 | assert!(super::DEBUG_INTERNAL_ALLOCATOR); 50 | let start = ptr.sub(PADDING_BYTES); 51 | let end = ptr.add(size); 52 | let start_padding = 53 | std::slice::from_raw_parts(start as *const u8 as *const u32, PADDING_TIMES); 54 | let region = std::slice::from_raw_parts(ptr as *const u8, size); 55 | let end_padding = std::slice::from_raw_parts(end as *const u8 as *const u32, PADDING_TIMES); 56 | let print_memory_region = || { 57 | use std::fmt::Write; 58 | let mut res = String::new(); 59 | for &val in start_padding { 60 | write!(&mut res, "{:X}", val).unwrap(); 61 | } 62 | res.push_str("||"); 63 | for &b in region { 64 | write!(&mut res, "{:X}", b).unwrap(); 65 | } 66 | res.push_str("||"); 67 | for &val in end_padding { 68 | write!(&mut res, "{:X}", val).unwrap(); 69 | } 70 | res 71 | }; 72 | // Closest to farthest 73 | for (idx, &block) in start_padding.iter().rev().enumerate() { 74 | if block == PADDING { 75 | continue; 76 | } 77 | assert_eq!( 78 | block, 79 | PADDING, 80 | "Unexpected start padding (offset -{}) w/ {}", 81 | idx * 4, 82 | print_memory_region() 83 | ); 84 | } 85 | for (idx, &block) in end_padding.iter().enumerate() { 86 | if block == PADDING { 87 | continue; 88 | } 89 | assert_eq!( 90 | block, 91 | PADDING, 92 | "Unexpected end padding (offset {}) w/ {}", 93 | idx * 4, 94 | print_memory_region() 95 | ) 96 | } 97 | } 98 | } 99 | /// The minimum size of supported memory (in words) 100 | /// 101 | /// Since the header takes at least one word, 102 | /// its not really worth ever allocating less than this 103 | pub const MINIMUM_WORDS: usize = 2; 104 | /// The maximum words supported by small arenas 105 | /// 106 | /// Past this we have to fallback to the global allocator 107 | pub const MAXIMUM_SMALL_WORDS: usize = 32; 108 | /// The alignment of elements in the arena 109 | pub const ARENA_ELEMENT_ALIGN: usize = std::mem::align_of::(); 110 | 111 | use crate::layout::{GcHeader, UnknownHeader}; 112 | 113 | #[inline] 114 | pub const fn fits_small_object(layout: Layout) -> bool { 115 | layout.size() <= MAXIMUM_SMALL_WORDS * std::mem::size_of::() 116 | && layout.align() <= ARENA_ELEMENT_ALIGN 117 | } 118 | 119 | pub(crate) struct Chunk { 120 | pub start: *mut u8, 121 | current: AtomicCell<*mut u8>, 122 | pub end: *mut u8, 123 | } 124 | impl Chunk { 125 | fn alloc(capacity: usize) -> Box { 126 | assert!(capacity >= 1); 127 | let mut result = Vec::::with_capacity(capacity); 128 | let start = result.as_mut_ptr(); 129 | std::mem::forget(result); 130 | let current = AtomicCell::new(start); 131 | Box::new(Chunk { 132 | start, 133 | current, 134 | end: unsafe { start.add(capacity) }, 135 | }) 136 | } 137 | 138 | #[inline] 139 | fn try_alloc(&self, amount: usize) -> Option> { 140 | loop { 141 | let old_current = self.current.load(); 142 | let remaining = self.end as usize - old_current as usize; 143 | if remaining >= amount { 144 | unsafe { 145 | let updated = old_current.add(amount); 146 | if self.current.compare_exchange(old_current, updated).is_ok() { 147 | return Some(NonNull::new_unchecked(old_current)); 148 | } else { 149 | continue; 150 | } 151 | } 152 | } else { 153 | return None; 154 | } 155 | } 156 | } 157 | #[inline] 158 | fn capacity(&self) -> usize { 159 | self.end as usize - self.start as usize 160 | } 161 | } 
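// Illustrative sketch (not from the repository): the lock-free bump allocation in
// `Chunk::try_alloc` above, restated over a plain byte offset with `std`'s
// `AtomicUsize` instead of an `AtomicCell` of raw pointers, so it runs without
// `unsafe`. The retry loop has the same shape: load the cursor, check the remaining
// space, and compare-exchange the cursor forward, retrying if another thread won.
fn bump_alloc_sketch() {
    use std::sync::atomic::{AtomicUsize, Ordering};

    struct BumpOffsets {
        cursor: AtomicUsize,
        capacity: usize,
    }
    impl BumpOffsets {
        // Return the start offset of a fresh `amount`-byte region, or `None` when full.
        fn try_alloc(&self, amount: usize) -> Option<usize> {
            loop {
                let old = self.cursor.load(Ordering::Relaxed);
                if self.capacity - old < amount {
                    return None; // this chunk is exhausted; the caller falls back
                }
                match self.cursor.compare_exchange(
                    old,
                    old + amount,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return Some(old),
                    Err(_) => continue, // lost the race; retry with the new cursor
                }
            }
        }
    }

    let chunk = BumpOffsets { cursor: AtomicUsize::new(0), capacity: 64 };
    assert_eq!(chunk.try_alloc(48), Some(0));
    assert_eq!(chunk.try_alloc(16), Some(48));
    assert_eq!(chunk.try_alloc(1), None);
}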
162 | impl Drop for Chunk { 163 | fn drop(&mut self) { 164 | unsafe { drop(Vec::from_raw_parts(self.start, 0, self.capacity())) } 165 | } 166 | } 167 | 168 | /// A slot in the free list 169 | #[repr(C)] 170 | pub struct FreeSlot { 171 | /// Pointer to the previous free slot 172 | pub(crate) prev_free: Option>, 173 | } 174 | pub const NUM_SMALL_ARENAS: usize = 15; 175 | const INITIAL_SIZE: usize = 512; 176 | 177 | /// The current state of the allocator. 178 | /// 179 | /// TODO: Support per-thread arena caching 180 | struct ArenaState { 181 | /// We have to Box the chunk so that it'll remain valid 182 | /// even when we move it. 183 | /// 184 | /// This is required for thread safety. 185 | /// One thread could still be seeing an old chunk's location 186 | /// after it's been moved. 187 | #[cfg(feature = "sync")] 188 | chunks: Mutex>>, 189 | /// List of chunks, not thread-safe 190 | /// 191 | /// We still box it however, as an extra check of safety. 192 | #[cfg(not(feature = "sync"))] 193 | chunks: RefCell>>, 194 | /// Lockless access to the current chunk 195 | /// 196 | /// The pointers wont be invalidated, 197 | /// since the references are internally boxed. 198 | current_chunk: AtomicCell>, 199 | } 200 | impl ArenaState { 201 | fn new(chunks: Vec>) -> Self { 202 | assert!(!chunks.is_empty()); 203 | let current_chunk = NonNull::from(&**chunks.last().unwrap()); 204 | let chunk_lock; 205 | #[cfg(feature = "sync")] 206 | { 207 | chunk_lock = Mutex::new(chunks); 208 | } 209 | #[cfg(not(feature = "sync"))] 210 | { 211 | chunk_lock = RefCell::new(chunks); 212 | } 213 | ArenaState { 214 | chunks: chunk_lock, 215 | current_chunk: AtomicCell::new(current_chunk), 216 | } 217 | } 218 | #[inline] 219 | #[cfg(feature = "sync")] 220 | fn lock_chunks(&self) -> ::parking_lot::MutexGuard>> { 221 | self.chunks.lock() 222 | } 223 | #[inline] 224 | #[cfg(not(feature = "sync"))] 225 | fn lock_chunks(&self) -> ::std::cell::RefMut>> { 226 | self.chunks.borrow_mut() 227 | } 228 | #[inline] 229 | fn current_chunk(&self) -> NonNull { 230 | self.current_chunk.load() 231 | } 232 | #[inline] 233 | unsafe fn force_current_chunk(&self, ptr: NonNull) { 234 | self.current_chunk.store(ptr); 235 | } 236 | #[inline] 237 | fn alloc(&self, element_size: usize) -> NonNull { 238 | unsafe { 239 | let chunk = &*self.current_chunk().as_ptr(); 240 | match chunk.try_alloc(element_size) { 241 | Some(header) => header.cast(), 242 | None => self.alloc_fallback(element_size), 243 | } 244 | } 245 | } 246 | 247 | #[cold] 248 | #[inline(never)] 249 | fn alloc_fallback(&self, element_size: usize) -> NonNull { 250 | let mut chunks = self.lock_chunks(); 251 | // Now that we hold the lock, check the current chunk again 252 | unsafe { 253 | if let Some(header) = self.current_chunk().as_ref().try_alloc(element_size) { 254 | return header.cast(); 255 | } 256 | } 257 | // Double capacity to amortize growth 258 | let last_capacity = chunks.last().unwrap().capacity(); 259 | chunks.push(Chunk::alloc(last_capacity * 2)); 260 | unsafe { 261 | self.force_current_chunk(NonNull::from(&**chunks.last().unwrap())); 262 | self.current_chunk() 263 | .as_ref() 264 | .try_alloc(element_size) 265 | .unwrap() 266 | .cast::() 267 | } 268 | } 269 | } 270 | 271 | /// The free list 272 | /// 273 | /// This is a lock-free linked list 274 | #[derive(Default)] 275 | pub(crate) struct FreeList { 276 | next: AtomicCell>>, 277 | } 278 | impl FreeList { 279 | unsafe fn add_free(&self, free: *mut UnknownHeader, size: usize) { 280 | if DEBUG_INTERNAL_ALLOCATOR { 281 | 
debug::assert_padded(free as *mut u8, size); 282 | debug::mark_memory_uninit(free as *mut u8, size); 283 | } 284 | let new_slot = free as *mut FreeSlot; 285 | let mut next = self.next.load(); 286 | loop { 287 | (*new_slot).prev_free = next; 288 | match self 289 | .next 290 | .compare_exchange(next, Some(NonNull::new_unchecked(new_slot))) 291 | { 292 | Ok(_) => break, 293 | Err(actual_next) => { 294 | next = actual_next; 295 | } 296 | } 297 | } 298 | } 299 | #[inline] 300 | fn take_free(&self) -> Option> { 301 | loop { 302 | let next_free = match self.next.load() { 303 | Some(free) => free, 304 | None => return None, // Out of free space 305 | }; 306 | // Update free pointer 307 | unsafe { 308 | if self 309 | .next 310 | .compare_exchange(Some(next_free), next_free.as_ref().prev_free) 311 | .is_err() 312 | { 313 | continue; /* retry */ 314 | } 315 | return Some(next_free.cast()); 316 | } 317 | } 318 | } 319 | } 320 | 321 | pub struct SmallArena { 322 | pub(crate) element_size: usize, 323 | state: ArenaState, 324 | free: FreeList, 325 | } 326 | 327 | impl SmallArena { 328 | pub(crate) unsafe fn add_free(&self, obj: *mut UnknownHeader) { 329 | self.free.add_free(obj, self.element_size) 330 | } 331 | #[cold] // Initialization is the slow path 332 | fn with_words(num_words: usize) -> SmallArena { 333 | assert!(num_words >= MINIMUM_WORDS); 334 | let element_size = num_words * mem::size_of::(); 335 | assert!(INITIAL_SIZE >= element_size * 2); 336 | let chunks = vec![Chunk::alloc(INITIAL_SIZE)]; 337 | SmallArena { 338 | state: ArenaState::new(chunks), 339 | element_size, 340 | free: Default::default(), 341 | } 342 | } 343 | #[inline] 344 | pub(crate) fn alloc(&self) -> NonNull { 345 | // Check the free list 346 | if let Some(free) = self.free.take_free() { 347 | free.cast() 348 | } else if DEBUG_INTERNAL_ALLOCATOR { 349 | let mem = self 350 | .state 351 | .alloc(self.element_size + debug::PADDING_BYTES * 2) 352 | .as_ptr() as *mut u8; 353 | unsafe { 354 | let mem = mem.add(debug::PADDING_BYTES); 355 | debug::pad_memory_block(mem, self.element_size); 356 | debug::mark_memory_uninit(mem, self.element_size); 357 | NonNull::new_unchecked(mem).cast() 358 | } 359 | } else { 360 | self.state.alloc(self.element_size) 361 | } 362 | } 363 | } 364 | macro_rules! arena_match { 365 | ($arenas:expr, $target:ident, max = $max:expr; $($size:pat => $num_words:literal @ $idx:expr),*) => { 366 | Some(match $target { 367 | $($size => $arenas[$idx].get_or_init(|| { 368 | assert_eq!(SMALL_ARENA_SIZES[$idx], $num_words); 369 | SmallArena::with_words($num_words) 370 | }),)* 371 | _ => { 372 | assert!($target > $max); 373 | return None 374 | } 375 | }) 376 | }; 377 | } 378 | const SMALL_ARENA_SIZES: [usize; NUM_SMALL_ARENAS] = 379 | [2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32]; 380 | pub struct SmallArenaList { 381 | // NOTE: Internally boxed to avoid bloating main struct 382 | arenas: Box<[OnceCell; NUM_SMALL_ARENAS]>, 383 | } 384 | impl SmallArenaList { 385 | pub fn new() -> Self { 386 | // NOTE: Why does writing arrays have to be so difficult:? 
387 | unsafe { 388 | let mut arenas: Box<[MaybeUninit>; NUM_SMALL_ARENAS]> = 389 | Box::new_uninit().assume_init(); 390 | for i in 0..NUM_SMALL_ARENAS { 391 | arenas[i].as_mut_ptr().write(OnceCell::new()); 392 | } 393 | SmallArenaList { 394 | // NOTE: This is done because I want to explicitly specify types 395 | arenas: mem::transmute::< 396 | Box<[MaybeUninit>; NUM_SMALL_ARENAS]>, 397 | Box<[OnceCell; NUM_SMALL_ARENAS]>, 398 | >(arenas), 399 | } 400 | } 401 | } 402 | #[inline] // This should hopefully be constant folded away (layout is const) 403 | pub fn find(&self, layout: Layout) -> Option<&SmallArena> { 404 | if !fits_small_object(layout) { 405 | return None; 406 | } 407 | // Divide round up 408 | let word_size = mem::size_of::(); 409 | let num_words = (layout.size() + (word_size - 1)) / word_size; 410 | self.find_raw(num_words) 411 | } 412 | #[inline] // We want this constant-folded away...... 413 | fn find_raw(&self, num_words: usize) -> Option<&SmallArena> { 414 | arena_match!( 415 | self.arenas, num_words, max = 32; 416 | 0..=2 => 2 @ 0, 417 | 3 => 3 @ 1, 418 | 4 => 4 @ 2, 419 | 5 => 5 @ 3, 420 | 6 => 6 @ 4, 421 | 7 => 7 @ 5, 422 | 8 => 8 @ 6, 423 | 9..=10 => 10 @ 7, 424 | 11..=12 => 12 @ 8, 425 | 13..=14 => 14 @ 9, 426 | 15..=16 => 16 @ 10, 427 | 17..=20 => 20 @ 11, 428 | 21..=24 => 24 @ 12, 429 | 25..=28 => 28 @ 13, 430 | 29..=32 => 32 @ 14 431 | ) 432 | } 433 | } 434 | --------------------------------------------------------------------------------
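// Illustrative sketch (not from the repository): the size-class mapping implemented by
// `SmallArenaList::find`/`find_raw` above, restated as a plain function so the word
// rounding is easy to check. The fifteen classes mirror `SMALL_ARENA_SIZES`; the
// returned index is the arena slot that `arena_match!` would select.
fn size_class_sketch() {
    const SMALL_ARENA_SIZES: [usize; 15] = [2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32];

    // Map a byte size to `(words_per_element, arena_index)`, or `None` for large objects.
    fn size_class(bytes: usize) -> Option<(usize, usize)> {
        let word = std::mem::size_of::<usize>();
        let num_words = ((bytes + word - 1) / word).max(2); // round up, at least MINIMUM_WORDS
        SMALL_ARENA_SIZES
            .iter()
            .position(|&words| words >= num_words)
            .map(|idx| (SMALL_ARENA_SIZES[idx], idx))
    }

    let word = std::mem::size_of::<usize>();
    assert_eq!(size_class(1), Some((2, 0))); // tiny objects share the two-word arena
    assert_eq!(size_class(9 * word), Some((10, 7))); // 9 words rounds up to the 10-word class
    assert_eq!(size_class(33 * word), None); // too large: falls back to the global allocator
}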