├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── docs
│   ├── Allocation.md
│   ├── Barriers.md
│   └── RootingGuide.md
├── flamegraph.svg
└── src
    ├── api.rs
    ├── heap.rs
    ├── lib.rs
    ├── main.rs
    ├── mem.rs
    ├── safepoint.rs
    ├── space.rs
    └── threads.rs
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | **/*.rs.bk
3 | /idea
4 | idea/
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "cgc"
3 | version = "0.4.0"
4 | authors = ["Adel prokurov "]
5 | edition = "2018"
6 | description = "Compacting garbage collector"
7 | repository = "https://github.com/playXE/cgc"
8 | readme = "README.md"
9 | keywords = ["gc","garbage-collector"]
10 | license = "MIT"
11 | 
12 | [dependencies]
13 | parking_lot = "0.10"
14 | lazy_static = "1"
15 | time = "0.2"
16 | fxhash = "0.2"
17 | log = "0.4"
18 | simple_logger = "1"
19 | crossbeam = "0.7"
20 | smallvec = "1.4"
21 | [target.'cfg(target_family = "windows")'.dependencies]
22 | winapi = { version = "0.3", features = ["winuser","memoryapi","errhandlingapi","processthreadsapi","synchapi","sysinfoapi"] }
23 | kernel32-sys = "0.2"
24 | 
25 | 
26 | [target.'cfg(target_family = "unix")'.dependencies]
27 | libc = "0.2"
28 | 
29 | [features]
30 | default = []
31 | trace-gc = []
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2019 Adel Prokurov
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # concurrent-cgc
2 | Concurrent cgc implementation. This implementation can be used from multiple threads.
3 | 
4 | ## Documentation
5 | Please look at the `docs/` directory.
--------------------------------------------------------------------------------
/docs/Allocation.md:
--------------------------------------------------------------------------------
1 | # Allocation
2 | The concurrent version of cgc does not allow creating your own heap instance; instead there is a global heap with 32 KB heap blocks and support for large allocations (allocations bigger than 8 KB).
There is a `GlobalHeap::allocate` function, but you should not use it directly; instead use the `mt_alloc` function from `cgc::threads`.
3 | 
4 | ## mt_alloc(value, finalize)
5 | `mt_alloc` accepts two arguments: the first is the value you want to place on the heap, and the second is a boolean flag that tells the GC whether to invoke the value's finalizer and destructor. This flag should be `true` for *all* structures that themselves allocate on the heap, otherwise memory leaks will happen.
6 | 
7 | ## mt_root(handle)
8 | `mt_root` takes a `Handle` and makes a rooted value from it; this function is useful if you want to put your value into the root set.
--------------------------------------------------------------------------------
/docs/Barriers.md:
--------------------------------------------------------------------------------
1 | # Barriers in cgc
2 | Since cgc is a concurrent, moving garbage collector it needs read and write barriers to work. This section covers how to use the GC barriers and how they work.
3 | 
4 | ## Read barriers
5 | Read barriers are implicit and the programmer does not need to care about them. A read barrier is emitted whenever you load a value; what it does is return the proper (possibly forwarded) pointer to the heap value.
6 | 
7 | The implementation of the read barrier is pretty simple (pseudocode):
8 | ```rust
9 | fn read_barrier(src) {
10 |     return forward(src)
11 | }
12 | ```
13 | We assume that the forwarding pointer of `src` points to `src` itself, or to the new address if the GC has copied the object; the read barrier simply reads that forwarding pointer.
14 | 
15 | ## Write barriers
16 | Write barriers are explicit and the programmer must take care of them, otherwise stores may lead to UB or a segfault.
17 | A write barrier should be inserted before any store operation into a heap value:
18 | ```rust
19 | let value = mt_alloc(vec![None], true);
20 | cgc::write_barrier(&value);
21 | value.get_mut()[0] = Some(42);
22 | ```
23 | The write barrier helps the GC rescan an object when another GC object is stored into it.
24 | 
25 | Implementation (pseudocode):
26 | ```rust
27 | fn write_barrier(src) {
28 |     if copying_in_progress {
29 |         if !is_grey(src) {
30 |             worklist.push(src);
31 |         } else {
32 |             return;
33 |         }
34 |     }
35 | }
36 | ```
37 | 
--------------------------------------------------------------------------------
/docs/RootingGuide.md:
--------------------------------------------------------------------------------
1 | # Rooting Guide
2 | 
3 | This guide explains the basics of using `cgc` in your programs.
4 | Since `cgc` is a moving GC it is very important that it knows about each and every pointer to a GC thing in the system.
5 | 
6 | ## What is a GC thing pointer?
7 | "GC thing" is the term used to refer to memory allocated and managed by cgc.
8 | You can think of a "GC thing" as anything that implements the `Traceable` trait and is rooted or stored in a rooted object.
9 | 
10 | ## GC thing on the stack
11 | 
12 | - `Rooted<T>`:
13 | All GC thing pointers stored on the stack (i.e. local variables and function parameters) must use `cgc::api::Rooted<T>`.
14 | This is an RAII structure returned from `mt_alloc` that gets destroyed at the end of the scope. You can also use `Arc` or `Rc` together with `Rooted<T>`.
15 | 
16 | Example:
17 | ```rust
18 | 
19 | { /* scope start */
20 |     let foo: Rooted<i32> = mt_alloc(42, false);
21 |     // do something with foo...
22 | } /* scope end */
23 | HEAP.collect();
24 | ```
25 | As you can see, we allocate an `i32` on the GC heap and then do something with it. When we are done with `foo` it reaches the end of the scope; you can also call `drop(foo)` explicitly, but that is normally unnecessary when you already use the GC. A complete, runnable version of this pattern is shown below.
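The crate's own `src/main.rs` (included later in this dump) exercises exactly this pattern; the following is a lightly annotated sketch of it. The only assumption added here is the millisecond duration of the final sleep, which merely gives the background GC worker thread a moment to finish logging before the process exits:

```rust
extern crate cgc;
use cgc::heap::HEAP;
use cgc::threads::*;

fn main() {
    simple_logger::init().unwrap(); // optional: enables the GC trace logs
    attach_current_thread();        // register this thread as a mutator
    {
        let x = mt_alloc(42, false); // rooted while `x` is in scope
        let y = mt_alloc(3, false);
        println!("{}", *x + *y);     // `Deref` gives direct access to the values
    }                                // `x` and `y` are unrooted here
    HEAP.collect();                  // the two cells can now be reclaimed
    std::thread::sleep(std::time::Duration::from_millis(1));
}
```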
After the scope ends we also invoke `HEAP.collect()`; this function triggers a garbage-collection cycle and will sweep `foo` (i.e. "free" it).
26 | 
27 | ## GC things on the heap
28 | - `Handle<T>`:
29 | 
30 | GC thing pointers on the heap must be wrapped in a `Handle<T>`. `Handle<T>` **pointers must also continue to be traced in the normal way**, which is covered below.
31 | 
32 | `Handle<T>` does not require invoking `mt_alloc`; it can be obtained from a `Rooted<T>` using `Handle::from` or `Rooted::<T>::to_heap`.
33 | 
34 | There are `Handle::get` and `Handle::get_mut` that the user can use to access the value. It is UB to access a value that has already been collected.
35 | 
36 | To get a `Rooted<T>` again you can use `mt_root`, which will put your handle into the root set of the current mutator.
37 | # Tracing
38 | 
39 | All GC pointers stored on the heap must be traced or they will be freed. Almost always, GC pointers are traced through rooted objects located on the stack.
--------------------------------------------------------------------------------
/flamegraph.svg:
--------------------------------------------------------------------------------
1 | [Flame graph SVG (text content omitted): 115 samples in total, of which ~79% fall in cgc::collector::GlobalCollector::alloc (dominated by system malloc/realloc and BumpAllocator::bump_alloc) and ~12% in cgc::collector::GlobalCollector::collect, with small contributions from RootedInner::is_rooted and MarkCompact::relocate.]
--------------------------------------------------------------------------------
/src/api.rs:
--------------------------------------------------------------------------------
1 | use crate::mem::Address;
2 | use smallvec::SmallVec;
3 | pub unsafe trait Trace: Finalizer {
4 |     fn mark(&self);
5 |     fn unmark(&self);
6 |     fn references(&self) -> SmallVec<[*const dyn HeapTrait; 64]>;
7 | }
8 | 
9 | #[derive(Default)]
10 | pub struct Tracer {
11 |     stack: SmallVec<[*const dyn HeapTrait; 64]>,
12 | }
13 | impl Tracer {
14 |     pub fn for_each(&mut self, mut f: impl FnMut(*const dyn HeapTrait)) {
15 |         while let Some(item) = self.stack.pop() {
16 |             f(item);
17 |         }
18 |         //self.stack.into_iter().for_each(|x| f(*x));
19 |     }
20 | 
21 |     pub fn trace(&mut self, item: *const dyn HeapTrait) {
22 |         self.stack.push(item);
23 |     }
24 | }
25 | 
26 | pub trait Traceable
27 | where
28 |     Self: Finalizer,
29 | {
30 |     fn trace_with(&self, _: &mut Tracer) {}
31 | }
32 | 
33 | unsafe impl<T: Traceable> Trace for T {
34 |     fn mark(&self) {
35 |         let mut tracer = Tracer::default();
36 |         self.trace_with(&mut tracer);
37 |         tracer.for_each(|pointer| unsafe { (*pointer).mark() });
38 |     }
39 |     fn unmark(&self) {
40 |         let mut tracer = Tracer::default();
41 |         self.trace_with(&mut tracer);
42 |         tracer.for_each(|pointer| unsafe { (*pointer).unmark() });
43 |     }
44 | 
45 |     fn references(&self) -> SmallVec<[*const dyn HeapTrait; 64]> {
46 |         let mut tracer = Tracer::default();
47 |         self.trace_with(&mut tracer);
48 |         tracer.stack
49 |     }
50 | }
51 | 
52 | pub unsafe trait HeapTrait {
53 |     fn mark(&self);
54 |     fn unmark(&self);
55 | 
56 |     fn get_fwd(&self) -> Address;
57 |     fn set_fwd(&self, _: Address);
58 |     fn copy_to(&self, addr: Address);
59 |     fn addr(&self) -> Address;
60 |     fn inner(&self) -> *mut crate::heap::HeapInner;
61 | 
62 |     fn is_marked(&self) -> bool;
63 | }
64 | 
65 | pub trait Finalizer {
66 |     fn finalize(&mut self) {}
67 | }
68 | 
69 | macro_rules!
simple { 70 | ($($t: ty)*) => { 71 | $( 72 | impl Traceable for $t {} 73 | impl Finalizer for $t { 74 | fn finalize(&mut self) { 75 | 76 | } 77 | } 78 | )* 79 | }; 80 | } 81 | 82 | simple!( 83 | i8 84 | i16 85 | i32 86 | i64 87 | i128 88 | u8 89 | u16 90 | u32 91 | u64 92 | u128 93 | f64 94 | f32 95 | bool 96 | String 97 | isize 98 | usize 99 | std::fs::File 100 | std::fs::FileType 101 | std::fs::Metadata 102 | std::fs::OpenOptions 103 | std::io::Stdin 104 | std::io::Stdout 105 | std::io::Stderr 106 | std::io::Error 107 | std::net::TcpStream 108 | std::net::TcpListener 109 | std::net::UdpSocket 110 | std::net::Ipv4Addr 111 | std::net::Ipv6Addr 112 | std::net::SocketAddrV4 113 | std::net::SocketAddrV6 114 | std::path::Path 115 | std::path::PathBuf 116 | std::process::Command 117 | std::process::Child 118 | std::process::ChildStdout 119 | std::process::ChildStdin 120 | std::process::ChildStderr 121 | std::process::Output 122 | std::process::ExitStatus 123 | std::process::Stdio 124 | std::sync::Barrier 125 | std::sync::Condvar 126 | std::sync::Once 127 | std::ffi::CStr 128 | std::ffi::CString 129 | &'static str 130 | ); 131 | 132 | impl Traceable for Option { 133 | fn trace_with(&self, tracer: &mut Tracer) { 134 | if let Some(item) = self { 135 | item.trace_with(tracer); 136 | } 137 | } 138 | } 139 | impl Finalizer for Option { 140 | fn finalize(&mut self) { 141 | if let Some(item) = self { 142 | item.finalize(); 143 | } 144 | } 145 | } 146 | 147 | impl Traceable for Vec { 148 | fn trace_with(&self, tracer: &mut Tracer) { 149 | for item in self.iter() { 150 | item.trace_with(tracer); 151 | } 152 | } 153 | } 154 | 155 | impl Traceable for T { 156 | fn trace_with(&self, tracer: &mut Tracer) { 157 | tracer.trace(self as *const dyn HeapTrait); 158 | } 159 | } 160 | 161 | impl Finalizer for Vec { 162 | fn finalize(&mut self) { 163 | for item in self.iter_mut() { 164 | item.finalize(); 165 | } 166 | } 167 | } 168 | 169 | pub trait RootedTrait 170 | where 171 | Self: HeapTrait, 172 | { 173 | fn is_rooted(&self) -> bool; 174 | fn references(&self) -> SmallVec<[*const dyn HeapTrait; 64]>; 175 | } 176 | 177 | pub struct Rooted { 178 | pub(crate) inner: *mut RootedInner, 179 | } 180 | 181 | impl Rooted { 182 | fn inner(&self) -> &mut RootedInner { 183 | unsafe { &mut *self.inner } 184 | } 185 | pub fn to_heap(self) -> Handle { 186 | Handle::from(self) 187 | } 188 | pub fn get(&self) -> &T { 189 | unsafe { 190 | &(&*crate::heap::read_barrier_impl(self.inner().inner as *const _ as *mut _)).value 191 | } 192 | //unsafe { &(&*self.inner().inner).value } 193 | } 194 | pub fn get_mut(&self) -> &mut T { 195 | unsafe { 196 | &mut (&mut *crate::heap::read_barrier_impl(self.inner().inner as *const _ as *mut _)) 197 | .value 198 | } 199 | } 200 | } 201 | 202 | pub(crate) struct RootedInner { 203 | pub(crate) rooted: bool, 204 | pub(crate) inner: *mut crate::heap::HeapInner, 205 | } 206 | impl Drop for Rooted { 207 | fn drop(&mut self) { 208 | unsafe { 209 | debug_assert!(!self.inner.is_null()); 210 | let inner = &mut *self.inner; 211 | inner.rooted = false; 212 | } 213 | } 214 | } 215 | 216 | unsafe impl HeapTrait for RootedInner { 217 | fn mark(&self) { 218 | unsafe { 219 | (&mut *self.inner).mark(true); 220 | } 221 | } 222 | 223 | fn unmark(&self) { 224 | unsafe { 225 | (&mut *self.inner).mark(false); 226 | } 227 | } 228 | fn get_fwd(&self) -> Address { 229 | unsafe { (&*self.inner).fwdptr() } 230 | } 231 | 232 | fn set_fwd(&self, fwd: Address) { 233 | unsafe { 234 | (&mut *self.inner).set_fwdptr(fwd); 235 | } 
236 | } 237 | 238 | fn copy_to(&self, addr: Address) { 239 | debug_assert!(addr.is_non_null() && !self.inner.is_null()); 240 | unsafe { 241 | std::ptr::copy( 242 | self.inner as *const u8, 243 | addr.to_mut_ptr(), 244 | std::mem::size_of_val(&*self.inner), 245 | ) 246 | } 247 | } 248 | 249 | fn addr(&self) -> Address { 250 | Address::from_ptr(self.inner as *const u8) 251 | } 252 | fn is_marked(&self) -> bool { 253 | unsafe { (&*self.inner).is_marked() } 254 | } 255 | fn inner(&self) -> *mut crate::heap::HeapInner { 256 | self.inner 257 | } 258 | } 259 | 260 | impl RootedTrait for RootedInner { 261 | fn is_rooted(&self) -> bool { 262 | self.rooted 263 | } 264 | fn references(&self) -> SmallVec<[*const dyn HeapTrait; 64]> { 265 | unsafe { (&*self.inner).value.references() } 266 | } 267 | } 268 | 269 | /// Wraps GC heap pointer. 270 | /// 271 | /// GC thing pointers on the heap must be wrapped in a `Handle` 272 | pub struct Handle { 273 | pub(crate) inner: *mut crate::heap::HeapInner, 274 | } 275 | impl From> for Handle { 276 | fn from(x: Rooted) -> Self { 277 | unsafe { 278 | Self { 279 | inner: (*x.inner).inner, 280 | } 281 | } 282 | } 283 | } 284 | 285 | impl From<&Rooted> for Handle { 286 | fn from(x: &Rooted) -> Self { 287 | unsafe { 288 | Self { 289 | inner: (*x.inner).inner, 290 | } 291 | } 292 | } 293 | } 294 | 295 | impl Handle { 296 | pub fn get(&self) -> &T { 297 | unsafe { 298 | debug_assert!(!self.inner.is_null()); 299 | let src = crate::heap::read_barrier_impl(self.inner as *const _ as *mut _); 300 | &(&*src).value 301 | } 302 | } 303 | 304 | /// Returns mutable reference to rooted value 305 | /// 306 | /// # Safety 307 | /// Rust semantics doesn't allow two mutable references at the same time and this function is safe as long as you have only one mutable reference. 
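/// A hedged usage sketch (assuming `h` is a mutable `Handle<Vec<i32>>` obtained via `Handle::from`);
/// per `docs/Barriers.md`, run the explicit write barrier before storing through it:
///
/// ```ignore
/// cgc::write_barrier(&h);
/// h.get_mut().push(1);
/// ```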
308 | /// 309 | /// If you want to be 100% sure that you don't have two or more mutable references at the same time please use `Heap>` 310 | /// 311 | /// 312 | pub fn get_mut(&mut self) -> &mut T { 313 | unsafe { 314 | debug_assert!(!self.inner.is_null()); 315 | let src = crate::heap::read_barrier_impl(self.inner as *const _ as *mut _); 316 | &mut (&mut *src).value 317 | } 318 | } 319 | } 320 | 321 | unsafe impl HeapTrait for Handle { 322 | fn copy_to(&self, addr: Address) { 323 | debug_assert!(addr.is_non_null() && !self.inner.is_null()); 324 | unsafe { 325 | std::ptr::copy( 326 | self.inner as *const u8, 327 | addr.to_mut_ptr(), 328 | std::mem::size_of_val(&*self.inner), 329 | ) 330 | } 331 | } 332 | fn mark(&self) { 333 | unsafe { 334 | (&mut *self.inner).mark(true); 335 | } 336 | } 337 | fn unmark(&self) { 338 | unsafe { 339 | (&mut *self.inner).mark(false); 340 | } 341 | } 342 | fn get_fwd(&self) -> Address { 343 | unsafe { (&*self.inner).fwdptr() } 344 | } 345 | 346 | fn set_fwd(&self, fwd: Address) { 347 | unsafe { 348 | (&mut *self.inner).set_fwdptr(fwd); 349 | } 350 | } 351 | 352 | fn addr(&self) -> Address { 353 | Address::from_ptr(self.inner as *const u8) 354 | } 355 | fn is_marked(&self) -> bool { 356 | unsafe { (&*self.inner).is_marked() } 357 | } 358 | fn inner(&self) -> *mut crate::heap::HeapInner { 359 | self.inner 360 | } 361 | } 362 | impl Copy for Handle {} 363 | impl Clone for Handle { 364 | fn clone(&self) -> Self { 365 | *self 366 | } 367 | } 368 | 369 | use std::cmp; 370 | 371 | impl PartialOrd for Handle { 372 | fn partial_cmp(&self, other: &Self) -> Option { 373 | self.get().partial_cmp(other.get()) 374 | } 375 | } 376 | 377 | impl Ord for Handle { 378 | fn cmp(&self, other: &Self) -> cmp::Ordering { 379 | self.get().cmp(other.get()) 380 | } 381 | } 382 | 383 | impl PartialEq for Handle { 384 | fn eq(&self, other: &Self) -> bool { 385 | self.get().eq(other.get()) 386 | } 387 | } 388 | 389 | impl Eq for Handle {} 390 | 391 | use std::hash::{Hash, Hasher}; 392 | 393 | impl Hash for Handle { 394 | fn hash(&self, state: &mut H) { 395 | self.get().hash(state); 396 | } 397 | } 398 | 399 | use std::fmt; 400 | 401 | impl fmt::Display for Handle { 402 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 403 | self.get().fmt(f) 404 | } 405 | } 406 | 407 | impl fmt::Debug for Handle { 408 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 409 | self.get().fmt(f) 410 | } 411 | } 412 | 413 | impl PartialOrd for Rooted { 414 | fn partial_cmp(&self, other: &Self) -> Option { 415 | self.get().partial_cmp(other.get()) 416 | } 417 | } 418 | 419 | impl Ord for Rooted { 420 | fn cmp(&self, other: &Self) -> cmp::Ordering { 421 | self.get().cmp(other.get()) 422 | } 423 | } 424 | 425 | impl PartialEq for Rooted { 426 | fn eq(&self, other: &Self) -> bool { 427 | self.get().eq(other.get()) 428 | } 429 | } 430 | 431 | impl Eq for Rooted {} 432 | 433 | impl Hash for Rooted { 434 | fn hash(&self, state: &mut H) { 435 | self.get().hash(state); 436 | } 437 | } 438 | 439 | impl fmt::Display for Rooted { 440 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 441 | self.get().fmt(f) 442 | } 443 | } 444 | 445 | impl fmt::Debug for Rooted { 446 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 447 | write!(f, "{:?}", self.get()) 448 | } 449 | } 450 | 451 | impl Finalizer for Handle { 452 | fn finalize(&mut self) {} 453 | } 454 | 455 | impl Finalizer for Rooted { 456 | fn finalize(&mut self) { 457 | self.get_mut().finalize(); 458 | } 459 | } 460 | 461 | use 
std::ops::{Deref, DerefMut}; 462 | 463 | impl Deref for Rooted { 464 | type Target = T; 465 | fn deref(&self) -> &T { 466 | self.get() 467 | } 468 | } 469 | 470 | impl DerefMut for Rooted { 471 | fn deref_mut(&mut self) -> &mut T { 472 | self.get_mut() 473 | } 474 | } 475 | 476 | impl Deref for Handle { 477 | type Target = T; 478 | fn deref(&self) -> &T { 479 | self.get() 480 | } 481 | } 482 | 483 | impl DerefMut for Handle { 484 | fn deref_mut(&mut self) -> &mut T { 485 | self.get_mut() 486 | } 487 | } 488 | -------------------------------------------------------------------------------- /src/heap.rs: -------------------------------------------------------------------------------- 1 | use crate::api::*; 2 | use crate::mem::*; 3 | use crate::space::*; 4 | use crossbeam::queue::SegQueue; 5 | use std::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering}; 6 | pub const GC_WHITE: u8 = 0; 7 | pub const GC_GREY: u8 = 1; 8 | pub const GC_BLACK: u8 = 2; 9 | 10 | #[cfg(not(feature = "trace-gc"))] 11 | const TRACE_GC: bool = false; 12 | 13 | #[cfg(feature = "trace-gc")] 14 | const TRACE_GC: bool = true; 15 | 16 | pub struct HeapInner { 17 | /// Foward address, initially points to `self` for read barriers. 18 | pub(crate) forward: AtomicUsize, 19 | pub(crate) color: AtomicU8, 20 | pub(crate) value: T, 21 | } 22 | impl HeapInner { 23 | pub fn mark(&self, _x: bool) {} 24 | pub fn fwdptr(&self) -> Address { 25 | Address::from_ptr(self.forward.load(Ordering::Acquire) as *const u8) 26 | } 27 | pub fn set_fwdptr(&self, fwdptr: Address) { 28 | self.forward.store(fwdptr.to_usize(), Ordering::Release); 29 | } 30 | pub fn is_marked(&self) -> bool { 31 | false 32 | } 33 | } 34 | 35 | pub(crate) unsafe fn read_barrier_impl(src_: *mut HeapInner) -> *mut HeapInner { 36 | let src = &*src_; 37 | 38 | // src.forward points to fromspace or to tospace. 39 | let r = src.forward.load(Ordering::Acquire) as *mut _; 40 | log::trace!("Read barrier: From {:p} to {:p}", src_, r); 41 | r 42 | } 43 | 44 | pub(crate) unsafe fn write_barrier_impl(src: *mut HeapInner) { 45 | let cell = &mut *src; 46 | if HEAP.state.load(Ordering::Acquire) != GC_COPYING { 47 | return; 48 | } 49 | if cell.color.load(Ordering::Acquire) == GC_GREY { 50 | // Object is in worklist,return. 51 | return; 52 | } 53 | 54 | // Push object to worklist so GC will scan object for new objects written to our object. 
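// (The object being stored into is re-greyed and pushed onto the shared worklist so the
// concurrent copier will rescan it; this only matters while a copy cycle is in progress,
// which is what the GC_COPYING check above guards. See docs/Barriers.md for the user-facing rule.)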
55 | cell.color.store(GC_GREY, Ordering::Release); 56 | HEAP.worklist.push(GcValue { value: src }); 57 | } 58 | 59 | pub struct GcValue { 60 | value: *mut HeapInner, 61 | } 62 | 63 | impl GcValue { 64 | fn value(&self) -> &mut HeapInner { 65 | unsafe { &mut *self.value } 66 | } 67 | fn relocate(&self, addr: Address) { 68 | /*if self.slot.is_non_null() { 69 | unsafe { 70 | let slot = self.slot.to_mut_ptr::<*mut *mut u8>(); 71 | *slot = addr.to_mut_ptr(); 72 | } 73 | }*/ 74 | if !self.value().color.load(Ordering::Acquire) != HEAP.black.load(Ordering::Relaxed) { 75 | self.value().set_fwdptr(addr); 76 | self.value() 77 | .color 78 | .store(HEAP.black.load(Ordering::Relaxed), Ordering::Release); 79 | } 80 | } 81 | } 82 | 83 | unsafe impl Send for GcValue {} 84 | 85 | pub const GC_NONE: u8 = 0; 86 | pub const GC_COPYING: u8 = 2; 87 | pub const GC_INIT: u8 = 1; 88 | pub const GC_TERMINATE: u8 = 3; 89 | 90 | pub struct GlobalHeap { 91 | worklist: SegQueue, 92 | state: AtomicU8, 93 | fence_mutator: AtomicBool, 94 | needs_gc: AtomicBool, 95 | weak_handles: parking_lot::Mutex>>, 96 | from_space: parking_lot::Mutex, 97 | to_space: parking_lot::Mutex, 98 | white: AtomicU8, 99 | black: AtomicU8, 100 | pub(crate) threads: crate::threads::Threads, 101 | } 102 | 103 | unsafe impl Send for GlobalHeap {} 104 | unsafe impl Sync for GlobalHeap {} 105 | 106 | impl GlobalHeap { 107 | pub fn collect(&self) { 108 | //self.state.store(GC_INIT, Ordering::Release); 109 | 110 | crate::safepoint::stop_the_world(|mutators| { 111 | log::trace!("Start GC"); 112 | 113 | for thread in mutators.iter() { 114 | thread.rootset.borrow_mut().retain(|root| unsafe { 115 | if (&**root).is_rooted() { 116 | let value = GcValue { 117 | value: (&**root).inner(), 118 | }; 119 | HEAP.worklist.push(value); 120 | true 121 | } else { 122 | let _ = Box::from_raw(*root); 123 | false 124 | } 125 | }); 126 | } 127 | HEAP.state.store(GC_COPYING, Ordering::Relaxed); 128 | }); 129 | log::trace!("Resume threads"); 130 | } 131 | pub fn new() -> Self { 132 | Self { 133 | white: AtomicU8::new(GC_WHITE), 134 | black: AtomicU8::new(GC_BLACK), 135 | needs_gc: AtomicBool::new(false), 136 | to_space: parking_lot::Mutex::new(Space::new(32 * 1024)), 137 | from_space: parking_lot::Mutex::new(Space::new(32 * 1024)), 138 | worklist: SegQueue::new(), 139 | state: AtomicU8::new(0), 140 | fence_mutator: AtomicBool::new(false), 141 | weak_handles: parking_lot::Mutex::new(vec![]), 142 | threads: crate::threads::Threads::new(), 143 | } 144 | } 145 | fn flip() { 146 | let mut x = HEAP.to_space.lock(); 147 | 148 | let mut y = HEAP.from_space.lock(); 149 | y.reset_pages(); 150 | std::mem::swap(&mut *x, &mut *y); 151 | } 152 | fn visit(value: &mut HeapInner) { 153 | value.value.references().iter().for_each(|item| unsafe { 154 | HEAP.worklist.push(GcValue { 155 | value: (&**item).inner(), 156 | }) 157 | }); 158 | } 159 | fn flip_colours() { 160 | let white = HEAP.white.load(Ordering::Relaxed); 161 | let black = HEAP.black.load(Ordering::Relaxed); 162 | HEAP.white.store(black, Ordering::Relaxed); 163 | HEAP.black.store(white, Ordering::Relaxed); 164 | } 165 | fn collect_impl() { 166 | // copy objects 167 | Self::process_grey(); 168 | // disable write barriers 169 | HEAP.state.store(GC_NONE, Ordering::Release); 170 | // sweep objects that needs sweeping. 
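// Finalizable objects whose colour is still the current "white" were never reached from the
// roots during this cycle: run their finalizers and drop them. Surviving entries simply get
// their colour reset for the next cycle.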
171 | let mut handles = HEAP.weak_handles.lock(); 172 | for i in (0..handles.len()).rev() { 173 | if unsafe { 174 | (&*handles[i]).color.load(Ordering::Relaxed) == HEAP.white.load(Ordering::Relaxed) 175 | } { 176 | let item = handles.swap_remove(i); 177 | unsafe { 178 | (&mut *item).value.finalize(); 179 | std::ptr::drop_in_place(item); 180 | } 181 | } else { 182 | unsafe { 183 | (&*handles[i]) 184 | .color 185 | .store(HEAP.white.load(Ordering::Relaxed), Ordering::Relaxed); 186 | } 187 | } 188 | } 189 | crate::safepoint::stop_the_world(|_| { 190 | log::trace!("GC Worker: flip"); 191 | Self::flip(); 192 | Self::flip_colours(); 193 | }); 194 | /*HEAP.weak_handles.lock().retain(|item| { 195 | if unsafe { (&**item).color.load(Ordering::Relaxed) == GC_WHITE } { 196 | unsafe { 197 | (&mut **item).value.finalize(); 198 | std::ptr::drop_in_place(*item); 199 | } 200 | false 201 | } else { 202 | true 203 | } 204 | })*/ 205 | } 206 | 207 | pub fn allocate(&self, value: T, finalize: bool) -> *mut HeapInner { 208 | let mut space = self.from_space.lock(); 209 | let mut gc = false; 210 | let memory = space.allocate(std::mem::size_of::>(), &mut gc); 211 | log::trace!("Allocate {:p}", memory.to_ptr::()); 212 | if self.state.load(Ordering::Relaxed) != GC_COPYING { 213 | self.needs_gc.store(gc, Ordering::Relaxed); 214 | } 215 | unsafe { 216 | let raw = memory.to_mut_ptr::>(); 217 | raw.write(HeapInner { 218 | forward: AtomicUsize::new(raw as usize), 219 | color: AtomicU8::new(self.white.load(Ordering::Relaxed)), 220 | value, 221 | }); 222 | if finalize { 223 | self.weak_handles.lock().push(raw); 224 | } 225 | 226 | raw 227 | } 228 | } 229 | 230 | fn process_grey() { 231 | while HEAP.worklist.is_empty() == false { 232 | let value: GcValue = loop { 233 | match HEAP.worklist.pop() { 234 | Ok(val) => break val, 235 | Err(x) => panic!("{}", x), 236 | } 237 | }; 238 | 239 | if value.value().color.load(Ordering::Relaxed) == GC_WHITE { 240 | let hvalue = HEAP 241 | .to_space 242 | .lock() 243 | .allocate(std::mem::size_of_val(value.value()), &mut false); 244 | value 245 | .value() 246 | .forward 247 | .store(hvalue.to_usize(), Ordering::Release); 248 | unsafe { 249 | std::ptr::copy_nonoverlapping( 250 | value.value() as *mut _ as *const u8, 251 | hvalue.to_mut_ptr::(), 252 | std::mem::size_of_val(value.value()), 253 | ); 254 | } 255 | log::trace!( 256 | "GC Worker: Copy {:p}->{:p}", 257 | value.value(), 258 | hvalue.to_mut_ptr::() 259 | ); 260 | Self::visit(value.value()); 261 | value.relocate(hvalue); 262 | } else { 263 | //value.relocate(value.value().fwdptr()); 264 | } 265 | } 266 | } 267 | } 268 | 269 | fn collect_routine() { 270 | loop { 271 | let mut attempt = 0; 272 | while HEAP.state.load(Ordering::Relaxed) == GC_NONE { 273 | if attempt < 512 { 274 | std::thread::yield_now(); 275 | } else { 276 | std::thread::sleep(std::time::Duration::from_nanos(10)); 277 | } 278 | attempt += 1; 279 | } 280 | if HEAP.state.load(Ordering::Relaxed) == GC_TERMINATE { 281 | return; 282 | } 283 | 284 | GlobalHeap::collect_impl(); 285 | } 286 | } 287 | 288 | lazy_static::lazy_static! 
{ 289 | pub static ref HEAP: GlobalHeap = { 290 | std::thread::spawn(|| collect_routine()); 291 | GlobalHeap::new() 292 | }; 293 | } 294 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | pub mod heap; 3 | pub mod mem; 4 | pub mod safepoint; 5 | pub mod space; 6 | pub mod threads; 7 | 8 | /// Write barrier *must* be executed before store to some heap object happens. 9 | /// 10 | /// 11 | /// ## Where and when to use? 12 | /// You should place write barrier before store and write barrier is needed only when you store other GC value into GC value. 13 | pub fn write_barrier(src: &T) { 14 | unsafe { 15 | heap::write_barrier_impl(src.inner()); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate cgc; 2 | use cgc::heap::*; 3 | use cgc::threads::*; 4 | 5 | fn main() { 6 | simple_logger::init().unwrap(); 7 | attach_current_thread(); 8 | { 9 | let x = mt_alloc(42, false); 10 | let y = mt_alloc(3, false); 11 | println!("{}", *x + *y); 12 | } 13 | HEAP.collect(); 14 | std::thread::sleep(std::time::Duration::from_nanos(200000)); 15 | } 16 | -------------------------------------------------------------------------------- /src/mem.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::*; 2 | use std::fmt; 3 | static mut PAGE_SIZE: usize = 0; 4 | static mut PAGE_SIZE_BITS: usize = 0; 5 | 6 | pub fn page_size() -> usize { 7 | let result = unsafe { PAGE_SIZE }; 8 | 9 | if result != 0 { 10 | return result; 11 | } 12 | 13 | init_page_size(); 14 | 15 | unsafe { PAGE_SIZE } 16 | } 17 | 18 | pub fn page_size_bits() -> usize { 19 | let result = unsafe { PAGE_SIZE_BITS }; 20 | 21 | if result != 0 { 22 | return result; 23 | } 24 | 25 | init_page_size(); 26 | 27 | unsafe { PAGE_SIZE_BITS } 28 | } 29 | 30 | fn init_page_size() { 31 | unsafe { 32 | PAGE_SIZE = determine_page_size(); 33 | assert!((PAGE_SIZE & (PAGE_SIZE - 1)) == 0); 34 | 35 | PAGE_SIZE_BITS = log2(PAGE_SIZE); 36 | } 37 | } 38 | 39 | pub fn map_gc_mem() -> Address { 40 | commit(memory_limit(), false) 41 | } 42 | 43 | #[cfg(target_family = "unix")] 44 | pub fn memory_limit() -> usize { 45 | unsafe { 46 | use libc::*; 47 | sysconf(_SC_PHYS_PAGES) as usize * sysconf(_SC_PAGESIZE) as usize 48 | } 49 | } 50 | 51 | #[cfg(target_family = "windows")] 52 | pub(crate) fn memory_limit() -> usize { 53 | unimplemented!() 54 | } 55 | 56 | #[cfg(target_family = "unix")] 57 | fn determine_page_size() -> usize { 58 | let val = unsafe { libc::sysconf(libc::_SC_PAGESIZE) }; 59 | 60 | if val <= 0 { 61 | panic!("could not determine page size."); 62 | } 63 | 64 | val as usize 65 | } 66 | 67 | #[cfg(target_family = "windows")] 68 | fn determine_page_size() -> usize { 69 | use winapi::um::sysinfoapi::{GetSystemInfo, LPSYSTEM_INFO, SYSTEM_INFO}; 70 | 71 | unsafe { 72 | let mut system_info: SYSTEM_INFO = std::mem::zeroed(); 73 | GetSystemInfo(&mut system_info as LPSYSTEM_INFO); 74 | 75 | system_info.dwPageSize as usize 76 | } 77 | } 78 | 79 | /// determine log_2 of given value 80 | fn log2(mut val: usize) -> usize { 81 | let mut log = 0; 82 | assert!(val <= u32::max_value() as usize); 83 | 84 | if (val & 0xFFFF0000) != 0 { 85 | val >>= 16; 86 | log += 16; 87 | } 88 | if val >= 256 { 89 | val >>= 8; 90 | log += 8; 91 | } 92 | if 
val >= 16 { 93 | val >>= 4; 94 | log += 4; 95 | } 96 | if val >= 4 { 97 | val >>= 2; 98 | log += 2; 99 | } 100 | 101 | log + (val >> 1) 102 | } 103 | 104 | #[test] 105 | fn test_log2() { 106 | for i in 0..32 { 107 | assert_eq!(i, log2(1 << i)); 108 | } 109 | } 110 | use std::i32; 111 | use std::mem::size_of; 112 | /// return pointer width: either 4 or 8 113 | /// (although only 64bit architectures are supported right now) 114 | #[inline(always)] 115 | pub fn ptr_width() -> i32 { 116 | size_of::<*const u8>() as i32 117 | } 118 | 119 | #[inline(always)] 120 | pub fn ptr_width_usize() -> usize { 121 | size_of::<*const u8>() as usize 122 | } 123 | 124 | /// returns true if given value is a multiple of a page size. 125 | pub fn is_page_aligned(val: usize) -> bool { 126 | let align = page_size_bits(); 127 | 128 | // we can use shifts here since we know that 129 | // page size is power of 2 130 | val == ((val >> align) << align) 131 | } 132 | 133 | #[test] 134 | fn test_is_page_aligned() { 135 | let p = page_size(); 136 | 137 | assert_eq!(false, is_page_aligned(1)); 138 | assert_eq!(false, is_page_aligned(2)); 139 | assert_eq!(false, is_page_aligned(64)); 140 | assert_eq!(true, is_page_aligned(p)); 141 | assert_eq!(true, is_page_aligned(2 * p)); 142 | assert_eq!(true, is_page_aligned(3 * p)); 143 | } 144 | 145 | /// round the given value up to the nearest multiple of a page 146 | pub fn page_align(val: usize) -> usize { 147 | let align = page_size_bits(); 148 | 149 | // we know that page size is power of 2, hence 150 | // we can use shifts instead of expensive division 151 | ((val + (1 << align) - 1) >> align) << align 152 | } 153 | 154 | #[test] 155 | fn test_page_align() { 156 | let p = page_size(); 157 | 158 | assert_eq!(p, page_align(1)); 159 | assert_eq!(p, page_align(p - 1)); 160 | assert_eq!(p, page_align(p)); 161 | assert_eq!(2 * p, page_align(p + 1)); 162 | } 163 | 164 | /// rounds the given value `val` up to the nearest multiple 165 | /// of `align` 166 | pub fn align(value: u32, align: u32) -> u32 { 167 | if align == 0 { 168 | return value; 169 | } 170 | 171 | ((value + align - 1) / align) * align 172 | } 173 | 174 | /// rounds the given value `val` up to the nearest multiple 175 | /// of `align` 176 | pub fn align_i32(value: i32, align: i32) -> i32 { 177 | if align == 0 { 178 | return value; 179 | } 180 | 181 | ((value + align - 1) / align) * align 182 | } 183 | 184 | /// rounds the given value `val` up to the nearest multiple 185 | /// of `align`. 186 | pub fn align_usize(value: usize, align: usize) -> usize { 187 | if align == 0 { 188 | return value; 189 | } 190 | 191 | ((value + align - 1) / align) * align 192 | } 193 | 194 | /// returns 'true' if th given `value` is already aligned 195 | /// to `align`. 196 | pub fn is_aligned(value: usize, align: usize) -> bool { 197 | align_usize(value, align) == value 198 | } 199 | 200 | /// returns true if value fits into u8 (unsigned 8bits). 201 | pub fn fits_u8(value: i64) -> bool { 202 | 0 <= value && value <= 255 203 | } 204 | 205 | /// returns true if value fits into i32 (signed 32bits). 
206 | pub fn fits_i32(value: i64) -> bool { 207 | i32::MIN as i64 <= value && value <= i32::MAX as i64 208 | } 209 | use super::*; 210 | use std::ptr; 211 | #[cfg(test)] 212 | mod tests { 213 | use super::*; 214 | 215 | #[test] 216 | fn test_fits_u8() { 217 | assert_eq!(true, fits_u8(0)); 218 | assert_eq!(true, fits_u8(255)); 219 | assert_eq!(false, fits_u8(256)); 220 | assert_eq!(false, fits_u8(-1)); 221 | } 222 | 223 | #[test] 224 | fn test_fits_i32() { 225 | assert_eq!(true, fits_i32(0)); 226 | assert_eq!(true, fits_i32(i32::MAX as i64)); 227 | assert_eq!(true, fits_i32(i32::MIN as i64)); 228 | assert_eq!(false, fits_i32(i32::MAX as i64 + 1)); 229 | assert_eq!(false, fits_i32(i32::MIN as i64 - 1)); 230 | } 231 | } 232 | 233 | #[cfg(target_family = "unix")] 234 | pub fn reserve(size: usize) -> Address { 235 | debug_assert!(mem::is_page_aligned(size)); 236 | 237 | let ptr = unsafe { 238 | libc::mmap( 239 | ptr::null_mut(), 240 | size, 241 | libc::PROT_NONE, 242 | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_NORESERVE, 243 | -1, 244 | 0, 245 | ) as *mut libc::c_void 246 | }; 247 | 248 | if ptr == libc::MAP_FAILED { 249 | panic!("reserving memory with mmap() failed"); 250 | } 251 | 252 | Address::from_ptr(ptr) 253 | } 254 | 255 | #[cfg(target_family = "windows")] 256 | pub fn reserve(size: usize) -> Address { 257 | debug_assert!(mem::is_page_aligned(size)); 258 | 259 | use kernel32::VirtualAlloc; 260 | use winapi::um::winnt::{MEM_RESERVE, PAGE_NOACCESS}; 261 | 262 | let ptr = unsafe { VirtualAlloc(ptr::null_mut(), size as u64, MEM_RESERVE, PAGE_NOACCESS) }; 263 | 264 | if ptr.is_null() { 265 | panic!("VirtualAlloc failed"); 266 | } 267 | 268 | Address::from_ptr(ptr) 269 | } 270 | 271 | pub fn reserve_align(size: usize, align: usize) -> Address { 272 | debug_assert!(mem::is_page_aligned(size)); 273 | debug_assert!(mem::is_page_aligned(align)); 274 | 275 | let align_minus_page = align - page_size(); 276 | 277 | let unaligned = reserve(size + align_minus_page); 278 | let aligned: Address = mem::align_usize(unaligned.to_usize(), align).into(); 279 | 280 | let gap_start = aligned.offset_from(unaligned); 281 | let gap_end = align_minus_page - gap_start; 282 | 283 | if gap_start > 0 { 284 | uncommit(unaligned, gap_start); 285 | } 286 | 287 | if gap_end > 0 { 288 | uncommit(aligned.offset(size), gap_end); 289 | } 290 | 291 | aligned 292 | } 293 | 294 | #[cfg(target_family = "unix")] 295 | pub fn commit(size: usize, executable: bool) -> Address { 296 | debug_assert!(mem::is_page_aligned(size)); 297 | 298 | let mut prot = libc::PROT_READ | libc::PROT_WRITE; 299 | 300 | if executable { 301 | prot |= libc::PROT_EXEC; 302 | } 303 | 304 | let ptr = unsafe { 305 | libc::mmap( 306 | ptr::null_mut(), 307 | size, 308 | prot, 309 | libc::MAP_PRIVATE | libc::MAP_ANON, 310 | -1, 311 | 0, 312 | ) 313 | }; 314 | 315 | if ptr == libc::MAP_FAILED { 316 | panic!("committing memory with mmap() failed"); 317 | } 318 | 319 | Address::from_ptr(ptr) 320 | } 321 | 322 | #[cfg(target_family = "windows")] 323 | pub fn commit(size: usize, executable: bool) -> Address { 324 | debug_assert!(mem::is_page_aligned(size)); 325 | 326 | use kernel32::VirtualAlloc; 327 | use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_EXECUTE_READWRITE, PAGE_READWRITE}; 328 | 329 | let prot = if executable { 330 | PAGE_EXECUTE_READWRITE 331 | } else { 332 | PAGE_READWRITE 333 | }; 334 | 335 | let ptr = unsafe { VirtualAlloc(ptr::null_mut(), size as u64, MEM_COMMIT | MEM_RESERVE, prot) }; 336 | 337 | if ptr.is_null() { 338 | 
panic!("VirtualAlloc failed"); 339 | } 340 | 341 | Address::from_ptr(ptr) 342 | } 343 | 344 | #[cfg(target_family = "unix")] 345 | pub fn commit_at(ptr: Address, size: usize, executable: bool) { 346 | debug_assert!(ptr.is_page_aligned()); 347 | debug_assert!(mem::is_page_aligned(size)); 348 | 349 | let mut prot = libc::PROT_READ | libc::PROT_WRITE; 350 | 351 | if executable { 352 | prot |= libc::PROT_EXEC; 353 | } 354 | 355 | let val = unsafe { 356 | libc::mmap( 357 | ptr.to_mut_ptr(), 358 | size, 359 | prot, 360 | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED, 361 | -1, 362 | 0, 363 | ) 364 | }; 365 | 366 | if val == libc::MAP_FAILED { 367 | panic!("committing memory with mmap() failed"); 368 | } 369 | } 370 | 371 | #[cfg(target_family = "windows")] 372 | pub fn commit_at(ptr: Address, size: usize, executable: bool) { 373 | debug_assert!(ptr.is_page_aligned()); 374 | debug_assert!(mem::is_page_aligned(size)); 375 | 376 | use kernel32::VirtualAlloc; 377 | use winapi::um::winnt::{MEM_COMMIT, PAGE_EXECUTE_READWRITE, PAGE_READWRITE}; 378 | 379 | let prot = if executable { 380 | PAGE_EXECUTE_READWRITE 381 | } else { 382 | PAGE_READWRITE 383 | }; 384 | 385 | let result = unsafe { VirtualAlloc(ptr.to_mut_ptr(), size as u64, MEM_COMMIT, prot) }; 386 | 387 | if result != ptr.to_mut_ptr() { 388 | panic!("VirtualAlloc failed"); 389 | } 390 | } 391 | 392 | #[cfg(target_family = "unix")] 393 | pub fn uncommit(ptr: Address, size: usize) { 394 | debug_assert!(ptr.is_page_aligned()); 395 | debug_assert!(mem::is_page_aligned(size)); 396 | 397 | let val = unsafe { 398 | libc::mmap( 399 | ptr.to_mut_ptr(), 400 | size, 401 | libc::PROT_NONE, 402 | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_NORESERVE, 403 | -1, 404 | 0, 405 | ) 406 | }; 407 | 408 | if val == libc::MAP_FAILED { 409 | panic!("uncommitting memory with mmap() failed"); 410 | } 411 | } 412 | 413 | #[cfg(target_family = "windows")] 414 | pub fn uncommit(ptr: Address, size: usize) { 415 | debug_assert!(ptr.is_page_aligned()); 416 | debug_assert!(mem::is_page_aligned(size)); 417 | 418 | use kernel32::VirtualFree; 419 | use winapi::um::winnt::MEM_RELEASE; 420 | 421 | let _ = unsafe { VirtualFree(ptr.to_mut_ptr(), size as _, MEM_RELEASE) }; 422 | } 423 | 424 | #[cfg(target_family = "unix")] 425 | pub fn discard(ptr: Address, size: usize) { 426 | debug_assert!(ptr.is_page_aligned()); 427 | debug_assert!(mem::is_page_aligned(size)); 428 | 429 | let res = unsafe { libc::madvise(ptr.to_mut_ptr(), size, libc::MADV_DONTNEED) }; 430 | 431 | if res != 0 { 432 | panic!("discarding memory with madvise() failed"); 433 | } 434 | 435 | let res = unsafe { libc::mprotect(ptr.to_mut_ptr(), size, libc::PROT_NONE) }; 436 | 437 | if res != 0 { 438 | panic!("discarding memory with mprotect() failed"); 439 | } 440 | } 441 | 442 | #[cfg(target_family = "windows")] 443 | pub fn discard(ptr: Address, size: usize) { 444 | debug_assert!(ptr.is_page_aligned()); 445 | debug_assert!(mem::is_page_aligned(size)); 446 | 447 | use kernel32::VirtualFree; 448 | use winapi::um::winnt::MEM_DECOMMIT; 449 | 450 | let _ = unsafe { VirtualFree(ptr.to_mut_ptr(), size as u64, MEM_DECOMMIT) }; 451 | } 452 | 453 | #[cfg(target_family = "unix")] 454 | pub fn protect(start: Address, size: usize, access: Access) { 455 | debug_assert!(start.is_page_aligned()); 456 | debug_assert!(mem::is_page_aligned(size)); 457 | 458 | if access.is_none() { 459 | discard(start, size); 460 | return; 461 | } 462 | 463 | let protection = match access { 464 | Access::None => unreachable!(), 465 | Access::Read 
=> libc::PROT_READ, 466 | Access::ReadWrite => libc::PROT_READ | libc::PROT_WRITE, 467 | Access::ReadExecutable => libc::PROT_READ | libc::PROT_EXEC, 468 | Access::ReadWriteExecutable => libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC, 469 | }; 470 | 471 | let res = unsafe { libc::mprotect(start.to_mut_ptr(), size, protection) }; 472 | 473 | if res != 0 { 474 | panic!("mprotect() failed"); 475 | } 476 | } 477 | 478 | #[cfg(target_family = "windows")] 479 | pub fn protect(start: Address, size: usize, access: Access) { 480 | debug_assert!(start.is_page_aligned()); 481 | debug_assert!(mem::is_page_aligned(size)); 482 | 483 | use kernel32::VirtualAlloc; 484 | use winapi::um::winnt::{ 485 | MEM_COMMIT, PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE, PAGE_READONLY, PAGE_READWRITE, 486 | }; 487 | 488 | if access.is_none() { 489 | discard(start, size); 490 | return; 491 | } 492 | 493 | let protection = match access { 494 | Access::None => unreachable!(), 495 | Access::Read => PAGE_READONLY, 496 | Access::ReadWrite => PAGE_READWRITE, 497 | Access::ReadExecutable => PAGE_EXECUTE_READ, 498 | Access::ReadWriteExecutable => PAGE_EXECUTE_READWRITE, 499 | }; 500 | 501 | let ptr = unsafe { VirtualAlloc(start.to_mut_ptr(), size as u64, MEM_COMMIT, protection) }; 502 | 503 | if ptr.is_null() { 504 | panic!("VirtualAlloc failed"); 505 | } 506 | } 507 | 508 | pub enum Access { 509 | None, 510 | Read, 511 | ReadWrite, 512 | ReadExecutable, 513 | ReadWriteExecutable, 514 | } 515 | 516 | impl Access { 517 | fn is_none(&self) -> bool { 518 | match self { 519 | Access::None => true, 520 | _ => false, 521 | } 522 | } 523 | } 524 | 525 | #[derive(Copy, Clone, PartialEq, Eq, Hash)] 526 | pub struct Address(usize); 527 | 528 | impl Address { 529 | pub fn deref(self) -> Self { 530 | unsafe { *(self.offset(0).to_mut_ptr::()) } 531 | } 532 | 533 | #[inline(always)] 534 | pub fn from(val: usize) -> Address { 535 | Address(val) 536 | } 537 | 538 | #[inline(always)] 539 | pub fn region_start(self, size: usize) -> Region { 540 | Region::new(self, self.offset(size)) 541 | } 542 | 543 | #[inline(always)] 544 | pub fn offset_from(self, base: Address) -> usize { 545 | debug_assert!(self >= base); 546 | 547 | self.to_usize() - base.to_usize() 548 | } 549 | 550 | #[inline(always)] 551 | pub fn offset(self, offset: usize) -> Address { 552 | Address(self.0 + offset) 553 | } 554 | 555 | #[inline(always)] 556 | pub fn sub(self, offset: usize) -> Address { 557 | Address(self.0 - offset) 558 | } 559 | 560 | #[inline(always)] 561 | pub fn add_ptr(self, words: usize) -> Address { 562 | Address(self.0 + words * mem::ptr_width_usize()) 563 | } 564 | 565 | #[inline(always)] 566 | pub fn sub_ptr(self, words: usize) -> Address { 567 | Address(self.0 - words * mem::ptr_width_usize()) 568 | } 569 | 570 | #[inline(always)] 571 | pub fn to_usize(self) -> usize { 572 | self.0 573 | } 574 | 575 | #[inline(always)] 576 | pub fn from_ptr(ptr: *const T) -> Address { 577 | Address(ptr as usize) 578 | } 579 | 580 | #[inline(always)] 581 | pub fn to_ptr(&self) -> *const T { 582 | self.0 as *const T 583 | } 584 | 585 | #[inline(always)] 586 | pub fn to_mut_ptr(&self) -> *mut T { 587 | self.0 as *const T as *mut T 588 | } 589 | 590 | #[inline(always)] 591 | pub fn null() -> Address { 592 | Address(0) 593 | } 594 | 595 | #[inline(always)] 596 | pub fn is_null(self) -> bool { 597 | self.0 == 0 598 | } 599 | 600 | #[inline(always)] 601 | pub fn is_non_null(self) -> bool { 602 | self.0 != 0 603 | } 604 | 605 | #[inline(always)] 606 | pub fn align_page(self) -> 
Address { 607 | mem::page_align(self.to_usize()).into() 608 | } 609 | 610 | #[inline(always)] 611 | pub fn align_page_down(self) -> Address { 612 | Address(self.0 & !(mem::page_size() - 1)) 613 | } 614 | 615 | #[inline(always)] 616 | pub fn is_page_aligned(self) -> bool { 617 | mem::is_page_aligned(self.to_usize()) 618 | } 619 | 620 | #[inline(always)] 621 | pub const fn and(self, x: Address) -> Self { 622 | Self(self.0 & x.0) 623 | } 624 | 625 | #[inline(always)] 626 | pub const fn or(self, x: Address) -> Self { 627 | Self(self.0 | x.0) 628 | } 629 | } 630 | 631 | impl fmt::Display for Address { 632 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 633 | write!(f, "0x{:x}", self.to_usize()) 634 | } 635 | } 636 | 637 | impl fmt::Debug for Address { 638 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 639 | write!(f, "0x{:x}", self.to_usize()) 640 | } 641 | } 642 | 643 | impl PartialOrd for Address { 644 | fn partial_cmp(&self, other: &Address) -> Option { 645 | Some(self.cmp(other)) 646 | } 647 | } 648 | 649 | impl Ord for Address { 650 | fn cmp(&self, other: &Address) -> std::cmp::Ordering { 651 | self.to_usize().cmp(&other.to_usize()) 652 | } 653 | } 654 | 655 | impl From for Address { 656 | fn from(val: usize) -> Address { 657 | Address(val) 658 | } 659 | } 660 | 661 | #[derive(Copy, Clone)] 662 | pub struct Region { 663 | pub start: Address, 664 | pub end: Address, 665 | } 666 | 667 | impl Region { 668 | pub fn new(start: Address, end: Address) -> Region { 669 | debug_assert!(start <= end); 670 | 671 | Region { start, end } 672 | } 673 | 674 | #[inline(always)] 675 | pub fn contains(&self, addr: Address) -> bool { 676 | self.start <= addr && addr < self.end 677 | } 678 | 679 | #[inline(always)] 680 | pub fn valid_top(&self, addr: Address) -> bool { 681 | self.start <= addr && addr <= self.end 682 | } 683 | 684 | #[inline(always)] 685 | pub fn size(&self) -> usize { 686 | self.end.to_usize() - self.start.to_usize() 687 | } 688 | 689 | #[inline(always)] 690 | pub fn empty(&self) -> bool { 691 | self.start == self.end 692 | } 693 | 694 | #[inline(always)] 695 | pub fn disjunct(&self, other: &Region) -> bool { 696 | self.end <= other.start || self.start >= other.end 697 | } 698 | 699 | #[inline(always)] 700 | pub fn overlaps(&self, other: &Region) -> bool { 701 | !self.disjunct(other) 702 | } 703 | 704 | #[inline(always)] 705 | pub fn fully_contains(&self, other: &Region) -> bool { 706 | self.contains(other.start) && self.valid_top(other.end) 707 | } 708 | } 709 | 710 | impl Default for Region { 711 | fn default() -> Region { 712 | Region { 713 | start: Address::null(), 714 | end: Address::null(), 715 | } 716 | } 717 | } 718 | 719 | impl fmt::Display for Region { 720 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 721 | write!(f, "{}-{}", self.start, self.end) 722 | } 723 | } 724 | 725 | pub struct FormattedSize { 726 | size: usize, 727 | } 728 | 729 | impl fmt::Display for FormattedSize { 730 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 731 | let ksize = (self.size as f64) / 1024f64; 732 | 733 | if ksize < 1f64 { 734 | return write!(f, "{}B", self.size); 735 | } 736 | 737 | let msize = ksize / 1024f64; 738 | 739 | if msize < 1f64 { 740 | return write!(f, "{:.1}K", ksize); 741 | } 742 | 743 | let gsize = msize / 1024f64; 744 | 745 | if gsize < 1f64 { 746 | write!(f, "{:.1}M", msize) 747 | } else { 748 | write!(f, "{:.1}G", gsize) 749 | } 750 | } 751 | } 752 | 753 | pub fn formatted_size(size: usize) -> FormattedSize { 754 | FormattedSize { size } 755 | } 
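// Example: `formatted_size(512)` displays as "512B", `formatted_size(3 * 1024 * 1024)` as "3.0M".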
756 | 757 | #[repr(transparent)] 758 | pub struct Ptr(pub(crate) *mut T); 759 | 760 | impl Ptr { 761 | pub fn get(&self) -> &mut T { 762 | unsafe { &mut *self.0 } 763 | } 764 | } 765 | 766 | impl Ptr { 767 | pub fn new(x: T) -> Self { 768 | Self(Box::into_raw(Box::new(x))) 769 | } 770 | 771 | pub fn from_box(b: Box) -> Self { 772 | Self(Box::into_raw(b)) 773 | } 774 | 775 | pub fn set(&self, val: T) { 776 | unsafe { self.0.write(val) }; 777 | } 778 | 779 | pub fn replace(&self, val: T) -> T { 780 | std::mem::replace(self.get(), val) 781 | } 782 | 783 | pub fn take(&self) -> T 784 | where 785 | T: Default, 786 | { 787 | self.replace(T::default()) 788 | } 789 | 790 | pub fn is_null(&self) -> bool { 791 | self.0.is_null() 792 | } 793 | 794 | pub fn null() -> Self { 795 | Self(std::ptr::null_mut()) 796 | } 797 | } 798 | 799 | use std::hash::*; 800 | 801 | impl Hash for Ptr { 802 | fn hash(&self, state: &mut H) { 803 | self.0.hash(state); 804 | } 805 | } 806 | 807 | impl PartialEq for Ptr { 808 | fn eq(&self, other: &Self) -> bool { 809 | self.0 == other.0 810 | } 811 | } 812 | 813 | impl Eq for Ptr {} 814 | 815 | impl Copy for Ptr {} 816 | impl Clone for Ptr { 817 | fn clone(&self) -> Self { 818 | *self 819 | } 820 | } 821 | 822 | impl std::ops::Deref for Ptr { 823 | type Target = T; 824 | fn deref(&self) -> &T { 825 | self.get() 826 | } 827 | } 828 | 829 | unsafe impl Send for Ptr {} 830 | unsafe impl Sync for Ptr {} 831 | 832 | use std::hash::{Hash, Hasher}; 833 | use std::sync::atomic::{AtomicPtr, Ordering}; 834 | 835 | /// The mask to use for untagging a pointer. 836 | const UNTAG_MASK: usize = (!0x7) as usize; 837 | 838 | /// Returns true if the pointer has the given bit set to 1. 839 | pub fn bit_is_set(pointer: *mut T, bit: usize) -> bool { 840 | let shifted = 1 << bit; 841 | 842 | (pointer as usize & shifted) == shifted 843 | } 844 | 845 | /// Returns the pointer with the given bit set. 846 | pub fn with_bit(pointer: *mut T, bit: usize) -> *mut T { 847 | (pointer as usize | 1 << bit) as _ 848 | } 849 | 850 | pub fn without_bit(pointer: *mut T, bit: usize) -> *mut T { 851 | (pointer as usize ^ 1 << bit) as _ 852 | } 853 | 854 | /// Returns the given pointer without any tags set. 855 | pub fn untagged(pointer: *mut T) -> *mut T { 856 | (pointer as usize & UNTAG_MASK) as _ 857 | } 858 | 859 | /// Structure wrapping a raw, tagged pointer. 860 | #[derive(Debug)] 861 | #[repr(transparent)] 862 | pub struct TaggedPointer { 863 | pub raw: *mut T, 864 | } 865 | 866 | impl TaggedPointer { 867 | /// Returns a new TaggedPointer without setting any bits. 868 | pub fn new(raw: *mut T) -> TaggedPointer { 869 | TaggedPointer { raw } 870 | } 871 | 872 | /// Returns a new TaggedPointer with the given bit set. 873 | pub fn with_bit(raw: *mut T, bit: usize) -> TaggedPointer { 874 | let mut pointer = Self::new(raw); 875 | 876 | pointer.set_bit(bit); 877 | 878 | pointer 879 | } 880 | 881 | pub fn unset_bit(&mut self, bit: usize) { 882 | if self.bit_is_set(bit) { 883 | self.raw = without_bit(self.raw, bit); 884 | } 885 | } 886 | 887 | /// Returns a null pointer. 888 | pub const fn null() -> TaggedPointer { 889 | TaggedPointer { 890 | raw: ptr::null::() as *mut T, 891 | } 892 | } 893 | 894 | /// Returns the wrapped pointer without any tags. 895 | pub fn untagged(self) -> *mut T { 896 | self::untagged(self.raw) 897 | } 898 | 899 | /// Returns a new TaggedPointer using the current pointer but without any 900 | /// tags. 
901 | pub fn without_tags(self) -> Self { 902 | Self::new(self.untagged()) 903 | } 904 | 905 | /// Returns true if the given bit is set. 906 | pub fn bit_is_set(self, bit: usize) -> bool { 907 | self::bit_is_set(self.raw, bit) 908 | } 909 | 910 | /// Sets the given bit. 911 | pub fn set_bit(&mut self, bit: usize) { 912 | self.raw = with_bit(self.raw, bit); 913 | } 914 | 915 | /// Returns true if the current pointer is a null pointer. 916 | pub fn is_null(self) -> bool { 917 | self.untagged().is_null() 918 | } 919 | 920 | /// Returns an immutable to the pointer's value. 921 | pub fn as_ref<'a>(self) -> Option<&'a T> { 922 | unsafe { self.untagged().as_ref() } 923 | } 924 | 925 | /// Returns a mutable reference to the pointer's value. 926 | pub fn as_mut<'a>(self) -> Option<&'a mut T> { 927 | unsafe { self.untagged().as_mut() } 928 | } 929 | 930 | /// Atomically swaps the internal pointer with another one. 931 | /// 932 | /// This boolean returns true if the pointer was swapped, false otherwise. 933 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::trivially_copy_pass_by_ref))] 934 | pub fn compare_and_swap(&self, current: *mut T, other: *mut T) -> bool { 935 | self.as_atomic() 936 | .compare_and_swap(current, other, Ordering::AcqRel) 937 | == current 938 | } 939 | 940 | /// Atomically replaces the current pointer with the given one. 941 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::trivially_copy_pass_by_ref))] 942 | pub fn atomic_store(&self, other: *mut T) { 943 | self.as_atomic().store(other, Ordering::Release); 944 | } 945 | 946 | /// Atomically loads the pointer. 947 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::trivially_copy_pass_by_ref))] 948 | pub fn atomic_load(&self) -> *mut T { 949 | self.as_atomic().load(Ordering::Acquire) 950 | } 951 | 952 | /// Checks if a bit is set using an atomic load. 953 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::trivially_copy_pass_by_ref))] 954 | pub fn atomic_bit_is_set(&self, bit: usize) -> bool { 955 | Self::new(self.atomic_load()).bit_is_set(bit) 956 | } 957 | 958 | fn as_atomic(&self) -> &AtomicPtr { 959 | unsafe { &*(self as *const TaggedPointer as *const AtomicPtr) } 960 | } 961 | } 962 | 963 | impl PartialEq for TaggedPointer { 964 | fn eq(&self, other: &TaggedPointer) -> bool { 965 | self.raw == other.raw 966 | } 967 | } 968 | 969 | impl Eq for TaggedPointer {} 970 | 971 | // These traits are implemented manually as "derive" doesn't handle the generic 972 | // "T" argument very well. 
--------------------------------------------------------------------------------
/src/safepoint.rs:
--------------------------------------------------------------------------------
1 | use super::heap::*;
2 | use super::threads::*;
3 | use std::sync::Arc;
4 | pub fn block(thread: &MutatorThread) {
5 |     let safepoint_id = HEAP.threads.safepoint_id();
6 |     assert_ne!(safepoint_id, 0);
7 |     let state = thread.state();
8 | 
9 |     match state {
10 |         ThreadState::Running | ThreadState::Parked => {
11 |             thread.block(safepoint_id);
12 |         }
13 |         ThreadState::Blocked => {
14 |             panic!("illegal thread state: {:?}", state);
15 |         }
16 |     };
17 | 
18 |     let _mtx = HEAP.threads.barrier.wait(safepoint_id);
19 |     thread.unblock();
20 | }
21 | 
22 | fn resume_threads(_threads: &[Arc<MutatorThread>], safepoint_id: usize) {
23 |     HEAP.threads.barrier.resume(safepoint_id);
24 |     HEAP.threads.clear_safepoint_request();
25 | }
26 | 
27 | fn all_threads_blocked(
28 |     thread_self: &Arc<MutatorThread>,
29 |     threads: &[Arc<MutatorThread>],
30 |     safepoint_id: usize,
31 | ) -> bool {
32 |     let mut all_blocked = true;
33 | 
34 |     for thread in threads {
35 |         if Arc::ptr_eq(thread, thread_self) {
36 |             assert!(thread.state().is_parked());
37 |             continue;
38 |         }
39 | 
40 |         if !thread.in_safepoint(safepoint_id) {
41 |             all_blocked = false;
42 |         }
43 |     }
44 | 
45 |     all_blocked
46 | }
47 | 
48 | fn stop_threads(threads: &[Arc<MutatorThread>]) -> usize {
49 |     let thread_self = THREAD.with(|thread| thread.borrow().clone());
50 |     let safepoint_id = HEAP.threads.request_safepoint();
51 | 
52 |     HEAP.threads.barrier.guard(safepoint_id);
53 | 
54 |     while !all_threads_blocked(&thread_self, threads, safepoint_id) {
55 |         std::thread::yield_now();
56 |     }
57 | 
58 |     safepoint_id
59 | }
60 | pub fn stop_the_world<F, R>(f: F) -> R
61 | where
62 |     F: FnOnce(&[Arc<MutatorThread>]) -> R,
63 | {
64 |     THREAD.with(|thread| thread.borrow().park());
65 | 
66 |     let threads = HEAP.threads.threads.lock();
67 |     if threads.len() == 1 {
68 |         let ret = f(&*threads);
69 |         THREAD.with(|thread| thread.borrow().unpark());
70 |         return ret;
71 |     }
72 | 
73 |     let safepoint_id = stop_threads(&*threads);
74 |     let ret = f(&*threads);
75 |     resume_threads(&*threads, safepoint_id);
76 |     THREAD.with(|thread| thread.borrow().unpark());
77 |     ret
78 | }
79 | 
80 | pub extern "C" fn gc_guard() {
81 |     let thread = THREAD.with(|thread| thread.borrow().clone());
82 |     block(&thread);
83 | }
84 | 
85 | #[macro_export]
86 | macro_rules! safepoint {
87 |     () => {
88 |         $crate::safepoint::gc_guard();
89 |     };
90 | }
91 | 
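A usage sketch for the safepoint protocol above. It is not part of the source and assumes the `heap`, `safepoint`, and `threads` modules (and the `HEAP` global) are publicly reachable. Note that `block()`, and therefore `safepoint!()`, asserts that a safepoint is actually pending, so the poll guards the call:

```rust
use cgc::heap::HEAP;
use cgc::threads::attach_current_thread;

fn main() {
    attach_current_thread();

    // Mutator side: poll for a pending safepoint request before blocking in it.
    if HEAP.threads.safepoint_requested() {
        cgc::safepoint!();
    }

    // Collector side: park every attached mutator, run the closure, then resume them.
    let attached = cgc::safepoint::stop_the_world(|threads| threads.len());
    assert!(attached >= 1);
}
```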
--------------------------------------------------------------------------------
/src/space.rs:
--------------------------------------------------------------------------------
1 | use crate::mem::*;
2 | pub struct Space {
3 |     pub top: Address,
4 |     pub limit: Address,
5 |     pub pages: Vec<Page>,
6 |     pub size: usize,
7 |     pub size_limit: usize,
8 |     pub page_size: usize,
9 |     pub pages_count: usize,
10 |     pub allocated_size: usize,
11 | }
12 | 
13 | impl Space {
14 |     pub fn empty() -> Self {
15 |         Self {
16 |             top: Address::null(),
17 |             limit: Address::null(),
18 |             pages: Vec::new(),
19 |             size: 0,
20 |             allocated_size: 0,
21 |             page_size: 0,
22 |             pages_count: 0,
23 |             size_limit: 0,
24 |         }
25 |     }
26 |     pub fn new(page_size: usize) -> Self {
27 |         let mut pages = Vec::new();
28 |         let page = Page::new(page_size);
29 |         pages.push(page);
30 |         let top = Address::from_ptr(&pages.last().unwrap().top);
31 |         let limit = Address::from_ptr(&pages.last().unwrap().limit);
32 |         let mut space = Space {
33 |             top,
34 |             limit,
35 |             pages,
36 |             size: 0,
37 |             page_size,
38 |             size_limit: 0,
39 |             pages_count: 1,
40 |             allocated_size: 0,
41 |         };
42 |         space.compute_size_limit();
43 |         space
44 |     }
45 | 
46 |     pub fn compute_size_limit(&mut self) {
47 |         self.size_limit = self.size << 1;
48 |     }
49 |     pub fn may_allocate_in_current(&mut self, size: usize) -> bool {
50 |         let even_bytes = size + (size & 0x01);
51 |         let place_in_current = self.top.deref().offset(even_bytes) <= self.limit.deref();
52 |         place_in_current
53 |     }
54 |     pub fn add_page(&mut self, size: usize) {
55 |         let real_size = align_usize(size, page_size());
56 |         let page = Page::new(real_size);
57 |         self.pages.push(page);
58 |         self.pages_count += 1;
59 |         let page = self.pages.last().unwrap();
60 |         self.size += real_size;
61 |         self.top = Address::from_ptr(&page.top);
62 |         self.limit = Address::from_ptr(&page.limit);
63 |     }
64 | 
65 |     pub fn fast_allocate(&mut self, bytes: usize, needs_gc: &mut bool) -> Address {
66 |         let even_bytes = bytes + (bytes & 0x01);
67 |         let place_in_current = self.top.deref().offset(even_bytes) < self.limit.deref();
68 | 
69 |         if !place_in_current {
70 |             *needs_gc = true;
71 |             log::debug!("Add new page");
72 |             self.add_page(even_bytes);
73 |         }
74 |         self.allocated_size += even_bytes;
75 |         let result = self.top.deref();
76 |         unsafe {
77 |             *self.top.to_mut_ptr::<*mut u8>() =
78 |                 self.top.deref().offset(even_bytes).to_mut_ptr::<u8>();
79 |         }
80 |         result
81 |     }
82 |     pub fn try_find_page_for(&self, size: usize) -> Option<(Address, Address)> {
83 |         for page in self.pages.iter() {
84 |             if page.top.offset(size) < page.limit {
85 |                 return Some((Address::from_ptr(&page.top), Address::from_ptr(&page.limit)));
86 |             }
87 |         }
88 | 
89 |         None
90 |     }
91 |     pub fn allocate(&mut self, bytes: usize, needs_gc: &mut bool) -> Address {
92 |         let even_bytes = bytes + (bytes & 0x01);
93 |         let place_in_current = self.top.deref().offset(even_bytes) < self.limit.deref();
94 | 
95 |         if !place_in_current {
96 |             let head = self.try_find_page_for(even_bytes);
97 | 
98 |             if let Some((top, limit)) = head {
99 |                 self.top = top;
100 |                 self.limit = limit;
101 |             } else {
102 |                 *needs_gc = true;
103 |                 self.add_page(even_bytes);
104 |             }
105 |         }
106 |         self.allocated_size += even_bytes;
107 |         let result = self.top.deref();
108 |         unsafe {
109 |             *self.top.to_mut_ptr::<*mut u8>() =
110 |                 self.top.deref().offset(even_bytes).to_mut_ptr::<u8>();
111 |         }
112 |         result
113 |     }
114 | 
115 |     pub fn swap(&mut self, space: &mut Space) {
116 |         self.clear();
117 |         assert!(!space.pages.is_empty());
118 |         while !space.pages.is_empty() {
119 |             self.pages.push(space.pages.pop().unwrap());
120 |             self.size += self.pages.last().unwrap().size;
121 |         }
122 |         self.allocated_size = space.allocated_size;
123 |         let page = self.pages.last().unwrap();
124 |         self.top = Address::from_ptr(&page.top);
125 |         self.limit = Address::from_ptr(&page.limit);
126 |     }
127 | 
128 |     pub fn contains(&self, addr: Address) -> bool {
129 |         for page in self.pages.iter() {
130 |             let page: &Page = page;
131 |             if addr >= page.data && addr <= page.limit {
132 |                 return true;
133 |             }
134 |         }
135 | 
136 |         false
137 |     }
138 | 
139 |     pub fn reset_pages(&mut self) {
140 |         for page in self.pages.iter_mut() {
141 |             page.top = page.data;
142 |         }
143 |     }
144 |     pub fn clear(&mut self) {
145 |         self.size = 0;
146 |         while let Some(page) = self.pages.pop() {
147 |             page.uncommit();
148 |         }
149 |     }
150 | }
151 | 
152 | pub struct Page {
153 |     pub data: Address,
154 |     pub top: Address,
155 |     pub limit: Address,
156 |     pub size: usize,
157 | }
158 | 
159 | impl Page {
160 |     pub fn new(size: usize) -> Self {
161 |         let data = commit(size, false);
162 |         let top = data;
163 |         let limit = data.offset(size);
164 |         Self {
165 |             top,
166 |             data,
167 |             limit,
168 |             size,
169 |         }
170 |     }
171 | 
172 |     pub fn uncommit(&self) {
173 |         uncommit(self.data, self.size)
174 |     }
175 | }
176 | 
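`Space` is the crate-internal bump allocator that the global heap drives, so the following is only an isolated sketch of the interface above; it assumes the `space` module is publicly reachable and that `commit` can map a 16 KiB page:

```rust
use cgc::space::Space;

fn main() {
    // One 16 KiB page to start with; add_page() grows the space on demand.
    let mut space = Space::new(16 * 1024);

    let mut needs_gc = false;
    let addr = space.allocate(64, &mut needs_gc);

    assert!(space.contains(addr));
    assert_eq!(space.allocated_size, 64);
    assert!(!needs_gc); // the first page still has room, so no collection is requested
}
```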
--------------------------------------------------------------------------------
/src/threads.rs:
--------------------------------------------------------------------------------
1 | use parking_lot::{Condvar, Mutex};
2 | use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
3 | pub struct Barrier {
4 |     active: Mutex<usize>,
5 |     done: Condvar,
6 | }
7 | 
8 | impl Barrier {
9 |     pub fn new() -> Barrier {
10 |         Barrier {
11 |             active: Mutex::new(0),
12 |             done: Condvar::new(),
13 |         }
14 |     }
15 | 
16 |     pub fn guard(&self, safepoint_id: usize) {
17 |         let mut active = self.active.lock();
18 |         assert_eq!(*active, 0);
19 |         assert_ne!(safepoint_id, 0);
20 |         *active = safepoint_id;
21 |     }
22 | 
23 |     pub fn resume(&self, safepoint_id: usize) {
24 |         let mut active = self.active.lock();
25 |         assert_eq!(*active, safepoint_id);
26 |         assert_ne!(safepoint_id, 0);
27 |         *active = 0;
28 |         self.done.notify_all();
29 |     }
30 | 
31 |     pub fn wait(&self, safepoint_id: usize) {
32 |         let mut active = self.active.lock();
33 |         assert_ne!(safepoint_id, 0);
34 | 
35 |         while *active == safepoint_id {
36 |             self.done.wait(&mut active);
37 |         }
38 |     }
39 | }
40 | 
41 | pub struct StateManager {
42 |     mtx: Mutex<(ThreadState, usize)>,
43 | }
44 | 
45 | impl StateManager {
46 |     fn new() -> StateManager {
47 |         StateManager {
48 |             mtx: Mutex::new((ThreadState::Running, 0)),
49 |         }
50 |     }
51 | 
52 |     fn state(&self) -> ThreadState {
53 |         let mtx = self.mtx.lock();
54 |         mtx.0
55 |     }
56 | 
57 |     fn park(&self) {
58 |         let mut mtx = self.mtx.lock();
59 |         assert!(mtx.0.is_running());
60 |         mtx.0 = ThreadState::Parked;
61 |     }
62 | 
63 |     fn unpark(&self) {
64 |         let mut mtx = self.mtx.lock();
65 |         assert!(mtx.0.is_parked());
66 |         mtx.0 = ThreadState::Running;
67 |     }
68 | 
69 |     fn block(&self, safepoint_id: usize) {
70 |         let mut mtx = self.mtx.lock();
71 |         assert!(mtx.0.is_running());
72 |         mtx.0 = ThreadState::Blocked;
73 |         mtx.1 = safepoint_id;
74 |     }
75 | 
76 |     fn unblock(&self) {
77 |         let mut mtx = self.mtx.lock();
78 |         assert!(mtx.0.is_blocked());
79 |         mtx.0 = ThreadState::Running;
80 |         mtx.1 = 0;
81 |     }
82 | 
83 |     fn in_safepoint(&self, safepoint_id: usize) -> bool {
84 |         assert_ne!(safepoint_id, 0);
85 |         let mtx = self.mtx.lock();
86 | 
87 |         match mtx.0 {
88 |             ThreadState::Running => false,
89 |             ThreadState::Blocked => mtx.1 == safepoint_id,
90 |             ThreadState::Parked => true,
91 |         }
92 |     }
93 | }
94 | 
95 | #[derive(Copy, Clone, PartialEq, Eq, Debug)]
96 | pub enum ThreadState {
97 |     Running = 0,
98 |     Parked = 1,
99 |     Blocked = 2,
100 | }
101 | 
102 | impl From<usize> for ThreadState {
103 |     fn from(value: usize) -> ThreadState {
104 |         match value {
105 |             0 => ThreadState::Running,
106 |             1 => ThreadState::Parked,
107 |             2 => ThreadState::Blocked,
108 |             _ => unreachable!(),
109 |         }
110 |     }
111 | }
112 | 
113 | impl ThreadState {
114 |     pub fn is_running(&self) -> bool {
115 |         match *self {
116 |             ThreadState::Running => true,
117 |             _ => false,
118 |         }
119 |     }
120 | 
121 |     pub fn is_parked(&self) -> bool {
122 |         match *self {
123 |             ThreadState::Parked => true,
124 |             _ => false,
125 |         }
126 |     }
127 | 
128 |     pub fn is_blocked(&self) -> bool {
129 |         match *self {
130 |             ThreadState::Blocked => true,
131 |             _ => false,
132 |         }
133 |     }
134 | 
135 |     pub fn to_usize(&self) -> usize {
136 |         *self as usize
137 |     }
138 | }
139 | 
140 | impl Default for ThreadState {
141 |     fn default() -> ThreadState {
142 |         ThreadState::Running
143 |     }
144 | }
145 | 
146 | pub struct MutatorThread {
147 |     pub state: StateManager,
148 |     pub rootset: std::cell::RefCell<Vec<*mut RootedInner<dyn Traceable>>>,
149 | }
150 | 
151 | impl MutatorThread {
152 |     pub fn new() -> Self {
153 |         Self {
154 |             state: StateManager::new(),
155 |             rootset: std::cell::RefCell::new(vec![]),
156 |         }
157 |     }
158 |     pub fn state(&self) -> ThreadState {
159 |         self.state.state()
160 |     }
161 | 
162 |     pub fn park(&self) {
163 |         self.state.park();
164 |     }
165 | 
166 |     pub fn unpark(&self) {
167 |         if super::heap::HEAP.threads.safepoint_id() != 0 {
168 |             crate::safepoint::block(self);
169 |         }
170 | 
171 |         self.state.unpark();
172 |     }
173 | 
174 |     pub fn block(&self, safepoint_id: usize) {
175 |         self.state.block(safepoint_id);
176 |     }
177 | 
178 |     pub fn unblock(&self) {
179 |         self.state.unblock();
180 |     }
181 | 
182 |     pub fn in_safepoint(&self, safepoint_id: usize) -> bool {
183 |         self.state.in_safepoint(safepoint_id)
184 |     }
185 | }
186 | 
187 | unsafe impl Send for MutatorThread {}
188 | unsafe impl Sync for MutatorThread {}
189 | pub struct Threads {
190 |     pub threads: Mutex<Vec<Arc<MutatorThread>>>,
191 |     pub cond_join: Condvar,
192 | 
193 |     pub next_id: AtomicUsize,
194 |     pub safepoint: Mutex<(usize, usize)>,
195 | 
196 |     pub barrier: Barrier,
197 | }
198 | 
199 | impl Threads {
200 |     pub fn new() -> Threads {
201 |         Threads {
202 |             threads: Mutex::new(Vec::new()),
203 |             cond_join: Condvar::new(),
204 |             next_id: AtomicUsize::new(1),
205 |             safepoint: Mutex::new((0, 1)),
206 |             barrier: Barrier::new(),
207 |         }
208 |     }
209 | 
210 |     pub fn attach_current_thread(&self) {
211 |         THREAD.with(|thread| {
212 |             let mut threads = self.threads.lock();
213 |             threads.push(thread.borrow().clone());
214 |         });
215 |     }
216 | 
217 |     pub fn attach_thread(&self, thread: Arc<MutatorThread>) {
218 |         let mut threads = self.threads.lock();
219 |         threads.push(thread);
220 |     }
221 | 
222 |     pub fn next_id(&self) -> usize {
223 |         self.next_id.fetch_add(1, Ordering::SeqCst)
224 |     }
225 | 
226 |     pub fn safepoint_id(&self) -> usize {
227 |         let safepoint = self.safepoint.lock();
228 |         safepoint.0
229 |     }
230 | 
231 |     pub fn safepoint_requested(&self) -> bool {
232 |         let safepoint = self.safepoint.lock();
233 |         safepoint.0 != 0
234 |     }
235 | 
236 |     pub fn request_safepoint(&self) -> usize {
237 |         let mut safepoint = self.safepoint.lock();
238 |         assert_eq!(safepoint.0, 0);
239 |         safepoint.0 = safepoint.1;
240 |         safepoint.1 += 1;
241 | 
242 |         safepoint.0
243 |     }
244 | 
245 |     pub fn clear_safepoint_request(&self) {
246 |         let mut safepoint = self.safepoint.lock();
247 |         assert_ne!(safepoint.0, 0);
248 |         safepoint.0 = 0;
249 |     }
250 | 
251 |     pub fn detach_current_thread(&self) {
252 |         THREAD.with(|thread| {
253 |             thread.borrow().park();
254 |             let mut threads = self.threads.lock();
255 |             threads.retain(|elem| !Arc::ptr_eq(elem, &*thread.borrow()));
256 |             self.cond_join.notify_all();
257 |         });
258 |     }
259 | 
260 |     pub fn join_all(&self) {
261 |         let mut threads = self.threads.lock();
262 | 
263 |         while threads.len() > 0 {
264 |             self.cond_join.wait(&mut threads);
265 |         }
266 |     }
267 | 
268 |     pub fn each<F>(&self, mut f: F)
269 |     where
270 |         F: FnMut(&Arc<MutatorThread>),
271 |     {
272 |         let threads = self.threads.lock();
273 | 
274 |         for thread in threads.iter() {
275 |             f(thread)
276 |         }
277 |     }
278 | }
279 | 
280 | thread_local! {
281 |     pub static THREAD: std::cell::RefCell<Arc<MutatorThread>> = std::cell::RefCell::new(Arc::new(MutatorThread::new()));
282 | }
283 | 
284 | pub extern "C" fn attach_current_thread() {
285 |     crate::heap::HEAP.threads.attach_current_thread();
286 | }
287 | 
288 | pub extern "C" fn detach_current_thread() {
289 |     crate::heap::HEAP.threads.detach_current_thread();
290 | }
291 | use crate::api::*;
292 | /// Use this function to allocate an object in the GC heap.
293 | ///
294 | /// If the value needs finalization, the `finalize` argument should be true.
295 | pub fn mt_alloc<T: Traceable + 'static>(value: T, finalize: bool) -> Rooted<T> {
296 |     let mem = crate::heap::HEAP.allocate(value, finalize);
297 |     let rooted = Box::into_raw(Box::new(RootedInner {
298 |         rooted: true,
299 |         inner: mem,
300 |     }));
301 | 
302 |     THREAD.with(|th| th.borrow().rootset.borrow_mut().push(rooted));
303 | 
304 |     Rooted { inner: rooted }
305 | }
306 | 
307 | pub fn mt_root<T: Traceable + 'static>(handle: Handle<T>) -> Rooted<T> {
308 |     let rooted = Box::into_raw(Box::new(RootedInner {
309 |         rooted: true,
310 |         inner: handle.inner,
311 |     }));
312 |     THREAD.with(|th| th.borrow().rootset.borrow_mut().push(rooted));
313 |     Rooted { inner: rooted }
314 | }
315 | 
--------------------------------------------------------------------------------
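Finally, a sketch of the public mutator-thread API defined above. It is not part of the source: the exact trait bound required of the allocated value lives in `api.rs`, so the example assumes the crate provides it for `i32`, and assumes the `heap` module and `HEAP` global are publicly reachable:

```rust
use cgc::threads::{attach_current_thread, detach_current_thread, mt_alloc};

fn main() {
    // Every mutator thread registers itself with the global heap first.
    attach_current_thread();

    {
        // finalize = false: a plain integer owns no heap memory of its own.
        let _answer = mt_alloc(42i32, false);

        // Long-running code cooperates with the collector at safepoints.
        if cgc::heap::HEAP.threads.safepoint_requested() {
            cgc::safepoint!();
        }
    } // _answer (and with it the root) goes out of scope here

    detach_current_thread();
}
```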