├── .github └── workflows │ └── rust.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE.md ├── README.md ├── dumpster ├── .gitignore ├── Cargo.toml └── src │ ├── impls.rs │ ├── lib.rs │ ├── ptr.rs │ ├── sync │ ├── collect.rs │ ├── mod.rs │ └── tests.rs │ └── unsync │ ├── collect.rs │ ├── mod.rs │ └── tests.rs ├── dumpster_bench ├── .gitignore ├── Cargo.toml ├── scripts │ └── make_plots.py └── src │ ├── lib.rs │ └── main.rs ├── dumpster_derive ├── .gitignore ├── Cargo.toml └── src │ └── lib.rs ├── dumpster_test ├── .gitignore ├── Cargo.toml └── src │ └── lib.rs └── rustfmt.toml /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: ["master"] 6 | pull_request: 7 | branches: ["master"] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | test: 14 | runs-on: ${{ matrix.os }} 15 | 16 | strategy: 17 | matrix: 18 | os: 19 | - ubuntu-latest 20 | - windows-latest 21 | - macOS-latest 22 | 23 | steps: 24 | - name: Checkout sources 25 | uses: actions/checkout@v2 26 | - name: Install nightly toolchain 27 | uses: actions-rs/toolchain@v1 28 | with: 29 | profile: minimal 30 | toolchain: nightly 31 | override: true 32 | - name: Build 33 | uses: actions-rs/cargo@v1 34 | with: 35 | command: build 36 | args: --all-features 37 | - name: Run tests 38 | uses: actions-rs/cargo@v1 39 | with: 40 | command: test 41 | args: --all-features 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | 4 | *.csv 5 | .vscode -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # `dumpster` Changelog 2 | 3 | ## 1.1.0 4 | 5 | ### New features 6 | 7 | - Added support for [`either`](https://crates.io/crates/either). 8 | 9 | ### Bug fixes 10 | 11 | - Derive implementations no longer erroneously refer to `heapsize`. 12 | 13 | ### Other changes 14 | 15 | - Slight performance and code style improvements. 16 | - Improved internal documentation on safety. 17 | - Remove `strict-provenance` requirement as it is now stabilized. 18 | 19 | ## 1.0.0 20 | 21 | ### Breaking changes 22 | 23 | - Rename `Collectable` to `Trace`. 24 | 25 | ## 0.2.1 26 | 27 | ### New features 28 | 29 | - Implement `Collectable` for `std::any::TypeId`. 30 | 31 | ## 0.2.0 32 | 33 | ### New features 34 | 35 | - Added `Gc::as_ptr`. 36 | - Added `Gc::ptr_eq`. 37 | - Implemented `PartialEq` and `Eq` for garbage collected pointers. 38 | 39 | ### Other 40 | 41 | - Changed license from GNU GPLv3 or later to MPL 2.0. 42 | - Allocations which do not contain `Gc`s will simply be reference counted. 43 | 44 | ## 0.1.2 45 | 46 | ### New features 47 | 48 | - Implement `Collectable` for `OnceCell`, `HashMap`, and `BTreeMap`. 49 | - Add `try_clone` and `try_deref` to `unsync::Gc` and `sync::Gc`. 50 | - Make dereferencing `Gc` only panic on truly-dead `Gc`s. 51 | 52 | ### Bugfixes 53 | 54 | - Prevent dead `Gc`s from escaping their `Drop` implementation, potentially causing UAFs. 55 | - Use fully-qualified name for `Result` in derive macro, preventing some bugs. 56 | 57 | ### Other 58 | 59 | - Improve performance in `unsync` by using `parking_lot` for concurrency primitives. 60 | - Improve documentation of panicking behavior in `Gc`. 61 | - Fix spelling mistakes in documentation. 
62 | 63 | ## 0.1.1 64 | 65 | ### Bugfixes 66 | 67 | - Prevent possible UAFs caused by accessing `Gc`s during `Drop` impls by panicking. 68 | 69 | ### Other 70 | 71 | - Fix spelling mistakes in documentation. 72 | 73 | ## 0.1.0 74 | 75 | Initial release. 76 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "dumpster", 4 | "dumpster_derive", 5 | "dumpster_test", 6 | "dumpster_bench", 7 | ] 8 | resolver = "2" 9 | 10 | [patch.crates-io] 11 | dumpster = { path = "dumpster" } 12 | 13 | [profile.release] 14 | lto = true -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | ### 1. Definitions 5 | 6 | **1.1. “Contributor”** 7 | means each individual or legal entity that creates, contributes to 8 | the creation of, or owns Covered Software. 9 | 10 | **1.2. “Contributor Version”** 11 | means the combination of the Contributions of others (if any) used 12 | by a Contributor and that particular Contributor's Contribution. 13 | 14 | **1.3. “Contribution”** 15 | means Covered Software of a particular Contributor. 16 | 17 | **1.4. “Covered Software”** 18 | means Source Code Form to which the initial Contributor has attached 19 | the notice in Exhibit A, the Executable Form of such Source Code 20 | Form, and Modifications of such Source Code Form, in each case 21 | including portions thereof. 22 | 23 | **1.5. “Incompatible With Secondary Licenses”** 24 | means 25 | 26 | * **(a)** that the initial Contributor has attached the notice described 27 | in Exhibit B to the Covered Software; or 28 | * **(b)** that the Covered Software was made available under the terms of 29 | version 1.1 or earlier of the License, but not also under the 30 | terms of a Secondary License. 31 | 32 | **1.6. “Executable Form”** 33 | means any form of the work other than Source Code Form. 34 | 35 | **1.7. “Larger Work”** 36 | means a work that combines Covered Software with other material, in 37 | a separate file or files, that is not Covered Software. 38 | 39 | **1.8. “License”** 40 | means this document. 41 | 42 | **1.9. “Licensable”** 43 | means having the right to grant, to the maximum extent possible, 44 | whether at the time of the initial grant or subsequently, any and 45 | all of the rights conveyed by this License. 46 | 47 | **1.10. “Modifications”** 48 | means any of the following: 49 | 50 | * **(a)** any file in Source Code Form that results from an addition to, 51 | deletion from, or modification of the contents of Covered 52 | Software; or 53 | * **(b)** any new file in Source Code Form that contains any Covered 54 | Software. 55 | 56 | **1.11. “Patent Claims” of a Contributor** 57 | means any patent claim(s), including without limitation, method, 58 | process, and apparatus claims, in any patent Licensable by such 59 | Contributor that would be infringed, but for the grant of the 60 | License, by the making, using, selling, offering for sale, having 61 | made, import, or transfer of either its Contributions or its 62 | Contributor Version. 63 | 64 | **1.12. 
“Secondary License”** 65 | means either the GNU General Public License, Version 2.0, the GNU 66 | Lesser General Public License, Version 2.1, the GNU Affero General 67 | Public License, Version 3.0, or any later versions of those 68 | licenses. 69 | 70 | **1.13. “Source Code Form”** 71 | means the form of the work preferred for making modifications. 72 | 73 | **1.14. “You” (or “Your”)** 74 | means an individual or a legal entity exercising rights under this 75 | License. For legal entities, “You” includes any entity that 76 | controls, is controlled by, or is under common control with You. For 77 | purposes of this definition, “control” means **(a)** the power, direct 78 | or indirect, to cause the direction or management of such entity, 79 | whether by contract or otherwise, or **(b)** ownership of more than 80 | fifty percent (50%) of the outstanding shares or beneficial 81 | ownership of such entity. 82 | 83 | 84 | ### 2. License Grants and Conditions 85 | 86 | #### 2.1. Grants 87 | 88 | Each Contributor hereby grants You a world-wide, royalty-free, 89 | non-exclusive license: 90 | 91 | * **(a)** under intellectual property rights (other than patent or trademark) 92 | Licensable by such Contributor to use, reproduce, make available, 93 | modify, display, perform, distribute, and otherwise exploit its 94 | Contributions, either on an unmodified basis, with Modifications, or 95 | as part of a Larger Work; and 96 | * **(b)** under Patent Claims of such Contributor to make, use, sell, offer 97 | for sale, have made, import, and otherwise transfer either its 98 | Contributions or its Contributor Version. 99 | 100 | #### 2.2. Effective Date 101 | 102 | The licenses granted in Section 2.1 with respect to any Contribution 103 | become effective for each Contribution on the date the Contributor first 104 | distributes such Contribution. 105 | 106 | #### 2.3. Limitations on Grant Scope 107 | 108 | The licenses granted in this Section 2 are the only rights granted under 109 | this License. No additional rights or licenses will be implied from the 110 | distribution or licensing of Covered Software under this License. 111 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 112 | Contributor: 113 | 114 | * **(a)** for any code that a Contributor has removed from Covered Software; 115 | or 116 | * **(b)** for infringements caused by: **(i)** Your and any other third party's 117 | modifications of Covered Software, or **(ii)** the combination of its 118 | Contributions with other software (except as part of its Contributor 119 | Version); or 120 | * **(c)** under Patent Claims infringed by Covered Software in the absence of 121 | its Contributions. 122 | 123 | This License does not grant any rights in the trademarks, service marks, 124 | or logos of any Contributor (except as may be necessary to comply with 125 | the notice requirements in Section 3.4). 126 | 127 | #### 2.4. Subsequent Licenses 128 | 129 | No Contributor makes additional grants as a result of Your choice to 130 | distribute the Covered Software under a subsequent version of this 131 | License (see Section 10.2) or under the terms of a Secondary License (if 132 | permitted under the terms of Section 3.3). 133 | 134 | #### 2.5. Representation 135 | 136 | Each Contributor represents that the Contributor believes its 137 | Contributions are its original creation(s) or it has sufficient rights 138 | to grant the rights to its Contributions conveyed by this License. 139 | 140 | #### 2.6. 
Fair Use 141 | 142 | This License is not intended to limit any rights You have under 143 | applicable copyright doctrines of fair use, fair dealing, or other 144 | equivalents. 145 | 146 | #### 2.7. Conditions 147 | 148 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 149 | in Section 2.1. 150 | 151 | 152 | ### 3. Responsibilities 153 | 154 | #### 3.1. Distribution of Source Form 155 | 156 | All distribution of Covered Software in Source Code Form, including any 157 | Modifications that You create or to which You contribute, must be under 158 | the terms of this License. You must inform recipients that the Source 159 | Code Form of the Covered Software is governed by the terms of this 160 | License, and how they can obtain a copy of this License. You may not 161 | attempt to alter or restrict the recipients' rights in the Source Code 162 | Form. 163 | 164 | #### 3.2. Distribution of Executable Form 165 | 166 | If You distribute Covered Software in Executable Form then: 167 | 168 | * **(a)** such Covered Software must also be made available in Source Code 169 | Form, as described in Section 3.1, and You must inform recipients of 170 | the Executable Form how they can obtain a copy of such Source Code 171 | Form by reasonable means in a timely manner, at a charge no more 172 | than the cost of distribution to the recipient; and 173 | 174 | * **(b)** You may distribute such Executable Form under the terms of this 175 | License, or sublicense it under different terms, provided that the 176 | license for the Executable Form does not attempt to limit or alter 177 | the recipients' rights in the Source Code Form under this License. 178 | 179 | #### 3.3. Distribution of a Larger Work 180 | 181 | You may create and distribute a Larger Work under terms of Your choice, 182 | provided that You also comply with the requirements of this License for 183 | the Covered Software. If the Larger Work is a combination of Covered 184 | Software with a work governed by one or more Secondary Licenses, and the 185 | Covered Software is not Incompatible With Secondary Licenses, this 186 | License permits You to additionally distribute such Covered Software 187 | under the terms of such Secondary License(s), so that the recipient of 188 | the Larger Work may, at their option, further distribute the Covered 189 | Software under the terms of either this License or such Secondary 190 | License(s). 191 | 192 | #### 3.4. Notices 193 | 194 | You may not remove or alter the substance of any license notices 195 | (including copyright notices, patent notices, disclaimers of warranty, 196 | or limitations of liability) contained within the Source Code Form of 197 | the Covered Software, except that You may alter any license notices to 198 | the extent required to remedy known factual inaccuracies. 199 | 200 | #### 3.5. Application of Additional Terms 201 | 202 | You may choose to offer, and to charge a fee for, warranty, support, 203 | indemnity or liability obligations to one or more recipients of Covered 204 | Software. However, You may do so only on Your own behalf, and not on 205 | behalf of any Contributor. You must make it absolutely clear that any 206 | such warranty, support, indemnity, or liability obligation is offered by 207 | You alone, and You hereby agree to indemnify every Contributor for any 208 | liability incurred by such Contributor as a result of warranty, support, 209 | indemnity or liability terms You offer. 
You may include additional 210 | disclaimers of warranty and limitations of liability specific to any 211 | jurisdiction. 212 | 213 | 214 | ### 4. Inability to Comply Due to Statute or Regulation 215 | 216 | If it is impossible for You to comply with any of the terms of this 217 | License with respect to some or all of the Covered Software due to 218 | statute, judicial order, or regulation then You must: **(a)** comply with 219 | the terms of this License to the maximum extent possible; and **(b)** 220 | describe the limitations and the code they affect. Such description must 221 | be placed in a text file included with all distributions of the Covered 222 | Software under this License. Except to the extent prohibited by statute 223 | or regulation, such description must be sufficiently detailed for a 224 | recipient of ordinary skill to be able to understand it. 225 | 226 | 227 | ### 5. Termination 228 | 229 | **5.1.** The rights granted under this License will terminate automatically 230 | if You fail to comply with any of its terms. However, if You become 231 | compliant, then the rights granted under this License from a particular 232 | Contributor are reinstated **(a)** provisionally, unless and until such 233 | Contributor explicitly and finally terminates Your grants, and **(b)** on an 234 | ongoing basis, if such Contributor fails to notify You of the 235 | non-compliance by some reasonable means prior to 60 days after You have 236 | come back into compliance. Moreover, Your grants from a particular 237 | Contributor are reinstated on an ongoing basis if such Contributor 238 | notifies You of the non-compliance by some reasonable means, this is the 239 | first time You have received notice of non-compliance with this License 240 | from such Contributor, and You become compliant prior to 30 days after 241 | Your receipt of the notice. 242 | 243 | **5.2.** If You initiate litigation against any entity by asserting a patent 244 | infringement claim (excluding declaratory judgment actions, 245 | counter-claims, and cross-claims) alleging that a Contributor Version 246 | directly or indirectly infringes any patent, then the rights granted to 247 | You by any and all Contributors for the Covered Software under Section 248 | 2.1 of this License shall terminate. 249 | 250 | **5.3.** In the event of termination under Sections 5.1 or 5.2 above, all 251 | end user license agreements (excluding distributors and resellers) which 252 | have been validly granted by You or Your distributors under this License 253 | prior to termination shall survive termination. 254 | 255 | 256 | ### 6. Disclaimer of Warranty 257 | 258 | > Covered Software is provided under this License on an “as is” 259 | > basis, without warranty of any kind, either expressed, implied, or 260 | > statutory, including, without limitation, warranties that the 261 | > Covered Software is free of defects, merchantable, fit for a 262 | > particular purpose or non-infringing. The entire risk as to the 263 | > quality and performance of the Covered Software is with You. 264 | > Should any Covered Software prove defective in any respect, You 265 | > (not any Contributor) assume the cost of any necessary servicing, 266 | > repair, or correction. This disclaimer of warranty constitutes an 267 | > essential part of this License. No use of any Covered Software is 268 | > authorized under this License except under this disclaimer. 269 | 270 | ### 7. 
Limitation of Liability 271 | 272 | > Under no circumstances and under no legal theory, whether tort 273 | > (including negligence), contract, or otherwise, shall any 274 | > Contributor, or anyone who distributes Covered Software as 275 | > permitted above, be liable to You for any direct, indirect, 276 | > special, incidental, or consequential damages of any character 277 | > including, without limitation, damages for lost profits, loss of 278 | > goodwill, work stoppage, computer failure or malfunction, or any 279 | > and all other commercial damages or losses, even if such party 280 | > shall have been informed of the possibility of such damages. This 281 | > limitation of liability shall not apply to liability for death or 282 | > personal injury resulting from such party's negligence to the 283 | > extent applicable law prohibits such limitation. Some 284 | > jurisdictions do not allow the exclusion or limitation of 285 | > incidental or consequential damages, so this exclusion and 286 | > limitation may not apply to You. 287 | 288 | 289 | ### 8. Litigation 290 | 291 | Any litigation relating to this License may be brought only in the 292 | courts of a jurisdiction where the defendant maintains its principal 293 | place of business and such litigation shall be governed by laws of that 294 | jurisdiction, without reference to its conflict-of-law provisions. 295 | Nothing in this Section shall prevent a party's ability to bring 296 | cross-claims or counter-claims. 297 | 298 | 299 | ### 9. Miscellaneous 300 | 301 | This License represents the complete agreement concerning the subject 302 | matter hereof. If any provision of this License is held to be 303 | unenforceable, such provision shall be reformed only to the extent 304 | necessary to make it enforceable. Any law or regulation which provides 305 | that the language of a contract shall be construed against the drafter 306 | shall not be used to construe this License against a Contributor. 307 | 308 | 309 | ### 10. Versions of the License 310 | 311 | #### 10.1. New Versions 312 | 313 | Mozilla Foundation is the license steward. Except as provided in Section 314 | 10.3, no one other than the license steward has the right to modify or 315 | publish new versions of this License. Each version will be given a 316 | distinguishing version number. 317 | 318 | #### 10.2. Effect of New Versions 319 | 320 | You may distribute the Covered Software under the terms of the version 321 | of the License under which You originally received the Covered Software, 322 | or under the terms of any subsequent version published by the license 323 | steward. 324 | 325 | #### 10.3. Modified Versions 326 | 327 | If you create software not governed by this License, and you want to 328 | create a new license for such software, you may create and use a 329 | modified version of this License if you rename the license and remove 330 | any references to the name of the license steward (except to note that 331 | such modified license differs from this License). 332 | 333 | #### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses 334 | 335 | If You choose to distribute Source Code Form that is Incompatible With 336 | Secondary Licenses under the terms of this version of the License, the 337 | notice described in Exhibit B of this License must be attached. 338 | 339 | ## Exhibit A - Source Code Form License Notice 340 | 341 | This Source Code Form is subject to the terms of the Mozilla Public 342 | License, v. 2.0. 
If a copy of the MPL was not distributed with this 343 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 344 | 345 | If it is not possible or desirable to put the notice in a particular 346 | file, then You may include the notice in a location (such as a LICENSE 347 | file in a relevant directory) where a recipient would be likely to look 348 | for such a notice. 349 | 350 | You may add additional accurate notices of copyright ownership. 351 | 352 | ## Exhibit B - “Incompatible With Secondary Licenses” Notice 353 | 354 | This Source Code Form is "Incompatible With Secondary Licenses", as 355 | defined by the Mozilla Public License, v. 2.0. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `dumpster`: A cycle-tracking garbage collector for Rust 2 | 3 | `dumpster` is a cycle-detecting garbage collector for Rust. 4 | It detects unreachable allocations and automatically frees them. 5 | 6 | ## Why should you use this crate? 7 | 8 | In short, `dumpster` offers a great mix of usability, performance, and flexibility. 9 | 10 | - `dumpster`'s API is a drop-in replacement for `std`'s reference-counted shared allocations 11 | (`Rc` and `Arc`). 12 | - It's very performant and has builtin implementations of both thread-local and concurrent 13 | garbage collection. 14 | - There are no restrictions on the reference structure within a garbage-collected allocation 15 | (references may point in any way you like). 16 | - It's trivial to make a custom type `Trace` using the provided derive macro. 17 | - You can even store `?Sized` data in a garbage-collected pointer! 18 | 19 | ## How it works 20 | 21 | `dumpster` is unlike most tracing garbage collectors. 22 | Other GCs keep track of a set of roots, which can then be used to perform a sweep and find out 23 | which allocations are reachable and which are not. 24 | Instead, `dumpster` extends reference-counted garbage collection (such as `std::rc::Rc`) with a 25 | cycle-detection algorithm, enabling it to effectively clean up self-referential data structures. 26 | 27 | For a deeper dive, check out this 28 | [blog post](https://claytonwramsey.github.io/2023/08/14/dumpster.html). 29 | 30 | ## What this library contains 31 | 32 | `dumpster` actually contains two garbage collector implementations: one thread-local, non-`Send` 33 | garbage collector in the module `unsync`, and one thread-safe garbage collector in the module 34 | `sync`. 35 | These garbage collectors can be safely mixed and matched. 36 | 37 | This library also comes with a derive macro for creating custom `Trace` types. 38 | 39 | ## Examples 40 | 41 | ```rust 42 | use dumpster::{Trace, unsync::Gc}; 43 | use std::cell::RefCell; 44 | #[derive(Trace)] 45 | struct Foo { 46 | ptr: RefCell<Option<Gc<Foo>>>, 47 | } 48 | 49 | // Create a new garbage-collected Foo. 50 | let foo = Gc::new(Foo { 51 | ptr: RefCell::new(None), 52 | }); 53 | 54 | // Insert a circular reference inside of the foo. 55 | *foo.ptr.borrow_mut() = Some(foo.clone()); 56 | 57 | // Render the foo inaccessible. 58 | // This may trigger a collection, but it's not guaranteed. 59 | // If we had used `Rc` instead of `Gc`, this would have caused a memory leak. 60 | drop(foo); 61 | 62 | // Trigger a collection. 63 | // This isn't necessary, but it guarantees that `foo` will be collected immediately (instead of 64 | // later). 
65 | dumpster::unsync::collect(); 66 | ``` 67 | 68 | ## Installation 69 | 70 | To install, simply add `dumpster` as a dependency to your project. 71 | 72 | ```toml 73 | [dependencies] 74 | dumpster = "1.1.0" 75 | ``` 76 | 77 | ## Optional features 78 | 79 | ### `derive` 80 | 81 | `derive` is enabled by default. 82 | It enables the derive macro for `Trace`, which makes it easy for users to implement their 83 | own `Trace` types. 84 | 85 | ```rust 86 | use dumpster::{unsync::Gc, Trace}; 87 | use std::cell::RefCell; 88 | 89 | #[derive(Trace)] // no manual implementation required 90 | struct Foo(RefCell<Option<Gc<Foo>>>); 91 | 92 | let my_foo = Gc::new(Foo(RefCell::new(None))); 93 | *my_foo.0.borrow_mut() = Some(my_foo.clone()); 94 | 95 | drop(my_foo); // my_foo will be automatically cleaned up 96 | ``` 97 | 98 | ### `either` 99 | 100 | `either` is disabled by default. It adds support for the [`either`](https://crates.io/crates/either) crate, 101 | specifically by implementing `Trace` for [`either::Either`](https://docs.rs/either/1.13.0/either/enum.Either.html). 102 | 103 | ### `coerce-unsized` 104 | 105 | `coerce-unsized` is disabled by default. 106 | This enables the implementation of `CoerceUnsized` for each garbage collector, 107 | making it possible to use `Gc` with `!Sized` types conveniently. 108 | 109 | ```rust 110 | use dumpster::unsync::Gc; 111 | 112 | // this only works with "coerce-unsized" enabled while compiling on nightly Rust 113 | let gc1: Gc<[u8]> = Gc::new([1, 2, 3]); 114 | ``` 115 | 116 | To use `coerce-unsized`, edit your `Cargo.toml` to include the feature. 117 | 118 | ```toml 119 | [dependencies] 120 | dumpster = { version = "1.1.0", features = ["coerce-unsized"] } 121 | ``` 122 | 123 | ## License 124 | 125 | This code is licensed under the Mozilla Public License, version 2.0. 126 | For more information, refer to [LICENSE.md](LICENSE.md). 127 | -------------------------------------------------------------------------------- /dumpster/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /dumpster/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dumpster" 3 | version = "1.1.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | authors = ["Clayton Ramsey"] 7 | description = "A concurrent cycle-tracking garbage collector." 
8 | repository = "https://github.com/claytonwramsey/dumpster" 9 | readme = "../README.md" 10 | keywords = ["dumpster", "garbage_collector", "gc"] 11 | categories = ["memory-management", "data-structures"] 12 | 13 | [features] 14 | default = ["derive"] 15 | coerce-unsized = [] 16 | derive = ["dep:dumpster_derive"] 17 | either = ["dep:either"] 18 | 19 | [dependencies] 20 | parking_lot = "0.12.3" 21 | dumpster_derive = { version = "1.1.0", path = "../dumpster_derive", optional = true } 22 | either = { version = "1.13.0", optional = true } 23 | 24 | [dev-dependencies] 25 | fastrand = "2.0.0" 26 | 27 | [package.metadata.playground] 28 | features = ["derive"] 29 | 30 | [package.metadata.docs.rs] 31 | features = ["derive"] 32 | targets = ["x86_64-unknown-linux-gnu"] 33 | rustdoc-args = ["--generate-link-to-definition"] 34 | -------------------------------------------------------------------------------- /dumpster/src/impls.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! Implementations of [`Trace`] for common data types. 10 | 11 | #![allow(deprecated)] 12 | 13 | use std::{ 14 | any::TypeId, 15 | borrow::Cow, 16 | cell::{Cell, OnceCell, RefCell}, 17 | collections::{ 18 | hash_map::{DefaultHasher, RandomState}, 19 | BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, LinkedList, VecDeque, 20 | }, 21 | convert::Infallible, 22 | ffi::{OsStr, OsString}, 23 | hash::{BuildHasher, BuildHasherDefault, SipHasher}, 24 | marker::PhantomData, 25 | num::{ 26 | NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, 27 | NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, 28 | }, 29 | ops::Deref, 30 | path::{Path, PathBuf}, 31 | rc::Rc, 32 | sync::{ 33 | atomic::{ 34 | AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, 35 | AtomicU64, AtomicU8, AtomicUsize, 36 | }, 37 | Mutex, MutexGuard, OnceLock, RwLock, RwLockReadGuard, TryLockError, 38 | }, 39 | }; 40 | 41 | use crate::{Trace, Visitor}; 42 | 43 | unsafe impl Trace for Infallible { 44 | fn accept(&self, _: &mut V) -> Result<(), ()> { 45 | match *self {} 46 | } 47 | } 48 | 49 | #[cfg(feature = "either")] 50 | unsafe impl Trace for either::Either { 51 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 52 | match self { 53 | either::Either::Left(a) => a.accept(visitor), 54 | either::Either::Right(b) => b.accept(visitor), 55 | } 56 | } 57 | } 58 | 59 | /// Implement `Trace` trivially for some parametric `?Sized` type. 60 | macro_rules! 
param_trivial_impl_unsized { 61 | ($x: ty) => { 62 | unsafe impl Trace for $x { 63 | #[inline] 64 | fn accept(&self, _: &mut V) -> Result<(), ()> { 65 | Ok(()) 66 | } 67 | } 68 | }; 69 | } 70 | 71 | param_trivial_impl_unsized!(MutexGuard<'static, T>); 72 | param_trivial_impl_unsized!(RwLockReadGuard<'static, T>); 73 | param_trivial_impl_unsized!(&'static T); 74 | param_trivial_impl_unsized!(PhantomData); 75 | 76 | unsafe impl Trace for Box { 77 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 78 | (**self).accept(visitor) 79 | } 80 | } 81 | 82 | unsafe impl Trace for BuildHasherDefault { 83 | fn accept(&self, _: &mut V) -> Result<(), ()> { 84 | Ok(()) 85 | } 86 | } 87 | 88 | unsafe impl<'a, T: ToOwned> Trace for Cow<'a, T> 89 | where 90 | T::Owned: Trace, 91 | { 92 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 93 | if let Cow::Owned(ref v) = self { 94 | v.accept(visitor)?; 95 | } 96 | Ok(()) 97 | } 98 | } 99 | 100 | unsafe impl Trace for RefCell { 101 | #[inline] 102 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 103 | self.try_borrow().map_err(|_| ())?.accept(visitor) 104 | } 105 | } 106 | 107 | unsafe impl Trace for Mutex { 108 | #[inline] 109 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 110 | self.try_lock() 111 | .map_err(|e| match e { 112 | TryLockError::Poisoned(_) => panic!(), 113 | TryLockError::WouldBlock => (), 114 | })? 115 | .deref() 116 | .accept(visitor) 117 | } 118 | } 119 | 120 | unsafe impl Trace for RwLock { 121 | #[inline] 122 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 123 | self.try_read() 124 | .map_err(|e| match e { 125 | TryLockError::Poisoned(_) => panic!(), 126 | TryLockError::WouldBlock => (), 127 | })? 128 | .deref() 129 | .accept(visitor) 130 | } 131 | } 132 | 133 | unsafe impl Trace for Option { 134 | #[inline] 135 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 136 | match self { 137 | Some(x) => x.accept(visitor), 138 | None => Ok(()), 139 | } 140 | } 141 | } 142 | 143 | unsafe impl Trace for Result { 144 | #[inline] 145 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 146 | match self { 147 | Ok(t) => t.accept(visitor), 148 | Err(e) => e.accept(visitor), 149 | } 150 | } 151 | } 152 | 153 | unsafe impl Trace for Cell { 154 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 155 | self.get().accept(visitor) 156 | } 157 | } 158 | 159 | unsafe impl Trace for OnceCell { 160 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 161 | self.get().map_or(Ok(()), |x| x.accept(visitor)) 162 | } 163 | } 164 | 165 | unsafe impl Trace for OnceLock { 166 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 167 | self.get().map_or(Ok(()), |x| x.accept(visitor)) 168 | } 169 | } 170 | 171 | /// Implement [`Trace`] for a collection data structure which has some method `iter()` that 172 | /// iterates over all elements of the data structure and `iter_mut()` which does the same over 173 | /// mutable references. 174 | macro_rules! 
Trace_collection_impl { 175 | ($x: ty) => { 176 | unsafe impl Trace for $x { 177 | #[inline] 178 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 179 | for elem in self { 180 | elem.accept(visitor)?; 181 | } 182 | Ok(()) 183 | } 184 | } 185 | }; 186 | } 187 | 188 | Trace_collection_impl!(Vec); 189 | Trace_collection_impl!(VecDeque); 190 | Trace_collection_impl!(LinkedList); 191 | Trace_collection_impl!([T]); 192 | Trace_collection_impl!(HashSet); 193 | Trace_collection_impl!(BinaryHeap); 194 | Trace_collection_impl!(BTreeSet); 195 | 196 | unsafe impl Trace for HashMap { 197 | fn accept(&self, visitor: &mut Z) -> Result<(), ()> { 198 | for (k, v) in self { 199 | k.accept(visitor)?; 200 | v.accept(visitor)?; 201 | } 202 | self.hasher().accept(visitor) 203 | } 204 | } 205 | 206 | unsafe impl Trace for BTreeMap { 207 | fn accept(&self, visitor: &mut Z) -> Result<(), ()> { 208 | for (k, v) in self { 209 | k.accept(visitor)?; 210 | v.accept(visitor)?; 211 | } 212 | Ok(()) 213 | } 214 | } 215 | 216 | unsafe impl Trace for [T; N] { 217 | #[inline] 218 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 219 | for elem in self { 220 | elem.accept(visitor)?; 221 | } 222 | Ok(()) 223 | } 224 | } 225 | 226 | /// Implement [`Trace`] for a trivially-collected type which contains no [`Gc`]s in its 227 | /// fields. 228 | macro_rules! Trace_trivial_impl { 229 | ($x: ty) => { 230 | unsafe impl Trace for $x { 231 | #[inline] 232 | fn accept(&self, _: &mut V) -> Result<(), ()> { 233 | Ok(()) 234 | } 235 | } 236 | }; 237 | } 238 | 239 | Trace_trivial_impl!(()); 240 | 241 | Trace_trivial_impl!(u8); 242 | Trace_trivial_impl!(u16); 243 | Trace_trivial_impl!(u32); 244 | Trace_trivial_impl!(u64); 245 | Trace_trivial_impl!(u128); 246 | Trace_trivial_impl!(usize); 247 | Trace_trivial_impl!(i8); 248 | Trace_trivial_impl!(i16); 249 | Trace_trivial_impl!(i32); 250 | Trace_trivial_impl!(i64); 251 | Trace_trivial_impl!(i128); 252 | Trace_trivial_impl!(isize); 253 | 254 | Trace_trivial_impl!(bool); 255 | Trace_trivial_impl!(char); 256 | 257 | Trace_trivial_impl!(f32); 258 | Trace_trivial_impl!(f64); 259 | 260 | Trace_trivial_impl!(AtomicU8); 261 | Trace_trivial_impl!(AtomicU16); 262 | Trace_trivial_impl!(AtomicU32); 263 | Trace_trivial_impl!(AtomicU64); 264 | Trace_trivial_impl!(AtomicUsize); 265 | Trace_trivial_impl!(AtomicI8); 266 | Trace_trivial_impl!(AtomicI16); 267 | Trace_trivial_impl!(AtomicI32); 268 | Trace_trivial_impl!(AtomicI64); 269 | Trace_trivial_impl!(AtomicIsize); 270 | 271 | Trace_trivial_impl!(NonZeroU8); 272 | Trace_trivial_impl!(NonZeroU16); 273 | Trace_trivial_impl!(NonZeroU32); 274 | Trace_trivial_impl!(NonZeroU64); 275 | Trace_trivial_impl!(NonZeroU128); 276 | Trace_trivial_impl!(NonZeroUsize); 277 | Trace_trivial_impl!(NonZeroI8); 278 | Trace_trivial_impl!(NonZeroI16); 279 | Trace_trivial_impl!(NonZeroI32); 280 | Trace_trivial_impl!(NonZeroI64); 281 | Trace_trivial_impl!(NonZeroI128); 282 | Trace_trivial_impl!(NonZeroIsize); 283 | 284 | Trace_trivial_impl!(String); 285 | Trace_trivial_impl!(str); 286 | Trace_trivial_impl!(PathBuf); 287 | Trace_trivial_impl!(Path); 288 | Trace_trivial_impl!(OsString); 289 | Trace_trivial_impl!(OsStr); 290 | 291 | Trace_trivial_impl!(DefaultHasher); 292 | Trace_trivial_impl!(RandomState); 293 | Trace_trivial_impl!(Rc); 294 | Trace_trivial_impl!(SipHasher); 295 | 296 | Trace_trivial_impl!(TypeId); 297 | 298 | /// Implement [`Trace`] for a tuple. 299 | macro_rules! 
Trace_tuple { 300 | () => {}; // This case is handled above by the trivial case 301 | ($($args:ident),*) => { 302 | unsafe impl<$($args: Trace),*> Trace for ($($args,)*) { 303 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 304 | #[allow(non_snake_case)] 305 | let &($(ref $args,)*) = self; 306 | $(($args).accept(visitor)?;)* 307 | Ok(()) 308 | } 309 | } 310 | } 311 | } 312 | 313 | Trace_tuple!(); 314 | Trace_tuple!(A); 315 | Trace_tuple!(A, B); 316 | Trace_tuple!(A, B, C); 317 | Trace_tuple!(A, B, C, D); 318 | Trace_tuple!(A, B, C, D, E); 319 | Trace_tuple!(A, B, C, D, E, F); 320 | Trace_tuple!(A, B, C, D, E, F, G); 321 | Trace_tuple!(A, B, C, D, E, F, G, H); 322 | Trace_tuple!(A, B, C, D, E, F, G, H, I); 323 | Trace_tuple!(A, B, C, D, E, F, G, H, I, J); 324 | 325 | /// Implement `Trace` for one function type. 326 | macro_rules! Trace_fn { 327 | ($ty:ty $(,$args:ident)*) => { 328 | unsafe impl Trace for $ty { 329 | fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } 330 | } 331 | } 332 | } 333 | 334 | /// Implement `Trace` for all functions with a given set of args. 335 | macro_rules! Trace_fn_group { 336 | () => { 337 | Trace_fn!(extern "Rust" fn () -> Ret); 338 | Trace_fn!(extern "C" fn () -> Ret); 339 | Trace_fn!(unsafe extern "Rust" fn () -> Ret); 340 | Trace_fn!(unsafe extern "C" fn () -> Ret); 341 | }; 342 | ($($args:ident),*) => { 343 | Trace_fn!(extern "Rust" fn ($($args),*) -> Ret, $($args),*); 344 | Trace_fn!(extern "C" fn ($($args),*) -> Ret, $($args),*); 345 | Trace_fn!(extern "C" fn ($($args),*, ...) -> Ret, $($args),*); 346 | Trace_fn!(unsafe extern "Rust" fn ($($args),*) -> Ret, $($args),*); 347 | Trace_fn!(unsafe extern "C" fn ($($args),*) -> Ret, $($args),*); 348 | Trace_fn!(unsafe extern "C" fn ($($args),*, ...) -> Ret, $($args),*); 349 | } 350 | } 351 | 352 | Trace_fn_group!(); 353 | Trace_fn_group!(A); 354 | Trace_fn_group!(A, B); 355 | Trace_fn_group!(A, B, C); 356 | Trace_fn_group!(A, B, C, D); 357 | Trace_fn_group!(A, B, C, D, E); 358 | Trace_fn_group!(A, B, C, D, E, F); 359 | Trace_fn_group!(A, B, C, D, E, F, G); 360 | Trace_fn_group!(A, B, C, D, E, F, G, H); 361 | Trace_fn_group!(A, B, C, D, E, F, G, H, I); 362 | Trace_fn_group!(A, B, C, D, E, F, G, H, I, J); 363 | -------------------------------------------------------------------------------- /dumpster/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! A cycle-tracking concurrent garbage collector with an easy-to-use API. 10 | //! 11 | //! Most garbage collectors are _tracing_ garbage collectors, meaning that they keep track of a set 12 | //! of roots which are directly accessible from the stack, and then use those roots to find the set 13 | //! of all accessible allocations. 14 | //! However, because Rust does not allow us to hook into when a value is moved, it's quite difficult 15 | //! to detect when a garbage-collected value stops being a root. 16 | //! 17 | //! `dumpster` takes a different approach. 18 | //! It begins by using simple reference counting, then automatically detects cycles. 19 | //! Allocations are freed when their reference count reaches zero or when they are only accessible 20 | //! via their descendants. 21 | //! 
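//! As a brief added illustration of that approach (a minimal sketch mirroring the examples later
//! in these docs; the `Node` type here is hypothetical), a reference cycle that `Rc` would leak is
//! reclaimed once the last external handle to it is dropped:
//!
//! ```
//! use dumpster::{unsync::Gc, Trace};
//! use std::cell::RefCell;
//!
//! #[derive(Trace)]
//! struct Node {
//!     next: RefCell<Option<Gc<Node>>>,
//! }
//!
//! let node = Gc::new(Node { next: RefCell::new(None) });
//! // Close the cycle: the node now points to itself.
//! *node.next.borrow_mut() = Some(node.clone());
//! // Dropping the last external handle leaves only the cycle, which the collector can free.
//! drop(node);
//! // Optionally force a collection right away instead of waiting for an automatic one.
//! dumpster::unsync::collect();
//! ```
//!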
22 | //! Garbage-collected pointers can be created and destroyed in _O(1)_ amortized time, but destroying 23 | //! a garbage-collected pointer may take _O(r)_, where _r_ is the number of existing 24 | //! garbage-collected references, on occasion. 25 | //! However, the cleanups that require _O(r)_ performance are performed once every _O(1/r)_ times 26 | //! a reference is dropped, yielding an amortized _O(1)_ runtime. 27 | //! 28 | //! # Why should you use this crate? 29 | //! 30 | //! In short, `dumpster` offers a great mix of usability, performance, and flexibility. 31 | //! 32 | //! - `dumpster`'s API is a drop-in replacement for `std`'s reference-counted shared allocations 33 | //! (`Rc` and `Arc`). 34 | //! - It's very performant and has builtin implementations of both thread-local and concurrent 35 | //! garbage collection. 36 | //! - There are no restrictions on the reference structure within a garbage-collected allocation 37 | //! (references may point in any way you like). 38 | //! - It's trivial to make a custom type Trace using the provided derive macros. 39 | //! - You can even store `?Sized` data in a garbage-collected pointer! 40 | //! 41 | //! # Module structure 42 | //! 43 | //! `dumpster` contains 3 core modules: the root (this module), as well as [`sync`] and [`unsync`]. 44 | //! `sync` contains an implementation of thread-safe garbage-collected pointers, while `unsync` 45 | //! contains an implementation of thread-local garbage-collected pointers which cannot be shared 46 | //! across threads. 47 | //! Thread-safety requires some synchronization overhead, so for a single-threaded application, 48 | //! it is recommended to use `unsync`. 49 | //! 50 | //! The project root contains common definitions across both `sync` and `unsync`. 51 | //! Types which implement [`Trace`] can immediately be used in `unsync`, but in order to use 52 | //! `sync`'s garbage collector, the types must also implement [`Sync`]. 53 | //! 54 | //! # Examples 55 | //! 56 | //! If your code is meant to run as a single thread, or if your data doesn't need to be shared 57 | //! across threads, you should use [`unsync::Gc`] to store your allocations. 58 | //! 59 | //! ``` 60 | //! use dumpster::unsync::Gc; 61 | //! use std::cell::Cell; 62 | //! 63 | //! let my_gc = Gc::new(Cell::new(0451)); 64 | //! 65 | //! let other_gc = my_gc.clone(); // shallow copy 66 | //! other_gc.set(512); 67 | //! 68 | //! assert_eq!(my_gc.get(), 512); 69 | //! ``` 70 | //! 71 | //! For data which is shared across threads, you can use [`sync::Gc`] with the exact same API. 72 | //! 73 | //! ``` 74 | //! use dumpster::sync::Gc; 75 | //! use std::sync::Mutex; 76 | //! 77 | //! let my_shared_gc = Gc::new(Mutex::new(25)); 78 | //! let other_shared_gc = my_shared_gc.clone(); 79 | //! 80 | //! std::thread::scope(|s| { 81 | //! s.spawn(move || { 82 | //! *other_shared_gc.lock().unwrap() = 35; 83 | //! }); 84 | //! }); 85 | //! 86 | //! println!("{}", *my_shared_gc.lock().unwrap()); 87 | //! ``` 88 | //! 89 | //! It's trivial to use custom data structures with the provided derive macro. 90 | //! 91 | //! ``` 92 | //! use dumpster::{unsync::Gc, Trace}; 93 | //! use std::cell::RefCell; 94 | //! 95 | //! #[derive(Trace)] 96 | //! struct Foo { 97 | //! refs: RefCell>>, 98 | //! } 99 | //! 100 | //! let foo = Gc::new(Foo { 101 | //! refs: RefCell::new(Vec::new()), 102 | //! }); 103 | //! 104 | //! foo.refs.borrow_mut().push(foo.clone()); 105 | //! 106 | //! drop(foo); 107 | //! 108 | //! 
// even though foo had a self reference, it still got collected! 109 | //! ``` 110 | //! 111 | //! # Installation 112 | //! 113 | //! To use `dumpster`, add the following lines to your `Cargo.toml`. 114 | //! 115 | //! ```toml 116 | //! [dependencies] 117 | //! dumpster = "1.1.0" 118 | //! ``` 119 | //! 120 | //! # Optional features 121 | //! 122 | //! ## `derive` 123 | //! 124 | //! `derive` is enabled by default. 125 | //! It enables the derive macro for `Trace`, which makes it easy for users to implement their 126 | //! own Trace types. 127 | //! 128 | //! ``` 129 | //! use dumpster::{unsync::Gc, Trace}; 130 | //! use std::cell::RefCell; 131 | //! 132 | //! #[derive(Trace)] // no manual implementation required 133 | //! struct Foo(RefCell>>); 134 | //! 135 | //! let my_foo = Gc::new(Foo(RefCell::new(None))); 136 | //! *my_foo.0.borrow_mut() = Some(my_foo.clone()); 137 | //! 138 | //! drop(my_foo); // my_foo will be automatically cleaned up 139 | //! ``` 140 | //! 141 | //! ## `either` 142 | //! 143 | //! `either` is disabled by default. It adds support for the [`either`](https://crates.io/crates/either) crate, 144 | //! specifically by implementing [`Trace`] for [`either::Either`](https://docs.rs/either/1.13.0/either/enum.Either.html). 145 | //! 146 | //! ## `coerce-unsized` 147 | //! 148 | //! `coerce-unsized` is disabled by default. 149 | //! This enables the implementation of [`std::ops::CoerceUnsized`] for each garbage collector, 150 | //! making it possible to use `Gc` with `!Sized` types conveniently. 151 | #![cfg_attr( 152 | feature = "coerce-unsized", 153 | doc = r##" 154 | ``` 155 | // this only works with "coerce-unsized" enabled while compiling on nightly Rust 156 | use dumpster::unsync::Gc; 157 | 158 | let gc1: Gc<[u8]> = Gc::new([1, 2, 3]); 159 | ``` 160 | "## 161 | )] 162 | //! To use `coerce-unsized`, edit your installation to `Cargo.toml` to include the feature. 163 | //! 164 | //! ```toml 165 | //! [dependencies] 166 | //! dumpster = { version = "1.1.0", features = ["coerce-unsized"]} 167 | //! ``` 168 | //! 169 | //! # License 170 | //! 171 | //! `dumpster` is licensed under the Mozilla Public License, version 2.0. 172 | //! For more details, refer to 173 | //! [LICENSE.md](https://github.com/claytonwramsey/dumpster/blob/master/LICENSE.md). 174 | 175 | #![warn(clippy::pedantic)] 176 | #![warn(clippy::cargo)] 177 | #![warn(missing_docs)] 178 | #![warn(clippy::missing_docs_in_private_items)] 179 | #![allow(clippy::multiple_crate_versions, clippy::result_unit_err)] 180 | #![cfg_attr(feature = "coerce-unsized", feature(coerce_unsized))] 181 | #![cfg_attr(feature = "coerce-unsized", feature(unsize))] 182 | 183 | mod impls; 184 | 185 | mod ptr; 186 | pub mod sync; 187 | pub mod unsync; 188 | 189 | /// The trait that any garbage-Trace data must implement. 190 | /// 191 | /// This trait should usually be implemented by using `#[derive(Trace)]`, using the provided 192 | /// macro. 193 | /// Only data structures using raw pointers or other magic should manually implement `Trace`. 194 | /// 195 | /// # Safety 196 | /// 197 | /// If the implementation of this trait is incorrect, this will result in undefined behavior, 198 | /// typically double-frees or use-after-frees. 199 | /// This includes [`Trace::accept`], even though it is a safe function, since its correctness 200 | /// is required for safety. 201 | /// 202 | /// # Examples 203 | /// 204 | /// Implementing `Trace` for a scalar type which contains no garbage-collected references 205 | /// is very easy. 
206 | /// Accepting a visitor is simply a no-op. 207 | /// 208 | /// ``` 209 | /// use dumpster::{Trace, Visitor}; 210 | /// 211 | /// struct Foo(u8); 212 | /// 213 | /// unsafe impl Trace for Foo { 214 | /// fn accept(&self, visitor: &mut V) -> Result<(), ()> { 215 | /// Ok(()) 216 | /// } 217 | /// } 218 | /// ``` 219 | /// 220 | /// However, if a data structure contains a garbage collected pointer, it must delegate to its 221 | /// fields in `accept`. 222 | /// 223 | /// ``` 224 | /// use dumpster::{unsync::Gc, Trace, Visitor}; 225 | /// 226 | /// struct Bar(Gc); 227 | /// 228 | /// unsafe impl Trace for Bar { 229 | /// fn accept(&self, visitor: &mut V) -> Result<(), ()> { 230 | /// self.0.accept(visitor) 231 | /// } 232 | /// } 233 | /// ``` 234 | /// 235 | /// A data structure with two or more fields which could own a garbage-collected pointer should 236 | /// delegate to both fields in a consistent order: 237 | /// 238 | /// ``` 239 | /// use dumpster::{unsync::Gc, Trace, Visitor}; 240 | /// 241 | /// struct Baz { 242 | /// a: Gc, 243 | /// b: Gc, 244 | /// } 245 | /// 246 | /// unsafe impl Trace for Baz { 247 | /// fn accept(&self, visitor: &mut V) -> Result<(), ()> { 248 | /// self.a.accept(visitor)?; 249 | /// self.b.accept(visitor)?; 250 | /// Ok(()) 251 | /// } 252 | /// } 253 | /// ``` 254 | pub unsafe trait Trace { 255 | /// Accept a visitor to this garbage-collected value. 256 | /// 257 | /// Implementors of this function need only delegate to all fields owned by this value which 258 | /// may contain a garbage-collected reference (either a [`sync::Gc`] or a [`unsync::Gc`]). 259 | /// 260 | /// For structures which have more than one field, they should return immediately after the 261 | /// first `Err` is returned from one of its fields. 262 | /// To do so efficiently, we recommend using the try operator (`?`) on each field and then 263 | /// returning `Ok(())` after delegating to each field. 264 | /// 265 | /// # Errors 266 | /// 267 | /// Errors are returned from this function whenever a field of this object returns an error 268 | /// after delegating acceptance to it, or if this value's data is inaccessible (such as 269 | /// attempting to borrow from a [`RefCell`](std::cell::RefCell) which has already been 270 | /// mutably borrowed). 271 | fn accept(&self, visitor: &mut V) -> Result<(), ()>; 272 | } 273 | 274 | /// A visitor for a garbage collected value. 275 | /// 276 | /// This visitor allows us to hide details of the implementation of the garbage-collection procedure 277 | /// from implementors of [`Trace`]. 278 | /// 279 | /// When accepted by a `Trace`, this visitor will be delegated down until it reaches a 280 | /// garbage-collected pointer. 281 | /// Then, the garbage-collected pointer will call one of `visit_sync` or `visit_unsync`, depending 282 | /// on which type of pointer it is. 283 | /// 284 | /// In general, it's not expected for consumers of this library to write their own visitors. 285 | pub trait Visitor { 286 | /// Visit a synchronized garbage-collected pointer. 287 | /// 288 | /// This function is called for every [`sync::Gc`] owned by the value that accepted this 289 | /// visitor. 290 | fn visit_sync(&mut self, gc: &sync::Gc) 291 | where 292 | T: Trace + Send + Sync + ?Sized; 293 | 294 | /// Visit a thread-local garbage-collected pointer. 295 | /// 296 | /// This function is called for every [`unsync::Gc`] owned by the value that accepted this 297 | /// visitor. 
298 | fn visit_unsync(&mut self, gc: &unsync::Gc) 299 | where 300 | T: Trace + ?Sized; 301 | } 302 | 303 | // Re-export #[derive(Trace)]. 304 | // 305 | // The reason re-exporting is not enabled by default is that disabling it would 306 | // be annoying for crates that provide handwritten impls or data formats. They 307 | // would need to disable default features and then explicitly re-enable std. 308 | #[cfg(feature = "derive")] 309 | extern crate dumpster_derive; 310 | 311 | #[cfg(feature = "derive")] 312 | /// The derive macro for implementing `Trace`. 313 | /// 314 | /// This enables users of `dumpster` to easily store custom types inside a `Gc`. 315 | /// To do so, simply annotate your type with `#[derive(Trace)]`. 316 | /// 317 | /// # Examples 318 | /// 319 | /// ``` 320 | /// use dumpster::Trace; 321 | /// 322 | /// #[derive(Trace)] 323 | /// struct Foo { 324 | /// bar: Option>, 325 | /// } 326 | /// ``` 327 | pub use dumpster_derive::Trace; 328 | 329 | /// Determine whether some value contains a garbage-collected pointer. 330 | /// 331 | /// This function will return one of three values: 332 | /// - `Ok(true)`: The data structure contains a garbage-collected pointer. 333 | /// - `Ok(false)`: The data structure contains no garbage-collected pointers. 334 | /// - `Err(())`: The data structure was accessed while we checked it for garbage-collected pointers. 335 | fn contains_gcs(x: &T) -> Result { 336 | /// A visitor structure used for determining whether some garbage-collected pointer contains a 337 | /// `Gc` in its pointed-to value. 338 | struct ContainsGcs(bool); 339 | 340 | impl Visitor for ContainsGcs { 341 | fn visit_sync(&mut self, _: &sync::Gc) 342 | where 343 | T: Trace + Send + Sync + ?Sized, 344 | { 345 | self.0 = true; 346 | } 347 | 348 | fn visit_unsync(&mut self, _: &unsync::Gc) 349 | where 350 | T: Trace + ?Sized, 351 | { 352 | self.0 = true; 353 | } 354 | } 355 | 356 | let mut visit = ContainsGcs(false); 357 | x.accept(&mut visit)?; 358 | Ok(visit.0) 359 | } 360 | -------------------------------------------------------------------------------- /dumpster/src/ptr.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! Custom pointer types used by this garbage collector. 10 | 11 | use std::{ 12 | fmt, 13 | mem::{size_of, MaybeUninit}, 14 | ptr::{addr_of, addr_of_mut, copy_nonoverlapping, NonNull}, 15 | }; 16 | 17 | #[repr(C)] 18 | #[derive(Clone, Copy)] 19 | /// A pointer for an allocation, extracted out as raw data. 20 | /// This contains both the pointer and all the pointer's metadata, but hidden behind an unknown 21 | /// interpretation. 22 | /// We trust that all pointers (even to `?Sized` or `dyn` types) are 2 words or fewer in size. 23 | /// This is a hack! Like, a big hack! 24 | pub(crate) struct Erased([usize; 2]); 25 | 26 | impl Erased { 27 | /// Construct a new erased pointer to some data from a reference 28 | /// 29 | /// # Panics 30 | /// 31 | /// This function will panic if the size of a reference is larger than the size of an 32 | /// `ErasedPtr`. 33 | /// To my knowledge, there are no pointer types with this property. 
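    /// # Examples
    ///
    /// A rough sketch of the intended round trip (illustrative only and not a compiled doctest,
    /// since `Erased` is crate-internal; the value `42u32` is arbitrary, and the caller must
    /// uphold the safety contract of [`Erased::specify`]):
    ///
    /// ```ignore
    /// let ptr = NonNull::from(Box::leak(Box::new(42u32)));
    /// let erased = Erased::new(ptr);
    /// // SAFETY: the erased pointer was created from a `NonNull<u32>`.
    /// let restored = unsafe { erased.specify::<u32>() };
    /// assert_eq!(unsafe { *restored.as_ref() }, 42);
    /// // Reclaim the leaked allocation.
    /// drop(unsafe { Box::from_raw(restored.as_ptr()) });
    /// ```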
34 | pub fn new(reference: NonNull) -> Erased { 35 | let mut ptr = Erased([0; 2]); 36 | let ptr_size = size_of::>(); 37 | // Extract out the pointer as raw memory 38 | assert!( 39 | ptr_size <= size_of::(), 40 | "pointers to T are too big for storage" 41 | ); 42 | unsafe { 43 | // SAFETY: We know that `cleanup` has at least as much space as `ptr_size`, and that 44 | // `box_ref` has size equal to `ptr_size`. 45 | copy_nonoverlapping( 46 | addr_of!(reference).cast::(), 47 | addr_of_mut!(ptr.0).cast::(), 48 | ptr_size, 49 | ); 50 | } 51 | 52 | ptr 53 | } 54 | 55 | /// Specify this pointer into a pointer of a particular type. 56 | /// 57 | /// # Safety 58 | /// 59 | /// This function must only be specified to the type that the pointer was constructed with 60 | /// via [`ErasedPtr::new`]. 61 | pub unsafe fn specify(self) -> NonNull { 62 | let mut box_ref: MaybeUninit> = MaybeUninit::zeroed(); 63 | 64 | // For some reason, switching the ordering of casts causes this to create wacky undefined 65 | // behavior. Why? I don't know. I have better things to do than pontificate on this on a 66 | // Sunday afternoon. 67 | copy_nonoverlapping( 68 | addr_of!(self.0).cast::(), 69 | addr_of_mut!(box_ref).cast::(), 70 | size_of::>(), 71 | ); 72 | 73 | box_ref.assume_init() 74 | } 75 | } 76 | 77 | impl fmt::Debug for Erased { 78 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 79 | write!(f, "ErasedPtr({:x?})", self.0) 80 | } 81 | } 82 | 83 | #[cfg(not(feature = "coerce-unsized"))] 84 | /// A nullable pointer to an `?Sized` type. 85 | /// 86 | /// We need this because it's actually impossible to create a null `*mut T` if `T` is `?Sized`. 87 | pub(crate) struct Nullable(Option>); 88 | #[cfg(feature = "coerce-unsized")] 89 | /// A nullable pointer to an `?Sized` type. 90 | /// 91 | /// We need this because it's actually impossible to create a null `*mut T` if `T` is `?Sized`. 92 | pub(crate) struct Nullable(*mut T); 93 | 94 | impl Nullable { 95 | /// Create a new nullable pointer from a non-null pointer. 96 | pub fn new(ptr: NonNull) -> Nullable { 97 | #[cfg(not(feature = "coerce-unsized"))] 98 | { 99 | Nullable(Some(ptr)) 100 | } 101 | #[cfg(feature = "coerce-unsized")] 102 | { 103 | Nullable(ptr.as_ptr()) 104 | } 105 | } 106 | 107 | #[allow(clippy::unused_self)] 108 | /// Convert this pointer to a null pointer. 109 | pub fn as_null(self) -> Nullable { 110 | #[cfg(not(feature = "coerce-unsized"))] 111 | { 112 | Nullable(None) 113 | } 114 | #[cfg(feature = "coerce-unsized")] 115 | { 116 | Nullable(self.0.with_addr(0)) 117 | } 118 | } 119 | 120 | /// Determine whether this pointer is null. 121 | pub fn is_null(self) -> bool { 122 | self.as_option().is_none() 123 | } 124 | 125 | /// Convert this pointer to an `Option>`. 126 | pub fn as_option(self) -> Option> { 127 | #[cfg(not(feature = "coerce-unsized"))] 128 | { 129 | self.0 130 | } 131 | #[cfg(feature = "coerce-unsized")] 132 | { 133 | NonNull::new(self.0) 134 | } 135 | } 136 | 137 | /// Convert this pointer to a `NonNull`, panicking if this pointer is null with message 138 | /// `msg`. 139 | pub fn expect(self, msg: &str) -> NonNull { 140 | self.as_option().expect(msg) 141 | } 142 | 143 | /// Convert this pointer to a `NonNull`, panicking if this pointer is null. 
144 | pub fn unwrap(self) -> NonNull { 145 | self.as_option().unwrap() 146 | } 147 | } 148 | 149 | impl Clone for Nullable { 150 | fn clone(&self) -> Self { 151 | *self 152 | } 153 | } 154 | impl Copy for Nullable {} 155 | 156 | #[cfg(feature = "coerce-unsized")] 157 | impl std::ops::CoerceUnsized> for Nullable 158 | where 159 | T: std::marker::Unsize + ?Sized, 160 | U: ?Sized, 161 | { 162 | } 163 | 164 | impl fmt::Debug for Nullable { 165 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 166 | write!(f, "Nullable({:x?})", self.0) 167 | } 168 | } 169 | 170 | #[cfg(test)] 171 | mod tests { 172 | use std::alloc::{dealloc, Layout}; 173 | 174 | use super::*; 175 | 176 | #[test] 177 | fn erased_alloc() { 178 | let orig_ptr = Box::leak(Box::new(7u8)); 179 | let erased_ptr = Erased::new(NonNull::from(orig_ptr)); 180 | 181 | unsafe { 182 | let remade_ptr = erased_ptr.specify::(); 183 | dealloc(remade_ptr.as_ptr(), Layout::for_value(remade_ptr.as_ref())); 184 | } 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /dumpster/src/sync/collect.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! A synchronized collection algorithm. 10 | 11 | use std::{ 12 | alloc::{dealloc, Layout}, 13 | cell::{Cell, RefCell}, 14 | collections::{hash_map::Entry, HashMap}, 15 | mem::{replace, swap, take, transmute}, 16 | ptr::{drop_in_place, NonNull}, 17 | sync::{ 18 | atomic::{AtomicPtr, AtomicUsize, Ordering}, 19 | LazyLock, 20 | }, 21 | }; 22 | 23 | use parking_lot::{Mutex, RwLock}; 24 | 25 | use crate::{ptr::Erased, Trace, Visitor}; 26 | 27 | use super::{default_collect_condition, CollectCondition, CollectInfo, Gc, GcBox, CURRENT_TAG}; 28 | 29 | /// The garbage truck, which is a global data structure containing information about allocations 30 | /// which might need to be collected. 31 | struct GarbageTruck { 32 | /// The contents of the garbage truck, containing all the allocations which need to be 33 | /// collected and have already been delivered by a [`Dumpster`]. 34 | contents: Mutex>, 35 | /// A lock used for synchronizing threads that are awaiting completion of a collection process. 36 | /// This lock should be acquired for reads by threads running a collection and for writes by 37 | /// threads awaiting collection completion. 38 | collecting_lock: RwLock<()>, 39 | /// The number of [`Gc`]s dropped since the last time [`Dumpster::collect_all()`] was called. 40 | n_gcs_dropped: AtomicUsize, 41 | /// The number of [`Gc`]s currently existing (which have not had their internals replaced with 42 | /// `None`). 43 | n_gcs_existing: AtomicUsize, 44 | /// The function which determines whether a collection should be triggered. 45 | /// This pointer value should always be cast to a [`CollectCondition`], but since `AtomicPtr` 46 | /// doesn't handle function pointers correctly, we just cast to `*mut ()`. 47 | collect_condition: AtomicPtr<()>, 48 | } 49 | 50 | /// A structure containing the global information for the garbage collector. 51 | struct Dumpster { 52 | /// A lookup table for the allocations which may need to be cleaned up later. 
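`sync/collect.rs`, which begins above, splits its bookkeeping between a cheap thread-local `Dumpster` and a global, mutex-guarded `GarbageTruck`, so that the global lock is only taken when a whole batch is delivered. A simplified, standalone sketch of that delivery pattern (the names and the flush threshold here are illustrative, not the crate's internals):

```rust
use std::cell::RefCell;
use std::sync::Mutex;

// Global "truck": shared by all threads behind a mutex.
static TRUCK: Mutex<Vec<u64>> = Mutex::new(Vec::new());

thread_local! {
    // Per-thread "dumpster": cheap to push into, no locking required.
    static DUMPSTER: RefCell<Vec<u64>> = const { RefCell::new(Vec::new()) };
}

fn mark_dirty(id: u64) {
    DUMPSTER.with(|d| {
        let mut local = d.borrow_mut();
        local.push(id);
        // Only take the global lock once the local buffer is "full".
        if local.len() >= 1024 {
            TRUCK.lock().unwrap().append(&mut *local);
        }
    });
}

fn main() {
    for id in 0..2048 {
        mark_dirty(id);
    }
    // Two flushes of 1024 entries each have reached the global truck.
    assert_eq!(TRUCK.lock().unwrap().len(), 2048);
}
```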
53 | contents: RefCell>, 54 | /// The number of times an allocation on this thread has been dropped. 55 | n_drops: Cell, 56 | } 57 | 58 | #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] 59 | /// A unique identifier for an allocation. 60 | struct AllocationId(NonNull>); 61 | 62 | #[derive(Debug)] 63 | /// The information which describes an allocation that may need to be cleaned up later. 64 | struct TrashCan { 65 | /// A pointer to the allocation to be cleaned up. 66 | ptr: Erased, 67 | /// The function which can be used to build a reference graph. 68 | /// This function is safe to call on `ptr`. 69 | dfs_fn: unsafe fn(Erased, &mut HashMap), 70 | } 71 | 72 | #[derive(Debug)] 73 | /// A node in the reference graph, which is constructed while searching for unreachable allocations. 74 | struct AllocationInfo { 75 | /// An erased pointer to the allocation. 76 | ptr: Erased, 77 | /// Function for dropping the allocation when its weak and strong count hits zero. 78 | /// Should have the same behavior as dropping a Gc normally to a reference count of zero. 79 | weak_drop_fn: unsafe fn(Erased), 80 | /// Information about this allocation's reachability. 81 | reachability: Reachability, 82 | } 83 | 84 | #[derive(Debug)] 85 | /// The state of whether an allocation is reachable or of unknown reachability. 86 | enum Reachability { 87 | /// The information describing an allocation whose accessibility is unknown. 88 | Unknown { 89 | /// The IDs for the allocations directly accessible from this allocation. 90 | children: Vec, 91 | /// The number of references in the reference count for this allocation which are 92 | /// "unaccounted," which have not been found while constructing the graph. 93 | /// It is the difference between the allocations indegree in the "true" reference graph vs 94 | /// the one we are currently building. 95 | n_unaccounted: usize, 96 | /// A function used to destroy the allocation. 97 | destroy_fn: unsafe fn(Erased, &HashMap), 98 | }, 99 | /// The allocation here is reachable. 100 | /// No further information is needed. 101 | Reachable, 102 | } 103 | 104 | /// The global garbage truck. 105 | /// All [`TrashCans`] should eventually end up in here. 106 | static GARBAGE_TRUCK: LazyLock = LazyLock::new(|| GarbageTruck { 107 | contents: Mutex::new(HashMap::new()), 108 | collecting_lock: RwLock::new(()), 109 | n_gcs_dropped: AtomicUsize::new(0), 110 | n_gcs_existing: AtomicUsize::new(0), 111 | collect_condition: AtomicPtr::new(default_collect_condition as *mut ()), 112 | }); 113 | 114 | thread_local! { 115 | /// The dumpster for this thread. 116 | /// Allocations which are "dirty" will be transferred to this dumpster before being moved into 117 | /// the garbage truck for final collection. 118 | static DUMPSTER: Dumpster = Dumpster { 119 | contents: RefCell::new(HashMap::new()), 120 | n_drops: Cell::new(0), 121 | }; 122 | 123 | /// Whether the currently-running thread is doing a cleanup. 124 | /// This cannot be stored in `DUMPSTER` because otherwise it would cause weird use-after-drop 125 | /// behavior. 126 | static CLEANING: Cell = const { Cell::new(false) }; 127 | } 128 | 129 | #[allow(clippy::module_name_repetitions)] 130 | /// Collect all allocations in the garbage truck (but not necessarily the dumpster), then await 131 | /// completion of the collection. 
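`TrashCan` and `AllocationInfo` above pair an `Erased` pointer with monomorphized function pointers (`dfs_fn`, `weak_drop_fn`, `destroy_fn`) so that allocations of different concrete types can live in one `HashMap`. A standalone sketch of that type-erasure pattern, with illustrative names and a plain `*mut ()` standing in for `Erased`:

```rust
use std::collections::HashMap;

struct ErasedEntry {
    /// Type-erased pointer to the allocation (stand-in for `Erased`).
    ptr: *mut (),
    /// Monomorphized helper that still knows the real type behind `ptr`.
    drop_fn: unsafe fn(*mut ()),
}

unsafe fn drop_boxed<T>(ptr: *mut ()) {
    // SAFETY: `ptr` was produced by `Box::into_raw::<T>` in `track` below.
    drop(unsafe { Box::from_raw(ptr.cast::<T>()) });
}

fn track<T>(map: &mut HashMap<usize, ErasedEntry>, id: usize, value: T) {
    let ptr = Box::into_raw(Box::new(value)).cast::<()>();
    map.insert(id, ErasedEntry { ptr, drop_fn: drop_boxed::<T> });
}

fn main() {
    let mut map = HashMap::new();
    track(&mut map, 0, String::from("hello"));
    track(&mut map, 1, vec![1u8, 2, 3]);
    // One homogeneous map, heterogeneous allocations behind it.
    for (_, entry) in map {
        unsafe { (entry.drop_fn)(entry.ptr) };
    }
}
```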
132 | /// Ensures that all allocations dropped on the calling thread are cleaned up 133 | pub fn collect_all_await() { 134 | DUMPSTER.with(|d| d.deliver_to(&GARBAGE_TRUCK)); 135 | GARBAGE_TRUCK.collect_all(); 136 | drop(GARBAGE_TRUCK.collecting_lock.read()); 137 | } 138 | 139 | /// Notify that a `Gc` was destroyed, and update the tracking count for the number of dropped and 140 | /// existing `Gc`s. 141 | /// 142 | /// This may trigger a linear-time cleanup of all allocations, but this will be guaranteed to 143 | /// occur with less-than-linear frequency, so it's always O(1). 144 | pub fn notify_dropped_gc() { 145 | GARBAGE_TRUCK.n_gcs_existing.fetch_sub(1, Ordering::Relaxed); 146 | GARBAGE_TRUCK.n_gcs_dropped.fetch_add(1, Ordering::Relaxed); 147 | DUMPSTER.with(|dumpster| { 148 | dumpster.n_drops.set(dumpster.n_drops.get() + 1); 149 | if dumpster.is_full() { 150 | dumpster.deliver_to(&GARBAGE_TRUCK); 151 | } 152 | }); 153 | 154 | let collect_cond = unsafe { 155 | // SAFETY: we only ever store collection conditions in the collect-condition box 156 | transmute::<*mut (), CollectCondition>( 157 | GARBAGE_TRUCK.collect_condition.load(Ordering::Relaxed), 158 | ) 159 | }; 160 | if collect_cond(&CollectInfo { _private: () }) { 161 | GARBAGE_TRUCK.collect_all(); 162 | } 163 | } 164 | 165 | /// Notify that a [`Gc`] was created, and increment the number of total existing `Gc`s. 166 | pub fn notify_created_gc() { 167 | GARBAGE_TRUCK.n_gcs_existing.fetch_add(1, Ordering::Relaxed); 168 | } 169 | 170 | /// Mark an allocation as "dirty," implying that it may or may not be inaccessible and need to 171 | /// be cleaned up. 172 | pub(super) fn mark_dirty(allocation: &GcBox) 173 | where 174 | T: Trace + Send + Sync + ?Sized, 175 | { 176 | DUMPSTER.with(|dumpster| { 177 | if dumpster 178 | .contents 179 | .borrow_mut() 180 | .insert( 181 | AllocationId::from(allocation), 182 | TrashCan { 183 | ptr: Erased::new(NonNull::from(allocation)), 184 | dfs_fn: dfs::, 185 | }, 186 | ) 187 | .is_none() 188 | { 189 | allocation.weak.fetch_add(1, Ordering::Acquire); 190 | } 191 | }); 192 | } 193 | 194 | /// Mark an allocation as "clean," implying that it has already been cleaned up and does not 195 | /// need to be cleaned again. 196 | pub(super) fn mark_clean(allocation: &GcBox) 197 | where 198 | T: Trace + Send + Sync + ?Sized, 199 | { 200 | DUMPSTER.with(|dumpster| { 201 | if dumpster 202 | .contents 203 | .borrow_mut() 204 | .remove(&AllocationId::from(allocation)) 205 | .is_some() 206 | { 207 | allocation.weak.fetch_sub(1, Ordering::Release); 208 | } 209 | }); 210 | } 211 | 212 | #[allow(clippy::missing_panics_doc)] 213 | /// Set the function which determines whether the garbage collector should be run. 214 | /// 215 | /// `f` will be periodically called by the garbage collector to determine whether it should perform 216 | /// a full traversal of the heap. 217 | /// When `f` returns true, a traversal will begin. 218 | /// 219 | /// # Examples 220 | /// 221 | /// ``` 222 | /// use dumpster::sync::{set_collect_condition, CollectInfo}; 223 | /// 224 | /// /// This function will make sure a GC traversal never happens unless directly activated. 225 | /// fn never_collect(_: &CollectInfo) -> bool { 226 | /// false 227 | /// } 228 | /// 229 | /// set_collect_condition(never_collect); 230 | /// ``` 231 | pub fn set_collect_condition(f: CollectCondition) { 232 | GARBAGE_TRUCK 233 | .collect_condition 234 | .store(f as *mut (), Ordering::Relaxed); 235 | } 236 | 237 | /// Determine whether this thread is currently cleaning. 
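`set_collect_condition` above stores a plain function pointer inside an `AtomicPtr<()>` by casting it to `*mut ()`, and `notify_dropped_gc` transmutes it back before calling it. A minimal standalone sketch of that round trip (the `Condition` alias and `always` function are illustrative):

```rust
use std::mem::transmute;
use std::sync::atomic::{AtomicPtr, Ordering};

type Condition = fn(usize) -> bool;

fn always(_: usize) -> bool {
    true
}

fn main() {
    // Erase the function pointer to `*mut ()` so it fits in an `AtomicPtr<()>`.
    let slot: AtomicPtr<()> = AtomicPtr::new(always as *mut ());

    // Later: recover the function pointer and call it.
    // SAFETY: only values of type `Condition` are ever stored in `slot`.
    let f = unsafe { transmute::<*mut (), Condition>(slot.load(Ordering::Relaxed)) };
    assert!(f(7));
}
```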
238 | pub fn currently_cleaning() -> bool { 239 | CLEANING.get() 240 | } 241 | 242 | /// Get the number of `[Gc]`s dropped since the last collection. 243 | pub fn n_gcs_dropped() -> usize { 244 | GARBAGE_TRUCK.n_gcs_dropped.load(Ordering::Relaxed) 245 | } 246 | 247 | /// Get the number of `[Gc]`s currently existing in the entire program. 248 | pub fn n_gcs_existing() -> usize { 249 | GARBAGE_TRUCK.n_gcs_existing.load(Ordering::Relaxed) 250 | } 251 | 252 | impl Dumpster { 253 | /// Deliver all [`TrashCans`] contained by this dumpster to the garbage collect, removing them 254 | /// from the local dumpster storage and adding them to the global truck. 255 | fn deliver_to(&self, garbage_truck: &GarbageTruck) { 256 | self.n_drops.set(0); 257 | let mut guard = garbage_truck.contents.lock(); 258 | for (id, can) in self.contents.borrow_mut().drain() { 259 | if guard.insert(id, can).is_some() { 260 | unsafe { 261 | // SAFETY: an allocation can only be in the dumpster if it still exists and its 262 | // header is valid 263 | id.0.as_ref() 264 | } 265 | .weak 266 | .fetch_sub(1, Ordering::Release); 267 | } 268 | } 269 | } 270 | 271 | /// Determine whether this dumpster is full (and therefore should have its contents delivered to 272 | /// the garbage truck). 273 | fn is_full(&self) -> bool { 274 | self.contents.borrow().len() > 100_000 || self.n_drops.get() > 100_000 275 | } 276 | } 277 | 278 | impl GarbageTruck { 279 | #[allow(clippy::module_name_repetitions)] 280 | /// Search through the set of existing allocations which have been marked inaccessible, and see 281 | /// if they are inaccessible. 282 | /// If so, drop those allocations. 283 | fn collect_all(&self) { 284 | let collecting_guard = self.collecting_lock.write(); 285 | self.n_gcs_dropped.store(0, Ordering::Relaxed); 286 | let to_collect = take(&mut *self.contents.lock()); 287 | let mut ref_graph = HashMap::with_capacity(to_collect.len()); 288 | 289 | CURRENT_TAG.fetch_add(1, Ordering::Release); 290 | 291 | for (_, TrashCan { ptr, dfs_fn }) in to_collect { 292 | unsafe { 293 | // SAFETY: `ptr` may only be in `to_collect` if it was a valid pointer 294 | // and `dfs_fn` must have been created with the intent of referring to 295 | // the erased type of `ptr`. 296 | dfs_fn(ptr, &mut ref_graph); 297 | } 298 | } 299 | 300 | let root_ids = ref_graph 301 | .iter() 302 | .filter_map(|(&k, v)| match v.reachability { 303 | Reachability::Reachable => Some(k), 304 | Reachability::Unknown { n_unaccounted, .. } => (n_unaccounted > 0 305 | || unsafe { 306 | // SAFETY: we found `k` in the reference graph, 307 | // so it must still be an extant allocation 308 | k.0.as_ref().weak.load(Ordering::Acquire) > 1 309 | }) 310 | .then_some(k), 311 | }) 312 | .collect::>(); 313 | for root_id in root_ids { 314 | mark(root_id, &mut ref_graph); 315 | } 316 | 317 | CLEANING.set(true); 318 | // set of allocations which must be destroyed because we were the last weak pointer to it 319 | let mut weak_destroys = Vec::new(); 320 | for (id, node) in &ref_graph { 321 | let header_ref = unsafe { id.0.as_ref() }; 322 | match node.reachability { 323 | Reachability::Unknown { destroy_fn, .. 
} => unsafe { 324 | // SAFETY: `destroy_fn` must have been created with `node.ptr` in mind, 325 | // and we have proven that no other references to `node.ptr` exist 326 | destroy_fn(node.ptr, &ref_graph); 327 | }, 328 | Reachability::Reachable => { 329 | if header_ref.weak.fetch_sub(1, Ordering::Release) == 1 330 | && header_ref.strong.load(Ordering::Acquire) == 0 331 | { 332 | // we are the last reference to the allocation. 333 | // mark to be cleaned up later 334 | // no real synchronization loss to storing the guard because we had the last 335 | // reference anyway 336 | weak_destroys.push((node.weak_drop_fn, node.ptr)); 337 | } 338 | } 339 | }; 340 | } 341 | CLEANING.set(false); 342 | for (drop_fn, ptr) in weak_destroys { 343 | unsafe { 344 | // SAFETY: we have proven (via header_ref.weak = 1) that the cleaning 345 | // process had the last reference to the allocation. 346 | // `drop_fn` must have been created with the true value of `ptr` in mind. 347 | drop_fn(ptr); 348 | }; 349 | } 350 | drop(collecting_guard); 351 | } 352 | } 353 | 354 | /// Build out a part of the reference graph, making note of all allocations which are reachable from 355 | /// the one described in `ptr`. 356 | /// 357 | /// # Inputs 358 | /// 359 | /// - `ptr`: A pointer to the allocation that we should start constructing from. 360 | /// - `ref_graph`: A lookup from allocation IDs to node information about that allocation. 361 | /// 362 | /// # Effects 363 | /// 364 | /// `ref_graph` will be expanded to include all allocations reachable from `ptr`. 365 | /// 366 | /// # Safety 367 | /// 368 | /// `ptr` must have been created as a pointer to a `GcBox`. 369 | unsafe fn dfs( 370 | ptr: Erased, 371 | ref_graph: &mut HashMap, 372 | ) { 373 | let box_ref = unsafe { 374 | // SAFETY: We require `ptr` to be a an erased pointer to `GcBox`. 375 | ptr.specify::>().as_ref() 376 | }; 377 | let starting_id = AllocationId::from(box_ref); 378 | let Entry::Vacant(v) = ref_graph.entry(starting_id) else { 379 | // the weak count was incremented by another DFS operation elsewhere. 380 | // Decrement it to have only one from us. 381 | box_ref.weak.fetch_sub(1, Ordering::Release); 382 | return; 383 | }; 384 | let strong_count = box_ref.strong.load(Ordering::Acquire); 385 | v.insert(AllocationInfo { 386 | ptr, 387 | weak_drop_fn: drop_weak_zero::, 388 | reachability: Reachability::Unknown { 389 | children: Vec::new(), 390 | n_unaccounted: strong_count, 391 | destroy_fn: destroy_erased::, 392 | }, 393 | }); 394 | 395 | if box_ref 396 | .value 397 | .accept(&mut Dfs { 398 | ref_graph, 399 | current_id: starting_id, 400 | }) 401 | .is_err() 402 | || box_ref.generation.load(Ordering::Acquire) >= CURRENT_TAG.load(Ordering::Relaxed) 403 | { 404 | // box_ref.value was accessed while we worked 405 | // mark this allocation as reachable 406 | mark(starting_id, ref_graph); 407 | } 408 | } 409 | 410 | #[derive(Debug)] 411 | /// The visitor structure used for building the found-reference-graph of allocations. 412 | struct Dfs<'a> { 413 | /// The reference graph. 414 | /// Each allocation is assigned a node. 415 | ref_graph: &'a mut HashMap, 416 | /// The allocation ID currently being visited. 417 | /// Used for knowing which node is the parent of another. 
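One detail of `collect_all` worth calling out: as implemented above, the collector holds the `collecting_lock` write guard for the entire sweep, while `collect_all_await` simply acquires and drops a read guard to block until any in-flight sweep has finished. A standalone sketch of that waiting pattern:

```rust
use std::sync::RwLock;
use std::thread;

// The collector holds the write lock while sweeping; a thread that wants to
// wait for an in-flight sweep just takes (and immediately drops) a read lock.
static COLLECTING: RwLock<()> = RwLock::new(());

fn collect() {
    let _guard = COLLECTING.write().unwrap();
    // ... sweep the heap while holding the write lock ...
}

fn await_collection() {
    drop(COLLECTING.read().unwrap());
}

fn main() {
    let collector = thread::spawn(collect);
    await_collection(); // returns once no sweep is running
    collector.join().unwrap();
}
```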
418 | current_id: AllocationId, 419 | } 420 | 421 | impl<'a> Visitor for Dfs<'a> { 422 | fn visit_sync(&mut self, gc: &Gc) 423 | where 424 | T: Trace + Send + Sync + ?Sized, 425 | { 426 | // must not use deref operators since we don't want to update the generation 427 | let ptr = unsafe { 428 | // SAFETY: This is the same as the deref implementation, but avoids 429 | // incrementing the generation count. 430 | (*gc.ptr.get()).unwrap() 431 | }; 432 | let box_ref = unsafe { 433 | // SAFETY: same as above. 434 | ptr.as_ref() 435 | }; 436 | let current_tag = CURRENT_TAG.load(Ordering::Relaxed); 437 | if gc.tag.swap(current_tag, Ordering::Relaxed) >= current_tag 438 | || box_ref.generation.load(Ordering::Acquire) >= current_tag 439 | { 440 | // This pointer was already tagged by this sweep, so it must have been moved by 441 | mark(self.current_id, self.ref_graph); 442 | return; 443 | } 444 | 445 | let mut new_id = AllocationId::from(box_ref); 446 | 447 | let Reachability::Unknown { 448 | ref mut children, .. 449 | } = self 450 | .ref_graph 451 | .get_mut(&self.current_id) 452 | .unwrap() 453 | .reachability 454 | else { 455 | // this node has been proven reachable by something higher up. No need to keep building 456 | // its ref graph 457 | return; 458 | }; 459 | children.push(new_id); 460 | 461 | match self.ref_graph.entry(new_id) { 462 | Entry::Occupied(mut o) => match o.get_mut().reachability { 463 | Reachability::Unknown { 464 | ref mut n_unaccounted, 465 | .. 466 | } => { 467 | *n_unaccounted -= 1; 468 | } 469 | Reachability::Reachable => (), 470 | }, 471 | Entry::Vacant(v) => { 472 | // This allocation has never been visited by the reference graph builder 473 | let strong_count = box_ref.strong.load(Ordering::Acquire); 474 | box_ref.weak.fetch_add(1, Ordering::Acquire); 475 | v.insert(AllocationInfo { 476 | ptr: Erased::new(ptr), 477 | weak_drop_fn: drop_weak_zero::, 478 | reachability: Reachability::Unknown { 479 | children: Vec::new(), 480 | n_unaccounted: strong_count - 1, 481 | destroy_fn: destroy_erased::, 482 | }, 483 | }); 484 | 485 | // Save the previously visited ID, then carry on to the next one 486 | swap(&mut new_id, &mut self.current_id); 487 | 488 | if box_ref.value.accept(self).is_err() 489 | || box_ref.generation.load(Ordering::Acquire) >= current_tag 490 | { 491 | // On failure, this means `**gc` is accessible, and should be marked 492 | // as such 493 | mark(self.current_id, self.ref_graph); 494 | } 495 | 496 | // Restore current_id and carry on 497 | swap(&mut new_id, &mut self.current_id); 498 | } 499 | }; 500 | } 501 | 502 | fn visit_unsync(&mut self, _: &crate::unsync::Gc) 503 | where 504 | T: Trace + ?Sized, 505 | { 506 | unreachable!("sync Gc cannot own an unsync Gc"); 507 | } 508 | } 509 | 510 | /// Traverse the reference graph, marking `root` and any allocations reachable from `root` as 511 | /// reachable. 512 | fn mark(root: AllocationId, graph: &mut HashMap) { 513 | let node = graph.get_mut(&root).unwrap(); 514 | if let Reachability::Unknown { children, .. } = 515 | replace(&mut node.reachability, Reachability::Reachable) 516 | { 517 | for child in children { 518 | mark(child, graph); 519 | } 520 | } 521 | } 522 | 523 | /// Destroy an allocation, obliterating its GCs, dropping it, and deallocating it. 524 | /// 525 | /// # Safety 526 | /// 527 | /// `ptr` must have been created from a pointer to a `GcBox`. 528 | unsafe fn destroy_erased( 529 | ptr: Erased, 530 | graph: &HashMap, 531 | ) { 532 | /// A visitor for decrementing the reference count of pointees. 
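Putting `dfs`, the `n_unaccounted` bookkeeping, and `mark` together: an allocation is treated as a root if part of its strong count cannot be explained by edges found inside other tracked allocations (or if outstanding weak references exist), and everything not reachable from a root is a dead cycle. A toy, standalone model of that rule on plain integer IDs (not the crate's types):

```rust
use std::collections::{HashMap, HashSet};

/// Toy model: given strong counts and the edges discovered by tracing, return
/// the IDs that are unreachable from any root and can therefore be destroyed.
fn unreachable_ids(
    strong: &HashMap<u32, usize>,   // allocation id -> strong reference count
    edges: &HashMap<u32, Vec<u32>>, // allocation id -> ids it points to
) -> Vec<u32> {
    // "Unaccounted" references: strong count minus incoming edges we found.
    let mut unaccounted = strong.clone();
    for children in edges.values() {
        for child in children {
            if let Some(n) = unaccounted.get_mut(child) {
                *n = n.saturating_sub(1);
            }
        }
    }

    // Mark phase: flood-fill reachability from every root.
    let mut reachable = HashSet::new();
    let mut stack: Vec<u32> = unaccounted
        .iter()
        .filter(|&(_, &n)| n > 0)
        .map(|(&id, _)| id)
        .collect();
    while let Some(id) = stack.pop() {
        if reachable.insert(id) {
            stack.extend(edges.get(&id).into_iter().flatten().copied());
        }
    }

    strong.keys().copied().filter(|id| !reachable.contains(id)).collect()
}

fn main() {
    // Two allocations that point only at each other: a dead cycle.
    let strong = HashMap::from([(1, 1), (2, 1)]);
    let edges = HashMap::from([(1, vec![2]), (2, vec![1])]);
    assert_eq!(unreachable_ids(&strong, &edges).len(), 2);

    // Add an external reference to allocation 1: now both are reachable.
    let strong = HashMap::from([(1, 2), (2, 1)]);
    assert!(unreachable_ids(&strong, &edges).is_empty());
}
```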
533 | struct PrepareForDestruction<'a> { 534 | /// The reference graph. 535 | /// Must have been populated with reachability already. 536 | graph: &'a HashMap, 537 | } 538 | 539 | impl Visitor for PrepareForDestruction<'_> { 540 | fn visit_sync(&mut self, gc: &crate::sync::Gc) 541 | where 542 | T: Trace + Send + Sync + ?Sized, 543 | { 544 | let id = AllocationId::from(unsafe { 545 | // SAFETY: This is the same as dereferencing the GC. 546 | (*gc.ptr.get()).unwrap() 547 | }); 548 | if matches!(self.graph[&id].reachability, Reachability::Reachable) { 549 | unsafe { 550 | // SAFETY: This is the same as dereferencing the GC. 551 | id.0.as_ref().strong.fetch_sub(1, Ordering::Release); 552 | } 553 | } else { 554 | unsafe { 555 | // SAFETY: The GC is unreachable, 556 | // so the GC will never be dereferenced again. 557 | gc.ptr.get().write((*gc.ptr.get()).as_null()); 558 | } 559 | } 560 | } 561 | 562 | fn visit_unsync(&mut self, _: &crate::unsync::Gc) 563 | where 564 | T: Trace + ?Sized, 565 | { 566 | unreachable!("no unsync members of sync Gc possible!"); 567 | } 568 | } 569 | 570 | let specified = ptr.specify::>().as_mut(); 571 | specified 572 | .value 573 | .accept(&mut PrepareForDestruction { graph }) 574 | .expect("allocation assumed to be unreachable but somehow was accessed"); 575 | let layout = Layout::for_value(specified); 576 | drop_in_place(specified); 577 | dealloc(std::ptr::from_mut::>(specified).cast(), layout); 578 | } 579 | 580 | /// Function for handling dropping an allocation when its weak and strong reference count reach 581 | /// zero. 582 | /// 583 | /// # Safety 584 | /// 585 | /// `ptr` must have been created as a pointer to a `GcBox`. 586 | unsafe fn drop_weak_zero(ptr: Erased) { 587 | let mut specified = ptr.specify::>(); 588 | assert_eq!(specified.as_ref().weak.load(Ordering::Relaxed), 0); 589 | assert_eq!(specified.as_ref().strong.load(Ordering::Relaxed), 0); 590 | 591 | let layout = Layout::for_value(specified.as_ref()); 592 | drop_in_place(specified.as_mut()); 593 | dealloc(specified.as_ptr().cast(), layout); 594 | } 595 | 596 | unsafe impl Send for AllocationId {} 597 | unsafe impl Sync for AllocationId {} 598 | 599 | impl From<&GcBox> for AllocationId 600 | where 601 | T: Trace + Send + Sync + ?Sized, 602 | { 603 | fn from(value: &GcBox) -> Self { 604 | AllocationId(NonNull::from(value).cast()) 605 | } 606 | } 607 | 608 | impl From>> for AllocationId 609 | where 610 | T: Trace + Send + Sync + ?Sized, 611 | { 612 | fn from(value: NonNull>) -> Self { 613 | AllocationId(value.cast()) 614 | } 615 | } 616 | 617 | impl Drop for Dumpster { 618 | fn drop(&mut self) { 619 | self.deliver_to(&GARBAGE_TRUCK); 620 | // collect_all(); 621 | } 622 | } 623 | 624 | impl Drop for GarbageTruck { 625 | fn drop(&mut self) { 626 | GARBAGE_TRUCK.collect_all(); 627 | } 628 | } 629 | -------------------------------------------------------------------------------- /dumpster/src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! Thread-safe shared garbage collection. 10 | //! 11 | //! Most users of this module will be interested in using [`Gc`] directly out of the box - this will 12 | //! just work. 13 | //! 
Those with more particular needs (such as benchmarking) should turn toward 14 | //! [`set_collect_condition`] in order to tune exactly when the garbage collector does cleanups. 15 | //! 16 | //! # Examples 17 | //! 18 | //! ``` 19 | //! use dumpster::sync::Gc; 20 | //! 21 | //! let my_gc = Gc::new(100); 22 | //! let other_gc = my_gc.clone(); 23 | //! 24 | //! drop(my_gc); 25 | //! drop(other_gc); 26 | //! 27 | //! // contents of the Gc are automatically freed 28 | //! ``` 29 | 30 | mod collect; 31 | #[cfg(test)] 32 | mod tests; 33 | 34 | use std::{ 35 | alloc::{dealloc, Layout}, 36 | borrow::Borrow, 37 | cell::UnsafeCell, 38 | fmt::Debug, 39 | num::NonZeroUsize, 40 | ops::Deref, 41 | ptr::{addr_of, addr_of_mut, drop_in_place, NonNull}, 42 | sync::atomic::{fence, AtomicUsize, Ordering}, 43 | }; 44 | 45 | use crate::{contains_gcs, ptr::Nullable, Trace, Visitor}; 46 | 47 | use self::collect::{ 48 | collect_all_await, currently_cleaning, mark_clean, mark_dirty, n_gcs_dropped, n_gcs_existing, 49 | notify_created_gc, notify_dropped_gc, 50 | }; 51 | 52 | /// A thread-safe garbage-collected pointer. 53 | /// 54 | /// This pointer can be duplicated and then shared across threads. 55 | /// Garbage collection is performed concurrently. 56 | /// 57 | /// # Examples 58 | /// 59 | /// ``` 60 | /// use dumpster::sync::Gc; 61 | /// use std::sync::atomic::{AtomicUsize, Ordering}; 62 | /// 63 | /// let shared = Gc::new(AtomicUsize::new(0)); 64 | /// 65 | /// std::thread::scope(|s| { 66 | /// s.spawn(|| { 67 | /// let other_gc = shared.clone(); 68 | /// other_gc.store(1, Ordering::Relaxed); 69 | /// }); 70 | /// 71 | /// shared.store(2, Ordering::Relaxed); 72 | /// }); 73 | /// 74 | /// println!("{}", shared.load(Ordering::Relaxed)); 75 | /// ``` 76 | /// 77 | /// # Interaction with `Drop` 78 | /// 79 | /// While collecting cycles, it's possible for a `Gc` to exist that points to some deallocated 80 | /// object. 81 | /// To prevent undefined behavior, these `Gc`s are marked as dead during collection and rendered 82 | /// inaccessible. 83 | /// Dereferencing or cloning a `Gc` during the `Drop` implementation of a `Trace` type could 84 | /// result in the program panicking to keep the program from accessing memory after freeing it. 85 | /// If you're accessing a `Gc` during a `Drop` implementation, make sure to use the fallible 86 | /// operations [`Gc::try_deref`] and [`Gc::try_clone`]. 87 | pub struct Gc { 88 | /// The pointer to the allocation. 89 | ptr: UnsafeCell>>, 90 | /// The tag information of this pointer, used for mutation detection when marking. 91 | tag: AtomicUsize, 92 | } 93 | 94 | /// The tag of the current sweep operation. 95 | /// All new allocations are minted with the current tag. 96 | static CURRENT_TAG: AtomicUsize = AtomicUsize::new(0); 97 | 98 | #[repr(C)] 99 | /// The backing allocation for a [`Gc`]. 100 | struct GcBox 101 | where 102 | T: Trace + Send + Sync + ?Sized, 103 | { 104 | /// The "strong" count, which is the number of extant `Gc`s to this allocation. 105 | /// If the strong count is zero, a value contained in the allocation may be dropped, but the 106 | /// allocation itself must still be valid. 107 | strong: AtomicUsize, 108 | /// The "weak" count, which is the number of references to this allocation stored in to-collect 109 | /// buffers by the collection algorithm. 110 | /// If the weak count is zero, the allocation may be destroyed. 111 | weak: AtomicUsize, 112 | /// The current generation number of the allocation. 
113 | /// The generation number is assigned to the global generation every time a strong reference is 114 | /// created or destroyed or a `Gc` pointing to this allocation is dereferenced. 115 | generation: AtomicUsize, 116 | /// The actual data stored in the allocation. 117 | value: T, 118 | } 119 | 120 | unsafe impl Send for Gc where T: Trace + Send + Sync + ?Sized {} 121 | unsafe impl Sync for Gc where T: Trace + Send + Sync + ?Sized {} 122 | 123 | /// Begin a collection operation of the allocations on the heap. 124 | /// 125 | /// Due to concurrency issues, this might not collect every single unreachable allocation that 126 | /// currently exists, but often calling `collect()` will get allocations made by this thread. 127 | /// 128 | /// # Examples 129 | /// 130 | /// ``` 131 | /// use dumpster::sync::{collect, Gc}; 132 | /// 133 | /// let gc = Gc::new(vec![1, 2, 3]); 134 | /// drop(gc); 135 | /// 136 | /// collect(); // the vector originally in `gc` _might_ be dropped now, but could be dropped later 137 | /// ``` 138 | pub fn collect() { 139 | collect_all_await(); 140 | } 141 | 142 | #[derive(Debug)] 143 | /// Information passed to a [`CollectCondition`] used to determine whether the garbage collector 144 | /// should start collecting. 145 | /// 146 | /// A `CollectInfo` is exclusively created by being passed as an argument to the collection 147 | /// condition. 148 | /// To set a custom collection condition, refer to [`set_collect_condition`]. 149 | /// 150 | /// # Examples 151 | /// 152 | /// ``` 153 | /// use dumpster::sync::{set_collect_condition, CollectInfo}; 154 | /// 155 | /// fn my_collect_condition(info: &CollectInfo) -> bool { 156 | /// (info.n_gcs_dropped_since_last_collect() + info.n_gcs_existing()) % 2 == 0 157 | /// } 158 | /// 159 | /// set_collect_condition(my_collect_condition); 160 | /// ``` 161 | pub struct CollectInfo { 162 | /// Dummy value so this is a private structure. 163 | _private: (), 164 | } 165 | 166 | /// A function which determines whether the garbage collector should start collecting. 167 | /// This type primarily exists so that it can be used with [`set_collect_condition`]. 168 | /// 169 | /// # Examples 170 | /// 171 | /// ```rust 172 | /// use dumpster::sync::{set_collect_condition, CollectInfo}; 173 | /// 174 | /// fn always_collect(_: &CollectInfo) -> bool { 175 | /// true 176 | /// } 177 | /// 178 | /// set_collect_condition(always_collect); 179 | /// ``` 180 | pub type CollectCondition = fn(&CollectInfo) -> bool; 181 | 182 | #[must_use] 183 | /// The default collection condition used by the garbage collector. 184 | /// 185 | /// There are no guarantees about what this function returns, other than that it will return `true` 186 | /// with sufficient frequency to ensure that all `Gc` operations are amortized _O(1)_ in runtime. 187 | /// 188 | /// This function isn't really meant to be called by users, but rather it's supposed to be handed 189 | /// off to [`set_collect_condition`] to return to the default operating mode of the library. 190 | /// 191 | /// This collection condition applies globally, i.e. to every thread. 192 | /// 193 | /// # Examples 194 | /// 195 | /// ```rust 196 | /// use dumpster::sync::{default_collect_condition, set_collect_condition, CollectInfo}; 197 | /// 198 | /// fn other_collect_condition(info: &CollectInfo) -> bool { 199 | /// info.n_gcs_existing() >= 25 || default_collect_condition(info) 200 | /// } 201 | /// 202 | /// // Use my custom collection condition. 
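The `tag` field on `Gc` and the `generation` field on `GcBox` implement a simple epoch scheme: `CURRENT_TAG` is bumped at the start of every sweep, and every clone, drop, or dereference stamps the allocation with the tag it saw, so the collector can tell that a mutator touched the value mid-sweep and must keep it. A standalone sketch of that idea (illustrative names, not the crate's internals):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

static CURRENT_EPOCH: AtomicUsize = AtomicUsize::new(0);

struct Header {
    generation: AtomicUsize,
}

impl Header {
    // Called on every clone/deref: stamp the allocation with the current epoch.
    fn on_access(&self) {
        self.generation
            .store(CURRENT_EPOCH.load(Ordering::Acquire), Ordering::Release);
    }

    // Checked by the collector: a stamp at or after the sweep's epoch means a
    // mutator touched this allocation concurrently, so it must be kept alive.
    fn touched_during_sweep(&self, sweep_epoch: usize) -> bool {
        self.generation.load(Ordering::Acquire) >= sweep_epoch
    }
}

fn main() {
    let header = Header { generation: AtomicUsize::new(0) };
    // The collector bumps the global epoch at the start of a sweep.
    let sweep_epoch = CURRENT_EPOCH.fetch_add(1, Ordering::Release) + 1;
    assert!(!header.touched_during_sweep(sweep_epoch));
    header.on_access(); // a mutator dereferences a Gc mid-sweep
    assert!(header.touched_during_sweep(sweep_epoch));
}
```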
203 | /// set_collect_condition(other_collect_condition); 204 | /// 205 | /// // I'm sick of the custom collection condition. 206 | /// // Return to the original. 207 | /// set_collect_condition(default_collect_condition); 208 | /// ``` 209 | pub fn default_collect_condition(info: &CollectInfo) -> bool { 210 | info.n_gcs_dropped_since_last_collect() > info.n_gcs_existing() 211 | } 212 | 213 | pub use collect::set_collect_condition; 214 | 215 | impl Gc 216 | where 217 | T: Trace + Send + Sync + ?Sized, 218 | { 219 | /// Construct a new garbage-collected value. 220 | /// 221 | /// # Examples 222 | /// 223 | /// ``` 224 | /// use dumpster::sync::Gc; 225 | /// 226 | /// let _ = Gc::new(0); 227 | /// ``` 228 | pub fn new(value: T) -> Gc 229 | where 230 | T: Sized, 231 | { 232 | notify_created_gc(); 233 | Gc { 234 | ptr: UnsafeCell::new(Nullable::new(NonNull::from(Box::leak(Box::new(GcBox { 235 | strong: AtomicUsize::new(1), 236 | weak: AtomicUsize::new(0), 237 | generation: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)), 238 | value, 239 | }))))), 240 | tag: AtomicUsize::new(0), 241 | } 242 | } 243 | 244 | /// Attempt to dereference this `Gc`. 245 | /// 246 | /// This function will return `None` if `self` is a "dead" `Gc`, which points to an 247 | /// already-deallocated object. 248 | /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a 249 | /// [`Trace`] object. 250 | /// 251 | /// For a version which panics instead of returning `None`, consider using [`Deref`]. 252 | /// 253 | /// # Examples 254 | /// 255 | /// For a still-living `Gc`, this always returns `Some`. 256 | /// 257 | /// ``` 258 | /// use dumpster::sync::Gc; 259 | /// 260 | /// let gc1 = Gc::new(0); 261 | /// assert!(Gc::try_deref(&gc1).is_some()); 262 | /// ``` 263 | /// 264 | /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its 265 | /// `Drop` implementation. 266 | /// 267 | /// ``` 268 | /// use dumpster::{sync::Gc, Trace}; 269 | /// use std::sync::Mutex; 270 | /// 271 | /// #[derive(Trace)] 272 | /// struct Cycle(Mutex>>); 273 | /// 274 | /// impl Drop for Cycle { 275 | /// fn drop(&mut self) { 276 | /// let guard = self.0.lock().unwrap(); 277 | /// let maybe_ref = Gc::try_deref(guard.as_ref().unwrap()); 278 | /// assert!(maybe_ref.is_none()); 279 | /// } 280 | /// } 281 | /// 282 | /// let gc1 = Gc::new(Cycle(Mutex::new(None))); 283 | /// *gc1.0.lock().unwrap() = Some(gc1.clone()); 284 | /// # drop(gc1); 285 | /// # dumpster::sync::collect(); 286 | /// ``` 287 | pub fn try_deref(gc: &Gc) -> Option<&T> { 288 | #[allow(clippy::unnecessary_lazy_evaluations)] 289 | unsafe { 290 | (!(*gc.ptr.get()).is_null()).then(|| &**gc) 291 | } 292 | } 293 | 294 | /// Attempt to clone this `Gc`. 295 | /// 296 | /// This function will return `None` if `self` is a "dead" `Gc`, which points to an 297 | /// already-deallocated object. 298 | /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a 299 | /// [`Trace`] object. 300 | /// 301 | /// For a version which panics instead of returning `None`, consider using [`Clone`]. 302 | /// 303 | /// # Examples 304 | /// 305 | /// For a still-living `Gc`, this always returns `Some`. 306 | /// 307 | /// ``` 308 | /// use dumpster::sync::Gc; 309 | /// 310 | /// let gc1 = Gc::new(0); 311 | /// let gc2 = Gc::try_clone(&gc1).unwrap(); 312 | /// ``` 313 | /// 314 | /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its 315 | /// `Drop` implementation. 
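For benchmarking or deterministic tests, the pieces above combine naturally: disable automatic collection with `set_collect_condition`, then trigger sweeps explicitly with `collect`. A small usage sketch:

```rust
use dumpster::sync::{collect, set_collect_condition, CollectInfo, Gc};

/// Never collect automatically; sweeps only happen when `collect()` is called.
fn never(_: &CollectInfo) -> bool {
    false
}

fn main() {
    set_collect_condition(never);

    let gc = Gc::new(vec![1, 2, 3]);
    drop(gc);

    // Nothing has been swept automatically; do it now, deterministically.
    collect();
}
```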
316 | /// 317 | /// ``` 318 | /// use dumpster::{sync::Gc, Trace}; 319 | /// use std::sync::Mutex; 320 | /// 321 | /// #[derive(Trace)] 322 | /// struct Cycle(Mutex>>); 323 | /// 324 | /// impl Drop for Cycle { 325 | /// fn drop(&mut self) { 326 | /// let cloned = Gc::try_clone(self.0.lock().unwrap().as_ref().unwrap()); 327 | /// assert!(cloned.is_none()); 328 | /// } 329 | /// } 330 | /// 331 | /// let gc1 = Gc::new(Cycle(Mutex::new(None))); 332 | /// *gc1.0.lock().unwrap() = Some(gc1.clone()); 333 | /// # drop(gc1); 334 | /// # dumpster::sync::collect(); 335 | /// ``` 336 | pub fn try_clone(gc: &Gc) -> Option> { 337 | unsafe { (!(*gc.ptr.get()).is_null()).then(|| gc.clone()) } 338 | } 339 | 340 | /// Provides a raw pointer to the data. 341 | /// 342 | /// Panics if `self` is a "dead" `Gc`, 343 | /// which points to an already-deallocated object. 344 | /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a 345 | /// [`Trace`] object. 346 | /// 347 | /// # Examples 348 | /// 349 | /// ``` 350 | /// use dumpster::sync::Gc; 351 | /// let x = Gc::new("hello".to_owned()); 352 | /// let y = Gc::clone(&x); 353 | /// let x_ptr = Gc::as_ptr(&x); 354 | /// assert_eq!(x_ptr, Gc::as_ptr(&x)); 355 | /// assert_eq!(unsafe { &*x_ptr }, "hello"); 356 | /// ``` 357 | pub fn as_ptr(gc: &Gc) -> *const T { 358 | unsafe { 359 | let ptr = NonNull::as_ptr((*gc.ptr.get()).unwrap()); 360 | addr_of_mut!((*ptr).value) 361 | } 362 | } 363 | 364 | /// Determine whether two `Gc`s are equivalent by reference. 365 | /// Returns `true` if both `this` and `other` point to the same value, in the same style as 366 | /// [`std::ptr::eq`]. 367 | /// 368 | /// # Examples 369 | /// 370 | /// ``` 371 | /// use dumpster::sync::Gc; 372 | /// 373 | /// let gc1 = Gc::new(0); 374 | /// let gc2 = Gc::clone(&gc1); // points to same spot as `gc1` 375 | /// let gc3 = Gc::new(0); // same value, but points to a different object than `gc1` 376 | /// 377 | /// assert!(Gc::ptr_eq(&gc1, &gc2)); 378 | /// assert!(!Gc::ptr_eq(&gc1, &gc3)); 379 | /// ``` 380 | pub fn ptr_eq(this: &Gc, other: &Gc) -> bool { 381 | unsafe { *this.ptr.get() }.as_option() == unsafe { *other.ptr.get() }.as_option() 382 | } 383 | 384 | /// Get the number of references to the value pointed to by this `Gc`. 385 | /// 386 | /// This does not include internal references generated by the garbage collector. 387 | /// 388 | /// # Panics 389 | /// 390 | /// This function may panic if the `Gc` whose reference count we are loading is "dead" (i.e. 391 | /// generated through a `Drop` implementation). For further reference, take a look at 392 | /// [`Gc::is_dead`]. 393 | /// 394 | /// # Examples 395 | /// 396 | /// ``` 397 | /// use dumpster::sync::Gc; 398 | /// 399 | /// let gc = Gc::new(()); 400 | /// assert_eq!(gc.ref_count().get(), 1); 401 | /// let gc2 = gc.clone(); 402 | /// assert_eq!(gc.ref_count().get(), 2); 403 | /// drop(gc); 404 | /// drop(gc2); 405 | /// ``` 406 | pub fn ref_count(&self) -> NonZeroUsize { 407 | let box_ptr = unsafe { *self.ptr.get() }.expect( 408 | "Attempt to dereference Gc to already-collected object. \ 409 | This means a Gc escaped from a Drop implementation, likely implying a bug in your code.", 410 | ); 411 | let box_ref = unsafe { box_ptr.as_ref() }; 412 | NonZeroUsize::new(box_ref.strong.load(Ordering::Relaxed)) 413 | .expect("strong count to a GcBox may never be zero while a Gc to it exists") 414 | } 415 | 416 | /// Determine whether this is a dead `Gc`. 
417 | /// 418 | /// A `Gc` is dead if it is accessed while the value it points to has been destroyed; this only 419 | /// occurs if one attempts to interact with a `Gc` during a structure's [`Drop`] implementation. 420 | /// However, this is not always guaranteed - sometime the garbage collector will leave `Gc`s 421 | /// alive in differing orders, so users should not rely on the destruction order of `Gc`s to 422 | /// determine whether it is dead. 423 | /// 424 | /// # Examples 425 | /// 426 | /// ``` 427 | /// use dumpster::{sync::Gc, Trace}; 428 | /// use std::sync::OnceLock; 429 | /// 430 | /// #[derive(Trace)] 431 | /// struct Cycle(OnceLock>); 432 | /// 433 | /// impl Drop for Cycle { 434 | /// fn drop(&mut self) { 435 | /// assert!(self.0.get().unwrap().is_dead()); 436 | /// } 437 | /// } 438 | /// 439 | /// let gc1 = Gc::new(Cycle(OnceLock::new())); 440 | /// gc1.0.set(gc1.clone()); 441 | /// # drop(gc1); 442 | /// # dumpster::sync::collect(); 443 | /// ``` 444 | pub fn is_dead(&self) -> bool { 445 | unsafe { *self.ptr.get() }.is_null() 446 | } 447 | } 448 | 449 | impl Clone for Gc 450 | where 451 | T: Trace + Send + Sync + ?Sized, 452 | { 453 | /// Clone a garbage-collected reference. 454 | /// This does not clone the underlying data. 455 | /// 456 | /// # Panics 457 | /// 458 | /// This function will panic if the `Gc` being cloned points to a deallocated object. 459 | /// This is only possible if said `Gc` is accessed during the `Drop` implementation of a 460 | /// `Trace` value. 461 | /// 462 | /// For a fallible version, refer to [`Gc::try_clone`]. 463 | /// 464 | /// # Examples 465 | /// 466 | /// ``` 467 | /// use dumpster::sync::Gc; 468 | /// use std::sync::atomic::{AtomicU8, Ordering}; 469 | /// 470 | /// let gc1 = Gc::new(AtomicU8::new(0)); 471 | /// let gc2 = gc1.clone(); 472 | /// 473 | /// gc1.store(1, Ordering::Relaxed); 474 | /// assert_eq!(gc2.load(Ordering::Relaxed), 1); 475 | /// ``` 476 | /// 477 | /// The following example will fail, because cloning a `Gc` to a deallocated object is wrong. 478 | /// 479 | /// ```should_panic 480 | /// use dumpster::{sync::Gc, Trace}; 481 | /// use std::sync::Mutex; 482 | /// 483 | /// #[derive(Trace)] 484 | /// struct Cycle(Mutex>>); 485 | /// 486 | /// impl Drop for Cycle { 487 | /// fn drop(&mut self) { 488 | /// let _ = self.0.lock().unwrap().as_ref().unwrap().clone(); 489 | /// } 490 | /// } 491 | /// 492 | /// let gc1 = Gc::new(Cycle(Mutex::new(None))); 493 | /// *gc1.0.lock().unwrap() = Some(gc1.clone()); 494 | /// # drop(gc1); 495 | /// # dumpster::sync::collect(); 496 | /// ``` 497 | fn clone(&self) -> Gc { 498 | let box_ref = unsafe { 499 | (*self.ptr.get()).expect("attempt to clone Gc to already-deallocated object. 
\ 500 | This means a Gc was accessed during a Drop implementation, likely implying a bug in your code.").as_ref() 501 | }; 502 | // increment strong count before generation to ensure cleanup never underestimates ref count 503 | box_ref.strong.fetch_add(1, Ordering::Acquire); 504 | box_ref 505 | .generation 506 | .store(CURRENT_TAG.load(Ordering::Acquire), Ordering::Release); 507 | notify_created_gc(); 508 | // mark_clean(box_ref); // causes performance drops 509 | Gc { 510 | ptr: UnsafeCell::new(unsafe { *self.ptr.get() }), 511 | tag: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)), 512 | } 513 | } 514 | } 515 | 516 | impl Drop for Gc 517 | where 518 | T: Trace + Send + Sync + ?Sized, 519 | { 520 | fn drop(&mut self) { 521 | if currently_cleaning() { 522 | return; 523 | } 524 | let Some(mut ptr) = unsafe { *self.ptr.get() }.as_option() else { 525 | return; 526 | }; 527 | let box_ref = unsafe { ptr.as_ref() }; 528 | box_ref.weak.fetch_add(1, Ordering::AcqRel); // ensures that this allocation wasn't freed 529 | // while we weren't looking 530 | box_ref 531 | .generation 532 | .store(CURRENT_TAG.load(Ordering::Relaxed), Ordering::Release); 533 | match box_ref.strong.fetch_sub(1, Ordering::AcqRel) { 534 | 0 => unreachable!("strong cannot reach zero while a Gc to it exists"), 535 | 1 => { 536 | mark_clean(box_ref); 537 | if box_ref.weak.fetch_sub(1, Ordering::Release) == 1 { 538 | // destroyed the last weak reference! we can safely deallocate this 539 | let layout = Layout::for_value(box_ref); 540 | fence(Ordering::Acquire); 541 | unsafe { 542 | drop_in_place(ptr.as_mut()); 543 | dealloc(ptr.as_ptr().cast(), layout); 544 | } 545 | } 546 | } 547 | _ => { 548 | if contains_gcs(&box_ref.value).unwrap_or(true) { 549 | mark_dirty(box_ref); 550 | } 551 | box_ref.weak.fetch_sub(1, Ordering::Release); 552 | } 553 | } 554 | notify_dropped_gc(); 555 | } 556 | } 557 | 558 | impl CollectInfo { 559 | #[must_use] 560 | /// Get the number of times that a [`Gc`] has been dropped since the last time a collection 561 | /// operation was performed. 562 | /// 563 | /// # Examples 564 | /// 565 | /// ``` 566 | /// use dumpster::sync::{set_collect_condition, CollectInfo}; 567 | /// 568 | /// // Collection condition for whether many Gc's have been dropped. 569 | /// fn have_many_gcs_dropped(info: &CollectInfo) -> bool { 570 | /// info.n_gcs_dropped_since_last_collect() > 100 571 | /// } 572 | /// 573 | /// set_collect_condition(have_many_gcs_dropped); 574 | /// ``` 575 | pub fn n_gcs_dropped_since_last_collect(&self) -> usize { 576 | n_gcs_dropped() 577 | } 578 | 579 | #[must_use] 580 | /// Get the total number of [`Gc`]s which currently exist. 581 | /// 582 | /// # Examples 583 | /// 584 | /// ``` 585 | /// use dumpster::sync::{set_collect_condition, CollectInfo}; 586 | /// 587 | /// // Collection condition for whether many Gc's currently exist. 588 | /// fn do_many_gcs_exist(info: &CollectInfo) -> bool { 589 | /// info.n_gcs_existing() > 100 590 | /// } 591 | /// 592 | /// set_collect_condition(do_many_gcs_exist); 593 | /// ``` 594 | pub fn n_gcs_existing(&self) -> usize { 595 | n_gcs_existing() 596 | } 597 | } 598 | 599 | unsafe impl Trace for Gc { 600 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 601 | visitor.visit_sync(self); 602 | Ok(()) 603 | } 604 | } 605 | 606 | impl Deref for Gc { 607 | type Target = T; 608 | 609 | /// Dereference this pointer, creating a reference to the contained value `T`. 
610 | /// 611 | /// # Panics 612 | /// 613 | /// This function may panic if it is called from within the implementation of `std::ops::Drop` 614 | /// of its owning value, since returning such a reference could cause a use-after-free. 615 | /// It is not guaranteed to panic. 616 | /// 617 | /// # Examples 618 | /// 619 | /// The following is a correct time to dereference a `Gc`. 620 | /// 621 | /// ``` 622 | /// use dumpster::sync::Gc; 623 | /// 624 | /// let my_gc = Gc::new(0u8); 625 | /// let my_ref: &u8 = &my_gc; 626 | /// ``` 627 | /// 628 | /// Dereferencing a `Gc` while dropping is not correct. 629 | /// 630 | /// ```should_panic 631 | /// // This is wrong! 632 | /// use dumpster::{sync::Gc, Trace}; 633 | /// use std::sync::Mutex; 634 | /// 635 | /// #[derive(Trace)] 636 | /// struct Bad { 637 | /// s: String, 638 | /// cycle: Mutex>>, 639 | /// } 640 | /// 641 | /// impl Drop for Bad { 642 | /// fn drop(&mut self) { 643 | /// println!("{}", self.cycle.lock().unwrap().as_ref().unwrap().s) 644 | /// } 645 | /// } 646 | /// 647 | /// let foo = Gc::new(Bad { 648 | /// s: "foo".to_string(), 649 | /// cycle: Mutex::new(None), 650 | /// }); 651 | /// ``` 652 | fn deref(&self) -> &Self::Target { 653 | let box_ref = unsafe { 654 | (*self.ptr.get()).expect( 655 | "Attempting to dereference Gc to already-deallocated object.\ 656 | This is caused by accessing a Gc during a Drop implementation, likely implying a bug in your code." 657 | ).as_ref() 658 | }; 659 | let current_tag = CURRENT_TAG.load(Ordering::Acquire); 660 | self.tag.store(current_tag, Ordering::Release); 661 | box_ref.generation.store(current_tag, Ordering::Release); 662 | &box_ref.value 663 | } 664 | } 665 | 666 | impl PartialEq> for Gc 667 | where 668 | T: Trace + Send + Sync + ?Sized + PartialEq, 669 | { 670 | /// Test for equality on two `Gc`s. 671 | /// 672 | /// Two `Gc`s are equal if their inner values are equal, even if they are stored in different 673 | /// allocations. 674 | /// Because `PartialEq` does not imply reflexivity, and there is no current path for trait 675 | /// specialization, this function does not do a "fast-path" check for reference equality. 676 | /// Therefore, if two `Gc`s point to the same allocation, the implementation of `eq` will still 677 | /// require a direct call to `eq` on the values. 678 | /// 679 | /// # Panics 680 | /// 681 | /// This function may panic if it is called from within the implementation of `std::ops::Drop` 682 | /// of its owning value, since returning such a reference could cause a use-after-free. 683 | /// It is not guaranteed to panic. 684 | /// Additionally, if this `Gc` is moved out of an allocation during a `Drop` implementation, it 685 | /// could later cause a panic. 686 | /// For further details, refer to the main documentation for `Gc`. 
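As the `PartialEq` documentation below notes, `eq` never short-circuits on reference equality. If a pointer-identity fast path is acceptable for a particular type, it can be layered on by hand with `Gc::ptr_eq`; a small sketch (the `fast_eq` helper is illustrative, not part of the crate):

```rust
use dumpster::sync::Gc;

/// Compare by identity first, then by value.
fn fast_eq<T>(a: &Gc<T>, b: &Gc<T>) -> bool
where
    T: dumpster::Trace + Send + Sync + PartialEq,
{
    Gc::ptr_eq(a, b) || **a == **b
}

fn main() {
    let a = Gc::new(String::from("hi"));
    let b = a.clone();
    let c = Gc::new(String::from("hi"));
    assert!(fast_eq(&a, &b)); // same allocation: short-circuits on identity
    assert!(fast_eq(&a, &c)); // different allocations, equal values
}
```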
687 | /// 688 | /// ``` 689 | /// use dumpster::sync::Gc; 690 | /// 691 | /// let gc = Gc::new(6); 692 | /// assert!(gc == Gc::new(6)); 693 | /// ``` 694 | fn eq(&self, other: &Gc) -> bool { 695 | self.as_ref() == other.as_ref() 696 | } 697 | } 698 | 699 | impl Eq for Gc where T: Trace + Send + Sync + ?Sized + PartialEq {} 700 | 701 | impl AsRef for Gc { 702 | fn as_ref(&self) -> &T { 703 | self 704 | } 705 | } 706 | 707 | impl Borrow for Gc { 708 | fn borrow(&self) -> &T { 709 | self 710 | } 711 | } 712 | 713 | impl std::fmt::Pointer for Gc { 714 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 715 | std::fmt::Pointer::fmt(&addr_of!(**self), f) 716 | } 717 | } 718 | 719 | #[cfg(feature = "coerce-unsized")] 720 | impl std::ops::CoerceUnsized> for Gc 721 | where 722 | T: std::marker::Unsize + Trace + Send + Sync + ?Sized, 723 | U: Trace + Send + Sync + ?Sized, 724 | { 725 | } 726 | 727 | impl Debug for Gc { 728 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 729 | write!( 730 | f, 731 | "Gc({:?}, {})", 732 | self.ptr, 733 | self.tag.load(Ordering::Acquire) 734 | ) 735 | } 736 | } 737 | -------------------------------------------------------------------------------- /dumpster/src/sync/tests.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | use std::{ 10 | collections::{hash_map::Entry, HashMap}, 11 | mem::{swap, take, transmute, MaybeUninit}, 12 | ptr::NonNull, 13 | sync::{ 14 | atomic::{AtomicUsize, Ordering}, 15 | Mutex, 16 | }, 17 | }; 18 | 19 | use crate::Visitor; 20 | 21 | use super::*; 22 | 23 | struct DropCount<'a>(&'a AtomicUsize); 24 | 25 | impl<'a> Drop for DropCount<'a> { 26 | fn drop(&mut self) { 27 | self.0.fetch_add(1, Ordering::Release); 28 | } 29 | } 30 | 31 | unsafe impl Trace for DropCount<'_> { 32 | fn accept(&self, _: &mut V) -> Result<(), ()> { 33 | Ok(()) 34 | } 35 | } 36 | 37 | struct MultiRef { 38 | refs: Mutex>>, 39 | #[allow(unused)] 40 | count: DropCount<'static>, 41 | } 42 | 43 | unsafe impl Trace for MultiRef { 44 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 45 | self.refs.accept(visitor) 46 | } 47 | } 48 | 49 | #[test] 50 | fn single_alloc() { 51 | static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); 52 | let gc1 = Gc::new(DropCount(&DROP_COUNT)); 53 | 54 | collect(); 55 | assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); 56 | drop(gc1); 57 | collect(); 58 | assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); 59 | } 60 | 61 | #[test] 62 | fn ref_count() { 63 | static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); 64 | let gc1 = Gc::new(DropCount(&DROP_COUNT)); 65 | let gc2 = Gc::clone(&gc1); 66 | 67 | assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); 68 | drop(gc1); 69 | assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); 70 | drop(gc2); 71 | assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); 72 | } 73 | 74 | #[test] 75 | fn self_referential() { 76 | struct Foo(Mutex>>); 77 | static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); 78 | 79 | unsafe impl Trace for Foo { 80 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 81 | self.0.accept(visitor) 82 | } 83 | } 84 | 85 | impl Drop for Foo { 86 | fn drop(&mut self) { 87 | println!("begin increment of 
the drop count!"); 88 | DROP_COUNT.fetch_add(1, Ordering::Release); 89 | } 90 | } 91 | 92 | let gc1 = Gc::new(Foo(Mutex::new(None))); 93 | *gc1.0.lock().unwrap() = Some(Gc::clone(&gc1)); 94 | 95 | assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); 96 | drop(gc1); 97 | collect(); 98 | assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); 99 | } 100 | 101 | #[test] 102 | fn two_cycle() { 103 | static DROP_0: AtomicUsize = AtomicUsize::new(0); 104 | static DROP_1: AtomicUsize = AtomicUsize::new(0); 105 | 106 | let gc0 = Gc::new(MultiRef { 107 | refs: Mutex::new(Vec::new()), 108 | count: DropCount(&DROP_0), 109 | }); 110 | let gc1 = Gc::new(MultiRef { 111 | refs: Mutex::new(vec![Gc::clone(&gc0)]), 112 | count: DropCount(&DROP_1), 113 | }); 114 | gc0.refs.lock().unwrap().push(Gc::clone(&gc1)); 115 | 116 | collect(); 117 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 118 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 119 | drop(gc0); 120 | collect(); 121 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 122 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 123 | drop(gc1); 124 | collect(); 125 | assert_eq!(DROP_0.load(Ordering::Acquire), 1); 126 | assert_eq!(DROP_0.load(Ordering::Acquire), 1); 127 | } 128 | 129 | #[test] 130 | fn self_ref_two_cycle() { 131 | static DROP_0: AtomicUsize = AtomicUsize::new(0); 132 | static DROP_1: AtomicUsize = AtomicUsize::new(0); 133 | 134 | let gc0 = Gc::new(MultiRef { 135 | refs: Mutex::new(Vec::new()), 136 | count: DropCount(&DROP_0), 137 | }); 138 | let gc1 = Gc::new(MultiRef { 139 | refs: Mutex::new(vec![Gc::clone(&gc0)]), 140 | count: DropCount(&DROP_1), 141 | }); 142 | gc0.refs.lock().unwrap().extend([gc0.clone(), gc1.clone()]); 143 | gc1.refs.lock().unwrap().push(gc1.clone()); 144 | 145 | collect(); 146 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 147 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 148 | drop(gc0); 149 | collect(); 150 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 151 | assert_eq!(DROP_0.load(Ordering::Acquire), 0); 152 | drop(gc1); 153 | collect(); 154 | assert_eq!(DROP_0.load(Ordering::Acquire), 1); 155 | assert_eq!(DROP_0.load(Ordering::Acquire), 1); 156 | } 157 | 158 | #[test] 159 | fn parallel_loop() { 160 | static COUNT_1: AtomicUsize = AtomicUsize::new(0); 161 | static COUNT_2: AtomicUsize = AtomicUsize::new(0); 162 | static COUNT_3: AtomicUsize = AtomicUsize::new(0); 163 | static COUNT_4: AtomicUsize = AtomicUsize::new(0); 164 | 165 | let gc1 = Gc::new(MultiRef { 166 | count: DropCount(&COUNT_1), 167 | refs: Mutex::new(Vec::new()), 168 | }); 169 | let gc2 = Gc::new(MultiRef { 170 | count: DropCount(&COUNT_2), 171 | refs: Mutex::new(vec![Gc::clone(&gc1)]), 172 | }); 173 | let gc3 = Gc::new(MultiRef { 174 | count: DropCount(&COUNT_3), 175 | refs: Mutex::new(vec![Gc::clone(&gc1)]), 176 | }); 177 | let gc4 = Gc::new(MultiRef { 178 | count: DropCount(&COUNT_4), 179 | refs: Mutex::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]), 180 | }); 181 | gc1.refs.lock().unwrap().push(Gc::clone(&gc4)); 182 | 183 | assert_eq!(COUNT_1.load(Ordering::Acquire), 0); 184 | assert_eq!(COUNT_2.load(Ordering::Acquire), 0); 185 | assert_eq!(COUNT_3.load(Ordering::Acquire), 0); 186 | assert_eq!(COUNT_4.load(Ordering::Acquire), 0); 187 | drop(gc1); 188 | collect(); 189 | assert_eq!(COUNT_1.load(Ordering::Acquire), 0); 190 | assert_eq!(COUNT_2.load(Ordering::Acquire), 0); 191 | assert_eq!(COUNT_3.load(Ordering::Acquire), 0); 192 | assert_eq!(COUNT_4.load(Ordering::Acquire), 0); 193 | drop(gc2); 194 | collect(); 195 | 
assert_eq!(COUNT_1.load(Ordering::Acquire), 0); 196 | assert_eq!(COUNT_2.load(Ordering::Acquire), 0); 197 | assert_eq!(COUNT_3.load(Ordering::Acquire), 0); 198 | assert_eq!(COUNT_4.load(Ordering::Acquire), 0); 199 | drop(gc3); 200 | collect(); 201 | assert_eq!(COUNT_1.load(Ordering::Acquire), 0); 202 | assert_eq!(COUNT_2.load(Ordering::Acquire), 0); 203 | assert_eq!(COUNT_3.load(Ordering::Acquire), 0); 204 | assert_eq!(COUNT_4.load(Ordering::Acquire), 0); 205 | drop(gc4); 206 | collect(); 207 | assert_eq!(COUNT_1.load(Ordering::Acquire), 1); 208 | assert_eq!(COUNT_2.load(Ordering::Acquire), 1); 209 | assert_eq!(COUNT_3.load(Ordering::Acquire), 1); 210 | assert_eq!(COUNT_4.load(Ordering::Acquire), 1); 211 | } 212 | 213 | #[test] 214 | /// Test that we can drop a Gc which points to some allocation with a locked Mutex inside it 215 | // note: I tried using `ntest::timeout` but for some reason that caused this test to trivially pass. 216 | fn deadlock() { 217 | let gc1 = Gc::new(Mutex::new(())); 218 | let gc2 = gc1.clone(); 219 | 220 | let guard = gc1.lock(); 221 | drop(gc2); 222 | collect(); 223 | drop(guard); 224 | } 225 | 226 | #[test] 227 | fn open_drop() { 228 | static COUNT_1: AtomicUsize = AtomicUsize::new(0); 229 | let gc1 = Gc::new(MultiRef { 230 | refs: Mutex::new(Vec::new()), 231 | count: DropCount(&COUNT_1), 232 | }); 233 | 234 | gc1.refs.lock().unwrap().push(gc1.clone()); 235 | let guard = gc1.refs.lock(); 236 | collect(); 237 | assert_eq!(COUNT_1.load(Ordering::Acquire), 0); 238 | drop(guard); 239 | drop(gc1); 240 | collect(); 241 | 242 | assert_eq!(COUNT_1.load(Ordering::Acquire), 1); 243 | } 244 | 245 | #[test] 246 | #[cfg_attr(miri, ignore = "miri is too slow")] 247 | fn eventually_collect() { 248 | static COUNT_1: AtomicUsize = AtomicUsize::new(0); 249 | static COUNT_2: AtomicUsize = AtomicUsize::new(0); 250 | 251 | let gc1 = Gc::new(MultiRef { 252 | refs: Mutex::new(Vec::new()), 253 | count: DropCount(&COUNT_1), 254 | }); 255 | let gc2 = Gc::new(MultiRef { 256 | refs: Mutex::new(vec![gc1.clone()]), 257 | count: DropCount(&COUNT_2), 258 | }); 259 | gc1.refs.lock().unwrap().push(gc2.clone()); 260 | 261 | assert_eq!(COUNT_1.load(Ordering::Acquire), 0); 262 | assert_eq!(COUNT_2.load(Ordering::Acquire), 0); 263 | 264 | drop(gc1); 265 | drop(gc2); 266 | 267 | for _ in 0..200_000 { 268 | let gc = Gc::new(()); 269 | drop(gc); 270 | } 271 | 272 | // after enough time, gc1 and gc2 should have been collected 273 | assert_eq!(COUNT_1.load(Ordering::Acquire), 1); 274 | assert_eq!(COUNT_2.load(Ordering::Acquire), 1); 275 | } 276 | 277 | #[test] 278 | #[cfg(feature = "coerce-unsized")] 279 | fn coerce_array() { 280 | let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]); 281 | let gc2: Gc<[u8]> = gc1; 282 | assert_eq!(gc2.len(), 3); 283 | assert_eq!( 284 | std::mem::size_of::>(), 285 | 3 * std::mem::size_of::() 286 | ); 287 | } 288 | 289 | #[test] 290 | fn malicious() { 291 | static EVIL: AtomicUsize = AtomicUsize::new(0); 292 | static A_DROP_DETECT: AtomicUsize = AtomicUsize::new(0); 293 | struct A { 294 | x: Gc, 295 | y: Gc, 296 | } 297 | struct X { 298 | a: Mutex>>, 299 | y: NonNull, 300 | } 301 | struct Y { 302 | a: Mutex>>, 303 | } 304 | 305 | unsafe impl Send for X {} 306 | 307 | unsafe impl Trace for A { 308 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 309 | self.x.accept(visitor)?; 310 | self.y.accept(visitor) 311 | } 312 | } 313 | 314 | unsafe impl Trace for X { 315 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 316 | self.a.accept(visitor)?; 317 | 318 | if 
EVIL.fetch_add(1, Ordering::Relaxed) == 1 { 319 | println!("committing evil..."); 320 | // simulates a malicious thread 321 | let y = unsafe { self.y.as_ref() }; 322 | *y.a.lock().unwrap() = (*self.a.lock().unwrap()).take(); 323 | } 324 | 325 | Ok(()) 326 | } 327 | } 328 | 329 | unsafe impl Trace for Y { 330 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 331 | self.a.accept(visitor) 332 | } 333 | } 334 | 335 | unsafe impl Sync for X {} 336 | 337 | impl Drop for A { 338 | fn drop(&mut self) { 339 | A_DROP_DETECT.fetch_add(1, Ordering::Relaxed); 340 | } 341 | } 342 | 343 | let y = Gc::new(Y { 344 | a: Mutex::new(None), 345 | }); 346 | let x = Gc::new(X { 347 | a: Mutex::new(None), 348 | y: NonNull::from(y.as_ref()), 349 | }); 350 | let a = Gc::new(A { x, y }); 351 | *a.x.a.lock().unwrap() = Some(a.clone()); 352 | 353 | collect(); 354 | drop(a.clone()); 355 | EVIL.store(1, Ordering::Relaxed); 356 | collect(); 357 | assert_eq!(A_DROP_DETECT.load(Ordering::Relaxed), 0); 358 | drop(a); 359 | collect(); 360 | assert_eq!(A_DROP_DETECT.load(Ordering::Relaxed), 1); 361 | } 362 | 363 | #[test] 364 | #[cfg_attr(miri, ignore = "miri is too slow")] 365 | #[allow(clippy::too_many_lines)] 366 | fn fuzz() { 367 | const N: usize = 20_000; 368 | static DROP_DETECTORS: [AtomicUsize; N] = { 369 | let mut detectors: [MaybeUninit; N] = 370 | unsafe { transmute(MaybeUninit::<[AtomicUsize; N]>::uninit()) }; 371 | 372 | let mut i = 0; 373 | while i < N { 374 | detectors[i] = MaybeUninit::new(AtomicUsize::new(0)); 375 | i += 1; 376 | } 377 | 378 | unsafe { transmute(detectors) } 379 | }; 380 | 381 | #[derive(Debug)] 382 | struct Alloc { 383 | refs: Mutex>>, 384 | id: usize, 385 | } 386 | 387 | impl Drop for Alloc { 388 | fn drop(&mut self) { 389 | DROP_DETECTORS[self.id].fetch_add(1, Ordering::Relaxed); 390 | } 391 | } 392 | 393 | unsafe impl Trace for Alloc { 394 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 395 | self.refs.accept(visitor) 396 | } 397 | } 398 | 399 | fn dfs(alloc: &Gc, graph: &mut HashMap>) { 400 | if let Entry::Vacant(v) = graph.entry(alloc.id) { 401 | if alloc.id == 2822 || alloc.id == 2814 { 402 | println!("{} - {alloc:?}", alloc.id); 403 | } 404 | v.insert(Vec::new()); 405 | alloc.refs.lock().unwrap().iter().for_each(|a| { 406 | graph.get_mut(&alloc.id).unwrap().push(a.id); 407 | dfs(a, graph); 408 | }); 409 | } 410 | } 411 | 412 | fastrand::seed(12345); 413 | let mut gcs = (0..50) 414 | .map(|i| { 415 | Gc::new(Alloc { 416 | refs: Mutex::new(Vec::new()), 417 | id: i, 418 | }) 419 | }) 420 | .collect::>(); 421 | 422 | let mut next_detector = 50; 423 | for _ in 0..N { 424 | if gcs.is_empty() { 425 | gcs.push(Gc::new(Alloc { 426 | refs: Mutex::new(Vec::new()), 427 | id: next_detector, 428 | })); 429 | next_detector += 1; 430 | } 431 | match fastrand::u8(0..4) { 432 | 0 => { 433 | println!("add gc {next_detector}"); 434 | gcs.push(Gc::new(Alloc { 435 | refs: Mutex::new(Vec::new()), 436 | id: next_detector, 437 | })); 438 | next_detector += 1; 439 | } 440 | 1 => { 441 | if gcs.len() > 1 { 442 | let from = fastrand::usize(0..gcs.len()); 443 | let to = fastrand::usize(0..gcs.len()); 444 | println!("add ref {} -> {}", gcs[from].id, gcs[to].id); 445 | let new_gc = gcs[to].clone(); 446 | let mut guard = gcs[from].refs.lock().unwrap(); 447 | guard.push(new_gc); 448 | } 449 | } 450 | 2 => { 451 | let idx = fastrand::usize(0..gcs.len()); 452 | println!("remove gc {}", gcs[idx].id); 453 | gcs.swap_remove(idx); 454 | } 455 | 3 => { 456 | let from = fastrand::usize(0..gcs.len()); 457 | let 
mut guard = gcs[from].refs.lock().unwrap(); 458 | if !guard.is_empty() { 459 | let to = fastrand::usize(0..guard.len()); 460 | println!("drop ref {} -> {}", gcs[from].id, guard[to].id); 461 | guard.swap_remove(to); 462 | } 463 | } 464 | _ => unreachable!(), 465 | } 466 | } 467 | 468 | let mut graph = HashMap::new(); 469 | graph.insert(9999, Vec::new()); 470 | for alloc in &gcs { 471 | graph.get_mut(&9999).unwrap().push(alloc.id); 472 | dfs(alloc, &mut graph); 473 | } 474 | println!("{graph:#?}"); 475 | 476 | drop(gcs); 477 | collect(); 478 | 479 | let mut n_missing = 0; 480 | for (id, count) in DROP_DETECTORS[..next_detector].iter().enumerate() { 481 | let num = count.load(Ordering::Relaxed); 482 | if num != 1 { 483 | println!("expected 1 for id {id} but got {num}"); 484 | n_missing += 1; 485 | } 486 | } 487 | assert_eq!(n_missing, 0); 488 | } 489 | 490 | #[test] 491 | fn root_canal() { 492 | struct A { 493 | b: Gc, 494 | } 495 | 496 | struct B { 497 | a0: Mutex>>, 498 | a1: Mutex>>, 499 | a2: Mutex>>, 500 | a3: Mutex>>, 501 | } 502 | 503 | unsafe impl Trace for A { 504 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 505 | self.b.accept(visitor) 506 | } 507 | } 508 | 509 | unsafe impl Trace for B { 510 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 511 | let n_prior_visits = B_VISIT_COUNT.fetch_add(1, Ordering::Relaxed); 512 | self.a0.accept(visitor)?; 513 | self.a1.accept(visitor)?; 514 | 515 | // simulate a malicious thread swapping things around 516 | if n_prior_visits == 1 { 517 | println!("committing evil..."); 518 | swap( 519 | &mut *SMUGGLED_POINTERS[0].lock().unwrap(), 520 | &mut *SMUGGLED_POINTERS[1] 521 | .lock() 522 | .unwrap() 523 | .as_ref() 524 | .unwrap() 525 | .b 526 | .a0 527 | .lock() 528 | .unwrap(), 529 | ); 530 | swap(&mut *self.a0.lock().unwrap(), &mut *self.a2.lock().unwrap()); 531 | swap( 532 | &mut *SMUGGLED_POINTERS[0].lock().unwrap(), 533 | &mut *SMUGGLED_POINTERS[1] 534 | .lock() 535 | .unwrap() 536 | .as_ref() 537 | .unwrap() 538 | .b 539 | .a1 540 | .lock() 541 | .unwrap(), 542 | ); 543 | swap(&mut *self.a1.lock().unwrap(), &mut *self.a3.lock().unwrap()); 544 | } 545 | 546 | self.a2.accept(visitor)?; 547 | self.a3.accept(visitor)?; 548 | 549 | // smuggle out some pointers 550 | if n_prior_visits == 0 { 551 | println!("smuggling..."); 552 | *SMUGGLED_POINTERS[0].lock().unwrap() = take(&mut *self.a2.lock().unwrap()); 553 | *SMUGGLED_POINTERS[1].lock().unwrap() = take(&mut *self.a3.lock().unwrap()); 554 | } 555 | 556 | Ok(()) 557 | } 558 | } 559 | 560 | impl Drop for B { 561 | fn drop(&mut self) { 562 | B_DROP_DETECT.fetch_add(1, Ordering::Relaxed); 563 | } 564 | } 565 | 566 | static SMUGGLED_POINTERS: [Mutex>>; 2] = [Mutex::new(None), Mutex::new(None)]; 567 | static B_VISIT_COUNT: AtomicUsize = AtomicUsize::new(0); 568 | static B_DROP_DETECT: AtomicUsize = AtomicUsize::new(0); 569 | 570 | let a = Gc::new(A { 571 | b: Gc::new(B { 572 | a0: Mutex::new(None), 573 | a1: Mutex::new(None), 574 | a2: Mutex::new(None), 575 | a3: Mutex::new(None), 576 | }), 577 | }); 578 | *a.b.a0.lock().unwrap() = Some(a.clone()); 579 | *a.b.a1.lock().unwrap() = Some(a.clone()); 580 | *a.b.a2.lock().unwrap() = Some(a.clone()); 581 | *a.b.a3.lock().unwrap() = Some(a.clone()); 582 | 583 | drop(a.clone()); 584 | collect(); 585 | println!("{}", CURRENT_TAG.load(Ordering::Relaxed)); 586 | 587 | assert!(dbg!(SMUGGLED_POINTERS[0].lock().unwrap().as_ref()).is_some()); 588 | assert!(SMUGGLED_POINTERS[1].lock().unwrap().as_ref().is_some()); 589 | println!("{}", 
B_VISIT_COUNT.load(Ordering::Relaxed)); 590 | 591 | assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0); 592 | drop(a); 593 | assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0); 594 | collect(); 595 | println!("{}", CURRENT_TAG.load(Ordering::Relaxed)); 596 | 597 | assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0); 598 | 599 | *SMUGGLED_POINTERS[0].lock().unwrap() = None; 600 | *SMUGGLED_POINTERS[1].lock().unwrap() = None; 601 | collect(); 602 | 603 | assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 1); 604 | } 605 | 606 | #[test] 607 | #[should_panic = "Attempting to dereference Gc to already-deallocated object.This is caused by accessing a Gc during a Drop implementation, likely implying a bug in your code."] 608 | fn escape_dead_pointer() { 609 | static ESCAPED: Mutex>> = Mutex::new(None); 610 | 611 | struct Escape { 612 | x: u8, 613 | ptr: Mutex>>, 614 | } 615 | 616 | impl Drop for Escape { 617 | fn drop(&mut self) { 618 | let mut escaped_guard = ESCAPED.lock().unwrap(); 619 | if escaped_guard.is_none() { 620 | *escaped_guard = self.ptr.lock().unwrap().take(); 621 | } 622 | } 623 | } 624 | 625 | unsafe impl Trace for Escape { 626 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 627 | self.ptr.accept(visitor) 628 | } 629 | } 630 | 631 | let esc = Gc::new(Escape { 632 | x: 0, 633 | ptr: Mutex::new(None), 634 | }); 635 | 636 | *(*esc).ptr.lock().unwrap() = Some(esc.clone()); 637 | drop(esc); 638 | collect(); 639 | println!("{}", ESCAPED.lock().unwrap().as_ref().unwrap().x); 640 | } 641 | -------------------------------------------------------------------------------- /dumpster/src/unsync/collect.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! Implementations of the single-threaded garbage-collection logic. 10 | 11 | use std::{ 12 | alloc::{dealloc, Layout}, 13 | cell::{Cell, RefCell}, 14 | collections::{hash_map::Entry, HashMap, HashSet}, 15 | num::NonZeroUsize, 16 | ptr::{drop_in_place, NonNull}, 17 | }; 18 | 19 | use crate::{ 20 | ptr::Erased, 21 | unsync::{default_collect_condition, CollectInfo, Gc}, 22 | Trace, Visitor, 23 | }; 24 | 25 | use super::{CollectCondition, GcBox}; 26 | 27 | thread_local! { 28 | /// Whether the current thread is running a cleanup process. 29 | pub(super) static COLLECTING: Cell = const { Cell::new(false) }; 30 | /// The global collection of allocation information for this thread. 31 | pub(super) static DUMPSTER: Dumpster = Dumpster { 32 | to_collect: RefCell::new(HashMap::new()), 33 | n_ref_drops: Cell::new(0), 34 | n_refs_living: Cell::new(0), 35 | collect_condition: Cell::new(default_collect_condition), 36 | }; 37 | } 38 | 39 | /// A dumpster is a collection of all the garbage that may or may not need to be cleaned up. 40 | /// It also contains information relevant to when a cleanup should be triggered. 41 | pub(super) struct Dumpster { 42 | /// A map from allocation IDs for allocations which may need to be collected to pointers to 43 | /// their allocations. 44 | to_collect: RefCell>, 45 | /// The number of times a reference has been dropped since the last collection was triggered. 
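The thread-local `DUMPSTER` above is the heart of the `unsync` collector's bookkeeping: a per-thread registry of "possibly garbage" allocations plus counters that decide when a sweep is worthwhile. A minimal, self-contained sketch of the same pattern (names are illustrative only, not the crate's actual items):

```rust
// Simplified model of the per-thread bookkeeping; not the crate's real code.
use std::cell::{Cell, RefCell};
use std::collections::HashSet;

thread_local! {
    // Allocations that might be part of a dead cycle.
    static PENDING: RefCell<HashSet<usize>> = RefCell::new(HashSet::new());
    // Counters used to decide when a sweep is worthwhile.
    static DROPS_SINCE_SWEEP: Cell<usize> = const { Cell::new(0) };
    static LIVE_HANDLES: Cell<usize> = const { Cell::new(0) };
}

fn note_dropped_handle(id: usize) {
    PENDING.with(|p| {
        p.borrow_mut().insert(id);
    });
    DROPS_SINCE_SWEEP.set(DROPS_SINCE_SWEEP.get() + 1);
    LIVE_HANDLES.set(LIVE_HANDLES.get().saturating_sub(1));
    // Amortized O(1): only sweep when drops outnumber the surviving handles.
    if DROPS_SINCE_SWEEP.get() > LIVE_HANDLES.get() {
        sweep();
    }
}

fn sweep() {
    DROPS_SINCE_SWEEP.set(0);
    let dirty: Vec<usize> = PENDING.with(|p| p.borrow_mut().drain().collect());
    // ... trace from each dirty allocation and free the unreachable ones ...
    let _ = dirty;
}
```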
46 | pub n_ref_drops: Cell, 47 | /// The number of references that currently exist in the entire heap and stack. 48 | pub n_refs_living: Cell, 49 | /// The function for determining whether a collection should be run. 50 | pub collect_condition: Cell, 51 | } 52 | 53 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] 54 | /// A unique identifier for an allocated garbage-collected block. 55 | /// 56 | /// It contains a pointer to the reference count of the allocation. 57 | struct AllocationId(pub NonNull>); 58 | 59 | impl From>> for AllocationId 60 | where 61 | T: Trace + ?Sized, 62 | { 63 | /// Get an allocation ID from a pointer to an allocation. 64 | fn from(value: NonNull>) -> Self { 65 | AllocationId(value.cast()) 66 | } 67 | } 68 | 69 | #[derive(Debug)] 70 | /// The necessary information required to collect some garbage-collected data. 71 | /// This data is stored in a map from allocation IDs to the necessary cleanup operation. 72 | struct Cleanup { 73 | /// The function which is called to build the reference graph and find all allocations 74 | /// reachable from this allocation. 75 | dfs_fn: unsafe fn(Erased, &mut Dfs), 76 | /// The function which is called to mark descendants of this allocation as reachable. 77 | mark_fn: unsafe fn(Erased, &mut Mark), 78 | /// A function used for dropping the allocation. 79 | drop_fn: unsafe fn(Erased, &mut DropAlloc<'_>), 80 | /// An erased pointer to the allocation. 81 | ptr: Erased, 82 | } 83 | 84 | impl Cleanup { 85 | /// Construct a new cleanup for an allocation. 86 | fn new(box_ptr: NonNull>) -> Cleanup { 87 | Cleanup { 88 | dfs_fn: apply_visitor::, 89 | mark_fn: apply_visitor::, 90 | drop_fn: drop_assist::, 91 | ptr: Erased::new(box_ptr), 92 | } 93 | } 94 | } 95 | 96 | /// Apply a visitor to some erased pointer. 97 | /// 98 | /// # Safety 99 | /// 100 | /// `T` must be the same type that `ptr` was created with via [`ErasedPtr::new`]. 101 | unsafe fn apply_visitor(ptr: Erased, visitor: &mut V) { 102 | let specified: NonNull> = ptr.specify(); 103 | let _ = specified.as_ref().value.accept(visitor); 104 | } 105 | 106 | impl Dumpster { 107 | /// Collect all unreachable allocations that this dumpster is responsible for. 
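`Cleanup` erases the allocation's concrete type by capturing monomorphized function pointers at the moment the entry is created, so the map of pending allocations can stay untyped. A stripped-down sketch of that technique, with a hypothetical `ErasedBox` standing in for the crate's `Erased` pointer type:

```rust
// Illustrative sketch of the erased-function-pointer pattern; the type and
// function names here are hypothetical, not the crate's real items.
use std::ptr::NonNull;

#[derive(Clone, Copy)]
struct ErasedBox(NonNull<()>);

impl ErasedBox {
    fn new<T>(ptr: NonNull<T>) -> Self {
        ErasedBox(ptr.cast())
    }
    /// # Safety
    /// `T` must be the type this pointer was created with.
    unsafe fn specify<T>(self) -> NonNull<T> {
        self.0.cast()
    }
}

struct CleanupEntry {
    ptr: ErasedBox,
    // Monomorphized for a concrete `T` when the entry is built.
    visit_fn: unsafe fn(ErasedBox),
}

unsafe fn visit_concrete<T: std::fmt::Debug>(ptr: ErasedBox) {
    // Recovers the concrete type chosen at entry-creation time.
    println!("{:?}", unsafe { ptr.specify::<T>().as_ref() });
}

fn entry_for<T: std::fmt::Debug>(ptr: NonNull<T>) -> CleanupEntry {
    CleanupEntry {
        ptr: ErasedBox::new(ptr),
        visit_fn: visit_concrete::<T>,
    }
}
```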
108 | pub fn collect_all(&self) { 109 | self.n_ref_drops.set(0); 110 | 111 | unsafe { 112 | let mut dfs = Dfs { 113 | visited: HashSet::with_capacity(self.to_collect.borrow().len()), 114 | ref_graph: HashMap::with_capacity(self.to_collect.borrow().len()), 115 | }; 116 | 117 | for (k, v) in &*self.to_collect.borrow() { 118 | if dfs.visited.insert(*k) { 119 | (v.dfs_fn)(v.ptr, &mut dfs); 120 | } 121 | } 122 | 123 | let mut mark = Mark { 124 | visited: HashSet::with_capacity(dfs.visited.len()), 125 | }; 126 | for (id, reachability) in dfs 127 | .ref_graph 128 | .iter() 129 | .filter(|(_, reachability)| reachability.n_unaccounted != 0) 130 | { 131 | mark.visited.insert(*id); 132 | (reachability.mark_fn)(reachability.ptr, &mut mark); 133 | } 134 | 135 | // any allocations which we didn't find must also be roots 136 | for (id, cleanup) in self 137 | .to_collect 138 | .borrow() 139 | .iter() 140 | .filter(|(id, _)| !dfs.ref_graph.contains_key(id)) 141 | { 142 | mark.visited.insert(*id); 143 | (cleanup.mark_fn)(cleanup.ptr, &mut mark); 144 | } 145 | 146 | dfs.visited.clear(); 147 | let mut decrementer = DropAlloc { 148 | visited: dfs.visited, 149 | reachable: &mark.visited, 150 | }; 151 | 152 | COLLECTING.set(true); 153 | for cleanup in self 154 | .to_collect 155 | .borrow_mut() 156 | .drain() 157 | .filter_map(|(id, cleanup)| (!mark.visited.contains(&id)).then_some(cleanup)) 158 | { 159 | (cleanup.drop_fn)(cleanup.ptr, &mut decrementer); 160 | } 161 | COLLECTING.set(false); 162 | } 163 | } 164 | 165 | /// Mark an allocation as "dirty," implying that it may need to be swept through later to find 166 | /// out if it has any references pointing to it. 167 | pub fn mark_dirty(&self, box_ptr: NonNull>) { 168 | self.to_collect 169 | .borrow_mut() 170 | .entry(AllocationId::from(box_ptr)) 171 | .or_insert_with(|| Cleanup::new(box_ptr)); 172 | } 173 | 174 | /// Mark an allocation as "cleaned," implying that the allocation is about to be destroyed and 175 | /// therefore should not be cleaned up later. 176 | pub fn mark_cleaned(&self, box_ptr: NonNull>) { 177 | self.to_collect 178 | .borrow_mut() 179 | .remove(&AllocationId::from(box_ptr)); 180 | } 181 | 182 | /// Notify the dumpster that a garbage-collected pointer has been dropped. 183 | /// 184 | /// This may trigger a cleanup of the heap, but is guaranteed to be amortized to _O(1)_. 185 | pub fn notify_dropped_gc(&self) { 186 | self.n_ref_drops.set(self.n_ref_drops.get() + 1); 187 | let old_refs_living = self.n_refs_living.get(); 188 | assert_ne!( 189 | old_refs_living, 0, 190 | "underflow on unsync::Gc number of living Gcs" 191 | ); 192 | self.n_refs_living.set(old_refs_living - 1); 193 | 194 | // check if it's been a long time since the last time we collected all 195 | // the garbage. 196 | // if so, go and collect it all again (amortized O(1)) 197 | if (self.collect_condition.get())(&CollectInfo { _private: () }) { 198 | self.collect_all(); 199 | } 200 | } 201 | 202 | /// Notify the dumpster that a new [`Gc`] has been created. 203 | pub fn notify_created_gc(&self) { 204 | self.n_refs_living.set(self.n_refs_living.get() + 1); 205 | } 206 | } 207 | 208 | impl Drop for Dumpster { 209 | fn drop(&mut self) { 210 | // cleanup any leftover allocations 211 | self.collect_all(); 212 | } 213 | } 214 | 215 | /// The data required to construct the graph of reachable allocations. 216 | struct Dfs { 217 | /// The set of allocations which have already been visited. 
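`collect_all` proceeds in three phases: a depth-first traversal that records, for each allocation it reaches, how many of its references are not accounted for by other garbage-collected allocations; a marking pass starting from anything with unaccounted references (a root); and a sweep that drops whatever was never marked. The same logic on a toy graph, purely for illustration:

```rust
use std::collections::{HashMap, HashSet};

/// Toy model of the root/mark/sweep logic in `collect_all`: `ref_count` holds
/// the total number of handles to each allocation, `edges` the references
/// stored *inside* allocations. Anything not reachable from a root is garbage.
fn unreachable_ids(
    ref_count: &HashMap<usize, usize>,
    edges: &HashMap<usize, Vec<usize>>,
) -> HashSet<usize> {
    // Phase 1: count the references that the traversal can account for.
    let mut internal: HashMap<usize, usize> = HashMap::new();
    for targets in edges.values() {
        for &t in targets {
            *internal.entry(t).or_insert(0) += 1;
        }
    }
    // Phase 2: mark everything reachable from a root, i.e. an allocation with
    // more handles than internal references (something outside the heap holds it).
    let mut reachable = HashSet::new();
    let mut stack: Vec<usize> = ref_count
        .iter()
        .filter(|(id, count)| **count > internal.get(*id).copied().unwrap_or(0))
        .map(|(id, _)| *id)
        .collect();
    while let Some(id) = stack.pop() {
        if reachable.insert(id) {
            stack.extend(edges.get(&id).into_iter().flatten().copied());
        }
    }
    // Phase 3: everything unmarked is unreachable (dead cycles included).
    ref_count
        .keys()
        .filter(|id| !reachable.contains(*id))
        .copied()
        .collect()
}
```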
218 | visited: HashSet, 219 | /// A map from allocation identifiers to information about their reachability. 220 | ref_graph: HashMap, 221 | } 222 | 223 | #[derive(Debug)] 224 | /// Information about the reachability of a structure. 225 | struct Reachability { 226 | /// The number of unaccounted-for references to this allocation. 227 | /// If this number is 0, the reference is not a root. 228 | n_unaccounted: usize, 229 | /// An erased pointer to the allocation under concern. 230 | ptr: Erased, 231 | /// A function used to mark descendants of this allocation as accessible. 232 | mark_fn: unsafe fn(Erased, &mut Mark), 233 | } 234 | 235 | impl Visitor for Dfs { 236 | fn visit_sync(&mut self, _: &crate::sync::Gc) 237 | where 238 | T: Trace + Send + Sync + ?Sized, 239 | { 240 | // because `Gc` is `!Sync`, we know we won't find a `Gc` this way and can return 241 | // immediately. 242 | } 243 | 244 | fn visit_unsync(&mut self, gc: &Gc) 245 | where 246 | T: Trace + ?Sized, 247 | { 248 | let ptr = gc.ptr.get().unwrap(); 249 | let next_id = AllocationId::from(ptr); 250 | match self.ref_graph.entry(next_id) { 251 | Entry::Occupied(ref mut o) => { 252 | o.get_mut().n_unaccounted -= 1; 253 | } 254 | Entry::Vacant(v) => { 255 | v.insert(Reachability { 256 | n_unaccounted: unsafe { next_id.0.as_ref().get().get() - 1 }, 257 | ptr: Erased::new(ptr), 258 | mark_fn: apply_visitor::, 259 | }); 260 | } 261 | } 262 | if self.visited.insert(next_id) { 263 | let _ = unsafe { ptr.as_ref() }.value.accept(self); 264 | } 265 | } 266 | } 267 | 268 | /// A mark traversal, which marks allocations as reachable. 269 | struct Mark { 270 | /// The set of allocations which have been marked as reachable. 271 | visited: HashSet, 272 | } 273 | 274 | impl Visitor for Mark { 275 | fn visit_sync(&mut self, _: &crate::sync::Gc) 276 | where 277 | T: Trace + Send + Sync + ?Sized, 278 | { 279 | // because `Gc` is `!Sync`, we know we won't find a `Gc` this way and can return 280 | // immediately. 281 | } 282 | 283 | fn visit_unsync(&mut self, gc: &Gc) 284 | where 285 | T: Trace + ?Sized, 286 | { 287 | let ptr = gc.ptr.get().unwrap(); 288 | if self.visited.insert(AllocationId::from(ptr)) { 289 | let _ = unsafe { ptr.as_ref().value.accept(self) }; 290 | } 291 | } 292 | } 293 | 294 | /// A visitor for dropping allocations. 295 | struct DropAlloc<'a> { 296 | /// The set of unreachable allocations we've already visited. 297 | visited: HashSet, 298 | /// The set of unreachable allocations. 299 | reachable: &'a HashSet, 300 | } 301 | 302 | impl Visitor for DropAlloc<'_> { 303 | fn visit_sync(&mut self, _: &crate::sync::Gc) 304 | where 305 | T: Trace + Send + Sync + ?Sized, 306 | { 307 | // do nothing 308 | } 309 | 310 | fn visit_unsync(&mut self, gc: &Gc) 311 | where 312 | T: Trace + ?Sized, 313 | { 314 | let ptr = gc.ptr.get().unwrap(); 315 | let id = AllocationId::from(ptr); 316 | if self.reachable.contains(&id) { 317 | unsafe { 318 | let cell_ref = &ptr.as_ref().ref_count; 319 | cell_ref.set(NonZeroUsize::new(cell_ref.get().get() - 1).unwrap()); 320 | } 321 | return; 322 | } 323 | gc.ptr.set(gc.ptr.get().as_null()); 324 | if self.visited.insert(id) { 325 | unsafe { 326 | ptr.as_ref().value.accept(self).unwrap(); 327 | let layout = Layout::for_value(ptr.as_ref()); 328 | drop_in_place(ptr.as_ptr()); 329 | dealloc(ptr.as_ptr().cast(), layout); 330 | } 331 | } 332 | } 333 | } 334 | 335 | /// Decrement the outbound reference counts for any reachable allocations which this allocation can 336 | /// find. 
337 | /// Also, drop the allocation when done. 338 | unsafe fn drop_assist(ptr: Erased, visitor: &mut DropAlloc<'_>) { 339 | if visitor 340 | .visited 341 | .insert(AllocationId::from(ptr.specify::>())) 342 | { 343 | ptr.specify::>() 344 | .as_ref() 345 | .value 346 | .accept(visitor) 347 | .unwrap(); 348 | 349 | let mut_spec = ptr.specify::>().as_mut(); 350 | let layout = Layout::for_value(mut_spec); 351 | drop_in_place(mut_spec); 352 | dealloc(std::ptr::from_mut::>(mut_spec).cast(), layout); 353 | } 354 | } 355 | -------------------------------------------------------------------------------- /dumpster/src/unsync/mod.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! Thread-local garbage collection. 10 | //! 11 | //! Most users of this library will want to direct their attention to [`Gc`]. 12 | //! If you want to tune the garbage collector's cleanup frequency, take a look at 13 | //! [`set_collect_condition`]. 14 | //! 15 | //! # Examples 16 | //! 17 | //! ``` 18 | //! use dumpster::{unsync::Gc, Trace}; 19 | //! use std::cell::RefCell; 20 | //! 21 | //! #[derive(Trace)] 22 | //! struct Foo { 23 | //! refs: RefCell>>, 24 | //! } 25 | //! 26 | //! let foo = Gc::new(Foo { 27 | //! refs: RefCell::new(Vec::new()), 28 | //! }); 29 | //! 30 | //! // If you had used `Rc`, this would be a memory leak. 31 | //! // `Gc` can collect it, though! 32 | //! foo.refs.borrow_mut().push(foo.clone()); 33 | //! ``` 34 | 35 | use std::{ 36 | alloc::{dealloc, Layout}, 37 | borrow::Borrow, 38 | cell::Cell, 39 | num::NonZeroUsize, 40 | ops::Deref, 41 | ptr::{addr_of, addr_of_mut, drop_in_place, NonNull}, 42 | }; 43 | 44 | use crate::{contains_gcs, ptr::Nullable, Trace, Visitor}; 45 | 46 | use self::collect::{Dumpster, COLLECTING, DUMPSTER}; 47 | 48 | mod collect; 49 | #[cfg(test)] 50 | mod tests; 51 | 52 | #[derive(Debug)] 53 | /// A garbage-collected pointer. 54 | /// 55 | /// This garbage-collected pointer may be used for data which is not safe to share across threads 56 | /// (such as a [`std::cell::RefCell`]). 57 | /// It can also be used for variably sized data. 58 | /// 59 | /// # Examples 60 | /// 61 | /// ``` 62 | /// use dumpster::unsync::Gc; 63 | /// 64 | /// let x: Gc = Gc::new(3); 65 | /// 66 | /// println!("{}", *x); // prints '3' 67 | /// // x is then freed automatically! 68 | /// ``` 69 | /// 70 | /// # Interaction with `Drop` 71 | /// 72 | /// While collecting cycles, it's possible for a `Gc` to exist that points to some deallocated 73 | /// object. 74 | /// To prevent undefined behavior, these `Gc`s are marked as dead during collection and rendered 75 | /// inaccessible. 76 | /// Dereferencing or cloning a `Gc` during the `Drop` implementation of a `Trace` type could 77 | /// result in the program panicking to keep the program from accessing memory after freeing it. 78 | /// If you're accessing a `Gc` during a `Drop` implementation, make sure to use the fallible 79 | /// operations [`Gc::try_deref`] and [`Gc::try_clone`]. 80 | pub struct Gc { 81 | /// A pointer to the heap allocation containing the data under concern. 82 | /// The pointee box should never be mutated. 
83 | /// 84 | /// If `ptr` is `None`, then this is a dead `Gc`, meaning that the allocation it points to has 85 | /// been dropped. 86 | /// This can only happen observably if this `Gc` is accessed during the [`Drop`] implementation 87 | /// of a [`Trace`] type. 88 | ptr: Cell>>, 89 | } 90 | 91 | /// Collect all existing unreachable allocations. 92 | /// 93 | /// This operation is most useful for making sure that the `Drop` implementation for some data has 94 | /// been called before moving on (such as for a file handle or mutex guard), because the garbage 95 | /// collector is not eager under normal conditions. 96 | /// This only collects the allocations local to the caller's thread. 97 | /// 98 | /// # Examples 99 | /// 100 | /// ``` 101 | /// # fn main() -> Result<(), Box> { 102 | /// use dumpster::unsync::{collect, Gc}; 103 | /// use std::sync::Mutex; 104 | /// 105 | /// static MY_MUTEX: Mutex<()> = Mutex::new(()); 106 | /// 107 | /// let guard_gc = Gc::new(MY_MUTEX.lock()?); 108 | /// drop(guard_gc); 109 | /// // We're not certain that the handle that was contained in `guard_gc` has been dropped, so we 110 | /// // should force a collection to make sure. 111 | /// collect(); 112 | /// 113 | /// // We know this won't cause a deadlock because we made sure to run a collection. 114 | /// let _x = MY_MUTEX.lock()?; 115 | /// # Ok(()) 116 | /// # } 117 | /// ``` 118 | pub fn collect() { 119 | DUMPSTER.with(Dumpster::collect_all); 120 | } 121 | 122 | /// Information passed to a [`CollectCondition`] used to determine whether the garbage collector 123 | /// should start collecting. 124 | pub struct CollectInfo { 125 | /// Dummy value so this is a private structure. 126 | _private: (), 127 | } 128 | 129 | /// A function which determines whether the garbage collector should start collecting. 130 | /// This function primarily exists so that it can be used with [`set_collect_condition`]. 131 | /// 132 | /// # Examples 133 | /// 134 | /// ```rust 135 | /// use dumpster::unsync::{set_collect_condition, CollectInfo}; 136 | /// 137 | /// fn always_collect(_: &CollectInfo) -> bool { 138 | /// true 139 | /// } 140 | /// 141 | /// set_collect_condition(always_collect); 142 | /// ``` 143 | pub type CollectCondition = fn(&CollectInfo) -> bool; 144 | 145 | #[must_use] 146 | /// The default collection condition used by the garbage collector. 147 | /// 148 | /// There are no guarantees about what this function returns, other than that it will return `true` 149 | /// with sufficient frequency to ensure that all `Gc` operations are amortized _O(1)_ in runtime. 150 | /// 151 | /// This function isn't really meant to be called by users, but rather it's supposed to be handed 152 | /// off to [`set_collect_condition`] to return to the default operating mode of the library. 153 | /// 154 | /// This collection condition applies locally, i.e. only to this thread. 155 | /// If you want it to apply globally, you'll have to update it every time you spawn a thread. 156 | /// 157 | /// # Examples 158 | /// 159 | /// ```rust 160 | /// use dumpster::unsync::{default_collect_condition, set_collect_condition}; 161 | /// 162 | /// set_collect_condition(default_collect_condition); 163 | /// ``` 164 | pub fn default_collect_condition(info: &CollectInfo) -> bool { 165 | info.n_gcs_dropped_since_last_collect() > info.n_gcs_existing() 166 | } 167 | 168 | #[allow(clippy::missing_panics_doc)] 169 | /// Set the function which determines whether the garbage collector should be run. 
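For example, a throttled condition built from the `CollectInfo` accessors can be installed with `set_collect_condition`. This is an illustrative sketch only; `collect_every_n_drops` is not part of the crate:

```rust
use dumpster::unsync::{set_collect_condition, CollectInfo};

/// Hypothetical user-defined condition: only sweep once at least 10,000 `Gc`
/// handles have been dropped since the previous collection.
fn collect_every_n_drops(info: &CollectInfo) -> bool {
    info.n_gcs_dropped_since_last_collect() >= 10_000
}

fn main() {
    // Applies to the current thread only, like the default condition.
    set_collect_condition(collect_every_n_drops);
}
```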
170 | /// 171 | /// `f` will be periodically called by the garbage collector to determine whether it should perform 172 | /// a full cleanup of the heap. 173 | /// When `f` returns true, a cleanup will begin. 174 | /// 175 | /// # Examples 176 | /// 177 | /// ``` 178 | /// use dumpster::unsync::{set_collect_condition, CollectInfo}; 179 | /// 180 | /// /// This function will make sure a GC cleanup never happens unless directly activated. 181 | /// fn never_collect(_: &CollectInfo) -> bool { 182 | /// false 183 | /// } 184 | /// 185 | /// set_collect_condition(never_collect); 186 | /// ``` 187 | pub fn set_collect_condition(f: CollectCondition) { 188 | DUMPSTER.with(|d| d.collect_condition.set(f)); 189 | } 190 | 191 | #[repr(C)] 192 | /// The underlying heap allocation for a [`Gc`]. 193 | struct GcBox { 194 | /// The number of extant references to this garbage-collected data. 195 | ref_count: Cell, 196 | /// The stored value inside this garbage-collected box. 197 | value: T, 198 | } 199 | 200 | impl Gc { 201 | /// Construct a new garbage-collected allocation, with `value` as its value. 202 | /// 203 | /// # Examples 204 | /// 205 | /// ``` 206 | /// use dumpster::unsync::Gc; 207 | /// 208 | /// let gc = Gc::new(0); 209 | /// ``` 210 | pub fn new(value: T) -> Gc 211 | where 212 | T: Sized, 213 | { 214 | DUMPSTER.with(Dumpster::notify_created_gc); 215 | Gc { 216 | ptr: Cell::new(Nullable::new(NonNull::from(Box::leak(Box::new(GcBox { 217 | ref_count: Cell::new(NonZeroUsize::MIN), 218 | value, 219 | }))))), 220 | } 221 | } 222 | 223 | #[allow(clippy::unnecessary_lazy_evaluations)] 224 | /// Attempt to dereference this `Gc`. 225 | /// 226 | /// This function will return `None` if `self` is a "dead" `Gc`, which points to an 227 | /// already-deallocated object. 228 | /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a 229 | /// [`Trace`] object. 230 | /// 231 | /// For a version which panics instead of returning `None`, consider using [`Deref`]. 232 | /// 233 | /// # Examples 234 | /// 235 | /// For a still-living `Gc`, this always returns `Some`. 236 | /// 237 | /// ``` 238 | /// use dumpster::unsync::Gc; 239 | /// 240 | /// let gc1 = Gc::new(0); 241 | /// assert!(Gc::try_deref(&gc1).is_some()); 242 | /// ``` 243 | /// 244 | /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its 245 | /// `Drop` implementation. 246 | /// 247 | /// ``` 248 | /// use dumpster::{unsync::Gc, Trace}; 249 | /// use std::cell::OnceCell; 250 | /// 251 | /// #[derive(Trace)] 252 | /// struct Cycle(OnceCell>); 253 | /// 254 | /// impl Drop for Cycle { 255 | /// fn drop(&mut self) { 256 | /// let maybe_ref = Gc::try_deref(self.0.get().unwrap()); 257 | /// assert!(maybe_ref.is_none()); 258 | /// } 259 | /// } 260 | /// 261 | /// let gc1 = Gc::new(Cycle(OnceCell::new())); 262 | /// gc1.0.set(gc1.clone()); 263 | /// # drop(gc1); 264 | /// # dumpster::unsync::collect(); 265 | /// ``` 266 | pub fn try_deref(gc: &Gc) -> Option<&T> { 267 | (!gc.ptr.get().is_null()).then(|| &**gc) 268 | } 269 | 270 | /// Attempt to clone this `Gc`. 271 | /// 272 | /// This function will return `None` if `self` is a "dead" `Gc`, which points to an 273 | /// already-deallocated object. 274 | /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a 275 | /// [`Trace`] object. 276 | /// 277 | /// For a version which panics instead of returning `None`, consider using [`Clone`]. 
278 | /// 279 | /// # Examples 280 | /// 281 | /// For a still-living `Gc`, this always returns `Some`. 282 | /// 283 | /// ``` 284 | /// use dumpster::unsync::Gc; 285 | /// 286 | /// let gc1 = Gc::new(0); 287 | /// let gc2 = Gc::try_clone(&gc1).unwrap(); 288 | /// ``` 289 | /// 290 | /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its 291 | /// `Drop` implementation. 292 | /// 293 | /// ``` 294 | /// use dumpster::{unsync::Gc, Trace}; 295 | /// use std::cell::OnceCell; 296 | /// 297 | /// #[derive(Trace)] 298 | /// struct Cycle(OnceCell>); 299 | /// 300 | /// impl Drop for Cycle { 301 | /// fn drop(&mut self) { 302 | /// let cloned = Gc::try_clone(self.0.get().unwrap()); 303 | /// assert!(cloned.is_none()); 304 | /// } 305 | /// } 306 | /// 307 | /// let gc1 = Gc::new(Cycle(OnceCell::new())); 308 | /// gc1.0.set(gc1.clone()); 309 | /// # drop(gc1); 310 | /// # dumpster::unsync::collect(); 311 | /// ``` 312 | pub fn try_clone(gc: &Gc) -> Option> { 313 | (!gc.ptr.get().is_null()).then(|| gc.clone()) 314 | } 315 | 316 | /// Provides a raw pointer to the data. 317 | /// 318 | /// Panics if `self` is a "dead" `Gc`, 319 | /// which points to an already-deallocated object. 320 | /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a 321 | /// [`Trace`] object. 322 | /// 323 | /// # Examples 324 | /// 325 | /// ``` 326 | /// use dumpster::unsync::Gc; 327 | /// let x = Gc::new("hello".to_owned()); 328 | /// let y = Gc::clone(&x); 329 | /// let x_ptr = Gc::as_ptr(&x); 330 | /// assert_eq!(x_ptr, Gc::as_ptr(&x)); 331 | /// assert_eq!(unsafe { &*x_ptr }, "hello"); 332 | /// ``` 333 | pub fn as_ptr(gc: &Gc) -> *const T { 334 | let ptr = NonNull::as_ptr(gc.ptr.get().unwrap()); 335 | unsafe { addr_of_mut!((*ptr).value) } 336 | } 337 | 338 | /// Determine whether two `Gc`s are equivalent by reference. 339 | /// Returns `true` if both `this` and `other` point to the same value, in the same style as 340 | /// [`std::ptr::eq`]. 341 | /// 342 | /// # Examples 343 | /// 344 | /// ``` 345 | /// use dumpster::unsync::Gc; 346 | /// 347 | /// let gc1 = Gc::new(0); 348 | /// let gc2 = Gc::clone(&gc1); // points to same spot as `gc1` 349 | /// let gc3 = Gc::new(0); // same value, but points to a different object than `gc1` 350 | /// 351 | /// assert!(Gc::ptr_eq(&gc1, &gc2)); 352 | /// assert!(!Gc::ptr_eq(&gc1, &gc3)); 353 | /// ``` 354 | pub fn ptr_eq(this: &Gc, other: &Gc) -> bool { 355 | this.ptr.get().as_option() == other.ptr.get().as_option() 356 | } 357 | 358 | /// Get the number of references to the value pointed to by this `Gc`. 359 | /// 360 | /// This does not include internal references generated by the garbage collector. 361 | /// 362 | /// # Panics 363 | /// 364 | /// This function may panic if the `Gc` whose reference count we are loading is "dead" (i.e. 365 | /// generated through a `Drop` implementation). For further reference, take a look at 366 | /// [`Gc::is_dead`]. 367 | /// 368 | /// # Examples 369 | /// 370 | /// ``` 371 | /// use dumpster::unsync::Gc; 372 | /// 373 | /// let gc = Gc::new(()); 374 | /// assert_eq!(gc.ref_count().get(), 1); 375 | /// let gc2 = gc.clone(); 376 | /// assert_eq!(gc.ref_count().get(), 2); 377 | /// drop(gc); 378 | /// drop(gc2); 379 | /// ``` 380 | pub fn ref_count(&self) -> NonZeroUsize { 381 | let box_ptr = self.ptr.get().expect( 382 | "Attempt to dereference Gc to already-collected object. 
\ 383 | This means a Gc escaped from a Drop implementation, likely implying a bug in your code.", 384 | ); 385 | let box_ref = unsafe { box_ptr.as_ref() }; 386 | box_ref.ref_count.get() 387 | } 388 | 389 | /// Determine whether this is a dead `Gc`. 390 | /// 391 | /// A `Gc` is dead if it is accessed while the value it points to has been destroyed; this only 392 | /// occurs if one attempts to interact with a `Gc` during a structure's [`Drop`] implementation. 393 | /// However, this is not always guaranteed - sometime the garbage collector will leave `Gc`s 394 | /// alive in differing orders, so users should not rely on the destruction order of `Gc`s to 395 | /// determine whether it is dead. 396 | /// 397 | /// # Examples 398 | /// 399 | /// ``` 400 | /// use dumpster::{unsync::Gc, Trace}; 401 | /// use std::cell::OnceCell; 402 | /// 403 | /// #[derive(Trace)] 404 | /// struct Cycle(OnceCell>); 405 | /// 406 | /// impl Drop for Cycle { 407 | /// fn drop(&mut self) { 408 | /// assert!(self.0.get().unwrap().is_dead()); 409 | /// } 410 | /// } 411 | /// 412 | /// let gc1 = Gc::new(Cycle(OnceCell::new())); 413 | /// gc1.0.set(gc1.clone()); 414 | /// # drop(gc1); 415 | /// # dumpster::unsync::collect(); 416 | /// ``` 417 | pub fn is_dead(&self) -> bool { 418 | self.ptr.get().is_null() 419 | } 420 | } 421 | 422 | impl Deref for Gc { 423 | type Target = T; 424 | 425 | /// Dereference this pointer, creating a reference to the contained value `T`. 426 | /// 427 | /// # Panics 428 | /// 429 | /// This function may panic if it is called from within the implementation of `std::ops::Drop` 430 | /// of its owning value, since returning such a reference could cause a use-after-free. 431 | /// It is not guaranteed to panic. 432 | /// 433 | /// For a version which returns `None` instead of panicking, consider [`Gc::try_deref`]. 434 | /// 435 | /// # Examples 436 | /// 437 | /// The following is a correct time to dereference a `Gc`. 438 | /// 439 | /// ``` 440 | /// use dumpster::unsync::Gc; 441 | /// 442 | /// let my_gc = Gc::new(0u8); 443 | /// let my_ref: &u8 = &my_gc; 444 | /// ``` 445 | /// 446 | /// Dereferencing a `Gc` while dropping is not correct. 447 | /// 448 | /// ```should_panic 449 | /// // This is wrong! 450 | /// use dumpster::{unsync::Gc, Trace}; 451 | /// use std::cell::RefCell; 452 | /// 453 | /// #[derive(Trace)] 454 | /// struct Bad { 455 | /// s: String, 456 | /// cycle: RefCell>>, 457 | /// } 458 | /// 459 | /// impl Drop for Bad { 460 | /// fn drop(&mut self) { 461 | /// println!("{}", self.cycle.borrow().as_ref().unwrap().s) 462 | /// } 463 | /// } 464 | /// 465 | /// let foo = Gc::new(Bad { 466 | /// s: "foo".to_string(), 467 | /// cycle: RefCell::new(None), 468 | /// }); 469 | /// ``` 470 | fn deref(&self) -> &Self::Target { 471 | assert!( 472 | !COLLECTING.with(Cell::get), 473 | "dereferencing GC to already-collected object" 474 | ); 475 | unsafe { 476 | &self.ptr.get().expect("dereferencing Gc to already-collected object. \ 477 | This means a Gc escaped from a Drop implementation, likely implying a bug in your code.").as_ref().value 478 | } 479 | } 480 | } 481 | 482 | impl Clone for Gc { 483 | #[allow(clippy::clone_on_copy)] 484 | /// Create a duplicate reference to the same data pointed to by `self`. 485 | /// This does not duplicate the data. 486 | /// 487 | /// # Panics 488 | /// 489 | /// This function will panic if the `Gc` being cloned points to a deallocated object. 
490 | /// This is only possible if said `Gc` is accessed during the `Drop` implementation of a 491 | /// `Trace` value. 492 | /// 493 | /// For a fallible version, refer to [`Gc::try_clone`]. 494 | /// 495 | /// # Examples 496 | /// 497 | /// ``` 498 | /// use dumpster::unsync::Gc; 499 | /// use std::sync::atomic::{AtomicU8, Ordering}; 500 | /// 501 | /// let gc1 = Gc::new(AtomicU8::new(0)); 502 | /// let gc2 = gc1.clone(); 503 | /// 504 | /// gc1.store(1, Ordering::Relaxed); 505 | /// assert_eq!(gc2.load(Ordering::Relaxed), 1); 506 | /// ``` 507 | /// 508 | /// The following example will fail, because cloning a `Gc` to a deallocated object is wrong. 509 | /// 510 | /// ```should_panic 511 | /// use dumpster::{unsync::Gc, Trace}; 512 | /// use std::cell::OnceCell; 513 | /// 514 | /// #[derive(Trace)] 515 | /// struct Cycle(OnceCell>); 516 | /// 517 | /// impl Drop for Cycle { 518 | /// fn drop(&mut self) { 519 | /// let _ = self.0.get().unwrap().clone(); 520 | /// } 521 | /// } 522 | /// 523 | /// let gc1 = Gc::new(Cycle(OnceCell::new())); 524 | /// gc1.0.set(gc1.clone()); 525 | /// # drop(gc1); 526 | /// # dumpster::unsync::collect(); 527 | /// ``` 528 | fn clone(&self) -> Self { 529 | unsafe { 530 | let box_ref = self.ptr.get().expect("Attempt to clone Gc to already-collected object. \ 531 | This means a Gc escaped from a Drop implementation, likely implying a bug in your code.").as_ref(); 532 | box_ref 533 | .ref_count 534 | .set(box_ref.ref_count.get().saturating_add(1)); 535 | } 536 | DUMPSTER.with(|d| { 537 | d.notify_created_gc(); 538 | // d.mark_cleaned(self.ptr); 539 | }); 540 | Self { 541 | ptr: self.ptr.clone(), 542 | } 543 | } 544 | } 545 | 546 | impl Drop for Gc { 547 | /// Destroy this garbage-collected pointer. 548 | /// 549 | /// If this is the last reference which can reach the pointed-to data, the allocation that it 550 | /// points to will be destroyed. 551 | fn drop(&mut self) { 552 | if COLLECTING.with(Cell::get) { 553 | return; 554 | } 555 | let Some(mut ptr) = self.ptr.get().as_option() else { 556 | return; 557 | }; 558 | DUMPSTER.with(|d| { 559 | let box_ref = unsafe { ptr.as_ref() }; 560 | match box_ref.ref_count.get() { 561 | NonZeroUsize::MIN => { 562 | d.mark_cleaned(ptr); 563 | unsafe { 564 | // this was the last reference, drop unconditionally 565 | drop_in_place(addr_of_mut!(ptr.as_mut().value)); 566 | // note: `box_ref` is no longer usable 567 | dealloc(ptr.as_ptr().cast::(), Layout::for_value(ptr.as_ref())); 568 | } 569 | } 570 | n => { 571 | // decrement the ref count - but another reference to this data still 572 | // lives 573 | box_ref 574 | .ref_count 575 | .set(NonZeroUsize::new(n.get() - 1).unwrap()); 576 | 577 | if contains_gcs(&box_ref.value).unwrap_or(true) { 578 | // remaining references could be a cycle - therefore, mark it as dirty 579 | // so we can check later 580 | d.mark_dirty(ptr); 581 | } 582 | } 583 | } 584 | // Notify that a GC has been dropped, potentially triggering a cleanup 585 | d.notify_dropped_gc(); 586 | }); 587 | } 588 | } 589 | 590 | impl PartialEq> for Gc 591 | where 592 | T: Trace + ?Sized + PartialEq, 593 | { 594 | /// Test for equality on two `Gc`s. 595 | /// 596 | /// Two `Gc`s are equal if their inner values are equal, even if they are stored in different 597 | /// allocations. 598 | /// Because `PartialEq` does not imply reflexivity, and there is no current path for trait 599 | /// specialization, this function does not do a "fast-path" check for reference equality. 
600 | /// Therefore, if two `Gc`s point to the same allocation, the implementation of `eq` will still 601 | /// require a direct call to `eq` on the values. 602 | /// 603 | /// # Panics 604 | /// 605 | /// This function may panic if it is called from within the implementation of `std::ops::Drop` 606 | /// of its owning value, since returning such a reference could cause a use-after-free. 607 | /// It is not guaranteed to panic. 608 | /// Additionally, if this `Gc` is moved out of an allocation during a `Drop` implementation, it 609 | /// could later cause a panic. 610 | /// For further details, refer to the main documentation for `Gc`. 611 | /// 612 | /// # Examples 613 | /// 614 | /// ``` 615 | /// use dumpster::unsync::Gc; 616 | /// 617 | /// let gc = Gc::new(6); 618 | /// assert!(gc == Gc::new(6)); 619 | /// ``` 620 | fn eq(&self, other: &Gc) -> bool { 621 | self.as_ref() == other.as_ref() 622 | } 623 | } 624 | 625 | impl Eq for Gc where T: Trace + ?Sized + PartialEq {} 626 | 627 | impl CollectInfo { 628 | #[must_use] 629 | /// Get the number of times that a [`Gc`] has been dropped since the last time a collection 630 | /// operation was performed. 631 | /// 632 | /// # Examples 633 | /// 634 | /// ``` 635 | /// use dumpster::unsync::{set_collect_condition, CollectInfo}; 636 | /// 637 | /// // Collection condition for whether many Gc's have been dropped. 638 | /// fn have_many_gcs_dropped(info: &CollectInfo) -> bool { 639 | /// info.n_gcs_dropped_since_last_collect() > 100 640 | /// } 641 | /// 642 | /// set_collect_condition(have_many_gcs_dropped); 643 | /// ``` 644 | pub fn n_gcs_dropped_since_last_collect(&self) -> usize { 645 | DUMPSTER.with(|d| d.n_ref_drops.get()) 646 | } 647 | 648 | #[must_use] 649 | /// Get the total number of [`Gc`]s which currently exist. 650 | /// 651 | /// # Examples 652 | /// 653 | /// ``` 654 | /// use dumpster::unsync::{set_collect_condition, CollectInfo}; 655 | /// 656 | /// // Collection condition for whether many Gc's currently exist. 657 | /// fn do_many_gcs_exist(info: &CollectInfo) -> bool { 658 | /// info.n_gcs_existing() > 100 659 | /// } 660 | /// 661 | /// set_collect_condition(do_many_gcs_exist); 662 | /// ``` 663 | pub fn n_gcs_existing(&self) -> usize { 664 | DUMPSTER.with(|d| d.n_refs_living.get()) 665 | } 666 | } 667 | 668 | unsafe impl Trace for Gc { 669 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 670 | visitor.visit_unsync(self); 671 | Ok(()) 672 | } 673 | } 674 | 675 | impl AsRef for Gc { 676 | fn as_ref(&self) -> &T { 677 | self 678 | } 679 | } 680 | 681 | impl Borrow for Gc { 682 | fn borrow(&self) -> &T { 683 | self 684 | } 685 | } 686 | 687 | impl Default for Gc { 688 | fn default() -> Self { 689 | Gc::new(T::default()) 690 | } 691 | } 692 | 693 | impl std::fmt::Pointer for Gc { 694 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 695 | std::fmt::Pointer::fmt(&addr_of!(**self), f) 696 | } 697 | } 698 | 699 | #[cfg(feature = "coerce-unsized")] 700 | impl std::ops::CoerceUnsized> for Gc 701 | where 702 | T: std::marker::Unsize + Trace + ?Sized, 703 | U: Trace + ?Sized, 704 | { 705 | } 706 | -------------------------------------------------------------------------------- /dumpster/src/unsync/tests.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, acycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. 3 | 4 | This Source Code Form is subject to the terms of the Mozilla Public 5 | License, v. 2.0. 
If a copy of the MPL was not distributed with this 6 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 7 | */ 8 | 9 | //! Simple tests using manual implementations of [`Trace`]. 10 | 11 | use crate::Visitor; 12 | 13 | use super::*; 14 | use std::{ 15 | cell::RefCell, 16 | sync::{ 17 | atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering}, 18 | Mutex, 19 | }, 20 | }; 21 | 22 | #[test] 23 | /// Test a simple data structure 24 | fn simple() { 25 | static DROPPED: AtomicBool = AtomicBool::new(false); 26 | struct Foo; 27 | 28 | impl Drop for Foo { 29 | fn drop(&mut self) { 30 | DROPPED.store(true, Ordering::Relaxed); 31 | } 32 | } 33 | 34 | unsafe impl Trace for Foo { 35 | fn accept(&self, _: &mut V) -> Result<(), ()> { 36 | Ok(()) 37 | } 38 | } 39 | 40 | let gc1 = Gc::new(Foo); 41 | let gc2 = Gc::clone(&gc1); 42 | 43 | assert!(!DROPPED.load(Ordering::Relaxed)); 44 | 45 | drop(gc1); 46 | 47 | assert!(!DROPPED.load(Ordering::Relaxed)); 48 | 49 | drop(gc2); 50 | 51 | assert!(DROPPED.load(Ordering::Relaxed)); 52 | } 53 | 54 | #[derive(Debug)] 55 | struct MultiRef { 56 | refs: RefCell>>, 57 | drop_count: &'static AtomicUsize, 58 | } 59 | 60 | unsafe impl Trace for MultiRef { 61 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 62 | self.refs.accept(visitor) 63 | } 64 | } 65 | 66 | impl Drop for MultiRef { 67 | fn drop(&mut self) { 68 | self.drop_count.fetch_add(1, Ordering::Relaxed); 69 | } 70 | } 71 | 72 | #[test] 73 | fn self_referential() { 74 | static DROPPED: AtomicU8 = AtomicU8::new(0); 75 | struct Foo(RefCell>>); 76 | 77 | unsafe impl Trace for Foo { 78 | #[inline] 79 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 80 | self.0.accept(visitor) 81 | } 82 | } 83 | 84 | impl Drop for Foo { 85 | fn drop(&mut self) { 86 | DROPPED.fetch_add(1, Ordering::Relaxed); 87 | } 88 | } 89 | 90 | let gc = Gc::new(Foo(RefCell::new(None))); 91 | gc.0.replace(Some(Gc::clone(&gc))); 92 | 93 | assert_eq!(DROPPED.load(Ordering::Relaxed), 0); 94 | drop(gc); 95 | collect(); 96 | assert_eq!(DROPPED.load(Ordering::Relaxed), 1); 97 | } 98 | 99 | #[test] 100 | fn cyclic() { 101 | static DROPPED: AtomicU8 = AtomicU8::new(0); 102 | struct Foo(RefCell>>); 103 | 104 | unsafe impl Trace for Foo { 105 | #[inline] 106 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 107 | self.0.accept(visitor) 108 | } 109 | } 110 | 111 | impl Drop for Foo { 112 | fn drop(&mut self) { 113 | DROPPED.fetch_add(1, Ordering::Relaxed); 114 | } 115 | } 116 | 117 | let foo1 = Gc::new(Foo(RefCell::new(None))); 118 | let foo2 = Gc::new(Foo(RefCell::new(Some(Gc::clone(&foo1))))); 119 | foo1.0.replace(Some(Gc::clone(&foo2))); 120 | 121 | assert_eq!(DROPPED.load(Ordering::Relaxed), 0); 122 | drop(foo1); 123 | assert_eq!(DROPPED.load(Ordering::Relaxed), 0); 124 | drop(foo2); 125 | collect(); 126 | assert_eq!(DROPPED.load(Ordering::Relaxed), 2); 127 | } 128 | 129 | /// Construct a complete graph of garbage-collected 130 | fn complete_graph(detectors: &'static [AtomicUsize]) -> Vec> { 131 | let mut gcs = Vec::new(); 132 | for d in detectors { 133 | let gc = Gc::new(MultiRef { 134 | refs: RefCell::new(Vec::new()), 135 | drop_count: d, 136 | }); 137 | for x in &gcs { 138 | gc.refs.borrow_mut().push(Gc::clone(x)); 139 | x.refs.borrow_mut().push(Gc::clone(&gc)); 140 | } 141 | gcs.push(gc); 142 | } 143 | 144 | gcs 145 | } 146 | 147 | #[test] 148 | fn complete4() { 149 | static DETECTORS: [AtomicUsize; 4] = [ 150 | AtomicUsize::new(0), 151 | AtomicUsize::new(0), 152 | AtomicUsize::new(0), 153 | AtomicUsize::new(0), 154 | ]; 155 | 156 
| let mut gcs = complete_graph(&DETECTORS); 157 | 158 | for _ in 0..3 { 159 | gcs.pop(); 160 | } 161 | 162 | for detector in &DETECTORS { 163 | assert_eq!(detector.load(Ordering::Relaxed), 0); 164 | } 165 | 166 | drop(gcs); 167 | collect(); 168 | 169 | for detector in &DETECTORS { 170 | assert_eq!(detector.load(Ordering::Relaxed), 1); 171 | } 172 | } 173 | 174 | #[test] 175 | fn parallel_loop() { 176 | static COUNT_1: AtomicUsize = AtomicUsize::new(0); 177 | static COUNT_2: AtomicUsize = AtomicUsize::new(0); 178 | static COUNT_3: AtomicUsize = AtomicUsize::new(0); 179 | static COUNT_4: AtomicUsize = AtomicUsize::new(0); 180 | 181 | let gc1 = Gc::new(MultiRef { 182 | drop_count: &COUNT_1, 183 | refs: RefCell::new(Vec::new()), 184 | }); 185 | let gc2 = Gc::new(MultiRef { 186 | drop_count: &COUNT_2, 187 | refs: RefCell::new(vec![Gc::clone(&gc1)]), 188 | }); 189 | let gc3 = Gc::new(MultiRef { 190 | drop_count: &COUNT_3, 191 | refs: RefCell::new(vec![Gc::clone(&gc1)]), 192 | }); 193 | let gc4 = Gc::new(MultiRef { 194 | drop_count: &COUNT_4, 195 | refs: RefCell::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]), 196 | }); 197 | gc1.refs.borrow_mut().push(Gc::clone(&gc4)); 198 | 199 | assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); 200 | assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); 201 | assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); 202 | assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); 203 | drop(gc1); 204 | assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); 205 | assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); 206 | assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); 207 | assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); 208 | drop(gc2); 209 | assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); 210 | assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); 211 | assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); 212 | assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); 213 | drop(gc3); 214 | assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); 215 | assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); 216 | assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); 217 | assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); 218 | drop(gc4); 219 | collect(); 220 | assert_eq!(COUNT_1.load(Ordering::Relaxed), 1); 221 | assert_eq!(COUNT_2.load(Ordering::Relaxed), 1); 222 | assert_eq!(COUNT_3.load(Ordering::Relaxed), 1); 223 | assert_eq!(COUNT_4.load(Ordering::Relaxed), 1); 224 | } 225 | 226 | #[test] 227 | /// Check that we can drop a Gc which points to some allocation with a borrowed `RefCell` in it. 228 | fn double_borrow() { 229 | static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); 230 | 231 | let gc = Gc::new(MultiRef { 232 | refs: RefCell::new(Vec::new()), 233 | drop_count: &DROP_COUNT, 234 | }); 235 | gc.refs.borrow_mut().push(gc.clone()); 236 | let mut my_borrow = gc.refs.borrow_mut(); 237 | my_borrow.pop(); 238 | drop(my_borrow); 239 | 240 | assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 0); 241 | collect(); 242 | drop(gc); 243 | collect(); 244 | assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1); 245 | } 246 | 247 | #[test] 248 | #[cfg(feature = "coerce-unsized")] 249 | fn coerce_array() { 250 | let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]); 251 | let gc2: Gc<[u8]> = gc1; 252 | assert_eq!(gc2.len(), 3); 253 | assert_eq!( 254 | std::mem::size_of::>(), 255 | 2 * std::mem::size_of::() 256 | ); 257 | } 258 | 259 | #[test] 260 | #[should_panic = "dereferencing Gc to already-collected object. This means a Gc escaped from a Drop implementation, likely implying a bug in your code."] 261 | fn escape_dead_pointer() { 262 | thread_local! 
{static ESCAPED: Mutex>> = const { Mutex::new(None) };} 263 | 264 | struct Escape { 265 | x: u8, 266 | ptr: Mutex>>, 267 | } 268 | 269 | impl Drop for Escape { 270 | fn drop(&mut self) { 271 | ESCAPED.with(|e| { 272 | let mut escaped_guard = e.lock().unwrap(); 273 | if escaped_guard.is_none() { 274 | *escaped_guard = (*self.ptr.lock().unwrap()).take(); 275 | } 276 | }); 277 | } 278 | } 279 | 280 | unsafe impl Trace for Escape { 281 | fn accept(&self, visitor: &mut V) -> Result<(), ()> { 282 | self.ptr.accept(visitor) 283 | } 284 | } 285 | 286 | let esc = Gc::new(Escape { 287 | x: 0, 288 | ptr: Mutex::new(None), 289 | }); 290 | 291 | *(*esc).ptr.lock().unwrap() = Some(esc.clone()); 292 | drop(esc); 293 | collect(); 294 | println!( 295 | "{}", 296 | ESCAPED.with(|e| e.lock().unwrap().as_ref().unwrap().x) 297 | ); 298 | } 299 | -------------------------------------------------------------------------------- /dumpster_bench/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /dumpster_bench/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dumpster_bench" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | authors = ["Clayton Ramsey"] 7 | description = "Benchmark for dumpster garbage collection crate" 8 | repository = "https://github.com/claytonwramsey/dumpster" 9 | readme = "../README.md" 10 | keywords = ["dumpster", "garbage_collector", "benchmark"] 11 | categories = ["data-structures", "memory-management"] 12 | 13 | [dependencies] 14 | dumpster = { version = "1.1.0", path = "../dumpster", features = ["derive"] } 15 | gc = "0.5.0" 16 | bacon_rajan_cc = "0.4.0" 17 | fastrand = "2.0.0" 18 | shredder = "0.2.0" 19 | shredder_derive = "0.2.0" 20 | parking_lot = "0.12.3" 21 | -------------------------------------------------------------------------------- /dumpster_bench/scripts/make_plots.py: -------------------------------------------------------------------------------- 1 | # dumpster, a cycle-tracking garbage collector for Rust. 2 | # Copyright (C) 2023 Clayton Ramsey. 3 | 4 | # This Source Code Form is subject to the terms of the Mozilla Public 5 | # License, v. 2.0. If a copy of the MPL was not distributed with this 6 | # file, You can obtain one at http://mozilla.org/MPL/2.0/. 
7 | 8 | import matplotlib.pyplot as plt 9 | import sys 10 | 11 | csv_file = open(sys.argv[1]) 12 | 13 | multi_times = {} 14 | single_times = {} 15 | 16 | for line in csv_file.read().split('\n'): 17 | if len(line) == 0: 18 | continue 19 | name, test_type, n_threads, n_ops, time = line.split(',') 20 | times = single_times if test_type == 'single_threaded' else multi_times 21 | if name not in times.keys(): 22 | times[name] = ([], []) 23 | times[name][0].append(int(n_threads)) 24 | times[name][1].append(float(time) / 1000.0) 25 | 26 | for (name, v) in multi_times.items(): 27 | (xs, ys) = v 28 | plt.scatter(xs, ys, label=name) 29 | plt.xlabel('Number of threads') 30 | plt.ylabel('Time taken for 1M ops (ms)') 31 | plt.title('Parallel garbage collector scaling') 32 | plt.legend() 33 | plt.show() 34 | 35 | multi_times.pop('shredder', None) 36 | for (i, (name, v)) in enumerate(multi_times.items()): 37 | (xs, ys) = v 38 | plt.scatter(xs, ys, label=name, color=f"tab:{['blue', 'orange', 'red'][i]}") 39 | plt.xlabel('Number of threads') 40 | plt.ylabel('Time taken for 1M ops (ms)') 41 | plt.title('Parallel garbage collector scaling (sans shredder)') 42 | plt.legend() 43 | plt.show() 44 | 45 | def violin(times: dict, name: str): 46 | data = [] 47 | labels = [] 48 | for (label, (_, ys)) in times.items(): 49 | data.append(ys) 50 | labels.append(label) 51 | 52 | fig = plt.figure() 53 | plt.violinplot(data, range(len(data)), vert=False) 54 | plt.yticks(range(len(data)), labels=labels) 55 | plt.ylabel('Garbage collector') 56 | plt.xlabel('Runtime for 1M ops (ms)') 57 | plt.tight_layout(rect=(10, 1.08, 1.08, 1.08)) 58 | plt.title(name) 59 | plt.show() 60 | 61 | violin(single_times, 'Single-threaded GC comparison') 62 | single_times.pop('shredder', None) 63 | violin(single_times, 'Single-threaded GC comparison (sans shredder)') 64 | -------------------------------------------------------------------------------- /dumpster_bench/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, a cycle-tracking garbage collector for Rust. 3 | Copyright (C) 2023 Clayton Ramsey. 4 | 5 | This Source Code Form is subject to the terms of the Mozilla Public 6 | License, v. 2.0. If a copy of the MPL was not distributed with this 7 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 8 | */ 9 | 10 | #![expect(non_local_definitions)] 11 | 12 | use std::{ 13 | rc::Rc, 14 | sync::{Arc, Mutex}, 15 | }; 16 | 17 | /// A garbage-collected structure which points to an arbitrary number of other garbage-collected 18 | /// structures. 19 | /// 20 | /// Cloning a `Multiref` yields a duplicated pointer, not a deep copy. 21 | pub trait Multiref: Clone { 22 | /// Create a new multiref which points to some data. 23 | fn new(points_to: Vec) -> Self; 24 | /// Apply some function to the backing set of references owned by this structure. 25 | fn apply(&self, f: impl FnOnce(&mut Vec)); 26 | /// Collect all the floating GCs out there. 27 | fn collect(); 28 | } 29 | 30 | /// A trait for thread-safe synchronized multirefs. 31 | pub trait SyncMultiref: Send + Sync + Multiref {} 32 | 33 | impl SyncMultiref for T where T: Send + Sync + Multiref {} 34 | 35 | /// A simple multi-reference which uses `Rc`, which is technically not a garbage collector, as a 36 | /// baseline. 37 | pub struct RcMultiref { 38 | refs: Mutex>>, 39 | } 40 | 41 | /// A simple multi-reference which uses `Arc`, which is technically not a garbage collector, as a 42 | /// baseline. 
43 | pub struct ArcMultiref { 44 | refs: Mutex<Vec<Arc<ArcMultiref>>>, 45 | } 46 | 47 | #[derive(dumpster::Trace, Debug)] 48 | pub struct DumpsterSyncMultiref { 49 | refs: Mutex<Vec<dumpster::sync::Gc<DumpsterSyncMultiref>>>, 50 | } 51 | 52 | #[derive(dumpster::Trace)] 53 | pub struct DumpsterUnsyncMultiref { 54 | refs: Mutex<Vec<dumpster::unsync::Gc<DumpsterUnsyncMultiref>>>, 55 | } 56 | 57 | pub struct GcMultiref { 58 | refs: gc::GcCell<Vec<gc::Gc<GcMultiref>>>, 59 | } 60 | 61 | pub struct BaconRajanMultiref { 62 | refs: Mutex<Vec<bacon_rajan_cc::Cc<BaconRajanMultiref>>>, 63 | } 64 | 65 | #[derive(shredder_derive::Scan)] 66 | pub struct ShredderMultiref { 67 | refs: Mutex<Vec<shredder::Gc<ShredderMultiref>>>, 68 | } 69 | 70 | #[derive(shredder_derive::Scan)] 71 | pub struct ShredderSyncMultiref { 72 | refs: Mutex<Vec<shredder::Gc<ShredderSyncMultiref>>>, 73 | } 74 | 75 | impl bacon_rajan_cc::Trace for BaconRajanMultiref { 76 | fn trace(&self, tracer: &mut bacon_rajan_cc::Tracer) { 77 | self.refs.lock().unwrap().trace(tracer); 78 | } 79 | } 80 | 81 | impl gc::Finalize for GcMultiref {} 82 | 83 | unsafe impl gc::Trace for GcMultiref { 84 | #[inline] 85 | unsafe fn trace(&self) { 86 | self.refs.trace(); 87 | } 88 | 89 | #[inline] 90 | unsafe fn root(&self) { 91 | self.refs.root(); 92 | } 93 | 94 | #[inline] 95 | unsafe fn unroot(&self) { 96 | self.refs.unroot(); 97 | } 98 | 99 | #[inline] 100 | fn finalize_glue(&self) { 101 | self.refs.finalize_glue() 102 | } 103 | } 104 | 105 | impl Multiref for dumpster::sync::Gc<DumpsterSyncMultiref> { 106 | fn new(points_to: Vec<Self>) -> Self { 107 | dumpster::sync::Gc::new(DumpsterSyncMultiref { 108 | refs: Mutex::new(points_to), 109 | }) 110 | } 111 | 112 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 113 | f(self.refs.lock().unwrap().as_mut()); 114 | } 115 | 116 | fn collect() { 117 | dumpster::sync::collect() 118 | } 119 | } 120 | 121 | impl Multiref for dumpster::unsync::Gc<DumpsterUnsyncMultiref> { 122 | fn new(points_to: Vec<Self>) -> Self { 123 | dumpster::unsync::Gc::new(DumpsterUnsyncMultiref { 124 | refs: Mutex::new(points_to), 125 | }) 126 | } 127 | 128 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 129 | f(self.refs.lock().unwrap().as_mut()); 130 | } 131 | 132 | fn collect() { 133 | dumpster::unsync::collect() 134 | } 135 | } 136 | 137 | impl Multiref for gc::Gc<GcMultiref> { 138 | fn new(points_to: Vec<Self>) -> Self { 139 | gc::Gc::new(GcMultiref { 140 | refs: gc::GcCell::new(points_to), 141 | }) 142 | } 143 | 144 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 145 | f(self.refs.borrow_mut().as_mut()) 146 | } 147 | 148 | fn collect() { 149 | gc::force_collect(); 150 | } 151 | } 152 | 153 | impl Multiref for bacon_rajan_cc::Cc<BaconRajanMultiref> { 154 | fn new(points_to: Vec<Self>) -> Self { 155 | bacon_rajan_cc::Cc::new(BaconRajanMultiref { 156 | refs: Mutex::new(points_to), 157 | }) 158 | } 159 | 160 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 161 | f(self.refs.lock().unwrap().as_mut()); 162 | } 163 | 164 | fn collect() { 165 | bacon_rajan_cc::collect_cycles(); 166 | assert_eq!(bacon_rajan_cc::number_of_roots_buffered(), 0); 167 | } 168 | } 169 | 170 | impl Multiref for shredder::Gc<ShredderMultiref> { 171 | fn new(points_to: Vec<Self>) -> Self { 172 | shredder::Gc::new(ShredderMultiref { 173 | refs: Mutex::new(points_to), 174 | }) 175 | } 176 | 177 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 178 | f(self.get().refs.lock().unwrap().as_mut()); 179 | } 180 | 181 | fn collect() { 182 | shredder::synchronize_destructors(); 183 | } 184 | } 185 | 186 | impl Multiref for shredder::Gc<ShredderSyncMultiref> { 187 | fn new(points_to: Vec<Self>) -> Self { 188 | shredder::Gc::new(ShredderSyncMultiref { 189 | refs: Mutex::new(points_to), 190 | }) 191 | } 192 | 193 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 194 | f(self.get().refs.lock().unwrap().as_mut()); 195 | } 196 | 197 | fn collect() { 198 | shredder::synchronize_destructors(); 199 | } 200 | 
} 201 | 202 | impl Multiref for Rc<RcMultiref> { 203 | fn new(points_to: Vec<Self>) -> Self { 204 | Rc::new(RcMultiref { 205 | refs: Mutex::new(points_to), 206 | }) 207 | } 208 | 209 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 210 | f(self.refs.lock().unwrap().as_mut()); 211 | } 212 | 213 | fn collect() {} 214 | } 215 | 216 | impl Multiref for Arc<ArcMultiref> { 217 | fn new(points_to: Vec<Self>) -> Self { 218 | Arc::new(ArcMultiref { 219 | refs: Mutex::new(points_to), 220 | }) 221 | } 222 | 223 | fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) { 224 | f(self.refs.lock().unwrap().as_mut()); 225 | } 226 | 227 | fn collect() {} 228 | } 229 | -------------------------------------------------------------------------------- /dumpster_bench/src/main.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, a cycle-tracking garbage collector for Rust. 3 | Copyright (C) 2023 Clayton Ramsey. 4 | 5 | This Source Code Form is subject to the terms of the Mozilla Public 6 | License, v. 2.0. If a copy of the MPL was not distributed with this 7 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 8 | */ 9 | 10 | //! Benchmarks for the `dumpster` garbage collection library. 11 | 12 | use std::{ 13 | fmt::Display, 14 | rc::Rc, 15 | sync::Arc, 16 | thread::{self, available_parallelism, scope}, 17 | time::{Duration, Instant}, 18 | }; 19 | 20 | use dumpster_bench::{ 21 | ArcMultiref, BaconRajanMultiref, DumpsterSyncMultiref, DumpsterUnsyncMultiref, GcMultiref, 22 | Multiref, RcMultiref, ShredderMultiref, ShredderSyncMultiref, SyncMultiref, 23 | }; 24 | 25 | use parking_lot::Mutex; 26 | 27 | struct BenchmarkData { 28 | name: &'static str, 29 | test: &'static str, 30 | n_threads: usize, 31 | n_ops: usize, 32 | duration: Duration, 33 | } 34 | 35 | impl Display for BenchmarkData { 36 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 37 | write!( 38 | f, 39 | "{},{},{},{},{}", 40 | self.name, 41 | self.test, 42 | self.n_threads, 43 | self.n_ops, 44 | self.duration.as_micros() 45 | ) 46 | } 47 | } 48 | 49 | fn unsync_never_collect(_: &dumpster::unsync::CollectInfo) -> bool { 50 | false 51 | } 52 | 53 | fn sync_never_collect(_: &dumpster::sync::CollectInfo) -> bool { 54 | false 55 | } 56 | 57 | fn main() { 58 | const N_ITERS: usize = 1_000_000; 59 | for _ in 0..100 { 60 | dumpster::unsync::set_collect_condition(dumpster::unsync::default_collect_condition); 61 | println!( 62 | "{}", 63 | single_threaded::<dumpster::unsync::Gc<DumpsterUnsyncMultiref>>( 64 | "dumpster (unsync)", 65 | N_ITERS, 66 | ) 67 | ); 68 | dumpster::unsync::set_collect_condition(unsync_never_collect); 69 | println!( 70 | "{}", 71 | single_threaded::<dumpster::unsync::Gc<DumpsterUnsyncMultiref>>( 72 | "dumpster (unsync/manual)", 73 | N_ITERS, 74 | ) 75 | ); 76 | dumpster::sync::set_collect_condition(dumpster::sync::default_collect_condition); 77 | println!( 78 | "{}", 79 | single_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>("dumpster (sync)", N_ITERS) 80 | ); 81 | dumpster::sync::set_collect_condition(sync_never_collect); 82 | println!( 83 | "{}", 84 | single_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>( 85 | "dumpster (sync/manual)", 86 | N_ITERS 87 | ) 88 | ); 89 | println!("{}", single_threaded::<gc::Gc<GcMultiref>>("gc", N_ITERS)); 90 | println!( 91 | "{}", 92 | single_threaded::<bacon_rajan_cc::Cc<BaconRajanMultiref>>("bacon-rajan-cc", N_ITERS) 93 | ); 94 | for n_threads in 1..=available_parallelism().unwrap().get() { 95 | // println!("--- {n_threads} threads"); 96 | dumpster::sync::set_collect_condition(dumpster::sync::default_collect_condition); 97 | println!( 98 | "{}", 99 | multi_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>( 100 | "dumpster (sync)", 101 | N_ITERS, 102 | n_threads, 103 | ) 104 | ); 105 | 106 | 
dumpster::sync::set_collect_condition(sync_never_collect); 107 | println!( 108 | "{}", 109 | multi_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>( 110 | "dumpster (sync/manual)", 111 | N_ITERS, 112 | n_threads, 113 | ) 114 | ); 115 | } 116 | } 117 | 118 | for _ in 0..20 { 119 | // run fewer tests of shredder because it takes forever 120 | 121 | println!( 122 | "{}", 123 | single_threaded::<shredder::Gc<ShredderMultiref>>("shredder", N_ITERS) 124 | ); 125 | 126 | for n_threads in 1..=available_parallelism().unwrap().get() { 127 | println!( 128 | "{}", 129 | multi_threaded::<shredder::Gc<ShredderSyncMultiref>>( 130 | "shredder", N_ITERS, n_threads 131 | ) 132 | ); 133 | } 134 | } 135 | 136 | for _ in 0..100 { 137 | println!("{}", single_threaded::<Rc<RcMultiref>>("Rc", N_ITERS)); 138 | println!("{}", single_threaded::<Arc<ArcMultiref>>("Arc", N_ITERS)); 139 | for n_threads in 1..=available_parallelism().unwrap().get() { 140 | println!( 141 | "{}", 142 | multi_threaded::<Arc<ArcMultiref>>("Arc", N_ITERS, n_threads) 143 | ); 144 | } 145 | } 146 | } 147 | 148 | /// Run a single-threaded benchmark of a garbage collector. 149 | fn single_threaded<M: Multiref>(name: &'static str, n_iters: usize) -> BenchmarkData { 150 | fastrand::seed(12345); 151 | let mut gcs = (0..50).map(|_| M::new(Vec::new())).collect::<Vec<_>>(); 152 | 153 | // println!("{name}: running..."); 154 | let tic = Instant::now(); 155 | for _n in 0..n_iters { 156 | // println!("iter {_n}"); 157 | if gcs.is_empty() { 158 | gcs.push(M::new(Vec::new())); 159 | } else { 160 | match fastrand::u8(0..4) { 161 | 0 => { 162 | // println!("create allocation"); 163 | // create new allocation 164 | gcs.push(M::new(Vec::new())); 165 | } 166 | 1 => { 167 | // println!("add reference"); 168 | // add a reference 169 | if gcs.len() > 1 { 170 | let from = fastrand::usize(0..gcs.len()); 171 | let to = fastrand::usize(0..gcs.len()); 172 | let new_gc = gcs[to].clone(); 173 | gcs[from].apply(|v| v.push(new_gc)); 174 | } 175 | } 176 | 2 => { 177 | // println!("remove gc"); 178 | // destroy a reference owned by the vector 179 | gcs.swap_remove(fastrand::usize(0..gcs.len())); 180 | } 181 | 3 => { 182 | // println!("remove reference"); 183 | // destroy a reference owned by some gc 184 | let from = fastrand::usize(0..gcs.len()); 185 | gcs[from].apply(|v| { 186 | if !v.is_empty() { 187 | let to = fastrand::usize(0..v.len()); 188 | v.swap_remove(to); 189 | } 190 | }) 191 | } 192 | _ => unreachable!(), 193 | } 194 | } 195 | } 196 | drop(gcs); 197 | M::collect(); 198 | let toc = Instant::now(); 199 | // println!("finished {name} in {:?}", (toc - tic)); 200 | BenchmarkData { 201 | name, 202 | test: "single_threaded", 203 | n_threads: 1, 204 | n_ops: n_iters, 205 | duration: toc.duration_since(tic), 206 | } 207 | } 208 | 209 | fn multi_threaded<M: SyncMultiref>( 210 | name: &'static str, 211 | n_iters: usize, 212 | n_threads: usize, 213 | ) -> BenchmarkData { 214 | let vecs: Vec<Mutex<Vec<M>>> = (0..(n_threads * 10)) 215 | .map(|_| Mutex::new((0..50).map(|_| M::new(Vec::new())).collect())) 216 | .collect(); 217 | 218 | let tic = Mutex::new(Instant::now()); 219 | let toc = Mutex::new(Instant::now()); 220 | scope(|s| { 221 | for i in 0..n_threads { 222 | let vecs = &vecs; 223 | let tic = &tic; 224 | let toc = &toc; 225 | thread::Builder::new() 226 | .name(format!("multi_threaded{i}")) 227 | .spawn_scoped(s, move || { 228 | *tic.lock() = Instant::now(); 229 | fastrand::seed(12345 + i as u64); 230 | 231 | for _n in 0..(n_iters / n_threads) { 232 | let v1_id = fastrand::usize(0..vecs.len()); 233 | match fastrand::u8(0..4) { 234 | // create 235 | 0 => vecs[v1_id].lock().push(M::new(Vec::new())), 236 | // add ref 237 | 1 => { 238 | let v2_id = 
fastrand::usize(0..vecs.len()); 239 | if v1_id == v2_id { 240 | let g1 = vecs[v1_id].lock(); 241 | if g1.len() < 2 { 242 | continue; 243 | } 244 | let i1 = fastrand::usize(0..g1.len()); 245 | let i2 = fastrand::usize(0..g1.len()); 246 | let new_gc = g1[i2].clone(); 247 | g1[i1].apply(|v| v.push(new_gc)); 248 | } else { 249 | // prevent deadlock by locking lower one first 250 | let (g1, g2) = if v1_id < v2_id { 251 | (vecs[v1_id].lock(), vecs[v2_id].lock()) 252 | } else { 253 | let g2 = vecs[v2_id].lock(); 254 | (vecs[v1_id].lock(), g2) 255 | }; 256 | if g1.is_empty() || g2.is_empty() { 257 | continue; 258 | } 259 | let i1 = fastrand::usize(0..g1.len()); 260 | let i2 = fastrand::usize(0..g2.len()); 261 | let new_gc = g2[i2].clone(); 262 | g1[i1].apply(|v| v.push(new_gc)); 263 | } 264 | } 265 | // destroy gc 266 | 2 => { 267 | let mut guard = vecs[v1_id].lock(); 268 | if guard.is_empty() { 269 | continue; 270 | } 271 | let idx = fastrand::usize(0..guard.len()); 272 | guard.swap_remove(idx); 273 | } 274 | // destroy ref 275 | 3 => { 276 | let guard = vecs[v1_id].lock(); 277 | if guard.is_empty() { 278 | continue; 279 | } 280 | guard[fastrand::usize(0..guard.len())].apply(|v| { 281 | if !v.is_empty() { 282 | v.swap_remove(fastrand::usize(0..v.len())); 283 | } 284 | }); 285 | } 286 | _ => unreachable!(), 287 | }; 288 | } 289 | *toc.lock() = Instant::now(); 290 | }) 291 | .unwrap(); 292 | } 293 | }); 294 | M::collect(); // This op is single threaded and shouldn't count 295 | let duration = toc.lock().duration_since(*tic.lock()); 296 | 297 | // println!("finished {name} in {duration:?}"); 298 | BenchmarkData { 299 | name, 300 | test: "multi_threaded", 301 | n_threads, 302 | n_ops: (n_iters / n_threads) * n_threads, 303 | duration, 304 | } 305 | } 306 | -------------------------------------------------------------------------------- /dumpster_derive/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /dumpster_derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dumpster_derive" 3 | version = "1.1.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | authors = ["Clayton Ramsey"] 7 | description = "Implementation of #[derive(Trace)] for dumpster" 8 | repository = "https://github.com/claytonwramsey/dumpster" 9 | readme = "../README.md" 10 | keywords = ["dumpster", "garbage_collector", "derive", "gc"] 11 | categories = ["memory-management", "data-structures"] 12 | 13 | [lib] 14 | proc-macro = true 15 | 16 | [dependencies] 17 | proc-macro2 = "1.0.60" 18 | quote = "1.0" 19 | syn = "2.0" 20 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 21 | -------------------------------------------------------------------------------- /dumpster_derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, a cycle-tracking garbage collector for Rust. 3 | Copyright (C) 2023 Clayton Ramsey. 4 | 5 | This Source Code Form is subject to the terms of the Mozilla Public 6 | License, v. 2.0. If a copy of the MPL was not distributed with this 7 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 
8 | */ 9 | 10 | #![warn(clippy::pedantic)] 11 | #![warn(clippy::cargo)] 12 | #![allow(clippy::multiple_crate_versions)] 13 | 14 | use proc_macro2::TokenStream; 15 | use quote::{format_ident, quote, quote_spanned}; 16 | use syn::{ 17 | parse_macro_input, parse_quote, spanned::Spanned, Data, DeriveInput, Fields, GenericParam, 18 | Generics, Ident, Index, 19 | }; 20 | 21 | #[proc_macro_derive(Trace)] 22 | pub fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { 23 | let input = parse_macro_input!(input as DeriveInput); 24 | 25 | // name of the type being implemented 26 | let name = &input.ident; 27 | 28 | // generic parameters of the type being implemented 29 | let generics = add_trait_bounds(input.generics); 30 | let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); 31 | 32 | let do_visitor = delegate_methods(name, &input.data); 33 | 34 | let generated = quote! { 35 | unsafe impl #impl_generics ::dumpster::Trace for #name #ty_generics #where_clause { 36 | #[inline] 37 | fn accept<V: ::dumpster::Visitor>(&self, visitor: &mut V) -> ::core::result::Result<(), ()> { 38 | #do_visitor 39 | } 40 | } 41 | }; 42 | 43 | generated.into() 44 | } 45 | 46 | /// Collect the trait bounds for some generic expression. 47 | fn add_trait_bounds(mut generics: Generics) -> Generics { 48 | for param in &mut generics.params { 49 | if let GenericParam::Type(ref mut type_param) = *param { 50 | type_param.bounds.push(parse_quote!(::dumpster::Trace)); 51 | } 52 | } 53 | generics 54 | } 55 | 56 | #[allow(clippy::too_many_lines)] 57 | /// Generate method implementations for [`Trace`] for some data type. 58 | fn delegate_methods(name: &Ident, data: &Data) -> TokenStream { 59 | match data { 60 | Data::Struct(data) => match data.fields { 61 | Fields::Named(ref f) => { 62 | let delegate_visit = f.named.iter().map(|f| { 63 | let name = &f.ident; 64 | quote_spanned! {f.span() => 65 | ::dumpster::Trace::accept( 66 | &self.#name, 67 | visitor 68 | )?; 69 | } 70 | }); 71 | 72 | quote! { #(#delegate_visit)* ::core::result::Result::Ok(()) } 73 | } 74 | Fields::Unnamed(ref f) => { 75 | let delegate_visit = f.unnamed.iter().enumerate().map(|(i, f)| { 76 | let index = Index::from(i); 77 | quote_spanned! {f.span() => 78 | ::dumpster::Trace::accept( 79 | &self.#index, 80 | visitor 81 | )?; 82 | } 83 | }); 84 | 85 | quote! { #(#delegate_visit)* ::core::result::Result::Ok(()) } 86 | } 87 | Fields::Unit => quote! { ::core::result::Result::Ok(()) }, 88 | }, 89 | Data::Enum(e) => { 90 | let mut delegate_visit = TokenStream::new(); 91 | for var in &e.variants { 92 | let var_name = &var.ident; 93 | 94 | match &var.fields { 95 | Fields::Named(n) => { 96 | let mut binding = TokenStream::new(); 97 | let mut execution_visit = TokenStream::new(); 98 | let mut execution_destroy = TokenStream::new(); 99 | for (i, name) in n.named.iter().enumerate() { 100 | let field_name = format_ident!("field{i}"); 101 | let field_ident = name.ident.as_ref().unwrap(); 102 | if i == 0 { 103 | binding.extend(quote! { 104 | #field_ident: #field_name 105 | }); 106 | } else { 107 | binding.extend(quote! { 108 | , #field_ident: #field_name 109 | }); 110 | } 111 | 112 | execution_visit.extend(quote! { 113 | ::dumpster::Trace::accept( 114 | #field_name, 115 | visitor 116 | )?; 117 | }); 118 | 119 | execution_destroy.extend(quote! { 120 | ::dumpster::Trace::destroy_gcs( 121 | #field_name, destroyer 122 | ); 123 | }); 124 | } 125 | 126 | delegate_visit.extend( 127 | quote! 
{#name::#var_name{#binding} => {#execution_visit ::core::result::Result::Ok(())},}, 128 | ); 129 | } 130 | Fields::Unnamed(u) => { 131 | let mut binding = TokenStream::new(); 132 | let mut execution_visit = TokenStream::new(); 133 | let mut execution_destroy = TokenStream::new(); 134 | for (i, _) in u.unnamed.iter().enumerate() { 135 | let field_name = format_ident!("field{i}"); 136 | if i == 0 { 137 | binding.extend(quote! { 138 | #field_name 139 | }); 140 | } else { 141 | binding.extend(quote! { 142 | , #field_name 143 | }); 144 | } 145 | 146 | execution_visit.extend(quote! { 147 | ::dumpster::Trace::accept( 148 | #field_name, 149 | visitor 150 | )?; 151 | }); 152 | 153 | execution_destroy.extend(quote! { 154 | ::dumpster::Trace::destroy_gcs(#field_name, destroyer); 155 | }); 156 | } 157 | 158 | delegate_visit.extend( 159 | quote! {#name::#var_name(#binding) => {#execution_visit ::core::result::Result::Ok(())},}, 160 | ); 161 | } 162 | Fields::Unit => { 163 | delegate_visit 164 | .extend(quote! {#name::#var_name => ::core::result::Result::Ok(()),}); 165 | } 166 | } 167 | } 168 | 169 | quote! {match self {#delegate_visit}} 170 | } 171 | Data::Union(u) => { 172 | quote_spanned! { 173 | u.union_token.span => compile_error!("`Trace` must be manually implemented for unions"); 174 | } 175 | } 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /dumpster_test/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /dumpster_test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dumpster_test" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MPL-2.0" 6 | authors = ["Clayton Ramsey"] 7 | description = "Tests for dumpster garbage collection crate" 8 | repository = "https://github.com/claytonwramsey/dumpster" 9 | readme = "../README.md" 10 | keywords = ["dumpster", "garbage_collector", "test"] 11 | categories = ["data-structures", "memory-management"] 12 | 13 | [dev-dependencies] 14 | dumpster = { version = "1.1.0", path = "../dumpster" } 15 | dumpster_derive = { version = "1.1.0", path = "../dumpster_derive" } 16 | -------------------------------------------------------------------------------- /dumpster_test/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* 2 | dumpster, a cycle-tracking garbage collector for Rust. 3 | Copyright (C) 2023 Clayton Ramsey. 4 | 5 | This Source Code Form is subject to the terms of the Mozilla Public 6 | License, v. 2.0. If a copy of the MPL was not distributed with this 7 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 
8 | */ 9 | 10 | #![warn(clippy::pedantic)] 11 | #![warn(clippy::cargo)] 12 | #![cfg(test)] 13 | 14 | use std::{ 15 | cell::RefCell, 16 | sync::atomic::{AtomicU8, AtomicUsize, Ordering}, 17 | }; 18 | 19 | use dumpster::unsync::{collect, Gc}; 20 | use dumpster_derive::Trace; 21 | 22 | #[derive(Trace)] 23 | struct Empty; 24 | 25 | #[derive(Trace)] 26 | #[allow(dead_code)] 27 | struct UnitTuple(); 28 | 29 | #[derive(Trace)] 30 | struct MultiRef { 31 | counter: &'static AtomicUsize, 32 | pointers: RefCell<Vec<Gc<MultiRef>>>, 33 | } 34 | 35 | #[derive(Trace)] 36 | #[allow(unused)] 37 | enum Refs { 38 | None, 39 | One(Gc<Refs>), 40 | Many { refs: Vec<Gc<Refs>> }, 41 | } 42 | 43 | #[derive(Trace)] 44 | #[allow(unused)] 45 | enum A { 46 | None, 47 | } 48 | 49 | #[derive(Trace)] 50 | #[allow(unused)] 51 | enum B { 52 | One(Gc<B>), 53 | } 54 | 55 | #[derive(Trace)] 56 | #[allow(unused)] 57 | struct Generic<T> { 58 | value: T, 59 | } 60 | 61 | impl Drop for MultiRef { 62 | fn drop(&mut self) { 63 | self.counter.fetch_add(1, Ordering::Relaxed); 64 | } 65 | } 66 | 67 | #[test] 68 | fn unit() { 69 | static DROP_COUNT: AtomicU8 = AtomicU8::new(0); 70 | #[derive(Trace)] 71 | struct DropCount; 72 | 73 | impl Drop for DropCount { 74 | fn drop(&mut self) { 75 | DROP_COUNT.fetch_add(1, Ordering::Relaxed); 76 | } 77 | } 78 | 79 | let gc1 = Gc::new(DropCount); 80 | let gc2 = Gc::clone(&gc1); 81 | 82 | drop(gc1); 83 | assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 0); 84 | drop(gc2); 85 | assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1); 86 | } 87 | 88 | #[test] 89 | fn self_referential() { 90 | static COUNT: AtomicUsize = AtomicUsize::new(0); 91 | 92 | let gc1 = Gc::new(MultiRef { 93 | counter: &COUNT, 94 | pointers: RefCell::new(Vec::new()), 95 | }); 96 | gc1.pointers.borrow_mut().push(Gc::clone(&gc1)); 97 | 98 | assert_eq!(COUNT.load(Ordering::Relaxed), 0); 99 | drop(gc1); 100 | collect(); 101 | assert_eq!(COUNT.load(Ordering::Relaxed), 1); 102 | } 103 | 104 | #[test] 105 | fn double_loop() { 106 | static COUNT: AtomicUsize = AtomicUsize::new(0); 107 | 108 | let gc1 = Gc::new(MultiRef { 109 | counter: &COUNT, 110 | pointers: RefCell::new(Vec::new()), 111 | }); 112 | gc1.pointers 113 | .borrow_mut() 114 | .extend([Gc::clone(&gc1), Gc::clone(&gc1)]); 115 | 116 | assert_eq!(COUNT.load(Ordering::Relaxed), 0); 117 | drop(gc1); 118 | collect(); 119 | assert_eq!(COUNT.load(Ordering::Relaxed), 1); 120 | } 121 | 122 | #[test] 123 | fn parallel_loop() { 124 | static COUNT_1: AtomicUsize = AtomicUsize::new(0); 125 | static COUNT_2: AtomicUsize = AtomicUsize::new(0); 126 | static COUNT_3: AtomicUsize = AtomicUsize::new(0); 127 | static COUNT_4: AtomicUsize = AtomicUsize::new(0); 128 | 129 | let gc1 = Gc::new(MultiRef { 130 | counter: &COUNT_1, 131 | pointers: RefCell::new(Vec::new()), 132 | }); 133 | let gc2 = Gc::new(MultiRef { 134 | counter: &COUNT_2, 135 | pointers: RefCell::new(vec![Gc::clone(&gc1)]), 136 | }); 137 | let gc3 = Gc::new(MultiRef { 138 | counter: &COUNT_3, 139 | pointers: RefCell::new(vec![Gc::clone(&gc1)]), 140 | }); 141 | let gc4 = Gc::new(MultiRef { 142 | counter: &COUNT_4, 143 | pointers: RefCell::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]), 144 | }); 145 | gc1.pointers.borrow_mut().push(Gc::clone(&gc4)); 146 | 147 | drop(gc1); 148 | drop(gc2); 149 | drop(gc3); 150 | assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); 151 | assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); 152 | assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); 153 | assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); 154 | drop(gc4); 155 | collect(); 156 | 
assert_eq!(COUNT_1.load(Ordering::Relaxed), 1); 157 | assert_eq!(COUNT_2.load(Ordering::Relaxed), 1); 158 | assert_eq!(COUNT_3.load(Ordering::Relaxed), 1); 159 | assert_eq!(COUNT_4.load(Ordering::Relaxed), 1); 160 | } 161 | 162 | #[test] 163 | #[allow(clippy::similar_names)] 164 | fn unsync_as_ptr() { 165 | #[derive(Trace)] 166 | struct B(Gc<Empty>); 167 | 168 | let empty = Gc::new(Empty); 169 | let empty_a = Gc::clone(&empty); 170 | let empty_ptr = Gc::as_ptr(&empty); 171 | assert_eq!(empty_ptr, Gc::as_ptr(&empty_a)); 172 | 173 | let b = B(Gc::clone(&empty)); 174 | assert_eq!(empty_ptr, Gc::as_ptr(&b.0)); 175 | let bb = Gc::new(B(Gc::clone(&empty))); 176 | assert_eq!(empty_ptr, Gc::as_ptr(&bb.0)); 177 | 178 | let empty2 = Gc::new(Empty); 179 | let empty2_ptr = Gc::as_ptr(&empty2); 180 | assert_ne!(empty_ptr, empty2_ptr); 181 | let b2 = Gc::new(B(Gc::clone(&empty2))); 182 | assert_eq!(empty2_ptr, Gc::as_ptr(&b2.0)); 183 | assert_ne!(empty_ptr, Gc::as_ptr(&b2.0)); 184 | assert_ne!(Gc::as_ptr(&b.0), Gc::as_ptr(&b2.0)); 185 | assert_ne!(Gc::as_ptr(&b.0), empty2_ptr); 186 | } 187 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | newline_style = "Unix" 2 | wrap_comments = true 3 | comment_width = 100 4 | format_code_in_doc_comments = true 5 | imports_granularity = "Crate" 6 | --------------------------------------------------------------------------------