├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches ├── defer.rs ├── flush.rs └── pin.rs ├── examples └── sanitize.rs └── src ├── atomic.rs ├── collector.rs ├── default.rs ├── deferred.rs ├── epoch.rs ├── guard.rs ├── internal.rs ├── lib.rs └── sync ├── list.rs ├── mod.rs └── queue.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | 3 | rust: 4 | - stable 5 | - beta 6 | - nightly 7 | - 1.26.0 8 | 9 | addons: 10 | apt: 11 | sources: 12 | - ubuntu-toolchain-r-test 13 | - llvm-toolchain-precise 14 | - llvm-toolchain-precise-3.8 15 | packages: 16 | - llvm-3.8 17 | - llvm-3.8-dev 18 | - clang-3.8 19 | - clang-3.8-dev 20 | 21 | script: 22 | - cargo build 23 | - cargo build --release 24 | - cargo build --no-default-features 25 | - cargo build --release --no-default-features 26 | - cargo test 27 | - cargo test --release 28 | 29 | - | 30 | if [ $TRAVIS_RUST_VERSION == nightly ]; then 31 | cargo build --features nightly --no-default-features 32 | cargo build --features nightly --release --no-default-features 33 | fi 34 | 35 | - | 36 | if [ $TRAVIS_RUST_VERSION == nightly ]; then 37 | cargo test --features nightly 38 | fi 39 | 40 | - | 41 | if [[ $TRAVIS_RUST_VERSION == nightly ]]; then 42 | cargo test --features nightly --release 43 | fi 44 | 45 | - | 46 | if [[ $TRAVIS_RUST_VERSION == nightly ]]; then 47 | ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \ 48 | RUSTFLAGS="-Z sanitizer=address" \ 49 | cargo run \ 50 | --target x86_64-unknown-linux-gnu \ 51 | --features sanitize,nightly \ 52 | --example sanitize 53 | fi 54 | 55 | - | 56 | if [[ $TRAVIS_RUST_VERSION == nightly ]]; then 57 | ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \ 58 | RUSTFLAGS="-Z sanitizer=address" \ 59 | cargo run \ 60 | --release \ 61 | --target x86_64-unknown-linux-gnu \ 62 | --features sanitize,nightly \ 63 | --example sanitize 64 | fi 65 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 5 | and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 6 | 7 | ## [Unreleased] 8 | 9 | ## [0.6.0] - 2018-09-11 10 | ### Changed 11 | - `defer` now requires `F: Send + 'static`. 12 | - Bump the minimum Rust version to 1.26. 13 | - Pinning while TLS is tearing down does not fail anymore. 14 | - Rename `Handle` to `LocalHandle`. 15 | 16 | ### Added 17 | - `defer_unchecked` and `defer_destroy`. 18 | 19 | ### Removed 20 | - Remove `Clone` impl for `LocalHandle`. 21 | 22 | ## [0.5.2] - 2018-08-02 23 | ### Changed 24 | - Update `crossbeam-utils` to `0.5`. 25 | 26 | ## [0.5.1] - 2018-07-20 27 | ### Changed 28 | - Fix compatibility with the latest Rust nightly. 29 | 30 | ## [0.5.0] - 2018-06-12 31 | ### Changed 32 | - Update `crossbeam-utils` to `0.4`. 33 | - Specify the minimum Rust version to `1.25.0`. 34 | 35 | ## [0.4.3] - 2018-06-12 36 | ### Changed 37 | - Downgrade `crossbeam-utils` to `0.3` because it was a breaking change. 
38 | 39 | ## [0.4.2] - 2018-06-12 40 | ### Added 41 | - Expose the `Pointer` trait. 42 | - Warn missing docs and missing debug impls. 43 | 44 | ## Changed 45 | - Update `crossbeam-utils` to `0.4`. 46 | 47 | ## [0.4.1] - 2018-03-20 48 | ### Added 49 | - Add `Debug` impls for `Collector`, `Handle`, and `Guard`. 50 | - Add `load_consume` to `Atomic`. 51 | 52 | ### Changed 53 | - Rename `Collector::handle` to `Collector::register`. 54 | 55 | ### Fixed 56 | - Remove the `Send` implementation for `Handle` (this was a bug). Only 57 | `Collector`s can be shared among multiple threads, while `Handle`s and 58 | `Guard`s must stay within the thread in which they were created. 59 | 60 | ## [0.4.0] - 2018-02-10 61 | ### Changed 62 | - Update dependencies. 63 | 64 | ### Removed 65 | - Remove support for Rust 1.13. 66 | 67 | ## [0.3.0] - 2018-02-10 68 | ### Added 69 | - Add support for Rust 1.13. 70 | 71 | ### Changed 72 | - Improve documentation for CAS. 73 | 74 | ## [0.2.0] - 2017-11-29 75 | ### Added 76 | - Add method `Owned::into_box`. 77 | 78 | ### Changed 79 | - Fix a use-after-free bug in `Local::finalize`. 80 | - Fix an ordering bug in `Global::push_bag`. 81 | - Fix a bug in calculating distance between epochs. 82 | 83 | ### Removed 84 | - Remove `impl Into> for Owned`. 85 | 86 | ## 0.1.0 - 2017-11-26 87 | ### Added 88 | - First version of the new epoch-based GC. 89 | 90 | [Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.6.0...HEAD 91 | [0.6.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.5.2...v0.6.0 92 | [0.5.2]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.5.1...v0.5.2 93 | [0.5.1]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.5.0...v0.5.1 94 | [0.5.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.3...v0.5.0 95 | [0.4.3]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.2...v0.4.3 96 | [0.4.2]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.1...v0.4.2 97 | [0.4.1]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.0...v0.4.1 98 | [0.4.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.3.0...v0.4.0 99 | [0.3.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...v0.3.0 100 | [0.2.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.1.0...v0.2.0 101 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "crossbeam-epoch" 3 | version = "0.6.0" 4 | authors = ["The Crossbeam Project Developers"] 5 | license = "MIT/Apache-2.0" 6 | readme = "README.md" 7 | repository = "https://github.com/crossbeam-rs/crossbeam-epoch" 8 | homepage = "https://github.com/crossbeam-rs/crossbeam-epoch" 9 | documentation = "https://docs.rs/crossbeam-epoch" 10 | description = "Epoch-based garbage collection" 11 | keywords = ["lock-free", "rcu", "atomic", "garbage"] 12 | categories = ["concurrency", "memory-management"] 13 | 14 | [features] 15 | default = ["use_std"] 16 | use_std = ["lazy_static", "crossbeam-utils/use_std"] 17 | nightly = ["arrayvec/use_union"] 18 | 19 | # triggering potential data races sooner than later for testing/debugging purposes 20 | sanitize = [] 21 | 22 | [dependencies] 23 | arrayvec = { version = "0.4", default-features = false } 24 | cfg-if = "0.1" 25 | crossbeam-utils = { version = "0.5", default-features = false } 26 | lazy_static = { version = "1", optional = true } 27 | memoffset = { version = 
"0.2" } 28 | scopeguard = { version = "0.3", default-features = false } 29 | 30 | [dev-dependencies] 31 | rand = "0.5" 32 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## NOTE: This crate has been moved into the [crossbeam](https://github.com/crossbeam-rs/crossbeam) repository. 2 | 3 | Do not use this repository. 4 | 5 | # Epoch-based garbage collection 6 | 7 | [![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-epoch.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-epoch) 8 | [![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-epoch) 9 | [![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](https://crates.io/crates/crossbeam-epoch) 10 | [![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](https://docs.rs/crossbeam-epoch) 11 | 12 | This crate provides epoch-based garbage collection for use in concurrent data structures. 
13 | 14 | If a thread removes a node from a concurrent data structure, other threads 15 | may still have pointers to that node, so it cannot be immediately destructed. 16 | Epoch GC allows deferring destruction until it becomes safe to do so. 17 | 18 | ## Usage 19 | 20 | Add this to your `Cargo.toml`: 21 | 22 | ```toml 23 | [dependencies] 24 | crossbeam-epoch = "0.6" 25 | ``` 26 | 27 | Next, add this to your crate: 28 | 29 | ```rust 30 | extern crate crossbeam_epoch as epoch; 31 | ``` 32 | 33 | The minimum required Rust version is 1.26. 34 | 35 | ## License 36 | 37 | Licensed under the terms of MIT license and the Apache License (Version 2.0). 38 | 39 | See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details. 40 | -------------------------------------------------------------------------------- /benches/defer.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate crossbeam_epoch as epoch; 4 | extern crate crossbeam_utils as utils; 5 | extern crate test; 6 | 7 | use epoch::Owned; 8 | use test::Bencher; 9 | use utils::thread::scope; 10 | 11 | #[bench] 12 | fn single_alloc_defer_free(b: &mut Bencher) { 13 | b.iter(|| { 14 | let guard = &epoch::pin(); 15 | let p = Owned::new(1).into_shared(guard); 16 | unsafe { 17 | guard.defer(move || p.into_owned()); 18 | } 19 | }); 20 | } 21 | 22 | #[bench] 23 | fn single_defer(b: &mut Bencher) { 24 | b.iter(|| { 25 | let guard = &epoch::pin(); 26 | unsafe { 27 | guard.defer(move || ()); 28 | } 29 | }); 30 | } 31 | 32 | #[bench] 33 | fn multi_alloc_defer_free(b: &mut Bencher) { 34 | const THREADS: usize = 16; 35 | const STEPS: usize = 10_000; 36 | 37 | b.iter(|| { 38 | scope(|s| { 39 | for _ in 0..THREADS { 40 | s.spawn(|| { 41 | for _ in 0..STEPS { 42 | let guard = &epoch::pin(); 43 | let p = Owned::new(1).into_shared(guard); 44 | unsafe { 45 | guard.defer(move || p.into_owned()); 46 | } 47 | } 48 | }); 49 | } 50 | }); 51 | }); 52 | } 53 | 54 | #[bench] 55 | fn multi_defer(b: &mut Bencher) { 56 | const THREADS: usize = 16; 57 | const STEPS: usize = 10_000; 58 | 59 | b.iter(|| { 60 | scope(|s| { 61 | for _ in 0..THREADS { 62 | s.spawn(|| { 63 | for _ in 0..STEPS { 64 | let guard = &epoch::pin(); 65 | unsafe { 66 | guard.defer(move || ()); 67 | } 68 | } 69 | }); 70 | } 71 | }); 72 | }); 73 | } 74 | -------------------------------------------------------------------------------- /benches/flush.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate crossbeam_epoch as epoch; 4 | extern crate crossbeam_utils as utils; 5 | extern crate test; 6 | 7 | use std::sync::Barrier; 8 | 9 | use test::Bencher; 10 | use utils::thread::scope; 11 | 12 | #[bench] 13 | fn single_flush(b: &mut Bencher) { 14 | const THREADS: usize = 16; 15 | 16 | let start = Barrier::new(THREADS + 1); 17 | let end = Barrier::new(THREADS + 1); 18 | 19 | scope(|s| { 20 | for _ in 0..THREADS { 21 | s.spawn(|| { 22 | epoch::pin(); 23 | start.wait(); 24 | end.wait(); 25 | }); 26 | } 27 | 28 | start.wait(); 29 | b.iter(|| epoch::pin().flush()); 30 | end.wait(); 31 | }); 32 | } 33 | 34 | #[bench] 35 | fn multi_flush(b: &mut Bencher) { 36 | const THREADS: usize = 16; 37 | const STEPS: usize = 10_000; 38 | 39 | b.iter(|| { 40 | scope(|s| { 41 | for _ in 0..THREADS { 42 | s.spawn(|| { 43 | for _ in 0..STEPS { 44 | let guard = &epoch::pin(); 45 | guard.flush(); 46 | } 47 | }); 48 | } 49 | }); 50 | }); 51 | } 52 | 
-------------------------------------------------------------------------------- /benches/pin.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate crossbeam_epoch as epoch; 4 | extern crate crossbeam_utils as utils; 5 | extern crate test; 6 | 7 | use test::Bencher; 8 | use utils::thread::scope; 9 | 10 | #[bench] 11 | fn single_pin(b: &mut Bencher) { 12 | b.iter(|| epoch::pin()); 13 | } 14 | 15 | #[bench] 16 | fn multi_pin(b: &mut Bencher) { 17 | const THREADS: usize = 16; 18 | const STEPS: usize = 100_000; 19 | 20 | b.iter(|| { 21 | scope(|s| { 22 | for _ in 0..THREADS { 23 | s.spawn(|| { 24 | for _ in 0..STEPS { 25 | epoch::pin(); 26 | } 27 | }); 28 | } 29 | }); 30 | }); 31 | } 32 | -------------------------------------------------------------------------------- /examples/sanitize.rs: -------------------------------------------------------------------------------- 1 | extern crate crossbeam_epoch as epoch; 2 | extern crate rand; 3 | 4 | use std::sync::Arc; 5 | use std::sync::atomic::AtomicUsize; 6 | use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed}; 7 | use std::time::{Duration, Instant}; 8 | use std::thread; 9 | 10 | use epoch::{Atomic, Collector, LocalHandle, Owned, Shared}; 11 | use rand::Rng; 12 | 13 | fn worker(a: Arc>, handle: LocalHandle) -> usize { 14 | let mut rng = rand::thread_rng(); 15 | let mut sum = 0; 16 | 17 | if rng.gen() { 18 | thread::sleep(Duration::from_millis(1)); 19 | } 20 | let timeout = Duration::from_millis(rng.gen_range(0, 10)); 21 | let now = Instant::now(); 22 | 23 | while now.elapsed() < timeout { 24 | for _ in 0..100 { 25 | let guard = &handle.pin(); 26 | guard.flush(); 27 | 28 | let val = if rng.gen() { 29 | let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard); 30 | unsafe { 31 | guard.defer_destroy(p); 32 | guard.flush(); 33 | p.deref().load(Relaxed) 34 | } 35 | } else { 36 | let p = a.load(Acquire, guard); 37 | unsafe { 38 | p.deref().fetch_add(sum, Relaxed) 39 | } 40 | }; 41 | 42 | sum = sum.wrapping_add(val); 43 | } 44 | } 45 | 46 | sum 47 | } 48 | 49 | fn main() { 50 | for _ in 0..100 { 51 | let collector = Collector::new(); 52 | let a = Arc::new(Atomic::new(AtomicUsize::new(777))); 53 | 54 | let threads = (0..16) 55 | .map(|_| { 56 | let a = a.clone(); 57 | let c = collector.clone(); 58 | thread::spawn(move || worker(a, c.register())) 59 | }) 60 | .collect::>(); 61 | 62 | for t in threads { 63 | t.join().unwrap(); 64 | } 65 | 66 | unsafe { 67 | a.swap(Shared::null(), AcqRel, epoch::unprotected()).into_owned(); 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/atomic.rs: -------------------------------------------------------------------------------- 1 | use core::borrow::{Borrow, BorrowMut}; 2 | use core::cmp; 3 | use core::fmt; 4 | use core::marker::PhantomData; 5 | use core::mem; 6 | use core::ptr; 7 | use core::ops::{Deref, DerefMut}; 8 | use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; 9 | use core::sync::atomic::Ordering; 10 | use alloc::boxed::Box; 11 | 12 | use guard::Guard; 13 | use crossbeam_utils::AtomicConsume; 14 | 15 | /// Given ordering for the success case in a compare-exchange operation, returns the strongest 16 | /// appropriate ordering for the failure case. 
17 | #[inline] 18 | fn strongest_failure_ordering(ord: Ordering) -> Ordering { 19 | use self::Ordering::*; 20 | match ord { 21 | Relaxed | Release => Relaxed, 22 | Acquire | AcqRel => Acquire, 23 | _ => SeqCst, 24 | } 25 | } 26 | 27 | /// The error returned on failed compare-and-set operation. 28 | pub struct CompareAndSetError<'g, T: 'g, P: Pointer> { 29 | /// The value in the atomic pointer at the time of the failed operation. 30 | pub current: Shared<'g, T>, 31 | 32 | /// The new value, which the operation failed to store. 33 | pub new: P, 34 | } 35 | 36 | impl<'g, T: 'g, P: Pointer + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> { 37 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 38 | f.debug_struct("CompareAndSetError") 39 | .field("current", &self.current) 40 | .field("new", &self.new) 41 | .finish() 42 | } 43 | } 44 | 45 | /// Memory orderings for compare-and-set operations. 46 | /// 47 | /// A compare-and-set operation can have different memory orderings depending on whether it 48 | /// succeeds or fails. This trait generalizes different ways of specifying memory orderings. 49 | /// 50 | /// The two ways of specifying orderings for compare-and-set are: 51 | /// 52 | /// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate 53 | /// ordering is chosen. 54 | /// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is 55 | /// for the failure case. 56 | pub trait CompareAndSetOrdering { 57 | /// The ordering of the operation when it succeeds. 58 | fn success(&self) -> Ordering; 59 | 60 | /// The ordering of the operation when it fails. 61 | /// 62 | /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than 63 | /// the success ordering. 64 | fn failure(&self) -> Ordering; 65 | } 66 | 67 | impl CompareAndSetOrdering for Ordering { 68 | #[inline] 69 | fn success(&self) -> Ordering { 70 | *self 71 | } 72 | 73 | #[inline] 74 | fn failure(&self) -> Ordering { 75 | strongest_failure_ordering(*self) 76 | } 77 | } 78 | 79 | impl CompareAndSetOrdering for (Ordering, Ordering) { 80 | #[inline] 81 | fn success(&self) -> Ordering { 82 | self.0 83 | } 84 | 85 | #[inline] 86 | fn failure(&self) -> Ordering { 87 | self.1 88 | } 89 | } 90 | 91 | /// Panics if the pointer is not properly unaligned. 92 | #[inline] 93 | fn ensure_aligned(raw: *const T) { 94 | assert_eq!(raw as usize & low_bits::(), 0, "unaligned pointer"); 95 | } 96 | 97 | /// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`. 98 | #[inline] 99 | fn low_bits() -> usize { 100 | (1 << mem::align_of::().trailing_zeros()) - 1 101 | } 102 | 103 | /// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`. 104 | /// 105 | /// `tag` is truncated to fit into the unused bits of the pointer to `T`. 106 | #[inline] 107 | fn data_with_tag(data: usize, tag: usize) -> usize { 108 | (data & !low_bits::()) | (tag & low_bits::()) 109 | } 110 | 111 | /// Decomposes a tagged pointer `data` into the pointer and the tag. 112 | #[inline] 113 | fn decompose_data(data: usize) -> (*mut T, usize) { 114 | let raw = (data & !low_bits::()) as *mut T; 115 | let tag = data & low_bits::(); 116 | (raw, tag) 117 | } 118 | 119 | /// An atomic pointer that can be safely shared between threads. 120 | /// 121 | /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused 122 | /// least significant bits of the address. 
More precisely, a tag should be less than `(1 << 123 | /// mem::align_of::().trailing_zeros())`. 124 | /// 125 | /// Any method that loads the pointer must be passed a reference to a [`Guard`]. 126 | /// 127 | /// [`Guard`]: struct.Guard.html 128 | pub struct Atomic { 129 | data: AtomicUsize, 130 | _marker: PhantomData<*mut T>, 131 | } 132 | 133 | unsafe impl Send for Atomic {} 134 | unsafe impl Sync for Atomic {} 135 | 136 | impl Atomic { 137 | /// Returns a new atomic pointer pointing to the tagged pointer `data`. 138 | fn from_usize(data: usize) -> Self { 139 | Self { 140 | data: AtomicUsize::new(data), 141 | _marker: PhantomData, 142 | } 143 | } 144 | 145 | /// Returns a new null atomic pointer. 146 | /// 147 | /// # Examples 148 | /// 149 | /// ``` 150 | /// use crossbeam_epoch::Atomic; 151 | /// 152 | /// let a = Atomic::::null(); 153 | /// ``` 154 | #[cfg(not(feature = "nightly"))] 155 | pub fn null() -> Atomic { 156 | Self { 157 | data: ATOMIC_USIZE_INIT, 158 | _marker: PhantomData, 159 | } 160 | } 161 | 162 | /// Returns a new null atomic pointer. 163 | /// 164 | /// # Examples 165 | /// 166 | /// ``` 167 | /// use crossbeam_epoch::Atomic; 168 | /// 169 | /// let a = Atomic::::null(); 170 | /// ``` 171 | #[cfg(feature = "nightly")] 172 | pub const fn null() -> Atomic { 173 | Self { 174 | data: ATOMIC_USIZE_INIT, 175 | _marker: PhantomData, 176 | } 177 | } 178 | 179 | /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. 180 | /// 181 | /// # Examples 182 | /// 183 | /// ``` 184 | /// use crossbeam_epoch::Atomic; 185 | /// 186 | /// let a = Atomic::new(1234); 187 | /// ``` 188 | pub fn new(value: T) -> Atomic { 189 | Self::from(Owned::new(value)) 190 | } 191 | 192 | /// Loads a `Shared` from the atomic pointer. 193 | /// 194 | /// This method takes an [`Ordering`] argument which describes the memory ordering of this 195 | /// operation. 196 | /// 197 | /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html 198 | /// 199 | /// # Examples 200 | /// 201 | /// ``` 202 | /// use crossbeam_epoch::{self as epoch, Atomic}; 203 | /// use std::sync::atomic::Ordering::SeqCst; 204 | /// 205 | /// let a = Atomic::new(1234); 206 | /// let guard = &epoch::pin(); 207 | /// let p = a.load(SeqCst, guard); 208 | /// ``` 209 | pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { 210 | unsafe { Shared::from_usize(self.data.load(ord)) } 211 | } 212 | 213 | /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering. 214 | /// 215 | /// This is similar to the "acquire" ordering, except that an ordering is 216 | /// only guaranteed with operations that "depend on" the result of the load. 217 | /// However consume loads are usually much faster than acquire loads on 218 | /// architectures with a weak memory model since they don't require memory 219 | /// fence instructions. 220 | /// 221 | /// The exact definition of "depend on" is a bit vague, but it works as you 222 | /// would expect in practice since a lot of software, especially the Linux 223 | /// kernel, rely on this behavior. 
224 | /// 225 | /// # Examples 226 | /// 227 | /// ``` 228 | /// use crossbeam_epoch::{self as epoch, Atomic}; 229 | /// 230 | /// let a = Atomic::new(1234); 231 | /// let guard = &epoch::pin(); 232 | /// let p = a.load_consume(guard); 233 | /// ``` 234 | pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> { 235 | unsafe { Shared::from_usize(self.data.load_consume()) } 236 | } 237 | 238 | /// Stores a `Shared` or `Owned` pointer into the atomic pointer. 239 | /// 240 | /// This method takes an [`Ordering`] argument which describes the memory ordering of this 241 | /// operation. 242 | /// 243 | /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html 244 | /// 245 | /// # Examples 246 | /// 247 | /// ``` 248 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; 249 | /// use std::sync::atomic::Ordering::SeqCst; 250 | /// 251 | /// let a = Atomic::new(1234); 252 | /// a.store(Shared::null(), SeqCst); 253 | /// a.store(Owned::new(1234), SeqCst); 254 | /// ``` 255 | pub fn store<'g, P: Pointer>(&self, new: P, ord: Ordering) { 256 | self.data.store(new.into_usize(), ord); 257 | } 258 | 259 | /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous 260 | /// `Shared`. 261 | /// 262 | /// This method takes an [`Ordering`] argument which describes the memory ordering of this 263 | /// operation. 264 | /// 265 | /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html 266 | /// 267 | /// # Examples 268 | /// 269 | /// ``` 270 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; 271 | /// use std::sync::atomic::Ordering::SeqCst; 272 | /// 273 | /// let a = Atomic::new(1234); 274 | /// let guard = &epoch::pin(); 275 | /// let p = a.swap(Shared::null(), SeqCst, guard); 276 | /// ``` 277 | pub fn swap<'g, P: Pointer>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { 278 | unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) } 279 | } 280 | 281 | /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current 282 | /// value is the same as `current`. The tag is also taken into account, so two pointers to the 283 | /// same object, but with different tags, will not be considered equal. 284 | /// 285 | /// The return value is a result indicating whether the new pointer was written. On success the 286 | /// pointer that was written is returned. On failure the actual current value and `new` are 287 | /// returned. 288 | /// 289 | /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory 290 | /// ordering of this operation. 
291 | /// 292 | /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html 293 | /// 294 | /// # Examples 295 | /// 296 | /// ``` 297 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; 298 | /// use std::sync::atomic::Ordering::SeqCst; 299 | /// 300 | /// let a = Atomic::new(1234); 301 | /// 302 | /// let guard = &epoch::pin(); 303 | /// let mut curr = a.load(SeqCst, guard); 304 | /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard); 305 | /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard); 306 | /// ``` 307 | pub fn compare_and_set<'g, O, P>( 308 | &self, 309 | current: Shared, 310 | new: P, 311 | ord: O, 312 | _: &'g Guard, 313 | ) -> Result, CompareAndSetError<'g, T, P>> 314 | where 315 | O: CompareAndSetOrdering, 316 | P: Pointer, 317 | { 318 | let new = new.into_usize(); 319 | self.data 320 | .compare_exchange(current.into_usize(), new, ord.success(), ord.failure()) 321 | .map(|_| unsafe { Shared::from_usize(new) }) 322 | .map_err(|current| unsafe { 323 | CompareAndSetError { 324 | current: Shared::from_usize(current), 325 | new: P::from_usize(new), 326 | } 327 | }) 328 | } 329 | 330 | /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current 331 | /// value is the same as `current`. The tag is also taken into account, so two pointers to the 332 | /// same object, but with different tags, will not be considered equal. 333 | /// 334 | /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison 335 | /// succeeds, which can result in more efficient code on some platforms. The return value is a 336 | /// result indicating whether the new pointer was written. On success the pointer that was 337 | /// written is returned. On failure the actual current value and `new` are returned. 338 | /// 339 | /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory 340 | /// ordering of this operation. 
341 | /// 342 | /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set 343 | /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html 344 | /// 345 | /// # Examples 346 | /// 347 | /// ``` 348 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; 349 | /// use std::sync::atomic::Ordering::SeqCst; 350 | /// 351 | /// let a = Atomic::new(1234); 352 | /// let guard = &epoch::pin(); 353 | /// 354 | /// let mut new = Owned::new(5678); 355 | /// let mut ptr = a.load(SeqCst, guard); 356 | /// loop { 357 | /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) { 358 | /// Ok(p) => { 359 | /// ptr = p; 360 | /// break; 361 | /// } 362 | /// Err(err) => { 363 | /// ptr = err.current; 364 | /// new = err.new; 365 | /// } 366 | /// } 367 | /// } 368 | /// 369 | /// let mut curr = a.load(SeqCst, guard); 370 | /// loop { 371 | /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) { 372 | /// Ok(_) => break, 373 | /// Err(err) => curr = err.current, 374 | /// } 375 | /// } 376 | /// ``` 377 | pub fn compare_and_set_weak<'g, O, P>( 378 | &self, 379 | current: Shared, 380 | new: P, 381 | ord: O, 382 | _: &'g Guard, 383 | ) -> Result, CompareAndSetError<'g, T, P>> 384 | where 385 | O: CompareAndSetOrdering, 386 | P: Pointer, 387 | { 388 | let new = new.into_usize(); 389 | self.data 390 | .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure()) 391 | .map(|_| unsafe { Shared::from_usize(new) }) 392 | .map_err(|current| unsafe { 393 | CompareAndSetError { 394 | current: Shared::from_usize(current), 395 | new: P::from_usize(new), 396 | } 397 | }) 398 | } 399 | 400 | /// Bitwise "and" with the current tag. 401 | /// 402 | /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the 403 | /// new tag to the result. Returns the previous pointer. 404 | /// 405 | /// This method takes an [`Ordering`] argument which describes the memory ordering of this 406 | /// operation. 407 | /// 408 | /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html 409 | /// 410 | /// # Examples 411 | /// 412 | /// ``` 413 | /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; 414 | /// use std::sync::atomic::Ordering::SeqCst; 415 | /// 416 | /// let a = Atomic::::from(Shared::null().with_tag(3)); 417 | /// let guard = &epoch::pin(); 418 | /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); 419 | /// assert_eq!(a.load(SeqCst, guard).tag(), 2); 420 | /// ``` 421 | pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { 422 | unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::(), ord)) } 423 | } 424 | 425 | /// Bitwise "or" with the current tag. 426 | /// 427 | /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the 428 | /// new tag to the result. Returns the previous pointer. 429 | /// 430 | /// This method takes an [`Ordering`] argument which describes the memory ordering of this 431 | /// operation. 
432 | /// 433 | /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html 434 | /// 435 | /// # Examples 436 | /// 437 | /// ``` 438 | /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; 439 | /// use std::sync::atomic::Ordering::SeqCst; 440 | /// 441 | /// let a = Atomic::::from(Shared::null().with_tag(1)); 442 | /// let guard = &epoch::pin(); 443 | /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); 444 | /// assert_eq!(a.load(SeqCst, guard).tag(), 3); 445 | /// ``` 446 | pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { 447 | unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::(), ord)) } 448 | } 449 | 450 | /// Bitwise "xor" with the current tag. 451 | /// 452 | /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the 453 | /// new tag to the result. Returns the previous pointer. 454 | /// 455 | /// This method takes an [`Ordering`] argument which describes the memory ordering of this 456 | /// operation. 457 | /// 458 | /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html 459 | /// 460 | /// # Examples 461 | /// 462 | /// ``` 463 | /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; 464 | /// use std::sync::atomic::Ordering::SeqCst; 465 | /// 466 | /// let a = Atomic::::from(Shared::null().with_tag(1)); 467 | /// let guard = &epoch::pin(); 468 | /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); 469 | /// assert_eq!(a.load(SeqCst, guard).tag(), 2); 470 | /// ``` 471 | pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { 472 | unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::(), ord)) } 473 | } 474 | } 475 | 476 | impl fmt::Debug for Atomic { 477 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 478 | let data = self.data.load(Ordering::SeqCst); 479 | let (raw, tag) = decompose_data::(data); 480 | 481 | f.debug_struct("Atomic") 482 | .field("raw", &raw) 483 | .field("tag", &tag) 484 | .finish() 485 | } 486 | } 487 | 488 | impl fmt::Pointer for Atomic { 489 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 490 | let data = self.data.load(Ordering::SeqCst); 491 | let (raw, _) = decompose_data::(data); 492 | fmt::Pointer::fmt(&raw, f) 493 | } 494 | } 495 | 496 | impl Clone for Atomic { 497 | /// Returns a copy of the atomic value. 498 | /// 499 | /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other 500 | /// atomics or fences. 501 | fn clone(&self) -> Self { 502 | let data = self.data.load(Ordering::Relaxed); 503 | Atomic::from_usize(data) 504 | } 505 | } 506 | 507 | impl Default for Atomic { 508 | fn default() -> Self { 509 | Atomic::null() 510 | } 511 | } 512 | 513 | impl From> for Atomic { 514 | /// Returns a new atomic pointer pointing to `owned`. 515 | /// 516 | /// # Examples 517 | /// 518 | /// ``` 519 | /// use crossbeam_epoch::{Atomic, Owned}; 520 | /// 521 | /// let a = Atomic::::from(Owned::new(1234)); 522 | /// ``` 523 | fn from(owned: Owned) -> Self { 524 | let data = owned.data; 525 | mem::forget(owned); 526 | Self::from_usize(data) 527 | } 528 | } 529 | 530 | impl From> for Atomic { 531 | fn from(b: Box) -> Self { 532 | Self::from(Owned::from(b)) 533 | } 534 | } 535 | 536 | impl From for Atomic { 537 | fn from(t: T) -> Self { 538 | Self::new(t) 539 | } 540 | } 541 | 542 | impl<'g, T> From> for Atomic { 543 | /// Returns a new atomic pointer pointing to `ptr`. 
544 | /// 545 | /// # Examples 546 | /// 547 | /// ``` 548 | /// use crossbeam_epoch::{Atomic, Shared}; 549 | /// 550 | /// let a = Atomic::::from(Shared::::null()); 551 | /// ``` 552 | fn from(ptr: Shared<'g, T>) -> Self { 553 | Self::from_usize(ptr.data) 554 | } 555 | } 556 | 557 | impl From<*const T> for Atomic { 558 | /// Returns a new atomic pointer pointing to `raw`. 559 | /// 560 | /// # Examples 561 | /// 562 | /// ``` 563 | /// use std::ptr; 564 | /// use crossbeam_epoch::Atomic; 565 | /// 566 | /// let a = Atomic::::from(ptr::null::()); 567 | /// ``` 568 | fn from(raw: *const T) -> Self { 569 | Self::from_usize(raw as usize) 570 | } 571 | } 572 | 573 | /// A trait for either `Owned` or `Shared` pointers. 574 | pub trait Pointer { 575 | /// Returns the machine representation of the pointer. 576 | fn into_usize(self) -> usize; 577 | 578 | /// Returns a new pointer pointing to the tagged pointer `data`. 579 | unsafe fn from_usize(data: usize) -> Self; 580 | } 581 | 582 | /// An owned heap-allocated object. 583 | /// 584 | /// This type is very similar to `Box`. 585 | /// 586 | /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused 587 | /// least significant bits of the address. 588 | pub struct Owned { 589 | data: usize, 590 | _marker: PhantomData>, 591 | } 592 | 593 | impl Pointer for Owned { 594 | #[inline] 595 | fn into_usize(self) -> usize { 596 | let data = self.data; 597 | mem::forget(self); 598 | data 599 | } 600 | 601 | /// Returns a new pointer pointing to the tagged pointer `data`. 602 | /// 603 | /// # Panics 604 | /// 605 | /// Panics if the data is zero in debug mode. 606 | #[inline] 607 | unsafe fn from_usize(data: usize) -> Self { 608 | debug_assert!(data != 0, "converting zero into `Owned`"); 609 | Owned { 610 | data: data, 611 | _marker: PhantomData, 612 | } 613 | } 614 | } 615 | 616 | impl Owned { 617 | /// Allocates `value` on the heap and returns a new owned pointer pointing to it. 618 | /// 619 | /// # Examples 620 | /// 621 | /// ``` 622 | /// use crossbeam_epoch::Owned; 623 | /// 624 | /// let o = Owned::new(1234); 625 | /// ``` 626 | pub fn new(value: T) -> Owned { 627 | Self::from(Box::new(value)) 628 | } 629 | 630 | /// Returns a new owned pointer pointing to `raw`. 631 | /// 632 | /// This function is unsafe because improper use may lead to memory problems. Argument `raw` 633 | /// must be a valid pointer. Also, a double-free may occur if the function is called twice on 634 | /// the same raw pointer. 635 | /// 636 | /// # Panics 637 | /// 638 | /// Panics if `raw` is not properly aligned. 639 | /// 640 | /// # Examples 641 | /// 642 | /// ``` 643 | /// use crossbeam_epoch::Owned; 644 | /// 645 | /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; 646 | /// ``` 647 | pub unsafe fn from_raw(raw: *mut T) -> Owned { 648 | ensure_aligned(raw); 649 | Self::from_usize(raw as usize) 650 | } 651 | 652 | /// Converts the owned pointer into a [`Shared`]. 653 | /// 654 | /// # Examples 655 | /// 656 | /// ``` 657 | /// use crossbeam_epoch::{self as epoch, Owned}; 658 | /// 659 | /// let o = Owned::new(1234); 660 | /// let guard = &epoch::pin(); 661 | /// let p = o.into_shared(guard); 662 | /// ``` 663 | /// 664 | /// [`Shared`]: struct.Shared.html 665 | pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { 666 | unsafe { Shared::from_usize(self.into_usize()) } 667 | } 668 | 669 | /// Converts the owned pointer into a `Box`. 
670 | /// 671 | /// # Examples 672 | /// 673 | /// ``` 674 | /// use crossbeam_epoch::{self as epoch, Owned}; 675 | /// 676 | /// let o = Owned::new(1234); 677 | /// let b: Box = o.into_box(); 678 | /// assert_eq!(*b, 1234); 679 | /// ``` 680 | pub fn into_box(self) -> Box { 681 | let (raw, _) = decompose_data::(self.data); 682 | mem::forget(self); 683 | unsafe { Box::from_raw(raw) } 684 | } 685 | 686 | /// Returns the tag stored within the pointer. 687 | /// 688 | /// # Examples 689 | /// 690 | /// ``` 691 | /// use crossbeam_epoch::Owned; 692 | /// 693 | /// assert_eq!(Owned::new(1234).tag(), 0); 694 | /// ``` 695 | pub fn tag(&self) -> usize { 696 | let (_, tag) = decompose_data::(self.data); 697 | tag 698 | } 699 | 700 | /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the 701 | /// unused bits of the pointer to `T`. 702 | /// 703 | /// # Examples 704 | /// 705 | /// ``` 706 | /// use crossbeam_epoch::Owned; 707 | /// 708 | /// let o = Owned::new(0u64); 709 | /// assert_eq!(o.tag(), 0); 710 | /// let o = o.with_tag(2); 711 | /// assert_eq!(o.tag(), 2); 712 | /// ``` 713 | pub fn with_tag(self, tag: usize) -> Owned { 714 | let data = self.into_usize(); 715 | unsafe { Self::from_usize(data_with_tag::(data, tag)) } 716 | } 717 | } 718 | 719 | impl Drop for Owned { 720 | fn drop(&mut self) { 721 | let (raw, _) = decompose_data::(self.data); 722 | unsafe { 723 | drop(Box::from_raw(raw)); 724 | } 725 | } 726 | } 727 | 728 | impl fmt::Debug for Owned { 729 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 730 | let (raw, tag) = decompose_data::(self.data); 731 | 732 | f.debug_struct("Owned") 733 | .field("raw", &raw) 734 | .field("tag", &tag) 735 | .finish() 736 | } 737 | } 738 | 739 | impl Clone for Owned { 740 | fn clone(&self) -> Self { 741 | Owned::new((**self).clone()).with_tag(self.tag()) 742 | } 743 | } 744 | 745 | impl Deref for Owned { 746 | type Target = T; 747 | 748 | fn deref(&self) -> &T { 749 | let (raw, _) = decompose_data::(self.data); 750 | unsafe { &*raw } 751 | } 752 | } 753 | 754 | impl DerefMut for Owned { 755 | fn deref_mut(&mut self) -> &mut T { 756 | let (raw, _) = decompose_data::(self.data); 757 | unsafe { &mut *raw } 758 | } 759 | } 760 | 761 | impl From for Owned { 762 | fn from(t: T) -> Self { 763 | Owned::new(t) 764 | } 765 | } 766 | 767 | impl From> for Owned { 768 | /// Returns a new owned pointer pointing to `b`. 769 | /// 770 | /// # Panics 771 | /// 772 | /// Panics if the pointer (the `Box`) is not properly aligned. 773 | /// 774 | /// # Examples 775 | /// 776 | /// ``` 777 | /// use crossbeam_epoch::Owned; 778 | /// 779 | /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; 780 | /// ``` 781 | fn from(b: Box) -> Self { 782 | unsafe { Self::from_raw(Box::into_raw(b)) } 783 | } 784 | } 785 | 786 | impl Borrow for Owned { 787 | fn borrow(&self) -> &T { 788 | &**self 789 | } 790 | } 791 | 792 | impl BorrowMut for Owned { 793 | fn borrow_mut(&mut self) -> &mut T { 794 | &mut **self 795 | } 796 | } 797 | 798 | impl AsRef for Owned { 799 | fn as_ref(&self) -> &T { 800 | &**self 801 | } 802 | } 803 | 804 | impl AsMut for Owned { 805 | fn as_mut(&mut self) -> &mut T { 806 | &mut **self 807 | } 808 | } 809 | 810 | /// A pointer to an object protected by the epoch GC. 811 | /// 812 | /// The pointer is valid for use only during the lifetime `'g`. 813 | /// 814 | /// The pointer must be properly aligned. 
Since it is aligned, a tag can be stored into the unused 815 | /// least significant bits of the address. 816 | pub struct Shared<'g, T: 'g> { 817 | data: usize, 818 | _marker: PhantomData<(&'g (), *const T)>, 819 | } 820 | 821 | impl<'g, T> Clone for Shared<'g, T> { 822 | fn clone(&self) -> Self { 823 | Shared { 824 | data: self.data, 825 | _marker: PhantomData, 826 | } 827 | } 828 | } 829 | 830 | impl<'g, T> Copy for Shared<'g, T> {} 831 | 832 | impl<'g, T> Pointer for Shared<'g, T> { 833 | #[inline] 834 | fn into_usize(self) -> usize { 835 | self.data 836 | } 837 | 838 | #[inline] 839 | unsafe fn from_usize(data: usize) -> Self { 840 | Shared { 841 | data: data, 842 | _marker: PhantomData, 843 | } 844 | } 845 | } 846 | 847 | impl<'g, T> Shared<'g, T> { 848 | /// Returns a new null pointer. 849 | /// 850 | /// # Examples 851 | /// 852 | /// ``` 853 | /// use crossbeam_epoch::Shared; 854 | /// 855 | /// let p = Shared::::null(); 856 | /// assert!(p.is_null()); 857 | /// ``` 858 | pub fn null() -> Shared<'g, T> { 859 | Shared { 860 | data: 0, 861 | _marker: PhantomData, 862 | } 863 | } 864 | 865 | /// Returns `true` if the pointer is null. 866 | /// 867 | /// # Examples 868 | /// 869 | /// ``` 870 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; 871 | /// use std::sync::atomic::Ordering::SeqCst; 872 | /// 873 | /// let a = Atomic::null(); 874 | /// let guard = &epoch::pin(); 875 | /// assert!(a.load(SeqCst, guard).is_null()); 876 | /// a.store(Owned::new(1234), SeqCst); 877 | /// assert!(!a.load(SeqCst, guard).is_null()); 878 | /// ``` 879 | pub fn is_null(&self) -> bool { 880 | self.as_raw().is_null() 881 | } 882 | 883 | /// Converts the pointer to a raw pointer (without the tag). 884 | /// 885 | /// # Examples 886 | /// 887 | /// ``` 888 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; 889 | /// use std::sync::atomic::Ordering::SeqCst; 890 | /// 891 | /// let o = Owned::new(1234); 892 | /// let raw = &*o as *const _; 893 | /// let a = Atomic::from(o); 894 | /// 895 | /// let guard = &epoch::pin(); 896 | /// let p = a.load(SeqCst, guard); 897 | /// assert_eq!(p.as_raw(), raw); 898 | /// ``` 899 | pub fn as_raw(&self) -> *const T { 900 | let (raw, _) = decompose_data::(self.data); 901 | raw 902 | } 903 | 904 | /// Dereferences the pointer. 905 | /// 906 | /// Returns a reference to the pointee that is valid during the lifetime `'g`. 907 | /// 908 | /// # Safety 909 | /// 910 | /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. 911 | /// 912 | /// Another concern is the possiblity of data races due to lack of proper synchronization. 913 | /// For example, consider the following scenario: 914 | /// 915 | /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` 916 | /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` 917 | /// 918 | /// The problem is that relaxed orderings don't synchronize initialization of the object with 919 | /// the read from the second thread. This is a data race. A possible solution would be to use 920 | /// `Release` and `Acquire` orderings. 
921 | /// 922 | /// # Examples 923 | /// 924 | /// ``` 925 | /// use crossbeam_epoch::{self as epoch, Atomic}; 926 | /// use std::sync::atomic::Ordering::SeqCst; 927 | /// 928 | /// let a = Atomic::new(1234); 929 | /// let guard = &epoch::pin(); 930 | /// let p = a.load(SeqCst, guard); 931 | /// unsafe { 932 | /// assert_eq!(p.deref(), &1234); 933 | /// } 934 | /// ``` 935 | pub unsafe fn deref(&self) -> &'g T { 936 | &*self.as_raw() 937 | } 938 | 939 | /// Converts the pointer to a reference. 940 | /// 941 | /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`. 942 | /// 943 | /// # Safety 944 | /// 945 | /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. 946 | /// 947 | /// Another concern is the possiblity of data races due to lack of proper synchronization. 948 | /// For example, consider the following scenario: 949 | /// 950 | /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` 951 | /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` 952 | /// 953 | /// The problem is that relaxed orderings don't synchronize initialization of the object with 954 | /// the read from the second thread. This is a data race. A possible solution would be to use 955 | /// `Release` and `Acquire` orderings. 956 | /// 957 | /// # Examples 958 | /// 959 | /// ``` 960 | /// use crossbeam_epoch::{self as epoch, Atomic}; 961 | /// use std::sync::atomic::Ordering::SeqCst; 962 | /// 963 | /// let a = Atomic::new(1234); 964 | /// let guard = &epoch::pin(); 965 | /// let p = a.load(SeqCst, guard); 966 | /// unsafe { 967 | /// assert_eq!(p.as_ref(), Some(&1234)); 968 | /// } 969 | /// ``` 970 | pub unsafe fn as_ref(&self) -> Option<&'g T> { 971 | self.as_raw().as_ref() 972 | } 973 | 974 | /// Takes ownership of the pointee. 975 | /// 976 | /// # Panics 977 | /// 978 | /// Panics if this pointer is null, but only in debug mode. 979 | /// 980 | /// # Safety 981 | /// 982 | /// This method may be called only if the pointer is valid and nobody else is holding a 983 | /// reference to the same object. 984 | /// 985 | /// # Examples 986 | /// 987 | /// ``` 988 | /// use crossbeam_epoch::{self as epoch, Atomic}; 989 | /// use std::sync::atomic::Ordering::SeqCst; 990 | /// 991 | /// let a = Atomic::new(1234); 992 | /// unsafe { 993 | /// let guard = &epoch::unprotected(); 994 | /// let p = a.load(SeqCst, guard); 995 | /// drop(p.into_owned()); 996 | /// } 997 | /// ``` 998 | pub unsafe fn into_owned(self) -> Owned { 999 | debug_assert!( 1000 | self.as_raw() != ptr::null(), 1001 | "converting a null `Shared` into `Owned`" 1002 | ); 1003 | Owned::from_usize(self.data) 1004 | } 1005 | 1006 | /// Returns the tag stored within the pointer. 1007 | /// 1008 | /// # Examples 1009 | /// 1010 | /// ``` 1011 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; 1012 | /// use std::sync::atomic::Ordering::SeqCst; 1013 | /// 1014 | /// let a = Atomic::::from(Owned::new(0u64).with_tag(2)); 1015 | /// let guard = &epoch::pin(); 1016 | /// let p = a.load(SeqCst, guard); 1017 | /// assert_eq!(p.tag(), 2); 1018 | /// ``` 1019 | pub fn tag(&self) -> usize { 1020 | let (_, tag) = decompose_data::(self.data); 1021 | tag 1022 | } 1023 | 1024 | /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the 1025 | /// unused bits of the pointer to `T`. 
1026 | /// 1027 | /// # Examples 1028 | /// 1029 | /// ``` 1030 | /// use crossbeam_epoch::{self as epoch, Atomic}; 1031 | /// use std::sync::atomic::Ordering::SeqCst; 1032 | /// 1033 | /// let a = Atomic::new(0u64); 1034 | /// let guard = &epoch::pin(); 1035 | /// let p1 = a.load(SeqCst, guard); 1036 | /// let p2 = p1.with_tag(2); 1037 | /// 1038 | /// assert_eq!(p1.tag(), 0); 1039 | /// assert_eq!(p2.tag(), 2); 1040 | /// assert_eq!(p1.as_raw(), p2.as_raw()); 1041 | /// ``` 1042 | pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { 1043 | unsafe { Self::from_usize(data_with_tag::(self.data, tag)) } 1044 | } 1045 | } 1046 | 1047 | impl<'g, T> From<*const T> for Shared<'g, T> { 1048 | /// Returns a new pointer pointing to `raw`. 1049 | /// 1050 | /// # Panics 1051 | /// 1052 | /// Panics if `raw` is not properly aligned. 1053 | /// 1054 | /// # Examples 1055 | /// 1056 | /// ``` 1057 | /// use crossbeam_epoch::Shared; 1058 | /// 1059 | /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) }; 1060 | /// assert!(!p.is_null()); 1061 | /// ``` 1062 | fn from(raw: *const T) -> Self { 1063 | ensure_aligned(raw); 1064 | unsafe { Self::from_usize(raw as usize) } 1065 | } 1066 | } 1067 | 1068 | impl<'g, T> PartialEq> for Shared<'g, T> { 1069 | fn eq(&self, other: &Self) -> bool { 1070 | self.data == other.data 1071 | } 1072 | } 1073 | 1074 | impl<'g, T> Eq for Shared<'g, T> {} 1075 | 1076 | impl<'g, T> PartialOrd> for Shared<'g, T> { 1077 | fn partial_cmp(&self, other: &Self) -> Option { 1078 | self.data.partial_cmp(&other.data) 1079 | } 1080 | } 1081 | 1082 | impl<'g, T> Ord for Shared<'g, T> { 1083 | fn cmp(&self, other: &Self) -> cmp::Ordering { 1084 | self.data.cmp(&other.data) 1085 | } 1086 | } 1087 | 1088 | impl<'g, T> fmt::Debug for Shared<'g, T> { 1089 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 1090 | let (raw, tag) = decompose_data::(self.data); 1091 | 1092 | f.debug_struct("Shared") 1093 | .field("raw", &raw) 1094 | .field("tag", &tag) 1095 | .finish() 1096 | } 1097 | } 1098 | 1099 | impl<'g, T> fmt::Pointer for Shared<'g, T> { 1100 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 1101 | fmt::Pointer::fmt(&self.as_raw(), f) 1102 | } 1103 | } 1104 | 1105 | impl<'g, T> Default for Shared<'g, T> { 1106 | fn default() -> Self { 1107 | Shared::null() 1108 | } 1109 | } 1110 | 1111 | #[cfg(test)] 1112 | mod tests { 1113 | use super::Shared; 1114 | 1115 | #[test] 1116 | fn valid_tag_i8() { 1117 | Shared::::null().with_tag(0); 1118 | } 1119 | 1120 | #[test] 1121 | fn valid_tag_i64() { 1122 | Shared::::null().with_tag(7); 1123 | } 1124 | } 1125 | -------------------------------------------------------------------------------- /src/collector.rs: -------------------------------------------------------------------------------- 1 | /// Epoch-based garbage collector. 2 | /// 3 | /// # Examples 4 | /// 5 | /// ``` 6 | /// use crossbeam_epoch::Collector; 7 | /// 8 | /// let collector = Collector::new(); 9 | /// 10 | /// let handle = collector.register(); 11 | /// drop(collector); // `handle` still works after dropping `collector` 12 | /// 13 | /// handle.pin().flush(); 14 | /// ``` 15 | 16 | use alloc::sync::Arc; 17 | use core::fmt; 18 | 19 | use internal::{Global, Local}; 20 | use guard::Guard; 21 | 22 | /// An epoch-based garbage collector. 23 | pub struct Collector { 24 | pub(crate) global: Arc, 25 | } 26 | 27 | unsafe impl Send for Collector {} 28 | unsafe impl Sync for Collector {} 29 | 30 | impl Collector { 31 | /// Creates a new collector. 
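The module-level example above registers a single handle. A slightly larger sketch (mirroring the pattern used by the tests later in this file) shows one handle per thread on a shared custom collector; handles are not `Send`, so each thread must register its own:

```rust
use crossbeam_epoch::Collector;
use crossbeam_utils::thread;

let collector = Collector::new();

thread::scope(|scope| {
    for _ in 0..4 {
        scope.spawn(|| {
            // Each thread registers its own handle.
            let handle = collector.register();
            let guard = handle.pin();
            // ... operate on `Atomic`s tied to this collector ...
            drop(guard);
        });
    }
});
```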
32 | pub fn new() -> Self { 33 | Collector { global: Arc::new(Global::new()) } 34 | } 35 | 36 | /// Registers a new handle for the collector. 37 | pub fn register(&self) -> LocalHandle { 38 | Local::register(self) 39 | } 40 | } 41 | 42 | impl Clone for Collector { 43 | /// Creates another reference to the same garbage collector. 44 | fn clone(&self) -> Self { 45 | Collector { global: self.global.clone() } 46 | } 47 | } 48 | 49 | impl fmt::Debug for Collector { 50 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 51 | f.debug_struct("Collector").finish() 52 | } 53 | } 54 | 55 | impl PartialEq for Collector { 56 | /// Checks if both handles point to the same collector. 57 | fn eq(&self, rhs: &Collector) -> bool { 58 | Arc::ptr_eq(&self.global, &rhs.global) 59 | } 60 | } 61 | impl Eq for Collector {} 62 | 63 | /// A handle to a garbage collector. 64 | pub struct LocalHandle { 65 | pub(crate) local: *const Local, 66 | } 67 | 68 | impl LocalHandle { 69 | /// Pins the handle. 70 | #[inline] 71 | pub fn pin(&self) -> Guard { 72 | unsafe { (*self.local).pin() } 73 | } 74 | 75 | /// Returns `true` if the handle is pinned. 76 | #[inline] 77 | pub fn is_pinned(&self) -> bool { 78 | unsafe { (*self.local).is_pinned() } 79 | } 80 | 81 | /// Returns the `Collector` associated with this handle. 82 | #[inline] 83 | pub fn collector(&self) -> &Collector { 84 | unsafe { (*self.local).collector() } 85 | } 86 | } 87 | 88 | impl Drop for LocalHandle { 89 | #[inline] 90 | fn drop(&mut self) { 91 | unsafe { 92 | Local::release_handle(&*self.local); 93 | } 94 | } 95 | } 96 | 97 | impl fmt::Debug for LocalHandle { 98 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 99 | f.debug_struct("LocalHandle").finish() 100 | } 101 | } 102 | 103 | #[cfg(test)] 104 | mod tests { 105 | use std::mem; 106 | use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; 107 | use std::sync::atomic::Ordering; 108 | 109 | use crossbeam_utils::thread; 110 | 111 | use {Collector, Owned}; 112 | 113 | const NUM_THREADS: usize = 8; 114 | 115 | #[test] 116 | fn pin_reentrant() { 117 | let collector = Collector::new(); 118 | let handle = collector.register(); 119 | drop(collector); 120 | 121 | assert!(!handle.is_pinned()); 122 | { 123 | let _guard = &handle.pin(); 124 | assert!(handle.is_pinned()); 125 | { 126 | let _guard = &handle.pin(); 127 | assert!(handle.is_pinned()); 128 | } 129 | assert!(handle.is_pinned()); 130 | } 131 | assert!(!handle.is_pinned()); 132 | } 133 | 134 | #[test] 135 | fn flush_local_bag() { 136 | let collector = Collector::new(); 137 | let handle = collector.register(); 138 | drop(collector); 139 | 140 | for _ in 0..100 { 141 | let guard = &handle.pin(); 142 | unsafe { 143 | let a = Owned::new(7).into_shared(guard); 144 | guard.defer_destroy(a); 145 | 146 | assert!(!(*(*guard.local).bag.get()).is_empty()); 147 | 148 | while !(*(*guard.local).bag.get()).is_empty() { 149 | guard.flush(); 150 | } 151 | } 152 | } 153 | } 154 | 155 | #[test] 156 | fn garbage_buffering() { 157 | let collector = Collector::new(); 158 | let handle = collector.register(); 159 | drop(collector); 160 | 161 | let guard = &handle.pin(); 162 | unsafe { 163 | for _ in 0..10 { 164 | let a = Owned::new(7).into_shared(guard); 165 | guard.defer_destroy(a); 166 | } 167 | assert!(!(*(*guard.local).bag.get()).is_empty()); 168 | } 169 | } 170 | 171 | #[test] 172 | fn pin_holds_advance() { 173 | let collector = Collector::new(); 174 | 175 | thread::scope(|scope| { 176 | for _ in 0..NUM_THREADS { 177 | scope.spawn(|| { 178 | let handle = 
collector.register(); 179 | for _ in 0..500_000 { 180 | let guard = &handle.pin(); 181 | 182 | let before = collector.global.epoch.load(Ordering::Relaxed); 183 | collector.global.collect(guard); 184 | let after = collector.global.epoch.load(Ordering::Relaxed); 185 | 186 | assert!(after.wrapping_sub(before) <= 2); 187 | } 188 | }); 189 | } 190 | }) 191 | } 192 | 193 | #[test] 194 | fn incremental() { 195 | const COUNT: usize = 100_000; 196 | static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; 197 | 198 | let collector = Collector::new(); 199 | let handle = collector.register(); 200 | 201 | unsafe { 202 | let guard = &handle.pin(); 203 | for _ in 0..COUNT { 204 | let a = Owned::new(7i32).into_shared(guard); 205 | guard.defer_unchecked(move || { 206 | drop(a.into_owned()); 207 | DESTROYS.fetch_add(1, Ordering::Relaxed); 208 | }); 209 | } 210 | guard.flush(); 211 | } 212 | 213 | let mut last = 0; 214 | 215 | while last < COUNT { 216 | let curr = DESTROYS.load(Ordering::Relaxed); 217 | assert!(curr - last <= 1024); 218 | last = curr; 219 | 220 | let guard = &handle.pin(); 221 | collector.global.collect(guard); 222 | } 223 | assert!(DESTROYS.load(Ordering::Relaxed) == 100_000); 224 | } 225 | 226 | #[test] 227 | fn buffering() { 228 | const COUNT: usize = 10; 229 | static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; 230 | 231 | let collector = Collector::new(); 232 | let handle = collector.register(); 233 | 234 | unsafe { 235 | let guard = &handle.pin(); 236 | for _ in 0..COUNT { 237 | let a = Owned::new(7i32).into_shared(guard); 238 | guard.defer_unchecked(move || { 239 | drop(a.into_owned()); 240 | DESTROYS.fetch_add(1, Ordering::Relaxed); 241 | }); 242 | } 243 | } 244 | 245 | for _ in 0..100_000 { 246 | collector.global.collect(&handle.pin()); 247 | } 248 | assert!(DESTROYS.load(Ordering::Relaxed) < COUNT); 249 | 250 | handle.pin().flush(); 251 | 252 | while DESTROYS.load(Ordering::Relaxed) < COUNT { 253 | let guard = &handle.pin(); 254 | collector.global.collect(guard); 255 | } 256 | assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); 257 | } 258 | 259 | #[test] 260 | fn count_drops() { 261 | const COUNT: usize = 100_000; 262 | static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; 263 | 264 | struct Elem(i32); 265 | 266 | impl Drop for Elem { 267 | fn drop(&mut self) { 268 | DROPS.fetch_add(1, Ordering::Relaxed); 269 | } 270 | } 271 | 272 | let collector = Collector::new(); 273 | let handle = collector.register(); 274 | 275 | unsafe { 276 | let guard = &handle.pin(); 277 | 278 | for _ in 0..COUNT { 279 | let a = Owned::new(Elem(7i32)).into_shared(guard); 280 | guard.defer_destroy(a); 281 | } 282 | guard.flush(); 283 | } 284 | 285 | while DROPS.load(Ordering::Relaxed) < COUNT { 286 | let guard = &handle.pin(); 287 | collector.global.collect(guard); 288 | } 289 | assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); 290 | } 291 | 292 | #[test] 293 | fn count_destroy() { 294 | const COUNT: usize = 100_000; 295 | static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; 296 | 297 | let collector = Collector::new(); 298 | let handle = collector.register(); 299 | 300 | unsafe { 301 | let guard = &handle.pin(); 302 | 303 | for _ in 0..COUNT { 304 | let a = Owned::new(7i32).into_shared(guard); 305 | guard.defer_unchecked(move || { 306 | drop(a.into_owned()); 307 | DESTROYS.fetch_add(1, Ordering::Relaxed); 308 | }); 309 | } 310 | guard.flush(); 311 | } 312 | 313 | while DESTROYS.load(Ordering::Relaxed) < COUNT { 314 | let guard = &handle.pin(); 315 | collector.global.collect(guard); 316 | } 317 | 
assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); 318 | } 319 | 320 | #[test] 321 | fn drop_array() { 322 | const COUNT: usize = 700; 323 | static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; 324 | 325 | struct Elem(i32); 326 | 327 | impl Drop for Elem { 328 | fn drop(&mut self) { 329 | DROPS.fetch_add(1, Ordering::Relaxed); 330 | } 331 | } 332 | 333 | let collector = Collector::new(); 334 | let handle = collector.register(); 335 | 336 | let mut guard = handle.pin(); 337 | 338 | let mut v = Vec::with_capacity(COUNT); 339 | for i in 0..COUNT { 340 | v.push(Elem(i as i32)); 341 | } 342 | 343 | { 344 | let a = Owned::new(v).into_shared(&guard); 345 | unsafe { guard.defer_destroy(a); } 346 | guard.flush(); 347 | } 348 | 349 | while DROPS.load(Ordering::Relaxed) < COUNT { 350 | guard.repin(); 351 | collector.global.collect(&guard); 352 | } 353 | assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); 354 | } 355 | 356 | #[test] 357 | fn destroy_array() { 358 | const COUNT: usize = 100_000; 359 | static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT; 360 | 361 | let collector = Collector::new(); 362 | let handle = collector.register(); 363 | 364 | unsafe { 365 | let guard = &handle.pin(); 366 | 367 | let mut v = Vec::with_capacity(COUNT); 368 | for i in 0..COUNT { 369 | v.push(i as i32); 370 | } 371 | 372 | let ptr = v.as_mut_ptr() as usize; 373 | let len = v.len(); 374 | guard.defer_unchecked(move || { 375 | drop(Vec::from_raw_parts(ptr as *const u8 as *mut u8, len, len)); 376 | DESTROYS.fetch_add(len, Ordering::Relaxed); 377 | }); 378 | guard.flush(); 379 | 380 | mem::forget(v); 381 | } 382 | 383 | while DESTROYS.load(Ordering::Relaxed) < COUNT { 384 | let guard = &handle.pin(); 385 | collector.global.collect(guard); 386 | } 387 | assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); 388 | } 389 | 390 | #[test] 391 | fn stress() { 392 | const THREADS: usize = 8; 393 | const COUNT: usize = 100_000; 394 | static DROPS: AtomicUsize = ATOMIC_USIZE_INIT; 395 | 396 | struct Elem(i32); 397 | 398 | impl Drop for Elem { 399 | fn drop(&mut self) { 400 | DROPS.fetch_add(1, Ordering::Relaxed); 401 | } 402 | } 403 | 404 | let collector = Collector::new(); 405 | 406 | thread::scope(|scope| { 407 | for _ in 0..THREADS { 408 | scope.spawn(|| { 409 | let handle = collector.register(); 410 | for _ in 0..COUNT { 411 | let guard = &handle.pin(); 412 | unsafe { 413 | let a = Owned::new(Elem(7i32)).into_shared(guard); 414 | guard.defer_destroy(a); 415 | } 416 | } 417 | }); 418 | } 419 | }); 420 | 421 | let handle = collector.register(); 422 | while DROPS.load(Ordering::Relaxed) < COUNT * THREADS { 423 | let guard = &handle.pin(); 424 | collector.global.collect(guard); 425 | } 426 | assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS); 427 | } 428 | } 429 | -------------------------------------------------------------------------------- /src/default.rs: -------------------------------------------------------------------------------- 1 | //! The default garbage collector. 2 | //! 3 | //! For each thread, a participant is lazily initialized on its first use, when the current thread 4 | //! is registered in the default collector. If initialized, the thread's participant will get 5 | //! destructed on thread exit, which in turn unregisters the thread. 6 | 7 | use collector::{Collector, LocalHandle}; 8 | use guard::Guard; 9 | 10 | lazy_static! { 11 | /// The global data for the default garbage collector. 12 | static ref COLLECTOR: Collector = Collector::new(); 13 | } 14 | 15 | thread_local! 
{ 16 | /// The per-thread participant for the default garbage collector. 17 | static HANDLE: LocalHandle = COLLECTOR.register(); 18 | } 19 | 20 | /// Pins the current thread. 21 | #[inline] 22 | pub fn pin() -> Guard { 23 | with_handle(|handle| handle.pin()) 24 | } 25 | 26 | /// Returns `true` if the current thread is pinned. 27 | #[inline] 28 | pub fn is_pinned() -> bool { 29 | with_handle(|handle| handle.is_pinned()) 30 | } 31 | 32 | /// Returns the default global collector. 33 | pub fn default_collector() -> &'static Collector { 34 | &COLLECTOR 35 | } 36 | 37 | #[inline] 38 | fn with_handle(mut f: F) -> R 39 | where 40 | F: FnMut(&LocalHandle) -> R, 41 | { 42 | HANDLE.try_with(|h| f(h)).unwrap_or_else(|_| f(&COLLECTOR.register())) 43 | } 44 | 45 | #[cfg(test)] 46 | mod tests { 47 | use crossbeam_utils::thread; 48 | 49 | #[test] 50 | fn pin_while_exiting() { 51 | struct Foo; 52 | 53 | impl Drop for Foo { 54 | fn drop(&mut self) { 55 | // Pin after `HANDLE` has been dropped. This must not panic. 56 | super::pin(); 57 | } 58 | } 59 | 60 | thread_local! { 61 | static FOO: Foo = Foo; 62 | } 63 | 64 | thread::scope(|scope| { 65 | scope.spawn(|| { 66 | // Initialize `FOO` and then `HANDLE`. 67 | FOO.with(|_| ()); 68 | super::pin(); 69 | // At thread exit, `HANDLE` gets dropped first and `FOO` second. 70 | }); 71 | }); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/deferred.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::marker::PhantomData; 3 | use core::mem; 4 | use core::ptr; 5 | use alloc::boxed::Box; 6 | 7 | /// Number of words a piece of `Data` can hold. 8 | /// 9 | /// Three words should be enough for the majority of cases. For example, you can fit inside it the 10 | /// function pointer together with a fat pointer representing an object that needs to be destroyed. 11 | const DATA_WORDS: usize = 3; 12 | 13 | /// Some space to keep a `FnOnce()` object on the stack. 14 | type Data = [usize; DATA_WORDS]; 15 | 16 | /// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap. 17 | /// 18 | /// This is a handy way of keeping an unsized `FnOnce()` within a sized structure. 19 | pub struct Deferred { 20 | call: unsafe fn(*mut u8), 21 | data: Data, 22 | _marker: PhantomData<*mut ()>, // !Send + !Sync 23 | } 24 | 25 | impl fmt::Debug for Deferred { 26 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 27 | write!(f, "Deferred {{ ... }}") 28 | } 29 | } 30 | 31 | impl Deferred { 32 | /// Constructs a new `Deferred` from a `FnOnce()`. 33 | pub fn new(f: F) -> Self { 34 | let size = mem::size_of::(); 35 | let align = mem::align_of::(); 36 | 37 | unsafe { 38 | if size <= mem::size_of::() && align <= mem::align_of::() { 39 | let mut data: Data = mem::uninitialized(); 40 | ptr::write(&mut data as *mut Data as *mut F, f); 41 | 42 | unsafe fn call(raw: *mut u8) { 43 | let f: F = ptr::read(raw as *mut F); 44 | f(); 45 | } 46 | 47 | Deferred { 48 | call: call::, 49 | data, 50 | _marker: PhantomData, 51 | } 52 | } else { 53 | let b: Box = Box::new(f); 54 | let mut data: Data = mem::uninitialized(); 55 | ptr::write(&mut data as *mut Data as *mut Box, b); 56 | 57 | unsafe fn call(raw: *mut u8) { 58 | let b: Box = ptr::read(raw as *mut Box); 59 | (*b)(); 60 | } 61 | 62 | Deferred { 63 | call: call::, 64 | data, 65 | _marker: PhantomData, 66 | } 67 | } 68 | } 69 | } 70 | 71 | /// Calls the function. 
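As a rough illustration of the inline-versus-boxed decision in `Deferred::new` above (a standalone sketch that does not use the crate-private type): a closure whose captures fit in `DATA_WORDS` words is stored inline, while anything larger takes the boxed path.

```rust
use std::mem;

// Mirrors `Data = [usize; DATA_WORDS]` with `DATA_WORDS = 3`.
type Data = [usize; 3];

// Capturing a single word: fits inline.
let x = 0usize;
let small = move || drop(x);
assert!(mem::size_of_val(&small) <= mem::size_of::<Data>());

// Capturing ten words: too big, so `Deferred::new` would box it.
let big = [0usize; 10];
let large = move || drop(big);
assert!(mem::size_of_val(&large) > mem::size_of::<Data>());
```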
72 | #[inline] 73 | pub fn call(mut self) { 74 | let call = self.call; 75 | unsafe { call(&mut self.data as *mut Data as *mut u8) }; 76 | } 77 | } 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | use std::cell::Cell; 82 | use super::Deferred; 83 | 84 | #[test] 85 | fn on_stack() { 86 | let fired = &Cell::new(false); 87 | let a = [0usize; 1]; 88 | 89 | let d = Deferred::new(move || { 90 | drop(a); 91 | fired.set(true); 92 | }); 93 | 94 | assert!(!fired.get()); 95 | d.call(); 96 | assert!(fired.get()); 97 | } 98 | 99 | #[test] 100 | fn on_heap() { 101 | let fired = &Cell::new(false); 102 | let a = [0usize; 10]; 103 | 104 | let d = Deferred::new(move || { 105 | drop(a); 106 | fired.set(true); 107 | }); 108 | 109 | assert!(!fired.get()); 110 | d.call(); 111 | assert!(fired.get()); 112 | } 113 | 114 | #[test] 115 | fn string() { 116 | let a = "hello".to_string(); 117 | let d = Deferred::new(move || assert_eq!(a, "hello")); 118 | d.call(); 119 | } 120 | 121 | #[test] 122 | fn boxed_slice_i32() { 123 | let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice(); 124 | let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); 125 | d.call(); 126 | } 127 | 128 | #[test] 129 | fn long_slice_usize() { 130 | let a: [usize; 5] = [2, 3, 5, 7, 11]; 131 | let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); 132 | d.call(); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/epoch.rs: -------------------------------------------------------------------------------- 1 | //! The global epoch 2 | //! 3 | //! The last bit in this number is unused and is always zero. Every so often the global epoch is 4 | //! incremented, i.e. we say it "advances". A pinned participant may advance the global epoch only 5 | //! if all currently pinned participants have been pinned in the current epoch. 6 | //! 7 | //! If an object became garbage in some epoch, then we can be sure that after two advancements no 8 | //! participant will hold a reference to it. That is the crux of safe memory reclamation. 9 | 10 | use core::sync::atomic::{AtomicUsize, Ordering}; 11 | 12 | /// An epoch that can be marked as pinned or unpinned. 13 | /// 14 | /// Internally, the epoch is represented as an integer that wraps around at some unspecified point 15 | /// and a flag that represents whether it is pinned or unpinned. 16 | #[derive(Copy, Clone, Default, Debug, Eq, PartialEq)] 17 | pub struct Epoch { 18 | /// The least significant bit is set if pinned. The rest of the bits hold the epoch. 19 | data: usize, 20 | } 21 | 22 | impl Epoch { 23 | /// Returns the starting epoch in unpinned state. 24 | #[inline] 25 | pub fn starting() -> Self { 26 | Self::default() 27 | } 28 | 29 | /// Returns the number of epochs `self` is ahead of `rhs`. 30 | /// 31 | /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX 32 | /// / 2)`, so the returned distance will be in the same interval. 33 | pub fn wrapping_sub(self, rhs: Self) -> isize { 34 | // The result is the same with `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`, 35 | // because the possible difference of LSB in `(self.data & !1).wrapping_sub(rhs.data & !1)` 36 | // will be ignored in the shift operation. 37 | self.data.wrapping_sub(rhs.data & !1) as isize >> 1 38 | } 39 | 40 | /// Returns `true` if the epoch is marked as pinned. 41 | #[inline] 42 | pub fn is_pinned(self) -> bool { 43 | (self.data & 1) == 1 44 | } 45 | 46 | /// Returns the same epoch, but marked as pinned. 
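A standalone model of the epoch encoding described above (plain functions instead of the crate-private `Epoch` type): the pin flag lives in the least significant bit, successive epochs differ by two, and the distance is recovered with a wrapping subtraction followed by a shift.

```rust
fn pinned(data: usize) -> usize { data | 1 }
fn unpinned(data: usize) -> usize { data & !1 }
fn successor(data: usize) -> usize { data.wrapping_add(2) }

// Same arithmetic as `Epoch::wrapping_sub`: the flag bit is shifted out.
fn distance(a: usize, b: usize) -> isize {
    a.wrapping_sub(b & !1) as isize >> 1
}

let start = 0;
let later = successor(successor(start)); // two advancements
assert_eq!(distance(later, start), 2);
assert_eq!(distance(pinned(later), start), 2); // pinning does not change distance
assert_eq!(unpinned(pinned(later)), later);
```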
47 | #[inline] 48 | pub fn pinned(self) -> Epoch { 49 | Epoch { data: self.data | 1 } 50 | } 51 | 52 | /// Returns the same epoch, but marked as unpinned. 53 | #[inline] 54 | pub fn unpinned(self) -> Epoch { 55 | Epoch { data: self.data & !1 } 56 | } 57 | 58 | /// Returns the successor epoch. 59 | /// 60 | /// The returned epoch will be marked as pinned only if the previous one was as well. 61 | #[inline] 62 | pub fn successor(self) -> Epoch { 63 | Epoch { data: self.data.wrapping_add(2) } 64 | } 65 | } 66 | 67 | /// An atomic value that holds an `Epoch`. 68 | #[derive(Default, Debug)] 69 | pub struct AtomicEpoch { 70 | /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented 71 | /// using an `AtomicUsize`. 72 | data: AtomicUsize, 73 | } 74 | 75 | impl AtomicEpoch { 76 | /// Creates a new atomic epoch. 77 | #[inline] 78 | pub fn new(epoch: Epoch) -> Self { 79 | let data = AtomicUsize::new(epoch.data); 80 | AtomicEpoch { data } 81 | } 82 | 83 | /// Loads a value from the atomic epoch. 84 | #[inline] 85 | pub fn load(&self, ord: Ordering) -> Epoch { 86 | Epoch { data: self.data.load(ord) } 87 | } 88 | 89 | /// Stores a value into the atomic epoch. 90 | #[inline] 91 | pub fn store(&self, epoch: Epoch, ord: Ordering) { 92 | self.data.store(epoch.data, ord); 93 | } 94 | 95 | /// Stores a value into the atomic epoch if the current value is the same as `current`. 96 | /// 97 | /// The return value is always the previous value. If it is equal to `current`, then the value 98 | /// is updated. 99 | /// 100 | /// The `Ordering` argument describes the memory ordering of this operation. 101 | #[inline] 102 | pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch { 103 | let data = self.data.compare_and_swap(current.data, new.data, ord); 104 | Epoch { data } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/guard.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::ptr; 3 | use core::mem; 4 | 5 | use atomic::Shared; 6 | use collector::Collector; 7 | use deferred::Deferred; 8 | use internal::Local; 9 | 10 | /// A guard that keeps the current thread pinned. 11 | /// 12 | /// # Pinning 13 | /// 14 | /// The current thread is pinned by calling [`pin`], which returns a new guard: 15 | /// 16 | /// ``` 17 | /// use crossbeam_epoch as epoch; 18 | /// 19 | /// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference. 20 | /// // This is not really necessary, but makes passing references to the guard a bit easier. 21 | /// let guard = &epoch::pin(); 22 | /// ``` 23 | /// 24 | /// When a guard gets dropped, the current thread is automatically unpinned. 25 | /// 26 | /// # Pointers on the stack 27 | /// 28 | /// Having a guard allows us to create pointers on the stack to heap-allocated objects. 29 | /// For example: 30 | /// 31 | /// ``` 32 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; 33 | /// use std::sync::atomic::Ordering::SeqCst; 34 | /// 35 | /// // Create a heap-allocated number. 36 | /// let a = Atomic::new(777); 37 | /// 38 | /// // Pin the current thread. 39 | /// let guard = &epoch::pin(); 40 | /// 41 | /// // Load the heap-allocated object and create pointer `p` on the stack. 
42 | /// let p = a.load(SeqCst, guard); 43 | /// 44 | /// // Dereference the pointer and print the value: 45 | /// if let Some(num) = unsafe { p.as_ref() } { 46 | /// println!("The number is {}.", num); 47 | /// } 48 | /// ``` 49 | /// 50 | /// # Multiple guards 51 | /// 52 | /// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the 53 | /// thread will actually be pinned only when the first guard is created and unpinned when the last 54 | /// one is dropped: 55 | /// 56 | /// ``` 57 | /// use crossbeam_epoch as epoch; 58 | /// 59 | /// let guard1 = epoch::pin(); 60 | /// let guard2 = epoch::pin(); 61 | /// assert!(epoch::is_pinned()); 62 | /// drop(guard1); 63 | /// assert!(epoch::is_pinned()); 64 | /// drop(guard2); 65 | /// assert!(!epoch::is_pinned()); 66 | /// ``` 67 | /// 68 | /// The same can be achieved by cloning guards: 69 | /// 70 | /// ``` 71 | /// use crossbeam_epoch as epoch; 72 | /// 73 | /// let guard1 = epoch::pin(); 74 | /// let guard2 = guard1.clone(); 75 | /// ``` 76 | /// 77 | /// [`pin`]: fn.pin.html 78 | pub struct Guard { 79 | pub(crate) local: *const Local, 80 | } 81 | 82 | impl Guard { 83 | /// Stores a function so that it can be executed at some point after all currently pinned 84 | /// threads get unpinned. 85 | /// 86 | /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache 87 | /// becomes full, some functions are moved into the global cache. At the same time, some 88 | /// functions from both local and global caches may get executed in order to incrementally 89 | /// clean up the caches as they fill up. 90 | /// 91 | /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it 92 | /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might 93 | /// never run, but the epoch-based garbage collection will make an effort to execute it 94 | /// reasonably soon. 95 | /// 96 | /// If this method is called from an [`unprotected`] guard, the function will simply be 97 | /// executed immediately. 98 | /// 99 | /// [`unprotected`]: fn.unprotected.html 100 | pub fn defer(&self, f: F) 101 | where 102 | F: FnOnce() -> R, 103 | F: Send + 'static, 104 | { 105 | unsafe { 106 | self.defer_unchecked(f); 107 | } 108 | } 109 | 110 | /// Stores a function so that it can be executed at some point after all currently pinned 111 | /// threads get unpinned. 112 | /// 113 | /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache 114 | /// becomes full, some functions are moved into the global cache. At the same time, some 115 | /// functions from both local and global caches may get executed in order to incrementally 116 | /// clean up the caches as they fill up. 117 | /// 118 | /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it 119 | /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might 120 | /// never run, but the epoch-based garbage collection will make an effort to execute it 121 | /// reasonably soon. 122 | /// 123 | /// If this method is called from an [`unprotected`] guard, the function will simply be 124 | /// executed immediately. 125 | /// 126 | /// # Safety 127 | /// 128 | /// The given function must not hold reference onto the stack. It is highly recommended that 129 | /// the passed function is **always** marked with `move` in order to prevent accidental 130 | /// borrows. 
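The safe `defer` shown above has no example of its own in this version; a minimal sketch, assuming the usual `Send + 'static` closure:

```rust
use crossbeam_epoch as epoch;

let guard = epoch::pin();

// Unlike `defer_unchecked`, `defer` is safe: the closure must be
// `Send + 'static`, so it cannot borrow from the stack.
guard.defer(move || println!("runs after all current pins are released"));

// Optionally move it to the global cache so any thread can execute it.
guard.flush();
```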
131 | /// 132 | /// ``` 133 | /// use crossbeam_epoch as epoch; 134 | /// 135 | /// let guard = &epoch::pin(); 136 | /// let message = "Hello!"; 137 | /// unsafe { 138 | /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. 139 | /// guard.defer_unchecked(move || { 140 | /// println!("{}", message); 141 | /// }); 142 | /// } 143 | /// ``` 144 | /// 145 | /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by 146 | /// the closure must be `Send`. 147 | /// 148 | /// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove 149 | /// `F: Send` for typical use cases. For example, consider the following code snippet, which 150 | /// exemplifies the typical use case of deferring the deallocation of a shared reference: 151 | /// 152 | /// ```ignore 153 | /// let shared = Owned::new(7i32).into_shared(guard); 154 | /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`! 155 | /// ``` 156 | /// 157 | /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function, 158 | /// because it's called only after the grace period and `shared` is no longer shared with other 159 | /// threads. But we don't expect type systems to prove this. 160 | /// 161 | /// # Examples 162 | /// 163 | /// When a heap-allocated object in a data structure becomes unreachable, it has to be 164 | /// deallocated. However, the current thread and other threads may be still holding references 165 | /// on the stack to that same object. Therefore it cannot be deallocated before those references 166 | /// get dropped. This method can defer deallocation until all those threads get unpinned and 167 | /// consequently drop all their references on the stack. 168 | /// 169 | /// ```rust 170 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; 171 | /// use std::sync::atomic::Ordering::SeqCst; 172 | /// 173 | /// let a = Atomic::new("foo"); 174 | /// 175 | /// // Now suppose that `a` is shared among multiple threads and concurrently 176 | /// // accessed and modified... 177 | /// 178 | /// // Pin the current thread. 179 | /// let guard = &epoch::pin(); 180 | /// 181 | /// // Steal the object currently stored in `a` and swap it with another one. 182 | /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); 183 | /// 184 | /// if !p.is_null() { 185 | /// // The object `p` is pointing to is now unreachable. 186 | /// // Defer its deallocation until all currently pinned threads get unpinned. 187 | /// unsafe { 188 | /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. 189 | /// guard.defer_unchecked(move || { 190 | /// println!("{} is now being deallocated.", p.deref()); 191 | /// // Now we have unique access to the object pointed to by `p` and can turn it 192 | /// // into an `Owned`. Dropping the `Owned` will deallocate the object. 193 | /// drop(p.into_owned()); 194 | /// }); 195 | /// } 196 | /// } 197 | /// ``` 198 | /// 199 | /// [`unprotected`]: fn.unprotected.html 200 | pub unsafe fn defer_unchecked(&self, f: F) 201 | where 202 | F: FnOnce() -> R, 203 | { 204 | if let Some(local) = self.local.as_ref() { 205 | local.defer(Deferred::new(move || drop(f())), self); 206 | } 207 | } 208 | 209 | /// Stores a destructor for an object so that it can be deallocated and dropped at some point 210 | /// after all currently pinned threads get unpinned. 211 | /// 212 | /// This method first stores the destructor into the thread-local (or handle-local) cache. 
If 213 | /// this cache becomes full, some destructors are moved into the global cache. At the same 214 | /// time, some destructors from both local and global caches may get executed in order to 215 | /// incrementally clean up the caches as they fill up. 216 | /// 217 | /// There is no guarantee when exactly the destructor will be executed. The only guarantee is 218 | /// that it won't be executed until all currently pinned threads get unpinned. In theory, the 219 | /// destructor might never run, but the epoch-based garbage collection will make an effort to 220 | /// execute it reasonably soon. 221 | /// 222 | /// If this method is called from an [`unprotected`] guard, the destructor will simply be 223 | /// executed immediately. 224 | /// 225 | /// # Safety 226 | /// 227 | /// The object must not be reachable by other threads anymore, otherwise it might be still in 228 | /// use when the destructor runs. 229 | /// 230 | /// Apart from that, keep in mind that another thread may execute the destructor, so the object 231 | /// must be sendable to other threads. 232 | /// 233 | /// We intentionally didn't require `T: Send`, because Rust's type systems usually cannot prove 234 | /// `T: Send` for typical use cases. For example, consider the following code snippet, which 235 | /// exemplifies the typical use case of deferring the deallocation of a shared reference: 236 | /// 237 | /// ```ignore 238 | /// let shared = Owned::new(7i32).into_shared(guard); 239 | /// guard.defer_destroy(shared); // `Shared` is not `Send`! 240 | /// ``` 241 | /// 242 | /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because 243 | /// it's called only after the grace period and `shared` is no longer shared with other 244 | /// threads. But we don't expect type systems to prove this. 245 | /// 246 | /// # Examples 247 | /// 248 | /// When a heap-allocated object in a data structure becomes unreachable, it has to be 249 | /// deallocated. However, the current thread and other threads may be still holding references 250 | /// on the stack to that same object. Therefore it cannot be deallocated before those references 251 | /// get dropped. This method can defer deallocation until all those threads get unpinned and 252 | /// consequently drop all their references on the stack. 253 | /// 254 | /// ```rust 255 | /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; 256 | /// use std::sync::atomic::Ordering::SeqCst; 257 | /// 258 | /// let a = Atomic::new("foo"); 259 | /// 260 | /// // Now suppose that `a` is shared among multiple threads and concurrently 261 | /// // accessed and modified... 262 | /// 263 | /// // Pin the current thread. 264 | /// let guard = &epoch::pin(); 265 | /// 266 | /// // Steal the object currently stored in `a` and swap it with another one. 267 | /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); 268 | /// 269 | /// if !p.is_null() { 270 | /// // The object `p` is pointing to is now unreachable. 271 | /// // Defer its deallocation until all currently pinned threads get unpinned. 272 | /// unsafe { 273 | /// guard.defer_destroy(p); 274 | /// } 275 | /// } 276 | /// ``` 277 | /// 278 | /// [`unprotected`]: fn.unprotected.html 279 | pub unsafe fn defer_destroy(&self, ptr: Shared) { 280 | self.defer_unchecked(move || ptr.into_owned()); 281 | } 282 | 283 | /// Clears up the thread-local cache of deferred functions by executing them or moving into the 284 | /// global cache. 
285 | /// 286 | /// Call this method after deferring execution of a function if you want to get it executed as 287 | /// soon as possible. Flushing will make sure it is residing in in the global cache, so that 288 | /// any thread has a chance of taking the function and executing it. 289 | /// 290 | /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens). 291 | /// 292 | /// # Examples 293 | /// 294 | /// ``` 295 | /// use crossbeam_epoch as epoch; 296 | /// 297 | /// let guard = &epoch::pin(); 298 | /// unsafe { 299 | /// guard.defer(move || { 300 | /// println!("This better be printed as soon as possible!"); 301 | /// }); 302 | /// } 303 | /// guard.flush(); 304 | /// ``` 305 | /// 306 | /// [`unprotected`]: fn.unprotected.html 307 | pub fn flush(&self) { 308 | if let Some(local) = unsafe { self.local.as_ref() } { 309 | local.flush(self); 310 | } 311 | } 312 | 313 | /// Unpins and then immediately re-pins the thread. 314 | /// 315 | /// This method is useful when you don't want delay the advancement of the global epoch by 316 | /// holding an old epoch. For safety, you should not maintain any guard-based reference across 317 | /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this 318 | /// is the only active guard for the current thread. 319 | /// 320 | /// If this method is called from an [`unprotected`] guard, then the call will be just no-op. 321 | /// 322 | /// # Examples 323 | /// 324 | /// ``` 325 | /// use crossbeam_epoch::{self as epoch, Atomic}; 326 | /// use std::sync::atomic::Ordering::SeqCst; 327 | /// use std::thread; 328 | /// use std::time::Duration; 329 | /// 330 | /// let a = Atomic::new(777); 331 | /// let mut guard = epoch::pin(); 332 | /// { 333 | /// let p = a.load(SeqCst, &guard); 334 | /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); 335 | /// } 336 | /// guard.repin(); 337 | /// { 338 | /// let p = a.load(SeqCst, &guard); 339 | /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); 340 | /// } 341 | /// ``` 342 | /// 343 | /// [`unprotected`]: fn.unprotected.html 344 | pub fn repin(&mut self) { 345 | if let Some(local) = unsafe { self.local.as_ref() } { 346 | local.repin(); 347 | } 348 | } 349 | 350 | /// Temporarily unpins the thread, executes the given function and then re-pins the thread. 351 | /// 352 | /// This method is useful when you need to perform a long-running operation (e.g. sleeping) 353 | /// and don't need to maintain any guard-based reference across the call (the latter is enforced 354 | /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the 355 | /// current thread. 356 | /// 357 | /// If this method is called from an [`unprotected`] guard, then the passed function is called 358 | /// directly without unpinning the thread. 
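A sketch of the pattern `repin` and `repin_after` are meant for: periodically repin during a long scan so an old epoch is not held, and unpin entirely around a genuinely long pause.

```rust
use crossbeam_epoch as epoch;
use std::thread;
use std::time::Duration;

let mut guard = epoch::pin();

for _batch in 0..4 {
    // ... process a batch of nodes under `guard` ...

    // `&mut self` guarantees no `Shared` borrowed from this guard is still
    // alive, so it is fine to let the epoch advance here.
    guard.repin();
}

// For a long pause, stay unpinned for the whole duration of the call.
guard.repin_after(|| thread::sleep(Duration::from_millis(1)));
```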
359 | /// 360 | /// # Examples 361 | /// 362 | /// ``` 363 | /// use crossbeam_epoch::{self as epoch, Atomic}; 364 | /// use std::sync::atomic::Ordering::SeqCst; 365 | /// use std::thread; 366 | /// use std::time::Duration; 367 | /// 368 | /// let a = Atomic::new(777); 369 | /// let mut guard = epoch::pin(); 370 | /// { 371 | /// let p = a.load(SeqCst, &guard); 372 | /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); 373 | /// } 374 | /// guard.repin_after(|| thread::sleep(Duration::from_millis(50))); 375 | /// { 376 | /// let p = a.load(SeqCst, &guard); 377 | /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); 378 | /// } 379 | /// ``` 380 | /// 381 | /// [`unprotected`]: fn.unprotected.html 382 | pub fn repin_after(&mut self, f: F) -> R 383 | where 384 | F: FnOnce() -> R, 385 | { 386 | if let Some(local) = unsafe { self.local.as_ref() } { 387 | // We need to acquire a handle here to ensure the Local doesn't 388 | // disappear from under us. 389 | local.acquire_handle(); 390 | local.unpin(); 391 | } 392 | 393 | // Ensure the Guard is re-pinned even if the function panics 394 | defer! { 395 | if let Some(local) = unsafe { self.local.as_ref() } { 396 | mem::forget(local.pin()); 397 | local.release_handle(); 398 | } 399 | } 400 | 401 | f() 402 | } 403 | 404 | /// Returns the `Collector` associated with this guard. 405 | /// 406 | /// This method is useful when you need to ensure that all guards used with 407 | /// a data structure come from the same collector. 408 | /// 409 | /// If this method is called from an [`unprotected`] guard, then `None` is returned. 410 | /// 411 | /// # Examples 412 | /// 413 | /// ``` 414 | /// use crossbeam_epoch as epoch; 415 | /// 416 | /// let mut guard1 = epoch::pin(); 417 | /// let mut guard2 = epoch::pin(); 418 | /// assert!(guard1.collector() == guard2.collector()); 419 | /// ``` 420 | /// 421 | /// [`unprotected`]: fn.unprotected.html 422 | pub fn collector(&self) -> Option<&Collector> { 423 | unsafe { self.local.as_ref().map(|local| local.collector()) } 424 | } 425 | } 426 | 427 | impl Drop for Guard { 428 | #[inline] 429 | fn drop(&mut self) { 430 | if let Some(local) = unsafe { self.local.as_ref() } { 431 | local.unpin(); 432 | } 433 | } 434 | } 435 | 436 | impl Clone for Guard { 437 | #[inline] 438 | fn clone(&self) -> Guard { 439 | match unsafe { self.local.as_ref() } { 440 | None => Guard { local: ptr::null() }, 441 | Some(local) => local.pin(), 442 | } 443 | } 444 | } 445 | 446 | impl fmt::Debug for Guard { 447 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 448 | f.debug_struct("Guard").finish() 449 | } 450 | } 451 | 452 | /// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s. 453 | /// 454 | /// This guard should be used in special occasions only. Note that it doesn't actually keep any 455 | /// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely. 456 | /// 457 | /// Note that calling [`defer`] with a dummy guard will not defer the function - it will just 458 | /// execute the function immediately. 459 | /// 460 | /// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`. 461 | /// 462 | /// # Safety 463 | /// 464 | /// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the 465 | /// [`Atomic`] is not being concurrently modified by other threads. 
466 | /// 467 | /// # Examples 468 | /// 469 | /// ``` 470 | /// use crossbeam_epoch::{self as epoch, Atomic}; 471 | /// use std::sync::atomic::Ordering::Relaxed; 472 | /// 473 | /// let a = Atomic::new(7); 474 | /// 475 | /// unsafe { 476 | /// // Load `a` without pinning the current thread. 477 | /// a.load(Relaxed, epoch::unprotected()); 478 | /// 479 | /// // It's possible to create more dummy guards by calling `clone()`. 480 | /// let dummy = &epoch::unprotected().clone(); 481 | /// 482 | /// dummy.defer(move || { 483 | /// println!("This gets executed immediately."); 484 | /// }); 485 | /// 486 | /// // Dropping `dummy` doesn't affect the current thread - it's just a noop. 487 | /// } 488 | /// ``` 489 | /// 490 | /// The most common use of this function is when constructing or destructing a data structure. 491 | /// 492 | /// For example, we can use a dummy guard in the destructor of a Treiber stack because at that 493 | /// point no other thread could concurrently modify the [`Atomic`]s we are accessing. 494 | /// 495 | /// If we were to actually pin the current thread during destruction, that would just unnecessarily 496 | /// delay garbage collection and incur some performance cost, so in cases like these `unprotected` 497 | /// is very helpful. 498 | /// 499 | /// ``` 500 | /// use crossbeam_epoch::{self as epoch, Atomic}; 501 | /// use std::mem::ManuallyDrop; 502 | /// use std::sync::atomic::Ordering::Relaxed; 503 | /// 504 | /// struct Stack { 505 | /// head: Atomic>, 506 | /// } 507 | /// 508 | /// struct Node { 509 | /// data: ManuallyDrop, 510 | /// next: Atomic>, 511 | /// } 512 | /// 513 | /// impl Drop for Stack { 514 | /// fn drop(&mut self) { 515 | /// unsafe { 516 | /// // Unprotected load. 517 | /// let mut node = self.head.load(Relaxed, epoch::unprotected()); 518 | /// 519 | /// while let Some(n) = node.as_ref() { 520 | /// // Unprotected load. 521 | /// let next = n.next.load(Relaxed, epoch::unprotected()); 522 | /// 523 | /// // Take ownership of the node, then drop its data and deallocate it. 524 | /// let mut o = node.into_owned(); 525 | /// ManuallyDrop::drop(&mut o.data); 526 | /// drop(o); 527 | /// 528 | /// node = next; 529 | /// } 530 | /// } 531 | /// } 532 | /// } 533 | /// ``` 534 | /// 535 | /// [`Atomic`]: struct.Atomic.html 536 | /// [`defer`]: struct.Guard.html#method.defer 537 | #[inline] 538 | pub unsafe fn unprotected() -> &'static Guard { 539 | // HACK(stjepang): An unprotected guard is just a `Guard` with its field `local` set to null. 540 | // Since this function returns a `'static` reference to a `Guard`, we must return a reference 541 | // to a global guard. However, it's not possible to create a `static` `Guard` because it does 542 | // not implement `Sync`. To get around the problem, we create a static `usize` initialized to 543 | // zero and then transmute it into a `Guard`. This is safe because `usize` and `Guard` 544 | // (consisting of a single pointer) have the same representation in memory. 545 | static UNPROTECTED: usize = 0; 546 | &*(&UNPROTECTED as *const _ as *const Guard) 547 | } 548 | -------------------------------------------------------------------------------- /src/internal.rs: -------------------------------------------------------------------------------- 1 | //! The global data and participant for garbage collection. 2 | //! 3 | //! # Registration 4 | //! 5 | //! In order to track all participants in one place, we need some form of participant 6 | //! registration. 
When a participant is created, it is registered to a global lock-free 7 | //! singly-linked list of registries; and when a participant is leaving, it is unregistered from the 8 | //! list. 9 | //! 10 | //! # Pinning 11 | //! 12 | //! Every participant contains an integer that tells whether the participant is pinned and if so, 13 | //! what was the global epoch at the time it was pinned. Participants also hold a pin counter that 14 | //! aids in periodic global epoch advancement. 15 | //! 16 | //! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned. 17 | //! Guards are necessary for performing atomic operations, and for freeing/dropping locations. 18 | //! 19 | //! # Thread-local bag 20 | //! 21 | //! Objects that get unlinked from concurrent data structures must be stashed away until the global 22 | //! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects 23 | //! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current 24 | //! global epoch and pushed into the global queue of bags. We store objects in thread-local storages 25 | //! for amortizing the synchronization cost of pushing the garbages to a global queue. 26 | //! 27 | //! # Global queue 28 | //! 29 | //! Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and 30 | //! destroyed along the way. This design reduces contention on data structures. The global queue 31 | //! cannot be explicitly accessed: the only way to interact with it is by calling functions 32 | //! `defer()` that adds an object tothe thread-local bag, or `collect()` that manually triggers 33 | //! garbage collection. 34 | //! 35 | //! Ideally each instance of concurrent data structure may have its own queue that gets fully 36 | //! destroyed as soon as the data structure gets dropped. 37 | 38 | use core::cell::{Cell, UnsafeCell}; 39 | use core::mem::{self, ManuallyDrop}; 40 | use core::num::Wrapping; 41 | use core::ptr; 42 | use core::sync::atomic; 43 | use core::sync::atomic::Ordering; 44 | use alloc::boxed::Box; 45 | 46 | use crossbeam_utils::CachePadded; 47 | use arrayvec::ArrayVec; 48 | 49 | use atomic::Owned; 50 | use collector::{LocalHandle, Collector}; 51 | use epoch::{AtomicEpoch, Epoch}; 52 | use guard::{unprotected, Guard}; 53 | use deferred::Deferred; 54 | use sync::list::{List, Entry, IterError, IsElement}; 55 | use sync::queue::Queue; 56 | 57 | /// Maximum number of objects a bag can contain. 58 | #[cfg(not(feature = "sanitize"))] 59 | const MAX_OBJECTS: usize = 64; 60 | #[cfg(feature = "sanitize")] 61 | const MAX_OBJECTS: usize = 4; 62 | 63 | /// A bag of deferred functions. 64 | #[derive(Default, Debug)] 65 | pub struct Bag { 66 | /// Stashed objects. 67 | deferreds: ArrayVec<[Deferred; MAX_OBJECTS]>, 68 | } 69 | 70 | /// `Bag::try_push()` requires that it is safe for another thread to execute the given functions. 71 | unsafe impl Send for Bag {} 72 | 73 | impl Bag { 74 | /// Returns a new, empty bag. 75 | pub fn new() -> Self { 76 | Self::default() 77 | } 78 | 79 | /// Returns `true` if the bag is empty. 80 | pub fn is_empty(&self) -> bool { 81 | self.deferreds.is_empty() 82 | } 83 | 84 | /// Attempts to insert a deferred function into the bag. 85 | /// 86 | /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is 87 | /// full. 88 | /// 89 | /// # Safety 90 | /// 91 | /// It should be safe for another thread to execute the given function. 
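A sketch of the overflow protocol `try_push` implements (using the same `arrayvec` API this crate depends on): a full buffer hands the rejected element back, so the caller can flush and retry, which is what `Local::defer` does further below.

```rust
use arrayvec::ArrayVec;

let mut bag: ArrayVec<[u32; 4]> = ArrayVec::new();
let mut flushed = 0;

for mut item in 0..10u32 {
    // On overflow the element comes back via `CapacityError::element`,
    // mirroring `Bag::try_push` returning `Err(deferred)`.
    while let Err(e) = bag.try_push(item) {
        bag.clear(); // stands in for "push the bag to the global queue"
        flushed += 1;
        item = e.element();
    }
}

assert_eq!(flushed, 2);
```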
92 | pub unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> { 93 | self.deferreds.try_push(deferred).map_err(|e| e.element()) 94 | } 95 | 96 | /// Seals the bag with the given epoch. 97 | fn seal(self, epoch: Epoch) -> SealedBag { 98 | SealedBag { epoch, bag: self } 99 | } 100 | } 101 | 102 | impl Drop for Bag { 103 | fn drop(&mut self) { 104 | // Call all deferred functions. 105 | for deferred in self.deferreds.drain(..) { 106 | deferred.call(); 107 | } 108 | } 109 | } 110 | 111 | /// A pair of an epoch and a bag. 112 | #[derive(Default, Debug)] 113 | struct SealedBag { 114 | epoch: Epoch, 115 | bag: Bag, 116 | } 117 | 118 | /// It is safe to share `SealedBag` because `is_expired` only inspects the epoch. 119 | unsafe impl Sync for SealedBag {} 120 | 121 | impl SealedBag { 122 | /// Checks if it is safe to drop the bag w.r.t. the given global epoch. 123 | fn is_expired(&self, global_epoch: Epoch) -> bool { 124 | // A pinned participant can witness at most one epoch advancement. Therefore, any bag that 125 | // is within one epoch of the current one cannot be destroyed yet. 126 | global_epoch.wrapping_sub(self.epoch) >= 2 127 | } 128 | } 129 | 130 | /// The global data for a garbage collector. 131 | pub struct Global { 132 | /// The intrusive linked list of `Local`s. 133 | locals: List, 134 | 135 | /// The global queue of bags of deferred functions. 136 | queue: Queue, 137 | 138 | /// The global epoch. 139 | pub(crate) epoch: CachePadded, 140 | } 141 | 142 | impl Global { 143 | /// Number of bags to destroy. 144 | const COLLECT_STEPS: usize = 8; 145 | 146 | /// Creates a new global data for garbage collection. 147 | #[inline] 148 | pub fn new() -> Self { 149 | Self { 150 | locals: List::new(), 151 | queue: Queue::new(), 152 | epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())), 153 | } 154 | } 155 | 156 | /// Pushes the bag into the global queue and replaces the bag with a new empty bag. 157 | pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) { 158 | let bag = mem::replace(bag, Bag::new()); 159 | 160 | atomic::fence(Ordering::SeqCst); 161 | 162 | let epoch = self.epoch.load(Ordering::Relaxed); 163 | self.queue.push(bag.seal(epoch), guard); 164 | } 165 | 166 | /// Collects several bags from the global queue and executes deferred functions in them. 167 | /// 168 | /// Note: This may itself produce garbage and in turn allocate new bags. 169 | /// 170 | /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold 171 | /// path. In other words, we want the compiler to optimize branching for the case when 172 | /// `collect()` is not called. 173 | #[cold] 174 | pub fn collect(&self, guard: &Guard) { 175 | let global_epoch = self.try_advance(guard); 176 | 177 | let steps = if cfg!(feature = "sanitize") { 178 | usize::max_value() 179 | } else { 180 | Self::COLLECT_STEPS 181 | }; 182 | 183 | for _ in 0..steps { 184 | match self.queue.try_pop_if( 185 | &|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch), 186 | guard, 187 | ) 188 | { 189 | None => break, 190 | Some(sealed_bag) => drop(sealed_bag), 191 | } 192 | } 193 | } 194 | 195 | /// Attempts to advance the global epoch. 196 | /// 197 | /// The global epoch can advance only if all currently pinned participants have been pinned in 198 | /// the current epoch. 199 | /// 200 | /// Returns the current global epoch. 201 | /// 202 | /// `try_advance()` is annotated `#[cold]` because it is rarely called. 
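A standalone model of the two rules at play here (plain integer epochs rather than the crate's bit-packed `Epoch`): the global epoch may advance only when every pinned participant is pinned in the current epoch, and a sealed bag becomes reclaimable two advancements after it was sealed.

```rust
// The global epoch may advance only if every pinned participant is current.
fn can_advance(global: u64, pinned: &[u64]) -> bool {
    pinned.iter().all(|&e| e == global)
}

// A bag sealed in `sealed` may be destroyed once the epoch moved on by >= 2.
fn is_expired(global: u64, sealed: u64) -> bool {
    global.wrapping_sub(sealed) >= 2
}

let global = 7;
assert!(can_advance(global, &[7, 7]));
assert!(!can_advance(global, &[7, 6])); // one straggler blocks advancement
assert!(is_expired(global, 5));         // sealed two epochs ago: safe to drop
assert!(!is_expired(global, 6));        // only one epoch ago: might still be read
```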
203 | #[cold] 204 | pub fn try_advance(&self, guard: &Guard) -> Epoch { 205 | let global_epoch = self.epoch.load(Ordering::Relaxed); 206 | atomic::fence(Ordering::SeqCst); 207 | 208 | // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly 209 | // easy to implement in a lock-free manner. However, traversal can be slow due to cache 210 | // misses and data dependencies. We should experiment with other data structures as well. 211 | for local in self.locals.iter(&guard) { 212 | match local { 213 | Err(IterError::Stalled) => { 214 | // A concurrent thread stalled this iteration. That thread might also try to 215 | // advance the epoch, in which case we leave the job to it. Otherwise, the 216 | // epoch will not be advanced. 217 | return global_epoch; 218 | } 219 | Ok(local) => { 220 | let local_epoch = local.epoch.load(Ordering::Relaxed); 221 | 222 | // If the participant was pinned in a different epoch, we cannot advance the 223 | // global epoch just yet. 224 | if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch { 225 | return global_epoch; 226 | } 227 | } 228 | } 229 | } 230 | atomic::fence(Ordering::Acquire); 231 | 232 | // All pinned participants were pinned in the current global epoch. 233 | // Now let's advance the global epoch... 234 | // 235 | // Note that if another thread already advanced it before us, this store will simply 236 | // overwrite the global epoch with the same value. This is true because `try_advance` was 237 | // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be 238 | // advanced two steps ahead of it. 239 | let new_epoch = global_epoch.successor(); 240 | self.epoch.store(new_epoch, Ordering::Release); 241 | new_epoch 242 | } 243 | } 244 | 245 | /// Participant for garbage collection. 246 | pub struct Local { 247 | /// A node in the intrusive linked list of `Local`s. 248 | entry: Entry, 249 | 250 | /// The local epoch. 251 | epoch: AtomicEpoch, 252 | 253 | /// A reference to the global data. 254 | /// 255 | /// When all guards and handles get dropped, this reference is destroyed. 256 | collector: UnsafeCell>, 257 | 258 | /// The local bag of deferred functions. 259 | pub(crate) bag: UnsafeCell, 260 | 261 | /// The number of guards keeping this participant pinned. 262 | guard_count: Cell, 263 | 264 | /// The number of active handles. 265 | handle_count: Cell, 266 | 267 | /// Total number of pinnings performed. 268 | /// 269 | /// This is just an auxilliary counter that sometimes kicks off collection. 270 | pin_count: Cell>, 271 | } 272 | 273 | impl Local { 274 | /// Number of pinnings after which a participant will execute some deferred functions from the 275 | /// global queue. 276 | const PINNINGS_BETWEEN_COLLECT: usize = 128; 277 | 278 | /// Registers a new `Local` in the provided `Global`. 279 | pub fn register(collector: &Collector) -> LocalHandle { 280 | unsafe { 281 | // Since we dereference no pointers in this block, it is safe to use `unprotected`. 
282 | 283 | let local = Owned::new(Local { 284 | entry: Entry::default(), 285 | epoch: AtomicEpoch::new(Epoch::starting()), 286 | collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())), 287 | bag: UnsafeCell::new(Bag::new()), 288 | guard_count: Cell::new(0), 289 | handle_count: Cell::new(1), 290 | pin_count: Cell::new(Wrapping(0)), 291 | }).into_shared(&unprotected()); 292 | collector.global.locals.insert(local, &unprotected()); 293 | LocalHandle { local: local.as_raw() } 294 | } 295 | } 296 | 297 | /// Returns a reference to the `Global` in which this `Local` resides. 298 | #[inline] 299 | pub fn global(&self) -> &Global { 300 | &self.collector().global 301 | } 302 | 303 | /// Returns a reference to the `Collector` in which this `Local` resides. 304 | #[inline] 305 | pub fn collector(&self) -> &Collector { 306 | unsafe { &**self.collector.get() } 307 | } 308 | 309 | /// Returns `true` if the current participant is pinned. 310 | #[inline] 311 | pub fn is_pinned(&self) -> bool { 312 | self.guard_count.get() > 0 313 | } 314 | 315 | /// Adds `deferred` to the thread-local bag. 316 | /// 317 | /// # Safety 318 | /// 319 | /// It should be safe for another thread to execute the given function. 320 | pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) { 321 | let bag = &mut *self.bag.get(); 322 | 323 | while let Err(d) = bag.try_push(deferred) { 324 | self.global().push_bag(bag, guard); 325 | deferred = d; 326 | } 327 | } 328 | 329 | pub fn flush(&self, guard: &Guard) { 330 | let bag = unsafe { &mut *self.bag.get() }; 331 | 332 | if !bag.is_empty() { 333 | self.global().push_bag(bag, guard); 334 | } 335 | 336 | self.global().collect(guard); 337 | } 338 | 339 | /// Pins the `Local`. 340 | #[inline] 341 | pub fn pin(&self) -> Guard { 342 | let guard = Guard { local: self }; 343 | 344 | let guard_count = self.guard_count.get(); 345 | self.guard_count.set(guard_count.checked_add(1).unwrap()); 346 | 347 | if guard_count == 0 { 348 | let global_epoch = self.global().epoch.load(Ordering::Relaxed); 349 | let new_epoch = global_epoch.pinned(); 350 | 351 | // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence. 352 | // The fence makes sure that any future loads from `Atomic`s will not happen before 353 | // this store. 354 | if cfg!(any(target_arch = "x86", target_arch = "x86_64")) { 355 | // HACK(stjepang): On x86 architectures there are two different ways of executing 356 | // a `SeqCst` fence. 357 | // 358 | // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. 359 | // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg` 360 | // instruction. 361 | // 362 | // Both instructions have the effect of a full barrier, but benchmarks have shown 363 | // that the second one makes pinning faster in this particular case. 364 | let current = Epoch::starting(); 365 | let previous = self.epoch.compare_and_swap(current, new_epoch, Ordering::SeqCst); 366 | debug_assert_eq!(current, previous, "participant was expected to be unpinned"); 367 | } else { 368 | self.epoch.store(new_epoch, Ordering::Relaxed); 369 | atomic::fence(Ordering::SeqCst); 370 | } 371 | 372 | // Increment the pin counter. 373 | let count = self.pin_count.get(); 374 | self.pin_count.set(count + Wrapping(1)); 375 | 376 | // After every `PINNINGS_BETWEEN_COLLECT` try advancing the epoch and collecting 377 | // some garbage. 
378 | if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 { 379 | self.global().collect(&guard); 380 | } 381 | } 382 | 383 | guard 384 | } 385 | 386 | /// Unpins the `Local`. 387 | #[inline] 388 | pub fn unpin(&self) { 389 | let guard_count = self.guard_count.get(); 390 | self.guard_count.set(guard_count - 1); 391 | 392 | if guard_count == 1 { 393 | self.epoch.store(Epoch::starting(), Ordering::Release); 394 | 395 | if self.handle_count.get() == 0 { 396 | self.finalize(); 397 | } 398 | } 399 | } 400 | 401 | /// Unpins and then pins the `Local`. 402 | #[inline] 403 | pub fn repin(&self) { 404 | let guard_count = self.guard_count.get(); 405 | 406 | // Update the local epoch only if there's only one guard. 407 | if guard_count == 1 { 408 | let epoch = self.epoch.load(Ordering::Relaxed); 409 | let global_epoch = self.global().epoch.load(Ordering::Relaxed); 410 | 411 | // Update the local epoch only if the global epoch is greater than the local epoch. 412 | if epoch != global_epoch { 413 | // We store the new epoch with `Release` because we need to ensure any memory 414 | // accesses from the previous epoch do not leak into the new one. 415 | self.epoch.store(global_epoch, Ordering::Release); 416 | 417 | // However, we don't need a following `SeqCst` fence, because it is safe for memory 418 | // accesses from the new epoch to be executed before updating the local epoch. At 419 | // worse, other threads will see the new epoch late and delay GC slightly. 420 | } 421 | } 422 | } 423 | 424 | /// Increments the handle count. 425 | #[inline] 426 | pub fn acquire_handle(&self) { 427 | let handle_count = self.handle_count.get(); 428 | debug_assert!(handle_count >= 1); 429 | self.handle_count.set(handle_count + 1); 430 | } 431 | 432 | /// Decrements the handle count. 433 | #[inline] 434 | pub fn release_handle(&self) { 435 | let guard_count = self.guard_count.get(); 436 | let handle_count = self.handle_count.get(); 437 | debug_assert!(handle_count >= 1); 438 | self.handle_count.set(handle_count - 1); 439 | 440 | if guard_count == 0 && handle_count == 1 { 441 | self.finalize(); 442 | } 443 | } 444 | 445 | /// Removes the `Local` from the global linked list. 446 | #[cold] 447 | fn finalize(&self) { 448 | debug_assert_eq!(self.guard_count.get(), 0); 449 | debug_assert_eq!(self.handle_count.get(), 0); 450 | 451 | // Temporarily increment handle count. This is required so that the following call to `pin` 452 | // doesn't call `finalize` again. 453 | self.handle_count.set(1); 454 | unsafe { 455 | // Pin and move the local bag into the global queue. It's important that `push_bag` 456 | // doesn't defer destruction on any new garbage. 457 | let guard = &self.pin(); 458 | self.global().push_bag(&mut *self.bag.get(), guard); 459 | } 460 | // Revert the handle count back to zero. 461 | self.handle_count.set(0); 462 | 463 | unsafe { 464 | // Take the reference to the `Global` out of this `Local`. Since we're not protected 465 | // by a guard at this time, it's crucial that the reference is read before marking the 466 | // `Local` as deleted. 467 | let collector: Collector = ptr::read(&*(*self.collector.get())); 468 | 469 | // Mark this node in the linked list as deleted. 470 | self.entry.delete(&unprotected()); 471 | 472 | // Finally, drop the reference to the global. Note that this might be the last reference 473 | // to the `Global`. If so, the global data will be destroyed and all deferred functions 474 | // in its queue will be executed. 
475 | drop(collector); 476 | } 477 | } 478 | } 479 | 480 | impl IsElement for Local { 481 | fn entry_of(local: &Local) -> &Entry { 482 | let entry_ptr = (local as *const Local as usize + offset_of!(Local, entry)) as *const Entry; 483 | unsafe { &*entry_ptr } 484 | } 485 | 486 | unsafe fn element_of(entry: &Entry) -> &Local { 487 | // offset_of! macro uses unsafe, but it's unnecessary in this context. 488 | #[allow(unused_unsafe)] 489 | let local_ptr = (entry as *const Entry as usize - offset_of!(Local, entry)) as *const Local; 490 | &*local_ptr 491 | } 492 | 493 | unsafe fn finalize(entry: &Entry) { 494 | let local = Self::element_of(entry); 495 | drop(Box::from_raw(local as *const Local as *mut Local)); 496 | } 497 | } 498 | 499 | #[cfg(test)] 500 | mod tests { 501 | use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; 502 | use std::sync::atomic::Ordering; 503 | 504 | use super::*; 505 | 506 | #[test] 507 | fn check_defer() { 508 | static FLAG: AtomicUsize = ATOMIC_USIZE_INIT; 509 | fn set() { 510 | FLAG.store(42, Ordering::Relaxed); 511 | } 512 | 513 | let d = Deferred::new(set); 514 | assert_eq!(FLAG.load(Ordering::Relaxed), 0); 515 | d.call(); 516 | assert_eq!(FLAG.load(Ordering::Relaxed), 42); 517 | } 518 | 519 | #[test] 520 | fn check_bag() { 521 | static FLAG: AtomicUsize = ATOMIC_USIZE_INIT; 522 | fn incr() { 523 | FLAG.fetch_add(1, Ordering::Relaxed); 524 | } 525 | 526 | let mut bag = Bag::new(); 527 | assert!(bag.is_empty()); 528 | 529 | for _ in 0..MAX_OBJECTS { 530 | assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() }); 531 | assert!(!bag.is_empty()); 532 | assert_eq!(FLAG.load(Ordering::Relaxed), 0); 533 | } 534 | 535 | let result = unsafe { bag.try_push(Deferred::new(incr)) }; 536 | assert!(result.is_err()); 537 | assert!(!bag.is_empty()); 538 | assert_eq!(FLAG.load(Ordering::Relaxed), 0); 539 | 540 | drop(bag); 541 | assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS); 542 | } 543 | } 544 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Epoch-based memory reclamation. 2 | //! 3 | //! An interesting problem concurrent collections deal with comes from the remove operation. 4 | //! Suppose that a thread removes an element from a lock-free map, while another thread is reading 5 | //! that same element at the same time. The first thread must wait until the second thread stops 6 | //! reading the element. Only then it is safe to destruct it. 7 | //! 8 | //! Programming languages that come with garbage collectors solve this problem trivially. The 9 | //! garbage collector will destruct the removed element when no thread can hold a reference to it 10 | //! anymore. 11 | //! 12 | //! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an 13 | //! element gets removed from a concurrent collection, it is inserted into a pile of garbage and 14 | //! marked with the current epoch. Every time a thread accesses a collection, it checks the current 15 | //! epoch, attempts to increment it, and destructs some garbage that became so old that no thread 16 | //! can be referencing it anymore. 17 | //! 18 | //! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit 19 | //! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something 20 | //! users of concurrent collections don't have to worry much about. 21 | //! 22 | //! 
# Pointers 23 | //! 24 | //! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which 25 | //! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a 26 | //! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely 27 | //! read. 28 | //! 29 | //! # Pinning 30 | //! 31 | //! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant 32 | //! we declare that any object that gets removed from now on must not be destructed just 33 | //! yet. Garbage collection of newly removed objects is suspended until the participant gets 34 | //! unpinned. 35 | //! 36 | //! # Garbage 37 | //! 38 | //! Objects that get removed from concurrent collections must be stashed away until all currently 39 | //! pinned participants get unpinned. Such objects can be stored into a thread-local or global 40 | //! storage, where they are kept until the right time for their destruction comes. 41 | //! 42 | //! There is a global shared instance of garbage queue. You can [`defer`] the execution of an 43 | //! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data 44 | //! structures may defer the deallocation of an object. 45 | //! 46 | //! # APIs 47 | //! 48 | //! For majority of use cases, just use the default garbage collector by invoking [`pin`]. If you 49 | //! want to create your own garbage collector, use the [`Collector`] API. 50 | //! 51 | //! [`Atomic`]: struct.Atomic.html 52 | //! [`Collector`]: struct.Collector.html 53 | //! [`Shared`]: struct.Shared.html 54 | //! [`pin`]: fn.pin.html 55 | //! [`defer`]: fn.defer.html 56 | 57 | #![cfg_attr(feature = "nightly", feature(const_fn))] 58 | #![cfg_attr(feature = "nightly", feature(alloc))] 59 | #![cfg_attr(not(test), no_std)] 60 | 61 | #![warn(missing_docs, missing_debug_implementations)] 62 | 63 | #[cfg(test)] 64 | extern crate core; 65 | #[cfg(all(not(test), feature = "use_std"))] 66 | #[macro_use] 67 | extern crate std; 68 | 69 | // Use liballoc on nightly to avoid a dependency on libstd 70 | #[cfg(feature = "nightly")] 71 | extern crate alloc; 72 | #[cfg(not(feature = "nightly"))] 73 | extern crate std as alloc; 74 | 75 | extern crate arrayvec; 76 | extern crate crossbeam_utils; 77 | #[cfg(feature = "use_std")] 78 | #[macro_use] 79 | extern crate lazy_static; 80 | #[macro_use] 81 | extern crate memoffset; 82 | #[macro_use] 83 | extern crate scopeguard; 84 | 85 | mod atomic; 86 | mod collector; 87 | #[cfg(feature = "use_std")] 88 | mod default; 89 | mod deferred; 90 | mod epoch; 91 | mod guard; 92 | mod internal; 93 | mod sync; 94 | 95 | pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared, Pointer}; 96 | pub use self::guard::{unprotected, Guard}; 97 | #[cfg(feature = "use_std")] 98 | pub use self::default::{default_collector, is_pinned, pin}; 99 | pub use self::collector::{Collector, LocalHandle}; 100 | -------------------------------------------------------------------------------- /src/sync/list.rs: -------------------------------------------------------------------------------- 1 | //! Lock-free intrusive linked list. 2 | //! 3 | //! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA 4 | //! 2002. 
http://dl.acm.org/citation.cfm?id=564870.564881 5 | 6 | use core::marker::PhantomData; 7 | use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; 8 | 9 | use {Atomic, Shared, Guard, unprotected}; 10 | 11 | /// An entry in a linked list. 12 | /// 13 | /// An Entry is accessed from multiple threads, so it would be beneficial to put it in a different 14 | /// cache-line than thread-local data in terms of performance. 15 | #[derive(Debug)] 16 | pub struct Entry { 17 | /// The next entry in the linked list. 18 | /// If the tag is 1, this entry is marked as deleted. 19 | next: Atomic, 20 | } 21 | 22 | /// Implementing this trait asserts that the type `T` can be used as an element in the intrusive 23 | /// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance 24 | /// of `Entry`. 25 | /// 26 | /// # Example 27 | /// 28 | /// ```ignore 29 | /// struct A { 30 | /// entry: Entry, 31 | /// data: usize, 32 | /// } 33 | /// 34 | /// impl IsElement for A { 35 | /// fn entry_of(a: &A) -> &Entry { 36 | /// let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry; 37 | /// unsafe { &*entry_ptr } 38 | /// } 39 | /// 40 | /// unsafe fn element_of(entry: &Entry) -> &T { 41 | /// let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const T; 42 | /// &*elem_ptr 43 | /// } 44 | /// 45 | /// unsafe fn finalize(entry: &Entry) { 46 | /// let elem = Self::element_of(entry); 47 | /// drop(Box::from_raw(elem as *const A as *mut A)); 48 | /// } 49 | /// } 50 | /// ``` 51 | /// 52 | /// This trait is implemented on a type separate from `T` (although it can be just `T`), because 53 | /// one type might be placeable into multiple lists, in which case it would require multiple 54 | /// implementations of `IsElement`. In such cases, each struct implementing `IsElement` 55 | /// represents a distinct `Entry` in `T`. 56 | /// 57 | /// For example, we can insert the following struct into two lists using `entry1` for one 58 | /// and `entry2` for the other: 59 | /// 60 | /// ```ignore 61 | /// struct B { 62 | /// entry1: Entry, 63 | /// entry2: Entry, 64 | /// data: usize, 65 | /// } 66 | /// ``` 67 | /// 68 | pub trait IsElement { 69 | /// Returns a reference to this element's `Entry`. 70 | fn entry_of(&T) -> &Entry; 71 | 72 | /// Given a reference to an element's entry, returns that element. 73 | /// 74 | /// ```ignore 75 | /// let elem = ListElement::new(); 76 | /// assert_eq!(elem.entry_of(), 77 | /// unsafe { ListElement::element_of(elem.entry_of()) } ); 78 | /// ``` 79 | /// 80 | /// # Safety 81 | /// The caller has to guarantee that the `Entry` it 82 | /// is called with was retrieved from an instance of the element type (`T`). 83 | unsafe fn element_of(&Entry) -> &T; 84 | 85 | /// Deallocates the whole element given its `Entry`. This is called when the list 86 | /// is ready to actually free the element. 87 | /// 88 | /// # Safety 89 | /// The caller has to guarantee that the `Entry` it 90 | /// is called with was retrieved from an instance of the element type (`T`). 91 | unsafe fn finalize(&Entry); 92 | } 93 | 94 | /// A lock-free, intrusive linked list of type `T`. 95 | #[derive(Debug)] 96 | pub struct List = T> { 97 | /// The head of the linked list. 98 | head: Atomic, 99 | 100 | /// The phantom data for using `T` and `C`. 101 | _marker: PhantomData<(T, C)>, 102 | } 103 | 104 | /// An iterator used for retrieving values from the list. 105 | pub struct Iter<'g, T: 'g, C: IsElement> { 106 | /// The guard that protects the iteration. 
107 | guard: &'g Guard, 108 | 109 | /// Pointer from the predecessor to the current entry. 110 | pred: &'g Atomic, 111 | 112 | /// The current entry. 113 | curr: Shared<'g, Entry>, 114 | 115 | /// The list head, needed for restarting iteration. 116 | head: &'g Atomic, 117 | 118 | /// Logically, we store a borrow of an instance of `T` and 119 | /// use the type information from `C`. 120 | _marker: PhantomData<(&'g T, C)>, 121 | } 122 | 123 | /// An error that occurs during iteration over the list. 124 | #[derive(PartialEq, Debug)] 125 | pub enum IterError { 126 | /// A concurrent thread modified the state of the list at the same place that this iterator 127 | /// was inspecting. Subsequent iteration will restart from the beginning of the list. 128 | Stalled, 129 | } 130 | 131 | impl Default for Entry { 132 | /// Returns the empty entry. 133 | fn default() -> Self { 134 | Self { next: Atomic::null() } 135 | } 136 | } 137 | 138 | impl Entry { 139 | /// Marks this entry as deleted, deferring the actual deallocation to a later iteration. 140 | /// 141 | /// # Safety 142 | /// 143 | /// The entry should be a member of a linked list, and it should not have been deleted. 144 | /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C` 145 | /// is the associated helper for the linked list. 146 | pub unsafe fn delete(&self, guard: &Guard) { 147 | self.next.fetch_or(1, Release, guard); 148 | } 149 | } 150 | 151 | impl> List { 152 | /// Returns a new, empty linked list. 153 | pub fn new() -> Self { 154 | Self { 155 | head: Atomic::null(), 156 | _marker: PhantomData, 157 | } 158 | } 159 | 160 | /// Inserts `entry` into the head of the list. 161 | /// 162 | /// # Safety 163 | /// 164 | /// You should guarantee that: 165 | /// 166 | /// - `container` is not null 167 | /// - `container` is immovable, e.g. inside a `Box` 168 | /// - the same `Entry` is not inserted more than once 169 | /// - the inserted object will be removed before the list is dropped 170 | pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) { 171 | // Insert right after head, i.e. at the beginning of the list. 172 | let to = &self.head; 173 | // Get the intrusively stored Entry of the new element to insert. 174 | let entry: &Entry = C::entry_of(container.deref()); 175 | // Make a Shared ptr to that Entry. 176 | let entry_ptr = Shared::from(entry as *const _); 177 | // Read the current successor of where we want to insert. 178 | let mut next = to.load(Relaxed, guard); 179 | 180 | loop { 181 | // Set the Entry of the to-be-inserted element to point to the previous successor of 182 | // `to`. 183 | entry.next.store(next, Relaxed); 184 | match to.compare_and_set_weak(next, entry_ptr, Release, guard) { 185 | Ok(_) => break, 186 | // We lost the race or weak CAS failed spuriously. Update the successor and try 187 | // again. 188 | Err(err) => next = err.current, 189 | } 190 | } 191 | } 192 | 193 | /// Returns an iterator over all objects. 194 | /// 195 | /// # Caveat 196 | /// 197 | /// Every object that is inserted at the moment this function is called and persists at least 198 | /// until the end of iteration will be returned. Since this iterator traverses a lock-free 199 | /// linked list that may be concurrently modified, some additional caveats apply: 200 | /// 201 | /// 1. If a new object is inserted during iteration, it may or may not be returned. 202 | /// 2. If an object is deleted during iteration, it may or may not be returned. 203 | /// 3. 
The iteration may be aborted when it lost in a race condition. In this case, the winning 204 | /// thread will continue to iterate over the same list. 205 | pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> { 206 | Iter { 207 | guard, 208 | pred: &self.head, 209 | curr: self.head.load(Acquire, guard), 210 | head: &self.head, 211 | _marker: PhantomData, 212 | } 213 | } 214 | } 215 | 216 | impl> Drop for List { 217 | fn drop(&mut self) { 218 | unsafe { 219 | let guard = &unprotected(); 220 | let mut curr = self.head.load(Relaxed, guard); 221 | while let Some(c) = curr.as_ref() { 222 | let succ = c.next.load(Relaxed, guard); 223 | // Verify that all elements have been removed from the list. 224 | assert_eq!(succ.tag(), 1); 225 | 226 | C::finalize(curr.deref()); 227 | curr = succ; 228 | } 229 | } 230 | } 231 | } 232 | 233 | impl<'g, T: 'g, C: IsElement> Iterator for Iter<'g, T, C> { 234 | type Item = Result<&'g T, IterError>; 235 | 236 | fn next(&mut self) -> Option { 237 | while let Some(c) = unsafe { self.curr.as_ref() } { 238 | let succ = c.next.load(Acquire, self.guard); 239 | 240 | if succ.tag() == 1 { 241 | // This entry was removed. Try unlinking it from the list. 242 | let succ = succ.with_tag(0); 243 | 244 | // The tag should never be zero, because removing a node after a logically deleted 245 | // node leaves the list in an invalid state. 246 | debug_assert!(self.curr.tag() == 0); 247 | 248 | match self.pred.compare_and_set( 249 | self.curr, 250 | succ, 251 | Acquire, 252 | self.guard, 253 | ) { 254 | Ok(_) => { 255 | // We succeeded in unlinking this element from the list, so we have to 256 | // schedule deallocation. Deferred drop is okay, because `list.delete()` 257 | // can only be called if `T: 'static`. 258 | unsafe { 259 | let p = self.curr; 260 | self.guard.defer_unchecked(move || C::finalize(p.deref())); 261 | } 262 | 263 | // Move over the removed by only advancing `curr`, not `pred`. 264 | self.curr = succ; 265 | continue; 266 | } 267 | Err(_) => { 268 | // A concurrent thread modified the predecessor node. Since it might've 269 | // been deleted, we need to restart from `head`. 270 | self.pred = self.head; 271 | self.curr = self.head.load(Acquire, self.guard); 272 | 273 | return Some(Err(IterError::Stalled)); 274 | } 275 | } 276 | } 277 | 278 | // Move one step forward. 279 | self.pred = &c.next; 280 | self.curr = succ; 281 | 282 | return Some(Ok(unsafe { C::element_of(c) })); 283 | } 284 | 285 | // We reached the end of the list. 286 | None 287 | } 288 | } 289 | 290 | #[cfg(test)] 291 | mod tests { 292 | use {Collector, Owned}; 293 | use crossbeam_utils::thread; 294 | use std::sync::Barrier; 295 | use super::*; 296 | 297 | impl IsElement for Entry { 298 | fn entry_of(entry: &Entry) -> &Entry { 299 | entry 300 | } 301 | 302 | unsafe fn element_of(entry: &Entry) -> &Entry { 303 | entry 304 | } 305 | 306 | unsafe fn finalize(entry: &Entry) { 307 | drop(Box::from_raw(entry as *const Entry as *mut Entry)); 308 | } 309 | } 310 | 311 | /// Checks whether the list retains inserted elements 312 | /// and returns them in the correct order. 
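    // `Iter::next` above restarts from the head and yields `IterError::Stalled`
    // whenever it loses the unlink race, so a caller that needs a complete
    // traversal simply retries. A hypothetical helper (a sketch, not part of
    // this module) could look like:
    //
    //     fn count_entries(l: &List<Entry>, guard: &Guard) -> usize {
    //         'retry: loop {
    //             let mut n = 0;
    //             for item in l.iter(guard) {
    //                 match item {
    //                     Ok(_) => n += 1,
    //                     Err(IterError::Stalled) => continue 'retry,
    //                 }
    //             }
    //             return n;
    //         }
    //     }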
313 | #[test] 314 | fn insert() { 315 | let collector = Collector::new(); 316 | let handle = collector.register(); 317 | let guard = handle.pin(); 318 | 319 | let l: List = List::new(); 320 | 321 | let e1 = Owned::new(Entry::default()).into_shared(&guard); 322 | let e2 = Owned::new(Entry::default()).into_shared(&guard); 323 | let e3 = Owned::new(Entry::default()).into_shared(&guard); 324 | 325 | unsafe { 326 | l.insert(e1, &guard); 327 | l.insert(e2, &guard); 328 | l.insert(e3, &guard); 329 | } 330 | 331 | let mut iter = l.iter(&guard); 332 | let maybe_e3 = iter.next(); 333 | assert!(maybe_e3.is_some()); 334 | assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); 335 | let maybe_e2 = iter.next(); 336 | assert!(maybe_e2.is_some()); 337 | assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw()); 338 | let maybe_e1 = iter.next(); 339 | assert!(maybe_e1.is_some()); 340 | assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); 341 | assert!(iter.next().is_none()); 342 | 343 | unsafe { 344 | e1.as_ref().unwrap().delete(&guard); 345 | e2.as_ref().unwrap().delete(&guard); 346 | e3.as_ref().unwrap().delete(&guard); 347 | } 348 | } 349 | 350 | /// Checks whether elements can be removed from the list and whether 351 | /// the correct elements are removed. 352 | #[test] 353 | fn delete() { 354 | let collector = Collector::new(); 355 | let handle = collector.register(); 356 | let guard = handle.pin(); 357 | 358 | let l: List = List::new(); 359 | 360 | let e1 = Owned::new(Entry::default()).into_shared(&guard); 361 | let e2 = Owned::new(Entry::default()).into_shared(&guard); 362 | let e3 = Owned::new(Entry::default()).into_shared(&guard); 363 | unsafe { 364 | l.insert(e1, &guard); 365 | l.insert(e2, &guard); 366 | l.insert(e3, &guard); 367 | e2.as_ref().unwrap().delete(&guard); 368 | } 369 | 370 | let mut iter = l.iter(&guard); 371 | let maybe_e3 = iter.next(); 372 | assert!(maybe_e3.is_some()); 373 | assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); 374 | let maybe_e1 = iter.next(); 375 | assert!(maybe_e1.is_some()); 376 | assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); 377 | assert!(iter.next().is_none()); 378 | 379 | unsafe { 380 | e1.as_ref().unwrap().delete(&guard); 381 | e3.as_ref().unwrap().delete(&guard); 382 | } 383 | 384 | let mut iter = l.iter(&guard); 385 | assert!(iter.next().is_none()); 386 | } 387 | 388 | const THREADS: usize = 8; 389 | const ITERS: usize = 512; 390 | 391 | /// Contends the list on insert and delete operations to make sure they can run concurrently. 392 | #[test] 393 | fn insert_delete_multi() { 394 | let collector = Collector::new(); 395 | 396 | let l: List = List::new(); 397 | let b = Barrier::new(THREADS); 398 | 399 | thread::scope(|s| for _ in 0..THREADS { 400 | s.spawn(|| { 401 | b.wait(); 402 | 403 | let handle = collector.register(); 404 | let guard: Guard = handle.pin(); 405 | let mut v = Vec::with_capacity(ITERS); 406 | 407 | for _ in 0..ITERS { 408 | let e = Owned::new(Entry::default()).into_shared(&guard); 409 | v.push(e); 410 | unsafe { 411 | l.insert(e, &guard); 412 | } 413 | } 414 | 415 | for e in v { 416 | unsafe { 417 | e.as_ref().unwrap().delete(&guard); 418 | } 419 | } 420 | }); 421 | }); 422 | 423 | let handle = collector.register(); 424 | let guard = handle.pin(); 425 | 426 | let mut iter = l.iter(&guard); 427 | assert!(iter.next().is_none()); 428 | } 429 | 430 | /// Contends the list on iteration to make sure that it can be iterated over concurrently. 
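    // `Entry::delete` only marks a node as deleted; it is physically unlinked
    // (and its memory handed to the collector via `defer_unchecked`) by whichever
    // later iteration encounters the mark. A hypothetical sketch of that life
    // cycle, with `collector`, `guard`, and `l: List<Entry>` set up as in the
    // tests above:
    //
    //     let e = Owned::new(Entry::default()).into_shared(&guard);
    //     unsafe { l.insert(e, &guard) };
    //     unsafe { e.as_ref().unwrap().delete(&guard) };  // logical removal only
    //     assert!(l.iter(&guard).next().is_none());       // physical unlink happens here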
431 | #[test] 432 | fn iter_multi() { 433 | let collector = Collector::new(); 434 | 435 | let l: List = List::new(); 436 | let b = Barrier::new(THREADS); 437 | 438 | thread::scope(|s| for _ in 0..THREADS { 439 | s.spawn(|| { 440 | b.wait(); 441 | 442 | let handle = collector.register(); 443 | let guard: Guard = handle.pin(); 444 | let mut v = Vec::with_capacity(ITERS); 445 | 446 | for _ in 0..ITERS { 447 | let e = Owned::new(Entry::default()).into_shared(&guard); 448 | v.push(e); 449 | unsafe { 450 | l.insert(e, &guard); 451 | } 452 | } 453 | 454 | let mut iter = l.iter(&guard); 455 | for _ in 0..ITERS { 456 | assert!(iter.next().is_some()); 457 | } 458 | 459 | for e in v { 460 | unsafe { 461 | e.as_ref().unwrap().delete(&guard); 462 | } 463 | } 464 | }); 465 | }); 466 | 467 | let handle = collector.register(); 468 | let guard = handle.pin(); 469 | 470 | let mut iter = l.iter(&guard); 471 | assert!(iter.next().is_none()); 472 | } 473 | } 474 | -------------------------------------------------------------------------------- /src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | //! Synchronization primitives. 2 | 3 | pub mod list; 4 | pub mod queue; 5 | -------------------------------------------------------------------------------- /src/sync/queue.rs: -------------------------------------------------------------------------------- 1 | //! Michael-Scott lock-free queue. 2 | //! 3 | //! Usable with any number of producers and consumers. 4 | //! 5 | //! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue 6 | //! Algorithms. PODC 1996. http://dl.acm.org/citation.cfm?id=248106 7 | 8 | use core::mem::{self, ManuallyDrop}; 9 | use core::ptr; 10 | use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; 11 | 12 | use crossbeam_utils::CachePadded; 13 | 14 | use {unprotected, Atomic, Guard, Owned, Shared}; 15 | 16 | // The representation here is a singly-linked list, with a sentinel node at the front. In general 17 | // the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or 18 | // all `Blocked` (requests for data from blocked threads). 19 | #[derive(Debug)] 20 | pub struct Queue { 21 | head: CachePadded>>, 22 | tail: CachePadded>>, 23 | } 24 | 25 | #[derive(Debug)] 26 | struct Node { 27 | /// The slot in which a value of type `T` can be stored. 28 | /// 29 | /// The type of `data` is `ManuallyDrop` because a `Node` doesn't always contain a `T`. 30 | /// For example, the sentinel node in a queue never contains a value: its slot is always empty. 31 | /// Other nodes start their life with a push operation and contain a value until it gets popped 32 | /// out. After that such empty nodes get added to the collector for destruction. 33 | data: ManuallyDrop, 34 | 35 | next: Atomic>, 36 | } 37 | 38 | // Any particular `T` should never be accessed concurrently, so no need for `Sync`. 39 | unsafe impl Sync for Queue {} 40 | unsafe impl Send for Queue {} 41 | 42 | 43 | impl Queue { 44 | /// Create a new, empty queue. 
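    // Every queue operation takes an epoch `Guard`. A rough usage sketch
    // (assuming the crate-level `pin` that is re-exported from `default.rs`
    // when the `use_std` feature is enabled):
    //
    //     let q: Queue<i32> = Queue::new();
    //     let guard = &pin();
    //     q.push(1, guard);
    //     assert_eq!(q.try_pop(guard), Some(1));
    //     assert!(q.try_pop(guard).is_none());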
45 | pub fn new() -> Queue { 46 | let q = Queue { 47 | head: CachePadded::new(Atomic::null()), 48 | tail: CachePadded::new(Atomic::null()), 49 | }; 50 | let sentinel = Owned::new(Node { 51 | data: unsafe { mem::uninitialized() }, 52 | next: Atomic::null(), 53 | }); 54 | unsafe { 55 | let guard = &unprotected(); 56 | let sentinel = sentinel.into_shared(guard); 57 | q.head.store(sentinel, Relaxed); 58 | q.tail.store(sentinel, Relaxed); 59 | q 60 | } 61 | } 62 | 63 | /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on 64 | /// success. The queue's `tail` pointer may be updated. 65 | #[inline(always)] 66 | fn push_internal(&self, onto: Shared>, new: Shared>, guard: &Guard) -> bool { 67 | // is `onto` the actual tail? 68 | let o = unsafe { onto.deref() }; 69 | let next = o.next.load(Acquire, guard); 70 | if unsafe { next.as_ref().is_some() } { 71 | // if not, try to "help" by moving the tail pointer forward 72 | let _ = self.tail.compare_and_set(onto, next, Release, guard); 73 | false 74 | } else { 75 | // looks like the actual tail; attempt to link in `n` 76 | let result = o.next 77 | .compare_and_set(Shared::null(), new, Release, guard) 78 | .is_ok(); 79 | if result { 80 | // try to move the tail pointer forward 81 | let _ = self.tail.compare_and_set(onto, new, Release, guard); 82 | } 83 | result 84 | } 85 | } 86 | 87 | /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`. 88 | pub fn push(&self, t: T, guard: &Guard) { 89 | let new = Owned::new(Node { 90 | data: ManuallyDrop::new(t), 91 | next: Atomic::null(), 92 | }); 93 | let new = Owned::into_shared(new, guard); 94 | 95 | loop { 96 | // We push onto the tail, so we'll start optimistically by looking there first. 97 | let tail = self.tail.load(Acquire, guard); 98 | 99 | // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed. 100 | if self.push_internal(tail, new, guard) { 101 | break; 102 | } 103 | } 104 | } 105 | 106 | /// Attempts to pop a data node. `Ok(None)` if queue is empty; `Err(())` if lost race to pop. 107 | #[inline(always)] 108 | fn pop_internal(&self, guard: &Guard) -> Result, ()> { 109 | let head = self.head.load(Acquire, guard); 110 | let h = unsafe { head.deref() }; 111 | let next = h.next.load(Acquire, guard); 112 | match unsafe { next.as_ref() } { 113 | Some(n) => unsafe { 114 | self.head 115 | .compare_and_set(head, next, Release, guard) 116 | .map(|_| { 117 | guard.defer_destroy(head); 118 | Some(ManuallyDrop::into_inner(ptr::read(&n.data))) 119 | }) 120 | .map_err(|_| ()) 121 | }, 122 | None => Ok(None), 123 | } 124 | } 125 | 126 | /// Attempts to pop a data node, if the data satisfies the given condition. `Ok(None)` if queue 127 | /// is empty or the data does not satisfy the condition; `Err(())` if lost race to pop. 128 | #[inline(always)] 129 | fn pop_if_internal(&self, condition: F, guard: &Guard) -> Result, ()> 130 | where 131 | T: Sync, 132 | F: Fn(&T) -> bool, 133 | { 134 | let head = self.head.load(Acquire, guard); 135 | let h = unsafe { head.deref() }; 136 | let next = h.next.load(Acquire, guard); 137 | match unsafe { next.as_ref() } { 138 | Some(n) if condition(&n.data) => unsafe { 139 | self.head 140 | .compare_and_set(head, next, Release, guard) 141 | .map(|_| { 142 | guard.defer_destroy(head); 143 | Some(ManuallyDrop::into_inner(ptr::read(&n.data))) 144 | }) 145 | .map_err(|_| ()) 146 | }, 147 | None | Some(_) => Ok(None), 148 | } 149 | } 150 | 151 | /// Attempts to dequeue from the front. 
152 | /// 153 | /// Returns `None` if the queue is observed to be empty. 154 | pub fn try_pop(&self, guard: &Guard) -> Option { 155 | loop { 156 | if let Ok(head) = self.pop_internal(guard) { 157 | return head; 158 | } 159 | } 160 | } 161 | 162 | /// Attempts to dequeue from the front, if the item satisfies the given condition. 163 | /// 164 | /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given 165 | /// condition. 166 | pub fn try_pop_if(&self, condition: F, guard: &Guard) -> Option 167 | where 168 | T: Sync, 169 | F: Fn(&T) -> bool, 170 | { 171 | loop { 172 | if let Ok(head) = self.pop_if_internal(&condition, guard) { 173 | return head; 174 | } 175 | } 176 | } 177 | } 178 | 179 | impl Drop for Queue { 180 | fn drop(&mut self) { 181 | unsafe { 182 | let guard = &unprotected(); 183 | 184 | while let Some(_) = self.try_pop(guard) {} 185 | 186 | // Destroy the remaining sentinel node. 187 | let sentinel = self.head.load(Relaxed, guard); 188 | drop(sentinel.into_owned()); 189 | } 190 | } 191 | } 192 | 193 | 194 | #[cfg(test)] 195 | mod test { 196 | use super::*; 197 | use crossbeam_utils::thread; 198 | use pin; 199 | 200 | struct Queue { 201 | queue: super::Queue, 202 | } 203 | 204 | impl Queue { 205 | pub fn new() -> Queue { 206 | Queue { queue: super::Queue::new() } 207 | } 208 | 209 | pub fn push(&self, t: T) { 210 | let guard = &pin(); 211 | self.queue.push(t, guard); 212 | } 213 | 214 | pub fn is_empty(&self) -> bool { 215 | let guard = &pin(); 216 | let head = self.queue.head.load(Acquire, guard); 217 | let h = unsafe { head.deref() }; 218 | h.next.load(Acquire, guard).is_null() 219 | } 220 | 221 | pub fn try_pop(&self) -> Option { 222 | let guard = &pin(); 223 | self.queue.try_pop(guard) 224 | } 225 | 226 | pub fn pop(&self) -> T { 227 | loop { 228 | match self.try_pop() { 229 | None => continue, 230 | Some(t) => return t, 231 | } 232 | } 233 | } 234 | } 235 | 236 | const CONC_COUNT: i64 = 1000000; 237 | 238 | #[test] 239 | fn push_try_pop_1() { 240 | let q: Queue = Queue::new(); 241 | assert!(q.is_empty()); 242 | q.push(37); 243 | assert!(!q.is_empty()); 244 | assert_eq!(q.try_pop(), Some(37)); 245 | assert!(q.is_empty()); 246 | } 247 | 248 | #[test] 249 | fn push_try_pop_2() { 250 | let q: Queue = Queue::new(); 251 | assert!(q.is_empty()); 252 | q.push(37); 253 | q.push(48); 254 | assert_eq!(q.try_pop(), Some(37)); 255 | assert!(!q.is_empty()); 256 | assert_eq!(q.try_pop(), Some(48)); 257 | assert!(q.is_empty()); 258 | } 259 | 260 | #[test] 261 | fn push_try_pop_many_seq() { 262 | let q: Queue = Queue::new(); 263 | assert!(q.is_empty()); 264 | for i in 0..200 { 265 | q.push(i) 266 | } 267 | assert!(!q.is_empty()); 268 | for i in 0..200 { 269 | assert_eq!(q.try_pop(), Some(i)); 270 | } 271 | assert!(q.is_empty()); 272 | } 273 | 274 | #[test] 275 | fn push_pop_1() { 276 | let q: Queue = Queue::new(); 277 | assert!(q.is_empty()); 278 | q.push(37); 279 | assert!(!q.is_empty()); 280 | assert_eq!(q.pop(), 37); 281 | assert!(q.is_empty()); 282 | } 283 | 284 | #[test] 285 | fn push_pop_2() { 286 | let q: Queue = Queue::new(); 287 | q.push(37); 288 | q.push(48); 289 | assert_eq!(q.pop(), 37); 290 | assert_eq!(q.pop(), 48); 291 | } 292 | 293 | #[test] 294 | fn push_pop_many_seq() { 295 | let q: Queue = Queue::new(); 296 | assert!(q.is_empty()); 297 | for i in 0..200 { 298 | q.push(i) 299 | } 300 | assert!(!q.is_empty()); 301 | for i in 0..200 { 302 | assert_eq!(q.pop(), i); 303 | } 304 | assert!(q.is_empty()); 305 | } 306 | 307 | #[test] 308 | fn 
push_try_pop_many_spsc() { 309 | let q: Queue = Queue::new(); 310 | assert!(q.is_empty()); 311 | 312 | thread::scope(|scope| { 313 | scope.spawn(|| { 314 | let mut next = 0; 315 | 316 | while next < CONC_COUNT { 317 | if let Some(elem) = q.try_pop() { 318 | assert_eq!(elem, next); 319 | next += 1; 320 | } 321 | } 322 | }); 323 | 324 | for i in 0..CONC_COUNT { 325 | q.push(i) 326 | } 327 | }); 328 | } 329 | 330 | #[test] 331 | fn push_try_pop_many_spmc() { 332 | fn recv(_t: i32, q: &Queue) { 333 | let mut cur = -1; 334 | for _i in 0..CONC_COUNT { 335 | if let Some(elem) = q.try_pop() { 336 | assert!(elem > cur); 337 | cur = elem; 338 | 339 | if cur == CONC_COUNT - 1 { 340 | break; 341 | } 342 | } 343 | } 344 | } 345 | 346 | let q: Queue = Queue::new(); 347 | assert!(q.is_empty()); 348 | let qr = &q; 349 | thread::scope(|scope| { 350 | for i in 0..3 { 351 | scope.spawn(move || recv(i, qr)); 352 | } 353 | 354 | scope.spawn(|| for i in 0..CONC_COUNT { 355 | q.push(i); 356 | }) 357 | }); 358 | } 359 | 360 | #[test] 361 | fn push_try_pop_many_mpmc() { 362 | enum LR { 363 | Left(i64), 364 | Right(i64), 365 | } 366 | 367 | let q: Queue = Queue::new(); 368 | assert!(q.is_empty()); 369 | 370 | thread::scope(|scope| for _t in 0..2 { 371 | scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT { 372 | q.push(LR::Left(i)) 373 | }); 374 | scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT { 375 | q.push(LR::Right(i)) 376 | }); 377 | scope.spawn(|| { 378 | let mut vl = vec![]; 379 | let mut vr = vec![]; 380 | for _i in 0..CONC_COUNT { 381 | match q.try_pop() { 382 | Some(LR::Left(x)) => vl.push(x), 383 | Some(LR::Right(x)) => vr.push(x), 384 | _ => {} 385 | } 386 | } 387 | 388 | let mut vl2 = vl.clone(); 389 | let mut vr2 = vr.clone(); 390 | vl2.sort(); 391 | vr2.sort(); 392 | 393 | assert_eq!(vl, vl2); 394 | assert_eq!(vr, vr2); 395 | }); 396 | }); 397 | } 398 | 399 | #[test] 400 | fn push_pop_many_spsc() { 401 | let q: Queue = Queue::new(); 402 | 403 | thread::scope(|scope| { 404 | scope.spawn(|| { 405 | let mut next = 0; 406 | while next < CONC_COUNT { 407 | assert_eq!(q.pop(), next); 408 | next += 1; 409 | } 410 | }); 411 | 412 | for i in 0..CONC_COUNT { 413 | q.push(i) 414 | } 415 | }); 416 | assert!(q.is_empty()); 417 | } 418 | 419 | #[test] 420 | fn is_empty_dont_pop() { 421 | let q: Queue = Queue::new(); 422 | q.push(20); 423 | q.push(20); 424 | assert!(!q.is_empty()); 425 | assert!(!q.is_empty()); 426 | assert!(q.try_pop().is_some()); 427 | } 428 | } 429 | --------------------------------------------------------------------------------
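The test module above exercises `push`, `try_pop`, and `pop`, but not `try_pop_if`. A minimal sketch of such a test (hypothetical, not part of the original suite) could sit alongside the others in `src/sync/queue.rs`, using the raw `super::Queue` with an explicit guard:

```rust
// Hypothetical test sketch; the name and the values are illustrative only.
#[test]
fn push_try_pop_if() {
    let q: super::Queue<i64> = super::Queue::new();
    let guard = &pin();

    q.push(1, guard);
    q.push(2, guard);

    // The head is 1 (odd), so the conditional pop returns `None` and leaves it in place.
    assert_eq!(q.try_pop_if(|&x| x % 2 == 0, guard), None);
    assert_eq!(q.try_pop(guard), Some(1));

    // Now the head is 2 (even), so the conditional pop succeeds.
    assert_eq!(q.try_pop_if(|&x| x % 2 == 0, guard), Some(2));
    assert!(q.try_pop(guard).is_none());
}
```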