├── .clog.toml
├── .envrc
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── CHANGELOG.md
├── Cargo.toml
├── IMPLEMENTATION.md
├── LICENSE
├── README.md
├── benches
│   └── bench.rs
├── bin
│   └── loom.sh
├── flake.lock
├── flake.nix
├── rust-toolchain.toml
├── src
│   ├── cfg.rs
│   ├── clear.rs
│   ├── implementation.rs
│   ├── iter.rs
│   ├── lib.rs
│   ├── macros.rs
│   ├── page
│   │   ├── mod.rs
│   │   ├── slot.rs
│   │   └── stack.rs
│   ├── pool.rs
│   ├── shard.rs
│   ├── sync.rs
│   ├── tests
│   │   ├── custom_config.rs
│   │   ├── loom_pool.rs
│   │   ├── loom_slab.rs
│   │   ├── mod.rs
│   │   └── properties.rs
│   └── tid.rs
└── tests
    └── reserved_bits_leak.rs

/.clog.toml:
--------------------------------------------------------------------------------
[clog]
# A repository link with the trailing '.git' which will be used to generate
# all commit and issue links
repository = "https://github.com/hawkw/sharded-slab"
# A constant release title
# subtitle = "sharded-slab"

# specify the style of commit links to generate, defaults to "github" if omitted
link-style = "github"

# The preferred way to set a constant changelog. This file will be read for old changelog
# data, then prepended to for new changelog data. It's the equivalent to setting
# both infile and outfile to the same file.
#
# Do not use with outfile or infile fields!
#
# Defaults to stdout when omitted
changelog = "CHANGELOG.md"

# This sets the output format. There are two options "json" or "markdown" and
# defaults to "markdown" when omitted
output-format = "markdown"

# If you use tags, you can set the following if you wish to only pick
# up changes since your latest tag
from-latest-tag = true

--------------------------------------------------------------------------------
/.envrc:
--------------------------------------------------------------------------------
use flake;

--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
# These are supported funding model platforms

github: [hawkw]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
name: CI

on:
  push:
    branches: ["main"]
  pull_request:

env:
  RUSTFLAGS: -Dwarnings
  RUST_BACKTRACE: 1
  MSRV: 1.42.0

jobs:
  build:
    name: Build (stable, ${{ matrix.target }})
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - x86_64-unknown-linux-gnu
          - i686-unknown-linux-musl
    steps:
      - uses: actions/checkout@master
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          target: ${{ matrix.target }}
          override: true
      - name: cargo build --target ${{ matrix.target }}
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --all-targets --target ${{ matrix.target }}

  build-msrv:
    name: Build (MSRV)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: ${{ env.MSRV }}
          override: true
      - name: cargo +${{ env.MSRV }} build
        uses: actions-rs/cargo@v1
        with:
          command: build
        env:
          RUSTFLAGS: "" # remove -Dwarnings

  build-nightly:
    name: Build (nightly)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: nightly
          override: true
      - name: cargo +nightly build
        uses: actions-rs/cargo@v1
        with:
          command: build
        env:
          RUSTFLAGS: "" # remove -Dwarnings

  test:
    name: Tests (stable)
    needs: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - name: Run tests
        run: cargo test

  test-loom:
    name: Loom tests (stable)
    needs: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - name: Run Loom tests
        run: ./bin/loom.sh

  clippy:
    name: Clippy (stable)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          components: clippy
          override: true
      - name: cargo clippy --all-targets --all-features
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --all-targets --all-features

  rustfmt:
    name: Rustfmt (stable)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Install toolchain
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          components: rustfmt
          override: true
      - name: Run rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: -- --check

  all-systems-go:
    name: "all systems go!"
    needs:
      - build
      - build-msrv
      # Note: we explicitly *don't* require the `build-nightly` job to pass,
      # since the nightly Rust compiler is unstable. We don't want nightly
      # regressions to break our build --- this CI job is intended for
      # informational reasons rather than as a gatekeeper for merging PRs.
      - test
      - test-loom
      - clippy
      - rustfmt
    runs-on: ubuntu-latest
    steps:
      - run: exit 0

--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
name: Release

on:
  push:
    tags:
      - v[0-9]+.*

jobs:
  create-release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: taiki-e/create-gh-release-action@v1
        with:
          # Path to changelog.
          changelog: CHANGELOG.md
          # Reject releases from commits not contained in branches
          # that match the specified pattern (regular expression)
          branch: main
        env:
          # (Required) GitHub token for creating GitHub Releases.
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
target
Cargo.lock
.direnv

--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------

### v0.1.7 (2023-10-04)


#### Bug Fixes

* index out of bounds in `get()` and `get_owned()` (#88) ([fdbc930f](https://github.com/hawkw/sharded-slab/commit/fdbc930fb14b0f6f8b77cd6efdad5a1bdf8d3c04))
* **unique_iter:** prevent panics if a slab is empty (#88) ([bd599e0b](https://github.com/hawkw/sharded-slab/commit/bd599e0b2a60a953f25f27ba1fa86682150e05c2), closes [#73](https://github.com/hawkw/sharded-slab/issues/73))



## 0.1.6 (2023-09-27)


#### Features

* publicly export `UniqueIter` (#87) ([e4d6482d](https://github.com/hawkw/sharded-slab/commit/e4d6482db05d5767b47eae1b0217faad30f2ebd5), closes [#77](https://github.com/hawkw/sharded-slab/issues/77))

#### Bug Fixes

* use a smaller `CustomConfig` for 32-bit tests (#84) ([828ffff9](https://github.com/hawkw/sharded-slab/commit/828ffff9f82cfc41ed66b4743563c4dddc97c1ce), closes [#82](https://github.com/hawkw/sharded-slab/issues/82))



## 0.1.5 (2023-08-28)


#### Bug Fixes

* **Slab:** invalid generation in case of custom config (#80) ([ca090279](https://github.com/hawkw/sharded-slab/commit/ca09027944812d024676029a3dde62d27ef22015))



### 0.1.4 (2021-10-12)


#### Features

* emit a nicer panic when thread count overflows `MAX_SHARDS` (#64) ([f1ed058a](https://github.com/hawkw/sharded-slab/commit/f1ed058a3ee296eff033fc0fb88f62a8b2f83f10))



### 0.1.3 (2021-08-02)


#### Bug Fixes

* set up MSRV in CI (#61) ([dfcc9080](https://github.com/hawkw/sharded-slab/commit/dfcc9080a62d08e359f298a9ffb0f275928b83e4), closes [#60](https://github.com/hawkw/sharded-slab/issues/60))
* **tests:** duplicate `hint` mod defs with loom ([0ce3fd91](https://github.com/hawkw/sharded-slab/commit/0ce3fd91feac8b4edb4f1ece6aebfc4ba4e50026))



### 0.1.2 (2021-08-01)


#### Bug Fixes

* make debug assertions drop safe ([26d35a69](https://github.com/hawkw/sharded-slab/commit/26d35a695c9e5d7c62ab07cc5e66a0c6f8b6eade))

#### Features

* improve panics on thread ID bit exhaustion ([9ecb8e61](https://github.com/hawkw/sharded-slab/commit/9ecb8e614f107f68b5c6ba770342ae72af1cd07b))



## 0.1.1 (2021-1-4)


#### Bug Fixes

* change `loom` to an optional dependency ([9bd442b5](https://github.com/hawkw/sharded-slab/commit/9bd442b57bc56153a67d7325144ebcf303e0fe98))


## 0.1.0 (2020-10-20)


#### Bug Fixes

* fix `remove` and `clear` returning true when the key is stale ([b52d38b2](https://github.com/hawkw/sharded-slab/commit/b52d38b2d2d3edc3a59d3dba6b75095bbd864266))

#### Breaking Changes

* **Pool:** change `Pool::create` to return a mutable guard (#48) ([778065ea](https://github.com/hawkw/sharded-slab/commit/778065ead83523e0a9d951fbd19bb37fda3cc280), closes [#41](https://github.com/hawkw/sharded-slab/issues/41), [#16](https://github.com/hawkw/sharded-slab/issues/16))
* **Slab:** rename `Guard` to `Entry` for consistency ([425ad398](https://github.com/hawkw/sharded-slab/commit/425ad39805ee818dc6b332286006bc92c8beab38))

#### Features

* add missing `Debug` impls ([71a8883f](https://github.com/hawkw/sharded-slab/commit/71a8883ff4fd861b95e81840cb5dca167657fe36))
* **Pool:**
  * add `Pool::create_owned` and `OwnedRefMut` ([f7774ae0](https://github.com/hawkw/sharded-slab/commit/f7774ae0c5be99340f1e7941bde62f7044f4b4d8))
  * add `Arc::get_owned` and `OwnedRef` ([3e566d91](https://github.com/hawkw/sharded-slab/commit/3e566d91e1bc8cc4630a8635ad24b321ec047fe7), closes [#29](https://github.com/hawkw/sharded-slab/issues/29))
  * change `Pool::create` to return a mutable guard (#48) ([778065ea](https://github.com/hawkw/sharded-slab/commit/778065ead83523e0a9d951fbd19bb37fda3cc280), closes [#41](https://github.com/hawkw/sharded-slab/issues/41), [#16](https://github.com/hawkw/sharded-slab/issues/16))
* **Slab:**
  * add `Arc::get_owned` and `OwnedEntry` ([53a970a2](https://github.com/hawkw/sharded-slab/commit/53a970a2298c30c1afd9578268c79ccd44afba05), closes [#29](https://github.com/hawkw/sharded-slab/issues/29))
  * rename `Guard` to `Entry` for consistency ([425ad398](https://github.com/hawkw/sharded-slab/commit/425ad39805ee818dc6b332286006bc92c8beab38))
  * add `slab`-style `VacantEntry` API ([6776590a](https://github.com/hawkw/sharded-slab/commit/6776590adeda7bf4a117fb233fc09cfa64d77ced), closes [#16](https://github.com/hawkw/sharded-slab/issues/16))

#### Performance

* allocate shard metadata lazily (#45) ([e543a06d](https://github.com/hawkw/sharded-slab/commit/e543a06d7474b3ff92df2cdb4a4571032135ff8d))



### 0.0.9 (2020-04-03)


#### Features

* **Config:** validate concurrent refs ([9b32af58](9b32af58), closes [#21](21))
* **Pool:**
  * add `fmt::Debug` impl for `Pool` ([ffa5c7a0](ffa5c7a0))
  * add `Default` impl for `Pool` ([d2399365](d2399365))
  * add a sharded object pool for reusing heap allocations (#19) ([89734508](89734508), closes [#2](2), [#15](15))
* **Slab::take:** add exponential backoff when spinning ([6b743a27](6b743a27))

#### Bug Fixes

* incorrect wrapping when overflowing maximum ref count ([aea693f3](aea693f3), closes [#22](22))



### 0.0.8 (2020-01-31)


#### Bug Fixes

* `remove` not adding slots to free lists ([dfdd7aee](dfdd7aee))



### 0.0.7 (2019-12-06)


#### Bug Fixes

* **Config:** compensate for 0 being a valid TID ([b601f5d9](b601f5d9))
* **DefaultConfig:**
  * const overflow on 32-bit ([74d42dd1](74d42dd1), closes [#10](10))
  * wasted bit patterns on 64-bit ([8cf33f66](8cf33f66))



## 0.0.6 (2019-11-08)


#### Features

* **Guard:** expose `key` method #8 ([748bf39b](748bf39b))



## 0.0.5 (2019-10-31)


#### Performance

* consolidate per-slot state into one AtomicUsize (#6) ([f1146d33](f1146d33))

#### Features

* add Default impl for Slab ([61bb3316](61bb3316))



## 0.0.4 (2019-21-30)


#### Features

* prevent items from being removed while concurrently accessed ([872c81d1](872c81d1))
* added `Slab::remove` method that marks an item to be removed when the last thread
  accessing it finishes ([872c81d1](872c81d1))

#### Bug Fixes

* nicer handling of races in remove ([475d9a06](475d9a06))

#### Breaking Changes

* renamed `Slab::remove` to `Slab::take` ([872c81d1](872c81d1))
* `Slab::get` now returns a `Guard` type ([872c81d1](872c81d1))



## 0.0.3 (2019-07-30)


#### Bug Fixes

* split local/remote to fix false sharing & potential races ([69f95fb0](69f95fb0))
* set next pointer _before_ head ([cc7a0bf1](cc7a0bf1))

#### Breaking Changes

* removed potentially racy `Slab::len` and `Slab::capacity` methods ([27af7d6c](27af7d6c))


## 0.0.2 (2019-03-30)


#### Bug Fixes

* fix compilation failure in release mode ([617031da](617031da))



## 0.0.1 (2019-02-30)

- Initial release

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "sharded-slab"
version = "0.1.7"
authors = ["Eliza Weisman <eliza@buoyant.io>"]
edition = "2018"
documentation = "https://docs.rs/sharded-slab/"
homepage = "https://github.com/hawkw/sharded-slab"
repository = "https://github.com/hawkw/sharded-slab"
readme = "README.md"
rust-version = "1.42.0"
license = "MIT"
keywords = ["slab", "allocator", "lock-free", "atomic"]
categories = ["memory-management", "data-structures", "concurrency"]
description = """
A lock-free concurrent slab.
"""
exclude = [
    "flake.nix",
    "flake.lock",
    ".envrc",
    ".clog.toml",
    ".cargo",
    ".github",
    ".direnv",
    "bin",
]

[badges]
maintenance = { status = "experimental" }

[[bench]]
name = "bench"
harness = false

[dependencies]
lazy_static = "1"

[dev-dependencies]
proptest = "1"
criterion = "0.3"
slab = "0.4.2"
memory-stats = "1"
indexmap = "1" # newer versions lead to "candidate versions found which didn't match" on 1.42.0

[target.'cfg(loom)'.dependencies]
loom = { version = "0.5", features = ["checkpoint"], optional = true }

[target.'cfg(loom)'.dev-dependencies]
loom = { version = "0.5", features = ["checkpoint"] }

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(loom)', 'cfg(slab_print)'] }

--------------------------------------------------------------------------------
/IMPLEMENTATION.md:
--------------------------------------------------------------------------------
Notes on `sharded-slab`'s implementation and design.

# Design

The sharded slab's design is strongly inspired by the ideas presented by
Leijen, Zorn, and de Moura in [Mimalloc: Free List Sharding in
Action][mimalloc]. In this report, the authors present a novel design for a
memory allocator based on a concept of _free list sharding_.

Memory allocators must keep track of what memory regions are not currently
allocated ("free") in order to provide them to future allocation requests.
The term [_free list_][freelist] refers to a technique for performing this
bookkeeping, where each free block stores a pointer to the next free block,
forming a linked list. The memory allocator keeps a pointer to the most
recently freed block, the _head_ of the free list. To allocate more memory,
the allocator pops from the free list by setting the head pointer to the
next free block of the current head block, and returning the previous head.
To deallocate a block, the block is pushed to the free list by setting its
first word to the current head pointer, and the head pointer is set to point
to the deallocated block. Most implementations of slab allocators backed by
arrays or vectors use a similar technique, where pointers are replaced by
indices into the backing array.
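
To make this concrete, here is a minimal sketch of such an index-based free
list over a backing vector. This is an illustration of the technique
described above, not `sharded-slab`'s actual internals:

```rust
enum Slot<T> {
    Occupied(T),
    Free { next: Option<usize> },
}

struct SimpleSlab<T> {
    slots: Vec<Slot<T>>,
    free_head: Option<usize>,
}

impl<T> SimpleSlab<T> {
    fn insert(&mut self, value: T) -> usize {
        match self.free_head {
            // Pop the head of the free list and store the value there...
            Some(idx) => {
                if let Slot::Free { next } = self.slots[idx] {
                    self.free_head = next;
                }
                self.slots[idx] = Slot::Occupied(value);
                idx
            }
            // ...or grow the backing vector if the free list is empty.
            None => {
                self.slots.push(Slot::Occupied(value));
                self.slots.len() - 1
            }
        }
    }

    fn remove(&mut self, idx: usize) {
        // Push the slot onto the free list by linking it to the old head.
        self.slots[idx] = Slot::Free {
            next: self.free_head,
        };
        self.free_head = Some(idx);
    }
}
```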

When allocations and deallocations can occur concurrently across threads,
they must synchronize accesses to the free list; either by putting the
entire allocator state inside of a lock, or by using atomic operations to
treat the free list as a lock-free structure (such as a [Treiber stack]). In
both cases, there is a significant performance cost — even when the free
list is lock-free, it is likely that a noticeable amount of time will be
spent in compare-and-swap loops. Ideally, the global synchronization point
created by the single global free list could be avoided as much as possible.

The approach presented by Leijen, Zorn, and de Moura is to introduce
sharding and thus increase the granularity of synchronization significantly.
In mimalloc, the heap is _sharded_ so that each thread has its own
thread-local heap. Objects are always allocated from the local heap of the
thread where the allocation is performed. Because allocations are always
done from a thread's local heap, they need not be synchronized.

However, since objects can move between threads before being deallocated,
_deallocations_ may still occur concurrently. Therefore, Leijen et al.
introduce a concept of _local_ and _global_ free lists. When an object is
deallocated on the same thread it was originally allocated on, it is placed
on the local free list; if it is deallocated on another thread, it goes on
the global free list for the heap of the thread from which it originated. To
allocate, the local free list is used first; if it is empty, the entire
global free list is popped onto the local free list. Since the local free
list is only ever accessed by the thread it belongs to, it does not require
synchronization at all, and because the global free list is popped from
infrequently, the cost of synchronization has a reduced impact. A majority
of allocations can occur without any synchronization at all; and
deallocations only require synchronization when an object has left its
parent thread (a relatively uncommon case).

[mimalloc]: https://www.microsoft.com/en-us/research/uploads/prod/2019/06/mimalloc-tr-v1.pdf
[freelist]: https://en.wikipedia.org/wiki/Free_list
[Treiber stack]: https://en.wikipedia.org/wiki/Treiber_stack

# Implementation

A slab is represented as an array of [`MAX_THREADS`] _shards_. A shard
consists of a vector of one or more _pages_ plus associated metadata.
Finally, a page consists of an array of _slots_, and head indices for the
local and remote free lists.

```text
┌─────────────┐
│ shard 1     │
│             │    ┌─────────────┐        ┌────────┐
│ pages───────┼───▶│ page 1      │        │        │
├─────────────┤    ├─────────────┤  ┌────▶│  next──┼─┐
│ shard 2     │    │ page 2      │  │     ├────────┤ │
├─────────────┤    │             │  │     │XXXXXXXX│ │
│ shard 3     │    │ local_head──┼──┘     ├────────┤ │
└─────────────┘    │ remote_head─┼──┐     │        │◀┘
      ...          ├─────────────┤  │     │  next──┼─┐
┌─────────────┐    │ page 3      │  │     ├────────┤ │
│ shard n     │    └─────────────┘  │     │XXXXXXXX│ │
└─────────────┘          ...        │     ├────────┤ │
                   ┌─────────────┐  │     │XXXXXXXX│ │
                   │ page n      │  │     ├────────┤ │
                   └─────────────┘  │     │        │◀┘
                                    └────▶│  next──┼───▶ ...
                                          ├────────┤
                                          │XXXXXXXX│
                                          └────────┘
```


The size of the first page in a shard is always a power of two, and every
subsequent page added after the first is twice as large as the page that
precedes it.

```text

pg.
┌───┐   ┌─┬─┐
│ 0 │───▶ │ │
├───┤   ├─┼─┼─┬─┐
│ 1 │───▶ │ │ │ │
├───┤   ├─┼─┼─┼─┼─┬─┬─┬─┐
│ 2 │───▶ │ │ │ │ │ │ │ │
├───┤   ├─┼─┼─┼─┼─┼─┼─┼─┼─┬─┬─┬─┬─┬─┬─┬─┐
│ 3 │───▶ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
└───┘   └─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┘
```

When searching for a free slot, the smallest page is searched first, and if
it is full, the search proceeds to the next page until either a free slot is
found or all available pages have been searched. If all available pages have
been searched and the maximum number of pages has not yet been reached, a
new page is then allocated.
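
As a concrete sketch of this doubling scheme, assuming the default initial
page size of 32 slots (illustrative code, not the crate's internals):

```rust
const INITIAL_PAGE_SIZE: usize = 32; // assumed default

/// The size of page `n`, which doubles with each new page.
fn page_size(n: u32) -> usize {
    INITIAL_PAGE_SIZE << n
}

/// The first slot offset that falls within page `n` is the combined size
/// of all preceding pages: 32 * (2^n - 1).
fn first_offset_in(n: u32) -> usize {
    INITIAL_PAGE_SIZE * (2usize.pow(n) - 1)
}

fn main() {
    assert_eq!(page_size(0), 32);
    assert_eq!(page_size(1), 64);
    assert_eq!(page_size(2), 128);
    assert_eq!(first_offset_in(1), 32); // page 1 holds offsets 32..96
    assert_eq!(first_offset_in(2), 96); // page 2 holds offsets 96..224
}
```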

Since every page is twice as large as the previous page, and all page sizes
are powers of two, we can determine the page index that contains a given
address by shifting the address down by the smallest page size and
looking at how many twos places are necessary to represent that number,
telling us what power of two page size it fits inside of. We can
determine the number of twos places by counting the number of leading
zeros (unused twos places) in the number's binary representation, and
subtracting that count from the total number of bits in a word.

The formula for determining the page number that contains an offset is thus:

```rust,ignore
WIDTH - ((offset + INITIAL_PAGE_SIZE) >> INDEX_SHIFT).leading_zeros()
```

where `WIDTH` is the number of bits in a `usize`, and `INDEX_SHIFT` is

```rust,ignore
INITIAL_PAGE_SIZE.trailing_zeros() + 1;
```
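
As a worked example of these formulas — again assuming a 64-bit word and the
default initial page size of 32; this is a standalone sketch rather than the
crate's internal implementation:

```rust
const WIDTH: usize = 64; // bits in a usize on a 64-bit platform
const INITIAL_PAGE_SIZE: usize = 32;
const INDEX_SHIFT: usize = INITIAL_PAGE_SIZE.trailing_zeros() as usize + 1; // 6

fn page_index(offset: usize) -> usize {
    WIDTH - ((offset + INITIAL_PAGE_SIZE) >> INDEX_SHIFT).leading_zeros() as usize
}

fn main() {
    assert_eq!(page_index(0), 0);  // page 0 holds offsets 0..32
    assert_eq!(page_index(31), 0);
    assert_eq!(page_index(32), 1); // page 1 holds offsets 32..96
    assert_eq!(page_index(95), 1);
    assert_eq!(page_index(96), 2); // page 2 holds offsets 96..224
}
```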

[`MAX_THREADS`]: https://docs.rs/sharded-slab/latest/sharded_slab/trait.Config.html#associatedconstant.MAX_THREADS

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2019 Eliza Weisman

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# sharded-slab

A lock-free concurrent slab.

[![Crates.io][crates-badge]][crates-url]
[![Documentation][docs-badge]][docs-url]
[![CI Status][ci-badge]][ci-url]
[![GitHub License][license-badge]][license]
![maintenance status][maint-badge]

[crates-badge]: https://img.shields.io/crates/v/sharded-slab.svg
[crates-url]: https://crates.io/crates/sharded-slab
[docs-badge]: https://docs.rs/sharded-slab/badge.svg
[docs-url]: https://docs.rs/sharded-slab/latest
[ci-badge]: https://github.com/hawkw/sharded-slab/workflows/CI/badge.svg
[ci-url]: https://github.com/hawkw/sharded-slab/actions?workflow=CI
[license-badge]: https://img.shields.io/crates/l/sharded-slab
[license]: LICENSE
[maint-badge]: https://img.shields.io/badge/maintenance-experimental-blue.svg

Slabs provide pre-allocated storage for many instances of a single data
type. When a large number of values of a single type are required,
this can be more efficient than allocating each item individually. Since the
allocated items are the same size, memory fragmentation is reduced, and
creating and removing new items can be very cheap.

This crate implements a lock-free concurrent slab, indexed by `usize`s.

**Note**: This crate is currently experimental. Please feel free to use it in
your projects, but bear in mind that there's still plenty of room for
optimization, and there may still be some lurking bugs.

## Usage

First, add this to your `Cargo.toml`:

```toml
sharded-slab = "0.1.7"
```

This crate provides two types, [`Slab`] and [`Pool`], which provide slightly
different APIs for using a sharded slab.

[`Slab`] implements a slab for _storing_ small types, sharing them between
threads, and accessing them by index. New entries are allocated by [inserting]
data, moving it in by value. Similarly, entries may be deallocated by [taking]
from the slab, moving the value out. This API is similar to a `Vec<Option<T>>`,
but allowing lock-free concurrent insertion and removal.

In contrast, the [`Pool`] type provides an [object pool] style API for
_reusing storage_. Rather than constructing values and moving them into
the pool, as with [`Slab`], [allocating an entry][create] from the pool
takes a closure that's provided with a mutable reference to initialize
the entry in place. When entries are deallocated, they are [cleared] in
place. Types which own a heap allocation can be cleared by dropping any
_data_ they store, but retaining any previously-allocated capacity. This
means that a [`Pool`] may be used to reuse a set of existing heap
allocations, reducing allocator load.

[`Slab`]: https://docs.rs/sharded-slab/0.1.4/sharded_slab/struct.Slab.html
[inserting]: https://docs.rs/sharded-slab/0.1.4/sharded_slab/struct.Slab.html#method.insert
[taking]: https://docs.rs/sharded-slab/0.1.4/sharded_slab/struct.Slab.html#method.take
[`Pool`]: https://docs.rs/sharded-slab/0.1.4/sharded_slab/struct.Pool.html
[create]: https://docs.rs/sharded-slab/0.1.4/sharded_slab/struct.Pool.html#method.create
[cleared]: https://docs.rs/sharded-slab/0.1.4/sharded_slab/trait.Clear.html
[object pool]: https://en.wikipedia.org/wiki/Object_pool_pattern

### Examples

Inserting an item into the slab, returning an index:

```rust
use sharded_slab::Slab;
let slab = Slab::new();

let key = slab.insert("hello world").unwrap();
assert_eq!(slab.get(key).unwrap(), "hello world");
```

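An entry can also be [taken][taking] back out of the slab by key, moving the
value out (a brief sketch in the same style as the example above):

```rust
use sharded_slab::Slab;
let slab = Slab::new();

let key = slab.insert("hello world").unwrap();

// `take` removes the value from the slab and returns it, if the key is
// still valid.
assert_eq!(slab.take(key), Some("hello world"));
assert!(slab.get(key).is_none());
```
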
To share a slab across threads, it may be wrapped in an `Arc`:

```rust
use sharded_slab::Slab;
use std::sync::Arc;
let slab = Arc::new(Slab::new());

let slab2 = slab.clone();
let thread2 = std::thread::spawn(move || {
    let key = slab2.insert("hello from thread two").unwrap();
    assert_eq!(slab2.get(key).unwrap(), "hello from thread two");
    key
});

let key1 = slab.insert("hello from thread one").unwrap();
assert_eq!(slab.get(key1).unwrap(), "hello from thread one");

// Wait for thread 2 to complete.
let key2 = thread2.join().unwrap();

// The item inserted by thread 2 remains in the slab.
assert_eq!(slab.get(key2).unwrap(), "hello from thread two");
```

If items in the slab must be mutated, a `Mutex` or `RwLock` may be used for
each item, providing granular locking of items rather than of the slab:

```rust
use sharded_slab::Slab;
use std::sync::{Arc, Mutex};
let slab = Arc::new(Slab::new());

let key = slab.insert(Mutex::new(String::from("hello world"))).unwrap();

let slab2 = slab.clone();
let thread2 = std::thread::spawn(move || {
    let hello = slab2.get(key).expect("item missing");
    let mut hello = hello.lock().expect("mutex poisoned");
    *hello = String::from("hello everyone!");
});

thread2.join().unwrap();

let hello = slab.get(key).expect("item missing");
let mut hello = hello.lock().expect("mutex poisoned");
assert_eq!(hello.as_str(), "hello everyone!");
```

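The [`Pool`] type described above can be used in a similar fashion. The
following is a sketch based on the [`Pool`] documentation (see the docs for
the exact signatures): `create_with` initializes an entry in place, and
`clear` resets it while retaining its allocation for reuse:

```rust
use sharded_slab::Pool;
let pool: Pool<String> = Pool::new();

// Initialize the entry in place with a closure.
let key = pool.create_with(|entry| entry.push_str("hello world")).unwrap();
assert_eq!(pool.get(key).unwrap(), String::from("hello world"));

// Clearing drops the entry's *contents*, but the `String`'s heap
// allocation is retained in the pool for reuse.
pool.clear(key);
assert!(pool.get(key).is_none());
```
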
## Comparison with Similar Crates

- [`slab`][slab crate]: Carl Lerche's `slab` crate provides a slab implementation with a
  similar API, implemented by storing all data in a single vector.

  Unlike `sharded-slab`, inserting and removing elements from the slab requires
  mutable access. This means that if the slab is accessed concurrently by
  multiple threads, it is necessary for it to be protected by a `Mutex` or
  `RwLock`. Items may not be inserted or removed (or accessed, if a `Mutex` is
  used) concurrently, even when they are unrelated. In many cases, the lock can
  become a significant bottleneck. On the other hand, `sharded-slab` allows
  separate indices in the slab to be accessed, inserted, and removed
  concurrently without requiring a global lock. Therefore, when the slab is
  shared across multiple threads, this crate offers significantly better
  performance than `slab`.

  However, the lock-free slab introduces some additional constant-factor
  overhead. This means that in use-cases where a slab is _not_ shared by
  multiple threads and locking is not required, `sharded-slab` will likely
  offer slightly worse performance.

  In summary: `sharded-slab` offers significantly improved performance in
  concurrent use-cases, while `slab` should be preferred in single-threaded
  use-cases.

[slab crate]: https://crates.io/crates/slab

## Safety and Correctness

Most implementations of lock-free data structures in Rust require some
amount of unsafe code, and this crate is not an exception. In order to catch
potential bugs in this unsafe code, we make use of [`loom`], a
permutation-testing tool for concurrent Rust programs. All `unsafe` blocks
in this crate occur in accesses to `loom` `UnsafeCell`s. This means that when
those accesses occur in this crate's tests, `loom` will assert that they are
valid under the C11 memory model across multiple permutations of concurrent
executions of those tests.

In order to guard against the [ABA problem][aba], this crate makes use of
_generational indices_. Each slot in the slab tracks a generation counter
which is incremented every time a value is inserted into that slot, and the
indices returned by `Slab::insert` include the generation of the slot when
the value was inserted, packed into the high-order bits of the index. This
ensures that if a value is inserted, removed, and a new value is inserted
into the same slot in the slab, the key returned by the first call to
`insert` will not map to the new value.

Since a fixed number of bits are set aside to use for storing the generation
counter, the counter will wrap around after being incremented a number of
times. To avoid situations where a returned index lives long enough to see the
generation counter wrap around to the same value, it is good to be fairly
generous when configuring the allocation of index bits.

[`loom`]: https://crates.io/crates/loom
[aba]: https://en.wikipedia.org/wiki/ABA_problem
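
The allocation of index bits can be tuned by implementing the crate's
`Config` trait. A sketch with illustrative values — every constant has a
default, so only the ones being changed need to be overridden:

```rust
use sharded_slab::{Config, Slab};

struct CustomConfig;

impl Config for CustomConfig {
    // Allowing fewer threads and pages than the defaults leaves more
    // index bits available for the generation counter.
    const MAX_THREADS: usize = 128;
    const MAX_PAGES: usize = 16;
    const INITIAL_PAGE_SIZE: usize = 32;
}

let slab = Slab::new_with_config::<CustomConfig>();
let key = slab.insert("hello world").unwrap();
assert_eq!(slab.get(key).unwrap(), "hello world");
```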

## Performance

These graphs were produced by [benchmarks] of the sharded slab implementation,
using the [`criterion`] crate.

The first shows the results of a benchmark where an increasing number of
items are inserted and then removed into a slab concurrently by five
threads. It compares the performance of the sharded slab implementation
with a `RwLock<slab::Slab>`:

Screen Shot 2019-10-01 at 5 09 49 PM

The second graph shows the results of a benchmark where an increasing
number of items are inserted and then removed by a _single_ thread. It
compares the performance of the sharded slab implementation with an
`RwLock<slab::Slab>` and a `&mut slab::Slab`.

Screen Shot 2019-10-01 at 5 13 45 PM

These benchmarks demonstrate that, while the sharded approach introduces
a small constant-factor overhead, it offers significantly better
performance across concurrent accesses.

[benchmarks]: https://github.com/hawkw/sharded-slab/blob/master/benches/bench.rs
[`criterion`]: https://crates.io/crates/criterion

## License

This project is licensed under the [MIT license](LICENSE).

### Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this project by you, shall be licensed as MIT, without any
additional terms or conditions.

--------------------------------------------------------------------------------
/benches/bench.rs:
--------------------------------------------------------------------------------
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use std::{
    sync::{Arc, Barrier, RwLock},
    thread,
    time::{Duration, Instant},
};

#[derive(Clone)]
struct MultithreadedBench<T> {
    start: Arc<Barrier>,
    end: Arc<Barrier>,
    slab: Arc<T>,
}

impl<T: Send + Sync + 'static> MultithreadedBench<T> {
    fn new(slab: Arc<T>) -> Self {
        Self {
            start: Arc::new(Barrier::new(5)),
            end: Arc::new(Barrier::new(5)),
            slab,
        }
    }

    fn thread(&self, f: impl FnOnce(&Barrier, &T) + Send + 'static) -> &Self {
        let start = self.start.clone();
        let end = self.end.clone();
        let slab = self.slab.clone();
        thread::spawn(move || {
            f(&start, &*slab);
            end.wait();
        });
        self
    }

    fn run(&self) -> Duration {
        self.start.wait();
        let t0 = Instant::now();
        self.end.wait();
        t0.elapsed()
    }
}

const N_INSERTIONS: &[usize] = &[100, 300, 500, 700, 1000, 3000, 5000];

fn insert_remove_local(c: &mut Criterion) {
    // the 10000-insertion benchmark takes the `slab` crate about an hour to
    // run; don't run this unless you're prepared for that...
    // const N_INSERTIONS: &'static [usize] = &[100, 500, 1000, 5000, 10000];
    let mut group = c.benchmark_group("insert_remove_local");
    let g = group.measurement_time(Duration::from_secs(15));

    for i in N_INSERTIONS {
        g.bench_with_input(BenchmarkId::new("sharded_slab", i), i, |b, &i| {
            b.iter_custom(|iters| {
                let mut total = Duration::from_secs(0);
                for _ in 0..iters {
                    let bench = MultithreadedBench::new(Arc::new(sharded_slab::Slab::new()));
                    let elapsed = bench
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> = (0..i).map(|i| slab.insert(i).unwrap()).collect();
                            for i in v {
                                slab.remove(i);
                            }
                        })
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> = (0..i).map(|i| slab.insert(i).unwrap()).collect();
                            for i in v {
                                slab.remove(i);
                            }
                        })
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> = (0..i).map(|i| slab.insert(i).unwrap()).collect();
                            for i in v {
                                slab.remove(i);
                            }
                        })
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> = (0..i).map(|i| slab.insert(i).unwrap()).collect();
                            for i in v {
                                slab.remove(i);
                            }
                        })
                        .run();
                    total += elapsed;
                }
                total
            })
        });
        g.bench_with_input(BenchmarkId::new("slab_biglock", i), i, |b, &i| {
            b.iter_custom(|iters| {
                let mut total = Duration::from_secs(0);
                for _ in 0..iters {
                    let bench = MultithreadedBench::new(Arc::new(RwLock::new(slab::Slab::new())));
                    let elapsed = bench
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> =
                                (0..i).map(|i| slab.write().unwrap().insert(i)).collect();
                            for i in v {
                                slab.write().unwrap().remove(i);
                            }
                        })
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> =
                                (0..i).map(|i| slab.write().unwrap().insert(i)).collect();
                            for i in v {
                                slab.write().unwrap().remove(i);
                            }
                        })
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> =
                                (0..i).map(|i| slab.write().unwrap().insert(i)).collect();
                            for i in v {
                                slab.write().unwrap().remove(i);
                            }
                        })
                        .thread(move |start, slab| {
                            start.wait();
                            let v: Vec<_> =
                                (0..i).map(|i| slab.write().unwrap().insert(i)).collect();
                            for i in v {
                                slab.write().unwrap().remove(i);
                            }
                        })
                        .run();
                    total += elapsed;
                }
                total
            })
        });
    }
    group.finish();
}

fn insert_remove_single_thread(c: &mut Criterion) {
    // the 10000-insertion benchmark takes the `slab` crate about an hour to
    // run; don't run this unless you're prepared for that...
    // const N_INSERTIONS: &'static [usize] = &[100, 500, 1000, 5000, 10000];
    let mut group = c.benchmark_group("insert_remove_single_threaded");

    for i in N_INSERTIONS {
        group.bench_with_input(BenchmarkId::new("sharded_slab", i), i, |b, &i| {
            let slab = sharded_slab::Slab::new();
            b.iter(|| {
                let v: Vec<_> = (0..i).map(|i| slab.insert(i).unwrap()).collect();
                for i in v {
                    slab.remove(i);
                }
            });
        });
        group.bench_with_input(BenchmarkId::new("slab_no_lock", i), i, |b, &i| {
            let mut slab = slab::Slab::new();
            b.iter(|| {
                let v: Vec<_> = (0..i).map(|i| slab.insert(i)).collect();
                for i in v {
                    slab.remove(i);
                }
            });
        });
        group.bench_with_input(BenchmarkId::new("slab_uncontended", i), i, |b, &i| {
            let slab = RwLock::new(slab::Slab::new());
            b.iter(|| {
                let v: Vec<_> = (0..i).map(|i| slab.write().unwrap().insert(i)).collect();
                for i in v {
                    slab.write().unwrap().remove(i);
                }
            });
        });
    }
    group.finish();
}

criterion_group!(benches, insert_remove_local, insert_remove_single_thread);
criterion_main!(benches);

--------------------------------------------------------------------------------
/bin/loom.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Runs Loom tests with defaults for Loom's configuration values.
#
# The tests are compiled in release mode to improve performance, but debug
# assertions are enabled.
#
# Any arguments to this script are passed to the `cargo test` invocation.

RUSTFLAGS="${RUSTFLAGS} --cfg loom -C debug-assertions=on" \
    LOOM_MAX_PREEMPTIONS="${LOOM_MAX_PREEMPTIONS:-2}" \
    LOOM_CHECKPOINT_INTERVAL="${LOOM_CHECKPOINT_INTERVAL:-1}" \
    LOOM_LOG=1 \
    LOOM_LOCATION=1 \
    cargo test --release --lib "$@"

--------------------------------------------------------------------------------
/flake.lock:
--------------------------------------------------------------------------------
{
  "nodes": {
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1692799911,
        "narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1693158576,
        "narHash": "sha256-aRTTXkYvhXosGx535iAFUaoFboUrZSYb1Ooih/auGp0=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "a999c1cc0c9eb2095729d5aa03e0d8f7ed256780",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs",
        "rust-overlay": "rust-overlay"
      }
    },
    "rust-overlay": {
      "inputs": {
        "flake-utils": [
          "flake-utils"
        ],
        "nixpkgs": [
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1693188660,
        "narHash": "sha256-F8vlVcYoEBRJqV3pN2QNSCI/A2i77ad5R9iiZ4llt1A=",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "23756b2c5594da5c1ad2f40ae2440b9f8a2165b7",
        "type": "github"
      },
"original": { 62 | "owner": "oxalica", 63 | "repo": "rust-overlay", 64 | "type": "github" 65 | } 66 | }, 67 | "systems": { 68 | "locked": { 69 | "lastModified": 1681028828, 70 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 71 | "owner": "nix-systems", 72 | "repo": "default", 73 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 74 | "type": "github" 75 | }, 76 | "original": { 77 | "owner": "nix-systems", 78 | "repo": "default", 79 | "type": "github" 80 | } 81 | } 82 | }, 83 | "root": "root", 84 | "version": 7 85 | } 86 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | # in flake.nix 2 | { 3 | description = 4 | "Flake containing a development shell for the `sharded-slab` crate"; 5 | 6 | inputs = { 7 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 8 | flake-utils.url = "github:numtide/flake-utils"; 9 | rust-overlay = { 10 | url = "github:oxalica/rust-overlay"; 11 | inputs = { 12 | nixpkgs.follows = "nixpkgs"; 13 | flake-utils.follows = "flake-utils"; 14 | }; 15 | }; 16 | }; 17 | 18 | outputs = { self, nixpkgs, flake-utils, rust-overlay }: 19 | flake-utils.lib.eachDefaultSystem (system: 20 | let 21 | overlays = [ (import rust-overlay) ]; 22 | pkgs = import nixpkgs { inherit system overlays; }; 23 | rustToolchain = pkgs.pkgsBuildHost.rust-bin.stable.latest.default; 24 | nativeBuildInputs = with pkgs; [ rustToolchain pkg-config ]; 25 | in with pkgs; { 26 | devShells.default = mkShell { inherit nativeBuildInputs; }; 27 | }); 28 | } 29 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "stable" 3 | profile = "default" 4 | targets = [ 5 | "i686-unknown-linux-musl", 6 | "x86_64-unknown-linux-gnu", 7 | ] -------------------------------------------------------------------------------- /src/cfg.rs: -------------------------------------------------------------------------------- 1 | use crate::page::{ 2 | slot::{Generation, RefCount}, 3 | Addr, 4 | }; 5 | use crate::Pack; 6 | use std::{fmt, marker::PhantomData}; 7 | /// Configuration parameters which can be overridden to tune the behavior of a slab. 8 | pub trait Config: Sized { 9 | /// The maximum number of threads which can access the slab. 10 | /// 11 | /// This value (rounded to a power of two) determines the number of shards 12 | /// in the slab. If a thread is created, accesses the slab, and then terminates, 13 | /// its shard may be reused and thus does not count against the maximum 14 | /// number of threads once the thread has terminated. 15 | const MAX_THREADS: usize = DefaultConfig::MAX_THREADS; 16 | /// The maximum number of pages in each shard in the slab. 17 | /// 18 | /// This value, in combination with `INITIAL_PAGE_SIZE`, determines how many 19 | /// bits of each index are used to represent page addresses. 20 | const MAX_PAGES: usize = DefaultConfig::MAX_PAGES; 21 | /// The size of the first page in each shard. 22 | /// 23 | /// When a page in a shard has been filled with values, a new page 24 | /// will be allocated that is twice as large as the previous page. Thus, the 25 | /// second page will be twice this size, and the third will be four times 26 | /// this size, and so on. 27 | /// 28 | /// Note that page sizes must be powers of two. 
    /// If this value is not a power
    /// of two, it will be rounded to the next power of two.
    const INITIAL_PAGE_SIZE: usize = DefaultConfig::INITIAL_PAGE_SIZE;
    /// Sets a number of high-order bits in each index which are reserved from
    /// user code.
    ///
    /// Note that these bits are taken from the generation counter; if the page
    /// address and thread IDs are configured to use a large number of bits,
    /// reserving additional bits will decrease the period of the generation
    /// counter. These should thus be used relatively sparingly, to ensure that
    /// generation counters are able to effectively prevent the ABA problem.
    const RESERVED_BITS: usize = 0;
}

pub(crate) trait CfgPrivate: Config {
    const USED_BITS: usize = Generation::<Self>::LEN + Generation::<Self>::SHIFT;
    const INITIAL_SZ: usize = next_pow2(Self::INITIAL_PAGE_SIZE);
    const MAX_SHARDS: usize = next_pow2(Self::MAX_THREADS - 1);
    const ADDR_INDEX_SHIFT: usize = Self::INITIAL_SZ.trailing_zeros() as usize + 1;

    fn page_size(n: usize) -> usize {
        Self::INITIAL_SZ * 2usize.pow(n as _)
    }

    fn debug() -> DebugConfig<Self> {
        DebugConfig { _cfg: PhantomData }
    }

    fn validate() {
        assert!(
            Self::INITIAL_SZ.is_power_of_two(),
            "invalid Config: {:#?}",
            Self::debug(),
        );
        assert!(
            Self::INITIAL_SZ <= Addr::<Self>::BITS,
            "invalid Config: {:#?}",
            Self::debug()
        );

        assert!(
            Generation::<Self>::BITS >= 3,
            "invalid Config: {:#?}\ngeneration counter should be at least 3 bits!",
            Self::debug()
        );

        assert!(
            Self::USED_BITS <= WIDTH,
            "invalid Config: {:#?}\ntotal number of bits per index is too large to fit in a word!",
            Self::debug()
        );

        assert!(
            WIDTH - Self::USED_BITS >= Self::RESERVED_BITS,
            "invalid Config: {:#?}\nindices are too large to fit reserved bits!",
            Self::debug()
        );

        assert!(
            RefCount::<Self>::MAX > 1,
            "invalid config: {:#?}\n maximum concurrent references would be {}",
            Self::debug(),
            RefCount::<Self>::MAX,
        );
    }

    #[inline(always)]
    fn unpack<A: Pack<Self>>(packed: usize) -> A {
        A::from_packed(packed)
    }

    #[inline(always)]
    fn unpack_addr(packed: usize) -> Addr<Self> {
        Self::unpack(packed)
    }

    #[inline(always)]
    fn unpack_tid(packed: usize) -> crate::Tid<Self> {
        Self::unpack(packed)
    }

    #[inline(always)]
    fn unpack_gen(packed: usize) -> Generation<Self> {
        Self::unpack(packed)
    }
}
impl<C: Config> CfgPrivate for C {}

/// Default slab configuration values.
#[derive(Copy, Clone)]
pub struct DefaultConfig {
    _p: (),
}

pub(crate) struct DebugConfig<C: Config> {
    _cfg: PhantomData<fn(C)>,
}

pub(crate) const WIDTH: usize = std::mem::size_of::<usize>() * 8;

pub(crate) const fn next_pow2(n: usize) -> usize {
    let pow2 = n.count_ones() == 1;
    let zeros = n.leading_zeros();
    1 << (WIDTH - zeros as usize - pow2 as usize)
}

// === impl DefaultConfig ===

impl Config for DefaultConfig {
    const INITIAL_PAGE_SIZE: usize = 32;

    #[cfg(target_pointer_width = "64")]
    const MAX_THREADS: usize = 4096;
    #[cfg(target_pointer_width = "32")]
    // TODO(eliza): can we find enough bits to give 32-bit platforms more threads?
    const MAX_THREADS: usize = 128;

    const MAX_PAGES: usize = WIDTH / 2;
}

impl fmt::Debug for DefaultConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Self::debug().fmt(f)
    }
}

impl<C: Config> fmt::Debug for DebugConfig<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(std::any::type_name::<C>())
            .field("initial_page_size", &C::INITIAL_SZ)
            .field("max_shards", &C::MAX_SHARDS)
            .field("max_pages", &C::MAX_PAGES)
            .field("used_bits", &C::USED_BITS)
            .field("reserved_bits", &C::RESERVED_BITS)
            .field("pointer_width", &WIDTH)
            .field("max_concurrent_references", &RefCount::<C>::MAX)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_util;
    use crate::Slab;

    #[test]
    #[cfg_attr(loom, ignore)]
    #[should_panic]
    fn validates_max_refs() {
        struct GiantGenConfig;

        // Configure the slab with a very large number of bits for the generation
        // counter. This will only leave 1 bit to use for the slot reference
        // counter, which will fail to validate.
        impl Config for GiantGenConfig {
            const INITIAL_PAGE_SIZE: usize = 1;
            const MAX_THREADS: usize = 1;
            const MAX_PAGES: usize = 1;
        }

        let _slab = Slab::<usize>::new_with_config::<GiantGenConfig>();
    }

    #[test]
    #[cfg_attr(loom, ignore)]
    fn big() {
        let slab = Slab::new();

        for i in 0..10000 {
            println!("{:?}", i);
            let k = slab.insert(i).expect("insert");
            assert_eq!(slab.get(k).expect("get"), i);
        }
    }

    #[test]
    #[cfg_attr(loom, ignore)]
    fn custom_page_sz() {
        let slab = Slab::new_with_config::<test_util::TinyConfig>();

        for i in 0..4096 {
            println!("{}", i);
            let k = slab.insert(i).expect("insert");
            assert_eq!(slab.get(k).expect("get"), i);
        }
    }
}

--------------------------------------------------------------------------------
/src/clear.rs:
--------------------------------------------------------------------------------
use std::{collections, hash, ops::DerefMut, sync};

/// Trait implemented by types which can be cleared in place, retaining any
/// allocated memory.
///
/// This is essentially a generalization of methods on standard library
/// collection types, such as [`Vec::clear`], [`String::clear`], and
/// [`HashMap::clear`]. These methods drop all data stored in the collection,
/// but retain the collection's heap allocation for future use. Types such as
/// `BTreeMap`, whose `clear` method drops allocations, should not
/// implement this trait.
///
/// When implemented for types which do not own a heap allocation, `Clear`
/// should reset the type in place if possible. If the type has an empty state
/// or stores `Option`s, those values should be reset to the empty state. For
/// "plain old data" types, which hold no pointers to other data and do not have
/// an empty or initial state, it's okay for a `Clear` implementation to be a
/// no-op. In that case, it essentially serves as a marker indicating that the
/// type may be reused to store new data.
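///
/// For example, a minimal hand-written `Clear` impl for a type that owns a
/// reusable buffer might look like this (an illustrative sketch, not a type
/// provided by this crate):
///
/// ```
/// use sharded_slab::Clear;
///
/// struct Buffer {
///     data: Vec<u8>,
/// }
///
/// impl Clear for Buffer {
///     fn clear(&mut self) {
///         // Drop the buffered data, but retain the allocated capacity.
///         self.data.clear();
///     }
/// }
/// ```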
///
/// [`Vec::clear`]: https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#method.clear
/// [`String::clear`]: https://doc.rust-lang.org/stable/std/string/struct.String.html#method.clear
/// [`HashMap::clear`]: https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html#method.clear
pub trait Clear {
    /// Clear all data in `self`, retaining the allocated capacity.
    fn clear(&mut self);
}

impl<T> Clear for Option<T> {
    fn clear(&mut self) {
        let _ = self.take();
    }
}

impl<T> Clear for Box<T>
where
    T: Clear,
{
    #[inline]
    fn clear(&mut self) {
        self.deref_mut().clear()
    }
}

impl<T> Clear for Vec<T> {
    #[inline]
    fn clear(&mut self) {
        Vec::clear(self)
    }
}

impl<K, V, S> Clear for collections::HashMap<K, V, S>
where
    K: hash::Hash + Eq,
    S: hash::BuildHasher,
{
    #[inline]
    fn clear(&mut self) {
        collections::HashMap::clear(self)
    }
}

impl<T, S> Clear for collections::HashSet<T, S>
where
    T: hash::Hash + Eq,
    S: hash::BuildHasher,
{
    #[inline]
    fn clear(&mut self) {
        collections::HashSet::clear(self)
    }
}

impl Clear for String {
    #[inline]
    fn clear(&mut self) {
        String::clear(self)
    }
}

impl<T: Clear> Clear for sync::Mutex<T> {
    #[inline]
    fn clear(&mut self) {
        self.get_mut().unwrap().clear();
    }
}

impl<T: Clear> Clear for sync::RwLock<T> {
    #[inline]
    fn clear(&mut self) {
        self.write().unwrap().clear();
    }
}

#[cfg(all(loom, test))]
impl<T: Clear> Clear for crate::sync::alloc::Track<T> {
    fn clear(&mut self) {
        self.get_mut().clear()
    }
}

--------------------------------------------------------------------------------
/src/implementation.rs:
--------------------------------------------------------------------------------
// This module exists only to provide a separate page for the implementation
// documentation.

//! Notes on `sharded-slab`'s implementation and design.
//!
//! # Design
//!
//! The sharded slab's design is strongly inspired by the ideas presented by
//! Leijen, Zorn, and de Moura in [Mimalloc: Free List Sharding in
//! Action][mimalloc]. In this report, the authors present a novel design for a
//! memory allocator based on a concept of _free list sharding_.
//!
//! Memory allocators must keep track of what memory regions are not currently
//! allocated ("free") in order to provide them to future allocation requests.
//! The term [_free list_][freelist] refers to a technique for performing this
//! bookkeeping, where each free block stores a pointer to the next free block,
//! forming a linked list. The memory allocator keeps a pointer to the most
//! recently freed block, the _head_ of the free list. To allocate more memory,
//! the allocator pops from the free list by setting the head pointer to the
//! next free block of the current head block, and returning the previous head.
//! To deallocate a block, the block is pushed to the free list by setting its
//! first word to the current head pointer, and the head pointer is set to point
//! to the deallocated block. Most implementations of slab allocators backed by
//! arrays or vectors use a similar technique, where pointers are replaced by
//! indices into the backing array.
//!
27 | //! When allocations and deallocations can occur concurrently across threads,
28 | //! they must synchronize accesses to the free list; either by putting the
29 | //! entire allocator state inside of a lock, or by using atomic operations to
30 | //! treat the free list as a lock-free structure (such as a [Treiber stack]). In
31 | //! both cases, there is a significant performance cost — even when the free
32 | //! list is lock-free, it is likely that a noticeable amount of time will be
33 | //! spent in compare-and-swap loops. Ideally, the global synchronization point
34 | //! created by the single global free list could be avoided as much as possible.
35 | //!
36 | //! The approach presented by Leijen, Zorn, and de Moura is to introduce
37 | //! sharding and thus increase the granularity of synchronization significantly.
38 | //! In mimalloc, the heap is _sharded_ so that each thread has its own
39 | //! thread-local heap. Objects are always allocated from the local heap of the
40 | //! thread where the allocation is performed. Because allocations are always
41 | //! done from a thread's local heap, they need not be synchronized.
42 | //!
43 | //! However, since objects can move between threads before being deallocated,
44 | //! _deallocations_ may still occur concurrently. Therefore, Leijen et al.
45 | //! introduce a concept of _local_ and _global_ free lists. When an object is
46 | //! deallocated on the same thread it was originally allocated on, it is placed
47 | //! on the local free list; if it is deallocated on another thread, it goes on
48 | //! the global free list for the heap of the thread from which it originated. To
49 | //! allocate, the local free list is used first; if it is empty, the entire
50 | //! global free list is popped onto the local free list. Since the local free
51 | //! list is only ever accessed by the thread it belongs to, it does not require
52 | //! synchronization at all, and because the global free list is popped from
53 | //! infrequently, the cost of synchronization has a reduced impact. A majority
54 | //! of allocations can occur without any synchronization at all; and
55 | //! deallocations only require synchronization when an object has left its
56 | //! parent thread (a relatively uncommon case).
57 | //!
58 | //! [mimalloc]: https://www.microsoft.com/en-us/research/uploads/prod/2019/06/mimalloc-tr-v1.pdf
59 | //! [freelist]: https://en.wikipedia.org/wiki/Free_list
60 | //! [Treiber stack]: https://en.wikipedia.org/wiki/Treiber_stack
61 | //!
62 | //! # Implementation
63 | //!
64 | //! A slab is represented as an array of [`MAX_THREADS`] _shards_. A shard
65 | //! consists of a vector of one or more _pages_ plus associated metadata.
66 | //! Finally, a page consists of an array of _slots_, plus head indices for the
67 | //! local and remote free lists.
68 | //!
69 | //! ```text
70 | //! ┌─────────────┐
71 | //! │ shard 1     │
72 | //! │             │    ┌─────────────┐        ┌────────┐
73 | //! │ pages───────┼───▶│ page 1      │        │        │
74 | //! ├─────────────┤    ├─────────────┤  ┌────▶│  next──┼─┐
75 | //! │ shard 2     │    │ page 2      │  │     ├────────┤ │
76 | //! ├─────────────┤    │             │  │     │XXXXXXXX│ │
77 | //! │ shard 3     │    │ local_head──┼──┘     ├────────┤ │
78 | //! └─────────────┘    │ remote_head─┼──┐     │        │◀┘
79 | //!       ...          ├─────────────┤  │     │  next──┼─┐
80 | //! ┌─────────────┐    │ page 3      │  │     ├────────┤ │
81 | //! │ shard n     │    └─────────────┘  │     │XXXXXXXX│ │
82 | //! └─────────────┘          ...        │     ├────────┤ │
83 | //!                    ┌─────────────┐  │     │XXXXXXXX│ │
84 | //!                    │ page n      │  │     ├────────┤ │
85 | //!                    └─────────────┘  │     │        │◀┘
86 | //!                                   └────▶│  next──┼───▶ ...
87 | //!                                          ├────────┤
88 | //!                                          │XXXXXXXX│
89 | //!                                          └────────┘
90 | //! ```
91 | //!
92 | //!
93 | //! The size of the first page in a shard is always a power of two, and every
94 | //! subsequent page added after the first is twice as large as the page that
95 | //! precedes it.
96 | //!
97 | //! ```text
98 | //!
99 | //! pg.
100 | //! ┌───┐   ┌─┬─┐
101 | //! │ 0 │───▶ │ │
102 | //! ├───┤   ├─┼─┼─┬─┐
103 | //! │ 1 │───▶ │ │ │ │
104 | //! ├───┤   ├─┼─┼─┼─┼─┬─┬─┬─┐
105 | //! │ 2 │───▶ │ │ │ │ │ │ │ │
106 | //! ├───┤   ├─┼─┼─┼─┼─┼─┼─┼─┼─┬─┬─┬─┬─┬─┬─┬─┐
107 | //! │ 3 │───▶ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
108 | //! └───┘   └─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┘
109 | //! ```
110 | //!
111 | //! When searching for a free slot, the smallest page is searched first, and if
112 | //! it is full, the search proceeds to the next page until either a free slot is
113 | //! found or all available pages have been searched. If all available pages have
114 | //! been searched and the maximum number of pages has not yet been reached, a
115 | //! new page is then allocated.
116 | //!
117 | //! Since every page is twice as large as the previous page, and all page sizes
118 | //! are powers of two, we can determine the page index that contains a given
119 | //! address by shifting the address down by the smallest page size and
120 | //! looking at how many twos places are necessary to represent that number,
121 | //! which tells us what power-of-two page size it fits inside of. We can
122 | //! determine the number of twos places by counting the number of leading
123 | //! zeros (unused twos places) in the number's binary representation, and
124 | //! subtracting that count from the total number of bits in a word.
125 | //!
126 | //! The formula for determining the page number that contains an offset is thus:
127 | //!
128 | //! ```rust,ignore
129 | //! WIDTH - ((offset + INITIAL_PAGE_SIZE) >> INDEX_SHIFT).leading_zeros()
130 | //! ```
131 | //!
132 | //! where `WIDTH` is the number of bits in a `usize`, and `INDEX_SHIFT` is
133 | //!
134 | //! ```rust,ignore
135 | //! INITIAL_PAGE_SIZE.trailing_zeros() + 1;
136 | //! ```
137 | //!
138 | //! For example, if `INITIAL_PAGE_SIZE` is 32, then `INDEX_SHIFT` is 6: offset
139 | //! 95 (the last slot on the second page) gives `(95 + 32) >> 6 == 1`, which
140 | //! has `WIDTH - 1` leading zeros, so it lands on page `WIDTH - (WIDTH - 1) ==
141 | //! 1`.
142 | //!
143 | //! [`MAX_THREADS`]: https://docs.rs/sharded-slab/latest/sharded_slab/trait.Config.html#associatedconstant.MAX_THREADS
144 | 
-------------------------------------------------------------------------------- /src/iter.rs: --------------------------------------------------------------------------------
1 | use std::{iter::FusedIterator, slice};
2 | 
3 | use crate::{cfg, page, shard};
4 | 
5 | /// An exclusive fused iterator over the items in a [`Slab`](crate::Slab).
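///
/// A `UniqueIter` is returned by `Slab::unique_iter`, which takes `&mut self`;
/// that exclusive borrow is what allows the iterator to walk every shard
/// without synchronization. A brief usage sketch:
///
/// ```rust,ignore
/// let mut slab = sharded_slab::Slab::new();
/// slab.insert(1).unwrap();
/// slab.insert(2).unwrap();
///
/// // Exclusive access to the slab lets us visit items stored by any thread.
/// let sum: i32 = slab.unique_iter().sum();
/// assert_eq!(sum, 3);
/// ```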
6 | #[must_use = "iterators are lazy and do nothing unless consumed"] 7 | #[derive(Debug)] 8 | pub struct UniqueIter<'a, T, C: cfg::Config> { 9 | pub(super) shards: shard::IterMut<'a, Option, C>, 10 | pub(super) pages: slice::Iter<'a, page::Shared, C>>, 11 | pub(super) slots: Option>, 12 | } 13 | 14 | impl<'a, T, C: cfg::Config> Iterator for UniqueIter<'a, T, C> { 15 | type Item = &'a T; 16 | 17 | fn next(&mut self) -> Option { 18 | test_println!("UniqueIter::next"); 19 | loop { 20 | test_println!("-> try next slot"); 21 | if let Some(item) = self.slots.as_mut().and_then(|slots| slots.next()) { 22 | test_println!("-> found an item!"); 23 | return Some(item); 24 | } 25 | 26 | test_println!("-> try next page"); 27 | if let Some(page) = self.pages.next() { 28 | test_println!("-> found another page"); 29 | self.slots = page.iter(); 30 | continue; 31 | } 32 | 33 | test_println!("-> try next shard"); 34 | if let Some(shard) = self.shards.next() { 35 | test_println!("-> found another shard"); 36 | self.pages = shard.iter(); 37 | } else { 38 | test_println!("-> all done!"); 39 | return None; 40 | } 41 | } 42 | } 43 | } 44 | 45 | impl FusedIterator for UniqueIter<'_, T, C> {} 46 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! test_println { 2 | ($($arg:tt)*) => { 3 | if cfg!(test) && cfg!(slab_print) { 4 | if std::thread::panicking() { 5 | // getting the thread ID while panicking doesn't seem to play super nicely with loom's 6 | // mock lazy_static... 7 | println!("[PANIC {:>17}:{:<3}] {}", file!(), line!(), format_args!($($arg)*)) 8 | } else { 9 | println!("[{:?} {:>17}:{:<3}] {}", crate::Tid::::current(), file!(), line!(), format_args!($($arg)*)) 10 | } 11 | } 12 | } 13 | } 14 | 15 | #[cfg(all(test, loom))] 16 | macro_rules! test_dbg { 17 | ($e:expr) => { 18 | match $e { 19 | e => { 20 | test_println!("{} = {:?}", stringify!($e), &e); 21 | e 22 | } 23 | } 24 | }; 25 | } 26 | 27 | macro_rules! panic_in_drop { 28 | ($($arg:tt)*) => { 29 | if !std::thread::panicking() { 30 | panic!($($arg)*) 31 | } else { 32 | let thread = std::thread::current(); 33 | eprintln!( 34 | "thread '{thread}' attempted to panic at '{msg}', {file}:{line}:{col}\n\ 35 | note: we were already unwinding due to a previous panic.", 36 | thread = thread.name().unwrap_or(""), 37 | msg = format_args!($($arg)*), 38 | file = file!(), 39 | line = line!(), 40 | col = column!(), 41 | ); 42 | } 43 | } 44 | } 45 | 46 | macro_rules! 
debug_assert_eq_in_drop {
47 | ($this:expr, $that:expr) => {
48 | debug_assert_eq_in_drop!(@inner $this, $that, "")
49 | };
50 | ($this:expr, $that:expr, $($arg:tt)+) => {
51 | debug_assert_eq_in_drop!(@inner $this, $that, format_args!(": {}", format_args!($($arg)+)))
52 | };
53 | (@inner $this:expr, $that:expr, $msg:expr) => {
54 | if cfg!(debug_assertions) {
55 | if $this != $that {
56 | panic_in_drop!(
57 | "assertion failed ({} == {})\n left: `{:?}`,\n right: `{:?}`{}",
58 | stringify!($this),
59 | stringify!($that),
60 | $this,
61 | $that,
62 | $msg,
63 | )
64 | }
65 | }
66 | }
67 | }
-------------------------------------------------------------------------------- /src/page/mod.rs: --------------------------------------------------------------------------------
1 | use crate::cfg::{self, CfgPrivate};
2 | use crate::clear::Clear;
3 | use crate::sync::UnsafeCell;
4 | use crate::Pack;
5 | 
6 | pub(crate) mod slot;
7 | mod stack;
8 | pub(crate) use self::slot::Slot;
9 | use std::{fmt, marker::PhantomData};
10 | 
11 | /// A page address encodes the location of a slot within a shard (the page
12 | /// number and offset within that page) as a single linear value.
13 | #[repr(transparent)]
14 | pub(crate) struct Addr<C: cfg::Config> {
15 | addr: usize,
16 | _cfg: PhantomData<fn(C)>,
17 | }
18 | 
19 | impl<C: cfg::Config> Addr<C> {
20 | const NULL: usize = Self::BITS + 1;
21 | 
22 | pub(crate) fn index(self) -> usize {
23 | // Since every page is twice as large as the previous page, and all page sizes
24 | // are powers of two, we can determine the page index that contains a given
25 | // address by counting leading zeros, which tells us what power of two
26 | // the offset fits into.
27 | //
28 | // First, we must shift down to the smallest page size, so that the last
29 | // offset on the first page becomes 0.
30 | let shifted = (self.addr + C::INITIAL_SZ) >> C::ADDR_INDEX_SHIFT;
31 | // Now, we can determine the number of twos places by counting the
32 | // number of leading zeros (unused twos places) in the number's binary
33 | // representation, and subtracting that count from the total number of bits in a word.
34 | cfg::WIDTH - shifted.leading_zeros() as usize
35 | }
36 | 
37 | pub(crate) fn offset(self) -> usize {
38 | self.addr
39 | }
40 | }
41 | 
42 | pub(crate) trait FreeList<C> {
43 | fn push<T>(&self, new_head: usize, slot: &Slot<T, C>)
44 | where
45 | C: cfg::Config;
46 | }
47 | 
48 | impl<C: cfg::Config> Pack<C> for Addr<C> {
49 | const LEN: usize = C::MAX_PAGES + C::ADDR_INDEX_SHIFT;
50 | 
51 | type Prev = ();
52 | 
53 | fn as_usize(&self) -> usize {
54 | self.addr
55 | }
56 | 
57 | fn from_usize(addr: usize) -> Self {
58 | debug_assert!(addr <= Self::BITS);
59 | Self {
60 | addr,
61 | _cfg: PhantomData,
62 | }
63 | }
64 | }
65 | 
66 | pub(crate) type Iter<'a, T, C> = std::iter::FilterMap<
67 | std::slice::Iter<'a, Slot<Option<T>, C>>,
68 | fn(&'a Slot<Option<T>, C>) -> Option<&'a T>,
69 | >;
70 | 
71 | pub(crate) struct Local {
72 | /// Index of the first slot on the local free list
73 | head: UnsafeCell<usize>,
74 | }
75 | 
76 | pub(crate) struct Shared<T, C> {
77 | /// The remote free list
78 | ///
79 | /// Slots freed from a remote thread are pushed onto this list.
80 | remote: stack::TransferStack<C>,
81 | // Total size of the page.
82 | //
83 | // If the head index of the local or remote free list is greater than the size of the
84 | // page, then that free list is empty. If the head of both free lists is greater than
85 | // `size`, then there are no slots left in that page.
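// (Both free lists use an index that is out of range for the page as their
// "empty" sentinel: `Addr::NULL`, defined above as `Self::BITS + 1`, can
// never be a valid in-page slot index.)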
86 | size: usize,
87 | prev_sz: usize,
88 | slab: UnsafeCell<Option<Slots<T, C>>>,
89 | }
90 | 
91 | type Slots<T, C> = Box<[Slot<T, C>]>;
92 | 
93 | impl Local {
94 | pub(crate) fn new() -> Self {
95 | Self {
96 | head: UnsafeCell::new(0),
97 | }
98 | }
99 | 
100 | #[inline(always)]
101 | fn head(&self) -> usize {
102 | self.head.with(|head| unsafe { *head })
103 | }
104 | 
105 | #[inline(always)]
106 | fn set_head(&self, new_head: usize) {
107 | self.head.with_mut(|head| unsafe {
108 | *head = new_head;
109 | })
110 | }
111 | }
112 | 
113 | impl<C: cfg::Config> FreeList<C> for Local {
114 | fn push<T>(&self, new_head: usize, slot: &Slot<T, C>) {
115 | slot.set_next(self.head());
116 | self.set_head(new_head);
117 | }
118 | }
119 | 
120 | impl<T, C> Shared<T, C>
121 | where
122 | C: cfg::Config,
123 | {
124 | const NULL: usize = Addr::<C>::NULL;
125 | 
126 | pub(crate) fn new(size: usize, prev_sz: usize) -> Self {
127 | Self {
128 | prev_sz,
129 | size,
130 | remote: stack::TransferStack::new(),
131 | slab: UnsafeCell::new(None),
132 | }
133 | }
134 | 
135 | /// Return the head of the freelist
136 | ///
137 | /// If there is space on the local list, it returns the head of the local list. Otherwise, it
138 | /// pops all the slots from the global list and returns the head of that list.
139 | ///
140 | /// *Note*: The local list's head is reset when setting the new state in the slot pointed to
141 | /// by the `head` returned from this function.
142 | #[inline]
143 | fn pop(&self, local: &Local) -> Option<usize> {
144 | let head = local.head();
145 | 
146 | test_println!("-> local head {:?}", head);
147 | 
148 | // are there any items on the local free list? (fast path)
149 | let head = if head < self.size {
150 | head
151 | } else {
152 | // slow path: if the local free list is empty, pop all the items on
153 | // the remote free list.
154 | let head = self.remote.pop_all();
155 | 
156 | test_println!("-> remote head {:?}", head);
157 | head?
158 | };
159 | 
160 | // if the head is still null, both the local and remote free lists are
161 | // empty --- we can't fit any more items on this page.
162 | if head == Self::NULL {
163 | test_println!("-> NULL! {:?}", head);
164 | None
165 | } else {
166 | Some(head)
167 | }
168 | }
169 | 
170 | /// Returns `true` if storage has not yet been allocated for this page, and
171 | /// `false` otherwise.
172 | #[inline] 173 | fn is_unallocated(&self) -> bool { 174 | self.slab.with(|s| unsafe { (*s).is_none() }) 175 | } 176 | 177 | #[inline] 178 | pub(crate) fn with_slot<'a, U>( 179 | &'a self, 180 | addr: Addr, 181 | f: impl FnOnce(&'a Slot) -> Option, 182 | ) -> Option { 183 | let poff = addr.offset() - self.prev_sz; 184 | 185 | test_println!("-> offset {:?}", poff); 186 | 187 | self.slab.with(|slab| { 188 | let slot = unsafe { &*slab }.as_ref()?.get(poff)?; 189 | f(slot) 190 | }) 191 | } 192 | 193 | #[inline(always)] 194 | pub(crate) fn free_list(&self) -> &impl FreeList { 195 | &self.remote 196 | } 197 | } 198 | 199 | impl<'a, T, C> Shared, C> 200 | where 201 | C: cfg::Config + 'a, 202 | { 203 | pub(crate) fn take( 204 | &self, 205 | addr: Addr, 206 | gen: slot::Generation, 207 | free_list: &F, 208 | ) -> Option 209 | where 210 | F: FreeList, 211 | { 212 | let offset = addr.offset() - self.prev_sz; 213 | 214 | test_println!("-> take: offset {:?}", offset); 215 | 216 | self.slab.with(|slab| { 217 | let slab = unsafe { &*slab }.as_ref()?; 218 | let slot = slab.get(offset)?; 219 | slot.remove_value(gen, offset, free_list) 220 | }) 221 | } 222 | 223 | pub(crate) fn remove>( 224 | &self, 225 | addr: Addr, 226 | gen: slot::Generation, 227 | free_list: &F, 228 | ) -> bool { 229 | let offset = addr.offset() - self.prev_sz; 230 | 231 | test_println!("-> offset {:?}", offset); 232 | 233 | self.slab.with(|slab| { 234 | let slab = unsafe { &*slab }.as_ref(); 235 | if let Some(slot) = slab.and_then(|slab| slab.get(offset)) { 236 | slot.try_remove_value(gen, offset, free_list) 237 | } else { 238 | false 239 | } 240 | }) 241 | } 242 | 243 | // Need this function separately, as we need to pass a function pointer to `filter_map` and 244 | // `Slot::value` just returns a `&T`, specifically a `&Option` for this impl. 245 | fn make_ref(slot: &'a Slot, C>) -> Option<&'a T> { 246 | slot.value().as_ref() 247 | } 248 | 249 | pub(crate) fn iter(&self) -> Option> { 250 | let slab = self.slab.with(|slab| unsafe { (*slab).as_ref() }); 251 | slab.map(|slab| { 252 | slab.iter() 253 | .filter_map(Shared::make_ref as fn(&'a Slot, C>) -> Option<&'a T>) 254 | }) 255 | } 256 | } 257 | 258 | impl Shared 259 | where 260 | T: Clear + Default, 261 | C: cfg::Config, 262 | { 263 | pub(crate) fn init_with( 264 | &self, 265 | local: &Local, 266 | init: impl FnOnce(usize, &Slot) -> Option, 267 | ) -> Option { 268 | let head = self.pop(local)?; 269 | 270 | // do we need to allocate storage for this page? 271 | if self.is_unallocated() { 272 | self.allocate(); 273 | } 274 | 275 | let index = head + self.prev_sz; 276 | 277 | let result = self.slab.with(|slab| { 278 | let slab = unsafe { &*(slab) } 279 | .as_ref() 280 | .expect("page must have been allocated to insert!"); 281 | let slot = &slab[head]; 282 | let result = init(index, slot)?; 283 | local.set_head(slot.next()); 284 | Some(result) 285 | })?; 286 | 287 | test_println!("-> init_with: insert at offset: {}", index); 288 | Some(result) 289 | } 290 | 291 | /// Allocates storage for the page's slots. 
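///
/// Storage is allocated lazily, on the first insertion into this page. The
/// freshly allocated slots are chained into a fully-free list: each slot's
/// `next` index points at the slot after it, and the last slot's `next` is
/// the `NULL` sentinel.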
292 | #[cold] 293 | fn allocate(&self) { 294 | test_println!("-> alloc new page ({})", self.size); 295 | debug_assert!(self.is_unallocated()); 296 | 297 | let mut slab = Vec::with_capacity(self.size); 298 | slab.extend((1..self.size).map(Slot::new)); 299 | slab.push(Slot::new(Self::NULL)); 300 | self.slab.with_mut(|s| { 301 | // safety: this mut access is safe — it only occurs to initially allocate the page, 302 | // which only happens on this thread; if the page has not yet been allocated, other 303 | // threads will not try to access it yet. 304 | unsafe { 305 | *s = Some(slab.into_boxed_slice()); 306 | } 307 | }); 308 | } 309 | 310 | pub(crate) fn mark_clear>( 311 | &self, 312 | addr: Addr, 313 | gen: slot::Generation, 314 | free_list: &F, 315 | ) -> bool { 316 | let offset = addr.offset() - self.prev_sz; 317 | 318 | test_println!("-> offset {:?}", offset); 319 | 320 | self.slab.with(|slab| { 321 | let slab = unsafe { &*slab }.as_ref(); 322 | if let Some(slot) = slab.and_then(|slab| slab.get(offset)) { 323 | slot.try_clear_storage(gen, offset, free_list) 324 | } else { 325 | false 326 | } 327 | }) 328 | } 329 | 330 | pub(crate) fn clear>( 331 | &self, 332 | addr: Addr, 333 | gen: slot::Generation, 334 | free_list: &F, 335 | ) -> bool { 336 | let offset = addr.offset() - self.prev_sz; 337 | 338 | test_println!("-> offset {:?}", offset); 339 | 340 | self.slab.with(|slab| { 341 | let slab = unsafe { &*slab }.as_ref(); 342 | if let Some(slot) = slab.and_then(|slab| slab.get(offset)) { 343 | slot.clear_storage(gen, offset, free_list) 344 | } else { 345 | false 346 | } 347 | }) 348 | } 349 | } 350 | 351 | impl fmt::Debug for Local { 352 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 353 | self.head.with(|head| { 354 | let head = unsafe { *head }; 355 | f.debug_struct("Local") 356 | .field("head", &format_args!("{:#0x}", head)) 357 | .finish() 358 | }) 359 | } 360 | } 361 | 362 | impl fmt::Debug for Shared { 363 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 364 | f.debug_struct("Shared") 365 | .field("remote", &self.remote) 366 | .field("prev_sz", &self.prev_sz) 367 | .field("size", &self.size) 368 | // .field("slab", &self.slab) 369 | .finish() 370 | } 371 | } 372 | 373 | impl fmt::Debug for Addr { 374 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 375 | f.debug_struct("Addr") 376 | .field("addr", &format_args!("{:#0x}", &self.addr)) 377 | .field("index", &self.index()) 378 | .field("offset", &self.offset()) 379 | .finish() 380 | } 381 | } 382 | 383 | impl PartialEq for Addr { 384 | fn eq(&self, other: &Self) -> bool { 385 | self.addr == other.addr 386 | } 387 | } 388 | 389 | impl Eq for Addr {} 390 | 391 | impl PartialOrd for Addr { 392 | fn partial_cmp(&self, other: &Self) -> Option { 393 | Some(self.cmp(other)) 394 | } 395 | } 396 | 397 | impl Ord for Addr { 398 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 399 | self.addr.cmp(&other.addr) 400 | } 401 | } 402 | 403 | impl Clone for Addr { 404 | fn clone(&self) -> Self { 405 | *self 406 | } 407 | } 408 | 409 | impl Copy for Addr {} 410 | 411 | #[inline(always)] 412 | pub(crate) fn indices(idx: usize) -> (Addr, usize) { 413 | let addr = C::unpack_addr(idx); 414 | (addr, addr.index()) 415 | } 416 | 417 | #[cfg(test)] 418 | mod test { 419 | use super::*; 420 | use crate::Pack; 421 | use proptest::prelude::*; 422 | 423 | proptest! 
{ 424 | #[test] 425 | fn addr_roundtrips(pidx in 0usize..Addr::::BITS) { 426 | let addr = Addr::::from_usize(pidx); 427 | let packed = addr.pack(0); 428 | assert_eq!(addr, Addr::from_packed(packed)); 429 | } 430 | #[test] 431 | fn gen_roundtrips(gen in 0usize..slot::Generation::::BITS) { 432 | let gen = slot::Generation::::from_usize(gen); 433 | let packed = gen.pack(0); 434 | assert_eq!(gen, slot::Generation::from_packed(packed)); 435 | } 436 | 437 | #[test] 438 | fn page_roundtrips( 439 | gen in 0usize..slot::Generation::::BITS, 440 | addr in 0usize..Addr::::BITS, 441 | ) { 442 | let gen = slot::Generation::::from_usize(gen); 443 | let addr = Addr::::from_usize(addr); 444 | let packed = gen.pack(addr.pack(0)); 445 | assert_eq!(addr, Addr::from_packed(packed)); 446 | assert_eq!(gen, slot::Generation::from_packed(packed)); 447 | } 448 | } 449 | } 450 | -------------------------------------------------------------------------------- /src/page/slot.rs: -------------------------------------------------------------------------------- 1 | use super::FreeList; 2 | use crate::sync::{ 3 | atomic::{AtomicUsize, Ordering}, 4 | hint, UnsafeCell, 5 | }; 6 | use crate::{cfg, clear::Clear, Pack, Tid}; 7 | use std::{fmt, marker::PhantomData, mem, ptr, thread}; 8 | 9 | pub(crate) struct Slot { 10 | lifecycle: AtomicUsize, 11 | /// The offset of the next item on the free list. 12 | next: UnsafeCell, 13 | /// The data stored in the slot. 14 | item: UnsafeCell, 15 | _cfg: PhantomData, 16 | } 17 | 18 | #[derive(Debug)] 19 | pub(crate) struct Guard { 20 | slot: ptr::NonNull>, 21 | } 22 | 23 | #[derive(Debug)] 24 | pub(crate) struct InitGuard { 25 | slot: ptr::NonNull>, 26 | curr_lifecycle: usize, 27 | released: bool, 28 | } 29 | 30 | #[repr(transparent)] 31 | pub(crate) struct Generation { 32 | value: usize, 33 | _cfg: PhantomData, 34 | } 35 | 36 | #[repr(transparent)] 37 | pub(crate) struct RefCount { 38 | value: usize, 39 | _cfg: PhantomData, 40 | } 41 | 42 | pub(crate) struct Lifecycle { 43 | state: State, 44 | _cfg: PhantomData, 45 | } 46 | struct LifecycleGen(Generation); 47 | 48 | #[derive(Debug, Eq, PartialEq, Copy, Clone)] 49 | #[repr(usize)] 50 | enum State { 51 | Present = 0b00, 52 | Marked = 0b01, 53 | Removing = 0b11, 54 | } 55 | 56 | impl Pack for Generation { 57 | /// Use all the remaining bits in the word for the generation counter, minus 58 | /// any bits reserved by the user. 
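///
/// The same number of generation bits is used in both packed words that this
/// module maintains: in a slab key, the generation is packed directly above
/// the thread ID, while in a slot's `lifecycle` word it is packed above the
/// reference count and the two state bits (via `LifecycleGen` below).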
59 | const LEN: usize = (cfg::WIDTH - C::RESERVED_BITS) - Self::SHIFT; 60 | 61 | type Prev = Tid; 62 | 63 | #[inline(always)] 64 | fn from_usize(u: usize) -> Self { 65 | debug_assert!(u <= Self::BITS); 66 | Self::new(u) 67 | } 68 | 69 | #[inline(always)] 70 | fn as_usize(&self) -> usize { 71 | self.value 72 | } 73 | } 74 | 75 | impl Generation { 76 | fn new(value: usize) -> Self { 77 | Self { 78 | value, 79 | _cfg: PhantomData, 80 | } 81 | } 82 | } 83 | 84 | // Slot methods which should work across all trait bounds 85 | impl Slot 86 | where 87 | C: cfg::Config, 88 | { 89 | #[inline(always)] 90 | pub(super) fn next(&self) -> usize { 91 | self.next.with(|next| unsafe { *next }) 92 | } 93 | 94 | #[inline(always)] 95 | pub(crate) fn value(&self) -> &T { 96 | self.item.with(|item| unsafe { &*item }) 97 | } 98 | 99 | #[inline(always)] 100 | pub(super) fn set_next(&self, next: usize) { 101 | self.next.with_mut(|n| unsafe { 102 | (*n) = next; 103 | }) 104 | } 105 | 106 | #[inline(always)] 107 | pub(crate) fn get(&self, gen: Generation) -> Option> { 108 | let mut lifecycle = self.lifecycle.load(Ordering::Acquire); 109 | loop { 110 | // Unpack the current state. 111 | let state = Lifecycle::::from_packed(lifecycle); 112 | let current_gen = LifecycleGen::::from_packed(lifecycle).0; 113 | let refs = RefCount::::from_packed(lifecycle); 114 | 115 | test_println!( 116 | "-> get {:?}; current_gen={:?}; lifecycle={:#x}; state={:?}; refs={:?};", 117 | gen, 118 | current_gen, 119 | lifecycle, 120 | state, 121 | refs, 122 | ); 123 | 124 | // Is it okay to access this slot? The accessed generation must be 125 | // current, and the slot must not be in the process of being 126 | // removed. If we can no longer access the slot at the given 127 | // generation, return `None`. 128 | if gen != current_gen || state != Lifecycle::PRESENT { 129 | test_println!("-> get: no longer exists!"); 130 | return None; 131 | } 132 | 133 | // Try to increment the slot's ref count by one. 134 | let new_refs = refs.incr()?; 135 | match self.lifecycle.compare_exchange( 136 | lifecycle, 137 | new_refs.pack(lifecycle), 138 | Ordering::AcqRel, 139 | Ordering::Acquire, 140 | ) { 141 | Ok(_) => { 142 | test_println!("-> {:?}", new_refs); 143 | return Some(Guard { 144 | slot: ptr::NonNull::from(self), 145 | }); 146 | } 147 | Err(actual) => { 148 | // Another thread modified the slot's state before us! We 149 | // need to retry with the new state. 150 | // 151 | // Since the new state may mean that the accessed generation 152 | // is no longer valid, we'll check again on the next 153 | // iteration of the loop. 154 | test_println!("-> get: retrying; lifecycle={:#x};", actual); 155 | lifecycle = actual; 156 | } 157 | }; 158 | } 159 | } 160 | 161 | /// Marks this slot to be released, returning `true` if the slot can be 162 | /// mutated *now* and `false` otherwise. 163 | /// 164 | /// This method checks if there are any references to this slot. If there _are_ valid 165 | /// references, it just marks them for modification and returns and the next thread calling 166 | /// either `clear_storage` or `remove_value` will try and modify the storage 167 | fn mark_release(&self, gen: Generation) -> Option { 168 | let mut lifecycle = self.lifecycle.load(Ordering::Acquire); 169 | let mut curr_gen; 170 | 171 | // Try to advance the slot's state to "MARKED", which indicates that it 172 | // should be removed when it is no longer concurrently accessed. 
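// (Like the other lifecycle transitions, this is a CAS loop: when the
// compare-exchange fails, the freshly observed lifecycle word is unpacked
// again, so a concurrent generation advance or state change is re-checked
// before we retry.)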
173 | loop { 174 | curr_gen = LifecycleGen::from_packed(lifecycle).0; 175 | test_println!( 176 | "-> mark_release; gen={:?}; current_gen={:?};", 177 | gen, 178 | curr_gen 179 | ); 180 | 181 | // Is the slot still at the generation we are trying to remove? 182 | if gen != curr_gen { 183 | return None; 184 | } 185 | 186 | let state = Lifecycle::::from_packed(lifecycle).state; 187 | test_println!("-> mark_release; state={:?};", state); 188 | match state { 189 | State::Removing => { 190 | test_println!("--> mark_release; cannot release (already removed!)"); 191 | return None; 192 | } 193 | State::Marked => { 194 | test_println!("--> mark_release; already marked;"); 195 | break; 196 | } 197 | State::Present => {} 198 | }; 199 | 200 | // Set the new state to `MARKED`. 201 | let new_lifecycle = Lifecycle::::MARKED.pack(lifecycle); 202 | test_println!( 203 | "-> mark_release; old_lifecycle={:#x}; new_lifecycle={:#x};", 204 | lifecycle, 205 | new_lifecycle 206 | ); 207 | 208 | match self.lifecycle.compare_exchange( 209 | lifecycle, 210 | new_lifecycle, 211 | Ordering::AcqRel, 212 | Ordering::Acquire, 213 | ) { 214 | Ok(_) => break, 215 | Err(actual) => { 216 | test_println!("-> mark_release; retrying"); 217 | lifecycle = actual; 218 | } 219 | } 220 | } 221 | 222 | // Unpack the current reference count to see if we can remove the slot now. 223 | let refs = RefCount::::from_packed(lifecycle); 224 | test_println!("-> mark_release: marked; refs={:?};", refs); 225 | 226 | // Are there currently outstanding references to the slot? If so, it 227 | // will have to be removed when those references are dropped. 228 | Some(refs.value == 0) 229 | } 230 | 231 | /// Mutates this slot. 232 | /// 233 | /// This method spins until no references to this slot are left, and calls the mutator 234 | fn release_with(&self, gen: Generation, offset: usize, free: &F, mutator: M) -> R 235 | where 236 | F: FreeList, 237 | M: FnOnce(Option<&mut T>) -> R, 238 | { 239 | let mut lifecycle = self.lifecycle.load(Ordering::Acquire); 240 | let mut advanced = false; 241 | // Exponential spin backoff while waiting for the slot to be released. 242 | let mut spin_exp = 0; 243 | let next_gen = gen.advance(); 244 | loop { 245 | let current_gen = LifecycleGen::from_packed(lifecycle).0; 246 | test_println!("-> release_with; lifecycle={:#x}; expected_gen={:?}; current_gen={:?}; next_gen={:?};", 247 | lifecycle, 248 | gen, 249 | current_gen, 250 | next_gen 251 | ); 252 | 253 | // First, make sure we are actually able to remove the value. 254 | // If we're going to remove the value, the generation has to match 255 | // the value that `remove_value` was called with...unless we've 256 | // already stored the new generation. 257 | if (!advanced) && gen != current_gen { 258 | test_println!("-> already removed!"); 259 | return mutator(None); 260 | } 261 | 262 | match self.lifecycle.compare_exchange( 263 | lifecycle, 264 | LifecycleGen(next_gen).pack(lifecycle), 265 | Ordering::AcqRel, 266 | Ordering::Acquire, 267 | ) { 268 | Ok(actual) => { 269 | // If we're in this state, we have successfully advanced to 270 | // the next generation. 271 | advanced = true; 272 | 273 | // Make sure that there are no outstanding references. 
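// (On success, `compare_exchange` returns the previous value of the
// lifecycle word, so the ref count unpacked below is a snapshot taken at
// the moment the generation was advanced.)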
274 | let refs = RefCount::<C>::from_packed(actual);
275 | test_println!("-> advanced gen; lifecycle={:#x}; refs={:?};", actual, refs);
276 | if refs.value == 0 {
277 | test_println!("-> ok to remove!");
278 | // safety: we've modified the generation of this slot and any other thread
279 | // calling this method will exit out at the generation check above in the
280 | // next iteration of the loop.
281 | let value = self
282 | .item
283 | .with_mut(|item| mutator(Some(unsafe { &mut *item })));
284 | free.push(offset, self);
285 | return value;
286 | }
287 | 
288 | // Otherwise, a reference must be dropped before we can
289 | // remove the value. Spin here until there are no refs remaining...
290 | test_println!("-> refs={:?}; spin...", refs);
291 | 
292 | // Back off, spinning and possibly yielding.
293 | exponential_backoff(&mut spin_exp);
294 | }
295 | Err(actual) => {
296 | test_println!("-> retrying; lifecycle={:#x};", actual);
297 | lifecycle = actual;
298 | // The state changed; reset the spin backoff.
299 | spin_exp = 0;
300 | }
301 | }
302 | }
303 | }
304 | 
305 | /// Initialize a slot
306 | ///
307 | /// This method initializes and sets up the state for a slot. When being used in `Pool`, we
308 | /// only need to ensure that the `Slot` is in the right state, while when being used in a
309 | /// `Slab` we want to insert a value into it, as the memory is not yet initialized.
310 | pub(crate) fn init(&self) -> Option<InitGuard<T, C>> {
311 | // Load the current lifecycle state.
312 | let lifecycle = self.lifecycle.load(Ordering::Acquire);
313 | let gen = LifecycleGen::<C>::from_packed(lifecycle).0;
314 | let refs = RefCount::<C>::from_packed(lifecycle);
315 | 
316 | test_println!(
317 | "-> initialize_state; state={:?}; gen={:?}; refs={:?};",
318 | Lifecycle::<C>::from_packed(lifecycle),
319 | gen,
320 | refs,
321 | );
322 | 
323 | if refs.value != 0 {
324 | test_println!("-> initialize while referenced! cancelling");
325 | return None;
326 | }
327 | 
328 | Some(InitGuard {
329 | slot: ptr::NonNull::from(self),
330 | curr_lifecycle: lifecycle,
331 | released: false,
332 | })
333 | }
334 | }
335 | 
336 | // Slot impl which _needs_ an `Option` for self.item, this is for `Slab` to use.
337 | impl<T, C> Slot<Option<T>, C>
338 | where
339 | C: cfg::Config,
340 | {
341 | fn is_empty(&self) -> bool {
342 | self.item.with(|item| unsafe { (*item).is_none() })
343 | }
344 | 
345 | /// Insert a value into a slot
346 | ///
347 | /// We first initialize the state and then insert the passed-in value into the slot.
348 | #[inline]
349 | pub(crate) fn insert(&self, value: &mut Option<T>) -> Option<Generation<C>> {
350 | debug_assert!(self.is_empty(), "inserted into full slot");
351 | debug_assert!(value.is_some(), "inserted twice");
352 | 
353 | let mut guard = self.init()?;
354 | let gen = guard.generation();
355 | unsafe {
356 | // Safety: Accessing the value of an `InitGuard` is unsafe because
357 | // it has a pointer to a slot which may dangle. Here, we know the
358 | // pointed slot is alive because we have a reference to it in scope,
359 | // and the `InitGuard` will be dropped when this function returns.
360 | mem::swap(guard.value_mut(), value);
361 | guard.release();
362 | };
363 | test_println!("-> inserted at {:?}", gen);
364 | 
365 | Some(gen)
366 | }
367 | 
368 | /// Tries to remove the value in the slot, returning `true` if the value was
369 | /// removed.
370 | ///
371 | /// This method tries to remove the value in the slot. If there are existing references, then
372 | /// the slot is marked for removal and the next thread calling either this method or
373 | /// `remove_value` will do the work instead.
374 | #[inline]
375 | pub(super) fn try_remove_value<F: FreeList<C>>(
376 | &self,
377 | gen: Generation<C>,
378 | offset: usize,
379 | free: &F,
380 | ) -> bool {
381 | let should_remove = match self.mark_release(gen) {
382 | // If `mark_release` returns `Some`, a value exists at this
383 | // generation. The bool inside this option indicates whether or not
384 | // _we're_ allowed to remove the value.
385 | Some(should_remove) => should_remove,
386 | // Otherwise, the generation we tried to remove has already expired,
387 | // and we did not mark anything for removal.
388 | None => {
389 | test_println!(
390 | "-> try_remove_value; nothing exists at generation={:?}",
391 | gen
392 | );
393 | return false;
394 | }
395 | };
396 | 
397 | test_println!("-> try_remove_value; marked!");
398 | 
399 | if should_remove {
400 | // We're allowed to remove the slot now!
401 | test_println!("-> try_remove_value; can remove now");
402 | self.remove_value(gen, offset, free);
403 | }
404 | 
405 | true
406 | }
407 | 
408 | #[inline]
409 | pub(super) fn remove_value<F: FreeList<C>>(
410 | &self,
411 | gen: Generation<C>,
412 | offset: usize,
413 | free: &F,
414 | ) -> Option<T> {
415 | self.release_with(gen, offset, free, |item| item.and_then(Option::take))
416 | }
417 | }
418 | 
419 | // These impls are specific to `Pool`
420 | impl<T, C> Slot<T, C>
421 | where
422 | T: Default + Clear,
423 | C: cfg::Config,
424 | {
425 | pub(in crate::page) fn new(next: usize) -> Self {
426 | Self {
427 | lifecycle: AtomicUsize::new(Lifecycle::<C>::REMOVING.as_usize()),
428 | item: UnsafeCell::new(T::default()),
429 | next: UnsafeCell::new(next),
430 | _cfg: PhantomData,
431 | }
432 | }
433 | 
434 | /// Try to clear this slot's storage
435 | ///
436 | /// If there are references to this slot, then we mark this slot for clearing and let the last
437 | /// thread do the work for us.
438 | #[inline]
439 | pub(super) fn try_clear_storage<F: FreeList<C>>(
440 | &self,
441 | gen: Generation<C>,
442 | offset: usize,
443 | free: &F,
444 | ) -> bool {
445 | let should_clear = match self.mark_release(gen) {
446 | // If `mark_release` returns `Some`, a value exists at this
447 | // generation. The bool inside this option indicates whether or not
448 | // _we're_ allowed to clear the value.
449 | Some(should_clear) => should_clear,
450 | // Otherwise, the generation we tried to remove has already expired,
451 | // and we did not mark anything for removal.
452 | None => {
453 | test_println!(
454 | "-> try_clear_storage; nothing exists at generation={:?}",
455 | gen
456 | );
457 | return false;
458 | }
459 | };
460 | 
461 | test_println!("-> try_clear_storage; marked!");
462 | 
463 | if should_clear {
464 | // We're allowed to clear the slot now!
465 | test_println!("-> try_clear_storage; can clear now");
466 | return self.clear_storage(gen, offset, free);
467 | }
468 | 
469 | true
470 | }
471 | 
472 | /// Clear this slot's storage
473 | ///
474 | /// This method blocks until all references have been dropped, and then clears the storage.
475 | pub(super) fn clear_storage<F: FreeList<C>>(
476 | &self,
477 | gen: Generation<C>,
478 | offset: usize,
479 | free: &F,
480 | ) -> bool {
481 | // release_with will _always_ wait until it can release the slot, or just return if the
482 | // slot has already been released.
483 | self.release_with(gen, offset, free, |item| { 484 | let cleared = item.map(|inner| Clear::clear(inner)).is_some(); 485 | test_println!("-> cleared: {}", cleared); 486 | cleared 487 | }) 488 | } 489 | } 490 | 491 | impl Slot { 492 | fn release(&self) -> bool { 493 | let mut lifecycle = self.lifecycle.load(Ordering::Acquire); 494 | loop { 495 | let refs = RefCount::::from_packed(lifecycle); 496 | let state = Lifecycle::::from_packed(lifecycle).state; 497 | let gen = LifecycleGen::::from_packed(lifecycle).0; 498 | 499 | // Are we the last guard, and is the slot marked for removal? 500 | let dropping = refs.value == 1 && state == State::Marked; 501 | let new_lifecycle = if dropping { 502 | // If so, we want to advance the state to "removing". 503 | // Also, reset the ref count to 0. 504 | LifecycleGen(gen).pack(State::Removing as usize) 505 | } else { 506 | // Otherwise, just subtract 1 from the ref count. 507 | refs.decr().pack(lifecycle) 508 | }; 509 | 510 | test_println!( 511 | "-> drop guard: state={:?}; gen={:?}; refs={:?}; lifecycle={:#x}; new_lifecycle={:#x}; dropping={:?}", 512 | state, 513 | gen, 514 | refs, 515 | lifecycle, 516 | new_lifecycle, 517 | dropping 518 | ); 519 | match self.lifecycle.compare_exchange( 520 | lifecycle, 521 | new_lifecycle, 522 | Ordering::AcqRel, 523 | Ordering::Acquire, 524 | ) { 525 | Ok(_) => { 526 | test_println!("-> drop guard: done; dropping={:?}", dropping); 527 | return dropping; 528 | } 529 | Err(actual) => { 530 | test_println!("-> drop guard; retry, actual={:#x}", actual); 531 | lifecycle = actual; 532 | } 533 | } 534 | } 535 | } 536 | } 537 | 538 | impl fmt::Debug for Slot { 539 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 540 | let lifecycle = self.lifecycle.load(Ordering::Relaxed); 541 | f.debug_struct("Slot") 542 | .field("lifecycle", &format_args!("{:#x}", lifecycle)) 543 | .field("state", &Lifecycle::::from_packed(lifecycle).state) 544 | .field("gen", &LifecycleGen::::from_packed(lifecycle).0) 545 | .field("refs", &RefCount::::from_packed(lifecycle)) 546 | .field("next", &self.next()) 547 | .finish() 548 | } 549 | } 550 | 551 | // === impl Generation === 552 | 553 | impl fmt::Debug for Generation { 554 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 555 | f.debug_tuple("Generation").field(&self.value).finish() 556 | } 557 | } 558 | 559 | impl Generation { 560 | fn advance(self) -> Self { 561 | Self::from_usize((self.value + 1) % Self::BITS) 562 | } 563 | } 564 | 565 | impl PartialEq for Generation { 566 | fn eq(&self, other: &Self) -> bool { 567 | self.value == other.value 568 | } 569 | } 570 | 571 | impl Eq for Generation {} 572 | 573 | impl PartialOrd for Generation { 574 | fn partial_cmp(&self, other: &Self) -> Option { 575 | Some(self.cmp(other)) 576 | } 577 | } 578 | 579 | impl Ord for Generation { 580 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 581 | self.value.cmp(&other.value) 582 | } 583 | } 584 | 585 | impl Clone for Generation { 586 | fn clone(&self) -> Self { 587 | *self 588 | } 589 | } 590 | 591 | impl Copy for Generation {} 592 | 593 | // === impl Guard === 594 | 595 | impl Guard { 596 | /// Releases the guard, returning `true` if the slot should be cleared. 597 | /// 598 | /// ## Safety 599 | /// 600 | /// This dereferences a raw pointer to the slot. The caller is responsible 601 | /// for ensuring that the `Guard` does not outlive the slab that contains 602 | /// the pointed slot. Failure to do so means this pointer may dangle. 
603 | #[inline] 604 | pub(crate) unsafe fn release(&self) -> bool { 605 | self.slot().release() 606 | } 607 | 608 | /// Returns a borrowed reference to the slot. 609 | /// 610 | /// ## Safety 611 | /// 612 | /// This dereferences a raw pointer to the slot. The caller is responsible 613 | /// for ensuring that the `Guard` does not outlive the slab that contains 614 | /// the pointed slot. Failure to do so means this pointer may dangle. 615 | #[inline] 616 | pub(crate) unsafe fn slot(&self) -> &Slot { 617 | self.slot.as_ref() 618 | } 619 | 620 | /// Returns a borrowed reference to the slot's value. 621 | /// 622 | /// ## Safety 623 | /// 624 | /// This dereferences a raw pointer to the slot. The caller is responsible 625 | /// for ensuring that the `Guard` does not outlive the slab that contains 626 | /// the pointed slot. Failure to do so means this pointer may dangle. 627 | #[inline(always)] 628 | pub(crate) unsafe fn value(&self) -> &T { 629 | self.slot().item.with(|item| &*item) 630 | } 631 | } 632 | 633 | // === impl Lifecycle === 634 | 635 | impl Lifecycle { 636 | const MARKED: Self = Self { 637 | state: State::Marked, 638 | _cfg: PhantomData, 639 | }; 640 | const REMOVING: Self = Self { 641 | state: State::Removing, 642 | _cfg: PhantomData, 643 | }; 644 | const PRESENT: Self = Self { 645 | state: State::Present, 646 | _cfg: PhantomData, 647 | }; 648 | } 649 | 650 | impl Pack for Lifecycle { 651 | const LEN: usize = 2; 652 | type Prev = (); 653 | 654 | fn from_usize(u: usize) -> Self { 655 | Self { 656 | state: match u & Self::MASK { 657 | 0b00 => State::Present, 658 | 0b01 => State::Marked, 659 | 0b11 => State::Removing, 660 | bad => unreachable!("weird lifecycle {:#b}", bad), 661 | }, 662 | _cfg: PhantomData, 663 | } 664 | } 665 | 666 | fn as_usize(&self) -> usize { 667 | self.state as usize 668 | } 669 | } 670 | 671 | impl PartialEq for Lifecycle { 672 | fn eq(&self, other: &Self) -> bool { 673 | self.state == other.state 674 | } 675 | } 676 | 677 | impl Eq for Lifecycle {} 678 | 679 | impl fmt::Debug for Lifecycle { 680 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 681 | f.debug_tuple("Lifecycle").field(&self.state).finish() 682 | } 683 | } 684 | 685 | // === impl RefCount === 686 | 687 | impl Pack for RefCount { 688 | const LEN: usize = cfg::WIDTH - (Lifecycle::::LEN + Generation::::LEN); 689 | type Prev = Lifecycle; 690 | 691 | fn from_usize(value: usize) -> Self { 692 | debug_assert!(value <= Self::BITS); 693 | Self { 694 | value, 695 | _cfg: PhantomData, 696 | } 697 | } 698 | 699 | fn as_usize(&self) -> usize { 700 | self.value 701 | } 702 | } 703 | 704 | impl RefCount { 705 | pub(crate) const MAX: usize = Self::BITS - 1; 706 | 707 | #[inline] 708 | fn incr(self) -> Option { 709 | if self.value >= Self::MAX { 710 | test_println!("-> get: {}; MAX={}", self.value, RefCount::::MAX); 711 | return None; 712 | } 713 | 714 | Some(Self::from_usize(self.value + 1)) 715 | } 716 | 717 | #[inline] 718 | fn decr(self) -> Self { 719 | Self::from_usize(self.value - 1) 720 | } 721 | } 722 | 723 | impl fmt::Debug for RefCount { 724 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 725 | f.debug_tuple("RefCount").field(&self.value).finish() 726 | } 727 | } 728 | 729 | impl PartialEq for RefCount { 730 | fn eq(&self, other: &Self) -> bool { 731 | self.value == other.value 732 | } 733 | } 734 | 735 | impl Eq for RefCount {} 736 | 737 | impl PartialOrd for RefCount { 738 | fn partial_cmp(&self, other: &Self) -> Option { 739 | Some(self.cmp(other)) 740 | } 741 | } 742 | 743 
| impl Ord for RefCount { 744 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 745 | self.value.cmp(&other.value) 746 | } 747 | } 748 | 749 | impl Clone for RefCount { 750 | fn clone(&self) -> Self { 751 | *self 752 | } 753 | } 754 | 755 | impl Copy for RefCount {} 756 | 757 | // === impl LifecycleGen === 758 | 759 | impl Pack for LifecycleGen { 760 | const LEN: usize = Generation::::LEN; 761 | type Prev = RefCount; 762 | 763 | fn from_usize(value: usize) -> Self { 764 | Self(Generation::from_usize(value)) 765 | } 766 | 767 | fn as_usize(&self) -> usize { 768 | self.0.as_usize() 769 | } 770 | } 771 | 772 | impl InitGuard { 773 | pub(crate) fn generation(&self) -> Generation { 774 | LifecycleGen::::from_packed(self.curr_lifecycle).0 775 | } 776 | 777 | /// Returns a borrowed reference to the slot's value. 778 | /// 779 | /// ## Safety 780 | /// 781 | /// This dereferences a raw pointer to the slot. The caller is responsible 782 | /// for ensuring that the `InitGuard` does not outlive the slab that 783 | /// contains the pointed slot. Failure to do so means this pointer may 784 | /// dangle. 785 | pub(crate) unsafe fn value(&self) -> &T { 786 | self.slot.as_ref().item.with(|val| &*val) 787 | } 788 | 789 | /// Returns a mutably borrowed reference to the slot's value. 790 | /// 791 | /// ## Safety 792 | /// 793 | /// This dereferences a raw pointer to the slot. The caller is responsible 794 | /// for ensuring that the `InitGuard` does not outlive the slab that 795 | /// contains the pointed slot. Failure to do so means this pointer may 796 | /// dangle. 797 | /// 798 | /// It's safe to reference the slot mutably, though, because creating an 799 | /// `InitGuard` ensures there are no outstanding immutable references. 800 | pub(crate) unsafe fn value_mut(&mut self) -> &mut T { 801 | self.slot.as_ref().item.with_mut(|val| &mut *val) 802 | } 803 | 804 | /// Releases the guard, returning `true` if the slot should be cleared. 805 | /// 806 | /// ## Safety 807 | /// 808 | /// This dereferences a raw pointer to the slot. The caller is responsible 809 | /// for ensuring that the `InitGuard` does not outlive the slab that 810 | /// contains the pointed slot. Failure to do so means this pointer may 811 | /// dangle. 812 | pub(crate) unsafe fn release(&mut self) -> bool { 813 | self.release2(0) 814 | } 815 | 816 | /// Downgrades the guard to an immutable guard 817 | /// 818 | /// ## Safety 819 | /// 820 | /// This dereferences a raw pointer to the slot. The caller is responsible 821 | /// for ensuring that the `InitGuard` does not outlive the slab that 822 | /// contains the pointed slot. Failure to do so means this pointer may 823 | /// dangle. 
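///
/// Downgrading works by releasing the init guard with an initial ref count
/// of one (see `release2` below), so the returned `Guard` already owns the
/// single outstanding reference to the slot.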
824 | pub(crate) unsafe fn downgrade(&mut self) -> Guard { 825 | let _ = self.release2(RefCount::::from_usize(1).pack(0)); 826 | Guard { slot: self.slot } 827 | } 828 | 829 | unsafe fn release2(&mut self, new_refs: usize) -> bool { 830 | test_println!( 831 | "InitGuard::release; curr_lifecycle={:?}; downgrading={}", 832 | Lifecycle::::from_packed(self.curr_lifecycle), 833 | new_refs != 0, 834 | ); 835 | if self.released { 836 | test_println!("-> already released!"); 837 | return false; 838 | } 839 | self.released = true; 840 | let mut curr_lifecycle = self.curr_lifecycle; 841 | let slot = self.slot.as_ref(); 842 | let new_lifecycle = LifecycleGen::::from_packed(self.curr_lifecycle) 843 | .pack(Lifecycle::::PRESENT.pack(new_refs)); 844 | 845 | match slot.lifecycle.compare_exchange( 846 | curr_lifecycle, 847 | new_lifecycle, 848 | Ordering::AcqRel, 849 | Ordering::Acquire, 850 | ) { 851 | Ok(_) => { 852 | test_println!("--> advanced to PRESENT; done"); 853 | return false; 854 | } 855 | Err(actual) => { 856 | test_println!( 857 | "--> lifecycle changed; actual={:?}", 858 | Lifecycle::::from_packed(actual) 859 | ); 860 | curr_lifecycle = actual; 861 | } 862 | } 863 | 864 | // if the state was no longer the prior state, we are now responsible 865 | // for releasing the slot. 866 | loop { 867 | let refs = RefCount::::from_packed(curr_lifecycle); 868 | let state = Lifecycle::::from_packed(curr_lifecycle).state; 869 | 870 | test_println!( 871 | "-> InitGuard::release; lifecycle={:#x}; state={:?}; refs={:?};", 872 | curr_lifecycle, 873 | state, 874 | refs, 875 | ); 876 | 877 | debug_assert!(state == State::Marked || thread::panicking(), "state was not MARKED; someone else has removed the slot while we have exclusive access!\nactual={:?}", state); 878 | debug_assert!(refs.value == 0 || thread::panicking(), "ref count was not 0; someone else has referenced the slot while we have exclusive access!\nactual={:?}", refs); 879 | 880 | let new_lifecycle = LifecycleGen(self.generation()).pack(State::Removing as usize); 881 | 882 | match slot.lifecycle.compare_exchange( 883 | curr_lifecycle, 884 | new_lifecycle, 885 | Ordering::AcqRel, 886 | Ordering::Acquire, 887 | ) { 888 | Ok(_) => { 889 | test_println!("-> InitGuard::RELEASE: done!"); 890 | return true; 891 | } 892 | Err(actual) => { 893 | debug_assert!(thread::panicking(), "we should not have to retry this CAS!"); 894 | test_println!("-> InitGuard::release; retry, actual={:#x}", actual); 895 | curr_lifecycle = actual; 896 | } 897 | } 898 | } 899 | } 900 | } 901 | 902 | // === helpers === 903 | 904 | #[inline(always)] 905 | fn exponential_backoff(exp: &mut usize) { 906 | /// Maximum exponent we can back off to. 907 | const MAX_EXPONENT: usize = 8; 908 | 909 | // Issue 2^exp pause instructions. 910 | for _ in 0..(1 << *exp) { 911 | hint::spin_loop(); 912 | } 913 | 914 | if *exp >= MAX_EXPONENT { 915 | // If we have reached the max backoff, also yield to the scheduler 916 | // explicitly. 917 | crate::sync::yield_now(); 918 | } else { 919 | // Otherwise, increment the exponent. 
920 | *exp += 1; 921 | } 922 | } 923 | -------------------------------------------------------------------------------- /src/page/stack.rs: -------------------------------------------------------------------------------- 1 | use crate::cfg; 2 | use crate::sync::atomic::{AtomicUsize, Ordering}; 3 | use std::{fmt, marker::PhantomData}; 4 | 5 | pub(super) struct TransferStack { 6 | head: AtomicUsize, 7 | _cfg: PhantomData, 8 | } 9 | 10 | impl TransferStack { 11 | pub(super) fn new() -> Self { 12 | Self { 13 | head: AtomicUsize::new(super::Addr::::NULL), 14 | _cfg: PhantomData, 15 | } 16 | } 17 | 18 | pub(super) fn pop_all(&self) -> Option { 19 | let val = self.head.swap(super::Addr::::NULL, Ordering::Acquire); 20 | test_println!("-> pop {:#x}", val); 21 | if val == super::Addr::::NULL { 22 | None 23 | } else { 24 | Some(val) 25 | } 26 | } 27 | 28 | fn push(&self, new_head: usize, before: impl Fn(usize)) { 29 | // We loop to win the race to set the new head. The `next` variable 30 | // is the next slot on the stack which needs to be pointed to by the 31 | // new head. 32 | let mut next = self.head.load(Ordering::Relaxed); 33 | loop { 34 | test_println!("-> next {:#x}", next); 35 | before(next); 36 | 37 | match self 38 | .head 39 | .compare_exchange(next, new_head, Ordering::Release, Ordering::Relaxed) 40 | { 41 | // lost the race! 42 | Err(actual) => { 43 | test_println!("-> retry!"); 44 | next = actual; 45 | } 46 | Ok(_) => { 47 | test_println!("-> successful; next={:#x}", next); 48 | return; 49 | } 50 | } 51 | } 52 | } 53 | } 54 | 55 | impl super::FreeList for TransferStack { 56 | fn push(&self, new_head: usize, slot: &super::Slot) { 57 | self.push(new_head, |next| slot.set_next(next)) 58 | } 59 | } 60 | 61 | impl fmt::Debug for TransferStack { 62 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 63 | f.debug_struct("TransferStack") 64 | .field( 65 | "head", 66 | &format_args!("{:#0x}", &self.head.load(Ordering::Relaxed)), 67 | ) 68 | .finish() 69 | } 70 | } 71 | 72 | #[cfg(all(loom, test))] 73 | mod test { 74 | use super::*; 75 | use crate::{sync::UnsafeCell, test_util}; 76 | use loom::thread; 77 | use std::sync::Arc; 78 | 79 | #[test] 80 | fn transfer_stack() { 81 | test_util::run_model("transfer_stack", || { 82 | let causalities = [UnsafeCell::new(999), UnsafeCell::new(999)]; 83 | let shared = Arc::new((causalities, TransferStack::::new())); 84 | let shared1 = shared.clone(); 85 | let shared2 = shared.clone(); 86 | 87 | let t1 = thread::spawn(move || { 88 | let (causalities, stack) = &*shared1; 89 | stack.push(0, |prev| { 90 | causalities[0].with_mut(|c| unsafe { 91 | *c = 0; 92 | }); 93 | test_println!("prev={:#x}", prev) 94 | }); 95 | }); 96 | let t2 = thread::spawn(move || { 97 | let (causalities, stack) = &*shared2; 98 | stack.push(1, |prev| { 99 | causalities[1].with_mut(|c| unsafe { 100 | *c = 1; 101 | }); 102 | test_println!("prev={:#x}", prev) 103 | }); 104 | }); 105 | 106 | let (causalities, stack) = &*shared; 107 | let mut idx = stack.pop_all(); 108 | while idx == None { 109 | idx = stack.pop_all(); 110 | thread::yield_now(); 111 | } 112 | let idx = idx.unwrap(); 113 | causalities[idx].with(|val| unsafe { 114 | assert_eq!( 115 | *val, idx, 116 | "UnsafeCell write must happen-before index is pushed to the stack!" 
117 | ); 118 | }); 119 | 120 | t1.join().unwrap(); 121 | t2.join().unwrap(); 122 | }); 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/shard.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | cfg::{self, CfgPrivate}, 3 | clear::Clear, 4 | page, 5 | sync::{ 6 | alloc, 7 | atomic::{ 8 | AtomicPtr, AtomicUsize, 9 | Ordering::{self, *}, 10 | }, 11 | }, 12 | tid::Tid, 13 | Pack, 14 | }; 15 | 16 | use std::{fmt, ptr, slice}; 17 | 18 | // ┌─────────────┐ ┌────────┐ 19 | // │ page 1 │ │ │ 20 | // ├─────────────┤ ┌───▶│ next──┼─┐ 21 | // │ page 2 │ │ ├────────┤ │ 22 | // │ │ │ │XXXXXXXX│ │ 23 | // │ local_free──┼─┘ ├────────┤ │ 24 | // │ global_free─┼─┐ │ │◀┘ 25 | // ├─────────────┤ └───▶│ next──┼─┐ 26 | // │ page 3 │ ├────────┤ │ 27 | // └─────────────┘ │XXXXXXXX│ │ 28 | // ... ├────────┤ │ 29 | // ┌─────────────┐ │XXXXXXXX│ │ 30 | // │ page n │ ├────────┤ │ 31 | // └─────────────┘ │ │◀┘ 32 | // │ next──┼───▶ 33 | // ├────────┤ 34 | // │XXXXXXXX│ 35 | // └────────┘ 36 | // ... 37 | pub(crate) struct Shard { 38 | /// The shard's parent thread ID. 39 | pub(crate) tid: usize, 40 | /// The local free list for each page. 41 | /// 42 | /// These are only ever accessed from this shard's thread, so they are 43 | /// stored separately from the shared state for the page that can be 44 | /// accessed concurrently, to minimize false sharing. 45 | local: Box<[page::Local]>, 46 | /// The shared state for each page in this shard. 47 | /// 48 | /// This consists of the page's metadata (size, previous size), remote free 49 | /// list, and a pointer to the actual array backing that page. 50 | shared: Box<[page::Shared]>, 51 | } 52 | 53 | pub(crate) struct Array { 54 | shards: Box<[Ptr]>, 55 | max: AtomicUsize, 56 | } 57 | 58 | #[derive(Debug)] 59 | struct Ptr(AtomicPtr>>); 60 | 61 | #[derive(Debug)] 62 | pub(crate) struct IterMut<'a, T: 'a, C: cfg::Config + 'a>(slice::IterMut<'a, Ptr>); 63 | 64 | // === impl Shard === 65 | 66 | impl Shard 67 | where 68 | C: cfg::Config, 69 | { 70 | #[inline(always)] 71 | pub(crate) fn with_slot<'a, U>( 72 | &'a self, 73 | idx: usize, 74 | f: impl FnOnce(&'a page::Slot) -> Option, 75 | ) -> Option { 76 | debug_assert_eq_in_drop!(Tid::::from_packed(idx).as_usize(), self.tid); 77 | let (addr, page_index) = page::indices::(idx); 78 | 79 | test_println!("-> {:?}", addr); 80 | if page_index >= self.shared.len() { 81 | return None; 82 | } 83 | 84 | self.shared[page_index].with_slot(addr, f) 85 | } 86 | 87 | pub(crate) fn new(tid: usize) -> Self { 88 | let mut total_sz = 0; 89 | let shared = (0..C::MAX_PAGES) 90 | .map(|page_num| { 91 | let sz = C::page_size(page_num); 92 | let prev_sz = total_sz; 93 | total_sz += sz; 94 | page::Shared::new(sz, prev_sz) 95 | }) 96 | .collect(); 97 | let local = (0..C::MAX_PAGES).map(|_| page::Local::new()).collect(); 98 | Self { tid, local, shared } 99 | } 100 | } 101 | 102 | impl Shard, C> 103 | where 104 | C: cfg::Config, 105 | { 106 | /// Remove an item on the shard's local thread. 107 | pub(crate) fn take_local(&self, idx: usize) -> Option { 108 | debug_assert_eq_in_drop!(Tid::::from_packed(idx).as_usize(), self.tid); 109 | let (addr, page_index) = page::indices::(idx); 110 | 111 | test_println!("-> remove_local {:?}", addr); 112 | 113 | self.shared 114 | .get(page_index)? 115 | .take(addr, C::unpack_gen(idx), self.local(page_index)) 116 | } 117 | 118 | /// Remove an item, while on a different thread from the shard's local thread. 
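///
/// Unlike `take_local`, this pushes the freed slot onto the page's remote
/// `TransferStack` rather than onto the local free list, so it never touches
/// the owning thread's unsynchronized local state.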
119 | pub(crate) fn take_remote(&self, idx: usize) -> Option<T> {
120 | debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
121 | debug_assert!(Tid::<C>::current().as_usize() != self.tid);
122 | 
123 | let (addr, page_index) = page::indices::<C>(idx);
124 | 
125 | test_println!("-> take_remote {:?}; page {:?}", addr, page_index);
126 | 
127 | let shared = self.shared.get(page_index)?;
128 | shared.take(addr, C::unpack_gen(idx), shared.free_list())
129 | }
130 | 
131 | pub(crate) fn remove_local(&self, idx: usize) -> bool {
132 | debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
133 | let (addr, page_index) = page::indices::<C>(idx);
134 | 
135 | if page_index >= self.shared.len() {
136 | return false;
137 | }
138 | 
139 | self.shared[page_index].remove(addr, C::unpack_gen(idx), self.local(page_index))
140 | }
141 | 
142 | pub(crate) fn remove_remote(&self, idx: usize) -> bool {
143 | debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
144 | let (addr, page_index) = page::indices::<C>(idx);
145 | 
146 | if page_index >= self.shared.len() {
147 | return false;
148 | }
149 | 
150 | let shared = &self.shared[page_index];
151 | shared.remove(addr, C::unpack_gen(idx), shared.free_list())
152 | }
153 | 
154 | pub(crate) fn iter(&self) -> std::slice::Iter<'_, page::Shared<Option<T>, C>> {
155 | self.shared.iter()
156 | }
157 | }
158 | 
159 | impl<T, C> Shard<T, C>
160 | where
161 | T: Clear + Default,
162 | C: cfg::Config,
163 | {
164 | pub(crate) fn init_with<U>(
165 | &self,
166 | mut init: impl FnMut(usize, &page::Slot<T, C>) -> Option<U>,
167 | ) -> Option<U> {
168 | // Can we fit the value into an existing page?
169 | for (page_idx, page) in self.shared.iter().enumerate() {
170 | let local = self.local(page_idx);
171 | 
172 | test_println!("-> page {}; {:?}; {:?}", page_idx, local, page);
173 | 
174 | if let Some(res) = page.init_with(local, &mut init) {
175 | return Some(res);
176 | }
177 | }
178 | 
179 | None
180 | }
181 | 
182 | pub(crate) fn mark_clear_local(&self, idx: usize) -> bool {
183 | debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
184 | let (addr, page_index) = page::indices::<C>(idx);
185 | 
186 | if page_index >= self.shared.len() {
187 | return false;
188 | }
189 | 
190 | self.shared[page_index].mark_clear(addr, C::unpack_gen(idx), self.local(page_index))
191 | }
192 | 
193 | pub(crate) fn mark_clear_remote(&self, idx: usize) -> bool {
194 | debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
195 | let (addr, page_index) = page::indices::<C>(idx);
196 | 
197 | if page_index >= self.shared.len() {
198 | return false;
199 | }
200 | 
201 | let shared = &self.shared[page_index];
202 | shared.mark_clear(addr, C::unpack_gen(idx), shared.free_list())
203 | }
204 | 
205 | pub(crate) fn clear_after_release(&self, idx: usize) {
206 | crate::sync::atomic::fence(crate::sync::atomic::Ordering::Acquire);
207 | let tid = Tid::<C>::current().as_usize();
208 | test_println!(
209 | "-> clear_after_release; self.tid={:?}; current.tid={:?};",
210 | tid,
211 | self.tid
212 | );
213 | if tid == self.tid {
214 | self.clear_local(idx);
215 | } else {
216 | self.clear_remote(idx);
217 | }
218 | }
219 | 
220 | fn clear_local(&self, idx: usize) -> bool {
221 | debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
222 | let (addr, page_index) = page::indices::<C>(idx);
223 | 
224 | if page_index >= self.shared.len() {
225 | return false;
226 | }
227 | 
228 | self.shared[page_index].clear(addr, C::unpack_gen(idx), self.local(page_index))
229 | }
230 | 
231 | fn
232 |         debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
233 |         let (addr, page_index) = page::indices::<C>(idx);
234 | 
235 |         if page_index >= self.shared.len() {
236 |             return false;
237 |         }
238 | 
239 |         let shared = &self.shared[page_index];
240 |         shared.clear(addr, C::unpack_gen(idx), shared.free_list())
241 |     }
242 | 
243 |     #[inline(always)]
244 |     fn local(&self, i: usize) -> &page::Local {
245 |         #[cfg(debug_assertions)]
246 |         debug_assert_eq_in_drop!(
247 |             Tid::<C>::current().as_usize(),
248 |             self.tid,
249 |             "tried to access local data from another thread!"
250 |         );
251 | 
252 |         &self.local[i]
253 |     }
254 | }
255 | 
256 | impl<T: fmt::Debug, C: cfg::Config> fmt::Debug for Shard<T, C> {
257 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
258 |         let mut d = f.debug_struct("Shard");
259 | 
260 |         #[cfg(debug_assertions)]
261 |         d.field("tid", &self.tid);
262 |         d.field("shared", &self.shared).finish()
263 |     }
264 | }
265 | 
266 | // === impl Array ===
267 | 
268 | impl<T, C> Array<T, C>
269 | where
270 |     C: cfg::Config,
271 | {
272 |     pub(crate) fn new() -> Self {
273 |         let mut shards = Vec::with_capacity(C::MAX_SHARDS);
274 |         for _ in 0..C::MAX_SHARDS {
275 |             // XXX(eliza): T_T this could be avoided with maybeuninit or something...
276 |             shards.push(Ptr::null());
277 |         }
278 |         Self {
279 |             shards: shards.into(),
280 |             max: AtomicUsize::new(0),
281 |         }
282 |     }
283 | 
284 |     #[inline]
285 |     pub(crate) fn get(&self, idx: usize) -> Option<&Shard<T, C>> {
286 |         test_println!("-> get shard={}", idx);
287 |         self.shards.get(idx)?.load(Acquire)
288 |     }
289 | 
290 |     #[inline]
291 |     pub(crate) fn current(&self) -> (Tid<C>, &Shard<T, C>) {
292 |         let tid = Tid::<C>::current();
293 |         test_println!("current: {:?}", tid);
294 |         let idx = tid.as_usize();
295 |         assert!(
296 |             idx < self.shards.len(),
297 |             "Thread count overflowed the configured max count. \
298 |              Thread index = {}, max threads = {}.",
299 |             idx,
300 |             C::MAX_SHARDS,
301 |         );
302 |         // It's okay for this to be relaxed. The value is only ever stored by
303 |         // the thread that corresponds to the index, and we are that thread.
304 |         let shard = self.shards[idx].load(Relaxed).unwrap_or_else(|| {
305 |             let ptr = Box::into_raw(Box::new(alloc::Track::new(Shard::new(idx))));
306 |             test_println!("-> allocated new shard for index {} at {:p}", idx, ptr);
307 |             self.shards[idx].set(ptr);
308 |             let mut max = self.max.load(Acquire);
309 |             while max < idx {
310 |                 match self.max.compare_exchange(max, idx, AcqRel, Acquire) {
311 |                     Ok(_) => break,
312 |                     Err(actual) => max = actual,
313 |                 }
314 |             }
315 |             test_println!("-> highest index={}, prev={}", std::cmp::max(max, idx), max);
316 |             unsafe {
317 |                 // Safety: we just put it there!
318 |                 &*ptr
319 |             }
320 |             .get_ref()
321 |         });
322 |         (tid, shard)
323 |     }
324 | 
325 |     pub(crate) fn iter_mut(&mut self) -> IterMut<'_, T, C> {
326 |         test_println!("Array::iter_mut");
327 |         let max = self.max.load(Acquire);
328 |         test_println!("-> highest index={}", max);
329 |         IterMut(self.shards[0..=max].iter_mut())
330 |     }
331 | }
332 | 
333 | impl<T, C: cfg::Config> Drop for Array<T, C> {
334 |     fn drop(&mut self) {
335 |         // XXX(eliza): this could be `with_mut` if we wanted to impl a wrapper for std atomics to change `get_mut` to `with_mut`...
336 |         let max = self.max.load(Acquire);
337 |         for shard in &self.shards[0..=max] {
338 |             // XXX(eliza): this could be `with_mut` if we wanted to impl a wrapper for std atomics to change `get_mut` to `with_mut`...
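            // Note: `drop` takes `&mut self`, so no other thread can be
            // storing into these slots concurrently; the `Acquire` load below
            // is enough to observe each fully-initialized shard before it is
            // boxed back up and freed.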
339 |             let ptr = shard.0.load(Acquire);
340 |             if ptr.is_null() {
341 |                 continue;
342 |             }
343 |             let shard = unsafe {
344 |                 // Safety: this is the only place where these boxes are
345 |                 // deallocated, and we have exclusive access to the shard array,
346 |                 // because...we are dropping it...
347 |                 Box::from_raw(ptr)
348 |             };
349 |             drop(shard)
350 |         }
351 |     }
352 | }
353 | 
354 | impl<T: fmt::Debug, C: cfg::Config> fmt::Debug for Array<T, C> {
355 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
356 |         let max = self.max.load(Acquire);
357 |         let mut set = f.debug_map();
358 |         for shard in &self.shards[0..=max] {
359 |             let ptr = shard.0.load(Acquire);
360 |             if let Some(shard) = ptr::NonNull::new(ptr) {
361 |                 set.entry(&format_args!("{:p}", ptr), unsafe { shard.as_ref() });
362 |             } else {
363 |                 set.entry(&format_args!("{:p}", ptr), &());
364 |             }
365 |         }
366 |         set.finish()
367 |     }
368 | }
369 | 
370 | // === impl Ptr ===
371 | 
372 | impl<T, C: cfg::Config> Ptr<T, C> {
373 |     #[inline]
374 |     fn null() -> Self {
375 |         Self(AtomicPtr::new(ptr::null_mut()))
376 |     }
377 | 
378 |     #[inline]
379 |     fn load(&self, order: Ordering) -> Option<&Shard<T, C>> {
380 |         let ptr = self.0.load(order);
381 |         test_println!("---> loaded={:p} (order={:?})", ptr, order);
382 |         if ptr.is_null() {
383 |             test_println!("---> null");
384 |             return None;
385 |         }
386 |         let track = unsafe {
387 |             // Safety: The returned reference will have the same lifetime as the
388 |             // reference to the shard pointer, which (morally, if not actually)
389 |             // owns the shard. The shard is only deallocated when the shard
390 |             // array is dropped, and it won't be dropped while this pointer is
391 |             // borrowed --- and the returned reference has the same lifetime.
392 |             //
393 |             // We know that the pointer is not null, because we just
394 |             // null-checked it immediately prior.
395 |             &*ptr
396 |         };
397 | 
398 |         Some(track.get_ref())
399 |     }
400 | 
401 |     #[inline]
402 |     fn set(&self, new: *mut alloc::Track<Shard<T, C>>) {
403 |         self.0
404 |             .compare_exchange(ptr::null_mut(), new, AcqRel, Acquire)
405 |             .expect("a shard can only be inserted by the thread that owns it, this is a bug!");
406 |     }
407 | }
408 | 
409 | // === Iterators ===
410 | 
411 | impl<'a, T, C> Iterator for IterMut<'a, T, C>
412 | where
413 |     T: 'a,
414 |     C: cfg::Config + 'a,
415 | {
416 |     type Item = &'a Shard<T, C>;
417 |     fn next(&mut self) -> Option<Self::Item> {
418 |         test_println!("IterMut::next");
419 |         loop {
420 |             // Skip over empty indices if they are less than the highest
421 |             // allocated shard. Some threads may have accessed the slab
422 |             // (generating a thread ID) but never actually inserted data, so
423 |             // they may have never allocated a shard.
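            // (Shards are allocated lazily, on a thread's first insert; see
            // `Array::current` above. A null pointer here simply means that
            // thread never created a shard.)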
424 |             let next = self.0.next();
425 |             test_println!("-> next.is_some={}", next.is_some());
426 |             if let Some(shard) = next?.load(Acquire) {
427 |                 test_println!("-> done");
428 |                 return Some(shard);
429 |             }
430 |         }
431 |     }
432 | }
433 | 
--------------------------------------------------------------------------------
/src/sync.rs:
--------------------------------------------------------------------------------
  1 | pub(crate) use self::inner::*;
  2 | 
  3 | #[cfg(all(loom, any(test, feature = "loom")))]
  4 | mod inner {
  5 |     pub(crate) mod atomic {
  6 |         pub use loom::sync::atomic::*;
  7 |         pub use std::sync::atomic::Ordering;
  8 |     }
  9 |     pub(crate) use loom::{
 10 |         cell::UnsafeCell, hint, lazy_static, sync::Mutex, thread::yield_now, thread_local,
 11 |     };
 12 | 
 13 |     pub(crate) mod alloc {
 14 |         #![allow(dead_code)]
 15 |         use loom::alloc;
 16 |         use std::fmt;
 17 |         /// Track allocations, detecting leaks
 18 |         ///
 19 |         /// This is a version of `loom::alloc::Track` that adds a missing
 20 |         /// `Default` impl.
 21 |         pub struct Track<T>(alloc::Track<T>);
 22 | 
 23 |         impl<T> Track<T> {
 24 |             /// Track a value for leaks
 25 |             #[inline(always)]
 26 |             pub fn new(value: T) -> Track<T> {
 27 |                 Track(alloc::Track::new(value))
 28 |             }
 29 | 
 30 |             /// Get a reference to the value
 31 |             #[inline(always)]
 32 |             pub fn get_ref(&self) -> &T {
 33 |                 self.0.get_ref()
 34 |             }
 35 | 
 36 |             /// Get a mutable reference to the value
 37 |             #[inline(always)]
 38 |             pub fn get_mut(&mut self) -> &mut T {
 39 |                 self.0.get_mut()
 40 |             }
 41 | 
 42 |             /// Stop tracking the value for leaks
 43 |             #[inline(always)]
 44 |             pub fn into_inner(self) -> T {
 45 |                 self.0.into_inner()
 46 |             }
 47 |         }
 48 | 
 49 |         impl<T: fmt::Debug> fmt::Debug for Track<T> {
 50 |             fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 51 |                 self.0.fmt(f)
 52 |             }
 53 |         }
 54 | 
 55 |         impl<T: Default> Default for Track<T> {
 56 |             fn default() -> Self {
 57 |                 Self::new(T::default())
 58 |             }
 59 |         }
 60 |     }
 61 | }
 62 | 
 63 | #[cfg(not(all(loom, any(feature = "loom", test))))]
 64 | mod inner {
 65 |     #![allow(dead_code)]
 66 |     pub(crate) use lazy_static::lazy_static;
 67 |     pub(crate) use std::{
 68 |         sync::{atomic, Mutex},
 69 |         thread::yield_now,
 70 |         thread_local,
 71 |     };
 72 | 
 73 |     pub(crate) mod hint {
 74 |         #[inline(always)]
 75 |         pub(crate) fn spin_loop() {
 76 |             // MSRV: std::hint::spin_loop() stabilized in 1.49.0
 77 |             #[allow(deprecated)]
 78 |             super::atomic::spin_loop_hint()
 79 |         }
 80 |     }
 81 | 
 82 |     #[derive(Debug)]
 83 |     pub(crate) struct UnsafeCell<T>(std::cell::UnsafeCell<T>);
 84 | 
 85 |     impl<T> UnsafeCell<T> {
 86 |         pub fn new(data: T) -> UnsafeCell<T> {
 87 |             UnsafeCell(std::cell::UnsafeCell::new(data))
 88 |         }
 89 | 
 90 |         #[inline(always)]
 91 |         pub fn with<F, R>(&self, f: F) -> R
 92 |         where
 93 |             F: FnOnce(*const T) -> R,
 94 |         {
 95 |             f(self.0.get())
 96 |         }
 97 | 
 98 |         #[inline(always)]
 99 |         pub fn with_mut<F, R>(&self, f: F) -> R
100 |         where
101 |             F: FnOnce(*mut T) -> R,
102 |         {
103 |             f(self.0.get())
104 |         }
105 |     }
106 | 
107 |     pub(crate) mod alloc {
108 |         /// Track allocations, detecting leaks
109 |         #[derive(Debug, Default)]
110 |         pub struct Track<T> {
111 |             value: T,
112 |         }
113 | 
114 |         impl<T> Track<T> {
115 |             /// Track a value for leaks
116 |             #[inline(always)]
117 |             pub fn new(value: T) -> Track<T> {
118 |                 Track { value }
119 |             }
120 | 
121 |             /// Get a reference to the value
122 |             #[inline(always)]
123 |             pub fn get_ref(&self) -> &T {
124 |                 &self.value
125 |             }
126 | 
127 |             /// Get a mutable reference to the value
128 |             #[inline(always)]
129 |             pub fn get_mut(&mut self) -> &mut T {
130 |                 &mut self.value
131 |             }
132 | 
133 |             /// Stop tracking the value for leaks
134 |             #[inline(always)]
135 |             pub fn into_inner(self) -> T {
136 |                 self.value
137 |             }
138 |         }
139 |     }
140 | }
141 | 
--------------------------------------------------------------------------------
/src/tests/custom_config.rs:
--------------------------------------------------------------------------------
 1 | //! Ensures that a custom config behaves as the default config, until limits are reached.
 2 | //! Prevents regression after #80.
 3 | 
 4 | use crate::{cfg::CfgPrivate, Config, Slab};
 5 | 
 6 | struct CustomConfig;
 7 | 
 8 | #[cfg(target_pointer_width = "64")]
 9 | impl Config for CustomConfig {
10 |     const INITIAL_PAGE_SIZE: usize = 32;
11 |     const MAX_PAGES: usize = 15;
12 |     const MAX_THREADS: usize = 256;
13 |     const RESERVED_BITS: usize = 24;
14 | }
15 | 
16 | #[cfg(not(target_pointer_width = "64"))]
17 | impl Config for CustomConfig {
18 |     const INITIAL_PAGE_SIZE: usize = 16;
19 |     const MAX_PAGES: usize = 6;
20 |     const MAX_THREADS: usize = 128;
21 |     const RESERVED_BITS: usize = 12;
22 | }
23 | 
24 | // We should repeat actions several times to detect invalid lifecycle changes.
25 | const ITERS: u64 = 5;
26 | 
27 | #[track_caller]
28 | fn slab_eq(mut lhs: Slab<u64, impl Config>, mut rhs: Slab<u64, impl Config>) {
29 |     let mut lhs_vec = lhs.unique_iter().collect::<Vec<_>>();
30 |     lhs_vec.sort_unstable();
31 |     let mut rhs_vec = rhs.unique_iter().collect::<Vec<_>>();
32 |     rhs_vec.sort_unstable();
33 |     assert_eq!(lhs_vec, rhs_vec);
34 | }
35 | 
36 | /// Calls `insert(); remove()` multiple times to detect invalid releasing.
37 | /// Initially, it revealed bugs in the `Slot::release_with()` implementation.
38 | #[test]
39 | fn insert_remove() {
40 |     eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
41 | 
42 |     let default_slab = Slab::<u64>::new();
43 |     let custom_slab = Slab::<u64>::new_with_config::<CustomConfig>();
44 | 
45 |     for i in 0..=ITERS {
46 |         let idx = default_slab.insert(i).unwrap();
47 |         assert!(default_slab.remove(idx));
48 | 
49 |         let idx = custom_slab.insert(i).unwrap();
50 |         assert!(custom_slab.remove(idx));
51 |     }
52 | 
53 |     slab_eq(custom_slab, default_slab);
54 | }
55 | 
56 | /// Calls `get()` multiple times to detect invalid ref counting.
57 | /// Initially, it revealed bugs in the `Slot::get()` implementation.
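/// Each `get()` bumps the slot's reference count and each dropped guard
/// releases it, so the count must return to zero before the entry can be
/// removed. A sketch of the lifecycle this exercises (hypothetical `slab`
/// and `idx` bindings, illustrative only):
///
/// ```ignore
/// let g1 = slab.get(idx).unwrap(); // ref count 0 -> 1
/// let g2 = slab.get(idx).unwrap(); // ref count 1 -> 2
/// drop(g1);                        // 2 -> 1
/// drop(g2);                        // 1 -> 0: the entry may now be removed
/// ```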
 58 | #[test]
 59 | fn double_get() {
 60 |     eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
 61 | 
 62 |     let default_slab = Slab::<u64>::new();
 63 |     let custom_slab = Slab::<u64>::new_with_config::<CustomConfig>();
 64 | 
 65 |     for i in 0..=ITERS {
 66 |         let idx = default_slab.insert(i).unwrap();
 67 |         assert!(default_slab.get(idx).is_some());
 68 |         assert!(default_slab.get(idx).is_some());
 69 |         assert!(default_slab.remove(idx));
 70 | 
 71 |         let idx = custom_slab.insert(i).unwrap();
 72 |         assert!(custom_slab.get(idx).is_some());
 73 |         assert!(custom_slab.get(idx).is_some());
 74 |         assert!(custom_slab.remove(idx));
 75 |     }
 76 | 
 77 |     slab_eq(custom_slab, default_slab);
 78 | }
 79 | 
--------------------------------------------------------------------------------
/src/tests/loom_pool.rs:
--------------------------------------------------------------------------------
  1 | use super::util::*;
  2 | use crate::{clear::Clear, sync::alloc, Pack, Pool};
  3 | use loom::{
  4 |     sync::{
  5 |         atomic::{AtomicBool, Ordering},
  6 |         Condvar, Mutex,
  7 |     },
  8 |     thread,
  9 | };
 10 | use std::sync::Arc;
 11 | 
 12 | #[derive(Default, Debug)]
 13 | struct State {
 14 |     is_dropped: AtomicBool,
 15 |     is_cleared: AtomicBool,
 16 |     id: usize,
 17 | }
 18 | 
 19 | impl State {
 20 |     fn assert_clear(&self) {
 21 |         assert!(!self.is_dropped.load(Ordering::SeqCst));
 22 |         assert!(self.is_cleared.load(Ordering::SeqCst));
 23 |     }
 24 | 
 25 |     fn assert_not_clear(&self) {
 26 |         assert!(!self.is_dropped.load(Ordering::SeqCst));
 27 |         assert!(!self.is_cleared.load(Ordering::SeqCst));
 28 |     }
 29 | }
 30 | 
 31 | impl PartialEq for State {
 32 |     fn eq(&self, other: &State) -> bool {
 33 |         self.id.eq(&other.id)
 34 |     }
 35 | }
 36 | 
 37 | #[derive(Default, Debug)]
 38 | struct DontDropMe(Arc<State>);
 39 | 
 40 | impl PartialEq for DontDropMe {
 41 |     fn eq(&self, other: &DontDropMe) -> bool {
 42 |         self.0.eq(&other.0)
 43 |     }
 44 | }
 45 | 
 46 | impl DontDropMe {
 47 |     fn new(id: usize) -> (Arc<State>, Self) {
 48 |         let state = Arc::new(State {
 49 |             is_dropped: AtomicBool::new(false),
 50 |             is_cleared: AtomicBool::new(false),
 51 |             id,
 52 |         });
 53 |         (state.clone(), Self(state))
 54 |     }
 55 | }
 56 | 
 57 | impl Drop for DontDropMe {
 58 |     fn drop(&mut self) {
 59 |         test_println!("-> DontDropMe drop: dropping data {:?}", self.0.id);
 60 |         self.0.is_dropped.store(true, Ordering::SeqCst)
 61 |     }
 62 | }
 63 | 
 64 | impl Clear for DontDropMe {
 65 |     fn clear(&mut self) {
 66 |         test_println!("-> DontDropMe clear: clearing data {:?}", self.0.id);
 67 |         self.0.is_cleared.store(true, Ordering::SeqCst);
 68 |     }
 69 | }
 70 | 
 71 | #[test]
 72 | fn dont_drop() {
 73 |     run_model("dont_drop", || {
 74 |         let pool: Pool<DontDropMe> = Pool::new();
 75 |         let (item1, value) = DontDropMe::new(1);
 76 |         test_println!("-> dont_drop: Inserting into pool {}", item1.id);
 77 |         let idx = pool
 78 |             .create_with(move |item| *item = value)
 79 |             .expect("create_with");
 80 | 
 81 |         item1.assert_not_clear();
 82 | 
 83 |         test_println!("-> dont_drop: clearing idx: {}", idx);
 84 |         pool.clear(idx);
 85 | 
 86 |         item1.assert_clear();
 87 |     });
 88 | }
 89 | 
 90 | #[test]
 91 | fn concurrent_create_with_clear() {
 92 |     run_model("concurrent_create_with_clear", || {
 93 |         let pool: Arc<Pool<DontDropMe>> = Arc::new(Pool::new());
 94 |         let pair = Arc::new((Mutex::new(None), Condvar::new()));
 95 | 
 96 |         let (item1, value) = DontDropMe::new(1);
 97 |         let idx1 = pool
 98 |             .create_with(move |item| *item = value)
 99 |             .expect("create_with");
100 |         let p = pool.clone();
101 |         let pair2 = pair.clone();
102 |         let test_value = item1.clone();
103 |         let t1 = thread::spawn(move || {
104 |             let (lock, cvar) = &*pair2;
105 |             test_println!("-> making get request");
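            // Read the entry through a second handle; the main thread holds
            // its own guard while it calls `clear`.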
test_println!("-> making get request"); 106 | assert_eq!(p.get(idx1).unwrap().0.id, test_value.id); 107 | let mut next = lock.lock().unwrap(); 108 | *next = Some(()); 109 | cvar.notify_one(); 110 | }); 111 | 112 | test_println!("-> making get request"); 113 | let guard = pool.get(idx1); 114 | 115 | let (lock, cvar) = &*pair; 116 | let mut next = lock.lock().unwrap(); 117 | // wait until we have a guard on the other thread. 118 | while next.is_none() { 119 | next = cvar.wait(next).unwrap(); 120 | } 121 | // the item should be marked (clear returns true)... 122 | assert!(pool.clear(idx1)); 123 | // ...but the value shouldn't be removed yet. 124 | item1.assert_not_clear(); 125 | 126 | t1.join().expect("thread 1 unable to join"); 127 | 128 | drop(guard); 129 | item1.assert_clear(); 130 | }) 131 | } 132 | 133 | #[test] 134 | fn racy_clear() { 135 | run_model("racy_clear", || { 136 | let pool = Arc::new(Pool::new()); 137 | let (item, value) = DontDropMe::new(1); 138 | 139 | let idx = pool 140 | .create_with(move |item| *item = value) 141 | .expect("create_with"); 142 | assert_eq!(pool.get(idx).unwrap().0.id, item.id); 143 | 144 | let p = pool.clone(); 145 | let t2 = thread::spawn(move || p.clear(idx)); 146 | let r1 = pool.clear(idx); 147 | let r2 = t2.join().expect("thread 2 should not panic"); 148 | 149 | test_println!("r1: {}, r2: {}", r1, r2); 150 | 151 | assert!( 152 | !(r1 && r2), 153 | "Both threads should not have cleared the value" 154 | ); 155 | assert!(r1 || r2, "One thread should have removed the value"); 156 | assert!(pool.get(idx).is_none()); 157 | item.assert_clear(); 158 | }) 159 | } 160 | 161 | #[test] 162 | fn clear_local_and_reuse() { 163 | run_model("take_remote_and_reuse", || { 164 | let pool = Arc::new(Pool::new_with_config::()); 165 | 166 | let idx1 = pool 167 | .create_with(|item: &mut String| { 168 | item.push_str("hello world"); 169 | }) 170 | .expect("create_with"); 171 | let idx2 = pool 172 | .create_with(|item| item.push_str("foo")) 173 | .expect("create_with"); 174 | let idx3 = pool 175 | .create_with(|item| item.push_str("bar")) 176 | .expect("create_with"); 177 | 178 | assert_eq!(pool.get(idx1).unwrap(), String::from("hello world")); 179 | assert_eq!(pool.get(idx2).unwrap(), String::from("foo")); 180 | assert_eq!(pool.get(idx3).unwrap(), String::from("bar")); 181 | 182 | let first = idx1 & (!crate::page::slot::Generation::::MASK); 183 | assert!(pool.clear(idx1)); 184 | 185 | let idx1 = pool 186 | .create_with(move |item| item.push_str("h")) 187 | .expect("create_with"); 188 | 189 | let second = idx1 & (!crate::page::slot::Generation::::MASK); 190 | assert_eq!(first, second); 191 | assert!(pool.get(idx1).unwrap().capacity() >= 11); 192 | }) 193 | } 194 | 195 | #[test] 196 | fn create_mut_guard_prevents_access() { 197 | run_model("create_mut_guard_prevents_access", || { 198 | let pool = Arc::new(Pool::::new()); 199 | let guard = pool.create().unwrap(); 200 | let key: usize = guard.key(); 201 | 202 | let pool2 = pool.clone(); 203 | thread::spawn(move || { 204 | assert!(pool2.get(key).is_none()); 205 | }) 206 | .join() 207 | .unwrap(); 208 | }); 209 | } 210 | 211 | #[test] 212 | fn create_mut_guard() { 213 | run_model("create_mut_guard", || { 214 | let pool = Arc::new(Pool::::new()); 215 | let mut guard = pool.create().unwrap(); 216 | let key: usize = guard.key(); 217 | 218 | let pool2 = pool.clone(); 219 | let t1 = thread::spawn(move || { 220 | test_dbg!(pool2.get(key)); 221 | }); 222 | 223 | guard.push_str("Hello world"); 224 | drop(guard); 225 | 226 | 
226 |         t1.join().unwrap();
227 |     });
228 | }
229 | 
230 | #[test]
231 | fn create_mut_guard_2() {
232 |     run_model("create_mut_guard_2", || {
233 |         let pool = Arc::new(Pool::<String>::new());
234 |         let mut guard = pool.create().unwrap();
235 |         let key: usize = guard.key();
236 | 
237 |         let pool2 = pool.clone();
238 |         let pool3 = pool.clone();
239 |         let t1 = thread::spawn(move || {
240 |             test_dbg!(pool2.get(key));
241 |         });
242 | 
243 |         guard.push_str("Hello world");
244 |         let t2 = thread::spawn(move || {
245 |             test_dbg!(pool3.get(key));
246 |         });
247 |         drop(guard);
248 | 
249 |         t1.join().unwrap();
250 |         t2.join().unwrap();
251 |     });
252 | }
253 | 
254 | #[test]
255 | fn create_mut_guard_downgrade() {
256 |     run_model("create_mut_guard_downgrade", || {
257 |         let pool = Arc::new(Pool::<String>::new());
258 |         let mut guard = pool.create().unwrap();
259 |         let key: usize = guard.key();
260 | 
261 |         let pool2 = pool.clone();
262 |         let pool3 = pool.clone();
263 |         let t1 = thread::spawn(move || {
264 |             test_dbg!(pool2.get(key));
265 |         });
266 | 
267 |         guard.push_str("Hello world");
268 |         let guard = guard.downgrade();
269 |         let t2 = thread::spawn(move || {
270 |             test_dbg!(pool3.get(key));
271 |         });
272 | 
273 |         t1.join().unwrap();
274 |         t2.join().unwrap();
275 |         assert_eq!(guard, "Hello world".to_owned());
276 |     });
277 | }
278 | 
279 | #[test]
280 | fn create_mut_guard_downgrade_clear() {
281 |     run_model("create_mut_guard_downgrade_clear", || {
282 |         let pool = Arc::new(Pool::<String>::new());
283 |         let mut guard = pool.create().unwrap();
284 |         let key: usize = guard.key();
285 | 
286 |         let pool2 = pool.clone();
287 | 
288 |         guard.push_str("Hello world");
289 |         let guard = guard.downgrade();
290 |         let pool3 = pool.clone();
291 |         let t1 = thread::spawn(move || {
292 |             test_dbg!(pool2.get(key));
293 |         });
294 |         let t2 = thread::spawn(move || {
295 |             test_dbg!(pool3.clear(key));
296 |         });
297 | 
298 |         assert_eq!(guard, "Hello world".to_owned());
299 |         drop(guard);
300 | 
301 |         t1.join().unwrap();
302 |         t2.join().unwrap();
303 | 
304 |         assert!(pool.get(key).is_none());
305 |     });
306 | }
307 | 
308 | #[test]
309 | fn create_mut_downgrade_during_clear() {
310 |     run_model("create_mut_downgrade_during_clear", || {
311 |         let pool = Arc::new(Pool::<String>::new());
312 |         let mut guard = pool.create().unwrap();
313 |         let key: usize = guard.key();
314 |         guard.push_str("Hello world");
315 | 
316 |         let pool2 = pool.clone();
317 |         let guard = guard.downgrade();
318 |         let t1 = thread::spawn(move || {
319 |             test_dbg!(pool2.clear(key));
320 |         });
321 | 
322 |         t1.join().unwrap();
323 | 
324 |         assert_eq!(guard, "Hello world".to_owned());
325 |         drop(guard);
326 | 
327 |         assert!(pool.get(key).is_none());
328 |     });
329 | }
330 | 
331 | #[test]
332 | fn ownedref_send_out_of_local() {
333 |     run_model("ownedref_send_out_of_local", || {
334 |         let pool = Arc::new(Pool::<alloc::Track<String>>::new());
335 |         let key1 = pool
336 |             .create_with(|item| item.get_mut().push_str("hello"))
337 |             .expect("create item 1");
338 |         let key2 = pool
339 |             .create_with(|item| item.get_mut().push_str("goodbye"))
340 |             .expect("create item 2");
341 | 
342 |         let item1 = pool.clone().get_owned(key1).expect("get key1");
343 |         let item2 = pool.clone().get_owned(key2).expect("get key2");
344 |         let pool2 = pool.clone();
345 | 
346 |         test_dbg!(pool.clear(key1));
347 | 
348 |         let t1 = thread::spawn(move || {
349 |             assert_eq!(item1.get_ref(), &String::from("hello"));
350 |             drop(item1);
351 |         });
352 |         let t2 = thread::spawn(move || {
353 |             assert_eq!(item2.get_ref(), &String::from("goodbye"));
354 |             test_dbg!(pool2.clear(key2));
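            // `clear` here only marks the slot; the value is not actually
            // cleared until the owned guard is dropped on the next line.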
355 |             drop(item2);
356 |         });
357 | 
358 |         t1.join().unwrap();
359 |         t2.join().unwrap();
360 | 
361 |         assert!(pool.get(key1).is_none());
362 |         assert!(pool.get(key2).is_none());
363 |     });
364 | }
365 | 
366 | #[test]
367 | fn ownedrefs_outlive_pool() {
368 |     run_model("ownedrefs_outlive_pool", || {
369 |         let pool = Arc::new(Pool::<alloc::Track<String>>::new());
370 |         let key1 = pool
371 |             .create_with(|item| item.get_mut().push_str("hello"))
372 |             .expect("create item 1");
373 |         let key2 = pool
374 |             .create_with(|item| item.get_mut().push_str("goodbye"))
375 |             .expect("create item 2");
376 | 
377 |         let item1_1 = pool.clone().get_owned(key1).expect("get key1");
378 |         let item1_2 = pool.clone().get_owned(key1).expect("get key1 again");
379 |         let item2 = pool.clone().get_owned(key2).expect("get key2");
380 |         drop(pool);
381 | 
382 |         let t1 = thread::spawn(move || {
383 |             assert_eq!(item1_1.get_ref(), &String::from("hello"));
384 |             drop(item1_1);
385 |         });
386 | 
387 |         let t2 = thread::spawn(move || {
388 |             assert_eq!(item2.get_ref(), &String::from("goodbye"));
389 |             drop(item2);
390 |         });
391 | 
392 |         t1.join().unwrap();
393 |         t2.join().unwrap();
394 | 
395 |         assert_eq!(item1_2.get_ref(), &String::from("hello"));
396 |     });
397 | }
398 | 
399 | #[test]
400 | fn ownedref_ping_pong() {
401 |     run_model("ownedref_ping_pong", || {
402 |         let pool = Arc::new(Pool::<alloc::Track<String>>::new());
403 |         let key1 = pool
404 |             .create_with(|item| item.get_mut().push_str("hello"))
405 |             .expect("create item 1");
406 |         let key2 = pool
407 |             .create_with(|item| item.get_mut().push_str("world"))
408 |             .expect("create item 2");
409 | 
410 |         let item1 = pool.clone().get_owned(key1).expect("get key1");
411 |         let pool2 = pool.clone();
412 |         let pool3 = pool.clone();
413 | 
414 |         let t1 = thread::spawn(move || {
415 |             assert_eq!(item1.get_ref(), &String::from("hello"));
416 |             pool2.clear(key1);
417 |             item1
418 |         });
419 | 
420 |         let t2 = thread::spawn(move || {
421 |             let item2 = pool3.clone().get_owned(key2).unwrap();
422 |             assert_eq!(item2.get_ref(), &String::from("world"));
423 |             pool3.clear(key1);
424 |             item2
425 |         });
426 | 
427 |         let item1 = t1.join().unwrap();
428 |         let item2 = t2.join().unwrap();
429 | 
430 |         assert_eq!(item1.get_ref(), &String::from("hello"));
431 |         assert_eq!(item2.get_ref(), &String::from("world"));
432 |     });
433 | }
434 | 
435 | #[test]
436 | fn ownedref_drop_from_other_threads() {
437 |     run_model("ownedref_drop_from_other_threads", || {
438 |         let pool = Arc::new(Pool::<alloc::Track<String>>::new());
439 |         let key1 = pool
440 |             .create_with(|item| item.get_mut().push_str("hello"))
441 |             .expect("create item 1");
442 |         let item1 = pool.clone().get_owned(key1).expect("get key1");
443 | 
444 |         let pool2 = pool.clone();
445 | 
446 |         let t1 = thread::spawn(move || {
447 |             let pool = pool2.clone();
448 |             let key2 = pool
449 |                 .create_with(|item| item.get_mut().push_str("goodbye"))
450 |                 .expect("create item 1");
451 |             let item2 = pool.clone().get_owned(key2).expect("get key1");
452 |             let t2 = thread::spawn(move || {
453 |                 assert_eq!(item2.get_ref(), &String::from("goodbye"));
454 |                 test_dbg!(pool2.clear(key1));
455 |                 drop(item2)
456 |             });
457 |             assert_eq!(item1.get_ref(), &String::from("hello"));
458 |             test_dbg!(pool.clear(key2));
459 |             drop(item1);
460 |             (t2, key2)
461 |         });
462 | 
463 |         let (t2, key2) = t1.join().unwrap();
464 |         test_dbg!(pool.get(key1));
465 |         test_dbg!(pool.get(key2));
466 | 
467 |         t2.join().unwrap();
468 | 
469 |         assert!(pool.get(key1).is_none());
470 |         assert!(pool.get(key2).is_none());
471 |     });
472 | }
473 | 
474 | #[test]
475 | fn create_owned_mut_guard() {
476 |     run_model("create_owned_mut_guard", || {
477 |         let pool = Arc::new(Pool::<String>::new());
478 |         let mut guard = pool.clone().create_owned().unwrap();
479 |         let key: usize = guard.key();
480 | 
481 |         let pool2 = pool.clone();
482 |         let t1 = thread::spawn(move || {
483 |             test_dbg!(pool2.get(key));
484 |         });
485 | 
486 |         guard.push_str("Hello world");
487 |         drop(guard);
488 | 
489 |         t1.join().unwrap();
490 |     });
491 | }
492 | 
493 | #[test]
494 | fn create_owned_mut_guard_send() {
495 |     run_model("create_owned_mut_guard", || {
496 |         let pool = Arc::new(Pool::<String>::new());
497 |         let mut guard = pool.clone().create_owned().unwrap();
498 |         let key: usize = guard.key();
499 | 
500 |         let pool2 = pool.clone();
501 |         let t1 = thread::spawn(move || {
502 |             test_dbg!(pool2.get(key));
503 |         });
504 | 
505 |         let t2 = thread::spawn(move || {
506 |             guard.push_str("Hello world");
507 |             drop(guard);
508 |         });
509 | 
510 |         t1.join().unwrap();
511 |         t2.join().unwrap();
512 |     });
513 | }
514 | 
515 | #[test]
516 | fn create_owned_mut_guard_2() {
517 |     run_model("create_owned_mut_guard_2", || {
518 |         let pool = Arc::new(Pool::<String>::new());
519 |         let mut guard = pool.clone().create_owned().unwrap();
520 |         let key: usize = guard.key();
521 | 
522 |         let pool2 = pool.clone();
523 |         let pool3 = pool.clone();
524 |         let t1 = thread::spawn(move || {
525 |             test_dbg!(pool2.get(key));
526 |         });
527 | 
528 |         guard.push_str("Hello world");
529 |         let t2 = thread::spawn(move || {
530 |             test_dbg!(pool3.get(key));
531 |         });
532 |         drop(guard);
533 | 
534 |         t1.join().unwrap();
535 |         t2.join().unwrap();
536 |     });
537 | }
538 | 
539 | #[test]
540 | fn create_owned_mut_guard_downgrade() {
541 |     run_model("create_owned_mut_guard_downgrade", || {
542 |         let pool = Arc::new(Pool::<String>::new());
543 |         let mut guard = pool.clone().create_owned().unwrap();
544 |         guard.push_str("Hello world");
545 | 
546 |         let key: usize = guard.key();
547 | 
548 |         let pool2 = pool.clone();
549 |         let pool3 = pool.clone();
550 |         let t1 = thread::spawn(move || {
551 |             test_dbg!(pool2.get(key));
552 |         });
553 | 
554 |         let guard = guard.downgrade();
555 |         let t2 = thread::spawn(move || {
556 |             assert_eq!(pool3.get(key).unwrap(), "Hello world".to_owned());
557 |         });
558 | 
559 |         t1.join().unwrap();
560 |         t2.join().unwrap();
561 |         assert_eq!(guard, "Hello world".to_owned());
562 |     });
563 | }
564 | 
565 | #[test]
566 | fn create_owned_mut_guard_downgrade_then_clear() {
567 |     run_model("create_owned_mut_guard_downgrade_then_clear", || {
568 |         let pool = Arc::new(Pool::<String>::new());
569 |         let mut guard = pool.clone().create_owned().unwrap();
570 |         let key: usize = guard.key();
571 | 
572 |         let pool2 = pool.clone();
573 | 
574 |         guard.push_str("Hello world");
575 |         let guard = guard.downgrade();
576 |         let pool3 = pool.clone();
577 |         let t1 = thread::spawn(move || {
578 |             test_dbg!(pool2.get(key));
579 |         });
580 |         let t2 = thread::spawn(move || {
581 |             test_dbg!(pool3.clear(key));
582 |         });
583 | 
584 |         assert_eq!(guard, "Hello world".to_owned());
585 |         drop(guard);
586 | 
587 |         t1.join().unwrap();
588 |         t2.join().unwrap();
589 | 
590 |         assert!(pool.get(key).is_none());
591 |     });
592 | }
593 | 
594 | #[test]
595 | fn create_owned_mut_downgrade_during_clear() {
596 |     run_model("create_owned_mut_downgrade_during_clear", || {
597 |         let pool = Arc::new(Pool::<String>::new());
598 |         let mut guard = pool.clone().create_owned().unwrap();
599 |         let key: usize = guard.key();
600 |         guard.push_str("Hello world");
601 | 
602 |         let pool2 = pool.clone();
603 |         let guard = guard.downgrade();
604 |         let t1 = thread::spawn(move || {
605 |             test_dbg!(pool2.clear(key));
606 |         });
607 | 
608 |         t1.join().unwrap();
609 | 
610 |         assert_eq!(guard, "Hello world".to_owned());
611 |         drop(guard);
612 | 
613 |         assert!(pool.get(key).is_none());
614 |     });
615 | }
616 | 
617 | #[test]
618 | fn create_mut_downgrade_during_clear_by_other_thread() {
619 |     run_model("create_mut_downgrade_during_clear_by_other_thread", || {
620 |         let pool = Arc::new(Pool::<String>::new());
621 |         let mut guard = pool.clone().create_owned().unwrap();
622 |         let key: usize = guard.key();
623 |         guard.push_str("Hello world");
624 | 
625 |         let pool2 = pool.clone();
626 |         let t1 = thread::spawn(move || {
627 |             let guard = guard.downgrade();
628 |             assert_eq!(guard, "Hello world".to_owned());
629 |             drop(guard);
630 |         });
631 | 
632 |         let t2 = thread::spawn(move || {
633 |             test_dbg!(pool2.clear(key));
634 |         });
635 | 
636 |         test_dbg!(pool.get(key));
637 | 
638 |         t1.join().unwrap();
639 |         t2.join().unwrap();
640 |     });
641 | }
--------------------------------------------------------------------------------
/src/tests/loom_slab.rs:
--------------------------------------------------------------------------------
 1 | use super::util::*;
 2 | use crate::sync::alloc;
 3 | use crate::Slab;
 4 | use loom::sync::{Condvar, Mutex};
 5 | use loom::thread;
 6 | use std::sync::{
 7 |     atomic::{AtomicBool, Ordering},
 8 |     Arc,
 9 | };
10 | 
11 | #[test]
12 | fn take_local() {
13 |     run_model("take_local", || {
14 |         let slab = Arc::new(Slab::new());
15 | 
16 |         let s = slab.clone();
17 |         let t1 = thread::spawn(move || {
18 |             let idx = s.insert(1).expect("insert");
19 |             assert_eq!(s.get(idx).unwrap(), 1);
20 |             assert_eq!(s.take(idx), Some(1));
21 |             assert!(s.get(idx).is_none());
22 |             let idx = s.insert(2).expect("insert");
23 |             assert_eq!(s.get(idx).unwrap(), 2);
24 |             assert_eq!(s.take(idx), Some(2));
25 |             assert!(s.get(idx).is_none());
26 |         });
27 | 
28 |         let s = slab.clone();
29 |         let t2 = thread::spawn(move || {
30 |             let idx = s.insert(3).expect("insert");
31 |             assert_eq!(s.get(idx).unwrap(), 3);
32 |             assert_eq!(s.take(idx), Some(3));
33 |             assert!(s.get(idx).is_none());
34 |             let idx = s.insert(4).expect("insert");
35 |             assert_eq!(s.get(idx).unwrap(), 4);
36 |             assert_eq!(s.take(idx), Some(4));
37 |             assert!(s.get(idx).is_none());
38 |         });
39 | 
40 |         let s = slab;
41 |         let idx1 = s.insert(5).expect("insert");
42 |         assert_eq!(s.get(idx1).unwrap(), 5);
43 |         let idx2 = s.insert(6).expect("insert");
44 |         assert_eq!(s.get(idx2).unwrap(), 6);
45 |         assert_eq!(s.take(idx1), Some(5));
46 |         assert!(s.get(idx1).is_none());
47 |         assert_eq!(s.get(idx2).unwrap(), 6);
48 |         assert_eq!(s.take(idx2), Some(6));
49 |         assert!(s.get(idx2).is_none());
50 | 
51 |         t1.join().expect("thread 1 should not panic");
52 |         t2.join().expect("thread 2 should not panic");
53 |     });
54 | }
55 | 
56 | #[test]
57 | fn take_remote() {
58 |     run_model("take_remote", || {
59 |         let slab = Arc::new(Slab::new());
60 | 
61 |         let idx1 = slab.insert(1).expect("insert");
62 |         assert_eq!(slab.get(idx1).unwrap(), 1);
63 |         let idx2 = slab.insert(2).expect("insert");
64 |         assert_eq!(slab.get(idx2).unwrap(), 2);
65 | 
66 |         let idx3 = slab.insert(3).expect("insert");
67 |         assert_eq!(slab.get(idx3).unwrap(), 3);
68 | 
69 |         let s = slab.clone();
70 |         let t1 = thread::spawn(move || {
71 |             assert_eq!(s.get(idx2).unwrap(), 2);
72 |             assert_eq!(s.take(idx2), Some(2));
73 |         });
74 | 
75 |         let s = slab.clone();
76 |         let t2 = thread::spawn(move || {
77 |             assert_eq!(s.get(idx3).unwrap(), 3);
78 |             assert_eq!(s.take(idx3), Some(3));
79 |         });
80 | 
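        // Both removals above run on threads other than the one that inserted
        // the entries, exercising the remote take path.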
t1.join().expect("thread 1 should not panic"); 82 | t2.join().expect("thread 2 should not panic"); 83 | 84 | assert_eq!(slab.get(idx1).unwrap(), 1); 85 | assert!(slab.get(idx2).is_none()); 86 | assert!(slab.get(idx3).is_none()); 87 | }); 88 | } 89 | 90 | #[test] 91 | fn racy_take() { 92 | run_model("racy_take", || { 93 | let slab = Arc::new(Slab::new()); 94 | 95 | let idx = slab.insert(1).expect("insert"); 96 | assert_eq!(slab.get(idx).unwrap(), 1); 97 | 98 | let s1 = slab.clone(); 99 | let s2 = slab.clone(); 100 | 101 | let t1 = thread::spawn(move || s1.take(idx)); 102 | let t2 = thread::spawn(move || s2.take(idx)); 103 | 104 | let r1 = t1.join().expect("thread 1 should not panic"); 105 | let r2 = t2.join().expect("thread 2 should not panic"); 106 | 107 | assert!( 108 | r1.is_none() || r2.is_none(), 109 | "both threads should not have removed the value" 110 | ); 111 | assert_eq!( 112 | r1.or(r2), 113 | Some(1), 114 | "one thread should have removed the value" 115 | ); 116 | assert!(slab.get(idx).is_none()); 117 | }); 118 | } 119 | 120 | #[test] 121 | fn racy_take_local() { 122 | run_model("racy_take_local", || { 123 | let slab = Arc::new(Slab::new()); 124 | 125 | let idx = slab.insert(1).expect("insert"); 126 | assert_eq!(slab.get(idx).unwrap(), 1); 127 | 128 | let s = slab.clone(); 129 | let t2 = thread::spawn(move || s.take(idx)); 130 | let r1 = slab.take(idx); 131 | let r2 = t2.join().expect("thread 2 should not panic"); 132 | 133 | assert!( 134 | r1.is_none() || r2.is_none(), 135 | "both threads should not have removed the value" 136 | ); 137 | assert!( 138 | r1.or(r2).is_some(), 139 | "one thread should have removed the value" 140 | ); 141 | assert!(slab.get(idx).is_none()); 142 | }); 143 | } 144 | 145 | #[test] 146 | fn concurrent_insert_take() { 147 | run_model("concurrent_insert_remove", || { 148 | let slab = Arc::new(Slab::new()); 149 | let pair = Arc::new((Mutex::new(None), Condvar::new())); 150 | 151 | let slab2 = slab.clone(); 152 | let pair2 = pair.clone(); 153 | let remover = thread::spawn(move || { 154 | let (lock, cvar) = &*pair2; 155 | for i in 0..2 { 156 | test_println!("--- remover i={} ---", i); 157 | let mut next = lock.lock().unwrap(); 158 | while next.is_none() { 159 | next = cvar.wait(next).unwrap(); 160 | } 161 | let key = next.take().unwrap(); 162 | assert_eq!(slab2.take(key), Some(i)); 163 | cvar.notify_one(); 164 | } 165 | }); 166 | 167 | let (lock, cvar) = &*pair; 168 | for i in 0..2 { 169 | test_println!("--- inserter i={} ---", i); 170 | let key = slab.insert(i).expect("insert"); 171 | 172 | let mut next = lock.lock().unwrap(); 173 | *next = Some(key); 174 | cvar.notify_one(); 175 | 176 | // Wait for the item to be removed. 
177 |             while next.is_some() {
178 |                 next = cvar.wait(next).unwrap();
179 |             }
180 | 
181 |             assert!(slab.get(key).is_none());
182 |         }
183 | 
184 |         remover.join().unwrap();
185 |     })
186 | }
187 | 
188 | #[test]
189 | fn take_remote_and_reuse() {
190 |     run_model("take_remote_and_reuse", || {
191 |         let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
192 | 
193 |         let idx1 = slab.insert(1).expect("insert");
194 |         let idx2 = slab.insert(2).expect("insert");
195 |         let idx3 = slab.insert(3).expect("insert");
196 |         let idx4 = slab.insert(4).expect("insert");
197 | 
198 |         assert_eq!(slab.get(idx1).unwrap(), 1, "slab: {:#?}", slab);
199 |         assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
200 |         assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
201 |         assert_eq!(slab.get(idx4).unwrap(), 4, "slab: {:#?}", slab);
202 | 
203 |         let s = slab.clone();
204 |         let t1 = thread::spawn(move || {
205 |             assert_eq!(s.take(idx1), Some(1), "slab: {:#?}", s);
206 |         });
207 | 
208 |         let idx1 = slab.insert(5).expect("insert");
209 |         t1.join().expect("thread 1 should not panic");
210 | 
211 |         assert_eq!(slab.get(idx1).unwrap(), 5, "slab: {:#?}", slab);
212 |         assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
213 |         assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
214 |         assert_eq!(slab.get(idx4).unwrap(), 4, "slab: {:#?}", slab);
215 |     });
216 | }
217 | 
218 | fn store_when_free(slab: &Arc<Slab<usize>>, t: usize) -> usize {
219 |     loop {
220 |         test_println!("try store {:?}", t);
221 |         if let Some(key) = slab.insert(t) {
222 |             test_println!("inserted at {:#x}", key);
223 |             return key;
224 |         }
225 |         test_println!("retrying; slab is full...");
226 |         thread::yield_now();
227 |     }
228 | }
229 | 
230 | struct TinierConfig;
231 | 
232 | impl crate::Config for TinierConfig {
233 |     const INITIAL_PAGE_SIZE: usize = 2;
234 |     const MAX_PAGES: usize = 1;
235 | }
236 | 
237 | #[test]
238 | fn concurrent_remove_remote_and_reuse() {
239 |     let mut model = loom::model::Builder::new();
240 |     model.max_branches = 100000;
241 |     run_builder("concurrent_remove_remote_and_reuse", model, || {
242 |         let slab = Arc::new(Slab::new_with_config::<TinierConfig>());
243 | 
244 |         let idx1 = slab.insert(1).unwrap();
245 |         let idx2 = slab.insert(2).unwrap();
246 | 
247 |         assert_eq!(slab.get(idx1).unwrap(), 1, "slab: {:#?}", slab);
248 |         assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
249 | 
250 |         let s = slab.clone();
251 |         let s2 = slab.clone();
252 | 
253 |         let t1 = thread::spawn(move || {
254 |             s.take(idx1).expect("must remove");
255 |         });
256 | 
257 |         let t2 = thread::spawn(move || {
258 |             s2.take(idx2).expect("must remove");
259 |         });
260 | 
261 |         let idx3 = store_when_free(&slab, 3);
262 |         t1.join().expect("thread 1 should not panic");
263 |         t2.join().expect("thread 2 should not panic");
264 | 
265 |         assert!(slab.get(idx1).is_none(), "slab: {:#?}", slab);
266 |         assert!(slab.get(idx2).is_none(), "slab: {:#?}", slab);
267 |         assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
268 |     });
269 | }
270 | 
271 | struct SetDropped {
272 |     val: usize,
273 |     dropped: std::sync::Arc<AtomicBool>,
274 | }
275 | 
276 | struct AssertDropped {
277 |     dropped: std::sync::Arc<AtomicBool>,
278 | }
279 | 
280 | impl AssertDropped {
281 |     fn new(val: usize) -> (Self, SetDropped) {
282 |         let dropped = std::sync::Arc::new(AtomicBool::new(false));
283 |         let val = SetDropped {
284 |             val,
285 |             dropped: dropped.clone(),
286 |         };
287 |         (Self { dropped }, val)
288 |     }
289 | 
290 |     fn assert_dropped(&self) {
291 |         assert!(
292 |             self.dropped.load(Ordering::SeqCst),
293 |             "value should have been dropped!"
dropped!" 294 | ); 295 | } 296 | } 297 | 298 | impl Drop for SetDropped { 299 | fn drop(&mut self) { 300 | self.dropped.store(true, Ordering::SeqCst); 301 | } 302 | } 303 | 304 | #[test] 305 | fn remove_local() { 306 | run_model("remove_local", || { 307 | let slab = Arc::new(Slab::new_with_config::()); 308 | let slab2 = slab.clone(); 309 | 310 | let (dropped, item) = AssertDropped::new(1); 311 | let idx = slab.insert(item).expect("insert"); 312 | 313 | let guard = slab.get(idx).unwrap(); 314 | 315 | assert!(slab.remove(idx)); 316 | 317 | let t1 = thread::spawn(move || { 318 | let g = slab2.get(idx); 319 | drop(g); 320 | }); 321 | 322 | assert!(slab.get(idx).is_none()); 323 | 324 | t1.join().expect("thread 1 should not panic"); 325 | 326 | drop(guard); 327 | assert!(slab.get(idx).is_none()); 328 | dropped.assert_dropped(); 329 | }) 330 | } 331 | 332 | #[test] 333 | fn remove_remote() { 334 | run_model("remove_remote", || { 335 | let slab = Arc::new(Slab::new_with_config::()); 336 | let slab2 = slab.clone(); 337 | 338 | let (dropped, item) = AssertDropped::new(1); 339 | let idx = slab.insert(item).expect("insert"); 340 | 341 | assert!(slab.remove(idx)); 342 | let t1 = thread::spawn(move || { 343 | let g = slab2.get(idx); 344 | drop(g); 345 | }); 346 | 347 | t1.join().expect("thread 1 should not panic"); 348 | 349 | assert!(slab.get(idx).is_none()); 350 | dropped.assert_dropped(); 351 | }); 352 | } 353 | 354 | #[test] 355 | fn remove_remote_during_insert() { 356 | run_model("remove_remote_during_insert", || { 357 | let slab = Arc::new(Slab::new_with_config::()); 358 | let slab2 = slab.clone(); 359 | 360 | let (dropped, item) = AssertDropped::new(1); 361 | let idx = slab.insert(item).expect("insert"); 362 | 363 | let t1 = thread::spawn(move || { 364 | let g = slab2.get(idx); 365 | assert_ne!(g.as_ref().map(|v| v.val), Some(2)); 366 | drop(g); 367 | }); 368 | 369 | let (_, item) = AssertDropped::new(2); 370 | assert!(slab.remove(idx)); 371 | let idx2 = slab.insert(item).expect("insert"); 372 | 373 | t1.join().expect("thread 1 should not panic"); 374 | 375 | assert!(slab.get(idx).is_none()); 376 | assert!(slab.get(idx2).is_some()); 377 | dropped.assert_dropped(); 378 | }); 379 | } 380 | 381 | #[test] 382 | fn unique_iter() { 383 | run_model("unique_iter", || { 384 | let mut slab = Arc::new(Slab::new()); 385 | 386 | let s = slab.clone(); 387 | let t1 = thread::spawn(move || { 388 | s.insert(1).expect("insert"); 389 | s.insert(2).expect("insert"); 390 | }); 391 | 392 | let s = slab.clone(); 393 | let t2 = thread::spawn(move || { 394 | s.insert(3).expect("insert"); 395 | s.insert(4).expect("insert"); 396 | }); 397 | 398 | t1.join().expect("thread 1 should not panic"); 399 | t2.join().expect("thread 2 should not panic"); 400 | 401 | let slab = Arc::get_mut(&mut slab).expect("other arcs should be dropped"); 402 | let items: Vec<_> = slab.unique_iter().map(|&i| i).collect(); 403 | assert!(items.contains(&1), "items: {:?}", items); 404 | assert!(items.contains(&2), "items: {:?}", items); 405 | assert!(items.contains(&3), "items: {:?}", items); 406 | assert!(items.contains(&4), "items: {:?}", items); 407 | }); 408 | } 409 | 410 | #[test] 411 | fn custom_page_sz() { 412 | let mut model = loom::model::Builder::new(); 413 | model.max_branches = 100000; 414 | model.check(|| { 415 | let slab = Slab::::new_with_config::(); 416 | 417 | for i in 0..1024usize { 418 | test_println!("{}", i); 419 | let k = slab.insert(i).expect("insert"); 420 | let v = slab.get(k).expect("get"); 421 | assert_eq!(v, i, "slab: 
{:#?}", slab); 422 | } 423 | }); 424 | } 425 | 426 | #[test] 427 | fn max_refs() { 428 | struct LargeGenConfig; 429 | 430 | // Configure the slab with a very large number of bits for the generation 431 | // counter. That way, there will be very few bits for the ref count left 432 | // over, and this test won't have to malloc millions of references. 433 | impl crate::cfg::Config for LargeGenConfig { 434 | const INITIAL_PAGE_SIZE: usize = 2; 435 | const MAX_THREADS: usize = 32; 436 | const MAX_PAGES: usize = 2; 437 | } 438 | 439 | let mut model = loom::model::Builder::new(); 440 | model.max_branches = 100000; 441 | model.check(|| { 442 | let slab = Slab::new_with_config::(); 443 | let key = slab.insert("hello world").unwrap(); 444 | let max = crate::page::slot::RefCount::::MAX; 445 | 446 | // Create the maximum number of concurrent references to the entry. 447 | let mut refs = (0..max) 448 | .map(|_| slab.get(key).unwrap()) 449 | // Store the refs in a vec so they don't get dropped immediately. 450 | .collect::>(); 451 | 452 | assert!(slab.get(key).is_none()); 453 | 454 | // After dropping a ref, we should now be able to access the slot again. 455 | drop(refs.pop()); 456 | let ref1 = slab.get(key); 457 | assert!(ref1.is_some()); 458 | 459 | // Ref1 should max out the number of references again. 460 | assert!(slab.get(key).is_none()); 461 | }) 462 | } 463 | 464 | mod free_list_reuse { 465 | use super::*; 466 | struct TinyConfig; 467 | 468 | impl crate::cfg::Config for TinyConfig { 469 | const INITIAL_PAGE_SIZE: usize = 2; 470 | } 471 | 472 | #[test] 473 | fn local_remove() { 474 | run_model("free_list_reuse::local_remove", || { 475 | let slab = Slab::new_with_config::(); 476 | 477 | let t1 = slab.insert("hello").expect("insert"); 478 | let t2 = slab.insert("world").expect("insert"); 479 | assert_eq!( 480 | crate::page::indices::(t1).1, 481 | 0, 482 | "1st slot should be on 0th page" 483 | ); 484 | assert_eq!( 485 | crate::page::indices::(t2).1, 486 | 0, 487 | "2nd slot should be on 0th page" 488 | ); 489 | let t3 = slab.insert("earth").expect("insert"); 490 | assert_eq!( 491 | crate::page::indices::(t3).1, 492 | 1, 493 | "3rd slot should be on 1st page" 494 | ); 495 | 496 | slab.remove(t2); 497 | let t4 = slab.insert("universe").expect("insert"); 498 | assert_eq!( 499 | crate::page::indices::(t4).1, 500 | 0, 501 | "2nd slot should be reused (0th page)" 502 | ); 503 | 504 | slab.remove(t1); 505 | let _ = slab.insert("goodbye").expect("insert"); 506 | assert_eq!( 507 | crate::page::indices::(t4).1, 508 | 0, 509 | "1st slot should be reused (0th page)" 510 | ); 511 | }); 512 | } 513 | 514 | #[test] 515 | fn local_take() { 516 | run_model("free_list_reuse::local_take", || { 517 | let slab = Slab::new_with_config::(); 518 | 519 | let t1 = slab.insert("hello").expect("insert"); 520 | let t2 = slab.insert("world").expect("insert"); 521 | assert_eq!( 522 | crate::page::indices::(t1).1, 523 | 0, 524 | "1st slot should be on 0th page" 525 | ); 526 | assert_eq!( 527 | crate::page::indices::(t2).1, 528 | 0, 529 | "2nd slot should be on 0th page" 530 | ); 531 | let t3 = slab.insert("earth").expect("insert"); 532 | assert_eq!( 533 | crate::page::indices::(t3).1, 534 | 1, 535 | "3rd slot should be on 1st page" 536 | ); 537 | 538 | assert_eq!(slab.take(t2), Some("world")); 539 | let t4 = slab.insert("universe").expect("insert"); 540 | assert_eq!( 541 | crate::page::indices::(t4).1, 542 | 0, 543 | "2nd slot should be reused (0th page)" 544 | ); 545 | 546 | assert_eq!(slab.take(t1), Some("hello")); 547 | let _ 
= slab.insert("goodbye").expect("insert"); 548 | assert_eq!( 549 | crate::page::indices::(t4).1, 550 | 0, 551 | "1st slot should be reused (0th page)" 552 | ); 553 | }); 554 | } 555 | } 556 | 557 | #[test] 558 | fn vacant_entry() { 559 | run_model("vacant_entry", || { 560 | let slab = Arc::new(Slab::new()); 561 | let entry = slab.vacant_entry().unwrap(); 562 | let key: usize = entry.key(); 563 | 564 | let slab2 = slab.clone(); 565 | let t1 = thread::spawn(move || { 566 | test_dbg!(slab2.get(key)); 567 | }); 568 | 569 | entry.insert("hello world"); 570 | t1.join().unwrap(); 571 | 572 | assert_eq!(slab.get(key).expect("get"), "hello world"); 573 | }); 574 | } 575 | 576 | #[test] 577 | fn vacant_entry_2() { 578 | run_model("vacant_entry_2", || { 579 | let slab = Arc::new(Slab::new()); 580 | let entry = slab.vacant_entry().unwrap(); 581 | let key: usize = entry.key(); 582 | 583 | let slab2 = slab.clone(); 584 | let slab3 = slab.clone(); 585 | let t1 = thread::spawn(move || { 586 | test_dbg!(slab2.get(key)); 587 | }); 588 | 589 | entry.insert("hello world"); 590 | let t2 = thread::spawn(move || { 591 | test_dbg!(slab3.get(key)); 592 | }); 593 | 594 | t1.join().unwrap(); 595 | t2.join().unwrap(); 596 | assert_eq!(slab.get(key).expect("get"), "hello world"); 597 | }); 598 | } 599 | 600 | #[test] 601 | fn vacant_entry_remove() { 602 | run_model("vacant_entry_remove", || { 603 | let slab = Arc::new(Slab::new()); 604 | let entry = slab.vacant_entry().unwrap(); 605 | let key: usize = entry.key(); 606 | 607 | let slab2 = slab.clone(); 608 | let t1 = thread::spawn(move || { 609 | assert!(!slab2.remove(key)); 610 | }); 611 | 612 | t1.join().unwrap(); 613 | 614 | entry.insert("hello world"); 615 | assert_eq!(slab.get(key).expect("get"), "hello world"); 616 | }); 617 | } 618 | 619 | #[test] 620 | fn owned_entry_send_out_of_local() { 621 | run_model("owned_entry_send_out_of_local", || { 622 | let slab = Arc::new(Slab::>::new()); 623 | let key1 = slab 624 | .insert(alloc::Track::new(String::from("hello"))) 625 | .expect("insert item 1"); 626 | let key2 = slab 627 | .insert(alloc::Track::new(String::from("goodbye"))) 628 | .expect("insert item 2"); 629 | 630 | let item1 = slab.clone().get_owned(key1).expect("get key1"); 631 | let item2 = slab.clone().get_owned(key2).expect("get key2"); 632 | let slab2 = slab.clone(); 633 | 634 | test_dbg!(slab.remove(key1)); 635 | 636 | let t1 = thread::spawn(move || { 637 | assert_eq!(item1.get_ref(), &String::from("hello")); 638 | drop(item1); 639 | }); 640 | let t2 = thread::spawn(move || { 641 | assert_eq!(item2.get_ref(), &String::from("goodbye")); 642 | test_dbg!(slab2.remove(key2)); 643 | drop(item2); 644 | }); 645 | 646 | t1.join().unwrap(); 647 | t2.join().unwrap(); 648 | 649 | assert!(slab.get(key1).is_none()); 650 | assert!(slab.get(key2).is_none()); 651 | }); 652 | } 653 | 654 | #[test] 655 | fn owned_entrys_outlive_slab() { 656 | run_model("owned_entrys_outlive_slab", || { 657 | let slab = Arc::new(Slab::>::new()); 658 | let key1 = slab 659 | .insert(alloc::Track::new(String::from("hello"))) 660 | .expect("insert item 1"); 661 | let key2 = slab 662 | .insert(alloc::Track::new(String::from("goodbye"))) 663 | .expect("insert item 2"); 664 | 665 | let item1_1 = slab.clone().get_owned(key1).expect("get key1"); 666 | let item1_2 = slab.clone().get_owned(key1).expect("get key1 again"); 667 | let item2 = slab.clone().get_owned(key2).expect("get key2"); 668 | drop(slab); 669 | 670 | let t1 = thread::spawn(move || { 671 | assert_eq!(item1_1.get_ref(), 
&String::from("hello")); 672 | drop(item1_1); 673 | }); 674 | 675 | let t2 = thread::spawn(move || { 676 | assert_eq!(item2.get_ref(), &String::from("goodbye")); 677 | drop(item2); 678 | }); 679 | 680 | t1.join().unwrap(); 681 | t2.join().unwrap(); 682 | 683 | assert_eq!(item1_2.get_ref(), &String::from("hello")); 684 | }); 685 | } 686 | 687 | #[test] 688 | fn owned_entry_ping_pong() { 689 | run_model("owned_entry_ping_pong", || { 690 | let slab = Arc::new(Slab::>::new()); 691 | let key1 = slab 692 | .insert(alloc::Track::new(String::from("hello"))) 693 | .expect("insert item 1"); 694 | let key2 = slab 695 | .insert(alloc::Track::new(String::from("world"))) 696 | .expect("insert item 2"); 697 | 698 | let item1 = slab.clone().get_owned(key1).expect("get key1"); 699 | let slab2 = slab.clone(); 700 | let slab3 = slab.clone(); 701 | 702 | let t1 = thread::spawn(move || { 703 | assert_eq!(item1.get_ref(), &String::from("hello")); 704 | slab2.remove(key1); 705 | item1 706 | }); 707 | 708 | let t2 = thread::spawn(move || { 709 | let item2 = slab3.clone().get_owned(key2).unwrap(); 710 | assert_eq!(item2.get_ref(), &String::from("world")); 711 | slab3.remove(key1); 712 | item2 713 | }); 714 | 715 | let item1 = t1.join().unwrap(); 716 | let item2 = t2.join().unwrap(); 717 | 718 | assert_eq!(item1.get_ref(), &String::from("hello")); 719 | assert_eq!(item2.get_ref(), &String::from("world")); 720 | }); 721 | } 722 | 723 | #[test] 724 | fn owned_entry_drop_from_other_threads() { 725 | run_model("owned_entry_drop_from_other_threads", || { 726 | let slab = Arc::new(Slab::>::new()); 727 | let key1 = slab 728 | .insert(alloc::Track::new(String::from("hello"))) 729 | .expect("insert item 1"); 730 | let item1 = slab.clone().get_owned(key1).expect("get key1"); 731 | 732 | let slab2 = slab.clone(); 733 | 734 | let t1 = thread::spawn(move || { 735 | let slab = slab2.clone(); 736 | let key2 = slab 737 | .insert(alloc::Track::new(String::from("goodbye"))) 738 | .expect("insert item 1"); 739 | let item2 = slab.clone().get_owned(key2).expect("get key1"); 740 | let t2 = thread::spawn(move || { 741 | assert_eq!(item2.get_ref(), &String::from("goodbye")); 742 | test_dbg!(slab2.remove(key1)); 743 | drop(item2) 744 | }); 745 | assert_eq!(item1.get_ref(), &String::from("hello")); 746 | test_dbg!(slab.remove(key2)); 747 | drop(item1); 748 | (t2, key2) 749 | }); 750 | 751 | let (t2, key2) = t1.join().unwrap(); 752 | test_dbg!(slab.get(key1)); 753 | test_dbg!(slab.get(key2)); 754 | 755 | t2.join().unwrap(); 756 | 757 | assert!(slab.get(key1).is_none()); 758 | assert!(slab.get(key2).is_none()); 759 | }); 760 | } 761 | -------------------------------------------------------------------------------- /src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod idx { 2 | use crate::{ 3 | cfg, 4 | page::{self, slot}, 5 | Pack, Tid, 6 | }; 7 | use proptest::prelude::*; 8 | 9 | proptest! 
10 |         #[test]
11 |         #[cfg_attr(loom, ignore)]
12 |         fn tid_roundtrips(tid in 0usize..Tid::<cfg::DefaultConfig>::BITS) {
13 |             let tid = Tid::<cfg::DefaultConfig>::from_usize(tid);
14 |             let packed = tid.pack(0);
15 |             assert_eq!(tid, Tid::from_packed(packed));
16 |         }
17 | 
18 |         #[test]
19 |         #[cfg_attr(loom, ignore)]
20 |         fn idx_roundtrips(
21 |             tid in 0usize..Tid::<cfg::DefaultConfig>::BITS,
22 |             gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS,
23 |             addr in 0usize..page::Addr::<cfg::DefaultConfig>::BITS,
24 |         ) {
25 |             let tid = Tid::<cfg::DefaultConfig>::from_usize(tid);
26 |             let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
27 |             let addr = page::Addr::<cfg::DefaultConfig>::from_usize(addr);
28 |             let packed = tid.pack(gen.pack(addr.pack(0)));
29 |             assert_eq!(addr, page::Addr::from_packed(packed));
30 |             assert_eq!(gen, slot::Generation::from_packed(packed));
31 |             assert_eq!(tid, Tid::from_packed(packed));
32 |         }
33 |     }
34 | }
35 | 
36 | pub(crate) mod util {
37 |     #[cfg(loom)]
38 |     use std::sync::atomic::{AtomicUsize, Ordering};
39 |     pub(crate) struct TinyConfig;
40 | 
41 |     impl crate::Config for TinyConfig {
42 |         const INITIAL_PAGE_SIZE: usize = 4;
43 |     }
44 | 
45 |     #[cfg(loom)]
46 |     pub(crate) fn run_model(name: &'static str, f: impl Fn() + Sync + Send + 'static) {
47 |         run_builder(name, loom::model::Builder::new(), f)
48 |     }
49 | 
50 |     #[cfg(loom)]
51 |     pub(crate) fn run_builder(
52 |         name: &'static str,
53 |         builder: loom::model::Builder,
54 |         f: impl Fn() + Sync + Send + 'static,
55 |     ) {
56 |         let iters = AtomicUsize::new(1);
57 |         builder.check(move || {
58 |             test_println!(
59 |                 "\n------------ running test {}; iteration {} ------------\n",
60 |                 name,
61 |                 iters.fetch_add(1, Ordering::SeqCst)
62 |             );
63 |             f()
64 |         });
65 |     }
66 | }
67 | 
68 | #[cfg(not(loom))]
69 | mod custom_config;
70 | #[cfg(loom)]
71 | mod loom_pool;
72 | #[cfg(loom)]
73 | mod loom_slab;
74 | #[cfg(not(loom))]
75 | mod properties;
--------------------------------------------------------------------------------
/src/tests/properties.rs:
--------------------------------------------------------------------------------
 1 | //! This module contains property-based tests against the public API:
 2 | //! * API never panics.
 3 | //! * Active entries cannot be overridden until removed.
 4 | //! * The slab doesn't produce overlapping keys.
 5 | //! * The slab doesn't leave "lost" keys.
 6 | //! * `get()`, `get_owned`, and `contains()` are consistent.
 7 | //! * `RESERVED_BITS` are actually not used.
 8 | //!
 9 | //! The test is supposed to be deterministic, so it doesn't spawn real threads
10 | //! and uses `tid::with()` to override the TID for the current thread.
11 | 
12 | use std::{ops::Range, sync::Arc};
13 | 
14 | use indexmap::IndexMap;
15 | use proptest::prelude::*;
16 | 
17 | use crate::{tid, Config, DefaultConfig, Slab};
18 | 
19 | const THREADS: Range<usize> = 1..4;
20 | const ACTIONS: Range<usize> = 1..1000;
21 | 
22 | #[derive(Debug, Clone)]
23 | struct Action {
24 |     tid: usize,
25 |     kind: ActionKind,
26 | }
27 | 
28 | #[derive(Debug, Clone)]
29 | enum ActionKind {
30 |     Insert,
31 |     VacantEntry,
32 |     RemoveRandom(usize),   // key
33 |     RemoveExistent(usize), // seed
34 |     TakeRandom(usize),     // key
35 |     TakeExistent(usize),   // seed
36 |     GetRandom(usize),      // key
37 |     GetExistent(usize),    // seed
38 | }
39 | 
40 | prop_compose! {
 41 |     fn action_strategy()(tid in THREADS, kind in action_kind_strategy()) -> Action {
 42 |         Action { tid, kind }
 43 |     }
 44 | }
 45 | 
 46 | fn action_kind_strategy() -> impl Strategy<Value = ActionKind> {
 47 |     prop_oneof![
 48 |         1 => Just(ActionKind::Insert),
 49 |         1 => Just(ActionKind::VacantEntry),
 50 |         1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveRandom),
 51 |         1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveExistent),
 52 |         1 => prop::num::usize::ANY.prop_map(ActionKind::TakeRandom),
 53 |         1 => prop::num::usize::ANY.prop_map(ActionKind::TakeExistent),
 54 |         // Produce `GetRandom` and `GetExistent` more often.
 55 |         5 => prop::num::usize::ANY.prop_map(ActionKind::GetRandom),
 56 |         5 => prop::num::usize::ANY.prop_map(ActionKind::GetExistent),
 57 |     ]
 58 | }
 59 | 
 60 | /// Stores active entries (added and not yet removed).
 61 | #[derive(Default)]
 62 | struct Active {
 63 |     // Use `IndexMap` to preserve determinism.
 64 |     map: IndexMap<usize, u32>,
 65 |     prev_value: u32,
 66 | }
 67 | 
 68 | impl Active {
 69 |     fn next_value(&mut self) -> u32 {
 70 |         self.prev_value += 1;
 71 |         self.prev_value
 72 |     }
 73 | 
 74 |     fn get(&self, key: usize) -> Option<u32> {
 75 |         self.map.get(&key).copied()
 76 |     }
 77 | 
 78 |     fn get_any(&self, seed: usize) -> Option<(usize, u32)> {
 79 |         if self.map.is_empty() {
 80 |             return None;
 81 |         }
 82 | 
 83 |         let index = seed % self.map.len();
 84 |         self.map.get_index(index).map(|(k, v)| (*k, *v))
 85 |     }
 86 | 
 87 |     fn insert(&mut self, key: usize, value: u32) {
 88 |         assert_eq!(
 89 |             self.map.insert(key, value),
 90 |             None,
 91 |             "keys of active entries must be unique"
 92 |         );
 93 |     }
 94 | 
 95 |     fn remove(&mut self, key: usize) -> Option<u32> {
 96 |         self.map.swap_remove(&key)
 97 |     }
 98 | 
 99 |     fn remove_any(&mut self, seed: usize) -> Option<(usize, u32)> {
100 |         if self.map.is_empty() {
101 |             return None;
102 |         }
103 | 
104 |         let index = seed % self.map.len();
105 |         self.map.swap_remove_index(index)
106 |     }
107 | 
108 |     fn drain(&mut self) -> impl Iterator<Item = (usize, u32)> + '_ {
109 |         self.map.drain(..)
112 |
113 | fn used_bits<C: Config>(key: usize) -> usize {
114 |     assert_eq!(
115 |         C::RESERVED_BITS + Slab::<u32, C>::USED_BITS,
116 |         std::mem::size_of::<usize>() * 8
117 |     );
118 |     key & ((!0) >> C::RESERVED_BITS)
119 | }
120 |
121 | fn apply_action<C: Config>(
122 |     slab: &Arc<Slab<u32, C>>,
123 |     active: &mut Active,
124 |     action: ActionKind,
125 | ) -> Result<(), TestCaseError> {
126 |     match action {
127 |         ActionKind::Insert => {
128 |             let value = active.next_value();
129 |             let key = slab.insert(value).expect("unexpectedly exhausted slab");
130 |             prop_assert_eq!(used_bits::<C>(key), key);
131 |             active.insert(key, value);
132 |         }
133 |         ActionKind::VacantEntry => {
134 |             let value = active.next_value();
135 |             let entry = slab.vacant_entry().expect("unexpectedly exhausted slab");
136 |             let key = entry.key();
137 |             prop_assert_eq!(used_bits::<C>(key), key);
138 |             entry.insert(value);
139 |             active.insert(key, value);
140 |         }
141 |         ActionKind::RemoveRandom(key) => {
142 |             let used_key = used_bits::<C>(key);
143 |             prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
144 |             prop_assert_eq!(slab.remove(key), active.remove(used_key).is_some());
145 |         }
146 |         ActionKind::RemoveExistent(seed) => {
147 |             if let Some((key, _value)) = active.remove_any(seed) {
148 |                 prop_assert!(slab.contains(key));
149 |                 prop_assert!(slab.remove(key));
150 |             }
151 |         }
152 |         ActionKind::TakeRandom(key) => {
153 |             let used_key = used_bits::<C>(key);
154 |             prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
155 |             prop_assert_eq!(slab.take(key), active.remove(used_key));
156 |         }
157 |         ActionKind::TakeExistent(seed) => {
158 |             if let Some((key, value)) = active.remove_any(seed) {
159 |                 prop_assert!(slab.contains(key));
160 |                 prop_assert_eq!(slab.take(key), Some(value));
161 |             }
162 |         }
163 |         ActionKind::GetRandom(key) => {
164 |             let used_key = used_bits::<C>(key);
165 |             prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
166 |             prop_assert_eq!(slab.get(key).map(|e| *e), active.get(used_key));
167 |             prop_assert_eq!(
168 |                 slab.clone().get_owned(key).map(|e| *e),
169 |                 active.get(used_key)
170 |             );
171 |         }
172 |         ActionKind::GetExistent(seed) => {
173 |             if let Some((key, value)) = active.get_any(seed) {
174 |                 prop_assert!(slab.contains(key));
175 |                 prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
176 |                 prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
177 |             }
178 |         }
179 |     }
180 |
181 |     Ok(())
182 | }
183 |
184 | fn run<C: Config>(actions: Vec<Action>) -> Result<(), TestCaseError> {
185 |     let mut slab = Arc::new(Slab::new_with_config::<C>());
186 |     let mut active = Active::default();
187 |
188 |     // Apply all actions.
189 |     for action in actions {
190 |         // Override the TID for the current thread instead of using multiple real threads
191 |         // to preserve determinism. We're not checking concurrency issues here; they should
192 |         // be covered by loom tests anyway. Thus, it's fine to run all actions sequentially.
193 |         tid::with(action.tid, || {
194 |             apply_action::<C>(&slab, &mut active, action.kind)
195 |         })?;
196 |     }
197 |
198 |     // Ensure the slab contains all remaining entries.
199 |     let mut expected_values = Vec::new();
200 |     for (key, value) in active.drain() {
201 |         prop_assert!(slab.contains(key));
202 |         prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
203 |         prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
204 |         expected_values.push(value);
205 |     }
206 |     expected_values.sort();
207 |
208 |     // Ensure `unique_iter()` returns all remaining entries.
209 |     let slab = Arc::get_mut(&mut slab).unwrap();
210 |     let mut actual_values = slab.unique_iter().copied().collect::<Vec<_>>();
211 |     actual_values.sort();
212 |     prop_assert_eq!(actual_values, expected_values);
213 |
214 |     Ok(())
215 | }
216 |
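// Illustrative sketch, not part of the original suite: what `used_bits` does
// for the 64-bit `CustomConfig` defined at the bottom of this file
// (RESERVED_BITS = 24). It clears the reserved high bits of a key, and its
// internal assertion checks that reserved plus used bits account for the
// whole word. The test name and the sample key are hypothetical.
#[cfg(target_pointer_width = "64")]
#[test]
fn used_bits_masking_sketch() {
    let key = 0xABCD_EF01_2345_6789_usize;
    let masked = used_bits::<CustomConfig>(key);
    assert_eq!(masked, key & ((!0) >> 24)); // only the low 40 bits survive
    assert_eq!(masked >> (64 - 24), 0); // the 24 reserved bits are cleared
}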
217 | proptest! {
218 |     #[test]
219 |     fn default_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
220 |         run::<DefaultConfig>(actions)?;
221 |     }
222 |
223 |     #[test]
224 |     fn custom_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
225 |         run::<CustomConfig>(actions)?;
226 |     }
227 | }
228 |
229 | struct CustomConfig;
230 |
231 | #[cfg(target_pointer_width = "64")]
232 | impl Config for CustomConfig {
233 |     const INITIAL_PAGE_SIZE: usize = 32;
234 |     const MAX_PAGES: usize = 15;
235 |     const MAX_THREADS: usize = 256;
236 |     const RESERVED_BITS: usize = 24;
237 | }
238 | #[cfg(target_pointer_width = "32")]
239 | impl Config for CustomConfig {
240 |     const INITIAL_PAGE_SIZE: usize = 16;
241 |     const MAX_PAGES: usize = 6;
242 |     const MAX_THREADS: usize = 128;
243 |     const RESERVED_BITS: usize = 12;
244 | }
245 |
--------------------------------------------------------------------------------
/src/tid.rs:
--------------------------------------------------------------------------------
 1 | use crate::{
 2 |     cfg::{self, CfgPrivate},
 3 |     page,
 4 |     sync::{
 5 |         atomic::{AtomicUsize, Ordering},
 6 |         lazy_static, thread_local, Mutex,
 7 |     },
 8 |     Pack,
 9 | };
10 | use std::{
11 |     cell::{Cell, UnsafeCell},
12 |     collections::VecDeque,
13 |     fmt,
14 |     marker::PhantomData,
15 | };
16 |
17 | /// Uniquely identifies a thread.
18 | pub(crate) struct Tid<C> {
19 |     id: usize,
20 |     _not_send: PhantomData<UnsafeCell<()>>,
21 |     _cfg: PhantomData<fn(C)>,
22 | }
23 |
24 | #[derive(Debug)]
25 | struct Registration(Cell<Option<usize>>);
26 |
27 | struct Registry {
28 |     next: AtomicUsize,
29 |     free: Mutex<VecDeque<usize>>,
30 | }
31 |
32 | lazy_static! {
33 |     static ref REGISTRY: Registry = Registry {
34 |         next: AtomicUsize::new(0),
35 |         free: Mutex::new(VecDeque::new()),
36 |     };
37 | }
38 |
39 | thread_local! {
40 |     static REGISTRATION: Registration = Registration::new();
41 | }
42 |
43 | // === impl Tid ===
44 |
45 | impl<C: cfg::Config> Pack<C> for Tid<C> {
46 |     const LEN: usize = C::MAX_SHARDS.trailing_zeros() as usize + 1;
47 |
48 |     type Prev = page::Addr<C>;
49 |
50 |     #[inline(always)]
51 |     fn as_usize(&self) -> usize {
52 |         self.id
53 |     }
54 |
55 |     #[inline(always)]
56 |     fn from_usize(id: usize) -> Self {
57 |         Self {
58 |             id,
59 |             _not_send: PhantomData,
60 |             _cfg: PhantomData,
61 |         }
62 |     }
63 | }
64 |
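// Illustrative sketch, not part of the original file: the arithmetic behind
// `LEN` above. `MAX_SHARDS.trailing_zeros() + 1` reserves enough bits to
// represent every shard ID up to and including a power-of-two `MAX_SHARDS`.
// The module and test names, and the sample shard count, are hypothetical.
#[cfg(test)]
mod len_arithmetic_sketch {
    #[test]
    fn trailing_zeros_plus_one_covers_max_shards() {
        let max_shards = 256usize; // hypothetical power-of-two shard count
        let len = max_shards.trailing_zeros() as usize + 1;
        assert_eq!(len, 9);
        // 9 bits can represent every ID in 0..=256.
        assert!(max_shards < (1 << len));
    }
}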
 65 | impl<C: cfg::Config> Tid<C> {
 66 |     #[inline]
 67 |     pub(crate) fn current() -> Self {
 68 |         REGISTRATION
 69 |             .try_with(Registration::current)
 70 |             .unwrap_or_else(|_| Self::poisoned())
 71 |     }
 72 |
 73 |     pub(crate) fn is_current(self) -> bool {
 74 |         REGISTRATION
 75 |             .try_with(|r| self == r.current::<C>())
 76 |             .unwrap_or(false)
 77 |     }
 78 |
 79 |     #[inline(always)]
 80 |     pub fn new(id: usize) -> Self {
 81 |         Self::from_usize(id)
 82 |     }
 83 | }
 84 |
 85 | impl<C> Tid<C> {
 86 |     #[cold]
 87 |     fn poisoned() -> Self {
 88 |         Self {
 89 |             id: std::usize::MAX,
 90 |             _not_send: PhantomData,
 91 |             _cfg: PhantomData,
 92 |         }
 93 |     }
 94 |
 95 |     /// Returns true if the local thread ID was accessed while unwinding.
 96 |     pub(crate) fn is_poisoned(&self) -> bool {
 97 |         self.id == std::usize::MAX
 98 |     }
 99 | }
100 |
101 | impl<C> fmt::Debug for Tid<C> {
102 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
103 |         if self.is_poisoned() {
104 |             f.debug_tuple("Tid")
105 |                 .field(&format_args!("<poisoned>"))
106 |                 .finish()
107 |         } else {
108 |             f.debug_tuple("Tid")
109 |                 .field(&format_args!("{}", self.id))
110 |                 .finish()
111 |         }
112 |     }
113 | }
114 |
115 | impl<C> PartialEq for Tid<C> {
116 |     fn eq(&self, other: &Self) -> bool {
117 |         self.id == other.id
118 |     }
119 | }
120 |
121 | impl<C> Eq for Tid<C> {}
122 |
123 | impl<C> Clone for Tid<C> {
124 |     fn clone(&self) -> Self {
125 |         *self
126 |     }
127 | }
128 |
129 | impl<C> Copy for Tid<C> {}
130 |
131 | // === impl Registration ===
132 |
133 | impl Registration {
134 |     fn new() -> Self {
135 |         Self(Cell::new(None))
136 |     }
137 |
138 |     #[inline(always)]
139 |     fn current<C: cfg::Config>(&self) -> Tid<C> {
140 |         if let Some(tid) = self.0.get().map(Tid::new) {
141 |             return tid;
142 |         }
143 |
144 |         self.register()
145 |     }
146 |
147 |     #[cold]
148 |     fn register<C: cfg::Config>(&self) -> Tid<C> {
149 |         let id = REGISTRY
150 |             .free
151 |             .lock()
152 |             .ok()
153 |             .and_then(|mut free| {
154 |                 if free.len() > 1 {
155 |                     free.pop_front()
156 |                 } else {
157 |                     None
158 |                 }
159 |             })
160 |             .unwrap_or_else(|| {
161 |                 let id = REGISTRY.next.fetch_add(1, Ordering::AcqRel);
162 |                 if id > Tid::<C>::BITS {
163 |                     panic_in_drop!(
164 |                         "creating a new thread ID ({}) would exceed the \
165 |                          maximum number of thread ID bits specified in {} \
166 |                          ({})",
167 |                         id,
168 |                         std::any::type_name::<C>(),
169 |                         Tid::<C>::BITS,
170 |                     );
171 |                 }
172 |                 id
173 |             });
174 |
175 |         self.0.set(Some(id));
176 |         Tid::new(id)
177 |     }
178 | }
179 |
180 | // Reusing thread IDs doesn't work under loom, since this `Drop` impl results in
181 | // an access to a `loom` lazy_static while the test is shutting down, which
182 | // panics. T_T
183 | // Just skip TID reuse and use loom's lazy_static macro to ensure we have a
184 | // clean initial TID on every iteration, instead.
185 | #[cfg(not(all(loom, any(feature = "loom", test))))]
186 | impl Drop for Registration {
187 |     fn drop(&mut self) {
188 |         use std::sync::PoisonError;
189 |
190 |         if let Some(id) = self.0.get() {
191 |             let mut free_list = REGISTRY.free.lock().unwrap_or_else(PoisonError::into_inner);
192 |             free_list.push_back(id);
193 |         }
194 |     }
195 | }
196 |
197 | #[cfg(all(test, not(loom)))]
198 | pub(crate) fn with<R>(tid: usize, f: impl FnOnce() -> R) -> R {
199 |     struct Guard(Option<usize>);
200 |
201 |     impl Drop for Guard {
202 |         fn drop(&mut self) {
203 |             REGISTRATION.with(|r| r.0.set(self.0.take()));
204 |         }
205 |     }
206 |
207 |     let prev = REGISTRATION.with(|r| r.0.replace(Some(tid)));
208 |     let _guard = Guard(prev);
209 |     f()
210 | }
--------------------------------------------------------------------------------
/tests/reserved_bits_leak.rs:
--------------------------------------------------------------------------------
 1 | // Reproduces https://github.com/hawkw/sharded-slab/issues/83
 2 | use memory_stats::memory_stats;
 3 | use sharded_slab::Config;
 4 | use sharded_slab::Slab;
 5 |
 6 | struct CustomConfig;
 7 | impl Config for CustomConfig {
 8 |     const RESERVED_BITS: usize = 1; // This is the cause.
 9 | }
10 |
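// Illustrative sketch (hypothetical helpers, not part of this test): reserved
// bits exist so that the slab never sets the high `RESERVED_BITS` of a key,
// which the property tests elsewhere verify via `used_bits(key) == key`. An
// application can therefore pack its own tag into that space and strip it
// before handing the key back to the slab:
//
//     const TAG: usize = 1 << (std::mem::size_of::<usize>() * 8 - 1);
//     fn tag(key: usize) -> usize { key | TAG }
//     fn untag(key: usize) -> usize { key & !TAG }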
11 | #[test]
12 | fn reserved_bits_doesnt_leak() {
13 |     let slab = Slab::new_with_config::<CustomConfig>();
14 |     for n in 0..1000 {
15 |         let mem_before = memory_stats().unwrap();
16 |         let key = slab.insert(0).unwrap();
17 |         slab.remove(key);
18 |         let usage = memory_stats().unwrap();
19 |         eprintln!(
20 |             "n: {n:<4}\tkey: {key:#08x} rss: {:>16} vs: {:>16}",
21 |             usage.physical_mem, usage.virtual_mem
22 |         );
23 |
24 |         assert_eq!(mem_before.virtual_mem, usage.virtual_mem);
25 |     }
26 | }
27 |
--------------------------------------------------------------------------------