├── .github ├── codecov.yml ├── dependabot.yml └── workflows │ ├── check.yml │ ├── coverage.yml │ ├── safety.yml │ ├── scheduled.yml │ └── test.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── rustfmt.toml ├── src ├── aliasing.rs ├── lib.rs ├── read.rs ├── read │ ├── factory.rs │ └── guard.rs ├── sync.rs ├── utilities.rs └── write.rs └── tests ├── deque.rs └── loom.rs /.github/codecov.yml: -------------------------------------------------------------------------------- 1 | # ref: https://docs.codecov.com/docs/codecovyml-reference 2 | coverage: 3 | # Hold ourselves to a high bar 4 | range: 85..100 5 | round: down 6 | precision: 1 7 | status: 8 | # ref: https://docs.codecov.com/docs/commit-status 9 | project: 10 | default: 11 | # Avoid false negatives 12 | threshold: 1% 13 | 14 | # Test files aren't important for coverage 15 | ignore: 16 | - "tests" 17 | 18 | # Make comments less noisy 19 | comment: 20 | layout: "files" 21 | require_changes: yes 22 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: daily 7 | - package-ecosystem: cargo 8 | directory: / 9 | schedule: 10 | interval: daily 11 | ignore: 12 | - dependency-name: "*" 13 | # patch and minor updates don't matter for libraries 14 | # remove this ignore rule if your package has binaries 15 | update-types: 16 | - "version-update:semver-patch" 17 | - "version-update:semver-minor" 18 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | permissions: 2 | contents: read 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | # Spend CI time only on latest ref: 
https://github.com/jonhoo/rust-ci-conf/pull/5 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 10 | cancel-in-progress: true 11 | name: check 12 | jobs: 13 | fmt: 14 | runs-on: ubuntu-latest 15 | name: stable / fmt 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | submodules: true 20 | - name: Install stable 21 | uses: dtolnay/rust-toolchain@stable 22 | with: 23 | components: rustfmt 24 | - name: cargo fmt --check 25 | run: cargo fmt --check 26 | clippy: 27 | runs-on: ubuntu-latest 28 | name: ${{ matrix.toolchain }} / clippy 29 | permissions: 30 | contents: read 31 | checks: write 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | toolchain: [stable, beta] 36 | steps: 37 | - uses: actions/checkout@v4 38 | with: 39 | submodules: true 40 | - name: Install ${{ matrix.toolchain }} 41 | uses: dtolnay/rust-toolchain@master 42 | with: 43 | toolchain: ${{ matrix.toolchain }} 44 | components: clippy 45 | - name: cargo clippy 46 | uses: actions-rs/clippy-check@v1 47 | with: 48 | token: ${{ secrets.GITHUB_TOKEN }} 49 | doc: 50 | runs-on: ubuntu-latest 51 | name: nightly / doc 52 | steps: 53 | - uses: actions/checkout@v4 54 | with: 55 | submodules: true 56 | - name: Install nightly 57 | uses: dtolnay/rust-toolchain@nightly 58 | - name: cargo doc 59 | run: cargo doc --no-deps --all-features 60 | env: 61 | RUSTDOCFLAGS: --cfg docsrs 62 | hack: 63 | runs-on: ubuntu-latest 64 | name: ubuntu / stable / features 65 | steps: 66 | - uses: actions/checkout@v4 67 | with: 68 | submodules: true 69 | - name: Install stable 70 | uses: dtolnay/rust-toolchain@stable 71 | - name: cargo install cargo-hack 72 | uses: taiki-e/install-action@cargo-hack 73 | # intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4 74 | - name: cargo hack 75 | run: cargo hack --feature-powerset check 76 | msrv: 77 | runs-on: ubuntu-latest 78 | # we use a matrix here just because env can't be used in job names 79 | # 
https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability 80 | strategy: 81 | matrix: 82 | msrv: ["1.40.0"] 83 | name: ubuntu / ${{ matrix.msrv }} 84 | steps: 85 | - uses: actions/checkout@v4 86 | with: 87 | submodules: true 88 | - name: Install ${{ matrix.msrv }} 89 | uses: dtolnay/rust-toolchain@master 90 | with: 91 | toolchain: ${{ matrix.msrv }} 92 | - name: cargo +${{ matrix.msrv }} check 93 | run: cargo check 94 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [main] 4 | pull_request: 5 | name: coverage 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | container: 10 | image: xd009642/tarpaulin:develop-nightly 11 | options: --security-opt seccomp=unconfined 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Generate code coverage 15 | run: | 16 | cargo +nightly tarpaulin --verbose --all-features --workspace --timeout 120 --run-types doctests --run-types lib --run-types tests --out xml 17 | - name: Upload to codecov.io 18 | uses: codecov/codecov-action@v3 19 | with: 20 | fail_ci_if_error: true 21 | -------------------------------------------------------------------------------- /.github/workflows/safety.yml: -------------------------------------------------------------------------------- 1 | permissions: 2 | contents: read 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | # Spend CI time only on latest ref: https://github.com/jonhoo/rust-ci-conf/pull/5 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 10 | cancel-in-progress: true 11 | name: safety 12 | jobs: 13 | sanitizers: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | with: 18 | submodules: true 19 | - name: Install nightly 20 | uses: dtolnay/rust-toolchain@nightly 21 | - run: | 22 | # to get the symbolizer for 
debug symbol resolution 23 | sudo apt install llvm 24 | # to fix buggy leak analyzer: 25 | # https://github.com/japaric/rust-san#unrealiable-leaksanitizer 26 | # ensure there's a profile.dev section 27 | if ! grep -qE '^[ \t]*[profile.dev]' Cargo.toml; then 28 | echo >> Cargo.toml 29 | echo '[profile.dev]' >> Cargo.toml 30 | fi 31 | # remove pre-existing opt-levels in profile.dev 32 | sed -i '/^\s*\[profile.dev\]/,/^\s*\[/ {/^\s*opt-level/d}' Cargo.toml 33 | # now set opt-level to 1 34 | sed -i '/^\s*\[profile.dev\]/a opt-level = 1' Cargo.toml 35 | cat Cargo.toml 36 | name: Enable debug symbols 37 | - name: cargo test -Zsanitizer=address 38 | # only --lib --tests b/c of https://github.com/rust-lang/rust/issues/53945 39 | run: cargo test --lib --tests --all-features --target x86_64-unknown-linux-gnu 40 | env: 41 | ASAN_OPTIONS: "detect_odr_violation=0:detect_leaks=0" 42 | RUSTFLAGS: "-Z sanitizer=address" 43 | - name: cargo test -Zsanitizer=leak 44 | if: always() 45 | run: cargo test --all-features --target x86_64-unknown-linux-gnu 46 | env: 47 | LSAN_OPTIONS: "suppressions=lsan-suppressions.txt" 48 | RUSTFLAGS: "-Z sanitizer=leak" 49 | miri: 50 | runs-on: ubuntu-latest 51 | steps: 52 | - uses: actions/checkout@v4 53 | with: 54 | submodules: true 55 | - run: | 56 | echo "NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)" >> $GITHUB_ENV 57 | - name: Install ${{ env.NIGHTLY }} 58 | uses: dtolnay/rust-toolchain@master 59 | with: 60 | toolchain: ${{ env.NIGHTLY }} 61 | components: miri 62 | - name: cargo miri test 63 | run: cargo miri test 64 | env: 65 | MIRIFLAGS: "" 66 | loom: 67 | runs-on: ubuntu-latest 68 | steps: 69 | - uses: actions/checkout@v4 70 | with: 71 | submodules: true 72 | - name: Install stable 73 | uses: dtolnay/rust-toolchain@stable 74 | - name: cargo test --test loom 75 | run: cargo test --release --test loom 76 | env: 77 | LOOM_MAX_PREEMPTIONS: 3 78 | RUSTFLAGS: "--cfg loom" 79 | 
-------------------------------------------------------------------------------- /.github/workflows/scheduled.yml: -------------------------------------------------------------------------------- 1 | permissions: 2 | contents: read 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | schedule: 8 | - cron: '7 7 * * *' 9 | # Spend CI time only on latest ref: https://github.com/jonhoo/rust-ci-conf/pull/5 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 12 | cancel-in-progress: true 13 | name: rolling 14 | jobs: 15 | # https://twitter.com/mycoliza/status/1571295690063753218 16 | nightly: 17 | runs-on: ubuntu-latest 18 | name: ubuntu / nightly 19 | steps: 20 | - uses: actions/checkout@v4 21 | with: 22 | submodules: true 23 | - name: Install nightly 24 | uses: dtolnay/rust-toolchain@nightly 25 | - name: cargo generate-lockfile 26 | if: hashFiles('Cargo.lock') == '' 27 | run: cargo generate-lockfile 28 | - name: cargo test --locked 29 | run: cargo test --locked --all-features --all-targets 30 | # https://twitter.com/alcuadrado/status/1571291687837732873 31 | update: 32 | runs-on: ubuntu-latest 33 | name: ubuntu / beta / updated 34 | # There's no point running this if no Cargo.lock was checked in in the 35 | # first place, since we'd just redo what happened in the regular test job. 36 | # Unfortunately, hashFiles only works in if on steps, so we reepeat it. 
37 | # if: hashFiles('Cargo.lock') != '' 38 | steps: 39 | - uses: actions/checkout@v4 40 | with: 41 | submodules: true 42 | - name: Install beta 43 | if: hashFiles('Cargo.lock') != '' 44 | uses: dtolnay/rust-toolchain@beta 45 | - name: cargo update 46 | if: hashFiles('Cargo.lock') != '' 47 | run: cargo update 48 | - name: cargo test 49 | if: hashFiles('Cargo.lock') != '' 50 | run: cargo test --locked --all-features --all-targets 51 | env: 52 | RUSTFLAGS: -D deprecated 53 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | permissions: 2 | contents: read 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | # Spend CI time only on latest ref: https://github.com/jonhoo/rust-ci-conf/pull/5 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 10 | cancel-in-progress: true 11 | name: test 12 | jobs: 13 | required: 14 | runs-on: ubuntu-latest 15 | name: ubuntu / ${{ matrix.toolchain }} 16 | strategy: 17 | matrix: 18 | toolchain: [stable, beta] 19 | steps: 20 | - uses: actions/checkout@v4 21 | with: 22 | submodules: true 23 | - name: Install ${{ matrix.toolchain }} 24 | uses: dtolnay/rust-toolchain@master 25 | with: 26 | toolchain: ${{ matrix.toolchain }} 27 | - name: cargo generate-lockfile 28 | if: hashFiles('Cargo.lock') == '' 29 | run: cargo generate-lockfile 30 | # https://twitter.com/jonhoo/status/1571290371124260865 31 | - name: cargo test --locked 32 | run: cargo test --locked --all-features --all-targets 33 | # https://github.com/rust-lang/cargo/issues/6669 34 | - name: cargo test --doc 35 | run: cargo test --locked --all-features --doc 36 | minimal: 37 | runs-on: ubuntu-latest 38 | name: ubuntu / stable / minimal-versions 39 | steps: 40 | - uses: actions/checkout@v4 41 | with: 42 | submodules: true 43 | - name: Install stable 44 | uses: dtolnay/rust-toolchain@stable 45 | - 
name: Install nightly for -Zminimal-versions 46 | uses: dtolnay/rust-toolchain@nightly 47 | - name: rustup default stable 48 | run: rustup default stable 49 | - name: cargo update -Zminimal-versions 50 | run: cargo +nightly update -Zminimal-versions 51 | - name: cargo test 52 | run: cargo test --locked --all-features --all-targets 53 | os-check: 54 | runs-on: ${{ matrix.os }} 55 | name: ${{ matrix.os }} / stable 56 | strategy: 57 | fail-fast: false 58 | matrix: 59 | os: [macos-latest, windows-latest] 60 | steps: 61 | # if your project needs OpenSSL, uncommment this to fix Windows builds. 62 | # it's commented out by default as tthe install command takes 5-10m. 63 | # - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append 64 | # if: runner.os == 'Windows' 65 | # - run: vcpkg install openssl:x64-windows-static-md 66 | # if: runner.os == 'Windows' 67 | - uses: actions/checkout@v4 68 | with: 69 | submodules: true 70 | - name: Install stable 71 | uses: dtolnay/rust-toolchain@stable 72 | - name: cargo generate-lockfile 73 | if: hashFiles('Cargo.lock') == '' 74 | run: cargo generate-lockfile 75 | - name: cargo test 76 | run: cargo test --locked --all-features --all-targets 77 | # continue using our own coverage.yml for now to get doctest checking 78 | # https://github.com/taiki-e/cargo-llvm-cov/issues/2 79 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "ansi_term" 7 | version = "0.12.1" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" 10 | dependencies = [ 11 | "winapi", 12 | ] 13 | 14 | [[package]] 15 | name = "autocfg" 16 | version = "1.1.0" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 19 | 20 | [[package]] 21 | name = "cc" 22 | version = "1.0.73" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" 25 | 26 | [[package]] 27 | name = "cfg-if" 28 | version = "1.0.0" 29 | source = "registry+https://github.com/rust-lang/crates.io-index" 30 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 31 | 32 | [[package]] 33 | name = "generator" 34 | version = "0.7.1" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "cc184cace1cea8335047a471cc1da80f18acf8a76f3bab2028d499e328948ec7" 37 | dependencies = [ 38 | "cc", 39 | "libc", 40 | "log", 41 | "rustversion", 42 | "windows", 43 | ] 44 | 45 | [[package]] 46 | name = "lazy_static" 47 | version = "1.4.0" 48 | source = "registry+https://github.com/rust-lang/crates.io-index" 49 | checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" 50 | 51 | [[package]] 52 | name = "left-right" 53 | version = "0.11.5" 54 | dependencies = [ 55 | "loom", 56 | "slab", 57 | ] 58 | 59 | [[package]] 60 | name = "libc" 61 | version = "0.2.132" 62 | source = "registry+https://github.com/rust-lang/crates.io-index" 63 | checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" 64 | 65 | [[package]] 66 | name = "log" 67 | version = "0.4.17" 68 | source = "registry+https://github.com/rust-lang/crates.io-index" 69 | checksum = 
"abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" 70 | dependencies = [ 71 | "cfg-if", 72 | ] 73 | 74 | [[package]] 75 | name = "loom" 76 | version = "0.5.6" 77 | source = "registry+https://github.com/rust-lang/crates.io-index" 78 | checksum = "ff50ecb28bb86013e935fb6683ab1f6d3a20016f123c76fd4c27470076ac30f5" 79 | dependencies = [ 80 | "cfg-if", 81 | "generator", 82 | "scoped-tls", 83 | "tracing", 84 | "tracing-subscriber", 85 | ] 86 | 87 | [[package]] 88 | name = "matchers" 89 | version = "0.1.0" 90 | source = "registry+https://github.com/rust-lang/crates.io-index" 91 | checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" 92 | dependencies = [ 93 | "regex-automata", 94 | ] 95 | 96 | [[package]] 97 | name = "once_cell" 98 | version = "1.14.0" 99 | source = "registry+https://github.com/rust-lang/crates.io-index" 100 | checksum = "2f7254b99e31cad77da24b08ebf628882739a608578bb1bcdfc1f9c21260d7c0" 101 | 102 | [[package]] 103 | name = "pin-project-lite" 104 | version = "0.2.9" 105 | source = "registry+https://github.com/rust-lang/crates.io-index" 106 | checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" 107 | 108 | [[package]] 109 | name = "proc-macro2" 110 | version = "1.0.43" 111 | source = "registry+https://github.com/rust-lang/crates.io-index" 112 | checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab" 113 | dependencies = [ 114 | "unicode-ident", 115 | ] 116 | 117 | [[package]] 118 | name = "quote" 119 | version = "1.0.21" 120 | source = "registry+https://github.com/rust-lang/crates.io-index" 121 | checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" 122 | dependencies = [ 123 | "proc-macro2", 124 | ] 125 | 126 | [[package]] 127 | name = "regex" 128 | version = "1.6.0" 129 | source = "registry+https://github.com/rust-lang/crates.io-index" 130 | checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" 131 | dependencies = [ 132 | 
"regex-syntax", 133 | ] 134 | 135 | [[package]] 136 | name = "regex-automata" 137 | version = "0.1.10" 138 | source = "registry+https://github.com/rust-lang/crates.io-index" 139 | checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" 140 | dependencies = [ 141 | "regex-syntax", 142 | ] 143 | 144 | [[package]] 145 | name = "regex-syntax" 146 | version = "0.6.27" 147 | source = "registry+https://github.com/rust-lang/crates.io-index" 148 | checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" 149 | 150 | [[package]] 151 | name = "rustversion" 152 | version = "1.0.9" 153 | source = "registry+https://github.com/rust-lang/crates.io-index" 154 | checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" 155 | 156 | [[package]] 157 | name = "scoped-tls" 158 | version = "1.0.0" 159 | source = "registry+https://github.com/rust-lang/crates.io-index" 160 | checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" 161 | 162 | [[package]] 163 | name = "sharded-slab" 164 | version = "0.1.4" 165 | source = "registry+https://github.com/rust-lang/crates.io-index" 166 | checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" 167 | dependencies = [ 168 | "lazy_static", 169 | ] 170 | 171 | [[package]] 172 | name = "slab" 173 | version = "0.4.7" 174 | source = "registry+https://github.com/rust-lang/crates.io-index" 175 | checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" 176 | dependencies = [ 177 | "autocfg", 178 | ] 179 | 180 | [[package]] 181 | name = "smallvec" 182 | version = "1.9.0" 183 | source = "registry+https://github.com/rust-lang/crates.io-index" 184 | checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" 185 | 186 | [[package]] 187 | name = "syn" 188 | version = "1.0.99" 189 | source = "registry+https://github.com/rust-lang/crates.io-index" 190 | checksum = 
"58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13" 191 | dependencies = [ 192 | "proc-macro2", 193 | "quote", 194 | "unicode-ident", 195 | ] 196 | 197 | [[package]] 198 | name = "thread_local" 199 | version = "1.1.4" 200 | source = "registry+https://github.com/rust-lang/crates.io-index" 201 | checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" 202 | dependencies = [ 203 | "once_cell", 204 | ] 205 | 206 | [[package]] 207 | name = "tracing" 208 | version = "0.1.36" 209 | source = "registry+https://github.com/rust-lang/crates.io-index" 210 | checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307" 211 | dependencies = [ 212 | "cfg-if", 213 | "pin-project-lite", 214 | "tracing-attributes", 215 | "tracing-core", 216 | ] 217 | 218 | [[package]] 219 | name = "tracing-attributes" 220 | version = "0.1.22" 221 | source = "registry+https://github.com/rust-lang/crates.io-index" 222 | checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" 223 | dependencies = [ 224 | "proc-macro2", 225 | "quote", 226 | "syn", 227 | ] 228 | 229 | [[package]] 230 | name = "tracing-core" 231 | version = "0.1.29" 232 | source = "registry+https://github.com/rust-lang/crates.io-index" 233 | checksum = "5aeea4303076558a00714b823f9ad67d58a3bbda1df83d8827d21193156e22f7" 234 | dependencies = [ 235 | "once_cell", 236 | "valuable", 237 | ] 238 | 239 | [[package]] 240 | name = "tracing-log" 241 | version = "0.1.3" 242 | source = "registry+https://github.com/rust-lang/crates.io-index" 243 | checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" 244 | dependencies = [ 245 | "lazy_static", 246 | "log", 247 | "tracing-core", 248 | ] 249 | 250 | [[package]] 251 | name = "tracing-subscriber" 252 | version = "0.3.15" 253 | source = "registry+https://github.com/rust-lang/crates.io-index" 254 | checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" 255 | dependencies = [ 256 | 
"ansi_term", 257 | "matchers", 258 | "once_cell", 259 | "regex", 260 | "sharded-slab", 261 | "smallvec", 262 | "thread_local", 263 | "tracing", 264 | "tracing-core", 265 | "tracing-log", 266 | ] 267 | 268 | [[package]] 269 | name = "unicode-ident" 270 | version = "1.0.4" 271 | source = "registry+https://github.com/rust-lang/crates.io-index" 272 | checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd" 273 | 274 | [[package]] 275 | name = "valuable" 276 | version = "0.1.0" 277 | source = "registry+https://github.com/rust-lang/crates.io-index" 278 | checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" 279 | 280 | [[package]] 281 | name = "winapi" 282 | version = "0.3.9" 283 | source = "registry+https://github.com/rust-lang/crates.io-index" 284 | checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" 285 | dependencies = [ 286 | "winapi-i686-pc-windows-gnu", 287 | "winapi-x86_64-pc-windows-gnu", 288 | ] 289 | 290 | [[package]] 291 | name = "winapi-i686-pc-windows-gnu" 292 | version = "0.4.0" 293 | source = "registry+https://github.com/rust-lang/crates.io-index" 294 | checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 295 | 296 | [[package]] 297 | name = "winapi-x86_64-pc-windows-gnu" 298 | version = "0.4.0" 299 | source = "registry+https://github.com/rust-lang/crates.io-index" 300 | checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 301 | 302 | [[package]] 303 | name = "windows" 304 | version = "0.32.0" 305 | source = "registry+https://github.com/rust-lang/crates.io-index" 306 | checksum = "fbedf6db9096bc2364adce0ae0aa636dcd89f3c3f2cd67947062aaf0ca2a10ec" 307 | dependencies = [ 308 | "windows_aarch64_msvc", 309 | "windows_i686_gnu", 310 | "windows_i686_msvc", 311 | "windows_x86_64_gnu", 312 | "windows_x86_64_msvc", 313 | ] 314 | 315 | [[package]] 316 | name = "windows_aarch64_msvc" 317 | version = "0.32.0" 318 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 319 | checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" 320 | 321 | [[package]] 322 | name = "windows_i686_gnu" 323 | version = "0.32.0" 324 | source = "registry+https://github.com/rust-lang/crates.io-index" 325 | checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" 326 | 327 | [[package]] 328 | name = "windows_i686_msvc" 329 | version = "0.32.0" 330 | source = "registry+https://github.com/rust-lang/crates.io-index" 331 | checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" 332 | 333 | [[package]] 334 | name = "windows_x86_64_gnu" 335 | version = "0.32.0" 336 | source = "registry+https://github.com/rust-lang/crates.io-index" 337 | checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" 338 | 339 | [[package]] 340 | name = "windows_x86_64_msvc" 341 | version = "0.32.0" 342 | source = "registry+https://github.com/rust-lang/crates.io-index" 343 | checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" 344 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "left-right" 3 | version = "0.11.5" 4 | authors = ["Jon Gjengset "] 5 | edition = "2018" 6 | license = "MIT OR Apache-2.0" 7 | 8 | description = "A concurrency primitive for high concurrency reads over a single-writer data structure." 
9 | repository = "https://github.com/jonhoo/left-right.git" 10 | 11 | keywords = ["concurrency","lock-free"] 12 | categories = ["concurrency"] 13 | 14 | [dependencies] 15 | slab = "0.4.1" 16 | 17 | [target.'cfg(loom)'.dependencies] 18 | loom = "0.5.6" 19 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Jon Gjengset 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Codecov](https://codecov.io/github/jonhoo/left-right/coverage.svg?branch=main)](https://codecov.io/gh/jonhoo/left-right) 2 | [![Crates.io](https://img.shields.io/crates/v/left-right.svg)](https://crates.io/crates/left-right) 3 | [![Documentation](https://docs.rs/left-right/badge.svg)](https://docs.rs/left-right/) 4 | 5 | Left-right is a concurrency primitive for high concurrency reads over a 6 | single-writer data structure. The primitive keeps two copies of the 7 | backing data structure, one that is accessed by readers, and one that is 8 | accessed by the (single) writer. This enables all reads to proceed in 9 | parallel with minimal coordination, and shifts the coordination overhead 10 | to the writer. In the absence of writes, reads scale linearly with the 11 | number of cores. 12 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2018" 2 | -------------------------------------------------------------------------------- /src/aliasing.rs: -------------------------------------------------------------------------------- 1 | //! Types that aid in aliasing values across the two left-right data copies. 2 | //! 3 | //! This module primarily revolves around the [`Aliased`] type, and its associated [`DropBehavior`] 4 | //! trait. The basic flow of using it is going to go as follows. 5 | //! 6 | //! 
In general, each value in your data structure should be stored wrapped in an `Aliased`, with an 7 | //! associated type `D` that has `DropBehavior::DO_DROP` set to `false`. In 8 | //! [`Absorb::absorb_first`], you then simply drop any removed `Aliased` as normal. The 9 | //! backing `T` will not be dropped. 10 | //! 11 | //! In [`Absorb::absorb_second`], you first cast your datastructure from 12 | //! 13 | //! ```rust,ignore 14 | //! &mut DataStructure> 15 | //! ``` 16 | //! 17 | //! to 18 | //! 19 | //! ```rust,ignore 20 | //! &mut DataStructure> 21 | //! ``` 22 | //! 23 | //! where `::DO_DROP` is `true`. This time, any `Aliased` that you drop 24 | //! _will_ drop the inner `T`, but this should be safe since the only other alias was dropped in 25 | //! `absorb_first`. This is where the invariant that `absorb_*` is deterministic becomes extremely 26 | //! important! 27 | //! 28 | //! Sounds nice enough, right? Well, you have to be _really_ careful when working with this type. 29 | //! There are two primary things to watch out for: 30 | //! 31 | //! ## Mismatched dropping 32 | //! 33 | //! If `absorb_first` and `absorb_second` do not drop _exactly_ the same aliased values for a given 34 | //! operation from the oplog, unsoundness ensues. Specifically, what will happen is that 35 | //! `absorb_first` does _not_ drop some aliased `t: T`, but `absorb_second` _does_. Since 36 | //! `absorb_second` _assumes_ that `t` no longer has any alises (it expects that `absorb_first` got 37 | //! rid of such an alias), it will drop the `T`. But that `T` is still in the "other" data copy, 38 | //! and may still get accessed by readers, who will then be accessing a dropped value, which is 39 | //! unsound. 40 | //! 41 | //! While it might seem like it is easy to ensure that `absorb_first` and `absorb_second` do the 42 | //! same thing, it is not. A good example of this is non-deterministic (likely malicious) 43 | //! 
implementations of traits that you'd _expect_ to be deterministic like `Hash` or `Eq`. Imagine 44 | //! someone writes an implementation like: 45 | //! 46 | //! ```rust 47 | //! use std::sync::atomic::{AtomicBool, Ordering::SeqCst}; 48 | //! static SNEAKY: AtomicBool = AtomicBool::new(false); 49 | //! 50 | //! #[derive(Eq, Hash)] 51 | //! struct Sneaky(Vec); 52 | //! impl PartialEq for Sneaky { 53 | //! fn eq(&self, other: &Self) -> bool { 54 | //! if SNEAKY.swap(false, SeqCst) { 55 | //! false 56 | //! } else { 57 | //! self.0 == other.0 58 | //! } 59 | //! } 60 | //! } 61 | //! ``` 62 | //! 63 | //! Will your `absorb_*` calls still do the same thing? If the answer is no, then your 64 | //! datastructure is unsound. 65 | //! 66 | //! Every datastructure is different, so it is difficult to give good advice on how to achieve 67 | //! determinism. My general advice is to never call user-defined methods in `absorb_second`. Call 68 | //! them in `absorb_first`, and use the `&mut O` to stash the results in the oplog itself. That 69 | //! way, in `absorb_second`, you can use those cached values instead. This may be hard to pull off 70 | //! for complicated datastructures, but it does tend to guarantee determinism. 71 | //! 72 | //! If that is unrealistic, mark the constructor for your data structure as `unsafe`, with a safety 73 | //! comment that says that the inner types must have deterministic implementations of certain 74 | //! traits. It's not ideal, but hopefully consumers _know_ what types they are using your 75 | //! datastructures with, and will be able to check that their implementations are indeed not 76 | //! malicious. 77 | //! 78 | //! ## Unsafe casting 79 | //! 80 | //! The instructions above say to cast from 81 | //! 82 | //! ```rust,ignore 83 | //! &mut DataStructure> 84 | //! ``` 85 | //! 86 | //! to 87 | //! 88 | //! ```rust,ignore 89 | //! &mut DataStructure> 90 | //! ``` 91 | //! 92 | //! That cast is unsafe, and rightly so! 
While it is _likely_ that the cast is safe, that is far 93 | //! from obvious, and it's worth spending some time on why, since it has implications for how you 94 | //! use `Aliased` in your own crate. 95 | //! 96 | //! The cast is only sound if the two types are laid out exactly the same in memory, but that is 97 | //! harder to guarantee than you might expect. The Rust compiler does not guarantee that 98 | //! `A>` and `A` are laid out the same in memory for any arbitrary `A`, _even_ if 99 | //! both `A` and `Aliased` are `#[repr(transparent)]`. The _primary_ reason for this is associated 100 | //! types. Imagine that I write this code: 101 | //! 102 | //! ```rust,ignore 103 | //! trait Wonky { type Weird; } 104 | //! struct A(T, T::Weird); 105 | //! 106 | //! impl Wonky for Aliased { type Weird = u32; } 107 | //! impl Wonky for T { type Weird = u16; } 108 | //! ``` 109 | //! 110 | //! Clearly, these types will end up with different memory layouts, since one will contain a 111 | //! `u32` and the other a `u16` (let's ignore the fact that this particular example requires 112 | //! specialization). This, in turn, means that it is _not_ generally safe to transmute between a 113 | //! wrapper around one type and that same wrapper around a different type with the same layout! You 114 | //! can see this discussed in far more detail here if you're curious: 115 | //! 116 | //! 117 | //! 118 | //! Now, if we can find a way to _guarantee_ that the types have the same layout, this problem 119 | //! changes, but how might we go about this? Our saving grace is that we are casting between 120 | //! `A>` and `A>` where we control both `D` and `D2`. If we ensure 121 | //! that both those types are private, there is no way for any code external to your crate can 122 | //! implement a trait for one type but not the other. And thus there's no way (that I know of) for 123 | //! making it unsound to cast between the types! 124 | //! 125 | //! 
Now, I only say that this is likely sound because the language does not actually give 126 | //! this as a _guarantee_ at the moment. Though wiser minds [seem to suggest that this might 127 | //! be okay](https://github.com/rust-lang/unsafe-code-guidelines/issues/35#issuecomment-735858397). 128 | //! 129 | //! But this warrants repeating: **your `D` types for `Aliased` _must_ be private**. 130 | 131 | use std::marker::PhantomData; 132 | use std::mem::MaybeUninit; 133 | use std::ops::Deref; 134 | 135 | // Just to make the doc comment linking work. 136 | #[allow(unused_imports)] 137 | use crate::Absorb; 138 | 139 | /// Dictates the dropping behavior for the implementing type when used with [`Aliased`]. 140 | pub trait DropBehavior { 141 | /// An [`Aliased`](Aliased) will drop its inner `T` if and only if `D::DO_DROP` is `true` 142 | const DO_DROP: bool; 143 | } 144 | 145 | /// A `T` that is aliased. 146 | /// 147 | /// You should be able to mostly ignore this type, as it can generally be treated exactly like a 148 | /// `&T`. However, there are some minor exceptions around forwarding traits -- since `Aliased` is a 149 | /// wrapper type around `T`, it cannot automatically forward traits it does not know about to `&T`. 150 | /// This means that if your `&T` implements, say, `Serialize` or some custom `Borrow`, 151 | /// `Aliased` will not implement that same trait. You can work around this either by 152 | /// implementing your trait specifically for `Aliased` where possible, or by manually 153 | /// dereferencing to get the `&T` before using the trait. 154 | #[repr(transparent)] 155 | pub struct Aliased 156 | where 157 | D: DropBehavior, 158 | { 159 | aliased: MaybeUninit, 160 | 161 | drop_behavior: PhantomData, 162 | 163 | // We cannot implement Send just because T is Send since we're aliasing it. 164 | _no_auto_send: PhantomData<*const T>, 165 | } 166 | 167 | impl Aliased 168 | where 169 | D: DropBehavior, 170 | { 171 | /// Create an alias of the inner `T`. 
172 | /// 173 | /// # Safety 174 | /// 175 | /// This method is only safe to call as long as you ensure that the alias is never used after 176 | /// an `Aliased` of `self` where `D::DO_DROP` is `true` is dropped, **and** as long 177 | /// as no `&mut T` is ever given out while some `Aliased` may still be used. The returned 178 | /// type assumes that it is always safe to dereference into `&T`, which would not be true if 179 | /// either of those invariants were broken. 180 | pub unsafe fn alias(&self) -> Self { 181 | // safety: 182 | // We are aliasing T here, but it is okay because: 183 | // a) the T is behind a MaybeUninit, and so will cannot be accessed safely; and 184 | // b) we only expose _either_ &T while aliased, or &mut after the aliasing ends. 185 | Aliased { 186 | aliased: std::ptr::read(&self.aliased), 187 | drop_behavior: PhantomData, 188 | _no_auto_send: PhantomData, 189 | } 190 | } 191 | 192 | /// Construct an aliased value around a `T`. 193 | /// 194 | /// This method is safe since it effectively leaks `T`. Note that we do not implement `From` 195 | /// because we do not want users to construct `Aliased`s on their own. If they did, they 196 | /// would almost certain end up with incorrect drop behavior. 197 | pub fn from(t: T) -> Self { 198 | Self { 199 | aliased: MaybeUninit::new(t), 200 | drop_behavior: PhantomData, 201 | _no_auto_send: PhantomData, 202 | } 203 | } 204 | 205 | /// Turn this aliased `T` into one with a different drop behavior. 206 | /// 207 | /// # Safety 208 | /// 209 | /// It is always safe to change an `Aliased` from a dropping `D` to a non-dropping `D`. Going 210 | /// the other way around is only safe if `self` is the last alias for the `T`. 
211 | pub unsafe fn change_drop(self) -> Aliased { 212 | Aliased { 213 | // safety: 214 | aliased: std::ptr::read(&self.aliased), 215 | drop_behavior: PhantomData, 216 | _no_auto_send: PhantomData, 217 | } 218 | } 219 | } 220 | 221 | // Aliased gives &T across threads if shared or sent across thread boundaries. 222 | // Aliased gives &mut T across threads (for drop) if sent across thread boundaries. 223 | // This implies that we are only Send if T is Send+Sync, and Sync if T is Sync. 224 | // 225 | // Note that these bounds are stricter than what the compiler would auto-generate for the type. 226 | unsafe impl Send for Aliased 227 | where 228 | D: DropBehavior, 229 | T: Send + Sync, 230 | { 231 | } 232 | unsafe impl Sync for Aliased 233 | where 234 | D: DropBehavior, 235 | T: Sync, 236 | { 237 | } 238 | 239 | impl Drop for Aliased 240 | where 241 | D: DropBehavior, 242 | { 243 | fn drop(&mut self) { 244 | if D::DO_DROP { 245 | // safety: 246 | // MaybeUninit was created from a valid T. 247 | // That T has not been dropped (getting a Aliased is unsafe). 248 | // T is no longer aliased (by the safety assumption of getting a Aliased), 249 | // so we are allowed to re-take ownership of the T. 250 | unsafe { std::ptr::drop_in_place(self.aliased.as_mut_ptr()) } 251 | } 252 | } 253 | } 254 | 255 | impl AsRef for Aliased 256 | where 257 | D: DropBehavior, 258 | { 259 | fn as_ref(&self) -> &T { 260 | // safety: 261 | // MaybeUninit was created from a valid T. 262 | // That T has not been dropped (getting a Aliased is unsafe). 263 | // All we have done to T is alias it. But, since we only give out &T 264 | // (which should be legal anyway), we're fine. 
265 | unsafe { &*self.aliased.as_ptr() } 266 | } 267 | } 268 | 269 | impl Deref for Aliased 270 | where 271 | D: DropBehavior, 272 | { 273 | type Target = T; 274 | fn deref(&self) -> &Self::Target { 275 | self.as_ref() 276 | } 277 | } 278 | 279 | use std::hash::{Hash, Hasher}; 280 | impl Hash for Aliased 281 | where 282 | D: DropBehavior, 283 | T: Hash, 284 | { 285 | fn hash(&self, state: &mut H) 286 | where 287 | H: Hasher, 288 | { 289 | self.as_ref().hash(state) 290 | } 291 | } 292 | 293 | use std::fmt; 294 | impl fmt::Debug for Aliased 295 | where 296 | D: DropBehavior, 297 | T: fmt::Debug, 298 | { 299 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 300 | self.as_ref().fmt(f) 301 | } 302 | } 303 | 304 | impl PartialEq for Aliased 305 | where 306 | D: DropBehavior, 307 | T: PartialEq, 308 | { 309 | fn eq(&self, other: &Self) -> bool { 310 | self.as_ref().eq(other.as_ref()) 311 | } 312 | } 313 | 314 | impl Eq for Aliased 315 | where 316 | D: DropBehavior, 317 | T: Eq, 318 | { 319 | } 320 | 321 | impl PartialOrd for Aliased 322 | where 323 | D: DropBehavior, 324 | T: PartialOrd, 325 | { 326 | fn partial_cmp(&self, other: &Self) -> Option { 327 | self.as_ref().partial_cmp(other.as_ref()) 328 | } 329 | 330 | fn lt(&self, other: &Self) -> bool { 331 | self.as_ref().lt(other.as_ref()) 332 | } 333 | 334 | fn le(&self, other: &Self) -> bool { 335 | self.as_ref().le(other.as_ref()) 336 | } 337 | 338 | fn gt(&self, other: &Self) -> bool { 339 | self.as_ref().gt(other.as_ref()) 340 | } 341 | 342 | fn ge(&self, other: &Self) -> bool { 343 | self.as_ref().ge(other.as_ref()) 344 | } 345 | } 346 | 347 | impl Ord for Aliased 348 | where 349 | D: DropBehavior, 350 | T: Ord, 351 | { 352 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 353 | self.as_ref().cmp(other.as_ref()) 354 | } 355 | } 356 | 357 | use std::borrow::Borrow; 358 | impl Borrow for Aliased 359 | where 360 | D: DropBehavior, 361 | { 362 | fn borrow(&self) -> &T { 363 | self.as_ref() 364 | } 365 | } 
366 | // What we _really_ want here is: 367 | // ``` 368 | // impl Borrow for Aliased 369 | // where 370 | // T: Borrow, 371 | // { 372 | // fn borrow(&self) -> &U { 373 | // self.as_ref().borrow() 374 | // } 375 | // } 376 | // ``` 377 | // But unfortunately that won't work due to trait coherence. 378 | // Instead, we manually write the nice Borrow impls from std. 379 | // This won't help with custom Borrow impls, but gets you pretty far. 380 | impl Borrow for Aliased 381 | where 382 | D: DropBehavior, 383 | { 384 | fn borrow(&self) -> &str { 385 | self.as_ref() 386 | } 387 | } 388 | impl Borrow for Aliased 389 | where 390 | D: DropBehavior, 391 | { 392 | fn borrow(&self) -> &std::path::Path { 393 | self.as_ref() 394 | } 395 | } 396 | impl Borrow<[T]> for Aliased, D> 397 | where 398 | D: DropBehavior, 399 | { 400 | fn borrow(&self) -> &[T] { 401 | self.as_ref() 402 | } 403 | } 404 | impl Borrow for Aliased, D> 405 | where 406 | T: ?Sized, 407 | D: DropBehavior, 408 | { 409 | fn borrow(&self) -> &T { 410 | self.as_ref() 411 | } 412 | } 413 | impl Borrow for Aliased, D> 414 | where 415 | T: ?Sized, 416 | D: DropBehavior, 417 | { 418 | fn borrow(&self) -> &T { 419 | self.as_ref() 420 | } 421 | } 422 | impl Borrow for Aliased, D> 423 | where 424 | T: ?Sized, 425 | D: DropBehavior, 426 | { 427 | fn borrow(&self) -> &T { 428 | self.as_ref() 429 | } 430 | } 431 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A concurrency primitive for high concurrency reads over a single-writer data structure. 2 | //! 3 | //! The primitive keeps two copies of the backing data structure, one that is accessed by readers, 4 | //! and one that is accessed by the (single) writer. This enables all reads to proceed in parallel 5 | //! with minimal coordination, and shifts the coordination overhead to the writer. In the absence 6 | //! 
of writes, reads scale linearly with the number of cores. 7 | //! 8 | //! When the writer wishes to expose new changes to the datastructure (see 9 | //! [`WriteHandle::publish`]), it "flips" the two copies so that subsequent reads go to the old 10 | //! "write side", and future writers go to the old "read side". This process does cause two cache 11 | //! line invalidations for the readers, but does not stop them from making progress (i.e., reads 12 | //! are wait-free). 13 | //! 14 | //! In order to keep both copies up to date, left-right keeps an operational log ("oplog") of all 15 | //! the modifications to the data structure, which it uses to bring the old read data up to date 16 | //! with the latest writes on a flip. Since there are two copies of the data, each oplog entry is 17 | //! applied twice: once to the write copy and again to the (stale) read copy. 18 | //! 19 | //! # Trade-offs 20 | //! 21 | //! Few concurrency wins come for free, and this one is no exception. The drawbacks of this 22 | //! primitive are: 23 | //! 24 | //! - **Increased memory use**: since we keep two copies of the backing data structure, we are 25 | //! effectively doubling the memory use of the underlying data. With some clever de-duplication, 26 | //! this cost can be ameliorated to some degree, but it's something to be aware of. Furthermore, 27 | //! if writers only call `publish` infrequently despite adding many writes to the operational log, 28 | //! the operational log itself may grow quite large, which adds additional overhead. 29 | //! - **Deterministic operations**: as the entries in the operational log are applied twice, once 30 | //! to each copy of the data, it is essential that the operations are deterministic. If they are 31 | //! not, the two copies will no longer mirror one another, and will continue to diverge over time. 32 | //! - **Single writer**: left-right only supports a single writer. To have multiple writers, you 33 | //! 
need to ensure exclusive access to the [`WriteHandle`] through something like a 34 | //! [`Mutex`](std::sync::Mutex). 35 | //! - **Slow writes**: Writes through left-right are slower than they would be directly against 36 | //! the backing datastructure. This is both because they have to go through the operational log, 37 | //! and because they must each be applied twice. 38 | //! 39 | //! # How does it work? 40 | //! 41 | //! Take a look at [this YouTube video](https://www.youtube.com/watch?v=eLNAMEoKAAc) which goes 42 | //! through the basic concurrency algorithm, as well as the initial development of this library. 43 | //! Alternatively, there's a shorter (but also less complete) description in [this 44 | //! talk](https://www.youtube.com/watch?v=s19G6n0UjsM&t=1994). 45 | //! 46 | //! At a glance, left-right is implemented using two regular `T`s, an operational log, epoch 47 | //! counting, and some pointer magic. There is a single pointer through which all readers go. It 48 | //! points to a `T` that the readers access in order to read data. Every time a read has accessed 49 | //! the pointer, they increment a local epoch counter, and they update it again when they have 50 | //! finished the read. When a write occurs, the writer updates the other `T` (for which there are 51 | //! no readers), and also stores a copy of the change in a log. When [`WriteHandle::publish`] is 52 | //! called, the writer, atomically swaps the reader pointer to point to the other `T`. It then 53 | //! waits for the epochs of all current readers to change, and then replays the operational log to 54 | //! bring the stale copy up to date. 55 | //! 56 | //! The design resembles this [left-right concurrency 57 | //! scheme](https://hal.archives-ouvertes.fr/hal-01207881/document) from 2015, though I am not 58 | //! aware of any follow-up to that work. 59 | //! 60 | //! # How do I use it? 61 | //! 62 | //! 
If you just want a data structure for fast reads, you likely want to use a crate that _uses_ 63 | //! this crate, like [`evmap`](https://docs.rs/evmap/). If you want to develop such a crate 64 | //! yourself, here's what you do: 65 | //! 66 | //! ```rust 67 | //! use left_right::{Absorb, ReadHandle, WriteHandle}; 68 | //! 69 | //! // First, define an operational log type. 70 | //! // For most real-world use-cases, this will be an `enum`, but we'll keep it simple: 71 | //! struct CounterAddOp(i32); 72 | //! 73 | //! // Then, implement the unsafe `Absorb` trait for your data structure type, 74 | //! // and provide the oplog type as the generic argument. 75 | //! // You can read this as "`i32` can absorb changes of type `CounterAddOp`". 76 | //! impl Absorb for i32 { 77 | //! // See the documentation of `Absorb::absorb_first`. 78 | //! // 79 | //! // Essentially, this is where you define what applying 80 | //! // the oplog type to the datastructure does. 81 | //! fn absorb_first(&mut self, operation: &mut CounterAddOp, _: &Self) { 82 | //! *self += operation.0; 83 | //! } 84 | //! 85 | //! // See the documentation of `Absorb::absorb_second`. 86 | //! // 87 | //! // This may or may not be the same as `absorb_first`, 88 | //! // depending on whether or not you de-duplicate values 89 | //! // across the two copies of your data structure. 90 | //! fn absorb_second(&mut self, operation: CounterAddOp, _: &Self) { 91 | //! *self += operation.0; 92 | //! } 93 | //! 94 | //! // See the documentation of `Absorb::drop_first`. 95 | //! fn drop_first(self: Box) {} 96 | //! 97 | //! fn sync_with(&mut self, first: &Self) { 98 | //! *self = *first 99 | //! } 100 | //! } 101 | //! 102 | //! // Now, you can construct a new left-right over an instance of your data structure. 103 | //! // This will give you a `WriteHandle` that accepts writes in the form of oplog entries, 104 | //! // and a (cloneable) `ReadHandle` that gives you `&` access to the data structure. 105 | //! 
let (write, read) = left_right::new::<i32, CounterAddOp>(); 106 | //! 107 | //! // You will likely want to embed these handles in your own types so that you can 108 | //! // provide more ergonomic methods for performing operations on your type. 109 | //! struct Counter(WriteHandle<i32, CounterAddOp>); 110 | //! impl Counter { 111 | //! // The methods on your write handle type will likely all just add to the operational log. 112 | //! pub fn add(&mut self, i: i32) { 113 | //! self.0.append(CounterAddOp(i)); 114 | //! } 115 | //! 116 | //! // You should also provide a method for exposing the results of any pending operations. 117 | //! // 118 | //! // Until this is called, any writes made since the last call to `publish` will not be 119 | //! // visible to readers. See `WriteHandle::publish` for more details. Make sure to call 120 | //! // this out in _your_ documentation as well, so that your users will be aware of this 121 | //! // "weird" behavior. 122 | //! pub fn publish(&mut self) { 123 | //! self.0.publish(); 124 | //! } 125 | //! } 126 | //! 127 | //! // Similarly, for reads: 128 | //! #[derive(Clone)] 129 | //! struct CountReader(ReadHandle<i32>); 130 | //! impl CountReader { 131 | //! pub fn get(&self) -> i32 { 132 | //! // The `ReadHandle` itself does not allow you to access the underlying data. 133 | //! // Instead, you must first "enter" the data structure. This is similar to 134 | //! // taking a `Mutex`, except that no lock is actually taken. When you enter, 135 | //! // you are given back a guard, which gives you shared access (through the 136 | //! // `Deref` trait) to the "read copy" of the data structure. 137 | //! // 138 | //! // Note that `enter` may yield `None`, which implies that the `WriteHandle` 139 | //! // was dropped, and took the backing data down with it. 140 | //! // 141 | //! // Note also that for as long as the guard lives, a writer that tries to 142 | //! // call `WriteHandle::publish` will be blocked from making progress. 143 | //!
self.0.enter().map(|guard| *guard).unwrap_or(0) 144 | //! } 145 | //! } 146 | //! 147 | //! // These wrapper types are likely what you'll give out to your consumers. 148 | //! let (mut w, r) = (Counter(write), CountReader(read)); 149 | //! 150 | //! // They can then use the type fairly ergonomically: 151 | //! assert_eq!(r.get(), 0); 152 | //! w.add(1); 153 | //! // no call to publish, so read side remains the same: 154 | //! assert_eq!(r.get(), 0); 155 | //! w.publish(); 156 | //! assert_eq!(r.get(), 1); 157 | //! drop(w); 158 | //! // writer dropped data, so reads yield fallback value: 159 | //! assert_eq!(r.get(), 0); 160 | //! ``` 161 | //! 162 | //! One additional noteworthy detail: much like with `Mutex`, `RwLock`, and `RefCell` from the 163 | //! standard library, the values you dereference out of a `ReadGuard` are tied to the lifetime of 164 | //! that `ReadGuard`. This can make it awkward to write ergonomic methods on the read handle that 165 | //! return references into the underlying data, and may tempt you to clone the data out or take a 166 | //! closure instead. Instead, consider using [`ReadGuard::map`] and [`ReadGuard::try_map`], which 167 | //! (like `RefCell`'s [`Ref::map`](std::cell::Ref::map)) allow you to provide a guarded reference 168 | //! deeper into your data structure. 169 | #![warn( 170 | missing_docs, 171 | rust_2018_idioms, 172 | missing_debug_implementations, 173 | broken_intra_doc_links 174 | )] 175 | #![allow(clippy::type_complexity)] 176 | 177 | mod sync; 178 | 179 | use crate::sync::{Arc, AtomicUsize, Mutex}; 180 | 181 | type Epochs = Arc>>>; 182 | 183 | mod write; 184 | pub use crate::write::Taken; 185 | pub use crate::write::WriteHandle; 186 | 187 | mod read; 188 | pub use crate::read::{ReadGuard, ReadHandle, ReadHandleFactory}; 189 | 190 | pub mod aliasing; 191 | 192 | /// Types that can incorporate operations of type `O`. 
193 | /// 194 | /// This trait allows `left-right` to keep the two copies of the underlying data structure (see the 195 | /// [crate-level documentation](crate)) the same over time. Each write operation to the data 196 | /// structure is logged as an operation of type `O` in an _operational log_ (oplog), and is applied 197 | /// once to each copy of the data. 198 | /// 199 | /// Implementations should ensure that the absorption of each `O` is deterministic. That is, if 200 | /// two instances of the implementing type are initially equal, and then absorb the same `O`, 201 | /// they should remain equal afterwards. If this is not the case, the two copies will drift apart 202 | /// over time, and hold different values. 203 | /// 204 | /// The trait provides separate methods for the first and second absorption of each `O`. For many 205 | /// implementations, these will be the same (which is why `absorb_second` defaults to calling 206 | /// `absorb_first`), but not all. In particular, some implementations may need to modify the `O` to 207 | /// ensure deterministic results when it is applied to the second copy. Or, they may need to 208 | /// ensure that removed values in the data structure are only dropped when they are removed from 209 | /// _both_ copies, in case they alias the backing data to save memory. 210 | /// 211 | /// For the same reason, `Absorb` allows implementors to define `drop_first`, which is used to drop 212 | /// the first of the two copies. In this case, de-duplicating implementations may need to forget 213 | /// values rather than drop them so that they are not dropped twice when the second copy is 214 | /// dropped. 215 | pub trait Absorb<O> { 216 | /// Apply `O` to the first of the two copies. 217 | /// 218 | /// `other` is a reference to the other copy of the data, which has seen all operations up 219 | /// until the previous call to [`WriteHandle::publish`]. That is, `other` is one "publish 220 | /// cycle" behind.
221 | fn absorb_first(&mut self, operation: &mut O, other: &Self); 222 | 223 | /// Apply `O` to the second of the two copies. 224 | /// 225 | /// `other` is a reference to the other copy of the data, which has seen all operations up to 226 | /// the call to [`WriteHandle::publish`] that initially exposed this `O`. That is, `other` is 227 | /// one "publish cycle" ahead. 228 | /// 229 | /// Note that this method should modify the underlying data in _exactly_ the same way as 230 | /// `O` modified `other`, otherwise the two copies will drift apart. Be particularly mindful of 231 | /// non-deterministic implementations of traits that are often assumed to be deterministic 232 | /// (like `Eq` and `Hash`), and of "hidden states" that subtly affect results like the 233 | /// `RandomState` of a `HashMap` which can change iteration order. 234 | /// 235 | /// Defaults to calling `absorb_first`. 236 | fn absorb_second(&mut self, mut operation: O, other: &Self) { 237 | Self::absorb_first(self, &mut operation, other) 238 | } 239 | 240 | /// Drop the first of the two copies. 241 | /// 242 | /// Defaults to calling `Self::drop`. 243 | #[allow(clippy::boxed_local)] 244 | fn drop_first(self: Box<Self>) {} 245 | 246 | /// Drop the second of the two copies. 247 | /// 248 | /// Defaults to calling `Self::drop`. 249 | #[allow(clippy::boxed_local)] 250 | fn drop_second(self: Box<Self>) {} 251 | 252 | /// Sync the data from `first` into `self`. 253 | /// 254 | /// To improve initialization performance, before the first call to `publish` changes aren't 255 | /// added to the internal oplog, but applied to the first copy directly using `absorb_second`. 256 | /// The first `publish` then calls `sync_with` instead of `absorb_second`. 257 | /// 258 | /// `sync_with` should ensure that `self`'s state exactly matches that of `first` after it 259 | /// returns.
Be particularly mindful of non-deterministic implementations of traits that are 260 | /// often assumed to be deterministic (like `Eq` and `Hash`), and of "hidden states" that 261 | /// subtly affect results like the `RandomState` of a `HashMap` which can change iteration 262 | /// order. 263 | fn sync_with(&mut self, first: &Self); 264 | } 265 | 266 | /// Construct a new write and read handle pair from an empty data structure. 267 | /// 268 | /// The type must implement `Clone` so we can construct the second copy from the first. 269 | pub fn new_from_empty(t: T) -> (WriteHandle, ReadHandle) 270 | where 271 | T: Absorb + Clone, 272 | { 273 | let epochs = Default::default(); 274 | 275 | let r = ReadHandle::new(t.clone(), Arc::clone(&epochs)); 276 | let w = WriteHandle::new(t, epochs, r.clone()); 277 | (w, r) 278 | } 279 | 280 | /// Construct a new write and read handle pair from the data structure default. 281 | /// 282 | /// The type must implement `Default` so we can construct two empty instances. You must ensure that 283 | /// the trait's `Default` implementation is deterministic and idempotent - that is to say, two 284 | /// instances created by it must behave _exactly_ the same. An example of where this is problematic 285 | /// is `HashMap` - due to `RandomState`, two instances returned by `Default` may have a different 286 | /// iteration order. 287 | /// 288 | /// If your type's `Default` implementation does not guarantee this, you can use `new_from_empty`, 289 | /// which relies on `Clone` instead of `Default`. 
290 | pub fn new() -> (WriteHandle, ReadHandle) 291 | where 292 | T: Absorb + Default, 293 | { 294 | let epochs = Default::default(); 295 | 296 | let r = ReadHandle::new(T::default(), Arc::clone(&epochs)); 297 | let w = WriteHandle::new(T::default(), epochs, r.clone()); 298 | (w, r) 299 | } 300 | -------------------------------------------------------------------------------- /src/read.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::{fence, Arc, AtomicPtr, AtomicUsize, Ordering}; 2 | use std::cell::Cell; 3 | use std::fmt; 4 | use std::marker::PhantomData; 5 | use std::ptr::NonNull; 6 | 7 | // To make [`WriteHandle`] and friends work. 8 | #[cfg(doc)] 9 | use crate::WriteHandle; 10 | 11 | mod guard; 12 | pub use guard::ReadGuard; 13 | 14 | mod factory; 15 | pub use factory::ReadHandleFactory; 16 | 17 | /// A read handle to a left-right guarded data structure. 18 | /// 19 | /// To use a handle, first call [`enter`](Self::enter) to acquire a [`ReadGuard`]. This is similar 20 | /// to acquiring a `Mutex`, except that no exclusive lock is taken. All reads of the underlying 21 | /// data structure can then happen through the [`ReadGuard`] (which implements `Deref`). 23 | /// 24 | /// Reads through a `ReadHandle` only see the changes up until the last time 25 | /// [`WriteHandle::publish`] was called. That is, even if a writer performs a number of 26 | /// modifications to the underlying data, those changes are not visible to reads until the writer 27 | /// calls [`publish`](crate::WriteHandle::publish). 28 | /// 29 | /// `ReadHandle` is not `Sync`, which means that you cannot share a `ReadHandle` across many 30 | /// threads. This is because the coordination necessary to do so would significantly hamper the 31 | /// scalability of reads. 
If you had many reads go through one `ReadHandle`, they would need to 32 | /// coordinate among themselves for every read, which would lead to core contention and poor 33 | /// multi-core performance. By having `ReadHandle` not be `Sync`, you are forced to keep a 34 | /// `ReadHandle` per reader, which guarantees that you do not accidentally ruin your performance. 35 | /// 36 | /// You can create a new, independent `ReadHandle` either by cloning an existing handle or by using 37 | /// a [`ReadHandleFactory`]. Note, however, that creating a new handle through either of these 38 | /// mechanisms _does_ take a lock, and may therefore become a bottleneck if you do it frequently. 39 | pub struct ReadHandle { 40 | pub(crate) inner: Arc>, 41 | pub(crate) epochs: crate::Epochs, 42 | epoch: Arc, 43 | epoch_i: usize, 44 | enters: Cell, 45 | 46 | // `ReadHandle` is _only_ Send if T is Sync. If T is !Sync, then it's not okay for us to expose 47 | // references to it to other threads! Since negative impls are not available on stable, we pull 48 | // this little hack to make the type not auto-impl Send, and then explicitly add the impl when 49 | // appropriate. 50 | _unimpl_send: PhantomData<*const T>, 51 | } 52 | unsafe impl Send for ReadHandle where T: Sync {} 53 | 54 | impl Drop for ReadHandle { 55 | fn drop(&mut self) { 56 | // epoch must already be even for us to have &mut self, 57 | // so okay to lock since we're not holding up the epoch anyway. 
58 | let e = self.epochs.lock().unwrap().remove(self.epoch_i); 59 | assert!(Arc::ptr_eq(&e, &self.epoch)); 60 | assert_eq!(self.enters.get(), 0); 61 | } 62 | } 63 | 64 | impl fmt::Debug for ReadHandle { 65 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 66 | f.debug_struct("ReadHandle") 67 | .field("epochs", &self.epochs) 68 | .field("epoch", &self.epoch) 69 | .finish() 70 | } 71 | } 72 | 73 | impl Clone for ReadHandle { 74 | fn clone(&self) -> Self { 75 | ReadHandle::new_with_arc(Arc::clone(&self.inner), Arc::clone(&self.epochs)) 76 | } 77 | } 78 | 79 | impl ReadHandle { 80 | pub(crate) fn new(inner: T, epochs: crate::Epochs) -> Self { 81 | let store = Box::into_raw(Box::new(inner)); 82 | let inner = Arc::new(AtomicPtr::new(store)); 83 | Self::new_with_arc(inner, epochs) 84 | } 85 | 86 | fn new_with_arc(inner: Arc>, epochs: crate::Epochs) -> Self { 87 | // tell writer about our epoch tracker 88 | let epoch = Arc::new(AtomicUsize::new(0)); 89 | // okay to lock, since we're not holding up the epoch 90 | let epoch_i = epochs.lock().unwrap().insert(Arc::clone(&epoch)); 91 | 92 | Self { 93 | epochs, 94 | epoch, 95 | epoch_i, 96 | enters: Cell::new(0), 97 | inner, 98 | _unimpl_send: PhantomData, 99 | } 100 | } 101 | 102 | /// Create a [`ReadHandleFactory`] which is `Send` & `Sync` and can be shared across threads to create 103 | /// additional [`ReadHandle`] instances. 104 | pub fn factory(&self) -> ReadHandleFactory { 105 | ReadHandleFactory { 106 | inner: Arc::clone(&self.inner), 107 | epochs: Arc::clone(&self.epochs), 108 | } 109 | } 110 | } 111 | 112 | impl ReadHandle { 113 | /// Take out a guarded live reference to the read copy of the `T`. 114 | /// 115 | /// While the guard lives, the [`WriteHandle`] cannot proceed with a call to 116 | /// [`WriteHandle::publish`], so no queued operations will become visible to _any_ reader. 117 | /// 118 | /// If the `WriteHandle` has been dropped, this function returns `None`. 
119 | pub fn enter(&self) -> Option<ReadGuard<'_, T>> { 120 | let enters = self.enters.get(); 121 | if enters != 0 { 122 | // We have already locked the epoch. 123 | // Just give out another guard. 124 | let r_handle = self.inner.load(Ordering::Acquire); 125 | // since we previously bumped our epoch, this pointer will remain valid until we bump 126 | // it again, which only happens when the last ReadGuard is dropped. 127 | let r_handle = unsafe { r_handle.as_ref() }; 128 | 129 | return if let Some(r_handle) = r_handle { 130 | self.enters.set(enters + 1); 131 | Some(ReadGuard { 132 | handle: guard::ReadHandleState::from(self), 133 | t: r_handle, 134 | }) 135 | } else { 136 | unreachable!("if pointer is null, no ReadGuard should have been issued"); 137 | }; 138 | } 139 | 140 | // once we update our epoch, the writer can no longer do a swap until we set the MSB to 141 | // indicate that we've finished our read. however, we still need to deal with the case of a 142 | // race between when the writer reads our epoch and when they decide to make the swap. 143 | // 144 | // assume that there is a concurrent writer. it just swapped the atomic pointer from A to 145 | // B. the writer wants to modify A, and needs to know if that is safe. we can be in any of 146 | // the following cases when we atomically swap out our epoch: 147 | // 148 | // 1. the writer has read our previous epoch twice 149 | // 2. the writer has already read our previous epoch once 150 | // 3. the writer has not yet read our previous epoch 151 | // 152 | // let's discuss each of these in turn. 153 | // 154 | // 1. since writers assume they are free to proceed if they read an epoch with MSB set 155 | // twice in a row, this is equivalent to case (2) below. 156 | // 2. the writer will see our epoch change, and so will assume that we have read B. it 157 | // will therefore feel free to modify A. note that *another* pointer swap can happen,
158 | // back to A, but then the writer would be blocked on our epoch, and so cannot modify 159 | // A *or* B. consequently, using a pointer we read *after* the epoch swap is definitely 160 | // safe here. 161 | // 3. the writer will read our epoch, notice that MSB is not set, and will keep reading, 162 | // continuing to observe that it is still not set until we finish our read. thus, 163 | // neither A nor B are being modified, and we can safely use either. 164 | // 165 | // in all cases, using a pointer we read *after* updating our epoch is safe. 166 | 167 | // so, update our epoch tracker. 168 | self.epoch.fetch_add(1, Ordering::AcqRel); 169 | 170 | // ensure that the pointer read happens strictly after updating the epoch 171 | fence(Ordering::SeqCst); 172 | 173 | // then, atomically read pointer, and use the copy being pointed to 174 | let r_handle = self.inner.load(Ordering::Acquire); 175 | 176 | // since we bumped our epoch, this pointer will remain valid until we bump it again 177 | let r_handle = unsafe { r_handle.as_ref() }; 178 | 179 | if let Some(r_handle) = r_handle { 180 | // add a guard to ensure we restore read parity even if we panic 181 | let enters = self.enters.get() + 1; 182 | self.enters.set(enters); 183 | Some(ReadGuard { 184 | handle: guard::ReadHandleState::from(self), 185 | t: r_handle, 186 | }) 187 | } else { 188 | // the writehandle has been dropped, and so have both copies, 189 | // so restore parity and return None 190 | self.epoch.fetch_add(1, Ordering::AcqRel); 191 | None 192 | } 193 | } 194 | 195 | /// Returns true if the [`WriteHandle`] has been dropped. 196 | pub fn was_dropped(&self) -> bool { 197 | self.inner.load(Ordering::Acquire).is_null() 198 | } 199 | 200 | /// Returns a raw pointer to the read copy of the data. 201 | /// 202 | /// Note that it is only safe to read through this pointer if you _know_ that the writer will 203 | /// not start writing into it.
This is most likely only the case if you are calling this method 204 | /// from inside a method that holds `&mut WriteHandle`. 205 | /// 206 | /// Casting this pointer to `&mut` is never safe. 207 | pub fn raw_handle(&self) -> Option> { 208 | NonNull::new(self.inner.load(Ordering::Acquire)) 209 | } 210 | } 211 | 212 | /// `ReadHandle` cannot be shared across threads: 213 | /// 214 | /// ```compile_fail 215 | /// use left_right::ReadHandle; 216 | /// 217 | /// fn is_sync() { 218 | /// // dummy function just used for its parameterized type bound 219 | /// } 220 | /// 221 | /// // the line below will not compile as ReadHandle does not implement Sync 222 | /// 223 | /// is_sync::>() 224 | /// ``` 225 | /// 226 | /// But, it can be sent across threads: 227 | /// 228 | /// ``` 229 | /// use left_right::ReadHandle; 230 | /// 231 | /// fn is_send() { 232 | /// // dummy function just used for its parameterized type bound 233 | /// } 234 | /// 235 | /// is_send::>() 236 | /// ``` 237 | /// 238 | /// As long as the wrapped type is `Sync` that is. 239 | /// 240 | /// ```compile_fail 241 | /// use left_right::ReadHandle; 242 | /// 243 | /// fn is_send() {} 244 | /// 245 | /// is_send::>>() 246 | /// ``` 247 | #[allow(dead_code)] 248 | struct CheckReadHandleSendNotSync; 249 | -------------------------------------------------------------------------------- /src/read/factory.rs: -------------------------------------------------------------------------------- 1 | use super::ReadHandle; 2 | use crate::sync::{Arc, AtomicPtr}; 3 | use std::fmt; 4 | 5 | /// A type that is both `Sync` and `Send` and lets you produce new [`ReadHandle`] instances. 6 | /// 7 | /// This serves as a handy way to distribute read handles across many threads without requiring 8 | /// additional external locking to synchronize access to the non-`Sync` [`ReadHandle`] type. 
Note 9 | /// that this _internally_ takes a lock whenever you call [`ReadHandleFactory::handle`], so 10 | /// you should not expect producing new handles rapidly to scale well. 11 | pub struct ReadHandleFactory { 12 | pub(super) inner: Arc>, 13 | pub(super) epochs: crate::Epochs, 14 | } 15 | 16 | impl fmt::Debug for ReadHandleFactory { 17 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 18 | f.debug_struct("ReadHandleFactory") 19 | .field("epochs", &self.epochs) 20 | .finish() 21 | } 22 | } 23 | 24 | impl Clone for ReadHandleFactory { 25 | fn clone(&self) -> Self { 26 | Self { 27 | inner: Arc::clone(&self.inner), 28 | epochs: Arc::clone(&self.epochs), 29 | } 30 | } 31 | } 32 | 33 | impl ReadHandleFactory { 34 | /// Produce a new [`ReadHandle`] to the same left-right data structure as this factory was 35 | /// originally produced from. 36 | pub fn handle(&self) -> ReadHandle { 37 | ReadHandle::new_with_arc(Arc::clone(&self.inner), Arc::clone(&self.epochs)) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/read/guard.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::{AtomicUsize, Ordering}; 2 | use std::cell::Cell; 3 | use std::mem; 4 | 5 | #[derive(Debug, Copy, Clone)] 6 | pub(super) struct ReadHandleState<'rh> { 7 | pub(super) epoch: &'rh AtomicUsize, 8 | pub(super) enters: &'rh Cell, 9 | } 10 | 11 | impl<'rh, T> From<&'rh super::ReadHandle> for ReadHandleState<'rh> { 12 | fn from(rh: &'rh super::ReadHandle) -> Self { 13 | Self { 14 | epoch: &rh.epoch, 15 | enters: &rh.enters, 16 | } 17 | } 18 | } 19 | 20 | /// A guard wrapping a live reference into a left-right protected `T`. 21 | /// 22 | /// As long as this guard lives, the `T` being read cannot change. If a writer attempts to call 23 | /// [`WriteHandle::publish`](crate::WriteHandle::publish), that call will block until this guard is 24 | /// dropped. 
25 | /// 26 | /// To scope the guard to a subset of the data in `T`, use [`map`](Self::map) and 27 | /// [`try_map`](Self::try_map). 28 | #[derive(Debug)] 29 | pub struct ReadGuard<'rh, T: ?Sized> { 30 | // NOTE: _technically_ this is more like &'self. 31 | // the reference is valid until the guard is dropped. 32 | pub(super) t: &'rh T, 33 | pub(super) handle: ReadHandleState<'rh>, 34 | } 35 | 36 | impl<'rh, T: ?Sized> ReadGuard<'rh, T> { 37 | /// Makes a new `ReadGuard` for a component of the borrowed data. 38 | /// 39 | /// This is an associated function that needs to be used as `ReadGuard::map(...)`, since 40 | /// a method would interfere with methods of the same name on the contents of a `Readguard` 41 | /// used through `Deref`. 42 | /// 43 | /// # Examples 44 | /// 45 | /// ``` 46 | /// use left_right::{ReadGuard, ReadHandle}; 47 | /// 48 | /// fn get_str(handle: &ReadHandle>, i: usize) -> Option> { 49 | /// handle.enter().map(|guard| { 50 | /// ReadGuard::map(guard, |t| { 51 | /// &*t[i].0 52 | /// }) 53 | /// }) 54 | /// } 55 | /// ``` 56 | pub fn map(orig: Self, f: F) -> ReadGuard<'rh, U> 57 | where 58 | F: for<'a> FnOnce(&'a T) -> &'a U, 59 | { 60 | let rg = ReadGuard { 61 | t: f(orig.t), 62 | handle: orig.handle, 63 | }; 64 | mem::forget(orig); 65 | rg 66 | } 67 | 68 | /// Makes a new `ReadGuard` for a component of the borrowed data that may not exist. 69 | /// 70 | /// This method differs from [`map`](Self::map) in that it drops the guard if the closure maps 71 | /// to `None`. This allows you to "lift" a `ReadGuard>` into an 72 | /// `Option>`. 73 | /// 74 | /// This is an associated function that needs to be used as `ReadGuard::try_map(...)`, since 75 | /// a method would interfere with methods of the same name on the contents of a `Readguard` 76 | /// used through `Deref`. 
77 | /// 78 | /// # Examples 79 | /// 80 | /// ``` 81 | /// use left_right::{ReadGuard, ReadHandle}; 82 | /// 83 | /// fn try_get_str(handle: &ReadHandle>, i: usize) -> Option> { 84 | /// handle.enter().and_then(|guard| { 85 | /// ReadGuard::try_map(guard, |t| { 86 | /// t.get(i).map(|v| &*v.0) 87 | /// }) 88 | /// }) 89 | /// } 90 | /// ``` 91 | pub fn try_map(orig: Self, f: F) -> Option> 92 | where 93 | F: for<'a> FnOnce(&'a T) -> Option<&'a U>, 94 | { 95 | let rg = ReadGuard { 96 | t: f(orig.t)?, 97 | handle: orig.handle, 98 | }; 99 | mem::forget(orig); 100 | Some(rg) 101 | } 102 | } 103 | 104 | impl<'rh, T: ?Sized> AsRef for ReadGuard<'rh, T> { 105 | fn as_ref(&self) -> &T { 106 | self.t 107 | } 108 | } 109 | 110 | impl<'rh, T: ?Sized> std::ops::Deref for ReadGuard<'rh, T> { 111 | type Target = T; 112 | fn deref(&self) -> &Self::Target { 113 | self.t 114 | } 115 | } 116 | 117 | impl<'rh, T: ?Sized> Drop for ReadGuard<'rh, T> { 118 | fn drop(&mut self) { 119 | let enters = self.handle.enters.get() - 1; 120 | self.handle.enters.set(enters); 121 | if enters == 0 { 122 | // We are the last guard to be dropped -- now release our epoch. 123 | self.handle.epoch.fetch_add(1, Ordering::AcqRel); 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/sync.rs: -------------------------------------------------------------------------------- 1 | #[cfg(loom)] 2 | pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; 3 | #[cfg(loom)] 4 | pub(crate) use loom::sync::{Arc, Mutex, MutexGuard}; 5 | #[cfg(loom)] 6 | pub(crate) fn fence(ord: Ordering) { 7 | if let Ordering::Acquire = ord { 8 | } else { 9 | // FIXME: loom only supports acquire fences at the moment. 10 | // https://github.com/tokio-rs/loom/issues/117 11 | // let's at least not panic... 
12 | // this may generate some false positives (`SeqCst` is stronger than `Acquire` 13 | // for example), and some false negatives (`Relaxed` is weaker than `Acquire`), 14 | // but it's the best we can do for the time being. 15 | } 16 | loom::sync::atomic::fence(Ordering::Acquire) 17 | } 18 | 19 | #[cfg(not(loom))] 20 | pub(crate) use std::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; 21 | #[cfg(not(loom))] 22 | pub(crate) use std::sync::{Arc, Mutex, MutexGuard}; 23 | -------------------------------------------------------------------------------- /src/utilities.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | #[derive(Debug)] 3 | pub struct CounterAddOp(pub i32); 4 | 5 | #[cfg(test)] 6 | impl Absorb for i32 { 7 | fn absorb_first(&mut self, operation: &mut CounterAddOp, _: &Self) { 8 | *self += operation.0; 9 | } 10 | 11 | fn absorb_second(&mut self, operation: CounterAddOp, _: &Self) { 12 | *self += operation.0; 13 | } 14 | 15 | fn drop_first(self: Box) {} 16 | 17 | fn sync_with(&mut self, first: &Self) { 18 | *self = *first 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/write.rs: -------------------------------------------------------------------------------- 1 | use crate::read::ReadHandle; 2 | use crate::Absorb; 3 | 4 | use crate::sync::{fence, Arc, AtomicUsize, MutexGuard, Ordering}; 5 | use std::collections::VecDeque; 6 | use std::marker::PhantomData; 7 | use std::ops::DerefMut; 8 | use std::ptr::NonNull; 9 | #[cfg(test)] 10 | use std::sync::atomic::AtomicBool; 11 | use std::{fmt, thread}; 12 | 13 | /// A writer handle to a left-right guarded data structure. 14 | /// 15 | /// All operations on the underlying data should be enqueued as operations of type `O` using 16 | /// [`append`](Self::append). The effect of this operations are only exposed to readers once 17 | /// [`publish`](Self::publish) is called. 
18 | /// 19 | /// # Reading through a `WriteHandle` 20 | /// 21 | /// `WriteHandle` allows access to a [`ReadHandle`] through `Deref`. Note that 22 | /// since the reads go through a [`ReadHandle`], those reads are subject to the same visibility 23 | /// restrictions as reads that do not go through the `WriteHandle`: they only see the effects of 24 | /// operations prior to the last call to [`publish`](Self::publish). 25 | pub struct WriteHandle 26 | where 27 | T: Absorb, 28 | { 29 | epochs: crate::Epochs, 30 | w_handle: NonNull, 31 | oplog: VecDeque, 32 | swap_index: usize, 33 | r_handle: ReadHandle, 34 | last_epochs: Vec, 35 | #[cfg(test)] 36 | refreshes: usize, 37 | #[cfg(test)] 38 | is_waiting: Arc, 39 | /// Write directly to the write handle map, since no publish has happened. 40 | first: bool, 41 | /// A publish has happened, but the two copies have not been synchronized yet. 42 | second: bool, 43 | /// If we call `Self::take` the drop needs to be different. 44 | taken: bool, 45 | } 46 | 47 | // safety: if a `WriteHandle` is sent across a thread boundary, we need to be able to take 48 | // ownership of both Ts and Os across that thread boundary. since `WriteHandle` holds a 49 | // `ReadHandle`, we also need to respect its Send requirements. 50 | unsafe impl Send for WriteHandle 51 | where 52 | T: Absorb, 53 | T: Send, 54 | O: Send, 55 | ReadHandle: Send, 56 | { 57 | } 58 | 59 | impl fmt::Debug for WriteHandle 60 | where 61 | T: Absorb + fmt::Debug, 62 | O: fmt::Debug, 63 | { 64 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 65 | f.debug_struct("WriteHandle") 66 | .field("epochs", &self.epochs) 67 | .field("w_handle", &self.w_handle) 68 | .field("oplog", &self.oplog) 69 | .field("swap_index", &self.swap_index) 70 | .field("r_handle", &self.r_handle) 71 | .field("first", &self.first) 72 | .field("second", &self.second) 73 | .finish() 74 | } 75 | } 76 | 77 | /// A **smart pointer** to an owned backing data structure. 
This makes sure that the 78 | /// data is dropped correctly (using [`Absorb::drop_second`]). 79 | /// 80 | /// Additionally it allows for unsafely getting the inner data out using [`into_box()`](Taken::into_box). 81 | pub struct Taken, O> { 82 | inner: Option>, 83 | _marker: PhantomData, 84 | } 85 | 86 | impl + std::fmt::Debug, O> std::fmt::Debug for Taken { 87 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 88 | f.debug_struct("Taken") 89 | .field( 90 | "inner", 91 | self.inner 92 | .as_ref() 93 | .expect("inner is only taken in `into_box` which drops self"), 94 | ) 95 | .finish() 96 | } 97 | } 98 | 99 | impl, O> Deref for Taken { 100 | type Target = T; 101 | 102 | fn deref(&self) -> &Self::Target { 103 | self.inner 104 | .as_ref() 105 | .expect("inner is only taken in `into_box` which drops self") 106 | } 107 | } 108 | 109 | impl, O> DerefMut for Taken { 110 | fn deref_mut(&mut self) -> &mut Self::Target { 111 | self.inner 112 | .as_mut() 113 | .expect("inner is only taken in `into_box` which drops self") 114 | } 115 | } 116 | 117 | impl, O> Taken { 118 | /// This is unsafe because you must call [`Absorb::drop_second`] in 119 | /// case just dropping `T` is not safe and sufficient. 120 | /// 121 | /// If you used the default implementation of [`Absorb::drop_second`] (which just calls [`drop`](Drop::drop)) 122 | /// you don't need to call [`Absorb::drop_second`]. 123 | pub unsafe fn into_box(mut self) -> Box { 124 | self.inner 125 | .take() 126 | .expect("inner is only taken here then self is dropped") 127 | } 128 | } 129 | 130 | impl, O> Drop for Taken { 131 | fn drop(&mut self) { 132 | if let Some(inner) = self.inner.take() { 133 | T::drop_second(inner); 134 | } 135 | } 136 | } 137 | 138 | impl WriteHandle 139 | where 140 | T: Absorb, 141 | { 142 | /// Takes out the inner backing data structure if it hasn't been taken yet. Otherwise returns `None`. 
143 | /// 144 | /// Makes sure that all the pending operations are applied and waits till all the read handles 145 | /// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and 146 | /// returns the other copy as a [`Taken`] smart pointer. 147 | fn take_inner(&mut self) -> Option> { 148 | use std::ptr; 149 | // Can only take inner once. 150 | if self.taken { 151 | return None; 152 | } 153 | 154 | // Disallow taking again. 155 | self.taken = true; 156 | 157 | // first, ensure both copies are up to date 158 | // (otherwise safely dropping the possibly duplicated w_handle data is a pain) 159 | if self.first || !self.oplog.is_empty() { 160 | self.publish(); 161 | } 162 | if !self.oplog.is_empty() { 163 | self.publish(); 164 | } 165 | assert!(self.oplog.is_empty()); 166 | 167 | // next, grab the read handle and set it to NULL 168 | let r_handle = self.r_handle.inner.swap(ptr::null_mut(), Ordering::Release); 169 | 170 | // now, wait for all readers to depart 171 | let epochs = Arc::clone(&self.epochs); 172 | let mut epochs = epochs.lock().unwrap(); 173 | self.wait(&mut epochs); 174 | 175 | // ensure that the subsequent epoch reads aren't re-ordered to before the swap 176 | fence(Ordering::SeqCst); 177 | 178 | // all readers have now observed the NULL, so we own both handles. 179 | // all operations have been applied to both w_handle and r_handle. 180 | // give the underlying data structure an opportunity to handle the one copy differently: 181 | // 182 | // safety: w_handle was initially crated from a `Box`, and is no longer aliased. 183 | Absorb::drop_first(unsafe { Box::from_raw(self.w_handle.as_ptr()) }); 184 | 185 | // next we take the r_handle and return it as a boxed value. 186 | // 187 | // this is safe, since we know that no readers are using this pointer 188 | // anymore (due to the .wait() following swapping the pointer with NULL). 
189 | // 190 | // safety: r_handle was initially created from a `Box`, and is no longer aliased. 191 | let boxed_r_handle = unsafe { Box::from_raw(r_handle) }; 192 | 193 | Some(Taken { 194 | inner: Some(boxed_r_handle), 195 | _marker: PhantomData, 196 | }) 197 | } 198 | } 199 | 200 | impl Drop for WriteHandle 201 | where 202 | T: Absorb, 203 | { 204 | fn drop(&mut self) { 205 | if let Some(inner) = self.take_inner() { 206 | drop(inner); 207 | } 208 | } 209 | } 210 | 211 | impl WriteHandle 212 | where 213 | T: Absorb, 214 | { 215 | pub(crate) fn new(w_handle: T, epochs: crate::Epochs, r_handle: ReadHandle) -> Self { 216 | Self { 217 | epochs, 218 | // safety: Box is not null and covariant. 219 | w_handle: unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(w_handle))) }, 220 | oplog: VecDeque::new(), 221 | swap_index: 0, 222 | r_handle, 223 | last_epochs: Vec::new(), 224 | #[cfg(test)] 225 | is_waiting: Arc::new(AtomicBool::new(false)), 226 | #[cfg(test)] 227 | refreshes: 0, 228 | first: true, 229 | second: true, 230 | taken: false, 231 | } 232 | } 233 | 234 | fn wait(&mut self, epochs: &mut MutexGuard<'_, slab::Slab>>) { 235 | let mut iter = 0; 236 | let mut starti = 0; 237 | 238 | #[cfg(test)] 239 | { 240 | self.is_waiting.store(true, Ordering::Relaxed); 241 | } 242 | // we're over-estimating here, but slab doesn't expose its max index 243 | self.last_epochs.resize(epochs.capacity(), 0); 244 | 'retry: loop { 245 | // read all and see if all have changed (which is likely) 246 | for (ii, (ri, epoch)) in epochs.iter().enumerate().skip(starti) { 247 | // if the reader's epoch was even when we last read it (which was _after_ the swap), 248 | // then they either do not have the pointer, or must have read the pointer strictly 249 | // after the swap. in either case, they cannot be using the old pointer value (what 250 | // is now w_handle). 
251 | // 252 | // note that this holds even with wrap-around since std::u{N}::MAX == 2 ^ N - 1, 253 | // which is odd, and std::u{N}::MAX + 1 == 0 is even. 254 | // 255 | // note also that `ri` _may_ have been re-used since we last read into last_epochs. 256 | // this is okay though, as a change still implies that the new reader must have 257 | // arrived _after_ we did the atomic swap, and thus must also have seen the new 258 | // pointer. 259 | if self.last_epochs[ri] % 2 == 0 { 260 | continue; 261 | } 262 | 263 | let now = epoch.load(Ordering::Acquire); 264 | if now != self.last_epochs[ri] { 265 | // reader must have seen the last swap, since they have done at least one 266 | // operation since we last looked at their epoch, which _must_ mean that they 267 | // are no longer using the old pointer value. 268 | } else { 269 | // reader may not have seen swap 270 | // continue from this reader's epoch 271 | starti = ii; 272 | 273 | if !cfg!(loom) { 274 | // how eagerly should we retry? 275 | if iter != 20 { 276 | iter += 1; 277 | } else { 278 | thread::yield_now(); 279 | } 280 | } 281 | 282 | #[cfg(loom)] 283 | loom::thread::yield_now(); 284 | 285 | continue 'retry; 286 | } 287 | } 288 | break; 289 | } 290 | #[cfg(test)] 291 | { 292 | self.is_waiting.store(false, Ordering::Relaxed); 293 | } 294 | } 295 | 296 | /// Publish all operations appended to the log to readers. 297 | /// 298 | /// This method needs to wait for all readers to move to the "other" copy of the data so that 299 | /// it can replay the operational log onto the stale copy the readers used to use. This can 300 | /// take some time, especially if readers are executing slow operations, or if there are many 301 | /// of them. 
302 | pub fn publish(&mut self) -> &mut Self { 303 | // we need to wait until all epochs have changed since the swaps *or* until a "finished" 304 | // flag has been observed to be on for two subsequent iterations (there still may be some 305 | // readers present since we did the previous refresh) 306 | // 307 | // NOTE: it is safe for us to hold the lock for the entire duration of the swap. we will 308 | // only block on pre-existing readers, and they are never waiting to push onto epochs 309 | // unless they have finished reading. 310 | let epochs = Arc::clone(&self.epochs); 311 | let mut epochs = epochs.lock().unwrap(); 312 | 313 | self.wait(&mut epochs); 314 | 315 | if !self.first { 316 | // all the readers have left! 317 | // safety: we haven't freed the Box, and no readers are accessing the w_handle 318 | let w_handle = unsafe { self.w_handle.as_mut() }; 319 | 320 | // safety: we will not swap while we hold this reference 321 | let r_handle = unsafe { 322 | self.r_handle 323 | .inner 324 | .load(Ordering::Acquire) 325 | .as_ref() 326 | .unwrap() 327 | }; 328 | 329 | if self.second { 330 | Absorb::sync_with(w_handle, r_handle); 331 | self.second = false 332 | } 333 | 334 | // the w_handle copy has not seen any of the writes in the oplog 335 | // the r_handle copy has not seen any of the writes following swap_index 336 | if self.swap_index != 0 { 337 | // we can drain out the operations that only the w_handle copy needs 338 | // 339 | // NOTE: the if above is because drain(0..0) would remove 0 340 | for op in self.oplog.drain(0..self.swap_index) { 341 | T::absorb_second(w_handle, op, r_handle); 342 | } 343 | } 344 | // we cannot give owned operations to absorb_first 345 | // since they'll also be needed by the r_handle copy 346 | for op in self.oplog.iter_mut() { 347 | T::absorb_first(w_handle, op, r_handle); 348 | } 349 | // the w_handle copy is about to become the r_handle, and can ignore the oplog 350 | self.swap_index = self.oplog.len(); 351 | 352 | // 
w_handle (the old r_handle) is now fully up to date! 353 | } else { 354 | self.first = false 355 | } 356 | 357 | // at this point, we have exclusive access to w_handle, and it is up-to-date with all 358 | // writes. the stale r_handle is accessed by readers through an Arc clone of atomic pointer 359 | // inside the ReadHandle. oplog contains all the changes that are in w_handle, but not in 360 | // r_handle. 361 | // 362 | // it's now time for us to swap the copies so that readers see up-to-date results from 363 | // w_handle. 364 | 365 | // swap in our w_handle, and get r_handle in return 366 | let r_handle = self 367 | .r_handle 368 | .inner 369 | .swap(self.w_handle.as_ptr(), Ordering::Release); 370 | 371 | // NOTE: at this point, there are likely still readers using r_handle. 372 | // safety: r_handle was also created from a Box, so it is not null and is covariant. 373 | self.w_handle = unsafe { NonNull::new_unchecked(r_handle) }; 374 | 375 | // ensure that the subsequent epoch reads aren't re-ordered to before the swap 376 | fence(Ordering::SeqCst); 377 | 378 | for (ri, epoch) in epochs.iter() { 379 | self.last_epochs[ri] = epoch.load(Ordering::Acquire); 380 | } 381 | 382 | #[cfg(test)] 383 | { 384 | self.refreshes += 1; 385 | } 386 | 387 | self 388 | } 389 | 390 | /// Publish as necessary to ensure that all operations are visible to readers. 391 | /// 392 | /// `WriteHandle::publish` will *always* wait for old readers to depart and swap the maps. 393 | /// This method will only do so if there are pending operations. 394 | pub fn flush(&mut self) { 395 | if self.has_pending_operations() { 396 | self.publish(); 397 | } 398 | } 399 | 400 | /// Returns true if there are operations in the operational log that have not yet been exposed 401 | /// to readers. 
402 | pub fn has_pending_operations(&self) -> bool { 403 | // NOTE: we don't use self.oplog.is_empty() here because it's not really that important if 404 | // there are operations that have not yet been applied to the _write_ handle. 405 | self.swap_index < self.oplog.len() 406 | } 407 | 408 | /// Append the given operation to the operational log. 409 | /// 410 | /// Its effects will not be exposed to readers until you call [`publish`](Self::publish). 411 | pub fn append(&mut self, op: O) -> &mut Self { 412 | self.extend(std::iter::once(op)); 413 | self 414 | } 415 | 416 | /// Returns a raw pointer to the write copy of the data (the one readers are _not_ accessing). 417 | /// 418 | /// Note that it is only safe to mutate through this pointer if you _know_ that there are no 419 | /// readers still present in this copy. This is not normally something you know; even after 420 | /// calling `publish`, readers may still be in the write copy for some time. In general, the 421 | /// only time you know this is okay is before the first call to `publish` (since no readers 422 | /// ever entered the write copy). 423 | // TODO: Make this return `Option<&mut T>`, 424 | // and only `Some` if there are indeed no readers in the write copy. 425 | pub fn raw_write_handle(&mut self) -> NonNull { 426 | self.w_handle 427 | } 428 | 429 | /// Returns the backing data structure. 430 | /// 431 | /// Makes sure that all the pending operations are applied and waits till all the read handles 432 | /// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and 433 | /// returns the other copy as a [`Taken`] smart pointer. 434 | pub fn take(mut self) -> Taken { 435 | // It is always safe to `expect` here because `take_inner` is private 436 | // and it is only called here and in the drop impl. Since we have an owned 437 | // `self` we know the drop has not yet been called. 
And every first call of 438 | // `take_inner` returns `Some` 439 | self.take_inner() 440 | .expect("inner is only taken here then self is dropped") 441 | } 442 | } 443 | 444 | // allow using write handle for reads 445 | use std::ops::Deref; 446 | impl Deref for WriteHandle 447 | where 448 | T: Absorb, 449 | { 450 | type Target = ReadHandle; 451 | fn deref(&self) -> &Self::Target { 452 | &self.r_handle 453 | } 454 | } 455 | 456 | impl Extend for WriteHandle 457 | where 458 | T: Absorb, 459 | { 460 | /// Add multiple operations to the operational log. 461 | /// 462 | /// Their effects will not be exposed to readers until you call [`publish`](Self::publish) 463 | fn extend(&mut self, ops: I) 464 | where 465 | I: IntoIterator, 466 | { 467 | if self.first { 468 | // Safety: we know there are no outstanding w_handle readers, since we haven't 469 | // refreshed ever before, so we can modify it directly! 470 | let mut w_inner = self.raw_write_handle(); 471 | let w_inner = unsafe { w_inner.as_mut() }; 472 | let r_handle = self.enter().expect("map has not yet been destroyed"); 473 | // Because we are operating directly on the map, and nothing is aliased, we do want 474 | // to perform drops, so we invoke absorb_second. 475 | for op in ops { 476 | Absorb::absorb_second(w_inner, op, &*r_handle); 477 | } 478 | } else { 479 | self.oplog.extend(ops); 480 | } 481 | } 482 | } 483 | 484 | /// `WriteHandle` can be sent across thread boundaries: 485 | /// 486 | /// ``` 487 | /// use left_right::WriteHandle; 488 | /// 489 | /// struct Data; 490 | /// impl left_right::Absorb<()> for Data { 491 | /// fn absorb_first(&mut self, _: &mut (), _: &Self) {} 492 | /// fn sync_with(&mut self, _: &Self) {} 493 | /// } 494 | /// 495 | /// fn is_send() { 496 | /// // dummy function just used for its parameterized type bound 497 | /// } 498 | /// 499 | /// is_send::>() 500 | /// ``` 501 | /// 502 | /// As long as the inner types allow that of course. 
503 | /// Namely, the data type has to be `Send`: 504 | /// 505 | /// ```compile_fail 506 | /// use left_right::WriteHandle; 507 | /// use std::rc::Rc; 508 | /// 509 | /// struct Data(Rc<()>); 510 | /// impl left_right::Absorb<()> for Data { 511 | /// fn absorb_first(&mut self, _: &mut (), _: &Self) {} 512 | /// } 513 | /// 514 | /// fn is_send() { 515 | /// // dummy function just used for its parameterized type bound 516 | /// } 517 | /// 518 | /// is_send::>() 519 | /// ``` 520 | /// 521 | /// .. the operation type has to be `Send`: 522 | /// 523 | /// ```compile_fail 524 | /// use left_right::WriteHandle; 525 | /// use std::rc::Rc; 526 | /// 527 | /// struct Data; 528 | /// impl left_right::Absorb> for Data { 529 | /// fn absorb_first(&mut self, _: &mut Rc<()>, _: &Self) {} 530 | /// } 531 | /// 532 | /// fn is_send() { 533 | /// // dummy function just used for its parameterized type bound 534 | /// } 535 | /// 536 | /// is_send::>>() 537 | /// ``` 538 | /// 539 | /// .. and the data type has to be `Sync` so it's still okay to read through `ReadHandle`s: 540 | /// 541 | /// ```compile_fail 542 | /// use left_right::WriteHandle; 543 | /// use std::cell::Cell; 544 | /// 545 | /// struct Data(Cell<()>); 546 | /// impl left_right::Absorb<()> for Data { 547 | /// fn absorb_first(&mut self, _: &mut (), _: &Self) {} 548 | /// } 549 | /// 550 | /// fn is_send() { 551 | /// // dummy function just used for its parameterized type bound 552 | /// } 553 | /// 554 | /// is_send::>() 555 | /// ``` 556 | #[allow(dead_code)] 557 | struct CheckWriteHandleSend; 558 | 559 | #[cfg(test)] 560 | mod tests { 561 | use crate::sync::{AtomicUsize, Mutex, Ordering}; 562 | use crate::Absorb; 563 | use slab::Slab; 564 | include!("./utilities.rs"); 565 | 566 | #[test] 567 | fn append_test() { 568 | let (mut w, _r) = crate::new::(); 569 | assert_eq!(w.first, true); 570 | w.append(CounterAddOp(1)); 571 | assert_eq!(w.oplog.len(), 0); 572 | assert_eq!(w.first, true); 573 | w.publish(); 574 | 
assert_eq!(w.first, false); 575 | w.append(CounterAddOp(2)); 576 | w.append(CounterAddOp(3)); 577 | assert_eq!(w.oplog.len(), 2); 578 | } 579 | 580 | #[test] 581 | fn take_test() { 582 | // publish twice then take with no pending operations 583 | let (mut w, _r) = crate::new_from_empty::(2); 584 | w.append(CounterAddOp(1)); 585 | w.publish(); 586 | w.append(CounterAddOp(1)); 587 | w.publish(); 588 | assert_eq!(*w.take(), 4); 589 | 590 | // publish twice then pending operation published by take 591 | let (mut w, _r) = crate::new_from_empty::(2); 592 | w.append(CounterAddOp(1)); 593 | w.publish(); 594 | w.append(CounterAddOp(1)); 595 | w.publish(); 596 | w.append(CounterAddOp(2)); 597 | assert_eq!(*w.take(), 6); 598 | 599 | // normal publish then pending operations published by take 600 | let (mut w, _r) = crate::new_from_empty::(2); 601 | w.append(CounterAddOp(1)); 602 | w.publish(); 603 | w.append(CounterAddOp(1)); 604 | assert_eq!(*w.take(), 4); 605 | 606 | // pending operations published by take 607 | let (mut w, _r) = crate::new_from_empty::(2); 608 | w.append(CounterAddOp(1)); 609 | assert_eq!(*w.take(), 3); 610 | 611 | // empty op queue 612 | let (mut w, _r) = crate::new_from_empty::(2); 613 | w.append(CounterAddOp(1)); 614 | w.publish(); 615 | assert_eq!(*w.take(), 3); 616 | 617 | // no operations 618 | let (w, _r) = crate::new_from_empty::(2); 619 | assert_eq!(*w.take(), 2); 620 | } 621 | 622 | #[test] 623 | fn wait_test() { 624 | use std::sync::{Arc, Barrier}; 625 | use std::thread; 626 | let (mut w, _r) = crate::new::(); 627 | 628 | // Case 1: If epoch is set to default. 629 | let test_epochs: crate::Epochs = Default::default(); 630 | let mut test_epochs = test_epochs.lock().unwrap(); 631 | // since there is no epoch to wait for, wait function will return immediately. 632 | w.wait(&mut test_epochs); 633 | 634 | // Case 2: If one of the readers is still reading (epoch is odd and count is same as in last_epoch) 635 | // and wait has been called. 
636 | let held_epoch = Arc::new(AtomicUsize::new(1)); 637 | 638 | w.last_epochs = vec![2, 2, 1]; 639 | let mut epochs_slab = Slab::new(); 640 | epochs_slab.insert(Arc::new(AtomicUsize::new(2))); 641 | epochs_slab.insert(Arc::new(AtomicUsize::new(2))); 642 | epochs_slab.insert(Arc::clone(&held_epoch)); 643 | 644 | let barrier = Arc::new(Barrier::new(2)); 645 | 646 | let is_waiting = Arc::clone(&w.is_waiting); 647 | 648 | // check the writer's waiting state before calling wait. 649 | let is_waiting_v = is_waiting.load(Ordering::Relaxed); 650 | assert_eq!(false, is_waiting_v); 651 | 652 | let barrier2 = Arc::clone(&barrier); 653 | let test_epochs = Arc::new(Mutex::new(epochs_slab)); 654 | let wait_handle = thread::spawn(move || { 655 | barrier2.wait(); 656 | let mut test_epochs = test_epochs.lock().unwrap(); 657 | w.wait(&mut test_epochs); 658 | }); 659 | 660 | barrier.wait(); 661 | 662 | // make sure that the writer's wait() is called first; only then allow updates to the held epoch. 663 | while !is_waiting.load(Ordering::Relaxed) { 664 | thread::yield_now(); 665 | } 666 | 667 | held_epoch.fetch_add(1, Ordering::SeqCst); 668 | 669 | // join to make sure that wait must return after the progress/increment 670 | // of held_epoch. 671 | let _ = wait_handle.join(); 672 | } 673 | 674 | #[test] 675 | fn flush_noblock() { 676 | let (mut w, r) = crate::new::(); 677 | w.append(CounterAddOp(42)); 678 | w.publish(); 679 | assert_eq!(*r.enter().unwrap(), 42); 680 | 681 | // pin the epoch 682 | let _count = r.enter(); 683 | // refresh would hang here 684 | assert_eq!(w.oplog.iter().skip(w.swap_index).count(), 0); 685 | assert!(!w.has_pending_operations()); 686 | } 687 | 688 | #[test] 689 | fn flush_no_refresh() { 690 | let (mut w, _) = crate::new::(); 691 | 692 | // Until we refresh, writes are written directly instead of going to the 693 | // oplog (because there can't be any readers on the w_handle table). 
694 | assert!(!w.has_pending_operations()); 695 | w.publish(); 696 | assert!(!w.has_pending_operations()); 697 | assert_eq!(w.refreshes, 1); 698 | 699 | w.append(CounterAddOp(42)); 700 | assert!(w.has_pending_operations()); 701 | w.publish(); 702 | assert!(!w.has_pending_operations()); 703 | assert_eq!(w.refreshes, 2); 704 | 705 | w.append(CounterAddOp(42)); 706 | assert!(w.has_pending_operations()); 707 | w.publish(); 708 | assert!(!w.has_pending_operations()); 709 | assert_eq!(w.refreshes, 3); 710 | 711 | // Sanity check that a refresh would have been visible 712 | assert!(!w.has_pending_operations()); 713 | w.publish(); 714 | assert_eq!(w.refreshes, 4); 715 | } 716 | } 717 | -------------------------------------------------------------------------------- /tests/deque.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::collections::VecDeque; 3 | use std::rc::Rc; 4 | 5 | use left_right::{ 6 | aliasing::{Aliased, DropBehavior}, 7 | Absorb, ReadHandle, 8 | }; 9 | 10 | // Value encapsulates an integer value and keeps a registry of live values up to 11 | // date. 12 | struct Value { 13 | v: i32, 14 | r: Rc, 15 | } 16 | 17 | impl Value { 18 | fn new(v: i32, r: Rc) -> Self { 19 | r.adjust_count(1); 20 | Self { v, r } 21 | } 22 | } 23 | 24 | impl Drop for Value { 25 | fn drop(&mut self) { 26 | self.r.adjust_count(-1); 27 | } 28 | } 29 | 30 | // ValueRegistry keeps track of the number of Values that have been created and 31 | // not yet dropped. 
32 | struct ValueRegistry { 33 | num_live_values: Cell, 34 | } 35 | 36 | impl ValueRegistry { 37 | fn new() -> Self { 38 | Self { 39 | num_live_values: Cell::new(0), 40 | } 41 | } 42 | 43 | fn adjust_count(&self, delta: i64) { 44 | let mut live_vals = self.num_live_values.get(); 45 | live_vals += delta; 46 | assert!(live_vals >= 0); 47 | self.num_live_values.set(live_vals); 48 | } 49 | 50 | fn expect(&self, expected_count: i64) { 51 | assert_eq!(self.num_live_values.get(), expected_count); 52 | } 53 | } 54 | 55 | struct NoDrop; 56 | impl DropBehavior for NoDrop { 57 | const DO_DROP: bool = false; 58 | } 59 | 60 | struct DoDrop; 61 | impl DropBehavior for DoDrop { 62 | const DO_DROP: bool = true; 63 | } 64 | type Deque = VecDeque>; 65 | 66 | enum Op { 67 | PushBack(Aliased), 68 | PopFront, 69 | } 70 | 71 | impl Absorb for Deque { 72 | fn absorb_first(&mut self, operation: &mut Op, _other: &Self) { 73 | match operation { 74 | Op::PushBack(value) => { 75 | self.push_back(unsafe { value.alias() }); 76 | } 77 | Op::PopFront => { 78 | self.pop_front(); 79 | } 80 | } 81 | } 82 | 83 | fn absorb_second(&mut self, operation: Op, _other: &Self) { 84 | // Cast the data structure to the variant that drops entries. 85 | // SAFETY: the Aliased type guarantees the same memory layout for NoDrop 86 | // vs DoDrop, so the cast is sound. 87 | let with_drop: &mut VecDeque> = 88 | unsafe { &mut *(self as *mut _ as *mut _) }; 89 | match operation { 90 | Op::PushBack(value) => { 91 | with_drop.push_back(unsafe { value.change_drop() }); 92 | } 93 | Op::PopFront => { 94 | with_drop.pop_front(); 95 | } 96 | } 97 | } 98 | 99 | fn sync_with(&mut self, first: &Self) { 100 | assert_eq!(self.len(), 0); 101 | self.extend(first.iter().map(|v| unsafe { v.alias() })); 102 | } 103 | 104 | fn drop_first(self: Box) { 105 | // The Deque type has NoDrop, so this will not drop any of the values. 106 | } 107 | 108 | fn drop_second(self: Box) { 109 | // Convert self to DoDrop and drop it. 
110 | let with_drop: Box>> = 111 | unsafe { Box::from_raw(Box::into_raw(self) as *mut _ as *mut _) }; 112 | drop(with_drop); 113 | } 114 | } 115 | 116 | // Test a deque of aliased values, verifying that the lifetimes of the values 117 | // are as promised. 118 | #[test] 119 | fn deque() { 120 | let registry = Rc::new(ValueRegistry::new()); 121 | 122 | let mkval = |v| Aliased::from(Value::new(v, Rc::clone(®istry))); 123 | let expect = |r: &ReadHandle, expected: &[i32]| { 124 | let guard = r.enter().unwrap(); 125 | assert!(guard.iter().map(|v| &v.v).eq(expected.iter())); 126 | }; 127 | 128 | let (mut w, r) = left_right::new::(); 129 | w.append(Op::PushBack(mkval(1))); 130 | w.append(Op::PushBack(mkval(2))); 131 | w.append(Op::PushBack(mkval(3))); 132 | w.publish(); 133 | 134 | registry.expect(3); 135 | expect(&r, &[1, 2, 3]); 136 | 137 | w.append(Op::PushBack(mkval(4))); 138 | w.publish(); 139 | 140 | registry.expect(4); 141 | expect(&r, &[1, 2, 3, 4]); 142 | 143 | w.append(Op::PopFront); 144 | w.append(Op::PopFront); 145 | w.publish(); 146 | 147 | // At this point, the popped values should not be freed. 148 | registry.expect(4); 149 | expect(&r, &[3, 4]); 150 | 151 | w.append(Op::PopFront); 152 | w.publish(); 153 | 154 | // The two previously popped values (1, 2) should have been freed. 155 | registry.expect(2); 156 | expect(&r, &[4]); 157 | 158 | drop(r); 159 | drop(w); 160 | 161 | registry.expect(0); 162 | } 163 | -------------------------------------------------------------------------------- /tests/loom.rs: -------------------------------------------------------------------------------- 1 | #[cfg(loom)] 2 | #[cfg(test)] 3 | mod loom_tests { 4 | // Evil hack to share CounterAddOp between 5 | // unit tests and integration tests. 
6 | use left_right::Absorb; 7 | include!("../src/utilities.rs"); 8 | 9 | use loom::thread; 10 | 11 | #[test] 12 | fn read_before_publish() { 13 | loom::model(|| { 14 | let (mut w, r) = left_right::new::(); 15 | 16 | w.append(CounterAddOp(1)); 17 | w.publish(); 18 | 19 | let jh = thread::spawn(move || *r.enter().unwrap()); 20 | 21 | w.publish(); 22 | w.append(CounterAddOp(1)); 23 | 24 | let val = jh.join().unwrap(); 25 | 26 | assert_eq!(1, val); 27 | }); 28 | } 29 | } 30 | --------------------------------------------------------------------------------