├── .github └── workflows │ ├── doc.yml │ └── test.yml ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── bench-scripts ├── circ │ ├── bench-long-queue.py │ ├── bench.py │ ├── experiment.sh │ ├── legends.py │ ├── plot-long-queue.py │ ├── plot-map.py │ └── plot-queue.py ├── hp-brcu │ ├── bench-long-running.py │ ├── bench.py │ ├── experiment.sh │ ├── legends.py │ ├── plot-long-running.py │ └── plot.py └── hp-revisited │ ├── bench-hp-trees.py │ ├── bench-short-lists.py │ ├── bench.py │ ├── experiment.sh │ ├── legends.py │ ├── plot-hp-trees.py │ ├── plot-short-lists.py │ └── plot.py ├── docs └── adding-your-smr.md ├── requirements.txt ├── rust-toolchain ├── smrs ├── cdrc │ ├── Cargo.toml │ └── src │ │ ├── internal │ │ ├── mod.rs │ │ ├── smr │ │ │ ├── ebr.rs │ │ │ ├── ebr_impl │ │ │ │ ├── atomic.rs │ │ │ │ ├── collector.rs │ │ │ │ ├── default.rs │ │ │ │ ├── deferred.rs │ │ │ │ ├── epoch.rs │ │ │ │ ├── guard.rs │ │ │ │ ├── internal.rs │ │ │ │ ├── mod.rs │ │ │ │ └── sync │ │ │ │ │ ├── list.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── once_lock.rs │ │ │ │ │ └── queue.rs │ │ │ ├── hp.rs │ │ │ ├── hp_impl │ │ │ │ ├── domain.rs │ │ │ │ ├── hazard.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── retire.rs │ │ │ │ └── thread.rs │ │ │ └── mod.rs │ │ ├── smr_common.rs │ │ └── utils.rs │ │ ├── lib.rs │ │ ├── strongs.rs │ │ └── weaks.rs ├── circ │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ ├── smr │ │ ├── ebr.rs │ │ ├── ebr_impl │ │ │ ├── atomic.rs │ │ │ ├── collector.rs │ │ │ ├── default.rs │ │ │ ├── deferred.rs │ │ │ ├── epoch.rs │ │ │ ├── guard.rs │ │ │ ├── internal.rs │ │ │ ├── mod.rs │ │ │ └── sync │ │ │ │ ├── list.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── once_lock.rs │ │ │ │ └── queue.rs │ │ ├── hp.rs │ │ ├── hp_impl │ │ │ ├── domain.rs │ │ │ ├── hazard.rs │ │ │ ├── mod.rs │ │ │ ├── retire.rs │ │ │ └── thread.rs │ │ └── mod.rs │ │ ├── smr_common.rs │ │ ├── strong.rs │ │ ├── utils.rs │ │ └── weak.rs ├── hp-brcu │ ├── Cargo.toml │ └── src │ │ ├── deferred.rs │ │ ├── epoch.rs │ │ 
├── handle.rs │ │ ├── hazard.rs │ │ ├── internal.rs │ │ ├── lib.rs │ │ ├── pointers.rs │ │ ├── queue.rs │ │ └── rollback.rs ├── hp-pp │ ├── Cargo.toml │ ├── README.md │ ├── src │ │ ├── domain.rs │ │ ├── hazard.rs │ │ ├── lib.rs │ │ ├── retire.rs │ │ ├── tag.rs │ │ └── thread.rs │ └── tests │ │ ├── harris_list.rs │ │ └── test.rs ├── nbr │ ├── Cargo.toml │ └── src │ │ ├── block_bag.rs │ │ ├── collector.rs │ │ ├── lib.rs │ │ ├── recovery.rs │ │ └── stats.rs └── vbr │ ├── Cargo.toml │ └── src │ └── lib.rs ├── src ├── bin │ ├── cdrc-ebr-flush.rs │ ├── cdrc-ebr.rs │ ├── cdrc-hp.rs │ ├── circ-ebr.rs │ ├── circ-hp.rs │ ├── double-link.rs │ ├── ebr.rs │ ├── hp-brcu.rs │ ├── hp-pp.rs │ ├── hp-rcu.rs │ ├── hp.rs │ ├── long-running.rs │ ├── nbr.rs │ ├── nr.rs │ ├── pebr.rs │ └── vbr.rs ├── config │ ├── map.rs │ └── mod.rs ├── ds_impl │ ├── cdrc │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ ├── double_link.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ └── skip_list.rs │ ├── circ_ebr │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ ├── double_link.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ └── skip_list.rs │ ├── circ_hp │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ ├── double_link.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ └── skip_list.rs │ ├── ebr │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ ├── double_link.rs │ │ ├── elim_ab_tree.rs │ │ ├── ellen_tree.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ └── skip_list.rs │ ├── hp │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ ├── double_link.rs │ │ ├── elim_ab_tree.rs │ │ ├── ellen_tree.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ ├── pointers.rs │ │ └── skip_list.rs │ ├── hp_brcu │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ 
├── elim_ab_tree.rs │ │ ├── list.rs │ │ ├── list_alter.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ └── skip_list.rs │ ├── hp_pp │ │ ├── bonsai_tree.rs │ │ ├── ellen_tree.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ └── skip_list.rs │ ├── mod.rs │ ├── nbr │ │ ├── concurrent_map.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ └── natarajan_mittal_tree.rs │ ├── nr │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ ├── double_link.rs │ │ ├── elim_ab_tree.rs │ │ ├── ellen_tree.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ ├── pointers.rs │ │ └── skip_list.rs │ ├── pebr │ │ ├── bonsai_tree.rs │ │ ├── concurrent_map.rs │ │ ├── elim_ab_tree.rs │ │ ├── ellen_tree.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ ├── shield_pool.rs │ │ └── skip_list.rs │ └── vbr │ │ ├── concurrent_map.rs │ │ ├── elim_ab_tree.rs │ │ ├── list.rs │ │ ├── michael_hash_map.rs │ │ ├── mod.rs │ │ ├── natarajan_mittal_tree.rs │ │ └── skip_list.rs ├── lib.rs └── utils.rs └── test-scripts ├── sanitize-circ.sh ├── sanitize-elim.sh ├── sanitize-hp.sh ├── sanitize-hppp.sh ├── sanitize-hpsh.sh ├── stress-vbr.sh ├── test-hpsh.sh ├── test-skiplist.sh └── test-vbr.sh /.github/workflows/doc.yml: -------------------------------------------------------------------------------- 1 | name: Build Docs 2 | 3 | on: [push, pull_request] 4 | 5 | permissions: 6 | contents: read 7 | pages: write 8 | id-token: write 9 | 10 | jobs: 11 | build: 12 | name: Build 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v4 18 | 19 | - name: Setup Rust 20 | uses: dtolnay/rust-toolchain@stable 21 | 22 | - name: Configure cache 23 | uses: Swatinem/rust-cache@v2 24 | 25 | - name: Setup pages 26 | id: pages 27 | uses: actions/configure-pages@v4 28 | 29 | - name: Build docs 
30 | run: | 31 | cargo clean --doc 32 | cargo doc --workspace --no-deps --lib 33 | cargo doc -p crossbeam-epoch --no-deps 34 | cargo doc -p crossbeam-pebr-epoch --no-deps 35 | 36 | # GitHub cannot find the nested index file by default. 37 | # We need to specify the target manually. 38 | - name: Add redirect 39 | run: echo '' > target/doc/index.html 40 | 41 | - name: Remove lock file 42 | run: rm target/doc/.lock 43 | 44 | - name: Upload artifact 45 | uses: actions/upload-pages-artifact@v3 46 | with: 47 | path: target/doc 48 | 49 | deploy: 50 | name: Deploy 51 | needs: build 52 | runs-on: ubuntu-latest 53 | 54 | environment: 55 | name: github-pages 56 | url: ${{ steps.deployment.outputs.page_url }} 57 | 58 | steps: 59 | - name: Deploy to GitHub Pages 60 | id: deployment 61 | uses: actions/deploy-pages@v4 62 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test Benchmark 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v3 11 | with: 12 | submodules: recursive 13 | 14 | - name: Install clang 15 | run: sudo apt-get update && sudo apt-get install -y clang 16 | 17 | - name: Install Rust nightly toolchain 18 | uses: actions-rs/toolchain@v1 19 | with: 20 | toolchain: nightly 21 | components: rustfmt, clippy 22 | 23 | - name: Install cargo-audit 24 | run: cargo install cargo-audit 25 | 26 | - name: Check code formatting 27 | run: cargo fmt -- --check 28 | 29 | - name: Run checks 30 | run: | 31 | cargo check --verbose 32 | cargo audit 33 | 34 | - name: Run tests 35 | run: | 36 | cargo test -- --nocapture --test-threads 1 37 | cargo test --release -- --nocapture --test-threads 1 38 | -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "./smrs/hp-pp", 4 | "./smrs/nbr", 5 | "./smrs/cdrc", 6 | "./smrs/hp-brcu", 7 | "./smrs/vbr", 8 | "./smrs/circ", 9 | ] 10 | 11 | [package] 12 | name = "smr-benchmark" 13 | version = "0.1.0" 14 | authors = ["authors"] 15 | edition = "2021" 16 | description = "SMR Benchmark: A Microbenchmark Suite for Concurrent Safe Memory Reclamation Schemes" 17 | repository = "https://github.com/kaist-cp/smr-benchmark" 18 | readme = "README.md" 19 | 20 | [dependencies] 21 | bitflags = "2.5" 22 | cfg-if = "1.0" 23 | clap = { version = "4.5.4", features = ["derive", "string"] } 24 | crossbeam-utils = "0.8" 25 | csv = "1.3.0" 26 | rand = "0.8" 27 | typenum = "1.17" 28 | num = "0.4.3" 29 | arrayvec = "0.7.6" 30 | scopeguard = "1" 31 | hp_pp = { path = "./smrs/hp-pp" } 32 | nbr = { path = "./smrs/nbr" } 33 | cdrc = { path = "./smrs/cdrc" } 34 | hp-brcu = { path = "./smrs/hp-brcu" } 35 | vbr = { path = "./smrs/vbr" } 36 | circ = { path = "./smrs/circ" } 37 | 38 | [target.'cfg(target_os = "linux")'.dependencies] 39 | tikv-jemallocator = "0.5" 40 | tikv-jemalloc-ctl = "0.5" 41 | 42 | [dependencies.crossbeam-ebr] 43 | package = "crossbeam-epoch" 44 | git = "https://github.com/kaist-cp/crossbeam" 45 | branch = "smr-benchmark" 46 | 47 | [dependencies.crossbeam-pebr] 48 | package = "crossbeam-pebr-epoch" 49 | git = "https://github.com/kaist-cp/crossbeam" 50 | branch = "pebr" 51 | 52 | [profile.release] 53 | lto = true 54 | codegen-units = 1 55 | 56 | [profile.release-with-debug] 57 | inherits = "release" 58 | debug = true 59 | 60 | [profile.release-simple] 61 | inherits = "release" 62 | debug = true 63 | lto = false 64 | codegen-units = 16 65 | 66 | [features] 67 | sanitize = ["crossbeam-pebr/sanitize"] 68 | -------------------------------------------------------------------------------- /Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 2 | 3 | COPY . /bench 4 | WORKDIR /bench 5 | 6 | RUN apt-get update && apt-get install -y \ 7 | python3.10 \ 8 | python3-pip \ 9 | curl && \ 10 | pip3 install -r requirements.txt && \ 11 | rm -rf /var/lib/apt/lists/* && \ 12 | (curl https://sh.rustup.rs -sSf | bash -s -- -y) && \ 13 | (echo 'source $HOME/.cargo/env' >> $HOME/.bashrc) && \ 14 | # Download dependencies and pre-build binaries 15 | ~/.cargo/bin/cargo build --release && \ 16 | ~/.cargo/bin/cargo test --no-run --release 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 KAIST Concurrency & Parallelism Laboratory 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /bench-scripts/circ/bench-long-queue.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | import os 5 | 6 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 7 | BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "target", "release") 8 | 9 | mms_queue = ['ebr', 'cdrc-ebr', 'circ-ebr'] 10 | runs = 5 11 | 12 | if os.path.exists('.git'): 13 | subprocess.run(['git', 'submodule', 'update', '--init', '--recursive']) 14 | subprocess.run(['cargo', 'build', '--release']) 15 | 16 | 17 | def extract_interval(cmd): 18 | for i in range(len(cmd)): 19 | if cmd[i] == '-i' and i + 1 < len(cmd): 20 | return int(cmd[i + 1]) 21 | return 10 22 | 23 | cmds = [] 24 | 25 | for mm in mms_queue: 26 | for i in range(10, 61, 10): 27 | cmd = [os.path.join(BIN_PATH, "double-link"), '-m', mm, '-i', str(i), '-t', '64', '-o', os.path.join(RESULTS_PATH, 'double-link-long-running.csv')] 28 | cmds.append(cmd) 29 | 30 | print('number of configurations: ', len(cmds)) 31 | print('estimated time: ', sum(map(extract_interval, cmds)) // 60, ' min *', runs, 'times') 32 | 33 | os.makedirs(RESULTS_PATH, exist_ok=True) 34 | failed = [] 35 | for run in range(runs): 36 | for i, cmd in enumerate(cmds): 37 | print("run {}/{}, bench {}/{}: '{}'".format(run + 1, runs, i + 1, len(cmds), ' '.join(cmd))) 38 | try: 39 | subprocess.run(cmd, timeout=extract_interval(cmd) + 30) 40 | except subprocess.TimeoutExpired: 41 | print("timeout") 42 | failed.append(' '.join(cmd)) 43 | except KeyboardInterrupt: 44 | if len(failed) > 0: 45 | print("====failed====") 46 | print("\n".join(failed)) 47 | exit(0) 48 | except: 49 | failed.append(' '.join(cmd)) 50 | 51 | if len(failed) > 0: 52 | print("====failed====") 53 | print("\n".join(failed)) 54 | 
-------------------------------------------------------------------------------- /bench-scripts/circ/bench.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | import os, argparse 5 | 6 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 7 | BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "target", "release") 8 | 9 | dss = ['h-list', 'hm-list', 'hhs-list', 'hash-map', 'nm-tree', 'skip-list'] 10 | mms_map = ['nr', 'ebr', 'hp', 'circ-ebr', 'circ-hp', 'cdrc-ebr', 'cdrc-hp'] 11 | mms_queue = ['nr', 'ebr', 'circ-ebr', 'cdrc-ebr', 'cdrc-ebr-flush'] 12 | i = 10 13 | runs = 1 14 | gs = [0, 1, 2] 15 | 16 | t_step, t_end = 0, 0 17 | cpu_count = os.cpu_count() 18 | if not cpu_count or cpu_count <= 12: 19 | t_step, t_end = 2, 16 20 | elif cpu_count <= 24: 21 | t_step, t_end = 4, 32 22 | elif cpu_count <= 64: 23 | t_step, t_end = 8, 128 24 | else: 25 | t_step, t_end = 8, 192 26 | 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument("-e", "--end", dest="end", type=int, default=t_end, 29 | help="the maximum number in a sequence of the number of threads") 30 | parser.add_argument("-t", "--step", dest="step", type=int, default=t_step, 31 | help="the interval between adjacent pair in a sequence of the number of threads") 32 | args = parser.parse_args() 33 | t_end = args.end 34 | t_step = args.step 35 | 36 | ts_map = list(map(str, [1] + list(range(t_step, t_end + 1, t_step)))) 37 | ts_queue = list(map(str, [1] + list(range(t_step, t_end + 1, t_step)))) 38 | 39 | if os.path.exists('.git'): 40 | subprocess.run(['git', 'submodule', 'update', '--init', '--recursive']) 41 | subprocess.run(['cargo', 'build', '--release']) 42 | 43 | def key_ranges(ds): 44 | if ds in ["h-list", "hm-list", "hhs-list"]: 45 | return ["1000", "10000"] 46 | else: 47 | # 100K and 100M 48 | return ["100000", "100000000"] 49 | 50 | def invalid(mm, ds, 
g): 51 | is_invalid = False 52 | if ds == 'hhs-list': 53 | is_invalid |= g == 0 # HHSList is just HList with faster get() 54 | if mm == 'hp': 55 | is_invalid |= ds in ["h-list", "hhs-list", "nm-tree"] 56 | return is_invalid 57 | 58 | cmds = [] 59 | estimated_time = 0 60 | 61 | for ds in dss: 62 | for mm in mms_map: 63 | for g in gs: 64 | if invalid(mm, ds, g): 65 | continue 66 | for t in ts_map: 67 | for kr in key_ranges(ds): 68 | cmd = [os.path.join(BIN_PATH, mm), '-i', str(i), '-d', ds, '-g', str(g), '-t', t, '-r', str(kr), '-o', os.path.join(RESULTS_PATH, f'{ds}.csv')] 69 | cmds.append(cmd) 70 | estimated_time += i * (1.1 if int(kr) <= 100000 else 1.5) 71 | 72 | for mm in mms_queue: 73 | for t in ts_queue: 74 | cmd = [os.path.join(BIN_PATH, "double-link"), '-m', mm, '-i', str(i), '-t', str(t), '-o', os.path.join(RESULTS_PATH, 'double-link.csv')] 75 | cmds.append(cmd) 76 | estimated_time += i * 1.1 77 | 78 | print('number of configurations: ', len(cmds)) 79 | print('estimated time: ', int(estimated_time) // 60, ' min *', runs, 'times') 80 | 81 | os.makedirs(RESULTS_PATH, exist_ok=True) 82 | failed = [] 83 | for run in range(runs): 84 | for i, cmd in enumerate(cmds): 85 | print("run {}/{}, bench {}/{}: '{}'".format(run + 1, runs, i + 1, len(cmds), ' '.join(cmd))) 86 | try: 87 | subprocess.run(cmd, timeout=i+30) 88 | except subprocess.TimeoutExpired: 89 | print("timeout") 90 | failed.append(' '.join(cmd)) 91 | except KeyboardInterrupt: 92 | if len(failed) > 0: 93 | print("====failed====") 94 | print("\n".join(failed)) 95 | exit(0) 96 | except: 97 | failed.append(' '.join(cmd)) 98 | 99 | if len(failed) > 0: 100 | print("====failed====") 101 | print("\n".join(failed)) 102 | -------------------------------------------------------------------------------- /bench-scripts/circ/experiment.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | python3 ./bench.py "$@" 4 | python3 ./bench-long-queue.py 5 | 6 | 
python3 ./plot-map.py "$@" 7 | python3 ./plot-queue.py "$@" 8 | python3 ./plot-long-queue.py 9 | -------------------------------------------------------------------------------- /bench-scripts/circ/legends.py: -------------------------------------------------------------------------------- 1 | import os 2 | import matplotlib.pyplot as plt 3 | 4 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 5 | 6 | color_triple = ["#E53629", "#2CD23E", "#4149C3"] 7 | face_alpha = "DF" 8 | 9 | EBR = "ebr" 10 | NR = "nr" 11 | HP = "hp" 12 | CDRC_EBR = "cdrc-ebr" 13 | CDRC_HP = "cdrc-hp" 14 | CIRC_EBR = "circ-ebr" 15 | CIRC_HP = "circ-hp" 16 | CDRC_EBR_FLUSH = "cdrc-ebr-flush" 17 | 18 | line_shapes = { 19 | NR: { 20 | "marker": ".", 21 | "color": "k", 22 | "linestyle": "-", 23 | }, 24 | EBR: { 25 | "marker": "o", 26 | "color": color_triple[0], 27 | "markeredgewidth": 0.75, 28 | "markerfacecolor": color_triple[0] + face_alpha, 29 | "markeredgecolor": "k", 30 | "linestyle": "-", 31 | }, 32 | CDRC_EBR: { 33 | "marker": "o", 34 | "color": color_triple[1], 35 | "markeredgewidth": 0.75, 36 | "markerfacecolor": color_triple[1] + face_alpha, 37 | "markeredgecolor": "k", 38 | "linestyle": "dotted", 39 | }, 40 | CIRC_EBR: { 41 | "marker": "o", 42 | "color": color_triple[2], 43 | "markeredgewidth": 0.75, 44 | "markerfacecolor": color_triple[2] + face_alpha, 45 | "markeredgecolor": "k", 46 | "linestyle": "dashed", 47 | }, 48 | HP: { 49 | "marker": "v", 50 | "color": color_triple[0], 51 | "markeredgewidth": 0.75, 52 | "markerfacecolor": color_triple[0] + face_alpha, 53 | "markeredgecolor": "k", 54 | "linestyle": "-", 55 | }, 56 | CDRC_HP: { 57 | "marker": "v", 58 | "color": color_triple[1], 59 | "markeredgewidth": 0.75, 60 | "markerfacecolor": color_triple[1] + face_alpha, 61 | "markeredgecolor": "k", 62 | "linestyle": "dotted", 63 | }, 64 | CIRC_HP: { 65 | "marker": "v", 66 | "color": color_triple[2], 67 | "markeredgewidth": 0.75, 68 | "markerfacecolor": 
color_triple[2] + face_alpha, 69 | "markeredgecolor": "k", 70 | "linestyle": "dashed", 71 | }, 72 | CDRC_EBR_FLUSH: { 73 | "marker": "s", 74 | "color": "#828282", 75 | "markeredgewidth": 0.75, 76 | "markerfacecolor": "#828282" + face_alpha, 77 | "markeredgecolor": "k", 78 | "linestyle": "dashed", 79 | } 80 | } 81 | 82 | SMRS = [EBR, NR, HP, CDRC_EBR, CDRC_HP, CIRC_EBR, CIRC_HP, CDRC_EBR_FLUSH] 83 | 84 | os.makedirs(f'{RESULTS_PATH}/legends', exist_ok=True) 85 | for smr in SMRS: 86 | fig, ax = plt.subplots(ncols=1) 87 | ax.plot([0] * 3, linewidth=3, markersize=18, **line_shapes[smr]) 88 | ax.set_xlim(2/3, 4/3) 89 | ax.set_axis_off() 90 | fig.set_size_inches(1.25, 0.75) 91 | fig.tight_layout() 92 | fig.savefig(f"{RESULTS_PATH}/legends/{smr}.pdf", bbox_inches="tight") 93 | -------------------------------------------------------------------------------- /bench-scripts/circ/plot-long-queue.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | import pandas as pd 3 | import warnings 4 | import os 5 | import matplotlib 6 | import matplotlib.pyplot as plt 7 | 8 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 9 | long_running_is = list(range(10, 61, 10)) 10 | cpu_count = os.cpu_count() 11 | ts = [cpu_count//2, cpu_count, cpu_count*2] 12 | 13 | # raw column names 14 | THREADS = "threads" 15 | THROUGHPUT = "throughput" 16 | PEAK_MEM = "peak_mem" 17 | AVG_MEM = "avg_mem" 18 | INTERVAL = "interval" 19 | 20 | # legend 21 | SMR_ONLY = "SMR\n" 22 | 23 | EBR = "ebr" 24 | NR = "nr" 25 | HP = "hp" 26 | CDRC_EBR = "cdrc-ebr" 27 | CDRC_HP = "cdrc-hp" 28 | CIRC_EBR = "circ-ebr" 29 | CIRC_HP = "circ-hp" 30 | 31 | SMR_ONLYs = [EBR, CDRC_EBR, CIRC_EBR] 32 | 33 | color_triple = ["#E53629", "#2CD23E", "#4149C3"] 34 | face_alpha = "DF" 35 | 36 | line_shapes = { 37 | EBR: { 38 | "marker": "o", 39 | "color": color_triple[0], 40 | "markeredgewidth": 0.75, 41 | "markerfacecolor": color_triple[0] + 
face_alpha, 42 | "markeredgecolor": "k", 43 | "linestyle": "-", 44 | }, 45 | CDRC_EBR: { 46 | "marker": "o", 47 | "color": color_triple[1], 48 | "markeredgewidth": 0.75, 49 | "markerfacecolor": color_triple[1] + face_alpha, 50 | "markeredgecolor": "k", 51 | "linestyle": "dotted", 52 | }, 53 | CIRC_EBR: { 54 | "marker": "o", 55 | "color": color_triple[2], 56 | "markeredgewidth": 0.75, 57 | "markerfacecolor": color_triple[2] + face_alpha, 58 | "markeredgecolor": "k", 59 | "linestyle": "dashed", 60 | }, 61 | } 62 | 63 | mm_order = { 64 | EBR: 1, 65 | CDRC_EBR: 2, 66 | CIRC_EBR: 4, 67 | } 68 | 69 | def draw(name, data, y_value, y_label): 70 | plt.figure(figsize=(5, 4)) 71 | 72 | for mm in sorted(list(set(data.mm)), key=lambda mm: mm_order[mm]): 73 | d = data[data.mm == mm].sort_values(by=[INTERVAL], axis=0) 74 | plt.plot(d[INTERVAL], d[y_value], 75 | linewidth=3, markersize=15, **line_shapes[mm], zorder=30) 76 | 77 | plt.xlabel("Time interval to run (seconds)", fontsize=13) 78 | if y_label: 79 | plt.ylabel(y_label, fontsize=13) 80 | plt.yticks(fontsize=12) 81 | plt.xticks(fontsize=12) 82 | plt.grid(alpha=0.5) 83 | plt.savefig(name, bbox_inches='tight') 84 | 85 | 86 | if __name__ == '__main__': 87 | warnings.filterwarnings("ignore") 88 | pd.set_option('display.max_rows', None) 89 | 90 | # avoid Type 3 fonts 91 | matplotlib.rcParams['pdf.fonttype'] = 42 92 | matplotlib.rcParams['ps.fonttype'] = 42 93 | 94 | os.makedirs(f'{RESULTS_PATH}/queue-long-running', exist_ok=True) 95 | 96 | # preprocess (map) 97 | data = pd.read_csv(f'{RESULTS_PATH}/' + "double-link-long-running.csv") 98 | data.throughput = data.throughput.map(lambda x: x / 1_000_000) 99 | data.peak_mem = data.peak_mem.map(lambda x: x / (2 ** 30)) 100 | data.avg_mem = data.avg_mem.map(lambda x: x / (2 ** 30)) 101 | 102 | # take average of each runs 103 | avg = data.groupby(['mm', 'interval']).mean().reset_index() 104 | avg[SMR_ONLY] = pd.Categorical(avg.mm.map(str), SMR_ONLYs) 105 | 106 | y_label = 'Avg memory 
usage (GiB)' 107 | name = f'{RESULTS_PATH}/queue-long-running/long-running-queue_avg_mem.pdf' 108 | draw(name, avg, AVG_MEM, y_label) 109 | -------------------------------------------------------------------------------- /bench-scripts/hp-brcu/bench-long-running.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | 4 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 5 | BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "target", "release", "long-running") 6 | 7 | mms = ['nr', 'ebr', 'pebr', 'hp', 'hp-pp', 'nbr', 'nbr-large', 'hp-brcu', 'hp-rcu', 'vbr'] 8 | 9 | krs = [(2 ** e) for e in range(18, 30, 1)] 10 | cpu_count = os.cpu_count() 11 | writers = cpu_count // 2 12 | readers = cpu_count // 2 13 | runs = 4 14 | i = 10 15 | 16 | if os.path.exists('.git'): 17 | subprocess.run(['git', 'submodule', 'update', '--init', '--recursive']) 18 | subprocess.run(['cargo', 'build', '--release', '--bin', 'long-running']) 19 | 20 | run_cmd = [BIN_PATH, f'-i{i}', f'-w{writers}', f'-g{readers}'] 21 | 22 | cmds = [] 23 | 24 | for mm in mms: 25 | for kr in krs: 26 | cmd = run_cmd + [f'-m{mm}', f'-r{kr}', f'-o{RESULTS_PATH}/long-running.csv'] 27 | cmds.append(cmd) 28 | 29 | print('number of configurations: ', len(cmds)) 30 | print('estimated time: ', (len(cmds) * i * 1.7) // 60, ' min *', runs, 'times') 31 | 32 | os.makedirs(RESULTS_PATH, exist_ok=True) 33 | failed = [] 34 | for run in range(runs): 35 | for i, cmd in enumerate(cmds): 36 | print("run {}/{}, bench {}/{}: '{}'".format(run + 1, runs, i + 1, len(cmds), ' '.join(cmd))) 37 | try: 38 | # NOTE(`timeout=120`): prefilling may take a while... 
39 | subprocess.run(cmd, timeout=120, check=True) 40 | except subprocess.TimeoutExpired: 41 | print("timeout") 42 | failed.append(' '.join(cmd)) 43 | except KeyboardInterrupt: 44 | if len(failed) > 0: 45 | print("====failed====") 46 | print("\n".join(failed)) 47 | exit(0) 48 | except: 49 | failed.append(' '.join(cmd)) 50 | 51 | if len(failed) > 0: 52 | print("====failed====") 53 | print("\n".join(failed)) 54 | -------------------------------------------------------------------------------- /bench-scripts/hp-brcu/bench.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | import os 5 | 6 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 7 | BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "target", "release") 8 | 9 | dss = ['h-list', 'hm-list', 'hhs-list', 'hash-map', 'nm-tree', 'skip-list'] 10 | # "-large" suffix if it uses a large garbage bag. 
11 | mms = ['nr', 'ebr', 'pebr', 'hp', 'hp-pp', 'nbr', 'nbr-large', 'hp-brcu', 'vbr', 'hp-rcu'] 12 | i = 10 13 | cpu_count = os.cpu_count() 14 | if not cpu_count or cpu_count <= 24: 15 | ts = list(map(str, [1] + list(range(4, 33, 4)))) 16 | elif cpu_count <= 64: 17 | ts = list(map(str, [1] + list(range(8, 129, 8)))) 18 | else: 19 | ts = list(map(str, [1] + list(range(12, 193, 12)))) 20 | runs = 1 21 | gs = [0, 1, 2, 3] 22 | 23 | if os.path.exists('.git'): 24 | subprocess.run(['git', 'submodule', 'update', '--init', '--recursive']) 25 | subprocess.run(['cargo', 'build', '--release']) 26 | 27 | def key_ranges(ds): 28 | if ds in ["h-list", "hm-list", "hhs-list"]: 29 | # 1K and 10K 30 | return ["1000", "10000"] 31 | else: 32 | # 100K and 100M 33 | return ["100000", "100000000"] 34 | 35 | def is_suffix(orig, suf): 36 | return len(suf) <= len(orig) and mm[-len(suf):] == suf 37 | 38 | def make_cmd(mm, i, ds, g, t, kr): 39 | bag = "small" 40 | if is_suffix(mm, "-large"): 41 | mm = mm[:len(mm)-len("-large")] 42 | bag = "large" 43 | 44 | return [os.path.join(BIN_PATH, mm), 45 | '-i', str(i), 46 | '-d', str(ds), 47 | '-g', str(g), 48 | '-t', str(t), 49 | '-r', str(kr), 50 | '-b', bag, 51 | '-o', os.path.join(RESULTS_PATH, f'{ds}.csv')] 52 | 53 | def invalid(mm, ds, g): 54 | is_invalid = False 55 | if ds == 'hhs-list': 56 | is_invalid |= g == 0 # HHSList is just HList with faster get() 57 | if mm == 'hp': 58 | is_invalid |= ds in ["h-list", "hhs-list", "nm-tree"] 59 | if mm == 'nbr': 60 | is_invalid |= ds in ["hm-list", "skip-list"] 61 | return is_invalid 62 | 63 | cmds = [] 64 | 65 | for ds in dss: 66 | for kr in key_ranges(ds): 67 | for mm in mms: 68 | for g in gs: 69 | if invalid(mm, ds, g): 70 | continue 71 | for t in ts: 72 | cmds.append(make_cmd(mm, i, ds, g, t, kr)) 73 | 74 | print('number of configurations: ', len(cmds)) 75 | print('estimated time: ', (len(cmds) * i * 1.1) // 60, ' min *', runs, 'times\n') 76 | 77 | for i, cmd in enumerate(cmds): 78 | try: 79 | 
print(f"\rdry-running commands... ({i+1}/{len(cmds)})", end="") 80 | subprocess.run(cmd + ['--dry-run']) 81 | except: 82 | print(f"A dry-run for the following command is failed:\n{' '.join(cmd)}") 83 | exit(1) 84 | print("\nAll dry-runs passed!\n") 85 | 86 | os.makedirs(RESULTS_PATH, exist_ok=True) 87 | failed = [] 88 | for run in range(runs): 89 | for i, cmd in enumerate(cmds): 90 | print("run {}/{}, bench {}/{}: '{}'".format(run + 1, runs, i + 1, len(cmds), ' '.join(cmd))) 91 | try: 92 | subprocess.run(cmd, timeout=i+30) 93 | except subprocess.TimeoutExpired: 94 | print("timeout") 95 | failed.append(' '.join(cmd)) 96 | except KeyboardInterrupt: 97 | if len(failed) > 0: 98 | print("====failed====") 99 | print("\n".join(failed)) 100 | exit(0) 101 | except: 102 | failed.append(' '.join(cmd)) 103 | 104 | if len(failed) > 0: 105 | print("====failed====") 106 | print("\n".join(failed)) 107 | -------------------------------------------------------------------------------- /bench-scripts/hp-brcu/experiment.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "=======================================================================" 4 | echo "1. Throughputs & unreclaimed memory blocks on varying ratio of writes" 5 | echo "=======================================================================" 6 | 7 | python3 ./bench.py 8 | python3 ./plot.py 9 | 10 | echo "=======================================================================" 11 | echo "2. 
Throughputs on a long-running operations" 12 | echo "=======================================================================" 13 | 14 | python3 ./bench-long-running.py 15 | python3 ./plot-long-running.py 16 | -------------------------------------------------------------------------------- /bench-scripts/hp-brcu/legends.py: -------------------------------------------------------------------------------- 1 | import os 2 | import matplotlib.pyplot as plt 3 | import matplotlib.colors as colors 4 | from matplotlib.path import Path 5 | from matplotlib.transforms import Affine2D 6 | 7 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 8 | 9 | EBR = "ebr" 10 | PEBR = "pebr" 11 | NR = "nr" 12 | HP = "hp" 13 | HP_PP = "hp-pp" 14 | NBR = "nbr" 15 | NBR_LARGE = "nbr-large" 16 | HP_BRCU = "hp-brcu" 17 | HP_RCU = "hp-rcu" 18 | VBR = "vbr" 19 | 20 | SMRs = [NR, EBR, NBR, NBR_LARGE, HP_PP, HP, PEBR, HP_BRCU, HP_RCU, VBR] 21 | 22 | FACE_ALPHA = 0.85 23 | 24 | # https://matplotlib.org/stable/gallery/lines_bars_and_markers/marker_reference.html 25 | line_shapes = { 26 | NR: { 27 | "marker": ".", 28 | "color": "k", 29 | "linestyle": "-", 30 | }, 31 | EBR: { 32 | "marker": "o", 33 | "color": "c", 34 | "linestyle": "-", 35 | }, 36 | HP: { 37 | "marker": "v", 38 | "color": "hotpink", 39 | "linestyle": "dashed", 40 | }, 41 | HP_PP: { 42 | "marker": "^", 43 | "color": "purple", 44 | "linestyle": "dashdot", 45 | }, 46 | PEBR: { 47 | # Diamond("D") shape, but smaller. 
48 | "marker": Path.unit_rectangle() 49 | .transformed(Affine2D().translate(-0.5, -0.5) 50 | .rotate_deg(45)), 51 | "color": "y", 52 | "linestyle": (5, (10, 3)), 53 | }, 54 | NBR: { 55 | "marker": "p", 56 | "color": "blue", 57 | "linestyle": (0, (3, 1, 1, 1)), 58 | }, 59 | NBR_LARGE: { 60 | "marker": "H", 61 | "color": "indigo", 62 | "linestyle": (0, (3, 1, 1, 1)), 63 | }, 64 | HP_BRCU: { 65 | "marker": "X", 66 | "color": "r", 67 | "linestyle": (5, (10, 3)), 68 | }, 69 | HP_RCU: { 70 | "marker": "P", 71 | "color": "green", 72 | "linestyle": (5, (10, 3)), 73 | }, 74 | VBR: { 75 | "marker": "d", 76 | "color": "orange", 77 | "linestyle": (0, (2, 1)), 78 | }, 79 | } 80 | 81 | # Add some common or derivable properties. 82 | line_shapes = dict(map( 83 | lambda kv: kv if kv[0] == NR else 84 | (kv[0], { **kv[1], 85 | "markerfacecolor": (*colors.to_rgb(kv[1]["color"]), FACE_ALPHA), 86 | "markeredgecolor": "k", 87 | "markeredgewidth": 0.75 }), 88 | line_shapes.items() 89 | )) 90 | 91 | if __name__ == "__main__": 92 | os.makedirs(f'{RESULTS_PATH}/legends', exist_ok=True) 93 | for smr in SMRs: 94 | fig, ax = plt.subplots(ncols=1) 95 | ax.plot([0] * 3, linewidth=3, markersize=18, **line_shapes[smr]) 96 | ax.set_xlim(2/3, 4/3) 97 | ax.set_axis_off() 98 | fig.set_size_inches(1.25, 0.75) 99 | fig.tight_layout() 100 | fig.savefig(f"{RESULTS_PATH}/legends/{smr}.pdf", bbox_inches="tight") 101 | -------------------------------------------------------------------------------- /bench-scripts/hp-revisited/bench-hp-trees.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | import os 5 | 6 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 7 | BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "target", "release") 8 | 9 | dss = ['nm-tree', 'efrb-tree'] 10 | # "-large" suffix if it uses a large garbage bag. 
11 | mms = ['hp'] 12 | i = 10 13 | cpu_count = os.cpu_count() 14 | if not cpu_count or cpu_count <= 24: 15 | ts = list(map(str, [1] + list(range(4, 33, 4)))) 16 | elif cpu_count <= 64: 17 | ts = list(map(str, [1] + list(range(8, 129, 8)))) 18 | else: 19 | ts = list(map(str, [1] + list(range(12, 193, 12)))) 20 | runs = 2 21 | gs = [0, 1, 2] 22 | 23 | subprocess.run(['cargo', 'build', '--release']) 24 | 25 | def key_ranges(ds): 26 | return ["100000"] 27 | 28 | def is_suffix(orig, suf): 29 | return len(suf) <= len(orig) and mm[-len(suf):] == suf 30 | 31 | def make_cmd(mm, i, ds, g, t, kr): 32 | bag = "small" 33 | if is_suffix(mm, "-large"): 34 | mm = mm[:len(mm)-len("-large")] 35 | bag = "large" 36 | 37 | return [os.path.join(BIN_PATH, mm), 38 | '-i', str(i), 39 | '-d', str(ds), 40 | '-g', str(g), 41 | '-t', str(t), 42 | '-r', str(kr), 43 | '-b', bag, 44 | '-o', os.path.join(RESULTS_PATH, f'{ds}.csv')] 45 | 46 | def invalid(mm, ds, g): 47 | is_invalid = False 48 | if ds == 'hhs-list': 49 | is_invalid |= g == 0 # HHSList is just HList with faster get() 50 | if mm == 'nbr': 51 | is_invalid |= ds in ["hm-list", "skip-list"] 52 | if ds == 'elim-ab-tree': 53 | is_invalid |= mm in ["pebr", "hp-pp", "vbr"] 54 | return is_invalid 55 | 56 | cmds = [] 57 | 58 | for ds in dss: 59 | for kr in key_ranges(ds): 60 | for mm in mms: 61 | for g in gs: 62 | if invalid(mm, ds, g): 63 | continue 64 | for t in ts: 65 | cmds.append(make_cmd(mm, i, ds, g, t, kr)) 66 | 67 | print('number of configurations: ', len(cmds)) 68 | print('estimated time: ', (len(cmds) * i * 1.1) // 60, ' min *', runs, 'times\n') 69 | 70 | for i, cmd in enumerate(cmds): 71 | try: 72 | print(f"\rdry-running commands... 
({i+1}/{len(cmds)})", end="") 73 | subprocess.run(cmd + ['--dry-run']) 74 | except: 75 | print(f"A dry-run for the following command is failed:\n{' '.join(cmd)}") 76 | exit(1) 77 | print("\nAll dry-runs passed!\n") 78 | 79 | os.makedirs(RESULTS_PATH, exist_ok=True) 80 | failed = [] 81 | for run in range(runs): 82 | for i, cmd in enumerate(cmds): 83 | print("run {}/{}, bench {}/{}: '{}'".format(run + 1, runs, i + 1, len(cmds), ' '.join(cmd))) 84 | try: 85 | subprocess.run(cmd, timeout=i+30) 86 | except subprocess.TimeoutExpired: 87 | print("timeout") 88 | failed.append(' '.join(cmd)) 89 | except KeyboardInterrupt: 90 | if len(failed) > 0: 91 | print("====failed====") 92 | print("\n".join(failed)) 93 | exit(0) 94 | except: 95 | failed.append(' '.join(cmd)) 96 | 97 | if len(failed) > 0: 98 | print("====failed====") 99 | print("\n".join(failed)) 100 | -------------------------------------------------------------------------------- /bench-scripts/hp-revisited/bench-short-lists.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | import os 5 | 6 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 7 | BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "target", "release") 8 | 9 | dss = ['hhs-list', 'hm-list'] 10 | # "-large" suffix if it uses a large garbage bag. 
11 | mms = ['hp', 'hp-pp'] 12 | i = 10 13 | cpu_count = os.cpu_count() 14 | if not cpu_count or cpu_count <= 24: 15 | ts = list(map(str, [1] + list(range(4, 33, 4)))) 16 | elif cpu_count <= 64: 17 | ts = list(map(str, [1] + list(range(8, 129, 8)))) 18 | else: 19 | ts = list(map(str, [1] + list(range(12, 193, 12)))) 20 | runs = 2 21 | gs = [0, 1, 2] 22 | 23 | subprocess.run(['cargo', 'build', '--release']) 24 | 25 | def key_ranges(ds): 26 | return ["16"] 27 | 28 | def is_suffix(orig, suf): 29 | return len(suf) <= len(orig) and mm[-len(suf):] == suf 30 | 31 | def make_cmd(mm, i, ds, g, t, kr): 32 | bag = "small" 33 | if is_suffix(mm, "-large"): 34 | mm = mm[:len(mm)-len("-large")] 35 | bag = "large" 36 | 37 | return [os.path.join(BIN_PATH, mm), 38 | '-i', str(i), 39 | '-d', str(ds), 40 | '-g', str(g), 41 | '-t', str(t), 42 | '-r', str(kr), 43 | '-b', bag, 44 | '-o', os.path.join(RESULTS_PATH, f'{ds}.csv')] 45 | 46 | def invalid(mm, ds, g): 47 | is_invalid = False 48 | if ds == 'hhs-list': 49 | is_invalid |= g == 0 # HHSList is just HList with faster get() 50 | if mm == 'nbr': 51 | is_invalid |= ds in ["hm-list", "skip-list"] 52 | if ds == 'elim-ab-tree': 53 | is_invalid |= mm in ["pebr", "hp-pp", "vbr"] 54 | return is_invalid 55 | 56 | cmds = [] 57 | 58 | for ds in dss: 59 | for kr in key_ranges(ds): 60 | for mm in mms: 61 | for g in gs: 62 | if invalid(mm, ds, g): 63 | continue 64 | for t in ts: 65 | cmds.append(make_cmd(mm, i, ds, g, t, kr)) 66 | 67 | print('number of configurations: ', len(cmds)) 68 | print('estimated time: ', (len(cmds) * i * 1.1) // 60, ' min *', runs, 'times\n') 69 | 70 | for i, cmd in enumerate(cmds): 71 | try: 72 | print(f"\rdry-running commands... 
({i+1}/{len(cmds)})", end="") 73 | subprocess.run(cmd + ['--dry-run']) 74 | except: 75 | print(f"A dry-run for the following command is failed:\n{' '.join(cmd)}") 76 | exit(1) 77 | print("\nAll dry-runs passed!\n") 78 | 79 | os.makedirs(RESULTS_PATH, exist_ok=True) 80 | failed = [] 81 | for run in range(runs): 82 | for i, cmd in enumerate(cmds): 83 | print("run {}/{}, bench {}/{}: '{}'".format(run + 1, runs, i + 1, len(cmds), ' '.join(cmd))) 84 | try: 85 | subprocess.run(cmd, timeout=i+30) 86 | except subprocess.TimeoutExpired: 87 | print("timeout") 88 | failed.append(' '.join(cmd)) 89 | except KeyboardInterrupt: 90 | if len(failed) > 0: 91 | print("====failed====") 92 | print("\n".join(failed)) 93 | exit(0) 94 | except: 95 | failed.append(' '.join(cmd)) 96 | 97 | if len(failed) > 0: 98 | print("====failed====") 99 | print("\n".join(failed)) 100 | -------------------------------------------------------------------------------- /bench-scripts/hp-revisited/bench.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | import os 5 | 6 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 7 | BIN_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "target", "release") 8 | 9 | dss = ['h-list', 'hm-list', 'hhs-list', 'hash-map', 'nm-tree', 'skip-list', 'elim-ab-tree'] 10 | # "-large" suffix if it uses a large garbage bag. 
11 | mms = ['nr', 'ebr', 'pebr', 'hp', 'hp-pp', 'hp-brcu', 'vbr'] 12 | i = 10 13 | cpu_count = os.cpu_count() 14 | if not cpu_count or cpu_count <= 24: 15 | ts = list(map(str, [1] + list(range(4, 33, 4)))) 16 | elif cpu_count <= 64: 17 | ts = list(map(str, [1] + list(range(8, 129, 8)))) 18 | else: 19 | ts = list(map(str, [1] + list(range(12, 193, 12)))) 20 | runs = 2 21 | gs = [0, 1, 2] 22 | 23 | subprocess.run(['cargo', 'build', '--release']) 24 | 25 | def key_ranges(ds): 26 | if ds in ["h-list", "hm-list", "hhs-list"]: 27 | # 1K and 10K 28 | return ["1000", "10000"] 29 | else: 30 | # 100K and 100M 31 | return ["100000", "100000000"] 32 | 33 | def is_suffix(orig, suf): 34 | return len(suf) <= len(orig) and mm[-len(suf):] == suf 35 | 36 | def make_cmd(mm, i, ds, g, t, kr): 37 | bag = "small" 38 | if is_suffix(mm, "-large"): 39 | mm = mm[:len(mm)-len("-large")] 40 | bag = "large" 41 | 42 | return [os.path.join(BIN_PATH, mm), 43 | '-i', str(i), 44 | '-d', str(ds), 45 | '-g', str(g), 46 | '-t', str(t), 47 | '-r', str(kr), 48 | '-b', bag, 49 | '-o', os.path.join(RESULTS_PATH, f'{ds}.csv')] 50 | 51 | def invalid(mm, ds, g): 52 | is_invalid = False 53 | if ds == 'hhs-list': 54 | is_invalid |= g == 0 # HHSList is just HList with faster get() 55 | if mm == 'nbr': 56 | is_invalid |= ds in ["hm-list", "skip-list"] 57 | if ds == 'elim-ab-tree': 58 | is_invalid |= mm in ["hp-pp"] 59 | return is_invalid 60 | 61 | cmds = [] 62 | 63 | for ds in dss: 64 | for kr in key_ranges(ds): 65 | for mm in mms: 66 | for g in gs: 67 | if invalid(mm, ds, g): 68 | continue 69 | for t in ts: 70 | cmds.append(make_cmd(mm, i, ds, g, t, kr)) 71 | 72 | print('number of configurations: ', len(cmds)) 73 | print('estimated time: ', (len(cmds) * i * 1.1) // 60, ' min *', runs, 'times\n') 74 | 75 | for i, cmd in enumerate(cmds): 76 | try: 77 | print(f"\rdry-running commands... 
({i+1}/{len(cmds)})", end="") 78 | subprocess.run(cmd + ['--dry-run']) 79 | except: 80 | print(f"A dry-run for the following command is failed:\n{' '.join(cmd)}") 81 | exit(1) 82 | print("\nAll dry-runs passed!\n") 83 | 84 | os.makedirs(RESULTS_PATH, exist_ok=True) 85 | failed = [] 86 | for run in range(runs): 87 | for i, cmd in enumerate(cmds): 88 | print("run {}/{}, bench {}/{}: '{}'".format(run + 1, runs, i + 1, len(cmds), ' '.join(cmd))) 89 | try: 90 | subprocess.run(cmd, timeout=i+30) 91 | except subprocess.TimeoutExpired: 92 | print("timeout") 93 | failed.append(' '.join(cmd)) 94 | except KeyboardInterrupt: 95 | if len(failed) > 0: 96 | print("====failed====") 97 | print("\n".join(failed)) 98 | exit(0) 99 | except: 100 | failed.append(' '.join(cmd)) 101 | 102 | if len(failed) > 0: 103 | print("====failed====") 104 | print("\n".join(failed)) 105 | -------------------------------------------------------------------------------- /bench-scripts/hp-revisited/experiment.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | python3 ./bench-scripts/hp-revisited/bench.py 4 | python3 ./bench-scripts/hp-revisited/bench-short-lists.py 5 | python3 ./bench-scripts/hp-revisited/bench-hp-trees.py 6 | 7 | python3 ./bench-scripts/hp-revisited/plot.py 8 | python3 ./bench-scripts/hp-revisited/plot-short-lists.py 9 | python3 ./bench-scripts/hp-revisited/plot-hp-trees.py 10 | -------------------------------------------------------------------------------- /bench-scripts/hp-revisited/legends.py: -------------------------------------------------------------------------------- 1 | import os 2 | import matplotlib.pyplot as plt 3 | import matplotlib.colors as colors 4 | from matplotlib.path import Path 5 | from matplotlib.transforms import Affine2D 6 | 7 | RESULTS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") 8 | 9 | EBR = "ebr" 10 | PEBR = "pebr" 11 | NR = "nr" 12 | HP = "hp" 13 | HP_PP = 
"hp-pp" 14 | HP_BRCU = "hp-brcu" 15 | HP_RCU = "hp-rcu" 16 | VBR = "vbr" 17 | 18 | SMRs = [NR, EBR, HP_PP, HP, PEBR, HP_BRCU, HP_RCU, VBR] 19 | 20 | FACE_ALPHA = 0.85 21 | 22 | # https://matplotlib.org/stable/gallery/lines_bars_and_markers/marker_reference.html 23 | line_shapes = { 24 | NR: { 25 | "marker": ".", 26 | "color": "k", 27 | "linestyle": "-", 28 | }, 29 | EBR: { 30 | "marker": "o", 31 | "color": "c", 32 | "linestyle": "-", 33 | }, 34 | HP: { 35 | "marker": "v", 36 | "color": "hotpink", 37 | "linestyle": "dashed", 38 | }, 39 | HP_PP: { 40 | "marker": "^", 41 | "color": "purple", 42 | "linestyle": "dashdot", 43 | }, 44 | PEBR: { 45 | # Diamond("D") shape, but smaller. 46 | "marker": Path.unit_rectangle() 47 | .transformed(Affine2D().translate(-0.5, -0.5) 48 | .rotate_deg(45)), 49 | "color": "y", 50 | "linestyle": (5, (10, 3)), 51 | }, 52 | HP_BRCU: { 53 | "marker": "X", 54 | "color": "r", 55 | "linestyle": (5, (10, 3)), 56 | }, 57 | HP_RCU: { 58 | "marker": "P", 59 | "color": "green", 60 | "linestyle": (5, (10, 3)), 61 | }, 62 | VBR: { 63 | "marker": "d", 64 | "color": "orange", 65 | "linestyle": (0, (2, 1)), 66 | }, 67 | # Used in `plot-short-lists` 68 | "PESSIM_HP": { 69 | "marker": "v", 70 | "color": "#828282", 71 | "linestyle": "dotted", 72 | }, 73 | } 74 | 75 | # Add some common or derivable properties. 
76 | line_shapes = dict(map( 77 | lambda kv: kv if kv[0] == NR else 78 | (kv[0], { **kv[1], 79 | "markerfacecolor": (*colors.to_rgb(kv[1]["color"]), FACE_ALPHA), 80 | "markeredgecolor": "k", 81 | "markeredgewidth": 0.75 }), 82 | line_shapes.items() 83 | )) 84 | 85 | if __name__ == "__main__": 86 | os.makedirs(f'{RESULTS_PATH}/legends', exist_ok=True) 87 | for smr in SMRs: 88 | fig, ax = plt.subplots(ncols=1) 89 | ax.plot([0] * 3, linewidth=3, markersize=18, **line_shapes[smr]) 90 | ax.set_xlim(2/3, 4/3) 91 | ax.set_axis_off() 92 | fig.set_size_inches(1.25, 0.75) 93 | fig.tight_layout() 94 | fig.savefig(f"{RESULTS_PATH}/legends/{smr}.pdf", bbox_inches="tight") 95 | -------------------------------------------------------------------------------- /docs/adding-your-smr.md: -------------------------------------------------------------------------------- 1 | # Tl;dr 2 | 3 | To add a new benchmark in `smr-benchmark`, you need to complete the following three tasks: 4 | 5 | 1. (If necessary) Implement your SMR in `./smrs` and define its dependency in `./Cargo.toml`. 6 | 2. Implement data structures in `./src/ds_impl/`. 7 | 3. Write the benchmark driver (a standalone binary for your SMR) in `./src/bin/<your-smr>.rs` 8 | 9 | # Details 10 | 11 | ### Implementing Your SMR 12 | 13 | Implement your SMR as a package in `./smrs` and define its dependency in `./Cargo.toml`. Alternatively, you can directly specify the dependency in `Cargo.toml` using the crates.io registry or a GitHub repository. 14 | 15 | * [Example 1 (CIRC; Implemented manually)](https://github.com/kaist-cp/smr-benchmark/tree/main/smrs/circ) 16 | * [Example 2 (`crossbeam-epoch`; Imported from GitHub)](https://github.com/kaist-cp/smr-benchmark/blob/main/Cargo.toml\#L40-L43) 17 | 18 | ### Implementing Data Structures 19 | 20 | Implement data structures in `./src/ds_impl/`. 
Follow this simple convention for the directory structure: 21 | 22 | * Define a common trait for your `ConcurrentMap` implementation and its stress test `smoke` in `concurrent_map.rs` ([Example](https://github.com/kaist-cp/smr-benchmark/blob/main/src/ds\_impl/ebr/concurrent\_map.rs)). 23 | * Implement your data structures according to the predefined `ConcurrentMap` trait ([Example](https://github.com/kaist-cp/smr-benchmark/blob/main/src/ds\_impl/ebr/list.rs\#L387-L412)), and include a test function that invokes `smoke` internally ([Example](https://github.com/kaist-cp/smr-benchmark/blob/main/src/ds\_impl/ebr/list.rs\#L485-L498)). 24 | 25 | There is currently no convention for other types of data structures, such as queues. 26 | 27 | ### Writing the Benchmark Driver 28 | 29 | The benchmark driver for map data structures (a standalone binary for your SMR) is located at `./src/bin/<your-smr>.rs` ([Example](https://github.com/kaist-cp/smr-benchmark/blob/main/src/bin/ebr.rs)). This will mostly be boilerplate code, so you should be able to write it easily by referring to existing examples. 30 | 31 | Afterward, you can run a benchmark by: 32 | 33 | ``` 34 | cargo run --release --bin <your-smr> -- \ 35 | -d <data-structure> \ 36 | -t <threads> \ 37 | -g <get-rate> \ 38 | -r <key-range> \ 39 | -i <interval> 40 | ``` 41 | 42 | Please refer to `README.md` or `cargo run --bin <your-smr> -- -h`. 43 | 44 | # Small Notes 45 | 46 | * To compile the suite, an **x86 Ubuntu** machine is required. 47 | * This is because some SMRs depend on x86 Ubuntu (e.g., PEBR and HP++ use a modified `membarrier`, which is specifically optimized for x86 Ubuntu). 48 | * By removing these dependencies, you can compile and run the suite on other environments (e.g., AArch64 macOS). 
49 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pyarrow==15.0.1 2 | pandas==2.2.1 3 | plotnine==0.13.1 4 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2024-05-04 2 | -------------------------------------------------------------------------------- /smrs/cdrc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cdrc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | crossbeam-utils = "0.8" 10 | membarrier = { git = "https://github.com/jeehoonkang/membarrier-rs.git", branch = "smr-benchmark" } 11 | scopeguard = "1.1.0" 12 | static_assertions = "1.1.0" 13 | atomic = "0.5" 14 | cfg-if = "1.0" 15 | rustc-hash = "1.1.0" 16 | memoffset = "0.7" 17 | 18 | [dev-dependencies] 19 | rand = "0.8" 20 | bitflags = "2.4.0" 21 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/mod.rs: -------------------------------------------------------------------------------- 1 | mod smr; 2 | mod smr_common; 3 | mod utils; 4 | 5 | pub use smr::{ebr_impl, hp_impl, CsEBR, CsHP}; 6 | pub use smr_common::{Acquired, Cs, RetireType}; 7 | pub use utils::{Counted, EjectAction, Pointer, TaggedCnt}; 8 | 9 | pub(crate) use utils::*; 10 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/ebr.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | 3 | use atomic::Ordering; 4 | 5 | use super::ebr_impl::{pin, Guard}; 6 | use crate::internal::utils::Counted; 7 | use 
crate::internal::{Acquired, Cs, RetireType, TaggedCnt}; 8 | 9 | /// A tagged pointer which is pointing a `CountedObjPtr`. 10 | /// 11 | /// We may want to use `crossbeam_ebr::Shared` as a `Acquired`, 12 | /// but trait interfaces can be complicated because `crossbeam_ebr::Shared` 13 | /// requires to specify a lifetime specifier. 14 | pub struct AcquiredEBR(TaggedCnt); 15 | 16 | impl Acquired for AcquiredEBR { 17 | #[inline(always)] 18 | fn as_ptr(&self) -> TaggedCnt { 19 | self.0 20 | } 21 | 22 | #[inline(always)] 23 | fn null() -> Self { 24 | Self(TaggedCnt::null()) 25 | } 26 | 27 | #[inline(always)] 28 | fn is_null(&self) -> bool { 29 | self.0.is_null() 30 | } 31 | 32 | #[inline(always)] 33 | fn swap(p1: &mut Self, p2: &mut Self) { 34 | mem::swap(p1, p2); 35 | } 36 | 37 | #[inline(always)] 38 | fn eq(&self, other: &Self) -> bool { 39 | self.0 == other.0 40 | } 41 | 42 | #[inline] 43 | fn clear(&mut self) { 44 | self.0 = TaggedCnt::null(); 45 | } 46 | 47 | #[inline] 48 | fn set_tag(&mut self, tag: usize) { 49 | self.0 = self.0.with_tag(tag); 50 | } 51 | 52 | #[inline] 53 | unsafe fn copy_to(&self, other: &mut Self) { 54 | other.0 = self.0; 55 | } 56 | } 57 | 58 | pub struct CsEBR { 59 | guard: Option, 60 | } 61 | 62 | impl From for CsEBR { 63 | #[inline(always)] 64 | fn from(guard: Guard) -> Self { 65 | Self { guard: Some(guard) } 66 | } 67 | } 68 | 69 | impl Cs for CsEBR { 70 | type RawShield = AcquiredEBR; 71 | 72 | #[inline(always)] 73 | fn new() -> Self { 74 | Self::from(pin()) 75 | } 76 | 77 | #[inline(always)] 78 | fn create_object(obj: T) -> *mut Counted { 79 | let obj = Counted::new(obj); 80 | Box::into_raw(Box::new(obj)) 81 | } 82 | 83 | #[inline(always)] 84 | fn reserve(&self, ptr: TaggedCnt, shield: &mut Self::RawShield) { 85 | *shield = AcquiredEBR(ptr); 86 | } 87 | 88 | #[inline(always)] 89 | fn protect_snapshot( 90 | &self, 91 | link: &atomic::Atomic>, 92 | shield: &mut Self::RawShield, 93 | ) -> bool { 94 | let ptr = link.load(Ordering::Acquire); 
95 | if !ptr.is_null() && unsafe { ptr.deref() }.ref_count() == 0 { 96 | shield.clear(); 97 | false 98 | } else { 99 | *shield = AcquiredEBR(ptr); 100 | true 101 | } 102 | } 103 | 104 | #[inline(always)] 105 | unsafe fn own_object(ptr: *mut Counted) -> Counted { 106 | *Box::from_raw(ptr) 107 | } 108 | 109 | #[inline(always)] 110 | unsafe fn retire(&self, ptr: *mut Counted, ret_type: RetireType) { 111 | debug_assert!(!ptr.is_null()); 112 | let cnt = &mut *ptr; 113 | if let Some(guard) = &self.guard { 114 | guard.defer_unchecked(move || { 115 | let inner_guard = Self::unprotected(); 116 | inner_guard.eject(cnt, ret_type); 117 | }); 118 | } else { 119 | self.eject(cnt, ret_type); 120 | } 121 | } 122 | 123 | #[inline] 124 | unsafe fn without_epoch() -> Self { 125 | Self { guard: None } 126 | } 127 | 128 | #[inline] 129 | unsafe fn unprotected() -> Self { 130 | Self { guard: None } 131 | } 132 | 133 | #[inline] 134 | fn clear(&mut self) { 135 | if let Some(guard) = &mut self.guard { 136 | guard.repin_after(|| {}); 137 | } 138 | } 139 | 140 | #[inline] 141 | fn eager_reclaim(&mut self) { 142 | if let Some(guard) = &mut self.guard { 143 | guard.repin_after(|| {}); 144 | guard.flush(); 145 | } 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/ebr_impl/collector.rs: -------------------------------------------------------------------------------- 1 | /// Epoch-based garbage collector. 2 | use core::fmt; 3 | use core::sync::atomic::Ordering; 4 | 5 | use super::guard::Guard; 6 | use super::internal::{Global, Local}; 7 | use super::Epoch; 8 | use std::sync::Arc; 9 | 10 | /// An epoch-based garbage collector. 
11 | pub struct Collector { 12 | pub(crate) global: Arc<Global>, // restored: generic parameter was stripped by the dump (`Arc,`); the shared `Global` state is reference-counted across handles 13 | } 14 | 15 | unsafe impl Send for Collector {} 16 | unsafe impl Sync for Collector {} 17 | 18 | impl Default for Collector { 19 | fn default() -> Self { 20 | Self { 21 | global: Arc::new(Global::new()), 22 | } 23 | } 24 | } 25 | 26 | impl Collector { 27 | /// Creates a new collector. 28 | pub fn new() -> Self { 29 | Self::default() 30 | } 31 | 32 | /// Registers a new handle for the collector. 33 | pub fn register(&self) -> LocalHandle { 34 | Local::register(self) 35 | } 36 | 37 | /// Reads the global epoch, without issuing a fence. 38 | #[inline] 39 | pub fn global_epoch(&self) -> Epoch { 40 | self.global.epoch.load(Ordering::Relaxed) 41 | } 42 | 43 | /// Checks if the global queue is empty. 44 | pub fn is_global_queue_empty(&self) -> bool { 45 | self.global.is_global_queue_empty() 46 | } 47 | } 48 | 49 | impl Clone for Collector { 50 | /// Creates another reference to the same garbage collector. 51 | fn clone(&self) -> Self { 52 | Collector { 53 | global: self.global.clone(), 54 | } 55 | } 56 | } 57 | 58 | impl fmt::Debug for Collector { 59 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 60 | f.pad("Collector { .. }") 61 | } 62 | } 63 | 64 | impl PartialEq for Collector { 65 | /// Checks if both handles point to the same collector. 66 | fn eq(&self, rhs: &Collector) -> bool { 67 | Arc::ptr_eq(&self.global, &rhs.global) 68 | } 69 | } 70 | impl Eq for Collector {} 71 | 72 | /// A handle to a garbage collector. 73 | pub struct LocalHandle { 74 | pub(crate) local: *const Local, 75 | } 76 | 77 | impl LocalHandle { 78 | /// Pins the handle. 79 | #[inline] 80 | pub fn pin(&self) -> Guard { 81 | unsafe { (*self.local).pin() } 82 | } 83 | 84 | /// Returns `true` if the handle is pinned. 85 | #[inline] 86 | pub fn is_pinned(&self) -> bool { 87 | unsafe { (*self.local).is_pinned() } 88 | } 89 | 90 | /// Returns the `Collector` associated with this handle. 
91 | #[inline] 92 | pub fn collector(&self) -> &Collector { 93 | unsafe { (*self.local).collector() } 94 | } 95 | } 96 | 97 | impl Drop for LocalHandle { 98 | #[inline] 99 | fn drop(&mut self) { 100 | unsafe { 101 | Local::release_handle(&*self.local); 102 | } 103 | } 104 | } 105 | 106 | impl fmt::Debug for LocalHandle { 107 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 108 | f.pad("LocalHandle { .. }") 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/ebr_impl/default.rs: -------------------------------------------------------------------------------- 1 | //! The default garbage collector. 2 | //! 3 | //! For each thread, a participant is lazily initialized on its first use, when the current thread 4 | //! is registered in the default collector. If initialized, the thread's participant will get 5 | //! destructed on thread exit, which in turn unregisters the thread. 6 | 7 | use super::collector::{Collector, LocalHandle}; 8 | use super::guard::Guard; 9 | use super::sync::once_lock::OnceLock; 10 | 11 | fn collector() -> &'static Collector { 12 | /// The global data for the default garbage collector. 13 | static COLLECTOR: OnceLock = OnceLock::new(); 14 | COLLECTOR.get_or_init(Collector::new) 15 | } 16 | 17 | thread_local! { 18 | /// The per-thread participant for the default garbage collector. 19 | static HANDLE: LocalHandle = collector().register(); 20 | } 21 | 22 | /// Pins the current thread. 23 | #[inline] 24 | pub fn pin() -> Guard { 25 | with_handle(|handle| handle.pin()) 26 | } 27 | 28 | /// Returns `true` if the current thread is pinned. 29 | #[inline] 30 | pub fn is_pinned() -> bool { 31 | with_handle(|handle| handle.is_pinned()) 32 | } 33 | 34 | /// Returns the default global collector. 
35 | pub fn default_collector() -> &'static Collector { 36 | collector() 37 | } 38 | 39 | #[inline] 40 | fn with_handle(mut f: F) -> R 41 | where 42 | F: FnMut(&LocalHandle) -> R, 43 | { 44 | HANDLE 45 | .try_with(|h| f(h)) 46 | .unwrap_or_else(|_| f(&collector().register())) 47 | } 48 | 49 | #[cfg(all(test, not(crossbeam_loom)))] 50 | mod tests { 51 | use crossbeam_utils::thread; 52 | 53 | #[test] 54 | fn pin_while_exiting() { 55 | struct Foo; 56 | 57 | impl Drop for Foo { 58 | fn drop(&mut self) { 59 | // Pin after `HANDLE` has been dropped. This must not panic. 60 | super::pin(); 61 | } 62 | } 63 | 64 | thread_local! { 65 | static FOO: Foo = Foo; 66 | } 67 | 68 | thread::scope(|scope| { 69 | scope.spawn(|_| { 70 | // Initialize `FOO` and then `HANDLE`. 71 | FOO.with(|_| ()); 72 | super::pin(); 73 | // At thread exit, `HANDLE` gets dropped first and `FOO` second. 74 | }); 75 | }) 76 | .unwrap(); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/ebr_impl/deferred.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::marker::PhantomData; 3 | use core::mem::{self, MaybeUninit}; 4 | use core::ptr; 5 | 6 | /// Number of words a piece of `Data` can hold. 7 | /// 8 | /// Three words should be enough for the majority of cases. For example, you can fit inside it the 9 | /// function pointer together with a fat pointer representing an object that needs to be destroyed. 10 | const DATA_WORDS: usize = 3; 11 | 12 | /// Some space to keep a `FnOnce()` object on the stack. 13 | type Data = [usize; DATA_WORDS]; 14 | 15 | /// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap. 16 | /// 17 | /// This is a handy way of keeping an unsized `FnOnce()` within a sized structure. 
18 | pub(crate) struct Deferred { 19 | call: unsafe fn(*mut u8), 20 | data: MaybeUninit, 21 | _marker: PhantomData<*mut ()>, // !Send + !Sync 22 | } 23 | 24 | impl fmt::Debug for Deferred { 25 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 26 | f.pad("Deferred { .. }") 27 | } 28 | } 29 | 30 | impl Deferred { 31 | /// Constructs a new `Deferred` from a `FnOnce()`. 32 | pub(crate) fn new(f: F) -> Self { 33 | let size = mem::size_of::(); 34 | let align = mem::align_of::(); 35 | 36 | unsafe { 37 | if size <= mem::size_of::() && align <= mem::align_of::() { 38 | let mut data = MaybeUninit::::uninit(); 39 | ptr::write(data.as_mut_ptr().cast::(), f); 40 | 41 | unsafe fn call(raw: *mut u8) { 42 | let f: F = ptr::read(raw.cast::()); 43 | f(); 44 | } 45 | 46 | Deferred { 47 | call: call::, 48 | data, 49 | _marker: PhantomData, 50 | } 51 | } else { 52 | let b: Box = Box::new(f); 53 | let mut data = MaybeUninit::::uninit(); 54 | ptr::write(data.as_mut_ptr().cast::>(), b); 55 | 56 | unsafe fn call(raw: *mut u8) { 57 | // It's safe to cast `raw` from `*mut u8` to `*mut Box`, because `raw` is 58 | // originally derived from `*mut Box`. 59 | let b: Box = ptr::read(raw.cast::>()); 60 | (*b)(); 61 | } 62 | 63 | Deferred { 64 | call: call::, 65 | data, 66 | _marker: PhantomData, 67 | } 68 | } 69 | } 70 | } 71 | 72 | /// Calls the function. 
73 | #[inline] 74 | pub(crate) fn call(mut self) { 75 | let call = self.call; 76 | unsafe { call(self.data.as_mut_ptr().cast::()) }; 77 | } 78 | } 79 | 80 | #[cfg(all(test, not(crossbeam_loom)))] 81 | mod tests { 82 | #![allow(clippy::drop_copy)] 83 | 84 | use super::Deferred; 85 | use core::hint::black_box; 86 | use std::cell::Cell; 87 | 88 | #[test] 89 | fn on_stack() { 90 | let fired = &Cell::new(false); 91 | let a = [0usize; 1]; 92 | 93 | let d = Deferred::new(move || { 94 | black_box(a); 95 | fired.set(true); 96 | }); 97 | 98 | assert!(!fired.get()); 99 | d.call(); 100 | assert!(fired.get()); 101 | } 102 | 103 | #[test] 104 | fn on_heap() { 105 | let fired = &Cell::new(false); 106 | let a = [0usize; 10]; 107 | 108 | let d = Deferred::new(move || { 109 | black_box(a); 110 | fired.set(true); 111 | }); 112 | 113 | assert!(!fired.get()); 114 | d.call(); 115 | assert!(fired.get()); 116 | } 117 | 118 | #[test] 119 | fn string() { 120 | let a = "hello".to_string(); 121 | let d = Deferred::new(move || assert_eq!(a, "hello")); 122 | d.call(); 123 | } 124 | 125 | #[test] 126 | fn boxed_slice_i32() { 127 | let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice(); 128 | let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); 129 | d.call(); 130 | } 131 | 132 | #[test] 133 | fn long_slice_usize() { 134 | let a: [usize; 5] = [2, 3, 5, 7, 11]; 135 | let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); 136 | d.call(); 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/ebr_impl/sync/mod.rs: -------------------------------------------------------------------------------- 1 | //! Synchronization primitives. 
2 | 3 | pub(crate) mod list; 4 | pub(crate) mod once_lock; 5 | pub(crate) mod queue; 6 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/ebr_impl/sync/once_lock.rs: -------------------------------------------------------------------------------- 1 | // Based on unstable std::sync::OnceLock. 2 | // 3 | // Source: https://github.com/rust-lang/rust/blob/8e9c93df464b7ada3fc7a1c8ccddd9dcb24ee0a0/library/std/src/sync/once_lock.rs 4 | 5 | use core::cell::UnsafeCell; 6 | use core::mem::MaybeUninit; 7 | use core::sync::atomic::{AtomicBool, Ordering}; 8 | use std::sync::Once; 9 | 10 | pub(crate) struct OnceLock { 11 | once: Once, 12 | // Once::is_completed requires Rust 1.43, so use this to track of whether they have been initialized. 13 | is_initialized: AtomicBool, 14 | value: UnsafeCell>, 15 | // Unlike std::sync::OnceLock, we don't need PhantomData here because 16 | // we don't use #[may_dangle]. 17 | } 18 | 19 | unsafe impl Sync for OnceLock {} 20 | unsafe impl Send for OnceLock {} 21 | 22 | impl OnceLock { 23 | /// Creates a new empty cell. 24 | #[must_use] 25 | pub(crate) const fn new() -> Self { 26 | Self { 27 | once: Once::new(), 28 | is_initialized: AtomicBool::new(false), 29 | value: UnsafeCell::new(MaybeUninit::uninit()), 30 | } 31 | } 32 | 33 | /// Gets the contents of the cell, initializing it with `f` if the cell 34 | /// was empty. 35 | /// 36 | /// Many threads may call `get_or_init` concurrently with different 37 | /// initializing functions, but it is guaranteed that only one function 38 | /// will be executed. 39 | /// 40 | /// # Panics 41 | /// 42 | /// If `f` panics, the panic is propagated to the caller, and the cell 43 | /// remains uninitialized. 44 | /// 45 | /// It is an error to reentrantly initialize the cell from `f`. The 46 | /// exact outcome is unspecified. Current implementation deadlocks, but 47 | /// this may be changed to a panic in the future. 
48 | pub(crate) fn get_or_init(&self, f: F) -> &T 49 | where 50 | F: FnOnce() -> T, 51 | { 52 | // Fast path check 53 | if self.is_initialized() { 54 | // SAFETY: The inner value has been initialized 55 | return unsafe { self.get_unchecked() }; 56 | } 57 | self.initialize(f); 58 | 59 | debug_assert!(self.is_initialized()); 60 | 61 | // SAFETY: The inner value has been initialized 62 | unsafe { self.get_unchecked() } 63 | } 64 | 65 | #[inline] 66 | fn is_initialized(&self) -> bool { 67 | self.is_initialized.load(Ordering::Acquire) 68 | } 69 | 70 | #[cold] 71 | fn initialize(&self, f: F) 72 | where 73 | F: FnOnce() -> T, 74 | { 75 | let slot = self.value.get().cast::(); 76 | let is_initialized = &self.is_initialized; 77 | 78 | self.once.call_once(|| { 79 | let value = f(); 80 | unsafe { 81 | slot.write(value); 82 | } 83 | is_initialized.store(true, Ordering::Release); 84 | }); 85 | } 86 | 87 | /// # Safety 88 | /// 89 | /// The value must be initialized 90 | unsafe fn get_unchecked(&self) -> &T { 91 | debug_assert!(self.is_initialized()); 92 | &*self.value.get().cast::() 93 | } 94 | } 95 | 96 | impl Drop for OnceLock { 97 | fn drop(&mut self) { 98 | if self.is_initialized() { 99 | // SAFETY: The inner value has been initialized 100 | unsafe { self.value.get().cast::().drop_in_place() }; 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/hp.rs: -------------------------------------------------------------------------------- 1 | use std::{mem::swap, ptr::null}; 2 | 3 | use atomic::Ordering; 4 | 5 | use crate::{Acquired, Counted, Cs, TaggedCnt}; 6 | 7 | use super::hp_impl::{HazardPointer, Thread, DEFAULT_THREAD}; 8 | 9 | pub struct AcquiredHP { 10 | hazptr: HazardPointer, 11 | ptr: TaggedCnt, 12 | } 13 | 14 | impl Acquired for AcquiredHP { 15 | #[inline] 16 | fn clear(&mut self) { 17 | self.hazptr.reset_protection(); 18 | self.ptr = TaggedCnt::null(); 19 | } 20 | 21 | #[inline] 22 | 
fn as_ptr(&self) -> TaggedCnt { 23 | self.ptr 24 | } 25 | 26 | #[inline] 27 | fn set_tag(&mut self, tag: usize) { 28 | self.ptr = self.ptr.with_tag(tag); 29 | } 30 | 31 | #[inline] 32 | fn null() -> Self { 33 | Self { 34 | hazptr: HazardPointer::default(), 35 | ptr: TaggedCnt::null(), 36 | } 37 | } 38 | 39 | #[inline] 40 | fn is_null(&self) -> bool { 41 | self.ptr.is_null() 42 | } 43 | 44 | #[inline] 45 | fn swap(p1: &mut Self, p2: &mut Self) { 46 | HazardPointer::swap(&mut p1.hazptr, &mut p2.hazptr); 47 | swap(&mut p1.ptr, &mut p2.ptr); 48 | } 49 | 50 | #[inline] 51 | fn eq(&self, other: &Self) -> bool { 52 | self.ptr == other.ptr 53 | } 54 | 55 | #[inline] 56 | unsafe fn copy_to(&self, other: &mut Self) { 57 | other.ptr = self.ptr; 58 | other.hazptr.protect_raw(other.ptr.as_raw()); 59 | membarrier::light_membarrier(); 60 | } 61 | } 62 | 63 | pub struct CsHP { 64 | thread: *const Thread, 65 | } 66 | 67 | impl Cs for CsHP { 68 | type RawShield = AcquiredHP; 69 | 70 | #[inline] 71 | fn new() -> Self { 72 | let thread = DEFAULT_THREAD.with(|t| (&**t) as *const Thread); 73 | Self { thread } 74 | } 75 | 76 | #[inline] 77 | unsafe fn without_epoch() -> Self { 78 | Self::new() 79 | } 80 | 81 | #[inline] 82 | unsafe fn unprotected() -> Self { 83 | Self { thread: null() } 84 | } 85 | 86 | #[inline] 87 | fn create_object(obj: T) -> *mut crate::Counted { 88 | let obj = Counted::new(obj); 89 | Box::into_raw(Box::new(obj)) 90 | } 91 | 92 | #[inline] 93 | fn reserve(&self, ptr: TaggedCnt, shield: &mut Self::RawShield) { 94 | shield.ptr = ptr; 95 | shield.hazptr.protect_raw(ptr.as_raw()); 96 | membarrier::light_membarrier(); 97 | } 98 | 99 | #[inline] 100 | fn protect_snapshot( 101 | &self, 102 | link: &atomic::Atomic>, 103 | shield: &mut Self::RawShield, 104 | ) -> bool { 105 | let mut ptr = link.load(Ordering::Relaxed); 106 | loop { 107 | shield.ptr = ptr; 108 | shield.hazptr.protect_raw(ptr.as_raw()); 109 | membarrier::light_membarrier(); 110 | 111 | let new_ptr = 
link.load(Ordering::Acquire); 112 | if new_ptr == ptr { 113 | break; 114 | } 115 | ptr = new_ptr; 116 | } 117 | 118 | if !ptr.is_null() && unsafe { ptr.deref() }.ref_count() == 0 { 119 | shield.clear(); 120 | false 121 | } else { 122 | true 123 | } 124 | } 125 | 126 | #[inline] 127 | unsafe fn own_object(ptr: *mut Counted) -> Counted { 128 | *Box::from_raw(ptr) 129 | } 130 | 131 | #[inline] 132 | unsafe fn retire(&self, ptr: *mut Counted, ret_type: crate::RetireType) { 133 | debug_assert!(!ptr.is_null()); 134 | let cnt = &mut *ptr; 135 | if let Some(thread) = self.thread.as_ref() { 136 | thread.defer(ptr, move || { 137 | let inner_guard = Self::new(); 138 | inner_guard.eject(cnt, ret_type); 139 | }); 140 | } else { 141 | self.eject(cnt, ret_type); 142 | } 143 | } 144 | 145 | #[inline] 146 | fn clear(&mut self) { 147 | // No-op for HP. 148 | } 149 | 150 | #[inline] 151 | fn eager_reclaim(&mut self) { 152 | if let Some(thread) = unsafe { self.thread.as_ref() } { 153 | thread.eager_reclaim(); 154 | } 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/hp_impl/domain.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | use crossbeam_utils::CachePadded; 4 | use rustc_hash::FxHashSet; 5 | 6 | use super::hazard::ThreadRecords; 7 | use super::retire::RetiredList; 8 | use super::thread::Thread; 9 | 10 | #[derive(Debug)] 11 | pub struct Domain { 12 | pub(crate) threads: CachePadded, 13 | pub(crate) retireds: CachePadded, 14 | pub(crate) num_garbages: CachePadded, 15 | } 16 | 17 | impl Domain { 18 | pub const fn new() -> Self { 19 | Self { 20 | threads: CachePadded::new(ThreadRecords::new()), 21 | retireds: CachePadded::new(RetiredList::new()), 22 | num_garbages: CachePadded::new(AtomicUsize::new(0)), 23 | } 24 | } 25 | 26 | pub fn collect_guarded_ptrs(&self, reclaimer: &Thread) -> FxHashSet<*mut u8> { 27 | 
self.threads 28 | .iter() 29 | .flat_map(|thread| thread.iter(reclaimer)) 30 | .collect() 31 | } 32 | 33 | pub fn num_garbages(&self) -> usize { 34 | self.num_garbages.load(Ordering::Acquire) 35 | } 36 | } 37 | 38 | impl Drop for Domain { 39 | fn drop(&mut self) { 40 | for t in self.threads.iter() { 41 | assert!(t.available.load(Ordering::Relaxed)) 42 | } 43 | while !self.retireds.is_empty() { 44 | let mut retireds = self.retireds.pop_all(); 45 | for r in retireds.drain(..) { 46 | unsafe { r.call() }; 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/hp_impl/mod.rs: -------------------------------------------------------------------------------- 1 | mod domain; 2 | mod hazard; 3 | mod retire; 4 | mod thread; 5 | 6 | pub use hazard::HazardPointer; 7 | pub use thread::set_counts_between_flush; 8 | 9 | use std::thread_local; 10 | 11 | use domain::Domain; 12 | pub use thread::Thread; 13 | 14 | pub static DEFAULT_DOMAIN: Domain = Domain::new(); 15 | 16 | thread_local! 
{ 17 | pub static DEFAULT_THREAD: Box = Box::new(Thread::new(&DEFAULT_DOMAIN)); 18 | } 19 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/hp_impl/retire.rs: -------------------------------------------------------------------------------- 1 | use core::ptr; 2 | use core::sync::atomic::{AtomicPtr, Ordering}; 3 | use std::mem::{self, MaybeUninit}; 4 | 5 | const DATA_WORDS: usize = 3; 6 | type DeferredData = [usize; DATA_WORDS]; 7 | 8 | #[derive(Debug, Clone, Copy)] 9 | pub(crate) struct Retired { 10 | pub(crate) ptr: *mut u8, 11 | data: MaybeUninit, 12 | call: unsafe fn(*mut u8), 13 | } 14 | 15 | // TODO: require in retire 16 | unsafe impl Send for Retired {} 17 | 18 | impl Retired { 19 | pub(crate) fn new(ptr: *mut u8, f: F) -> Self { 20 | let size = mem::size_of::(); 21 | let align = mem::align_of::(); 22 | 23 | unsafe { 24 | if size <= mem::size_of::() && align <= mem::align_of::() { 25 | let mut data = MaybeUninit::::uninit(); 26 | ptr::write(data.as_mut_ptr().cast::(), f); 27 | 28 | unsafe fn call(raw: *mut u8) { 29 | let f: F = ptr::read(raw.cast::()); 30 | f(); 31 | } 32 | 33 | Self { 34 | ptr, 35 | data, 36 | call: call::, 37 | } 38 | } else { 39 | let b: Box = Box::new(f); 40 | let mut data = MaybeUninit::::uninit(); 41 | ptr::write(data.as_mut_ptr().cast::>(), b); 42 | 43 | unsafe fn call(raw: *mut u8) { 44 | // It's safe to cast `raw` from `*mut u8` to `*mut Box`, because `raw` is 45 | // originally derived from `*mut Box`. 
46 | let b: Box = ptr::read(raw.cast::>()); 47 | (*b)(); 48 | } 49 | 50 | Self { 51 | ptr, 52 | data, 53 | call: call::, 54 | } 55 | } 56 | } 57 | } 58 | 59 | pub(crate) unsafe fn call(mut self) { 60 | let call = self.call; 61 | unsafe { call(self.data.as_mut_ptr().cast::()) }; 62 | } 63 | } 64 | 65 | #[derive(Debug)] 66 | pub(crate) struct RetiredList { 67 | head: AtomicPtr, 68 | } 69 | 70 | #[derive(Debug)] 71 | struct RetiredListNode { 72 | retireds: Vec, 73 | next: *const RetiredListNode, 74 | } 75 | 76 | impl RetiredList { 77 | pub(crate) const fn new() -> Self { 78 | Self { 79 | head: AtomicPtr::new(core::ptr::null_mut()), 80 | } 81 | } 82 | 83 | pub(crate) fn is_empty(&self) -> bool { 84 | self.head.load(Ordering::Acquire).is_null() 85 | } 86 | 87 | pub(crate) fn push(&self, retireds: Vec) { 88 | let new = Box::leak(Box::new(RetiredListNode { 89 | retireds, 90 | next: ptr::null_mut(), 91 | })); 92 | 93 | let mut head = self.head.load(Ordering::Relaxed); 94 | loop { 95 | new.next = head; 96 | match self 97 | .head 98 | .compare_exchange(head, new, Ordering::Release, Ordering::Relaxed) 99 | { 100 | Ok(_) => return, 101 | Err(head_new) => head = head_new, 102 | } 103 | } 104 | } 105 | 106 | pub(crate) fn pop_all(&self) -> Vec { 107 | let mut cur = self.head.swap(core::ptr::null_mut(), Ordering::Acquire); 108 | let mut retireds = Vec::new(); 109 | while !cur.is_null() { 110 | let mut cur_box = unsafe { Box::from_raw(cur) }; 111 | retireds.append(&mut cur_box.retireds); 112 | cur = cur_box.next.cast_mut(); 113 | } 114 | retireds 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /smrs/cdrc/src/internal/smr/mod.rs: -------------------------------------------------------------------------------- 1 | mod ebr; 2 | pub mod ebr_impl; 3 | mod hp; 4 | pub mod hp_impl; 5 | 6 | pub use ebr::CsEBR; 7 | pub use hp::CsHP; 8 | -------------------------------------------------------------------------------- 
/smrs/cdrc/src/internal/smr_common.rs: -------------------------------------------------------------------------------- 1 | use atomic::Atomic; 2 | 3 | use crate::internal::utils::Counted; 4 | use crate::internal::utils::EjectAction; 5 | use crate::internal::utils::TaggedCnt; 6 | 7 | pub enum RetireType { 8 | DecrementStrongCount, 9 | DecrementWeakCount, 10 | Dispose, 11 | } 12 | 13 | /// A SMR-specific acquired pointer trait. 14 | /// 15 | /// In most cases such as EBR, IBR and Hyaline, Acquired is equivalent to a simple tagged 16 | /// pointer pointing a `Counted`. 17 | /// 18 | /// However, for some pointer-based SMR, `Acquired` should contain other information like an 19 | /// index of a hazard slot. For this reason, a type for acquired pointer must be SMR-dependent, 20 | /// and every SMR must provide some reasonable interfaces to access and manage this pointer. 21 | pub trait Acquired { 22 | fn clear(&mut self); 23 | fn as_ptr(&self) -> TaggedCnt; 24 | fn set_tag(&mut self, tag: usize); 25 | fn null() -> Self; 26 | fn is_null(&self) -> bool; 27 | fn swap(p1: &mut Self, p2: &mut Self); 28 | fn eq(&self, other: &Self) -> bool; 29 | unsafe fn copy_to(&self, other: &mut Self); 30 | } 31 | 32 | /// A SMR-specific critical section manager trait. 33 | /// 34 | /// We construct this `Cs` right before starting an operation, 35 | /// and drop(or `clear`) it after the operation. 36 | pub trait Cs { 37 | /// A SMR-specific acquired pointer trait 38 | /// 39 | /// For more information, read a comment on `Acquired`. 40 | type RawShield: Acquired; 41 | 42 | fn new() -> Self; 43 | unsafe fn without_epoch() -> Self; 44 | unsafe fn unprotected() -> Self; 45 | fn create_object(obj: T) -> *mut Counted; 46 | /// Creates a shield for the given pointer, assuming that `ptr` is already protected by a 47 | /// reference count. 
48 | fn reserve(&self, ptr: TaggedCnt, shield: &mut Self::RawShield); 49 | fn protect_snapshot( 50 | &self, 51 | link: &Atomic>, 52 | shield: &mut Self::RawShield, 53 | ) -> bool; 54 | unsafe fn own_object(ptr: *mut Counted) -> Counted; 55 | unsafe fn retire(&self, ptr: *mut Counted, ret_type: RetireType); 56 | fn clear(&mut self); 57 | fn eager_reclaim(&mut self); 58 | 59 | #[inline] 60 | unsafe fn dispose(&self, cnt: &mut Counted) { 61 | debug_assert!(cnt.ref_count() == 0); 62 | cnt.dispose(); 63 | if cnt.release_weak() { 64 | self.destroy(cnt); 65 | } 66 | } 67 | 68 | #[inline] 69 | unsafe fn destroy(&self, cnt: &mut Counted) { 70 | debug_assert!(cnt.ref_count() == 0); 71 | drop(Self::own_object(cnt)); 72 | } 73 | 74 | /// Perform an eject action. This can correspond to any action that 75 | /// should be delayed until the ptr is no longer protected 76 | #[inline] 77 | unsafe fn eject(&self, cnt: &mut Counted, ret_type: RetireType) { 78 | match ret_type { 79 | RetireType::DecrementStrongCount => self.decrement_ref_cnt(cnt), 80 | RetireType::DecrementWeakCount => self.decrement_weak_cnt(cnt), 81 | RetireType::Dispose => self.dispose(cnt), 82 | } 83 | } 84 | 85 | #[inline] 86 | unsafe fn increment_ref_cnt(&self, cnt: &Counted) -> bool { 87 | cnt.add_ref() 88 | } 89 | 90 | #[inline] 91 | unsafe fn increment_weak_cnt(&self, cnt: &Counted) -> bool { 92 | cnt.add_weak() 93 | } 94 | 95 | #[inline] 96 | unsafe fn decrement_ref_cnt(&self, cnt: &mut Counted) { 97 | debug_assert!(cnt.ref_count() >= 1); 98 | let result = cnt.release_ref(); 99 | 100 | match result { 101 | EjectAction::Nothing => {} 102 | EjectAction::Delay => self.retire(cnt, RetireType::Dispose), 103 | EjectAction::Destroy => self.destroy(cnt), 104 | } 105 | } 106 | 107 | #[inline] 108 | unsafe fn decrement_weak_cnt(&self, cnt: &mut Counted) { 109 | debug_assert!(cnt.weak_count() >= 1); 110 | if cnt.release_weak() { 111 | self.destroy(cnt); 112 | } 113 | } 114 | 115 | #[inline] 116 | unsafe fn 
delayed_decrement_ref_cnt(&self, cnt: &mut Counted) { 117 | debug_assert!(cnt.ref_count() >= 1); 118 | self.retire(cnt, RetireType::DecrementStrongCount); 119 | } 120 | 121 | #[inline] 122 | unsafe fn delayed_decrement_weak_cnt(&self, cnt: &mut Counted) { 123 | debug_assert!(cnt.weak_count() >= 1); 124 | self.retire(cnt, RetireType::DecrementWeakCount); 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /smrs/cdrc/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod internal; 2 | mod strongs; 3 | mod weaks; 4 | 5 | pub use internal::*; 6 | pub use strongs::*; 7 | pub use weaks::*; 8 | 9 | #[inline] 10 | pub fn set_counts_between_flush_ebr(counts: usize) { 11 | internal::ebr_impl::set_bag_capacity(counts); 12 | } 13 | 14 | #[inline] 15 | pub fn set_counts_between_flush_hp(counts: usize) { 16 | internal::hp_impl::set_counts_between_flush(counts); 17 | } 18 | -------------------------------------------------------------------------------- /smrs/circ/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "circ" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | crossbeam-utils = "0.8" 10 | membarrier = { git = "https://github.com/jeehoonkang/membarrier-rs.git", branch = "smr-benchmark" } 11 | scopeguard = "1.1.0" 12 | static_assertions = "1.1.0" 13 | atomic = "0.5" 14 | cfg-if = "1.0" 15 | rustc-hash = "1.1.0" 16 | memoffset = "0.7" 17 | 18 | [dev-dependencies] 19 | rand = "0.8" 20 | bitflags = "2.4.0" 21 | -------------------------------------------------------------------------------- /smrs/circ/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(cfg_sanitize)] 2 | mod smr; 3 | mod smr_common; 4 | mod strong; 5 | mod utils; 6 | 
mod weak; 7 | 8 | pub use smr::*; 9 | pub use smr_common::*; 10 | pub use strong::*; 11 | pub use utils::*; 12 | pub use weak::*; 13 | 14 | #[inline] 15 | pub fn set_counts_between_flush_ebr(counts: usize) { 16 | smr::ebr_impl::set_bag_capacity(counts); 17 | } 18 | 19 | #[inline] 20 | pub fn set_counts_between_flush_hp(counts: usize) { 21 | smr::hp_impl::set_counts_between_flush(counts); 22 | } 23 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/ebr_impl/default.rs: -------------------------------------------------------------------------------- 1 | //! The default garbage collector. 2 | //! 3 | //! For each thread, a participant is lazily initialized on its first use, when the current thread 4 | //! is registered in the default collector. If initialized, the thread's participant will get 5 | //! destructed on thread exit, which in turn unregisters the thread. 6 | 7 | use super::collector::{Collector, LocalHandle}; 8 | use super::guard::Guard; 9 | use super::sync::once_lock::OnceLock; 10 | 11 | fn collector() -> &'static Collector { 12 | /// The global data for the default garbage collector. 13 | static COLLECTOR: OnceLock = OnceLock::new(); 14 | COLLECTOR.get_or_init(Collector::new) 15 | } 16 | 17 | thread_local! { 18 | /// The per-thread participant for the default garbage collector. 19 | static HANDLE: LocalHandle = collector().register(); 20 | } 21 | 22 | /// Pins the current thread. 23 | #[inline] 24 | pub fn pin() -> Guard { 25 | with_handle(|handle| handle.pin()) 26 | } 27 | 28 | /// Returns `true` if the current thread is pinned. 29 | #[inline] 30 | pub fn is_pinned() -> bool { 31 | with_handle(|handle| handle.is_pinned()) 32 | } 33 | 34 | /// Returns the default global collector. 
35 | pub fn default_collector() -> &'static Collector { 36 | collector() 37 | } 38 | 39 | #[inline] 40 | fn with_handle(mut f: F) -> R 41 | where 42 | F: FnMut(&LocalHandle) -> R, 43 | { 44 | HANDLE 45 | .try_with(|h| f(h)) 46 | .unwrap_or_else(|_| f(&collector().register())) 47 | } 48 | 49 | #[cfg(all(test, not(crossbeam_loom)))] 50 | mod tests { 51 | use crossbeam_utils::thread; 52 | 53 | #[test] 54 | fn pin_while_exiting() { 55 | struct Foo; 56 | 57 | impl Drop for Foo { 58 | fn drop(&mut self) { 59 | // Pin after `HANDLE` has been dropped. This must not panic. 60 | super::pin(); 61 | } 62 | } 63 | 64 | thread_local! { 65 | static FOO: Foo = Foo; 66 | } 67 | 68 | thread::scope(|scope| { 69 | scope.spawn(|_| { 70 | // Initialize `FOO` and then `HANDLE`. 71 | FOO.with(|_| ()); 72 | super::pin(); 73 | // At thread exit, `HANDLE` gets dropped first and `FOO` second. 74 | }); 75 | }) 76 | .unwrap(); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/ebr_impl/deferred.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::marker::PhantomData; 3 | use core::mem::{self, MaybeUninit}; 4 | use core::ptr; 5 | 6 | /// Number of words a piece of `Data` can hold. 7 | /// 8 | /// Three words should be enough for the majority of cases. For example, you can fit inside it the 9 | /// function pointer together with a fat pointer representing an object that needs to be destroyed. 10 | const DATA_WORDS: usize = 3; 11 | 12 | /// Some space to keep a `FnOnce()` object on the stack. 13 | type Data = [usize; DATA_WORDS]; 14 | 15 | /// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap. 16 | /// 17 | /// This is a handy way of keeping an unsized `FnOnce()` within a sized structure. 
18 | pub(crate) struct Deferred { 19 | call: unsafe fn(*mut u8), 20 | data: MaybeUninit, 21 | _marker: PhantomData<*mut ()>, // !Send + !Sync 22 | } 23 | 24 | impl fmt::Debug for Deferred { 25 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 26 | f.pad("Deferred { .. }") 27 | } 28 | } 29 | 30 | impl Deferred { 31 | /// Constructs a new `Deferred` from a `FnOnce()`. 32 | pub(crate) fn new(f: F) -> Self { 33 | let size = mem::size_of::(); 34 | let align = mem::align_of::(); 35 | 36 | unsafe { 37 | if size <= mem::size_of::() && align <= mem::align_of::() { 38 | let mut data = MaybeUninit::::uninit(); 39 | ptr::write(data.as_mut_ptr().cast::(), f); 40 | 41 | unsafe fn call(raw: *mut u8) { 42 | let f: F = ptr::read(raw.cast::()); 43 | f(); 44 | } 45 | 46 | Deferred { 47 | call: call::, 48 | data, 49 | _marker: PhantomData, 50 | } 51 | } else { 52 | let b: Box = Box::new(f); 53 | let mut data = MaybeUninit::::uninit(); 54 | ptr::write(data.as_mut_ptr().cast::>(), b); 55 | 56 | unsafe fn call(raw: *mut u8) { 57 | // It's safe to cast `raw` from `*mut u8` to `*mut Box`, because `raw` is 58 | // originally derived from `*mut Box`. 59 | let b: Box = ptr::read(raw.cast::>()); 60 | (*b)(); 61 | } 62 | 63 | Deferred { 64 | call: call::, 65 | data, 66 | _marker: PhantomData, 67 | } 68 | } 69 | } 70 | } 71 | 72 | /// Calls the function. 
73 | #[inline] 74 | pub(crate) fn call(mut self) { 75 | let call = self.call; 76 | unsafe { call(self.data.as_mut_ptr().cast::()) }; 77 | } 78 | } 79 | 80 | #[cfg(all(test, not(crossbeam_loom)))] 81 | mod tests { 82 | #![allow(clippy::drop_copy)] 83 | 84 | use super::Deferred; 85 | use core::hint::black_box; 86 | use std::cell::Cell; 87 | 88 | #[test] 89 | fn on_stack() { 90 | let fired = &Cell::new(false); 91 | let a = [0usize; 1]; 92 | 93 | let d = Deferred::new(move || { 94 | black_box(a); 95 | fired.set(true); 96 | }); 97 | 98 | assert!(!fired.get()); 99 | d.call(); 100 | assert!(fired.get()); 101 | } 102 | 103 | #[test] 104 | fn on_heap() { 105 | let fired = &Cell::new(false); 106 | let a = [0usize; 10]; 107 | 108 | let d = Deferred::new(move || { 109 | black_box(a); 110 | fired.set(true); 111 | }); 112 | 113 | assert!(!fired.get()); 114 | d.call(); 115 | assert!(fired.get()); 116 | } 117 | 118 | #[test] 119 | fn string() { 120 | let a = "hello".to_string(); 121 | let d = Deferred::new(move || assert_eq!(a, "hello")); 122 | d.call(); 123 | } 124 | 125 | #[test] 126 | fn boxed_slice_i32() { 127 | let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice(); 128 | let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); 129 | d.call(); 130 | } 131 | 132 | #[test] 133 | fn long_slice_usize() { 134 | let a: [usize; 5] = [2, 3, 5, 7, 11]; 135 | let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); 136 | d.call(); 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/ebr_impl/sync/mod.rs: -------------------------------------------------------------------------------- 1 | //! Synchronization primitives. 
2 | 3 | pub(crate) mod list; 4 | pub(crate) mod once_lock; 5 | pub(crate) mod queue; 6 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/ebr_impl/sync/once_lock.rs: -------------------------------------------------------------------------------- 1 | // Based on unstable std::sync::OnceLock. 2 | // 3 | // Source: https://github.com/rust-lang/rust/blob/8e9c93df464b7ada3fc7a1c8ccddd9dcb24ee0a0/library/std/src/sync/once_lock.rs 4 | 5 | use core::cell::UnsafeCell; 6 | use core::mem::MaybeUninit; 7 | use core::sync::atomic::{AtomicBool, Ordering}; 8 | use std::sync::Once; 9 | 10 | pub(crate) struct OnceLock { 11 | once: Once, 12 | // Once::is_completed requires Rust 1.43, so use this to track of whether they have been initialized. 13 | is_initialized: AtomicBool, 14 | value: UnsafeCell>, 15 | // Unlike std::sync::OnceLock, we don't need PhantomData here because 16 | // we don't use #[may_dangle]. 17 | } 18 | 19 | unsafe impl Sync for OnceLock {} 20 | unsafe impl Send for OnceLock {} 21 | 22 | impl OnceLock { 23 | /// Creates a new empty cell. 24 | #[must_use] 25 | pub(crate) const fn new() -> Self { 26 | Self { 27 | once: Once::new(), 28 | is_initialized: AtomicBool::new(false), 29 | value: UnsafeCell::new(MaybeUninit::uninit()), 30 | } 31 | } 32 | 33 | /// Gets the contents of the cell, initializing it with `f` if the cell 34 | /// was empty. 35 | /// 36 | /// Many threads may call `get_or_init` concurrently with different 37 | /// initializing functions, but it is guaranteed that only one function 38 | /// will be executed. 39 | /// 40 | /// # Panics 41 | /// 42 | /// If `f` panics, the panic is propagated to the caller, and the cell 43 | /// remains uninitialized. 44 | /// 45 | /// It is an error to reentrantly initialize the cell from `f`. The 46 | /// exact outcome is unspecified. Current implementation deadlocks, but 47 | /// this may be changed to a panic in the future. 
48 | pub(crate) fn get_or_init(&self, f: F) -> &T 49 | where 50 | F: FnOnce() -> T, 51 | { 52 | // Fast path check 53 | if self.is_initialized() { 54 | // SAFETY: The inner value has been initialized 55 | return unsafe { self.get_unchecked() }; 56 | } 57 | self.initialize(f); 58 | 59 | debug_assert!(self.is_initialized()); 60 | 61 | // SAFETY: The inner value has been initialized 62 | unsafe { self.get_unchecked() } 63 | } 64 | 65 | #[inline] 66 | fn is_initialized(&self) -> bool { 67 | self.is_initialized.load(Ordering::Acquire) 68 | } 69 | 70 | #[cold] 71 | fn initialize(&self, f: F) 72 | where 73 | F: FnOnce() -> T, 74 | { 75 | let slot = self.value.get().cast::(); 76 | let is_initialized = &self.is_initialized; 77 | 78 | self.once.call_once(|| { 79 | let value = f(); 80 | unsafe { 81 | slot.write(value); 82 | } 83 | is_initialized.store(true, Ordering::Release); 84 | }); 85 | } 86 | 87 | /// # Safety 88 | /// 89 | /// The value must be initialized 90 | unsafe fn get_unchecked(&self) -> &T { 91 | debug_assert!(self.is_initialized()); 92 | &*self.value.get().cast::() 93 | } 94 | } 95 | 96 | impl Drop for OnceLock { 97 | fn drop(&mut self) { 98 | if self.is_initialized() { 99 | // SAFETY: The inner value has been initialized 100 | unsafe { self.value.get().cast::().drop_in_place() }; 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/hp_impl/domain.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | use crossbeam_utils::CachePadded; 4 | use rustc_hash::FxHashSet; 5 | 6 | use super::hazard::ThreadRecords; 7 | use super::retire::{Pile, Retired}; 8 | use super::thread::Thread; 9 | 10 | #[derive(Debug)] 11 | pub struct Domain { 12 | pub(crate) threads: CachePadded, 13 | pub(crate) retireds: CachePadded>>, 14 | pub(crate) num_garbages: CachePadded, 15 | } 16 | 17 | impl Domain { 18 | pub const fn 
new() -> Self { 19 | Self { 20 | threads: CachePadded::new(ThreadRecords::new()), 21 | retireds: CachePadded::new(Pile::new()), 22 | num_garbages: CachePadded::new(AtomicUsize::new(0)), 23 | } 24 | } 25 | 26 | pub fn collect_guarded_ptrs(&self, reclaimer: &Thread) -> FxHashSet<*mut u8> { 27 | self.threads 28 | .iter() 29 | .flat_map(|thread| thread.iter(reclaimer)) 30 | .collect() 31 | } 32 | 33 | pub fn num_garbages(&self) -> usize { 34 | self.num_garbages.load(Ordering::Acquire) 35 | } 36 | } 37 | 38 | impl Drop for Domain { 39 | fn drop(&mut self) { 40 | for t in self.threads.iter() { 41 | assert!(t.available.load(Ordering::Relaxed)) 42 | } 43 | while !self.retireds.is_empty() { 44 | let mut retireds = self.retireds.pop_all_flatten(); 45 | for r in retireds.drain(..) { 46 | unsafe { r.call() }; 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/hp_impl/mod.rs: -------------------------------------------------------------------------------- 1 | mod domain; 2 | mod hazard; 3 | mod retire; 4 | mod thread; 5 | 6 | pub use hazard::HazardPointer; 7 | pub use thread::set_counts_between_flush; 8 | 9 | use std::thread_local; 10 | 11 | use domain::Domain; 12 | pub use thread::Thread; 13 | 14 | pub static DEFAULT_DOMAIN: Domain = Domain::new(); 15 | 16 | thread_local! 
{ 17 | pub static DEFAULT_THREAD: Box = Box::new(Thread::new(&DEFAULT_DOMAIN)); 18 | } 19 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/hp_impl/retire.rs: -------------------------------------------------------------------------------- 1 | use core::ptr; 2 | use core::sync::atomic::{AtomicPtr, Ordering}; 3 | use std::mem::{self, MaybeUninit}; 4 | 5 | const DATA_WORDS: usize = 3; 6 | type DeferredData = [usize; DATA_WORDS]; 7 | 8 | #[derive(Debug, Clone, Copy)] 9 | pub(crate) struct Retired { 10 | pub(crate) ptr: *mut u8, 11 | data: MaybeUninit, 12 | call: unsafe fn(*mut u8), 13 | } 14 | 15 | // TODO: require in retire 16 | unsafe impl Send for Retired {} 17 | 18 | impl Retired { 19 | pub(crate) fn new(ptr: *mut u8, f: F) -> Self { 20 | let size = mem::size_of::(); 21 | let align = mem::align_of::(); 22 | 23 | unsafe { 24 | if size <= mem::size_of::() && align <= mem::align_of::() { 25 | let mut data = MaybeUninit::::uninit(); 26 | ptr::write(data.as_mut_ptr().cast::(), f); 27 | 28 | unsafe fn call(raw: *mut u8) { 29 | let f: F = ptr::read(raw.cast::()); 30 | f(); 31 | } 32 | 33 | Self { 34 | ptr, 35 | data, 36 | call: call::, 37 | } 38 | } else { 39 | let b: Box = Box::new(f); 40 | let mut data = MaybeUninit::::uninit(); 41 | ptr::write(data.as_mut_ptr().cast::>(), b); 42 | 43 | unsafe fn call(raw: *mut u8) { 44 | // It's safe to cast `raw` from `*mut u8` to `*mut Box`, because `raw` is 45 | // originally derived from `*mut Box`. 
46 | let b: Box = ptr::read(raw.cast::>()); 47 | (*b)(); 48 | } 49 | 50 | Self { 51 | ptr, 52 | data, 53 | call: call::, 54 | } 55 | } 56 | } 57 | } 58 | 59 | pub(crate) unsafe fn call(mut self) { 60 | let call = self.call; 61 | unsafe { call(self.data.as_mut_ptr().cast::()) }; 62 | } 63 | } 64 | 65 | #[derive(Debug)] 66 | pub(crate) struct Pile { 67 | head: AtomicPtr>, 68 | } 69 | 70 | #[derive(Debug)] 71 | struct PileNode { 72 | item: T, 73 | next: *const Self, 74 | } 75 | 76 | impl Pile { 77 | pub(crate) const fn new() -> Self { 78 | Self { 79 | head: AtomicPtr::new(core::ptr::null_mut()), 80 | } 81 | } 82 | 83 | pub(crate) fn is_empty(&self) -> bool { 84 | self.head.load(Ordering::Acquire).is_null() 85 | } 86 | 87 | pub(crate) fn push(&self, item: T) { 88 | let new = Box::leak(Box::new(PileNode { 89 | item, 90 | next: ptr::null_mut(), 91 | })); 92 | 93 | let mut head = self.head.load(Ordering::Relaxed); 94 | loop { 95 | new.next = head; 96 | match self 97 | .head 98 | .compare_exchange(head, new, Ordering::Release, Ordering::Relaxed) 99 | { 100 | Ok(_) => return, 101 | Err(head_new) => head = head_new, 102 | } 103 | } 104 | } 105 | 106 | pub(crate) fn pop_all(&self) -> Vec { 107 | let mut cur = self.head.swap(core::ptr::null_mut(), Ordering::Acquire); 108 | let mut popped = Vec::new(); 109 | while !cur.is_null() { 110 | let cur_box = unsafe { Box::from_raw(cur) }; 111 | popped.push(cur_box.item); 112 | cur = cur_box.next.cast_mut(); 113 | } 114 | popped 115 | } 116 | } 117 | 118 | impl Pile> { 119 | pub(crate) fn pop_all_flatten(&self) -> Vec { 120 | let mut cur = self.head.swap(core::ptr::null_mut(), Ordering::Acquire); 121 | let mut popped = Vec::new(); 122 | while !cur.is_null() { 123 | let mut cur_box = unsafe { Box::from_raw(cur) }; 124 | popped.append(&mut cur_box.item); 125 | cur = cur_box.next.cast_mut(); 126 | } 127 | popped 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /smrs/circ/src/smr/mod.rs: 
-------------------------------------------------------------------------------- 1 | mod ebr; 2 | pub mod ebr_impl; 3 | mod hp; 4 | pub mod hp_impl; 5 | 6 | pub use ebr::CsEBR; 7 | pub use hp::CsHP; 8 | 9 | pub use ebr::*; 10 | -------------------------------------------------------------------------------- /smrs/circ/src/smr_common.rs: -------------------------------------------------------------------------------- 1 | use atomic::Ordering; 2 | 3 | use crate::{GraphNode, RcInner, TaggedCnt}; 4 | 5 | /// A SMR-specific acquired pointer trait. 6 | /// 7 | /// In most cases such as EBR, IBR and Hyaline, Acquired is equivalent to a simple tagged 8 | /// pointer pointing a `Counted`. 9 | /// 10 | /// However, for some pointer-based SMR, `Acquired` should contain other information like an 11 | /// index of a hazard slot. For this reason, a type for acquired pointer must be SMR-dependent, 12 | /// and every SMR must provide some reasonable interfaces to access and manage this pointer. 13 | pub trait Acquired { 14 | fn clear(&mut self); 15 | fn as_ptr(&self) -> TaggedCnt; 16 | fn set_tag(&mut self, tag: usize); 17 | fn null() -> Self; 18 | fn is_null(&self) -> bool; 19 | fn swap(p1: &mut Self, p2: &mut Self); 20 | fn eq(&self, other: &Self) -> bool; 21 | unsafe fn copy_to(&self, other: &mut Self); 22 | } 23 | 24 | pub trait Validatable { 25 | fn validate(&self) -> bool; 26 | fn ptr(&self) -> TaggedCnt; 27 | } 28 | 29 | /// A SMR-specific critical section manager trait. 30 | /// 31 | /// We construct this `Cs` right before starting an operation, 32 | /// and drop(or `clear`) it after the operation. 33 | pub trait Cs { 34 | /// A SMR-specific acquired pointer trait 35 | /// 36 | /// For more information, read a comment on `Acquired`. 
37 | type RawShield: Acquired; 38 | type WeakGuard: Validatable; 39 | 40 | fn new() -> Self; 41 | unsafe fn unprotected() -> Self; 42 | fn create_object(obj: T, init_strong: u32) -> *mut RcInner; 43 | unsafe fn own_object(ptr: *mut RcInner) -> RcInner; 44 | /// Creates a shield for the given pointer, assuming that `ptr` is already protected by a 45 | /// reference count. 46 | fn reserve(&self, ptr: TaggedCnt, shield: &mut Self::RawShield); 47 | fn acquire(&self, load: F, shield: &mut Self::RawShield) -> TaggedCnt 48 | where 49 | F: Fn(Ordering) -> TaggedCnt; 50 | fn clear(&mut self); 51 | 52 | fn timestamp() -> Option; 53 | fn increment_strong(inner: &RcInner) -> bool; 54 | unsafe fn decrement_strong>( 55 | inner: &mut RcInner, 56 | count: u32, 57 | cs: Option<&Self>, 58 | ); 59 | unsafe fn try_destruct>(inner: &mut RcInner); 60 | unsafe fn try_dealloc(inner: &mut RcInner); 61 | fn increment_weak(inner: &RcInner, count: u32); 62 | unsafe fn decrement_weak(inner: &mut RcInner, cs: Option<&Self>); 63 | fn non_zero(inner: &RcInner) -> bool; 64 | fn strong_count(inner: &RcInner) -> u32; 65 | } 66 | -------------------------------------------------------------------------------- /smrs/circ/src/utils.rs: -------------------------------------------------------------------------------- 1 | use core::mem; 2 | use std::{hash::Hash, mem::ManuallyDrop, ptr::null_mut, sync::atomic::AtomicU64}; 3 | 4 | /// An instance of an object of type T with an atomic reference count. 
/// An instance of an object of type `T` with an atomic reference count.
pub struct RcInner<T> {
    /// The managed value; dropped manually during SMR-driven destruction.
    pub storage: ManuallyDrop<T>,
    /// Packed reference-count state (interpretation is SMR-specific).
    pub state: AtomicU64,
}

impl<T> RcInner<T> {
    /// Creates a new counted cell around `val` with the initial state `init`.
    pub fn new(val: T, init: u64) -> Self {
        Self {
            storage: ManuallyDrop::new(val),
            state: AtomicU64::new(init),
        }
    }

    /// Returns a shared reference to the stored value.
    pub fn data(&self) -> &T {
        &self.storage
    }

    /// Returns an exclusive reference to the stored value.
    pub fn data_mut(&mut self) -> &mut T {
        &mut self.storage
    }

    /// Consumes the cell and returns the stored value.
    pub fn into_inner(self) -> T {
        ManuallyDrop::into_inner(self.storage)
    }
}

/// A raw pointer that carries a small tag in its unused low (alignment) bits
/// and a `HIGH_TAG_WIDTH`-bit tag in its topmost bits.
pub struct Tagged<T> {
    ptr: *mut T,
}

impl<T> Default for Tagged<T> {
    fn default() -> Self {
        Self { ptr: null_mut() }
    }
}

impl<T> Clone for Tagged<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T> Copy for Tagged<T> {}

impl<T> PartialEq for Tagged<T> {
    // Equality ignores the high tag but keeps the low tag significant.
    fn eq(&self, other: &Self) -> bool {
        self.with_high_tag(0).ptr == other.with_high_tag(0).ptr
    }
}

impl<T> Eq for Tagged<T> {}

impl<T> Hash for Tagged<T> {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.ptr.hash(state)
    }
}

/// The number of bits reserved at the top of a pointer for the high tag.
pub const HIGH_TAG_WIDTH: u32 = 4;

impl<T> Tagged<T> {
    /// Bit position where the high tag begins.
    const fn high_bits_pos() -> u32 {
        usize::BITS - HIGH_TAG_WIDTH
    }

    /// Mask covering the high-tag bits.
    const fn high_bits() -> usize {
        ((1 << HIGH_TAG_WIDTH) - 1) << Self::high_bits_pos()
    }

    pub fn new(ptr: *mut T) -> Self {
        Self { ptr }
    }

    pub fn null() -> Self {
        Self { ptr: null_mut() }
    }

    /// Returns `true` if the pointer (ignoring both tags) is null.
    pub fn is_null(&self) -> bool {
        self.as_raw().is_null()
    }

    /// Returns the low (alignment-bit) tag.
    pub fn tag(&self) -> usize {
        let ptr = self.ptr as usize;
        ptr & low_bits::<T>()
    }

    /// Returns the tag stored in the topmost `HIGH_TAG_WIDTH` bits.
    pub fn high_tag(&self) -> usize {
        let ptr = self.ptr as usize;
        (ptr & Self::high_bits()) >> Self::high_bits_pos()
    }

    /// Converts the pointer to a raw pointer (without the tags).
    pub fn as_raw(&self) -> *mut T {
        let ptr = self.ptr as usize;
        (ptr & !low_bits::<T>() & !Self::high_bits()) as *mut T
    }

    /// Returns the raw bit representation, tags included.
    pub fn as_usize(&self) -> usize {
        self.ptr as usize
    }

    /// Returns a copy of this pointer with the low tag replaced by `tag`.
    pub fn with_tag(&self, tag: usize) -> Self {
        Self::new(with_tag(self.ptr, tag))
    }

    /// Returns a copy of this pointer with the high tag replaced by `tag`
    /// (truncated to `HIGH_TAG_WIDTH` bits).
    pub fn with_high_tag(&self, tag: usize) -> Self {
        Self::new(
            (self.ptr as usize & !Self::high_bits()
                | ((tag & ((1 << HIGH_TAG_WIDTH) - 1)) << Self::high_bits_pos()))
                as *mut T,
        )
    }

    /// # Safety
    /// The pointer must be valid and the pointee must not be reclaimed for `'g`.
    pub unsafe fn deref<'g>(&self) -> &'g T {
        &*self.as_raw()
    }

    /// # Safety
    /// See `deref`; additionally, no other reference to the value may be active.
    pub unsafe fn deref_mut<'g>(&mut self) -> &'g mut T {
        &mut *self.as_raw()
    }

    /// # Safety
    /// See `deref`.
    pub unsafe fn as_ref<'g>(&self) -> Option<&'g T> {
        if self.is_null() {
            None
        } else {
            Some(self.deref())
        }
    }

    /// # Safety
    /// See `deref_mut`.
    pub unsafe fn as_mut<'g>(&mut self) -> Option<&'g mut T> {
        if self.is_null() {
            None
        } else {
            Some(self.deref_mut())
        }
    }
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
const fn low_bits<T>() -> usize {
    (1 << mem::align_of::<T>().trailing_zeros()) - 1
}

/// Returns the pointer with the given low tag.
fn with_tag<T>(ptr: *mut T, tag: usize) -> *mut T {
    ((ptr as usize & !low_bits::<T>()) | (tag & low_bits::<T>())) as *mut T
}

/// A tagged pointer to a reference-counted cell.
pub type TaggedCnt<T> = Tagged<RcInner<T>>;

/// Anything that can expose the tagged counted pointer it refers to.
pub trait Pointer<T> {
    fn as_ptr(&self) -> TaggedCnt<T>;
    fn is_null(&self) -> bool {
        self.as_ptr().is_null()
    }
}
14 | #[inline] 15 | pub fn set_bag_capacity(cap: usize) { 16 | assert!(cap > 1, "capacity must be greater than 1."); 17 | MAX_OBJECTS.store(cap, Ordering::Relaxed); 18 | } 19 | 20 | /// Returns the current capacity of thread-local garbage bag. 21 | #[inline] 22 | pub fn bag_capacity() -> usize { 23 | MAX_OBJECTS.load(Ordering::Relaxed) 24 | } 25 | 26 | /// A deferred task consisted of data and a callable function. 27 | /// 28 | /// Note that a [`Deferred`] must be finalized by `execute` function, and `drop`ping this object 29 | /// will trigger a panic! 30 | /// 31 | /// Also, [`Deferred`] is `Send` because it may be executed by an arbitrary thread. 32 | #[derive(Debug)] 33 | pub(crate) struct Deferred { 34 | data: *mut u8, 35 | task: unsafe fn(*mut u8), 36 | } 37 | 38 | impl Deferred { 39 | #[inline] 40 | #[must_use] 41 | pub fn new(data: *mut u8, task: unsafe fn(*mut u8)) -> Self { 42 | Self { data, task } 43 | } 44 | 45 | /// Executes and finalizes this deferred task. 46 | #[inline] 47 | pub unsafe fn execute(self) { 48 | (self.task)(self.data); 49 | // Prevent calling the `drop` for this object. 50 | forget(self); 51 | } 52 | 53 | /// Returns a copy of inner `data`. 54 | #[inline] 55 | pub fn data(&self) -> *mut u8 { 56 | self.data 57 | } 58 | } 59 | 60 | impl Drop for Deferred { 61 | fn drop(&mut self) { 62 | // Note that a `Deferred` must be finalized by `execute` function. 63 | // In other words, we must make sure that all deferred tasks are executed consequently! 64 | panic!("`Deferred` task must be finalized by `execute`!"); 65 | } 66 | } 67 | 68 | /// [`Deferred`] can be collected by arbitrary threads. 69 | unsafe impl Send for Deferred {} 70 | 71 | /// A bag of deferred functions. 72 | pub(crate) struct Bag { 73 | /// Stashed garbages. 74 | defs: Vec, 75 | } 76 | 77 | /// `Bag::try_push()` requires that it is safe for another thread to execute the given functions. 78 | unsafe impl Send for Bag {} 79 | 80 | impl Bag { 81 | /// Returns a new, empty bag. 
82 | #[inline] 83 | pub fn new() -> Self { 84 | Self { 85 | defs: Vec::with_capacity(bag_capacity()), 86 | } 87 | } 88 | 89 | /// Attempts to insert a deferred function into the bag. 90 | /// 91 | /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is 92 | /// full. 93 | #[inline] 94 | pub fn try_push(&mut self, def: Deferred) -> Result<(), Deferred> { 95 | if self.len() == bag_capacity() { 96 | return Err(def); 97 | } 98 | self.defs.push(def); 99 | Ok(()) 100 | } 101 | 102 | /// Creates an iterator of [`Deferred`] from a [`Bag`]. 103 | #[inline] 104 | pub fn into_iter(self) -> impl Iterator { 105 | self.defs.into_iter() 106 | } 107 | 108 | #[inline] 109 | pub fn len(&self) -> usize { 110 | self.defs.len() 111 | } 112 | } 113 | 114 | impl Default for Bag { 115 | fn default() -> Self { 116 | Self::new() 117 | } 118 | } 119 | 120 | /// A pair of an epoch and a bag. 121 | pub(crate) struct SealedBag { 122 | epoch: Epoch, 123 | inner: Bag, 124 | } 125 | 126 | /// It is safe to share `SealedBag` because `is_expired` only inspects the epoch. 127 | unsafe impl Sync for SealedBag {} 128 | 129 | impl SealedBag { 130 | #[inline] 131 | pub(crate) fn new(epoch: Epoch, inner: Bag) -> Self { 132 | Self { epoch, inner } 133 | } 134 | 135 | /// Checks if it is safe to drop the bag w.r.t. the given global epoch. 136 | #[inline] 137 | pub(crate) fn is_expired(&self, global_epoch: Epoch) -> bool { 138 | global_epoch.value() - self.epoch.value() >= 2 139 | } 140 | 141 | #[inline] 142 | pub(crate) fn into_inner(self) -> Bag { 143 | self.inner 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /smrs/hp-brcu/src/epoch.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | /// An epoch that can be marked as pinned or unpinned. 
/// An epoch that can be marked as pinned or unpinned.
///
/// Internally, the epoch is represented as an integer that wraps around at some unspecified point
/// and a flag that represents whether it is pinned or unpinned.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct Epoch {
    /// The least significant bit is set if pinned. The rest of the bits hold the epoch.
    data: usize,
}

impl Epoch {
    /// Returns the starting epoch in unpinned state.
    #[inline]
    pub const fn starting() -> Self {
        Epoch { data: 0 }
    }

    /// Returns the number of the epoch as `isize`.
    #[inline]
    pub fn value(&self) -> isize {
        // Strip the pin bit, then arithmetic-shift so that wrapping subtraction
        // of two epoch values stays meaningful.
        (self.data & !1) as isize >> 1
    }

    /// Returns `true` if the epoch is marked as pinned.
    #[inline]
    pub fn is_pinned(self) -> bool {
        (self.data & 1) == 1
    }

    /// Returns the same epoch, but marked as pinned.
    #[inline]
    pub fn pinned(self) -> Epoch {
        Epoch {
            data: self.data | 1,
        }
    }

    /// Returns the same epoch, but marked as unpinned.
    #[inline]
    pub fn unpinned(self) -> Epoch {
        Epoch {
            data: self.data & !1,
        }
    }

    /// Returns the successor epoch.
    ///
    /// The returned epoch will be marked as pinned only if the previous one was as well.
    #[inline]
    pub fn successor(self) -> Epoch {
        Epoch {
            data: self.data.wrapping_add(2),
        }
    }
}

/// An atomic value that holds an `Epoch`.
#[derive(Default, Debug)]
#[repr(transparent)]
pub struct AtomicEpoch {
    /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented
    /// using an `AtomicUsize`.
    data: AtomicUsize,
}

impl AtomicEpoch {
    /// Creates a new atomic epoch.
    #[inline]
    pub const fn new(epoch: Epoch) -> Self {
        let data = AtomicUsize::new(epoch.data);
        AtomicEpoch { data }
    }

    /// Loads a value from the atomic epoch.
    #[inline]
    pub fn load(&self, ord: Ordering) -> Epoch {
        Epoch {
            data: self.data.load(ord),
        }
    }

    /// Stores a value into the atomic epoch.
    #[inline]
    pub fn store(&self, epoch: Epoch, ord: Ordering) {
        self.data.store(epoch.data, ord);
    }

    /// Stores a value into the atomic epoch if the current value is the same as `current`.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    #[inline]
    pub fn compare_exchange(
        &self,
        current: Epoch,
        new: Epoch,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Epoch, Epoch> {
        match self
            .data
            .compare_exchange(current.data, new.data, success, failure)
        {
            Ok(data) => Ok(Epoch { data }),
            Err(data) => Err(Epoch { data }),
        }
    }
}
43 | #[inline] 44 | pub fn validate(pointer: *mut T, src: &AtomicPtr) -> Result<(), *mut T> { 45 | fence(Ordering::SeqCst); 46 | let new = src.load(Ordering::Acquire); 47 | if pointer == new { 48 | Ok(()) 49 | } else { 50 | Err(new) 51 | } 52 | } 53 | 54 | /// Try protecting `pointer` obtained from `src`. If not, returns the current value. 55 | /// 56 | /// If "`src` still pointing to `pointer`" implies that `pointer` is not retired, then `Ok(())` 57 | /// means that this shield is validated. 58 | #[inline] 59 | pub fn try_protect(&self, pointer: *mut T, src: &AtomicPtr) -> Result<(), *mut T> { 60 | self.protect_raw(pointer, Ordering::Release); 61 | Self::validate(pointer, src) 62 | } 63 | 64 | /// Get a protected pointer from `src`. 65 | /// 66 | /// See `try_protect()`. 67 | #[inline] 68 | pub fn protect(&self, src: &AtomicPtr) -> *mut T { 69 | let mut pointer = src.load(Ordering::Relaxed); 70 | while let Err(new) = self.try_protect(pointer, src) { 71 | pointer = new; 72 | } 73 | pointer 74 | } 75 | 76 | #[inline] 77 | pub fn swap(x: &mut HazardPointer, y: &mut HazardPointer) { 78 | swap(&mut x.idx, &mut y.idx); 79 | } 80 | } 81 | 82 | impl Drop for HazardPointer { 83 | fn drop(&mut self) { 84 | self.reset_protection(); 85 | unsafe { (*self.local.cast_mut()).release_slot(self.idx) }; 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /smrs/hp-brcu/src/lib.rs: -------------------------------------------------------------------------------- 1 | // To use `#[cfg(sanitize = "address")]` 2 | #![feature(cfg_sanitize)] 3 | #![feature(thread_local)] 4 | 5 | mod deferred; 6 | mod epoch; 7 | mod handle; 8 | mod hazard; 9 | mod internal; 10 | mod pointers; 11 | mod queue; 12 | mod rollback; 13 | 14 | pub use deferred::{bag_capacity, set_bag_capacity}; 15 | pub use handle::*; 16 | pub use internal::*; 17 | pub use pointers::*; 18 | 19 | use std::{cell::RefCell, sync::OnceLock}; 20 | 21 | static GLOBAL: OnceLock = 
OnceLock::new(); 22 | 23 | #[inline] 24 | pub fn global() -> &'static Global { 25 | GLOBAL.get_or_init(Global::new) 26 | } 27 | 28 | thread_local! { 29 | pub static THREAD: RefCell> = RefCell::new(Box::new(global().register())); 30 | } 31 | 32 | #[cfg(test)] 33 | mod test { 34 | use std::thread::scope; 35 | 36 | use atomic::Ordering; 37 | 38 | use super::THREAD; 39 | use crate::handle::{RollbackProof, Thread, Unprotected}; 40 | use crate::pointers::{Atomic, Owned, Shield}; 41 | 42 | struct Node { 43 | next: Atomic, 44 | } 45 | 46 | struct Cursor { 47 | prev: Shield, 48 | curr: Shield, 49 | } 50 | 51 | impl Cursor { 52 | fn default(thread: &mut crate::Thread) -> Self { 53 | Self { 54 | prev: Shield::null(thread), 55 | curr: Shield::null(thread), 56 | } 57 | } 58 | } 59 | 60 | const THREADS: usize = 30; 61 | const COUNT_PER_THREAD: usize = 1 << 15; 62 | 63 | #[test] 64 | fn double_node() { 65 | let head = Atomic::new(Node { 66 | next: Atomic::new(Node { 67 | next: Atomic::null(), 68 | }), 69 | }); 70 | scope(|s| { 71 | for _ in 0..THREADS { 72 | s.spawn(|| THREAD.with(|th| double_node_work(&mut *th.borrow_mut(), &head))); 73 | } 74 | }); 75 | unsafe { 76 | let head = head.into_owned(); 77 | drop( 78 | head.next 79 | .load(Ordering::Relaxed, &Unprotected::new()) 80 | .into_owned(), 81 | ); 82 | } 83 | } 84 | 85 | fn double_node_work(thread: &mut Thread, head: &Atomic) { 86 | let mut cursor = Cursor::default(thread); 87 | for _ in 0..COUNT_PER_THREAD { 88 | loop { 89 | unsafe { 90 | thread.critical_section(|guard| { 91 | let prev = head.load(Ordering::Acquire, guard); 92 | let curr = prev.as_ref().unwrap().next.load(Ordering::Acquire, guard); 93 | cursor.prev.protect(prev); 94 | cursor.curr.protect(curr); 95 | }); 96 | } 97 | 98 | let new = Owned::new(Node { 99 | next: Atomic::new(Node { 100 | next: Atomic::null(), 101 | }), 102 | }); 103 | 104 | match head.compare_exchange( 105 | cursor.prev.shared(), 106 | new, 107 | Ordering::AcqRel, 108 | Ordering::Acquire, 109 | 
thread, 110 | ) { 111 | Ok(_) => unsafe { 112 | thread.retire(cursor.prev.shared()); 113 | thread.retire(cursor.curr.shared()); 114 | break; 115 | }, 116 | Err(e) => unsafe { 117 | let new = e.new; 118 | drop( 119 | new.next 120 | .load(Ordering::Relaxed, &Unprotected::new()) 121 | .into_owned(), 122 | ); 123 | }, 124 | } 125 | } 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /smrs/hp-pp/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hp_pp" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | membarrier = { git = "https://github.com/jeehoonkang/membarrier-rs.git", branch = "smr-benchmark" } 10 | crossbeam-utils = "0.8.14" 11 | rustc-hash = "1.1.0" 12 | 13 | [dev-dependencies] 14 | rand = "0.8" 15 | -------------------------------------------------------------------------------- /smrs/hp-pp/README.md: -------------------------------------------------------------------------------- 1 | # HP++: A Hazard Pointers Extension for Better Applicability 2 | 3 | This is an implementation of *HP++*, a safe memory reclamation scheme proposed in 4 | 5 | > Jaehwang Jung, Janggun Lee, Jeonghyeon Kim and Jeehoon Kang, Applying Hazard Pointers to More Concurrent Data Structures, SPAA 2023. 6 | 7 | The benchmark suite which evaluates the performance of HP++ can be found at [smr-benchmark](https://github.com/kaist-cp/smr-benchmark) repository. 
8 | 9 | ## Introduction 10 | 11 | **HP++ is a backward-compatible extension for hazard pointers (HP) 12 | that enables optimistically traversing possibly detached nodes.** 13 | The key idea is *under-approximating* the unreachability in validation 14 | to allow optimistic traversal by letting the deleter mark the node *after* detaching, and 15 | *patching up* the potentially unsafe accesses arising from false-negatives 16 | by letting the deleter protect such pointers. 17 | Thanks to optimistic traversal, data structures with HP++ 18 | may outperform same-purpose data structures with HP 19 | while consuming a similar amount of memory. 20 | 21 | ## API Design 22 | 23 | You can check the actual implementation of [Harris's list](https://www.cl.cam.ac.uk/research/srg/netos/papers/2001-caslists.pdf) in [tests/harris_list.rs](tests/harris_list.rs). 24 | 25 | This crate provides two major APIs: `try_protect_pp` and `try_unlink`, corresponding to **TryProtect** and **TryUnlink** in the original paper, respectively. 26 | 27 | (`.._pp`, which stands for *plus-plus*, is used in `try_protect_pp` to distinguish it from `try_protect` function that provides HP version of protecting.) 28 | 29 | ### `try_protect_pp` 30 | 31 | ```ignore 32 | pub fn try_protect_pp( 33 | &mut self, 34 | ptr: *mut T, 35 | src: &S, 36 | src_link: &AtomicPtr, 37 | is_invalid: &F, 38 | ) -> Result<(), ProtectError> 39 | where 40 | F: Fn(&S) -> bool, 41 | ``` 42 | 43 | Traversing threads use `try_protect_pp` to protect a pointer loaded from a source object. 44 | 45 | It takes 4 arguments (except the `HazardPointer` to protect with): 46 | 47 | 1. `ptr`: the pointer to protect. 48 | 2. `src`: the reference of the source object. 49 | 3. `src_link`: the field of `src` from which `ptr` was loaded. 50 | 4. `is_invalid`: the predicate to check whether src is invalidated. 51 | 52 | If `src` is invalidated, `try_protect_pp` returns `false` meaning that it is unsafe to create new protection from `src`. 
Otherwise, it returns `true`, but if `src_link` has changed from `ptr`, then the new value is written to `ptr`.

### `try_unlink`

```ignore
pub unsafe fn try_unlink<T>(
    unlink: impl Unlink<T>,
    frontier: &[*mut T]
) -> bool
where
    T: Invalidate,
```

Unlinking threads use `try_unlink` to physically delete and retire node(s) while protecting the traversing threads. The protection will persist until the retired nodes are invalidated by reclaimers.

It takes 2 arguments:

1. `unlink`: the trait object which implements the `do_unlink` function that performs unlinking and returns the unlinked node(s).
2. `frontier`: the pointers that the unlinker has to protect for the traversing threads.

The *unlinking frontier* is the set of pointers that are reachable by following a single link from the *to-be-unlinked objects* but are not themselves to-be-unlinked. The `frontier` argument to `try_unlink` must be decided ahead of the actual unlinking, and the data structure must guarantee that the frontier does not change once it is decided: otherwise, the traversing thread's access to a frontier node may not be protected.

Note that the node type `T` implements the `Invalidate` trait, so that unlinked nodes can be later invalidated by the reclaimers.

Invalidation can be implemented by adding a flag to the node. But in most cases, this can be done without extra space overhead using tagged pointers, similar to logical deletion.

`try_unlink` returns whether the unlink was successful.
80 | -------------------------------------------------------------------------------- /smrs/hp-pp/src/domain.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | use crossbeam_utils::CachePadded; 4 | use rustc_hash::FxHashSet; 5 | 6 | use crate::hazard::ThreadRecords; 7 | use crate::retire::RetiredList; 8 | use crate::thread::Thread; 9 | 10 | #[derive(Debug)] 11 | pub struct Domain { 12 | pub(crate) threads: CachePadded, 13 | pub(crate) barrier: CachePadded, 14 | pub(crate) retireds: CachePadded, 15 | pub(crate) num_garbages: CachePadded, 16 | } 17 | 18 | impl Domain { 19 | pub const fn new() -> Self { 20 | Self { 21 | threads: CachePadded::new(ThreadRecords::new()), 22 | barrier: CachePadded::new(EpochBarrier(AtomicUsize::new(0))), 23 | retireds: CachePadded::new(RetiredList::new()), 24 | num_garbages: CachePadded::new(AtomicUsize::new(0)), 25 | } 26 | } 27 | 28 | pub fn collect_guarded_ptrs<'domain>( 29 | &self, 30 | reclaimer: &mut Thread<'domain>, 31 | ) -> FxHashSet<*mut u8> { 32 | self.threads 33 | .iter() 34 | .flat_map(|thread| thread.iter(reclaimer)) 35 | .collect() 36 | } 37 | 38 | pub fn num_garbages(&self) -> usize { 39 | self.num_garbages.load(Ordering::Relaxed) 40 | } 41 | } 42 | 43 | impl Drop for Domain { 44 | fn drop(&mut self) { 45 | for t in self.threads.iter() { 46 | assert!(t.available.load(Ordering::Relaxed)) 47 | } 48 | let mut retireds = self.retireds.pop_all(); 49 | for r in retireds.drain(..) 
{ 50 | unsafe { (r.deleter)(r.ptr) }; 51 | } 52 | } 53 | } 54 | 55 | #[derive(Debug)] 56 | pub(crate) struct EpochBarrier(AtomicUsize); 57 | 58 | impl EpochBarrier { 59 | pub(crate) fn barrier(&self) { 60 | let epoch = self.0.load(Ordering::Acquire); 61 | membarrier::heavy(); 62 | let new_epoch = epoch.wrapping_add(1); 63 | let _ = self 64 | .0 65 | .compare_exchange(epoch, new_epoch, Ordering::Release, Ordering::Acquire); 66 | } 67 | 68 | pub(crate) fn read(&self) -> usize { 69 | let mut epoch = self.0.load(Ordering::Acquire); 70 | loop { 71 | membarrier::light_membarrier(); 72 | let new_epoch = self.0.load(Ordering::Acquire); 73 | if epoch == new_epoch { 74 | return epoch; 75 | } 76 | epoch = new_epoch 77 | } 78 | } 79 | 80 | pub(crate) fn check(old: usize, new: usize) -> bool { 81 | new.wrapping_sub(old) >= 2 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /smrs/hp-pp/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | 3 | mod domain; 4 | mod hazard; 5 | mod retire; 6 | mod tag; 7 | mod thread; 8 | 9 | pub use hazard::HazardPointer; 10 | pub use hazard::ProtectError; 11 | pub use membarrier::light_membarrier; 12 | pub use tag::*; 13 | pub use thread::set_counts_between_flush; 14 | 15 | use core::cell::RefCell; 16 | use std::thread_local; 17 | 18 | use crate::domain::Domain; 19 | pub use crate::thread::Thread; 20 | 21 | pub static DEFAULT_DOMAIN: Domain = Domain::new(); 22 | 23 | // NOTE: MUST NOT take raw pointer to TLS. They randomly move??? 24 | thread_local! { 25 | static DEFAULT_THREAD: RefCell>> = RefCell::new(Box::new(Thread::new(&DEFAULT_DOMAIN))); 26 | } 27 | 28 | pub trait Unlink { 29 | fn do_unlink(&self) -> Result, ()>; 30 | } 31 | 32 | pub trait Invalidate { 33 | fn invalidate(&self); 34 | } 35 | 36 | /// Retire a pointer, in the thread-local retired pointer bag. 
37 | /// 38 | /// # Safety 39 | /// TODO 40 | #[inline] 41 | pub unsafe fn retire(ptr: *mut T) { 42 | DEFAULT_THREAD.with(|t| t.borrow_mut().retire(ptr)) 43 | } 44 | 45 | /// Protects `links` and try unlinking by `do_unlink`. if successful, mark the returned nodes as 46 | /// invalidated and retire them. 47 | /// 48 | /// `do_unlink` tries unlinking, and if successful, it returns raw pointers to unlinked nodes. 49 | /// 50 | /// # Safety 51 | /// * The memory blocks in `to_be_unlinked` are no longer modified. 52 | /// * TODO 53 | pub unsafe fn try_unlink(unlink: impl Unlink, frontier: &[*mut T]) -> bool 54 | where 55 | T: Invalidate, 56 | { 57 | DEFAULT_THREAD.with(|t| t.borrow_mut().try_unlink(unlink, frontier)) 58 | } 59 | 60 | /// Trigger reclamation 61 | pub fn do_reclamation() { 62 | DEFAULT_THREAD.with(|t| { 63 | t.borrow_mut().do_reclamation(); 64 | }) 65 | } 66 | -------------------------------------------------------------------------------- /smrs/hp-pp/src/retire.rs: -------------------------------------------------------------------------------- 1 | use core::ptr; 2 | use core::sync::atomic::{AtomicPtr, Ordering}; 3 | use std::mem; 4 | 5 | use crate::{HazardPointer, Invalidate}; 6 | 7 | #[derive(Debug, Clone, Copy)] 8 | pub(crate) struct Retired { 9 | pub(crate) ptr: *mut u8, 10 | pub(crate) deleter: unsafe fn(ptr: *mut u8), 11 | } 12 | 13 | pub(crate) struct Unlinked<'domain> { 14 | ptrs: Vec<*mut u8>, 15 | invalidater: unsafe fn(*mut u8), 16 | deleter: unsafe fn(*mut u8), 17 | hps: Vec>, 18 | } 19 | 20 | // TODO: require in retire 21 | unsafe impl Send for Retired {} 22 | 23 | impl Retired { 24 | pub(crate) fn new(ptr: *mut T) -> Self { 25 | Self { 26 | ptr: ptr as *mut u8, 27 | deleter: free::, 28 | } 29 | } 30 | } 31 | 32 | impl<'domain> Unlinked<'domain> { 33 | pub(crate) fn new(ptrs: Vec<*mut T>, hps: Vec>) -> Self { 34 | Self { 35 | ptrs: unsafe { mem::transmute::, Vec<*mut u8>>(ptrs) }, 36 | invalidater: invalidate::, 37 | deleter: free::, 38 | 
hps, 39 | } 40 | } 41 | 42 | pub(crate) fn do_invalidation(self) -> (Vec, Vec>) { 43 | let mut retireds = Vec::with_capacity(self.ptrs.len()); 44 | for ptr in self.ptrs { 45 | unsafe { (self.invalidater)(ptr) }; 46 | retireds.push(Retired { 47 | ptr, 48 | deleter: self.deleter, 49 | }); 50 | } 51 | (retireds, self.hps) 52 | } 53 | } 54 | 55 | unsafe fn free(ptr: *mut u8) { 56 | drop(Box::from_raw(ptr as *mut T)) 57 | } 58 | 59 | unsafe fn invalidate(ptr: *mut u8) { 60 | T::invalidate(&*(ptr as *mut T)) 61 | } 62 | 63 | #[derive(Debug)] 64 | pub(crate) struct RetiredList { 65 | head: AtomicPtr, 66 | } 67 | 68 | #[derive(Debug)] 69 | struct RetiredListNode { 70 | retireds: Vec, 71 | next: *const RetiredListNode, 72 | } 73 | 74 | impl RetiredList { 75 | pub(crate) const fn new() -> Self { 76 | Self { 77 | head: AtomicPtr::new(core::ptr::null_mut()), 78 | } 79 | } 80 | 81 | pub(crate) fn push(&self, retireds: Vec) { 82 | let new = Box::leak(Box::new(RetiredListNode { 83 | retireds, 84 | next: ptr::null_mut(), 85 | })); 86 | 87 | let mut head = self.head.load(Ordering::Relaxed); 88 | loop { 89 | new.next = head; 90 | match self 91 | .head 92 | .compare_exchange(head, new, Ordering::Release, Ordering::Relaxed) 93 | { 94 | Ok(_) => return, 95 | Err(head_new) => head = head_new, 96 | } 97 | } 98 | } 99 | 100 | pub(crate) fn pop_all(&self) -> Vec { 101 | let mut cur = self.head.swap(core::ptr::null_mut(), Ordering::Acquire); 102 | let mut retireds = Vec::new(); 103 | while !cur.is_null() { 104 | let mut cur_box = unsafe { Box::from_raw(cur) }; 105 | retireds.append(&mut cur_box.retireds); 106 | cur = cur_box.next.cast_mut(); 107 | } 108 | retireds 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /smrs/hp-pp/src/tag.rs: -------------------------------------------------------------------------------- 1 | use core::mem; 2 | 3 | /// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`. 
/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
const fn low_bits<T>() -> usize {
    (1 << mem::align_of::<T>().trailing_zeros()) - 1
}

/// Returns the pointer with the given tag embedded in its alignment bits.
#[inline]
pub fn tagged<T>(ptr: *mut T, tag: usize) -> *mut T {
    ((ptr as usize & !low_bits::<T>()) | (tag & low_bits::<T>())) as *mut T
}

/// Decomposes a tagged pointer into the raw pointer and the tag.
#[inline]
pub fn decompose_ptr<T>(ptr: *mut T) -> (*mut T, usize) {
    let ptr = ptr as usize;
    let raw = (ptr & !low_bits::<T>()) as *mut T;
    let tag = ptr & low_bits::<T>();
    (raw, tag)
}

/// Extracts the actual address out of a tagged pointer.
#[inline]
pub fn untagged<T>(ptr: *mut T) -> *mut T {
    let ptr = ptr as usize;
    (ptr & !low_bits::<T>()) as *mut T
}

/// Extracts the tag out of a tagged pointer.
#[inline]
pub fn tag<T>(ptr: *mut T) -> usize {
    let ptr = ptr as usize;
    ptr & low_bits::<T>()
}
SigHandler, SigSet, Signal}; 5 | use setjmp::{jmp_buf, sigjmp_buf, siglongjmp}; 6 | use std::mem::{transmute, MaybeUninit}; 7 | use std::sync::atomic::{compiler_fence, AtomicBool, Ordering}; 8 | 9 | static mut NEUTRALIZE_SIGNAL: Signal = Signal::SIGUSR1; 10 | static mut SIG_ACTION: MaybeUninit = MaybeUninit::uninit(); 11 | 12 | thread_local! { 13 | static JMP_BUF: Box = Box::new(unsafe { MaybeUninit::zeroed().assume_init() }); 14 | static RESTARTABLE: Box = Box::new(AtomicBool::new(false)); 15 | } 16 | 17 | /// Install a process-wide signal handler. 18 | /// Note that we don't have to call `sigaction` for every child thread. 19 | /// 20 | /// By default, SIGUSR1 is used as a neutralize signal. 21 | /// To use the other signal, use `set_neutralize_signal`. 22 | #[inline] 23 | pub(crate) unsafe fn install() { 24 | let sig_action = SigAction::new( 25 | SigHandler::SigAction(handle_signal), 26 | // Restart any interrupted sys calls instead of silently failing 27 | SaFlags::SA_RESTART | SaFlags::SA_SIGINFO, 28 | // Block signals during handler 29 | SigSet::all(), 30 | ); 31 | SIG_ACTION.write(sig_action); 32 | if sigaction(NEUTRALIZE_SIGNAL, SIG_ACTION.assume_init_ref()).is_err() { 33 | panic!("failed to install signal handler"); 34 | } 35 | } 36 | 37 | #[inline] 38 | pub(crate) unsafe fn send_signal(pthread: Pthread) -> nix::Result<()> { 39 | pthread_kill(pthread, NEUTRALIZE_SIGNAL) 40 | } 41 | 42 | pub(crate) struct Status { 43 | pub(crate) jmp_buf: *mut sigjmp_buf, 44 | pub(crate) rest: &'static AtomicBool, 45 | } 46 | 47 | impl Status { 48 | #[inline] 49 | pub unsafe fn new() -> Self { 50 | Self { 51 | jmp_buf: JMP_BUF.with(|buf| (&**buf as *const sigjmp_buf).cast_mut()), 52 | rest: RESTARTABLE.with(|rest| transmute(&**rest)), 53 | } 54 | } 55 | 56 | #[inline(always)] 57 | pub fn set_restartable(&self) { 58 | self.rest.store(true, Ordering::Relaxed); 59 | } 60 | 61 | #[inline(always)] 62 | pub fn unset_restartable(&self) { 63 | self.rest.store(false, 
Ordering::Relaxed); 64 | } 65 | } 66 | 67 | /// Get the current neutralize signal. 68 | /// 69 | /// By default, SIGUSR1 is used as a neutralize signal. 70 | /// To use a different signal, use `set_neutralize_signal`. 71 | /// 72 | /// # Safety 73 | /// 74 | /// This function accesses and modifies a static variable. 75 | /// To avoid potential race conditions, do not 76 | /// call this function concurrently. 77 | #[inline] 78 | pub unsafe fn neutralize_signal() -> Signal { 79 | NEUTRALIZE_SIGNAL 80 | } 81 | 82 | /// Set a user-defined neutralize signal. 83 | /// This function allows a user to use a signal 84 | /// other than SIGUSR1 as a neutralize signal. 85 | /// Note that it must be called before creating 86 | /// a `Collector` object. 87 | /// 88 | /// # Safety 89 | /// 90 | /// This function accesses and modifies a static variable. 91 | /// To avoid potential race conditions, do not 92 | /// call this function concurrently. 93 | #[inline] 94 | pub unsafe fn set_neutralize_signal(signal: Signal) { 95 | NEUTRALIZE_SIGNAL = signal; 96 | } 97 | 98 | #[inline] 99 | pub unsafe fn jmp_buf() -> *mut jmp_buf { 100 | JMP_BUF.with(|buf| (&**buf as *const sigjmp_buf).cast_mut()) 101 | } 102 | 103 | extern "C" fn handle_signal(_: i32, _: *mut siginfo_t, _: *mut c_void) { 104 | let rest: &AtomicBool = match RESTARTABLE.try_with(|rest| unsafe { transmute(&**rest) }) { 105 | Ok(rest) => rest, 106 | Err(_) => return, 107 | }; 108 | 109 | if !rest.load(Ordering::Relaxed) { 110 | return; 111 | } 112 | 113 | let buf = match JMP_BUF.try_with(|buf| (&**buf as *const sigjmp_buf).cast_mut()) { 114 | Ok(buf) => buf, 115 | Err(_) => return, 116 | }; 117 | rest.store(false, Ordering::Relaxed); 118 | compiler_fence(Ordering::SeqCst); 119 | 120 | unsafe { siglongjmp(buf, 1) }; 121 | } 122 | -------------------------------------------------------------------------------- /smrs/nbr/src/stats.rs: -------------------------------------------------------------------------------- 1 | use
std::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | pub(crate) static GLOBAL_GARBAGE_COUNT: AtomicUsize = AtomicUsize::new(0); 4 | 5 | pub(crate) fn incr_garb(count: usize) { 6 | GLOBAL_GARBAGE_COUNT.fetch_add(count, Ordering::Relaxed); 7 | } 8 | 9 | pub(crate) fn decr_garb(count: usize) { 10 | GLOBAL_GARBAGE_COUNT.fetch_sub(count, Ordering::Relaxed); 11 | } 12 | 13 | /// Get the current count of unreclaimed pointers. 14 | pub fn count_garbages() -> usize { 15 | GLOBAL_GARBAGE_COUNT.load(Ordering::Relaxed) 16 | } 17 | -------------------------------------------------------------------------------- /smrs/vbr/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vbr" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | crossbeam-utils = "0.8" 10 | atomic = "0.5" 11 | portable-atomic = "1" 12 | -------------------------------------------------------------------------------- /src/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! Shared runtime configuration for map benchmarks.
2 | 3 | pub mod map; 4 | -------------------------------------------------------------------------------- /src/ds_impl/cdrc/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | pub trait OutputHolder { 2 | fn default() -> Self; 3 | fn output(&self) -> &V; 4 | } 5 | 6 | pub trait ConcurrentMap { 7 | type Output: OutputHolder; 8 | 9 | fn empty_output() -> Self::Output { 10 | >::default() 11 | } 12 | 13 | fn new() -> Self; 14 | fn get(&self, key: &K, output: &mut Self::Output, cs: &C) -> bool; 15 | fn insert(&self, key: K, value: V, output: &mut Self::Output, cs: &C) -> bool; 16 | fn remove(&self, key: &K, output: &mut Self::Output, cs: &C) -> bool; 17 | } 18 | 19 | #[cfg(test)] 20 | pub mod tests { 21 | extern crate rand; 22 | use super::{ConcurrentMap, OutputHolder}; 23 | use cdrc::Cs; 24 | use crossbeam_utils::thread; 25 | use rand::prelude::*; 26 | 27 | const THREADS: i32 = 30; 28 | const ELEMENTS_PER_THREADS: i32 = 1000; 29 | 30 | pub fn smoke + Send + Sync>() { 31 | let map = &M::new(); 32 | 33 | thread::scope(|s| { 34 | for t in 0..THREADS { 35 | s.spawn(move |_| { 36 | let output = &mut M::empty_output(); 37 | let mut rng = rand::thread_rng(); 38 | let mut keys: Vec = 39 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 40 | keys.shuffle(&mut rng); 41 | for i in keys { 42 | assert!(map.insert(i, i.to_string(), output, &C::new())); 43 | } 44 | }); 45 | } 46 | }) 47 | .unwrap(); 48 | 49 | thread::scope(|s| { 50 | for t in 0..(THREADS / 2) { 51 | s.spawn(move |_| { 52 | let output = &mut M::empty_output(); 53 | let mut rng = rand::thread_rng(); 54 | let mut keys: Vec = 55 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 56 | keys.shuffle(&mut rng); 57 | let cs = &mut C::new(); 58 | for i in keys { 59 | assert!(map.remove(&i, output, cs)); 60 | assert_eq!(i.to_string(), *output.output()); 61 | cs.clear(); 62 | } 63 | }); 64 | } 65 | }) 66 | .unwrap(); 67 | 68 | thread::scope(|s| { 69 | 
for t in (THREADS / 2)..THREADS { 70 | s.spawn(move |_| { 71 | let output = &mut M::empty_output(); 72 | let mut rng = rand::thread_rng(); 73 | let mut keys: Vec = 74 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 75 | keys.shuffle(&mut rng); 76 | let cs = &mut C::new(); 77 | for i in keys { 78 | assert!(map.get(&i, output, cs)); 79 | assert_eq!(i.to_string(), *output.output()); 80 | cs.clear(); 81 | } 82 | }); 83 | } 84 | }) 85 | .unwrap(); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/ds_impl/cdrc/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::ConcurrentMap; 2 | use cdrc::Cs; 3 | use std::collections::hash_map::DefaultHasher; 4 | use std::hash::{Hash, Hasher}; 5 | 6 | use super::list::{Cursor, HMList}; 7 | 8 | pub struct HashMap { 9 | buckets: Vec>, 10 | } 11 | 12 | impl HashMap 13 | where 14 | K: Ord + Hash + Default, 15 | V: Default, 16 | C: Cs, 17 | { 18 | pub fn with_capacity(n: usize) -> Self { 19 | let mut buckets = Vec::with_capacity(n); 20 | for _ in 0..n { 21 | buckets.push(HMList::new()); 22 | } 23 | 24 | HashMap { buckets } 25 | } 26 | 27 | #[inline] 28 | pub fn get_bucket(&self, index: usize) -> &HMList { 29 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 30 | } 31 | 32 | #[inline] 33 | fn hash(k: &K) -> usize { 34 | let mut s = DefaultHasher::new(); 35 | k.hash(&mut s); 36 | s.finish() as usize 37 | } 38 | 39 | pub fn get(&self, k: &K, cursor: &mut Cursor, cs: &C) -> bool { 40 | let i = Self::hash(k); 41 | self.get_bucket(i).get(k, cursor, cs) 42 | } 43 | 44 | pub fn insert(&self, k: K, v: V, cursor: &mut Cursor, cs: &C) -> bool { 45 | let i = Self::hash(&k); 46 | self.get_bucket(i).insert(k, v, cursor, cs) 47 | } 48 | 49 | pub fn remove(&self, k: &K, cursor: &mut Cursor, cs: &C) -> bool { 50 | let i = Self::hash(k); 51 | self.get_bucket(i).remove(k, cursor, cs) 52 | } 53 
| } 54 | 55 | impl ConcurrentMap for HashMap 56 | where 57 | K: Ord + Hash + Default, 58 | V: Default, 59 | C: Cs, 60 | { 61 | type Output = Cursor; 62 | 63 | fn new() -> Self { 64 | Self::with_capacity(30000) 65 | } 66 | 67 | #[inline(always)] 68 | fn get(&self, key: &K, output: &mut Self::Output, cs: &C) -> bool { 69 | self.get(key, output, cs) 70 | } 71 | #[inline(always)] 72 | fn insert(&self, key: K, value: V, output: &mut Self::Output, cs: &C) -> bool { 73 | self.insert(key, value, output, cs) 74 | } 75 | #[inline(always)] 76 | fn remove(&self, key: &K, output: &mut Self::Output, cs: &C) -> bool { 77 | self.remove(key, output, cs) 78 | } 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | use super::HashMap; 84 | use crate::ds_impl::cdrc::concurrent_map; 85 | use cdrc::{CsEBR, CsHP}; 86 | 87 | #[test] 88 | fn smoke_hashmap_ebr() { 89 | concurrent_map::tests::smoke::>(); 90 | } 91 | 92 | #[test] 93 | fn smoke_hashmap_hp() { 94 | concurrent_map::tests::smoke::>(); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/ds_impl/cdrc/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | 3 | pub mod bonsai_tree; 4 | pub mod double_link; 5 | pub mod list; 6 | pub mod michael_hash_map; 7 | pub mod natarajan_mittal_tree; 8 | pub mod skip_list; 9 | 10 | pub use self::concurrent_map::{ConcurrentMap, OutputHolder}; 11 | 12 | pub use self::bonsai_tree::BonsaiTreeMap; 13 | pub use self::double_link::DoubleLink; 14 | pub use self::list::{HHSList, HList, HMList}; 15 | pub use self::michael_hash_map::HashMap; 16 | pub use self::natarajan_mittal_tree::NMTreeMap; 17 | pub use self::skip_list::SkipList; 18 | -------------------------------------------------------------------------------- /src/ds_impl/circ_ebr/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | use circ::CsEBR; 2 | 3 | pub trait OutputHolder { 4 | 
fn output(&self) -> &V; 5 | } 6 | 7 | pub trait ConcurrentMap { 8 | type Output: OutputHolder; 9 | 10 | fn new() -> Self; 11 | fn get(&self, key: &K, cs: &CsEBR) -> Option; 12 | fn insert(&self, key: K, value: V, cs: &CsEBR) -> bool; 13 | fn remove(&self, key: &K, cs: &CsEBR) -> Option; 14 | } 15 | 16 | #[cfg(test)] 17 | pub mod tests { 18 | extern crate rand; 19 | use super::{ConcurrentMap, OutputHolder}; 20 | use circ::{Cs, CsEBR}; 21 | use crossbeam_utils::thread; 22 | use rand::prelude::*; 23 | 24 | const THREADS: i32 = 30; 25 | const ELEMENTS_PER_THREADS: i32 = 1000; 26 | 27 | pub fn smoke + Send + Sync>() { 28 | let map = &M::new(); 29 | 30 | thread::scope(|s| { 31 | for t in 0..THREADS { 32 | s.spawn(move |_| { 33 | let mut rng = rand::thread_rng(); 34 | let mut keys: Vec = 35 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 36 | keys.shuffle(&mut rng); 37 | for i in keys { 38 | assert!(map.insert(i, i.to_string(), &CsEBR::new())); 39 | } 40 | }); 41 | } 42 | }) 43 | .unwrap(); 44 | 45 | thread::scope(|s| { 46 | for t in 0..(THREADS / 2) { 47 | s.spawn(move |_| { 48 | let mut rng = rand::thread_rng(); 49 | let mut keys: Vec = 50 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 51 | keys.shuffle(&mut rng); 52 | let cs = &mut CsEBR::new(); 53 | for i in keys { 54 | assert_eq!(i.to_string(), *map.remove(&i, cs).unwrap().output()); 55 | cs.clear(); 56 | } 57 | }); 58 | } 59 | }) 60 | .unwrap(); 61 | 62 | thread::scope(|s| { 63 | for t in (THREADS / 2)..THREADS { 64 | s.spawn(move |_| { 65 | let mut rng = rand::thread_rng(); 66 | let mut keys: Vec = 67 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 68 | keys.shuffle(&mut rng); 69 | let cs = &mut CsEBR::new(); 70 | for i in keys { 71 | let result = map.get(&i, cs); 72 | if (0..THREADS / 2).contains(&i) { 73 | assert!(result.is_none()); 74 | } else { 75 | assert_eq!(i.to_string(), *result.unwrap().output()); 76 | } 77 | cs.clear(); 78 | } 79 | }); 80 | } 81 | }) 82 | 
.unwrap(); 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/ds_impl/circ_ebr/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::ConcurrentMap; 2 | use circ::CsEBR; 3 | use std::collections::hash_map::DefaultHasher; 4 | use std::hash::{Hash, Hasher}; 5 | 6 | use super::list::HHSList; 7 | 8 | pub struct HashMap { 9 | buckets: Vec>, 10 | } 11 | 12 | impl HashMap 13 | where 14 | K: Ord + Hash + Default, 15 | V: Default, 16 | { 17 | pub fn with_capacity(n: usize) -> Self { 18 | let mut buckets = Vec::with_capacity(n); 19 | for _ in 0..n { 20 | buckets.push(HHSList::new()); 21 | } 22 | 23 | HashMap { buckets } 24 | } 25 | 26 | #[inline] 27 | pub fn get_bucket(&self, index: usize) -> &HHSList { 28 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 29 | } 30 | 31 | #[inline] 32 | fn hash(k: &K) -> usize { 33 | let mut s = DefaultHasher::new(); 34 | k.hash(&mut s); 35 | s.finish() as usize 36 | } 37 | 38 | pub fn get(&self, k: &K, cs: &CsEBR) -> Option< as ConcurrentMap>::Output> { 39 | let i = Self::hash(k); 40 | self.get_bucket(i).get(k, cs) 41 | } 42 | 43 | pub fn insert(&self, k: K, v: V, cs: &CsEBR) -> bool { 44 | let i = Self::hash(&k); 45 | self.get_bucket(i).insert(k, v, cs) 46 | } 47 | 48 | pub fn remove( 49 | &self, 50 | k: &K, 51 | cs: &CsEBR, 52 | ) -> Option< as ConcurrentMap>::Output> { 53 | let i = Self::hash(k); 54 | self.get_bucket(i).remove(k, cs) 55 | } 56 | } 57 | 58 | impl ConcurrentMap for HashMap 59 | where 60 | K: Ord + Hash + Default, 61 | V: Default, 62 | { 63 | type Output = as ConcurrentMap>::Output; 64 | 65 | fn new() -> Self { 66 | Self::with_capacity(30000) 67 | } 68 | 69 | #[inline(always)] 70 | fn get(&self, key: &K, cs: &CsEBR) -> Option { 71 | self.get(key, cs) 72 | } 73 | #[inline(always)] 74 | fn insert(&self, key: K, value: V, cs: &CsEBR) -> bool { 75 | self.insert(key, value, cs) 76 
| } 77 | #[inline(always)] 78 | fn remove(&self, key: &K, cs: &CsEBR) -> Option { 79 | self.remove(key, cs) 80 | } 81 | } 82 | 83 | #[cfg(test)] 84 | mod tests { 85 | use super::HashMap; 86 | use crate::ds_impl::circ_ebr::concurrent_map; 87 | 88 | #[test] 89 | fn smoke_hashmap() { 90 | concurrent_map::tests::smoke::>(); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/ds_impl/circ_ebr/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | 3 | pub mod bonsai_tree; 4 | pub mod double_link; 5 | pub mod list; 6 | pub mod michael_hash_map; 7 | pub mod natarajan_mittal_tree; 8 | pub mod skip_list; 9 | 10 | pub use self::concurrent_map::{ConcurrentMap, OutputHolder}; 11 | 12 | pub use self::bonsai_tree::BonsaiTreeMap; 13 | pub use self::double_link::DoubleLink; 14 | pub use self::list::{HHSList, HList, HMList}; 15 | pub use self::michael_hash_map::HashMap; 16 | pub use self::natarajan_mittal_tree::NMTreeMap; 17 | pub use self::skip_list::SkipList; 18 | -------------------------------------------------------------------------------- /src/ds_impl/circ_hp/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | use circ::CsHP; 2 | 3 | pub trait OutputHolder { 4 | fn default() -> Self; 5 | fn output(&self) -> &V; 6 | } 7 | 8 | pub trait ConcurrentMap { 9 | type Output: OutputHolder; 10 | 11 | fn empty_output() -> Self::Output { 12 | >::default() 13 | } 14 | 15 | fn new() -> Self; 16 | fn get(&self, key: &K, output: &mut Self::Output, cs: &CsHP) -> bool; 17 | fn insert(&self, key: K, value: V, output: &mut Self::Output, cs: &CsHP) -> bool; 18 | fn remove(&self, key: &K, output: &mut Self::Output, cs: &CsHP) -> bool; 19 | } 20 | 21 | #[cfg(test)] 22 | pub mod tests { 23 | extern crate rand; 24 | use super::{ConcurrentMap, OutputHolder}; 25 | use circ::{Cs, CsHP}; 26 | use crossbeam_utils::thread; 27 | use 
rand::prelude::*; 28 | 29 | const THREADS: i32 = 30; 30 | const ELEMENTS_PER_THREADS: i32 = 1000; 31 | 32 | pub fn smoke + Send + Sync>() { 33 | let map = &M::new(); 34 | 35 | thread::scope(|s| { 36 | for t in 0..THREADS { 37 | s.spawn(move |_| { 38 | let output = &mut M::empty_output(); 39 | let mut rng = rand::thread_rng(); 40 | let mut keys: Vec = 41 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 42 | keys.shuffle(&mut rng); 43 | for i in keys { 44 | assert!(map.insert(i, i.to_string(), output, &CsHP::new())); 45 | } 46 | }); 47 | } 48 | }) 49 | .unwrap(); 50 | 51 | thread::scope(|s| { 52 | for t in 0..(THREADS / 2) { 53 | s.spawn(move |_| { 54 | let output = &mut M::empty_output(); 55 | let mut rng = rand::thread_rng(); 56 | let mut keys: Vec = 57 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 58 | keys.shuffle(&mut rng); 59 | let cs = &mut CsHP::new(); 60 | for i in keys { 61 | assert!(map.remove(&i, output, cs)); 62 | assert_eq!(i.to_string(), *output.output()); 63 | cs.clear(); 64 | } 65 | }); 66 | } 67 | }) 68 | .unwrap(); 69 | 70 | thread::scope(|s| { 71 | for t in (THREADS / 2)..THREADS { 72 | s.spawn(move |_| { 73 | let output = &mut M::empty_output(); 74 | let mut rng = rand::thread_rng(); 75 | let mut keys: Vec = 76 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 77 | keys.shuffle(&mut rng); 78 | let cs = &mut CsHP::new(); 79 | for i in keys { 80 | assert!(map.get(&i, output, cs)); 81 | assert_eq!(i.to_string(), *output.output()); 82 | cs.clear(); 83 | } 84 | }); 85 | } 86 | }) 87 | .unwrap(); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/ds_impl/circ_hp/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::ConcurrentMap; 2 | use circ::CsHP; 3 | use std::collections::hash_map::DefaultHasher; 4 | use std::hash::{Hash, Hasher}; 5 | 6 | use super::list::{Cursor, HMList}; 
7 | 8 | pub struct HashMap { 9 | buckets: Vec>, 10 | } 11 | 12 | impl HashMap 13 | where 14 | K: Ord + Hash + Default, 15 | V: Default, 16 | { 17 | pub fn with_capacity(n: usize) -> Self { 18 | let mut buckets = Vec::with_capacity(n); 19 | for _ in 0..n { 20 | buckets.push(HMList::new()); 21 | } 22 | 23 | HashMap { buckets } 24 | } 25 | 26 | #[inline] 27 | pub fn get_bucket(&self, index: usize) -> &HMList { 28 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 29 | } 30 | 31 | #[inline] 32 | fn hash(k: &K) -> usize { 33 | let mut s = DefaultHasher::new(); 34 | k.hash(&mut s); 35 | s.finish() as usize 36 | } 37 | 38 | pub fn get(&self, k: &K, cursor: &mut Cursor, cs: &CsHP) -> bool { 39 | let i = Self::hash(k); 40 | self.get_bucket(i).get(k, cursor, cs) 41 | } 42 | 43 | pub fn insert(&self, k: K, v: V, cursor: &mut Cursor, cs: &CsHP) -> bool { 44 | let i = Self::hash(&k); 45 | self.get_bucket(i).insert(k, v, cursor, cs) 46 | } 47 | 48 | pub fn remove(&self, k: &K, cursor: &mut Cursor, cs: &CsHP) -> bool { 49 | let i = Self::hash(k); 50 | self.get_bucket(i).remove(k, cursor, cs) 51 | } 52 | } 53 | 54 | impl ConcurrentMap for HashMap 55 | where 56 | K: Ord + Hash + Default, 57 | V: Default, 58 | { 59 | type Output = Cursor; 60 | 61 | fn new() -> Self { 62 | Self::with_capacity(30000) 63 | } 64 | 65 | #[inline(always)] 66 | fn get(&self, key: &K, output: &mut Self::Output, cs: &CsHP) -> bool { 67 | self.get(key, output, cs) 68 | } 69 | #[inline(always)] 70 | fn insert(&self, key: K, value: V, output: &mut Self::Output, cs: &CsHP) -> bool { 71 | self.insert(key, value, output, cs) 72 | } 73 | #[inline(always)] 74 | fn remove(&self, key: &K, output: &mut Self::Output, cs: &CsHP) -> bool { 75 | self.remove(key, output, cs) 76 | } 77 | } 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | use super::HashMap; 82 | use crate::ds_impl::circ_hp::concurrent_map; 83 | 84 | #[test] 85 | fn smoke_hashmap() { 86 | concurrent_map::tests::smoke::>(); 87 | } 88 | } 89 | 
-------------------------------------------------------------------------------- /src/ds_impl/circ_hp/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | 3 | pub mod bonsai_tree; 4 | pub mod double_link; 5 | pub mod list; 6 | pub mod michael_hash_map; 7 | pub mod natarajan_mittal_tree; 8 | pub mod skip_list; 9 | 10 | pub use self::concurrent_map::{ConcurrentMap, OutputHolder}; 11 | 12 | pub use self::bonsai_tree::BonsaiTreeMap; 13 | pub use self::double_link::DoubleLink; 14 | pub use self::list::{HHSList, HList, HMList}; 15 | pub use self::michael_hash_map::HashMap; 16 | pub use self::natarajan_mittal_tree::NMTreeMap; 17 | pub use self::skip_list::SkipList; 18 | -------------------------------------------------------------------------------- /src/ds_impl/ebr/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | use crossbeam_ebr::Guard; 2 | 3 | pub trait OutputHolder { 4 | fn output(&self) -> &V; 5 | } 6 | 7 | impl<'g, V> OutputHolder for &'g V { 8 | fn output(&self) -> &V { 9 | self 10 | } 11 | } 12 | 13 | impl OutputHolder for V { 14 | fn output(&self) -> &V { 15 | self 16 | } 17 | } 18 | 19 | pub trait ConcurrentMap { 20 | fn new() -> Self; 21 | fn get<'g>(&'g self, key: &'g K, guard: &'g Guard) -> Option>; 22 | fn insert(&self, key: K, value: V, guard: &Guard) -> bool; 23 | fn remove<'g>(&'g self, key: &'g K, guard: &'g Guard) -> Option>; 24 | } 25 | 26 | #[cfg(test)] 27 | pub mod tests { 28 | extern crate rand; 29 | use super::{ConcurrentMap, OutputHolder}; 30 | use crossbeam_ebr::pin; 31 | use crossbeam_utils::thread; 32 | use rand::prelude::*; 33 | use std::fmt::Debug; 34 | 35 | const THREADS: i32 = 30; 36 | const ELEMENTS_PER_THREADS: i32 = 1000; 37 | 38 | pub fn smoke(to_value: &F) 39 | where 40 | V: Eq + Debug, 41 | M: ConcurrentMap + Send + Sync, 42 | F: Sync + Fn(&i32) -> V, 43 | { 44 | let map = &M::new(); 45 | 46 | thread::scope(|s| { 
47 | for t in 0..THREADS { 48 | s.spawn(move |_| { 49 | let mut rng = rand::thread_rng(); 50 | let mut keys: Vec = 51 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 52 | keys.shuffle(&mut rng); 53 | for i in keys { 54 | assert!(map.insert(i, to_value(&i), &pin())); 55 | } 56 | }); 57 | } 58 | }) 59 | .unwrap(); 60 | 61 | thread::scope(|s| { 62 | for t in 0..(THREADS / 2) { 63 | s.spawn(move |_| { 64 | let mut rng = rand::thread_rng(); 65 | let mut keys: Vec = 66 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 67 | keys.shuffle(&mut rng); 68 | for i in keys { 69 | assert_eq!(to_value(&i), *map.remove(&i, &pin()).unwrap().output()); 70 | } 71 | }); 72 | } 73 | }) 74 | .unwrap(); 75 | 76 | thread::scope(|s| { 77 | for t in (THREADS / 2)..THREADS { 78 | s.spawn(move |_| { 79 | let mut rng = rand::thread_rng(); 80 | let mut keys: Vec = 81 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 82 | keys.shuffle(&mut rng); 83 | for i in keys { 84 | assert_eq!(to_value(&i), *map.get(&i, &pin()).unwrap().output()); 85 | } 86 | }); 87 | } 88 | }) 89 | .unwrap(); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/ds_impl/ebr/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::{ConcurrentMap, OutputHolder}; 2 | use crossbeam_ebr::Guard; 3 | use std::collections::hash_map::DefaultHasher; 4 | use std::hash::{Hash, Hasher}; 5 | 6 | use super::list::HHSList; 7 | 8 | pub struct HashMap { 9 | buckets: Vec>, 10 | } 11 | 12 | impl HashMap 13 | where 14 | K: Ord + Hash + Default, 15 | V: Default, 16 | { 17 | pub fn with_capacity(n: usize) -> Self { 18 | let mut buckets = Vec::with_capacity(n); 19 | for _ in 0..n { 20 | buckets.push(HHSList::new()); 21 | } 22 | 23 | HashMap { buckets } 24 | } 25 | 26 | #[inline] 27 | pub fn get_bucket(&self, index: usize) -> &HHSList { 28 | unsafe { 
self.buckets.get_unchecked(index % self.buckets.len()) } 29 | } 30 | 31 | #[inline] 32 | fn hash(k: &K) -> usize { 33 | let mut s = DefaultHasher::new(); 34 | k.hash(&mut s); 35 | s.finish() as usize 36 | } 37 | 38 | pub fn get<'g>(&'g self, k: &'g K, guard: &'g Guard) -> Option + 'g> { 39 | let i = Self::hash(k); 40 | self.get_bucket(i).get(k, guard) 41 | } 42 | 43 | pub fn insert(&self, k: K, v: V, guard: &Guard) -> bool { 44 | let i = Self::hash(&k); 45 | self.get_bucket(i).insert(k, v, guard) 46 | } 47 | 48 | pub fn remove<'g>(&'g self, k: &'g K, guard: &'g Guard) -> Option + 'g> { 49 | let i = Self::hash(k); 50 | self.get_bucket(i).remove(k, guard) 51 | } 52 | } 53 | 54 | impl ConcurrentMap for HashMap 55 | where 56 | K: Ord + Hash + Default, 57 | V: Default, 58 | { 59 | fn new() -> Self { 60 | Self::with_capacity(30000) 61 | } 62 | 63 | #[inline(always)] 64 | fn get<'g>(&'g self, key: &'g K, guard: &'g Guard) -> Option> { 65 | self.get(key, guard) 66 | } 67 | #[inline(always)] 68 | fn insert(&self, key: K, value: V, guard: &Guard) -> bool { 69 | self.insert(key, value, guard) 70 | } 71 | #[inline(always)] 72 | fn remove<'g>(&'g self, key: &'g K, guard: &'g Guard) -> Option> { 73 | self.remove(key, guard) 74 | } 75 | } 76 | 77 | #[cfg(test)] 78 | mod tests { 79 | use super::HashMap; 80 | use crate::ds_impl::ebr::concurrent_map; 81 | 82 | #[test] 83 | fn smoke_hashmap() { 84 | concurrent_map::tests::smoke::<_, HashMap, _>(&i32::to_string); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/ds_impl/ebr/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | 3 | pub mod bonsai_tree; 4 | pub mod double_link; 5 | pub mod elim_ab_tree; 6 | pub mod ellen_tree; 7 | pub mod list; 8 | pub mod michael_hash_map; 9 | pub mod natarajan_mittal_tree; 10 | pub mod skip_list; 11 | 12 | pub use self::concurrent_map::ConcurrentMap; 13 | 14 | pub use 
self::bonsai_tree::BonsaiTreeMap; 15 | pub use self::double_link::DoubleLink; 16 | pub use self::elim_ab_tree::ElimABTree; 17 | pub use self::ellen_tree::EFRBTree; 18 | pub use self::list::{HHSList, HList, HMList}; 19 | pub use self::michael_hash_map::HashMap; 20 | pub use self::natarajan_mittal_tree::NMTreeMap; 21 | pub use self::skip_list::SkipList; 22 | -------------------------------------------------------------------------------- /src/ds_impl/hp/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | pub trait OutputHolder { 2 | fn output(&self) -> &V; 3 | } 4 | 5 | impl<'g, V> OutputHolder for &'g V { 6 | fn output(&self) -> &V { 7 | self 8 | } 9 | } 10 | 11 | impl OutputHolder for V { 12 | fn output(&self) -> &V { 13 | self 14 | } 15 | } 16 | 17 | pub trait ConcurrentMap { 18 | type Handle<'domain>; 19 | 20 | fn new() -> Self; 21 | 22 | fn handle() -> Self::Handle<'static>; 23 | 24 | fn get<'hp>( 25 | &'hp self, 26 | handle: &'hp mut Self::Handle<'_>, 27 | key: &'hp K, 28 | ) -> Option>; 29 | 30 | fn insert(&self, handle: &mut Self::Handle<'_>, key: K, value: V) -> bool; 31 | 32 | fn remove<'hp>( 33 | &'hp self, 34 | handle: &'hp mut Self::Handle<'_>, 35 | key: &'hp K, 36 | ) -> Option>; 37 | } 38 | 39 | #[cfg(test)] 40 | pub mod tests { 41 | extern crate rand; 42 | use super::{ConcurrentMap, OutputHolder}; 43 | use crossbeam_utils::thread; 44 | use rand::prelude::*; 45 | use std::fmt::Debug; 46 | 47 | const THREADS: i32 = 30; 48 | const ELEMENTS_PER_THREADS: i32 = 1000; 49 | 50 | pub fn smoke(to_value: &F) 51 | where 52 | V: Eq + Debug, 53 | M: ConcurrentMap + Send + Sync, 54 | F: Sync + Fn(&i32) -> V, 55 | { 56 | let map = &M::new(); 57 | 58 | thread::scope(|s| { 59 | for t in 0..THREADS { 60 | s.spawn(move |_| { 61 | let mut handle = M::handle(); 62 | let mut rng = rand::thread_rng(); 63 | let mut keys: Vec = 64 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 65 | keys.shuffle(&mut rng); 66 
| for i in keys { 67 | assert!(map.insert(&mut handle, i, to_value(&i))); 68 | } 69 | }); 70 | } 71 | }) 72 | .unwrap(); 73 | 74 | thread::scope(|s| { 75 | for t in 0..(THREADS / 2) { 76 | s.spawn(move |_| { 77 | let mut handle = M::handle(); 78 | let mut rng = rand::thread_rng(); 79 | let mut keys: Vec = 80 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 81 | keys.shuffle(&mut rng); 82 | for i in keys { 83 | assert_eq!(to_value(&i), *map.remove(&mut handle, &i).unwrap().output()); 84 | } 85 | }); 86 | } 87 | }) 88 | .unwrap(); 89 | 90 | thread::scope(|s| { 91 | for t in (THREADS / 2)..THREADS { 92 | s.spawn(move |_| { 93 | let mut handle = M::handle(); 94 | let mut rng = rand::thread_rng(); 95 | let mut keys: Vec = 96 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 97 | keys.shuffle(&mut rng); 98 | for i in keys { 99 | assert_eq!(to_value(&i), *map.get(&mut handle, &i).unwrap().output()); 100 | } 101 | }); 102 | } 103 | }) 104 | .unwrap(); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/ds_impl/hp/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::{ConcurrentMap, OutputHolder}; 2 | use std::collections::hash_map::DefaultHasher; 3 | use std::hash::{Hash, Hasher}; 4 | 5 | use super::list::HHSList; 6 | pub use super::list::{Cursor, Handle}; 7 | 8 | pub struct HashMap { 9 | buckets: Vec>, 10 | } 11 | 12 | impl HashMap 13 | where 14 | K: Ord + Hash + 'static, 15 | { 16 | pub fn with_capacity(n: usize) -> Self { 17 | let mut buckets = Vec::with_capacity(n); 18 | for _ in 0..n { 19 | buckets.push(HHSList::new()); 20 | } 21 | 22 | HashMap { buckets } 23 | } 24 | 25 | #[inline] 26 | pub fn get_bucket(&self, index: usize) -> &HHSList { 27 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 28 | } 29 | 30 | #[inline] 31 | fn hash(k: &K) -> usize { 32 | let mut s = DefaultHasher::new(); 33 
| k.hash(&mut s); 34 | s.finish() as usize 35 | } 36 | } 37 | 38 | impl ConcurrentMap for HashMap 39 | where 40 | K: Ord + Hash + Send + 'static, 41 | V: Send, 42 | { 43 | type Handle<'domain> = Handle<'domain>; 44 | 45 | fn new() -> Self { 46 | Self::with_capacity(30000) 47 | } 48 | 49 | fn handle() -> Self::Handle<'static> { 50 | Handle::default() 51 | } 52 | 53 | #[inline(always)] 54 | fn get<'hp>( 55 | &'hp self, 56 | handle: &'hp mut Self::Handle<'_>, 57 | key: &'hp K, 58 | ) -> Option> { 59 | let i = Self::hash(key); 60 | self.get_bucket(i).get(handle, key) 61 | } 62 | #[inline(always)] 63 | fn insert(&self, handle: &mut Self::Handle<'_>, key: K, value: V) -> bool { 64 | let i = Self::hash(&key); 65 | self.get_bucket(i).insert(handle, key, value) 66 | } 67 | #[inline(always)] 68 | fn remove<'hp>( 69 | &'hp self, 70 | handle: &'hp mut Self::Handle<'_>, 71 | key: &'hp K, 72 | ) -> Option> { 73 | let i = Self::hash(key); 74 | self.get_bucket(i).remove(handle, key) 75 | } 76 | } 77 | 78 | #[cfg(test)] 79 | mod tests { 80 | use super::HashMap; 81 | use crate::ds_impl::hp::concurrent_map; 82 | 83 | #[test] 84 | fn smoke_hashmap() { 85 | concurrent_map::tests::smoke::<_, HashMap, _>(&i32::to_string); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/ds_impl/hp/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | pub mod pointers; 3 | 4 | pub mod bonsai_tree; 5 | pub mod double_link; 6 | pub mod elim_ab_tree; 7 | pub mod ellen_tree; 8 | pub mod list; 9 | pub mod michael_hash_map; 10 | pub mod natarajan_mittal_tree; 11 | pub mod skip_list; 12 | 13 | pub use self::concurrent_map::ConcurrentMap; 14 | 15 | pub use self::bonsai_tree::BonsaiTreeMap; 16 | pub use self::double_link::DoubleLink; 17 | pub use self::elim_ab_tree::ElimABTree; 18 | pub use self::ellen_tree::EFRBTree; 19 | pub use self::list::{HHSList, HList, HMList}; 20 | pub use 
self::michael_hash_map::HashMap; 21 | pub use self::natarajan_mittal_tree::NMTreeMap; 22 | pub use self::skip_list::SkipList; 23 | -------------------------------------------------------------------------------- /src/ds_impl/hp_brcu/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | use hp_brcu::Thread; 2 | 3 | pub trait OutputHolder { 4 | fn default(thread: &mut Thread) -> Self; 5 | fn output(&self) -> &V; 6 | } 7 | 8 | pub trait ConcurrentMap { 9 | type Output: OutputHolder; 10 | 11 | fn empty_output(thread: &mut Thread) -> Self::Output { 12 | >::default(thread) 13 | } 14 | 15 | fn new() -> Self; 16 | fn get(&self, key: &K, output: &mut Self::Output, thread: &mut Thread) -> bool; 17 | fn insert(&self, key: K, value: V, output: &mut Self::Output, thread: &mut Thread) -> bool; 18 | fn remove(&self, key: &K, output: &mut Self::Output, thread: &mut Thread) -> bool; 19 | } 20 | 21 | #[cfg(test)] 22 | pub mod tests { 23 | extern crate rand; 24 | use super::{ConcurrentMap, OutputHolder}; 25 | use crossbeam_utils::thread; 26 | use hp_brcu::THREAD; 27 | use rand::prelude::*; 28 | use std::fmt::Debug; 29 | 30 | const THREADS: i32 = 30; 31 | const ELEMENTS_PER_THREADS: i32 = 1000; 32 | 33 | pub fn smoke(to_value: &F) 34 | where 35 | V: Eq + Debug, 36 | M: ConcurrentMap + Send + Sync, 37 | F: Sync + Fn(&i32) -> V, 38 | { 39 | let map = &M::new(); 40 | 41 | thread::scope(|s| { 42 | for t in 0..THREADS { 43 | s.spawn(move |_| { 44 | THREAD.with(|thread| { 45 | let thread = &mut **thread.borrow_mut(); 46 | let output = &mut M::empty_output(thread); 47 | let mut rng = rand::thread_rng(); 48 | let mut keys: Vec = 49 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 50 | keys.shuffle(&mut rng); 51 | for i in keys { 52 | assert!(map.insert(i, to_value(&i), output, thread)); 53 | } 54 | }); 55 | }); 56 | } 57 | }) 58 | .unwrap(); 59 | 60 | thread::scope(|s| { 61 | for t in 0..(THREADS / 2) { 62 | s.spawn(move |_| { 
63 | THREAD.with(|thread| { 64 | let thread = &mut **thread.borrow_mut(); 65 | let output = &mut M::empty_output(thread); 66 | let mut rng = rand::thread_rng(); 67 | let mut keys: Vec = 68 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 69 | keys.shuffle(&mut rng); 70 | for i in keys { 71 | assert!(map.remove(&i, output, thread)); 72 | } 73 | }); 74 | }); 75 | } 76 | }) 77 | .unwrap(); 78 | 79 | thread::scope(|s| { 80 | for t in (THREADS / 2)..THREADS { 81 | s.spawn(move |_| { 82 | THREAD.with(|thread| { 83 | let thread = &mut **thread.borrow_mut(); 84 | let output = &mut M::empty_output(thread); 85 | let mut rng = rand::thread_rng(); 86 | let mut keys: Vec = 87 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 88 | keys.shuffle(&mut rng); 89 | for i in keys { 90 | assert!(map.get(&i, output, thread)); 91 | assert_eq!(to_value(&i), *output.output()); 92 | } 93 | }); 94 | }); 95 | } 96 | }) 97 | .unwrap(); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/ds_impl/hp_brcu/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::ConcurrentMap; 2 | use super::list::Cursor; 3 | 4 | use std::collections::hash_map::DefaultHasher; 5 | use std::hash::{Hash, Hasher}; 6 | 7 | use super::list::HHSList; 8 | 9 | pub struct HashMap { 10 | buckets: Vec>, 11 | } 12 | 13 | impl HashMap 14 | where 15 | K: Ord + Default + Hash, 16 | V: Default, 17 | { 18 | pub fn with_capacity(n: usize) -> Self { 19 | let mut buckets = Vec::with_capacity(n); 20 | for _ in 0..n { 21 | buckets.push(HHSList::new()); 22 | } 23 | 24 | HashMap { buckets } 25 | } 26 | 27 | #[inline] 28 | pub fn get_bucket(&self, index: usize) -> &HHSList { 29 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 30 | } 31 | 32 | // TODO(@jeehoonkang): we're converting u64 to usize, which may lose information. 
33 | #[inline] 34 | fn hash(k: &K) -> usize { 35 | let mut s = DefaultHasher::new(); 36 | k.hash(&mut s); 37 | s.finish() as usize 38 | } 39 | 40 | #[inline] 41 | pub fn get(&self, k: &K, cursor: &mut Cursor, thread: &mut hp_brcu::Thread) -> bool { 42 | let i = Self::hash(k); 43 | self.get_bucket(i).get(k, cursor, thread) 44 | } 45 | 46 | #[inline] 47 | pub fn insert( 48 | &self, 49 | k: K, 50 | v: V, 51 | cursor: &mut Cursor, 52 | thread: &mut hp_brcu::Thread, 53 | ) -> bool { 54 | let i = Self::hash(&k); 55 | self.get_bucket(i).insert(k, v, cursor, thread) 56 | } 57 | 58 | #[inline] 59 | pub fn remove(&self, k: &K, cursor: &mut Cursor, thread: &mut hp_brcu::Thread) -> bool { 60 | let i = Self::hash(&k); 61 | self.get_bucket(i).remove(k, cursor, thread) 62 | } 63 | } 64 | 65 | impl ConcurrentMap for HashMap 66 | where 67 | K: Ord + Default + Hash, 68 | V: Default, 69 | { 70 | type Output = Cursor; 71 | 72 | #[inline] 73 | fn new() -> Self { 74 | Self::with_capacity(30000) 75 | } 76 | 77 | #[inline(always)] 78 | fn get(&self, key: &K, cursor: &mut Cursor, thread: &mut hp_brcu::Thread) -> bool { 79 | self.get(key, cursor, thread) 80 | } 81 | #[inline(always)] 82 | fn insert( 83 | &self, 84 | key: K, 85 | value: V, 86 | cursor: &mut Cursor, 87 | thread: &mut hp_brcu::Thread, 88 | ) -> bool { 89 | self.insert(key, value, cursor, thread) 90 | } 91 | #[inline(always)] 92 | fn remove(&self, key: &K, cursor: &mut Cursor, thread: &mut hp_brcu::Thread) -> bool { 93 | self.remove(key, cursor, thread) 94 | } 95 | } 96 | 97 | #[cfg(test)] 98 | mod tests { 99 | use super::HashMap; 100 | use crate::ds_impl::hp_brcu::concurrent_map; 101 | 102 | #[test] 103 | fn smoke_hashmap() { 104 | concurrent_map::tests::smoke::<_, HashMap, _>(&i32::to_string); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/ds_impl/hp_brcu/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod 
concurrent_map; 2 | 3 | mod bonsai_tree; 4 | mod elim_ab_tree; 5 | mod list; 6 | pub mod list_alter; 7 | mod michael_hash_map; 8 | mod natarajan_mittal_tree; 9 | mod skip_list; 10 | 11 | pub use self::concurrent_map::ConcurrentMap; 12 | pub use bonsai_tree::BonsaiTreeMap; 13 | pub use elim_ab_tree::ElimABTree; 14 | pub use list::{HHSList, HList, HMList}; 15 | pub use michael_hash_map::HashMap; 16 | pub use natarajan_mittal_tree::NMTreeMap; 17 | pub use skip_list::SkipList; 18 | -------------------------------------------------------------------------------- /src/ds_impl/hp_pp/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use crate::ds_impl::hp::concurrent_map::{ConcurrentMap, OutputHolder}; 2 | use std::collections::hash_map::DefaultHasher; 3 | use std::hash::{Hash, Hasher}; 4 | 5 | use super::list::HHSList; 6 | pub use super::list::{Cursor, Handle}; 7 | 8 | pub struct HashMap { 9 | buckets: Vec>, 10 | } 11 | 12 | impl HashMap 13 | where 14 | K: Ord + Hash, 15 | { 16 | pub fn with_capacity(n: usize) -> Self { 17 | let mut buckets = Vec::with_capacity(n); 18 | for _ in 0..n { 19 | buckets.push(HHSList::new()); 20 | } 21 | 22 | HashMap { buckets } 23 | } 24 | 25 | #[inline] 26 | pub fn get_bucket(&self, index: usize) -> &HHSList { 27 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 28 | } 29 | 30 | // TODO(@jeehoonkang): we're converting u64 to usize, which may lose information. 
31 | #[inline] 32 | fn hash(k: &K) -> usize { 33 | let mut s = DefaultHasher::new(); 34 | k.hash(&mut s); 35 | s.finish() as usize 36 | } 37 | } 38 | 39 | impl ConcurrentMap for HashMap 40 | where 41 | K: Ord + Hash + Send, 42 | V: Send, 43 | { 44 | type Handle<'domain> = Handle<'domain>; 45 | 46 | fn new() -> Self { 47 | Self::with_capacity(30000) 48 | } 49 | 50 | fn handle() -> Self::Handle<'static> { 51 | Handle::default() 52 | } 53 | 54 | #[inline(always)] 55 | fn get<'hp>( 56 | &'hp self, 57 | handle: &'hp mut Self::Handle<'_>, 58 | key: &'hp K, 59 | ) -> Option> { 60 | let i = Self::hash(key); 61 | self.get_bucket(i).get(handle, key) 62 | } 63 | #[inline(always)] 64 | fn insert(&self, handle: &mut Self::Handle<'_>, key: K, value: V) -> bool { 65 | let i = Self::hash(&key); 66 | self.get_bucket(i).insert(handle, key, value) 67 | } 68 | #[inline(always)] 69 | fn remove<'hp>( 70 | &'hp self, 71 | handle: &'hp mut Self::Handle<'_>, 72 | key: &'hp K, 73 | ) -> Option> { 74 | let i = Self::hash(key); 75 | self.get_bucket(i).remove(handle, key) 76 | } 77 | } 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | use super::HashMap; 82 | use crate::ds_impl::hp::concurrent_map; 83 | 84 | #[test] 85 | fn smoke_hashmap() { 86 | concurrent_map::tests::smoke::<_, HashMap, _>(&i32::to_string); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/ds_impl/hp_pp/mod.rs: -------------------------------------------------------------------------------- 1 | // NOTE: hp_pp can use hp concurrent_map interface 2 | 3 | pub mod bonsai_tree; 4 | pub mod ellen_tree; 5 | pub mod list; 6 | pub mod michael_hash_map; 7 | pub mod natarajan_mittal_tree; 8 | pub mod skip_list; 9 | 10 | pub use self::bonsai_tree::BonsaiTreeMap; 11 | pub use self::ellen_tree::EFRBTree; 12 | pub use self::list::{HHSList, HList, HMList}; 13 | pub use self::michael_hash_map::HashMap; 14 | pub use self::natarajan_mittal_tree::NMTreeMap; 15 | pub use 
self::skip_list::SkipList; 16 | -------------------------------------------------------------------------------- /src/ds_impl/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cdrc; 2 | pub mod circ_ebr; 3 | pub mod circ_hp; 4 | pub mod ebr; 5 | pub mod hp; 6 | pub mod hp_brcu; 7 | pub mod hp_pp; 8 | pub mod nbr; 9 | pub mod nr; 10 | pub mod pebr; 11 | pub mod vbr; 12 | -------------------------------------------------------------------------------- /src/ds_impl/nbr/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | use nbr::Guard; 2 | 3 | pub trait ConcurrentMap { 4 | type Handle; 5 | 6 | fn new() -> Self; 7 | fn handle(guard: &mut Guard) -> Self::Handle; 8 | fn get<'g>(&'g self, key: &'g K, handle: &mut Self::Handle, guard: &'g Guard) -> Option<&'g V>; 9 | fn insert(&self, key: K, value: V, handle: &mut Self::Handle, guard: &Guard) -> bool; 10 | fn remove<'g>( 11 | &'g self, 12 | key: &'g K, 13 | handle: &mut Self::Handle, 14 | guard: &'g Guard, 15 | ) -> Option<&'g V>; 16 | } 17 | 18 | #[cfg(test)] 19 | pub mod tests { 20 | extern crate rand; 21 | use super::ConcurrentMap; 22 | use crossbeam_utils::thread; 23 | use nbr::Collector; 24 | use rand::prelude::*; 25 | use std::sync::Arc; 26 | 27 | const THREADS: i32 = 30; 28 | const ELEMENTS_PER_THREADS: i32 = 1000; 29 | 30 | /// `max_hazptr_per_thread` depends on the data structure. 
31 | pub fn smoke + Send + Sync>() { 32 | let map = &M::new(); 33 | let collector = Arc::new(Collector::new(THREADS as usize, 256, 32, 16)); 34 | 35 | thread::scope(|s| { 36 | for t in 0..THREADS { 37 | let collector = Arc::clone(&collector); 38 | s.spawn(move |_| { 39 | let mut guard = collector.register(); 40 | let mut handle = M::handle(&mut guard); 41 | let mut rng = rand::thread_rng(); 42 | let mut keys: Vec = 43 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 44 | keys.shuffle(&mut rng); 45 | for i in keys { 46 | assert!(map.insert(i, i.to_string(), &mut handle, &guard)); 47 | } 48 | }); 49 | } 50 | }) 51 | .unwrap(); 52 | 53 | let mut collector = Arc::try_unwrap(collector).unwrap_or_else(|_| panic!()); 54 | collector.reset_registrations(); 55 | let collector = Arc::new(collector); 56 | 57 | thread::scope(|s| { 58 | for t in 0..THREADS { 59 | let collector = Arc::clone(&collector); 60 | s.spawn(move |_| { 61 | let mut guard = collector.register(); 62 | let mut handle = M::handle(&mut guard); 63 | let mut rng = rand::thread_rng(); 64 | let mut keys: Vec = 65 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 66 | keys.shuffle(&mut rng); 67 | if t < THREADS / 2 { 68 | for i in keys { 69 | assert_eq!( 70 | i.to_string(), 71 | *map.remove(&i, &mut handle, &guard).unwrap() 72 | ); 73 | } 74 | } else { 75 | for i in keys { 76 | assert_eq!(i.to_string(), *map.get(&i, &mut handle, &guard).unwrap()); 77 | } 78 | } 79 | }); 80 | } 81 | }) 82 | .unwrap(); 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/ds_impl/nbr/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::ConcurrentMap; 2 | use nbr::Guard; 3 | use std::collections::hash_map::DefaultHasher; 4 | use std::hash::{Hash, Hasher}; 5 | 6 | use super::list::{HHSList, Handle}; 7 | 8 | pub struct HashMap { 9 | buckets: Vec>, 10 | } 11 | 12 | impl HashMap 13 
| where 14 | K: Ord + Hash, 15 | { 16 | pub fn with_capacity(n: usize) -> Self { 17 | let mut buckets = Vec::with_capacity(n); 18 | for _ in 0..n { 19 | buckets.push(HHSList::new()); 20 | } 21 | 22 | HashMap { buckets } 23 | } 24 | 25 | #[inline] 26 | pub fn get_bucket(&self, index: usize) -> &HHSList { 27 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 28 | } 29 | 30 | // TODO(@jeehoonkang): we're converting u64 to usize, which may lose information. 31 | #[inline] 32 | fn hash(k: &K) -> usize { 33 | let mut s = DefaultHasher::new(); 34 | k.hash(&mut s); 35 | s.finish() as usize 36 | } 37 | 38 | #[inline] 39 | pub fn get<'g>(&'g self, k: &'g K, handle: &mut Handle, guard: &'g Guard) -> Option<&'g V> { 40 | let i = Self::hash(k); 41 | self.get_bucket(i).get(k, handle, guard) 42 | } 43 | 44 | #[inline] 45 | pub fn insert(&self, k: K, v: V, handle: &mut Handle, guard: &Guard) -> bool { 46 | let i = Self::hash(&k); 47 | self.get_bucket(i).insert(k, v, handle, guard) 48 | } 49 | 50 | #[inline] 51 | pub fn remove<'g>(&'g self, k: &'g K, handle: &mut Handle, guard: &'g Guard) -> Option<&'g V> { 52 | let i = Self::hash(&k); 53 | self.get_bucket(i).remove(k, handle, guard) 54 | } 55 | } 56 | 57 | impl ConcurrentMap for HashMap 58 | where 59 | K: Ord + Hash, 60 | { 61 | type Handle = Handle; 62 | 63 | fn handle(guard: &mut Guard) -> Self::Handle { 64 | Self::Handle { 65 | prev: guard.acquire_shield().unwrap(), 66 | curr: guard.acquire_shield().unwrap(), 67 | } 68 | } 69 | 70 | fn new() -> Self { 71 | Self::with_capacity(30000) 72 | } 73 | 74 | #[inline(always)] 75 | fn get<'g>(&'g self, key: &'g K, handle: &mut Handle, guard: &'g Guard) -> Option<&'g V> { 76 | self.get(key, handle, guard) 77 | } 78 | #[inline(always)] 79 | fn insert(&self, key: K, value: V, handle: &mut Handle, guard: &Guard) -> bool { 80 | self.insert(key, value, handle, guard) 81 | } 82 | #[inline(always)] 83 | fn remove<'g>(&'g self, key: &'g K, handle: &mut Handle, guard: &'g Guard) 
-> Option<&'g V> { 84 | self.remove(key, handle, guard) 85 | } 86 | } 87 | 88 | #[cfg(test)] 89 | mod tests { 90 | use super::HashMap; 91 | use crate::ds_impl::nbr::concurrent_map; 92 | 93 | #[test] 94 | fn smoke_hashmap() { 95 | concurrent_map::tests::smoke::>(); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/ds_impl/nbr/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | 3 | pub mod list; 4 | pub mod michael_hash_map; 5 | pub mod natarajan_mittal_tree; 6 | 7 | pub use self::concurrent_map::ConcurrentMap; 8 | 9 | pub use self::list::HHSList; 10 | pub use self::list::HList; 11 | pub use self::list::HMList; 12 | pub use self::michael_hash_map::HashMap; 13 | pub use self::natarajan_mittal_tree::NMTreeMap; 14 | -------------------------------------------------------------------------------- /src/ds_impl/nr/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | pub trait OutputHolder { 2 | fn output(&self) -> &V; 3 | } 4 | 5 | impl<'g, V> OutputHolder for &'g V { 6 | fn output(&self) -> &V { 7 | self 8 | } 9 | } 10 | 11 | impl OutputHolder for V { 12 | fn output(&self) -> &V { 13 | self 14 | } 15 | } 16 | 17 | pub trait ConcurrentMap { 18 | fn new() -> Self; 19 | fn get(&self, key: &K) -> Option>; 20 | fn insert(&self, key: K, value: V) -> bool; 21 | fn remove(&self, key: &K) -> Option>; 22 | } 23 | 24 | #[cfg(test)] 25 | pub mod tests { 26 | extern crate rand; 27 | use super::{ConcurrentMap, OutputHolder}; 28 | use crossbeam_utils::thread; 29 | use rand::prelude::*; 30 | use std::fmt::Debug; 31 | 32 | const THREADS: i32 = 30; 33 | const ELEMENTS_PER_THREADS: i32 = 1000; 34 | 35 | pub fn smoke(to_value: &F) 36 | where 37 | V: Eq + Debug, 38 | M: ConcurrentMap + Send + Sync, 39 | F: Sync + Fn(&i32) -> V, 40 | { 41 | let map = &M::new(); 42 | 43 | thread::scope(|s| { 44 | for t in 0..THREADS { 45 | 
s.spawn(move |_| { 46 | let mut rng = rand::thread_rng(); 47 | let mut keys: Vec = 48 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 49 | keys.shuffle(&mut rng); 50 | for i in keys { 51 | assert!(map.insert(i, to_value(&i))); 52 | } 53 | }); 54 | } 55 | }) 56 | .unwrap(); 57 | 58 | thread::scope(|s| { 59 | for t in 0..(THREADS / 2) { 60 | s.spawn(move |_| { 61 | let mut rng = rand::thread_rng(); 62 | let mut keys: Vec = 63 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 64 | keys.shuffle(&mut rng); 65 | for i in keys { 66 | assert_eq!(to_value(&i), *map.remove(&i).unwrap().output()); 67 | } 68 | }); 69 | } 70 | }) 71 | .unwrap(); 72 | 73 | thread::scope(|s| { 74 | for t in (THREADS / 2)..THREADS { 75 | s.spawn(move |_| { 76 | let mut rng = rand::thread_rng(); 77 | let mut keys: Vec = 78 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 79 | keys.shuffle(&mut rng); 80 | for i in keys { 81 | assert_eq!(to_value(&i), *map.get(&i).unwrap().output()); 82 | } 83 | }); 84 | } 85 | }) 86 | .unwrap(); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/ds_impl/nr/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::{ConcurrentMap, OutputHolder}; 2 | use std::collections::hash_map::DefaultHasher; 3 | use std::hash::{Hash, Hasher}; 4 | 5 | use super::list::HHSList; 6 | 7 | pub struct HashMap { 8 | buckets: Vec>, 9 | } 10 | 11 | impl HashMap 12 | where 13 | K: Ord + Hash + 'static, 14 | V: 'static, 15 | { 16 | pub fn with_capacity(n: usize) -> Self { 17 | let mut buckets = Vec::with_capacity(n); 18 | for _ in 0..n { 19 | buckets.push(HHSList::new()); 20 | } 21 | 22 | HashMap { buckets } 23 | } 24 | 25 | #[inline] 26 | pub fn get_bucket(&self, index: usize) -> &HHSList { 27 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 28 | } 29 | 30 | #[inline] 31 | fn hash(k: &K) -> usize { 32 
| let mut s = DefaultHasher::new(); 33 | k.hash(&mut s); 34 | s.finish() as usize 35 | } 36 | } 37 | 38 | impl ConcurrentMap for HashMap 39 | where 40 | K: Ord + Hash + 'static, 41 | V: 'static, 42 | { 43 | fn new() -> Self { 44 | Self::with_capacity(30000) 45 | } 46 | 47 | #[inline(always)] 48 | fn get(&self, key: &K) -> Option> { 49 | let i = Self::hash(key); 50 | self.get_bucket(i).get(key) 51 | } 52 | #[inline(always)] 53 | fn insert(&self, key: K, value: V) -> bool { 54 | let i = Self::hash(&key); 55 | self.get_bucket(i).insert(key, value) 56 | } 57 | #[inline(always)] 58 | fn remove(&self, key: &K) -> Option> { 59 | let i = Self::hash(key); 60 | self.get_bucket(i).remove(key) 61 | } 62 | } 63 | 64 | #[cfg(test)] 65 | mod tests { 66 | use super::HashMap; 67 | use crate::ds_impl::nr::concurrent_map; 68 | 69 | #[test] 70 | fn smoke_hashmap() { 71 | concurrent_map::tests::smoke::<_, HashMap, _>(&i32::to_string); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/ds_impl/nr/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | pub mod pointers; 3 | 4 | pub mod bonsai_tree; 5 | pub mod double_link; 6 | pub mod elim_ab_tree; 7 | pub mod ellen_tree; 8 | pub mod list; 9 | pub mod michael_hash_map; 10 | pub mod natarajan_mittal_tree; 11 | pub mod skip_list; 12 | 13 | pub use self::concurrent_map::ConcurrentMap; 14 | 15 | pub use self::bonsai_tree::BonsaiTreeMap; 16 | pub use self::double_link::DoubleLink; 17 | pub use self::elim_ab_tree::ElimABTree; 18 | pub use self::ellen_tree::EFRBTree; 19 | pub use self::list::{HHSList, HList, HMList}; 20 | pub use self::michael_hash_map::HashMap; 21 | pub use self::natarajan_mittal_tree::NMTreeMap; 22 | pub use self::skip_list::SkipList; 23 | -------------------------------------------------------------------------------- /src/ds_impl/pebr/concurrent_map.rs: 
-------------------------------------------------------------------------------- 1 | use crossbeam_pebr::Guard; 2 | 3 | pub trait OutputHolder { 4 | fn output(&self) -> &V; 5 | } 6 | 7 | impl<'g, V> OutputHolder for &'g V { 8 | fn output(&self) -> &V { 9 | self 10 | } 11 | } 12 | 13 | impl OutputHolder for V { 14 | fn output(&self) -> &V { 15 | self 16 | } 17 | } 18 | 19 | pub trait ConcurrentMap { 20 | type Handle; 21 | 22 | fn new() -> Self; 23 | fn handle<'g>(guard: &'g Guard) -> Self::Handle; 24 | fn clear(handle: &mut Self::Handle); 25 | 26 | fn get<'g>( 27 | &'g self, 28 | handle: &'g mut Self::Handle, 29 | key: &'g K, 30 | guard: &'g mut Guard, 31 | ) -> Option>; 32 | fn insert(&self, handle: &mut Self::Handle, key: K, value: V, guard: &mut Guard) -> bool; 33 | fn remove( 34 | &self, 35 | handle: &mut Self::Handle, 36 | key: &K, 37 | guard: &mut Guard, 38 | ) -> Option>; 39 | } 40 | 41 | #[cfg(test)] 42 | pub mod tests { 43 | extern crate rand; 44 | use super::{ConcurrentMap, OutputHolder}; 45 | use crossbeam_pebr::pin; 46 | use crossbeam_utils::thread; 47 | use rand::prelude::*; 48 | use std::fmt::Debug; 49 | 50 | const THREADS: i32 = 30; 51 | const ELEMENTS_PER_THREADS: i32 = 1000; 52 | 53 | pub fn smoke(to_value: &F) 54 | where 55 | V: Eq + Debug, 56 | M: ConcurrentMap + Send + Sync, 57 | F: Sync + Fn(&i32) -> V, 58 | { 59 | let map = &M::new(); 60 | 61 | thread::scope(|s| { 62 | for t in 0..THREADS { 63 | s.spawn(move |_| { 64 | let mut handle = M::handle(&pin()); 65 | let mut rng = rand::thread_rng(); 66 | let mut keys: Vec = 67 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 68 | keys.shuffle(&mut rng); 69 | for i in keys { 70 | assert!(map.insert(&mut handle, i, to_value(&i), &mut pin())); 71 | } 72 | }); 73 | } 74 | }) 75 | .unwrap(); 76 | 77 | thread::scope(|s| { 78 | for t in 0..(THREADS / 2) { 79 | s.spawn(move |_| { 80 | let mut handle = M::handle(&pin()); 81 | let mut rng = rand::thread_rng(); 82 | let mut keys: Vec = 83 | 
(0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 84 | keys.shuffle(&mut rng); 85 | for i in keys { 86 | assert_eq!( 87 | to_value(&i), 88 | *map.remove(&mut handle, &i, &mut pin()).unwrap().output() 89 | ); 90 | } 91 | }); 92 | } 93 | }) 94 | .unwrap(); 95 | 96 | thread::scope(|s| { 97 | for t in (THREADS / 2)..THREADS { 98 | s.spawn(move |_| { 99 | let mut handle = M::handle(&pin()); 100 | let mut rng = rand::thread_rng(); 101 | let mut keys: Vec = 102 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 103 | keys.shuffle(&mut rng); 104 | for i in keys { 105 | assert_eq!( 106 | to_value(&i), 107 | *map.get(&mut handle, &i, &mut pin()).unwrap().output() 108 | ); 109 | } 110 | }); 111 | } 112 | }) 113 | .unwrap(); 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/ds_impl/pebr/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use super::concurrent_map::{ConcurrentMap, OutputHolder}; 2 | use crossbeam_pebr::Guard; 3 | use std::collections::hash_map::DefaultHasher; 4 | use std::hash::{Hash, Hasher}; 5 | 6 | pub use super::list::Cursor; 7 | use super::list::HHSList; 8 | 9 | pub struct HashMap { 10 | buckets: Vec>, 11 | } 12 | 13 | impl HashMap 14 | where 15 | K: Ord + Hash + Clone, 16 | { 17 | pub fn with_capacity(n: usize) -> Self { 18 | let mut buckets = Vec::with_capacity(n); 19 | for _ in 0..n { 20 | buckets.push(HHSList::new()); 21 | } 22 | 23 | HashMap { buckets } 24 | } 25 | 26 | #[inline] 27 | pub fn get_bucket(&self, index: usize) -> &HHSList { 28 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 29 | } 30 | 31 | // TODO(@jeehoonkang): we're converting u64 to usize, which may lose information. 
32 | #[inline] 33 | fn hash(k: &K) -> usize { 34 | let mut s = DefaultHasher::new(); 35 | k.hash(&mut s); 36 | s.finish() as usize 37 | } 38 | } 39 | 40 | impl ConcurrentMap for HashMap 41 | where 42 | K: Ord + Hash + Clone, 43 | { 44 | type Handle = Cursor; 45 | 46 | fn new() -> Self { 47 | Self::with_capacity(30000) 48 | } 49 | 50 | fn handle(guard: &Guard) -> Self::Handle { 51 | Cursor::new(guard) 52 | } 53 | 54 | fn clear(handle: &mut Self::Handle) { 55 | handle.release(); 56 | } 57 | 58 | #[inline(always)] 59 | fn get<'g>( 60 | &'g self, 61 | handle: &'g mut Self::Handle, 62 | key: &'g K, 63 | guard: &'g mut Guard, 64 | ) -> Option> { 65 | let i = Self::hash(key); 66 | self.get_bucket(i).get(handle, key, guard) 67 | } 68 | #[inline(always)] 69 | fn insert(&self, handle: &mut Self::Handle, key: K, value: V, guard: &mut Guard) -> bool { 70 | let i = Self::hash(&key); 71 | self.get_bucket(i).insert(handle, key, value, guard) 72 | } 73 | #[inline(always)] 74 | fn remove( 75 | &self, 76 | handle: &mut Self::Handle, 77 | key: &K, 78 | guard: &mut Guard, 79 | ) -> Option> { 80 | let i = Self::hash(&key); 81 | self.get_bucket(i).remove(handle, key, guard) 82 | } 83 | } 84 | 85 | #[cfg(test)] 86 | mod tests { 87 | use super::HashMap; 88 | use crate::ds_impl::pebr::concurrent_map; 89 | 90 | #[test] 91 | fn smoke_hashmap() { 92 | concurrent_map::tests::smoke::<_, HashMap, _>(&i32::to_string); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/ds_impl/pebr/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod shield_pool; 2 | 3 | pub mod concurrent_map; 4 | 5 | pub mod bonsai_tree; 6 | pub mod elim_ab_tree; 7 | pub mod ellen_tree; 8 | pub mod list; 9 | pub mod michael_hash_map; 10 | pub mod natarajan_mittal_tree; 11 | pub mod skip_list; 12 | 13 | pub use self::concurrent_map::ConcurrentMap; 14 | 15 | pub use self::bonsai_tree::BonsaiTreeMap; 16 | pub use 
self::elim_ab_tree::ElimABTree; 17 | pub use self::ellen_tree::EFRBTree; 18 | pub use self::list::{HHSList, HList, HMList}; 19 | pub use self::michael_hash_map::HashMap; 20 | pub use self::natarajan_mittal_tree::NMTreeMap; 21 | pub use self::skip_list::SkipList; 22 | -------------------------------------------------------------------------------- /src/ds_impl/pebr/shield_pool.rs: -------------------------------------------------------------------------------- 1 | use core::ops::Deref; 2 | use crossbeam_pebr::{Guard, Shared, Shield, ShieldError}; 3 | 4 | /// Thread-local pool of shields 5 | #[derive(Debug)] 6 | pub struct ShieldPool { 7 | shields: Vec<*mut Shield>, 8 | /// Indices of available shields in `shields`. 9 | available: Vec, 10 | } 11 | 12 | impl ShieldPool { 13 | pub fn new() -> ShieldPool { 14 | ShieldPool { 15 | shields: Vec::new(), 16 | available: Vec::new(), 17 | } 18 | } 19 | 20 | pub fn defend<'g>( 21 | &mut self, 22 | ptr: Shared<'g, T>, 23 | guard: &Guard, 24 | ) -> Result, ShieldError> { 25 | if let Some(index) = self.available.pop() { 26 | let shield_ref = unsafe { &mut **self.shields.get_unchecked(index) }; 27 | shield_ref.defend(ptr, guard)?; 28 | return Ok(ShieldHandle { pool: self, index }); 29 | } 30 | let new_shield = Box::into_raw(Box::new(Shield::new(ptr, guard)?)); 31 | let index = self.shields.len(); 32 | self.shields.push(new_shield); 33 | Ok(ShieldHandle { pool: self, index }) 34 | } 35 | } 36 | 37 | impl Drop for ShieldPool { 38 | fn drop(&mut self) { 39 | for s in self.shields.drain(..) { 40 | unsafe { drop(Box::from_raw(s)) } 41 | } 42 | } 43 | } 44 | 45 | #[derive(Debug)] 46 | pub struct ShieldHandle { 47 | /// The shield pool this handle belongs to. 48 | pool: *mut ShieldPool, 49 | /// The index of the underlying shield. 
50 | index: usize, 51 | } 52 | 53 | impl Drop for ShieldHandle { 54 | fn drop(&mut self) { 55 | let pool = unsafe { &mut *self.pool }; 56 | // release only 57 | unsafe { (**pool.shields.get_unchecked(self.index)).release() }; 58 | pool.available.push(self.index); 59 | } 60 | } 61 | 62 | impl Deref for ShieldHandle { 63 | type Target = Shield; 64 | fn deref(&self) -> &Self::Target { 65 | unsafe { &(**(*self.pool).shields.get_unchecked(self.index)) } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/ds_impl/vbr/concurrent_map.rs: -------------------------------------------------------------------------------- 1 | pub trait ConcurrentMap { 2 | type Global: Sync; 3 | type Local; 4 | 5 | fn global(key_range_hint: usize) -> Self::Global; 6 | fn local(global: &Self::Global) -> Self::Local; 7 | fn new(local: &Self::Local) -> Self; 8 | fn get(&self, key: &K, local: &Self::Local) -> Option; 9 | fn insert(&self, key: K, value: V, local: &Self::Local) -> bool; 10 | fn remove(&self, key: &K, local: &Self::Local) -> Option; 11 | } 12 | 13 | #[cfg(test)] 14 | pub mod tests { 15 | extern crate rand; 16 | use super::ConcurrentMap; 17 | use crossbeam_utils::thread; 18 | use rand::prelude::*; 19 | 20 | const THREADS: i32 = 30; 21 | const ELEMENTS_PER_THREADS: i32 = 1000; 22 | 23 | pub fn smoke + Send + Sync>() { 24 | let global = &M::global((THREADS * ELEMENTS_PER_THREADS) as _); 25 | let local = &M::local(global); 26 | let map = &M::new(local); 27 | 28 | thread::scope(|s| { 29 | for t in 0..THREADS { 30 | s.spawn(move |_| { 31 | let local = &M::local(global); 32 | let mut rng = rand::thread_rng(); 33 | let mut keys: Vec = 34 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 35 | keys.shuffle(&mut rng); 36 | for i in keys { 37 | assert!(map.insert(i, i, local)); 38 | } 39 | }); 40 | } 41 | }) 42 | .unwrap(); 43 | 44 | thread::scope(|s| { 45 | for t in 0..(THREADS / 2) { 46 | s.spawn(move |_| { 47 | let local = 
&M::local(global); 48 | let mut rng = rand::thread_rng(); 49 | let mut keys: Vec = 50 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 51 | keys.shuffle(&mut rng); 52 | for i in keys { 53 | assert_eq!(i, map.remove(&i, local).unwrap()); 54 | } 55 | }); 56 | } 57 | }) 58 | .unwrap(); 59 | 60 | thread::scope(|s| { 61 | for t in (THREADS / 2)..THREADS { 62 | s.spawn(move |_| { 63 | let local = &M::local(global); 64 | let mut rng = rand::thread_rng(); 65 | let mut keys: Vec = 66 | (0..ELEMENTS_PER_THREADS).map(|k| k * THREADS + t).collect(); 67 | keys.shuffle(&mut rng); 68 | for i in keys { 69 | assert_eq!(i, map.get(&i, local).unwrap()); 70 | } 71 | }); 72 | } 73 | }) 74 | .unwrap(); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/ds_impl/vbr/michael_hash_map.rs: -------------------------------------------------------------------------------- 1 | use vbr::{Global, Local}; 2 | 3 | use super::concurrent_map::ConcurrentMap; 4 | use std::collections::hash_map::DefaultHasher; 5 | use std::hash::{Hash, Hasher}; 6 | 7 | use super::list::{HHSList, Node}; 8 | 9 | pub struct HashMap 10 | where 11 | K: 'static + Ord + Hash + Copy + Default, 12 | V: 'static + Copy + Default, 13 | { 14 | buckets: Vec>, 15 | } 16 | 17 | impl HashMap 18 | where 19 | K: 'static + Ord + Hash + Copy + Default, 20 | V: 'static + Copy + Default, 21 | { 22 | pub fn with_capacity(n: usize, local: &Local>) -> Self { 23 | let mut buckets = Vec::with_capacity(n); 24 | for _ in 0..n { 25 | buckets.push(HHSList::new(local)); 26 | } 27 | 28 | HashMap { buckets } 29 | } 30 | 31 | #[inline] 32 | pub fn get_bucket(&self, index: usize) -> &HHSList { 33 | unsafe { self.buckets.get_unchecked(index % self.buckets.len()) } 34 | } 35 | 36 | // TODO(@jeehoonkang): we're converting u64 to usize, which may lose information. 
37 | #[inline] 38 | fn hash(k: &K) -> usize { 39 | let mut s = DefaultHasher::new(); 40 | k.hash(&mut s); 41 | s.finish() as usize 42 | } 43 | 44 | pub fn get<'g>(&'g self, k: &'g K, local: &Local>) -> Option { 45 | let i = Self::hash(k); 46 | self.get_bucket(i).get(k, local) 47 | } 48 | 49 | pub fn insert(&self, k: K, v: V, local: &Local>) -> bool { 50 | let i = Self::hash(&k); 51 | self.get_bucket(i).insert(k, v, local) 52 | } 53 | 54 | pub fn remove<'g>(&'g self, k: &'g K, local: &Local>) -> Option { 55 | let i = Self::hash(&k); 56 | self.get_bucket(i).remove(k, local) 57 | } 58 | } 59 | 60 | impl ConcurrentMap for HashMap 61 | where 62 | K: 'static + Ord + Hash + Copy + Default, 63 | V: 'static + Copy + Default, 64 | { 65 | type Global = Global>; 66 | 67 | type Local = Local>; 68 | 69 | fn global(key_range_hint: usize) -> Self::Global { 70 | Global::new(key_range_hint) 71 | } 72 | 73 | fn local(global: &Self::Global) -> Self::Local { 74 | Local::new(global) 75 | } 76 | 77 | fn new(local: &Self::Local) -> Self { 78 | Self::with_capacity(30000, local) 79 | } 80 | 81 | fn get(&self, key: &K, local: &Self::Local) -> Option { 82 | self.get(key, local) 83 | } 84 | 85 | fn insert(&self, key: K, value: V, local: &Self::Local) -> bool { 86 | self.insert(key, value, local) 87 | } 88 | 89 | fn remove(&self, key: &K, local: &Self::Local) -> Option { 90 | self.remove(key, local) 91 | } 92 | } 93 | 94 | #[cfg(test)] 95 | mod tests { 96 | use super::HashMap; 97 | use crate::ds_impl::vbr::concurrent_map; 98 | 99 | #[test] 100 | fn smoke_hashmap() { 101 | concurrent_map::tests::smoke::>(); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/ds_impl/vbr/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod concurrent_map; 2 | 3 | pub mod elim_ab_tree; 4 | pub mod list; 5 | pub mod michael_hash_map; 6 | pub mod natarajan_mittal_tree; 7 | pub mod skip_list; 8 | 9 | pub use 
self::concurrent_map::ConcurrentMap; 10 | 11 | pub use elim_ab_tree::ElimABTree; 12 | pub use list::{HHSList, HList, HMList}; 13 | pub use michael_hash_map::HashMap; 14 | pub use natarajan_mittal_tree::NMTreeMap; 15 | pub use skip_list::SkipList; 16 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | #![feature(strict_provenance_atomic_ptr, strict_provenance)] 3 | #![feature(cfg_sanitize)] 4 | 5 | #[macro_use] 6 | extern crate cfg_if; 7 | 8 | cfg_if! { 9 | if #[cfg(all(not(feature = "sanitize"), target_os = "linux"))] { 10 | extern crate tikv_jemallocator; 11 | #[global_allocator] 12 | static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; 13 | 14 | extern crate tikv_jemalloc_ctl; 15 | pub struct MemSampler { 16 | epoch_mib: tikv_jemalloc_ctl::epoch_mib, 17 | allocated_mib: tikv_jemalloc_ctl::stats::allocated_mib, 18 | } 19 | 20 | impl Default for MemSampler { 21 | fn default() -> Self { 22 | Self::new() 23 | } 24 | } 25 | 26 | impl MemSampler { 27 | pub fn new() -> Self { 28 | MemSampler { 29 | epoch_mib: tikv_jemalloc_ctl::epoch::mib().unwrap(), 30 | allocated_mib: tikv_jemalloc_ctl::stats::allocated::mib().unwrap(), 31 | } 32 | } 33 | pub fn sample(&self) -> usize { 34 | self.epoch_mib.advance().unwrap(); 35 | self.allocated_mib.read().unwrap() 36 | } 37 | } 38 | } else { 39 | pub struct MemSampler {} 40 | 41 | impl Default for MemSampler { 42 | fn default() -> Self { 43 | Self::new() 44 | } 45 | } 46 | 47 | impl MemSampler { 48 | pub fn new() -> Self { 49 | println!("NOTE: Memory usage benchmark is supported only for linux."); 50 | MemSampler {} 51 | } 52 | pub fn sample(&self) -> usize { 53 | 0 54 | } 55 | } 56 | } 57 | } 58 | 59 | extern crate crossbeam_ebr; 60 | extern crate crossbeam_utils; 61 | #[macro_use] 62 | extern crate bitflags; 63 | extern crate clap; 64 | extern 
crate typenum; 65 | 66 | #[macro_use] 67 | mod utils; 68 | pub mod config; 69 | pub mod ds_impl; 70 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | /// Ok or executing the given expression. 3 | macro_rules! ok_or { 4 | ($e:expr, $err:expr) => {{ 5 | match $e { 6 | Ok(r) => r, 7 | Err(_) => $err, 8 | } 9 | }}; 10 | } 11 | 12 | #[macro_export] 13 | /// Some or executing the given expression. 14 | macro_rules! some_or { 15 | ($e:expr, $err:expr) => {{ 16 | match $e { 17 | Some(r) => r, 18 | None => $err, 19 | } 20 | }}; 21 | } 22 | -------------------------------------------------------------------------------- /test-scripts/sanitize-circ.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export RUST_BACKTRACE=1 RUSTFLAGS='-Z sanitizer=address' 4 | 5 | circ_ebr="cargo run --bin circ-ebr --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 6 | circ_hp="cargo run --bin circ-hp --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 7 | cdrc_ebr="cargo run --bin cdrc-ebr --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 8 | cdrc_hp="cargo run --bin cdrc-hp --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 9 | double_link="cargo run --bin double-link --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 10 | 11 | set -e 12 | for i in {1..200}; do 13 | $cdrc_hp -dh-list -i3 -t128 -r10 -g1 14 | $cdrc_hp -dhm-list -i3 -t128 -r10 -g1 15 | $cdrc_hp -dhhs-list -i3 -t128 -r10 -g1 16 | $cdrc_hp -dhash-map -i3 -t128 -r10 -g1 17 | $cdrc_hp -dnm-tree -i3 -t128 -r10 -g1 18 | $cdrc_hp -dskip-list -i3 -t128 -r10 -g1 19 | $cdrc_hp -defrb-tree -i3 -t128 -r10 -g1 20 | 21 | $cdrc_ebr -dh-list -i3 -t128 -r10 -g1 22 | 
$cdrc_ebr -dhm-list -i3 -t128 -r10 -g1 23 | $cdrc_ebr -dhhs-list -i3 -t128 -r10 -g1 24 | $cdrc_ebr -dhash-map -i3 -t128 -r10 -g1 25 | $cdrc_ebr -dnm-tree -i3 -t128 -r10 -g1 26 | $cdrc_ebr -dskip-list -i3 -t128 -r10 -g1 27 | $cdrc_ebr -defrb-tree -i3 -t128 -r10 -g1 28 | 29 | $circ_hp -dh-list -i3 -t128 -r10 -g1 30 | $circ_hp -dhm-list -i3 -t128 -r10 -g1 31 | $circ_hp -dhhs-list -i3 -t128 -r10 -g1 32 | $circ_hp -dhash-map -i3 -t128 -r10 -g1 33 | $circ_hp -dnm-tree -i3 -t128 -r10 -g1 34 | $circ_hp -dskip-list -i3 -t128 -r10 -g1 35 | $circ_hp -defrb-tree -i3 -t128 -r10 -g1 36 | 37 | export ASAN_OPTIONS=detect_leaks=0 38 | $circ_ebr -dh-list -i3 -t128 -r10 -g1 39 | $circ_ebr -dhm-list -i3 -t128 -r10 -g1 40 | $circ_ebr -dhhs-list -i3 -t128 -r10 -g1 41 | $circ_ebr -dhash-map -i3 -t128 -r10 -g1 42 | $circ_ebr -dnm-tree -i3 -t128 -r10 -g1 43 | $circ_ebr -dskip-list -i3 -t128 -r10 -g1 44 | $circ_ebr -defrb-tree -i3 -t128 -r10 -g1 45 | 46 | $double_link -t128 -mcirc-ebr 47 | export ASAN_OPTIONS=detect_leaks=1 48 | $double_link -t128 -mcirc-hp 49 | $double_link -t128 -mcdrc-ebr 50 | $double_link -t128 -mcdrc-hp 51 | done 52 | -------------------------------------------------------------------------------- /test-scripts/sanitize-elim.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export RUST_BACKTRACE=1 RUSTFLAGS='-Z sanitizer=address' 4 | 5 | run="cargo run --bin hp --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 6 | 7 | set -e 8 | for i in {1..5000}; do 9 | $run -delim-ab-tree -i3 -t256 -r100000 -g1 10 | done 11 | -------------------------------------------------------------------------------- /test-scripts/sanitize-hp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export RUST_BACKTRACE=1 RUSTFLAGS='-Z sanitizer=address' 4 | 5 | hps="cargo run --bin hp --profile=release-simple --target 
x86_64-unknown-linux-gnu --features sanitize -- " 6 | 7 | set -e 8 | for i in {1..5000}; do 9 | $hps -dh-list -i3 -t256 -r10 -g1 10 | $hps -dnm-tree -i3 -t256 -r10 -g1 11 | done 12 | -------------------------------------------------------------------------------- /test-scripts/sanitize-hppp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export RUST_BACKTRACE=1 RUSTFLAGS='-Z sanitizer=address' 4 | 5 | hps="cargo run --bin hp-pp --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 6 | 7 | set -e 8 | for i in {1..5000}; do 9 | $hps -defrb-tree -i3 -t256 -r10 -g1 10 | done 11 | -------------------------------------------------------------------------------- /test-scripts/sanitize-hpsh.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export RUST_BACKTRACE=1 RUSTFLAGS='-Z sanitizer=address' 4 | 5 | hps="cargo run --bin hp-brcu --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 6 | hps0="cargo run --bin hp-rcu --profile=release-simple --target x86_64-unknown-linux-gnu --features sanitize -- " 7 | 8 | set -e 9 | for i in {1..5000}; do 10 | $hps -dh-list -i3 -t128 -r10 -g1 11 | $hps -dhm-list -i3 -t128 -r10 -g1 12 | $hps -dhhs-list -i3 -t128 -r10 -g1 13 | $hps -dhash-map -i3 -t256 -r10 -g1 14 | $hps -dnm-tree -i3 -t256 -r10 -g1 15 | $hps -dskip-list -i3 -t256 -r10 -g1 16 | 17 | $hps0 -dh-list -i3 -t128 -r10 -g1 18 | $hps0 -dhm-list -i3 -t128 -r10 -g1 19 | $hps0 -dhhs-list -i3 -t128 -r10 -g1 20 | $hps0 -dhash-map -i3 -t256 -r10 -g1 21 | $hps0 -dnm-tree -i3 -t256 -r10 -g1 22 | $hps0 -dskip-list -i3 -t256 -r10 -g1 23 | done 24 | -------------------------------------------------------------------------------- /test-scripts/stress-vbr.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | vbr="cargo run --bin vbr 
--release -- " 4 | 5 | set -e 6 | for i in {1..5000}; do 7 | $vbr -dskip-list -i3 -t256 -r100000 -g0 8 | done 9 | -------------------------------------------------------------------------------- /test-scripts/test-hpsh.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | for i in {1..300}; do 6 | cargo test --release -- list_alter 7 | done 8 | 9 | for i in {1..2000}; do 10 | RUSTFLAGS="-Z sanitizer=address" cargo test --profile=release-simple --target x86_64-unknown-linux-gnu -- list_alter 11 | done 12 | -------------------------------------------------------------------------------- /test-scripts/test-skiplist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | for i in {1..500}; do 6 | echo $i 7 | cargo test --release -- skip_list 8 | done 9 | -------------------------------------------------------------------------------- /test-scripts/test-vbr.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | for i in {1..3000}; do 6 | cargo test --release -- vbr 7 | done 8 | --------------------------------------------------------------------------------