├── .github ├── protected-workflows.yml ├── scripts │ ├── ci-build.sh │ ├── ci-common.sh │ ├── ci-doc.sh │ ├── ci-setup-i686-unknown-linux-gnu.sh │ ├── ci-setup-x86_64-apple-darwin.sh │ ├── ci-setup-x86_64-unknown-linux-gnu.sh │ ├── ci-style.sh │ └── ci-test.sh └── workflows │ ├── api-check.yml │ ├── cargo-msrv.yml │ ├── cargo-publish.yml │ ├── micro-bm.yml │ ├── mmtk-dev-env.yml │ ├── perf-compare-ci.yml │ ├── perf-jikesrvm-baseline.yml │ ├── perf-openjdk-baseline.yml │ ├── perf-regression-ci.yml │ ├── post-review-ci.yml │ ├── pr-binding-refs.yml │ ├── pre-review-ci.yml │ ├── rustdoc.yml │ └── stress-ci.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── COPYRIGHT ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── build.rs ├── docs ├── header │ └── mmtk.h ├── portingguide │ ├── .gitignore │ ├── book.toml │ └── src │ │ ├── SUMMARY.md │ │ ├── before_start.md │ │ ├── bindings.png │ │ ├── howto │ │ ├── next_steps.md │ │ ├── nogc.md │ │ └── prefix.md │ │ ├── portability.md │ │ └── prefix.md └── tutorial │ ├── .gitignore │ ├── book.toml │ ├── code │ └── mygc_semispace │ │ ├── gc_work.rs │ │ ├── global.rs │ │ ├── mod.rs │ │ └── mutator.rs │ └── src │ ├── SUMMARY.md │ ├── further_reading.md │ ├── intro │ ├── glossary.md │ ├── what_is_mmtk.md │ └── what_will_this_tutorial_cover.md │ ├── mygc │ ├── create.md │ ├── gencopy.md │ └── ss │ │ ├── alloc.md │ │ ├── collection.md │ │ ├── exercise.md │ │ ├── exercise_solution.md │ │ └── prefix.md │ ├── prefix.md │ └── preliminaries │ ├── set_up.md │ └── test.md ├── examples ├── allocation_benchmark.c ├── bench.sh ├── build.py ├── main.c └── reference_bump_allocator.c ├── macros ├── Cargo.toml └── src │ ├── lib.rs │ ├── plan_trace_object_impl.rs │ └── util.rs ├── rust-toolchain ├── src ├── build_info.rs ├── lib.rs ├── memory_manager.rs ├── mmtk.rs ├── plan │ ├── barriers.rs │ ├── gc_requester.rs │ ├── generational │ │ ├── barrier.rs │ │ ├── copying │ │ │ ├── gc_work.rs │ │ │ ├── global.rs │ │ │ ├── mod.rs │ │ 
│ └── mutator.rs │ │ ├── gc_work.rs │ │ ├── global.rs │ │ ├── immix │ │ │ ├── gc_work.rs │ │ │ ├── global.rs │ │ │ ├── mod.rs │ │ │ └── mutator.rs │ │ └── mod.rs │ ├── global.rs │ ├── immix │ │ ├── gc_work.rs │ │ ├── global.rs │ │ ├── mod.rs │ │ └── mutator.rs │ ├── markcompact │ │ ├── gc_work.rs │ │ ├── global.rs │ │ ├── mod.rs │ │ └── mutator.rs │ ├── marksweep │ │ ├── gc_work.rs │ │ ├── global.rs │ │ ├── mod.rs │ │ └── mutator.rs │ ├── mod.rs │ ├── mutator_context.rs │ ├── nogc │ │ ├── global.rs │ │ ├── mod.rs │ │ └── mutator.rs │ ├── pageprotect │ │ ├── gc_work.rs │ │ ├── global.rs │ │ ├── mod.rs │ │ └── mutator.rs │ ├── plan_constraints.rs │ ├── semispace │ │ ├── gc_work.rs │ │ ├── global.rs │ │ ├── mod.rs │ │ └── mutator.rs │ ├── sticky │ │ ├── immix │ │ │ ├── gc_work.rs │ │ │ ├── global.rs │ │ │ ├── mod.rs │ │ │ └── mutator.rs │ │ └── mod.rs │ └── tracing.rs ├── policy │ ├── copy_context.rs │ ├── copyspace.rs │ ├── gc_work.rs │ ├── immix │ │ ├── block.rs │ │ ├── defrag.rs │ │ ├── immixspace.rs │ │ ├── line.rs │ │ └── mod.rs │ ├── immortalspace.rs │ ├── largeobjectspace.rs │ ├── lockfreeimmortalspace.rs │ ├── markcompactspace.rs │ ├── marksweepspace │ │ ├── malloc_ms │ │ │ ├── global.rs │ │ │ ├── metadata.rs │ │ │ └── mod.rs │ │ ├── mod.rs │ │ └── native_ms │ │ │ ├── block.rs │ │ │ ├── block_list.rs │ │ │ ├── global.rs │ │ │ └── mod.rs │ ├── mod.rs │ ├── sft.rs │ ├── sft_map.rs │ ├── space.rs │ └── vmspace.rs ├── scheduler │ ├── affinity.rs │ ├── controller.rs │ ├── gc_work.rs │ ├── mod.rs │ ├── scheduler.rs │ ├── stat.rs │ ├── work.rs │ ├── work_bucket.rs │ ├── work_counter.rs │ └── worker.rs ├── util │ ├── address.rs │ ├── alloc │ │ ├── allocator.rs │ │ ├── allocators.rs │ │ ├── bumpallocator.rs │ │ ├── embedded_meta_data.rs │ │ ├── free_list_allocator.rs │ │ ├── immix_allocator.rs │ │ ├── large_object_allocator.rs │ │ ├── malloc_allocator.rs │ │ ├── markcompact_allocator.rs │ │ └── mod.rs │ ├── analysis │ │ ├── gc_count.rs │ │ ├── mod.rs │ │ ├── obj_num.rs 
│ │ └── obj_size.rs │ ├── constants.rs │ ├── conversions.rs │ ├── copy │ │ └── mod.rs │ ├── edge_logger.rs │ ├── erase_vm.rs │ ├── finalizable_processor.rs │ ├── freelist.rs │ ├── heap │ │ ├── accounting.rs │ │ ├── blockpageresource.rs │ │ ├── chunk_map.rs │ │ ├── freelistpageresource.rs │ │ ├── gc_trigger.rs │ │ ├── heap_meta.rs │ │ ├── layout │ │ │ ├── byte_map_mmapper.rs │ │ │ ├── fragmented_mapper.rs │ │ │ ├── heap_parameters.rs │ │ │ ├── map.rs │ │ │ ├── map32.rs │ │ │ ├── map64.rs │ │ │ ├── mmapper.rs │ │ │ ├── mod.rs │ │ │ └── vm_layout_constants.rs │ │ ├── mod.rs │ │ ├── monotonepageresource.rs │ │ ├── pageresource.rs │ │ ├── space_descriptor.rs │ │ └── vmrequest.rs │ ├── int_array_freelist.rs │ ├── is_mmtk_object.rs │ ├── linear_scan.rs │ ├── logger.rs │ ├── malloc │ │ ├── library.rs │ │ ├── malloc_ms_util.rs │ │ └── mod.rs │ ├── memory.rs │ ├── metadata │ │ ├── global.rs │ │ ├── header_metadata.rs │ │ ├── log_bit.rs │ │ ├── mark_bit.rs │ │ ├── metadata_val_traits.rs │ │ ├── mod.rs │ │ ├── pin_bit.rs │ │ ├── side_metadata │ │ │ ├── constants.rs │ │ │ ├── global.rs │ │ │ ├── helpers.rs │ │ │ ├── helpers_32.rs │ │ │ ├── mod.rs │ │ │ ├── sanity.rs │ │ │ ├── side_metadata_tests.rs │ │ │ └── spec_defs.rs │ │ └── vo_bit │ │ │ ├── helper.rs │ │ │ └── mod.rs │ ├── mod.rs │ ├── object_forwarding.rs │ ├── opaque_pointer.rs │ ├── options.rs │ ├── raw_memory_freelist.rs │ ├── reference_processor.rs │ ├── rust_util │ │ ├── mod.rs │ │ ├── rev_group.rs │ │ └── zeroed_alloc.rs │ ├── sanity │ │ ├── mod.rs │ │ └── sanity_checker.rs │ ├── statistics │ │ ├── counter │ │ │ ├── event_counter.rs │ │ │ ├── long_counter.rs │ │ │ ├── mod.rs │ │ │ ├── perf_event.rs │ │ │ └── size_counter.rs │ │ ├── mod.rs │ │ └── stats.rs │ ├── test_util.rs │ └── treadmill.rs └── vm │ ├── active_plan.rs │ ├── collection.rs │ ├── edge_shape.rs │ ├── mod.rs │ ├── object_model.rs │ ├── reference_glue.rs │ └── scanning.rs ├── tests ├── test_address.rs └── test_roots_work_factory.rs ├── tools └── tracing 
│ ├── README.md │ ├── alloc_slow.bt │ ├── epilogue.bt.fragment │ ├── gc_stages.bt │ ├── lock_contended.bt │ ├── packet_size.bt │ ├── prologue_with_harness.bt.fragment │ ├── prologue_without_harness.bt.fragment │ └── run.py └── vmbindings └── dummyvm ├── Cargo.toml ├── api └── mmtk.h └── src ├── active_plan.rs ├── api.rs ├── collection.rs ├── edges.rs ├── lib.rs ├── object_model.rs ├── reference_glue.rs ├── scanning.rs └── tests ├── allocate_align_offset.rs ├── allocate_with_disable_collection.rs ├── allocate_with_initialize_collection.rs ├── allocate_with_re_enable_collection.rs ├── allocate_without_initialize_collection.rs ├── barrier_slow_path_assertion.rs ├── conservatism.rs ├── edges_test.rs ├── fixtures └── mod.rs ├── handle_mmap_conflict.rs ├── handle_mmap_oom.rs ├── is_in_mmtk_spaces.rs ├── issue139.rs ├── malloc_api.rs ├── malloc_counted.rs ├── malloc_ms.rs └── mod.rs /.github/protected-workflows.yml: -------------------------------------------------------------------------------- 1 | events: 2 | pull_request: &config 3 | - trustAnyone: true 4 | paths: 5 | disallowed: 6 | - ".github/**" 7 | - ".github/protected-workflows.yml" 8 | - trustOrgMembers: true 9 | 10 | pull_request_target: *config 11 | push: *config 12 | 13 | # 'anyEvent' value is a rule, and will be used when an event specific configuration is not set. 14 | # It is automatically added in case it was not explictly set and it does not supports the 'paths' property. 15 | anyEvent: 16 | trustAnyone: false 17 | trustCollaborators: false 18 | trustedUserNames: [] -------------------------------------------------------------------------------- /.github/scripts/ci-build.sh: -------------------------------------------------------------------------------- 1 | . $(dirname "$0")/ci-common.sh 2 | 3 | # Execute this script under the root folder of this repo. Otherwise it will fail. 
4 | 5 | # Build basic 6 | cargo build 7 | 8 | # Build features 9 | for_all_features "cargo build" 10 | 11 | # Build release 12 | for_all_features "cargo build --release" 13 | 14 | # target-specific features 15 | if [[ $arch == "x86_64" && $os == "linux" ]]; then 16 | cargo build --features perf_counter 17 | fi 18 | -------------------------------------------------------------------------------- /.github/scripts/ci-doc.sh: -------------------------------------------------------------------------------- 1 | . $(dirname "$0")/ci-common.sh 2 | 3 | # rustdoc.yml will copy the docs from respective directories to a directory for publishing. 4 | # If the output path is changed in this script, we need to update rustdoc.yml as well. 5 | 6 | # deny warnings for rustdoc 7 | export RUSTFLAGS="-D warnings" 8 | 9 | # Check cargo doc 10 | # We generate two versions of docs: one with only public items for binding developers for our API, and 11 | # the other with both public and private items for MMTk developers (GC implementers). 12 | cargo doc --features $non_exclusive_features --no-deps --target-dir target/mmtk-public 13 | cargo doc --features $non_exclusive_features --no-deps --document-private-items --target-dir target/mmtk-full 14 | 15 | # Check tutorial code 16 | tutorial_code_dir=$project_root/docs/tutorial/code/mygc_semispace 17 | # Clear the dir and copy again 18 | rm -rf $project_root/src/plan/mygc 19 | cp -r $tutorial_code_dir $project_root/src/plan/mygc 20 | # If we haven't appended the mod line, append it 21 | if !
cat $project_root/src/plan/mod.rs | grep -q "pub mod mygc;"; then 22 | echo "pub mod mygc;" >> $project_root/src/plan/mod.rs 23 | fi 24 | cargo build 25 | 26 | # Install mdbook using the stable toolchain (mdbook uses scoped-tls which requires rust 1.59.0) 27 | cargo +stable install mdbook 28 | mdbook build $project_root/docs/portingguide 29 | mdbook build $project_root/docs/tutorial -------------------------------------------------------------------------------- /.github/scripts/ci-setup-i686-unknown-linux-gnu.sh: -------------------------------------------------------------------------------- 1 | set -xe 2 | 3 | sudo apt-get update 4 | sudo apt-get install build-essential gcc-multilib -y 5 | 6 | # Necessary libraries for 32bit mmtk build 7 | sudo dpkg --add-architecture i386 8 | sudo apt-get update 9 | sudo apt-get install libgcc-s1:i386 10 | sudo apt-get install libc6-dev-i386 11 | sudo apt-get install zlib1g-dev:i386 12 | -------------------------------------------------------------------------------- /.github/scripts/ci-setup-x86_64-apple-darwin.sh: -------------------------------------------------------------------------------- 1 | set -xe 2 | -------------------------------------------------------------------------------- /.github/scripts/ci-setup-x86_64-unknown-linux-gnu.sh: -------------------------------------------------------------------------------- 1 | set -xe 2 | 3 | sudo apt-get update 4 | sudo apt-get install build-essential gcc-multilib -y 5 | -------------------------------------------------------------------------------- /.github/scripts/ci-style.sh: -------------------------------------------------------------------------------- 1 | . 
$(dirname "$0")/ci-common.sh 2 | 3 | export RUSTFLAGS="-D warnings" 4 | 5 | # check base 6 | cargo clippy 7 | # check all features 8 | for_all_features "cargo clippy" 9 | # check release 10 | for_all_features "cargo clippy --release" 11 | # check for tests 12 | for_all_features "cargo clippy --tests" 13 | # check for dummyvm 14 | cargo clippy --manifest-path=vmbindings/dummyvm/Cargo.toml 15 | 16 | # target-specific features 17 | if [[ $arch == "x86_64" && $os == "linux" ]]; then 18 | cargo clippy --features perf_counter 19 | cargo clippy --release --features perf_counter 20 | cargo clippy --tests --features perf_counter 21 | fi 22 | 23 | # check format 24 | cargo fmt -- --check 25 | -------------------------------------------------------------------------------- /.github/scripts/ci-test.sh: -------------------------------------------------------------------------------- 1 | . $(dirname "$0")/ci-common.sh 2 | 3 | export RUST_BACKTRACE=1 4 | # Run all tests with 1G heap 5 | export MMTK_GC_TRIGGER=FixedHeapSize:1000000000 6 | 7 | for_all_features "cargo test" 8 | 9 | # target-specific features 10 | if [[ $arch == "x86_64" && $os == "linux" ]]; then 11 | cargo test --features perf_counter 12 | fi 13 | 14 | ./examples/build.py 15 | 16 | ALL_PLANS=$(sed -n '/enum PlanSelector/,/}/p' src/util/options.rs | xargs | grep -o '{.*}' | grep -o '\w\+') 17 | 18 | # Test with DummyVM (each test in a separate run) 19 | cd vmbindings/dummyvm 20 | for fn in $(ls src/tests/*.rs); do 21 | t=$(basename -s .rs $fn) 22 | 23 | if [[ $t == "mod" ]]; then 24 | continue 25 | fi 26 | 27 | # Get the required plans. 28 | # Some tests need to be run with multiple plans because 29 | # some bugs can only be reproduced in some plans but not others. 30 | PLANS=$(sed -n 's/^\/\/ *GITHUB-CI: *MMTK_PLAN=//p' $fn) 31 | if [[ $PLANS == 'all' ]]; then 32 | PLANS=$ALL_PLANS 33 | elif [[ -z $PLANS ]]; then 34 | PLANS=NoGC 35 | fi 36 | 37 | # Some tests need some features enabled. 
38 | FEATURES=$(sed -n 's/^\/\/ *GITHUB-CI: *FEATURES=//p' $fn) 39 | 40 | # Run the test with each plan it needs. 41 | for MMTK_PLAN in $PLANS; do 42 | env MMTK_PLAN=$MMTK_PLAN cargo test --features "$FEATURES" -- $t; 43 | done 44 | done 45 | 46 | -------------------------------------------------------------------------------- /.github/workflows/api-check.yml: -------------------------------------------------------------------------------- 1 | name: Public API Check 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | 8 | concurrency: 9 | # Cancels pending runs when a PR gets updated. 10 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 11 | cancel-in-progress: true 12 | 13 | # The workflow may fail if we change the public API in a pull request. 14 | # We allow fail on this action. But we should manually check if the changes are reasonable when we see a failed action. 15 | # It would be good if the workflow returns a neutral status when we find API changes. But it is currently not 16 | # possible with Github actions. 17 | jobs: 18 | check-public-api-changes: 19 | runs-on: ubuntu-latest 20 | steps: 21 | # Full git history needed 22 | - uses: actions/checkout@v2 23 | with: 24 | fetch-depth: 0 25 | 26 | # Install nightly 27 | - uses: actions-rs/toolchain@v1 28 | with: 29 | # We need nightly for cargo-public-api to get the API output. 30 | toolchain: nightly-2023-01-04 31 | profile: minimal 32 | # It is not necessary to use nightly as default (which is used to install cargo-public-api and compile our code). 33 | # However, our current toolchain is 1.59.0, and cargo-public-api requires 1.60 at least. To make it simple, 34 | # we just use the latest nightly toolchain. 
35 | override: true 36 | - run: cargo --version 37 | 38 | # Install cargo-public-api 39 | - name: Install cargo-public-api 40 | run: cargo install cargo-public-api --version 0.26.0 41 | - name: API Diff 42 | run: cargo public-api diff origin/${GITHUB_BASE_REF}..${{ github.event.pull_request.head.sha }} --deny=all 43 | -------------------------------------------------------------------------------- /.github/workflows/cargo-msrv.yml: -------------------------------------------------------------------------------- 1 | name: Minimal Supported Rust Version 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | 8 | concurrency: 9 | # Cancels pending runs when a PR gets updated. 10 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | msrv: 15 | runs-on: ubuntu-22.04 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Install Rust toolchain 19 | uses: actions-rs/toolchain@v1 20 | with: 21 | toolchain: stable 22 | override: true 23 | - name: Install cargo-msrv 24 | run: cargo install cargo-msrv 25 | # Verify the MSRV defined in Cargo.toml 26 | - name: Verify MSRV 27 | run: cargo msrv verify 28 | # If the previous step fails, find MSRV 29 | - name: Find MSRV 30 | if: failure() 31 | run: cargo msrv 32 | -------------------------------------------------------------------------------- /.github/workflows/cargo-publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to crates.io 2 | 3 | on: 4 | # Triggered when we tag a release (including a prerelease) 5 | release: 6 | types: [published] 7 | 8 | concurrency: 9 | # Cancels pending runs when a PR gets updated. 
10 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | cargo-publish: 15 | runs-on: ubuntu-22.04 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Install latest nightly 19 | uses: actions-rs/toolchain@v1 20 | with: 21 | components: rustfmt, clippy 22 | target: i686-unknown-linux-gnu 23 | # This overwrites the default toolchain with the toolchain specified above. 24 | override: true 25 | - name: Cargo login 26 | run: cargo login ${{ secrets.CI_CARGO_LOGIN }} 27 | - name: Publish sub crates 28 | run: | 29 | cargo publish --manifest-path=macros/Cargo.toml 30 | # Publish MMTk core. 31 | # As mmtk-core depends on the crate we just published above, in practice there could be 32 | # a delay before we can find the exact version for the dependent crate on crates.io. 33 | # The script will retry publishing 5 times with 60 seconds between the retries. 34 | - name: Publish mmtk-core 35 | run: | 36 | for n in {1..5}; do 37 | echo "Attempt #"$n 38 | cargo publish && break 39 | echo "Wait for Retry #"$n 40 | sleep 60 41 | done 42 | -------------------------------------------------------------------------------- /.github/workflows/mmtk-dev-env.yml: -------------------------------------------------------------------------------- 1 | name: Check mmtk-dev-env 2 | 3 | # Triggered when a new commit is pushed to master 4 | on: 5 | push: 6 | branches: 7 | - master 8 | 9 | concurrency: 10 | # Cancels pending runs when a PR gets updated. 11 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | # Trigger ci.yml from mmtk/mmtk-dev-env to make sure mmtk-core can build with mmtk-dev-env. 
16 | check-mmtk-dev-env: 17 | runs-on: ubuntu-22.04 18 | steps: 19 | - uses: convictional/trigger-workflow-and-wait@v1.3.0 20 | with: 21 | owner: mmtk 22 | repo: mmtk-dev-env 23 | github_token: ${{ secrets.CI_ACCESS_TOKEN }} 24 | workflow_file_name: ci.yml 25 | ref: main 26 | wait_interval: 30 27 | inputs: '{}' 28 | propagate_failure: true 29 | trigger_workflow: true 30 | wait_workflow: true 31 | -------------------------------------------------------------------------------- /.github/workflows/perf-jikesrvm-baseline.yml: -------------------------------------------------------------------------------- 1 | name: JikesRVM Performance Baseline 2 | 3 | on: 4 | # Manual dispatch 5 | workflow_dispatch: 6 | # Or on every sunday 1200am UTC 7 | schedule: 8 | - cron: '0 0 * * SUN' 9 | 10 | jobs: 11 | jikesrvm-baseline: 12 | runs-on: [self-hosted, Linux, freq-scaling-off] 13 | # Allow 1 day to run 14 | timeout-minutes: 1440 15 | steps: 16 | - name: Checkout JikesRVM Binding 17 | uses: actions/checkout@v2 18 | with: 19 | repository: mmtk/mmtk-jikesrvm 20 | path: mmtk-jikesrvm 21 | submodules: true 22 | # checkout perf-kit 23 | - name: Checkout Perf Kit 24 | uses: actions/checkout@v2 25 | with: 26 | token: ${{ secrets.CI_ACCESS_TOKEN }} 27 | repository: mmtk/ci-perf-kit 28 | ref: "0.6.8" 29 | path: ci-perf-kit 30 | submodules: true 31 | # setup 32 | - name: Setup 33 | run: | 34 | ./ci-perf-kit/scripts/history-run-setup.sh 35 | sed -i 's/^mmtk[[:space:]]=/#ci:mmtk=/g' mmtk-jikesrvm/mmtk/Cargo.toml 36 | sed -i 's/^#[[:space:]]mmtk/mmtk/g' mmtk-jikesrvm/mmtk/Cargo.toml 37 | # run 38 | - name: Performance Run 39 | timeout-minutes: 1440 40 | run: | 41 | export RESULT_REPO=mmtk/ci-perf-result 42 | export RESULT_REPO_BRANCH=self-hosted 43 | export RESULT_REPO_ACCESS_TOKEN=${{ secrets.CI_ACCESS_TOKEN }} 44 | export FROM_DATE=2020-07-10 45 | JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64 ./ci-perf-kit/scripts/jikesrvm-stock.sh ./mmtk-jikesrvm/repos/jikesrvm 46 | 
-------------------------------------------------------------------------------- /.github/workflows/perf-openjdk-baseline.yml: -------------------------------------------------------------------------------- 1 | name: OpenJDK Performance Baseline 2 | 3 | on: 4 | # Manual dispatch 5 | workflow_dispatch: 6 | # Or on every sunday 1200am UTC 7 | schedule: 8 | - cron: '0 0 * * SUN' 9 | 10 | jobs: 11 | openjdk-baseline: 12 | runs-on: [self-hosted, Linux, freq-scaling-off] 13 | # Allow 2 days to run (it currently takes slightly more than 1 day to finish) 14 | timeout-minutes: 2880 15 | steps: 16 | - name: Checkout OpenJDK Binding 17 | uses: actions/checkout@v2 18 | with: 19 | repository: mmtk/mmtk-openjdk 20 | path: mmtk-openjdk 21 | submodules: true 22 | # checkout perf-kit 23 | - name: Checkout Perf Kit 24 | uses: actions/checkout@v2 25 | with: 26 | token: ${{ secrets.CI_ACCESS_TOKEN }} 27 | repository: mmtk/ci-perf-kit 28 | ref: "0.6.8" 29 | path: ci-perf-kit 30 | submodules: true 31 | # setup 32 | - name: Setup 33 | run: | 34 | ./ci-perf-kit/scripts/history-run-setup.sh 35 | sed -i 's/^mmtk[[:space:]]=/#ci:mmtk=/g' mmtk-openjdk/mmtk/Cargo.toml 36 | sed -i 's/^#[[:space:]]mmtk/mmtk/g' mmtk-openjdk/mmtk/Cargo.toml 37 | # run 38 | - name: Performance Run 39 | timeout-minutes: 2880 40 | run: | 41 | export RESULT_REPO=mmtk/ci-perf-result 42 | export RESULT_REPO_BRANCH=self-hosted 43 | export RESULT_REPO_ACCESS_TOKEN=${{ secrets.CI_ACCESS_TOKEN }} 44 | export FROM_DATE=2020-07-10 45 | ./ci-perf-kit/scripts/openjdk-stock.sh ./mmtk-openjdk/repos/openjdk 46 | -------------------------------------------------------------------------------- /.github/workflows/pre-review-ci.yml: -------------------------------------------------------------------------------- 1 | name: Pre Code Review Checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | 8 | concurrency: 9 | # Cancels pending runs when a PR gets updated. 
10 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | # Setup dynamic test matrix 15 | setup-test-matrix: 16 | runs-on: ubuntu-latest 17 | outputs: 18 | rust: ${{ steps.rust.outputs.array }} 19 | steps: 20 | - uses: actions/checkout@v2 21 | # Get rust version 22 | - id: rust 23 | run: | 24 | export MSRV=`cargo read-manifest | python -c 'import json,sys; print(json.load(sys.stdin)["rust_version"])'` 25 | export TEST=`cat rust-toolchain` 26 | echo "::set-output name=array::[\"$MSRV\", \"$TEST\", \"stable\"]" 27 | 28 | pre-code-review-checks: 29 | needs: setup-test-matrix 30 | strategy: 31 | fail-fast: false 32 | matrix: 33 | target: 34 | - { os: ubuntu-22.04, triple: x86_64-unknown-linux-gnu } 35 | - { os: ubuntu-22.04, triple: i686-unknown-linux-gnu } 36 | - { os: macos-12, triple: x86_64-apple-darwin } 37 | rust: ${{ fromJson(needs.setup-test-matrix.outputs.rust )}} 38 | 39 | name: ${{ matrix.target.triple }} / ${{ matrix.rust }} 40 | runs-on: ${{ matrix.target.os }} 41 | 42 | steps: 43 | - uses: actions/checkout@v2 44 | - name: Install Rust 45 | uses: actions-rs/toolchain@v1 46 | with: 47 | toolchain: ${{ matrix.rust }}-${{ matrix.target.triple }} 48 | components: rustfmt, clippy 49 | # This overwrites the default toolchain with the toolchain specified above. 
50 | override: true 51 | 52 | # Setup Environments 53 | - name: Setup Environments 54 | run: ./.github/scripts/ci-setup-${{ matrix.target.triple }}.sh 55 | 56 | # Build 57 | - name: Build 58 | run: ./.github/scripts/ci-build.sh 59 | 60 | # Test 61 | - name: Test 62 | run: ./.github/scripts/ci-test.sh 63 | 64 | # Style checks 65 | - name: Style checks 66 | run: ./.github/scripts/ci-style.sh 67 | 68 | # Document check 69 | - name: Rustdoc 70 | run: ./.github/scripts/ci-doc.sh 71 | -------------------------------------------------------------------------------- /.github/workflows/rustdoc.yml: -------------------------------------------------------------------------------- 1 | name: Generate doc 2 | 3 | # Triggerred when a new commit is pushed to master 4 | on: 5 | push: 6 | branches: 7 | - master 8 | 9 | jobs: 10 | publish-rustdoc-as-ghpages: 11 | runs-on: ubuntu-22.04 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Install nightly 15 | uses: actions-rs/toolchain@v1 16 | with: 17 | components: rustfmt, clippy 18 | target: i686-unknown-linux-gnu 19 | # This overwrites the default toolchain with the toolchain specified above. 20 | override: true 21 | - name: Append sha to crate version 22 | run: | 23 | sed -i 's/^version = "[0-9]\+.[0-9]\+.[0-9]\+/&-'${GITHUB_SHA}'/' Cargo.toml 24 | - name: Generate rustdoc 25 | run: ./.github/scripts/ci-doc.sh 26 | - name: Copy docs 27 | # docs/ is the root for github pages. 28 | # The generated docs are put to docs/ (i.e. 
root for github pages) 29 | # mmtk public doc: docs/public-doc 30 | # mmtk full doc: docs/full-doc 31 | # porting guide: docs/portingguide 32 | # tutorial: docs/tutorial 33 | run: | 34 | mkdir -p to_publish/docs 35 | cp -r target/mmtk-full/doc/* to_publish/docs/ 36 | mv to_publish/docs/mmtk to_publish/docs/full-doc 37 | cp -r target/mmtk-public/doc/mmtk to_publish/docs/public-doc 38 | cp -r docs/portingguide/book to_publish/docs/portingguide 39 | cp -r docs/tutorial/book to_publish/docs/tutorial 40 | - name: Deploy to Github Page 41 | uses: peaceiris/actions-gh-pages@v3 42 | with: 43 | personal_token: ${{ secrets.CI_ACCESS_TOKEN }} 44 | publish_dir: to_publish 45 | publish_branch: gh-pages 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # will have compiled files and executables 2 | target/ 3 | 4 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 5 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 6 | Cargo.lock 7 | 8 | # These are backup files generated by rustfmt 9 | **/*.rs.bk 10 | 11 | # Intellij 12 | /.idea/ 13 | /mmtk.iml 14 | 15 | # VSCode 16 | /.vscode/ 17 | 18 | # generated by JikesRVM 19 | /src/vm/jikesrvm/entrypoint.rs 20 | /src/vm/jikesrvm/inc.asm 21 | 22 | # build.py & bench.sh 23 | /*.dylib 24 | /*.so 25 | /bench/*.so 26 | /bench/*.dylib 27 | /bench-exe 28 | /test_mmtk 29 | /test_mmtk_32 30 | 31 | # Generated for testing tutorial code build 32 | /src/plan/mygc 33 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to MMTk 2 | 3 | Thank you for your interest in contributing to MMTk. We appreciate all the contributors. There are multiple ways you can help and contribute to MMTk. 
4 | 5 | ## Reporting a bug 6 | 7 | If you encounter any bug when using MMTk, you are welcome to submit an issue ([mmtk-core issues](https://github.com/mmtk/mmtk-core/issues)) to report it. We would suggest including essential information to reproduce and investigate the bug, such as the revisions of mmtk-core and the related bindings, the command line arguments used to build, and the command line executed to reproduce the bug. 8 | 9 | ## Submit a pull request 10 | 11 | If you would like to upstream non-trivial changes to MMTk, we suggest first getting involved in the discussion of the related [Github issues](https://github.com/mmtk/mmtk-core/issues), or talking to any MMTk team member on [our Zulip](https://mmtk.zulipchat.com/). This makes sure that others know what you are up to, and makes it easier for your changes to get accepted to MMTk. 12 | 13 | Generally we expect a pull request to meet the following requirements before it can be merged: 14 | 1. The PR includes only one change. You can break down large pull requests into separate smaller ones. 15 | 2. The code is well documented. 16 | 3. The PR does not introduce unsafe Rust code unless necessary. Whenever introducing unsafe code, the contributor must elaborate why it is necessary. 17 | 4. The PR passes the mmtk-core unit tests and complies with the coding style. We have scripts in `.github/scripts` that are used by our Github action to run those checks for each PR. 18 | 5. The PR passes all the binding tests. We run benchmarks with bindings to test mmtk-core. A new pull request should not break bindings, as we ensure that our supported bindings always work with the latest mmtk-core. If a pull request makes changes that require the bindings to be updated correspondingly, you can approach the MMTk team on [our Zulip](https://mmtk.zulipchat.com/) and seek help from them to update the bindings. 
-------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | Copyrights in the MMTk project are retained by their contributors. No 2 | copyright assignment is required to contribute to the MMTk project. 3 | 4 | Some files may include explicit copyright notices and/or license notices. 5 | For full authorship information, see the version control history. 6 | 7 | Except as otherwise noted (below and/or in individual files), MMTk is 8 | licensed under the Apache License, Version 2.0 or 9 | or the MIT license 10 | or , at your option. 11 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | built::write_built_file().expect("Failed to acquire build-time information"); 3 | } 4 | -------------------------------------------------------------------------------- /docs/portingguide/.gitignore: -------------------------------------------------------------------------------- 1 | book 2 | -------------------------------------------------------------------------------- /docs/portingguide/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Angus Atkinson", "Brenda Wang", "Steve Blackburn"] 3 | language = "en" 4 | multilingual = false 5 | src = "src" 6 | title = "MMTk Porting Guide" 7 | -------------------------------------------------------------------------------- /docs/portingguide/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | [MMTk Porting Guide](./prefix.md) 4 | 5 | - [MMTk’s Approach to Portability](./portability.md) 6 | - [Before Starting a Port](./before_start.md) 7 | - [How to Undertake a Port](./howto/prefix.md) 8 | - [NoGC](./howto/nogc.md) 9 | - [Next Steps](./howto/next_steps.md) 10 | -------------------------------------------------------------------------------- /docs/portingguide/src/before_start.md: -------------------------------------------------------------------------------- 1 | # Things to Consider Before Starting a Port 2 | 3 | In principle, a port to MMTk is not particularly difficult. 4 | MMTk can present itself as a standard library and the core of the API is relatively simple. 5 | 6 | However, porting a runtime to a different GC (any GC) can be difficult and time consuming. 7 | Key questions include: 8 | - How well encapsulated is the runtime's existing collector? 
9 | - Does the runtime make tacit assumptions about the underlying collector's implementation? 10 | - How many places in the runtime codebase reference some part of the GC? 11 | - If the runtime has a JIT, how good is the interface between the JIT and the GC (for write barriers and allocations, for example)? 12 | - Does the runtime support precise stack scanning? 13 | - etc. 14 | 15 | Thinking through these questions should give you a sense for how big a task a GC port will be. -------------------------------------------------------------------------------- /docs/portingguide/src/bindings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wenyuzhao/mmtk-core/2d5bd64d39ae061cdb81d7a5defe63bb2e67b946/docs/portingguide/src/bindings.png -------------------------------------------------------------------------------- /docs/portingguide/src/howto/next_steps.md: -------------------------------------------------------------------------------- 1 | # Next Steps 2 | 3 | Your choice of the next GC plan to implement depends on your situation. 4 | If you’re developing a new VM from scratch, or if you are intimately familiar with the internals of your target VM, then implementing a SemiSpace collector is probably the best course of action. 5 | Although the GC itself is rather simplistic, it stresses many of the key components of the MMTk <-> VM binding that will be required for later (and more powerful) GCs. 6 | In particular, since it always moves objects, it is an excellent stress test. 7 | 8 | An alternative route is to implement MarkSweep. 9 | This may be necessary in scenarios where the target VM doesn’t support object movement, or would require significant refactoring to do so. 10 | This can then serve as a stepping stone for future, moving GCs such as SemiSpace. 
11 | 12 | We hope to have an Immix implementation available soon, which provides a nice middle ground between moving and non-moving (since it copies opportunistically, and can cope with a strictly non-moving requirement if needs be). 13 | -------------------------------------------------------------------------------- /docs/portingguide/src/howto/prefix.md: -------------------------------------------------------------------------------- 1 | # How to Undertake a Port 2 | 3 | We recommend a highly incremental approach to implementing a port. The broad idea is: 4 | - Start with the NoGC plan and gradually move to more advanced collectors 5 | - Focus on simplicity and correctness. 6 | - Optimize the port later. 7 | 8 | In MMTk’s language, a plan is essentially a configuration which specifies a GC algorithm. 9 | Plans can be selected at run time. 10 | Not all plans will be suitable for all runtimes. 11 | For example, a runtime that for some reason cannot support object movement won’t be able to use plans that use copying garbage collection. 12 | -------------------------------------------------------------------------------- /docs/portingguide/src/portability.md: -------------------------------------------------------------------------------- 1 | # Overview of MMTk’s Approach to Portability 2 | 3 | MMTk is designed from the outset to be both high performance and portable. 4 | The core of MMTk is entirely runtime-neutral, and is written in Rust. 5 | Runtimes that wish to use MMTk may be written in any language so long as they have a means to call into MMTk’s API, which presents itself as a shared library. 6 | 7 | MMTk uses the concept of *bindings* to create high performance impedance matching between runtimes and MMTk. 8 | 9 | MMTk’s approach to portability follows these principles: 10 | 11 | 1. The MMTk core must remain entirely runtime-agnostic and free of any runtime-specific code. 12 | 2. 
The runtime’s code base should be entirely garbage-collector agnostic and free of any MMTk-specific code. 13 | 3. The semantics of all MMTk functionality is strictly defined within the MMTk core. 14 | 15 | Those principles have the following important implications: 16 | 17 | - Each port of a runtime is supported by a binding that has two components: one which is a logical extension of the runtime, written in the same language as the runtime, but which is MMTk-specific, and one which is a logical extension of MMTk, written in Rust, but which is runtime-specific (see diagram below). 18 | - A fully-correct but non-performant port will simply implement calls from the runtime to MMTk (to allocate an object, for example), and from MMTk to the runtime (to enumerate pointers, for example). 19 | - A performant port will likely replicate and lift MMTk functionality into the runtime portion of the port, and conversely replicate runtime functionality in Rust for performant access by MMTk. 20 | 21 | ![A diagram with four boxes, left to right: OpenJDK, MMTk-specific mutator code, OpenJDK-specific MMTk code, MMTk](bindings.png) 22 | 23 | The diagram above illustrates a port of MMTk to OpenJDK with the binding in the center. 24 | The code coloured brown is logically part of MMTk and is written in Rust. 25 | The code coloured white is logically part of OpenJDK and is written in C++. 26 | The rightmost box is entirely free of any OpenJDK-specific code. 27 | The leftmost box should be entirely free of any MMTk-specific code. 28 | 29 | > Note: we do currently maintain a fork of OpenJDK which includes some necessary changes to their code base, but this is not MMTk-specific and ideally this will be upstreamed. Our port to V8 is a cleaner example, where we’ve managed to work closely with the V8 team to upstream all of the refactoring of the V8 code base that was necessary for it to support a third party heap. 30 | 31 | We structure the code into three repos. 
Taking the example of the OpenJDK port, the three repos are: the [MMTk core](https://github.com/mmtk/mmtk-core), the [binding repo](https://github.com/mmtk/mmtk-openjdk) containing both parts of the binding, and the OpenJDK repo, which is currently [a fork](https://github.com/mmtk/openjdk) we maintain. -------------------------------------------------------------------------------- /docs/portingguide/src/prefix.md: -------------------------------------------------------------------------------- 1 | # Porting Guide 2 | > Note: This guide is work in progress. 3 | 4 | This guide is designed to get you started on porting MMTk to a new runtime. 5 | We start with an overview of the MMTk approach to porting and then step through recommended strategies for implementing a port. 6 | 7 | There’s no fixed way to implement a new port. 8 | What we outline here is a distillation of best practices that have emerged from community as it has worked through many ports (each at various levels of maturity). -------------------------------------------------------------------------------- /docs/tutorial/.gitignore: -------------------------------------------------------------------------------- 1 | book 2 | -------------------------------------------------------------------------------- /docs/tutorial/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["Rouane Bannister"] 3 | language = "en" 4 | multilingual = false 5 | src = "src" 6 | title = "MMTk Tutorial" 7 | -------------------------------------------------------------------------------- /docs/tutorial/code/mygc_semispace/mod.rs: -------------------------------------------------------------------------------- 1 | // This module's code is unused When we compile this module with MMTk core. Allow it. 
2 | #![allow(dead_code)] 3 | 4 | mod gc_work; // Add 5 | mod global; 6 | mod mutator; 7 | 8 | pub use self::global::MyGC; 9 | pub use self::global::MYGC_CONSTRAINTS; 10 | -------------------------------------------------------------------------------- /docs/tutorial/code/mygc_semispace/mutator.rs: -------------------------------------------------------------------------------- 1 | // ANCHOR: imports 2 | use super::MyGC; // Add 3 | use crate::Plan; 4 | use crate::plan::barriers::NoBarrier; 5 | use crate::plan::mutator_context::Mutator; 6 | use crate::plan::mutator_context::MutatorConfig; 7 | use crate::plan::AllocationSemantics; 8 | use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; 9 | use crate::util::alloc::BumpAllocator; 10 | use crate::util::opaque_pointer::*; 11 | use crate::vm::VMBinding; 12 | use crate::plan::mutator_context::{ 13 | create_allocator_mapping, create_space_mapping, ReservedAllocators, 14 | }; 15 | use enum_map::EnumMap; 16 | // Remove crate::plan::mygc::MyGC 17 | // Remove mygc_mutator_noop 18 | // ANCHOR_END: imports 19 | 20 | // Add 21 | pub fn mygc_mutator_prepare( 22 | _mutator: &mut Mutator, 23 | _tls: VMWorkerThread, 24 | ) { 25 | // Do nothing 26 | } 27 | 28 | // Add 29 | // ANCHOR: release 30 | pub fn mygc_mutator_release( 31 | mutator: &mut Mutator, 32 | _tls: VMWorkerThread, 33 | ) { 34 | // rebind the allocation bump pointer to the appropriate semispace 35 | let bump_allocator = unsafe { 36 | mutator 37 | .allocators 38 | .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) 39 | } 40 | .downcast_mut::>() 41 | .unwrap(); 42 | bump_allocator.rebind( 43 | mutator 44 | .plan 45 | .downcast_ref::>() 46 | .unwrap() 47 | .tospace(), 48 | ); 49 | } 50 | // ANCHOR_END: release 51 | 52 | // Modify 53 | // ANCHOR: allocator_mapping 54 | const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { 55 | n_bump_pointer: 1, 56 | ..ReservedAllocators::DEFAULT 57 | }; 58 | 59 | lazy_static! 
pub static ref ALLOCATOR_MAPPING: EnumMap<AllocationSemantics, AllocatorSelector> = { 61 | let mut map = create_allocator_mapping(RESERVED_ALLOCATORS, true); 62 | map[AllocationSemantics::Default] = AllocatorSelector::BumpPointer(0); 63 | map 64 | }; 65 | } 66 | // ANCHOR_END: allocator_mapping 67 | 68 | pub fn create_mygc_mutator<VM: VMBinding>( 69 | mutator_tls: VMMutatorThread, 70 | plan: &'static dyn Plan<VM = VM>, 71 | ) -> Mutator<VM> { 72 | // ANCHOR: plan_downcast 73 | let mygc = plan.downcast_ref::<MyGC<VM>>().unwrap(); 74 | // ANCHOR_END: plan_downcast 75 | let config = MutatorConfig { 76 | allocator_mapping: &*ALLOCATOR_MAPPING, 77 | // Modify 78 | // ANCHOR: space_mapping 79 | space_mapping: Box::new({ 80 | let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, plan); 81 | vec.push((AllocatorSelector::BumpPointer(0), mygc.tospace())); 82 | vec 83 | }), 84 | // ANCHOR_END: space_mapping 85 | prepare_func: &mygc_mutator_prepare, // Modify 86 | release_func: &mygc_mutator_release, // Modify 87 | }; 88 | 89 | Mutator { 90 | allocators: Allocators::<VM>::new(mutator_tls, plan, &config.space_mapping),
solution](./mygc/ss/exercise_solution.md) 19 | - [Building a generational copying GC](./mygc/gencopy.md) 20 | - [Further Reading](./further_reading.md) 21 | -------------------------------------------------------------------------------- /docs/tutorial/src/further_reading.md: -------------------------------------------------------------------------------- 1 | # Further Reading 2 | 3 | - [MMTk Crate Documentation](https://www.mmtk.io/mmtk-core/mmtk/index.html) 4 | - Original MMTk papers: 5 | - [*Oil and Water? High Performance Garbage Collection in Java with MMTk*](https://www.mmtk.io/assets/pubs/mmtk-icse-2004.pdf) (Blackburn, Cheng, McKinley, 2004) 6 | - [*Myths and realities: The performance impact of garbage collection*](https://www.mmtk.io/assets/pubs/mmtk-sigmetrics-2004.pdf) (Blackburn, Cheng, McKinley, 2004) 7 | - [*The Garbage Collection Handbook*](https://learning.oreilly.com/library/view/the-garbage-collection/9781315388007) (Jones, Hosking, Moss, 2016) 8 | - Videos: [MPLR 2020 Keynote](https://www.youtube.com/watch?v=3L6XEVaYAmU), [Deconstructing the Garbage-First Collector](https://www.youtube.com/watch?v=MAk6RdApGLs) -------------------------------------------------------------------------------- /docs/tutorial/src/intro/glossary.md: -------------------------------------------------------------------------------- 1 | # Glossary 2 | 3 | *allocator*: Code that allocates new objects into memory. 4 | 5 | *collector*: Finds and frees memory occupied by 'dead' objects. 6 | 7 | *dead*: An object that is not live. 8 | 9 | *GC work (unit), GC packet*: A schedulable unit of collection work. 10 | 11 | *GC worker*: A worker thread that performs garbage collection operations 12 | (as required by GC work units). 13 | 14 | *live*: An object that is reachable, and thus can still be accessed by other 15 | objects, is live/alive. 16 | 17 | *mutator*: Something that 'mutates', or changes, the objects stored in memory. 
18 | This is the term that is traditionally used in the garbage collection literature 19 | to describe the running program (because it 'mutates' the object graph). 20 | 21 | *plan*: A garbage collection algorithm expressed as a configuration of policies. 22 | See also [Plans and policies](#plans-and-policies) below. 23 | 24 | *policy*: A specific garbage collection algorithm, such as marksweep, copying, 25 | immix, etc. Plans are made up of an arrangement of one or more policies. 26 | See also [Plans and policies](#plans-and-policies) below. 27 | 28 | *scheduler*: Dynamically dispatches units of GC work to workers. 29 | 30 | *zeroing*, *zero initialization*: Initializing and resetting unused memory 31 | bits to have a value of 0. Required by most memory-safe programming languages. 32 | 33 | See also: [Further Reading](../further_reading.md) 34 | 35 | 36 | ## Plans and Policies 37 | 38 | In MMTk, collectors are instantiated as plans, which can be thought of as 39 | configurations of collector policies. In practice, most production 40 | collectors and almost all collectors in MMTk are comprised of multiple 41 | algorithms/policies. For example the gencopy plan describes a configuration 42 | that combines a copying nursery with a semispace mature space. In MMTk we 43 | think of these as three spaces, each of which happen to use the copyspace 44 | policy, and which have a relationship which is defined by the gencopy plan. 45 | Under the hood, gencopy builds upon a common plan which may also contain other 46 | policies including a space for code, a read-only space, etc. 47 | 48 | Thus, someone wishing to construct a new collector based entirely on existing 49 | policies may be able to do so in MMTk by simply writing a new plan, which is 50 | what this tutorial covers. 
51 | 52 | On the other hand, someone wishing to introduce an entirely new garbage 53 | collection policy (such as Immix, for example), would need to first create 54 | a policy which specifies that algorithm, before creating a plan which defines 55 | how the GC algorithm fits together and utilizes that policy. -------------------------------------------------------------------------------- /docs/tutorial/src/intro/what_is_mmtk.md: -------------------------------------------------------------------------------- 1 | # What *is* MMTk? 2 | 3 | The Memory Management Toolkit (MMTk) is a framework for designing and 4 | implementing memory managers. It has a runtime-neutral core (mmtk-core) 5 | written in Rust, and bindings that allow it to work with OpenJDK, V8, 6 | and JikesRVM, with more bindings currently in development. 7 | MMTk was originally written in Java as part of the JikesRVM Java runtime. 8 | The current version is similar in its purpose, but was made to be 9 | very flexible with runtime and able to be ported to many different VMs. 10 | 11 | The principal idea of MMTk is that it can be used as a 12 | toolkit, allowing new GC algorithms to be rapidly developed using 13 | common components. It also allows different GC algorithms to be 14 | compared on an apples-to-apples basis, since they share common mechanisms. -------------------------------------------------------------------------------- /docs/tutorial/src/intro/what_will_this_tutorial_cover.md: -------------------------------------------------------------------------------- 1 | # What will this tutorial cover? 2 | 3 | This tutorial is intended to get you comfortable constructing new plans in 4 | MMTk. 5 | 6 | You will first be guided through building a semispace collector. After that, 7 | you will extend this collector to be a generational collector, to further 8 | familiarise you with different concepts in MMTk. 
There will also be 9 | questions and exercises at various points in the tutorial, intended to 10 | encourage you to think about what the code is doing, increase your general 11 | understanding of MMTk, and motivate further research. 12 | 13 | Where possible, there will be links to finished, functioning code after each 14 | section so that you can check that your code is correct. Note, however, that 15 | these will be full collectors. Therefore, there may be some differences between 16 | these files and your collector due to your position in the tutorial. By the end 17 | of each major section, your code should be functionally identical to the 18 | finished code provided. 19 | 20 | Furthermore, please note that this code may not be identical to the main code 21 | of the MMTk. It is deliberately kept separate as a simpler stable 22 | version. Make sure to refer to the 23 | [provided tutorial code](https://github.com/mmtk/mmtk-core/tree/master/docs/tutorial/code) 24 | and not the main collector code during the tutorial. -------------------------------------------------------------------------------- /docs/tutorial/src/mygc/gencopy.md: -------------------------------------------------------------------------------- 1 | # Building a generational copying collector 2 | 3 | > Note: This part is work in progress. 4 | 5 | ## What is a generational collector? 6 | 7 | The *weak generational hypothesis* states that most of the objects allocated 8 | to a heap after one collection will die before the next collection. 9 | Therefore, it is worth separating out 'young' and 'old' objects and only 10 | scanning each as needed, to minimise the number of times old live objects are 11 | scanned. New objects are allocated to a 'nursery', and after one collection 12 | they move to the 'mature' space. In `triplespace`, `youngspace` is a 13 | proto-nursery, and the `tospace` and `fromspace` are the mature spaces. 
14 | 15 | This collector fixes one of the major problems with semispace - namely, that 16 | any long-lived objects are repeatedly copied back and forth. By separating 17 | these objects into a separate 'mature' space, the number of full heap 18 | collections needed is greatly reduced. 19 | 20 | This section is currently incomplete. Instructions for building a 21 | generational copying (gencopy) collector will be added in future. 22 | -------------------------------------------------------------------------------- /docs/tutorial/src/mygc/ss/exercise.md: -------------------------------------------------------------------------------- 1 | # Exercise: Adding another copyspace 2 | 3 | Now that you have a working semispace collector, you should be familiar 4 | enough with the code to start writing some yourself. The intention of this 5 | exercise is to reinforce the information from the semispace section, rather 6 | than to create a useful new collector. 7 | 8 | 1. Create a copy of your semispace collector, called `triplespace`. 9 | 2. Add a new copyspace to the collector, called the `youngspace`, with the 10 | following traits: 11 | * New objects are allocated to the youngspace (rather than the fromspace). 12 | * During a collection, live objects in the youngspace are moved to the 13 | tospace. 14 | * Garbage is still collected at the same time for all spaces. 15 | 16 | Triplespace is a sort of generational garbage collector. These collectors 17 | separate out old objects and new objects into separate spaces. Newly 18 | allocated objects should be scanned far more often than old objects, which 19 | minimises the time spent repeatedly re-scanning long-lived objects. 20 | 21 | Of course, this means that the Triplespace is incredibly inefficient for a 22 | generational collector, because the older objects are still being scanned 23 | every collection. It wouldn't be very useful in a real-life scenario. 
The 24 | next thing to do is to make this collector into a more efficient proper 25 | generational collector. 26 | 27 | When you are finished, try running the benchmarks and seeing how the 28 | performance of this collector compares to MyGC. Great work! -------------------------------------------------------------------------------- /docs/tutorial/src/mygc/ss/prefix.md: -------------------------------------------------------------------------------- 1 | # Building a semispace collector 2 | 3 | In a semispace collector, the heap is divided into two equally-sized spaces, 4 | called 'semispaces'. One of these is defined as a 'fromspace', and the other 5 | a 'tospace'. The allocator allocates to the tospace until it is full. 6 | 7 | When the tospace is full, a stop-the-world GC is triggered. The mutator is 8 | paused, and the definitions of the spaces are flipped (the 'tospace' becomes 9 | a 'fromspace', and vice versa). Then, the collector scans each object in what 10 | is now the fromspace. If a live object is found, a copy of it is made in the 11 | tospace. That is to say, live objects are copied *from* the fromspace *to* 12 | the tospace. After every object is scanned, the fromspace is cleared. The GC 13 | finishes, and the mutator is resumed. 14 | -------------------------------------------------------------------------------- /docs/tutorial/src/prefix.md: -------------------------------------------------------------------------------- 1 | # MMTk Tutorial 2 | 3 | In this tutorial, you will build multiple garbage collectors from 4 | scratch using MMTk. 5 | You will start with an incredibly simple 'collector' called NoGC, 6 | and through a series of additions and refinements end up with a 7 | generational copying garbage collector. 8 | 9 | This tutorial is aimed at GC implementors who would like to implement 10 | new GC algorithms/plans with MMTk. 
If you are a language implementor 11 | interested in *porting* your runtime to MMTk, you should refer to the 12 | [porting guide](https://www.mmtk.io/mmtk-core/portingguide/) instead. 13 | 14 | This tutorial is a work in progress. Some sections may be rough, and others may 15 | be missing information (especially about import statements). If something is 16 | missing or inaccurate, refer to the relevant completed garbage collector if 17 | possible. Please also raise an issue, or create a pull request addressing 18 | the problem. -------------------------------------------------------------------------------- /docs/tutorial/src/preliminaries/set_up.md: -------------------------------------------------------------------------------- 1 | # Set up MMTk and OpenJDK 2 | 3 | This tutorial can be completed with any binding. However, for the sake of 4 | simplicity, only the setup for the OpenJDK binding will be described in detail 5 | here. If you would like to use another binding, you will need to follow the 6 | README files in their respective repositories 7 | ([JikesRVM](https://github.com/mmtk/mmtk-jikesrvm), 8 | [V8](https://github.com/mmtk/mmtk-v8)) 9 | to set them up, and find appropriate benchmarks for testing. 10 | Also, while it may be useful to fork the relevant repositories to your own 11 | account, it is not required for this tutorial. 12 | 13 | First, set up OpenJDK, MMTk, and the binding: 14 | 1. Clone the OpenJDK binding and mmtk-core repository, and install any relevant 15 | dependencies by following the instructions in the 16 | [OpenJDK binding repository](https://github.com/mmtk/mmtk-openjdk/blob/master/README.md). 17 | 2. Ensure you can build OpenJDK according to the instructions in the READMEs of 18 | [the mmtk-core repository](https://github.com/mmtk/mmtk-core/blob/master/README.md) and the 19 | [OpenJDK binding repository](https://github.com/mmtk/mmtk-openjdk/blob/master/README.md). 20 | * Use the `slowdebug` option when building the OpenJDK binding. 
This is the 21 | fastest debug variant to build, and allows for easier debugging and better 22 | testing. The rest of the tutorial will assume you are using `slowdebug`. 23 | * You can use the env var `MMTK_PLAN=[PlanName]` to choose a plan to use at run-time. 24 | The plans that are relevant to this tutorial are `NoGC` and `SemiSpace`. 25 | * Make sure you *only* use the env var `MMTK_PLAN=[PlanName]` when you run the generated `java` binary 26 | (`./build/linux-x86_64-normal-server-$DEBUG_LEVEL/jdk/bin/java`). Do not set `MMTK_PLAN` 27 | when you build OpenJDK (if you already have set the env var `MMTK_PLAN`, you would need to do 28 | `export MMTK_PLAN=` or `unset MMTK_PLAN` to clear the env var before building). 29 | 30 | The MMTk OpenJDK binding ships with a fixed version of mmtk-core, specified in `mmtk-openjdk/mmtk/Cargo.toml`. 31 | For local development, you would need to build the binding with a local copy of the mmtk-core repo that you 32 | can modify. You would need to point the mmtk dependency to a local path. 33 | 1. Find `mmtk` under `[dependencies]` in `mmtk-openjdk/mmtk/Cargo.toml`. It should point 34 | to the mmtk-core git path with a specific revision. 35 | 2. Comment out the line for the git dependency, and uncomment the following line for a local dependency. 36 | 3. The local dependency points to `mmtk-openjdk/repos/mmtk-core` by default. If your local mmtk-core path is 37 | not `mmtk-openjdk/repos/mmtk-core`, modify the path to point to your local mmtk-core. 38 | 4. Rebuild the OpenJDK binding. 
39 | -------------------------------------------------------------------------------- /examples/allocation_benchmark.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "mmtk.h" 5 | 6 | int main() { 7 | volatile uint64_t * tmp; 8 | mmtk_init(1024*1024*1024); 9 | MMTk_Mutator handle = mmtk_bind_mutator(0); 10 | 11 | for (int i=0; i<1024*1024*100; i++) { 12 | tmp = mmtk_alloc(handle, 8, 1, 0, 0); 13 | #ifdef STORE 14 | *tmp = 42; 15 | #endif 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /examples/bench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Using Rust bump pointer allocator" 4 | clang -O3 -lmmtk -L../target/release -o bench-exe -I../api ./allocation_benchmark.c 5 | export LD_LIBRARY_PATH=../target/release 6 | time ./bench-exe 7 | 8 | echo "Using C bump pointer allocator" 9 | clang -O3 -shared -fPIC -o ./libmmtk.so ./reference_bump_allocator.c 10 | clang -O3 -lmmtk -L. -o bench-exe -I../api ./allocation_benchmark.c 11 | export LD_LIBRARY_PATH=. 12 | time ./bench-exe 13 | 14 | echo "Using Rust bump pointer allocator with storing" 15 | clang -O3 -lmmtk -L../target/release -o bench-exe -D STORE -I../api ./allocation_benchmark.c 16 | export LD_LIBRARY_PATH=../target/release 17 | time ./bench-exe 18 | 19 | echo "Using C bump pointer allocator with storing" 20 | clang -O3 -shared -fPIC -o ./libmmtk.so ./reference_bump_allocator.c 21 | clang -O3 -lmmtk -L. -o bench-exe -D STORE -I../api ./allocation_benchmark.c 22 | export LD_LIBRARY_PATH=. 
23 | time ./bench-exe 24 | -------------------------------------------------------------------------------- /examples/build.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import platform 4 | import subprocess 5 | import shutil 6 | import os 7 | import sys 8 | 9 | MMTk_ROOT = os.path.join(__file__, "..", "..") 10 | 11 | PLANS = [] 12 | 13 | # Find all plans from options.rs 14 | options = "" 15 | with open(os.path.abspath(os.path.join(MMTk_ROOT, "src", "util", "options.rs")), 'r') as file: 16 | options = file.read().replace('\n', '') 17 | import re 18 | search = re.search("enum PlanSelector \{([^\}]*)\}", options) 19 | if search: 20 | raw_plans = search.group(1) 21 | # Python split() results in an empty string as the last element. Use filter() to remove it. 22 | PLANS = list(filter(None, [x.strip() for x in raw_plans.split(",")])) 23 | else: 24 | print("cannot find PlanSelector in options.rs") 25 | sys.exit(1) 26 | 27 | os.chdir(os.path.abspath(MMTk_ROOT)) 28 | 29 | extra_features = "" 30 | if len(sys.argv) > 1: 31 | extra_features = sys.argv[1] 32 | 33 | 34 | def exec_and_redirect(args, env=None): 35 | print("[exec_and_redirect] {} (env = {})".format(args, env)) 36 | p = subprocess.Popen(args, 37 | env=env) 38 | p.wait() 39 | if p.returncode != 0: 40 | exit(p.returncode) 41 | 42 | # Get the active toolchain, something like this: stable-x86_64-unknown-linux-gnu 43 | active_toolchain = str(subprocess.check_output(["rustup", "show", "active-toolchain"]).decode('utf-8')).split(' ')[0] 44 | print("Active rust toolchain: " + active_toolchain) 45 | if "x86_64" in active_toolchain: 46 | m32 = False 47 | elif "i686" in active_toolchain: 48 | m32 = True 49 | else: 50 | print("Unknown toolchain: " + active_toolchain) 51 | sys.exit(1) 52 | 53 | system = platform.system() 54 | assert system == "Darwin" or system == "Linux" 55 | 56 | SUFFIX = ".so" 57 | if system == "Darwin": 58 | SUFFIX = ".dylib" 59 | elif 
system == "Linux": 60 | SUFFIX = ".so" 61 | 62 | LIBRARY_PATH = "LD_LIBRARY_PATH" 63 | if system == "Darwin": 64 | LIBRARY_PATH = "DYLD_LIBRARY_PATH" 65 | elif system == "Linux": 66 | LIBRARY_PATH = "LD_LIBRARY_PATH" 67 | 68 | vmbinding = "vmbindings/dummyvm" 69 | 70 | cmd = [] 71 | cmd.append("cargo") 72 | cmd.extend([ 73 | "build", 74 | "--manifest-path", 75 | "vmbindings/dummyvm/Cargo.toml", 76 | "--features", " ".join(extra_features) 77 | ]) 78 | 79 | exec_and_redirect(cmd) 80 | exec_and_redirect(cmd + ["--release"]) 81 | shutil.copyfile("{}/target/release/libmmtk_dummyvm{}".format(vmbinding, SUFFIX), 82 | "./libmmtk{}".format(SUFFIX)) 83 | 84 | cmd = [ 85 | "gcc", 86 | "./examples/main.c", 87 | "-lmmtk", 88 | "-L.", 89 | "-I{}/api".format(vmbinding), 90 | "-O3", 91 | "-o", 92 | "test_mmtk", 93 | ] 94 | if m32: 95 | cmd.append("-m32") 96 | 97 | exec_and_redirect(cmd) 98 | 99 | for plan in PLANS: 100 | exec_and_redirect(["./test_mmtk"], env={LIBRARY_PATH: ".", "MMTK_PLAN": plan}) 101 | 102 | os.remove("./test_mmtk") 103 | -------------------------------------------------------------------------------- /examples/main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "mmtk.h" 3 | 4 | int main(int argc, char* argv[]){ 5 | mmtk_init(1024*1024); 6 | 7 | MMTk_Mutator handle = mmtk_bind_mutator(0); 8 | 9 | for (int i=0;i<4;i++){ 10 | int arr_size = 10000; 11 | int* my_arr = mmtk_alloc(handle, sizeof(int)*arr_size, 8, 0, 0); 12 | if (!my_arr){ 13 | printf("OOM\n"); 14 | break; 15 | } 16 | for (int j=0;j 2 | #include 3 | #include 4 | #include "../api/mmtk.h" 5 | 6 | typedef struct { 7 | void* heap_start; 8 | void* heap_end; 9 | void* heap_cursor; 10 | } Space; 11 | 12 | Space IMMORTAL_SPACE; 13 | 14 | size_t align_up (size_t addr, size_t align) { 15 | return (addr + align - 1) & ~(align - 1); 16 | } 17 | 18 | extern void gc_init(size_t heap_size) { 19 | size_t SPACE_ALIGN = 1 << 19; 20 | void* alloced = 
mmap(NULL, heap_size + SPACE_ALIGN, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 21 | if (!alloced) { 22 | printf("Unable to allocate memory\n"); 23 | exit(1); 24 | } 25 | IMMORTAL_SPACE.heap_start = (void*) align_up((size_t) alloced, SPACE_ALIGN); 26 | IMMORTAL_SPACE.heap_end = (void*) ((size_t) IMMORTAL_SPACE.heap_start + heap_size); 27 | IMMORTAL_SPACE.heap_cursor = IMMORTAL_SPACE.heap_start; 28 | } 29 | 30 | extern MMTk_Mutator bind_mutator(void *tls) { 31 | return NULL; 32 | } 33 | 34 | extern void* align_allocation(void* region, size_t align, size_t offset) { 35 | ssize_t region_signed = (ssize_t) region; 36 | 37 | ssize_t mask = (ssize_t) (align - 1); 38 | ssize_t neg_off = -offset; 39 | ssize_t delta = (neg_off - region_signed) & mask; 40 | 41 | return (void*) ((ssize_t)region + delta); 42 | } 43 | 44 | extern void* alloc(MMTk_Mutator mutator, size_t size, 45 | size_t align, size_t offset, int allocator) { 46 | 47 | void* result = align_allocation(IMMORTAL_SPACE.heap_cursor, align, offset); 48 | void* new_cursor = (void*)((size_t) result + size); 49 | if (new_cursor > IMMORTAL_SPACE.heap_end) { 50 | return NULL; 51 | } 52 | IMMORTAL_SPACE.heap_cursor = new_cursor; 53 | return (void*) result; 54 | } 55 | 56 | extern void* alloc_slow(MMTk_Mutator mutator, size_t size, 57 | size_t align, size_t offset, int allocator) { 58 | 59 | perror("Not implemented\n"); 60 | exit(1); 61 | return NULL; 62 | } 63 | 64 | void* mmtk_malloc(size_t size) { 65 | return alloc(NULL, size, 1, 0, 0); 66 | } 67 | 68 | void mmtk_free(void* ptr) { 69 | return; 70 | } -------------------------------------------------------------------------------- /macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mmtk-macros" 3 | # the macro crate uses the same version as mmtk-core 4 | version = "0.18.0" 5 | edition = "2021" 6 | license = "MIT OR Apache-2.0" 7 | description = "MMTk macros provides 
procedural macros used by mmtk-core." 8 | homepage = "https://www.mmtk.io" 9 | repository = "https://github.com/mmtk/mmtk-core/tree/master/macros" 10 | 11 | [lib] 12 | proc-macro = true 13 | 14 | [dependencies] 15 | proc-macro2 = "1.0.37" 16 | syn = { version = "1.0.91", features = ["extra-traits"] } 17 | quote = "1.0.18" 18 | proc-macro-error = "1.0.4" 19 | -------------------------------------------------------------------------------- /macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | extern crate syn; 3 | extern crate proc_macro_error; 4 | extern crate quote; 5 | 6 | use proc_macro::TokenStream; 7 | use proc_macro_error::proc_macro_error; 8 | use syn::{parse_macro_input}; 9 | use proc_macro_error::abort_call_site; 10 | use quote::quote; 11 | use syn::DeriveInput; 12 | 13 | mod util; 14 | mod plan_trace_object_impl; 15 | 16 | const DEBUG_MACRO_OUTPUT: bool = false; 17 | 18 | /// Generally a plan needs to add these attributes in order for the macro to work. The macro will 19 | /// generate an implementation of `PlanTraceObject` for the plan. With `PlanTraceObject`, the plan use 20 | /// `PlanProcessEdges` for GC tracing. The attributes only affects code generation in the macro, thus 21 | /// only affects the generated `PlanTraceObject` implementation. 22 | /// * add `#[derive(PlanTraceObject)]` to the plan struct. 23 | /// * add `#[trace]` to each space field the plan struct has. If the policy is a copying policy, 24 | /// it needs to further specify the copy semantic (`#[trace(CopySemantics::X)]`) 25 | /// * add `#[fallback_trace]` to the parent plan if the plan is composed with other plans (or parent plans). 26 | /// For example, `GenImmix` is composed with `Gen`, `Gen` is composed with `CommonPlan`, `CommonPlan` is composed 27 | /// with `BasePlan`. 28 | /// * add `#[post_scan]` to any space field that has some policy-specific post_scan_object(). 
For objects in those spaces, 29 | /// `post_scan_object()` in the policy will be called after `VM::VMScanning::scan_object()`. 30 | #[proc_macro_error] 31 | #[proc_macro_derive(PlanTraceObject, attributes(trace, post_scan, fallback_trace))] 32 | pub fn derive_plan_trace_object(input: TokenStream) -> TokenStream { 33 | let input = parse_macro_input!(input as DeriveInput); 34 | let ident = input.ident; 35 | let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); 36 | 37 | let output = if let syn::Data::Struct(syn::DataStruct { 38 | fields: syn::Fields::Named(ref fields), 39 | .. 40 | }) = input.data { 41 | let spaces = util::get_fields_with_attribute(fields, "trace"); 42 | let post_scan_spaces = util::get_fields_with_attribute(fields, "post_scan"); 43 | let fallback = util::get_unique_field_with_attribute(fields, "fallback_trace"); 44 | 45 | let trace_object_function = plan_trace_object_impl::generate_trace_object(&spaces, &fallback, &ty_generics); 46 | let post_scan_object_function = plan_trace_object_impl::generate_post_scan_object(&post_scan_spaces, &fallback, &ty_generics); 47 | let may_move_objects_function = plan_trace_object_impl::generate_may_move_objects(&spaces, &fallback, &ty_generics); 48 | quote!{ 49 | impl #impl_generics crate::plan::PlanTraceObject #ty_generics for #ident #ty_generics #where_clause { 50 | #trace_object_function 51 | 52 | #post_scan_object_function 53 | 54 | #may_move_objects_function 55 | } 56 | } 57 | } else { 58 | abort_call_site!("`#[derive(PlanTraceObject)]` only supports structs with named fields.") 59 | }; 60 | 61 | // Debug the output - use the following code to debug the generated code (when cargo exapand is not working) 62 | if DEBUG_MACRO_OUTPUT { 63 | use quote::ToTokens; 64 | println!("{}", output.to_token_stream()); 65 | } 66 | 67 | output.into() 68 | } 69 | -------------------------------------------------------------------------------- /macros/src/util.rs: 
-------------------------------------------------------------------------------- 1 | use proc_macro_error::abort; 2 | use syn::{spanned::Spanned, Attribute, Field, FieldsNamed}; 3 | 4 | pub fn get_field_attribute<'f>(field: &'f Field, attr_name: &str) -> Option<&'f Attribute> { 5 | let attrs = field 6 | .attrs 7 | .iter() 8 | .filter(|a| a.path.is_ident(attr_name)) 9 | .collect::>(); 10 | if attrs.len() > 1 { 11 | let second_attr = attrs.get(1).unwrap(); 12 | abort! { second_attr.path.span(), "Duplicated attribute: #[{}]", attr_name } 13 | }; 14 | 15 | attrs.get(0).cloned() 16 | } 17 | 18 | pub fn get_fields_with_attribute<'f>(fields: &'f FieldsNamed, attr_name: &str) -> Vec<&'f Field> { 19 | fields 20 | .named 21 | .iter() 22 | .filter(|f| get_field_attribute(f, attr_name).is_some()) 23 | .collect::>() 24 | } 25 | 26 | pub fn get_unique_field_with_attribute<'f>( 27 | fields: &'f FieldsNamed, 28 | attr_name: &str, 29 | ) -> Option<&'f Field> { 30 | let mut result = None; 31 | 32 | for field in fields.named.iter() { 33 | if let Some(attr) = get_field_attribute(field, attr_name) { 34 | if result.is_none() { 35 | result = Some(field); 36 | continue; 37 | } else { 38 | let span = attr.path.span(); 39 | abort! { span, "At most one field in a struct can have the #[{}] attribute.", attr_name }; 40 | } 41 | } 42 | } 43 | 44 | result 45 | } 46 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.66.1 2 | -------------------------------------------------------------------------------- /src/build_info.rs: -------------------------------------------------------------------------------- 1 | mod raw { 2 | // This includes a full list of the constants in built.rs generated by the 'built' crate. 
3 | // https://docs.rs/built/latest/built/index.html 4 | include!(concat!(env!("OUT_DIR"), "/built.rs")); 5 | } 6 | 7 | /// MMTk crate version such as 0.14.0 8 | pub const MMTK_PKG_VERSION: &str = raw::PKG_VERSION; 9 | 10 | /// Comma separated features enabled for this build 11 | pub const MMTK_FEATURES: &str = raw::FEATURES_STR; 12 | 13 | lazy_static! { 14 | /// Git version as short commit hash, such as a96e8f9, or a96e8f9-dirty, or unknown-git-version if MMTk 15 | /// is not built from a git repo. 16 | pub static ref MMTK_GIT_VERSION: &'static str = &MMTK_GIT_VERSION_STRING; 17 | // Owned string 18 | static ref MMTK_GIT_VERSION_STRING: String = match (raw::GIT_COMMIT_HASH, raw::GIT_DIRTY) { 19 | (Some(hash), Some(dirty)) => format!("{}{}", hash.split_at(7).0, if dirty { "-dirty" } else { "" }), 20 | (Some(hash), None) => format!("{}{}", hash.split_at(7).0, "-?"), 21 | _ => "unknown-git-version".to_string(), 22 | }; 23 | 24 | /// Full build info, including MMTk's name, version, git, and features in the build, 25 | /// such as MMTk 0.14.0 (43e0ce8-dirty, DEFAULT, EXTREME_ASSERTIONS) 26 | pub static ref MMTK_FULL_BUILD_INFO: &'static str = &MMTK_FULL_BUILD_INFO_STRING; 27 | // Owned string 28 | static ref MMTK_FULL_BUILD_INFO_STRING: String = format!("MMTk {} ({}, {})", MMTK_PKG_VERSION, *MMTK_GIT_VERSION, MMTK_FEATURES); 29 | } 30 | 31 | #[cfg(test)] 32 | mod tests { 33 | #[test] 34 | fn test_git_version() { 35 | println!("Git version: {}", *crate::build_info::MMTK_GIT_VERSION); 36 | } 37 | 38 | #[test] 39 | fn test_full_build_version() { 40 | println!( 41 | "Full build version: {}", 42 | *crate::build_info::MMTK_FULL_BUILD_INFO 43 | ); 44 | } 45 | 46 | #[test] 47 | fn test_pkg_version() { 48 | println!("Package version: {}", crate::build_info::MMTK_PKG_VERSION); 49 | } 50 | 51 | #[test] 52 | fn test_features() { 53 | println!("Features: {}", crate::build_info::MMTK_FEATURES); 54 | } 55 | } 56 | 
-------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // TODO: We should fix missing docs for public items and turn this on (Issue #309). 2 | // #![deny(missing_docs)] 3 | 4 | // Allow this for now. Clippy suggests we should use Sft, Mmtk, rather than SFT and MMTK. 5 | // According to its documentation (https://rust-lang.github.io/rust-clippy/master/index.html#upper_case_acronyms), 6 | // with upper-case-acronyms-aggressive turned on, it should also warn us about SFTMap, VMBinding, GCWorker. 7 | // However, it seems clippy does not catch all these patterns at the moment. So it would be hard for us to 8 | // find all the patterns and consistently change all of them. I think it would be a better idea to just allow this. 9 | // We may reconsider this in the future. Plus, using upper case letters for acronyms does not sound a big issue 10 | // to me - considering it will break our API and all the efforts for all the developers to make the change, it may 11 | // not worth it. 12 | #![allow(clippy::upper_case_acronyms)] 13 | 14 | //! Memory Management ToolKit (MMTk) is a portable and high performance memory manager 15 | //! that includes various garbage collection algorithms and provides clean and efficient 16 | //! interfaces to cooperate with language implementations. MMTk features highly modular 17 | //! and highly reusable designs. It includes components such as allocators, spaces and 18 | //! work packets that GC implementers can choose from to compose their own GC plan easily. 19 | //! 20 | //! Logically, this crate includes these major parts: 21 | //! * GC components: 22 | //! * [Allocators](util/alloc/allocator/trait.Allocator.html): handlers of allocation requests which allocate objects to the bound space. 23 | //! * [Policies](policy/space/trait.Space.html): definitions of semantics and behaviors for memory regions. 24 | //! 
Each space is an instance of a policy, and takes up a unique proportion of the heap. 25 | //! * [Work packets](scheduler/work/trait.GCWork.html): units of GC work scheduled by the MMTk's scheduler. 26 | //! * [GC plans](plan/global/trait.Plan.html): GC algorithms composed from components. 27 | //! * [Heap implementations](util/heap/index.html): the underlying implementations of memory resources that support spaces. 28 | //! * [Scheduler](scheduler/scheduler/struct.GCWorkScheduler.html): the MMTk scheduler to allow flexible and parallel execution of GC work. 29 | //! * Interfaces: bi-directional interfaces between MMTk and language implementations 30 | //! i.e. [the memory manager API](memory_manager/index.html) that allows a language's memory manager to use MMTk 31 | //! and [the VMBinding trait](vm/trait.VMBinding.html) that allows MMTk to call the language implementation. 32 | 33 | extern crate libc; 34 | extern crate strum_macros; 35 | #[macro_use] 36 | extern crate lazy_static; 37 | #[macro_use] 38 | extern crate log; 39 | #[cfg(target = "x86_64-unknown-linux-gnu")] 40 | extern crate atomic; 41 | extern crate atomic_traits; 42 | extern crate crossbeam; 43 | extern crate num_cpus; 44 | #[macro_use] 45 | extern crate downcast_rs; 46 | #[macro_use] 47 | extern crate static_assertions; 48 | #[macro_use] 49 | extern crate probe; 50 | 51 | mod mmtk; 52 | pub use mmtk::MMTKBuilder; 53 | pub(crate) use mmtk::MMAPPER; 54 | pub use mmtk::MMTK; 55 | 56 | mod policy; 57 | 58 | pub mod build_info; 59 | pub mod memory_manager; 60 | pub mod plan; 61 | pub mod scheduler; 62 | pub mod util; 63 | pub mod vm; 64 | 65 | pub use crate::plan::{ 66 | AllocationSemantics, BarrierSelector, Mutator, MutatorContext, ObjectQueue, Plan, 67 | }; 68 | pub use crate::policy::copy_context::PolicyCopyContext; 69 | -------------------------------------------------------------------------------- /src/plan/gc_requester.rs: 
-------------------------------------------------------------------------------- 1 | use crate::vm::VMBinding; 2 | use std::marker::PhantomData; 3 | use std::sync::atomic::{AtomicBool, Ordering}; 4 | use std::sync::{Condvar, Mutex}; 5 | 6 | struct RequestSync { 7 | request_count: isize, 8 | last_request_count: isize, 9 | } 10 | 11 | /// GC requester. This object allows other threads to request (trigger) GC, 12 | /// and the GC coordinator thread waits for GC requests using this object. 13 | pub struct GCRequester { 14 | request_sync: Mutex, 15 | request_condvar: Condvar, 16 | request_flag: AtomicBool, 17 | phantom: PhantomData, 18 | } 19 | 20 | // Clippy says we need this... 21 | impl Default for GCRequester { 22 | fn default() -> Self { 23 | Self::new() 24 | } 25 | } 26 | 27 | impl GCRequester { 28 | pub fn new() -> Self { 29 | GCRequester { 30 | request_sync: Mutex::new(RequestSync { 31 | request_count: 0, 32 | last_request_count: -1, 33 | }), 34 | request_condvar: Condvar::new(), 35 | request_flag: AtomicBool::new(false), 36 | phantom: PhantomData, 37 | } 38 | } 39 | 40 | pub fn request(&self) { 41 | if self.request_flag.load(Ordering::Relaxed) { 42 | return; 43 | } 44 | 45 | let mut guard = self.request_sync.lock().unwrap(); 46 | if !self.request_flag.load(Ordering::Relaxed) { 47 | self.request_flag.store(true, Ordering::Relaxed); 48 | guard.request_count += 1; 49 | self.request_condvar.notify_all(); 50 | } 51 | } 52 | 53 | pub fn clear_request(&self) { 54 | let guard = self.request_sync.lock().unwrap(); 55 | self.request_flag.store(false, Ordering::Relaxed); 56 | drop(guard); 57 | } 58 | 59 | pub fn wait_for_request(&self) { 60 | let mut guard = self.request_sync.lock().unwrap(); 61 | guard.last_request_count += 1; 62 | while guard.last_request_count == guard.request_count { 63 | guard = self.request_condvar.wait(guard).unwrap(); 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- 
/src/plan/generational/copying/gc_work.rs: -------------------------------------------------------------------------------- 1 | use super::global::GenCopy; 2 | use crate::plan::generational::gc_work::GenNurseryProcessEdges; 3 | use crate::vm::*; 4 | 5 | use crate::policy::gc_work::DEFAULT_TRACE; 6 | use crate::scheduler::gc_work::PlanProcessEdges; 7 | 8 | pub struct GenCopyNurseryGCWorkContext(std::marker::PhantomData); 9 | impl crate::scheduler::GCWorkContext for GenCopyNurseryGCWorkContext { 10 | type VM = VM; 11 | type PlanType = GenCopy; 12 | type ProcessEdgesWorkType = GenNurseryProcessEdges; 13 | } 14 | 15 | pub struct GenCopyGCWorkContext(std::marker::PhantomData); 16 | impl crate::scheduler::GCWorkContext for GenCopyGCWorkContext { 17 | type VM = VM; 18 | type PlanType = GenCopy; 19 | type ProcessEdgesWorkType = PlanProcessEdges, DEFAULT_TRACE>; 20 | } 21 | -------------------------------------------------------------------------------- /src/plan/generational/copying/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Plan: generational copying 2 | 3 | pub(in crate::plan) mod gc_work; 4 | pub(in crate::plan) mod global; 5 | pub(in crate::plan) mod mutator; 6 | 7 | pub use self::global::GenCopy; 8 | 9 | pub use self::global::GENCOPY_CONSTRAINTS; 10 | -------------------------------------------------------------------------------- /src/plan/generational/copying/mutator.rs: -------------------------------------------------------------------------------- 1 | pub(super) use super::super::ALLOCATOR_MAPPING; 2 | use super::GenCopy; 3 | use crate::plan::barriers::ObjectBarrier; 4 | use crate::plan::generational::barrier::GenObjectBarrierSemantics; 5 | use crate::plan::generational::create_gen_space_mapping; 6 | use crate::plan::mutator_context::Mutator; 7 | use crate::plan::mutator_context::MutatorConfig; 8 | use crate::plan::AllocationSemantics; 9 | use crate::util::alloc::allocators::Allocators; 10 | use crate::util::alloc::BumpAllocator; 11 | use crate::util::{VMMutatorThread, VMWorkerThread}; 12 | use crate::vm::VMBinding; 13 | use crate::MMTK; 14 | 15 | pub fn gencopy_mutator_prepare(_mutator: &mut Mutator, _tls: VMWorkerThread) { 16 | // Do nothing 17 | } 18 | 19 | pub fn gencopy_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { 20 | // reset nursery allocator 21 | let bump_allocator = unsafe { 22 | mutator 23 | .allocators 24 | .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) 25 | } 26 | .downcast_mut::>() 27 | .unwrap(); 28 | bump_allocator.reset(); 29 | } 30 | 31 | pub fn create_gencopy_mutator( 32 | mutator_tls: VMMutatorThread, 33 | mmtk: &'static MMTK, 34 | ) -> Mutator { 35 | let gencopy = mmtk.plan.downcast_ref::>().unwrap(); 36 | let config = MutatorConfig { 37 | allocator_mapping: &ALLOCATOR_MAPPING, 38 | space_mapping: Box::new(create_gen_space_mapping(&*mmtk.plan, &gencopy.gen.nursery)), 39 | prepare_func: &gencopy_mutator_prepare, 40 | release_func: &gencopy_mutator_release, 41 | }; 42 | 43 | Mutator { 44 | allocators: 
Allocators::::new(mutator_tls, &*mmtk.plan, &config.space_mapping), 45 | barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new( 46 | mmtk, gencopy, 47 | ))), 48 | mutator_tls, 49 | config, 50 | plan: gencopy, 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/plan/generational/immix/gc_work.rs: -------------------------------------------------------------------------------- 1 | use super::global::GenImmix; 2 | use crate::plan::generational::gc_work::GenNurseryProcessEdges; 3 | use crate::policy::gc_work::TraceKind; 4 | use crate::scheduler::gc_work::PlanProcessEdges; 5 | use crate::vm::VMBinding; 6 | 7 | pub struct GenImmixNurseryGCWorkContext(std::marker::PhantomData); 8 | impl crate::scheduler::GCWorkContext for GenImmixNurseryGCWorkContext { 9 | type VM = VM; 10 | type PlanType = GenImmix; 11 | type ProcessEdgesWorkType = GenNurseryProcessEdges; 12 | } 13 | 14 | pub(super) struct GenImmixMatureGCWorkContext( 15 | std::marker::PhantomData, 16 | ); 17 | impl crate::scheduler::GCWorkContext 18 | for GenImmixMatureGCWorkContext 19 | { 20 | type VM = VM; 21 | type PlanType = GenImmix; 22 | type ProcessEdgesWorkType = PlanProcessEdges, KIND>; 23 | } 24 | -------------------------------------------------------------------------------- /src/plan/generational/immix/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Plan: generational immix 2 | 3 | pub(in crate::plan) mod gc_work; 4 | pub(in crate::plan) mod global; 5 | pub(in crate::plan) mod mutator; 6 | 7 | pub use self::global::GenImmix; 8 | 9 | pub use self::global::GENIMMIX_CONSTRAINTS; 10 | -------------------------------------------------------------------------------- /src/plan/generational/immix/mutator.rs: -------------------------------------------------------------------------------- 1 | pub(super) use super::super::ALLOCATOR_MAPPING; 2 | use crate::plan::barriers::ObjectBarrier; 3 | use crate::plan::generational::barrier::GenObjectBarrierSemantics; 4 | use crate::plan::generational::create_gen_space_mapping; 5 | use crate::plan::generational::immix::GenImmix; 6 | use crate::plan::mutator_context::Mutator; 7 | use crate::plan::mutator_context::MutatorConfig; 8 | use crate::plan::AllocationSemantics; 9 | use crate::util::alloc::allocators::Allocators; 10 | use crate::util::alloc::BumpAllocator; 11 | use crate::util::{VMMutatorThread, VMWorkerThread}; 12 | use crate::vm::VMBinding; 13 | use crate::MMTK; 14 | 15 | pub fn genimmix_mutator_prepare(_mutator: &mut Mutator, _tls: VMWorkerThread) {} 16 | 17 | pub fn genimmix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { 18 | // reset nursery allocator 19 | let bump_allocator = unsafe { 20 | mutator 21 | .allocators 22 | .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) 23 | } 24 | .downcast_mut::>() 25 | .unwrap(); 26 | bump_allocator.reset(); 27 | } 28 | 29 | pub fn create_genimmix_mutator( 30 | mutator_tls: VMMutatorThread, 31 | mmtk: &'static MMTK, 32 | ) -> Mutator { 33 | let genimmix = mmtk.plan.downcast_ref::>().unwrap(); 34 | let config = MutatorConfig { 35 | allocator_mapping: &ALLOCATOR_MAPPING, 36 | space_mapping: Box::new(create_gen_space_mapping(&*mmtk.plan, &genimmix.gen.nursery)), 37 | prepare_func: &genimmix_mutator_prepare, 38 | release_func: &genimmix_mutator_release, 39 | }; 40 | 41 | Mutator { 42 | 
allocators: Allocators::::new(mutator_tls, &*mmtk.plan, &config.space_mapping), 43 | barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new( 44 | mmtk, genimmix, 45 | ))), 46 | mutator_tls, 47 | config, 48 | plan: genimmix, 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/plan/immix/gc_work.rs: -------------------------------------------------------------------------------- 1 | use super::global::Immix; 2 | use crate::policy::gc_work::TraceKind; 3 | use crate::scheduler::gc_work::PlanProcessEdges; 4 | use crate::vm::VMBinding; 5 | 6 | pub(super) struct ImmixGCWorkContext( 7 | std::marker::PhantomData, 8 | ); 9 | impl crate::scheduler::GCWorkContext 10 | for ImmixGCWorkContext 11 | { 12 | type VM = VM; 13 | type PlanType = Immix; 14 | type ProcessEdgesWorkType = PlanProcessEdges, KIND>; 15 | } 16 | -------------------------------------------------------------------------------- /src/plan/immix/mod.rs: -------------------------------------------------------------------------------- 1 | pub(super) mod gc_work; 2 | pub(super) mod global; 3 | pub(super) mod mutator; 4 | 5 | pub use self::global::Immix; 6 | pub use self::global::IMMIX_CONSTRAINTS; 7 | -------------------------------------------------------------------------------- /src/plan/immix/mutator.rs: -------------------------------------------------------------------------------- 1 | use super::Immix; 2 | use crate::plan::mutator_context::create_allocator_mapping; 3 | use crate::plan::mutator_context::create_space_mapping; 4 | use crate::plan::mutator_context::Mutator; 5 | use crate::plan::mutator_context::MutatorConfig; 6 | use crate::plan::mutator_context::ReservedAllocators; 7 | use crate::plan::AllocationSemantics; 8 | use crate::plan::Plan; 9 | use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; 10 | use crate::util::alloc::ImmixAllocator; 11 | use crate::vm::VMBinding; 12 | use crate::{ 13 | plan::barriers::NoBarrier, 14 | 
util::opaque_pointer::{VMMutatorThread, VMWorkerThread}, 15 | }; 16 | use enum_map::EnumMap; 17 | 18 | pub fn immix_mutator_prepare(mutator: &mut Mutator, _tls: VMWorkerThread) { 19 | let immix_allocator = unsafe { 20 | mutator 21 | .allocators 22 | .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) 23 | } 24 | .downcast_mut::>() 25 | .unwrap(); 26 | immix_allocator.reset(); 27 | } 28 | 29 | pub fn immix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { 30 | let immix_allocator = unsafe { 31 | mutator 32 | .allocators 33 | .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) 34 | } 35 | .downcast_mut::>() 36 | .unwrap(); 37 | immix_allocator.reset(); 38 | } 39 | 40 | pub(in crate::plan) const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { 41 | n_immix: 1, 42 | ..ReservedAllocators::DEFAULT 43 | }; 44 | 45 | lazy_static! { 46 | pub static ref ALLOCATOR_MAPPING: EnumMap = { 47 | let mut map = create_allocator_mapping(RESERVED_ALLOCATORS, true); 48 | map[AllocationSemantics::Default] = AllocatorSelector::Immix(0); 49 | map 50 | }; 51 | } 52 | 53 | pub fn create_immix_mutator( 54 | mutator_tls: VMMutatorThread, 55 | plan: &'static dyn Plan, 56 | ) -> Mutator { 57 | let immix = plan.downcast_ref::>().unwrap(); 58 | let config = MutatorConfig { 59 | allocator_mapping: &ALLOCATOR_MAPPING, 60 | space_mapping: Box::new({ 61 | let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, plan); 62 | vec.push((AllocatorSelector::Immix(0), &immix.immix_space)); 63 | vec 64 | }), 65 | prepare_func: &immix_mutator_prepare, 66 | release_func: &immix_mutator_release, 67 | }; 68 | 69 | Mutator { 70 | allocators: Allocators::::new(mutator_tls, plan, &config.space_mapping), 71 | barrier: Box::new(NoBarrier), 72 | mutator_tls, 73 | config, 74 | plan, 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/plan/markcompact/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub(super) mod gc_work; 2 | pub(super) mod global; 3 | pub(super) mod mutator; 4 | 5 | pub use self::global::MarkCompact; 6 | pub use self::global::MARKCOMPACT_CONSTRAINTS; 7 | -------------------------------------------------------------------------------- /src/plan/markcompact/mutator.rs: -------------------------------------------------------------------------------- 1 | use super::MarkCompact; // Add 2 | use crate::plan::barriers::NoBarrier; 3 | use crate::plan::mutator_context::create_allocator_mapping; 4 | use crate::plan::mutator_context::create_space_mapping; 5 | use crate::plan::mutator_context::Mutator; 6 | use crate::plan::mutator_context::MutatorConfig; 7 | use crate::plan::mutator_context::ReservedAllocators; 8 | use crate::plan::AllocationSemantics; 9 | use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; 10 | use crate::util::alloc::MarkCompactAllocator; 11 | use crate::util::opaque_pointer::*; 12 | use crate::vm::VMBinding; 13 | use crate::Plan; 14 | use enum_map::EnumMap; 15 | 16 | const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { 17 | n_mark_compact: 1, 18 | ..ReservedAllocators::DEFAULT 19 | }; 20 | 21 | lazy_static! 
{ 22 | pub static ref ALLOCATOR_MAPPING: EnumMap = { 23 | let mut map = create_allocator_mapping(RESERVED_ALLOCATORS, true); 24 | map[AllocationSemantics::Default] = AllocatorSelector::MarkCompact(0); 25 | map 26 | }; 27 | } 28 | 29 | pub fn create_markcompact_mutator( 30 | mutator_tls: VMMutatorThread, 31 | plan: &'static dyn Plan, 32 | ) -> Mutator { 33 | let markcompact = plan.downcast_ref::>().unwrap(); 34 | let config = MutatorConfig { 35 | allocator_mapping: &ALLOCATOR_MAPPING, 36 | space_mapping: Box::new({ 37 | let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, plan); 38 | vec.push((AllocatorSelector::MarkCompact(0), markcompact.mc_space())); 39 | vec 40 | }), 41 | prepare_func: &markcompact_mutator_prepare, 42 | release_func: &markcompact_mutator_release, 43 | }; 44 | 45 | Mutator { 46 | allocators: Allocators::::new(mutator_tls, plan, &config.space_mapping), 47 | barrier: Box::new(NoBarrier), 48 | mutator_tls, 49 | config, 50 | plan, 51 | } 52 | } 53 | 54 | pub fn markcompact_mutator_prepare( 55 | _mutator: &mut Mutator, 56 | _tls: VMWorkerThread, 57 | ) { 58 | } 59 | 60 | pub fn markcompact_mutator_release( 61 | _mutator: &mut Mutator, 62 | _tls: VMWorkerThread, 63 | ) { 64 | // reset the thread-local allocation bump pointer 65 | let markcompact_allocator = unsafe { 66 | _mutator 67 | .allocators 68 | .get_allocator_mut(_mutator.config.allocator_mapping[AllocationSemantics::Default]) 69 | } 70 | .downcast_mut::>() 71 | .unwrap(); 72 | markcompact_allocator.reset(); 73 | } 74 | -------------------------------------------------------------------------------- /src/plan/marksweep/gc_work.rs: -------------------------------------------------------------------------------- 1 | use super::MarkSweep; 2 | use crate::policy::gc_work::DEFAULT_TRACE; 3 | use crate::scheduler::gc_work::*; 4 | use crate::vm::VMBinding; 5 | 6 | pub struct MSGCWorkContext(std::marker::PhantomData); 7 | impl crate::scheduler::GCWorkContext for MSGCWorkContext { 8 | type VM = 
VM; 9 | type PlanType = MarkSweep; 10 | type ProcessEdgesWorkType = PlanProcessEdges, DEFAULT_TRACE>; 11 | } 12 | -------------------------------------------------------------------------------- /src/plan/marksweep/mod.rs: -------------------------------------------------------------------------------- 1 | //! Plan: marksweep 2 | 3 | mod gc_work; 4 | mod global; 5 | pub mod mutator; 6 | 7 | pub use self::global::MarkSweep; 8 | pub use self::global::MS_CONSTRAINTS; 9 | -------------------------------------------------------------------------------- /src/plan/mod.rs: -------------------------------------------------------------------------------- 1 | //! GC algorithms from the MMTk suite. 2 | //! 3 | //! This module provides various GC plans, each of which implements a GC algorithm. 4 | //! Generally a plan consists of a few parts: 5 | //! * A plan type that implements the [`Plan`](crate::plan::Plan) trait, which defines 6 | //! spaces used in the plan, and their behaviors in GC and page accounting. 7 | //! * A mutator definition, which describes the mapping between allocators and allocation semantics, 8 | //! and the mapping between allocators and spaces. If the plan needs barrier, the barrier definition is 9 | //! also included here. 10 | //! * A constant for [`PlanConstraints`](crate::plan::PlanConstraints), which defines 11 | //! plan-specific constants. 12 | //! * Plan-specific [`GCWork`](crate::scheduler::GCWork), which is scheduled during GC. 13 | //! 14 | //! For more about implementing a plan, it is recommended to read the [MMTk tutorial](/docs/tutorial/Tutorial.md). 
15 | 16 | mod barriers; 17 | pub use barriers::BarrierSelector; 18 | 19 | pub(crate) mod gc_requester; 20 | 21 | mod global; 22 | pub(crate) use global::create_gc_worker_context; 23 | pub(crate) use global::create_mutator; 24 | pub(crate) use global::create_plan; 25 | pub use global::AllocationSemantics; 26 | pub(crate) use global::GcStatus; 27 | pub use global::Plan; 28 | pub(crate) use global::PlanTraceObject; 29 | #[cfg(feature = "vm_space")] // This is used for creating VM space 30 | pub(crate) use global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs}; 31 | 32 | mod mutator_context; 33 | pub use mutator_context::Mutator; 34 | pub use mutator_context::MutatorContext; 35 | 36 | mod plan_constraints; 37 | pub use plan_constraints::PlanConstraints; 38 | pub use plan_constraints::DEFAULT_PLAN_CONSTRAINTS; 39 | 40 | mod tracing; 41 | pub use tracing::{ObjectQueue, ObjectsClosure, VectorObjectQueue, VectorQueue}; 42 | 43 | /// Generational plans (with a copying nursery) 44 | mod generational; 45 | /// Sticky plans (using sticky marks for generational behaviors without a copying nursery) 46 | mod sticky; 47 | 48 | mod immix; 49 | mod markcompact; 50 | mod marksweep; 51 | mod nogc; 52 | mod pageprotect; 53 | mod semispace; 54 | 55 | pub(crate) use generational::global::is_nursery_gc; 56 | pub(crate) use generational::global::GenerationalPlan; 57 | 58 | // Expose plan constraints as public. Though a binding can get them from plan.constraints(), 59 | // it is possible for performance reasons that they want the constraints as constants. 
60 | 61 | pub use generational::copying::GENCOPY_CONSTRAINTS; 62 | pub use generational::immix::GENIMMIX_CONSTRAINTS; 63 | pub use immix::IMMIX_CONSTRAINTS; 64 | pub use markcompact::MARKCOMPACT_CONSTRAINTS; 65 | pub use marksweep::MS_CONSTRAINTS; 66 | pub use nogc::NOGC_CONSTRAINTS; 67 | pub use pageprotect::PP_CONSTRAINTS; 68 | pub use semispace::SS_CONSTRAINTS; 69 | pub use sticky::immix::STICKY_IMMIX_CONSTRAINTS; 70 | -------------------------------------------------------------------------------- /src/plan/nogc/mod.rs: -------------------------------------------------------------------------------- 1 | //! Plan: nogc (allocation-only) 2 | 3 | pub(super) mod global; 4 | pub(super) mod mutator; 5 | 6 | pub use self::global::NoGC; 7 | pub use self::global::NOGC_CONSTRAINTS; 8 | -------------------------------------------------------------------------------- /src/plan/nogc/mutator.rs: -------------------------------------------------------------------------------- 1 | use crate::plan::barriers::NoBarrier; 2 | use crate::plan::mutator_context::Mutator; 3 | use crate::plan::mutator_context::MutatorConfig; 4 | use crate::plan::mutator_context::{ 5 | create_allocator_mapping, create_space_mapping, ReservedAllocators, 6 | }; 7 | use crate::plan::nogc::NoGC; 8 | use crate::plan::AllocationSemantics; 9 | use crate::plan::Plan; 10 | use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; 11 | use crate::util::{VMMutatorThread, VMWorkerThread}; 12 | use crate::vm::VMBinding; 13 | use enum_map::{enum_map, EnumMap}; 14 | 15 | /// We use three bump allocators when enabling nogc_multi_space. 16 | const MULTI_SPACE_RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { 17 | n_bump_pointer: 3, 18 | ..ReservedAllocators::DEFAULT 19 | }; 20 | 21 | lazy_static! { 22 | /// When nogc_multi_space is disabled, force all the allocation go to the default allocator and space. 23 | static ref ALLOCATOR_MAPPING_SINGLE_SPACE: EnumMap = enum_map! 
{ 24 | _ => AllocatorSelector::BumpPointer(0), 25 | }; 26 | pub static ref ALLOCATOR_MAPPING: EnumMap = { 27 | if cfg!(feature = "nogc_multi_space") { 28 | let mut map = create_allocator_mapping(MULTI_SPACE_RESERVED_ALLOCATORS, false); 29 | map[AllocationSemantics::Default] = AllocatorSelector::BumpPointer(0); 30 | map[AllocationSemantics::Immortal] = AllocatorSelector::BumpPointer(1); 31 | map[AllocationSemantics::Los] = AllocatorSelector::BumpPointer(2); 32 | map 33 | } else { 34 | *ALLOCATOR_MAPPING_SINGLE_SPACE 35 | } 36 | }; 37 | } 38 | 39 | pub fn nogc_mutator_noop(_mutator: &mut Mutator, _tls: VMWorkerThread) { 40 | unreachable!(); 41 | } 42 | 43 | pub fn create_nogc_mutator( 44 | mutator_tls: VMMutatorThread, 45 | plan: &'static dyn Plan, 46 | ) -> Mutator { 47 | let config = MutatorConfig { 48 | allocator_mapping: &ALLOCATOR_MAPPING, 49 | space_mapping: Box::new({ 50 | let mut vec = create_space_mapping(MULTI_SPACE_RESERVED_ALLOCATORS, false, plan); 51 | vec.push(( 52 | AllocatorSelector::BumpPointer(0), 53 | &plan.downcast_ref::>().unwrap().nogc_space, 54 | )); 55 | vec.push(( 56 | AllocatorSelector::BumpPointer(1), 57 | &plan.downcast_ref::>().unwrap().immortal, 58 | )); 59 | vec.push(( 60 | AllocatorSelector::BumpPointer(2), 61 | &plan.downcast_ref::>().unwrap().los, 62 | )); 63 | vec 64 | }), 65 | prepare_func: &nogc_mutator_noop, 66 | release_func: &nogc_mutator_noop, 67 | }; 68 | 69 | Mutator { 70 | allocators: Allocators::::new(mutator_tls, plan, &config.space_mapping), 71 | barrier: Box::new(NoBarrier), 72 | mutator_tls, 73 | config, 74 | plan, 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/plan/pageprotect/gc_work.rs: -------------------------------------------------------------------------------- 1 | use super::global::PageProtect; 2 | use crate::policy::gc_work::DEFAULT_TRACE; 3 | use crate::scheduler::gc_work::PlanProcessEdges; 4 | use crate::vm::VMBinding; 5 | 6 | pub struct 
PPGCWorkContext(std::marker::PhantomData); 7 | impl crate::scheduler::GCWorkContext for PPGCWorkContext { 8 | type VM = VM; 9 | type PlanType = PageProtect; 10 | type ProcessEdgesWorkType = PlanProcessEdges, DEFAULT_TRACE>; 11 | } 12 | -------------------------------------------------------------------------------- /src/plan/pageprotect/mod.rs: -------------------------------------------------------------------------------- 1 | //! Plan: pageprotect 2 | //! 3 | //! Allocate each object on a separate page and protect the memory on release. 4 | //! This GC is commonly used for debugging purposes. 5 | 6 | pub(super) mod gc_work; 7 | pub(super) mod global; 8 | pub(super) mod mutator; 9 | 10 | pub use self::global::PageProtect; 11 | pub use self::global::CONSTRAINTS as PP_CONSTRAINTS; 12 | -------------------------------------------------------------------------------- /src/plan/pageprotect/mutator.rs: -------------------------------------------------------------------------------- 1 | use super::PageProtect; 2 | use crate::plan::mutator_context::Mutator; 3 | use crate::plan::mutator_context::MutatorConfig; 4 | use crate::plan::mutator_context::{ 5 | create_allocator_mapping, create_space_mapping, ReservedAllocators, 6 | }; 7 | use crate::plan::AllocationSemantics; 8 | use crate::plan::Plan; 9 | use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; 10 | use crate::vm::VMBinding; 11 | use crate::{ 12 | plan::barriers::NoBarrier, 13 | util::opaque_pointer::{VMMutatorThread, VMWorkerThread}, 14 | }; 15 | use enum_map::EnumMap; 16 | 17 | /// Prepare mutator. Do nothing. 18 | fn pp_mutator_prepare(_mutator: &mut Mutator, _tls: VMWorkerThread) {} 19 | 20 | /// Release mutator. Do nothing. 21 | fn pp_mutator_release(_mutator: &mut Mutator, _tls: VMWorkerThread) {} 22 | 23 | const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { 24 | n_large_object: 1, 25 | ..ReservedAllocators::DEFAULT 26 | }; 27 | 28 | lazy_static! 
{ 29 | pub static ref ALLOCATOR_MAPPING: EnumMap = { 30 | let mut map = create_allocator_mapping(RESERVED_ALLOCATORS, true); 31 | map[AllocationSemantics::Default] = AllocatorSelector::LargeObject(0); 32 | map 33 | }; 34 | } 35 | 36 | /// Create a mutator instance. 37 | /// Every object is allocated to LOS. 38 | pub fn create_pp_mutator( 39 | mutator_tls: VMMutatorThread, 40 | plan: &'static dyn Plan, 41 | ) -> Mutator { 42 | let page = plan.downcast_ref::>().unwrap(); 43 | let config = MutatorConfig { 44 | allocator_mapping: &ALLOCATOR_MAPPING, 45 | space_mapping: Box::new({ 46 | let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, plan); 47 | vec.push((AllocatorSelector::LargeObject(0), &page.space)); 48 | vec 49 | }), 50 | prepare_func: &pp_mutator_prepare, 51 | release_func: &pp_mutator_release, 52 | }; 53 | 54 | Mutator { 55 | allocators: Allocators::::new(mutator_tls, plan, &config.space_mapping), 56 | barrier: Box::new(NoBarrier), 57 | mutator_tls, 58 | config, 59 | plan, 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/plan/plan_constraints.rs: -------------------------------------------------------------------------------- 1 | //! Plan-specific constraints. 2 | 3 | use crate::plan::barriers::BarrierSelector; 4 | use crate::util::constants::*; 5 | 6 | /// This struct defines plan-specific constraints. 7 | /// Most of the constraints are constants. Each plan should declare a constant of this struct, 8 | /// and use the constant wherever possible. However, for plan-neutral implementations, 9 | /// these constraints are not constant. 10 | pub struct PlanConstraints { 11 | pub moves_objects: bool, 12 | pub gc_header_bits: usize, 13 | pub gc_header_words: usize, 14 | pub num_specialized_scans: usize, 15 | /// Size (in bytes) beyond which new regular objects must be allocated to the LOS. 16 | /// This usually depends on the restriction of the default allocator, e.g. 
block size for Immix, 17 | /// nursery size, max possible cell for freelist, etc. 18 | pub max_non_los_default_alloc_bytes: usize, 19 | /// Size (in bytes) beyond which copied objects must be copied to the LOS. 20 | /// This depends on the copy allocator. 21 | pub max_non_los_copy_bytes: usize, 22 | /// Does this plan use the log bit? See vm::ObjectModel::GLOBAL_LOG_BIT_SPEC. 23 | pub needs_log_bit: bool, 24 | /// Some plans may allow benign race for testing mark bit, and this will lead to trace the same edges 25 | /// multiple times. If a plan allows tracing duplicate edges, we will not run duplicate edge check 26 | /// in extreme_assertions. 27 | pub may_trace_duplicate_edges: bool, 28 | pub barrier: BarrierSelector, 29 | // the following seems unused for now 30 | pub needs_linear_scan: bool, 31 | pub needs_concurrent_workers: bool, 32 | pub generate_gc_trace: bool, 33 | /// Some policies do object forwarding after the first liveness transitive closure, such as mark compact. 34 | /// For plans that use those policies, they should set this as true. 
35 | pub needs_forward_after_liveness: bool, 36 | } 37 | 38 | impl PlanConstraints { 39 | pub const fn default() -> Self { 40 | PlanConstraints { 41 | moves_objects: false, 42 | gc_header_bits: 0, 43 | gc_header_words: 0, 44 | num_specialized_scans: 0, 45 | max_non_los_default_alloc_bytes: MAX_INT, 46 | max_non_los_copy_bytes: MAX_INT, 47 | needs_linear_scan: SUPPORT_CARD_SCANNING || LAZY_SWEEP, 48 | needs_concurrent_workers: false, 49 | generate_gc_trace: false, 50 | may_trace_duplicate_edges: false, 51 | needs_forward_after_liveness: false, 52 | needs_log_bit: false, 53 | barrier: BarrierSelector::NoBarrier, 54 | } 55 | } 56 | } 57 | 58 | pub const DEFAULT_PLAN_CONSTRAINTS: PlanConstraints = PlanConstraints::default(); 59 | 60 | // Use 16 pages as the size limit for non-LOS objects to avoid copying large objects 61 | pub const MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN: usize = 16 << LOG_BYTES_IN_PAGE; 62 | -------------------------------------------------------------------------------- /src/plan/semispace/gc_work.rs: -------------------------------------------------------------------------------- 1 | use super::global::SemiSpace; 2 | use crate::policy::gc_work::DEFAULT_TRACE; 3 | use crate::scheduler::gc_work::PlanProcessEdges; 4 | use crate::vm::VMBinding; 5 | 6 | pub struct SSGCWorkContext(std::marker::PhantomData); 7 | impl crate::scheduler::GCWorkContext for SSGCWorkContext { 8 | type VM = VM; 9 | type PlanType = SemiSpace; 10 | type ProcessEdgesWorkType = PlanProcessEdges, DEFAULT_TRACE>; 11 | } 12 | -------------------------------------------------------------------------------- /src/plan/semispace/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Plan: semispace 2 | 3 | pub(super) mod gc_work; 4 | pub(super) mod global; 5 | pub(super) mod mutator; 6 | 7 | pub use self::global::SemiSpace; 8 | pub use self::global::SS_CONSTRAINTS; 9 | -------------------------------------------------------------------------------- /src/plan/semispace/mutator.rs: -------------------------------------------------------------------------------- 1 | use super::SemiSpace; 2 | use crate::plan::barriers::NoBarrier; 3 | use crate::plan::mutator_context::Mutator; 4 | use crate::plan::mutator_context::MutatorConfig; 5 | use crate::plan::mutator_context::{ 6 | create_allocator_mapping, create_space_mapping, ReservedAllocators, 7 | }; 8 | use crate::plan::AllocationSemantics; 9 | use crate::plan::Plan; 10 | use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; 11 | use crate::util::alloc::BumpAllocator; 12 | use crate::util::{VMMutatorThread, VMWorkerThread}; 13 | use crate::vm::VMBinding; 14 | use enum_map::EnumMap; 15 | 16 | pub fn ss_mutator_prepare(_mutator: &mut Mutator, _tls: VMWorkerThread) { 17 | // Do nothing 18 | } 19 | 20 | pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { 21 | // rebind the allocation bump pointer to the appropriate semispace 22 | let bump_allocator = unsafe { 23 | mutator 24 | .allocators 25 | .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) 26 | } 27 | .downcast_mut::>() 28 | .unwrap(); 29 | bump_allocator.rebind( 30 | mutator 31 | .plan 32 | .downcast_ref::>() 33 | .unwrap() 34 | .tospace(), 35 | ); 36 | } 37 | 38 | const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { 39 | n_bump_pointer: 1, 40 | ..ReservedAllocators::DEFAULT 41 | }; 42 | 43 | lazy_static! 
{ 44 | pub static ref ALLOCATOR_MAPPING: EnumMap = { 45 | let mut map = create_allocator_mapping(RESERVED_ALLOCATORS, true); 46 | map[AllocationSemantics::Default] = AllocatorSelector::BumpPointer(0); 47 | map 48 | }; 49 | } 50 | 51 | pub fn create_ss_mutator( 52 | mutator_tls: VMMutatorThread, 53 | plan: &'static dyn Plan, 54 | ) -> Mutator { 55 | let ss = plan.downcast_ref::>().unwrap(); 56 | let config = MutatorConfig { 57 | allocator_mapping: &ALLOCATOR_MAPPING, 58 | space_mapping: Box::new({ 59 | let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, plan); 60 | vec.push((AllocatorSelector::BumpPointer(0), ss.tospace())); 61 | vec 62 | }), 63 | prepare_func: &ss_mutator_prepare, 64 | release_func: &ss_mutator_release, 65 | }; 66 | 67 | Mutator { 68 | allocators: Allocators::::new(mutator_tls, plan, &config.space_mapping), 69 | barrier: Box::new(NoBarrier), 70 | mutator_tls, 71 | config, 72 | plan, 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/plan/sticky/immix/gc_work.rs: -------------------------------------------------------------------------------- 1 | use crate::policy::gc_work::TraceKind; 2 | use crate::scheduler::gc_work::PlanProcessEdges; 3 | use crate::{plan::generational::gc_work::GenNurseryProcessEdges, vm::VMBinding}; 4 | 5 | use super::global::StickyImmix; 6 | 7 | pub struct StickyImmixNurseryGCWorkContext(std::marker::PhantomData); 8 | impl crate::scheduler::GCWorkContext for StickyImmixNurseryGCWorkContext { 9 | type VM = VM; 10 | type PlanType = StickyImmix; 11 | type ProcessEdgesWorkType = GenNurseryProcessEdges; 12 | } 13 | 14 | pub struct StickyImmixMatureGCWorkContext( 15 | std::marker::PhantomData, 16 | ); 17 | impl crate::scheduler::GCWorkContext 18 | for StickyImmixMatureGCWorkContext 19 | { 20 | type VM = VM; 21 | type PlanType = StickyImmix; 22 | type ProcessEdgesWorkType = PlanProcessEdges; 23 | } 24 | 
-------------------------------------------------------------------------------- /src/plan/sticky/immix/mod.rs: -------------------------------------------------------------------------------- 1 | pub(in crate::plan) mod gc_work; 2 | pub(in crate::plan) mod global; 3 | pub(in crate::plan) mod mutator; 4 | 5 | pub use global::StickyImmix; 6 | pub use global::STICKY_IMMIX_CONSTRAINTS; 7 | -------------------------------------------------------------------------------- /src/plan/sticky/immix/mutator.rs: -------------------------------------------------------------------------------- 1 | use crate::plan::barriers::ObjectBarrier; 2 | use crate::plan::generational::barrier::GenObjectBarrierSemantics; 3 | use crate::plan::immix; 4 | use crate::plan::mutator_context::{create_space_mapping, MutatorConfig}; 5 | use crate::plan::sticky::immix::global::StickyImmix; 6 | use crate::util::alloc::allocators::Allocators; 7 | use crate::util::alloc::AllocatorSelector; 8 | use crate::util::opaque_pointer::VMWorkerThread; 9 | use crate::util::VMMutatorThread; 10 | use crate::vm::VMBinding; 11 | use crate::{Mutator, MMTK}; 12 | 13 | pub fn stickyimmix_mutator_prepare(mutator: &mut Mutator, tls: VMWorkerThread) { 14 | immix::mutator::immix_mutator_prepare(mutator, tls) 15 | } 16 | 17 | pub fn stickyimmix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { 18 | immix::mutator::immix_mutator_release(mutator, tls) 19 | } 20 | 21 | pub use immix::mutator::ALLOCATOR_MAPPING; 22 | 23 | pub fn create_stickyimmix_mutator( 24 | mutator_tls: VMMutatorThread, 25 | mmtk: &'static MMTK, 26 | ) -> Mutator { 27 | let stickyimmix = mmtk.plan.downcast_ref::>().unwrap(); 28 | let config = MutatorConfig { 29 | allocator_mapping: &ALLOCATOR_MAPPING, 30 | space_mapping: Box::new({ 31 | let mut vec = 32 | create_space_mapping(immix::mutator::RESERVED_ALLOCATORS, true, &*mmtk.plan); 33 | vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space())); 34 | vec 35 | }), 36 | prepare_func: 
&stickyimmix_mutator_prepare, 37 | release_func: &stickyimmix_mutator_release, 38 | }; 39 | 40 | Mutator { 41 | allocators: Allocators::::new(mutator_tls, &*mmtk.plan, &config.space_mapping), 42 | barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new( 43 | mmtk, 44 | stickyimmix, 45 | ))), 46 | mutator_tls, 47 | config, 48 | plan: &*mmtk.plan, 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/plan/sticky/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod immix; 2 | -------------------------------------------------------------------------------- /src/policy/copy_context.rs: -------------------------------------------------------------------------------- 1 | use crate::util::Address; 2 | use crate::util::ObjectReference; 3 | use crate::vm::VMBinding; 4 | 5 | /// A GC worker's copy allocator for copying GCs. 6 | /// Each copying policy should provide their implementation of PolicyCopyContext. 7 | /// If we copy objects from one policy to a different policy, the copy context of the destination 8 | /// policy should be used. For example, for generational immix, the nursery is CopySpace, and the 9 | /// mature space is ImmixSpace. When we copy from nursery to mature, ImmixCopyContext should be 10 | /// used. 11 | /// Note that this trait should only be implemented with policy specific behaviors. Please 12 | /// refer to [`crate::util::copy::GCWorkerCopyContext`] which implements common 13 | /// behaviors for copying. 
14 | pub trait PolicyCopyContext: 'static + Send { 15 | type VM: VMBinding; 16 | fn prepare(&mut self); 17 | fn release(&mut self); 18 | fn alloc_copy( 19 | &mut self, 20 | original: ObjectReference, 21 | bytes: usize, 22 | align: usize, 23 | offset: usize, 24 | ) -> Address; 25 | fn post_copy(&mut self, _obj: ObjectReference, _bytes: usize) {} 26 | } 27 | -------------------------------------------------------------------------------- /src/policy/gc_work.rs: -------------------------------------------------------------------------------- 1 | /// Used to identify the trace if a policy has different kinds of traces. For example, defrag vs fast trace for Immix, 2 | /// mark vs forward trace for mark compact. 3 | pub(crate) type TraceKind = u8; 4 | 5 | pub const DEFAULT_TRACE: u8 = u8::MAX; 6 | 7 | use crate::plan::ObjectQueue; 8 | use crate::scheduler::GCWorker; 9 | use crate::util::copy::CopySemantics; 10 | 11 | use crate::util::ObjectReference; 12 | 13 | use crate::vm::VMBinding; 14 | 15 | /// This trait defines policy-specific behavior for tracing objects. 16 | /// The procedural macro #[derive(PlanTraceObject)] will generate code 17 | /// that uses this trait. We expect any policy to implement this trait. 18 | /// For the sake of performance, the implementation 19 | /// of this trait should mark methods as `[inline(always)]`. 20 | pub trait PolicyTraceObject { 21 | /// Trace object in the policy. If the policy copies objects, we should 22 | /// expect `copy` to be a `Some` value. 23 | fn trace_object( 24 | &self, 25 | queue: &mut Q, 26 | object: ObjectReference, 27 | copy: Option, 28 | worker: &mut GCWorker, 29 | ) -> ObjectReference; 30 | 31 | /// Policy-specific post-scan-object hook. It is called after scanning 32 | /// each object in this space. 33 | fn post_scan_object(&self, _object: ObjectReference) { 34 | // Do nothing. 35 | } 36 | 37 | /// Return whether the policy moves objects. 
38 | fn may_move_objects() -> bool; 39 | } 40 | -------------------------------------------------------------------------------- /src/policy/immix/line.rs: -------------------------------------------------------------------------------- 1 | use super::block::Block; 2 | use crate::util::linear_scan::{Region, RegionIterator}; 3 | use crate::util::metadata::side_metadata::SideMetadataSpec; 4 | use crate::{ 5 | util::{Address, ObjectReference}, 6 | vm::*, 7 | }; 8 | 9 | /// Data structure to reference a line within an immix block. 10 | #[repr(transparent)] 11 | #[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq)] 12 | pub struct Line(Address); 13 | 14 | impl Region for Line { 15 | const LOG_BYTES: usize = 8; 16 | 17 | #[allow(clippy::assertions_on_constants)] // make sure line is not used when BLOCK_ONLY is turned on. 18 | fn from_aligned_address(address: Address) -> Self { 19 | debug_assert!(!super::BLOCK_ONLY); 20 | debug_assert!(address.is_aligned_to(Self::BYTES)); 21 | Self(address) 22 | } 23 | 24 | fn start(&self) -> Address { 25 | self.0 26 | } 27 | } 28 | 29 | #[allow(clippy::assertions_on_constants)] 30 | impl Line { 31 | pub const RESET_MARK_STATE: u8 = 1; 32 | pub const MAX_MARK_STATE: u8 = 127; 33 | 34 | /// Line mark table (side) 35 | pub const MARK_TABLE: SideMetadataSpec = 36 | crate::util::metadata::side_metadata::spec_defs::IX_LINE_MARK; 37 | 38 | /// Get the block containing the line. 39 | pub fn block(&self) -> Block { 40 | debug_assert!(!super::BLOCK_ONLY); 41 | Block::from_unaligned_address(self.0) 42 | } 43 | 44 | /// Get line index within its containing block. 45 | pub fn get_index_within_block(&self) -> usize { 46 | let addr = self.start(); 47 | addr.get_extent(Block::align(addr)) >> Line::LOG_BYTES 48 | } 49 | 50 | /// Mark the line. This will update the side line mark table. 
51 | pub fn mark(&self, state: u8) { 52 | debug_assert!(!super::BLOCK_ONLY); 53 | unsafe { 54 | Self::MARK_TABLE.store::(self.start(), state); 55 | } 56 | } 57 | 58 | /// Test line mark state. 59 | pub fn is_marked(&self, state: u8) -> bool { 60 | debug_assert!(!super::BLOCK_ONLY); 61 | unsafe { Self::MARK_TABLE.load::(self.start()) == state } 62 | } 63 | 64 | /// Mark all lines the object is spanned to. 65 | pub fn mark_lines_for_object(object: ObjectReference, state: u8) -> usize { 66 | debug_assert!(!super::BLOCK_ONLY); 67 | let start = object.to_object_start::(); 68 | let end = start + VM::VMObjectModel::get_current_size(object); 69 | let start_line = Line::from_unaligned_address(start); 70 | let mut end_line = Line::from_unaligned_address(end); 71 | if !Line::is_aligned(end) { 72 | end_line = end_line.next(); 73 | } 74 | let mut marked_lines = 0; 75 | let iter = RegionIterator::::new(start_line, end_line); 76 | for line in iter { 77 | if !line.is_marked(state) { 78 | marked_lines += 1; 79 | } 80 | line.mark(state) 81 | } 82 | marked_lines 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/policy/immix/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod block; 2 | pub mod defrag; 3 | pub mod immixspace; 4 | pub mod line; 5 | 6 | pub use immixspace::*; 7 | 8 | use crate::policy::immix::block::Block; 9 | use crate::util::linear_scan::Region; 10 | 11 | /// The max object size for immix: half of a block 12 | pub const MAX_IMMIX_OBJECT_SIZE: usize = Block::BYTES >> 1; 13 | 14 | /// Mark/sweep memory for block-level only 15 | pub const BLOCK_ONLY: bool = false; 16 | 17 | /// Do we allow Immix to do defragmentation? 18 | pub const DEFRAG: bool = !cfg!(feature = "immix_non_moving"); // defrag if we are allowed to move. 19 | 20 | /// Make every GC a defragment GC. 
(for debugging) 21 | pub const STRESS_DEFRAG: bool = false; 22 | 23 | /// Mark every allocated block as defragmentation source before GC. (for debugging) 24 | /// Set both this and `STRESS_DEFRAG` to true to make Immix move as many objects as possible. 25 | pub const DEFRAG_EVERY_BLOCK: bool = false; 26 | 27 | /// If Immix is used as a nursery space, do we prefer copy? 28 | pub const PREFER_COPY_ON_NURSERY_GC: bool = !cfg!(feature = "immix_non_moving"); // copy nursery objects if we are allowed to move. 29 | 30 | /// In some cases/settings, Immix may never move objects. 31 | /// Currently we only have two cases where we move objects: 1. defrag, 2. nursery copy. 32 | /// If we do neither, we will not move objects. 33 | /// If we have other reasons to move objects, we need to add them here. 34 | pub const NEVER_MOVE_OBJECTS: bool = !DEFRAG && !PREFER_COPY_ON_NURSERY_GC; 35 | 36 | /// Mark lines when scanning objects. 37 | /// Otherwise, do it at mark time. 38 | pub const MARK_LINE_AT_SCAN_TIME: bool = true; 39 | 40 | macro_rules! validate { 41 | ($x: expr) => { assert!($x, stringify!($x)) }; 42 | ($x: expr => $y: expr) => { if $x { assert!($y, stringify!($x implies $y)) } }; 43 | } 44 | 45 | fn validate_features() { 46 | // Block-only immix cannot do defragmentation 47 | validate!(DEFRAG => !BLOCK_ONLY); 48 | // Number of lines in a block should not exceed BlockState::MARK_MARKED 49 | assert!(Block::LINES / 2 <= u8::MAX as usize - 2); 50 | } 51 | -------------------------------------------------------------------------------- /src/policy/marksweepspace/malloc_ms/mod.rs: -------------------------------------------------------------------------------- 1 | mod global; 2 | mod metadata; 3 | 4 | pub use global::*; 5 | pub use metadata::*; 6 | -------------------------------------------------------------------------------- /src/policy/marksweepspace/mod.rs: -------------------------------------------------------------------------------- 1 | //! Mark sweep space. 2 | //! 
MMTk provides two implementations of mark sweep: 3 | //! 1. mark sweep using a native freelist allocator implemented in MMTk. This is the default mark sweep implementation, and 4 | //! most people should use this. 5 | //! 2. mark sweep using malloc as its freelist allocator. Use the feature `malloc_mark_sweep` to enable it. As we do not control 6 | //! the allocation of malloc, we have to work around a few issues to make it for mark sweep. Thus it has considerably worse performance. 7 | //! This is an experimental feature, and should only be used if you are actually interested in using malloc as the allocator. 8 | //! Otherwise this should not be used. 9 | 10 | // TODO: we should extract the code about mark sweep, and make both implementation use the same mark sweep code. 11 | 12 | /// Malloc mark sweep. This uses `MallocSpace` and `MallocAllocator`. 13 | pub(crate) mod malloc_ms; 14 | /// Native mark sweep. This uses `MarkSweepSpace` and `FreeListAllocator`. 15 | pub(crate) mod native_ms; 16 | -------------------------------------------------------------------------------- /src/policy/marksweepspace/native_ms/mod.rs: -------------------------------------------------------------------------------- 1 | mod block; 2 | mod block_list; 3 | mod global; 4 | 5 | pub use block::*; 6 | pub use block_list::*; 7 | pub use global::*; 8 | -------------------------------------------------------------------------------- /src/policy/mod.rs: -------------------------------------------------------------------------------- 1 | //! Memory policies that can be used for spaces. 2 | 3 | /// This class defines and manages spaces. Each policy is an instance 4 | /// of a space. A space is a region of virtual memory (contiguous or 5 | /// discontigous) which is subject to the same memory management 6 | /// regime. 
Multiple spaces (instances of this class or its 7 | /// descendants) may have the same policy (eg there could be numerous 8 | /// instances of CopySpace, each with different roles). Spaces are 9 | /// defined in terms of a unique region of virtual memory, so no two 10 | /// space instances ever share any virtual memory.

11 | /// In addition to tracking virtual memory use and the mapping to 12 | /// policy, spaces also manage memory consumption (used virtual 13 | /// memory). 14 | pub mod space; 15 | 16 | /// Copy context defines the thread local copy allocator for copying policies. 17 | pub mod copy_context; 18 | /// Policy specific GC work 19 | pub mod gc_work; 20 | pub mod sft; 21 | pub mod sft_map; 22 | 23 | pub mod copyspace; 24 | pub mod immix; 25 | pub mod immortalspace; 26 | pub mod largeobjectspace; 27 | pub mod lockfreeimmortalspace; 28 | pub mod markcompactspace; 29 | pub mod marksweepspace; 30 | #[cfg(feature = "vm_space")] 31 | pub mod vmspace; 32 | -------------------------------------------------------------------------------- /src/scheduler/affinity.rs: -------------------------------------------------------------------------------- 1 | use super::worker::ThreadId; 2 | use crate::util::options::AffinityKind; 3 | #[cfg(target_os = "linux")] 4 | use libc::{cpu_set_t, sched_getaffinity, sched_setaffinity, CPU_COUNT, CPU_SET, CPU_ZERO}; 5 | 6 | /// Represents the ID of a logical CPU on a system. 7 | pub type CoreId = u16; 8 | 9 | // XXX: Maybe in the future we can use a library such as https://github.com/Elzair/core_affinity_rs 10 | // to have an OS agnostic way of setting thread affinity. 11 | #[cfg(target_os = "linux")] 12 | /// Return the total number of cores allocated to the program. 13 | pub fn get_total_num_cpus() -> u16 { 14 | use std::mem::MaybeUninit; 15 | unsafe { 16 | let mut cs = MaybeUninit::zeroed().assume_init(); 17 | CPU_ZERO(&mut cs); 18 | sched_getaffinity(0, std::mem::size_of::(), &mut cs); 19 | CPU_COUNT(&cs) as u16 20 | } 21 | } 22 | 23 | #[cfg(not(target_os = "linux"))] 24 | /// Return the total number of cores allocated to the program. 25 | pub fn get_total_num_cpus() -> u16 { 26 | unimplemented!() 27 | } 28 | 29 | impl AffinityKind { 30 | /// Resolve affinity of GC thread. 
Has a side-effect of calling into the kernel to set the 31 | /// thread affinity. Note that we assume that each GC thread is equivalent to an OS or hardware 32 | /// thread. 33 | pub fn resolve_affinity(&self, thread: ThreadId) { 34 | match self { 35 | AffinityKind::OsDefault => {} 36 | AffinityKind::RoundRobin(cpuset) => { 37 | let cpu = cpuset[thread % cpuset.len()]; 38 | debug!("Set affinity for thread {} to core {}", thread, cpu); 39 | bind_current_thread_to_core(cpu); 40 | } 41 | } 42 | } 43 | } 44 | 45 | #[cfg(target_os = "linux")] 46 | /// Bind the current thread to the specified core. 47 | fn bind_current_thread_to_core(cpu: CoreId) { 48 | use std::mem::MaybeUninit; 49 | unsafe { 50 | let mut cs = MaybeUninit::zeroed().assume_init(); 51 | CPU_ZERO(&mut cs); 52 | CPU_SET(cpu as usize, &mut cs); 53 | sched_setaffinity(0, std::mem::size_of::(), &cs); 54 | } 55 | } 56 | 57 | #[cfg(not(target_os = "linux"))] 58 | /// Bind the current thread to the specified core. 59 | fn bind_current_thread_to_core(_cpu: CoreId) { 60 | unimplemented!() 61 | } 62 | -------------------------------------------------------------------------------- /src/scheduler/mod.rs: -------------------------------------------------------------------------------- 1 | //! A general scheduler implementation. MMTk uses it to schedule GC-related work. 
2 | 3 | pub mod affinity; 4 | 5 | #[allow(clippy::module_inception)] 6 | mod scheduler; 7 | pub(crate) use scheduler::GCWorkScheduler; 8 | 9 | mod stat; 10 | pub(self) mod work_counter; 11 | 12 | mod work; 13 | pub use work::GCWork; 14 | pub(crate) use work::GCWorkContext; 15 | 16 | mod work_bucket; 17 | pub use work_bucket::WorkBucketStage; 18 | 19 | mod worker; 20 | pub(crate) use worker::current_worker_ordinal; 21 | pub use worker::GCWorker; 22 | 23 | mod controller; 24 | pub use controller::GCController; 25 | 26 | pub(crate) mod gc_work; 27 | pub use gc_work::ProcessEdgesWork; 28 | -------------------------------------------------------------------------------- /src/scheduler/work.rs: -------------------------------------------------------------------------------- 1 | use super::worker::*; 2 | use crate::mmtk::MMTK; 3 | use crate::vm::VMBinding; 4 | #[cfg(feature = "work_packet_stats")] 5 | use std::any::{type_name, TypeId}; 6 | 7 | pub trait GCWork: 'static + Send { 8 | /// Define the work for this packet. However, this is not supposed to be called directly. 9 | /// Usually `do_work_with_stat()` should be used. 10 | fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK); 11 | 12 | /// Do work and collect statistics. This internally calls `do_work()`. In most cases, 13 | /// this should be called rather than `do_work()` so that MMTk can correctly collect 14 | /// statistics for the work packets. 15 | /// If the feature "work_packet_stats" is not enabled, this call simply forwards the call 16 | /// to `do_work()`. 17 | fn do_work_with_stat(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { 18 | debug!("{}", std::any::type_name::()); 19 | debug_assert!(!worker.tls.0.0.is_null(), "TLS must be set correctly for a GC worker before the worker does any work. 
GC Worker {} has no valid tls.", worker.ordinal); 20 | 21 | #[cfg(feature = "work_packet_stats")] 22 | // Start collecting statistics 23 | let stat = { 24 | let mut worker_stat = worker.shared.borrow_stat_mut(); 25 | worker_stat.measure_work(TypeId::of::(), type_name::(), mmtk) 26 | }; 27 | 28 | // Do the actual work 29 | self.do_work(worker, mmtk); 30 | 31 | #[cfg(feature = "work_packet_stats")] 32 | // Finish collecting statistics 33 | { 34 | let mut worker_stat = worker.shared.borrow_stat_mut(); 35 | stat.end_of_work(&mut worker_stat); 36 | } 37 | } 38 | 39 | /// Get the compile-time static type name for the work packet. 40 | fn get_type_name(&self) -> &'static str { 41 | std::any::type_name::() 42 | } 43 | } 44 | 45 | use super::gc_work::ProcessEdgesWork; 46 | use crate::plan::Plan; 47 | 48 | /// This trait provides a group of associated types that are needed to 49 | /// create GC work packets for a certain plan. For example, `GCWorkScheduler.schedule_common_work()` 50 | /// needs this trait to schedule different work packets. For certain plans, 51 | /// they may need to provide several types that implement this trait, e.g. one for 52 | /// nursery GC, one for mature GC. 53 | pub trait GCWorkContext { 54 | type VM: VMBinding; 55 | type PlanType: Plan; 56 | // We should use SFTProcessEdges as the default value for this associate type. However, this requires 57 | // `associated_type_defaults` which has not yet been stablized. 
58 | type ProcessEdgesWorkType: ProcessEdgesWork; 59 | } 60 | -------------------------------------------------------------------------------- /src/util/alloc/embedded_meta_data.rs: -------------------------------------------------------------------------------- 1 | use crate::util::constants::LOG_BYTES_IN_PAGE; 2 | use crate::util::Address; 3 | 4 | /* The (log of the) size of each region of meta data management */ 5 | pub const LOG_BYTES_IN_REGION: usize = 22; 6 | pub const BYTES_IN_REGION: usize = 1 << LOG_BYTES_IN_REGION; 7 | pub const REGION_MASK: usize = BYTES_IN_REGION - 1; 8 | pub const LOG_PAGES_IN_REGION: usize = LOG_BYTES_IN_REGION - LOG_BYTES_IN_PAGE as usize; 9 | pub const PAGES_IN_REGION: usize = 1 << LOG_PAGES_IN_REGION; 10 | 11 | pub fn get_metadata_base(address: Address) -> Address { 12 | address.align_down(BYTES_IN_REGION) 13 | } 14 | 15 | pub fn get_metadata_offset(address: Address, log_coverage: usize, log_align: usize) -> usize { 16 | ((address & REGION_MASK) >> (log_coverage + log_align)) << log_align 17 | } 18 | -------------------------------------------------------------------------------- /src/util/alloc/large_object_allocator.rs: -------------------------------------------------------------------------------- 1 | use crate::plan::Plan; 2 | use crate::policy::largeobjectspace::LargeObjectSpace; 3 | use crate::policy::space::Space; 4 | use crate::util::alloc::{allocator, Allocator}; 5 | use crate::util::opaque_pointer::*; 6 | use crate::util::Address; 7 | use crate::vm::VMBinding; 8 | 9 | #[repr(C)] 10 | pub struct LargeObjectAllocator { 11 | /// [`VMThread`] associated with this allocator instance 12 | pub tls: VMThread, 13 | /// [`Space`](src/policy/space/Space) instance associated with this allocator instance. 14 | space: &'static LargeObjectSpace, 15 | /// [`Plan`] instance that this allocator instance is associated with. 
16 | plan: &'static dyn Plan, 17 | } 18 | 19 | impl Allocator for LargeObjectAllocator { 20 | fn get_tls(&self) -> VMThread { 21 | self.tls 22 | } 23 | 24 | fn get_plan(&self) -> &'static dyn Plan { 25 | self.plan 26 | } 27 | 28 | fn get_space(&self) -> &'static dyn Space { 29 | // Casting the interior of the Option: from &LargeObjectSpace to &dyn Space 30 | self.space as &'static dyn Space 31 | } 32 | 33 | fn does_thread_local_allocation(&self) -> bool { 34 | false 35 | } 36 | 37 | fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address { 38 | let cell: Address = self.alloc_slow(size, align, offset); 39 | // We may get a null ptr from alloc due to the VM being OOM 40 | if !cell.is_zero() { 41 | allocator::align_allocation::(cell, align, offset) 42 | } else { 43 | cell 44 | } 45 | } 46 | 47 | fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address { 48 | let header = 0; // HashSet is used instead of DoublyLinkedList 49 | let maxbytes = allocator::get_maximum_aligned_size::(size + header, align); 50 | let pages = crate::util::conversions::bytes_to_pages_up(maxbytes); 51 | let sp = self.space.allocate_pages(self.tls, pages); 52 | if sp.is_zero() { 53 | sp 54 | } else { 55 | sp + header 56 | } 57 | } 58 | } 59 | 60 | impl LargeObjectAllocator { 61 | pub fn new( 62 | tls: VMThread, 63 | space: &'static LargeObjectSpace, 64 | plan: &'static dyn Plan, 65 | ) -> Self { 66 | LargeObjectAllocator { tls, space, plan } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/util/alloc/malloc_allocator.rs: -------------------------------------------------------------------------------- 1 | use crate::policy::marksweepspace::malloc_ms::MallocSpace; 2 | use crate::policy::space::Space; 3 | use crate::util::alloc::Allocator; 4 | use crate::util::opaque_pointer::*; 5 | use crate::util::Address; 6 | use crate::vm::VMBinding; 7 | use crate::Plan; 8 | 9 | #[repr(C)] 10 | pub struct 
MallocAllocator { 11 | /// [`VMThread`] associated with this allocator instance 12 | pub tls: VMThread, 13 | /// [`Space`](src/policy/space/Space) instance associated with this allocator instance. 14 | space: &'static MallocSpace, 15 | /// [`Plan`] instance that this allocator instance is associated with. 16 | plan: &'static dyn Plan, 17 | } 18 | 19 | impl Allocator for MallocAllocator { 20 | fn get_space(&self) -> &'static dyn Space { 21 | self.space as &'static dyn Space 22 | } 23 | 24 | fn get_plan(&self) -> &'static dyn Plan { 25 | self.plan 26 | } 27 | 28 | fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address { 29 | self.alloc_slow(size, align, offset) 30 | } 31 | 32 | fn get_tls(&self) -> VMThread { 33 | self.tls 34 | } 35 | 36 | fn does_thread_local_allocation(&self) -> bool { 37 | false 38 | } 39 | 40 | fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address { 41 | self.space.alloc(self.tls, size, align, offset) 42 | } 43 | } 44 | 45 | impl MallocAllocator { 46 | pub fn new( 47 | tls: VMThread, 48 | space: &'static MallocSpace, 49 | plan: &'static dyn Plan, 50 | ) -> Self { 51 | MallocAllocator { tls, space, plan } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/util/alloc/mod.rs: -------------------------------------------------------------------------------- 1 | //! Various allocators implementation. 2 | 3 | /// The allocator trait and allocation-related functions. 
4 | pub(crate) mod allocator; 5 | pub use allocator::fill_alignment_gap; 6 | pub use allocator::AllocationError; 7 | pub use allocator::Allocator; 8 | 9 | /// A list of all the allocators, embedded in Mutator 10 | pub(crate) mod allocators; 11 | pub use allocators::AllocatorSelector; 12 | pub use allocators::Allocators; 13 | 14 | /// Bump pointer allocator 15 | mod bumpallocator; 16 | pub use bumpallocator::BumpAllocator; 17 | 18 | mod large_object_allocator; 19 | pub use large_object_allocator::LargeObjectAllocator; 20 | 21 | /// An alloactor backed by malloc 22 | mod malloc_allocator; 23 | pub use malloc_allocator::MallocAllocator; 24 | 25 | /// Immix allocator 26 | pub mod immix_allocator; 27 | pub use self::immix_allocator::ImmixAllocator; 28 | 29 | // Free list allocator based on Mimalloc 30 | pub mod free_list_allocator; 31 | pub use free_list_allocator::FreeListAllocator; 32 | 33 | /// Mark compact allocator (actually a bump pointer allocator with an extra heade word) 34 | mod markcompact_allocator; 35 | pub use markcompact_allocator::MarkCompactAllocator; 36 | 37 | /// Embedded metadata pages 38 | pub(crate) mod embedded_meta_data; 39 | -------------------------------------------------------------------------------- /src/util/analysis/gc_count.rs: -------------------------------------------------------------------------------- 1 | use crate::util::analysis::RtAnalysis; 2 | use crate::util::statistics::counter::EventCounter; 3 | use crate::vm::VMBinding; 4 | use crate::MMTK; 5 | use std::sync::{Arc, Mutex}; 6 | 7 | /** 8 | * Simple analysis routine that counts the number of collections over course of program execution 9 | */ 10 | pub struct GcCounter { 11 | running: bool, 12 | counter: Arc>, 13 | } 14 | 15 | impl GcCounter { 16 | pub fn new(running: bool, counter: Arc>) -> Self { 17 | Self { running, counter } 18 | } 19 | } 20 | 21 | impl RtAnalysis for GcCounter { 22 | fn gc_hook(&mut self, _mmtk: &'static MMTK) { 23 | if self.running { 24 | // The analysis 
routine simply updates the counter when the allocation hook is called 25 | self.counter.lock().unwrap().inc(); 26 | } 27 | } 28 | 29 | fn set_running(&mut self, running: bool) { 30 | self.running = running; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/util/analysis/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::scheduler::*; 2 | use crate::util::statistics::stats::Stats; 3 | use crate::vm::VMBinding; 4 | use crate::MMTK; 5 | use std::sync::{Arc, Mutex}; 6 | 7 | pub mod gc_count; 8 | pub mod obj_num; 9 | pub mod obj_size; 10 | 11 | use self::gc_count::GcCounter; 12 | use self::obj_num::ObjectCounter; 13 | use self::obj_size::PerSizeClassObjectCounter; 14 | 15 | /// 16 | /// This trait exposes hooks for developers to implement their own analysis routines. 17 | /// 18 | /// Most traits would want to hook into the `Stats` and counters provided by the MMTk 19 | /// framework that are exposed to the Harness. 20 | /// 21 | /// The arguments for the hooks should be sufficient, however, if one wishes to add 22 | /// other arguments, then they can create an analysis routine specific function and 23 | /// invoke it in its respective place. 24 | /// 25 | pub trait RtAnalysis { 26 | fn alloc_hook(&mut self, _size: usize, _align: usize, _offset: usize) {} 27 | fn gc_hook(&mut self, _mmtk: &'static MMTK) {} 28 | fn set_running(&mut self, running: bool); 29 | } 30 | 31 | #[derive(Default)] 32 | pub struct GcHookWork; 33 | 34 | impl GCWork for GcHookWork { 35 | fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { 36 | let base = &mmtk.plan.base(); 37 | base.analysis_manager.gc_hook(mmtk); 38 | } 39 | } 40 | 41 | // The AnalysisManager essentially acts as a proxy for all analysis routines made. 42 | // The framwework uses the AnalysisManager to call hooks for analysis routines. 
43 | #[derive(Default)] 44 | pub struct AnalysisManager { 45 | routines: Mutex + Send>>>>, 46 | } 47 | 48 | impl AnalysisManager { 49 | pub fn new(stats: &Stats) -> Self { 50 | let mut manager = AnalysisManager { 51 | routines: Mutex::new(vec![]), 52 | }; 53 | manager.initialize_routines(stats); 54 | manager 55 | } 56 | 57 | // Initializing all routines. If you want to add a new routine, here is the place 58 | // to do so 59 | fn initialize_routines(&mut self, stats: &Stats) { 60 | let ctr = stats.new_event_counter("obj.num", true, true); 61 | let gc_ctr = stats.new_event_counter("gc.num", true, true); 62 | let obj_num = Arc::new(Mutex::new(ObjectCounter::new(true, ctr))); 63 | let gc_count = Arc::new(Mutex::new(GcCounter::new(true, gc_ctr))); 64 | let obj_size = Arc::new(Mutex::new(PerSizeClassObjectCounter::new(true))); 65 | self.add_analysis_routine(obj_num); 66 | self.add_analysis_routine(gc_count); 67 | self.add_analysis_routine(obj_size); 68 | } 69 | 70 | pub fn add_analysis_routine(&mut self, routine: Arc + Send>>) { 71 | let mut routines = self.routines.lock().unwrap(); 72 | routines.push(routine.clone()); 73 | } 74 | 75 | pub fn alloc_hook(&self, size: usize, align: usize, offset: usize) { 76 | let routines = self.routines.lock().unwrap(); 77 | for r in &*routines { 78 | r.lock().unwrap().alloc_hook(size, align, offset); 79 | } 80 | } 81 | 82 | pub fn gc_hook(&self, mmtk: &'static MMTK) { 83 | let routines = self.routines.lock().unwrap(); 84 | for r in &*routines { 85 | r.lock().unwrap().gc_hook(mmtk); 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/util/analysis/obj_num.rs: -------------------------------------------------------------------------------- 1 | use crate::util::analysis::RtAnalysis; 2 | use crate::util::statistics::counter::EventCounter; 3 | use crate::vm::VMBinding; 4 | use std::sync::{Arc, Mutex}; 5 | 6 | /** 7 | * Simple analysis routine that counts the number of objects 
allocated 8 | */ 9 | pub struct ObjectCounter { 10 | running: bool, 11 | counter: Arc>, 12 | } 13 | 14 | impl ObjectCounter { 15 | pub fn new(running: bool, counter: Arc>) -> Self { 16 | Self { running, counter } 17 | } 18 | } 19 | 20 | impl RtAnalysis for ObjectCounter { 21 | fn alloc_hook(&mut self, _size: usize, _align: usize, _offset: usize) { 22 | if self.running { 23 | // The analysis routine simply updates the counter when the allocation hook is called 24 | self.counter.lock().unwrap().inc(); 25 | } 26 | } 27 | 28 | fn set_running(&mut self, running: bool) { 29 | self.running = running; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/util/analysis/obj_size.rs: -------------------------------------------------------------------------------- 1 | use crate::util::analysis::RtAnalysis; 2 | use crate::util::statistics::counter::EventCounter; 3 | use crate::vm::{ActivePlan, VMBinding}; 4 | use std::collections::HashMap; 5 | use std::sync::{Arc, Mutex}; 6 | 7 | /** 8 | * This file implements an analysis routine that counts the number of objects allocated 9 | * in each size class. Here, a size class 'sizeX' is defined as 'X bytes or lower'. For 10 | * example, size64 is the size class with objects <= 64 bytes but > 32 bytes which is 11 | * the previous size class. 12 | * 13 | * We keep track of the size classes using a HashMap with the key being the name of the 14 | * size class. 15 | */ 16 | #[derive(Default)] 17 | pub struct PerSizeClassObjectCounter { 18 | running: bool, 19 | size_classes: Mutex>>>, 20 | } 21 | 22 | // Macro to simplify the creation of a new counter for a particular size class. 23 | // This is a macro as opposed to a function as otherwise we would have to unlock 24 | // and relock the size_classes map 25 | macro_rules! 
new_ctr { 26 | ( $stats:expr, $map:expr, $size_class:expr ) => {{ 27 | let ctr = $stats.new_event_counter(&$size_class, true, true); 28 | $map.insert($size_class.to_string(), ctr.clone()); 29 | ctr 30 | }}; 31 | } 32 | 33 | impl PerSizeClassObjectCounter { 34 | pub fn new(running: bool) -> Self { 35 | Self { 36 | running, 37 | size_classes: Mutex::new(HashMap::new()), 38 | } 39 | } 40 | 41 | // Fastest way to compute the smallest power of 2 that is larger than n 42 | // See: https://stackoverflow.com/questions/3272424/compute-fast-log-base-2-ceiling/51351885#51351885 43 | fn size_class(&self, size: usize) -> usize { 44 | 2_usize.pow(63_u32 - (size - 1).leading_zeros() + 1) 45 | } 46 | } 47 | 48 | impl RtAnalysis for PerSizeClassObjectCounter { 49 | fn alloc_hook(&mut self, size: usize, _align: usize, _offset: usize) { 50 | if !self.running { 51 | return; 52 | } 53 | 54 | let stats = &(VM::VMActivePlan::global().base()).stats; 55 | let size_class = format!("size{}", self.size_class(size)); 56 | let mut size_classes = self.size_classes.lock().unwrap(); 57 | let c = size_classes.get_mut(&size_class); 58 | match c { 59 | None => { 60 | // Create (and increment) the counter associated with the size class if it doesn't exist 61 | let ctr = new_ctr!(stats, size_classes, size_class); 62 | ctr.lock().unwrap().inc(); 63 | } 64 | Some(ctr) => { 65 | // Increment counter associated with the size class 66 | ctr.lock().unwrap().inc(); 67 | } 68 | } 69 | } 70 | 71 | fn set_running(&mut self, running: bool) { 72 | self.running = running; 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/util/edge_logger.rs: -------------------------------------------------------------------------------- 1 | //! This is a simple module to log edges and check for duplicate edges. 2 | //! 3 | //! It uses a hash-set to keep track of edge, and is so very expensive. 4 | //! We currently only use this as part of the `extreme_assertions` feature. 
5 | //! 6 | 7 | use crate::plan::Plan; 8 | use crate::vm::edge_shape::Edge; 9 | use crate::vm::VMBinding; 10 | use std::collections::HashSet; 11 | use std::sync::RwLock; 12 | 13 | pub struct EdgeLogger { 14 | // A private hash-set to keep track of edges. 15 | edge_log: RwLock>, 16 | } 17 | 18 | unsafe impl Sync for EdgeLogger {} 19 | 20 | impl EdgeLogger { 21 | pub fn new() -> Self { 22 | Self { 23 | edge_log: Default::default(), 24 | } 25 | } 26 | 27 | /// Logs an edge. 28 | /// Panics if the edge was already logged. 29 | /// 30 | /// # Arguments 31 | /// 32 | /// * `edge` - The edge to log. 33 | /// 34 | pub fn log_edge(&self, edge: ES) { 35 | trace!("log_edge({:?})", edge); 36 | let mut edge_log = self.edge_log.write().unwrap(); 37 | assert!( 38 | edge_log.insert(edge), 39 | "duplicate edge ({:?}) detected", 40 | edge 41 | ); 42 | } 43 | 44 | /// Reset the edge logger by clearing the hash-set of edges. 45 | /// This function is called at the end of each GC iteration. 46 | /// 47 | pub fn reset(&self) { 48 | let mut edge_log = self.edge_log.write().unwrap(); 49 | edge_log.clear(); 50 | } 51 | } 52 | 53 | /// Whether we should check duplicate edges. This depends on the actual plan. 54 | pub fn should_check_duplicate_edges(plan: &dyn Plan) -> bool { 55 | // If a plan allows tracing duplicate edges, we should not run this check. 56 | !plan.constraints().may_trace_duplicate_edges 57 | } 58 | -------------------------------------------------------------------------------- /src/util/erase_vm.rs: -------------------------------------------------------------------------------- 1 | //! MMTk uses [`crate::vm::VMBinding`], which allows us to call into bindings 2 | //! with little overhead. As a result, some types in MMTk are generic types with a type parameter ``. 3 | //! However, in some cases, using generic types is not allowed. For example, in an object-safe trait, 4 | //! the methods cannot be generic, thus the method's parameters cannot be generic types. 5 | //! 
6 | //! This module defines macros that can be used to create a special ref type that erases the type parameter. 7 | //! For example, we create a type `TErasedRef` for `&T`. `TErasedRef` has no type parameter, and 8 | //! can be used in places where a type parameter is undesired. The type `TErasedRef` can be cast back to `&T` 9 | //! when we supply a type parameter ``. This works under the assumption that 10 | //! one MMTk process should only have one VM type. In such a case, when we cast from a `&T` to `TErasedRef`, and 11 | //! cast back to `&T`, the type parameter is guaranteed to be the same. Thus the casting is correct. 12 | //! 13 | //! `TErasedRef` has the same lifetime as `&T`. 14 | 15 | macro_rules! define_erased_vm_mut_ref { 16 | ($new_type: ident = $orig_type: ty) => { 17 | pub struct $new_type<'a>(usize, PhantomData<&'a ()>); 18 | impl<'a> $new_type<'a> { 19 | pub fn new(r: &'a mut $orig_type) -> Self { 20 | Self(unsafe { std::mem::transmute(r) }, PhantomData) 21 | } 22 | pub fn into_mut(self) -> &'a mut $orig_type { 23 | unsafe { std::mem::transmute(self.0) } 24 | } 25 | } 26 | }; 27 | } 28 | 29 | pub(crate) use define_erased_vm_mut_ref; 30 | -------------------------------------------------------------------------------- /src/util/heap/accounting.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | use std::sync::atomic::Ordering; 3 | 4 | /// The struct is used for page usage. 5 | /// Both page resource and side metadata uses this struct to do page accounting. 6 | pub struct PageAccounting { 7 | /// The reserved pages. This should be incremented when we are about to allocate pages. 8 | /// Note this is different than quarantining address range. We do not count for quarantined 9 | /// memory. 10 | reserved: AtomicUsize, 11 | /// The committed pages. This should be incremented when we successfully allocate pages from the OS. 
12 | committed: AtomicUsize, 13 | } 14 | 15 | impl PageAccounting { 16 | pub fn new() -> Self { 17 | Self { 18 | reserved: AtomicUsize::new(0), 19 | committed: AtomicUsize::new(0), 20 | } 21 | } 22 | 23 | /// Inform of both reserving and committing a certain number of pages. 24 | pub fn reserve_and_commit(&self, pages: usize) { 25 | self.reserved.fetch_add(pages, Ordering::Relaxed); 26 | self.committed.fetch_add(pages, Ordering::Relaxed); 27 | } 28 | 29 | /// Inform of reserving a certain number of pages. Usually this is called before attempting 30 | /// to allocate memory. 31 | pub fn reserve(&self, pages: usize) { 32 | self.reserved.fetch_add(pages, Ordering::Relaxed); 33 | } 34 | 35 | /// Inform of clearing some reserved pages. This is used when we have reserved some pages but 36 | /// the allocation cannot be satisfied. We can call this to clear the number of reserved pages, 37 | /// so later we can reserve and attempt again. 38 | pub fn clear_reserved(&self, pages: usize) { 39 | let _prev = self.reserved.fetch_sub(pages, Ordering::Relaxed); 40 | debug_assert!(_prev >= pages); 41 | } 42 | 43 | /// Inform of successfully committing a certain number of pages. This is used after we have reserved 44 | /// pages and successfully allocated those memory. 45 | pub fn commit(&self, pages: usize) { 46 | self.committed.fetch_add(pages, Ordering::Relaxed); 47 | } 48 | 49 | /// Inform of releasing a certain number of pages. The number of pages will be deducted from 50 | /// both reserved and committed pages. 51 | pub fn release(&self, pages: usize) { 52 | let _prev_reserved = self.reserved.fetch_sub(pages, Ordering::Relaxed); 53 | debug_assert!(_prev_reserved >= pages); 54 | 55 | let _prev_committed = self.committed.fetch_sub(pages, Ordering::Relaxed); 56 | debug_assert!(_prev_committed >= pages); 57 | } 58 | 59 | /// Set both reserved and committed pages to zero. This is only used when we completely clear a space. 
60 | pub fn reset(&self) { 61 | self.reserved.store(0, Ordering::Relaxed); 62 | self.committed.store(0, Ordering::Relaxed); 63 | } 64 | 65 | pub fn get_reserved_pages(&self) -> usize { 66 | self.reserved.load(Ordering::Relaxed) 67 | } 68 | 69 | pub fn get_committed_pages(&self) -> usize { 70 | self.committed.load(Ordering::Relaxed) 71 | } 72 | } 73 | 74 | impl Default for PageAccounting { 75 | fn default() -> Self { 76 | Self::new() 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/util/heap/heap_meta.rs: -------------------------------------------------------------------------------- 1 | use crate::util::heap::layout::vm_layout_constants::{HEAP_END, HEAP_START}; 2 | use crate::util::Address; 3 | 4 | pub struct HeapMeta { 5 | pub heap_cursor: Address, 6 | pub heap_limit: Address, 7 | } 8 | 9 | impl HeapMeta { 10 | pub fn new() -> Self { 11 | HeapMeta { 12 | heap_cursor: HEAP_START, 13 | heap_limit: HEAP_END, 14 | } 15 | } 16 | 17 | pub fn reserve(&mut self, extent: usize, top: bool) -> Address { 18 | let ret = if top { 19 | self.heap_limit -= extent; 20 | self.heap_limit 21 | } else { 22 | let start = self.heap_cursor; 23 | self.heap_cursor += extent; 24 | start 25 | }; 26 | 27 | assert!( 28 | self.heap_cursor <= self.heap_limit, 29 | "Out of virtual address space at {} ({} > {})", 30 | self.heap_cursor - extent, 31 | self.heap_cursor, 32 | self.heap_limit 33 | ); 34 | 35 | ret 36 | } 37 | 38 | pub fn get_discontig_start(&self) -> Address { 39 | self.heap_cursor 40 | } 41 | 42 | pub fn get_discontig_end(&self) -> Address { 43 | self.heap_limit - 1 44 | } 45 | } 46 | 47 | // make clippy happy 48 | impl Default for HeapMeta { 49 | fn default() -> Self { 50 | Self::new() 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/util/heap/layout/heap_parameters.rs: -------------------------------------------------------------------------------- 1 | /** 2 | * log_2 of 
the maximum number of spaces a Plan can support. 3 | */ 4 | pub const LOG_MAX_SPACES: usize = 4; 5 | 6 | /** 7 | * Maximum number of spaces a Plan can support. 8 | */ 9 | pub const MAX_SPACES: usize = 1 << LOG_MAX_SPACES; 10 | 11 | /** 12 | * In a 64-bit addressing model, each space is the same size, given 13 | * by this constant. At the moment, we require that the number of 14 | * pages in a space fit into a 32-bit signed int, so the maximum 15 | * size of this constant is 41 (assuming 4k pages). 16 | */ 17 | pub const LOG_SPACE_SIZE_64: usize = 41; 18 | -------------------------------------------------------------------------------- /src/util/heap/layout/map.rs: -------------------------------------------------------------------------------- 1 | use crate::util::freelist::FreeList; 2 | use crate::util::heap::freelistpageresource::CommonFreeListPageResource; 3 | use crate::util::heap::space_descriptor::SpaceDescriptor; 4 | use crate::util::Address; 5 | 6 | pub trait VMMap: Sync { 7 | fn insert(&self, start: Address, extent: usize, descriptor: SpaceDescriptor); 8 | 9 | /// Create a free-list for a discontiguous space. Must only be called at boot time. 10 | /// bind_freelist() must be called by the caller after this method. 11 | fn create_freelist(&self, start: Address) -> Box; 12 | 13 | /// Create a free-list for a contiguous space. Must only be called at boot time. 14 | /// bind_freelist() must be called by the caller after this method. 15 | fn create_parent_freelist(&self, start: Address, units: usize, grain: i32) 16 | -> Box; 17 | 18 | /// Bind a created freelist with the page resource. 19 | /// This must called after create_freelist() or create_parent_freelist(). 
20 | fn bind_freelist(&self, pr: &'static CommonFreeListPageResource); 21 | 22 | fn allocate_contiguous_chunks( 23 | &self, 24 | descriptor: SpaceDescriptor, 25 | chunks: usize, 26 | head: Address, 27 | ) -> Address; 28 | 29 | fn get_next_contiguous_region(&self, start: Address) -> Address; 30 | 31 | fn get_contiguous_region_chunks(&self, start: Address) -> usize; 32 | 33 | fn get_contiguous_region_size(&self, start: Address) -> usize; 34 | 35 | /// Return the total number of chunks available (unassigned) within the range of virtual memory 36 | /// apportioned to discontiguous spaces. 37 | fn get_available_discontiguous_chunks(&self) -> usize; 38 | 39 | /// Return the total number of clients contending for chunks. This is useful when establishing 40 | /// conservative bounds on the number of remaining chunks. 41 | fn get_chunk_consumer_count(&self) -> usize; 42 | 43 | fn free_all_chunks(&self, any_chunk: Address); 44 | 45 | fn free_contiguous_chunks(&self, start: Address) -> usize; 46 | 47 | fn boot(&self) {} 48 | 49 | fn finalize_static_space_map(&self, from: Address, to: Address); 50 | 51 | fn is_finalized(&self) -> bool; 52 | 53 | fn get_descriptor_for_address(&self, address: Address) -> SpaceDescriptor; 54 | 55 | fn add_to_cumulative_committed_pages(&self, pages: usize); 56 | } 57 | -------------------------------------------------------------------------------- /src/util/heap/layout/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod heap_parameters; 2 | pub mod vm_layout_constants; 3 | 4 | mod mmapper; 5 | pub use self::mmapper::Mmapper; 6 | mod byte_map_mmapper; 7 | #[cfg(target_pointer_width = "64")] 8 | mod fragmented_mapper; 9 | 10 | mod map; 11 | pub use self::map::VMMap; 12 | mod map32; 13 | #[cfg(target_pointer_width = "64")] 14 | mod map64; 15 | 16 | #[cfg(target_pointer_width = "32")] 17 | pub fn create_vm_map() -> Box { 18 | Box::new(map32::Map32::new()) 19 | } 20 | 21 | #[cfg(target_pointer_width = 
"64")] 22 | pub fn create_vm_map() -> Box { 23 | // TODO: Map32 for compressed pointers 24 | Box::new(map64::Map64::new()) 25 | } 26 | 27 | #[cfg(target_pointer_width = "32")] 28 | pub fn create_mmapper() -> Box { 29 | Box::new(byte_map_mmapper::ByteMapMmapper::new()) 30 | } 31 | 32 | #[cfg(target_pointer_width = "64")] 33 | pub fn create_mmapper() -> Box { 34 | // TODO: ByteMapMmapper for 39-bit or less virtual space 35 | Box::new(fragmented_mapper::FragmentedMapper::new()) 36 | } 37 | 38 | use crate::util::Address; 39 | use std::ops::Range; 40 | 41 | /// The heap range between HEAP_START and HEAP_END 42 | /// Heap range include the availble range, but may include some address ranges 43 | /// that we count as part of the heap but we do not allocate into, such as 44 | /// VM spaces. However, currently, heap range is the same as available range. 45 | pub const fn heap_range() -> Range

{ 46 | vm_layout_constants::HEAP_START..vm_layout_constants::HEAP_END 47 | } 48 | 49 | /// The avialable heap range between AVAILABLE_START and AVAILABLE_END. 50 | /// Available range is what MMTk may allocate into. 51 | pub const fn available_range() -> Range
{ 52 | vm_layout_constants::AVAILABLE_START..vm_layout_constants::AVAILABLE_END 53 | } 54 | -------------------------------------------------------------------------------- /src/util/heap/mod.rs: -------------------------------------------------------------------------------- 1 | mod accounting; 2 | #[macro_use] 3 | pub mod layout; 4 | pub mod blockpageresource; 5 | pub mod chunk_map; 6 | pub mod freelistpageresource; 7 | pub mod gc_trigger; 8 | mod heap_meta; 9 | pub mod monotonepageresource; 10 | pub mod pageresource; 11 | pub mod space_descriptor; 12 | mod vmrequest; 13 | 14 | pub use self::accounting::PageAccounting; 15 | pub use self::blockpageresource::BlockPageResource; 16 | pub use self::freelistpageresource::FreeListPageResource; 17 | pub use self::heap_meta::HeapMeta; 18 | pub use self::monotonepageresource::MonotonePageResource; 19 | pub use self::pageresource::PageResource; 20 | pub use self::vmrequest::VMRequest; 21 | -------------------------------------------------------------------------------- /src/util/heap/vmrequest.rs: -------------------------------------------------------------------------------- 1 | use super::layout::vm_layout_constants::*; 2 | use crate::util::constants::*; 3 | use crate::util::Address; 4 | 5 | #[derive(Clone, Copy, Debug)] 6 | pub enum VMRequest { 7 | Discontiguous, 8 | Fixed { start: Address, extent: usize }, 9 | Extent { extent: usize, top: bool }, 10 | Fraction { frac: f32, top: bool }, 11 | } 12 | 13 | impl VMRequest { 14 | pub fn is_discontiguous(&self) -> bool { 15 | matches!(self, VMRequest::Discontiguous { .. 
}) 16 | } 17 | 18 | pub fn common64bit(top: bool) -> Self { 19 | VMRequest::Extent { 20 | extent: MAX_SPACE_EXTENT, 21 | top, 22 | } 23 | } 24 | 25 | pub fn discontiguous() -> Self { 26 | if cfg!(target_pointer_width = "64") { 27 | return Self::common64bit(false); 28 | } 29 | VMRequest::Discontiguous 30 | } 31 | 32 | pub fn fixed_size(mb: usize) -> Self { 33 | if cfg!(target_pointer_width = "64") { 34 | return Self::common64bit(false); 35 | } 36 | VMRequest::Extent { 37 | extent: mb << LOG_BYTES_IN_MBYTE, 38 | top: false, 39 | } 40 | } 41 | 42 | pub fn fraction(frac: f32) -> Self { 43 | if cfg!(target_pointer_width = "64") { 44 | return Self::common64bit(false); 45 | } 46 | VMRequest::Fraction { frac, top: false } 47 | } 48 | 49 | pub fn high_fixed_size(mb: usize) -> Self { 50 | if cfg!(target_pointer_width = "64") { 51 | return Self::common64bit(true); 52 | } 53 | VMRequest::Extent { 54 | extent: mb << LOG_BYTES_IN_MBYTE, 55 | top: true, 56 | } 57 | } 58 | 59 | pub fn fixed_extent(extent: usize, top: bool) -> Self { 60 | if cfg!(target_pointer_width = "64") { 61 | return Self::common64bit(top); 62 | } 63 | VMRequest::Extent { extent, top } 64 | } 65 | 66 | pub fn fixed(start: Address, extent: usize) -> Self { 67 | VMRequest::Fixed { start, extent } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/util/is_mmtk_object.rs: -------------------------------------------------------------------------------- 1 | /// The region size (in bytes) of the `VO_BIT` side metadata. 2 | /// The VM can use this to check if an object is properly aligned. 
pub const VO_BIT_REGION_SIZE: usize =
    1usize << crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC.log_bytes_in_region;
--------------------------------------------------------------------------------
/src/util/logger.rs:
--------------------------------------------------------------------------------
use log::{self, SetLoggerError};

/// Attempt to init a env_logger for MMTk.
pub fn try_init() -> Result<(), SetLoggerError> {
    env_logger::try_init_from_env(
        // By default, use info level logging.
        env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
    )
}
--------------------------------------------------------------------------------
/src/util/malloc/library.rs:
--------------------------------------------------------------------------------
// Export one of the malloc libraries.

#[cfg(feature = "malloc_hoard")]
pub use self::hoard::*;
#[cfg(feature = "malloc_jemalloc")]
pub use self::jemalloc::*;
#[cfg(not(any(
    feature = "malloc_jemalloc",
    feature = "malloc_mimalloc",
    feature = "malloc_hoard",
)))]
pub use self::libc_malloc::*;
#[cfg(feature = "malloc_mimalloc")]
pub use self::mimalloc::*;

/// When we count page usage of library malloc, we assume they allocate in pages. For some malloc implementations,
/// they may use a larger page (e.g. mimalloc's 64K page). For libraries that we are not sure, we assume they use
/// normal 4k pages.
pub const BYTES_IN_MALLOC_PAGE: usize = 1 << LOG_BYTES_IN_MALLOC_PAGE;

// Different malloc libraries

// TODO: We should conditionally include some methods in the module, such as posix extension and GNU extension.

#[cfg(feature = "malloc_jemalloc")]
mod jemalloc {
    // Normal 4K page
    pub const LOG_BYTES_IN_MALLOC_PAGE: u8 = crate::util::constants::LOG_BYTES_IN_PAGE;
    // ANSI C
    pub use jemalloc_sys::{calloc, free, malloc, realloc};
    // Posix
    pub use jemalloc_sys::posix_memalign;
    // GNU
    pub use jemalloc_sys::malloc_usable_size;
}

#[cfg(feature = "malloc_mimalloc")]
mod mimalloc {
    // Normal 4K page accounting
    // NOTE(review): mimalloc internally uses 64K pages (see the doc on
    // BYTES_IN_MALLOC_PAGE above), but accounting here uses the normal 4K page.
    pub const LOG_BYTES_IN_MALLOC_PAGE: u8 = crate::util::constants::LOG_BYTES_IN_PAGE;
    // ANSI C
    pub use mimalloc_sys::{
        mi_calloc as calloc, mi_free as free, mi_malloc as malloc, mi_realloc as realloc,
    };
    // Posix
    pub use mimalloc_sys::mi_posix_memalign as posix_memalign;
    // GNU
    pub use mimalloc_sys::mi_malloc_usable_size as malloc_usable_size;
}

#[cfg(feature = "malloc_hoard")]
mod hoard {
    // Normal 4K page
    pub const LOG_BYTES_IN_MALLOC_PAGE: u8 = crate::util::constants::LOG_BYTES_IN_PAGE;
    // ANSI C
    pub use hoard_sys::{calloc, free, malloc, realloc};
    // Posix
    pub use hoard_sys::posix_memalign;
    // GNU
    pub use hoard_sys::malloc_usable_size;
}

/// If no malloc lib is specified, use the libc implementation
#[cfg(not(any(
    feature = "malloc_jemalloc",
    feature = "malloc_mimalloc",
    feature = "malloc_hoard",
)))]
mod libc_malloc {
    // Normal 4K page
    pub const LOG_BYTES_IN_MALLOC_PAGE: u8 = crate::util::constants::LOG_BYTES_IN_PAGE;
    // ANSI C
    pub use libc::{calloc, free, malloc, realloc};
    // Posix
    pub use libc::posix_memalign;
    // GNU
    #[cfg(target_os = "linux")]
    pub use libc::malloc_usable_size;
    // macOS has no malloc_usable_size; malloc_size is the equivalent.
    #[cfg(target_os = "macos")]
    extern "C" {
        pub fn malloc_size(ptr: *const libc::c_void) -> usize;
    }
    #[cfg(target_os = "macos")]
    pub use self::malloc_size as malloc_usable_size;
}
--------------------------------------------------------------------------------
/src/util/malloc/mod.rs:
--------------------------------------------------------------------------------
/// Malloc provided by libraries
pub(crate) mod library;
/// Using malloc as mark sweep free-list allocator
pub mod malloc_ms_util;

use crate::util::Address;
#[cfg(feature = "malloc_counted_size")]
use crate::vm::VMBinding;
#[cfg(feature = "malloc_counted_size")]
use crate::MMTK;

// The following expose a set of malloc API. They are currently implemented with
// the library malloc. When we have native malloc implementation, we should change
// their implementation to point to our native malloc.

// We have two versions for each function:
// * a normal version: it has the signature that is compatible with the standard malloc library.
// * a counted version: the allocated/freed bytes are calculated into MMTk's heap. So extra arguments
//   are needed to maintain allocated bytes properly. The API is inspired by Julia's counted malloc.
//   The counted version is only available with the feature `malloc_counted_size`.

/// Allocate `size` bytes with the library malloc.
/// Returns a zero address if the allocation fails.
pub fn malloc(size: usize) -> Address {
    Address::from_mut_ptr(unsafe { self::library::malloc(size) })
}

/// Counted version of [`malloc`]: successfully allocated bytes are accounted
/// into MMTk's heap.
#[cfg(feature = "malloc_counted_size")]
pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
    let res = malloc(size);
    // Only count the bytes if the allocation actually succeeded.
    if !res.is_zero() {
        mmtk.plan.base().increase_malloc_bytes_by(size);
    }
    res
}

/// Allocate zero-initialized memory for `num` elements of `size` bytes each.
pub fn calloc(num: usize, size: usize) -> Address {
    Address::from_mut_ptr(unsafe { self::library::calloc(num, size) })
}

/// Counted version of [`calloc`].
#[cfg(feature = "malloc_counted_size")]
pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
    let res = calloc(num, size);
    if !res.is_zero() {
        // `num * size` cannot overflow here: calloc itself fails (returns null)
        // when the product overflows, and we only account on success.
        mmtk.plan.base().increase_malloc_bytes_by(num * size);
    }
    res
}

/// Resize the allocation at `addr` to `size` bytes with the library realloc.
pub fn realloc(addr: Address, size: usize) -> Address {
    Address::from_mut_ptr(unsafe { self::library::realloc(addr.to_mut_ptr(), size) })
}

/// Counted version of [`realloc`]. The caller supplies `old_size` (the size of the
/// old allocation) so the accounting can subtract it before adding the new size.
#[cfg(feature = "malloc_counted_size")]
pub fn realloc_with_old_size<VM: VMBinding>(
    mmtk: &MMTK<VM>,
    addr: Address,
    size: usize,
    old_size: usize,
) -> Address {
    let res = realloc(addr, size);

    // A non-null input pointer was freed (or moved) by realloc.
    if !addr.is_zero() {
        mmtk.plan.base().decrease_malloc_bytes_by(old_size);
    }
    // Count the new allocation only if realloc succeeded.
    if size != 0 && !res.is_zero() {
        mmtk.plan.base().increase_malloc_bytes_by(size);
    }

    res
}

/// Free the allocation at `addr` with the library free.
pub fn free(addr: Address) {
    unsafe { self::library::free(addr.to_mut_ptr()) }
}

/// Counted version of [`free`]. The caller supplies `old_size` (the size of the
/// allocation being freed) for the accounting.
#[cfg(feature = "malloc_counted_size")]
pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
    free(addr);
    if !addr.is_zero() {
        mmtk.plan.base().decrease_malloc_bytes_by(old_size);
    }
}
--------------------------------------------------------------------------------
/src/util/metadata/log_bit.rs:
--------------------------------------------------------------------------------
use crate::util::ObjectReference;
use crate::vm::VMBinding;
crate::vm::VMGlobalLogBitSpec; 4 | use std::sync::atomic::Ordering; 5 | 6 | use super::MetadataSpec; 7 | 8 | impl VMGlobalLogBitSpec { 9 | /// Mark the log bit as unlogged (1 means unlogged) 10 | pub fn mark_as_unlogged(&self, object: ObjectReference, order: Ordering) { 11 | self.store_atomic::(object, 1, None, order) 12 | } 13 | 14 | /// Mark the entire byte as unlogged if the log bit is in the side metadata. As it marks the entire byte, 15 | /// it may unlog adjacent objects. This method should only be used 16 | /// when adjacent objects are also in the mature space, and there is no harm if we also unlog them. 17 | /// This method is meant to be an optimization, and can always be replaced with `mark_as_unlogged`. 18 | pub fn mark_byte_as_unlogged(&self, object: ObjectReference, order: Ordering) { 19 | match self.as_spec() { 20 | // If the log bit is in the header, there is nothing we can do. We just call `mark_as_unlogged`. 21 | MetadataSpec::InHeader(_) => self.mark_as_unlogged::(object, order), 22 | // If the log bit is in the side metadata, we can simply set the entire byte to 0xff. Because we 23 | // know we are setting log bit for mature space, and every object in the space should have log 24 | // bit as 1. 
25 | MetadataSpec::OnSide(spec) => unsafe { 26 | spec.set_raw_byte_atomic(object.to_address::(), order) 27 | }, 28 | } 29 | } 30 | 31 | pub fn is_unlogged(&self, object: ObjectReference, order: Ordering) -> bool { 32 | self.load_atomic::(object, None, order) == 1 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/util/metadata/pin_bit.rs: -------------------------------------------------------------------------------- 1 | use crate::util::ObjectReference; 2 | use crate::vm::VMBinding; 3 | use crate::vm::VMLocalPinningBitSpec; 4 | use std::sync::atomic::Ordering; 5 | 6 | impl VMLocalPinningBitSpec { 7 | /// Pin the object 8 | pub fn pin_object(&self, object: ObjectReference) -> bool { 9 | let res = self.compare_exchange_metadata::( 10 | object, 11 | 0, 12 | 1, 13 | None, 14 | Ordering::SeqCst, 15 | Ordering::SeqCst, 16 | ); 17 | 18 | res.is_ok() 19 | } 20 | 21 | pub fn unpin_object(&self, object: ObjectReference) -> bool { 22 | let res = self.compare_exchange_metadata::( 23 | object, 24 | 1, 25 | 0, 26 | None, 27 | Ordering::SeqCst, 28 | Ordering::SeqCst, 29 | ); 30 | 31 | res.is_ok() 32 | } 33 | 34 | pub fn is_object_pinned(&self, object: ObjectReference) -> bool { 35 | if unsafe { self.load::(object, None) == 1 } { 36 | return true; 37 | } 38 | 39 | false 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/util/metadata/side_metadata/mod.rs: -------------------------------------------------------------------------------- 1 | mod constants; 2 | mod helpers; 3 | #[cfg(target_pointer_width = "32")] 4 | mod helpers_32; 5 | 6 | mod global; 7 | mod sanity; 8 | mod side_metadata_tests; 9 | pub(crate) mod spec_defs; 10 | 11 | pub use constants::*; 12 | pub use global::*; 13 | pub use helpers::*; 14 | #[cfg(target_pointer_width = "32")] 15 | pub use helpers_32::*; 16 | pub use sanity::SideMetadataSanity; 17 | 
--------------------------------------------------------------------------------
/src/util/mod.rs:
--------------------------------------------------------------------------------
//! Utilities used by other modules, including allocators, heap implementation, etc.

// Allow unused code in the util mod. We may have some functions that are not in use,
// but will be useful in future implementations.
#![allow(dead_code)]

// The following modules are public. MMTk bindings can use them to help implementation.

/// An abstract of memory address and object reference.
pub mod address;
/// Allocators
// This module is made public so the binding could implement allocator slowpaths if they would like to.
pub mod alloc;
/// Constants used in MMTk
pub mod constants;
/// Calculation, conversion and rounding for memory related numbers.
pub mod conversions;
/// The copy allocators for a GC worker.
pub mod copy;
/// Linear scan through a heap range
pub mod linear_scan;
/// Wrapper functions for memory syscalls such as mmap, mprotect, etc.
pub mod memory;
/// Opaque pointers used in MMTk, e.g. VMThread.
pub mod opaque_pointer;
/// MMTk command line options.
pub mod options;
/// Reference processing implementation.
pub mod reference_processor;

// The following modules are only public in the mmtk crate. They should only be used in MMTk core.
/// An analysis framework for collecting data and profiling in GC.
#[cfg(feature = "analysis")]
pub(crate) mod analysis;
/// Logging edges to check duplicated edges in GC.
#[cfg(feature = "extreme_assertions")]
pub(crate) mod edge_logger;
/// Non-generic refs to generic types of `MMTK<VM>`.
pub(crate) mod erase_vm;
/// Finalization implementation.
pub(crate) mod finalizable_processor;
/// Heap implementation, including page resource, mmapper, etc.
pub(crate) mod heap;
/// VO-bit based support for the `is_mmtk_object` API.
#[cfg(feature = "is_mmtk_object")]
pub mod is_mmtk_object;
/// Logger initialization
pub(crate) mod logger;
/// Various malloc implementations (conditionally compiled by features)
pub mod malloc;
/// Metadata (OnSide or InHeader) implementation.
pub mod metadata;
/// Forwarding word in object copying.
pub(crate) mod object_forwarding;
/// Utilities functions for Rust
pub(crate) mod rust_util;
/// Sanity checker for GC.
#[cfg(feature = "sanity")]
pub(crate) mod sanity;
/// Utils for collecting statistics.
pub(crate) mod statistics;
/// Test utilities.
#[cfg(test)]
pub(crate) mod test_util;
/// A treadmill implementation.
pub(crate) mod treadmill;

// These modules are private. They are only used by other util modules.

/// A very simple, generic malloc-free allocator
mod freelist;
/// Implementation of GenericFreeList by an int vector.
mod int_array_freelist;
/// Implementation of GenericFreeList backed by raw memory, allocated
/// on demand direct from the OS (via mmap).
mod raw_memory_freelist;

pub use self::address::Address;
pub use self::address::ObjectReference;
pub use self::opaque_pointer::*;
pub use self::reference_processor::ReferenceProcessor;
--------------------------------------------------------------------------------
/src/util/opaque_pointer.rs:
--------------------------------------------------------------------------------
use crate::util::Address;
use libc::c_void;

// OpaquePointer does not provide any method for dereferencing, as we should not dereference it in MMTk.
// However, there are occurrences that we may need to dereference tls in the VM binding code.
// In JikesRVM's implementation of ActivePlan, we need to dereference tls to get mutator and collector context.
// This is done by transmute (unsafe).
8 | #[repr(transparent)] 9 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 10 | pub struct OpaquePointer(*mut c_void); 11 | 12 | // We never really dereference an opaque pointer in mmtk-core. 13 | unsafe impl Sync for OpaquePointer {} 14 | unsafe impl Send for OpaquePointer {} 15 | 16 | impl Default for OpaquePointer { 17 | fn default() -> Self { 18 | Self::UNINITIALIZED 19 | } 20 | } 21 | 22 | impl OpaquePointer { 23 | pub const UNINITIALIZED: Self = Self(0 as *mut c_void); 24 | 25 | pub fn from_address(addr: Address) -> Self { 26 | OpaquePointer(addr.to_mut_ptr::()) 27 | } 28 | 29 | pub fn to_address(self) -> Address { 30 | Address::from_mut_ptr(self.0) 31 | } 32 | 33 | pub fn is_null(self) -> bool { 34 | self.0.is_null() 35 | } 36 | } 37 | 38 | /// A VMThread is an opaque pointer that can uniquely identify a thread in the VM. 39 | /// A VM binding may use thread pointers or thread IDs as VMThreads. MMTk does not make any assumption on this. 40 | /// This is used as arguments in the VM->MMTk APIs, and MMTk may store it and pass it back through the MMTk->VM traits, 41 | /// so the VM knows the context. 42 | /// A VMThread may be a VMMutatorThread, a VMWorkerThread, or any VMThread. 43 | #[repr(transparent)] 44 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 45 | pub struct VMThread(pub OpaquePointer); 46 | 47 | impl VMThread { 48 | pub const UNINITIALIZED: Self = Self(OpaquePointer::UNINITIALIZED); 49 | } 50 | 51 | /// A VMMutatorThread is a VMThread that associates with a [`crate::plan::Mutator`]. 52 | /// When a VMMutatorThread is used as an argument or a field of a type, it generally means 53 | /// the function or the functions for the type is executed in the context of the mutator thread. 54 | #[repr(transparent)] 55 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 56 | pub struct VMMutatorThread(pub VMThread); 57 | 58 | /// A VMWorkerThread is a VMThread that is associates with a [`crate::scheduler::GCWorker`]. 
59 | /// When a VMWorkerThread is used as an argument or a field of a type, it generally means 60 | /// the function or the functions for the type is executed in the context of the mutator thread. 61 | #[repr(transparent)] 62 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 63 | pub struct VMWorkerThread(pub VMThread); 64 | -------------------------------------------------------------------------------- /src/util/rust_util/zeroed_alloc.rs: -------------------------------------------------------------------------------- 1 | //! This module is for allocating large arrays or vectors with initial zero values. 2 | //! 3 | //! Note: The standard library uses the `IsZero` trait to specialize the intialization of `Vec` 4 | //! if the initial element values are zero. Primitive type, such as `i8`, `usize`, `f32`, as well 5 | //! as types with known representations such as `Option` implement the `IsZero` 6 | //! trait. However, it has several limitations. 7 | //! 8 | //! 1. Composite types, such as `SpaceDescriptor(usize)`, doesn't implement the `IsZero` trait, 9 | //! even if it has the `#[repr(transparent)]` annotation. 10 | //! 2. The `IsZero` trait is private to the `std` module, and we cannot use it. 11 | //! 12 | //! Therefore, `vec![0usize; 33554432]` takes only 4 **microseconds**, while 13 | //! `vec![SpaceDescriptor(0); 33554432]` will take 22 **milliseconds** to execute on some machine. 14 | //! If such an allocation happens during start-up, the delay will be noticeable to light-weight 15 | //! scripting languages, such as Ruby. 16 | //! 17 | //! We implement our own fast allocation of large zeroed vectors in this module. If one day Rust 18 | //! provides a standard way to optimize for zeroed allocation of vectors of composite types, we 19 | //! can switch to the standard mechanism. 20 | use std::alloc::{alloc_zeroed, Layout}; 21 | 22 | /// Allocate a `Vec` of all-zero values. 23 | /// 24 | /// This intends to be a faster alternative to `vec![T(0), size]`. 
It will allocate pre-zeroed 25 | /// buffer, and not store zero values to its elements as part of initialization. 26 | /// 27 | /// It is useful when creating large (hundreds of megabytes) Vecs when the execution time is 28 | /// critical (such as during start-up, where a 100ms delay is obvious to small applications.) 29 | /// However, because of its unsafe nature, it should only be used when necessary. 30 | /// 31 | /// Arguments: 32 | /// 33 | /// - `T`: The element type. 34 | /// - `size`: The length and capacity of the created vector. 35 | /// 36 | /// Returns the created vector. 37 | /// 38 | /// # Unsafe 39 | /// 40 | /// This function is unsafe. It will not call any constructor of `T`. The user must ensure 41 | /// that a value with all bits being zero is meaningful for type `T`. 42 | pub(crate) unsafe fn new_zeroed_vec(size: usize) -> Vec { 43 | let layout = Layout::array::(size).unwrap(); 44 | let ptr = alloc_zeroed(layout) as *mut T; 45 | Vec::from_raw_parts(ptr, size, size) 46 | } 47 | -------------------------------------------------------------------------------- /src/util/sanity/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod sanity_checker; 2 | -------------------------------------------------------------------------------- /src/util/statistics/counter/perf_event.rs: -------------------------------------------------------------------------------- 1 | use super::Diffable; 2 | use pfm::{PerfEvent, PerfEventValue}; 3 | 4 | /// A [`Diffable`] helper type for measuring overall perf events for mutators 5 | /// and GC 6 | /// This is the process-wide counterpart of [`crate::scheduler::work_counter::WorkPerfEvent`]. 
7 | pub struct PerfEventDiffable { 8 | pe: PerfEvent, 9 | } 10 | 11 | impl PerfEventDiffable { 12 | pub fn new(name: &str) -> Self { 13 | let mut pe = PerfEvent::new(name, true) 14 | .unwrap_or_else(|_| panic!("Failed to create perf event {}", name)); 15 | // measures the calling thread (and all child threads) on all CPUs 16 | pe.open(0, -1) 17 | .unwrap_or_else(|_| panic!("Failed to open perf event {}", name)); 18 | PerfEventDiffable { pe } 19 | } 20 | } 21 | 22 | impl Diffable for PerfEventDiffable { 23 | type Val = PerfEventValue; 24 | 25 | fn start(&mut self) { 26 | self.pe.reset().expect("Failed to reset perf evet"); 27 | self.pe.enable().expect("Failed to enable perf evet"); 28 | } 29 | 30 | fn stop(&mut self) { 31 | self.pe.disable().expect("Failed to disable perf evet"); 32 | } 33 | 34 | fn current_value(&mut self) -> Self::Val { 35 | let val = self.pe.read().expect("Failed to read perf evet"); 36 | assert_eq!(val.time_enabled, val.time_running, "perf event multiplexed"); 37 | val 38 | } 39 | 40 | fn diff(current: &Self::Val, earlier: &Self::Val) -> u64 { 41 | assert!(current.value >= earlier.value, "perf event overflowed"); 42 | current.value as u64 - earlier.value as u64 43 | } 44 | 45 | fn print_diff(val: u64) { 46 | print!("{}", val); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/util/statistics/counter/size_counter.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use std::sync::Arc; 3 | use std::sync::Mutex; 4 | 5 | pub struct SizeCounter { 6 | units: Arc>, 7 | volume: Arc>, 8 | } 9 | 10 | /** 11 | * This file implements a simple counter of events of different sizes 12 | * (eg object allocations, where total number of objects and total 13 | * volume of objects would be counted). 14 | * 15 | * The counter is trivially composed from two event counters (one for 16 | * counting the number of events, the other for counting the volume). 
17 | */ 18 | impl SizeCounter { 19 | pub fn new(units: Arc>, volume: Arc>) -> Self { 20 | SizeCounter { units, volume } 21 | } 22 | 23 | /** 24 | * Increment the event counter by provided value 25 | */ 26 | pub fn inc(&mut self, size: u64) { 27 | self.units.lock().unwrap().inc(); 28 | self.volume.lock().unwrap().inc_by(size); 29 | } 30 | 31 | /** 32 | * Start this counter 33 | */ 34 | pub fn start(&mut self) { 35 | self.units.lock().unwrap().start(); 36 | self.volume.lock().unwrap().start(); 37 | } 38 | 39 | /** 40 | * Stop this counter 41 | */ 42 | pub fn stop(&mut self) { 43 | self.units.lock().unwrap().stop(); 44 | self.volume.lock().unwrap().stop(); 45 | } 46 | 47 | /** 48 | * Print current (mid-phase) units 49 | */ 50 | pub fn print_current_units(&self) { 51 | self.units.lock().unwrap().print_current(); 52 | } 53 | 54 | /** 55 | * Print (mid-phase) volume 56 | */ 57 | pub fn print_current_volume(&self) { 58 | self.volume.lock().unwrap().print_current(); 59 | } 60 | 61 | /** 62 | * Print units 63 | */ 64 | pub fn print_units(&self) { 65 | self.units.lock().unwrap().print_total(None); 66 | } 67 | 68 | /** 69 | * Print volume 70 | */ 71 | pub fn print_volume(&self) { 72 | self.volume.lock().unwrap().print_total(None); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/util/statistics/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::counter::Counter; 2 | pub use self::counter::Timer; 3 | 4 | pub mod counter; 5 | pub mod stats; 6 | -------------------------------------------------------------------------------- /src/vm/mod.rs: -------------------------------------------------------------------------------- 1 | //! MMTk-to-VM interface: the VMBinding trait. 2 | //! 3 | //! This module provides VM-specific traits that serve as MMTK-to-VM interfaces. 4 | //! Each VM binding needs to provide an implementation for each of the traits. 5 | //! 
//! MMTk requires the interfaces to be efficient, as some of the methods are called frequently
//! during collection (e.g. the methods for `ObjectModel`). We rely on cross-crate *link-time-optimization*
//! to remove the overhead of MMTk invoking methods on those traits.
//!
//! It is recommended for a VM binding that uses mmtk-core to do the following to ensure LTO is enabled for performance.
//! 1. Add the following section in the manifest file of a VM binding (`Cargo.toml`). This enables LTO for the release build:
//!    ```toml
//!    [profile.release]
//!    lto = true
//!    ```
//! 2. Make sure that the crate type for a VM binding supports LTO. To our knowledge, `staticlib` and `cdylib` support LTO, and
//!    `rlib` does *not* support LTO.

use crate::util::constants::*;

mod active_plan;
mod collection;
pub mod edge_shape;
pub(crate) mod object_model;
mod reference_glue;
mod scanning;
pub use self::active_plan::ActivePlan;
pub use self::collection::Collection;
pub use self::collection::GCThreadContext;
pub use self::object_model::specs::*;
pub use self::object_model::ObjectModel;
pub use self::reference_glue::Finalizable;
pub use self::reference_glue::ReferenceGlue;
pub use self::scanning::EdgeVisitor;
pub use self::scanning::ObjectTracer;
pub use self::scanning::ObjectTracerContext;
pub use self::scanning::RootsWorkFactory;
pub use self::scanning::Scanning;

// Default allowed alignments, derived from the machine word sizes.
const DEFAULT_LOG_MIN_ALIGNMENT: usize = LOG_BYTES_IN_INT as usize;
const DEFAULT_LOG_MAX_ALIGNMENT: usize = LOG_BYTES_IN_LONG as usize;

/// The `VMBinding` trait associates the VM-specific implementation types with each
/// MMTk-to-VM trait, and provides VM-specific constants.
pub trait VMBinding
where
    Self: Sized + 'static + Send + Sync + Default,
{
    type VMObjectModel: ObjectModel<Self>;
    type VMScanning: Scanning<Self>;
    type VMCollection: Collection<Self>;
    type VMActivePlan: ActivePlan<Self>;
    type VMReferenceGlue: ReferenceGlue<Self>;

    /// The type of edges in this VM.
    type VMEdge: edge_shape::Edge;
    /// The type of heap memory slice in this VM.
    type VMMemorySlice: edge_shape::MemorySlice;

    /// A value to fill in alignment gaps. This value can be used for debugging.
    const ALIGNMENT_VALUE: usize = 0xdead_beef;
    /// Allowed minimal alignment in bytes.
    const MIN_ALIGNMENT: usize = 1 << DEFAULT_LOG_MIN_ALIGNMENT;
    /// Allowed maximum alignment in bytes.
    const MAX_ALIGNMENT: usize = 1 << DEFAULT_LOG_MAX_ALIGNMENT;
    /// Does the binding use a non-zero allocation offset? If this is false, we expect the binding
    /// to always use offset === 0 for allocation, and we are able to do some optimization if we know
    /// offset === 0.
    const USE_ALLOCATION_OFFSET: bool = true;

    /// This value is used to assert if the cursor is reasonable after allocations.
    /// At the end of an allocation, the allocation cursor should be aligned to this value.
    /// Note that MMTk does not attempt to do anything to align the cursor to this value, but
    /// it merely asserts with this constant.
73 | const ALLOC_END_ALIGNMENT: usize = 1; 74 | } 75 | -------------------------------------------------------------------------------- /tests/test_address.rs: -------------------------------------------------------------------------------- 1 | extern crate mmtk; 2 | 3 | use mmtk::util::Address; 4 | 5 | #[test] 6 | fn test_align_up() { 7 | let addr = unsafe { Address::zero() }; 8 | let aligned = addr.align_up(8); 9 | 10 | assert_eq!(addr, aligned); 11 | } 12 | 13 | #[test] 14 | fn test_is_aligned() { 15 | let addr = unsafe { Address::zero() }; 16 | assert!(addr.is_aligned_to(8)); 17 | 18 | let addr = unsafe { Address::from_usize(8) }; 19 | assert!(addr.is_aligned_to(8)); 20 | } 21 | -------------------------------------------------------------------------------- /tests/test_roots_work_factory.rs: -------------------------------------------------------------------------------- 1 | //! This is for testing the assumption that RootsWorkFactory can work with embedded Box or Arc 2 | //! to hold or share large components in the heap. Real-world RootsWorkFactory implementations 3 | //! should not be this complicated, and should probably not have shared mutable states, if they 4 | //! have any mutable states at all. 5 | 6 | use std::sync::{Arc, Mutex}; 7 | 8 | use mmtk::{ 9 | util::{Address, ObjectReference}, 10 | vm::RootsWorkFactory, 11 | }; 12 | 13 | #[derive(Default)] 14 | struct MockScanning { 15 | roots: Vec
, 16 | } 17 | 18 | impl MockScanning { 19 | fn add_roots(&mut self, roots: &[Address]) { 20 | self.roots.extend(roots); 21 | } 22 | 23 | fn mock_scan_roots(&self, mut factory: impl mmtk::vm::RootsWorkFactory
) { 24 | factory.create_process_edge_roots_work(self.roots.clone()); 25 | } 26 | } 27 | 28 | static EDGES: [Address; 3] = [ 29 | unsafe { Address::from_usize(0x8) }, 30 | unsafe { Address::from_usize(0x8) }, 31 | unsafe { Address::from_usize(0x8) }, 32 | ]; 33 | 34 | /// A factory with a plain value, a boxed value and a shared data with Arc. 35 | #[derive(Clone)] 36 | struct MockFactory { 37 | round: i32, 38 | v: String, 39 | #[allow(clippy::box_collection)] // for testing `Box` inside a factory 40 | b: Box, 41 | a: Arc>, 42 | } 43 | 44 | impl RootsWorkFactory
for MockFactory { 45 | fn create_process_edge_roots_work(&mut self, edges: Vec
) { 46 | assert_eq!(edges, EDGES); 47 | match self.round { 48 | 1 => { 49 | assert_eq!(self.v, "y"); 50 | assert_eq!(*self.b, "b"); 51 | assert_eq!(self.a.lock().unwrap().clone(), "a"); 52 | } 53 | 2 => { 54 | assert_eq!(self.v, "y"); 55 | assert_eq!(*self.b, "b"); 56 | assert_eq!(self.a.lock().unwrap().clone(), "a2"); 57 | } 58 | 3 => { 59 | assert_eq!(self.v, "y2"); 60 | assert_eq!(*self.b, "b2"); 61 | assert_eq!(self.a.lock().unwrap().clone(), "a2"); 62 | } 63 | _ => { 64 | panic!("Unreachable"); 65 | } 66 | } 67 | } 68 | 69 | fn create_process_node_roots_work(&mut self, _nodes: Vec) { 70 | unimplemented!(); 71 | } 72 | } 73 | 74 | #[test] 75 | fn test_scan() { 76 | let factory = MockFactory { 77 | round: 1, 78 | v: "y".to_string(), 79 | b: Box::new("b".to_string()), 80 | a: Arc::new(Mutex::new("a".to_string())), 81 | }; 82 | let mut scanning = MockScanning::default(); 83 | scanning.add_roots(&EDGES); 84 | scanning.mock_scan_roots(factory); 85 | } 86 | 87 | #[test] 88 | fn test_clone() { 89 | let factory1 = MockFactory { 90 | round: 2, 91 | v: "y".to_string(), 92 | b: Box::new("b".to_string()), 93 | a: Arc::new(Mutex::new("a".to_string())), 94 | }; 95 | 96 | let mut factory2 = factory1.clone(); 97 | factory2.round = 3; 98 | factory2.v = "y2".to_string(); 99 | *factory2.b = "b2".to_string(); 100 | *factory2.a.lock().unwrap() = "a2".to_string(); 101 | 102 | let mut scanning = MockScanning::default(); 103 | 104 | scanning.add_roots(&EDGES); 105 | scanning.mock_scan_roots(factory1); 106 | scanning.mock_scan_roots(factory2); 107 | } 108 | -------------------------------------------------------------------------------- /tools/tracing/alloc_slow.bt: -------------------------------------------------------------------------------- 1 | usdt:$MMTK:mmtk:alloc_slow_once_start { 2 | if (@stats_enabled) { 3 | @alloc_slow_nsecs[tid] = nsecs; 4 | } 5 | } 6 | 7 | usdt:$MMTK:mmtk:alloc_slow_once_end { 8 | if (@stats_enabled) { 9 | @alloc_slow_hist = hist((nsecs - 
@alloc_slow_nsecs[tid])/400);
        // NOTE(review): this completes `@alloc_slow_hist = hist((nsecs - ...` from the
        // previous line. Bucket scale is 400 ns per unit here, while
        // lock_contended.bt uses 256 — confirm the difference is intended.
    }
}
-------------------------------------------------------------------------------- /tools/tracing/epilogue.bt.fragment: --------------------------------------------------------------------------------
// Epilogue fragment appended to every generated script by run.py.
END {
    // Remove the generated temporary script ($TMP_FILE is substituted by run.py).
    system("rm $TMP_FILE");
}

// Safety timeout: stop tracing after 1200 seconds.
interval:s:1200 {
    exit();
}
-------------------------------------------------------------------------------- /tools/tracing/gc_stages.bt: --------------------------------------------------------------------------------
// Accumulates wall time spent before, during, and after the Closure stage of each GC.
usdt:$MMTK:mmtk:gc_start {
    if (@stats_enabled) {
        @gc_start_nsecs = nsecs;
    }
}

usdt:$MMTK:mmtk:gc_end {
    if (@stats_enabled) {
        // Time from the Release bucket opening (recorded below) until the end of GC.
        @post_closure_time += nsecs - @post_closure_nsecs;
    }
}

usdt:$MMTK:mmtk:bucket_opened {
    if (@stats_enabled) {
        $ns = nsecs;
        // Please check enum WorkBucketStage for the numerical values of stages
        // Closure is 2 when vo_bit is not set
        if (arg0 == 2) {
            @closure_nsecs = $ns;
            @pre_closure_time += $ns - @gc_start_nsecs;
        }
        // Release is 14 when vo_bit is not set
        if (arg0 == 14) {
            @post_closure_nsecs = $ns;
            @closure_time += $ns - @closure_nsecs;
        }
    }
}
-------------------------------------------------------------------------------- /tools/tracing/lock_contended.bt: --------------------------------------------------------------------------------
// Per-mutex latency histograms for std's contended-mutex slow path.
// Keyed by arg0 of the probed function — presumably the mutex address; confirm.
uprobe:$MMTK:_ZN3std3sys4unix5locks11futex_mutex5Mutex14lock_contended* {
    if (@stats_enabled) {
        @lock_nsecs[tid] = (arg0, nsecs);
    }
}

uretprobe:$MMTK:_ZN3std3sys4unix5locks11futex_mutex5Mutex14lock_contended* {
    if (@stats_enabled) {
        // Elapsed time in the slow path, 256 ns per histogram unit.
        @lock_dist[@lock_nsecs[tid].0] = hist((nsecs - @lock_nsecs[tid].1)/256);
    }
}
-------------------------------------------------------------------------------- /tools/tracing/packet_size.bt: --------------------------------------------------------------------------------
// Distribution of ProcessEdges work-packet sizes.
// arg0 is hist()ed as the packet size; arg1 presumably flags root packets — confirm
// against the probe site in mmtk-core.
usdt:$MMTK:mmtk:process_edges {
    if (@stats_enabled) {
        @process_edges_packet_size = hist(arg0);
        if (arg1) {
            // Separate distribution recorded for root packets only.
            @process_edges_root_packet_size = hist(arg0);
        }
    }
}
-------------------------------------------------------------------------------- /tools/tracing/prologue_with_harness.bt.fragment: --------------------------------------------------------------------------------
usdt:$MMTK:mmtk:harness_begin {
    //begin collecting data at harness_begin (start of final iteration)
    @stats_enabled = 1;
}

usdt:$MMTK:mmtk:harness_end {
    //end data at harness_end (end of final iteration)
    @stats_enabled = 0;
    exit();
}
-------------------------------------------------------------------------------- /tools/tracing/prologue_without_harness.bt.fragment: --------------------------------------------------------------------------------
BEGIN {
    //always collect data
    @stats_enabled = 1;
}
-------------------------------------------------------------------------------- /tools/tracing/run.py: --------------------------------------------------------------------------------
#!/usr/bin/env python3
from string import Template
from argparse import ArgumentParser
from pathlib import Path
import tempfile
import sys
import os


def get_args():
    # Command-line interface for the tracing runner; the positional `tool`
    # argument names a .bt script that must live next to this file.
    parser = ArgumentParser()
    parser.add_argument("-b", "--bpftrace", type=str, default="bpftrace",
                        help="Path of the bpftrace executable")
    parser.add_argument("-m", "--mmtk", type=str, required=True,
                        help="Path of the MMTk binary")
    parser.add_argument("-H", "--harness", action="store_true",
                        help="Only collect data for the timing iteration (harness_begin/harness_end)")
    parser.add_argument("-p", "--print-script", action="store_true",
                        help="Print the content of the bpftrace script")
    parser.add_argument(
        "-f", "--format", choices=["text", "json"], default="text", help="bpftrace output 
format") 22 | parser.add_argument("tool", type=str, help="Name of the bpftrace tool") 23 | return parser.parse_args() 24 | 25 | 26 | def main(): 27 | args = get_args() 28 | here = Path(__file__).parent.resolve() 29 | bpftrace_script = here / f"{args.tool}.bt" 30 | if not bpftrace_script.exists(): 31 | print(f"Tracing script {str(bpftrace_script)} not found.") 32 | sys.exit(1) 33 | mmtk_bin = Path(args.mmtk) 34 | if not mmtk_bin.exists(): 35 | print(f"MMTk binary {str(mmtk_bin)} not found.") 36 | sys.exit(1) 37 | prologue_file = here / \ 38 | ("prologue_with_harness.bt.fragment" if args.harness else "prologue_without_harness.bt.fragment") 39 | prologue = prologue_file.read_text() 40 | epilogue = (here / "epilogue.bt.fragment").read_text() 41 | template = Template(prologue + bpftrace_script.read_text() + epilogue) 42 | with tempfile.NamedTemporaryFile(mode="w+t") as tmp: 43 | content = template.safe_substitute( 44 | MMTK=mmtk_bin, TMP_FILE=tmp.name) 45 | if args.print_script: 46 | print(content) 47 | tmp.write(content) 48 | tmp.flush() 49 | # We use execvp to replace the current process instead of creating 50 | # a subprocess (or sh -c). This is so that when users invoke this from 51 | # the command line, Ctrl-C will be captured by bpftrace instead of the 52 | # outer Python script. The temporary file can then be cleaned up by 53 | # the END probe in bpftrace. 54 | # 55 | # In theory, you can implement this via pty, but it is very finicky 56 | # and doesn't work reliably. 
        # See also https://github.com/anupli/running-ng/commit/b74e3a13f56dd97f73432d8a391e1d6cd9db8663
        os.execvp("sudo", ["sudo", args.bpftrace,
                           "--unsafe", "-f", args.format, tmp.name])


if __name__ == "__main__":
    main()
-------------------------------------------------------------------------------- /vmbindings/dummyvm/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "mmtk_dummyvm"
version = "0.0.1"
authors = [" <>"]
edition = "2021"

[lib]
name = "mmtk_dummyvm"
# be careful - LTO is only allowed for certain crate types
crate-type = ["cdylib"]

[profile.release]
lto = true

[dependencies]
mmtk = { path = "../../", version = "*" }
libc = "0.2"
lazy_static = "1.1"
atomic_refcell = "0.1.7"
atomic = "0.4.6"
log = "0.4"

# Each feature here simply forwards to the corresponding mmtk-core feature.
[features]
default = []
is_mmtk_object = ["mmtk/is_mmtk_object"]
malloc_counted_size = ["mmtk/malloc_counted_size"]
malloc_mark_sweep = ["mmtk/malloc_mark_sweep"]
vo_bit = ["mmtk/vo_bit"]
extreme_assertions = ["mmtk/extreme_assertions"]
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/active_plan.rs: --------------------------------------------------------------------------------
// ActivePlan hooks for the dummy binding: only `global()` and `is_mutator()`
// have working bodies; mutator enumeration is unimplemented.
// NOTE(review): generic arguments appear stripped by text extraction in this
// file (e.g. `ActivePlan` is presumably `ActivePlan<DummyVM>`) — confirm
// against the upstream mmtk-core sources before editing.
use mmtk::Plan;
use mmtk::vm::ActivePlan;
use mmtk::util::opaque_pointer::*;
use mmtk::Mutator;
use crate::DummyVM;
use crate::SINGLETON;

pub struct VMActivePlan<> {}

impl ActivePlan for VMActivePlan {
    fn global() -> &'static dyn Plan {
        // The process-wide MMTk instance defined in lib.rs.
        SINGLETON.get_plan()
    }

    fn number_of_mutators() -> usize {
        unimplemented!()
    }

    fn is_mutator(_tls: VMThread) -> bool {
        // FIXME
        true
    }

    fn mutator(_tls: VMMutatorThread) -> &'static mut Mutator {
        unimplemented!()
    }

    fn mutators<'a>() -> Box> + 'a> {
        unimplemented!()
    }
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/collection.rs: --------------------------------------------------------------------------------
use crate::DummyVM;
use mmtk::util::opaque_pointer::*;
use mmtk::vm::Collection;
use mmtk::vm::GCThreadContext;
use mmtk::Mutator;

/// Collection hooks for the dummy binding. Only `block_for_gc` has a
/// (panicking) body, which tests rely on; `spawn_gc_thread` is a deliberate no-op.
pub struct VMCollection {}

impl Collection for VMCollection {
    fn stop_all_mutators(_tls: VMWorkerThread, _mutator_visitor: F)
    where
        F: FnMut(&'static mut Mutator),
    {
        unimplemented!()
    }

    fn resume_mutators(_tls: VMWorkerThread) {
        unimplemented!()
    }

    fn block_for_gc(_tls: VMMutatorThread) {
        // Tests such as allocate_with_initialize_collection #[should_panic]
        // on this exact message — do not change the string.
        panic!("block_for_gc is not implemented")
    }

    // The dummy VM spawns no GC threads.
    fn spawn_gc_thread(_tls: VMThread, _ctx: GCThreadContext) {}
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/lib.rs: --------------------------------------------------------------------------------
extern crate libc;
extern crate mmtk;
#[macro_use]
extern crate lazy_static;

use mmtk::vm::VMBinding;
use mmtk::MMTKBuilder;
use mmtk::MMTK;

pub mod active_plan;
pub mod api;
pub mod collection;
pub mod object_model;
pub mod reference_glue;
pub mod scanning;

mod edges;
#[cfg(test)]
mod tests;

/// A minimal VM binding used to exercise mmtk-core without a real VM.
#[derive(Default)]
pub struct DummyVM;

impl VMBinding for DummyVM {
    type VMObjectModel = object_model::VMObjectModel;
    type VMScanning = scanning::VMScanning;
    type VMCollection = collection::VMCollection;
    type VMActivePlan = active_plan::VMActivePlan;
    type VMReferenceGlue = reference_glue::VMReferenceGlue;
    type VMEdge = edges::DummyVMEdge;
    type VMMemorySlice = edges::DummyVMMemorySlice;

    /// Allowed maximum alignment in bytes.
34 | const MAX_ALIGNMENT: usize = 1 << 6; 35 | } 36 | 37 | use std::sync::atomic::{AtomicBool, Ordering}; 38 | use std::sync::Mutex; 39 | 40 | /// This is used to ensure we initialize MMTk at a specified timing. 41 | pub static MMTK_INITIALIZED: AtomicBool = AtomicBool::new(false); 42 | 43 | lazy_static! { 44 | pub static ref BUILDER: Mutex = Mutex::new(MMTKBuilder::new()); 45 | pub static ref SINGLETON: MMTK = { 46 | let builder = BUILDER.lock().unwrap(); 47 | debug_assert!(!MMTK_INITIALIZED.load(Ordering::SeqCst)); 48 | let ret = mmtk::memory_manager::mmtk_init(&builder); 49 | MMTK_INITIALIZED.store(true, std::sync::atomic::Ordering::Relaxed); 50 | *ret 51 | }; 52 | } 53 | -------------------------------------------------------------------------------- /vmbindings/dummyvm/src/object_model.rs: -------------------------------------------------------------------------------- 1 | use mmtk::util::copy::{CopySemantics, GCWorkerCopyContext}; 2 | use mmtk::util::{Address, ObjectReference}; 3 | use mmtk::vm::*; 4 | use crate::DummyVM; 5 | 6 | pub struct VMObjectModel {} 7 | 8 | // This is intentionally set to a non-zero value to see if it breaks. 9 | // Change this if you want to test other values. 
pub const OBJECT_REF_OFFSET: usize = 4;

impl ObjectModel for VMObjectModel {
    // All metadata specs are placed in the object header at bit offset 0.
    const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::in_header(0);
    const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec = VMLocalForwardingPointerSpec::in_header(0);
    const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = VMLocalForwardingBitsSpec::in_header(0);
    const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::in_header(0);
    const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec = VMLocalLOSMarkNurserySpec::in_header(0);

    const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET as isize;

    fn copy(
        _from: ObjectReference,
        _semantics: CopySemantics,
        _copy_context: &mut GCWorkerCopyContext,
    ) -> ObjectReference {
        unimplemented!()
    }

    fn copy_to(_from: ObjectReference, _to: ObjectReference, _region: Address) -> Address {
        unimplemented!()
    }

    fn get_current_size(_object: ObjectReference) -> usize {
        unimplemented!()
    }

    fn get_size_when_copied(object: ObjectReference) -> usize {
        // Same as the current size; objects do not change size when copied.
        Self::get_current_size(object)
    }

    fn get_align_when_copied(_object: ObjectReference) -> usize {
        // NOTE(review): type parameter appears stripped by extraction
        // (presumably size_of::<usize>()) — confirm against upstream.
        ::std::mem::size_of::()
    }

    fn get_align_offset_when_copied(_object: ObjectReference) -> usize {
        0
    }

    fn get_reference_when_copied_to(_from: ObjectReference, _to: Address) -> ObjectReference {
        unimplemented!()
    }

    fn get_type_descriptor(_reference: ObjectReference) -> &'static [i8] {
        unimplemented!()
    }

    fn ref_to_object_start(object: ObjectReference) -> Address {
        // The object reference points OBJECT_REF_OFFSET bytes past the allocation start.
        object.to_raw_address().sub(OBJECT_REF_OFFSET)
    }

    fn ref_to_header(object: ObjectReference) -> Address {
        object.to_raw_address()
    }

    fn ref_to_address(object: ObjectReference) -> Address {
        // Just use object start.
        Self::ref_to_object_start(object)
    }

    fn address_to_ref(addr: Address) -> ObjectReference {
        // Inverse of ref_to_object_start / ref_to_address.
        ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET))
    }

    fn dump_object(_object: ObjectReference) {
        unimplemented!()
    }
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/reference_glue.rs: --------------------------------------------------------------------------------
use mmtk::vm::ReferenceGlue;
use mmtk::util::ObjectReference;
use mmtk::util::opaque_pointer::VMWorkerThread;
use crate::DummyVM;

/// Weak-reference processing hooks; entirely unimplemented for the dummy VM.
pub struct VMReferenceGlue {}

impl ReferenceGlue for VMReferenceGlue {
    type FinalizableType = ObjectReference;

    fn set_referent(_reference: ObjectReference, _referent: ObjectReference) {
        unimplemented!()
    }
    fn get_referent(_object: ObjectReference) -> ObjectReference {
        unimplemented!()
    }
    fn enqueue_references(_references: &[ObjectReference], _tls: VMWorkerThread) {
        unimplemented!()
    }
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/scanning.rs: --------------------------------------------------------------------------------
use crate::DummyVM;
use crate::edges::DummyVMEdge;
use mmtk::util::opaque_pointer::*;
use mmtk::util::ObjectReference;
use mmtk::vm::EdgeVisitor;
use mmtk::vm::RootsWorkFactory;
use mmtk::vm::Scanning;
use mmtk::Mutator;

/// Stack/object scanning hooks; entirely unimplemented for the dummy VM.
/// NOTE(review): generic arguments appear stripped by extraction in the
/// signatures below — confirm against upstream before editing.
pub struct VMScanning {}

impl Scanning for VMScanning {
    fn scan_roots_in_mutator_thread(
        _tls: VMWorkerThread,
        _mutator: &'static mut Mutator,
        _factory: impl RootsWorkFactory,
    ) {
        unimplemented!()
    }
    fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) {
        unimplemented!()
    }
    fn scan_object>(
        _tls: VMWorkerThread,
        _object: ObjectReference,
        _edge_visitor: &mut EV,
    ) {
        unimplemented!()
    }
    fn notify_initial_thread_scan_complete(_partial_scan: bool, _tls: VMWorkerThread) {
        unimplemented!()
    }
    fn supports_return_barrier() -> bool {
        unimplemented!()
    }
    fn prepare_for_roots_re_scanning() {
        unimplemented!()
    }
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/allocate_align_offset.rs: --------------------------------------------------------------------------------
// GITHUB-CI: MMTK_PLAN=all

use crate::api;
use crate::DummyVM;
use crate::tests::fixtures::{SerialFixture, MutatorFixture};
use mmtk::plan::AllocationSemantics;
use mmtk::vm::VMBinding;
use log::info;

lazy_static! {
    static ref MUTATOR: SerialFixture = SerialFixture::new();
}

/// Allocation must honour every power-of-two alignment from MIN_ALIGNMENT
/// up to MAX_ALIGNMENT.
#[test]
pub fn allocate_alignment() {
    MUTATOR.with_fixture(|fixture| {
        let min = DummyVM::MIN_ALIGNMENT;
        let max = DummyVM::MAX_ALIGNMENT;
        info!("Allowed alignment between {} and {}", min, max);
        let mut align = min;
        while align <= max {
            info!("Test allocation with alignment {}", align);
            let addr = api::mmtk_alloc(fixture.mutator, 8, align, 0, AllocationSemantics::Default);
            assert!(addr.is_aligned_to(align), "Expected allocation alignment {}, returned address is {:?}", align, addr);
            align *= 2;
        }
    })
}

/// Same sweep with a non-zero offset: (addr + OFFSET) must be aligned.
#[test]
pub fn allocate_offset() {
    MUTATOR.with_fixture(|fixture| {
        const OFFSET: usize = 4;
        let min = DummyVM::MIN_ALIGNMENT;
        let max = DummyVM::MAX_ALIGNMENT;
        info!("Allowed alignment between {} and {}", min, max);
        let mut align = min;
        while align <= max {
            info!("Test allocation with alignment {} and offset {}", align, OFFSET);
            let addr = api::mmtk_alloc(fixture.mutator, 8, align, OFFSET, AllocationSemantics::Default);
            assert!((addr + OFFSET).is_aligned_to(align), "Expected allocation alignment {}, returned address is {:?}", align, addr);
            align *= 2;
        }
    })
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/allocate_with_disable_collection.rs: --------------------------------------------------------------------------------
use crate::api::*;
use mmtk::util::opaque_pointer::*;
use mmtk::AllocationSemantics;

/// This test allocates after calling disable_collection(). When we exceed the heap limit, MMTk will NOT trigger a GC.
/// And the allocation will succeed.
#[test]
pub fn allocate_with_disable_collection() {
    const MB: usize = 1024 * 1024;
    // 1MB heap
    mmtk_init(MB);
    mmtk_initialize_collection(VMThread::UNINITIALIZED);
    let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
    // Allocate 1MB. It should be fine.
    let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
    // Disable GC
    mmtk_disable_collection();
    // Allocate another MB. This exceeds the heap size. But as we have disabled GC, MMTk will not trigger a GC, and allow this allocation.
    let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/allocate_with_initialize_collection.rs: --------------------------------------------------------------------------------
use crate::api::*;
use mmtk::util::opaque_pointer::*;
use mmtk::AllocationSemantics;

/// This test allocates after calling initialize_collection(). When we exceed the heap limit, MMTk will trigger a GC. And block_for_gc will be called.
/// We havent implemented block_for_gc so it will panic.
// The expected message below must stay in sync with VMCollection::block_for_gc.
#[test]
#[should_panic(expected = "block_for_gc is not implemented")]
pub fn allocate_with_initialize_collection() {
    const MB: usize = 1024 * 1024;
    // 1MB heap
    mmtk_init(MB);
    mmtk_initialize_collection(VMThread::UNINITIALIZED);
    let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
    // Attempt to allocate 2MB. This will trigger GC.
    let addr = mmtk_alloc(handle, 2 * MB, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/allocate_with_re_enable_collection.rs: --------------------------------------------------------------------------------
use crate::api::*;
use mmtk::util::opaque_pointer::*;
use mmtk::AllocationSemantics;

/// This test allocates after calling initialize_collection(). When we exceed the heap limit, MMTk will trigger a GC. And block_for_gc will be called.
/// We havent implemented block_for_gc so it will panic. This test is similar to allocate_with_initialize_collection, except that we once disabled GC in the test.
#[test]
#[should_panic(expected = "block_for_gc is not implemented")]
pub fn allocate_with_re_enable_collection() {
    const MB: usize = 1024 * 1024;
    // 1MB heap
    mmtk_init(MB);
    mmtk_initialize_collection(VMThread::UNINITIALIZED);
    let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
    // Allocate 1MB. It should be fine.
    let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
    // Disable GC. So we can keep allocate without triggering a GC.
    mmtk_disable_collection();
    let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
    // Enable GC again. When we allocate, we should see a GC triggered immediately.
    mmtk_enable_collection();
    let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/allocate_without_initialize_collection.rs: --------------------------------------------------------------------------------
use crate::api::*;
use mmtk::util::opaque_pointer::*;
use mmtk::AllocationSemantics;

/// This test allocates without calling initialize_collection(). When we exceed the heap limit, a GC should be triggered by MMTk.
/// But as we haven't enabled collection, GC is not initialized, so MMTk will panic.
#[test]
#[should_panic(expected = "GC is not allowed here")]
pub fn allocate_without_initialize_collection() {
    const MB: usize = 1024 * 1024;
    // 1MB heap
    mmtk_init(MB);
    let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
    // Attempt to allocate 2MB memory. This should trigger a GC, but as we never call initialize_collection(), we cannot do GC.
    let addr = mmtk_alloc(handle, 2 * MB, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs: --------------------------------------------------------------------------------
// GITHUB-CI: MMTK_PLAN=GenImmix
// GITHUB-CI: FEATURES=vo_bit,extreme_assertions

// Run the test with any plan that uses object barrier, and we also need both VO bit and extreme assertions.

use crate::object_model::OBJECT_REF_OFFSET;
use crate::{api::*, edges};
use crate::tests::fixtures::MMTKSingleton;
use crate::tests::fixtures::FixtureContent;
use atomic::Atomic;
use mmtk::util::{Address, ObjectReference};
use mmtk::util::{VMThread, VMMutatorThread};
use mmtk::vm::edge_shape::SimpleEdge;
use mmtk::AllocationSemantics;

lazy_static! {
    static ref MMTK_SINGLETON: MMTKSingleton = MMTKSingleton::create();
}

/// Writing through the barrier slow path with a reference that has no VO bit
/// must trip the extreme-assertions check.
#[test]
#[should_panic(expected = "object bit is unset")]
fn test_assertion_barrier_invalid_ref() {
    let mutator = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));

    // Allocate
    let size = 24;
    let addr = mmtk_alloc(mutator, size, 8, 0, AllocationSemantics::Default);
    let objref: ObjectReference = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET));
    mmtk_post_alloc(mutator, objref, size, AllocationSemantics::Default);
    // Create an edge
    let mut slot: Atomic = Atomic::new(objref);
    let edge = SimpleEdge::from_address(Address::from_ref(&mut slot));
    // Create an invalid object reference (offset 8 bytes on the original object ref), and invoke barrier slowpath with it
    // The invalid object ref has no VO bit, and the assertion should fail.
    let invalid_objref = ObjectReference::from_raw_address(objref.to_raw_address() + 8usize);
    unsafe {
        let mu = &mut *mutator;
        mu.barrier
            .object_reference_write_slow(invalid_objref, edges::DummyVMEdge::Simple(edge), objref);
    }
}

/// Companion positive case: the same slow path with a valid reference must not panic.
#[test]
fn test_assertion_barrier_valid_ref() {
    let mutator = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));

    // Allocate
    let size = 24;
    let addr = mmtk_alloc(mutator, size, 8, 0, AllocationSemantics::Default);
    let objref: ObjectReference = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET));
    mmtk_post_alloc(mutator, objref, size, AllocationSemantics::Default);
    // Create an edge
    let mut slot: Atomic = Atomic::new(objref);
    let edge = SimpleEdge::from_address(Address::from_ref(&mut slot));
    // Invoke barrier slowpath with the valid object ref
    unsafe {
        let mu = &mut *mutator;
        mu.barrier
            .object_reference_write_slow(objref, edges::DummyVMEdge::Simple(edge), objref);
    }
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/handle_mmap_conflict.rs: --------------------------------------------------------------------------------
use mmtk::util::Address;
use mmtk::util::opaque_pointer::*;
use mmtk::util::memory;
use crate::DummyVM;

/// Mapping the same range twice must fail, and handle_mmap_error must panic
/// with the "already mapped" message asserted below.
#[test]
pub fn test_handle_mmap_conflict() {
    let start = unsafe { Address::from_usize(0x100_0000 )};
    let one_megabyte = 1000000;
    let mmap1_res = memory::dzmmap_noreplace(start, one_megabyte);
    assert!(mmap1_res.is_ok());

    let panic_res = std::panic::catch_unwind(|| {
        let mmap2_res = memory::dzmmap_noreplace(start, one_megabyte);
        assert!(mmap2_res.is_err());
        memory::handle_mmap_error::(mmap2_res.err().unwrap(), VMThread::UNINITIALIZED);
    });

    // The error should match the error message in memory::handle_mmap_error()
    assert!(panic_res.is_err());
    let err = panic_res.err().unwrap();
    assert!(err.is::<&str>());
    assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?");
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/handle_mmap_oom.rs: --------------------------------------------------------------------------------
use mmtk::util::Address;
use mmtk::util::opaque_pointer::*;
use mmtk::util::memory;
use crate::DummyVM;

// Sizes chosen to exceed what mmap can provide on each pointer width.
#[cfg(target_pointer_width = "32")]
const LARGE_SIZE: usize = 4_294_967_295;
#[cfg(target_pointer_width = "64")]
const LARGE_SIZE: usize = 1_000_000_000_000;

#[test]
pub fn test_handle_mmap_oom() {
    let panic_res = std::panic::catch_unwind(move || {
        let start = unsafe { Address::from_usize(0x100_0000 )};
        // mmap 1 terabyte memory - we expect this will fail due to out of memory.
        // If that's not the case, increase the size we mmap.
        let mmap_res = memory::dzmmap_noreplace(start, LARGE_SIZE);

        memory::handle_mmap_error::(mmap_res.err().unwrap(), VMThread::UNINITIALIZED);
    });
    assert!(panic_res.is_err());

    // The error should match the default implementation of Collection::out_of_memory()
    let err = panic_res.err().unwrap();
    assert!(err.is::());
    assert_eq!(err.downcast_ref::().unwrap(), &"Out of memory with MmapOutOfMemory!");
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs: --------------------------------------------------------------------------------
// GITHUB-CI: MMTK_PLAN=all

use crate::tests::fixtures::{Fixture, SingleObject};
use crate::api::mmtk_is_in_mmtk_spaces as is_in_mmtk_spaces;
use mmtk::util::*;

lazy_static! 
{
    static ref SINGLE_OBJECT: Fixture = Fixture::new();
}

/// NULL must never be reported as being inside an MMTk space.
#[test]
pub fn null() {
    SINGLE_OBJECT.with_fixture(|_fixture| {
        assert!(
            !is_in_mmtk_spaces(ObjectReference::NULL),
            "NULL pointer should not be in any MMTk spaces."
        );
    });
}

/// The top of the address space must never be inside an MMTk space.
#[test]
pub fn max() {
    SINGLE_OBJECT.with_fixture(|_fixture| {
        assert!(
            !is_in_mmtk_spaces(ObjectReference::from_raw_address(Address::MAX)),
            "Address::MAX should not be in any MMTk spaces."
        );
    });
}

/// A freshly allocated object must be reported as inside an MMTk space.
#[test]
pub fn direct_hit() {
    SINGLE_OBJECT.with_fixture(|fixture| {
        assert!(
            is_in_mmtk_spaces(fixture.objref),
            "The address of the allocated object should be in the space"
        );
    });
}

#[test]
pub fn large_offsets_aligned() {
    SINGLE_OBJECT.with_fixture(|fixture| {
        for log_offset in 12usize..(usize::BITS as usize) {
            let offset = 1usize << log_offset;
            let addr = match fixture.objref.to_raw_address().as_usize().checked_add(offset) {
                Some(n) => unsafe { Address::from_usize(n) },
                None => break,
            };
            // It's just a smoke test. It is hard to predict if the addr is still in any space,
            // but it must not crash.
            let _ = is_in_mmtk_spaces(ObjectReference::from_raw_address(addr));
        }
    });
}

#[test]
pub fn negative_offsets() {
    SINGLE_OBJECT.with_fixture(|fixture| {
        for log_offset in 1usize..(usize::BITS as usize) {
            let offset = 1usize << log_offset;
            let addr = match fixture.objref.to_raw_address().as_usize().checked_sub(offset) {
                Some(n) => unsafe { Address::from_usize(n) },
                None => break,
            };
            // It's just a smoke test. It is hard to predict if the addr is still in any space,
            // but it must not crash.
            let _ = is_in_mmtk_spaces(ObjectReference::from_raw_address(addr));
        }
    });
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/issue139.rs: --------------------------------------------------------------------------------
use crate::api::*;
use mmtk::util::opaque_pointer::*;
use mmtk::AllocationSemantics;

/// Regression test for issue #139: allocation sizes that are not a multiple of
/// the minimum alignment must still yield aligned addresses on the next allocation.
#[test]
pub fn issue139_alloc_non_multiple_of_min_alignment() {
    mmtk_init(200*1024*1024);
    let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));

    // Allocate 6 bytes with 8 bytes ailgnment required
    let addr = mmtk_alloc(handle, 14, 8, 0, AllocationSemantics::Default);
    assert!(addr.is_aligned_to(8));
    // After the allocation, the cursor is not MIN_ALIGNMENT aligned. If we have the assertion in the next allocation to check if the cursor is aligned to MIN_ALIGNMENT, it fails.
    // We have to remove that assertion.
    let addr2 = mmtk_alloc(handle, 14, 8, 0, AllocationSemantics::Default);
    assert!(addr2.is_aligned_to(8));
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/malloc_api.rs: --------------------------------------------------------------------------------
use crate::api::*;

// Smoke tests for the plain (uncounted) malloc/calloc/realloc/free API wrappers.
#[test]
pub fn malloc_free() {
    let res = mmtk_malloc(8);
    assert!(!res.is_zero());
    mmtk_free(res);
}

#[test]
pub fn calloc_free() {
    let res = mmtk_calloc(1, 8);
    assert!(!res.is_zero());
    mmtk_free(res);
}

#[test]
pub fn realloc_free() {
    let res1 = mmtk_malloc(8);
    assert!(!res1.is_zero());
    let res2 = mmtk_realloc(res1, 16);
    assert!(!res2.is_zero());
    mmtk_free(res2);
}
-------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/malloc_counted.rs: --------------------------------------------------------------------------------
// GITHUB-CI: FEATURES=malloc_counted_size

use crate::tests::fixtures::{SerialFixture, MMTKSingleton};
use crate::api::*;

lazy_static! {
    static ref MMTK_SINGLETON: SerialFixture = SerialFixture::new();
}

/// get_malloc_bytes must rise by the allocation size and return to the
/// baseline after the matching free.
#[test]
pub fn malloc_free() {
    MMTK_SINGLETON.with_fixture(|fixture| {
        let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes();

        let res = mmtk_counted_malloc(8);
        assert!(!res.is_zero());
        let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
        assert_eq!(bytes_before + 8, bytes_after_alloc);

        mmtk_free_with_size(res, 8);
        let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes();
        assert_eq!(bytes_before, bytes_after_free);
    });
}

#[test]
pub fn calloc_free() {
    MMTK_SINGLETON.with_fixture(|fixture| {
        let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes();

        let res = mmtk_counted_calloc(1, 8);
        assert!(!res.is_zero());
        let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
        assert_eq!(bytes_before + 8, bytes_after_alloc);

        mmtk_free_with_size(res, 8);
        let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes();
        assert_eq!(bytes_before, bytes_after_free);
    });
}

/// Growing a counted realloc must account for the new (larger) size.
#[test]
pub fn realloc_grow() {
    MMTK_SINGLETON.with_fixture(|fixture| {
        let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes();

        let res1 = mmtk_counted_malloc(8);
        assert!(!res1.is_zero());
        let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes();
        assert_eq!(bytes_before + 8, bytes_after_alloc);

        // grow to 16 bytes
        let res2 = mmtk_realloc_with_old_size(res1, 16, 8);
        assert!(!res2.is_zero());
        let bytes_after_realloc = 
fixture.mmtk.get_plan().base().get_malloc_bytes(); 56 | assert_eq!(bytes_before + 16, bytes_after_realloc); 57 | 58 | mmtk_free_with_size(res2, 16); 59 | let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes(); 60 | assert_eq!(bytes_before, bytes_after_free); 61 | }); 62 | } 63 | 64 | #[test] 65 | pub fn realloc_shrink() { 66 | MMTK_SINGLETON.with_fixture(|fixture| { 67 | let bytes_before = fixture.mmtk.get_plan().base().get_malloc_bytes(); 68 | 69 | let res1 = mmtk_counted_malloc(16); 70 | assert!(!res1.is_zero()); 71 | let bytes_after_alloc = fixture.mmtk.get_plan().base().get_malloc_bytes(); 72 | assert_eq!(bytes_before + 16, bytes_after_alloc); 73 | 74 | // shrink to 8 bytes 75 | let res2 = mmtk_realloc_with_old_size(res1, 8, 16); 76 | assert!(!res2.is_zero()); 77 | let bytes_after_realloc = fixture.mmtk.get_plan().base().get_malloc_bytes(); 78 | assert_eq!(bytes_before + 8, bytes_after_realloc); 79 | 80 | mmtk_free_with_size(res2, 8); 81 | let bytes_after_free = fixture.mmtk.get_plan().base().get_malloc_bytes(); 82 | assert_eq!(bytes_before, bytes_after_free); 83 | }); 84 | } 85 | -------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/malloc_ms.rs: -------------------------------------------------------------------------------- 1 | use mmtk::util::malloc::malloc_ms_util; 2 | use crate::DummyVM; 3 | 4 | #[test] 5 | fn test_malloc() { 6 | let (address1, bool1) = malloc_ms_util::alloc::<DummyVM>(16, 8, 0); 7 | let (address2, bool2) = malloc_ms_util::alloc::<DummyVM>(16, 32, 0); 8 | let (address3, bool3) = malloc_ms_util::alloc::<DummyVM>(16, 8, 4); 9 | let (address4, bool4) = malloc_ms_util::alloc::<DummyVM>(32, 64, 4); 10 | 11 | assert!(address1.is_aligned_to(8)); 12 | assert!(address2.is_aligned_to(32)); 13 | assert!((address3 + 4 as isize).is_aligned_to(8)); 14 | assert!((address4 + 4 as isize).is_aligned_to(64)); 15 | 16 | assert!(!bool1); 17 | #[cfg(feature = "malloc_hoard")] 18 | assert!(bool2); 19 | #[cfg(not(feature = 
"malloc_hoard"))] 20 | assert!(!bool2); 21 | assert!(bool3); 22 | assert!(bool4); 23 | 24 | assert!(malloc_ms_util::get_malloc_usable_size(address1, bool1) >= 16); 25 | assert!(malloc_ms_util::get_malloc_usable_size(address2, bool2) >= 16); 26 | assert!(malloc_ms_util::get_malloc_usable_size(address3, bool3) >= 16); 27 | assert!(malloc_ms_util::get_malloc_usable_size(address4, bool4) >= 32); 28 | 29 | unsafe { malloc_ms_util::free(address1.to_mut_ptr()); } 30 | #[cfg(feature = "malloc_hoard")] 31 | malloc_ms_util::offset_free(address2); 32 | #[cfg(not(feature = "malloc_hoard"))] 33 | unsafe { malloc_ms_util::free(address2.to_mut_ptr()); } 34 | malloc_ms_util::offset_free(address3); 35 | malloc_ms_util::offset_free(address4); 36 | } 37 | -------------------------------------------------------------------------------- /vmbindings/dummyvm/src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | // NOTE: Since the dummyvm uses a global MMTK instance, 2 | // it will panic if MMTK initialized more than once per process. 3 | // We run each of the following modules in a separate test process. 4 | // 5 | // One way to avoid re-initialization is to have only one #[test] per module. 6 | // There are also helpers for creating fixtures in `fixture/mod.rs`. 
7 | mod issue139; 8 | mod handle_mmap_oom; 9 | #[cfg(target_os = "linux")] 10 | mod handle_mmap_conflict; 11 | mod allocate_align_offset; 12 | mod allocate_without_initialize_collection; 13 | mod allocate_with_initialize_collection; 14 | mod allocate_with_disable_collection; 15 | mod allocate_with_re_enable_collection; 16 | #[cfg(not(feature = "malloc_counted_size"))] 17 | mod malloc_api; 18 | #[cfg(feature = "malloc_counted_size")] 19 | mod malloc_counted; 20 | mod malloc_ms; 21 | #[cfg(feature = "is_mmtk_object")] 22 | mod conservatism; 23 | mod is_in_mmtk_spaces; 24 | mod fixtures; 25 | mod edges_test; 26 | mod barrier_slow_path_assertion; 27 | --------------------------------------------------------------------------------