├── .github ├── DOCS.md ├── codecov.yml ├── dependabot.yml └── workflows │ ├── check.yml │ ├── safety.yml │ ├── scheduled.yml │ └── test.yml ├── .gitignore ├── .neoconf.json ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── benches ├── calc.rs └── fibbo.rs ├── examples ├── async.rs ├── calculator.rs ├── heavy_fibbo.rs └── heavy_fibbo_base.rs └── src ├── allocator.rs ├── defer.rs ├── lib.rs ├── ptr.rs ├── stack ├── future.rs ├── mod.rs ├── runner.rs ├── stk.rs └── test.rs ├── stub_waker.rs ├── test.rs ├── tree ├── future.rs ├── mod.rs ├── runner.rs ├── schedular │ ├── atomic_waker.rs │ ├── mod.rs │ ├── queue.rs │ └── waker.rs ├── stk.rs └── test.rs └── vtable.rs /.github/DOCS.md: -------------------------------------------------------------------------------- 1 | # Github config and workflows 2 | 3 | In this folder there is configuration for codecoverage, dependabot, and ci 4 | workflows that check the library more deeply than the default configurations. 5 | 6 | This folder can be or was merged using a --allow-unrelated-histories merge 7 | strategy from which provides a 8 | reasonably sensible base for writing your own ci on. By using this strategy 9 | the history of the CI repo is included in your repo, and future updates to 10 | the CI can be merged later. 11 | 12 | To perform this merge run: 13 | 14 | ```shell 15 | git remote add ci https://github.com/jonhoo/rust-ci-conf.git 16 | git fetch ci 17 | git merge --allow-unrelated-histories ci/main 18 | ``` 19 | 20 | An overview of the files in this project is available at: 21 | , which contains some 22 | rationale for decisions and runs through an example of solving minimal version 23 | and OpenSSL issues. 
24 | -------------------------------------------------------------------------------- /.github/codecov.yml: -------------------------------------------------------------------------------- 1 | # ref: https://docs.codecov.com/docs/codecovyml-reference 2 | coverage: 3 | # Hold ourselves to a high bar 4 | range: 85..100 5 | round: down 6 | precision: 1 7 | status: 8 | # ref: https://docs.codecov.com/docs/commit-status 9 | project: 10 | default: 11 | # Avoid false negatives 12 | threshold: 1% 13 | 14 | # Test files aren't important for coverage 15 | ignore: 16 | - "tests" 17 | 18 | # Make comments less noisy 19 | comment: 20 | layout: "files" 21 | require_changes: true 22 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: daily 7 | - package-ecosystem: cargo 8 | directory: / 9 | schedule: 10 | interval: daily 11 | ignore: 12 | - dependency-name: "*" 13 | # patch and minor updates don't matter for libraries as consumers of this library build 14 | # with their own lockfile, rather than the version specified in this library's lockfile 15 | # remove this ignore rule if your package has binaries to ensure that the binaries are 16 | # built with the exact set of dependencies and those are up to date. 17 | update-types: 18 | - "version-update:semver-patch" 19 | - "version-update:semver-minor" 20 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs whenever a PR is opened or updated, or a commit is pushed to main. 
It runs 2 | # several checks: 3 | # - fmt: checks that the code is formatted according to rustfmt 4 | # - clippy: checks that the code does not contain any clippy warnings 5 | # - doc: checks that the code can be documented without errors 6 | # - hack: check combinations of feature flags 7 | # - msrv: check that the msrv specified in the crate is correct 8 | permissions: 9 | contents: read 10 | # This configuration allows maintainers of this repo to create a branch and pull request based on 11 | # the new branch. Restricting the push trigger to the main branch ensures that the PR only gets 12 | # built once. 13 | on: 14 | push: 15 | branches: [main] 16 | pull_request: 17 | # If new code is pushed to a PR branch, then cancel in progress workflows for that PR. Ensures that 18 | # we don't waste CI time, and returns results quicker https://github.com/jonhoo/rust-ci-conf/pull/5 19 | concurrency: 20 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 21 | cancel-in-progress: true 22 | name: check 23 | jobs: 24 | fmt: 25 | runs-on: ubuntu-latest 26 | name: stable / fmt 27 | steps: 28 | - uses: actions/checkout@v4 29 | with: 30 | submodules: true 31 | - name: Install stable 32 | uses: dtolnay/rust-toolchain@stable 33 | with: 34 | components: rustfmt 35 | - name: cargo fmt --check 36 | run: cargo fmt --check 37 | clippy: 38 | runs-on: ubuntu-latest 39 | name: ${{ matrix.toolchain }} / clippy 40 | permissions: 41 | contents: read 42 | checks: write 43 | strategy: 44 | fail-fast: false 45 | matrix: 46 | # Get early warning of new lints which are regularly introduced in beta channels. 
47 | toolchain: [stable, beta] 48 | steps: 49 | - uses: actions/checkout@v4 50 | with: 51 | submodules: true 52 | - name: Install ${{ matrix.toolchain }} 53 | uses: dtolnay/rust-toolchain@master 54 | with: 55 | toolchain: ${{ matrix.toolchain }} 56 | components: clippy 57 | - name: cargo clippy 58 | uses: giraffate/clippy-action@v1 59 | with: 60 | reporter: 'github-pr-check' 61 | github_token: ${{ secrets.GITHUB_TOKEN }} 62 | doc: 63 | # run docs generation on nightly rather than stable. This enables features like 64 | # https://doc.rust-lang.org/beta/unstable-book/language-features/doc-cfg.html which allows an 65 | # API be documented as only available in some specific platforms. 66 | runs-on: ubuntu-latest 67 | name: nightly / doc 68 | steps: 69 | - uses: actions/checkout@v4 70 | with: 71 | submodules: true 72 | - name: Install nightly 73 | uses: dtolnay/rust-toolchain@nightly 74 | - name: cargo doc 75 | run: cargo doc --no-deps --all-features 76 | env: 77 | RUSTDOCFLAGS: --cfg docsrs 78 | # hack: 79 | # # cargo-hack checks combinations of feature flags to ensure that features are all additive 80 | # # which is required for feature unification 81 | # runs-on: ubuntu-latest 82 | # name: ubuntu / stable / features 83 | # steps: 84 | # - uses: actions/checkout@v4 85 | # with: 86 | # submodules: true 87 | # - name: Install stable 88 | # uses: dtolnay/rust-toolchain@stable 89 | # - name: cargo install cargo-hack 90 | # uses: taiki-e/install-action@cargo-hack 91 | # # intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4 92 | # # --feature-powerset runs for every combination of features 93 | # - name: cargo hack 94 | # run: cargo hack --feature-powerset check 95 | msrv: 96 | # check that we can build using the minimal rust version that is specified by this crate 97 | runs-on: ubuntu-latest 98 | # we use a matrix here just because env can't be used in job names 99 | # 
https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability 100 | strategy: 101 | matrix: 102 | msrv: ["1.84"] # strict/exposed_provenance were stabilized in 1.84 103 | name: ubuntu / ${{ matrix.msrv }} 104 | steps: 105 | - uses: actions/checkout@v4 106 | with: 107 | submodules: true 108 | - name: Install ${{ matrix.msrv }} 109 | uses: dtolnay/rust-toolchain@master 110 | with: 111 | toolchain: ${{ matrix.msrv }} 112 | - name: cargo +${{ matrix.msrv }} check 113 | run: cargo check 114 | -------------------------------------------------------------------------------- /.github/workflows/safety.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs checks for unsafe code. In crates that don't have any unsafe code, this can be 2 | # removed. Runs: 3 | # - miri - detects undefined behavior and memory leaks 4 | # - address sanitizer - detects memory errors 5 | # - leak sanitizer - detects memory leaks 6 | # - loom - Permutation testing for concurrent code https://crates.io/crates/loom 7 | # See check.yml for information about how the concurrency cancellation and workflow triggering works 8 | permissions: 9 | contents: read 10 | on: 11 | push: 12 | branches: [main] 13 | pull_request: 14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 16 | cancel-in-progress: true 17 | name: safety 18 | jobs: 19 | sanitizers: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: actions/checkout@v4 23 | with: 24 | submodules: true 25 | - name: Install nightly 26 | uses: dtolnay/rust-toolchain@nightly 27 | - run: | 28 | # to get the symbolizer for debug symbol resolution 29 | sudo apt install llvm 30 | # to fix buggy leak analyzer: 31 | # https://github.com/japaric/rust-san#unrealiable-leaksanitizer 32 | # ensure there's a profile.dev section 33 | if !
grep -qE '^[ \t]*\[profile\.dev\]' Cargo.toml; then 34 | echo >> Cargo.toml 35 | echo '[profile.dev]' >> Cargo.toml 36 | fi 37 | # remove pre-existing opt-levels in profile.dev 38 | sed -i '/^\s*\[profile.dev\]/,/^\s*\[/ {/^\s*opt-level/d}' Cargo.toml 39 | # now set opt-level to 1 40 | sed -i '/^\s*\[profile.dev\]/a opt-level = 1' Cargo.toml 41 | cat Cargo.toml 42 | name: Enable debug symbols 43 | - name: Install rust-src 44 | run: | 45 | rustup component add rust-src 46 | - name: cargo test -Zsanitizer=address 47 | # only --lib --tests b/c of https://github.com/rust-lang/rust/issues/53945 48 | run: cargo test -Zbuild-std --all-features --target x86_64-unknown-linux-gnu -- --skip forget 49 | env: 50 | RUSTFLAGS: "-Z sanitizer=address" 51 | # - name: cargo test -Zsanitizer=leak 52 | # if: always() 53 | # run: cargo test --all-features --target x86_64-unknown-linux-gnu 54 | # env: 55 | # LSAN_OPTIONS: "suppressions=lsan-suppressions.txt" 56 | # RUSTFLAGS: "-Z sanitizer=leak" 57 | miri: 58 | runs-on: ubuntu-latest 59 | steps: 60 | - uses: actions/checkout@v4 61 | with: 62 | submodules: true 63 | - run: | 64 | echo "NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)" >> $GITHUB_ENV 65 | - name: Install ${{ env.NIGHTLY }} 66 | uses: dtolnay/rust-toolchain@master 67 | with: 68 | toolchain: ${{ env.NIGHTLY }} 69 | components: miri 70 | - name: cargo miri test 71 | run: cargo miri test --all-features 72 | env: 73 | MIRIFLAGS: "" 74 | 75 | 76 | # loom: 77 | # runs-on: ubuntu-latest 78 | # steps: 79 | # - uses: actions/checkout@v4 80 | # with: 81 | # submodules: true 82 | # - name: Install stable 83 | # uses: dtolnay/rust-toolchain@stable 84 | # - name: cargo test --test loom 85 | # run: cargo test --release --test loom 86 | # env: 87 | # LOOM_MAX_PREEMPTIONS: 2 88 | # RUSTFLAGS: "--cfg loom" 89 | -------------------------------------------------------------------------------- /.github/workflows/scheduled.yml:
-------------------------------------------------------------------------------- 1 | # Run scheduled (rolling) jobs on a nightly basis, as your crate may break independently of any 2 | # given PR. E.g., updates to rust nightly and updates to this crates dependencies. See check.yml for 3 | # information about how the concurrency cancellation and workflow triggering works 4 | permissions: 5 | contents: read 6 | on: 7 | push: 8 | branches: [main] 9 | pull_request: 10 | schedule: 11 | - cron: '7 7 * * *' 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 14 | cancel-in-progress: true 15 | name: rolling 16 | jobs: 17 | # https://twitter.com/mycoliza/status/1571295690063753218 18 | nightly: 19 | runs-on: ubuntu-latest 20 | name: ubuntu / nightly 21 | steps: 22 | - uses: actions/checkout@v4 23 | with: 24 | submodules: true 25 | - name: Install nightly 26 | uses: dtolnay/rust-toolchain@nightly 27 | - name: cargo generate-lockfile 28 | if: hashFiles('Cargo.lock') == '' 29 | run: cargo generate-lockfile 30 | - name: cargo test --locked 31 | run: cargo test --locked --all-features --all-targets 32 | # https://twitter.com/alcuadrado/status/1571291687837732873 33 | update: 34 | # This action checks that updating the dependencies of this crate to the latest available that 35 | # satisfy the versions in Cargo.toml does not break this crate. This is important as consumers 36 | # of this crate will generally use the latest available crates. This is subject to the standard 37 | # Cargo semver rules (i.e cargo does not update to a new major version unless explicitly told 38 | # to). 39 | runs-on: ubuntu-latest 40 | name: ubuntu / beta / updated 41 | # There's no point running this if no Cargo.lock was checked in in the first place, since we'd 42 | # just redo what happened in the regular test job. Unfortunately, hashFiles only works in if on 43 | # steps, so we repeat it. 
44 | steps: 45 | - uses: actions/checkout@v4 46 | with: 47 | submodules: true 48 | - name: Install beta 49 | if: hashFiles('Cargo.lock') != '' 50 | uses: dtolnay/rust-toolchain@beta 51 | - name: cargo update 52 | if: hashFiles('Cargo.lock') != '' 53 | run: cargo update 54 | - name: cargo test 55 | if: hashFiles('Cargo.lock') != '' 56 | run: cargo test --locked --features tree --all-targets 57 | env: 58 | RUSTFLAGS: -D deprecated 59 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # This is the main CI workflow that runs the test suite on all pushes to main and all pull requests. 2 | # It runs the following jobs: 3 | # - required: runs the test suite on ubuntu with stable and beta rust toolchains 4 | # - minimal: runs the test suite with the minimal versions of the dependencies that satisfy the 5 | # requirements of this crate, and its dependencies 6 | # - os-check: runs the test suite on mac and windows 7 | # - coverage: runs the test suite and collects coverage information 8 | # See check.yml for information about how the concurrency cancellation and workflow triggering works 9 | permissions: 10 | contents: read 11 | on: 12 | push: 13 | branches: [main] 14 | pull_request: 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 17 | cancel-in-progress: true 18 | name: test 19 | jobs: 20 | required: 21 | runs-on: ubuntu-latest 22 | name: ubuntu / ${{ matrix.toolchain }} 23 | strategy: 24 | matrix: 25 | # run on stable and beta to ensure that tests won't break on the next version of the rust 26 | # toolchain 27 | # toolchain: [stable, beta] 28 | toolchain: [beta] 29 | steps: 30 | - uses: actions/checkout@v4 31 | with: 32 | submodules: true 33 | - name: Install ${{ matrix.toolchain }} 34 | uses: dtolnay/rust-toolchain@master 35 | with: 36 | toolchain: ${{ matrix.toolchain }} 37 | - 
name: cargo generate-lockfile 38 | # enable this ci template to run regardless of whether the lockfile is checked in or not 39 | if: hashFiles('Cargo.lock') == '' 40 | run: cargo generate-lockfile 41 | # https://twitter.com/jonhoo/status/1571290371124260865 42 | - name: cargo test --locked 43 | run: cargo test --locked --features tree --all-targets 44 | # https://github.com/rust-lang/cargo/issues/6669 45 | - name: cargo test --doc 46 | run: cargo test --locked --features tree --doc 47 | minimal: 48 | # This action chooses the oldest version of the dependencies permitted by Cargo.toml to ensure 49 | # that this crate is compatible with the minimal version that this crate and its dependencies 50 | # require. This will pickup issues where this create relies on functionality that was introduced 51 | # later than the actual version specified (e.g., when we choose just a major version, but a 52 | # method was added after this version). 53 | # 54 | # This particular check can be difficult to get to succeed as often transitive dependencies may 55 | # be incorrectly specified (e.g., a dependency specifies 1.0 but really requires 1.1.5). There 56 | # is an alternative flag available -Zdirect-minimal-versions that uses the minimal versions for 57 | # direct dependencies of this crate, while selecting the maximal versions for the transitive 58 | # dependencies. Alternatively, you can add a line in your Cargo.toml to artificially increase 59 | # the minimal dependency, which you do with e.g.: 60 | # ```toml 61 | # # for minimal-versions 62 | # [target.'cfg(any())'.dependencies] 63 | # openssl = { version = "0.10.55", optional = true } # needed to allow foo to build with -Zminimal-versions 64 | # ``` 65 | # The optional = true is necessary in case that dependency isn't otherwise transitively required 66 | # by your library, and the target bit is so that this dependency edge never actually affects 67 | # Cargo build order. 
See also 68 | # https://github.com/jonhoo/fantoccini/blob/fde336472b712bc7ebf5b4e772023a7ba71b2262/Cargo.toml#L47-L49. 69 | # This action is run on ubuntu with the stable toolchain, as it is not expected to fail 70 | runs-on: ubuntu-latest 71 | name: ubuntu / stable / minimal-versions 72 | steps: 73 | - uses: actions/checkout@v4 74 | with: 75 | submodules: true 76 | - name: Install stable 77 | uses: dtolnay/rust-toolchain@stable 78 | - name: Install nightly for -Zminimal-versions 79 | uses: dtolnay/rust-toolchain@nightly 80 | - name: rustup default stable 81 | run: rustup default stable 82 | - name: cargo update -Zminimal-versions 83 | run: cargo +nightly update -Zminimal-versions 84 | - name: cargo test 85 | run: cargo test --locked --features tree --all-targets 86 | os-check: 87 | # run cargo test on mac and windows 88 | runs-on: ${{ matrix.os }} 89 | name: ${{ matrix.os }} / stable 90 | strategy: 91 | fail-fast: false 92 | matrix: 93 | os: [macos-latest, windows-latest] 94 | steps: 95 | # if your project needs OpenSSL, uncomment this to fix Windows builds. 96 | # it's commented out by default as the install command takes 5-10m. 
97 | # - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append 98 | # if: runner.os == 'Windows' 99 | # - run: vcpkg install openssl:x64-windows-static-md 100 | # if: runner.os == 'Windows' 101 | - uses: actions/checkout@v4 102 | with: 103 | submodules: true 104 | - name: Install stable 105 | uses: dtolnay/rust-toolchain@stable 106 | - name: cargo generate-lockfile 107 | if: hashFiles('Cargo.lock') == '' 108 | run: cargo generate-lockfile 109 | - name: cargo test 110 | run: cargo test --locked --features tree --all-targets 111 | coverage: 112 | # use llvm-cov to build and collect coverage and outputs in a format that 113 | # is compatible with codecov.io 114 | # 115 | # note that codecov as of v4 requires that CODECOV_TOKEN from 116 | # 117 | # https://app.codecov.io/gh///settings 118 | # 119 | # is set in two places on your repo: 120 | # 121 | # - https://github.com/jonhoo/guardian/settings/secrets/actions 122 | # - https://github.com/jonhoo/guardian/settings/secrets/dependabot 123 | # 124 | # (the former is needed for codecov uploads to work with Dependabot PRs) 125 | # 126 | # PRs coming from forks of your repo will not have access to the token, but 127 | # for those, codecov allows uploading coverage reports without a token. 128 | # it's all a little weird and inconvenient. 
see 129 | # 130 | # https://github.com/codecov/feedback/issues/112 131 | # 132 | # for lots of more discussion 133 | runs-on: ubuntu-latest 134 | name: ubuntu / stable / coverage 135 | steps: 136 | - uses: actions/checkout@v4 137 | with: 138 | submodules: true 139 | - name: Install stable 140 | uses: dtolnay/rust-toolchain@stable 141 | with: 142 | components: llvm-tools-preview 143 | - name: cargo install cargo-llvm-cov 144 | uses: taiki-e/install-action@cargo-llvm-cov 145 | - name: cargo generate-lockfile 146 | if: hashFiles('Cargo.lock') == '' 147 | run: cargo generate-lockfile 148 | - name: cargo llvm-cov 149 | run: cargo llvm-cov --locked --features tree --lcov --output-path lcov.info 150 | - name: Record Rust version 151 | run: echo "RUST=$(rustc --version)" >> "$GITHUB_ENV" 152 | - name: Upload to codecov.io 153 | uses: codecov/codecov-action@v5 154 | with: 155 | fail_ci_if_error: true 156 | token: ${{ secrets.CODECOV_TOKEN }} 157 | env_vars: OS,RUST 158 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /.neoconf.json: -------------------------------------------------------------------------------- 1 | { 2 | "lspconfig": { 3 | "rust_analyzer": { 4 | "rust-analyzer.cargo.features": [ 5 | "tree" 6 | ], 7 | "rust-analyzer.check.command": "clippy" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "addr2line" 7 | version = "0.24.2" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" 10 | dependencies = [ 11 | "gimli", 12 | ] 13 | 14 | [[package]] 15 | name = "adler2" 16 | version = "2.0.0" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" 19 | 20 | [[package]] 21 | name = "aho-corasick" 22 | version = "1.1.3" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" 25 | dependencies = [ 26 | "memchr", 27 | ] 28 | 29 | [[package]] 30 | name = "anes" 31 | version = "0.1.6" 32 | source = "registry+https://github.com/rust-lang/crates.io-index" 33 | checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" 34 | 35 | [[package]] 36 | name = "anstyle" 37 | version = "1.0.10" 38 | source = "registry+https://github.com/rust-lang/crates.io-index" 39 | checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" 40 | 41 | [[package]] 42 | name = "autocfg" 43 | version = "1.4.0" 44 | source = "registry+https://github.com/rust-lang/crates.io-index" 45 | checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" 46 | 47 | [[package]] 48 | name = "backtrace" 49 | version = "0.3.74" 50 | source = "registry+https://github.com/rust-lang/crates.io-index" 51 | checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" 52 | dependencies = [ 53 | "addr2line", 54 | "cfg-if", 55 | "libc", 56 | "miniz_oxide", 57 | "object", 58 | "rustc-demangle", 59 | "windows-targets", 60 | ] 61 | 62 | [[package]] 63 | name = "bitflags" 64 | version = "2.9.0" 65 | source = "registry+https://github.com/rust-lang/crates.io-index" 66 | checksum = 
"5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" 67 | 68 | [[package]] 69 | name = "bumpalo" 70 | version = "3.17.0" 71 | source = "registry+https://github.com/rust-lang/crates.io-index" 72 | checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" 73 | 74 | [[package]] 75 | name = "bytes" 76 | version = "1.10.1" 77 | source = "registry+https://github.com/rust-lang/crates.io-index" 78 | checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" 79 | 80 | [[package]] 81 | name = "cast" 82 | version = "0.3.0" 83 | source = "registry+https://github.com/rust-lang/crates.io-index" 84 | checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" 85 | 86 | [[package]] 87 | name = "cfg-if" 88 | version = "1.0.0" 89 | source = "registry+https://github.com/rust-lang/crates.io-index" 90 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 91 | 92 | [[package]] 93 | name = "ciborium" 94 | version = "0.2.2" 95 | source = "registry+https://github.com/rust-lang/crates.io-index" 96 | checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" 97 | dependencies = [ 98 | "ciborium-io", 99 | "ciborium-ll", 100 | "serde", 101 | ] 102 | 103 | [[package]] 104 | name = "ciborium-io" 105 | version = "0.2.2" 106 | source = "registry+https://github.com/rust-lang/crates.io-index" 107 | checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" 108 | 109 | [[package]] 110 | name = "ciborium-ll" 111 | version = "0.2.2" 112 | source = "registry+https://github.com/rust-lang/crates.io-index" 113 | checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" 114 | dependencies = [ 115 | "ciborium-io", 116 | "half", 117 | ] 118 | 119 | [[package]] 120 | name = "clap" 121 | version = "4.5.35" 122 | source = "registry+https://github.com/rust-lang/crates.io-index" 123 | checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" 
124 | dependencies = [ 125 | "clap_builder", 126 | ] 127 | 128 | [[package]] 129 | name = "clap_builder" 130 | version = "4.5.35" 131 | source = "registry+https://github.com/rust-lang/crates.io-index" 132 | checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" 133 | dependencies = [ 134 | "anstyle", 135 | "clap_lex", 136 | ] 137 | 138 | [[package]] 139 | name = "clap_lex" 140 | version = "0.7.4" 141 | source = "registry+https://github.com/rust-lang/crates.io-index" 142 | checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" 143 | 144 | [[package]] 145 | name = "criterion" 146 | version = "0.5.1" 147 | source = "registry+https://github.com/rust-lang/crates.io-index" 148 | checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" 149 | dependencies = [ 150 | "anes", 151 | "cast", 152 | "ciborium", 153 | "clap", 154 | "criterion-plot", 155 | "is-terminal", 156 | "itertools", 157 | "num-traits", 158 | "once_cell", 159 | "oorandom", 160 | "plotters", 161 | "rayon", 162 | "regex", 163 | "serde", 164 | "serde_derive", 165 | "serde_json", 166 | "tinytemplate", 167 | "walkdir", 168 | ] 169 | 170 | [[package]] 171 | name = "criterion-plot" 172 | version = "0.5.0" 173 | source = "registry+https://github.com/rust-lang/crates.io-index" 174 | checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" 175 | dependencies = [ 176 | "cast", 177 | "itertools", 178 | ] 179 | 180 | [[package]] 181 | name = "crossbeam-deque" 182 | version = "0.8.6" 183 | source = "registry+https://github.com/rust-lang/crates.io-index" 184 | checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" 185 | dependencies = [ 186 | "crossbeam-epoch", 187 | "crossbeam-utils", 188 | ] 189 | 190 | [[package]] 191 | name = "crossbeam-epoch" 192 | version = "0.9.18" 193 | source = "registry+https://github.com/rust-lang/crates.io-index" 194 | checksum = 
"5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" 195 | dependencies = [ 196 | "crossbeam-utils", 197 | ] 198 | 199 | [[package]] 200 | name = "crossbeam-utils" 201 | version = "0.8.21" 202 | source = "registry+https://github.com/rust-lang/crates.io-index" 203 | checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" 204 | 205 | [[package]] 206 | name = "crunchy" 207 | version = "0.2.3" 208 | source = "registry+https://github.com/rust-lang/crates.io-index" 209 | checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" 210 | 211 | [[package]] 212 | name = "either" 213 | version = "1.15.0" 214 | source = "registry+https://github.com/rust-lang/crates.io-index" 215 | checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" 216 | 217 | [[package]] 218 | name = "futures-core" 219 | version = "0.3.31" 220 | source = "registry+https://github.com/rust-lang/crates.io-index" 221 | checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" 222 | 223 | [[package]] 224 | name = "futures-macro" 225 | version = "0.3.31" 226 | source = "registry+https://github.com/rust-lang/crates.io-index" 227 | checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" 228 | dependencies = [ 229 | "proc-macro2", 230 | "quote", 231 | "syn", 232 | ] 233 | 234 | [[package]] 235 | name = "futures-task" 236 | version = "0.3.31" 237 | source = "registry+https://github.com/rust-lang/crates.io-index" 238 | checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" 239 | 240 | [[package]] 241 | name = "futures-util" 242 | version = "0.3.31" 243 | source = "registry+https://github.com/rust-lang/crates.io-index" 244 | checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" 245 | dependencies = [ 246 | "futures-core", 247 | "futures-macro", 248 | "futures-task", 249 | "pin-project-lite", 250 | "pin-utils", 251 | "slab", 252 | ] 253 | 254 | [[package]] 
255 | name = "gimli" 256 | version = "0.31.1" 257 | source = "registry+https://github.com/rust-lang/crates.io-index" 258 | checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" 259 | 260 | [[package]] 261 | name = "half" 262 | version = "2.5.0" 263 | source = "registry+https://github.com/rust-lang/crates.io-index" 264 | checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" 265 | dependencies = [ 266 | "cfg-if", 267 | "crunchy", 268 | ] 269 | 270 | [[package]] 271 | name = "hermit-abi" 272 | version = "0.5.0" 273 | source = "registry+https://github.com/rust-lang/crates.io-index" 274 | checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" 275 | 276 | [[package]] 277 | name = "is-terminal" 278 | version = "0.4.16" 279 | source = "registry+https://github.com/rust-lang/crates.io-index" 280 | checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" 281 | dependencies = [ 282 | "hermit-abi", 283 | "libc", 284 | "windows-sys 0.59.0", 285 | ] 286 | 287 | [[package]] 288 | name = "itertools" 289 | version = "0.10.5" 290 | source = "registry+https://github.com/rust-lang/crates.io-index" 291 | checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" 292 | dependencies = [ 293 | "either", 294 | ] 295 | 296 | [[package]] 297 | name = "itoa" 298 | version = "1.0.15" 299 | source = "registry+https://github.com/rust-lang/crates.io-index" 300 | checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" 301 | 302 | [[package]] 303 | name = "js-sys" 304 | version = "0.3.77" 305 | source = "registry+https://github.com/rust-lang/crates.io-index" 306 | checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" 307 | dependencies = [ 308 | "once_cell", 309 | "wasm-bindgen", 310 | ] 311 | 312 | [[package]] 313 | name = "libc" 314 | version = "0.2.171" 315 | source = "registry+https://github.com/rust-lang/crates.io-index" 316 | checksum = 
"c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" 317 | 318 | [[package]] 319 | name = "lock_api" 320 | version = "0.4.12" 321 | source = "registry+https://github.com/rust-lang/crates.io-index" 322 | checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" 323 | dependencies = [ 324 | "autocfg", 325 | "scopeguard", 326 | ] 327 | 328 | [[package]] 329 | name = "log" 330 | version = "0.4.27" 331 | source = "registry+https://github.com/rust-lang/crates.io-index" 332 | checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" 333 | 334 | [[package]] 335 | name = "memchr" 336 | version = "2.7.4" 337 | source = "registry+https://github.com/rust-lang/crates.io-index" 338 | checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" 339 | 340 | [[package]] 341 | name = "miniz_oxide" 342 | version = "0.8.7" 343 | source = "registry+https://github.com/rust-lang/crates.io-index" 344 | checksum = "ff70ce3e48ae43fa075863cef62e8b43b71a4f2382229920e0df362592919430" 345 | dependencies = [ 346 | "adler2", 347 | ] 348 | 349 | [[package]] 350 | name = "mio" 351 | version = "1.0.3" 352 | source = "registry+https://github.com/rust-lang/crates.io-index" 353 | checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" 354 | dependencies = [ 355 | "libc", 356 | "wasi", 357 | "windows-sys 0.52.0", 358 | ] 359 | 360 | [[package]] 361 | name = "num-traits" 362 | version = "0.2.19" 363 | source = "registry+https://github.com/rust-lang/crates.io-index" 364 | checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" 365 | dependencies = [ 366 | "autocfg", 367 | ] 368 | 369 | [[package]] 370 | name = "object" 371 | version = "0.36.7" 372 | source = "registry+https://github.com/rust-lang/crates.io-index" 373 | checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" 374 | dependencies = [ 375 | "memchr", 376 | ] 377 | 378 | [[package]] 379 | name = "once_cell" 
380 | version = "1.21.3" 381 | source = "registry+https://github.com/rust-lang/crates.io-index" 382 | checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" 383 | 384 | [[package]] 385 | name = "oorandom" 386 | version = "11.1.5" 387 | source = "registry+https://github.com/rust-lang/crates.io-index" 388 | checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" 389 | 390 | [[package]] 391 | name = "parking_lot" 392 | version = "0.12.3" 393 | source = "registry+https://github.com/rust-lang/crates.io-index" 394 | checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" 395 | dependencies = [ 396 | "lock_api", 397 | "parking_lot_core", 398 | ] 399 | 400 | [[package]] 401 | name = "parking_lot_core" 402 | version = "0.9.10" 403 | source = "registry+https://github.com/rust-lang/crates.io-index" 404 | checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" 405 | dependencies = [ 406 | "cfg-if", 407 | "libc", 408 | "redox_syscall", 409 | "smallvec", 410 | "windows-targets", 411 | ] 412 | 413 | [[package]] 414 | name = "pin-project-lite" 415 | version = "0.2.16" 416 | source = "registry+https://github.com/rust-lang/crates.io-index" 417 | checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" 418 | 419 | [[package]] 420 | name = "pin-utils" 421 | version = "0.1.0" 422 | source = "registry+https://github.com/rust-lang/crates.io-index" 423 | checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" 424 | 425 | [[package]] 426 | name = "plotters" 427 | version = "0.3.7" 428 | source = "registry+https://github.com/rust-lang/crates.io-index" 429 | checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" 430 | dependencies = [ 431 | "num-traits", 432 | "plotters-backend", 433 | "plotters-svg", 434 | "wasm-bindgen", 435 | "web-sys", 436 | ] 437 | 438 | [[package]] 439 | name = "plotters-backend" 440 | version = "0.3.7" 441 | source 
= "registry+https://github.com/rust-lang/crates.io-index" 442 | checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" 443 | 444 | [[package]] 445 | name = "plotters-svg" 446 | version = "0.3.7" 447 | source = "registry+https://github.com/rust-lang/crates.io-index" 448 | checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" 449 | dependencies = [ 450 | "plotters-backend", 451 | ] 452 | 453 | [[package]] 454 | name = "pollster" 455 | version = "0.3.0" 456 | source = "registry+https://github.com/rust-lang/crates.io-index" 457 | checksum = "22686f4785f02a4fcc856d3b3bb19bf6c8160d103f7a99cc258bddd0251dc7f2" 458 | 459 | [[package]] 460 | name = "proc-macro2" 461 | version = "1.0.94" 462 | source = "registry+https://github.com/rust-lang/crates.io-index" 463 | checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" 464 | dependencies = [ 465 | "unicode-ident", 466 | ] 467 | 468 | [[package]] 469 | name = "quote" 470 | version = "1.0.40" 471 | source = "registry+https://github.com/rust-lang/crates.io-index" 472 | checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" 473 | dependencies = [ 474 | "proc-macro2", 475 | ] 476 | 477 | [[package]] 478 | name = "rayon" 479 | version = "1.10.0" 480 | source = "registry+https://github.com/rust-lang/crates.io-index" 481 | checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" 482 | dependencies = [ 483 | "either", 484 | "rayon-core", 485 | ] 486 | 487 | [[package]] 488 | name = "rayon-core" 489 | version = "1.12.1" 490 | source = "registry+https://github.com/rust-lang/crates.io-index" 491 | checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" 492 | dependencies = [ 493 | "crossbeam-deque", 494 | "crossbeam-utils", 495 | ] 496 | 497 | [[package]] 498 | name = "reblessive" 499 | version = "0.4.3" 500 | dependencies = [ 501 | "ciborium", 502 | "criterion", 503 | "futures-util", 504 | "pollster", 505 
| "regex", 506 | "tokio", 507 | ] 508 | 509 | [[package]] 510 | name = "redox_syscall" 511 | version = "0.5.11" 512 | source = "registry+https://github.com/rust-lang/crates.io-index" 513 | checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" 514 | dependencies = [ 515 | "bitflags", 516 | ] 517 | 518 | [[package]] 519 | name = "regex" 520 | version = "1.11.1" 521 | source = "registry+https://github.com/rust-lang/crates.io-index" 522 | checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" 523 | dependencies = [ 524 | "aho-corasick", 525 | "memchr", 526 | "regex-automata", 527 | "regex-syntax", 528 | ] 529 | 530 | [[package]] 531 | name = "regex-automata" 532 | version = "0.4.9" 533 | source = "registry+https://github.com/rust-lang/crates.io-index" 534 | checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" 535 | dependencies = [ 536 | "aho-corasick", 537 | "memchr", 538 | "regex-syntax", 539 | ] 540 | 541 | [[package]] 542 | name = "regex-syntax" 543 | version = "0.8.5" 544 | source = "registry+https://github.com/rust-lang/crates.io-index" 545 | checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" 546 | 547 | [[package]] 548 | name = "rustc-demangle" 549 | version = "0.1.24" 550 | source = "registry+https://github.com/rust-lang/crates.io-index" 551 | checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" 552 | 553 | [[package]] 554 | name = "rustversion" 555 | version = "1.0.20" 556 | source = "registry+https://github.com/rust-lang/crates.io-index" 557 | checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" 558 | 559 | [[package]] 560 | name = "ryu" 561 | version = "1.0.20" 562 | source = "registry+https://github.com/rust-lang/crates.io-index" 563 | checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" 564 | 565 | [[package]] 566 | name = "same-file" 567 | version = "1.0.6" 568 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 569 | checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" 570 | dependencies = [ 571 | "winapi-util", 572 | ] 573 | 574 | [[package]] 575 | name = "scopeguard" 576 | version = "1.2.0" 577 | source = "registry+https://github.com/rust-lang/crates.io-index" 578 | checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" 579 | 580 | [[package]] 581 | name = "serde" 582 | version = "1.0.219" 583 | source = "registry+https://github.com/rust-lang/crates.io-index" 584 | checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" 585 | dependencies = [ 586 | "serde_derive", 587 | ] 588 | 589 | [[package]] 590 | name = "serde_derive" 591 | version = "1.0.219" 592 | source = "registry+https://github.com/rust-lang/crates.io-index" 593 | checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" 594 | dependencies = [ 595 | "proc-macro2", 596 | "quote", 597 | "syn", 598 | ] 599 | 600 | [[package]] 601 | name = "serde_json" 602 | version = "1.0.140" 603 | source = "registry+https://github.com/rust-lang/crates.io-index" 604 | checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" 605 | dependencies = [ 606 | "itoa", 607 | "memchr", 608 | "ryu", 609 | "serde", 610 | ] 611 | 612 | [[package]] 613 | name = "signal-hook-registry" 614 | version = "1.4.2" 615 | source = "registry+https://github.com/rust-lang/crates.io-index" 616 | checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" 617 | dependencies = [ 618 | "libc", 619 | ] 620 | 621 | [[package]] 622 | name = "slab" 623 | version = "0.4.9" 624 | source = "registry+https://github.com/rust-lang/crates.io-index" 625 | checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" 626 | dependencies = [ 627 | "autocfg", 628 | ] 629 | 630 | [[package]] 631 | name = "smallvec" 632 | version = "1.15.0" 633 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 634 | checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" 635 | 636 | [[package]] 637 | name = "socket2" 638 | version = "0.5.9" 639 | source = "registry+https://github.com/rust-lang/crates.io-index" 640 | checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" 641 | dependencies = [ 642 | "libc", 643 | "windows-sys 0.52.0", 644 | ] 645 | 646 | [[package]] 647 | name = "syn" 648 | version = "2.0.100" 649 | source = "registry+https://github.com/rust-lang/crates.io-index" 650 | checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" 651 | dependencies = [ 652 | "proc-macro2", 653 | "quote", 654 | "unicode-ident", 655 | ] 656 | 657 | [[package]] 658 | name = "tinytemplate" 659 | version = "1.2.1" 660 | source = "registry+https://github.com/rust-lang/crates.io-index" 661 | checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" 662 | dependencies = [ 663 | "serde", 664 | "serde_json", 665 | ] 666 | 667 | [[package]] 668 | name = "tokio" 669 | version = "1.44.2" 670 | source = "registry+https://github.com/rust-lang/crates.io-index" 671 | checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" 672 | dependencies = [ 673 | "backtrace", 674 | "bytes", 675 | "libc", 676 | "mio", 677 | "parking_lot", 678 | "pin-project-lite", 679 | "signal-hook-registry", 680 | "socket2", 681 | "tokio-macros", 682 | "windows-sys 0.52.0", 683 | ] 684 | 685 | [[package]] 686 | name = "tokio-macros" 687 | version = "2.5.0" 688 | source = "registry+https://github.com/rust-lang/crates.io-index" 689 | checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" 690 | dependencies = [ 691 | "proc-macro2", 692 | "quote", 693 | "syn", 694 | ] 695 | 696 | [[package]] 697 | name = "unicode-ident" 698 | version = "1.0.18" 699 | source = "registry+https://github.com/rust-lang/crates.io-index" 700 | checksum = 
"5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" 701 | 702 | [[package]] 703 | name = "walkdir" 704 | version = "2.5.0" 705 | source = "registry+https://github.com/rust-lang/crates.io-index" 706 | checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" 707 | dependencies = [ 708 | "same-file", 709 | "winapi-util", 710 | ] 711 | 712 | [[package]] 713 | name = "wasi" 714 | version = "0.11.0+wasi-snapshot-preview1" 715 | source = "registry+https://github.com/rust-lang/crates.io-index" 716 | checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" 717 | 718 | [[package]] 719 | name = "wasm-bindgen" 720 | version = "0.2.100" 721 | source = "registry+https://github.com/rust-lang/crates.io-index" 722 | checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" 723 | dependencies = [ 724 | "cfg-if", 725 | "once_cell", 726 | "rustversion", 727 | "wasm-bindgen-macro", 728 | ] 729 | 730 | [[package]] 731 | name = "wasm-bindgen-backend" 732 | version = "0.2.100" 733 | source = "registry+https://github.com/rust-lang/crates.io-index" 734 | checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" 735 | dependencies = [ 736 | "bumpalo", 737 | "log", 738 | "proc-macro2", 739 | "quote", 740 | "syn", 741 | "wasm-bindgen-shared", 742 | ] 743 | 744 | [[package]] 745 | name = "wasm-bindgen-macro" 746 | version = "0.2.100" 747 | source = "registry+https://github.com/rust-lang/crates.io-index" 748 | checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" 749 | dependencies = [ 750 | "quote", 751 | "wasm-bindgen-macro-support", 752 | ] 753 | 754 | [[package]] 755 | name = "wasm-bindgen-macro-support" 756 | version = "0.2.100" 757 | source = "registry+https://github.com/rust-lang/crates.io-index" 758 | checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" 759 | dependencies = [ 760 | "proc-macro2", 761 | "quote", 762 | "syn", 763 | 
"wasm-bindgen-backend", 764 | "wasm-bindgen-shared", 765 | ] 766 | 767 | [[package]] 768 | name = "wasm-bindgen-shared" 769 | version = "0.2.100" 770 | source = "registry+https://github.com/rust-lang/crates.io-index" 771 | checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" 772 | dependencies = [ 773 | "unicode-ident", 774 | ] 775 | 776 | [[package]] 777 | name = "web-sys" 778 | version = "0.3.77" 779 | source = "registry+https://github.com/rust-lang/crates.io-index" 780 | checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" 781 | dependencies = [ 782 | "js-sys", 783 | "wasm-bindgen", 784 | ] 785 | 786 | [[package]] 787 | name = "winapi-util" 788 | version = "0.1.9" 789 | source = "registry+https://github.com/rust-lang/crates.io-index" 790 | checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" 791 | dependencies = [ 792 | "windows-sys 0.59.0", 793 | ] 794 | 795 | [[package]] 796 | name = "windows-sys" 797 | version = "0.52.0" 798 | source = "registry+https://github.com/rust-lang/crates.io-index" 799 | checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" 800 | dependencies = [ 801 | "windows-targets", 802 | ] 803 | 804 | [[package]] 805 | name = "windows-sys" 806 | version = "0.59.0" 807 | source = "registry+https://github.com/rust-lang/crates.io-index" 808 | checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" 809 | dependencies = [ 810 | "windows-targets", 811 | ] 812 | 813 | [[package]] 814 | name = "windows-targets" 815 | version = "0.52.6" 816 | source = "registry+https://github.com/rust-lang/crates.io-index" 817 | checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" 818 | dependencies = [ 819 | "windows_aarch64_gnullvm", 820 | "windows_aarch64_msvc", 821 | "windows_i686_gnu", 822 | "windows_i686_gnullvm", 823 | "windows_i686_msvc", 824 | "windows_x86_64_gnu", 825 | "windows_x86_64_gnullvm", 826 | 
"windows_x86_64_msvc", 827 | ] 828 | 829 | [[package]] 830 | name = "windows_aarch64_gnullvm" 831 | version = "0.52.6" 832 | source = "registry+https://github.com/rust-lang/crates.io-index" 833 | checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" 834 | 835 | [[package]] 836 | name = "windows_aarch64_msvc" 837 | version = "0.52.6" 838 | source = "registry+https://github.com/rust-lang/crates.io-index" 839 | checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" 840 | 841 | [[package]] 842 | name = "windows_i686_gnu" 843 | version = "0.52.6" 844 | source = "registry+https://github.com/rust-lang/crates.io-index" 845 | checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" 846 | 847 | [[package]] 848 | name = "windows_i686_gnullvm" 849 | version = "0.52.6" 850 | source = "registry+https://github.com/rust-lang/crates.io-index" 851 | checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" 852 | 853 | [[package]] 854 | name = "windows_i686_msvc" 855 | version = "0.52.6" 856 | source = "registry+https://github.com/rust-lang/crates.io-index" 857 | checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" 858 | 859 | [[package]] 860 | name = "windows_x86_64_gnu" 861 | version = "0.52.6" 862 | source = "registry+https://github.com/rust-lang/crates.io-index" 863 | checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" 864 | 865 | [[package]] 866 | name = "windows_x86_64_gnullvm" 867 | version = "0.52.6" 868 | source = "registry+https://github.com/rust-lang/crates.io-index" 869 | checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" 870 | 871 | [[package]] 872 | name = "windows_x86_64_msvc" 873 | version = "0.52.6" 874 | source = "registry+https://github.com/rust-lang/crates.io-index" 875 | checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" 876 | 
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "reblessive" 3 | version = "0.4.3" 4 | edition = "2021" 5 | rust-version = "1.84" 6 | license = "MIT" 7 | readme = "README.md" 8 | description = "A small runtime for running deeply nested recursive functions" 9 | keywords = ["stack","call","async","memory","runtime"] 10 | repository = "https://github.com/DelSkayn/reblessive.git" 11 | 12 | [dev-dependencies] 13 | criterion = "0.5.1" 14 | futures-util = "0.3.30" 15 | pollster = "0.3.0" 16 | tokio = { version = "1.36.0", features = ["full"] } 17 | # Pinned so that criterion compiles when minimal version is used. 18 | ciborium = "0.2.2" 19 | regex = "1.5.3" 20 | 21 | [features] 22 | tree = [] 23 | nightly = [] 24 | 25 | [package.metadata.docs.rs] 26 | # document all features 27 | all-features = true 28 | # defines the configuration attribute `docsrs` 29 | rustdoc-args = ["--cfg", "docsrs"] 30 | 31 | [[bench]] 32 | name = "calc" 33 | harness = false 34 | [[bench]] 35 | name = "fibbo" 36 | harness = false 37 | 38 | [lints.rust] 39 | unexpected_cfgs = { level = "allow", check-cfg = ['cfg(docrs)'] } 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Mees Delzenne 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this 
permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![codecov](https://codecov.io/gh/DelSkayn/reblessive/graph/badge.svg?token=A2DZXD34AZ)](https://codecov.io/gh/DelSkayn/reblessive) 2 | [![crates.io](https://img.shields.io/crates/v/reblessive.svg)](https://crates.io/crates/reblessive/) 3 | 4 | 5 | # Reblessive 6 | 7 | A heap allocated runtime for deeply recursive algorithms. 8 | 9 | Turn your cursed recursive algorithm into a blessed heap allocated structure which won't 10 | overflow the stack, regardless of depth. 11 | 12 | ## What is this crate for? 13 | 14 | There are some types of algorithms which are easiest to write as a recursive algorithm. 15 | Examples include a recursive descent parsers and tree-walking interpreters. 16 | These algorithms often need to keep track of complex stack of state and are therefore easiest to write as a set of recursive function calling each other. 17 | This does however have a major downside: The stack can be rather limited. 18 | Especially when the input of a algorithm is externally controlled, implementing it as a recursive algorithm is asking for stack overflows. 19 | 20 | This library is an attempt to solve that issue. 
21 | It provides a small executor which is able to efficiently allocate new futures in a stack like order and then drive them in a single loop. 22 | With these executors you can write your recursive algorithm as a set of futures. 23 | The executor will then, whenever a function needs to call another, pause the current future to execute the newly scheduled future. 24 | This allows one to implement your algorithm in a way that still looks recursive but won't run out of stack if recursing very deep. 25 | 26 | 27 | ## Example 28 | 29 | ```rust 30 | use std::{ 31 | mem::MaybeUninit, 32 | time::{Duration, Instant}, 33 | }; 34 | 35 | use reblessive::{Stack, Stk}; 36 | 37 | async fn heavy_fibbo(ctx: &mut Stk, n: usize) -> usize { 38 | // An extra stack allocation to simulate a more complex function. 39 | let mut ballast: MaybeUninit<[u8; 1024 * 1024]> = std::mem::MaybeUninit::uninit(); 40 | // Make sure the ballast isn't compiled out. 41 | std::hint::black_box(&mut ballast); 42 | 43 | match n { 44 | 0 => 1, 45 | 1 => 1, 46 | x => { 47 | ctx.run(move |ctx| heavy_fibbo(ctx, x - 1)).await 48 | + ctx.run(move |ctx| heavy_fibbo(ctx, x - 2)).await 49 | } 50 | } 51 | } 52 | 53 | fn main() { 54 | // Create a stack to run the function in. 55 | let mut stack = Stack::new(); 56 | 57 | // run the function to completion on the stack. 58 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 20)).finish(); 59 | println!("result: {res}"); 60 | 61 | assert_eq!(res, 10946); 62 | 63 | // Reblessive can also make any recursive function interuptable. 64 | let mut runner = stack.enter(|ctx| heavy_fibbo(ctx, 60)); 65 | 66 | let start = Instant::now(); 67 | loop { 68 | // run the function forward by a step. 69 | // If this returned Some than the function completed. 70 | if let Some(x) = runner.step() { 71 | println!("finished: {x}") 72 | } 73 | // We didn't complete the computation in time so we can just drop the runner and stop the 74 | // function. 
75 | if start.elapsed() > Duration::from_secs(3) { 76 | println!("Timed out!"); 77 | break; 78 | } 79 | } 80 | } 81 | ``` 82 | -------------------------------------------------------------------------------- /benches/calc.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{self, Write}; 2 | 3 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 4 | use reblessive::Stk; 5 | 6 | #[derive(Debug)] 7 | enum UnaryOperator { 8 | Neg, 9 | Pos, 10 | } 11 | 12 | #[derive(Eq, PartialEq, Debug)] 13 | enum BinaryOperator { 14 | Pow, 15 | Mul, 16 | Div, 17 | Add, 18 | Sub, 19 | } 20 | 21 | #[derive(Debug)] 22 | enum Expression { 23 | Number(f64), 24 | Covered(Box), 25 | Binary { 26 | left: Box, 27 | op: BinaryOperator, 28 | right: Box, 29 | }, 30 | Unary { 31 | op: UnaryOperator, 32 | expr: Box, 33 | }, 34 | } 35 | 36 | #[derive(Debug)] 37 | pub enum Error { 38 | Parse, 39 | } 40 | 41 | impl std::error::Error for Error {} 42 | 43 | impl fmt::Display for Error { 44 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 45 | match self { 46 | Self::Parse => write!(f, "Failed to parse expression"), 47 | } 48 | } 49 | } 50 | 51 | fn is_number_char(v: u8) -> bool { 52 | v.is_ascii_digit() || matches!(v, b'.' 
| b'e' | b'E') 53 | } 54 | 55 | struct Buffer<'a>(&'a [u8]); 56 | 57 | impl Iterator for Buffer<'_> { 58 | type Item = u8; 59 | 60 | fn next(&mut self) -> Option { 61 | let (head, tail) = self.0.split_first()?; 62 | self.0 = tail; 63 | Some(*head) 64 | } 65 | } 66 | 67 | impl Buffer<'_> { 68 | pub fn get(&self, index: I) -> Option<&I::Output> 69 | where 70 | I: std::slice::SliceIndex<[u8]>, 71 | { 72 | self.0.get(index) 73 | } 74 | } 75 | 76 | async fn parse( 77 | ctx: &mut Stk, 78 | bytes: &mut Buffer<'_>, 79 | binding_power: u8, 80 | ) -> Result { 81 | let mut peek = bytes.get(0).copied(); 82 | let mut lhs = loop { 83 | match peek { 84 | Some(b'+') => { 85 | bytes.next(); 86 | let expr = ctx.run(|ctx| parse(ctx, bytes, 7)).await?; 87 | break Expression::Unary { 88 | op: UnaryOperator::Pos, 89 | expr: Box::new(expr), 90 | }; 91 | } 92 | Some(b'-') => { 93 | bytes.next(); 94 | let expr = ctx.run(|ctx| parse(ctx, bytes, 7)).await?; 95 | break Expression::Unary { 96 | op: UnaryOperator::Neg, 97 | expr: Box::new(expr), 98 | }; 99 | } 100 | Some(b'(') => { 101 | bytes.next(); 102 | let expr = ctx.run(|ctx| parse(ctx, bytes, 0)).await?; 103 | let Some(b')') = bytes.next() else { 104 | return Err(Error::Parse); 105 | }; 106 | break Expression::Covered(Box::new(expr)); 107 | } 108 | Some(x) if x.is_ascii_whitespace() => { 109 | bytes.next(); 110 | peek = bytes.get(0).copied(); 111 | continue; 112 | } 113 | Some(x) if is_number_char(x) => { 114 | let mut number = String::new(); 115 | number.push(x as char); 116 | bytes.next(); 117 | while bytes.get(0).copied().map(is_number_char).unwrap_or(false) { 118 | let c = bytes.next().unwrap(); 119 | number.push(c as char); 120 | let n = bytes.get(0).copied(); 121 | if n.map(|c| c.to_ascii_lowercase()) == Some(b'e') { 122 | number.push(n.unwrap() as char); 123 | let n = bytes.get(0).copied(); 124 | if matches!(n, Some(b'-' | b'+')) { 125 | bytes.next(); 126 | number.push(n.unwrap() as char); 127 | } 128 | } 129 | } 130 | let num = 
number.parse::().map_err(|_| Error::Parse)?; 131 | break Expression::Number(num); 132 | } 133 | _ => { 134 | return Err(Error::Parse); 135 | } 136 | }; 137 | }; 138 | 139 | loop { 140 | let (op, bp) = match bytes.get(0).copied() { 141 | Some(b'*') => { 142 | if let Some(b'*') = bytes.get(1) { 143 | (BinaryOperator::Pow, (5, 6)) 144 | } else { 145 | (BinaryOperator::Mul, (3, 4)) 146 | } 147 | } 148 | Some(b'/') => (BinaryOperator::Div, (3, 4)), 149 | Some(b'+') => (BinaryOperator::Add, (1, 2)), 150 | Some(b'-') => (BinaryOperator::Sub, (1, 2)), 151 | Some(x) if x.is_ascii_whitespace() => { 152 | bytes.next(); 153 | continue; 154 | } 155 | _ => { 156 | break; 157 | } 158 | }; 159 | 160 | if bp.0 < binding_power { 161 | break; 162 | } 163 | 164 | bytes.next(); 165 | if op == BinaryOperator::Pow { 166 | bytes.next(); 167 | } 168 | 169 | let rhs = ctx.run(|ctx| parse(ctx, bytes, bp.1)).await?; 170 | 171 | lhs = Expression::Binary { 172 | left: Box::new(lhs), 173 | op, 174 | right: Box::new(rhs), 175 | } 176 | } 177 | 178 | Ok(lhs) 179 | } 180 | 181 | async fn eval(ctx: &mut Stk, expr: &Expression) -> f64 { 182 | match expr { 183 | Expression::Number(x) => *x, 184 | Expression::Covered(ref x) => ctx.run(|ctx| eval(ctx, x)).await, 185 | Expression::Binary { left, op, right } => { 186 | let left = ctx.run(|ctx| eval(ctx, left)).await; 187 | let right = ctx.run(|ctx| eval(ctx, right)).await; 188 | match op { 189 | BinaryOperator::Pow => left.powf(right), 190 | BinaryOperator::Mul => left * right, 191 | BinaryOperator::Div => left / right, 192 | BinaryOperator::Add => left + right, 193 | BinaryOperator::Sub => left - right, 194 | } 195 | } 196 | Expression::Unary { op, expr } => { 197 | let expr = ctx.run(|ctx| eval(ctx, expr)).await; 198 | match op { 199 | UnaryOperator::Neg => -expr, 200 | UnaryOperator::Pos => expr, 201 | } 202 | } 203 | } 204 | } 205 | 206 | fn generate_expression(len: usize) -> String { 207 | struct Rand(u32); 208 | 209 | impl Rand { 210 | fn new() -> 
Self { 211 | Rand(0x194b93c) 212 | } 213 | 214 | fn next(&mut self) -> u32 { 215 | let mut x = self.0; 216 | x ^= x << 13; 217 | x ^= x >> 17; 218 | x ^= x << 5; 219 | self.0 = x; 220 | x 221 | } 222 | } 223 | 224 | let mut res = String::new(); 225 | let mut rand = Rand::new(); 226 | for _ in 0..len { 227 | let num = (rand.next() % 1000) as f32 * 0.01; 228 | write!(res, "{} ", num).unwrap(); 229 | match rand.next() % 4 { 230 | 0 => write!(res, "+ ").unwrap(), 231 | 1 => write!(res, "* ").unwrap(), 232 | 2 => write!(res, "- ").unwrap(), 233 | 3 => write!(res, "/ ").unwrap(), 234 | _ => unreachable!(), 235 | } 236 | } 237 | 238 | let num = (rand.next() % 1000) as f32 * 0.01; 239 | write!(res, "{} ", num).unwrap(); 240 | 241 | res 242 | } 243 | 244 | fn bench_expr(c: &mut Criterion) { 245 | c.bench_function("expr 100", |b| { 246 | let expr = generate_expression(100); 247 | b.iter(|| { 248 | let mut stack = reblessive::Stack::new(); 249 | let mut tokens = Buffer(expr.as_bytes()); 250 | let expr = stack 251 | .enter(|ctx| parse(ctx, &mut tokens, 0)) 252 | .finish() 253 | .unwrap(); 254 | black_box(stack.enter(|ctx| eval(ctx, &expr)).finish()); 255 | }) 256 | }); 257 | 258 | c.bench_function("expr 100 no startup", |b| { 259 | let expr = generate_expression(100); 260 | let mut stack = reblessive::Stack::new(); 261 | b.iter(|| { 262 | let mut tokens = Buffer(expr.as_bytes()); 263 | let expr = stack 264 | .enter(|ctx| parse(ctx, &mut tokens, 0)) 265 | .finish() 266 | .unwrap(); 267 | black_box(stack.enter(|ctx| eval(ctx, &expr)).finish()) 268 | }) 269 | }); 270 | } 271 | criterion_group!(benches, bench_expr); 272 | criterion_main!(benches); 273 | -------------------------------------------------------------------------------- /benches/fibbo.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, Criterion}; 2 | use reblessive::{Stack, Stk}; 3 | 4 | async fn heavy_fibbo(ctx: &mut Stk, n: usize) 
-> usize { 5 | match n { 6 | 0 => 1, 7 | 1 => 1, 8 | x => { 9 | ctx.run(move |ctx| heavy_fibbo(ctx, x - 1)).await 10 | + ctx.run(move |ctx| heavy_fibbo(ctx, x - 2)).await 11 | } 12 | } 13 | } 14 | 15 | fn bench_fibbo(c: &mut Criterion) { 16 | c.bench_function("fib 15", |b| { 17 | b.iter(|| { 18 | // Create a stack to run the function in. 19 | let mut stack = Stack::new(); 20 | 21 | // run the function to completion on the stack. 22 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 15)).finish(); 23 | assert_eq!(res, 987); 24 | }) 25 | }); 26 | 27 | c.bench_function("fib 20", |b| { 28 | b.iter(|| { 29 | // Create a stack to run the function in. 30 | let mut stack = Stack::new(); 31 | 32 | // run the function to completion on the stack. 33 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 20)).finish(); 34 | assert_eq!(res, 10946); 35 | }) 36 | }); 37 | 38 | c.bench_function("fib 25", |b| { 39 | b.iter(|| { 40 | // Create a stack to run the function in. 41 | let mut stack = Stack::new(); 42 | 43 | // run the function to completion on the stack. 44 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 30)).finish(); 45 | assert_eq!(res, 1346269); 46 | }) 47 | }); 48 | } 49 | 50 | fn bench_fibbo_async(c: &mut Criterion) { 51 | c.bench_function("fib async 15", |b| { 52 | let rt = tokio::runtime::Builder::new_current_thread() 53 | .build() 54 | .unwrap(); 55 | b.iter(|| { 56 | rt.block_on(async { 57 | // Create a stack to run the function in. 58 | let mut stack = Stack::new(); 59 | 60 | // run the function to completion on the stack. 61 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 15)).finish_async().await; 62 | assert_eq!(res, 987); 63 | }) 64 | }) 65 | }); 66 | 67 | c.bench_function("fib async 20", |b| { 68 | let rt = tokio::runtime::Builder::new_current_thread() 69 | .build() 70 | .unwrap(); 71 | b.iter(|| { 72 | rt.block_on(async { 73 | // Create a stack to run the function in. 74 | let mut stack = Stack::new(); 75 | 76 | // run the function to completion on the stack. 
77 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 20)).finish_async().await; 78 | assert_eq!(res, 10946); 79 | }) 80 | }) 81 | }); 82 | 83 | c.bench_function("fib async 25", |b| { 84 | let rt = tokio::runtime::Builder::new_current_thread() 85 | .build() 86 | .unwrap(); 87 | b.iter(|| { 88 | rt.block_on(async { 89 | // Create a stack to run the function in. 90 | let mut stack = Stack::new(); 91 | 92 | // run the function to completion on the stack. 93 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 30)).finish_async().await; 94 | assert_eq!(res, 1346269); 95 | }) 96 | }) 97 | }); 98 | } 99 | 100 | criterion_group!(benches, bench_fibbo, bench_fibbo_async); 101 | criterion_main!(benches); 102 | -------------------------------------------------------------------------------- /examples/async.rs: -------------------------------------------------------------------------------- 1 | use std::mem::MaybeUninit; 2 | 3 | use reblessive::{Stack, Stk}; 4 | 5 | async fn deep_read(ctx: &mut Stk, n: usize) -> String { 6 | let mut ballast: MaybeUninit<[u8; 1024 * 1024]> = std::mem::MaybeUninit::uninit(); 7 | std::hint::black_box(&mut ballast); 8 | 9 | if n == 0 { 10 | tokio::fs::read_to_string("./Cargo.toml").await.unwrap() 11 | } else { 12 | ctx.run(|ctx| deep_read(ctx, n - 1)).await 13 | } 14 | } 15 | 16 | #[tokio::main] 17 | async fn main() { 18 | let mut stack = Stack::new(); 19 | 20 | let str = stack.enter(|ctx| deep_read(ctx, 20)).finish_async().await; 21 | 22 | println!("{}", str); 23 | } 24 | -------------------------------------------------------------------------------- /examples/calculator.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use reblessive::Stk; 4 | 5 | #[derive(Debug)] 6 | enum UnaryOperator { 7 | Neg, 8 | Pos, 9 | } 10 | 11 | #[derive(Eq, PartialEq, Debug)] 12 | enum BinaryOperator { 13 | Pow, 14 | Mul, 15 | Div, 16 | Add, 17 | Sub, 18 | } 19 | 20 | #[derive(Debug)] 21 | enum Expression { 22 | 
Number(f64), 23 | Covered(Box), 24 | Binary { 25 | left: Box, 26 | op: BinaryOperator, 27 | right: Box, 28 | }, 29 | Unary { 30 | op: UnaryOperator, 31 | expr: Box, 32 | }, 33 | } 34 | 35 | #[derive(Debug)] 36 | pub enum Error { 37 | Parse, 38 | } 39 | 40 | impl std::error::Error for Error {} 41 | 42 | impl fmt::Display for Error { 43 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 44 | match self { 45 | Self::Parse => write!(f, "Failed to parse expression"), 46 | } 47 | } 48 | } 49 | 50 | fn is_number_char(v: u8) -> bool { 51 | v.is_ascii_digit() || matches!(v, b'.' | b'e' | b'E') 52 | } 53 | 54 | struct Buffer<'a>(&'a [u8]); 55 | 56 | impl Iterator for Buffer<'_> { 57 | type Item = u8; 58 | 59 | fn next(&mut self) -> Option { 60 | let (head, tail) = self.0.split_first()?; 61 | self.0 = tail; 62 | Some(*head) 63 | } 64 | } 65 | 66 | impl Buffer<'_> { 67 | pub fn get(&self, index: I) -> Option<&I::Output> 68 | where 69 | I: std::slice::SliceIndex<[u8]>, 70 | { 71 | self.0.get(index) 72 | } 73 | } 74 | 75 | async fn parse( 76 | ctx: &mut Stk, 77 | bytes: &mut Buffer<'_>, 78 | binding_power: u8, 79 | ) -> Result { 80 | let peek = bytes.get(0).copied(); 81 | let mut lhs = loop { 82 | match peek { 83 | Some(b'+') => { 84 | bytes.next(); 85 | let expr = ctx.run(|ctx| parse(ctx, bytes, 7)).await?; 86 | break Expression::Unary { 87 | op: UnaryOperator::Pos, 88 | expr: Box::new(expr), 89 | }; 90 | } 91 | Some(b'-') => { 92 | bytes.next(); 93 | let expr = ctx.run(|ctx| parse(ctx, bytes, 7)).await?; 94 | break Expression::Unary { 95 | op: UnaryOperator::Neg, 96 | expr: Box::new(expr), 97 | }; 98 | } 99 | Some(b'(') => { 100 | bytes.next(); 101 | let expr = ctx.run(|ctx| parse(ctx, bytes, 0)).await?; 102 | let Some(b')') = bytes.next() else { 103 | return Err(Error::Parse); 104 | }; 105 | break Expression::Covered(Box::new(expr)); 106 | } 107 | Some(x) if x.is_ascii_whitespace() => continue, 108 | Some(x) if is_number_char(x) => { 109 | let mut number = 
String::new(); 110 | number.push(x as char); 111 | bytes.next(); 112 | while bytes.get(0).copied().map(is_number_char).unwrap_or(false) { 113 | let c = bytes.next().unwrap(); 114 | number.push(c as char); 115 | if c.eq_ignore_ascii_case(&b'e') { 116 | let n = bytes.get(0).copied(); 117 | if matches!(n, Some(b'-' | b'+')) { 118 | bytes.next(); 119 | number.push(n.unwrap() as char); 120 | } 121 | } 122 | } 123 | let num = number.parse::().map_err(|_| Error::Parse)?; 124 | break Expression::Number(num); 125 | } 126 | _ => { 127 | return Err(Error::Parse); 128 | } 129 | }; 130 | }; 131 | 132 | loop { 133 | let (op, bp) = match bytes.get(0).copied() { 134 | Some(b'*') => { 135 | if let Some(b'*') = bytes.get(1) { 136 | (BinaryOperator::Pow, (5, 6)) 137 | } else { 138 | (BinaryOperator::Mul, (3, 4)) 139 | } 140 | } 141 | Some(b'/') => (BinaryOperator::Div, (3, 4)), 142 | Some(b'+') => (BinaryOperator::Add, (1, 2)), 143 | Some(b'-') => (BinaryOperator::Sub, (1, 2)), 144 | Some(x) if x.is_ascii_whitespace() => { 145 | continue; 146 | } 147 | _ => break, 148 | }; 149 | 150 | if bp.0 < binding_power { 151 | break; 152 | } 153 | 154 | bytes.next(); 155 | if op == BinaryOperator::Pow { 156 | bytes.next(); 157 | } 158 | 159 | let rhs = ctx.run(|ctx| parse(ctx, bytes, bp.1)).await?; 160 | 161 | lhs = Expression::Binary { 162 | left: Box::new(lhs), 163 | op, 164 | right: Box::new(rhs), 165 | } 166 | } 167 | 168 | Ok(lhs) 169 | } 170 | 171 | async fn eval(ctx: &mut Stk, expr: &Expression) -> f64 { 172 | match expr { 173 | Expression::Number(x) => *x, 174 | Expression::Covered(ref x) => ctx.run(|ctx| eval(ctx, x)).await, 175 | Expression::Binary { left, op, right } => { 176 | let left = ctx.run(|ctx| eval(ctx, left)).await; 177 | let right = ctx.run(|ctx| eval(ctx, right)).await; 178 | match op { 179 | BinaryOperator::Pow => left.powf(right), 180 | BinaryOperator::Mul => left * right, 181 | BinaryOperator::Div => left / right, 182 | BinaryOperator::Add => left + right, 183 | 
BinaryOperator::Sub => left - right, 184 | } 185 | } 186 | Expression::Unary { op, expr } => { 187 | let expr = ctx.run(|ctx| eval(ctx, expr)).await; 188 | match op { 189 | UnaryOperator::Neg => -expr, 190 | UnaryOperator::Pos => expr, 191 | } 192 | } 193 | } 194 | } 195 | 196 | // A recursively defined simple calculater which can parse arbitrary depth expressions without 197 | // ever overflowing the stack. 198 | fn main() -> Result<(), Error> { 199 | let expr = std::env::args().skip(1).collect::>().join(" "); 200 | if expr.is_empty() { 201 | return Ok(()); 202 | } 203 | let mut stack = reblessive::Stack::new(); 204 | let mut tokens = Buffer(expr.as_bytes()); 205 | let expr = stack.enter(|ctx| parse(ctx, &mut tokens, 0)).finish()?; 206 | 207 | eprintln!("EXPRESSION: {:#?}", expr); 208 | 209 | println!("{}", stack.enter(|ctx| eval(ctx, &expr)).finish()); 210 | 211 | Ok(()) 212 | } 213 | -------------------------------------------------------------------------------- /examples/heavy_fibbo.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | mem::MaybeUninit, 3 | time::{Duration, Instant}, 4 | }; 5 | 6 | use reblessive::{Stack, Stk}; 7 | 8 | async fn heavy_fibbo(ctx: &mut Stk, n: usize) -> usize { 9 | // An extra stack allocation to simulate a more complex function. 10 | let mut ballast: MaybeUninit<[u8; 1024 * 1024]> = std::mem::MaybeUninit::uninit(); 11 | // Make sure the ballast isn't compiled out. 12 | std::hint::black_box(&mut ballast); 13 | 14 | match n { 15 | 0 => 1, 16 | 1 => 1, 17 | x => { 18 | ctx.run(move |ctx| heavy_fibbo(ctx, x - 1)).await 19 | + ctx.run(move |ctx| heavy_fibbo(ctx, x - 2)).await 20 | } 21 | } 22 | } 23 | 24 | fn main() { 25 | // Create a stack to run the function in. 26 | let mut stack = Stack::new(); 27 | 28 | // run the function to completion on the stack. 
29 | let res = stack.enter(|ctx| heavy_fibbo(ctx, 20)).finish(); 30 | println!("result: {res}"); 31 | 32 | assert_eq!(res, 10946); 33 | 34 | // Reblessive can also make any recursive function interuptable. 35 | let mut runner = stack.enter(|ctx| heavy_fibbo(ctx, 60)); 36 | 37 | let start = Instant::now(); 38 | loop { 39 | // run the function forward by a step. 40 | // If this returned Some than the function completed. 41 | if let Some(x) = runner.step() { 42 | println!("finished: {x}") 43 | } 44 | // We didn't complete the computation in time so we can just drop the runner and stop the 45 | // function. 46 | if start.elapsed() > Duration::from_secs(3) { 47 | println!("Timed out!"); 48 | break; 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /examples/heavy_fibbo_base.rs: -------------------------------------------------------------------------------- 1 | use std::mem::MaybeUninit; 2 | 3 | fn heavy_fibbo(n: usize) -> usize { 4 | let mut ballast: MaybeUninit<[u8; 1024 * 1024]> = std::mem::MaybeUninit::uninit(); 5 | std::hint::black_box(&mut ballast); 6 | 7 | match n { 8 | 0 => 1, 9 | 1 => 1, 10 | x => heavy_fibbo(x - 1) + heavy_fibbo(x - 2), 11 | } 12 | } 13 | 14 | fn main() { 15 | let res = heavy_fibbo(20); 16 | 17 | assert_eq!(res, 10946) 18 | } 19 | -------------------------------------------------------------------------------- /src/allocator.rs: -------------------------------------------------------------------------------- 1 | use std::alloc::Layout; 2 | 3 | use crate::ptr::Owned; 4 | 5 | struct BlockHeader { 6 | previous: Option>, 7 | last: Owned, 8 | } 9 | 10 | /// A stack allocator, an allocator which is only able to free the most recent allocated value. 11 | /// 12 | /// Allocates increasingly larger and larger chunks of memory, freeing previous ones once they are 13 | /// empty, only keeping the most recent around. 
14 | pub struct StackAllocator { 15 | block: Option>, 16 | top: Option>, 17 | } 18 | 19 | impl StackAllocator { 20 | pub const MINIMUM_ALIGN: usize = std::mem::align_of::>>(); 21 | pub const BACK_POINTER_SIZE: usize = std::mem::size_of::>>(); 22 | 23 | pub fn new() -> Self { 24 | StackAllocator { 25 | block: None, 26 | top: None, 27 | } 28 | } 29 | 30 | pub fn top(&self) -> Option> { 31 | self.top.map(|x| unsafe { x.add(Self::BACK_POINTER_SIZE) }) 32 | } 33 | 34 | // returns the amount of bytes required at most to allocate a value. 35 | // If the layout has an alignment bigger than that of the block header we allocate a space 36 | // larger then actually needed to ensure we can align the allocation pointer properly. 37 | const fn alloc_size(layout: Layout) -> Option { 38 | let pad_size = layout.align().saturating_sub(Self::MINIMUM_ALIGN); 39 | layout 40 | .size() 41 | .checked_add(pad_size + Self::BACK_POINTER_SIZE) 42 | } 43 | 44 | fn within_block(block: Owned, addr: usize) -> bool { 45 | let size = unsafe { Self::block_size(block) }; 46 | let block_addr = block.addr().get(); 47 | (block_addr..=(block_addr + size)).contains(&addr) 48 | } 49 | 50 | #[inline(always)] 51 | pub fn alloc_with T>(&mut self, f: F) -> Owned { 52 | #[inline(always)] 53 | unsafe fn inner_writer(ptr: *mut T, f: F) 54 | where 55 | F: FnOnce() -> T, 56 | { 57 | std::ptr::write(ptr, f()) 58 | } 59 | 60 | let layout = Layout::new::(); 61 | 62 | let ptr = self.alloc_layout(layout).cast::(); 63 | unsafe { inner_writer(ptr.as_ptr(), f) } 64 | ptr 65 | } 66 | 67 | /// Push a new allocation to the top of the allocator. 68 | pub fn alloc_layout(&mut self, layout: Layout) -> Owned { 69 | let block = if let Some(b) = self.block { 70 | b 71 | } else { 72 | unsafe { self.grow_initial(layout) } 73 | }; 74 | 75 | let mut top_ptr = self 76 | .top 77 | .map(|x| { 78 | let x = x.cast::(); 79 | if !Self::within_block(block, x.addr().get()) { 80 | // only allocate on the last block. 
81 | unsafe { block.as_ref().last } 82 | } else { 83 | x 84 | } 85 | }) 86 | .unwrap_or_else(|| unsafe { block.as_ref().last }); 87 | 88 | let alloc_size = Self::alloc_size(layout).expect("Type layour exceeded limits"); 89 | 90 | let remaining = top_ptr 91 | .addr() 92 | .get() 93 | .saturating_sub(alloc_size) 94 | .saturating_sub(block.addr().get()); 95 | 96 | if remaining < std::mem::size_of::() { 97 | top_ptr = unsafe { self.grow_alloc_new_top(layout) }; 98 | } 99 | 100 | let align = layout.align().max(Self::MINIMUM_ALIGN); 101 | let size = layout.size(); 102 | 103 | let res = unsafe { top_ptr.sub(size) }; 104 | let res = unsafe { res.map_addr_unchecked(|addr| addr & !(align - 1)) }; 105 | unsafe { 106 | let new_top = res.sub(Self::BACK_POINTER_SIZE); 107 | new_top.cast().write(self.top); 108 | self.top = Some(new_top); 109 | } 110 | 111 | res 112 | } 113 | 114 | /// Pop the top allocation from the allocator. 115 | /// # Safety 116 | /// Caller must ensure that the to be popped memory is no longer used and it was allocated with 117 | /// the same layout as given to this function. 118 | pub unsafe fn pop_dealloc(&mut self) { 119 | let top = self.top.expect("invalid deallocation"); 120 | // if there is a top, there must be a block. 121 | let block = self.block.unwrap(); 122 | self.top = top.cast::>>().read(); 123 | 124 | if Self::within_block(block, top.addr().get()) { 125 | return; 126 | } 127 | 128 | // the current top was not allocated on the head block, 129 | // so it must be on the old one. 130 | let old_block = block.as_ref().previous.expect("invalid deallocation"); 131 | if self 132 | .top 133 | .map(|x| !Self::within_block(old_block, x.addr().get())) 134 | .unwrap_or(true) 135 | { 136 | // top was either None, meaning nothing is allocated, or the new head is not in the 137 | // current block, meaning that this block must now be empty, so can be deallcated. 
138 | block.as_mut().previous = old_block.as_ref().previous; 139 | Self::dealloc_old_block(old_block); 140 | } 141 | } 142 | 143 | #[cold] 144 | unsafe fn grow_alloc_new_top(&mut self, layout: Layout) -> Owned { 145 | let required_size = Self::alloc_size(layout).unwrap(); 146 | let old_block = self.block.take().unwrap(); 147 | 148 | // we failed to allocate so we need to allocate a new block. 149 | let new_alloc_size = unsafe { 150 | Self::block_size(old_block) 151 | .checked_add(required_size) 152 | .unwrap() 153 | .next_power_of_two() 154 | }; 155 | let block = Self::alloc_new_block(new_alloc_size); 156 | 157 | block.as_mut().previous = Some(old_block); 158 | self.block = Some(block); 159 | 160 | block.as_ref().last 161 | } 162 | 163 | #[cold] 164 | unsafe fn grow_initial(&mut self, layout: Layout) -> Owned { 165 | let required_size = Self::alloc_size(layout).unwrap() + std::mem::size_of::(); 166 | 167 | // we failed to allocate so we need to allocate a new block. 168 | let new_alloc_size = required_size.next_power_of_two(); 169 | assert_ne!(new_alloc_size, 0); 170 | 171 | let block = Self::alloc_new_block(new_alloc_size); 172 | self.block = Some(block); 173 | block 174 | } 175 | 176 | unsafe fn block_size(block: Owned) -> usize { 177 | block.as_ref().last.offset_from(block.cast::()) as usize 178 | } 179 | 180 | unsafe fn alloc_new_block(size: usize) -> Owned { 181 | debug_assert!(size.is_power_of_two()); 182 | debug_assert!(size >= std::mem::size_of::()); 183 | assert!( 184 | size < isize::MAX as usize, 185 | "Exceeded maximum allocation size" 186 | ); 187 | 188 | let layout = Layout::from_size_align(size, std::mem::align_of::()).unwrap(); 189 | 190 | let ptr = Owned::from_ptr(std::alloc::alloc(layout)) 191 | .unwrap() 192 | .cast::(); 193 | 194 | let head = ptr.cast::().add(size); 195 | 196 | ptr.as_ptr().write(BlockHeader { 197 | previous: None, 198 | last: head, 199 | }); 200 | 201 | ptr 202 | } 203 | 204 | #[cold] 205 | unsafe fn dealloc_old_block(ptr: 
Owned) { 206 | let size = ptr.as_ref().last.offset_from(ptr.cast::()) as usize; 207 | 208 | let layout = Layout::from_size_align(size, std::mem::align_of::()).unwrap(); 209 | 210 | std::alloc::dealloc(ptr.as_ptr().cast(), layout) 211 | } 212 | } 213 | 214 | impl Drop for StackAllocator { 215 | fn drop(&mut self) { 216 | let mut cur = self.block; 217 | while let Some(b) = cur { 218 | unsafe { 219 | cur = b.as_ref().previous; 220 | Self::dealloc_old_block(b); 221 | } 222 | } 223 | } 224 | } 225 | 226 | #[cfg(test)] 227 | mod test { 228 | use crate::ptr::{map_ptr, Owned}; 229 | 230 | use super::StackAllocator; 231 | 232 | pub struct Ballast { 233 | v: V, 234 | _other: [usize; SIZE], 235 | } 236 | 237 | impl PartialEq for Ballast { 238 | fn eq(&self, other: &Self) -> bool { 239 | self.v.eq(&other.v) 240 | } 241 | } 242 | 243 | #[test] 244 | fn test_allocation() { 245 | unsafe { 246 | let mut alloc = StackAllocator::new(); 247 | let mut allocations = Vec::>>::new(); 248 | 249 | let amount: usize = if cfg!(miri) { 10 } else { 1000 }; 250 | 251 | for i in 0..amount { 252 | let b = Ballast { 253 | v: i, 254 | _other: [0usize; 64], 255 | }; 256 | let alloc = alloc.alloc_with(|| b); 257 | assert!(!allocations.contains(&alloc)); 258 | allocations.push(alloc); 259 | } 260 | 261 | for (i, v) in allocations.iter().enumerate() { 262 | assert_eq!(i, v.map_ptr(map_ptr!(Ballast<_,_>,v)).read()) 263 | } 264 | 265 | for _ in 0..(amount / 2) { 266 | assert_eq!(allocations.last().map(|x| x.cast()), alloc.top()); 267 | allocations.pop(); 268 | alloc.pop_dealloc(); 269 | } 270 | 271 | let mut allocations_2 = Vec::new(); 272 | 273 | for i in 0..amount { 274 | let alloc = alloc.alloc_with(|| i as u128); 275 | assert!(!allocations_2.contains(&alloc)); 276 | allocations_2.push(alloc); 277 | } 278 | 279 | for (i, v) in allocations_2.iter().enumerate() { 280 | assert_eq!(i as u128, v.as_ptr().read()) 281 | } 282 | } 283 | } 284 | } 285 | 
-------------------------------------------------------------------------------- /src/defer.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | mem::ManuallyDrop, 3 | ops::{Deref, DerefMut}, 4 | }; 5 | 6 | pub struct Defer { 7 | value: ManuallyDrop, 8 | f: Option, 9 | } 10 | 11 | impl Defer { 12 | pub fn new(value: T, func: F) -> Self { 13 | Defer { 14 | value: ManuallyDrop::new(value), 15 | f: Some(func), 16 | } 17 | } 18 | 19 | #[allow(dead_code)] 20 | pub fn take(mut self) -> T { 21 | self.f = None; 22 | unsafe { ManuallyDrop::take(&mut self.value) } 23 | } 24 | } 25 | 26 | impl Deref for Defer { 27 | type Target = T; 28 | 29 | fn deref(&self) -> &Self::Target { 30 | &self.value 31 | } 32 | } 33 | 34 | impl DerefMut for Defer { 35 | fn deref_mut(&mut self) -> &mut Self::Target { 36 | &mut self.value 37 | } 38 | } 39 | 40 | impl Drop for Defer 41 | where 42 | F: FnOnce(&mut T), 43 | { 44 | fn drop(&mut self) { 45 | if let Some(x) = self.f.take() { 46 | (x)(&mut *self.value); 47 | unsafe { ManuallyDrop::drop(&mut self.value) } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docrs, feature(doc_cfg))] 2 | 3 | mod allocator; 4 | mod defer; 5 | mod ptr; 6 | mod stub_waker; 7 | mod vtable; 8 | 9 | pub mod stack; 10 | 11 | #[cfg(feature = "tree")] 12 | #[cfg_attr(docrs, doc(cfg(feature = "tree")))] 13 | pub mod tree; 14 | 15 | #[cfg(feature = "tree")] 16 | #[doc(inline)] 17 | #[cfg_attr(docrs, doc(cfg(feature = "tree")))] 18 | pub use tree::{Stk as TreeStk, TreeStack}; 19 | 20 | #[doc(inline)] 21 | pub use stack::{Stack, Stk}; 22 | 23 | #[cfg(test)] 24 | mod test; 25 | -------------------------------------------------------------------------------- /src/ptr.rs: -------------------------------------------------------------------------------- 1 | use 
std::{fmt, marker::PhantomData, num::NonZeroUsize, ptr::NonNull}; 2 | 3 | #[macro_use] 4 | mod mac { 5 | #[doc(hidden)] 6 | #[macro_export] 7 | macro_rules! __map_ptr { 8 | ($ty:ty $(,$field:ident)*) => { 9 | |x| { 10 | let ptr = x; 11 | $(let ptr = std::ptr::addr_of_mut!((*ptr).$field);)* 12 | ptr 13 | } 14 | }; 15 | } 16 | } 17 | pub(crate) use crate::__map_ptr as map_ptr; 18 | 19 | pub struct Owned { 20 | ptr: NonNull, 21 | _marker: PhantomData, 22 | } 23 | 24 | impl Copy for Owned {} 25 | impl Clone for Owned { 26 | fn clone(&self) -> Self { 27 | *self 28 | } 29 | } 30 | 31 | impl Owned { 32 | pub unsafe fn write(&self, v: T) { 33 | self.ptr.as_ptr().write(v); 34 | } 35 | 36 | pub unsafe fn read(&self) -> T { 37 | self.ptr.as_ptr().read() 38 | } 39 | 40 | #[cfg(feature = "tree")] 41 | pub unsafe fn replace(&self, v: T) -> T { 42 | std::ptr::replace(self.as_ptr(), v) 43 | } 44 | } 45 | 46 | impl From<&T> for Owned { 47 | fn from(t: &T) -> Self { 48 | Owned { 49 | ptr: NonNull::from(t), 50 | _marker: PhantomData, 51 | } 52 | } 53 | } 54 | 55 | impl From<&mut T> for Owned { 56 | fn from(value: &mut T) -> Self { 57 | Owned { 58 | ptr: NonNull::from(value), 59 | _marker: PhantomData, 60 | } 61 | } 62 | } 63 | 64 | macro_rules! 
impl_base_methods { 65 | ($ty:ident<$($lt:lifetime,)?$gen:ident>) => { 66 | 67 | 68 | impl<$($lt,)?$gen> fmt::Debug for $ty<$($lt,)?$gen>{ 69 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 70 | f.debug_struct("Owned").field("ptr", &self.ptr).finish() 71 | } 72 | } 73 | 74 | impl<$($lt,)?$gen> PartialEq for $ty<$($lt,)?$gen>{ 75 | fn eq(&self, other: &Self) -> bool { 76 | self.ptr == other.ptr 77 | } 78 | } 79 | 80 | impl<$($lt,)?$gen> Eq for $ty<$($lt,)?$gen>{ 81 | } 82 | 83 | 84 | impl<$($lt,)?$gen> PartialOrd for $ty<$($lt,)?$gen> { 85 | fn partial_cmp(&self, other: &Self) -> Option { 86 | Some(self.ptr.cmp(&other.ptr)) 87 | } 88 | } 89 | 90 | impl<$($lt,)?$gen> Ord for $ty<$($lt,)?$gen> { 91 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 92 | self.ptr.cmp(&other.ptr) 93 | } 94 | } 95 | 96 | impl<$($lt,)?$gen> $ty<$($lt,)?$gen> { 97 | pub fn dangling() -> Self{ 98 | Self { 99 | ptr: NonNull::dangling(), 100 | _marker: PhantomData, 101 | } 102 | } 103 | 104 | 105 | pub fn from_ptr(ptr: *mut $gen) -> Option { 106 | NonNull::new(ptr).map(|x| Self { 107 | ptr: x, 108 | _marker: PhantomData, 109 | }) 110 | } 111 | 112 | pub unsafe fn from_ptr_unchecked(ptr: *mut $gen) -> Self { 113 | Self { 114 | ptr: NonNull::new_unchecked(ptr), 115 | _marker: PhantomData, 116 | } 117 | } 118 | 119 | pub fn as_ptr(&self) -> *mut $gen { 120 | self.ptr.as_ptr() 121 | } 122 | 123 | pub unsafe fn as_ref<'a>(self) -> &'a $($lt)? $gen { 124 | self.ptr.as_ref() 125 | } 126 | 127 | pub unsafe fn as_mut<'a>(mut self) -> &'a $($lt)? 
mut $gen { 128 | self.ptr.as_mut() 129 | } 130 | 131 | pub unsafe fn add(self, offset: usize) -> Self{ 132 | Self{ 133 | ptr: NonNull::new_unchecked(self.ptr.as_ptr().add(offset)), 134 | _marker: PhantomData 135 | } 136 | } 137 | 138 | pub unsafe fn sub(self, offset: usize) -> Self{ 139 | Self{ 140 | ptr: NonNull::new_unchecked(self.ptr.as_ptr().sub(offset)), 141 | _marker: PhantomData 142 | } 143 | } 144 | 145 | pub unsafe fn offset_from(self, other: $ty<$gen>) -> isize{ 146 | self.ptr.as_ptr().offset_from(other.ptr.as_ptr()) 147 | } 148 | 149 | pub unsafe fn map_ptr(self, f: F) -> $ty<$($lt,)?R> 150 | where F: FnOnce(*mut T) -> *mut R 151 | { 152 | $ty::from_ptr_unchecked(f(self.as_ptr())) 153 | } 154 | 155 | pub fn cast(self) -> $ty<$($lt,)?R>{ 156 | $ty{ 157 | ptr: self.ptr.cast(), 158 | _marker: PhantomData, 159 | } 160 | } 161 | 162 | pub fn addr(self) -> NonZeroUsize{ 163 | unsafe{ NonZeroUsize::new_unchecked(self.ptr.as_ptr().addr()) } 164 | } 165 | 166 | pub unsafe fn map_addr_unchecked(self, f: F) -> Self 167 | where F: FnOnce(usize) -> usize 168 | { 169 | unsafe{ Self::from_ptr_unchecked(self.ptr.as_ptr().map_addr(f)) } 170 | } 171 | 172 | } 173 | }; 174 | } 175 | 176 | impl_base_methods!(Owned); 177 | -------------------------------------------------------------------------------- /src/stack/future.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::Cell, 3 | future::Future, 4 | marker::PhantomData, 5 | pin::Pin, 6 | task::{Context, Poll}, 7 | }; 8 | 9 | use crate::{ptr::Owned, Stack}; 10 | 11 | use super::{ 12 | stk::{StackMarker, Stk}, 13 | StackState, 14 | }; 15 | 16 | pub enum StkFutureState { 17 | Initial(F), 18 | Running(Cell>), 19 | Done, 20 | } 21 | 22 | pub(crate) struct InnerStkFuture<'a, F, R, M> { 23 | pub(crate) state: StkFutureState, 24 | pub(crate) _marker: PhantomData<&'a mut M>, 25 | } 26 | 27 | impl InnerStkFuture<'_, F, R, M> { 28 | pub fn new(f: F) -> Self { 29 | 
InnerStkFuture { 30 | state: StkFutureState::Initial(f), 31 | _marker: PhantomData, 32 | } 33 | } 34 | } 35 | 36 | impl<'a, F, Fut, R, M> Future for InnerStkFuture<'a, F, R, M> 37 | where 38 | F: FnOnce(&'a mut M) -> Fut, 39 | Fut: Future + 'a, 40 | M: StackMarker, 41 | { 42 | type Output = R; 43 | 44 | #[inline] 45 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 46 | let this = unsafe { self.get_unchecked_mut() }; 47 | Stack::with_context(|stack| match this.state { 48 | StkFutureState::Initial(_) => { 49 | // match in two steps so we don't move out of state until we are sure we can move 50 | // out. 51 | let StkFutureState::Initial(closure) = 52 | std::mem::replace(&mut this.state, StkFutureState::Running(Cell::new(None))) 53 | else { 54 | unreachable!(); 55 | }; 56 | 57 | let StkFutureState::Running(ref mut state) = this.state else { 58 | unreachable!(); 59 | }; 60 | 61 | let state_ptr = Owned::from(state); 62 | 63 | // Safety: M::create is save here because we are running in the reblessive runtime. 64 | let future = unsafe { closure(M::create()) }; 65 | 66 | // todo check for sub schedulars 67 | if stack.is_rebless_context(cx) { 68 | unsafe { 69 | stack.push_task(async move { 70 | state_ptr.as_ref().set(Some(future.await)); 71 | }) 72 | } 73 | } else { 74 | // if the context has changed there is probably a sub schedular inbetween this 75 | // future and the reblessive runtime. We should notify this schedular when this 76 | // future is ready. 
77 | let waker = cx.waker().clone(); 78 | unsafe { 79 | stack.push_task(async move { 80 | state_ptr.as_ref().set(Some(future.await)); 81 | waker.wake() 82 | }) 83 | } 84 | } 85 | Poll::Pending 86 | } 87 | StkFutureState::Running(ref closure) => { 88 | let Some(x) = closure.take() else { 89 | return Poll::Pending; 90 | }; 91 | 92 | this.state = StkFutureState::Done; 93 | 94 | Poll::Ready(x) 95 | } 96 | StkFutureState::Done => Poll::Pending, 97 | }) 98 | } 99 | } 100 | 101 | impl Drop for InnerStkFuture<'_, F, R, M> { 102 | fn drop(&mut self) { 103 | match self.state { 104 | // Never polled 105 | StkFutureState::Initial(_) => {} 106 | // Finised execution 107 | StkFutureState::Done => {} 108 | // Dropped after polled, might only be partially finished. 109 | StkFutureState::Running(ref r) => { 110 | if r.take().is_none() { 111 | // r.take is none so it's parent future hasn't finished yet. 112 | Stack::with_context(|stack| { 113 | // make sure the task didn't already get dropped. 114 | if stack.state.get() != StackState::Cancelled { 115 | unsafe { stack.pop_cancel_task() }; 116 | } 117 | }) 118 | } 119 | } 120 | } 121 | } 122 | } 123 | 124 | /// Future returned by [`Stk::run`] 125 | /// 126 | /// Should be immediatly polled when created and driven until finished. 
127 | #[must_use = "futures do nothing unless you `.await` or poll them"] 128 | pub struct StkFuture<'a, F, R>(pub(crate) InnerStkFuture<'a, F, R, Stk>); 129 | impl<'a, F, Fut, R> Future for StkFuture<'a, F, R> 130 | where 131 | F: FnOnce(&'a mut Stk) -> Fut, 132 | Fut: Future + 'a, 133 | { 134 | type Output = R; 135 | 136 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 137 | unsafe { self.map_unchecked_mut(|t| &mut t.0).poll(cx) } 138 | } 139 | } 140 | 141 | /// Future returned by [`Stk::yield_now`] 142 | pub struct YieldFuture<'a>(bool, PhantomData<&'a mut Stk>); 143 | 144 | impl YieldFuture<'_> { 145 | pub(crate) fn new<'a>() -> YieldFuture<'a> { 146 | YieldFuture(false, PhantomData) 147 | } 148 | } 149 | 150 | impl Future for YieldFuture<'_> { 151 | type Output = (); 152 | 153 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 154 | Stack::with_context(|stack| { 155 | if !self.0 && matches!(stack.state.get(), StackState::Yield) { 156 | self.0 = true; 157 | if !stack.is_rebless_context(cx) { 158 | cx.waker().wake_by_ref() 159 | } 160 | let s = stack.state.replace(StackState::Yield); 161 | assert_eq!( 162 | s, 163 | StackState::Base, 164 | "Stack was in inconsistant state for yielding." 165 | ); 166 | return Poll::Pending; 167 | } 168 | Poll::Ready(()) 169 | }) 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /src/stack/mod.rs: -------------------------------------------------------------------------------- 1 | //! The stack runtime 2 | //! 3 | //! A runtime for turning recursive functions into a number of futures which are run from a single 4 | //! flattened loop, preventing stack overflows. 5 | //! 6 | //! This runtime also has support for external async function but it explicitly doesn't support 7 | //! intra-task concurrency, i.e. calling select or join on multiple futures at the same time. These 8 | //! 
types of patterns break the stack allocation pattern which this executor uses to be able to 9 | //! allocate and run futures efficiently. 10 | 11 | use crate::{ 12 | allocator::StackAllocator, 13 | defer::Defer, 14 | ptr::{map_ptr, Owned}, 15 | vtable::{TaskBox, VTable}, 16 | }; 17 | use std::{ 18 | cell::{Cell, UnsafeCell}, 19 | future::Future, 20 | marker::PhantomData, 21 | task::{Context, Poll}, 22 | }; 23 | 24 | pub(crate) mod future; 25 | mod runner; 26 | mod stk; 27 | #[cfg(test)] 28 | mod test; 29 | 30 | pub use future::{StkFuture, YieldFuture}; 31 | pub use runner::{FinishFuture, Runner, StepFuture}; 32 | pub(crate) use stk::StackMarker; 33 | pub use stk::Stk; 34 | 35 | thread_local! { 36 | static STACK_PTR: Cell>> = const { Cell::new(None) }; 37 | } 38 | 39 | type ResultPlace = UnsafeCell>; 40 | 41 | #[derive(Clone, Copy, Eq, PartialEq, Debug)] 42 | pub(crate) enum StackState { 43 | /// Initial stack state 44 | Base, 45 | /// A future requested a that the executor yield 46 | Yield, 47 | /// The pending tasks in the executor are being canceled. 48 | Cancelled, 49 | /// A new task was created, execution should switch to the new task 50 | NewTask, 51 | } 52 | 53 | /// A small minimal runtime for executing futures flattened onto the heap preventing stack 54 | /// overflows on deeply nested futures. 55 | /// 56 | /// Only capable of running a single future at the same time and has no support for waking 57 | /// tasks by itself. 58 | pub struct Stack { 59 | allocator: UnsafeCell, 60 | pub(crate) len: Cell, 61 | pub(crate) state: Cell, 62 | async_context: Cell, 63 | } 64 | 65 | unsafe impl Send for Stack {} 66 | unsafe impl Sync for Stack {} 67 | 68 | impl Stack { 69 | /// Create a new empty stack to run reblessive futures in. 70 | /// 71 | /// This function does not allocate. 
    // NOTE(review): throughout this chunk, angle-bracketed generic arguments
    // appear to have been stripped during text extraction (e.g.
    // `Owned::::dangling()`, `fn enter_future(&self, fut: F)` with a free `F`,
    // `alloc_with::, _>`). Code is reproduced as-is; restore the type
    // parameters from version control before compiling.

    /// Creates a new, empty `Stack`: no tasks allocated, state `Base`, and a
    /// sentinel "no async context" address installed.
    pub fn new() -> Self {
        Self {
            allocator: UnsafeCell::new(StackAllocator::new()),
            len: Cell::new(0),
            state: Cell::new(StackState::Base),
            // Dangling address as sentinel; assumed never to equal the address
            // of a live `Context` handed to `set_rebless_context` — TODO confirm.
            async_context: Cell::new(Owned::::dangling().addr().get()),
        }
    }

    /// Run a future in the stack.
    ///
    /// Allocates the root future produced by `f` and returns a [`Runner`] that
    /// drives it. The runner borrows `self` mutably, so the stack cannot be
    /// re-entered until the runner is dropped.
    pub fn enter<'a, F, Fut, R>(&'a mut self, f: F) -> Runner<'a, R>
    where
        F: FnOnce(&'a mut Stk) -> Fut,
        Fut: Future + 'a,
    {
        // Safety relies on `Stk::create` producing a zero-sized handle that is
        // only usable while this stack is the active context.
        let fut = unsafe { f(Stk::create()) };

        unsafe { self.enter_future(fut) }

        Runner {
            stack: self,
            _marker: PhantomData,
        }
    }

    /// Installs `fut` as the root task.
    ///
    /// Allocates a result slot (`UnsafeCell<Option<..>>` — generic lost in
    /// extraction) first, then a wrapper future that writes the root future's
    /// output into that slot. Panics if tasks from a previous (leaked) runner
    /// are still present.
    pub(crate) unsafe fn enter_future(&self, fut: F)
    where
        F: Future,
    {
        assert_eq!(
            self.len.get(),
            0,
            "Stack still has unresolved futures, did a previous runner leak?"
        );

        // The bottom-most allocation is the result slot; `try_get_result`
        // and `clear` both assume this layout.
        let place_ptr =
            (*self.allocator.get()).alloc_with::, _>(|| UnsafeCell::new(None));

        self.len.set(1);

        self.alloc_future(
            async move { unsafe { place_ptr.as_ref().get().write(Some(fut.await)) } },
        );
    }

    /// Runs `f` with this stack installed as the thread-local current stack
    /// (`STACK_PTR`), restoring the previous value afterwards — even on panic,
    /// via the `Defer` guard.
    pub(crate) fn enter_context(&self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        let ptr = STACK_PTR.with(|x| x.replace(Some(Owned::from(self))));
        let _defer = Defer::new(ptr, |ptr| {
            let ptr = *ptr;
            STACK_PTR.with(|x| x.set(ptr));
        });
        f()
    }

    /// Runs `f` with a reference to the thread-local current stack.
    ///
    /// # Panics
    /// Panics if called outside `enter_context` (no stack installed).
    pub(crate) fn with_context(f: F) -> R
    where
        F: FnOnce(&Self) -> R,
    {
        let ptr = STACK_PTR
            .with(|x| x.get())
            .expect("Not within a stack context");
        // Safety: STACK_PTR is only set inside `enter_context`, whose guard
        // keeps the stack alive for the duration of `f` — TODO confirm no
        // re-entrancy hazard through `f`.
        unsafe { f(ptr.as_ref()) }
    }

    /// Allocates `f` as a `TaskBox` on top of the allocator, recording its
    /// vtable (poll + drop) so it can be driven/dropped type-erased later.
    /// Increments `len`.
    unsafe fn alloc_future(&self, f: F) -> Owned>
    where
        F: Future,
    {
        let res = (*self.allocator.get())
            .alloc_with(|| TaskBox {
                v_table: VTable::get::(),
                future: f,
            })
            .cast();
        self.len.set(self.len.get() + 1);
        res
    }

    /// Number of futures spawned on the stack, excluding the bottom result
    /// slot (hence the `saturating_sub(1)`).
    pub(crate) fn pending_tasks(&self) -> usize {
        self.len.get().saturating_sub(1)
    }

    // tries to get the final result of the last allocation
    //
    // Only succeeds when exactly the result slot remains (`len == 1`): all
    // task futures have completed and the wrapper wrote the output.
    pub(crate) unsafe fn try_get_result(&self) -> Option {
        if self.len.get() == 1 {
            let place_ptr = (*self.allocator.get()).top().unwrap();
            let res = (*place_ptr.cast::>().as_ref().get()).take();
            // NOTE(review): "writen" typo in this user-visible panic message;
            // left untouched here as the string is runtime behavior.
            assert!(
                res.is_some(),
                "Result was not writen even after all futures finished!",
            );

            (*self.allocator.get()).pop_dealloc();
            self.len.set(0);

            return res;
        }
        None
    }

    /// Polls the top-most task once inside this stack's context.
    ///
    /// Returns `Ready(true)` when the task yielded via `yield_now`,
    /// `Ready(false)` when it finished or pushed a new task, and `Pending`
    /// only when an *external* (non-reblessive) future returned pending.
    pub(crate) unsafe fn drive_top_task(&self, context: &mut Context) -> Poll {
        self.enter_context(|| {
            let task = (*self.allocator.get()).top().unwrap().cast::>();
            let r = Self::drive_task(task, context);
            match r {
                Poll::Ready(_) => {
                    // task was ready, it can be popped.
                    self.pop_task();
                    Poll::Ready(false)
                }
                Poll::Pending => {
                    // the future yielded, find out why,
                    match self.state.get() {
                        StackState::Base => {
                            // State didn't change, but future still yielded,
                            // This means that some outside future yielded so we should yield too.
                            Poll::Pending
                        }
                        StackState::Yield => {
                            // A future requested a yield for the reblessive runtime
                            self.state.set(StackState::Base);
                            Poll::Ready(true)
                        }
                        StackState::Cancelled => {
                            panic!("stack should never be running tasks while cancelling")
                        }
                        StackState::NewTask => {
                            // A child task was pushed; caller should poll it next.
                            self.state.set(StackState::Base);
                            Poll::Ready(false)
                        }
                    }
                }
            }
        })
    }

    /// Pushes a child task. The `NewTask` state transition is how
    /// `drive_top_task` learns a task was spawned; the assert enforces strict
    /// stack-order evaluation (see the `enter_run_after_enter` tests).
    pub(crate) unsafe fn push_task(&self, f: F)
    where
        F: Future,
    {
        let old = self.state.replace(StackState::NewTask);
        assert_eq!(
            old,
            StackState::Base,
            "Invalid stack state, futures are not being evaluated in stack order."
        );
        self.alloc_future(f);
    }

    /// Drops the top-most task in place and deallocates it, decrementing `len`.
    pub(crate) unsafe fn pop_task(&self) {
        let task = (*self.allocator.get()).top().unwrap().cast::>();
        Self::drop_task_inline(task);
        (*self.allocator.get()).pop_dealloc();
        self.len.set(self.len.get() - 1);
    }

    /// Like `pop_task`, but performed under the `Cancelled` state so a task
    /// being dropped cannot be mistaken for one being driven; restores the
    /// previous state afterwards.
    pub(crate) unsafe fn pop_cancel_task(&self) {
        let old_state = self.state.replace(StackState::Cancelled);
        self.pop_task();
        self.state.set(old_state);
    }

    /// Type-erased poll through the task's stored vtable.
    unsafe fn drive_task(drive: Owned>, context: &mut Context) -> Poll<()> {
        let v_table = drive.map_ptr(map_ptr!(Owned>, v_table)).read();
        (v_table.driver)(drive, context)
    }

    /// Type-erased in-place drop through the task's stored vtable.
    unsafe fn drop_task_inline(drive: Owned>) {
        let v_table = drive.map_ptr(map_ptr!(Owned>, v_table)).read();
        (v_table.dropper)(drive)
    }

    /// True when `context` is the same `Context` that was registered via
    /// `set_rebless_context` (compared by address).
    pub(crate) fn is_rebless_context(&self, context: &mut Context) -> bool {
        self.async_context.get() == Owned::from(context).addr().get()
    }

    /// Registers `context` as the reblessive runtime's own context; returns
    /// the previously registered address so it can be restored.
    pub(crate) fn set_rebless_context(&self, context: &mut Context) -> usize {
        self.set_rebless_context_addr(Owned::from(context).addr().get())
    }

    /// Raw-address variant of `set_rebless_context`; returns the old address.
    pub(crate) fn set_rebless_context_addr(&self, context: usize) -> usize {
        self.async_context.replace(context)
    }

    /// Drops all remaining tasks (top-down) and the result slot, returning the
    /// stack to its empty state. Used on runner drop / stack drop.
    pub(crate) unsafe fn clear(&self) {
        let len = self.len.get();
        if len == 0 {
            // No tasks pushed, nothing to be done.
            return;
        }

        // Tasks being dropped may themselves try to pop; `Cancelled` marks
        // that no task may be *driven* during this phase.
        self.state.set(StackState::Cancelled);

        // Clear all the futures
        self.enter_context(|| {
            let borrow = &mut (*self.allocator.get());
            // Skip index 0: the bottom allocation is the result slot, not a task.
            for _ in (1..len).rev() {
                unsafe { Self::drop_task_inline(borrow.top().unwrap().cast()) }
                borrow.pop_dealloc();
            }
        });

        // If the count was less than or equal to 2 it means that the final future might have
        // produced a result, so we need to take the possible result out of final pointer to run
        // it's drop, if any
        if std::mem::needs_drop::() && len <= 2 {
            let place_ptr = (*self.allocator.get()).top().unwrap();
            (*place_ptr.cast::>>().as_ref().get()).take();
        }

        // Deallocate the result allocation.
        (*self.allocator.get()).pop_dealloc();
        // reset len since the stack is now empty.
        self.len.set(0);
        self.state.set(StackState::Base);
    }
}

impl Default for Stack {
    fn default() -> Self {
        Self::new()
    }
}

impl Drop for Stack {
    fn drop(&mut self) {
        // clear some memory, this might leak if the root value had allocations but this leak will
        // only happen if the Runner was leaked.
        // The `()` result type is a stand-in: the real result type is unknown
        // here, so the result slot's contents (if any) are not dropped.
        unsafe { self.clear::<()>() }
    }
}
// ---------------------------------------------------------------------------
// src/stack/runner.rs
// ---------------------------------------------------------------------------
use std::{
    future::Future,
    marker::PhantomData,
    pin::Pin,
    task::{Context, Poll},
};

use crate::{stub_waker, Stack};

use super::StackState;

/// Struct returned by [`Stack::enter`]; determines how futures should be run.
pub struct Runner<'a, R> {
    pub(crate) stack: &'a Stack,
    // NOTE(review): generic argument stripped in extraction — `PhantomData<R>`.
    pub(crate) _marker: PhantomData,
}

impl<'a, R> Runner<'a, R> {
    /// Run the spawned future for a single step, returning none if a future either completed or
    /// spawned a new future onto the stack. Will return some if the root future is finished.
    ///
    /// This function supports sleeping or taking ownership of the waker allowing it to be used
    /// with external async runtimes.
    pub fn step_async(&mut self) -> StepFuture<'a, R> {
        StepFuture {
            stack: self.stack,
            _marker: PhantomData,
        }
    }

    /// Drive the stack until it completes.
    ///
    /// This function supports cloning and awakening allowing it to be used with external async
    /// runtimes
    pub fn finish_async(&mut self) -> FinishFuture<'a, R> {
        FinishFuture {
            stack: self.stack,
            _marker: PhantomData,
        }
    }

    /// Run the spawned future for a single step, returning none if a future either completed or
    /// spawned a new future onto the stack. Will return some if the root future is finished.
    ///
    /// # Panics
    ///
    /// This function will panic if the waker inside the future running on the stack either tries
    /// to clone the waker or tries to call wake. This function is not meant to used with any other
    /// future except those generated with the various function provided by the stack. For the
    /// async version see [`Runner::step_async`]
    pub fn step(&mut self) -> Option {
        if let Some(x) = unsafe { self.stack.try_get_result::() } {
            return Some(x);
        }

        // A no-op waker: in the synchronous path nothing should ever need to
        // actually wake.
        let waker = stub_waker::get();
        let mut context = Context::from_waker(&waker);
        // Register our context so nested futures can tell they are being
        // driven by the reblessive runtime itself.
        let rebless_addr = self.stack.set_rebless_context(&mut context);

        // `Pending` here can only come from a foreign (non-reblessive) future,
        // which the synchronous driver cannot wait on.
        if unsafe { self.stack.drive_top_task(&mut context) }.is_pending() {
            panic!(
                "a non-reblessive future was run while running the reblessive runtime as non-async"
            )
        }

        self.stack.set_rebless_context_addr(rebless_addr);
        None
    }

    /// Drive the stack until it completes.
    ///
    /// # Panics
    ///
    /// This function will panic if the waker inside the future running on the stack either tries
    /// to clone the waker or tries to call wake. This function is not meant to used with any other
    /// future except those generated with the various function provided by the stack. For the
    /// async version see [`Runner::finish_async`]
    pub fn finish(mut self) -> R {
        loop {
            if let Some(x) = self.step() {
                return x;
            }
        }
    }

    /// Returns the number of futures currently spawned on the stack.
    pub fn depth(&self) -> usize {
        self.stack.pending_tasks()
    }
}

// NOTE(review): impl generics stripped in extraction — `impl<R> Drop for ...`.
impl Drop for Runner<'_, R> {
    fn drop(&mut self) {
        // The runner is dropped so we need to clear all the futures that might still be present on
        // the stack, if the runner was dropped before finishing the stack.
        self.stack.state.set(StackState::Cancelled);
        // `clear` knows the concrete result type here (unlike `Stack::drop`),
        // so a produced-but-unread result is dropped properly.
        unsafe { self.stack.clear::() }
        self.stack.state.set(StackState::Base);
    }
}

/// Future returned by [`Runner::step_async`]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct StepFuture<'a, R> {
    stack: &'a Stack,
    _marker: PhantomData,
}

impl Future for StepFuture<'_, R> {
    type Output = Option;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
        if let Some(x) = unsafe { self.stack.try_get_result() } {
            return Poll::Ready(Some(x));
        }

        // Register/restore the runtime context around a single drive step so
        // foreign futures can pend with the caller's real waker.
        let rebless_addr = self.stack.set_rebless_context(cx);
        let r = unsafe { self.stack.drive_top_task(cx) };
        self.stack.set_rebless_context_addr(rebless_addr);
        match r {
            Poll::Ready(_) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

/// Future returned by [`Runner::finish_async`]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct FinishFuture<'a, R> {
    stack: &'a Stack,
    _marker: PhantomData,
}

impl Future for FinishFuture<'_, R> {
    type Output = R;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
        // Drive steps in a loop until the root result is available; only a
        // foreign future pending breaks out to the executor.
        loop {
            if let Some(x) = unsafe { self.stack.try_get_result() } {
                return Poll::Ready(x);
            }

            let rebless_addr = self.stack.set_rebless_context(cx);
            let r = unsafe { self.stack.drive_top_task(cx) };
            self.stack.set_rebless_context_addr(rebless_addr);
            match r {
                Poll::Ready(_) => {}
                Poll::Pending => return Poll::Pending,
            }
        }
    }
}
// ---------------------------------------------------------------------------
// src/stack/stk.rs
// ---------------------------------------------------------------------------
use super::future::{InnerStkFuture, StkFuture, YieldFuture};
use
crate::{ptr::Owned, Stack};
use std::{future::Future, marker::PhantomData};

/// A reference back to stack from inside the running future.
///
/// Used for spawning new futures onto the stack from a future running on the stack.
pub struct Stk(
    // Marker to make sure Stk doesn't implement send or sync.
    PhantomData<*mut Stack>,
);

/// Marker trait for zero-sized stack handles that can be conjured out of thin
/// air (see `Stk::create`).
pub trait StackMarker: 'static {
    unsafe fn create() -> &'static mut Self;
}

impl StackMarker for Stk {
    unsafe fn create() -> &'static mut Self {
        // Safety: Stk is a zero-sized type so any pointer that is not null is a valid pointer to the type.
        // Therefore we can create a static reference to the type from a dangling pointer.
        unsafe { Owned::dangling().as_mut() }
    }
}

impl Stk {
    /// Run a new future in the runtime.
    pub fn run<'a, F, Fut, R>(&'a mut self, f: F) -> StkFuture<'a, F, R>
    where
        F: FnOnce(&'a mut Stk) -> Fut,
        Fut: Future + 'a,
    {
        StkFuture(InnerStkFuture::new(f))
    }

    /// A less type safe version of Stk::run which doesn't require passing around a Stk object.
    /// Invalid use of this function will cause a panic, deadlock or otherwise generally sound but
    /// strange behaviour.
    ///
    /// # Panic
    /// This function will panic while not within a Stack
    /// The future returned by this function will panic if another stack futures is created which
    /// is not contained within the future returned by this function while the current future is
    /// still running
    pub fn enter_run<'a, F, Fut, R>(f: F) -> StkFuture<'a, F, R>
    where
        F: FnOnce(&'a mut Stk) -> Fut,
        Fut: Future + 'a,
    {
        StkFuture(InnerStkFuture::new(f))
    }

    /// Yield the execution of the recursive futures back to the reblessive runtime.
    ///
    /// When stepping through a function instead of finishing it, awaiting the future returned by
    /// this function will cause the current step to complete.
    pub fn yield_now(&mut self) -> YieldFuture {
        YieldFuture::new()
    }
}
// ---------------------------------------------------------------------------
// src/stack/test.rs
// ---------------------------------------------------------------------------
use std::{cell::Cell, future::Future, mem::MaybeUninit, pin::Pin, task::Poll, time::Duration};

use crate::{
    test::{run_with_stack_size, thread_sleep, ManualPoll, KB, MB, PAGE_SIZE},
    Stack, Stk,
};

// Recursion through a helper that is not itself wrapped in `ctx.run` must
// still work as long as each recursion level goes through `ctx.run`.
#[test]
fn call_not_wrapped() {
    async fn a(ctx: &mut Stk, depth: usize) {
        b(ctx, depth).await
    }

    async fn b(ctx: &mut Stk, depth: usize) {
        if depth == 0 {
            return;
        }
        ctx.run(|ctx| a(ctx, depth - 1)).await
    }

    let mut stack = Stack::new();

    let depth = if cfg!(miri) { 16 } else { 1024 };

    stack.enter(|ctx| a(ctx, depth)).finish();
}

// Deep recursion with per-frame ballast must not overflow a deliberately small
// OS stack, because frames live on the reblessive stack instead.
#[test]
fn fibbo() {
    async fn heavy_fibbo(ctx: &mut Stk, n: usize) -> usize {
        // An extra stack allocation to simulate a more complex function.
        let mut ballast: MaybeUninit<[u8; KB]> = std::mem::MaybeUninit::uninit();
        // Make sure the ballast isn't compiled out.
        std::hint::black_box(&mut ballast);

        match n {
            0 => 1,
            1 => 1,
            x => {
                ctx.run(move |ctx| heavy_fibbo(ctx, x - 1)).await
                    + ctx.run(move |ctx| heavy_fibbo(ctx, x - 2)).await
            }
        }
    }
    let mut stack = Stack::new();

    let (v, depth) = if cfg!(miri) { (13, 6) } else { (10946, 20) };

    // run the function to completion on the stack.
    let res = run_with_stack_size(8 * PAGE_SIZE, "fibbo", move || {
        stack.enter(|ctx| heavy_fibbo(ctx, depth)).finish()
    });
    assert_eq!(res, v);
}

// Same computation as `fibbo`, driven one step at a time via `Runner::step`.
#[test]
fn fibbo_stepping() {
    async fn fibbo(ctx: &mut Stk, n: usize) -> usize {
        match n {
            0 => 1,
            1 => 1,
            x => {
                ctx.run(move |ctx| fibbo(ctx, x - 1)).await
                    + ctx.run(move |ctx| fibbo(ctx, x - 2)).await
            }
        }
    }
    let mut stack = Stack::new();

    let (v, depth) = if cfg!(miri) { (13, 6) } else { (10946, 20) };

    // run the function to completion on the stack.
    let mut runner = stack.enter(|ctx| fibbo(ctx, depth));
    loop {
        if let Some(x) = runner.step() {
            assert_eq!(x, v);
            break;
        }
    }
}

// Stepping must also work when the futures interleave explicit `yield_now`.
#[test]
fn fibbo_stepping_yield() {
    async fn fibbo(ctx: &mut Stk, n: usize) -> usize {
        match n {
            0 => 1,
            1 => 1,
            x => {
                let a = ctx.run(move |ctx| fibbo(ctx, x - 1)).await;
                ctx.yield_now().await;
                a + ctx.run(move |ctx| fibbo(ctx, x - 2)).await
            }
        }
    }
    let mut stack = Stack::new();

    let (v, depth) = if cfg!(miri) { (13, 6) } else { (10946, 20) };

    // run the function to completion on the stack.
    let mut runner = stack.enter(|ctx| fibbo(ctx, depth));
    loop {
        if let Some(x) = runner.step() {
            assert_eq!(x, v);
            break;
        }
    }
}

// `finish` must drive through `yield_now` points without caller cooperation.
#[test]
fn fibbo_finish_yield() {
    async fn fibbo(ctx: &mut Stk, n: usize) -> usize {
        match n {
            0 => 1,
            1 => 1,
            x => {
                let a = ctx.run(move |ctx| fibbo(ctx, x - 1)).await;
                ctx.yield_now().await;
                a + ctx.run(move |ctx| fibbo(ctx, x - 2)).await
            }
        }
    }
    let mut stack = Stack::new();

    let (v, depth) = if cfg!(miri) { (13, 6) } else { (10946, 20) };

    // run the function to completion on the stack.
    let res = stack.enter(|ctx| fibbo(ctx, depth)).finish();
    assert_eq!(res, v)
}

// Running a genuinely-pending foreign future through the synchronous driver
// must hit the documented panic.
#[test]
#[should_panic = "a non-reblessive future was run while running the reblessive runtime as non-async"]
fn not_async() {
    let mut stack = Stack::new();

    stack
        .enter(|_| async { thread_sleep(Duration::from_secs(1)).await })
        .finish();
}

#[test]
fn very_deep() {
    async fn deep(ctx: &mut Stk, n: usize) -> usize {
        // An extra stack allocation to simulate a more complex function.
        let mut ballast: MaybeUninit<[u8; 32 * KB]> = std::mem::MaybeUninit::uninit();
        // Make sure the ballast isn't compiled out.
        std::hint::black_box(&mut ballast);

        if n == 0 {
            return 0xCAFECAFE;
        }

        ctx.run(move |ctx| deep(ctx, n - 1)).await
    }
    let mut stack = Stack::new();

    let depth = if cfg!(miri) { 16 } else { 1024 };

    // run the function to completion on the stack.
    let res = run_with_stack_size(MB, "very_deep", move || {
        stack.enter(|ctx| deep(ctx, depth)).finish()
    });
    assert_eq!(res, 0xCAFECAFE)
}

// Deep recursion that bottoms out in a real async sleep, driven by an
// external executor via `finish_async`.
#[test]
fn deep_sleep() {
    run_with_stack_size(MB, "deep_sleep", || {
        pollster::block_on(async {
            async fn deep(ctx: &mut Stk, n: usize) -> usize {
                // An extra stack allocation to simulate a more complex function.
                let mut ballast: MaybeUninit<[u8; 32 * KB]> = std::mem::MaybeUninit::uninit();
                // Make sure the ballast isn't compiled out.
                std::hint::black_box(&mut ballast);

                if n == 0 {
                    thread_sleep(Duration::from_millis(500)).await;
                    return 0xCAFECAFE;
                }

                ctx.run(move |ctx| deep(ctx, n - 1)).await
            }
            let mut stack = Stack::new();

            let depth = if cfg!(miri) { 16 } else { 1024 };

            // run the function to completion on the stack.
            let res = stack.enter(|ctx| deep(ctx, depth)).finish_async().await;
            assert_eq!(res, 0xCAFECAFE)
        })
    })
}

// Mutable borrows held across `ctx.run` boundaries must remain sound.
#[test]
fn mutate_in_future() {
    async fn mutate(ctx: &mut Stk, v: &mut Vec, depth: usize) {
        v.push(depth);
        if depth != 0 {
            ctx.run(|ctx| mutate(ctx, v, depth - 1)).await
        }
    }

    let mut stack = Stack::new();

    let mut v = Vec::new();
    stack.enter(|ctx| mutate(ctx, &mut v, 1000)).finish();

    for (idx, i) in (0..=1000).rev().enumerate() {
        assert_eq!(v[idx], i)
    }
}

// Same, but the mutated value is owned by a future on the stack itself.
#[test]
fn mutate_created_in_future() {
    async fn root(ctx: &mut Stk) {
        let mut v = Vec::new();

        let depth = if cfg!(miri) { 16 } else { 1024 };

        ctx.run(|ctx| mutate(ctx, &mut v, depth)).await;

        for (idx, i) in (0..=depth).rev().enumerate() {
            assert_eq!(v[idx], i)
        }
    }
    async fn mutate(ctx: &mut Stk, v: &mut Vec, depth: usize) {
        // An extra stack allocation to simulate a more complex function.
        let mut ballast: MaybeUninit<[u8; 32 * KB]> = std::mem::MaybeUninit::uninit();
        // Make sure the ballast isn't compiled out.
        std::hint::black_box(&mut ballast);
        v.push(depth);
        if depth != 0 {
            ctx.run(|ctx| mutate(ctx, v, depth - 1)).await
        }
    }

    run_with_stack_size(MB, "mutate_created_in_future", || {
        let mut stack = Stack::new();
        stack.enter(root).finish();
    });
}

// Borrowed structs with non-'static lifetimes must be usable across frames.
#[test]
fn borrow_lifetime_struct() {
    struct Ref<'a> {
        r: &'a usize,
    }

    async fn root(ctx: &mut Stk) {
        let depth = 100;
        let r = Ref { r: &depth };
        ctx.run(|ctx| go_deep(ctx, r)).await;
    }

    async fn go_deep(ctx: &mut Stk, r: Ref<'_>) {
        let depth = (*r.r) - 1;
        if depth == 0 {
            return;
        }
        let r = Ref { r: &depth };
        ctx.run(|ctx| go_deep(ctx, r)).await
    }

    let mut stack = Stack::new();

    stack.enter(root).finish();
}

// Interleaving futures with different alignments (up to 32) exercises the
// allocator's alignment handling; a deterministic xorshift picks the mix.
#[test]
fn test_bigger_alignment() {
    #[repr(align(32))]
    struct U256(u128, u128);

    struct Rand(u32);

    impl Rand {
        fn new() -> Self {
            Rand(0x194b93c)
        }

        fn next(&mut self) -> u32 {
            let mut x = self.0;
            x ^= x << 13;
            x ^= x >> 17;
            x ^= x << 5;
            self.0 = x;
            x
        }
    }

    async fn count_u16(stk: &mut Stk, rand: &mut Rand, depth: usize) -> usize {
        let mut v = rand.next() as u16;
        if depth == 0 {
            return v as usize;
        }
        // make sure that v is placed onto the stack.
        std::hint::black_box(&mut v);
        let c = match rand.next() % 3 {
            0 => stk.run(|stk| count_u16(stk, rand, depth - 1)).await as u16,
            1 => stk.run(|stk| count_u128(stk, rand, depth - 1)).await as u16,
            2 => stk.run(|stk| count_u256(stk, rand, depth - 1)).await as u16,
            _ => unreachable!(),
        };
        v.wrapping_add(c) as usize
    }

    async fn count_u128(stk: &mut Stk, rand: &mut Rand, depth: usize) -> usize {
        let mut v = rand.next() as u128;
        if depth == 0 {
            return v as usize;
        }
        // make sure that v is placed onto the stack.
        std::hint::black_box(&mut v);
        let c = match rand.next() % 3 {
            0 => stk.run(|stk| count_u16(stk, rand, depth - 1)).await as u128,
            1 => stk.run(|stk| count_u128(stk, rand, depth - 1)).await as u128,
            2 => stk.run(|stk| count_u256(stk, rand, depth - 1)).await as u128,
            _ => unreachable!(),
        };
        v.wrapping_add(c) as usize
    }

    async fn count_u256(stk: &mut Stk, rand: &mut Rand, depth: usize) -> usize {
        let mut v = U256(rand.next() as u128, 120203);
        if depth == 0 {
            return v.0 as usize;
        }
        // make sure that v is placed onto the stack.
        std::hint::black_box(&mut v);
        std::hint::black_box(v.1);
        let c = match rand.next() % 3 {
            0 => stk.run(|stk| count_u16(stk, rand, depth - 1)).await as u128,
            1 => stk.run(|stk| count_u128(stk, rand, depth - 1)).await as u128,
            2 => stk.run(|stk| count_u256(stk, rand, depth - 1)).await as u128,
            _ => unreachable!(),
        };
        v.0.wrapping_add(c) as usize
    }

    let mut rand = Rand::new();
    let mut stack = Stack::new();
    let depth = if cfg!(miri) { 16 } else { 1024 };
    stack
        .enter(|stk| count_u128(stk, &mut rand, depth))
        .finish();
}

// miri doesn't support epoll properly
#[tokio::test]
#[cfg_attr(miri, ignore)]
async fn read_cargo() {
    async fn deep_read(ctx: &mut Stk, n: usize) -> String {
        // smaller ballast since tokio only allocates 2MB for its threads.
        let mut ballast: MaybeUninit<[u8; 1024]> = std::mem::MaybeUninit::uninit();
        std::hint::black_box(&mut ballast);

        if n == 0 {
            tokio::fs::read_to_string("./Cargo.toml").await.unwrap()
        } else {
            ctx.run(|ctx| deep_read(ctx, n - 1)).await
        }
    }

    let mut stack = Stack::new();

    let str = stack.enter(|ctx| deep_read(ctx, 200)).finish_async().await;

    assert_eq!(str, include_str!("../../Cargo.toml"));
}

// miri doesn't support epoll properly
#[tokio::test]
#[cfg_attr(miri, ignore)]
async fn read_cargo_spawn() {
    async fn deep_read(ctx: &mut Stk, n: usize) -> String {
        // smaller ballast since tokio only allocates 2MB for its threads.
        let mut ballast: MaybeUninit<[u8; 1024]> = std::mem::MaybeUninit::uninit();
        std::hint::black_box(&mut ballast);

        if n == 0 {
            tokio::fs::read_to_string("./Cargo.toml").await.unwrap()
        } else {
            ctx.run(|ctx| deep_read(ctx, n - 1)).await
        }
    }

    tokio::spawn(async {
        let mut stack = Stack::new();

        let str = stack.enter(|ctx| deep_read(ctx, 200)).finish_async().await;

        assert_eq!(str, include_str!("../../Cargo.toml"));
    })
    .await
    .unwrap();
}

#[tokio::test]
#[cfg_attr(miri, ignore)]
async fn read_cargo_spawn_step() {
    async fn deep_read(ctx: &mut Stk, n: usize) -> String {
        // smaller ballast since tokio only allocates 2MB for its threads.
        let mut ballast: MaybeUninit<[u8; 1024]> = std::mem::MaybeUninit::uninit();
        std::hint::black_box(&mut ballast);

        ctx.yield_now().await;

        if n == 0 {
            tokio::fs::read_to_string("./Cargo.toml").await.unwrap()
        } else {
            ctx.run(|ctx| deep_read(ctx, n - 1)).await
        }
    }

    tokio::spawn(async {
        let mut stack = Stack::new();
        let mut runner = stack.enter(|ctx| deep_read(ctx, 200));

        loop {
            if let Some(x) = runner.step_async().await {
                assert_eq!(x, include_str!("../../Cargo.toml"));
                break;
            }
        }
    })
    .await
    .unwrap();
}

// A child future polled once and then abandoned (reported Ready by the
// wrapper) must be cleaned up without unsoundness.
#[test]
fn poll_once_then_drop() {
    let mut stack = Stack::new();

    async fn other(stk: &mut Stk) {
        stk.yield_now().await;
        stk.yield_now().await;
        stk.yield_now().await;
    }

    async fn inner(stk: &mut Stk) {
        let mut done = false;
        ManualPoll::wrap(stk.run(other), |f, cx| {
            if !done {
                done = true;
                let r = f.poll(cx);
                assert_eq!(r, Poll::Pending);
                r
            } else {
                Poll::Ready(())
            }
        })
        .await
    }

    stack.enter(inner).finish()
}

// Polling a stack future again after completion must be a harmless no-op.
#[test]
fn poll_after_done() {
    let mut stack = Stack::new();

    async fn other(stk: &mut Stk) {
        stk.yield_now().await;
        stk.yield_now().await;
        stk.yield_now().await;
    }

    async fn inner(stk: &mut Stk) {
        ManualPoll::wrap(stk.run(other), |mut f, cx| match f.as_mut().poll(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(x) => {
                let _ = f.as_mut().poll(cx);
                let _ = f.as_mut().poll(cx);
                let _ = f.as_mut().poll(cx);
                let _ = f.as_mut().poll(cx);
                Poll::Ready(x)
            }
        })
        .await
    }

    stack.enter(inner).finish()
}

// A stack future dropped before first poll must never run its body.
#[test]
fn drop_future() {
    thread_local! {
        static COUNTER: Cell = const{ Cell::new(0)};
    }

    let mut stack = Stack::new();

    async fn other(_stk: &mut Stk) {
        COUNTER.with(|x| x.set(x.get() + 1))
    }

    async fn inner(stk: &mut Stk) {
        stk.run(other).await;
        std::mem::drop(stk.run(other));
        stk.run(other).await;
    }

    stack.enter(inner).finish();
    assert_eq!(COUNTER.get(), 2)
}

// Futures can be chained directly in the `enter` closure without async fns.
#[test]
fn direct_enter() {
    let mut stack = Stack::new();
    pollster::block_on(async {
        stack
            .enter(|stk| stk.run(|stk| stk.run(|_| thread_sleep(Duration::from_millis(50)))))
            .finish_async()
            .await;
    })
}

// Leaking a runner leaves unresolved tasks behind; re-entering the stack must
// then hit the `enter_future` leak assertion.
#[test]
#[cfg_attr(miri, ignore)]
#[should_panic]
fn forget_runner_and_use_again() {
    thread_local! {
        static COUNTER: Cell = const{ Cell::new(0)};
    }

    let mut stack = Stack::new();

    async fn other(_stk: &mut Stk) {
        COUNTER.with(|x| x.set(x.get() + 1))
    }

    async fn inner(stk: &mut Stk) {
        stk.run(other).await;
        std::mem::drop(stk.run(other));
        stk.run(other).await;
    }

    let runner = stack.enter(inner);
    std::mem::forget(runner);
    stack.enter(inner).finish();
    assert_eq!(COUNTER.get(), 2)
}

// Two sibling stack futures polled out of stack order must panic with the
// `push_task` state assertion.
#[test]
#[should_panic(expected = "Invalid stack state, futures are not being evaluated in stack order")]
fn enter_run_after_enter() {
    let mut stack = Stack::new();

    async fn inner(stk: &mut Stk) {
        let a = stk.run(|stk| stk.yield_now());
        let b = Stk::enter_run(|stk| stk.yield_now());

        ManualPoll::wrap((a, b), |f, ctx| {
            let (a, b) = unsafe { f.get_unchecked_mut() };

            let _ = unsafe { Pin::new_unchecked(a) }.poll(ctx);
            let _ = unsafe { Pin::new_unchecked(b) }.poll(ctx);
            panic!("didn't properly panic");
        })
        .await
    }
    stack.enter(inner).finish();
}

// Same as above with the poll order reversed.
#[test]
#[should_panic(expected = "Invalid stack state, futures are not being evaluated in stack order")]
fn enter_after_enter_run() {
    let mut stack = Stack::new();

    async fn inner(stk: &mut Stk) {
        let a = stk.run(|stk| stk.yield_now());
        let b = Stk::enter_run(|stk| stk.yield_now());

        ManualPoll::wrap((a, b), |f, ctx| {
            let (a, b) = unsafe { f.get_unchecked_mut() };

            let _ = unsafe { Pin::new_unchecked(b) }.poll(ctx);
            let _ = unsafe { Pin::new_unchecked(a) }.poll(ctx);
            panic!("didn't properly panic");
        })
        .await
    }
    stack.enter(inner).finish();
}

// Dropping a runner while many tasks are still live must clean up fully
// (scoped so the stack outlives the runner).
#[test]
fn cancel_mid_run() {
    async fn fibbo(stk: &mut Stk, f: usize) -> usize {
        match f {
            0 | 1 => 1,
            x => stk.run(|stk| fibbo(stk, x - 1)).await + stk.run(|stk| fibbo(stk, x - 2)).await,
        }
    }

    let mut stack = Stack::new();
    {
        let mut runner = stack.enter(|stk| fibbo(stk, 40));
        for _ in 0..1000 {
            assert!(runner.step().is_none());
        }
    }
}

// As above, but runner and stack drop together at end of scope.
#[test]
fn drop_mid_run() {
    async fn fibbo(stk: &mut Stk, f: usize) -> usize {
        match f {
            0 | 1 => 1,
            x => stk.run(|stk| fibbo(stk, x - 1)).await + stk.run(|stk| fibbo(stk, x - 2)).await,
        }
    }

    let mut stack = Stack::new();
    let mut runner = stack.enter(|stk| fibbo(stk, 40));
    for _ in 0..1000 {
        assert!(runner.step().is_none());
    }
}

// Alignment-mix test again; presumably this variant targets a small initial
// allocator capacity so reallocation paths are exercised — TODO confirm,
// the stack is constructed with `Stack::new()` here as well.
#[test]
fn test_bigger_alignment_with_small_capacity() {
    #[repr(align(32))]
    struct U256(u128, u128);

    struct Rand(u32);

    impl Rand {
        fn new() -> Self {
            Rand(0x194b93c)
        }

        fn next(&mut self) -> u32 {
            let mut x = self.0;
            x ^= x << 13;
            x ^= x >> 17;
            x ^= x << 5;
            self.0 = x;
            x
        }
    }

    async fn count_u16(stk: &mut Stk, rand: &mut Rand, depth: usize) -> usize {
        let mut v = rand.next() as u16;
        if depth == 0 {
            return v as usize;
        }
        // make sure that v is placed onto the stack.
        std::hint::black_box(&mut v);
        let c = match rand.next() % 3 {
            0 => stk.run(|stk| count_u16(stk, rand, depth - 1)).await as u16,
            1 => stk.run(|stk| count_u128(stk, rand, depth - 1)).await as u16,
            2 => stk.run(|stk| count_u256(stk, rand, depth - 1)).await as u16,
            _ => unreachable!(),
        };
        v.wrapping_add(c) as usize
    }

    async fn count_u128(stk: &mut Stk, rand: &mut Rand, depth: usize) -> usize {
        let mut v = rand.next() as u128;
        if depth == 0 {
            return v as usize;
        }
        // make sure that v is placed onto the stack.
        std::hint::black_box(&mut v);
        let c = match rand.next() % 3 {
            0 => stk.run(|stk| count_u16(stk, rand, depth - 1)).await as u128,
            1 => stk.run(|stk| count_u128(stk, rand, depth - 1)).await as u128,
            2 => stk.run(|stk| count_u256(stk, rand, depth - 1)).await as u128,
            _ => unreachable!(),
        };
        v.wrapping_add(c) as usize
    }

    async fn count_u256(stk: &mut Stk, rand: &mut Rand, depth: usize) -> usize {
        let mut v = U256(rand.next() as u128, 120203);
        if depth == 0 {
            return v.0 as usize;
        }
        // make sure that v is placed onto the stack.
        std::hint::black_box(&mut v);
        std::hint::black_box(v.1);
        let c = match rand.next() % 3 {
            0 => stk.run(|stk| count_u16(stk, rand, depth - 1)).await as u128,
            1 => stk.run(|stk| count_u128(stk, rand, depth - 1)).await as u128,
            2 => stk.run(|stk| count_u256(stk, rand, depth - 1)).await as u128,
            _ => unreachable!(),
        };
        v.0.wrapping_add(c) as usize
    }

    let mut rand = Rand::new();
    let mut stack = Stack::new();
    let depth = if cfg!(miri) { 16 } else { 1024 };
    stack
        .enter(|stk| count_u128(stk, &mut rand, depth))
        .finish();
}
// ---------------------------------------------------------------------------
// src/stub_waker.rs
// ---------------------------------------------------------------------------
//! A stub waker context for when the stack doesn't actually ever wait on any io.
2 | 3 | use std::{ 4 | ptr, 5 | task::{RawWaker, RawWakerVTable, Waker}, 6 | }; 7 | 8 | unsafe fn stub_clone(data: *const ()) -> RawWaker { 9 | //panic!("Called an non-reblessive async function withing a non-async reblessive context"); 10 | RawWaker::new(data, &STUB_WAKER_V_TABLE) 11 | } 12 | 13 | unsafe fn stub_wake(_: *const ()) { 14 | //panic!("Called an non-reblessive async function withing a non-async reblessive context"); 15 | } 16 | 17 | unsafe fn stub_drop(_: *const ()) {} 18 | 19 | static STUB_WAKER_V_TABLE: RawWakerVTable = 20 | RawWakerVTable::new(stub_clone, stub_wake, stub_wake, stub_drop); 21 | 22 | pub fn get() -> Waker { 23 | unsafe { Waker::from_raw(RawWaker::new(ptr::null_mut(), &STUB_WAKER_V_TABLE)) } 24 | } 25 | -------------------------------------------------------------------------------- /src/test.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | time::{Duration, Instant}, 6 | }; 7 | 8 | pub const KB: usize = 1024; 9 | pub const MB: usize = 1024 * KB; 10 | pub const PAGE_SIZE: usize = 4 * KB; 11 | 12 | /// A sleep that doesn't rely on epoll and is thus usable in miri. 13 | pub(crate) async fn thread_sleep(duration: Duration) { 14 | let deadline = Instant::now() + duration; 15 | let (send, mut recv) = tokio::sync::mpsc::channel::<()>(1); 16 | std::thread::spawn(move || { 17 | let sleep_time = deadline.duration_since(Instant::now()); 18 | std::thread::sleep(sleep_time); 19 | let _ = send.blocking_send(()); 20 | }); 21 | recv.recv().await; 22 | } 23 | 24 | /// Struct for lowering from a async block into a manual polling a future. 
25 | pub(crate) struct ManualPoll { 26 | future: F, 27 | poll: Fn, 28 | } 29 | 30 | impl ManualPoll 31 | where 32 | Fn: FnMut(Pin<&mut F>, &mut Context) -> Poll<()>, 33 | { 34 | pub fn wrap(future: F, poll: Fn) -> Self { 35 | ManualPoll { future, poll } 36 | } 37 | } 38 | 39 | impl Future for ManualPoll 40 | where 41 | Fn: FnMut(Pin<&mut F>, &mut Context) -> Poll<()>, 42 | { 43 | type Output = (); 44 | 45 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 46 | let this = unsafe { self.get_unchecked_mut() }; 47 | let future = unsafe { Pin::new_unchecked(&mut this.future) }; 48 | (this.poll)(future, cx).map(|_| ()) 49 | } 50 | } 51 | 52 | pub(crate) fn run_with_stack_size(size: usize, name: &'static str, f: F) -> R 53 | where 54 | F: FnOnce() -> R + Send + 'static, 55 | R: Send + 'static, 56 | { 57 | #[cfg(not(miri))] 58 | { 59 | std::thread::Builder::new() 60 | .name(name.to_string()) 61 | .stack_size(size) 62 | .spawn(f) 63 | .unwrap() 64 | .join() 65 | .unwrap() 66 | } 67 | #[cfg(miri)] 68 | { 69 | let (_, _) = (size, name); 70 | f() 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/tree/future.rs: -------------------------------------------------------------------------------- 1 | use super::{schedular::CancelToken, stk::ScopeStk, Stk}; 2 | use crate::{ 3 | ptr::{map_ptr, Owned}, 4 | stack::{future::InnerStkFuture, StackState}, 5 | Stack, TreeStack, 6 | }; 7 | use std::{ 8 | cell::Cell, 9 | future::Future, 10 | marker::{PhantomData, PhantomPinned}, 11 | pin::Pin, 12 | task::{Context, Poll}, 13 | }; 14 | 15 | /// Future returned by [`Stk::run`] 16 | /// 17 | /// Should be finished completely after the first polling before any other futures returned by [`Stk`] are polled. 18 | /// Failing to do so will cause a panic. 
19 | #[must_use = "futures do nothing unless you `.await` or poll them"] 20 | pub struct StkFuture<'a, F, R>(pub(crate) InnerStkFuture<'a, F, R, Stk>); 21 | 22 | impl<'a, F, Fut, R> Future for StkFuture<'a, F, R> 23 | where 24 | F: FnOnce(&'a mut Stk) -> Fut, 25 | Fut: Future + 'a, 26 | { 27 | type Output = R; 28 | 29 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 30 | // Safety: Pinning is structural for inner 31 | unsafe { self.map_unchecked_mut(|x| &mut x.0) }.poll(cx) 32 | } 33 | } 34 | 35 | pub enum ScopeFutureState { 36 | Initial(F), 37 | Running(Cell>, Cell>), 38 | Finished, 39 | } 40 | 41 | /// Future returned by [`Stk::scope`] 42 | /// 43 | /// Should be finished completely after the first polling before any other futures returned by [`Stk`] are polled. 44 | /// Failing to do so will cause a panic. 45 | #[must_use = "futures do nothing unless you `.await` or poll them"] 46 | pub struct ScopeFuture<'a, F, R> { 47 | state: ScopeFutureState, 48 | _marker: PhantomData<&'a Stk>, 49 | _pin_marker: PhantomPinned, 50 | } 51 | 52 | impl ScopeFuture<'_, F, R> { 53 | pub(crate) fn new(f: F) -> Self { 54 | ScopeFuture { 55 | state: ScopeFutureState::Initial(f), 56 | _marker: PhantomData, 57 | _pin_marker: PhantomPinned, 58 | } 59 | } 60 | } 61 | 62 | impl<'a, F, Fut, R> Future for ScopeFuture<'a, F, R> 63 | where 64 | F: FnOnce(&'a ScopeStk) -> Fut, 65 | Fut: Future + 'a, 66 | R: 'a, 67 | { 68 | type Output = R; 69 | 70 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 71 | unsafe { 72 | let this = Owned::from(self.get_unchecked_mut()); 73 | match this.as_ref().state { 74 | ScopeFutureState::Initial(_) => { 75 | let ScopeFutureState::Initial(x) = this 76 | .map_ptr(map_ptr!(Self, state)) 77 | .replace(ScopeFutureState::Running(Cell::new(None), Cell::new(None))) 78 | else { 79 | unreachable!() 80 | }; 81 | let ScopeFutureState::Running(ref place, ref cancel) = 82 | this.map_ptr(map_ptr!(Self, state)).as_ref() 83 | else { 84 | 
unreachable!(); 85 | }; 86 | 87 | let place = Owned::from(place); 88 | let fut = x(ScopeStk::new()); 89 | 90 | let cancel_token = Stack::with_context(|x| { 91 | x.state.set(StackState::NewTask); 92 | if x.is_rebless_context(cx) { 93 | TreeStack::with_context(|sched| { 94 | sched.push_cancellable(async move { 95 | place.as_ref().set(Some(fut.await)); 96 | }) 97 | }) 98 | } else { 99 | TreeStack::with_context(|sched| { 100 | let waker = cx.waker().clone(); 101 | let r = sched.push_cancellable(async move { 102 | place.as_ref().set(Some(fut.await)); 103 | waker.wake() 104 | }); 105 | r 106 | }) 107 | } 108 | }); 109 | cancel.set(Some(cancel_token)); 110 | 111 | Poll::Pending 112 | } 113 | ScopeFutureState::Running(ref place, _) => { 114 | if let Some(x) = place.take() { 115 | let _ = this 116 | .map_ptr(map_ptr!(Self, state)) 117 | .replace(ScopeFutureState::Finished); 118 | return Poll::Ready(x); 119 | } 120 | Poll::Pending 121 | } 122 | ScopeFutureState::Finished => Poll::Pending, 123 | } 124 | } 125 | } 126 | } 127 | 128 | impl Drop for ScopeFuture<'_, F, R> { 129 | fn drop(&mut self) { 130 | if let ScopeFutureState::Running(_, ref mut c) = self.state { 131 | // drop the cancellation so that the future won't run anymore. 132 | // Manually dropped first so that the future is always dropped before the place is. 
133 | std::mem::drop(c.take()); 134 | } 135 | } 136 | } 137 | 138 | #[must_use = "futures do nothing unless you `.await` or poll them"] 139 | pub struct ScopeStkFuture<'a, R> { 140 | stack: Stack, 141 | _marker_borrow: PhantomData<&'a ScopeStk>, 142 | _marker_res: PhantomData, 143 | } 144 | 145 | impl ScopeStkFuture<'_, R> { 146 | pub(crate) fn new(f: F) -> Self 147 | where 148 | F: Future, 149 | { 150 | let stack = Stack::new(); 151 | unsafe { stack.enter_future(f) }; 152 | ScopeStkFuture { 153 | stack, 154 | _marker_borrow: PhantomData, 155 | _marker_res: PhantomData, 156 | } 157 | } 158 | } 159 | 160 | impl Future for ScopeStkFuture<'_, R> { 161 | type Output = R; 162 | 163 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 164 | let this = unsafe { self.get_unchecked_mut() }; 165 | Stack::with_context(|parent| { 166 | // Make sure we immediatlu yield if we need to yield back to the parent stack. 167 | if parent.state.get() == StackState::Yield { 168 | cx.waker().wake_by_ref(); 169 | return Poll::Pending; 170 | } 171 | 172 | Stack::enter_context(&this.stack, || loop { 173 | if let Some(x) = unsafe { this.stack.try_get_result() } { 174 | return Poll::Ready(x); 175 | } 176 | 177 | match unsafe { this.stack.drive_top_task(cx) } { 178 | Poll::Pending => return Poll::Pending, 179 | Poll::Ready(yielding) => { 180 | if yielding { 181 | parent.state.set(StackState::Yield); 182 | cx.waker().wake_by_ref(); 183 | return Poll::Pending; 184 | } 185 | } 186 | } 187 | }) 188 | }) 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /src/tree/mod.rs: -------------------------------------------------------------------------------- 1 | //! A runtime which allows parallel running of branching futures. 
2 | 3 | use crate::{defer::Defer, ptr::Owned, stack::StackMarker, Stack}; 4 | use std::{ 5 | cell::Cell, 6 | future::Future, 7 | task::{Context, Poll}, 8 | }; 9 | 10 | mod future; 11 | mod runner; 12 | mod schedular; 13 | mod stk; 14 | 15 | #[cfg(test)] 16 | mod test; 17 | 18 | pub use future::{ScopeFuture, StkFuture}; 19 | use runner::Runner; 20 | pub use runner::{FinishFuture, StepFuture}; 21 | use schedular::Schedular; 22 | pub use stk::Stk; 23 | 24 | thread_local! { 25 | static TREE_PTR: Cell>> = const { Cell::new(None) }; 26 | } 27 | 28 | /// A runtime similar to [`Stack`] but allows running some futures in parallel 29 | pub struct TreeStack { 30 | root: Stack, 31 | schedular: Schedular, 32 | } 33 | 34 | unsafe impl Send for TreeStack {} 35 | unsafe impl Sync for TreeStack {} 36 | 37 | impl TreeStack { 38 | pub fn new() -> Self { 39 | TreeStack { 40 | root: Stack::new(), 41 | schedular: Schedular::new(), 42 | } 43 | } 44 | 45 | pub fn enter<'a, F, Fut, R>(&'a mut self, f: F) -> Runner<'a, R> 46 | where 47 | F: FnOnce(&'a mut Stk) -> Fut, 48 | Fut: Future + 'a, 49 | { 50 | let fut = unsafe { f(Stk::create()) }; 51 | 52 | unsafe { self.root.enter_future(fut) }; 53 | 54 | Runner::new(self) 55 | } 56 | 57 | pub(crate) fn enter_context(&self, f: F) -> R 58 | where 59 | F: FnOnce() -> R, 60 | { 61 | let old = TREE_PTR.replace(Some(Owned::from(&self.schedular))); 62 | let _defer = Defer::new(old, |old| TREE_PTR.set(*old)); 63 | f() 64 | } 65 | 66 | pub(crate) fn with_context(f: F) -> R 67 | where 68 | F: FnOnce(&Schedular) -> R, 69 | { 70 | unsafe { 71 | f(TREE_PTR 72 | .get() 73 | .expect("Used TreeStack functions outside of TreeStack context") 74 | .as_ref()) 75 | } 76 | } 77 | 78 | pub(crate) unsafe fn drive_top_task(&self, context: &mut Context) -> Poll { 79 | self.enter_context(|| { 80 | if !self.schedular.is_empty() { 81 | let pending = self 82 | .root 83 | .enter_context(|| self.schedular.poll(context).is_pending()); 84 | if pending { 85 | return 
Poll::Pending; 86 | } 87 | } 88 | self.root.drive_top_task(context) 89 | }) 90 | } 91 | } 92 | 93 | impl Default for TreeStack { 94 | fn default() -> Self { 95 | Self::new() 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/tree/runner.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | marker::PhantomData, 4 | pin::Pin, 5 | task::{Context, Poll}, 6 | }; 7 | 8 | use crate::TreeStack; 9 | 10 | #[must_use = "futures do nothing unless you `.await` or poll them"] 11 | pub struct FinishFuture<'a, R> { 12 | stack: &'a TreeStack, 13 | _marker: PhantomData, 14 | } 15 | 16 | impl Future for FinishFuture<'_, R> { 17 | type Output = R; 18 | 19 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 20 | let addr = self.stack.root.set_rebless_context(cx); 21 | loop { 22 | if let Some(x) = unsafe { self.stack.root.try_get_result() } { 23 | self.stack.root.set_rebless_context_addr(addr); 24 | return Poll::Ready(x); 25 | } 26 | 27 | let Poll::Ready(_) = (unsafe { self.stack.drive_top_task(cx) }) else { 28 | self.stack.root.set_rebless_context_addr(addr); 29 | return Poll::Pending; 30 | }; 31 | } 32 | } 33 | } 34 | 35 | #[must_use = "futures do nothing unless you `.await` or poll them"] 36 | pub struct StepFuture<'a, R> { 37 | stack: &'a TreeStack, 38 | _marker: PhantomData, 39 | } 40 | 41 | impl Future for StepFuture<'_, R> { 42 | type Output = Option; 43 | 44 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 45 | let addr = self.stack.root.set_rebless_context(cx); 46 | if let Some(x) = unsafe { self.stack.root.try_get_result() } { 47 | self.stack.root.set_rebless_context_addr(addr); 48 | return Poll::Ready(Some(x)); 49 | } 50 | 51 | let Poll::Ready(_) = (unsafe { self.stack.drive_top_task(cx) }) else { 52 | self.stack.root.set_rebless_context_addr(addr); 53 | return Poll::Pending; 54 | }; 55 | Poll::Ready(None) 56 | } 57 | } 58 | 59 
| pub struct Runner<'a, R> { 60 | stack: &'a TreeStack, 61 | _marker: PhantomData, 62 | } 63 | 64 | unsafe impl Send for Runner<'_, R> {} 65 | unsafe impl Sync for Runner<'_, R> {} 66 | 67 | impl<'a, R> Runner<'a, R> { 68 | pub(crate) fn new(runner: &'a TreeStack) -> Self { 69 | Runner { 70 | stack: runner, 71 | _marker: PhantomData, 72 | } 73 | } 74 | 75 | pub fn finish(self) -> FinishFuture<'a, R> { 76 | let res = FinishFuture { 77 | stack: self.stack, 78 | _marker: PhantomData, 79 | }; 80 | std::mem::forget(self); 81 | res 82 | } 83 | 84 | pub fn step(&mut self) -> StepFuture { 85 | StepFuture { 86 | stack: self.stack, 87 | _marker: PhantomData, 88 | } 89 | } 90 | } 91 | 92 | impl Drop for Runner<'_, R> { 93 | fn drop(&mut self) { 94 | self.stack.schedular.clear(); 95 | unsafe { 96 | self.stack.root.clear::(); 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/tree/schedular/atomic_waker.rs: -------------------------------------------------------------------------------- 1 | //! The atomic waker from the futures crate pulled into its own file. 2 | //! This is done to avoid having to pull in the entire futures crate just for a single struct. 3 | //! 4 | //! All copyright of this file belongs to the futures authors. 5 | 6 | use core::cell::UnsafeCell; 7 | use core::fmt; 8 | use core::task::Waker; 9 | 10 | use atomic::AtomicUsize; 11 | use atomic::Ordering::{AcqRel, Acquire, Release}; 12 | 13 | //#[cfg(not(feature = "portable-atomic"))] 14 | use core::sync::atomic; 15 | 16 | /// A synchronization primitive for task wakeup. 17 | /// 18 | /// Sometimes the task interested in a given event will change over time. 19 | /// An `AtomicWaker` can coordinate concurrent notifications with the consumer 20 | /// potentially "updating" the underlying task to wake up. 
This is useful in 21 | /// scenarios where a computation completes in another thread and wants to 22 | /// notify the consumer, but the consumer is in the process of being migrated to 23 | /// a new logical task. 24 | /// 25 | /// Consumers should call `register` before checking the result of a computation 26 | /// and producers should call `wake` after producing the computation (this 27 | /// differs from the usual `thread::park` pattern). It is also permitted for 28 | /// `wake` to be called **before** `register`. This results in a no-op. 29 | /// 30 | /// A single `AtomicWaker` may be reused for any number of calls to `register` or 31 | /// `wake`. 32 | /// 33 | /// # Memory ordering 34 | /// 35 | /// Calling `register` "acquires" all memory "released" by calls to `wake` 36 | /// before the call to `register`. Later calls to `wake` will wake the 37 | /// registered waker (on contention this wake might be triggered in `register`). 38 | /// 39 | /// For concurrent calls to `register` (should be avoided) the ordering is only 40 | /// guaranteed for the winning call. 41 | pub struct AtomicWaker { 42 | state: AtomicUsize, 43 | waker: UnsafeCell>, 44 | } 45 | 46 | // `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell 47 | // stores a `Waker` value produced by calls to `register` and many threads can 48 | // race to take the waker (to wake it) by calling `wake`. 49 | // 50 | // If a new `Waker` instance is produced by calling `register` before an 51 | // existing one is consumed, then the existing one is overwritten. 52 | // 53 | // While `AtomicWaker` is single-producer, the implementation ensures memory 54 | // safety. In the event of concurrent calls to `register`, there will be a 55 | // single winner whose waker will get stored in the cell. The losers will not 56 | // have their tasks woken. As such, callers should ensure to add synchronization 57 | // to calls to `register`. 
58 | // 59 | // The implementation uses a single `AtomicUsize` value to coordinate access to 60 | // the `Waker` cell. There are two bits that are operated on independently. 61 | // These are represented by `REGISTERING` and `WAKING`. 62 | // 63 | // The `REGISTERING` bit is set when a producer enters the critical section. The 64 | // `WAKING` bit is set when a consumer enters the critical section. Neither bit 65 | // being set is represented by `WAITING`. 66 | // 67 | // A thread obtains an exclusive lock on the waker cell by transitioning the 68 | // state from `WAITING` to `REGISTERING` or `WAKING`, depending on the operation 69 | // the thread wishes to perform. When this transition is made, it is guaranteed 70 | // that no other thread will access the waker cell. 71 | // 72 | // # Registering 73 | // 74 | // On a call to `register`, an attempt to transition the state from WAITING to 75 | // REGISTERING is made. On success, the caller obtains a lock on the waker cell. 76 | // 77 | // If the lock is obtained, then the thread sets the waker cell to the waker 78 | // provided as an argument. Then it attempts to transition the state back from 79 | // `REGISTERING` -> `WAITING`. 80 | // 81 | // If this transition is successful, then the registering process is complete 82 | // and the next call to `wake` will observe the waker. 83 | // 84 | // If the transition fails, then there was a concurrent call to `wake` that was 85 | // unable to access the waker cell (due to the registering thread holding the 86 | // lock). To handle this, the registering thread removes the waker it just set 87 | // from the cell and calls `wake` on it. This call to wake represents the 88 | // attempt to wake by the other thread (that set the `WAKING` bit). The state is 89 | // then transitioned from `REGISTERING | WAKING` back to `WAITING`. This 90 | // transition must succeed because, at this point, the state cannot be 91 | // transitioned by another thread. 
92 | // 93 | // # Waking 94 | // 95 | // On a call to `wake`, an attempt to transition the state from `WAITING` to 96 | // `WAKING` is made. On success, the caller obtains a lock on the waker cell. 97 | // 98 | // If the lock is obtained, then the thread takes ownership of the current value 99 | // in the waker cell, and calls `wake` on it. The state is then transitioned 100 | // back to `WAITING`. This transition must succeed as, at this point, the state 101 | // cannot be transitioned by another thread. 102 | // 103 | // If the thread is unable to obtain the lock, the `WAKING` bit is still. This 104 | // is because it has either been set by the current thread but the previous 105 | // value included the `REGISTERING` bit **or** a concurrent thread is in the 106 | // `WAKING` critical section. Either way, no action must be taken. 107 | // 108 | // If the current thread is the only concurrent call to `wake` and another 109 | // thread is in the `register` critical section, when the other thread **exits** 110 | // the `register` critical section, it will observe the `WAKING` bit and handle 111 | // the wake itself. 112 | // 113 | // If another thread is in the `wake` critical section, then it will handle 114 | // waking the task. 115 | // 116 | // # A potential race (is safely handled). 117 | // 118 | // Imagine the following situation: 119 | // 120 | // * Thread A obtains the `wake` lock and wakes a task. 121 | // 122 | // * Before thread A releases the `wake` lock, the woken task is scheduled. 123 | // 124 | // * Thread B attempts to wake the task. In theory this should result in the 125 | // task being woken, but it cannot because thread A still holds the wake lock. 126 | // 127 | // This case is handled by requiring users of `AtomicWaker` to call `register` 128 | // **before** attempting to observe the application state change that resulted 129 | // in the task being awoken. The wakers also change the application state before 130 | // calling wake. 
131 | // 132 | // Because of this, the waker will do one of two things. 133 | // 134 | // 1) Observe the application state change that Thread B is woken for. In this 135 | // case, it is OK for Thread B's wake to be lost. 136 | // 137 | // 2) Call register before attempting to observe the application state. Since 138 | // Thread A still holds the `wake` lock, the call to `register` will result 139 | // in the task waking itself and get scheduled again. 140 | 141 | /// Idle state 142 | const WAITING: usize = 0; 143 | 144 | /// A new waker value is being registered with the `AtomicWaker` cell. 145 | const REGISTERING: usize = 0b01; 146 | 147 | /// The waker currently registered with the `AtomicWaker` cell is being woken. 148 | const WAKING: usize = 0b10; 149 | 150 | impl AtomicWaker { 151 | /// Create an `AtomicWaker`. 152 | pub const fn new() -> Self { 153 | // Make sure that task is Sync 154 | #[allow(dead_code)] 155 | trait AssertSync: Sync {} 156 | impl AssertSync for Waker {} 157 | 158 | Self { 159 | state: AtomicUsize::new(WAITING), 160 | waker: UnsafeCell::new(None), 161 | } 162 | } 163 | 164 | /// Registers the waker to be notified on calls to `wake`. 165 | /// 166 | /// The new task will take place of any previous tasks that were registered 167 | /// by previous calls to `register`. Any calls to `wake` that happen after 168 | /// a call to `register` (as defined by the memory ordering rules), will 169 | /// notify the `register` caller's task and deregister the waker from future 170 | /// notifications. Because of this, callers should ensure `register` gets 171 | /// invoked with a new `Waker` **each** time they require a wakeup. 172 | /// 173 | /// It is safe to call `register` with multiple other threads concurrently 174 | /// calling `wake`. This will result in the `register` caller's current 175 | /// task being notified once. 176 | /// 177 | /// This function is safe to call concurrently, but this is generally a bad 178 | /// idea. 
Concurrent calls to `register` will attempt to register different 179 | /// tasks to be notified. One of the callers will win and have its task set, 180 | /// but there is no guarantee as to which caller will succeed. 181 | pub fn register(&self, waker: &Waker) { 182 | match self 183 | .state 184 | .compare_exchange(WAITING, REGISTERING, Acquire, Acquire) 185 | .unwrap_or_else(|x| x) 186 | { 187 | WAITING => { 188 | unsafe { 189 | // Locked acquired, update the waker cell 190 | 191 | // Avoid cloning the waker if the old waker will awaken the same task. 192 | match &*self.waker.get() { 193 | Some(old_waker) if old_waker.will_wake(waker) => (), 194 | _ => *self.waker.get() = Some(waker.clone()), 195 | } 196 | 197 | // Release the lock. If the state transitioned to include 198 | // the `WAKING` bit, this means that at least one wake has 199 | // been called concurrently. 200 | // 201 | // Start by assuming that the state is `REGISTERING` as this 202 | // is what we just set it to. If this holds, we know that no 203 | // other writes were performed in the meantime, so there is 204 | // nothing to acquire, only release. In case of concurrent 205 | // wakers, we need to acquire their releases, so success needs 206 | // to do both. 207 | let res = self 208 | .state 209 | .compare_exchange(REGISTERING, WAITING, AcqRel, Acquire); 210 | 211 | match res { 212 | Ok(_) => { 213 | // memory ordering: acquired self.state during CAS 214 | // - if previous wakes went through it syncs with 215 | // their final release (`fetch_and`) 216 | // - if there was no previous wake the next wake 217 | // will wake us, no sync needed. 218 | } 219 | Err(actual) => { 220 | // This branch can only be reached if at least one 221 | // concurrent thread called `wake`. In this 222 | // case, `actual` **must** be `REGISTERING | 223 | // `WAKING`. 224 | debug_assert_eq!(actual, REGISTERING | WAKING); 225 | 226 | // Take the waker to wake once the atomic operation has 227 | // completed. 
228 | let waker = (*self.waker.get()).take().unwrap(); 229 | 230 | // We need to return to WAITING state (clear our lock and 231 | // concurrent WAKING flag). This needs to acquire all 232 | // WAKING fetch_or releases and it needs to release our 233 | // update to self.waker, so we need a `swap` operation. 234 | self.state.swap(WAITING, AcqRel); 235 | 236 | // memory ordering: we acquired the state for all 237 | // concurrent wakes, but future wakes might still 238 | // need to wake us in case we can't make progress 239 | // from the pending wakes. 240 | // 241 | // So we simply schedule to come back later (we could 242 | // also simply leave the registration in place above). 243 | waker.wake(); 244 | } 245 | } 246 | } 247 | } 248 | WAKING => { 249 | // Currently in the process of waking the task, i.e., 250 | // `wake` is currently being called on the old task handle. 251 | // 252 | // memory ordering: we acquired the state for all 253 | // concurrent wakes, but future wakes might still 254 | // need to wake us in case we can't make progress 255 | // from the pending wakes. 256 | // 257 | // So we simply schedule to come back later (we 258 | // could also spin here trying to acquire the lock 259 | // to register). 260 | waker.wake_by_ref(); 261 | } 262 | state => { 263 | // In this case, a concurrent thread is holding the 264 | // "registering" lock. This probably indicates a bug in the 265 | // caller's code as racing to call `register` doesn't make much 266 | // sense. 267 | // 268 | // memory ordering: don't care. a concurrent register() is going 269 | // to succeed and provide proper memory ordering. 270 | // 271 | // We just want to maintain memory safety. It is ok to drop the 272 | // call to `register`. 273 | debug_assert!(state == REGISTERING || state == REGISTERING | WAKING); 274 | } 275 | } 276 | } 277 | 278 | /// Calls `wake` on the last `Waker` passed to `register`. 279 | /// 280 | /// If `register` has not been called yet, then this does nothing. 
281 | pub fn wake(&self) { 282 | if let Some(waker) = self.take() { 283 | waker.wake(); 284 | } 285 | } 286 | 287 | /// Returns the last `Waker` passed to `register`, so that the user can wake it. 288 | /// 289 | /// 290 | /// Sometimes, just waking the AtomicWaker is not fine grained enough. This allows the user 291 | /// to take the waker and then wake it separately, rather than performing both steps in one 292 | /// atomic action. 293 | /// 294 | /// If a waker has not been registered, this returns `None`. 295 | pub fn take(&self) -> Option { 296 | // AcqRel ordering is used in order to acquire the value of the `task` 297 | // cell as well as to establish a `release` ordering with whatever 298 | // memory the `AtomicWaker` is associated with. 299 | match self.state.fetch_or(WAKING, AcqRel) { 300 | WAITING => { 301 | // The waking lock has been acquired. 302 | let waker = unsafe { (*self.waker.get()).take() }; 303 | 304 | // Release the lock 305 | self.state.fetch_and(!WAKING, Release); 306 | 307 | waker 308 | } 309 | state => { 310 | // There is a concurrent thread currently updating the 311 | // associated task. 312 | // 313 | // Nothing more to do as the `WAKING` bit has been set. It 314 | // doesn't matter if there are concurrent registering threads or 315 | // not. 
316 | // 317 | debug_assert!( 318 | state == REGISTERING || state == REGISTERING | WAKING || state == WAKING 319 | ); 320 | None 321 | } 322 | } 323 | } 324 | } 325 | 326 | impl Default for AtomicWaker { 327 | fn default() -> Self { 328 | Self::new() 329 | } 330 | } 331 | 332 | impl fmt::Debug for AtomicWaker { 333 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 334 | write!(f, "AtomicWaker") 335 | } 336 | } 337 | 338 | unsafe impl Send for AtomicWaker {} 339 | unsafe impl Sync for AtomicWaker {} 340 | -------------------------------------------------------------------------------- /src/tree/schedular/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::{Cell, UnsafeCell}, 3 | future::Future, 4 | mem::ManuallyDrop, 5 | pin::Pin, 6 | sync::{ 7 | atomic::{AtomicBool, Ordering}, 8 | Arc, Weak, 9 | }, 10 | task::{Context, Poll}, 11 | }; 12 | 13 | mod atomic_waker; 14 | mod queue; 15 | mod waker; 16 | use queue::Queue; 17 | 18 | use crate::{ 19 | defer::Defer, 20 | ptr::{map_ptr, Owned}, 21 | }; 22 | 23 | use self::queue::NodeHeader; 24 | 25 | pub struct CancelToken(Owned>); 26 | 27 | /* 28 | impl CancelToken { 29 | pub fn detach(self) { 30 | // properly drop the pointer, 31 | // Safety: Below self is forgotten, so it won't be dropped twice. 32 | unsafe { Schedular::decr_task(self.0) }; 33 | // forget so the drop impl wont run. 34 | std::mem::forget(self); 35 | } 36 | } 37 | */ 38 | 39 | impl Drop for CancelToken { 40 | fn drop(&mut self) { 41 | let rf = unsafe { self.0.as_ref() }; 42 | 43 | // use a defer so that a possible panic in the waker will still decrement the arc count. 44 | let _defer = Defer::new(self.0, |this| unsafe { Schedular::decr_task(*this) }); 45 | 46 | if matches!(rf.body.done.get(), TaskState::Cancelled | TaskState::Done) { 47 | // the task was already done or cancelled, so the future was already dropped and only 48 | // arc count needs to be decremented. 
49 | return; 50 | } 51 | 52 | // after we have cancelled the task we can drop the future since it will no longer be 53 | // dropped by the main schedular. 54 | rf.body.done.set(TaskState::Cancelled); 55 | unsafe { Schedular::drop_task(self.0) }; 56 | 57 | // Try to schedule the task if it wasn't already so it can be removed from the all task 58 | // list. 59 | if rf 60 | .body 61 | .queued 62 | .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) 63 | .is_ok() 64 | { 65 | if let Some(queue) = rf.body.queue.upgrade() { 66 | unsafe { 67 | queue.waker().wake(); 68 | Pin::new_unchecked(&*queue).push(self.0.cast()); 69 | } 70 | // we transfered the ownership to the queue, so we dont need to decrement the arc 71 | // count. 72 | _defer.take(); 73 | } 74 | } 75 | } 76 | } 77 | 78 | #[derive(Debug, Clone)] 79 | pub(crate) struct VTable { 80 | task_incr: unsafe fn(Owned>), 81 | task_decr: unsafe fn(Owned>), 82 | task_drive: unsafe fn(Owned>, cx: &mut Context) -> Poll<()>, 83 | task_drop: unsafe fn(Owned>), 84 | } 85 | 86 | impl VTable { 87 | pub const fn get>() -> &'static VTable { 88 | trait HasVTable { 89 | const V_TABLE: VTable; 90 | } 91 | 92 | impl> HasVTable for F { 93 | const V_TABLE: VTable = VTable { 94 | task_incr: VTable::incr::, 95 | task_decr: VTable::decr::, 96 | task_drop: VTable::drop::, 97 | task_drive: VTable::drive::, 98 | }; 99 | } 100 | 101 | &::V_TABLE 102 | } 103 | 104 | unsafe fn decr>(ptr: Owned>) { 105 | let ptr = ptr.cast::>(); 106 | Arc::decrement_strong_count(ptr.as_ptr()) 107 | } 108 | 109 | unsafe fn incr>(ptr: Owned>) { 110 | let ptr = ptr.cast::>(); 111 | Arc::increment_strong_count(ptr.as_ptr()) 112 | } 113 | 114 | unsafe fn drop>(ptr: Owned>) { 115 | let future_ptr = ptr.cast::>().map_ptr(map_ptr!(Task, future)); 116 | ManuallyDrop::drop(&mut (*future_ptr.as_ref().get())) 117 | } 118 | 119 | unsafe fn drive>(ptr: Owned>, cx: &mut Context) -> Poll<()> { 120 | let future_ptr = ptr.cast::>().map_ptr(map_ptr!(Task, future)); 121 
| Pin::new_unchecked(&mut *(*future_ptr.as_ref().get())).poll(cx) 122 | } 123 | } 124 | 125 | #[repr(C)] 126 | struct Task { 127 | head: NodeHeader, 128 | body: TaskBody, 129 | future: UnsafeCell>, 130 | } 131 | 132 | impl Task 133 | where 134 | F: Future, 135 | { 136 | fn new(queue: Weak, future: F) -> Self { 137 | Task { 138 | head: NodeHeader::new(), 139 | body: TaskBody { 140 | queue, 141 | vtable: VTable::get::(), 142 | next: Cell::new(None), 143 | prev: Cell::new(None), 144 | queued: AtomicBool::new(true), 145 | done: Cell::new(TaskState::Running), 146 | }, 147 | future: UnsafeCell::new(ManuallyDrop::new(future)), 148 | } 149 | } 150 | } 151 | 152 | #[derive(Clone, Copy, Eq, PartialEq)] 153 | pub enum TaskState { 154 | // Task is still actively running, 155 | Running, 156 | // Task was cancelled but not yet freed from the list. 157 | Cancelled, 158 | // Task is done, and should be removed. 159 | Done, 160 | } 161 | 162 | // Seperate struct to not have everything be repr(C) 163 | struct TaskBody { 164 | queue: Weak, 165 | vtable: &'static VTable, 166 | // The double linked list of tasks. 167 | next: Cell>>>, 168 | prev: Cell>>>, 169 | done: Cell, 170 | // wether the task is currently in the queue to be re-polled. 171 | queued: AtomicBool, 172 | } 173 | 174 | pub struct Schedular { 175 | len: Cell, 176 | should_poll: Arc, 177 | all_next: Cell>>>, 178 | all_prev: Cell>>>, 179 | } 180 | 181 | impl Schedular { 182 | pub fn new() -> Self { 183 | let queue = Arc::new(Queue::new()); 184 | unsafe { 185 | Pin::new_unchecked(&*queue).init(); 186 | } 187 | Schedular { 188 | len: Cell::new(0), 189 | should_poll: queue, 190 | all_prev: Cell::new(None), 191 | all_next: Cell::new(None), 192 | } 193 | } 194 | 195 | pub fn is_empty(&self) -> bool { 196 | self.all_next.get().is_none() 197 | } 198 | 199 | /// # Safety 200 | /// This function erases any lifetime associated with the future. 
/// Caller must ensure that either the future completes or is dropped before the lifetime ends.
the ownership of the all list, 259 | // Task is now dropped or only owned by wakers or 260 | Self::decr_task(task); 261 | self.len.set(self.len.get() - 1); 262 | } 263 | 264 | unsafe fn drop_task(ptr: Owned>) { 265 | let vtable = ptr.as_ref().body.vtable; 266 | (vtable.task_drop)(ptr) 267 | } 268 | 269 | unsafe fn incr_task(ptr: Owned>) { 270 | (ptr.as_ref().body.vtable.task_incr)(ptr) 271 | } 272 | 273 | unsafe fn decr_task(ptr: Owned>) { 274 | (ptr.as_ref().body.vtable.task_decr)(ptr) 275 | } 276 | 277 | unsafe fn drive_task(ptr: Owned>, ctx: &mut Context) -> Poll<()> { 278 | (ptr.as_ref().body.vtable.task_drive)(ptr, ctx) 279 | } 280 | 281 | pub unsafe fn poll(&self, cx: &mut Context) -> Poll<()> { 282 | // Task are wrapped in an arc which is 'owned' by a number of possible structures. 283 | // - The all task list 284 | // - One or multiple wakers 285 | // - A possible cancellation token 286 | // - The should_poll list if it is scheduled. 287 | // 288 | // The implementations needs to ensure that the arc count stays consistent manually. 289 | 290 | if self.is_empty() { 291 | // No tasks, nothing to be done. 292 | return Poll::Ready(()); 293 | } 294 | 295 | self.should_poll.waker().register(cx.waker()); 296 | 297 | let mut iteration = 0; 298 | let mut yielded = 0; 299 | 300 | loop { 301 | // Popped a task, we now have the count that the should_poll list had. 302 | let cur = match Pin::new_unchecked(&*self.should_poll).pop() { 303 | queue::Pop::Empty => { 304 | return if self.is_empty() { 305 | // No more tasks left, nothing to be done. 306 | Poll::Ready(()) 307 | } else { 308 | // Tasks left but none ready, return ownership. 
309 | Poll::Pending 310 | }; 311 | } 312 | queue::Pop::Value(x) => x, 313 | queue::Pop::Inconsistant => { 314 | cx.waker().wake_by_ref(); 315 | return Poll::Pending; 316 | } 317 | }; 318 | 319 | let cur = cur.cast::>(); 320 | 321 | match cur.as_ref().body.done.get() { 322 | TaskState::Cancelled => { 323 | // Task was already cancelled, we just need to remove it from the all task 324 | // list. 325 | self.pop_task_all(cur); 326 | continue; 327 | } 328 | TaskState::Done => { 329 | // Task was already done, we can drop the ownership we got from the queue. 330 | Self::decr_task(cur); 331 | continue; 332 | } 333 | TaskState::Running => {} 334 | } 335 | 336 | // set queued back to false so the future can be rescheduled immediatly if desired. 337 | let prev = cur.as_ref().body.queued.swap(false, Ordering::AcqRel); 338 | assert!(prev); 339 | 340 | // We now transfered the arc count from the queue into the waker which will decrement the count when dropped. 341 | let waker = waker::get(cur); 342 | // if drive_task panics we want to remove the task from the list. 343 | // So we handle it with a drop implementation. 344 | let remove = Defer::new(self, |this| (*this).pop_task_all(cur)); 345 | let mut ctx = Context::from_waker(&waker); 346 | 347 | iteration += 1; 348 | 349 | match Self::drive_task(cur, &mut ctx) { 350 | Poll::Ready(_) => { 351 | // Nothing todo the defer will remove the task from the list. 352 | } 353 | Poll::Pending => { 354 | // Future is still pending so prevent the defer drop from running. 355 | remove.take(); 356 | 357 | // check if we should yield back to the parent schedular because a future 358 | // requires it. 
359 | yielded += cur.as_ref().body.queued.load(Ordering::Relaxed) as usize; 360 | if yielded > 2 || iteration > self.len.get() { 361 | cx.waker().wake_by_ref(); 362 | return Poll::Pending; 363 | } 364 | } 365 | } 366 | } 367 | } 368 | 369 | pub fn clear(&self) { 370 | // Clear all pending futures from the all list 371 | while let Some(c) = self.all_next.get() { 372 | unsafe { 373 | // remove it from the all list. 374 | self.pop_task_all(c) 375 | } 376 | } 377 | 378 | // Clear the should_poll list. 379 | // No more futures should be allowed to be scheduled at this point because all of there 380 | // queued flag has been set. 381 | loop { 382 | let cur = match unsafe { Pin::new_unchecked(&*self.should_poll).pop() } { 383 | queue::Pop::Empty => break, 384 | queue::Pop::Value(x) => x, 385 | queue::Pop::Inconsistant => { 386 | std::thread::yield_now(); 387 | continue; 388 | } 389 | }; 390 | 391 | // Task was already dropped so just decrement its count. 392 | unsafe { Self::decr_task(cur.cast()) }; 393 | } 394 | } 395 | } 396 | 397 | impl Drop for Schedular { 398 | fn drop(&mut self) { 399 | self.clear() 400 | } 401 | } 402 | -------------------------------------------------------------------------------- /src/tree/schedular/queue.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::Cell, 3 | pin::Pin, 4 | ptr::{self}, 5 | sync::atomic::{AtomicPtr, Ordering}, 6 | }; 7 | 8 | use crate::ptr::Owned; 9 | 10 | use super::atomic_waker::AtomicWaker; 11 | 12 | pub struct NodeHeader { 13 | next: AtomicPtr, 14 | } 15 | 16 | impl NodeHeader { 17 | pub fn new() -> NodeHeader { 18 | NodeHeader { 19 | next: AtomicPtr::new(ptr::null_mut()), 20 | } 21 | } 22 | } 23 | 24 | pub struct Queue { 25 | waker: AtomicWaker, 26 | head: AtomicPtr, 27 | tail: Cell>, 28 | stub: NodeHeader, 29 | } 30 | 31 | unsafe impl Send for Queue {} 32 | unsafe impl Sync for Queue {} 33 | 34 | pub enum Pop { 35 | Empty, 36 | Value(Owned), 37 | Inconsistant, 
}

/// Intrusive MPSC queue from 1024cores blog.
/// Similar to the one used in the FuturesUnordered implementation
impl Queue {
    /// Create a new, uninitialized queue; [`Queue::init`] must be called before use.
    pub fn new() -> Self {
        Queue {
            waker: AtomicWaker::new(),
            head: AtomicPtr::new(ptr::null_mut()),
            // Dangling until init() points head and tail at the stub node.
            tail: Cell::new(Owned::dangling()),
            stub: NodeHeader {
                next: AtomicPtr::new(ptr::null_mut()),
            },
        }
    }

    /// The waker notified when new work is pushed.
    pub fn waker(&self) -> &AtomicWaker {
        &self.waker
    }

    /// # Safety
    /// Must be called before any push/pop; the queue must stay pinned afterwards.
    pub unsafe fn init(self: Pin<&Self>) {
        let ptr = Owned::from(&self.stub);
        self.head.store(ptr.as_ptr(), Ordering::Release);
        self.tail.set(ptr);
    }

    /// # Safety
    /// - node must be a valid pointer
    /// - Queue must have been properly initialized.
    pub unsafe fn push(self: Pin<&Self>, node: Owned<NodeHeader>) {
        node.as_ref().next.store(ptr::null_mut(), Ordering::Release);

        // Swing head to the new node, then link the previous head to it. Between these two
        // steps a popper can observe the queue as inconsistent.
        let prev = self.get_ref().head.swap(node.as_ptr(), Ordering::AcqRel);

        (*prev).next.store(node.as_ptr(), Ordering::Release);
    }

    /// # Safety
    /// - Queue must have been properly initialized.
    /// - Can only be called from a single thread.
    pub unsafe fn pop(self: Pin<&Self>) -> Pop {
        let mut tail = self.tail.get();
        let mut next = Owned::from_ptr(tail.as_ref().next.load(Ordering::Acquire));

        // Skip the stub node if it is currently at the tail.
        if tail == Owned::from(&self.get_ref().stub) {
            let Some(n) = next else {
                return Pop::Empty;
            };

            self.tail.set(n);
            tail = n;
            next = Owned::from_ptr(n.as_ref().next.load(std::sync::atomic::Ordering::Acquire));
        }

        if let Some(n) = next {
            self.tail.set(n);
            return Pop::Value(tail);
        }

        // tail.next is null: either the queue has one element, or a concurrent push is in
        // flight (head already moved but next not yet linked).
        let head = Owned::from_ptr(self.head.load(Ordering::Acquire));
        if head != Some(tail) {
            return Pop::Inconsistant;
        }

        // Re-insert the stub so the last real node can be handed out.
        self.push(Owned::from(&self.get_ref().stub));

        next = Owned::from_ptr(tail.as_ref().next.load(Ordering::Acquire));

        if let Some(n) = next {
            self.tail.set(n);
            return Pop::Value(tail);
        }

        Pop::Empty
    }
}
-------------------------------------------------------------------------------- /src/tree/schedular/waker.rs: --------------------------------------------------------------------------------
//! A context for when stack doesn't actually ever wait on any io.

use std::{
    pin::Pin,
    sync::atomic::Ordering,
    task::{RawWaker, RawWakerVTable, Waker},
};

use crate::ptr::Owned;

use super::{Schedular, Task};

/// Bump the task's arc count; shared by clone and wake_by_ref.
unsafe fn inner_clone(ptr: *const ()) {
    let nonnull_ptr = Owned::from_ptr_unchecked((ptr as *mut ()).cast::<Task<u8>>());
    Schedular::incr_task(nonnull_ptr);
}

unsafe fn schedular_clone(ptr: *const ()) -> RawWaker {
    inner_clone(ptr);
    RawWaker::new(ptr, &SCHEDULAR_WAKER_V_TABLE)
}

/// Wake by value: consumes the waker's arc count, transferring it into the queue on success.
unsafe fn schedular_wake(ptr: *const ()) {
    let task = Owned::from_ptr_unchecked(ptr as *mut ()).cast::<Task<u8>>();

    if task
        .as_ref()
        .body
        .queued
        .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
        .is_err()
    {
        // Already awoken, skip!
        schedular_drop(ptr);
        return;
    }

    // retrieve the queue, if already dropped, just return as we don't need to awake anything.
    let Some(queue) = task.as_ref().body.queue.upgrade() else {
        schedular_drop(ptr);
        return;
    };

    // push to the queue
    Pin::new_unchecked(&*queue).push(task.cast());

    // wake up the schedular.
    queue.waker().wake()
}

unsafe fn schedular_wake_ref(ptr: *const ()) {
    // Take an extra count, then delegate to the consuming wake.
    inner_clone(ptr);
    schedular_wake(ptr)
}

unsafe fn schedular_drop(ptr: *const ()) {
    let ptr = Owned::from_ptr_unchecked((ptr as *mut ()).cast::<Task<u8>>());
    Schedular::decr_task(ptr)
}

static SCHEDULAR_WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(
    schedular_clone,
    schedular_wake,
    schedular_wake_ref,
    schedular_drop,
);

/// Build a waker around the task pointer; assumes ownership of one arc count.
pub unsafe fn get(ptr: Owned<Task<u8>>) -> Waker {
    unsafe { Waker::from_raw(RawWaker::new(ptr.as_ptr().cast(), &SCHEDULAR_WAKER_V_TABLE)) }
}
-------------------------------------------------------------------------------- /src/tree/stk.rs: --------------------------------------------------------------------------------
use std::{future::Future, marker::PhantomData};

use crate::{
    ptr::Owned,
    stack::{
        future::{InnerStkFuture, YieldFuture},
        StackMarker,
    },
    TreeStack,
};

use crate::tree::future::StkFuture;

use super::future::{ScopeFuture, ScopeStkFuture};

/// A reference back to stack from inside the running future.
///
/// Used for spawning new futures onto the stack from a future running on the stack.
pub struct Stk(PhantomData<*mut TreeStack>);

impl StackMarker for Stk {
    unsafe fn create() -> &'static mut Self {
        // Stk is a ZST; a dangling pointer is a valid reference to it.
        Owned::<Self>::dangling().as_mut()
    }
}

impl Stk {
    /// Run a new future in the runtime.
    pub fn run<'a, F, Fut, R>(&'a mut self, f: F) -> StkFuture<'a, F, R>
    where
        F: FnOnce(&'a mut Stk) -> Fut,
        Fut: Future<Output = R> + 'a,
    {
        StkFuture(InnerStkFuture::new(f))
    }

    /// A less type-safe version of Stk::run which doesn't require passing around a Stk object.
    /// Invalid use of this function can cause a panic or deadlocking an executor.
39 | /// 40 | /// # Panic 41 | /// This function will panic while not within a TreeStack 42 | /// The future returned by this function will panic if another stack futures is created which 43 | /// is not contained within the future returned by this function while the current future is 44 | /// still running 45 | pub fn enter_run<'a, F, Fut, R>(f: F) -> StkFuture<'a, F, R> 46 | where 47 | F: FnOnce(&'a mut Stk) -> Fut, 48 | Fut: Future + 'a, 49 | { 50 | // Check if this is being run in the right context 51 | TreeStack::with_context(|_| ()); 52 | StkFuture(InnerStkFuture::new(f)) 53 | } 54 | 55 | /// Yield the execution of the recursive futures back to the reblessive runtime. 56 | /// 57 | /// When stepping through a function instead of finishing it awaiting the future returned by 58 | /// this function will cause the the current step to complete. 59 | pub fn yield_now(&mut self) -> YieldFuture { 60 | YieldFuture::new() 61 | } 62 | 63 | /// Create a scope in which multiple reblessive futures can be polled at the same time. 64 | pub fn scope<'a, F, Fut, R>(&'a mut self, f: F) -> ScopeFuture<'a, F, R> 65 | where 66 | F: FnOnce(&'a ScopeStk) -> Fut, 67 | Fut: Future + 'a, 68 | { 69 | ScopeFuture::new(f) 70 | } 71 | 72 | /// A less type-safe version of Stk::scope which doesn't require passing arround a Stk object. 73 | /// Invalid use of this function can cause a panic or deadlocking an executor. 74 | /// 75 | /// # Panic 76 | /// This function will panic while not within a TreeStack 77 | pub fn enter_scope<'a, F, Fut, R>(f: F) -> ScopeFuture<'a, F, R> 78 | where 79 | F: FnOnce(&'a ScopeStk) -> Fut, 80 | Fut: Future + 'a, 81 | { 82 | // Check if this is being run in the right context 83 | TreeStack::with_context(|_| ()); 84 | ScopeFuture::new(f) 85 | } 86 | } 87 | 88 | /// A refernce back to stack from inside the running future. 89 | /// 90 | /// Used for spawning new futures onto the stack from a future running on the stack. 
91 | pub struct ScopeStk { 92 | marker: PhantomData<*mut TreeStack>, 93 | } 94 | 95 | impl ScopeStk { 96 | pub(super) unsafe fn new() -> &'static mut Self { 97 | Owned::dangling().as_mut() 98 | } 99 | } 100 | 101 | impl ScopeStk { 102 | /// Run a new future in the runtime. 103 | pub fn run<'a, F, Fut, R>(&'a self, f: F) -> ScopeStkFuture<'a, R> 104 | where 105 | F: FnOnce(&'a mut Stk) -> Fut, 106 | Fut: Future + 'a, 107 | { 108 | let future = unsafe { f(Stk::create()) }; 109 | 110 | ScopeStkFuture::new(future) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /src/tree/test.rs: -------------------------------------------------------------------------------- 1 | use futures_util::future::join_all; 2 | 3 | use super::{Stk, TreeStack}; 4 | use crate::{ 5 | defer::Defer, 6 | test::{run_with_stack_size, thread_sleep, ManualPoll, KB, MB}, 7 | tree::stk::ScopeStk, 8 | }; 9 | use std::{ 10 | cell::Cell, 11 | future::Future, 12 | mem::MaybeUninit, 13 | path::{Path, PathBuf}, 14 | sync::atomic::{AtomicUsize, Ordering}, 15 | task::Poll, 16 | time::{Duration, Instant}, 17 | }; 18 | 19 | async fn fanout<'a, F, Fut, R>(stk: &'a mut Stk, count: usize, f: F) -> Vec 20 | where 21 | F: Fn(&'a mut Stk) -> Fut + 'a, 22 | Fut: Future + 'a, 23 | R: 'a, 24 | { 25 | let r = stk.scope(|stk| async move { 26 | let futures = (0..count).map(|_| stk.run(&f)).collect::>(); 27 | 28 | futures_util::future::join_all(futures).await 29 | }); 30 | 31 | r.await 32 | } 33 | 34 | #[test] 35 | fn basic() { 36 | thread_local! 
{ 37 | static COUNTER: Cell = const{ Cell::new(0) }; 38 | } 39 | 40 | pollster::block_on(async { 41 | let mut stack = TreeStack::new(); 42 | 43 | let before = Instant::now(); 44 | stack 45 | .enter(|stk| async move { 46 | fanout::<_, _, ()>(stk, 10, |_| async { 47 | COUNTER.with(|x| x.set(x.get() + 1)); 48 | thread_sleep(Duration::from_millis(500)).await; 49 | COUNTER.with(|x| x.set(x.get() + 1)); 50 | }) 51 | .await 52 | }) 53 | .finish() 54 | .await; 55 | 56 | assert!(before.elapsed() < Duration::from_millis(1000)); 57 | // make sure the futures actually ran. 58 | assert_eq!(COUNTER.with(|x| x.get()), 20); 59 | }) 60 | } 61 | 62 | #[test] 63 | fn two_depth() { 64 | thread_local! { 65 | static COUNTER: Cell = const{ Cell::new(0) }; 66 | } 67 | 68 | pollster::block_on(async { 69 | let mut stack = TreeStack::new(); 70 | 71 | let before = Instant::now(); 72 | stack 73 | .enter(|stk| { 74 | fanout(stk, 4, |stk| async move { 75 | fanout(stk, 4, |_| async move { 76 | COUNTER.with(|x| x.set(x.get() + 1)); 77 | thread_sleep(Duration::from_millis(500)).await; 78 | COUNTER.with(|x| x.set(x.get() + 1)); 79 | }) 80 | .await 81 | }) 82 | }) 83 | .finish() 84 | .await; 85 | 86 | assert!(before.elapsed() < Duration::from_millis(2000)); 87 | // make sure the futures actually ran. 88 | assert_eq!(COUNTER.with(|x| x.get()), 32); 89 | }) 90 | } 91 | 92 | #[test] 93 | fn basic_then_deep() { 94 | thread_local! 
{ 95 | static COUNTER: Cell = const{ Cell::new(0) }; 96 | } 97 | 98 | pollster::block_on(async { 99 | let mut stack = TreeStack::new(); 100 | 101 | async fn go_deep(stk: &mut Stk, depth: usize) { 102 | if depth == 0 { 103 | COUNTER.with(|x| x.set(x.get() + 1)); 104 | thread_sleep(Duration::from_millis(500)).await; 105 | COUNTER.with(|x| x.set(x.get() + 1)); 106 | } else { 107 | stk.run(|stk| go_deep(stk, depth - 1)).await 108 | } 109 | } 110 | 111 | let before = Instant::now(); 112 | stack 113 | .enter(|stk| { 114 | fanout(stk, 10, |stk| async move { 115 | go_deep(stk, 10).await; 116 | }) 117 | }) 118 | .finish() 119 | .await; 120 | 121 | assert!(before.elapsed() < Duration::from_millis(4000)); 122 | // make sure the futures actually ran. 123 | assert_eq!(COUNTER.with(|x| x.get()), 20); 124 | }) 125 | } 126 | 127 | #[test] 128 | fn two_depth_step() { 129 | thread_local! { 130 | static COUNTER: Cell = const{ Cell::new(0) }; 131 | } 132 | 133 | pollster::block_on(async { 134 | let mut stack = TreeStack::new(); 135 | 136 | let before = Instant::now(); 137 | let mut runner = stack.enter(|stk| { 138 | fanout(stk, 4, |stk| async move { 139 | fanout(stk, 4, |_| async move { 140 | COUNTER.with(|x| x.set(x.get() + 1)); 141 | thread_sleep(Duration::from_millis(500)).await; 142 | COUNTER.with(|x| x.set(x.get() + 1)); 143 | }) 144 | .await 145 | }) 146 | }); 147 | 148 | while runner.step().await.is_none() {} 149 | 150 | assert!(before.elapsed() < Duration::from_millis(2000)); 151 | assert_eq!(COUNTER.with(|x| x.get()), 32); 152 | 153 | // make sure the futures actually ran. 154 | }) 155 | } 156 | 157 | #[test] 158 | fn deep_fanout_no_overflow() { 159 | pollster::block_on(async { 160 | let mut stack = TreeStack::new(); 161 | 162 | let depth = if cfg!(miri) { 10 } else { 1000 }; 163 | 164 | async fn go_deep(stk: &mut Stk, deep: usize) -> String { 165 | // An extra stack allocation to simulate a more complex function. 
166 | let mut ballast: MaybeUninit<[u8; 1024 * 128]> = std::mem::MaybeUninit::uninit(); 167 | 168 | let res = if deep != 0 { 169 | fanout(stk, 1, move |stk| go_deep(stk, deep - 1)) 170 | .await 171 | .into_iter() 172 | .next() 173 | .unwrap() 174 | } else { 175 | "Foo".to_owned() 176 | }; 177 | 178 | // Make sure the ballast isn't compiled out. 179 | std::hint::black_box(&mut ballast); 180 | 181 | res 182 | } 183 | 184 | let res = stack.enter(|stk| go_deep(stk, depth)).finish().await; 185 | assert_eq!(res, "Foo") 186 | }) 187 | } 188 | 189 | #[test] 190 | fn deep_no_overflow() { 191 | run_with_stack_size(MB, "deep no overflow", || { 192 | pollster::block_on(async { 193 | let mut stack = TreeStack::new(); 194 | 195 | let depth = if cfg!(miri) { 10 } else { 1000 }; 196 | 197 | async fn go_deep(stk: &ScopeStk, deep: usize) -> String { 198 | // An extra stack allocation to simulate a more complex function. 199 | let mut ballast: MaybeUninit<[u8; 32 * KB]> = std::mem::MaybeUninit::uninit(); 200 | 201 | let res = if deep != 0 { 202 | stk.run(|stk| stk.scope(|stk| go_deep(stk, deep - 1))).await 203 | } else { 204 | "Foo".to_owned() 205 | }; 206 | 207 | // Make sure the ballast isn't compiled out. 208 | std::hint::black_box(&mut ballast); 209 | 210 | res 211 | } 212 | 213 | let res = stack 214 | .enter(|stk| async { stk.scope(|stk| go_deep(stk, depth)).await }) 215 | .finish() 216 | .await; 217 | assert_eq!(res, "Foo") 218 | }) 219 | }) 220 | } 221 | 222 | #[test] 223 | fn cancel_scope_future() { 224 | thread_local! 
{ 225 | static COUNTER: Cell = const{ Cell::new(0) }; 226 | } 227 | 228 | pollster::block_on(async { 229 | let mut stack = TreeStack::new(); 230 | stack 231 | .enter(|stk| async { 232 | let scope = stk.scope(|stk| async { 233 | stk.run(|stk| async { 234 | let _defer = Defer::new((), |_| COUNTER.set(1)); 235 | stk.yield_now().await; 236 | let _defer = Defer::new((), |_| { 237 | COUNTER.set(2); 238 | }); 239 | }) 240 | .await 241 | }); 242 | 243 | let mut count = 0; 244 | ManualPoll::wrap(scope, move |future, ctx| { 245 | if count < 1 { 246 | count += 1; 247 | 248 | assert!(matches!(future.poll(ctx), Poll::Pending)); 249 | Poll::Pending 250 | } else { 251 | // first poll done, return read so we can cancel it. 252 | Poll::Ready(()) 253 | } 254 | }) 255 | .await; 256 | stk.yield_now().await; 257 | }) 258 | .finish() 259 | .await; 260 | 261 | assert_eq!(COUNTER.get(), 1) 262 | }); 263 | } 264 | 265 | #[test] 266 | fn drop_task_mid_run() { 267 | thread_local! { 268 | static COUNTER: Cell = const{ Cell::new(0) }; 269 | static TOTAL: Cell = const{ Cell::new(0) }; 270 | } 271 | 272 | pollster::block_on(async { 273 | let mut stack = TreeStack::new(); 274 | let future = stack 275 | .enter(|stk| async { 276 | COUNTER.set(COUNTER.get() + 1); 277 | TOTAL.set(TOTAL.get() + 1); 278 | let _defer = Defer::new((), |_| { 279 | COUNTER.set(COUNTER.get() - 1); 280 | }); 281 | 282 | stk.run(|stk| { 283 | fanout(stk, 10, |stk| { 284 | stk.run(|stk| { 285 | fanout(stk, 5, |_| async { 286 | COUNTER.set(COUNTER.get() + 1); 287 | TOTAL.set(TOTAL.get() + 1); 288 | let _defer = Defer::new((), |_| { 289 | COUNTER.set(COUNTER.get() - 1); 290 | }); 291 | ManualPoll::wrap((), |_, _| Poll::Pending).await; 292 | unreachable!(); 293 | }) 294 | }) 295 | }) 296 | }) 297 | .await; 298 | 299 | unreachable!(); 300 | }) 301 | .finish(); 302 | 303 | ManualPoll::wrap(future, |mut f, ctx| { 304 | for _ in 0..(10 * 5 * 2) { 305 | let _ = f.as_mut().poll(ctx); 306 | } 307 | Poll::Ready(()) 308 | }) 309 | .await; 
310 | }); 311 | 312 | assert_eq!(COUNTER.get(), 0); 313 | assert_eq!(TOTAL.get(), 10 * 5 + 1) 314 | } 315 | 316 | #[tokio::test] 317 | #[cfg_attr(miri, ignore)] 318 | async fn tokio_sleep_depth() { 319 | thread_local! { 320 | static COUNTER: Cell = const{ Cell::new(0) }; 321 | } 322 | 323 | COUNTER.with(|x| x.set(0)); 324 | let mut stack = TreeStack::new(); 325 | 326 | let before = Instant::now(); 327 | stack 328 | .enter(|stk| { 329 | fanout(stk, 4, |stk| async move { 330 | fanout(stk, 4, |_| async move { 331 | COUNTER.with(|x| x.set(x.get() + 1)); 332 | tokio::time::sleep(Duration::from_millis(500)).await; 333 | COUNTER.with(|x| x.set(x.get() + 1)); 334 | }) 335 | .await 336 | }) 337 | }) 338 | .finish() 339 | .await; 340 | 341 | assert!(before.elapsed() < Duration::from_millis(2000)); 342 | // make sure the futures actually ran. 343 | assert_eq!(COUNTER.with(|x| x.get()), 32); 344 | } 345 | 346 | #[tokio::test] 347 | #[cfg_attr(miri, ignore)] 348 | async fn read_files() { 349 | static OPEN_COUNT: AtomicUsize = AtomicUsize::new(0); 350 | const MAX_OPEN: usize = 100; 351 | 352 | let mut stack = TreeStack::new(); 353 | 354 | async fn read_dir(stk: &ScopeStk, dir: PathBuf) -> String { 355 | let mut dir = tokio::fs::read_dir(dir).await.unwrap(); 356 | let mut r = Vec::new(); 357 | let mut buf = String::new(); 358 | while let Some(entry) = dir.next_entry().await.unwrap() { 359 | let path = entry.path(); 360 | let kind = entry.file_type().await.unwrap(); 361 | if kind.is_dir() { 362 | if OPEN_COUNT.load(Ordering::Relaxed) > MAX_OPEN { 363 | let str = stk.run(|stk| stk.scope(|stk| read_dir(stk, path))).await; 364 | buf.push_str(&str); 365 | } else { 366 | OPEN_COUNT.fetch_add(1, Ordering::Relaxed); 367 | let f = stk.run(|stk| async { 368 | let r = stk.scope(|stk| read_dir(stk, path)).await; 369 | OPEN_COUNT.fetch_sub(1, Ordering::Relaxed); 370 | r 371 | }); 372 | r.push(f) 373 | } 374 | } else if OPEN_COUNT.load(Ordering::Relaxed) > MAX_OPEN { 375 | let str = stk 376 | 
.run(|_| async { tokio::fs::read_to_string(path).await.unwrap_or_default() }) 377 | .await; 378 | buf.push_str(&str); 379 | } else { 380 | OPEN_COUNT.fetch_add(1, Ordering::Relaxed); 381 | let f = stk.run(|_| async { 382 | let r = tokio::fs::read_to_string(path).await.unwrap_or_default(); 383 | OPEN_COUNT.fetch_sub(1, Ordering::Relaxed); 384 | r 385 | }); 386 | r.push(f) 387 | } 388 | } 389 | let mut str = join_all(r).await.join("\n=========\n"); 390 | str.push_str(&buf); 391 | str 392 | } 393 | 394 | stack 395 | .enter(|stk| async { 396 | stk.scope(|stk| read_dir(stk, Path::new("./").to_path_buf())) 397 | .await 398 | }) 399 | .finish() 400 | .await; 401 | 402 | //println!("{}", full_text); 403 | } 404 | 405 | #[tokio::test] 406 | #[cfg_attr(miri, ignore)] 407 | async fn read_files_stepping() { 408 | static OPEN_COUNT: AtomicUsize = AtomicUsize::new(0); 409 | const MAX_OPEN: usize = 100; 410 | 411 | let mut stack = TreeStack::new(); 412 | 413 | async fn read_dir(stk: &ScopeStk, dir: PathBuf) -> String { 414 | let mut dir = tokio::fs::read_dir(dir).await.unwrap(); 415 | let mut r = Vec::new(); 416 | let mut buf = String::new(); 417 | while let Some(entry) = dir.next_entry().await.unwrap() { 418 | let path = entry.path(); 419 | let kind = entry.file_type().await.unwrap(); 420 | if kind.is_dir() { 421 | if OPEN_COUNT.load(Ordering::Relaxed) > MAX_OPEN { 422 | let str = stk.run(|stk| stk.scope(|stk| read_dir(stk, path))).await; 423 | buf.push_str(&str); 424 | } else { 425 | OPEN_COUNT.fetch_add(1, Ordering::Relaxed); 426 | let f = stk.run(|stk| async { 427 | let r = stk.scope(|stk| read_dir(stk, path)).await; 428 | OPEN_COUNT.fetch_sub(1, Ordering::Relaxed); 429 | r 430 | }); 431 | r.push(f) 432 | } 433 | } else if OPEN_COUNT.load(Ordering::Relaxed) > MAX_OPEN { 434 | let str = stk 435 | .run(|_| async { tokio::fs::read_to_string(path).await.unwrap_or_default() }) 436 | .await; 437 | buf.push_str(&str); 438 | } else { 439 | OPEN_COUNT.fetch_add(1, Ordering::Relaxed); 
440 | let f = stk.run(|_| async { 441 | let r = tokio::fs::read_to_string(path).await.unwrap_or_default(); 442 | OPEN_COUNT.fetch_sub(1, Ordering::Relaxed); 443 | r 444 | }); 445 | r.push(f) 446 | } 447 | } 448 | let mut str = join_all(r).await.join("\n=========\n"); 449 | str.push_str(&buf); 450 | str 451 | } 452 | 453 | let mut runner = stack.enter(|stk| async { 454 | stk.scope(|stk| read_dir(stk, Path::new("./").to_path_buf())) 455 | .await 456 | }); 457 | 458 | loop { 459 | if runner.step().await.is_some() { 460 | break; 461 | } 462 | } 463 | } 464 | -------------------------------------------------------------------------------- /src/vtable.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | use crate::ptr::{map_ptr, Owned}; 8 | 9 | #[repr(C)] 10 | pub struct TaskBox { 11 | pub(crate) v_table: &'static VTable, 12 | pub(crate) future: F, 13 | } 14 | 15 | /// A constant table generated for each type of tasks that is spawned. 16 | #[derive(Debug, Clone)] 17 | pub(crate) struct VTable { 18 | /// Funtion to drop the task in place. 19 | pub(crate) dropper: unsafe fn(Owned>), 20 | /// Funtion to drive the task forward. 
21 | pub(crate) driver: unsafe fn(Owned>, ctx: &mut Context<'_>) -> Poll<()>, 22 | } 23 | 24 | impl VTable { 25 | pub fn get>() -> &'static VTable { 26 | trait HasVTable { 27 | const V_TABLE: VTable; 28 | } 29 | 30 | impl> HasVTable for F { 31 | const V_TABLE: VTable = VTable { 32 | dropper: VTable::drop_impl::, 33 | driver: VTable::drive_impl::, 34 | }; 35 | } 36 | 37 | &::V_TABLE 38 | } 39 | 40 | unsafe fn drop_impl(ptr: Owned>) 41 | where 42 | F: Future, 43 | { 44 | std::ptr::drop_in_place(ptr.cast::>().as_ptr()) 45 | } 46 | 47 | unsafe fn drive_impl(ptr: Owned>, ctx: &mut Context<'_>) -> Poll<()> 48 | where 49 | F: Future, 50 | { 51 | Pin::new_unchecked( 52 | ptr.cast::>() 53 | .map_ptr(map_ptr!(TaskBox, future)) 54 | .as_mut(), 55 | ) 56 | .poll(ctx) 57 | } 58 | } 59 | --------------------------------------------------------------------------------