├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ └── release.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches └── spawn.rs ├── examples ├── spawn-local.rs ├── spawn-on-thread.rs ├── spawn.rs └── with-metadata.rs ├── src ├── header.rs ├── lib.rs ├── raw.rs ├── runnable.rs ├── state.rs ├── task.rs └── utils.rs └── tests ├── basic.rs ├── cancel.rs ├── join.rs ├── metadata.rs ├── panic.rs ├── ready.rs ├── waker_panic.rs ├── waker_pending.rs └── waker_ready.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: / 5 | schedule: 6 | interval: weekly 7 | commit-message: 8 | prefix: '' 9 | labels: [] 10 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | permissions: 4 | contents: read 5 | 6 | on: 7 | pull_request: 8 | push: 9 | branches: 10 | - master 11 | schedule: 12 | - cron: '0 2 * * 0' 13 | 14 | env: 15 | CARGO_INCREMENTAL: 0 16 | CARGO_NET_GIT_FETCH_WITH_CLI: true 17 | CARGO_NET_RETRY: 10 18 | CARGO_TERM_COLOR: always 19 | RUST_BACKTRACE: 1 20 | RUSTFLAGS: -D warnings 21 | RUSTDOCFLAGS: -D warnings 22 | RUSTUP_MAX_RETRIES: 10 23 | 24 | defaults: 25 | run: 26 | shell: bash 27 | 28 | jobs: 29 | fmt: 30 | uses: smol-rs/.github/.github/workflows/fmt.yml@main 31 | security_audit: 32 | uses: smol-rs/.github/.github/workflows/security_audit.yml@main 33 | permissions: 34 | checks: write 35 | contents: read 36 | issues: write 37 | secrets: inherit 38 | 39 | test: 40 | runs-on: ${{ matrix.os }} 41 | strategy: 42 | fail-fast: false 43 | matrix: 44 | os: [ubuntu-latest] 45 | rust: [nightly, beta, stable] 46 | steps: 47 | - uses: actions/checkout@v4 48 | - name: Install Rust 49 | run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }} 50 | - run: rustup target add thumbv7m-none-eabi 51 | - name: Install cargo-hack 52 | uses: taiki-e/install-action@cargo-hack 53 | - name: Install valgrind 54 | uses: taiki-e/install-action@valgrind 55 | - run: cargo build --all --all-features --all-targets 56 | if: startsWith(matrix.rust, 'nightly') 57 | - run: cargo hack build --feature-powerset --no-dev-deps 58 | - run: cargo hack build --feature-powerset --no-dev-deps --target thumbv7m-none-eabi --skip std,default 59 | - run: cargo test 60 | - name: Run cargo test (with valgrind) 61 | run: cargo test -- --test-threads=1 62 | env: 63 | # TODO: use --errors-for-leak-kinds=definite,indirect due to upstream bug (https://github.com/rust-lang/rust/issues/135608) 64 | CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER: valgrind -v --error-exitcode=1 --error-limit=no --leak-check=full --show-leak-kinds=all --errors-for-leak-kinds=definite,indirect --track-origins=yes --fair-sched=yes 65 | - name: Run cargo test (with portable-atomic enabled) 66 | run: cargo test --features portable-atomic 67 | - name: Clone async-executor 68 | run: git clone https://github.com/smol-rs/async-executor.git 69 | - name: Add patch section 70 | run: | 71 | echo '[patch.crates-io]' >> async-executor/Cargo.toml 72 | echo 'async-task = { path = ".." 
}' >> async-executor/Cargo.toml 73 | - name: Test async-executor 74 | run: cargo test --manifest-path async-executor/Cargo.toml 75 | 76 | msrv: 77 | runs-on: ubuntu-latest 78 | steps: 79 | - uses: actions/checkout@v4 80 | - name: Install cargo-hack 81 | uses: taiki-e/install-action@cargo-hack 82 | - run: cargo hack build --feature-powerset --no-dev-deps --rust-version 83 | 84 | clippy: 85 | runs-on: ubuntu-latest 86 | steps: 87 | - uses: actions/checkout@v4 88 | - name: Install Rust 89 | run: rustup update stable 90 | - run: cargo clippy --all-features --tests --examples 91 | 92 | miri: 93 | runs-on: ubuntu-latest 94 | steps: 95 | - uses: actions/checkout@v4 96 | - name: Install Rust 97 | run: rustup toolchain install nightly --component miri && rustup default nightly 98 | - run: cargo miri test 99 | env: 100 | # -Zmiri-ignore-leaks is needed because we use detached threads in doctests: https://github.com/rust-lang/miri/issues/1371 101 | # disable preemption due to https://github.com/rust-lang/rust/issues/55005 102 | MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation -Zmiri-ignore-leaks -Zmiri-preemption-rate=0 103 | RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout 104 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | permissions: 4 | contents: write 5 | 6 | on: 7 | push: 8 | tags: 9 | - v[0-9]+.* 10 | 11 | jobs: 12 | create-release: 13 | if: github.repository_owner == 'smol-rs' 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: taiki-e/create-gh-release-action@v1 18 | with: 19 | changelog: CHANGELOG.md 20 | branch: master 21 | env: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Version 4.7.1 2 | 3 | - Improve the panic message for when a task is polled after completion. (#73) 4 | 5 | # Version 4.7.0 6 | 7 | - Add `from_raw` and `into_raw` functions for `Runnable` to ease passing it 8 | across an FFI boundary. (#65) 9 | 10 | # Version 4.6.0 11 | 12 | - Bump MSRV to 1.57. (#63) 13 | - Task layout computation failures are now a compile-time error instead of a 14 | runtime abort. (#63) 15 | 16 | # Version 4.5.0 17 | 18 | - Add a `portable-atomic` feature that enables the usage of fallback primitives for CPUs without atomics. (#58) 19 | 20 | # Version 4.4.1 21 | 22 | - Clarify safety documentation for `spawn_unchecked`. (#49) 23 | 24 | # Version 4.4.0 25 | 26 | - Ensure that the allocation doesn't exceed `isize::MAX` (#32) 27 | - Add `FallibleTask::is_finished()` (#34) 28 | - Add a metadata generic parameter to tasks (#33) 29 | - Add panic propagation to tasks (#37) 30 | - Add a way to tell if the task was woken while running from the schedule function (#42) 31 | 32 | # Version 4.3.0 33 | 34 | - Bump MSRV to Rust 1.47. (#30) 35 | - Evaluate the layouts for the tasks at compile time. (#30) 36 | - Add layout_info field to TaskVTable so that debuggers can decode raw tasks. (#29) 37 | 38 | # Version 4.2.0 39 | 40 | - Add `Task::is_finished`. 
(#19) 41 | 42 | # Version 4.1.0 43 | 44 | - Add `FallibleTask`. (#21) 45 | 46 | # Version 4.0.3 47 | 48 | - Document the return value of `Runnable::run()` better. 49 | 50 | # Version 4.0.2 51 | 52 | - Nits in the docs. 53 | 54 | # Version 4.0.1 55 | 56 | - Nits in the docs. 57 | 58 | # Version 4.0.0 59 | 60 | - Rename `Task` to `Runnable`. 61 | - Rename `JoinHandle` to `Task`. 62 | - Cancel `Task` on drop. 63 | - Add `Task::detach()` and `Task::cancel()`. 64 | - Add `spawn_unchecked()`. 65 | 66 | # Version 3.0.0 67 | 68 | - Use `ThreadId` in `spawn_local` because OS-provided IDs can get recycled. 69 | - Add `std` feature to `Cargo.toml`. 70 | 71 | # Version 2.1.1 72 | 73 | - Allocate large futures on the heap. 74 | 75 | # Version 2.1.0 76 | 77 | - `JoinHandle` now only evaluates after the task's future has been dropped. 78 | 79 | # Version 2.0.0 80 | 81 | - Return `true` in `Task::run()`. 82 | 83 | # Version 1.3.1 84 | 85 | - Make `spawn_local` available only on unix and windows. 86 | 87 | # Version 1.3.0 88 | 89 | - Add `waker_fn`. 90 | 91 | # Version 1.2.1 92 | 93 | - Add the `no-std` category to the package. 94 | 95 | # Version 1.2.0 96 | 97 | - The crate is now marked with `#![no_std]`. 98 | - Add `Task::waker` and `JoinHandle::waker`. 99 | - Add `Task::into_raw` and `Task::from_raw`. 100 | 101 | # Version 1.1.1 102 | 103 | - Fix a use-after-free bug where the schedule function is dropped while running. 104 | 105 | # Version 1.1.0 106 | 107 | - If a task is dropped or canceled outside the `run` method, it gets re-scheduled. 108 | - Add `spawn_local` constructor. 109 | 110 | # Version 1.0.0 111 | 112 | - Initial release 113 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "async-task" 3 | # When publishing a new version: 4 | # - Update CHANGELOG.md 5 | # - Create "v4.x.y" git tag 6 | version = "4.7.1" 7 | authors = ["Stjepan Glavina "] 8 | edition = "2021" 9 | rust-version = "1.57" 10 | license = "Apache-2.0 OR MIT" 11 | repository = "https://github.com/smol-rs/async-task" 12 | description = "Task abstraction for building executors" 13 | keywords = ["futures", "task", "executor", "spawn"] 14 | categories = ["asynchronous", "concurrency", "no-std"] 15 | exclude = ["/.*"] 16 | 17 | [features] 18 | default = ["std"] 19 | std = [] 20 | 21 | [dependencies] 22 | # Uses portable-atomic polyfill atomics on targets without them 23 | portable-atomic = { version = "1", optional = true, default-features = false } 24 | 25 | [dev-dependencies] 26 | atomic-waker = "1" 27 | easy-parallel = "3" 28 | flaky_test = "0.2" 29 | flume = { version = "0.11", default-features = false } 30 | futures-lite = "2.0.0" 31 | once_cell = "1" 32 | pin-project-lite = "0.2.10" 33 | smol = "2" 34 | 35 | # rewrite dependencies to use the this version of async-task when running tests 36 | [patch.crates-io] 37 | async-task = { path = "." } 38 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # async-task 2 | 3 | [![Build](https://github.com/smol-rs/async-task/actions/workflows/ci.yml/badge.svg)]( 4 | https://github.com/smol-rs/async-task/actions) 5 | [![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)]( 6 | https://github.com/smol-rs/async-task) 7 | [![Cargo](https://img.shields.io/crates/v/async-task.svg)]( 8 | https://crates.io/crates/async-task) 9 | [![Documentation](https://docs.rs/async-task/badge.svg)]( 10 | https://docs.rs/async-task) 11 | 12 | Task abstraction for building executors. 13 | 14 | To spawn a future onto an executor, we first need to allocate it on the heap and keep some 15 | state attached to it. The state indicates whether the future is ready for polling, waiting to 16 | be woken up, or completed. Such a stateful future is called a *task*. 17 | 18 | All executors have a queue that holds scheduled tasks: 19 | 20 | ```rust 21 | let (sender, receiver) = flume::unbounded(); 22 | ``` 23 | 24 | A task is created using either `spawn()`, `spawn_local()`, or `spawn_unchecked()` which 25 | return a `Runnable` and a `Task`: 26 | 27 | ```rust 28 | // A future that will be spawned. 29 | let future = async { 1 + 2 }; 30 | 31 | // A function that schedules the task when it gets woken up. 32 | let schedule = move |runnable| sender.send(runnable).unwrap(); 33 | 34 | // Construct a task. 35 | let (runnable, task) = async_task::spawn(future, schedule); 36 | 37 | // Push the task into the queue by invoking its schedule function. 
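// `schedule()` consumes the `Runnable` and calls the schedule closure exactly once,
// so the task now sits in the channel until the run loop below picks it up.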
38 | runnable.schedule(); 39 | ``` 40 | 41 | The `Runnable` is used to poll the task's future, and the `Task` is used to await its 42 | output. 43 | 44 | Finally, we need a loop that takes scheduled tasks from the queue and runs them: 45 | 46 | ```rust 47 | for runnable in receiver { 48 | runnable.run(); 49 | } 50 | ``` 51 | 52 | Method `run()` polls the task's future once. Then, the `Runnable` 53 | vanishes and only reappears when its `Waker` wakes the task, thus 54 | scheduling it to be run again. 55 | 56 | ## License 57 | 58 | Licensed under either of 59 | 60 | * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 61 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 62 | 63 | at your option. 64 | 65 | #### Contribution 66 | 67 | Unless you explicitly state otherwise, any contribution intentionally submitted 68 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 69 | dual licensed as above, without any additional terms or conditions. 70 | -------------------------------------------------------------------------------- /benches/spawn.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use smol::future; 6 | use test::Bencher; 7 | 8 | #[bench] 9 | fn task_create(b: &mut Bencher) { 10 | b.iter(|| { 11 | let _ = async_task::spawn(async {}, drop); 12 | }); 13 | } 14 | 15 | #[bench] 16 | fn task_run(b: &mut Bencher) { 17 | b.iter(|| { 18 | let (runnable, task) = async_task::spawn(async {}, drop); 19 | runnable.run(); 20 | future::block_on(task); 21 | }); 22 | } 23 | -------------------------------------------------------------------------------- /examples/spawn-local.rs: -------------------------------------------------------------------------------- 1 | //! A simple single-threaded executor that can spawn non-`Send` futures. 2 | 3 | use std::cell::Cell; 4 | use std::future::Future; 5 | use std::rc::Rc; 6 | 7 | use async_task::{Runnable, Task}; 8 | 9 | thread_local! { 10 | // A queue that holds scheduled tasks. 11 | static QUEUE: (flume::Sender, flume::Receiver) = flume::unbounded(); 12 | } 13 | 14 | /// Spawns a future on the executor. 15 | fn spawn(future: F) -> Task 16 | where 17 | F: Future + 'static, 18 | T: 'static, 19 | { 20 | // Create a task that is scheduled by pushing itself into the queue. 21 | let schedule = |runnable| QUEUE.with(|(s, _)| s.send(runnable).unwrap()); 22 | let (runnable, task) = async_task::spawn_local(future, schedule); 23 | 24 | // Schedule the task by pushing it into the queue. 25 | runnable.schedule(); 26 | 27 | task 28 | } 29 | 30 | /// Runs a future to completion. 31 | fn run(future: F) -> T 32 | where 33 | F: Future + 'static, 34 | T: 'static, 35 | { 36 | // Spawn a task that sends its result through a channel. 37 | let (s, r) = flume::unbounded(); 38 | spawn(async move { drop(s.send(future.await)) }).detach(); 39 | 40 | loop { 41 | // If the original task has completed, return its result. 42 | if let Ok(val) = r.try_recv() { 43 | return val; 44 | } 45 | 46 | // Otherwise, take a task from the queue and run it. 47 | QUEUE.with(|(_, r)| r.recv().unwrap().run()); 48 | } 49 | } 50 | 51 | fn main() { 52 | let val = Rc::new(Cell::new(0)); 53 | 54 | // Run a future that increments a non-`Send` value. 55 | run({ 56 | let val = val.clone(); 57 | async move { 58 | // Spawn a future that increments the value. 
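// The nested `spawn` pushes its task onto the same thread-local queue, so it is
// polled by `run`'s loop on this thread; `task.await` below waits for it to finish.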
59 | let task = spawn({ 60 | let val = val.clone(); 61 | async move { 62 | val.set(dbg!(val.get()) + 1); 63 | } 64 | }); 65 | 66 | val.set(dbg!(val.get()) + 1); 67 | task.await; 68 | } 69 | }); 70 | 71 | // The value should be 2 at the end of the program. 72 | dbg!(val.get()); 73 | } 74 | -------------------------------------------------------------------------------- /examples/spawn-on-thread.rs: -------------------------------------------------------------------------------- 1 | //! A function that runs a future to completion on a dedicated thread. 2 | 3 | use std::future::Future; 4 | use std::sync::Arc; 5 | use std::thread; 6 | 7 | use async_task::Task; 8 | use smol::future; 9 | 10 | /// Spawns a future on a new dedicated thread. 11 | /// 12 | /// The returned task can be used to await the output of the future. 13 | fn spawn_on_thread(future: F) -> Task 14 | where 15 | F: Future + Send + 'static, 16 | T: Send + 'static, 17 | { 18 | // Create a channel that holds the task when it is scheduled for running. 19 | let (sender, receiver) = flume::unbounded(); 20 | let sender = Arc::new(sender); 21 | let s = Arc::downgrade(&sender); 22 | 23 | // Wrap the future into one that disconnects the channel on completion. 24 | let future = async move { 25 | // When the inner future completes, the sender gets dropped and disconnects the channel. 26 | let _sender = sender; 27 | future.await 28 | }; 29 | 30 | // Create a task that is scheduled by sending it into the channel. 31 | let schedule = move |runnable| s.upgrade().unwrap().send(runnable).unwrap(); 32 | let (runnable, task) = async_task::spawn(future, schedule); 33 | 34 | // Schedule the task by sending it into the channel. 35 | runnable.schedule(); 36 | 37 | // Spawn a thread running the task to completion. 38 | thread::spawn(move || { 39 | // Keep taking the task from the channel and running it until completion. 40 | for runnable in receiver { 41 | runnable.run(); 42 | } 43 | }); 44 | 45 | task 46 | } 47 | 48 | fn main() { 49 | // Spawn a future on a dedicated thread. 50 | future::block_on(spawn_on_thread(async { 51 | println!("Hello, world!"); 52 | })); 53 | } 54 | -------------------------------------------------------------------------------- /examples/spawn.rs: -------------------------------------------------------------------------------- 1 | //! A simple single-threaded executor. 2 | 3 | use std::future::Future; 4 | use std::panic::catch_unwind; 5 | use std::thread; 6 | 7 | use async_task::{Runnable, Task}; 8 | use once_cell::sync::Lazy; 9 | use smol::future; 10 | 11 | /// Spawns a future on the executor. 12 | fn spawn(future: F) -> Task 13 | where 14 | F: Future + Send + 'static, 15 | T: Send + 'static, 16 | { 17 | // A queue that holds scheduled tasks. 18 | static QUEUE: Lazy> = Lazy::new(|| { 19 | let (sender, receiver) = flume::unbounded::(); 20 | 21 | // Start the executor thread. 22 | thread::spawn(|| { 23 | for runnable in receiver { 24 | // Ignore panics inside futures. 25 | let _ignore_panic = catch_unwind(|| runnable.run()); 26 | } 27 | }); 28 | 29 | sender 30 | }); 31 | 32 | // Create a task that is scheduled by pushing it into the queue. 33 | let schedule = |runnable| QUEUE.send(runnable).unwrap(); 34 | let (runnable, task) = async_task::spawn(future, schedule); 35 | 36 | // Schedule the task by pushing it into the queue. 37 | runnable.schedule(); 38 | 39 | task 40 | } 41 | 42 | fn main() { 43 | // Spawn a future and await its result. 
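// `spawn` sends the runnable to the lazily started executor thread, and
// `future::block_on(task)` blocks the main thread until the future has completed.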
44 | let task = spawn(async { 45 | println!("Hello, world!"); 46 | }); 47 | future::block_on(task); 48 | } 49 | -------------------------------------------------------------------------------- /examples/with-metadata.rs: -------------------------------------------------------------------------------- 1 | //! A single threaded executor that uses shortest-job-first scheduling. 2 | 3 | use std::cell::RefCell; 4 | use std::collections::BinaryHeap; 5 | use std::pin::Pin; 6 | use std::task::{Context, Poll}; 7 | use std::thread; 8 | use std::time::{Duration, Instant}; 9 | use std::{cell::Cell, future::Future}; 10 | 11 | use async_task::{Builder, Runnable, Task}; 12 | use pin_project_lite::pin_project; 13 | use smol::{channel, future}; 14 | 15 | struct ByDuration(Runnable); 16 | 17 | impl ByDuration { 18 | fn duration(&self) -> Duration { 19 | self.0.metadata().inner.get() 20 | } 21 | } 22 | 23 | impl PartialEq for ByDuration { 24 | fn eq(&self, other: &Self) -> bool { 25 | self.duration() == other.duration() 26 | } 27 | } 28 | 29 | impl Eq for ByDuration {} 30 | 31 | impl PartialOrd for ByDuration { 32 | fn partial_cmp(&self, other: &Self) -> Option { 33 | Some(self.cmp(other)) 34 | } 35 | } 36 | 37 | impl Ord for ByDuration { 38 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 39 | self.duration().cmp(&other.duration()).reverse() 40 | } 41 | } 42 | 43 | pin_project! { 44 | #[must_use = "futures do nothing unless you `.await` or poll them"] 45 | struct MeasureRuntime<'a, F> { 46 | #[pin] 47 | f: F, 48 | duration: &'a Cell 49 | } 50 | } 51 | 52 | impl Future for MeasureRuntime<'_, F> { 53 | type Output = F::Output; 54 | 55 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 56 | let this = self.project(); 57 | let duration_cell: &Cell = this.duration; 58 | let start = Instant::now(); 59 | let res = F::poll(this.f, cx); 60 | let new_duration = Instant::now() - start; 61 | duration_cell.set(duration_cell.get() / 2 + new_duration / 2); 62 | res 63 | } 64 | } 65 | 66 | pub struct DurationMetadata { 67 | inner: Cell, 68 | } 69 | 70 | thread_local! { 71 | // A queue that holds scheduled tasks. 72 | static QUEUE: RefCell> = RefCell::new(BinaryHeap::new()); 73 | } 74 | 75 | fn make_future_fn<'a, F>( 76 | future: F, 77 | ) -> impl (FnOnce(&'a DurationMetadata) -> MeasureRuntime<'a, F>) { 78 | move |duration_meta| MeasureRuntime { 79 | f: future, 80 | duration: &duration_meta.inner, 81 | } 82 | } 83 | 84 | fn ensure_safe_schedule(f: F) -> F { 85 | f 86 | } 87 | 88 | /// Spawns a future on the executor. 89 | pub fn spawn(future: F) -> Task 90 | where 91 | F: Future + 'static, 92 | T: 'static, 93 | { 94 | let spawn_thread_id = thread::current().id(); 95 | // Create a task that is scheduled by pushing it into the queue. 96 | let schedule = ensure_safe_schedule(move |runnable| { 97 | if thread::current().id() != spawn_thread_id { 98 | panic!("Task would be run on a different thread than spawned on."); 99 | } 100 | QUEUE.with(move |queue| queue.borrow_mut().push(ByDuration(runnable))); 101 | }); 102 | let future_fn = make_future_fn(future); 103 | let (runnable, task) = unsafe { 104 | Builder::new() 105 | .metadata(DurationMetadata { 106 | inner: Cell::new(Duration::default()), 107 | }) 108 | .spawn_unchecked(future_fn, schedule) 109 | }; 110 | 111 | // Schedule the task by pushing it into the queue. 
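// The schedule closure above refuses to run on any other thread, so this initial
// push (and every later wake-up) lands in this thread's `QUEUE` binary heap.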
112 | runnable.schedule(); 113 | 114 | task 115 | } 116 | 117 | pub fn block_on(future: F) 118 | where 119 | F: Future + 'static, 120 | { 121 | let task = spawn(future); 122 | while !task.is_finished() { 123 | let Some(runnable) = QUEUE.with(|queue| queue.borrow_mut().pop()) else { 124 | thread::yield_now(); 125 | continue; 126 | }; 127 | runnable.0.run(); 128 | } 129 | } 130 | 131 | fn main() { 132 | // Spawn a future and await its result. 133 | block_on(async { 134 | let (sender, receiver) = channel::bounded(1); 135 | let world = spawn(async move { 136 | receiver.recv().await.unwrap(); 137 | println!("world.") 138 | }); 139 | let hello = spawn(async move { 140 | sender.send(()).await.unwrap(); 141 | print!("Hello, ") 142 | }); 143 | future::zip(hello, world).await; 144 | }); 145 | } 146 | -------------------------------------------------------------------------------- /src/header.rs: -------------------------------------------------------------------------------- 1 | use core::cell::UnsafeCell; 2 | use core::fmt; 3 | use core::task::Waker; 4 | 5 | #[cfg(not(feature = "portable-atomic"))] 6 | use core::sync::atomic::AtomicUsize; 7 | use core::sync::atomic::Ordering; 8 | #[cfg(feature = "portable-atomic")] 9 | use portable_atomic::AtomicUsize; 10 | 11 | use crate::raw::TaskVTable; 12 | use crate::state::*; 13 | use crate::utils::abort_on_panic; 14 | 15 | /// The header of a task. 16 | /// 17 | /// This header is stored in memory at the beginning of the heap-allocated task. 18 | pub(crate) struct Header { 19 | /// Current state of the task. 20 | /// 21 | /// Contains flags representing the current state and the reference count. 22 | pub(crate) state: AtomicUsize, 23 | 24 | /// The task that is blocked on the `Task` handle. 25 | /// 26 | /// This waker needs to be woken up once the task completes or is closed. 27 | pub(crate) awaiter: UnsafeCell>, 28 | 29 | /// The virtual table. 30 | /// 31 | /// In addition to the actual waker virtual table, it also contains pointers to several other 32 | /// methods necessary for bookkeeping the heap-allocated task. 33 | pub(crate) vtable: &'static TaskVTable, 34 | 35 | /// Metadata associated with the task. 36 | /// 37 | /// This metadata may be provided to the user. 38 | pub(crate) metadata: M, 39 | 40 | /// Whether or not a panic that occurs in the task should be propagated. 41 | #[cfg(feature = "std")] 42 | pub(crate) propagate_panic: bool, 43 | } 44 | 45 | impl Header { 46 | /// Notifies the awaiter blocked on this task. 47 | /// 48 | /// If the awaiter is the same as the current waker, it will not be notified. 49 | #[inline] 50 | pub(crate) fn notify(&self, current: Option<&Waker>) { 51 | if let Some(w) = self.take(current) { 52 | abort_on_panic(|| w.wake()); 53 | } 54 | } 55 | 56 | /// Takes the awaiter blocked on this task. 57 | /// 58 | /// If there is no awaiter or if it is the same as the current waker, returns `None`. 59 | #[inline] 60 | pub(crate) fn take(&self, current: Option<&Waker>) -> Option { 61 | // Set the bit indicating that the task is notifying its awaiter. 62 | let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel); 63 | 64 | // If the task was not notifying or registering an awaiter... 65 | if state & (NOTIFYING | REGISTERING) == 0 { 66 | // Take the waker out. 67 | let waker = unsafe { (*self.awaiter.get()).take() }; 68 | 69 | // Unset the bit indicating that the task is notifying its awaiter. 
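// The AWAITER bit is cleared along with NOTIFYING because the waker slot
// has just been emptied.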
70 | self.state 71 | .fetch_and(!NOTIFYING & !AWAITER, Ordering::Release); 72 | 73 | // Finally, notify the waker if it's different from the current waker. 74 | if let Some(w) = waker { 75 | match current { 76 | None => return Some(w), 77 | Some(c) if !w.will_wake(c) => return Some(w), 78 | Some(_) => abort_on_panic(|| drop(w)), 79 | } 80 | } 81 | } 82 | 83 | None 84 | } 85 | 86 | /// Registers a new awaiter blocked on this task. 87 | /// 88 | /// This method is called when `Task` is polled and it has not yet completed. 89 | #[inline] 90 | pub(crate) fn register(&self, waker: &Waker) { 91 | // Load the state and synchronize with it. 92 | let mut state = self.state.fetch_or(0, Ordering::Acquire); 93 | 94 | loop { 95 | // There can't be two concurrent registrations because `Task` can only be polled 96 | // by a unique pinned reference. 97 | debug_assert!(state & REGISTERING == 0); 98 | 99 | // If we're in the notifying state at this moment, just wake and return without 100 | // registering. 101 | if state & NOTIFYING != 0 { 102 | abort_on_panic(|| waker.wake_by_ref()); 103 | return; 104 | } 105 | 106 | // Mark the state to let other threads know we're registering a new awaiter. 107 | match self.state.compare_exchange_weak( 108 | state, 109 | state | REGISTERING, 110 | Ordering::AcqRel, 111 | Ordering::Acquire, 112 | ) { 113 | Ok(_) => { 114 | state |= REGISTERING; 115 | break; 116 | } 117 | Err(s) => state = s, 118 | } 119 | } 120 | 121 | // Put the waker into the awaiter field. 122 | unsafe { 123 | abort_on_panic(|| (*self.awaiter.get()) = Some(waker.clone())); 124 | } 125 | 126 | // This variable will contain the newly registered waker if a notification comes in before 127 | // we complete registration. 128 | let mut waker = None; 129 | 130 | loop { 131 | // If there was a notification, take the waker out of the awaiter field. 132 | if state & NOTIFYING != 0 { 133 | if let Some(w) = unsafe { (*self.awaiter.get()).take() } { 134 | abort_on_panic(|| waker = Some(w)); 135 | } 136 | } 137 | 138 | // The new state is not being notified nor registered, but there might or might not be 139 | // an awaiter depending on whether there was a concurrent notification. 140 | let new = if waker.is_none() { 141 | (state & !NOTIFYING & !REGISTERING) | AWAITER 142 | } else { 143 | state & !NOTIFYING & !REGISTERING & !AWAITER 144 | }; 145 | 146 | match self 147 | .state 148 | .compare_exchange_weak(state, new, Ordering::AcqRel, Ordering::Acquire) 149 | { 150 | Ok(_) => break, 151 | Err(s) => state = s, 152 | } 153 | } 154 | 155 | // If there was a notification during registration, wake the awaiter now. 156 | if let Some(w) = waker { 157 | abort_on_panic(|| w.wake()); 158 | } 159 | } 160 | } 161 | 162 | impl fmt::Debug for Header { 163 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 164 | let state = self.state.load(Ordering::SeqCst); 165 | 166 | f.debug_struct("Header") 167 | .field("scheduled", &(state & SCHEDULED != 0)) 168 | .field("running", &(state & RUNNING != 0)) 169 | .field("completed", &(state & COMPLETED != 0)) 170 | .field("closed", &(state & CLOSED != 0)) 171 | .field("awaiter", &(state & AWAITER != 0)) 172 | .field("task", &(state & TASK != 0)) 173 | .field("ref_count", &(state / REFERENCE)) 174 | .field("metadata", &self.metadata) 175 | .finish() 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
Task abstraction for building executors. 2 | //! 3 | //! To spawn a future onto an executor, we first need to allocate it on the heap and keep some 4 | //! state attached to it. The state indicates whether the future is ready for polling, waiting to 5 | //! be woken up, or completed. Such a stateful future is called a *task*. 6 | //! 7 | //! All executors have a queue that holds scheduled tasks: 8 | //! 9 | //! ``` 10 | //! let (sender, receiver) = flume::unbounded(); 11 | //! # 12 | //! # // A future that will get spawned. 13 | //! # let future = async { 1 + 2 }; 14 | //! # 15 | //! # // A function that schedules the task when it gets woken up. 16 | //! # let schedule = move |runnable| sender.send(runnable).unwrap(); 17 | //! # 18 | //! # // Create a task. 19 | //! # let (runnable, task) = async_task::spawn(future, schedule); 20 | //! ``` 21 | //! 22 | //! A task is created using either [`spawn()`], [`spawn_local()`], or [`spawn_unchecked()`] which 23 | //! return a [`Runnable`] and a [`Task`]: 24 | //! 25 | //! ``` 26 | //! # let (sender, receiver) = flume::unbounded(); 27 | //! # 28 | //! // A future that will be spawned. 29 | //! let future = async { 1 + 2 }; 30 | //! 31 | //! // A function that schedules the task when it gets woken up. 32 | //! let schedule = move |runnable| sender.send(runnable).unwrap(); 33 | //! 34 | //! // Construct a task. 35 | //! let (runnable, task) = async_task::spawn(future, schedule); 36 | //! 37 | //! // Push the task into the queue by invoking its schedule function. 38 | //! runnable.schedule(); 39 | //! ``` 40 | //! 41 | //! The [`Runnable`] is used to poll the task's future, and the [`Task`] is used to await its 42 | //! output. 43 | //! 44 | //! Finally, we need a loop that takes scheduled tasks from the queue and runs them: 45 | //! 46 | //! ```no_run 47 | //! # let (sender, receiver) = flume::unbounded(); 48 | //! # 49 | //! # // A future that will get spawned. 50 | //! # let future = async { 1 + 2 }; 51 | //! # 52 | //! # // A function that schedules the task when it gets woken up. 53 | //! # let schedule = move |runnable| sender.send(runnable).unwrap(); 54 | //! # 55 | //! # // Create a task. 56 | //! # let (runnable, task) = async_task::spawn(future, schedule); 57 | //! # 58 | //! # // Push the task into the queue by invoking its schedule function. 59 | //! # runnable.schedule(); 60 | //! # 61 | //! for runnable in receiver { 62 | //! runnable.run(); 63 | //! } 64 | //! ``` 65 | //! 66 | //! Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] 67 | //! vanishes and only reappears when its [`Waker`][`core::task::Waker`] wakes the task, thus 68 | //! scheduling it to be run again. 69 | 70 | #![no_std] 71 | #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] 72 | #![doc(test(attr(deny(rust_2018_idioms, warnings))))] 73 | #![doc(test(attr(allow(unused_extern_crates, unused_variables))))] 74 | #![doc( 75 | html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" 76 | )] 77 | #![doc( 78 | html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png" 79 | )] 80 | 81 | extern crate alloc; 82 | #[cfg(feature = "std")] 83 | extern crate std; 84 | 85 | /// We can't use `?` in const contexts yet, so this macro acts 86 | /// as a workaround. 87 | macro_rules! 
leap { 88 | ($x: expr) => {{ 89 | match ($x) { 90 | Some(val) => val, 91 | None => return None, 92 | } 93 | }}; 94 | } 95 | 96 | macro_rules! leap_unwrap { 97 | ($x: expr) => {{ 98 | match ($x) { 99 | Some(val) => val, 100 | None => panic!("called `Option::unwrap()` on a `None` value"), 101 | } 102 | }}; 103 | } 104 | 105 | mod header; 106 | mod raw; 107 | mod runnable; 108 | mod state; 109 | mod task; 110 | mod utils; 111 | 112 | pub use crate::runnable::{ 113 | spawn, spawn_unchecked, Builder, Runnable, Schedule, ScheduleInfo, WithInfo, 114 | }; 115 | pub use crate::task::{FallibleTask, Task}; 116 | 117 | #[cfg(feature = "std")] 118 | pub use crate::runnable::spawn_local; 119 | -------------------------------------------------------------------------------- /src/raw.rs: -------------------------------------------------------------------------------- 1 | use alloc::alloc::Layout as StdLayout; 2 | use core::cell::UnsafeCell; 3 | use core::future::Future; 4 | use core::mem::{self, ManuallyDrop}; 5 | use core::pin::Pin; 6 | use core::ptr::NonNull; 7 | use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; 8 | 9 | #[cfg(not(feature = "portable-atomic"))] 10 | use core::sync::atomic::AtomicUsize; 11 | use core::sync::atomic::Ordering; 12 | #[cfg(feature = "portable-atomic")] 13 | use portable_atomic::AtomicUsize; 14 | 15 | use crate::header::Header; 16 | use crate::runnable::{Schedule, ScheduleInfo}; 17 | use crate::state::*; 18 | use crate::utils::{abort, abort_on_panic, max, Layout}; 19 | use crate::Runnable; 20 | 21 | #[cfg(feature = "std")] 22 | pub(crate) type Panic = alloc::boxed::Box; 23 | 24 | #[cfg(not(feature = "std"))] 25 | pub(crate) type Panic = core::convert::Infallible; 26 | 27 | /// The vtable for a task. 28 | pub(crate) struct TaskVTable { 29 | /// Schedules the task. 30 | pub(crate) schedule: unsafe fn(*const (), ScheduleInfo), 31 | 32 | /// Drops the future inside the task. 33 | pub(crate) drop_future: unsafe fn(*const ()), 34 | 35 | /// Returns a pointer to the output stored after completion. 36 | pub(crate) get_output: unsafe fn(*const ()) -> *const (), 37 | 38 | /// Drops the task reference (`Runnable` or `Waker`). 39 | pub(crate) drop_ref: unsafe fn(ptr: *const ()), 40 | 41 | /// Destroys the task. 42 | pub(crate) destroy: unsafe fn(*const ()), 43 | 44 | /// Runs the task. 45 | pub(crate) run: unsafe fn(*const ()) -> bool, 46 | 47 | /// Creates a new waker associated with the task. 48 | pub(crate) clone_waker: unsafe fn(ptr: *const ()) -> RawWaker, 49 | 50 | /// The memory layout of the task. This information enables 51 | /// debuggers to decode raw task memory blobs. Do not remove 52 | /// the field, even if it appears to be unused. 53 | #[allow(unused)] 54 | pub(crate) layout_info: &'static TaskLayout, 55 | } 56 | 57 | /// Memory layout of a task. 58 | /// 59 | /// This struct contains the following information: 60 | /// 61 | /// 1. How to allocate and deallocate the task. 62 | /// 2. How to access the fields inside the task. 63 | #[derive(Clone, Copy)] 64 | pub(crate) struct TaskLayout { 65 | /// Memory layout of the whole task. 66 | pub(crate) layout: StdLayout, 67 | 68 | /// Offset into the task at which the schedule function is stored. 69 | pub(crate) offset_s: usize, 70 | 71 | /// Offset into the task at which the future is stored. 72 | pub(crate) offset_f: usize, 73 | 74 | /// Offset into the task at which the output is stored. 75 | pub(crate) offset_r: usize, 76 | } 77 | 78 | /// Raw pointers to the fields inside a task. 
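///
/// All of these pointers point into the single allocation described by `TaskLayout`:
/// the header sits at the start, followed by the schedule function and a union that
/// holds either the future or, once it completes, its output.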
79 | pub(crate) struct RawTask { 80 | /// The task header. 81 | pub(crate) header: *const Header, 82 | 83 | /// The schedule function. 84 | pub(crate) schedule: *const S, 85 | 86 | /// The future. 87 | pub(crate) future: *mut F, 88 | 89 | /// The output of the future. 90 | pub(crate) output: *mut Result, 91 | } 92 | 93 | impl Copy for RawTask {} 94 | 95 | impl Clone for RawTask { 96 | fn clone(&self) -> Self { 97 | *self 98 | } 99 | } 100 | 101 | impl RawTask { 102 | const TASK_LAYOUT: TaskLayout = Self::eval_task_layout(); 103 | 104 | /// Computes the memory layout for a task. 105 | #[inline] 106 | const fn eval_task_layout() -> TaskLayout { 107 | // Compute the layouts for `Header`, `S`, `F`, and `T`. 108 | let layout_header = Layout::new::>(); 109 | let layout_s = Layout::new::(); 110 | let layout_f = Layout::new::(); 111 | let layout_r = Layout::new::>(); 112 | 113 | // Compute the layout for `union { F, T }`. 114 | let size_union = max(layout_f.size(), layout_r.size()); 115 | let align_union = max(layout_f.align(), layout_r.align()); 116 | let layout_union = Layout::from_size_align(size_union, align_union); 117 | 118 | // Compute the layout for `Header` followed `S` and `union { F, T }`. 119 | let layout = layout_header; 120 | let (layout, offset_s) = leap_unwrap!(layout.extend(layout_s)); 121 | let (layout, offset_union) = leap_unwrap!(layout.extend(layout_union)); 122 | let offset_f = offset_union; 123 | let offset_r = offset_union; 124 | 125 | TaskLayout { 126 | layout: unsafe { layout.into_std() }, 127 | offset_s, 128 | offset_f, 129 | offset_r, 130 | } 131 | } 132 | } 133 | 134 | impl RawTask 135 | where 136 | F: Future, 137 | S: Schedule, 138 | { 139 | const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( 140 | Self::clone_waker, 141 | Self::wake, 142 | Self::wake_by_ref, 143 | Self::drop_waker, 144 | ); 145 | 146 | /// Allocates a task with the given `future` and `schedule` function. 147 | /// 148 | /// It is assumed that initially only the `Runnable` and the `Task` exist. 149 | pub(crate) fn allocate<'a, Gen: FnOnce(&'a M) -> F>( 150 | future: Gen, 151 | schedule: S, 152 | builder: crate::Builder, 153 | ) -> NonNull<()> 154 | where 155 | F: 'a, 156 | M: 'a, 157 | { 158 | // Compute the layout of the task for allocation. Abort if the computation fails. 159 | // 160 | // n.b. notgull: task_layout now automatically aborts instead of panicking 161 | let task_layout = Self::task_layout(); 162 | 163 | unsafe { 164 | // Allocate enough space for the entire task. 165 | let ptr = match NonNull::new(alloc::alloc::alloc(task_layout.layout) as *mut ()) { 166 | None => abort(), 167 | Some(p) => p, 168 | }; 169 | 170 | let raw = Self::from_ptr(ptr.as_ptr()); 171 | 172 | let crate::Builder { 173 | metadata, 174 | #[cfg(feature = "std")] 175 | propagate_panic, 176 | } = builder; 177 | 178 | // Write the header as the first field of the task. 179 | (raw.header as *mut Header).write(Header { 180 | state: AtomicUsize::new(SCHEDULED | TASK | REFERENCE), 181 | awaiter: UnsafeCell::new(None), 182 | vtable: &TaskVTable { 183 | schedule: Self::schedule, 184 | drop_future: Self::drop_future, 185 | get_output: Self::get_output, 186 | drop_ref: Self::drop_ref, 187 | destroy: Self::destroy, 188 | run: Self::run, 189 | clone_waker: Self::clone_waker, 190 | layout_info: &Self::TASK_LAYOUT, 191 | }, 192 | metadata, 193 | #[cfg(feature = "std")] 194 | propagate_panic, 195 | }); 196 | 197 | // Write the schedule function as the third field of the task. 
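// `raw.schedule` points at `offset_s` in the same allocation, just past the header.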
198 | (raw.schedule as *mut S).write(schedule); 199 | 200 | // Generate the future, now that the metadata has been pinned in place. 201 | let future = abort_on_panic(|| future(&(*raw.header).metadata)); 202 | 203 | // Write the future as the fourth field of the task. 204 | raw.future.write(future); 205 | 206 | ptr 207 | } 208 | } 209 | 210 | /// Creates a `RawTask` from a raw task pointer. 211 | #[inline] 212 | pub(crate) fn from_ptr(ptr: *const ()) -> Self { 213 | let task_layout = Self::task_layout(); 214 | let p = ptr as *const u8; 215 | 216 | unsafe { 217 | Self { 218 | header: p as *const Header, 219 | schedule: p.add(task_layout.offset_s) as *const S, 220 | future: p.add(task_layout.offset_f) as *mut F, 221 | output: p.add(task_layout.offset_r) as *mut Result, 222 | } 223 | } 224 | } 225 | 226 | /// Returns the layout of the task. 227 | #[inline] 228 | fn task_layout() -> TaskLayout { 229 | Self::TASK_LAYOUT 230 | } 231 | /// Wakes a waker. 232 | unsafe fn wake(ptr: *const ()) { 233 | // This is just an optimization. If the schedule function has captured variables, then 234 | // we'll do less reference counting if we wake the waker by reference and then drop it. 235 | if mem::size_of::() > 0 { 236 | Self::wake_by_ref(ptr); 237 | Self::drop_waker(ptr); 238 | return; 239 | } 240 | 241 | let raw = Self::from_ptr(ptr); 242 | 243 | let mut state = (*raw.header).state.load(Ordering::Acquire); 244 | 245 | loop { 246 | // If the task is completed or closed, it can't be woken up. 247 | if state & (COMPLETED | CLOSED) != 0 { 248 | // Drop the waker. 249 | Self::drop_waker(ptr); 250 | break; 251 | } 252 | 253 | // If the task is already scheduled, we just need to synchronize with the thread that 254 | // will run the task by "publishing" our current view of the memory. 255 | if state & SCHEDULED != 0 { 256 | // Update the state without actually modifying it. 257 | match (*raw.header).state.compare_exchange_weak( 258 | state, 259 | state, 260 | Ordering::AcqRel, 261 | Ordering::Acquire, 262 | ) { 263 | Ok(_) => { 264 | // Drop the waker. 265 | Self::drop_waker(ptr); 266 | break; 267 | } 268 | Err(s) => state = s, 269 | } 270 | } else { 271 | // Mark the task as scheduled. 272 | match (*raw.header).state.compare_exchange_weak( 273 | state, 274 | state | SCHEDULED, 275 | Ordering::AcqRel, 276 | Ordering::Acquire, 277 | ) { 278 | Ok(_) => { 279 | // If the task is not yet scheduled and isn't currently running, now is the 280 | // time to schedule it. 281 | if state & RUNNING == 0 { 282 | // Schedule the task. 283 | Self::schedule(ptr, ScheduleInfo::new(false)); 284 | } else { 285 | // Drop the waker. 286 | Self::drop_waker(ptr); 287 | } 288 | 289 | break; 290 | } 291 | Err(s) => state = s, 292 | } 293 | } 294 | } 295 | } 296 | 297 | /// Wakes a waker by reference. 298 | unsafe fn wake_by_ref(ptr: *const ()) { 299 | let raw = Self::from_ptr(ptr); 300 | 301 | let mut state = (*raw.header).state.load(Ordering::Acquire); 302 | 303 | loop { 304 | // If the task is completed or closed, it can't be woken up. 305 | if state & (COMPLETED | CLOSED) != 0 { 306 | break; 307 | } 308 | 309 | // If the task is already scheduled, we just need to synchronize with the thread that 310 | // will run the task by "publishing" our current view of the memory. 311 | if state & SCHEDULED != 0 { 312 | // Update the state without actually modifying it. 
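// A successful compare-exchange of the state with itself provides the Acquire/Release
// synchronization we need without changing any of the flags.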
313 | match (*raw.header).state.compare_exchange_weak( 314 | state, 315 | state, 316 | Ordering::AcqRel, 317 | Ordering::Acquire, 318 | ) { 319 | Ok(_) => break, 320 | Err(s) => state = s, 321 | } 322 | } else { 323 | // If the task is not running, we can schedule right away. 324 | let new = if state & RUNNING == 0 { 325 | (state | SCHEDULED) + REFERENCE 326 | } else { 327 | state | SCHEDULED 328 | }; 329 | 330 | // Mark the task as scheduled. 331 | match (*raw.header).state.compare_exchange_weak( 332 | state, 333 | new, 334 | Ordering::AcqRel, 335 | Ordering::Acquire, 336 | ) { 337 | Ok(_) => { 338 | // If the task is not running, now is the time to schedule. 339 | if state & RUNNING == 0 { 340 | // If the reference count overflowed, abort. 341 | if state > isize::MAX as usize { 342 | abort(); 343 | } 344 | 345 | // Schedule the task. There is no need to call `Self::schedule(ptr)` 346 | // because the schedule function cannot be destroyed while the waker is 347 | // still alive. 348 | let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ())); 349 | (*raw.schedule).schedule(task, ScheduleInfo::new(false)); 350 | } 351 | 352 | break; 353 | } 354 | Err(s) => state = s, 355 | } 356 | } 357 | } 358 | } 359 | 360 | /// Clones a waker. 361 | unsafe fn clone_waker(ptr: *const ()) -> RawWaker { 362 | let raw = Self::from_ptr(ptr); 363 | 364 | // Increment the reference count. With any kind of reference-counted data structure, 365 | // relaxed ordering is appropriate when incrementing the counter. 366 | let state = (*raw.header).state.fetch_add(REFERENCE, Ordering::Relaxed); 367 | 368 | // If the reference count overflowed, abort. 369 | if state > isize::MAX as usize { 370 | abort(); 371 | } 372 | 373 | RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE) 374 | } 375 | 376 | /// Drops a waker. 377 | /// 378 | /// This function will decrement the reference count. If it drops down to zero, the associated 379 | /// `Task` has been dropped too, and the task has not been completed, then it will get 380 | /// scheduled one more time so that its future gets dropped by the executor. 381 | #[inline] 382 | unsafe fn drop_waker(ptr: *const ()) { 383 | let raw = Self::from_ptr(ptr); 384 | 385 | // Decrement the reference count. 386 | let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; 387 | 388 | // If this was the last reference to the task and the `Task` has been dropped too, 389 | // then we need to decide how to destroy the task. 390 | if new & !(REFERENCE - 1) == 0 && new & TASK == 0 { 391 | if new & (COMPLETED | CLOSED) == 0 { 392 | // If the task was not completed nor closed, close it and schedule one more time so 393 | // that its future gets dropped by the executor. 394 | (*raw.header) 395 | .state 396 | .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release); 397 | Self::schedule(ptr, ScheduleInfo::new(false)); 398 | } else { 399 | // Otherwise, destroy the task right away. 400 | Self::destroy(ptr); 401 | } 402 | } 403 | } 404 | 405 | /// Drops a task reference (`Runnable` or `Waker`). 406 | /// 407 | /// This function will decrement the reference count. If it drops down to zero and the 408 | /// associated `Task` handle has been dropped too, then the task gets destroyed. 409 | #[inline] 410 | unsafe fn drop_ref(ptr: *const ()) { 411 | let raw = Self::from_ptr(ptr); 412 | 413 | // Decrement the reference count. 
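// Subtracting `REFERENCE` removes exactly one reference from the counter packed into
// the upper bits of `state`, leaving the flag bits below it untouched.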
414 | let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; 415 | 416 | // If this was the last reference to the task and the `Task` has been dropped too, 417 | // then destroy the task. 418 | if new & !(REFERENCE - 1) == 0 && new & TASK == 0 { 419 | Self::destroy(ptr); 420 | } 421 | } 422 | 423 | /// Schedules a task for running. 424 | /// 425 | /// This function doesn't modify the state of the task. It only passes the task reference to 426 | /// its schedule function. 427 | unsafe fn schedule(ptr: *const (), info: ScheduleInfo) { 428 | let raw = Self::from_ptr(ptr); 429 | 430 | // If the schedule function has captured variables, create a temporary waker that prevents 431 | // the task from getting deallocated while the function is being invoked. 432 | let _waker; 433 | if mem::size_of::() > 0 { 434 | _waker = Waker::from_raw(Self::clone_waker(ptr)); 435 | } 436 | 437 | let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ())); 438 | (*raw.schedule).schedule(task, info); 439 | } 440 | 441 | /// Drops the future inside a task. 442 | #[inline] 443 | unsafe fn drop_future(ptr: *const ()) { 444 | let raw = Self::from_ptr(ptr); 445 | 446 | // We need a safeguard against panics because the destructor can panic. 447 | abort_on_panic(|| { 448 | raw.future.drop_in_place(); 449 | }) 450 | } 451 | 452 | /// Returns a pointer to the output inside a task. 453 | unsafe fn get_output(ptr: *const ()) -> *const () { 454 | let raw = Self::from_ptr(ptr); 455 | raw.output as *const () 456 | } 457 | 458 | /// Cleans up task's resources and deallocates it. 459 | /// 460 | /// The schedule function will be dropped, and the task will then get deallocated. 461 | /// The task must be closed before this function is called. 462 | #[inline] 463 | unsafe fn destroy(ptr: *const ()) { 464 | let raw = Self::from_ptr(ptr); 465 | let task_layout = Self::task_layout(); 466 | 467 | // We need a safeguard against panics because destructors can panic. 468 | abort_on_panic(|| { 469 | // Drop the header along with the metadata. 470 | (raw.header as *mut Header).drop_in_place(); 471 | 472 | // Drop the schedule function. 473 | (raw.schedule as *mut S).drop_in_place(); 474 | }); 475 | 476 | // Finally, deallocate the memory reserved by the task. 477 | alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout); 478 | } 479 | 480 | /// Runs a task. 481 | /// 482 | /// If polling its future panics, the task will be closed and the panic will be propagated into 483 | /// the caller. 484 | unsafe fn run(ptr: *const ()) -> bool { 485 | let raw = Self::from_ptr(ptr); 486 | 487 | // Create a context from the raw task pointer and the vtable inside the its header. 488 | let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE))); 489 | let cx = &mut Context::from_waker(&waker); 490 | 491 | let mut state = (*raw.header).state.load(Ordering::Acquire); 492 | 493 | // Update the task's state before polling its future. 494 | loop { 495 | // If the task has already been closed, drop the task reference and return. 496 | if state & CLOSED != 0 { 497 | // Drop the future. 498 | Self::drop_future(ptr); 499 | 500 | // Mark the task as unscheduled. 501 | let state = (*raw.header).state.fetch_and(!SCHEDULED, Ordering::AcqRel); 502 | 503 | // Take the awaiter out. 504 | let mut awaiter = None; 505 | if state & AWAITER != 0 { 506 | awaiter = (*raw.header).take(None); 507 | } 508 | 509 | // Drop the task reference. 
510 | Self::drop_ref(ptr); 511 | 512 | // Notify the awaiter that the future has been dropped. 513 | if let Some(w) = awaiter { 514 | abort_on_panic(|| w.wake()); 515 | } 516 | return false; 517 | } 518 | 519 | // Mark the task as unscheduled and running. 520 | match (*raw.header).state.compare_exchange_weak( 521 | state, 522 | (state & !SCHEDULED) | RUNNING, 523 | Ordering::AcqRel, 524 | Ordering::Acquire, 525 | ) { 526 | Ok(_) => { 527 | // Update the state because we're continuing with polling the future. 528 | state = (state & !SCHEDULED) | RUNNING; 529 | break; 530 | } 531 | Err(s) => state = s, 532 | } 533 | } 534 | 535 | // Poll the inner future, but surround it with a guard that closes the task in case polling 536 | // panics. 537 | // If available, we should also try to catch the panic so that it is propagated correctly. 538 | let guard = Guard(raw); 539 | 540 | // Panic propagation is not available for no_std. 541 | #[cfg(not(feature = "std"))] 542 | let poll = ::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok); 543 | 544 | #[cfg(feature = "std")] 545 | let poll = { 546 | // Check if we should propagate panics. 547 | if (*raw.header).propagate_panic { 548 | // Use catch_unwind to catch the panic. 549 | match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { 550 | ::poll(Pin::new_unchecked(&mut *raw.future), cx) 551 | })) { 552 | Ok(Poll::Ready(v)) => Poll::Ready(Ok(v)), 553 | Ok(Poll::Pending) => Poll::Pending, 554 | Err(e) => Poll::Ready(Err(e)), 555 | } 556 | } else { 557 | ::poll(Pin::new_unchecked(&mut *raw.future), cx).map(Ok) 558 | } 559 | }; 560 | 561 | mem::forget(guard); 562 | 563 | match poll { 564 | Poll::Ready(out) => { 565 | // Replace the future with its output. 566 | Self::drop_future(ptr); 567 | raw.output.write(out); 568 | 569 | // The task is now completed. 570 | loop { 571 | // If the `Task` is dropped, we'll need to close it and drop the output. 572 | let new = if state & TASK == 0 { 573 | (state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED 574 | } else { 575 | (state & !RUNNING & !SCHEDULED) | COMPLETED 576 | }; 577 | 578 | // Mark the task as not running and completed. 579 | match (*raw.header).state.compare_exchange_weak( 580 | state, 581 | new, 582 | Ordering::AcqRel, 583 | Ordering::Acquire, 584 | ) { 585 | Ok(_) => { 586 | // If the `Task` is dropped or if the task was closed while running, 587 | // now it's time to drop the output. 588 | if state & TASK == 0 || state & CLOSED != 0 { 589 | // Drop the output. 590 | abort_on_panic(|| raw.output.drop_in_place()); 591 | } 592 | 593 | // Take the awaiter out. 594 | let mut awaiter = None; 595 | if state & AWAITER != 0 { 596 | awaiter = (*raw.header).take(None); 597 | } 598 | 599 | // Drop the task reference. 600 | Self::drop_ref(ptr); 601 | 602 | // Notify the awaiter that the future has been dropped. 603 | if let Some(w) = awaiter { 604 | abort_on_panic(|| w.wake()); 605 | } 606 | break; 607 | } 608 | Err(s) => state = s, 609 | } 610 | } 611 | } 612 | Poll::Pending => { 613 | let mut future_dropped = false; 614 | 615 | // The task is still not completed. 616 | loop { 617 | // If the task was closed while running, we'll need to unschedule in case it 618 | // was woken up and then destroy it. 
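// Illustrative note (added commentary, not part of the original source): this
// `Poll::Pending` branch resolves in one of three ways below. If the task was
// closed while running, we drop the future, release our reference, and notify
// the awaiter. If the task was woken while running, we hand it back to the
// schedule function with `woken_while_running == true` and `run()` returns
// `true`. Otherwise we simply release the `Runnable`'s reference and `run()`
// returns `false`.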
619 | let new = if state & CLOSED != 0 { 620 | state & !RUNNING & !SCHEDULED 621 | } else { 622 | state & !RUNNING 623 | }; 624 | 625 | if state & CLOSED != 0 && !future_dropped { 626 | // The thread that closed the task didn't drop the future because it was 627 | // running so now it's our responsibility to do so. 628 | Self::drop_future(ptr); 629 | future_dropped = true; 630 | } 631 | 632 | // Mark the task as not running. 633 | match (*raw.header).state.compare_exchange_weak( 634 | state, 635 | new, 636 | Ordering::AcqRel, 637 | Ordering::Acquire, 638 | ) { 639 | Ok(state) => { 640 | // If the task was closed while running, we need to notify the awaiter. 641 | // If the task was woken up while running, we need to schedule it. 642 | // Otherwise, we just drop the task reference. 643 | if state & CLOSED != 0 { 644 | // Take the awaiter out. 645 | let mut awaiter = None; 646 | if state & AWAITER != 0 { 647 | awaiter = (*raw.header).take(None); 648 | } 649 | 650 | // Drop the task reference. 651 | Self::drop_ref(ptr); 652 | 653 | // Notify the awaiter that the future has been dropped. 654 | if let Some(w) = awaiter { 655 | abort_on_panic(|| w.wake()); 656 | } 657 | } else if state & SCHEDULED != 0 { 658 | // The thread that woke the task up didn't reschedule it because 659 | // it was running so now it's our responsibility to do so. 660 | Self::schedule(ptr, ScheduleInfo::new(true)); 661 | return true; 662 | } else { 663 | // Drop the task reference. 664 | Self::drop_ref(ptr); 665 | } 666 | break; 667 | } 668 | Err(s) => state = s, 669 | } 670 | } 671 | } 672 | } 673 | 674 | return false; 675 | 676 | /// A guard that closes the task if polling its future panics. 677 | struct Guard(RawTask) 678 | where 679 | F: Future, 680 | S: Schedule; 681 | 682 | impl Drop for Guard 683 | where 684 | F: Future, 685 | S: Schedule, 686 | { 687 | fn drop(&mut self) { 688 | let raw = self.0; 689 | let ptr = raw.header as *const (); 690 | 691 | unsafe { 692 | let mut state = (*raw.header).state.load(Ordering::Acquire); 693 | 694 | loop { 695 | // If the task was closed while running, then unschedule it, drop its 696 | // future, and drop the task reference. 697 | if state & CLOSED != 0 { 698 | // The thread that closed the task didn't drop the future because it 699 | // was running so now it's our responsibility to do so. 700 | RawTask::::drop_future(ptr); 701 | 702 | // Mark the task as not running and not scheduled. 703 | (*raw.header) 704 | .state 705 | .fetch_and(!RUNNING & !SCHEDULED, Ordering::AcqRel); 706 | 707 | // Take the awaiter out. 708 | let mut awaiter = None; 709 | if state & AWAITER != 0 { 710 | awaiter = (*raw.header).take(None); 711 | } 712 | 713 | // Drop the task reference. 714 | RawTask::::drop_ref(ptr); 715 | 716 | // Notify the awaiter that the future has been dropped. 717 | if let Some(w) = awaiter { 718 | abort_on_panic(|| w.wake()); 719 | } 720 | break; 721 | } 722 | 723 | // Mark the task as not running, not scheduled, and closed. 724 | match (*raw.header).state.compare_exchange_weak( 725 | state, 726 | (state & !RUNNING & !SCHEDULED) | CLOSED, 727 | Ordering::AcqRel, 728 | Ordering::Acquire, 729 | ) { 730 | Ok(state) => { 731 | // Drop the future because the task is now closed. 732 | RawTask::::drop_future(ptr); 733 | 734 | // Take the awaiter out. 735 | let mut awaiter = None; 736 | if state & AWAITER != 0 { 737 | awaiter = (*raw.header).take(None); 738 | } 739 | 740 | // Drop the task reference. 
741 | RawTask::::drop_ref(ptr); 742 | 743 | // Notify the awaiter that the future has been dropped. 744 | if let Some(w) = awaiter { 745 | abort_on_panic(|| w.wake()); 746 | } 747 | break; 748 | } 749 | Err(s) => state = s, 750 | } 751 | } 752 | } 753 | } 754 | } 755 | } 756 | } 757 | -------------------------------------------------------------------------------- /src/runnable.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::future::Future; 3 | use core::marker::PhantomData; 4 | use core::mem; 5 | use core::ptr::NonNull; 6 | use core::sync::atomic::Ordering; 7 | use core::task::Waker; 8 | 9 | use alloc::boxed::Box; 10 | 11 | use crate::header::Header; 12 | use crate::raw::RawTask; 13 | use crate::state::*; 14 | use crate::Task; 15 | 16 | mod sealed { 17 | use super::*; 18 | pub trait Sealed {} 19 | 20 | impl Sealed for F where F: Fn(Runnable) {} 21 | 22 | impl Sealed for WithInfo where F: Fn(Runnable, ScheduleInfo) {} 23 | } 24 | 25 | /// A builder that creates a new task. 26 | #[derive(Debug)] 27 | pub struct Builder { 28 | /// The metadata associated with the task. 29 | pub(crate) metadata: M, 30 | 31 | /// Whether or not a panic that occurs in the task should be propagated. 32 | #[cfg(feature = "std")] 33 | pub(crate) propagate_panic: bool, 34 | } 35 | 36 | impl Default for Builder { 37 | fn default() -> Self { 38 | Builder::new().metadata(M::default()) 39 | } 40 | } 41 | 42 | /// Extra scheduling information that can be passed to the scheduling function. 43 | /// 44 | /// The data source of this struct is directly from the actual implementation 45 | /// of the crate itself, different from [`Runnable`]'s metadata, which is 46 | /// managed by the caller. 47 | /// 48 | /// # Examples 49 | /// 50 | /// ``` 51 | /// use async_task::{Runnable, ScheduleInfo, WithInfo}; 52 | /// use std::sync::{Arc, Mutex}; 53 | /// 54 | /// // The future inside the task. 55 | /// let future = async { 56 | /// println!("Hello, world!"); 57 | /// }; 58 | /// 59 | /// // If the task gets woken up while running, it will be sent into this channel. 60 | /// let (s, r) = flume::unbounded(); 61 | /// // Otherwise, it will be placed into this slot. 62 | /// let lifo_slot = Arc::new(Mutex::new(None)); 63 | /// let schedule = move |runnable: Runnable, info: ScheduleInfo| { 64 | /// if info.woken_while_running { 65 | /// s.send(runnable).unwrap() 66 | /// } else { 67 | /// let last = lifo_slot.lock().unwrap().replace(runnable); 68 | /// if let Some(last) = last { 69 | /// s.send(last).unwrap() 70 | /// } 71 | /// } 72 | /// }; 73 | /// 74 | /// // Create the actual scheduler to be spawned with some future. 75 | /// let scheduler = WithInfo(schedule); 76 | /// // Create a task with the future and the scheduler. 77 | /// let (runnable, task) = async_task::spawn(future, scheduler); 78 | /// ``` 79 | #[derive(Debug, Copy, Clone)] 80 | #[non_exhaustive] 81 | pub struct ScheduleInfo { 82 | /// Indicates whether the task gets woken up while running. 83 | /// 84 | /// It is set to true usually because the task has yielded itself to the 85 | /// scheduler. 86 | pub woken_while_running: bool, 87 | } 88 | 89 | impl ScheduleInfo { 90 | pub(crate) fn new(woken_while_running: bool) -> Self { 91 | ScheduleInfo { 92 | woken_while_running, 93 | } 94 | } 95 | } 96 | 97 | /// The trait for scheduling functions. 98 | pub trait Schedule: sealed::Sealed { 99 | /// The actual scheduling procedure. 
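///
/// Any closure that takes a [`Runnable`] (i.e. implements `Fn(Runnable)`) gets this
/// trait automatically, so the simplest scheduler is just a channel sender. A minimal
/// sketch, reusing the `flume`-based pattern from the other examples in this crate:
///
/// ```
/// let (s, r) = flume::unbounded();
/// let schedule = move |runnable| s.send(runnable).unwrap();
///
/// let (runnable, _task) = async_task::spawn(async {}, schedule);
/// runnable.schedule();
///
/// // The closure pushed the task onto the queue.
/// assert_eq!(r.len(), 1);
/// ```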
100 | fn schedule(&self, runnable: Runnable, info: ScheduleInfo); 101 | } 102 | 103 | impl Schedule for F 104 | where 105 | F: Fn(Runnable), 106 | { 107 | fn schedule(&self, runnable: Runnable, _: ScheduleInfo) { 108 | self(runnable) 109 | } 110 | } 111 | 112 | /// Pass a scheduling function with more scheduling information - a.k.a. 113 | /// [`ScheduleInfo`]. 114 | /// 115 | /// Sometimes, it's useful to pass the runnable's state directly to the 116 | /// scheduling function, such as whether it's woken up while running. The 117 | /// scheduler can thus use the information to determine its scheduling 118 | /// strategy. 119 | /// 120 | /// The data source of [`ScheduleInfo`] is directly from the actual 121 | /// implementation of the crate itself, different from [`Runnable`]'s metadata, 122 | /// which is managed by the caller. 123 | /// 124 | /// # Examples 125 | /// 126 | /// ``` 127 | /// use async_task::{ScheduleInfo, WithInfo}; 128 | /// use std::sync::{Arc, Mutex}; 129 | /// 130 | /// // The future inside the task. 131 | /// let future = async { 132 | /// println!("Hello, world!"); 133 | /// }; 134 | /// 135 | /// // If the task gets woken up while running, it will be sent into this channel. 136 | /// let (s, r) = flume::unbounded(); 137 | /// // Otherwise, it will be placed into this slot. 138 | /// let lifo_slot = Arc::new(Mutex::new(None)); 139 | /// let schedule = move |runnable, info: ScheduleInfo| { 140 | /// if info.woken_while_running { 141 | /// s.send(runnable).unwrap() 142 | /// } else { 143 | /// let last = lifo_slot.lock().unwrap().replace(runnable); 144 | /// if let Some(last) = last { 145 | /// s.send(last).unwrap() 146 | /// } 147 | /// } 148 | /// }; 149 | /// 150 | /// // Create a task with the future and the schedule function. 151 | /// let (runnable, task) = async_task::spawn(future, WithInfo(schedule)); 152 | /// ``` 153 | #[derive(Debug)] 154 | pub struct WithInfo(pub F); 155 | 156 | impl From for WithInfo { 157 | fn from(value: F) -> Self { 158 | WithInfo(value) 159 | } 160 | } 161 | 162 | impl Schedule for WithInfo 163 | where 164 | F: Fn(Runnable, ScheduleInfo), 165 | { 166 | fn schedule(&self, runnable: Runnable, info: ScheduleInfo) { 167 | (self.0)(runnable, info) 168 | } 169 | } 170 | 171 | impl Builder<()> { 172 | /// Creates a new task builder. 173 | /// 174 | /// By default, this task builder has no metadata. Use the [`metadata`] method to 175 | /// set the metadata. 176 | /// 177 | /// # Examples 178 | /// 179 | /// ``` 180 | /// use async_task::Builder; 181 | /// 182 | /// let (runnable, task) = Builder::new().spawn(|()| async {}, |_| {}); 183 | /// ``` 184 | pub fn new() -> Builder<()> { 185 | Builder { 186 | metadata: (), 187 | #[cfg(feature = "std")] 188 | propagate_panic: false, 189 | } 190 | } 191 | 192 | /// Adds metadata to the task. 193 | /// 194 | /// In certain cases, it may be useful to associate some metadata with a task. For instance, 195 | /// you may want to associate a name with a task, or a priority for a priority queue. This 196 | /// method allows the user to attach arbitrary metadata to a task that is available through 197 | /// the [`Runnable`] or the [`Task`]. 198 | /// 199 | /// # Examples 200 | /// 201 | /// This example creates an executor that associates a "priority" number with each task, and 202 | /// then runs the tasks in order of priority. 
203 | /// 204 | /// ``` 205 | /// use async_task::{Builder, Runnable}; 206 | /// use once_cell::sync::Lazy; 207 | /// use std::cmp; 208 | /// use std::collections::BinaryHeap; 209 | /// use std::sync::Mutex; 210 | /// 211 | /// # smol::future::block_on(async { 212 | /// /// A wrapper around a `Runnable` that implements `Ord` so that it can be used in a 213 | /// /// priority queue. 214 | /// struct TaskWrapper(Runnable); 215 | /// 216 | /// impl PartialEq for TaskWrapper { 217 | /// fn eq(&self, other: &Self) -> bool { 218 | /// self.0.metadata() == other.0.metadata() 219 | /// } 220 | /// } 221 | /// 222 | /// impl Eq for TaskWrapper {} 223 | /// 224 | /// impl PartialOrd for TaskWrapper { 225 | /// fn partial_cmp(&self, other: &Self) -> Option { 226 | /// Some(self.cmp(other)) 227 | /// } 228 | /// } 229 | /// 230 | /// impl Ord for TaskWrapper { 231 | /// fn cmp(&self, other: &Self) -> cmp::Ordering { 232 | /// self.0.metadata().cmp(other.0.metadata()) 233 | /// } 234 | /// } 235 | /// 236 | /// static EXECUTOR: Lazy>> = Lazy::new(|| { 237 | /// Mutex::new(BinaryHeap::new()) 238 | /// }); 239 | /// 240 | /// let schedule = |runnable| { 241 | /// EXECUTOR.lock().unwrap().push(TaskWrapper(runnable)); 242 | /// }; 243 | /// 244 | /// // Spawn a few tasks with different priorities. 245 | /// let spawn_task = move |priority| { 246 | /// let (runnable, task) = Builder::new().metadata(priority).spawn( 247 | /// move |_| async move { priority }, 248 | /// schedule, 249 | /// ); 250 | /// runnable.schedule(); 251 | /// task 252 | /// }; 253 | /// 254 | /// let t1 = spawn_task(1); 255 | /// let t2 = spawn_task(2); 256 | /// let t3 = spawn_task(3); 257 | /// 258 | /// // Run the tasks in order of priority. 259 | /// let mut metadata_seen = vec![]; 260 | /// while let Some(TaskWrapper(runnable)) = EXECUTOR.lock().unwrap().pop() { 261 | /// metadata_seen.push(*runnable.metadata()); 262 | /// runnable.run(); 263 | /// } 264 | /// 265 | /// assert_eq!(metadata_seen, vec![3, 2, 1]); 266 | /// assert_eq!(t1.await, 1); 267 | /// assert_eq!(t2.await, 2); 268 | /// assert_eq!(t3.await, 3); 269 | /// # }); 270 | /// ``` 271 | pub fn metadata(self, metadata: M) -> Builder { 272 | Builder { 273 | metadata, 274 | #[cfg(feature = "std")] 275 | propagate_panic: self.propagate_panic, 276 | } 277 | } 278 | } 279 | 280 | impl Builder { 281 | /// Propagates panics that occur in the task. 282 | /// 283 | /// When this is `true`, panics that occur in the task will be propagated to the caller of 284 | /// the [`Task`]. When this is false, no special action is taken when a panic occurs in the 285 | /// task, meaning that the caller of [`Runnable::run`] will observe a panic. 286 | /// 287 | /// This is only available when the `std` feature is enabled. By default, this is `false`. 
288 | /// 289 | /// # Examples 290 | /// 291 | /// ``` 292 | /// use async_task::Builder; 293 | /// use futures_lite::future::poll_fn; 294 | /// use std::future::Future; 295 | /// use std::panic; 296 | /// use std::pin::Pin; 297 | /// use std::task::{Context, Poll}; 298 | /// 299 | /// fn did_panic(f: F) -> bool { 300 | /// panic::catch_unwind(panic::AssertUnwindSafe(f)).is_err() 301 | /// } 302 | /// 303 | /// # smol::future::block_on(async { 304 | /// let (runnable1, mut task1) = Builder::new() 305 | /// .propagate_panic(true) 306 | /// .spawn(|()| async move { panic!() }, |_| {}); 307 | /// 308 | /// let (runnable2, mut task2) = Builder::new() 309 | /// .propagate_panic(false) 310 | /// .spawn(|()| async move { panic!() }, |_| {}); 311 | /// 312 | /// assert!(!did_panic(|| { runnable1.run(); })); 313 | /// assert!(did_panic(|| { runnable2.run(); })); 314 | /// 315 | /// let waker = poll_fn(|cx| Poll::Ready(cx.waker().clone())).await; 316 | /// let mut cx = Context::from_waker(&waker); 317 | /// assert!(did_panic(|| { let _ = Pin::new(&mut task1).poll(&mut cx); })); 318 | /// assert!(did_panic(|| { let _ = Pin::new(&mut task2).poll(&mut cx); })); 319 | /// # }); 320 | /// ``` 321 | #[cfg(feature = "std")] 322 | pub fn propagate_panic(self, propagate_panic: bool) -> Builder { 323 | Builder { 324 | metadata: self.metadata, 325 | propagate_panic, 326 | } 327 | } 328 | 329 | /// Creates a new task. 330 | /// 331 | /// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its 332 | /// output. 333 | /// 334 | /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] 335 | /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run 336 | /// again. 337 | /// 338 | /// When the task is woken, its [`Runnable`] is passed to the `schedule` function. 339 | /// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it 340 | /// should push it into a task queue so that it can be processed later. 341 | /// 342 | /// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider 343 | /// using [`spawn_local()`] or [`spawn_unchecked()`] instead. 344 | /// 345 | /// # Examples 346 | /// 347 | /// ``` 348 | /// use async_task::Builder; 349 | /// 350 | /// // The future inside the task. 351 | /// let future = async { 352 | /// println!("Hello, world!"); 353 | /// }; 354 | /// 355 | /// // A function that schedules the task when it gets woken up. 356 | /// let (s, r) = flume::unbounded(); 357 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 358 | /// 359 | /// // Create a task with the future and the schedule function. 360 | /// let (runnable, task) = Builder::new().spawn(|()| future, schedule); 361 | /// ``` 362 | pub fn spawn(self, future: F, schedule: S) -> (Runnable, Task) 363 | where 364 | F: FnOnce(&M) -> Fut, 365 | Fut: Future + Send + 'static, 366 | Fut::Output: Send + 'static, 367 | S: Schedule + Send + Sync + 'static, 368 | { 369 | unsafe { self.spawn_unchecked(future, schedule) } 370 | } 371 | 372 | /// Creates a new thread-local task. 373 | /// 374 | /// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the 375 | /// [`Runnable`] is used or dropped on another thread, a panic will occur. 376 | /// 377 | /// This function is only available when the `std` feature for this crate is enabled. 
378 | /// 379 | /// # Examples 380 | /// 381 | /// ``` 382 | /// use async_task::{Builder, Runnable}; 383 | /// use flume::{Receiver, Sender}; 384 | /// use std::rc::Rc; 385 | /// 386 | /// thread_local! { 387 | /// // A queue that holds scheduled tasks. 388 | /// static QUEUE: (Sender, Receiver) = flume::unbounded(); 389 | /// } 390 | /// 391 | /// // Make a non-Send future. 392 | /// let msg: Rc = "Hello, world!".into(); 393 | /// let future = async move { 394 | /// println!("{}", msg); 395 | /// }; 396 | /// 397 | /// // A function that schedules the task when it gets woken up. 398 | /// let s = QUEUE.with(|(s, _)| s.clone()); 399 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 400 | /// 401 | /// // Create a task with the future and the schedule function. 402 | /// let (runnable, task) = Builder::new().spawn_local(move |()| future, schedule); 403 | /// ``` 404 | #[cfg(feature = "std")] 405 | pub fn spawn_local( 406 | self, 407 | future: F, 408 | schedule: S, 409 | ) -> (Runnable, Task) 410 | where 411 | F: FnOnce(&M) -> Fut, 412 | Fut: Future + 'static, 413 | Fut::Output: 'static, 414 | S: Schedule + Send + Sync + 'static, 415 | { 416 | use std::mem::ManuallyDrop; 417 | use std::pin::Pin; 418 | use std::task::{Context, Poll}; 419 | use std::thread::{self, ThreadId}; 420 | 421 | #[inline] 422 | fn thread_id() -> ThreadId { 423 | std::thread_local! { 424 | static ID: ThreadId = thread::current().id(); 425 | } 426 | ID.try_with(|id| *id) 427 | .unwrap_or_else(|_| thread::current().id()) 428 | } 429 | 430 | struct Checked { 431 | id: ThreadId, 432 | inner: ManuallyDrop, 433 | } 434 | 435 | impl Drop for Checked { 436 | fn drop(&mut self) { 437 | assert!( 438 | self.id == thread_id(), 439 | "local task dropped by a thread that didn't spawn it" 440 | ); 441 | unsafe { 442 | ManuallyDrop::drop(&mut self.inner); 443 | } 444 | } 445 | } 446 | 447 | impl Future for Checked { 448 | type Output = F::Output; 449 | 450 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 451 | assert!( 452 | self.id == thread_id(), 453 | "local task polled by a thread that didn't spawn it" 454 | ); 455 | unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) } 456 | } 457 | } 458 | 459 | // Wrap the future into one that checks which thread it's on. 460 | let future = move |meta| { 461 | let future = future(meta); 462 | 463 | Checked { 464 | id: thread_id(), 465 | inner: ManuallyDrop::new(future), 466 | } 467 | }; 468 | 469 | unsafe { self.spawn_unchecked(future, schedule) } 470 | } 471 | 472 | /// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. 473 | /// 474 | /// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and 475 | /// `'static` on `future` and `schedule`. 476 | /// 477 | /// # Safety 478 | /// 479 | /// - If `Fut` is not [`Send`], its [`Runnable`] must be used and dropped on the original 480 | /// thread. 481 | /// - If `Fut` is not `'static`, borrowed non-metadata variables must outlive its [`Runnable`]. 482 | /// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] 483 | /// must be used and dropped on the original thread. 484 | /// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the 485 | /// [`Runnable`]'s [`Waker`]. 486 | /// 487 | /// # Examples 488 | /// 489 | /// ``` 490 | /// use async_task::Builder; 491 | /// 492 | /// // The future inside the task. 
493 | /// let future = async { 494 | /// println!("Hello, world!"); 495 | /// }; 496 | /// 497 | /// // If the task gets woken up, it will be sent into this channel. 498 | /// let (s, r) = flume::unbounded(); 499 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 500 | /// 501 | /// // Create a task with the future and the schedule function. 502 | /// let (runnable, task) = unsafe { Builder::new().spawn_unchecked(move |()| future, schedule) }; 503 | /// ``` 504 | pub unsafe fn spawn_unchecked<'a, F, Fut, S>( 505 | self, 506 | future: F, 507 | schedule: S, 508 | ) -> (Runnable, Task) 509 | where 510 | F: FnOnce(&'a M) -> Fut, 511 | Fut: Future + 'a, 512 | S: Schedule, 513 | M: 'a, 514 | { 515 | // Allocate large futures on the heap. 516 | let ptr = if mem::size_of::() >= 2048 { 517 | let future = |meta| { 518 | let future = future(meta); 519 | Box::pin(future) 520 | }; 521 | 522 | RawTask::<_, Fut::Output, S, M>::allocate(future, schedule, self) 523 | } else { 524 | RawTask::::allocate(future, schedule, self) 525 | }; 526 | 527 | let runnable = Runnable::from_raw(ptr); 528 | let task = Task { 529 | ptr, 530 | _marker: PhantomData, 531 | }; 532 | (runnable, task) 533 | } 534 | } 535 | 536 | /// Creates a new task. 537 | /// 538 | /// The returned [`Runnable`] is used to poll the `future`, and the [`Task`] is used to await its 539 | /// output. 540 | /// 541 | /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] 542 | /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run 543 | /// again. 544 | /// 545 | /// When the task is woken, its [`Runnable`] is passed to the `schedule` function. 546 | /// The `schedule` function should not attempt to run the [`Runnable`] nor to drop it. Instead, it 547 | /// should push it into a task queue so that it can be processed later. 548 | /// 549 | /// If you need to spawn a future that does not implement [`Send`] or isn't `'static`, consider 550 | /// using [`spawn_local()`] or [`spawn_unchecked()`] instead. 551 | /// 552 | /// # Examples 553 | /// 554 | /// ``` 555 | /// // The future inside the task. 556 | /// let future = async { 557 | /// println!("Hello, world!"); 558 | /// }; 559 | /// 560 | /// // A function that schedules the task when it gets woken up. 561 | /// let (s, r) = flume::unbounded(); 562 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 563 | /// 564 | /// // Create a task with the future and the schedule function. 565 | /// let (runnable, task) = async_task::spawn(future, schedule); 566 | /// ``` 567 | pub fn spawn(future: F, schedule: S) -> (Runnable, Task) 568 | where 569 | F: Future + Send + 'static, 570 | F::Output: Send + 'static, 571 | S: Schedule + Send + Sync + 'static, 572 | { 573 | unsafe { spawn_unchecked(future, schedule) } 574 | } 575 | 576 | /// Creates a new thread-local task. 577 | /// 578 | /// This function is same as [`spawn()`], except it does not require [`Send`] on `future`. If the 579 | /// [`Runnable`] is used or dropped on another thread, a panic will occur. 580 | /// 581 | /// This function is only available when the `std` feature for this crate is enabled. 582 | /// 583 | /// # Examples 584 | /// 585 | /// ``` 586 | /// use async_task::Runnable; 587 | /// use flume::{Receiver, Sender}; 588 | /// use std::rc::Rc; 589 | /// 590 | /// thread_local! { 591 | /// // A queue that holds scheduled tasks. 
592 | /// static QUEUE: (Sender, Receiver) = flume::unbounded(); 593 | /// } 594 | /// 595 | /// // Make a non-Send future. 596 | /// let msg: Rc = "Hello, world!".into(); 597 | /// let future = async move { 598 | /// println!("{}", msg); 599 | /// }; 600 | /// 601 | /// // A function that schedules the task when it gets woken up. 602 | /// let s = QUEUE.with(|(s, _)| s.clone()); 603 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 604 | /// 605 | /// // Create a task with the future and the schedule function. 606 | /// let (runnable, task) = async_task::spawn_local(future, schedule); 607 | /// ``` 608 | #[cfg(feature = "std")] 609 | pub fn spawn_local(future: F, schedule: S) -> (Runnable, Task) 610 | where 611 | F: Future + 'static, 612 | F::Output: 'static, 613 | S: Schedule + Send + Sync + 'static, 614 | { 615 | Builder::new().spawn_local(move |()| future, schedule) 616 | } 617 | 618 | /// Creates a new task without [`Send`], [`Sync`], and `'static` bounds. 619 | /// 620 | /// This function is same as [`spawn()`], except it does not require [`Send`], [`Sync`], and 621 | /// `'static` on `future` and `schedule`. 622 | /// 623 | /// # Safety 624 | /// 625 | /// - If `future` is not [`Send`], its [`Runnable`] must be used and dropped on the original 626 | /// thread. 627 | /// - If `future` is not `'static`, borrowed variables must outlive its [`Runnable`]. 628 | /// - If `schedule` is not [`Send`] and [`Sync`], all instances of the [`Runnable`]'s [`Waker`] 629 | /// must be used and dropped on the original thread. 630 | /// - If `schedule` is not `'static`, borrowed variables must outlive all instances of the 631 | /// [`Runnable`]'s [`Waker`]. 632 | /// 633 | /// # Examples 634 | /// 635 | /// ``` 636 | /// // The future inside the task. 637 | /// let future = async { 638 | /// println!("Hello, world!"); 639 | /// }; 640 | /// 641 | /// // If the task gets woken up, it will be sent into this channel. 642 | /// let (s, r) = flume::unbounded(); 643 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 644 | /// 645 | /// // Create a task with the future and the schedule function. 646 | /// let (runnable, task) = unsafe { async_task::spawn_unchecked(future, schedule) }; 647 | /// ``` 648 | pub unsafe fn spawn_unchecked(future: F, schedule: S) -> (Runnable, Task) 649 | where 650 | F: Future, 651 | S: Schedule, 652 | { 653 | Builder::new().spawn_unchecked(move |()| future, schedule) 654 | } 655 | 656 | /// A handle to a runnable task. 657 | /// 658 | /// Every spawned task has a single [`Runnable`] handle, which only exists when the task is 659 | /// scheduled for running. 660 | /// 661 | /// Method [`run()`][`Runnable::run()`] polls the task's future once. Then, the [`Runnable`] 662 | /// vanishes and only reappears when its [`Waker`] wakes the task, thus scheduling it to be run 663 | /// again. 664 | /// 665 | /// Dropping a [`Runnable`] cancels the task, which means its future won't be polled again, and 666 | /// awaiting the [`Task`] after that will result in a panic. 667 | /// 668 | /// # Examples 669 | /// 670 | /// ``` 671 | /// use async_task::Runnable; 672 | /// use once_cell::sync::Lazy; 673 | /// use std::{panic, thread}; 674 | /// 675 | /// // A simple executor. 
676 | /// static QUEUE: Lazy> = Lazy::new(|| { 677 | /// let (sender, receiver) = flume::unbounded::(); 678 | /// thread::spawn(|| { 679 | /// for runnable in receiver { 680 | /// let _ignore_panic = panic::catch_unwind(|| runnable.run()); 681 | /// } 682 | /// }); 683 | /// sender 684 | /// }); 685 | /// 686 | /// // Create a task with a simple future. 687 | /// let schedule = |runnable| QUEUE.send(runnable).unwrap(); 688 | /// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); 689 | /// 690 | /// // Schedule the task and await its output. 691 | /// runnable.schedule(); 692 | /// assert_eq!(smol::future::block_on(task), 3); 693 | /// ``` 694 | pub struct Runnable { 695 | /// A pointer to the heap-allocated task. 696 | pub(crate) ptr: NonNull<()>, 697 | 698 | /// A marker capturing generic type `M`. 699 | pub(crate) _marker: PhantomData, 700 | } 701 | 702 | unsafe impl Send for Runnable {} 703 | unsafe impl Sync for Runnable {} 704 | 705 | #[cfg(feature = "std")] 706 | impl std::panic::UnwindSafe for Runnable {} 707 | #[cfg(feature = "std")] 708 | impl std::panic::RefUnwindSafe for Runnable {} 709 | 710 | impl Runnable { 711 | /// Get the metadata associated with this task. 712 | /// 713 | /// Tasks can be created with a metadata object associated with them; by default, this 714 | /// is a `()` value. See the [`Builder::metadata()`] method for more information. 715 | pub fn metadata(&self) -> &M { 716 | &self.header().metadata 717 | } 718 | 719 | /// Schedules the task. 720 | /// 721 | /// This is a convenience method that passes the [`Runnable`] to the schedule function. 722 | /// 723 | /// # Examples 724 | /// 725 | /// ``` 726 | /// // A function that schedules the task when it gets woken up. 727 | /// let (s, r) = flume::unbounded(); 728 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 729 | /// 730 | /// // Create a task with a simple future and the schedule function. 731 | /// let (runnable, task) = async_task::spawn(async {}, schedule); 732 | /// 733 | /// // Schedule the task. 734 | /// assert_eq!(r.len(), 0); 735 | /// runnable.schedule(); 736 | /// assert_eq!(r.len(), 1); 737 | /// ``` 738 | pub fn schedule(self) { 739 | let ptr = self.ptr.as_ptr(); 740 | let header = ptr as *const Header; 741 | mem::forget(self); 742 | 743 | unsafe { 744 | ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); 745 | } 746 | } 747 | 748 | /// Runs the task by polling its future. 749 | /// 750 | /// Returns `true` if the task was woken while running, in which case the [`Runnable`] gets 751 | /// rescheduled at the end of this method invocation. Otherwise, returns `false` and the 752 | /// [`Runnable`] vanishes until the task is woken. 753 | /// The return value is just a hint: `true` usually indicates that the task has yielded, i.e. 754 | /// it woke itself and then gave the control back to the executor. 755 | /// 756 | /// If the [`Task`] handle was dropped or if [`cancel()`][`Task::cancel()`] was called, then 757 | /// this method simply destroys the task. 758 | /// 759 | /// If the polled future panics, this method propagates the panic, and awaiting the [`Task`] 760 | /// after that will also result in a panic. 761 | /// 762 | /// # Examples 763 | /// 764 | /// ``` 765 | /// // A function that schedules the task when it gets woken up. 766 | /// let (s, r) = flume::unbounded(); 767 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 768 | /// 769 | /// // Create a task with a simple future and the schedule function. 
770 | /// let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule); 771 | /// 772 | /// // Run the task and check its output. 773 | /// runnable.run(); 774 | /// assert_eq!(smol::future::block_on(task), 3); 775 | /// ``` 776 | pub fn run(self) -> bool { 777 | let ptr = self.ptr.as_ptr(); 778 | let header = ptr as *const Header; 779 | mem::forget(self); 780 | 781 | unsafe { ((*header).vtable.run)(ptr) } 782 | } 783 | 784 | /// Returns a waker associated with this task. 785 | /// 786 | /// # Examples 787 | /// 788 | /// ``` 789 | /// use smol::future; 790 | /// 791 | /// // A function that schedules the task when it gets woken up. 792 | /// let (s, r) = flume::unbounded(); 793 | /// let schedule = move |runnable| s.send(runnable).unwrap(); 794 | /// 795 | /// // Create a task with a simple future and the schedule function. 796 | /// let (runnable, task) = async_task::spawn(future::pending::<()>(), schedule); 797 | /// 798 | /// // Take a waker and run the task. 799 | /// let waker = runnable.waker(); 800 | /// runnable.run(); 801 | /// 802 | /// // Reschedule the task by waking it. 803 | /// assert_eq!(r.len(), 0); 804 | /// waker.wake(); 805 | /// assert_eq!(r.len(), 1); 806 | /// ``` 807 | pub fn waker(&self) -> Waker { 808 | let ptr = self.ptr.as_ptr(); 809 | let header = ptr as *const Header; 810 | 811 | unsafe { 812 | let raw_waker = ((*header).vtable.clone_waker)(ptr); 813 | Waker::from_raw(raw_waker) 814 | } 815 | } 816 | 817 | fn header(&self) -> &Header { 818 | unsafe { &*(self.ptr.as_ptr() as *const Header) } 819 | } 820 | 821 | /// Converts this task into a raw pointer. 822 | /// 823 | /// To avoid a memory leak the pointer must be converted back to a Runnable using [`Runnable::from_raw`][from_raw]. 824 | /// 825 | /// `into_raw` does not change the state of the [`Task`], but there is no guarantee that it will be in the same state after calling [`Runnable::from_raw`][from_raw], 826 | /// as the corresponding [`Task`] might have been dropped or cancelled. 827 | /// 828 | /// # Examples 829 | /// 830 | /// ```rust 831 | /// use async_task::{Runnable, spawn}; 832 | /// 833 | /// let (runnable, task) = spawn(async {}, |_| {}); 834 | /// let runnable_pointer = runnable.into_raw(); 835 | /// 836 | /// unsafe { 837 | /// // Convert back to an `Runnable` to prevent leak. 838 | /// let runnable = Runnable::<()>::from_raw(runnable_pointer); 839 | /// runnable.run(); 840 | /// // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe. 841 | /// } 842 | /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling! 843 | /// ``` 844 | /// [from_raw]: #method.from_raw 845 | pub fn into_raw(self) -> NonNull<()> { 846 | let ptr = self.ptr; 847 | mem::forget(self); 848 | ptr 849 | } 850 | 851 | /// Converts a raw pointer into a Runnable. 852 | /// 853 | /// # Safety 854 | /// 855 | /// This method should only be used with raw pointers returned from [`Runnable::into_raw`][into_raw]. 856 | /// It is not safe to use the provided pointer once it is passed to `from_raw`. 857 | /// Crucially, it is unsafe to call `from_raw` multiple times with the same pointer - even if the resulting [`Runnable`] is not used - 858 | /// as internally `async-task` uses reference counting. 859 | /// 860 | /// It is however safe to call [`Runnable::into_raw`][into_raw] on a [`Runnable`] created with `from_raw` or 861 | /// after the [`Task`] associated with a given Runnable has been dropped or cancelled. 
862 | /// 863 | /// The state of the [`Runnable`] created with `from_raw` is not specified. 864 | /// 865 | /// # Examples 866 | /// 867 | /// ```rust 868 | /// use async_task::{Runnable, spawn}; 869 | /// 870 | /// let (runnable, task) = spawn(async {}, |_| {}); 871 | /// let runnable_pointer = runnable.into_raw(); 872 | /// 873 | /// drop(task); 874 | /// unsafe { 875 | /// // Convert back to an `Runnable` to prevent leak. 876 | /// let runnable = Runnable::<()>::from_raw(runnable_pointer); 877 | /// let did_poll = runnable.run(); 878 | /// assert!(!did_poll); 879 | /// // Further calls to `Runnable::from_raw(runnable_pointer)` would be memory-unsafe. 880 | /// } 881 | /// // The memory was freed when `x` went out of scope above, so `runnable_pointer` is now dangling! 882 | /// ``` 883 | /// 884 | /// [into_raw]: #method.into_raw 885 | pub unsafe fn from_raw(ptr: NonNull<()>) -> Self { 886 | Self { 887 | ptr, 888 | _marker: Default::default(), 889 | } 890 | } 891 | } 892 | 893 | impl Drop for Runnable { 894 | fn drop(&mut self) { 895 | let ptr = self.ptr.as_ptr(); 896 | let header = self.header(); 897 | 898 | unsafe { 899 | let mut state = header.state.load(Ordering::Acquire); 900 | 901 | loop { 902 | // If the task has been completed or closed, it can't be canceled. 903 | if state & (COMPLETED | CLOSED) != 0 { 904 | break; 905 | } 906 | 907 | // Mark the task as closed. 908 | match header.state.compare_exchange_weak( 909 | state, 910 | state | CLOSED, 911 | Ordering::AcqRel, 912 | Ordering::Acquire, 913 | ) { 914 | Ok(_) => break, 915 | Err(s) => state = s, 916 | } 917 | } 918 | 919 | // Drop the future. 920 | (header.vtable.drop_future)(ptr); 921 | 922 | // Mark the task as unscheduled. 923 | let state = header.state.fetch_and(!SCHEDULED, Ordering::AcqRel); 924 | 925 | // Notify the awaiter that the future has been dropped. 926 | if state & AWAITER != 0 { 927 | (*header).notify(None); 928 | } 929 | 930 | // Drop the task reference. 931 | (header.vtable.drop_ref)(ptr); 932 | } 933 | } 934 | } 935 | 936 | impl fmt::Debug for Runnable { 937 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 938 | let ptr = self.ptr.as_ptr(); 939 | let header = ptr as *const Header; 940 | 941 | f.debug_struct("Runnable") 942 | .field("header", unsafe { &(*header) }) 943 | .finish() 944 | } 945 | } 946 | -------------------------------------------------------------------------------- /src/state.rs: -------------------------------------------------------------------------------- 1 | /// Set if the task is scheduled for running. 2 | /// 3 | /// A task is considered to be scheduled whenever its `Runnable` exists. 4 | /// 5 | /// This flag can't be set when the task is completed. However, it can be set while the task is 6 | /// running, in which case it will be rescheduled as soon as polling finishes. 7 | pub(crate) const SCHEDULED: usize = 1 << 0; 8 | 9 | /// Set if the task is running. 10 | /// 11 | /// A task is in running state while its future is being polled. 12 | /// 13 | /// This flag can't be set when the task is completed. However, it can be in scheduled state while 14 | /// it is running, in which case it will be rescheduled as soon as polling finishes. 15 | pub(crate) const RUNNING: usize = 1 << 1; 16 | 17 | /// Set if the task has been completed. 18 | /// 19 | /// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored 20 | /// inside the task until it becomes closed. In fact, `Task` picks up the output by marking 21 | /// the task as closed. 
22 | ///
23 | /// This flag can't be set when the task is scheduled or running.
24 | pub(crate) const COMPLETED: usize = 1 << 2;
25 |
26 | /// Set if the task is closed.
27 | ///
28 | /// If a task is closed, that means it's either canceled or its output has been consumed by the
29 | /// `Task`. A task becomes closed in the following cases:
30 | ///
31 | /// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`.
32 | /// 2. Its output gets awaited by the `Task`.
33 | /// 3. It panics while polling the future.
34 | /// 4. It is completed and the `Task` gets dropped.
35 | pub(crate) const CLOSED: usize = 1 << 3;
36 |
37 | /// Set if the `Task` still exists.
38 | ///
39 | /// The `Task` is a special case in that it is only tracked by this flag, while all other
40 | /// task references (`Runnable` and `Waker`s) are tracked by the reference count.
41 | pub(crate) const TASK: usize = 1 << 4;
42 |
43 | /// Set if the `Task` is awaiting the output.
44 | ///
45 | /// This flag is set while there is a registered awaiter of type `Waker` inside the task. When the
46 | /// task gets closed or completed, we need to wake the awaiter. This flag can be used as a fast
47 | /// check that tells us if we need to wake anyone.
48 | pub(crate) const AWAITER: usize = 1 << 5;
49 |
50 | /// Set if an awaiter is being registered.
51 | ///
52 | /// This flag is set when `Task` is polled and we are registering a new awaiter.
53 | pub(crate) const REGISTERING: usize = 1 << 6;
54 |
55 | /// Set if the awaiter is being notified.
56 | ///
57 | /// This flag is set when notifying the awaiter. If an awaiter is concurrently registered and
58 | /// notified, whichever side came first will take over the responsibility of resolving the race.
59 | pub(crate) const NOTIFYING: usize = 1 << 7;
60 |
61 | /// A single reference.
62 | ///
63 | /// The lower bits in the state contain various flags representing the task state, while the upper
64 | /// bits contain the reference count. The value of `REFERENCE` represents a single reference in the
65 | /// total reference count.
66 | ///
67 | /// Note that the reference counter only tracks the `Runnable` and `Waker`s. The `Task` is
68 | /// tracked separately by the `TASK` flag.
69 | pub(crate) const REFERENCE: usize = 1 << 8;
70 |
--------------------------------------------------------------------------------
/src/task.rs:
--------------------------------------------------------------------------------
1 | use core::fmt;
2 | use core::future::Future;
3 | use core::marker::PhantomData;
4 | use core::mem;
5 | use core::pin::Pin;
6 | use core::ptr::NonNull;
7 | use core::sync::atomic::Ordering;
8 | use core::task::{Context, Poll};
9 |
10 | use crate::header::Header;
11 | use crate::raw::Panic;
12 | use crate::runnable::ScheduleInfo;
13 | use crate::state::*;
14 |
15 | /// A spawned task.
16 | ///
17 | /// A [`Task`] can be awaited to retrieve the output of its future.
18 | ///
19 | /// Dropping a [`Task`] cancels it, which means its future won't be polled again. To drop the
20 | /// [`Task`] handle without canceling it, use [`detach()`][`Task::detach()`] instead. To cancel a
21 | /// task gracefully and wait until it is fully destroyed, use the [`cancel()`][Task::cancel()]
22 | /// method.
23 | ///
24 | /// Note that canceling a task actually wakes it and reschedules one last time. Then, the executor
25 | /// can destroy the task by simply dropping its [`Runnable`][`super::Runnable`] or by invoking
26 | /// [`run()`][`super::Runnable::run()`].
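///
/// For instance, with a manual schedule function you can observe that extra wake-up
/// directly. A minimal sketch, reusing the `flume`-based scheduling from the other
/// examples in this crate:
///
/// ```
/// use smol::future;
///
/// let (s, r) = flume::unbounded();
/// let schedule = move |runnable| s.send(runnable).unwrap();
/// let (runnable, task) = async_task::spawn(future::pending::<()>(), schedule);
///
/// // Poll once; the future is pending, so the task goes idle.
/// runnable.run();
/// assert_eq!(r.len(), 0);
///
/// // Dropping the `Task` cancels it: the task is scheduled one last time so that
/// // the executor can drop its future.
/// drop(task);
/// assert_eq!(r.len(), 1);
///
/// // Running the rescheduled `Runnable` disposes of the task.
/// let runnable = r.recv().unwrap();
/// assert!(!runnable.run());
/// ```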
27 | /// 28 | /// # Examples 29 | /// 30 | /// ``` 31 | /// use smol::{future, Executor}; 32 | /// use std::thread; 33 | /// 34 | /// let ex = Executor::new(); 35 | /// 36 | /// // Spawn a future onto the executor. 37 | /// let task = ex.spawn(async { 38 | /// println!("Hello from a task!"); 39 | /// 1 + 2 40 | /// }); 41 | /// 42 | /// // Run an executor thread. 43 | /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); 44 | /// 45 | /// // Wait for the task's output. 46 | /// assert_eq!(future::block_on(task), 3); 47 | /// ``` 48 | #[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"] 49 | pub struct Task { 50 | /// A raw task pointer. 51 | pub(crate) ptr: NonNull<()>, 52 | 53 | /// A marker capturing generic types `T` and `M`. 54 | pub(crate) _marker: PhantomData<(T, M)>, 55 | } 56 | 57 | unsafe impl Send for Task {} 58 | unsafe impl Sync for Task {} 59 | 60 | impl Unpin for Task {} 61 | 62 | #[cfg(feature = "std")] 63 | impl std::panic::UnwindSafe for Task {} 64 | #[cfg(feature = "std")] 65 | impl std::panic::RefUnwindSafe for Task {} 66 | 67 | impl Task { 68 | /// Detaches the task to let it keep running in the background. 69 | /// 70 | /// # Examples 71 | /// 72 | /// ``` 73 | /// use smol::{Executor, Timer}; 74 | /// use std::time::Duration; 75 | /// 76 | /// let ex = Executor::new(); 77 | /// 78 | /// // Spawn a daemon future. 79 | /// ex.spawn(async { 80 | /// loop { 81 | /// println!("I'm a daemon task looping forever."); 82 | /// Timer::after(Duration::from_secs(1)).await; 83 | /// } 84 | /// }) 85 | /// .detach(); 86 | /// ``` 87 | pub fn detach(self) { 88 | let mut this = self; 89 | let _out = this.set_detached(); 90 | mem::forget(this); 91 | } 92 | 93 | /// Cancels the task and waits for it to stop running. 94 | /// 95 | /// Returns the task's output if it was completed just before it got canceled, or [`None`] if 96 | /// it didn't complete. 97 | /// 98 | /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of 99 | /// canceling because it also waits for the task to stop running. 100 | /// 101 | /// # Examples 102 | /// 103 | /// ``` 104 | /// # if cfg!(miri) { return; } // Miri does not support epoll 105 | /// use smol::{future, Executor, Timer}; 106 | /// use std::thread; 107 | /// use std::time::Duration; 108 | /// 109 | /// let ex = Executor::new(); 110 | /// 111 | /// // Spawn a daemon future. 112 | /// let task = ex.spawn(async { 113 | /// loop { 114 | /// println!("Even though I'm in an infinite loop, you can still cancel me!"); 115 | /// Timer::after(Duration::from_secs(1)).await; 116 | /// } 117 | /// }); 118 | /// 119 | /// // Run an executor thread. 120 | /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); 121 | /// 122 | /// future::block_on(async { 123 | /// Timer::after(Duration::from_secs(3)).await; 124 | /// task.cancel().await; 125 | /// }); 126 | /// ``` 127 | pub async fn cancel(self) -> Option { 128 | let mut this = self; 129 | this.set_canceled(); 130 | this.fallible().await 131 | } 132 | 133 | /// Converts this task into a [`FallibleTask`]. 134 | /// 135 | /// Like [`Task`], a fallible task will poll the task's output until it is 136 | /// completed or cancelled due to its [`Runnable`][`super::Runnable`] being 137 | /// dropped without being run. Resolves to the task's output when completed, 138 | /// or [`None`] if it didn't complete. 
139 | /// 140 | /// # Examples 141 | /// 142 | /// ``` 143 | /// use smol::{future, Executor}; 144 | /// use std::thread; 145 | /// 146 | /// let ex = Executor::new(); 147 | /// 148 | /// // Spawn a future onto the executor. 149 | /// let task = ex.spawn(async { 150 | /// println!("Hello from a task!"); 151 | /// 1 + 2 152 | /// }) 153 | /// .fallible(); 154 | /// 155 | /// // Run an executor thread. 156 | /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); 157 | /// 158 | /// // Wait for the task's output. 159 | /// assert_eq!(future::block_on(task), Some(3)); 160 | /// ``` 161 | /// 162 | /// ``` 163 | /// use smol::future; 164 | /// 165 | /// // Schedule function which drops the runnable without running it. 166 | /// let schedule = move |runnable| drop(runnable); 167 | /// 168 | /// // Create a task with the future and the schedule function. 169 | /// let (runnable, task) = async_task::spawn(async { 170 | /// println!("Hello from a task!"); 171 | /// 1 + 2 172 | /// }, schedule); 173 | /// runnable.schedule(); 174 | /// 175 | /// // Wait for the task's output. 176 | /// assert_eq!(future::block_on(task.fallible()), None); 177 | /// ``` 178 | pub fn fallible(self) -> FallibleTask { 179 | FallibleTask { task: self } 180 | } 181 | 182 | /// Puts the task in canceled state. 183 | fn set_canceled(&mut self) { 184 | let ptr = self.ptr.as_ptr(); 185 | let header = ptr as *const Header; 186 | 187 | unsafe { 188 | let mut state = (*header).state.load(Ordering::Acquire); 189 | 190 | loop { 191 | // If the task has been completed or closed, it can't be canceled. 192 | if state & (COMPLETED | CLOSED) != 0 { 193 | break; 194 | } 195 | 196 | // If the task is not scheduled nor running, we'll need to schedule it. 197 | let new = if state & (SCHEDULED | RUNNING) == 0 { 198 | (state | SCHEDULED | CLOSED) + REFERENCE 199 | } else { 200 | state | CLOSED 201 | }; 202 | 203 | // Mark the task as closed. 204 | match (*header).state.compare_exchange_weak( 205 | state, 206 | new, 207 | Ordering::AcqRel, 208 | Ordering::Acquire, 209 | ) { 210 | Ok(_) => { 211 | // If the task is not scheduled nor running, schedule it one more time so 212 | // that its future gets dropped by the executor. 213 | if state & (SCHEDULED | RUNNING) == 0 { 214 | ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); 215 | } 216 | 217 | // Notify the awaiter that the task has been closed. 218 | if state & AWAITER != 0 { 219 | (*header).notify(None); 220 | } 221 | 222 | break; 223 | } 224 | Err(s) => state = s, 225 | } 226 | } 227 | } 228 | } 229 | 230 | /// Puts the task in detached state. 231 | fn set_detached(&mut self) -> Option> { 232 | let ptr = self.ptr.as_ptr(); 233 | let header = ptr as *const Header; 234 | 235 | unsafe { 236 | // A place where the output will be stored in case it needs to be dropped. 237 | let mut output = None; 238 | 239 | // Optimistically assume the `Task` is being detached just after creating the task. 240 | // This is a common case so if the `Task` is datached, the overhead of it is only one 241 | // compare-exchange operation. 242 | if let Err(mut state) = (*header).state.compare_exchange_weak( 243 | SCHEDULED | TASK | REFERENCE, 244 | SCHEDULED | REFERENCE, 245 | Ordering::AcqRel, 246 | Ordering::Acquire, 247 | ) { 248 | loop { 249 | // If the task has been completed but not yet closed, that means its output 250 | // must be dropped. 251 | if state & COMPLETED != 0 && state & CLOSED == 0 { 252 | // Mark the task as closed in order to grab its output. 
253 | match (*header).state.compare_exchange_weak( 254 | state, 255 | state | CLOSED, 256 | Ordering::AcqRel, 257 | Ordering::Acquire, 258 | ) { 259 | Ok(_) => { 260 | // Read the output. 261 | output = Some( 262 | (((*header).vtable.get_output)(ptr) as *mut Result) 263 | .read(), 264 | ); 265 | 266 | // Update the state variable because we're continuing the loop. 267 | state |= CLOSED; 268 | } 269 | Err(s) => state = s, 270 | } 271 | } else { 272 | // If this is the last reference to the task and it's not closed, then 273 | // close it and schedule one more time so that its future gets dropped by 274 | // the executor. 275 | let new = if state & (!(REFERENCE - 1) | CLOSED) == 0 { 276 | SCHEDULED | CLOSED | REFERENCE 277 | } else { 278 | state & !TASK 279 | }; 280 | 281 | // Unset the `TASK` flag. 282 | match (*header).state.compare_exchange_weak( 283 | state, 284 | new, 285 | Ordering::AcqRel, 286 | Ordering::Acquire, 287 | ) { 288 | Ok(_) => { 289 | // If this is the last reference to the task, we need to either 290 | // schedule dropping its future or destroy it. 291 | if state & !(REFERENCE - 1) == 0 { 292 | if state & CLOSED == 0 { 293 | ((*header).vtable.schedule)(ptr, ScheduleInfo::new(false)); 294 | } else { 295 | ((*header).vtable.destroy)(ptr); 296 | } 297 | } 298 | 299 | break; 300 | } 301 | Err(s) => state = s, 302 | } 303 | } 304 | } 305 | } 306 | 307 | output 308 | } 309 | } 310 | 311 | /// Polls the task to retrieve its output. 312 | /// 313 | /// Returns `Some` if the task has completed or `None` if it was closed. 314 | /// 315 | /// A task becomes closed in the following cases: 316 | /// 317 | /// 1. It gets canceled by `Runnable::drop()`, `Task::drop()`, or `Task::cancel()`. 318 | /// 2. Its output gets awaited by the `Task`. 319 | /// 3. It panics while polling the future. 320 | /// 4. It is completed and the `Task` gets dropped. 321 | fn poll_task(&mut self, cx: &mut Context<'_>) -> Poll> { 322 | let ptr = self.ptr.as_ptr(); 323 | let header = ptr as *const Header; 324 | 325 | unsafe { 326 | let mut state = (*header).state.load(Ordering::Acquire); 327 | 328 | loop { 329 | // If the task has been closed, notify the awaiter and return `None`. 330 | if state & CLOSED != 0 { 331 | // If the task is scheduled or running, we need to wait until its future is 332 | // dropped. 333 | if state & (SCHEDULED | RUNNING) != 0 { 334 | // Replace the waker with one associated with the current task. 335 | (*header).register(cx.waker()); 336 | 337 | // Reload the state after registering. It is possible changes occurred just 338 | // before registration so we need to check for that. 339 | state = (*header).state.load(Ordering::Acquire); 340 | 341 | // If the task is still scheduled or running, we need to wait because its 342 | // future is not dropped yet. 343 | if state & (SCHEDULED | RUNNING) != 0 { 344 | return Poll::Pending; 345 | } 346 | } 347 | 348 | // Even though the awaiter is most likely the current task, it could also be 349 | // another task. 350 | (*header).notify(Some(cx.waker())); 351 | return Poll::Ready(None); 352 | } 353 | 354 | // If the task is not completed, register the current task. 355 | if state & COMPLETED == 0 { 356 | // Replace the waker with one associated with the current task. 357 | (*header).register(cx.waker()); 358 | 359 | // Reload the state after registering. It is possible that the task became 360 | // completed or closed just before registration so we need to check for that. 
361 | state = (*header).state.load(Ordering::Acquire); 362 | 363 | // If the task has been closed, restart. 364 | if state & CLOSED != 0 { 365 | continue; 366 | } 367 | 368 | // If the task is still not completed, we're blocked on it. 369 | if state & COMPLETED == 0 { 370 | return Poll::Pending; 371 | } 372 | } 373 | 374 | // Since the task is now completed, mark it as closed in order to grab its output. 375 | match (*header).state.compare_exchange( 376 | state, 377 | state | CLOSED, 378 | Ordering::AcqRel, 379 | Ordering::Acquire, 380 | ) { 381 | Ok(_) => { 382 | // Notify the awaiter. Even though the awaiter is most likely the current 383 | // task, it could also be another task. 384 | if state & AWAITER != 0 { 385 | (*header).notify(Some(cx.waker())); 386 | } 387 | 388 | // Take the output from the task. 389 | let output = ((*header).vtable.get_output)(ptr) as *mut Result; 390 | let output = output.read(); 391 | 392 | // Propagate the panic if the task panicked. 393 | let output = match output { 394 | Ok(output) => output, 395 | #[allow(unreachable_patterns)] 396 | Err(panic) => { 397 | #[cfg(feature = "std")] 398 | std::panic::resume_unwind(panic); 399 | 400 | #[cfg(not(feature = "std"))] 401 | match panic {} 402 | } 403 | }; 404 | 405 | return Poll::Ready(Some(output)); 406 | } 407 | Err(s) => state = s, 408 | } 409 | } 410 | } 411 | } 412 | 413 | fn header(&self) -> &Header { 414 | let ptr = self.ptr.as_ptr(); 415 | let header = ptr as *const Header; 416 | unsafe { &*header } 417 | } 418 | 419 | /// Returns `true` if the current task is finished. 420 | /// 421 | /// Note that in a multithreaded environment, this task can change finish immediately after calling this function. 422 | pub fn is_finished(&self) -> bool { 423 | let ptr = self.ptr.as_ptr(); 424 | let header = ptr as *const Header; 425 | 426 | unsafe { 427 | let state = (*header).state.load(Ordering::Acquire); 428 | state & (CLOSED | COMPLETED) != 0 429 | } 430 | } 431 | 432 | /// Get the metadata associated with this task. 433 | /// 434 | /// Tasks can be created with a metadata object associated with them; by default, this 435 | /// is a `()` value. See the [`Builder::metadata()`] method for more information. 436 | pub fn metadata(&self) -> &M { 437 | &self.header().metadata 438 | } 439 | } 440 | 441 | impl Drop for Task { 442 | fn drop(&mut self) { 443 | self.set_canceled(); 444 | self.set_detached(); 445 | } 446 | } 447 | 448 | impl Future for Task { 449 | type Output = T; 450 | 451 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 452 | match self.poll_task(cx) { 453 | Poll::Ready(t) => Poll::Ready(t.expect("Task polled after completion")), 454 | Poll::Pending => Poll::Pending, 455 | } 456 | } 457 | } 458 | 459 | impl fmt::Debug for Task { 460 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 461 | f.debug_struct("Task") 462 | .field("header", self.header()) 463 | .finish() 464 | } 465 | } 466 | 467 | /// A spawned task with a fallible response. 468 | /// 469 | /// This type behaves like [`Task`], however it produces an `Option` when 470 | /// polled and will return `None` if the executor dropped its 471 | /// [`Runnable`][`super::Runnable`] without being run. 472 | /// 473 | /// This can be useful to avoid the panic produced when polling the `Task` 474 | /// future if the executor dropped its `Runnable`. 
475 | #[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"] 476 | pub struct FallibleTask { 477 | task: Task, 478 | } 479 | 480 | impl FallibleTask { 481 | /// Detaches the task to let it keep running in the background. 482 | /// 483 | /// # Examples 484 | /// 485 | /// ``` 486 | /// use smol::{Executor, Timer}; 487 | /// use std::time::Duration; 488 | /// 489 | /// let ex = Executor::new(); 490 | /// 491 | /// // Spawn a daemon future. 492 | /// ex.spawn(async { 493 | /// loop { 494 | /// println!("I'm a daemon task looping forever."); 495 | /// Timer::after(Duration::from_secs(1)).await; 496 | /// } 497 | /// }) 498 | /// .fallible() 499 | /// .detach(); 500 | /// ``` 501 | pub fn detach(self) { 502 | self.task.detach() 503 | } 504 | 505 | /// Cancels the task and waits for it to stop running. 506 | /// 507 | /// Returns the task's output if it was completed just before it got canceled, or [`None`] if 508 | /// it didn't complete. 509 | /// 510 | /// While it's possible to simply drop the [`Task`] to cancel it, this is a cleaner way of 511 | /// canceling because it also waits for the task to stop running. 512 | /// 513 | /// # Examples 514 | /// 515 | /// ``` 516 | /// # if cfg!(miri) { return; } // Miri does not support epoll 517 | /// use smol::{future, Executor, Timer}; 518 | /// use std::thread; 519 | /// use std::time::Duration; 520 | /// 521 | /// let ex = Executor::new(); 522 | /// 523 | /// // Spawn a daemon future. 524 | /// let task = ex.spawn(async { 525 | /// loop { 526 | /// println!("Even though I'm in an infinite loop, you can still cancel me!"); 527 | /// Timer::after(Duration::from_secs(1)).await; 528 | /// } 529 | /// }) 530 | /// .fallible(); 531 | /// 532 | /// // Run an executor thread. 533 | /// thread::spawn(move || future::block_on(ex.run(future::pending::<()>()))); 534 | /// 535 | /// future::block_on(async { 536 | /// Timer::after(Duration::from_secs(3)).await; 537 | /// task.cancel().await; 538 | /// }); 539 | /// ``` 540 | pub async fn cancel(self) -> Option { 541 | self.task.cancel().await 542 | } 543 | 544 | /// Returns `true` if the current task is finished. 545 | /// 546 | /// Note that in a multithreaded environment, this task can change finish immediately after calling this function. 547 | pub fn is_finished(&self) -> bool { 548 | self.task.is_finished() 549 | } 550 | } 551 | 552 | impl Future for FallibleTask { 553 | type Output = Option; 554 | 555 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 556 | self.task.poll_task(cx) 557 | } 558 | } 559 | 560 | impl fmt::Debug for FallibleTask { 561 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 562 | f.debug_struct("FallibleTask") 563 | .field("header", self.task.header()) 564 | .finish() 565 | } 566 | } 567 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::Layout as StdLayout; 2 | use core::mem; 3 | 4 | /// Aborts the process. 5 | /// 6 | /// To abort, this function simply panics while panicking. 7 | pub(crate) fn abort() -> ! { 8 | struct Panic; 9 | 10 | impl Drop for Panic { 11 | fn drop(&mut self) { 12 | panic!("aborting the process"); 13 | } 14 | } 15 | 16 | let _panic = Panic; 17 | panic!("aborting the process"); 18 | } 19 | 20 | /// Calls a function and aborts if it panics. 21 | /// 22 | /// This is useful in unsafe code where we can't recover from panics. 
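// Usage sketch (illustrative only): on the happy path the closure's value is
// returned and the `Bomb` guard below is forgotten; if the closure panics, the
// guard is dropped during unwinding and `abort()` panics while panicking,
// turning the unwind into a process abort.
//
//     let n = abort_on_panic(|| 21 * 2);      // returns 42
//     // abort_on_panic(|| panic!("boom"));   // aborts the whole process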
23 | #[inline] 24 | pub(crate) fn abort_on_panic(f: impl FnOnce() -> T) -> T { 25 | struct Bomb; 26 | 27 | impl Drop for Bomb { 28 | fn drop(&mut self) { 29 | abort(); 30 | } 31 | } 32 | 33 | let bomb = Bomb; 34 | let t = f(); 35 | mem::forget(bomb); 36 | t 37 | } 38 | 39 | /// A version of `alloc::alloc::Layout` that can be used in the const 40 | /// position. 41 | #[derive(Clone, Copy, Debug)] 42 | pub(crate) struct Layout { 43 | size: usize, 44 | align: usize, 45 | } 46 | 47 | impl Layout { 48 | /// Creates a new `Layout` with the given size and alignment. 49 | #[inline] 50 | pub(crate) const fn from_size_align(size: usize, align: usize) -> Self { 51 | Self { size, align } 52 | } 53 | 54 | /// Creates a new `Layout` for the given sized type. 55 | #[inline] 56 | pub(crate) const fn new() -> Self { 57 | Self::from_size_align(mem::size_of::(), mem::align_of::()) 58 | } 59 | 60 | /// Convert this into the standard library's layout type. 61 | /// 62 | /// # Safety 63 | /// 64 | /// - `align` must be non-zero and a power of two. 65 | /// - When rounded up to the nearest multiple of `align`, the size 66 | /// must not overflow. 67 | #[inline] 68 | pub(crate) const unsafe fn into_std(self) -> StdLayout { 69 | StdLayout::from_size_align_unchecked(self.size, self.align) 70 | } 71 | 72 | /// Get the alignment of this layout. 73 | #[inline] 74 | pub(crate) const fn align(&self) -> usize { 75 | self.align 76 | } 77 | 78 | /// Get the size of this layout. 79 | #[inline] 80 | pub(crate) const fn size(&self) -> usize { 81 | self.size 82 | } 83 | 84 | /// Returns the layout for `a` followed by `b` and the offset of `b`. 85 | /// 86 | /// This function was adapted from the `Layout::extend()`: 87 | /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.extend 88 | #[inline] 89 | pub(crate) const fn extend(self, other: Layout) -> Option<(Layout, usize)> { 90 | let new_align = max(self.align(), other.align()); 91 | let pad = self.padding_needed_for(other.align()); 92 | 93 | let offset = leap!(self.size().checked_add(pad)); 94 | let new_size = leap!(offset.checked_add(other.size())); 95 | 96 | // return None if any of the following are true: 97 | // - align is 0 (implied false by is_power_of_two()) 98 | // - align is not a power of 2 99 | // - size rounded up to align overflows 100 | if !new_align.is_power_of_two() || new_size > isize::MAX as usize - (new_align - 1) { 101 | return None; 102 | } 103 | 104 | let layout = Layout::from_size_align(new_size, new_align); 105 | Some((layout, offset)) 106 | } 107 | 108 | /// Returns the padding after `layout` that aligns the following address to `align`. 
109 | /// 110 | /// This function was adapted from the `Layout::padding_needed_for()`: 111 | /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.padding_needed_for 112 | #[inline] 113 | pub(crate) const fn padding_needed_for(self, align: usize) -> usize { 114 | let len = self.size(); 115 | let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); 116 | len_rounded_up.wrapping_sub(len) 117 | } 118 | } 119 | 120 | #[inline] 121 | pub(crate) const fn max(left: usize, right: usize) -> usize { 122 | if left > right { 123 | left 124 | } else { 125 | right 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /tests/basic.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::ptr::NonNull; 4 | use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 5 | use std::sync::Arc; 6 | use std::task::{Context, Poll}; 7 | 8 | use async_task::Runnable; 9 | use smol::future; 10 | 11 | // Creates a future with event counters. 12 | // 13 | // Usage: `future!(f, POLL, DROP)` 14 | // 15 | // The future `f` always returns `Poll::Ready`. 16 | // When it gets polled, `POLL` is incremented. 17 | // When it gets dropped, `DROP` is incremented. 18 | macro_rules! future { 19 | ($name:pat, $poll:ident, $drop:ident) => { 20 | static $poll: AtomicUsize = AtomicUsize::new(0); 21 | static $drop: AtomicUsize = AtomicUsize::new(0); 22 | 23 | let $name = { 24 | struct Fut(#[allow(dead_code)] Box); 25 | 26 | impl Future for Fut { 27 | type Output = Box; 28 | 29 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 30 | $poll.fetch_add(1, Ordering::SeqCst); 31 | Poll::Ready(Box::new(0)) 32 | } 33 | } 34 | 35 | impl Drop for Fut { 36 | fn drop(&mut self) { 37 | $drop.fetch_add(1, Ordering::SeqCst); 38 | } 39 | } 40 | 41 | Fut(Box::new(0)) 42 | }; 43 | }; 44 | } 45 | 46 | // Creates a schedule function with event counters. 47 | // 48 | // Usage: `schedule!(s, SCHED, DROP)` 49 | // 50 | // The schedule function `s` does nothing. 51 | // When it gets invoked, `SCHED` is incremented. 52 | // When it gets dropped, `DROP` is incremented. 53 | macro_rules! 
schedule { 54 | ($name:pat, $sched:ident, $drop:ident) => { 55 | static $drop: AtomicUsize = AtomicUsize::new(0); 56 | static $sched: AtomicUsize = AtomicUsize::new(0); 57 | 58 | let $name = { 59 | struct Guard(#[allow(dead_code)] Box); 60 | 61 | impl Drop for Guard { 62 | fn drop(&mut self) { 63 | $drop.fetch_add(1, Ordering::SeqCst); 64 | } 65 | } 66 | 67 | let guard = Guard(Box::new(0)); 68 | move |_runnable| { 69 | let _ = &guard; 70 | $sched.fetch_add(1, Ordering::SeqCst); 71 | } 72 | }; 73 | }; 74 | } 75 | 76 | fn try_await(f: impl Future) -> Option { 77 | future::block_on(future::poll_once(f)) 78 | } 79 | 80 | #[test] 81 | fn drop_and_detach() { 82 | future!(f, POLL, DROP_F); 83 | schedule!(s, SCHEDULE, DROP_S); 84 | let (runnable, task) = async_task::spawn(f, s); 85 | 86 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 87 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 88 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 89 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 90 | 91 | drop(runnable); 92 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 93 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 94 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 95 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 96 | 97 | task.detach(); 98 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 99 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 100 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 101 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 102 | } 103 | 104 | #[test] 105 | fn detach_and_drop() { 106 | future!(f, POLL, DROP_F); 107 | schedule!(s, SCHEDULE, DROP_S); 108 | let (runnable, task) = async_task::spawn(f, s); 109 | 110 | task.detach(); 111 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 112 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 113 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 114 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 115 | 116 | drop(runnable); 117 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 118 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 119 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 120 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 121 | } 122 | 123 | #[test] 124 | fn detach_and_run() { 125 | future!(f, POLL, DROP_F); 126 | schedule!(s, SCHEDULE, DROP_S); 127 | let (runnable, task) = async_task::spawn(f, s); 128 | 129 | task.detach(); 130 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 131 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 132 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 133 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 134 | 135 | runnable.run(); 136 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 137 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 138 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 139 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 140 | } 141 | 142 | #[test] 143 | fn run_and_detach() { 144 | future!(f, POLL, DROP_F); 145 | schedule!(s, SCHEDULE, DROP_S); 146 | let (runnable, task) = async_task::spawn(f, s); 147 | 148 | runnable.run(); 149 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 150 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 151 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 152 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 153 | 154 | task.detach(); 155 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 156 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 157 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 158 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 159 | } 160 | 161 | #[test] 162 | fn cancel_and_run() { 163 | future!(f, POLL, DROP_F); 164 | schedule!(s, SCHEDULE, DROP_S); 165 | let (runnable, 
task) = async_task::spawn(f, s); 166 | 167 | drop(task); 168 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 169 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 170 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 171 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 172 | 173 | runnable.run(); 174 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 175 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 176 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 177 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 178 | } 179 | 180 | #[test] 181 | fn run_and_cancel() { 182 | future!(f, POLL, DROP_F); 183 | schedule!(s, SCHEDULE, DROP_S); 184 | let (runnable, task) = async_task::spawn(f, s); 185 | 186 | runnable.run(); 187 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 188 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 189 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 190 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 191 | 192 | drop(task); 193 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 194 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 195 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 196 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 197 | } 198 | 199 | #[test] 200 | fn cancel_join() { 201 | future!(f, POLL, DROP_F); 202 | schedule!(s, SCHEDULE, DROP_S); 203 | let (runnable, mut task) = async_task::spawn(f, s); 204 | 205 | assert!(try_await(&mut task).is_none()); 206 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 207 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 208 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 209 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 210 | 211 | runnable.run(); 212 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 213 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 214 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 215 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 216 | 217 | assert!(try_await(&mut task).is_some()); 218 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 219 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 220 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 221 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 222 | 223 | drop(task); 224 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 225 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 226 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 227 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 228 | } 229 | 230 | #[test] 231 | fn schedule() { 232 | let (s, r) = flume::unbounded(); 233 | let schedule = move |runnable| s.send(runnable).unwrap(); 234 | let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); 235 | 236 | assert!(r.is_empty()); 237 | runnable.schedule(); 238 | 239 | let runnable = r.recv().unwrap(); 240 | assert!(r.is_empty()); 241 | runnable.schedule(); 242 | 243 | let runnable = r.recv().unwrap(); 244 | assert!(r.is_empty()); 245 | runnable.schedule(); 246 | 247 | r.recv().unwrap(); 248 | } 249 | 250 | #[test] 251 | fn schedule_counter() { 252 | static COUNT: AtomicUsize = AtomicUsize::new(0); 253 | 254 | let (s, r) = flume::unbounded(); 255 | let schedule = move |runnable: Runnable| { 256 | COUNT.fetch_add(1, Ordering::SeqCst); 257 | s.send(runnable).unwrap(); 258 | }; 259 | let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); 260 | runnable.schedule(); 261 | 262 | r.recv().unwrap().schedule(); 263 | r.recv().unwrap().schedule(); 264 | assert_eq!(COUNT.load(Ordering::SeqCst), 3); 265 | r.recv().unwrap(); 266 | } 267 | 268 | #[test] 269 | fn drop_inside_schedule() { 270 | struct DropGuard(AtomicUsize); 271 | 
impl Drop for DropGuard { 272 | fn drop(&mut self) { 273 | self.0.fetch_add(1, Ordering::SeqCst); 274 | } 275 | } 276 | let guard = DropGuard(AtomicUsize::new(0)); 277 | 278 | let (runnable, _) = async_task::spawn(async {}, move |runnable| { 279 | assert_eq!(guard.0.load(Ordering::SeqCst), 0); 280 | drop(runnable); 281 | assert_eq!(guard.0.load(Ordering::SeqCst), 0); 282 | }); 283 | runnable.schedule(); 284 | } 285 | 286 | #[test] 287 | fn waker() { 288 | let (s, r) = flume::unbounded(); 289 | let schedule = move |runnable| s.send(runnable).unwrap(); 290 | let (runnable, _task) = async_task::spawn(future::poll_fn(|_| Poll::<()>::Pending), schedule); 291 | 292 | assert!(r.is_empty()); 293 | let waker = runnable.waker(); 294 | runnable.run(); 295 | waker.wake_by_ref(); 296 | 297 | let runnable = r.recv().unwrap(); 298 | runnable.run(); 299 | waker.wake(); 300 | r.recv().unwrap(); 301 | } 302 | 303 | #[test] 304 | fn raw() { 305 | // Dispatch schedules a function for execution at a later point. For tests, we execute it straight away. 306 | fn dispatch(trampoline: extern "C" fn(NonNull<()>), context: NonNull<()>) { 307 | trampoline(context) 308 | } 309 | extern "C" fn trampoline(runnable: NonNull<()>) { 310 | let task = unsafe { Runnable::<()>::from_raw(runnable) }; 311 | task.run(); 312 | } 313 | 314 | let task_got_executed = Arc::new(AtomicBool::new(false)); 315 | let (runnable, _handle) = async_task::spawn( 316 | { 317 | let task_got_executed = task_got_executed.clone(); 318 | async move { task_got_executed.store(true, Ordering::SeqCst) } 319 | }, 320 | |runnable: Runnable<()>| dispatch(trampoline, runnable.into_raw()), 321 | ); 322 | runnable.schedule(); 323 | 324 | assert!(task_got_executed.load(Ordering::SeqCst)); 325 | } 326 | -------------------------------------------------------------------------------- /tests/cancel.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | use std::task::{Context, Poll}; 5 | use std::thread; 6 | use std::time::Duration; 7 | 8 | use async_task::Runnable; 9 | use easy_parallel::Parallel; 10 | use smol::future; 11 | 12 | // Creates a future with event counters. 13 | // 14 | // Usage: `future!(f, POLL, DROP_F, DROP_T)` 15 | // 16 | // The future `f` outputs `Poll::Ready`. 17 | // When it gets polled, `POLL` is incremented. 18 | // When it gets dropped, `DROP_F` is incremented. 19 | // When the output gets dropped, `DROP_T` is incremented. 20 | macro_rules! 
future { 21 | ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { 22 | static $poll: AtomicUsize = AtomicUsize::new(0); 23 | static $drop_f: AtomicUsize = AtomicUsize::new(0); 24 | static $drop_t: AtomicUsize = AtomicUsize::new(0); 25 | 26 | let $name = { 27 | struct Fut(#[allow(dead_code)] Box); 28 | 29 | impl Future for Fut { 30 | type Output = Out; 31 | 32 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 33 | $poll.fetch_add(1, Ordering::SeqCst); 34 | thread::sleep(ms(400)); 35 | Poll::Ready(Out(Box::new(0), true)) 36 | } 37 | } 38 | 39 | impl Drop for Fut { 40 | fn drop(&mut self) { 41 | $drop_f.fetch_add(1, Ordering::SeqCst); 42 | } 43 | } 44 | 45 | #[derive(Default)] 46 | struct Out(#[allow(dead_code)] Box, bool); 47 | 48 | impl Drop for Out { 49 | fn drop(&mut self) { 50 | if self.1 { 51 | $drop_t.fetch_add(1, Ordering::SeqCst); 52 | } 53 | } 54 | } 55 | 56 | Fut(Box::new(0)) 57 | }; 58 | }; 59 | } 60 | 61 | // Creates a schedule function with event counters. 62 | // 63 | // Usage: `schedule!(s, SCHED, DROP)` 64 | // 65 | // The schedule function `s` does nothing. 66 | // When it gets invoked, `SCHED` is incremented. 67 | // When it gets dropped, `DROP` is incremented. 68 | macro_rules! schedule { 69 | ($name:pat, $sched:ident, $drop:ident) => { 70 | static $drop: AtomicUsize = AtomicUsize::new(0); 71 | static $sched: AtomicUsize = AtomicUsize::new(0); 72 | 73 | let $name = { 74 | struct Guard(#[allow(dead_code)] Box); 75 | 76 | impl Drop for Guard { 77 | fn drop(&mut self) { 78 | $drop.fetch_add(1, Ordering::SeqCst); 79 | } 80 | } 81 | 82 | let guard = Guard(Box::new(0)); 83 | move |runnable: Runnable| { 84 | let _ = &guard; 85 | runnable.schedule(); 86 | $sched.fetch_add(1, Ordering::SeqCst); 87 | } 88 | }; 89 | }; 90 | } 91 | 92 | fn ms(ms: u64) -> Duration { 93 | Duration::from_millis(ms) 94 | } 95 | 96 | #[test] 97 | fn run_and_cancel() { 98 | future!(f, POLL, DROP_F, DROP_T); 99 | schedule!(s, SCHEDULE, DROP_S); 100 | let (runnable, task) = async_task::spawn(f, s); 101 | 102 | runnable.run(); 103 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 104 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 105 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 106 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 107 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 108 | 109 | assert!(future::block_on(task.cancel()).is_some()); 110 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 111 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 112 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 113 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 114 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 115 | } 116 | 117 | #[test] 118 | fn cancel_and_run() { 119 | future!(f, POLL, DROP_F, DROP_T); 120 | schedule!(s, SCHEDULE, DROP_S); 121 | let (runnable, task) = async_task::spawn(f, s); 122 | 123 | Parallel::new() 124 | .add(|| { 125 | thread::sleep(ms(200)); 126 | runnable.run(); 127 | 128 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 129 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 130 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 131 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 132 | 133 | thread::sleep(ms(200)); 134 | 135 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 136 | }) 137 | .add(|| { 138 | assert!(future::block_on(task.cancel()).is_none()); 139 | 140 | thread::sleep(ms(200)); 141 | 142 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 143 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 144 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 145 | 
assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 146 | 147 | thread::sleep(ms(200)); 148 | 149 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 150 | }) 151 | .run(); 152 | } 153 | 154 | #[test] 155 | fn cancel_during_run() { 156 | future!(f, POLL, DROP_F, DROP_T); 157 | schedule!(s, SCHEDULE, DROP_S); 158 | let (runnable, task) = async_task::spawn(f, s); 159 | 160 | Parallel::new() 161 | .add(|| { 162 | runnable.run(); 163 | 164 | thread::sleep(ms(200)); 165 | 166 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 167 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 168 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 169 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 170 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 171 | }) 172 | .add(|| { 173 | thread::sleep(ms(200)); 174 | 175 | assert!(future::block_on(task.cancel()).is_none()); 176 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 177 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 178 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 179 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 180 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 181 | }) 182 | .run(); 183 | } 184 | -------------------------------------------------------------------------------- /tests/join.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::future::Future; 3 | use std::panic::{catch_unwind, AssertUnwindSafe}; 4 | use std::pin::Pin; 5 | use std::sync::atomic::{AtomicUsize, Ordering}; 6 | use std::task::{Context, Poll}; 7 | use std::thread; 8 | use std::time::Duration; 9 | 10 | use async_task::Runnable; 11 | use easy_parallel::Parallel; 12 | use smol::future; 13 | 14 | // Creates a future with event counters. 15 | // 16 | // Usage: `future!(f, POLL, DROP_F, DROP_T)` 17 | // 18 | // The future `f` outputs `Poll::Ready`. 19 | // When it gets polled, `POLL` is incremented. 20 | // When it gets dropped, `DROP_F` is incremented. 21 | // When the output gets dropped, `DROP_T` is incremented. 22 | macro_rules! future { 23 | ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { 24 | static $poll: AtomicUsize = AtomicUsize::new(0); 25 | static $drop_f: AtomicUsize = AtomicUsize::new(0); 26 | static $drop_t: AtomicUsize = AtomicUsize::new(0); 27 | 28 | let $name = { 29 | struct Fut(#[allow(dead_code)] Box); 30 | 31 | impl Future for Fut { 32 | type Output = Out; 33 | 34 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 35 | $poll.fetch_add(1, Ordering::SeqCst); 36 | Poll::Ready(Out(Box::new(0), true)) 37 | } 38 | } 39 | 40 | impl Drop for Fut { 41 | fn drop(&mut self) { 42 | $drop_f.fetch_add(1, Ordering::SeqCst); 43 | } 44 | } 45 | 46 | #[derive(Default)] 47 | struct Out(#[allow(dead_code)] Box, bool); 48 | 49 | impl Drop for Out { 50 | fn drop(&mut self) { 51 | if self.1 { 52 | $drop_t.fetch_add(1, Ordering::SeqCst); 53 | } 54 | } 55 | } 56 | 57 | Fut(Box::new(0)) 58 | }; 59 | }; 60 | } 61 | 62 | // Creates a schedule function with event counters. 63 | // 64 | // Usage: `schedule!(s, SCHED, DROP)` 65 | // 66 | // The schedule function `s` does nothing. 67 | // When it gets invoked, `SCHED` is incremented. 68 | // When it gets dropped, `DROP` is incremented. 69 | macro_rules! 
schedule { 70 | ($name:pat, $sched:ident, $drop:ident) => { 71 | static $drop: AtomicUsize = AtomicUsize::new(0); 72 | static $sched: AtomicUsize = AtomicUsize::new(0); 73 | 74 | let $name = { 75 | struct Guard(#[allow(dead_code)] Box); 76 | 77 | impl Drop for Guard { 78 | fn drop(&mut self) { 79 | $drop.fetch_add(1, Ordering::SeqCst); 80 | } 81 | } 82 | 83 | let guard = Guard(Box::new(0)); 84 | move |runnable: Runnable| { 85 | let _ = &guard; 86 | runnable.schedule(); 87 | $sched.fetch_add(1, Ordering::SeqCst); 88 | } 89 | }; 90 | }; 91 | } 92 | 93 | fn ms(ms: u64) -> Duration { 94 | Duration::from_millis(ms) 95 | } 96 | 97 | #[test] 98 | fn drop_and_join() { 99 | future!(f, POLL, DROP_F, DROP_T); 100 | schedule!(s, SCHEDULE, DROP_S); 101 | let (runnable, task) = async_task::spawn(f, s); 102 | 103 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 104 | 105 | drop(runnable); 106 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 107 | 108 | assert!(catch_unwind(|| future::block_on(task)).is_err()); 109 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 110 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 111 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 112 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 113 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 114 | } 115 | 116 | #[test] 117 | fn run_and_join() { 118 | future!(f, POLL, DROP_F, DROP_T); 119 | schedule!(s, SCHEDULE, DROP_S); 120 | let (runnable, task) = async_task::spawn(f, s); 121 | 122 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 123 | 124 | runnable.run(); 125 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 126 | 127 | assert!(catch_unwind(|| future::block_on(task)).is_ok()); 128 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 129 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 130 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 131 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 132 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 133 | } 134 | 135 | #[test] 136 | fn detach_and_run() { 137 | future!(f, POLL, DROP_F, DROP_T); 138 | schedule!(s, SCHEDULE, DROP_S); 139 | let (runnable, task) = async_task::spawn(f, s); 140 | 141 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 142 | 143 | task.detach(); 144 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 145 | 146 | runnable.run(); 147 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 148 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 149 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 150 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 151 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 152 | } 153 | 154 | #[test] 155 | fn join_twice() { 156 | future!(f, POLL, DROP_F, DROP_T); 157 | schedule!(s, SCHEDULE, DROP_S); 158 | let (runnable, mut task) = async_task::spawn(f, s); 159 | 160 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 161 | 162 | runnable.run(); 163 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 164 | 165 | future::block_on(&mut task); 166 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 167 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 168 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 169 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 170 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 171 | 172 | assert!(catch_unwind(AssertUnwindSafe(|| future::block_on(&mut task))).is_err()); 173 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 174 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 175 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 176 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 177 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 178 | 179 | task.detach(); 180 | 
assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 181 | } 182 | 183 | #[test] 184 | fn join_and_cancel() { 185 | future!(f, POLL, DROP_F, DROP_T); 186 | schedule!(s, SCHEDULE, DROP_S); 187 | let (runnable, task) = async_task::spawn(f, s); 188 | 189 | Parallel::new() 190 | .add(|| { 191 | thread::sleep(ms(200)); 192 | drop(runnable); 193 | 194 | thread::sleep(ms(400)); 195 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 196 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 197 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 198 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 199 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 200 | }) 201 | .add(|| { 202 | assert!(catch_unwind(|| future::block_on(task)).is_err()); 203 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 204 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 205 | 206 | thread::sleep(ms(200)); 207 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 208 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 209 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 210 | }) 211 | .run(); 212 | } 213 | 214 | #[test] 215 | fn join_and_run() { 216 | future!(f, POLL, DROP_F, DROP_T); 217 | schedule!(s, SCHEDULE, DROP_S); 218 | let (runnable, task) = async_task::spawn(f, s); 219 | 220 | Parallel::new() 221 | .add(|| { 222 | thread::sleep(ms(400)); 223 | 224 | runnable.run(); 225 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 226 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 227 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 228 | 229 | thread::sleep(ms(200)); 230 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 231 | }) 232 | .add(|| { 233 | future::block_on(task); 234 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 235 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 236 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 237 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 238 | 239 | thread::sleep(ms(200)); 240 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 241 | }) 242 | .run(); 243 | } 244 | 245 | #[test] 246 | fn try_join_and_run_and_join() { 247 | future!(f, POLL, DROP_F, DROP_T); 248 | schedule!(s, SCHEDULE, DROP_S); 249 | let (runnable, mut task) = async_task::spawn(f, s); 250 | 251 | Parallel::new() 252 | .add(|| { 253 | thread::sleep(ms(400)); 254 | 255 | runnable.run(); 256 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 257 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 258 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 259 | 260 | thread::sleep(ms(200)); 261 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 262 | }) 263 | .add(|| { 264 | future::block_on(future::or(&mut task, future::ready(Default::default()))); 265 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 266 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 267 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 268 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 269 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 270 | 271 | future::block_on(task); 272 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 273 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 274 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 275 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 276 | 277 | thread::sleep(ms(200)); 278 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 279 | }) 280 | .run(); 281 | } 282 | 283 | #[test] 284 | fn try_join_and_cancel_and_run() { 285 | future!(f, POLL, DROP_F, DROP_T); 286 | schedule!(s, SCHEDULE, DROP_S); 287 | let (runnable, mut task) = async_task::spawn(f, s); 288 | 289 | Parallel::new() 290 | .add(|| { 291 | thread::sleep(ms(200)); 292 | 293 | runnable.run(); 294 | 
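            // The other thread dropped the `Task` right away, closing the task, so this
            // `run()` drops the future without polling it and, as the last reference goes
            // away, the schedule function is dropped too.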
assert_eq!(POLL.load(Ordering::SeqCst), 0); 295 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 296 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 297 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 298 | }) 299 | .add(|| { 300 | future::block_on(future::or(&mut task, future::ready(Default::default()))); 301 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 302 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 303 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 304 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 305 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 306 | 307 | drop(task); 308 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 309 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 310 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 311 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 312 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 313 | }) 314 | .run(); 315 | } 316 | 317 | #[test] 318 | fn try_join_and_run_and_cancel() { 319 | future!(f, POLL, DROP_F, DROP_T); 320 | schedule!(s, SCHEDULE, DROP_S); 321 | let (runnable, mut task) = async_task::spawn(f, s); 322 | 323 | Parallel::new() 324 | .add(|| { 325 | thread::sleep(ms(200)); 326 | 327 | runnable.run(); 328 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 329 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 330 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 331 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 332 | }) 333 | .add(|| { 334 | future::block_on(future::or(&mut task, future::ready(Default::default()))); 335 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 336 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 337 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 338 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 339 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 340 | 341 | thread::sleep(ms(400)); 342 | 343 | drop(task); 344 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 345 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 346 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 347 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 348 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 349 | }) 350 | .run(); 351 | } 352 | 353 | #[test] 354 | fn await_output() { 355 | struct Fut(Cell>); 356 | 357 | impl Fut { 358 | fn new(t: T) -> Fut { 359 | Fut(Cell::new(Some(t))) 360 | } 361 | } 362 | 363 | impl Future for Fut { 364 | type Output = T; 365 | 366 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 367 | Poll::Ready(self.0.take().unwrap()) 368 | } 369 | } 370 | 371 | for i in 0..10 { 372 | let (runnable, task) = async_task::spawn(Fut::new(i), drop); 373 | runnable.run(); 374 | assert_eq!(future::block_on(task), i); 375 | } 376 | 377 | for i in 0..10 { 378 | let (runnable, task) = async_task::spawn(Fut::new(vec![7; i]), drop); 379 | runnable.run(); 380 | assert_eq!(future::block_on(task), vec![7; i]); 381 | } 382 | 383 | let (runnable, task) = async_task::spawn(Fut::new("foo".to_string()), drop); 384 | runnable.run(); 385 | assert_eq!(future::block_on(task), "foo"); 386 | } 387 | -------------------------------------------------------------------------------- /tests/metadata.rs: -------------------------------------------------------------------------------- 1 | use async_task::{Builder, Runnable}; 2 | use flume::unbounded; 3 | use smol::future; 4 | 5 | use std::sync::atomic::{AtomicUsize, Ordering}; 6 | 7 | #[test] 8 | fn metadata_use_case() { 9 | // Each future has a counter that is incremented every time it is scheduled. 
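    // The counter lives in the task's metadata (an `AtomicUsize` stored in the task
    // header), so the schedule function can reach it through `Runnable::metadata()`
    // without any shared state outside the task allocation itself.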
10 | let (sender, receiver) = unbounded::>(); 11 | let schedule = move |runnable: Runnable| { 12 | runnable.metadata().fetch_add(1, Ordering::SeqCst); 13 | sender.send(runnable).ok(); 14 | }; 15 | 16 | async fn my_future(counter: &AtomicUsize) { 17 | loop { 18 | // Loop until we've been scheduled five times. 19 | let count = counter.load(Ordering::SeqCst); 20 | if count < 5 { 21 | // Make sure that we are immediately scheduled again. 22 | future::yield_now().await; 23 | continue; 24 | } 25 | 26 | // We've been scheduled five times, so we're done. 27 | break; 28 | } 29 | } 30 | 31 | let make_task = || { 32 | // SAFETY: We are spawning a non-'static future, so we need to use the unsafe API. 33 | // The borrowed variables, in this case the metadata, are guaranteed to outlive the runnable. 34 | let (runnable, task) = unsafe { 35 | Builder::new() 36 | .metadata(AtomicUsize::new(0)) 37 | .spawn_unchecked(my_future, schedule.clone()) 38 | }; 39 | 40 | runnable.schedule(); 41 | task 42 | }; 43 | 44 | // Make tasks. 45 | let t1 = make_task(); 46 | let t2 = make_task(); 47 | 48 | // Run the tasks. 49 | while let Ok(runnable) = receiver.try_recv() { 50 | runnable.run(); 51 | } 52 | 53 | // Unwrap the tasks. 54 | smol::future::block_on(async move { 55 | t1.await; 56 | t2.await; 57 | }); 58 | } 59 | -------------------------------------------------------------------------------- /tests/panic.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::panic::catch_unwind; 3 | use std::pin::Pin; 4 | use std::sync::atomic::{AtomicUsize, Ordering}; 5 | use std::task::{Context, Poll}; 6 | use std::thread; 7 | use std::time::Duration; 8 | 9 | use async_task::Runnable; 10 | use easy_parallel::Parallel; 11 | use smol::future; 12 | 13 | // Creates a future with event counters. 14 | // 15 | // Usage: `future!(f, POLL, DROP)` 16 | // 17 | // The future `f` sleeps for 200 ms and then panics. 18 | // When it gets polled, `POLL` is incremented. 19 | // When it gets dropped, `DROP` is incremented. 20 | macro_rules! future { 21 | ($name:pat, $poll:ident, $drop:ident) => { 22 | static $poll: AtomicUsize = AtomicUsize::new(0); 23 | static $drop: AtomicUsize = AtomicUsize::new(0); 24 | 25 | let $name = { 26 | struct Fut(#[allow(dead_code)] Box); 27 | 28 | impl Future for Fut { 29 | type Output = (); 30 | 31 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 32 | $poll.fetch_add(1, Ordering::SeqCst); 33 | thread::sleep(ms(400)); 34 | panic!() 35 | } 36 | } 37 | 38 | impl Drop for Fut { 39 | fn drop(&mut self) { 40 | $drop.fetch_add(1, Ordering::SeqCst); 41 | } 42 | } 43 | 44 | Fut(Box::new(0)) 45 | }; 46 | }; 47 | } 48 | 49 | // Creates a schedule function with event counters. 50 | // 51 | // Usage: `schedule!(s, SCHED, DROP)` 52 | // 53 | // The schedule function `s` does nothing. 54 | // When it gets invoked, `SCHED` is incremented. 55 | // When it gets dropped, `DROP` is incremented. 56 | macro_rules! 
schedule { 57 | ($name:pat, $sched:ident, $drop:ident) => { 58 | static $drop: AtomicUsize = AtomicUsize::new(0); 59 | static $sched: AtomicUsize = AtomicUsize::new(0); 60 | 61 | let $name = { 62 | struct Guard(#[allow(dead_code)] Box); 63 | 64 | impl Drop for Guard { 65 | fn drop(&mut self) { 66 | $drop.fetch_add(1, Ordering::SeqCst); 67 | } 68 | } 69 | 70 | let guard = Guard(Box::new(0)); 71 | move |_runnable: Runnable| { 72 | let _ = &guard; 73 | $sched.fetch_add(1, Ordering::SeqCst); 74 | } 75 | }; 76 | }; 77 | } 78 | 79 | fn ms(ms: u64) -> Duration { 80 | Duration::from_millis(ms) 81 | } 82 | 83 | #[test] 84 | fn cancel_during_run() { 85 | future!(f, POLL, DROP_F); 86 | schedule!(s, SCHEDULE, DROP_S); 87 | let (runnable, task) = async_task::spawn(f, s); 88 | 89 | Parallel::new() 90 | .add(|| { 91 | assert!(catch_unwind(|| runnable.run()).is_err()); 92 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 93 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 94 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 95 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 96 | }) 97 | .add(|| { 98 | thread::sleep(ms(200)); 99 | 100 | drop(task); 101 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 102 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 103 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 104 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 105 | }) 106 | .run(); 107 | } 108 | 109 | #[test] 110 | fn run_and_join() { 111 | future!(f, POLL, DROP_F); 112 | schedule!(s, SCHEDULE, DROP_S); 113 | let (runnable, task) = async_task::spawn(f, s); 114 | 115 | assert!(catch_unwind(|| runnable.run()).is_err()); 116 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 117 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 118 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 119 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 120 | 121 | assert!(catch_unwind(|| future::block_on(task)).is_err()); 122 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 123 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 124 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 125 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 126 | } 127 | 128 | #[test] 129 | fn try_join_and_run_and_join() { 130 | future!(f, POLL, DROP_F); 131 | schedule!(s, SCHEDULE, DROP_S); 132 | let (runnable, mut task) = async_task::spawn(f, s); 133 | 134 | future::block_on(future::or(&mut task, future::ready(()))); 135 | assert_eq!(POLL.load(Ordering::SeqCst), 0); 136 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 137 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 138 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 139 | 140 | assert!(catch_unwind(|| runnable.run()).is_err()); 141 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 142 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 143 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 144 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 145 | 146 | assert!(catch_unwind(|| future::block_on(task)).is_err()); 147 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 148 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 149 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 150 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 151 | } 152 | 153 | #[test] 154 | fn join_during_run() { 155 | future!(f, POLL, DROP_F); 156 | schedule!(s, SCHEDULE, DROP_S); 157 | let (runnable, task) = async_task::spawn(f, s); 158 | 159 | Parallel::new() 160 | .add(|| { 161 | assert!(catch_unwind(|| runnable.run()).is_err()); 162 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 163 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 164 | 
assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 165 | 166 | thread::sleep(ms(200)); 167 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 168 | }) 169 | .add(|| { 170 | thread::sleep(ms(200)); 171 | 172 | assert!(catch_unwind(|| future::block_on(task)).is_err()); 173 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 174 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 175 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 176 | 177 | thread::sleep(ms(200)); 178 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 179 | }) 180 | .run(); 181 | } 182 | 183 | #[test] 184 | fn try_join_during_run() { 185 | future!(f, POLL, DROP_F); 186 | schedule!(s, SCHEDULE, DROP_S); 187 | let (runnable, mut task) = async_task::spawn(f, s); 188 | 189 | Parallel::new() 190 | .add(|| { 191 | assert!(catch_unwind(|| runnable.run()).is_err()); 192 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 193 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 194 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 195 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 196 | }) 197 | .add(|| { 198 | thread::sleep(ms(200)); 199 | 200 | future::block_on(future::or(&mut task, future::ready(()))); 201 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 202 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 203 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 204 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 205 | drop(task); 206 | }) 207 | .run(); 208 | } 209 | 210 | #[test] 211 | fn detach_during_run() { 212 | future!(f, POLL, DROP_F); 213 | schedule!(s, SCHEDULE, DROP_S); 214 | let (runnable, task) = async_task::spawn(f, s); 215 | 216 | Parallel::new() 217 | .add(|| { 218 | assert!(catch_unwind(|| runnable.run()).is_err()); 219 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 220 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 221 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 222 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 223 | }) 224 | .add(|| { 225 | thread::sleep(ms(200)); 226 | 227 | task.detach(); 228 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 229 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 230 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 231 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 232 | }) 233 | .run(); 234 | } 235 | -------------------------------------------------------------------------------- /tests/ready.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | use std::task::{Context, Poll}; 5 | use std::thread; 6 | use std::time::Duration; 7 | 8 | use async_task::Runnable; 9 | use easy_parallel::Parallel; 10 | use smol::future; 11 | 12 | // Creates a future with event counters. 13 | // 14 | // Usage: `future!(f, POLL, DROP_F, DROP_T)` 15 | // 16 | // The future `f` sleeps for 200 ms and outputs `Poll::Ready`. 17 | // When it gets polled, `POLL` is incremented. 18 | // When it gets dropped, `DROP_F` is incremented. 19 | // When the output gets dropped, `DROP_T` is incremented. 20 | macro_rules! 
future { 21 | ($name:pat, $poll:ident, $drop_f:ident, $drop_t:ident) => { 22 | static $poll: AtomicUsize = AtomicUsize::new(0); 23 | static $drop_f: AtomicUsize = AtomicUsize::new(0); 24 | static $drop_t: AtomicUsize = AtomicUsize::new(0); 25 | 26 | let $name = { 27 | struct Fut(#[allow(dead_code)] Box); 28 | 29 | impl Future for Fut { 30 | type Output = Out; 31 | 32 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 33 | $poll.fetch_add(1, Ordering::SeqCst); 34 | thread::sleep(ms(400)); 35 | Poll::Ready(Out(Box::new(0), true)) 36 | } 37 | } 38 | 39 | impl Drop for Fut { 40 | fn drop(&mut self) { 41 | $drop_f.fetch_add(1, Ordering::SeqCst); 42 | } 43 | } 44 | 45 | #[derive(Default)] 46 | struct Out(#[allow(dead_code)] Box, bool); 47 | 48 | impl Drop for Out { 49 | fn drop(&mut self) { 50 | if self.1 { 51 | $drop_t.fetch_add(1, Ordering::SeqCst); 52 | } 53 | } 54 | } 55 | 56 | Fut(Box::new(0)) 57 | }; 58 | }; 59 | } 60 | 61 | // Creates a schedule function with event counters. 62 | // 63 | // Usage: `schedule!(s, SCHED, DROP)` 64 | // 65 | // The schedule function `s` does nothing. 66 | // When it gets invoked, `SCHED` is incremented. 67 | // When it gets dropped, `DROP` is incremented. 68 | macro_rules! schedule { 69 | ($name:pat, $sched:ident, $drop:ident) => { 70 | static $drop: AtomicUsize = AtomicUsize::new(0); 71 | static $sched: AtomicUsize = AtomicUsize::new(0); 72 | 73 | let $name = { 74 | struct Guard(#[allow(dead_code)] Box); 75 | 76 | impl Drop for Guard { 77 | fn drop(&mut self) { 78 | $drop.fetch_add(1, Ordering::SeqCst); 79 | } 80 | } 81 | 82 | let guard = Guard(Box::new(0)); 83 | move |_runnable: Runnable| { 84 | let _ = &guard; 85 | $sched.fetch_add(1, Ordering::SeqCst); 86 | } 87 | }; 88 | }; 89 | } 90 | 91 | fn ms(ms: u64) -> Duration { 92 | Duration::from_millis(ms) 93 | } 94 | 95 | #[test] 96 | fn cancel_during_run() { 97 | future!(f, POLL, DROP_F, DROP_T); 98 | schedule!(s, SCHEDULE, DROP_S); 99 | let (runnable, task) = async_task::spawn(f, s); 100 | 101 | Parallel::new() 102 | .add(|| { 103 | runnable.run(); 104 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 105 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 106 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 107 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 108 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 109 | }) 110 | .add(|| { 111 | thread::sleep(ms(200)); 112 | 113 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 114 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 115 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 116 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 117 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 118 | 119 | drop(task); 120 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 121 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 122 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 123 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 124 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 125 | 126 | thread::sleep(ms(400)); 127 | 128 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 129 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 130 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 131 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 132 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 133 | }) 134 | .run(); 135 | } 136 | 137 | #[test] 138 | fn join_during_run() { 139 | future!(f, POLL, DROP_F, DROP_T); 140 | schedule!(s, SCHEDULE, DROP_S); 141 | let (runnable, task) = async_task::spawn(f, s); 142 | 143 | Parallel::new() 144 | .add(|| { 145 | runnable.run(); 146 | 
assert_eq!(POLL.load(Ordering::SeqCst), 1); 147 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 148 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 149 | 150 | thread::sleep(ms(200)); 151 | 152 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 153 | }) 154 | .add(|| { 155 | thread::sleep(ms(200)); 156 | 157 | future::block_on(task); 158 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 159 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 160 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 161 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 162 | 163 | thread::sleep(ms(200)); 164 | 165 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 166 | }) 167 | .run(); 168 | } 169 | 170 | #[test] 171 | fn try_join_during_run() { 172 | future!(f, POLL, DROP_F, DROP_T); 173 | schedule!(s, SCHEDULE, DROP_S); 174 | let (runnable, mut task) = async_task::spawn(f, s); 175 | 176 | Parallel::new() 177 | .add(|| { 178 | runnable.run(); 179 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 180 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 181 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 182 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 183 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 184 | }) 185 | .add(|| { 186 | thread::sleep(ms(200)); 187 | 188 | future::block_on(future::or(&mut task, future::ready(Default::default()))); 189 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 190 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 191 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 192 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 193 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 194 | drop(task); 195 | }) 196 | .run(); 197 | } 198 | 199 | #[test] 200 | fn detach_during_run() { 201 | future!(f, POLL, DROP_F, DROP_T); 202 | schedule!(s, SCHEDULE, DROP_S); 203 | let (runnable, task) = async_task::spawn(f, s); 204 | 205 | Parallel::new() 206 | .add(|| { 207 | runnable.run(); 208 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 209 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 210 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 211 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 212 | assert_eq!(DROP_T.load(Ordering::SeqCst), 1); 213 | }) 214 | .add(|| { 215 | thread::sleep(ms(200)); 216 | 217 | task.detach(); 218 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 219 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 220 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 221 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 222 | assert_eq!(DROP_T.load(Ordering::SeqCst), 0); 223 | }) 224 | .run(); 225 | } 226 | -------------------------------------------------------------------------------- /tests/waker_panic.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::future::Future; 3 | use std::panic::{catch_unwind, AssertUnwindSafe}; 4 | use std::pin::Pin; 5 | use std::sync::atomic::{AtomicUsize, Ordering}; 6 | use std::task::{Context, Poll}; 7 | use std::thread; 8 | use std::time::Duration; 9 | 10 | use async_task::Runnable; 11 | use atomic_waker::AtomicWaker; 12 | use easy_parallel::Parallel; 13 | use smol::future; 14 | 15 | // Creates a future with event counters. 16 | // 17 | // Usage: `future!(f, get_waker, POLL, DROP)` 18 | // 19 | // The future `f` always sleeps for 200 ms, and panics the second time it is polled. 20 | // When it gets polled, `POLL` is incremented. 21 | // When it gets dropped, `DROP` is incremented. 22 | // 23 | // Every time the future is run, it stores the waker into a global variable. 
24 | // This waker can be extracted using the `get_waker()` function. 25 | macro_rules! future { 26 | ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { 27 | static $poll: AtomicUsize = AtomicUsize::new(0); 28 | static $drop: AtomicUsize = AtomicUsize::new(0); 29 | static WAKER: AtomicWaker = AtomicWaker::new(); 30 | 31 | let ($name, $get_waker) = { 32 | struct Fut(Cell, #[allow(dead_code)] Box); 33 | 34 | impl Future for Fut { 35 | type Output = (); 36 | 37 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 38 | WAKER.register(cx.waker()); 39 | $poll.fetch_add(1, Ordering::SeqCst); 40 | thread::sleep(ms(400)); 41 | 42 | if self.0.get() { 43 | panic!() 44 | } else { 45 | self.0.set(true); 46 | Poll::Pending 47 | } 48 | } 49 | } 50 | 51 | impl Drop for Fut { 52 | fn drop(&mut self) { 53 | $drop.fetch_add(1, Ordering::SeqCst); 54 | } 55 | } 56 | 57 | (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) 58 | }; 59 | }; 60 | } 61 | 62 | // Creates a schedule function with event counters. 63 | // 64 | // Usage: `schedule!(s, chan, SCHED, DROP)` 65 | // 66 | // The schedule function `s` pushes the task into `chan`. 67 | // When it gets invoked, `SCHED` is incremented. 68 | // When it gets dropped, `DROP` is incremented. 69 | // 70 | // Receiver `chan` extracts the task when it is scheduled. 71 | macro_rules! schedule { 72 | ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { 73 | static $drop: AtomicUsize = AtomicUsize::new(0); 74 | static $sched: AtomicUsize = AtomicUsize::new(0); 75 | 76 | let ($name, $chan) = { 77 | let (s, r) = flume::unbounded(); 78 | 79 | struct Guard(#[allow(dead_code)] Box); 80 | 81 | impl Drop for Guard { 82 | fn drop(&mut self) { 83 | $drop.fetch_add(1, Ordering::SeqCst); 84 | } 85 | } 86 | 87 | let guard = Guard(Box::new(0)); 88 | let sched = move |runnable: Runnable| { 89 | let _ = &guard; 90 | $sched.fetch_add(1, Ordering::SeqCst); 91 | s.send(runnable).unwrap(); 92 | }; 93 | 94 | (sched, r) 95 | }; 96 | }; 97 | } 98 | 99 | fn ms(ms: u64) -> Duration { 100 | Duration::from_millis(ms) 101 | } 102 | 103 | fn try_await(f: impl Future) -> Option { 104 | future::block_on(future::poll_once(f)) 105 | } 106 | 107 | #[test] 108 | fn wake_during_run() { 109 | future!(f, get_waker, POLL, DROP_F); 110 | schedule!(s, chan, SCHEDULE, DROP_S); 111 | let (runnable, task) = async_task::spawn(f, s); 112 | 113 | runnable.run(); 114 | let waker = get_waker(); 115 | waker.wake_by_ref(); 116 | let runnable = chan.recv().unwrap(); 117 | 118 | Parallel::new() 119 | .add(|| { 120 | assert!(catch_unwind(|| runnable.run()).is_err()); 121 | drop(get_waker()); 122 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 123 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 124 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 125 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 126 | assert_eq!(chan.len(), 0); 127 | }) 128 | .add(|| { 129 | thread::sleep(ms(200)); 130 | 131 | waker.wake(); 132 | task.detach(); 133 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 134 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 135 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 136 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 137 | assert_eq!(chan.len(), 0); 138 | 139 | thread::sleep(ms(400)); 140 | 141 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 142 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 143 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 144 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 145 | assert_eq!(chan.len(), 0); 146 | }) 147 | .run(); 148 | } 149 | 150 | #[test] 
151 | fn cancel_during_run() { 152 | future!(f, get_waker, POLL, DROP_F); 153 | schedule!(s, chan, SCHEDULE, DROP_S); 154 | let (runnable, task) = async_task::spawn(f, s); 155 | 156 | runnable.run(); 157 | let waker = get_waker(); 158 | waker.wake(); 159 | let runnable = chan.recv().unwrap(); 160 | 161 | Parallel::new() 162 | .add(|| { 163 | assert!(catch_unwind(|| runnable.run()).is_err()); 164 | drop(get_waker()); 165 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 166 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 167 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 168 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 169 | assert_eq!(chan.len(), 0); 170 | }) 171 | .add(|| { 172 | thread::sleep(ms(200)); 173 | 174 | drop(task); 175 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 176 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 177 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 178 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 179 | assert_eq!(chan.len(), 0); 180 | 181 | thread::sleep(ms(400)); 182 | 183 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 184 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 185 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 186 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 187 | assert_eq!(chan.len(), 0); 188 | }) 189 | .run(); 190 | } 191 | 192 | #[test] 193 | fn wake_and_cancel_during_run() { 194 | future!(f, get_waker, POLL, DROP_F); 195 | schedule!(s, chan, SCHEDULE, DROP_S); 196 | let (runnable, task) = async_task::spawn(f, s); 197 | 198 | runnable.run(); 199 | let waker = get_waker(); 200 | waker.wake_by_ref(); 201 | let runnable = chan.recv().unwrap(); 202 | 203 | Parallel::new() 204 | .add(|| { 205 | assert!(catch_unwind(|| runnable.run()).is_err()); 206 | drop(get_waker()); 207 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 208 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 209 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 210 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 211 | assert_eq!(chan.len(), 0); 212 | }) 213 | .add(|| { 214 | thread::sleep(ms(200)); 215 | 216 | waker.wake(); 217 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 218 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 219 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 220 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 221 | assert_eq!(chan.len(), 0); 222 | 223 | drop(task); 224 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 225 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 226 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 227 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 228 | assert_eq!(chan.len(), 0); 229 | 230 | thread::sleep(ms(400)); 231 | 232 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 233 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 234 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 235 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 236 | assert_eq!(chan.len(), 0); 237 | }) 238 | .run(); 239 | } 240 | 241 | #[flaky_test::flaky_test] 242 | fn cancel_and_wake_during_run() { 243 | future!(f, get_waker, POLL, DROP_F); 244 | schedule!(s, chan, SCHEDULE, DROP_S); 245 | POLL.store(0, Ordering::SeqCst); 246 | DROP_F.store(0, Ordering::SeqCst); 247 | SCHEDULE.store(0, Ordering::SeqCst); 248 | DROP_S.store(0, Ordering::SeqCst); 249 | 250 | let (runnable, task) = async_task::spawn(f, s); 251 | 252 | runnable.run(); 253 | let waker = get_waker(); 254 | waker.wake_by_ref(); 255 | let runnable = chan.recv().unwrap(); 256 | 257 | Parallel::new() 258 | .add(|| { 259 | assert!(catch_unwind(|| runnable.run()).is_err()); 260 | drop(get_waker()); 261 | 
assert_eq!(POLL.load(Ordering::SeqCst), 2); 262 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 263 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 264 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 265 | assert_eq!(chan.len(), 0); 266 | }) 267 | .add(|| { 268 | thread::sleep(ms(200)); 269 | 270 | drop(task); 271 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 272 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 273 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 274 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 275 | assert_eq!(chan.len(), 0); 276 | 277 | waker.wake(); 278 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 279 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 280 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 281 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 282 | assert_eq!(chan.len(), 0); 283 | 284 | thread::sleep(ms(400)); 285 | 286 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 287 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 288 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 289 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 290 | assert_eq!(chan.len(), 0); 291 | }) 292 | .run(); 293 | } 294 | 295 | #[test] 296 | fn panic_and_poll() { 297 | future!(f, get_waker, POLL, DROP_F); 298 | schedule!(s, chan, SCHEDULE, DROP_S); 299 | let (runnable, task) = async_task::spawn(f, s); 300 | 301 | runnable.run(); 302 | get_waker().wake(); 303 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 304 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 305 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 306 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 307 | 308 | let mut task = task; 309 | assert!(try_await(&mut task).is_none()); 310 | 311 | let runnable = chan.recv().unwrap(); 312 | assert!(catch_unwind(|| runnable.run()).is_err()); 313 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 314 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 315 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 316 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 317 | 318 | assert!(catch_unwind(AssertUnwindSafe(|| try_await(&mut task))).is_err()); 319 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 320 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 321 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 322 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 323 | 324 | drop(get_waker()); 325 | drop(task); 326 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 327 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 328 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 329 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 330 | } 331 | -------------------------------------------------------------------------------- /tests/waker_pending.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | use std::task::{Context, Poll}; 5 | use std::thread; 6 | use std::time::Duration; 7 | 8 | use async_task::Runnable; 9 | use atomic_waker::AtomicWaker; 10 | use easy_parallel::Parallel; 11 | 12 | // Creates a future with event counters. 13 | // 14 | // Usage: `future!(f, get_waker, POLL, DROP)` 15 | // 16 | // The future `f` always sleeps for 400 ms and returns `Poll::Pending`. 17 | // When it gets polled, `POLL` is incremented. 18 | // When it gets dropped, `DROP` is incremented. 19 | // 20 | // Every time the future is run, it stores the waker into a global variable. 21 | // This waker can be extracted using the `get_waker()` function. 22 | macro_rules!
future { 23 | ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { 24 | static $poll: AtomicUsize = AtomicUsize::new(0); 25 | static $drop: AtomicUsize = AtomicUsize::new(0); 26 | static WAKER: AtomicWaker = AtomicWaker::new(); 27 | 28 | let ($name, $get_waker) = { 29 | struct Fut(#[allow(dead_code)] Box<i32>); 30 | 31 | impl Future for Fut { 32 | type Output = (); 33 | 34 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { 35 | WAKER.register(cx.waker()); 36 | $poll.fetch_add(1, Ordering::SeqCst); 37 | thread::sleep(ms(400)); 38 | Poll::Pending 39 | } 40 | } 41 | 42 | impl Drop for Fut { 43 | fn drop(&mut self) { 44 | $drop.fetch_add(1, Ordering::SeqCst); 45 | } 46 | } 47 | 48 | (Fut(Box::new(0)), || WAKER.take().unwrap()) 49 | }; 50 | }; 51 | } 52 | 53 | // Creates a schedule function with event counters. 54 | // 55 | // Usage: `schedule!(s, chan, SCHED, DROP)` 56 | // 57 | // The schedule function `s` pushes the task into `chan`. 58 | // When it gets invoked, `SCHED` is incremented. 59 | // When it gets dropped, `DROP` is incremented. 60 | // 61 | // Receiver `chan` extracts the task when it is scheduled. 62 | macro_rules! schedule { 63 | ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { 64 | static $drop: AtomicUsize = AtomicUsize::new(0); 65 | static $sched: AtomicUsize = AtomicUsize::new(0); 66 | 67 | let ($name, $chan) = { 68 | let (s, r) = flume::unbounded(); 69 | 70 | struct Guard(#[allow(dead_code)] Box<i32>); 71 | 72 | impl Drop for Guard { 73 | fn drop(&mut self) { 74 | $drop.fetch_add(1, Ordering::SeqCst); 75 | } 76 | } 77 | 78 | let guard = Guard(Box::new(0)); 79 | let sched = move |runnable: Runnable| { 80 | let _ = &guard; 81 | $sched.fetch_add(1, Ordering::SeqCst); 82 | s.send(runnable).unwrap(); 83 | }; 84 | 85 | (sched, r) 86 | }; 87 | }; 88 | } 89 | 90 | fn ms(ms: u64) -> Duration { 91 | Duration::from_millis(ms) 92 | } 93 | 94 | #[test] 95 | fn wake_during_run() { 96 | future!(f, get_waker, POLL, DROP_F); 97 | schedule!(s, chan, SCHEDULE, DROP_S); 98 | let (runnable, _task) = async_task::spawn(f, s); 99 | 100 | runnable.run(); 101 | let waker = get_waker(); 102 | waker.wake_by_ref(); 103 | let runnable = chan.recv().unwrap(); 104 | 105 | Parallel::new() 106 | .add(|| { 107 | runnable.run(); 108 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 109 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); 110 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 111 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 112 | assert_eq!(chan.len(), 1); 113 | }) 114 | .add(|| { 115 | thread::sleep(ms(200)); 116 | 117 | waker.wake_by_ref(); 118 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 119 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 120 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 121 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 122 | assert_eq!(chan.len(), 0); 123 | 124 | thread::sleep(ms(400)); 125 | 126 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 127 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 2); 128 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 129 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 130 | assert_eq!(chan.len(), 1); 131 | }) 132 | .run(); 133 | 134 | chan.recv().unwrap(); 135 | drop(get_waker()); 136 | } 137 | 138 | #[test] 139 | fn cancel_during_run() { 140 | future!(f, get_waker, POLL, DROP_F); 141 | schedule!(s, chan, SCHEDULE, DROP_S); 142 | let (runnable, task) = async_task::spawn(f, s); 143 | 144 | runnable.run(); 145 | let waker = get_waker(); 146 | waker.wake(); 147 | let runnable = chan.recv().unwrap(); 148 | 149 | Parallel::new() 150 |
.add(|| { 151 | runnable.run(); 152 | drop(get_waker()); 153 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 154 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 155 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 156 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 157 | assert_eq!(chan.len(), 0); 158 | }) 159 | .add(|| { 160 | thread::sleep(ms(200)); 161 | 162 | drop(task); 163 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 164 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 165 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 166 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 167 | assert_eq!(chan.len(), 0); 168 | 169 | thread::sleep(ms(400)); 170 | 171 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 172 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 173 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 174 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 175 | assert_eq!(chan.len(), 0); 176 | }) 177 | .run(); 178 | } 179 | 180 | #[test] 181 | fn wake_and_cancel_during_run() { 182 | future!(f, get_waker, POLL, DROP_F); 183 | schedule!(s, chan, SCHEDULE, DROP_S); 184 | let (runnable, task) = async_task::spawn(f, s); 185 | 186 | runnable.run(); 187 | let waker = get_waker(); 188 | waker.wake_by_ref(); 189 | let runnable = chan.recv().unwrap(); 190 | 191 | Parallel::new() 192 | .add(|| { 193 | runnable.run(); 194 | drop(get_waker()); 195 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 196 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 197 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 198 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 199 | assert_eq!(chan.len(), 0); 200 | }) 201 | .add(|| { 202 | thread::sleep(ms(200)); 203 | 204 | waker.wake(); 205 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 206 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 207 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 208 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 209 | assert_eq!(chan.len(), 0); 210 | 211 | drop(task); 212 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 213 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 214 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 215 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 216 | assert_eq!(chan.len(), 0); 217 | 218 | thread::sleep(ms(400)); 219 | 220 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 221 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 222 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 223 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 224 | assert_eq!(chan.len(), 0); 225 | }) 226 | .run(); 227 | } 228 | 229 | #[test] 230 | fn cancel_and_wake_during_run() { 231 | future!(f, get_waker, POLL, DROP_F); 232 | schedule!(s, chan, SCHEDULE, DROP_S); 233 | let (runnable, task) = async_task::spawn(f, s); 234 | 235 | runnable.run(); 236 | let waker = get_waker(); 237 | waker.wake_by_ref(); 238 | let runnable = chan.recv().unwrap(); 239 | 240 | Parallel::new() 241 | .add(|| { 242 | runnable.run(); 243 | drop(get_waker()); 244 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 245 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 246 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 247 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 248 | assert_eq!(chan.len(), 0); 249 | }) 250 | .add(|| { 251 | thread::sleep(ms(200)); 252 | 253 | drop(task); 254 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 255 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 256 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 257 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 258 | assert_eq!(chan.len(), 0); 259 | 260 | waker.wake(); 261 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 
262 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 263 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 264 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 265 | assert_eq!(chan.len(), 0); 266 | 267 | thread::sleep(ms(400)); 268 | 269 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 270 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 271 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 272 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 273 | assert_eq!(chan.len(), 0); 274 | }) 275 | .run(); 276 | } 277 | 278 | #[test] 279 | fn drop_last_waker() { 280 | future!(f, get_waker, POLL, DROP_F); 281 | schedule!(s, chan, SCHEDULE, DROP_S); 282 | let (runnable, task) = async_task::spawn(f, s); 283 | 284 | runnable.run(); 285 | let waker = get_waker(); 286 | 287 | task.detach(); 288 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 289 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 290 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 291 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 292 | assert_eq!(chan.len(), 0); 293 | 294 | drop(waker); 295 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 296 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 297 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 298 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 299 | assert_eq!(chan.len(), 1); 300 | 301 | chan.recv().unwrap().run(); 302 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 303 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 304 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 305 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 306 | assert_eq!(chan.len(), 0); 307 | } 308 | 309 | #[test] 310 | fn cancel_last_task() { 311 | future!(f, get_waker, POLL, DROP_F); 312 | schedule!(s, chan, SCHEDULE, DROP_S); 313 | let (runnable, task) = async_task::spawn(f, s); 314 | 315 | runnable.run(); 316 | drop(get_waker()); 317 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 318 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 319 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 320 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 321 | assert_eq!(chan.len(), 0); 322 | 323 | drop(task); 324 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 325 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 326 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 327 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 328 | assert_eq!(chan.len(), 1); 329 | 330 | chan.recv().unwrap().run(); 331 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 332 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 333 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 334 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 335 | assert_eq!(chan.len(), 0); 336 | } 337 | 338 | #[test] 339 | fn drop_last_task() { 340 | future!(f, get_waker, POLL, DROP_F); 341 | schedule!(s, chan, SCHEDULE, DROP_S); 342 | let (runnable, task) = async_task::spawn(f, s); 343 | 344 | runnable.run(); 345 | drop(get_waker()); 346 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 347 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 348 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 349 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 350 | assert_eq!(chan.len(), 0); 351 | 352 | task.detach(); 353 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 354 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 355 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 356 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 357 | assert_eq!(chan.len(), 1); 358 | 359 | chan.recv().unwrap().run(); 360 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 361 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 362 | 
assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 363 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 364 | assert_eq!(chan.len(), 0); 365 | } 366 | -------------------------------------------------------------------------------- /tests/waker_ready.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::future::Future; 3 | use std::pin::Pin; 4 | use std::sync::atomic::{AtomicUsize, Ordering}; 5 | use std::task::{Context, Poll}; 6 | use std::thread; 7 | use std::time::Duration; 8 | 9 | use async_task::Runnable; 10 | use atomic_waker::AtomicWaker; 11 | 12 | // Creates a future with event counters. 13 | // 14 | // Usage: `future!(f, get_waker, POLL, DROP)` 15 | // 16 | // The future `f` always sleeps for 200 ms, and returns `Poll::Ready` the second time it is polled. 17 | // When it gets polled, `POLL` is incremented. 18 | // When it gets dropped, `DROP` is incremented. 19 | // 20 | // Every time the future is run, it stores the waker into a global variable. 21 | // This waker can be extracted using the `get_waker()` function. 22 | macro_rules! future { 23 | ($name:pat, $get_waker:pat, $poll:ident, $drop:ident) => { 24 | static $poll: AtomicUsize = AtomicUsize::new(0); 25 | static $drop: AtomicUsize = AtomicUsize::new(0); 26 | static WAKER: AtomicWaker = AtomicWaker::new(); 27 | 28 | let ($name, $get_waker) = { 29 | struct Fut(Cell<bool>, #[allow(dead_code)] Box<i32>); 30 | 31 | impl Future for Fut { 32 | type Output = Box<i32>; 33 | 34 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { 35 | WAKER.register(cx.waker()); 36 | $poll.fetch_add(1, Ordering::SeqCst); 37 | thread::sleep(ms(200)); 38 | 39 | if self.0.get() { 40 | Poll::Ready(Box::new(0)) 41 | } else { 42 | self.0.set(true); 43 | Poll::Pending 44 | } 45 | } 46 | } 47 | 48 | impl Drop for Fut { 49 | fn drop(&mut self) { 50 | $drop.fetch_add(1, Ordering::SeqCst); 51 | } 52 | } 53 | 54 | (Fut(Cell::new(false), Box::new(0)), || WAKER.take().unwrap()) 55 | }; 56 | }; 57 | } 58 | 59 | // Creates a schedule function with event counters. 60 | // 61 | // Usage: `schedule!(s, chan, SCHED, DROP)` 62 | // 63 | // The schedule function `s` pushes the task into `chan`. 64 | // When it gets invoked, `SCHED` is incremented. 65 | // When it gets dropped, `DROP` is incremented. 66 | // 67 | // Receiver `chan` extracts the task when it is scheduled. 68 | macro_rules!
schedule { 69 | ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { 70 | static $drop: AtomicUsize = AtomicUsize::new(0); 71 | static $sched: AtomicUsize = AtomicUsize::new(0); 72 | 73 | let ($name, $chan) = { 74 | let (s, r) = flume::unbounded(); 75 | 76 | struct Guard(#[allow(dead_code)] Box<i32>); 77 | 78 | impl Drop for Guard { 79 | fn drop(&mut self) { 80 | $drop.fetch_add(1, Ordering::SeqCst); 81 | } 82 | } 83 | 84 | let guard = Guard(Box::new(0)); 85 | let sched = move |runnable: Runnable| { 86 | let _ = &guard; 87 | $sched.fetch_add(1, Ordering::SeqCst); 88 | s.send(runnable).unwrap(); 89 | }; 90 | 91 | (sched, r) 92 | }; 93 | }; 94 | } 95 | 96 | fn ms(ms: u64) -> Duration { 97 | Duration::from_millis(ms) 98 | } 99 | 100 | #[test] 101 | fn wake() { 102 | future!(f, get_waker, POLL, DROP_F); 103 | schedule!(s, chan, SCHEDULE, DROP_S); 104 | let (mut runnable, task) = async_task::spawn(f, s); 105 | task.detach(); 106 | 107 | assert!(chan.is_empty()); 108 | 109 | runnable.run(); 110 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 111 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 112 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 113 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 114 | assert_eq!(chan.len(), 0); 115 | 116 | get_waker().wake(); 117 | runnable = chan.recv().unwrap(); 118 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 119 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 120 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 121 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 122 | assert_eq!(chan.len(), 0); 123 | 124 | runnable.run(); 125 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 126 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 127 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 128 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 129 | assert_eq!(chan.len(), 0); 130 | 131 | get_waker().wake(); 132 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 133 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 134 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 135 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 136 | assert_eq!(chan.len(), 0); 137 | } 138 | 139 | #[test] 140 | fn wake_by_ref() { 141 | future!(f, get_waker, POLL, DROP_F); 142 | schedule!(s, chan, SCHEDULE, DROP_S); 143 | let (mut runnable, task) = async_task::spawn(f, s); 144 | task.detach(); 145 | 146 | assert!(chan.is_empty()); 147 | 148 | runnable.run(); 149 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 150 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 151 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 152 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 153 | assert_eq!(chan.len(), 0); 154 | 155 | get_waker().wake_by_ref(); 156 | runnable = chan.recv().unwrap(); 157 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 158 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 159 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 160 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 161 | assert_eq!(chan.len(), 0); 162 | 163 | runnable.run(); 164 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 165 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 166 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 167 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 168 | assert_eq!(chan.len(), 0); 169 | 170 | get_waker().wake_by_ref(); 171 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 172 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 173 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 174 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 175 | assert_eq!(chan.len(), 0); 176 | } 177 | 178 | #[allow(clippy::redundant_clone)] // This is
intentional 179 | #[test] 180 | fn clone() { 181 | future!(f, get_waker, POLL, DROP_F); 182 | schedule!(s, chan, SCHEDULE, DROP_S); 183 | let (mut runnable, task) = async_task::spawn(f, s); 184 | task.detach(); 185 | 186 | runnable.run(); 187 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 188 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 189 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 190 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 191 | assert_eq!(chan.len(), 0); 192 | 193 | let w2 = get_waker().clone(); 194 | let w3 = w2.clone(); 195 | let w4 = w3.clone(); 196 | w4.wake(); 197 | 198 | runnable = chan.recv().unwrap(); 199 | runnable.run(); 200 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 201 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 202 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 203 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 204 | assert_eq!(chan.len(), 0); 205 | 206 | w3.wake(); 207 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 208 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 209 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 210 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 211 | assert_eq!(chan.len(), 0); 212 | 213 | drop(w2); 214 | drop(get_waker()); 215 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 216 | } 217 | 218 | #[test] 219 | fn wake_dropped() { 220 | future!(f, get_waker, POLL, DROP_F); 221 | schedule!(s, chan, SCHEDULE, DROP_S); 222 | let (runnable, task) = async_task::spawn(f, s); 223 | task.detach(); 224 | 225 | runnable.run(); 226 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 227 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 228 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 229 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 230 | assert_eq!(chan.len(), 0); 231 | 232 | let waker = get_waker(); 233 | 234 | waker.wake_by_ref(); 235 | drop(chan.recv().unwrap()); 236 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 237 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 238 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 239 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 240 | assert_eq!(chan.len(), 0); 241 | 242 | waker.wake(); 243 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 244 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 245 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 246 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 247 | assert_eq!(chan.len(), 0); 248 | } 249 | 250 | #[test] 251 | fn wake_completed() { 252 | future!(f, get_waker, POLL, DROP_F); 253 | schedule!(s, chan, SCHEDULE, DROP_S); 254 | let (runnable, task) = async_task::spawn(f, s); 255 | task.detach(); 256 | 257 | runnable.run(); 258 | let waker = get_waker(); 259 | assert_eq!(POLL.load(Ordering::SeqCst), 1); 260 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 0); 261 | assert_eq!(DROP_F.load(Ordering::SeqCst), 0); 262 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 263 | assert_eq!(chan.len(), 0); 264 | 265 | waker.wake(); 266 | chan.recv().unwrap().run(); 267 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 268 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 269 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 270 | assert_eq!(DROP_S.load(Ordering::SeqCst), 0); 271 | assert_eq!(chan.len(), 0); 272 | 273 | get_waker().wake(); 274 | assert_eq!(POLL.load(Ordering::SeqCst), 2); 275 | assert_eq!(SCHEDULE.load(Ordering::SeqCst), 1); 276 | assert_eq!(DROP_F.load(Ordering::SeqCst), 1); 277 | assert_eq!(DROP_S.load(Ordering::SeqCst), 1); 278 | assert_eq!(chan.len(), 0); 279 | } 280 | --------------------------------------------------------------------------------
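The waker test files above all exercise the same public surface: `async_task::spawn` returns a `Runnable` that polls the future and a `Task` handle for its output, and the schedule closure receives a fresh `Runnable` whenever the task is woken. The following stand-alone sketch shows that lifecycle stripped of the event counters, `Parallel` threads, and panic handling used by the tests; it is illustrative only, not part of the repository, and it assumes the same dev-dependencies the tests already use (`flume`, `futures_lite`).

use async_task::Runnable;
use futures_lite::future;

fn main() {
    // Schedule function: a woken task is handed back as a `Runnable` on a channel,
    // mirroring what the `schedule!` macro in the tests does.
    let (sender, receiver) = flume::unbounded::<Runnable>();
    let schedule = move |runnable: Runnable| sender.send(runnable).unwrap();

    // `runnable` polls the future; `task` is the handle that yields its output.
    let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule);

    // First poll. A future that returned `Poll::Pending` and was later woken
    // would come back through `receiver`, so keep running until the channel drains.
    runnable.run();
    while let Ok(runnable) = receiver.try_recv() {
        runnable.run();
    }

    // Block on the `Task` handle to take the output.
    assert_eq!(future::block_on(task), 3);
}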