├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches ├── spawn.rs └── waker_fn.rs ├── examples ├── block.rs ├── panic-propagation.rs ├── panic-result.rs ├── spawn-local.rs ├── spawn-on-thread.rs ├── spawn.rs └── task-id.rs ├── src ├── header.rs ├── join_handle.rs ├── lib.rs ├── raw.rs ├── state.rs ├── task.rs ├── utils.rs └── waker_fn.rs └── tests ├── basic.rs ├── join.rs ├── panic.rs ├── ready.rs ├── waker_fn.rs ├── waker_panic.rs ├── waker_pending.rs └── waker_ready.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | 3 | os: 4 | - linux 5 | - osx 6 | - windows 7 | 8 | rust: nightly 9 | 10 | env: 11 | - RUSTFLAGS="-D warnings" 12 | - CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="valgrind --leak-check=full --error-exitcode=1" 13 | 14 | addons: 15 | apt: 16 | packages: 17 | - valgrind 18 | 19 | before_script: 20 | - rustup component add rustfmt 21 | 22 | script: 23 | - cargo fmt --all -- --check 24 | - cargo check --benches --bins --examples --tests 25 | - cargo test -- --test-threads=1 26 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Version 1.3.1 2 | 3 | - Make `spawn_local` available only on unix and windows. 4 | 5 | # Version 1.3.0 6 | 7 | - Add `waker_fn`. 8 | 9 | # Version 1.2.1 10 | 11 | - Add the `no-std` category to the package. 12 | 13 | # Version 1.2.0 14 | 15 | - The crate is now marked with `#![no_std]`. 16 | - Add `Task::waker` and `JoinHandle::waker`. 17 | - Add `Task::into_raw` and `Task::from_raw`. 18 | 19 | # Version 1.1.1 20 | 21 | - Fix a use-after-free bug where the schedule function is dropped while running. 22 | 23 | # Version 1.1.0 24 | 25 | - If a task is dropped or cancelled outside the `run` method, it gets re-scheduled. 26 | - Add `spawn_local` constructor. 
27 | 28 | # Version 1.0.0 29 | 30 | - Initial release 31 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "async-task" 3 | version = "1.3.1" 4 | authors = ["Stjepan Glavina "] 5 | edition = "2018" 6 | license = "Apache-2.0/MIT" 7 | repository = "https://github.com/async-rs/async-task" 8 | homepage = "https://github.com/async-rs/async-task" 9 | documentation = "https://docs.rs/async-task" 10 | description = "Task abstraction for building executors" 11 | keywords = ["futures", "task", "executor", "spawn"] 12 | categories = ["asynchronous", "concurrency", "no-std"] 13 | readme = "README.md" 14 | 15 | [target.'cfg(unix)'.dependencies] 16 | libc = "0.2.66" 17 | 18 | [target.'cfg(windows)'.dependencies] 19 | winapi = { version = "0.3.8", features = ["processthreadsapi"] } 20 | 21 | [dev-dependencies] 22 | crossbeam = "0.7.3" 23 | crossbeam-utils = "0.7.0" 24 | futures = "0.3.1" 25 | lazy_static = "1.4.0" 26 | pin-utils = "0.1.0-alpha.4" 27 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

# async-task

A task abstraction for building executors.

Badges: Crates.io version · Download · docs.rs docs · chat

API Docs | Releases | Contributing
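## Example

The crate's entry point is `async_task::spawn(future, schedule, tag)`, which returns a runnable `Task` and a `JoinHandle` for the output. Below is a minimal sketch of wiring it to a channel-based queue; it mirrors `examples/spawn.rs` in this repository, and the use of `crossbeam` and `futures` here is illustrative (they are dev-dependencies of the crate, not requirements of the API).

```rust
use std::thread;

use futures::executor;

fn main() {
    // A queue that holds runnable tasks.
    let (sender, receiver) = crossbeam::channel::unbounded::<async_task::Task<()>>();

    // An executor thread that runs tasks as they get scheduled.
    thread::spawn(move || {
        for task in receiver {
            task.run();
        }
    });

    // A task is scheduled by sending it back into the queue.
    let schedule = move |task| sender.send(task).unwrap();

    // Construct a runnable task and a handle that awaits its output.
    let (task, handle) = async_task::spawn(async { 1 + 2 }, schedule, ());

    // Push the task into the queue for the first time.
    task.schedule();

    // Wait for the task to complete and check its output.
    assert_eq!(executor::block_on(handle), Some(3));
}
```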
47 | 48 | ## Installation 49 | 50 | With [cargo add][cargo-add] installed, run: 51 | 52 | ```sh 53 | $ cargo add async-task 54 | ``` 55 | 56 | [cargo-add]: https://github.com/killercup/cargo-edit 57 | 58 | ## Contributing 59 | Want to join us? Check out our ["Contributing" guide][contributing] and take a 60 | look at some of these issues: 61 | 62 | - [Issues labeled "good first issue"][good-first-issue] 63 | - [Issues labeled "help wanted"][help-wanted] 64 | 65 | [contributing]: https://github.com/async-rs/async-task/blob/master/.github/CONTRIBUTING.md 66 | [good-first-issue]: https://github.com/async-rs/async-task/labels/good%20first%20issue 67 | [help-wanted]: https://github.com/async-rs/async-task/labels/help%20wanted 68 | 69 | ## License 70 | 71 | 72 | Licensed under either of Apache License, Version 73 | 2.0 or MIT license at your option. 74 | 75 | 76 |
77 | 78 | 79 | Unless you explicitly state otherwise, any contribution intentionally submitted 80 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 81 | be dual licensed as above, without any additional terms or conditions. 82 | 83 | -------------------------------------------------------------------------------- /benches/spawn.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use futures::channel::oneshot; 6 | use futures::executor; 7 | use futures::future::TryFutureExt; 8 | use test::Bencher; 9 | 10 | #[bench] 11 | fn task_create(b: &mut Bencher) { 12 | b.iter(|| { 13 | async_task::spawn(async {}, drop, ()); 14 | }); 15 | } 16 | 17 | #[bench] 18 | fn task_run(b: &mut Bencher) { 19 | b.iter(|| { 20 | let (task, handle) = async_task::spawn(async {}, drop, ()); 21 | task.run(); 22 | executor::block_on(handle).unwrap(); 23 | }); 24 | } 25 | 26 | #[bench] 27 | fn oneshot_create(b: &mut Bencher) { 28 | b.iter(|| { 29 | let (tx, _rx) = oneshot::channel::<()>(); 30 | let _task = Box::new(async move { tx.send(()).map_err(|_| ()) }); 31 | }); 32 | } 33 | 34 | #[bench] 35 | fn oneshot_run(b: &mut Bencher) { 36 | b.iter(|| { 37 | let (tx, rx) = oneshot::channel::<()>(); 38 | let task = Box::new(async move { tx.send(()).map_err(|_| ()) }); 39 | 40 | let future = task.and_then(|_| rx.map_err(|_| ())); 41 | executor::block_on(future).unwrap(); 42 | }); 43 | } 44 | -------------------------------------------------------------------------------- /benches/waker_fn.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | 3 | extern crate test; 4 | 5 | use std::cell::RefCell; 6 | use std::future::Future; 7 | use std::pin::Pin; 8 | use std::task::{Context, Poll, Waker}; 9 | 10 | use crossbeam::sync::Parker; 11 | use test::Bencher; 12 | 13 | /// Runs a future to completion on the current thread. 14 | fn block_on(future: F) -> F::Output { 15 | // Pin the future on the stack. 16 | pin_utils::pin_mut!(future); 17 | 18 | thread_local! { 19 | // Parker and waker associated with the current thread. 20 | static CACHE: RefCell<(Parker, Waker)> = { 21 | let parker = Parker::new(); 22 | let unparker = parker.unparker().clone(); 23 | let waker = async_task::waker_fn(move || unparker.unpark()); 24 | RefCell::new((parker, waker)) 25 | }; 26 | } 27 | 28 | CACHE.with(|cache| { 29 | // Panic if `block_on()` is called recursively. 30 | let (parker, waker) = &mut *cache.try_borrow_mut().ok().expect("recursive `block_on`"); 31 | 32 | // Create the task context. 33 | let cx = &mut Context::from_waker(&waker); 34 | 35 | // Keep polling the future until completion. 
36 | loop { 37 | match future.as_mut().poll(cx) { 38 | Poll::Ready(output) => return output, 39 | Poll::Pending => parker.park(), 40 | } 41 | } 42 | }) 43 | } 44 | 45 | #[bench] 46 | fn custom_block_on_0_yields(b: &mut Bencher) { 47 | b.iter(|| block_on(Yields(0))); 48 | } 49 | 50 | #[bench] 51 | fn custom_block_on_10_yields(b: &mut Bencher) { 52 | b.iter(|| block_on(Yields(10))); 53 | } 54 | 55 | #[bench] 56 | fn custom_block_on_50_yields(b: &mut Bencher) { 57 | b.iter(|| block_on(Yields(50))); 58 | } 59 | 60 | #[bench] 61 | fn futures_block_on_0_yields(b: &mut Bencher) { 62 | b.iter(|| futures::executor::block_on(Yields(0))); 63 | } 64 | 65 | #[bench] 66 | fn futures_block_on_10_yields(b: &mut Bencher) { 67 | b.iter(|| futures::executor::block_on(Yields(10))); 68 | } 69 | 70 | #[bench] 71 | fn futures_block_on_50_yields(b: &mut Bencher) { 72 | b.iter(|| futures::executor::block_on(Yields(50))); 73 | } 74 | 75 | struct Yields(u32); 76 | 77 | impl Future for Yields { 78 | type Output = (); 79 | 80 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 81 | if self.0 == 0 { 82 | Poll::Ready(()) 83 | } else { 84 | self.0 -= 1; 85 | cx.waker().wake_by_ref(); 86 | Poll::Pending 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /examples/block.rs: -------------------------------------------------------------------------------- 1 | //! A simple implementation of `block_on`. 2 | 3 | use std::cell::RefCell; 4 | use std::future::Future; 5 | use std::task::{Context, Poll, Waker}; 6 | use std::thread; 7 | use std::time::Duration; 8 | 9 | use crossbeam::sync::Parker; 10 | use futures::channel::oneshot; 11 | 12 | /// Runs a future to completion on the current thread. 13 | fn block_on(future: F) -> F::Output { 14 | // Pin the future on the stack. 15 | pin_utils::pin_mut!(future); 16 | 17 | thread_local! { 18 | // Parker and waker associated with the current thread. 19 | static CACHE: RefCell<(Parker, Waker)> = { 20 | let parker = Parker::new(); 21 | let unparker = parker.unparker().clone(); 22 | let waker = async_task::waker_fn(move || unparker.unpark()); 23 | RefCell::new((parker, waker)) 24 | }; 25 | } 26 | 27 | CACHE.with(|cache| { 28 | // Panic if `block_on()` is called recursively. 29 | let (parker, waker) = &mut *cache.try_borrow_mut().ok().expect("recursive block_on()"); 30 | 31 | // Create the task context. 32 | let cx = &mut Context::from_waker(&waker); 33 | 34 | // Keep polling the future until completion. 35 | loop { 36 | match future.as_mut().poll(cx) { 37 | Poll::Ready(output) => return output, 38 | Poll::Pending => parker.park(), 39 | } 40 | } 41 | }) 42 | } 43 | 44 | fn main() { 45 | let (s, r) = oneshot::channel(); 46 | 47 | // Spawn a thread that will send a message through the channel. 48 | thread::spawn(move || { 49 | thread::sleep(Duration::from_secs(1)); 50 | s.send("Hello, world!").unwrap(); 51 | }); 52 | 53 | // Block until the message is received. 54 | let msg = block_on(async { 55 | println!("Awaiting..."); 56 | r.await.unwrap() 57 | }); 58 | 59 | println!("{}", msg); 60 | } 61 | -------------------------------------------------------------------------------- /examples/panic-propagation.rs: -------------------------------------------------------------------------------- 1 | //! A single-threaded executor where join handles propagate panics from tasks. 
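//! The spawned future is wrapped in `AssertUnwindSafe(future).catch_unwind()`, so a panic is
//! captured and the task completes with a `Result` carrying the panic payload instead of
//! unwinding through the executor thread. The `JoinHandle` wrapper below calls
//! `resume_unwind` on an `Err` result, re-raising the panic in the thread awaiting the handle.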
2 | 3 | use std::future::Future; 4 | use std::panic::{resume_unwind, AssertUnwindSafe}; 5 | use std::pin::Pin; 6 | use std::task::{Context, Poll}; 7 | use std::thread; 8 | 9 | use crossbeam::channel::{unbounded, Sender}; 10 | use futures::executor; 11 | use futures::future::FutureExt; 12 | use lazy_static::lazy_static; 13 | 14 | type Task = async_task::Task<()>; 15 | 16 | /// Spawns a future on the executor. 17 | fn spawn(future: F) -> JoinHandle 18 | where 19 | F: Future + Send + 'static, 20 | R: Send + 'static, 21 | { 22 | lazy_static! { 23 | // A channel that holds scheduled tasks. 24 | static ref QUEUE: Sender = { 25 | let (sender, receiver) = unbounded::(); 26 | 27 | // Start the executor thread. 28 | thread::spawn(|| { 29 | for task in receiver { 30 | // No need for `catch_unwind()` here because panics are already caught. 31 | task.run(); 32 | } 33 | }); 34 | 35 | sender 36 | }; 37 | } 38 | 39 | // Create a future that catches panics within itself. 40 | let future = AssertUnwindSafe(future).catch_unwind(); 41 | 42 | // Create a task that is scheduled by sending itself into the channel. 43 | let schedule = |t| QUEUE.send(t).unwrap(); 44 | let (task, handle) = async_task::spawn(future, schedule, ()); 45 | 46 | // Schedule the task by sending it into the channel. 47 | task.schedule(); 48 | 49 | // Wrap the handle into one that propagates panics. 50 | JoinHandle(handle) 51 | } 52 | 53 | /// A join handle that propagates panics inside the task. 54 | struct JoinHandle(async_task::JoinHandle, ()>); 55 | 56 | impl Future for JoinHandle { 57 | type Output = Option; 58 | 59 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 60 | match Pin::new(&mut self.0).poll(cx) { 61 | Poll::Pending => Poll::Pending, 62 | Poll::Ready(None) => Poll::Ready(None), 63 | Poll::Ready(Some(Ok(val))) => Poll::Ready(Some(val)), 64 | Poll::Ready(Some(Err(err))) => resume_unwind(err), 65 | } 66 | } 67 | } 68 | 69 | fn main() { 70 | // Spawn a future that panics and block on it. 71 | let handle = spawn(async { 72 | panic!("Ooops!"); 73 | }); 74 | executor::block_on(handle); 75 | } 76 | -------------------------------------------------------------------------------- /examples/panic-result.rs: -------------------------------------------------------------------------------- 1 | //! A single-threaded executor where join handles catch panics inside tasks. 2 | 3 | use std::future::Future; 4 | use std::panic::AssertUnwindSafe; 5 | use std::thread; 6 | 7 | use crossbeam::channel::{unbounded, Sender}; 8 | use futures::executor; 9 | use futures::future::FutureExt; 10 | use lazy_static::lazy_static; 11 | 12 | type Task = async_task::Task<()>; 13 | type JoinHandle = async_task::JoinHandle; 14 | 15 | /// Spawns a future on the executor. 16 | fn spawn(future: F) -> JoinHandle> 17 | where 18 | F: Future + Send + 'static, 19 | R: Send + 'static, 20 | { 21 | lazy_static! { 22 | // A channel that holds scheduled tasks. 23 | static ref QUEUE: Sender = { 24 | let (sender, receiver) = unbounded::(); 25 | 26 | // Start the executor thread. 27 | thread::spawn(|| { 28 | for task in receiver { 29 | // No need for `catch_unwind()` here because panics are already caught. 30 | task.run(); 31 | } 32 | }); 33 | 34 | sender 35 | }; 36 | } 37 | 38 | // Create a future that catches panics within itself. 39 | let future = AssertUnwindSafe(future).catch_unwind(); 40 | 41 | // Create a task that is scheduled by sending itself into the channel. 
42 | let schedule = |t| QUEUE.send(t).unwrap(); 43 | let (task, handle) = async_task::spawn(future, schedule, ()); 44 | 45 | // Schedule the task by sending it into the channel. 46 | task.schedule(); 47 | 48 | handle 49 | } 50 | 51 | fn main() { 52 | // Spawn a future that completes succesfully. 53 | let handle = spawn(async { 54 | println!("Hello, world!"); 55 | }); 56 | 57 | // Block on the future and report its result. 58 | match executor::block_on(handle) { 59 | None => println!("The task was cancelled."), 60 | Some(Ok(val)) => println!("The task completed with {:?}", val), 61 | Some(Err(_)) => println!("The task has panicked"), 62 | } 63 | 64 | // Spawn a future that panics. 65 | let handle = spawn(async { 66 | panic!("Ooops!"); 67 | }); 68 | 69 | // Block on the future and report its result. 70 | match executor::block_on(handle) { 71 | None => println!("The task was cancelled."), 72 | Some(Ok(val)) => println!("The task completed with {:?}", val), 73 | Some(Err(_)) => println!("The task has panicked"), 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /examples/spawn-local.rs: -------------------------------------------------------------------------------- 1 | //! A simple single-threaded executor that can spawn non-`Send` futures. 2 | 3 | use std::cell::Cell; 4 | use std::future::Future; 5 | use std::rc::Rc; 6 | 7 | use crossbeam::channel::{unbounded, Receiver, Sender}; 8 | 9 | type Task = async_task::Task<()>; 10 | type JoinHandle = async_task::JoinHandle; 11 | 12 | thread_local! { 13 | // A channel that holds scheduled tasks. 14 | static QUEUE: (Sender, Receiver) = unbounded(); 15 | } 16 | 17 | /// Spawns a future on the executor. 18 | fn spawn(future: F) -> JoinHandle 19 | where 20 | F: Future + 'static, 21 | R: 'static, 22 | { 23 | // Create a task that is scheduled by sending itself into the channel. 24 | let schedule = |t| QUEUE.with(|(s, _)| s.send(t).unwrap()); 25 | let (task, handle) = async_task::spawn_local(future, schedule, ()); 26 | 27 | // Schedule the task by sending it into the queue. 28 | task.schedule(); 29 | 30 | handle 31 | } 32 | 33 | /// Runs a future to completion. 34 | fn run(future: F) -> R 35 | where 36 | F: Future + 'static, 37 | R: 'static, 38 | { 39 | // Spawn a task that sends its result through a channel. 40 | let (s, r) = unbounded(); 41 | spawn(async move { s.send(future.await).unwrap() }); 42 | 43 | loop { 44 | // If the original task has completed, return its result. 45 | if let Ok(val) = r.try_recv() { 46 | return val; 47 | } 48 | 49 | // Otherwise, take a task from the queue and run it. 50 | QUEUE.with(|(_, r)| r.recv().unwrap().run()); 51 | } 52 | } 53 | 54 | fn main() { 55 | let val = Rc::new(Cell::new(0)); 56 | 57 | // Run a future that increments a non-`Send` value. 58 | run({ 59 | let val = val.clone(); 60 | async move { 61 | // Spawn a future that increments the value. 62 | let handle = spawn({ 63 | let val = val.clone(); 64 | async move { 65 | val.set(dbg!(val.get()) + 1); 66 | } 67 | }); 68 | 69 | val.set(dbg!(val.get()) + 1); 70 | handle.await; 71 | } 72 | }); 73 | 74 | // The value should be 2 at the end of the program. 75 | dbg!(val.get()); 76 | } 77 | -------------------------------------------------------------------------------- /examples/spawn-on-thread.rs: -------------------------------------------------------------------------------- 1 | //! A function that runs a future to completion on a dedicated thread. 
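//! The sender side of the task queue is kept alive by the future itself (through an `Arc`),
//! while the schedule function holds only a `Weak` reference. When the future completes, the
//! sender is dropped, the channel disconnects, and the dedicated executor thread's loop ends.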
2 | 3 | use std::future::Future; 4 | use std::sync::Arc; 5 | use std::thread; 6 | 7 | use crossbeam::channel; 8 | use futures::executor; 9 | 10 | type JoinHandle = async_task::JoinHandle; 11 | 12 | /// Spawns a future on a new dedicated thread. 13 | /// 14 | /// The returned handle can be used to await the output of the future. 15 | fn spawn_on_thread(future: F) -> JoinHandle 16 | where 17 | F: Future + Send + 'static, 18 | R: Send + 'static, 19 | { 20 | // Create a channel that holds the task when it is scheduled for running. 21 | let (sender, receiver) = channel::unbounded(); 22 | let sender = Arc::new(sender); 23 | let s = Arc::downgrade(&sender); 24 | 25 | // Wrap the future into one that disconnects the channel on completion. 26 | let future = async move { 27 | // When the inner future completes, the sender gets dropped and disconnects the channel. 28 | let _sender = sender; 29 | future.await 30 | }; 31 | 32 | // Create a task that is scheduled by sending itself into the channel. 33 | let schedule = move |t| s.upgrade().unwrap().send(t).unwrap(); 34 | let (task, handle) = async_task::spawn(future, schedule, ()); 35 | 36 | // Schedule the task by sending it into the channel. 37 | task.schedule(); 38 | 39 | // Spawn a thread running the task to completion. 40 | thread::spawn(move || { 41 | // Keep taking the task from the channel and running it until completion. 42 | for task in receiver { 43 | task.run(); 44 | } 45 | }); 46 | 47 | handle 48 | } 49 | 50 | fn main() { 51 | // Spawn a future on a dedicated thread. 52 | executor::block_on(spawn_on_thread(async { 53 | println!("Hello, world!"); 54 | })); 55 | } 56 | -------------------------------------------------------------------------------- /examples/spawn.rs: -------------------------------------------------------------------------------- 1 | //! A simple single-threaded executor. 2 | 3 | use std::future::Future; 4 | use std::panic::catch_unwind; 5 | use std::thread; 6 | 7 | use crossbeam::channel::{unbounded, Sender}; 8 | use futures::executor; 9 | use lazy_static::lazy_static; 10 | 11 | type Task = async_task::Task<()>; 12 | type JoinHandle = async_task::JoinHandle; 13 | 14 | /// Spawns a future on the executor. 15 | fn spawn(future: F) -> JoinHandle 16 | where 17 | F: Future + Send + 'static, 18 | R: Send + 'static, 19 | { 20 | lazy_static! { 21 | // A channel that holds scheduled tasks. 22 | static ref QUEUE: Sender = { 23 | let (sender, receiver) = unbounded::(); 24 | 25 | // Start the executor thread. 26 | thread::spawn(|| { 27 | for task in receiver { 28 | // Ignore panics for simplicity. 29 | let _ignore_panic = catch_unwind(|| task.run()); 30 | } 31 | }); 32 | 33 | sender 34 | }; 35 | } 36 | 37 | // Create a task that is scheduled by sending itself into the channel. 38 | let schedule = |t| QUEUE.send(t).unwrap(); 39 | let (task, handle) = async_task::spawn(future, schedule, ()); 40 | 41 | // Schedule the task by sending it into the channel. 42 | task.schedule(); 43 | 44 | handle 45 | } 46 | 47 | fn main() { 48 | // Spawn a future and await its result. 49 | let handle = spawn(async { 50 | println!("Hello, world!"); 51 | }); 52 | executor::block_on(handle); 53 | } 54 | -------------------------------------------------------------------------------- /examples/task-id.rs: -------------------------------------------------------------------------------- 1 | //! An executor that assigns an ID to every spawned task. 
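//! Each task is spawned with a `TaskId` tag allocated from a global counter. Before running a
//! task, the executor thread copies the tag out of the task via `task.tag()` into a
//! thread-local, so `task_id()` can be queried from inside the running future.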
2 | 3 | use std::cell::Cell; 4 | use std::future::Future; 5 | use std::panic::catch_unwind; 6 | use std::thread; 7 | 8 | use crossbeam::atomic::AtomicCell; 9 | use crossbeam::channel::{unbounded, Sender}; 10 | use futures::executor; 11 | use lazy_static::lazy_static; 12 | 13 | #[derive(Clone, Copy, Debug)] 14 | struct TaskId(usize); 15 | 16 | type Task = async_task::Task; 17 | type JoinHandle = async_task::JoinHandle; 18 | 19 | thread_local! { 20 | /// The ID of the current task. 21 | static TASK_ID: Cell> = Cell::new(None); 22 | } 23 | 24 | /// Returns the ID of the currently executing task. 25 | /// 26 | /// Returns `None` if called outside the runtime. 27 | fn task_id() -> Option { 28 | TASK_ID.with(|id| id.get()) 29 | } 30 | 31 | /// Spawns a future on the executor. 32 | fn spawn(future: F) -> JoinHandle 33 | where 34 | F: Future + Send + 'static, 35 | R: Send + 'static, 36 | { 37 | lazy_static! { 38 | // A channel that holds scheduled tasks. 39 | static ref QUEUE: Sender = { 40 | let (sender, receiver) = unbounded::(); 41 | 42 | // Start the executor thread. 43 | thread::spawn(|| { 44 | TASK_ID.with(|id| { 45 | for task in receiver { 46 | // Store the task ID into the thread-local before running. 47 | id.set(Some(*task.tag())); 48 | 49 | // Ignore panics for simplicity. 50 | let _ignore_panic = catch_unwind(|| task.run()); 51 | } 52 | }) 53 | }); 54 | 55 | sender 56 | }; 57 | 58 | // A counter that assigns IDs to spawned tasks. 59 | static ref COUNTER: AtomicCell = AtomicCell::new(0); 60 | } 61 | 62 | // Reserve an ID for the new task. 63 | let id = TaskId(COUNTER.fetch_add(1)); 64 | 65 | // Create a task that is scheduled by sending itself into the channel. 66 | let schedule = |task| QUEUE.send(task).unwrap(); 67 | let (task, handle) = async_task::spawn(future, schedule, id); 68 | 69 | // Schedule the task by sending it into the channel. 70 | task.schedule(); 71 | 72 | handle 73 | } 74 | 75 | fn main() { 76 | let mut handles = vec![]; 77 | 78 | // Spawn a bunch of tasks. 79 | for _ in 0..10 { 80 | handles.push(spawn(async move { 81 | println!("Hello from task with {:?}", task_id()); 82 | })); 83 | } 84 | 85 | // Wait for the tasks to finish. 86 | for handle in handles { 87 | executor::block_on(handle); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/header.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::Layout; 2 | use core::cell::UnsafeCell; 3 | use core::fmt; 4 | use core::sync::atomic::{AtomicUsize, Ordering}; 5 | use core::task::Waker; 6 | 7 | use crate::raw::TaskVTable; 8 | use crate::state::*; 9 | use crate::utils::{abort_on_panic, extend}; 10 | 11 | /// The header of a task. 12 | /// 13 | /// This header is stored right at the beginning of every heap-allocated task. 14 | pub(crate) struct Header { 15 | /// Current state of the task. 16 | /// 17 | /// Contains flags representing the current state and the reference count. 18 | pub(crate) state: AtomicUsize, 19 | 20 | /// The task that is blocked on the `JoinHandle`. 21 | /// 22 | /// This waker needs to be woken up once the task completes or is closed. 23 | pub(crate) awaiter: UnsafeCell>, 24 | 25 | /// The virtual table. 26 | /// 27 | /// In addition to the actual waker virtual table, it also contains pointers to several other 28 | /// methods necessary for bookkeeping the heap-allocated task. 29 | pub(crate) vtable: &'static TaskVTable, 30 | } 31 | 32 | impl Header { 33 | /// Cancels the task. 
34 | /// 35 | /// This method will mark the task as closed and notify the awaiter, but it won't reschedule 36 | /// the task if it's not completed. 37 | pub(crate) fn cancel(&self) { 38 | let mut state = self.state.load(Ordering::Acquire); 39 | 40 | loop { 41 | // If the task has been completed or closed, it can't be cancelled. 42 | if state & (COMPLETED | CLOSED) != 0 { 43 | break; 44 | } 45 | 46 | // Mark the task as closed. 47 | match self.state.compare_exchange_weak( 48 | state, 49 | state | CLOSED, 50 | Ordering::AcqRel, 51 | Ordering::Acquire, 52 | ) { 53 | Ok(_) => { 54 | // Notify the awaiter that the task has been closed. 55 | if state & AWAITER != 0 { 56 | self.notify(None); 57 | } 58 | 59 | break; 60 | } 61 | Err(s) => state = s, 62 | } 63 | } 64 | } 65 | 66 | /// Notifies the awaiter blocked on this task. 67 | /// 68 | /// If the awaiter is the same as the current waker, it will not be notified. 69 | #[inline] 70 | pub(crate) fn notify(&self, current: Option<&Waker>) { 71 | // Mark the awaiter as being notified. 72 | let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel); 73 | 74 | // If the awaiter was not being notified nor registered... 75 | if state & (NOTIFYING | REGISTERING) == 0 { 76 | // Take the waker out. 77 | let waker = unsafe { (*self.awaiter.get()).take() }; 78 | 79 | // Mark the state as not being notified anymore nor containing an awaiter. 80 | self.state 81 | .fetch_and(!NOTIFYING & !AWAITER, Ordering::Release); 82 | 83 | if let Some(w) = waker { 84 | // We need a safeguard against panics because waking can panic. 85 | abort_on_panic(|| match current { 86 | None => w.wake(), 87 | Some(c) if !w.will_wake(c) => w.wake(), 88 | Some(_) => {} 89 | }); 90 | } 91 | } 92 | } 93 | 94 | /// Registers a new awaiter blocked on this task. 95 | /// 96 | /// This method is called when `JoinHandle` is polled and the task has not completed. 97 | #[inline] 98 | pub(crate) fn register(&self, waker: &Waker) { 99 | // Load the state and synchronize with it. 100 | let mut state = self.state.fetch_or(0, Ordering::Acquire); 101 | 102 | loop { 103 | // There can't be two concurrent registrations because `JoinHandle` can only be polled 104 | // by a unique pinned reference. 105 | debug_assert!(state & REGISTERING == 0); 106 | 107 | // If we're in the notifying state at this moment, just wake and return without 108 | // registering. 109 | if state & NOTIFYING != 0 { 110 | waker.wake_by_ref(); 111 | return; 112 | } 113 | 114 | // Mark the state to let other threads know we're registering a new awaiter. 115 | match self.state.compare_exchange_weak( 116 | state, 117 | state | REGISTERING, 118 | Ordering::AcqRel, 119 | Ordering::Acquire, 120 | ) { 121 | Ok(_) => { 122 | state |= REGISTERING; 123 | break; 124 | } 125 | Err(s) => state = s, 126 | } 127 | } 128 | 129 | // Put the waker into the awaiter field. 130 | unsafe { 131 | abort_on_panic(|| (*self.awaiter.get()) = Some(waker.clone())); 132 | } 133 | 134 | // This variable will contain the newly registered waker if a notification comes in before 135 | // we complete registration. 136 | let mut waker = None; 137 | 138 | loop { 139 | // If there was a notification, take the waker out of the awaiter field. 140 | if state & NOTIFYING != 0 { 141 | if let Some(w) = unsafe { (*self.awaiter.get()).take() } { 142 | waker = Some(w); 143 | } 144 | } 145 | 146 | // The new state is not being notified nor registered, but there might or might not be 147 | // an awaiter depending on whether there was a concurrent notification. 
148 | let new = if waker.is_none() { 149 | (state & !NOTIFYING & !REGISTERING) | AWAITER 150 | } else { 151 | state & !NOTIFYING & !REGISTERING & !AWAITER 152 | }; 153 | 154 | match self 155 | .state 156 | .compare_exchange_weak(state, new, Ordering::AcqRel, Ordering::Acquire) 157 | { 158 | Ok(_) => break, 159 | Err(s) => state = s, 160 | } 161 | } 162 | 163 | // If there was a notification during registration, wake the awaiter now. 164 | if let Some(w) = waker { 165 | abort_on_panic(|| w.wake()); 166 | } 167 | } 168 | 169 | /// Returns the offset at which the tag of type `T` is stored. 170 | #[inline] 171 | pub(crate) fn offset_tag() -> usize { 172 | let layout_header = Layout::new::
(); 173 | let layout_t = Layout::new::(); 174 | let (_, offset_t) = extend(layout_header, layout_t); 175 | offset_t 176 | } 177 | } 178 | 179 | impl fmt::Debug for Header { 180 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 181 | let state = self.state.load(Ordering::SeqCst); 182 | 183 | f.debug_struct("Header") 184 | .field("scheduled", &(state & SCHEDULED != 0)) 185 | .field("running", &(state & RUNNING != 0)) 186 | .field("completed", &(state & COMPLETED != 0)) 187 | .field("closed", &(state & CLOSED != 0)) 188 | .field("awaiter", &(state & AWAITER != 0)) 189 | .field("handle", &(state & HANDLE != 0)) 190 | .field("ref_count", &(state / REFERENCE)) 191 | .finish() 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /src/join_handle.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::future::Future; 3 | use core::marker::{PhantomData, Unpin}; 4 | use core::pin::Pin; 5 | use core::ptr::NonNull; 6 | use core::sync::atomic::Ordering; 7 | use core::task::{Context, Poll, Waker}; 8 | 9 | use crate::header::Header; 10 | use crate::state::*; 11 | use crate::utils::abort_on_panic; 12 | 13 | /// A handle that awaits the result of a task. 14 | /// 15 | /// This type is a future that resolves to an `Option` where: 16 | /// 17 | /// * `None` indicates the task has panicked or was cancelled. 18 | /// * `Some(result)` indicates the task has completed with `result` of type `R`. 19 | pub struct JoinHandle { 20 | /// A raw task pointer. 21 | pub(crate) raw_task: NonNull<()>, 22 | 23 | /// A marker capturing generic types `R` and `T`. 24 | pub(crate) _marker: PhantomData<(R, T)>, 25 | } 26 | 27 | unsafe impl Send for JoinHandle {} 28 | unsafe impl Sync for JoinHandle {} 29 | 30 | impl Unpin for JoinHandle {} 31 | 32 | impl JoinHandle { 33 | /// Cancels the task. 34 | /// 35 | /// If the task has already completed, calling this method will have no effect. 36 | /// 37 | /// When a task is cancelled, its future will not be polled again. 38 | pub fn cancel(&self) { 39 | let ptr = self.raw_task.as_ptr(); 40 | let header = ptr as *const Header; 41 | 42 | unsafe { 43 | let mut state = (*header).state.load(Ordering::Acquire); 44 | 45 | loop { 46 | // If the task has been completed or closed, it can't be cancelled. 47 | if state & (COMPLETED | CLOSED) != 0 { 48 | break; 49 | } 50 | 51 | // If the task is not scheduled nor running, we'll need to schedule it. 52 | let new = if state & (SCHEDULED | RUNNING) == 0 { 53 | (state | SCHEDULED | CLOSED) + REFERENCE 54 | } else { 55 | state | CLOSED 56 | }; 57 | 58 | // Mark the task as closed. 59 | match (*header).state.compare_exchange_weak( 60 | state, 61 | new, 62 | Ordering::AcqRel, 63 | Ordering::Acquire, 64 | ) { 65 | Ok(_) => { 66 | // If the task is not scheduled nor running, schedule it one more time so 67 | // that its future gets dropped by the executor. 68 | if state & (SCHEDULED | RUNNING) == 0 { 69 | ((*header).vtable.schedule)(ptr); 70 | } 71 | 72 | // Notify the awaiter that the task has been closed. 73 | if state & AWAITER != 0 { 74 | (*header).notify(None); 75 | } 76 | 77 | break; 78 | } 79 | Err(s) => state = s, 80 | } 81 | } 82 | } 83 | } 84 | 85 | /// Returns a reference to the tag stored inside the task. 
86 | pub fn tag(&self) -> &T { 87 | let offset = Header::offset_tag::(); 88 | let ptr = self.raw_task.as_ptr(); 89 | 90 | unsafe { 91 | let raw = (ptr as *mut u8).add(offset) as *const T; 92 | &*raw 93 | } 94 | } 95 | 96 | /// Returns a waker associated with the task. 97 | pub fn waker(&self) -> Waker { 98 | let ptr = self.raw_task.as_ptr(); 99 | let header = ptr as *const Header; 100 | 101 | unsafe { 102 | let raw_waker = ((*header).vtable.clone_waker)(ptr); 103 | Waker::from_raw(raw_waker) 104 | } 105 | } 106 | } 107 | 108 | impl Drop for JoinHandle { 109 | fn drop(&mut self) { 110 | let ptr = self.raw_task.as_ptr(); 111 | let header = ptr as *const Header; 112 | 113 | // A place where the output will be stored in case it needs to be dropped. 114 | let mut output = None; 115 | 116 | unsafe { 117 | // Optimistically assume the `JoinHandle` is being dropped just after creating the 118 | // task. This is a common case so if the handle is not used, the overhead of it is only 119 | // one compare-exchange operation. 120 | if let Err(mut state) = (*header).state.compare_exchange_weak( 121 | SCHEDULED | HANDLE | REFERENCE, 122 | SCHEDULED | REFERENCE, 123 | Ordering::AcqRel, 124 | Ordering::Acquire, 125 | ) { 126 | loop { 127 | // If the task has been completed but not yet closed, that means its output 128 | // must be dropped. 129 | if state & COMPLETED != 0 && state & CLOSED == 0 { 130 | // Mark the task as closed in order to grab its output. 131 | match (*header).state.compare_exchange_weak( 132 | state, 133 | state | CLOSED, 134 | Ordering::AcqRel, 135 | Ordering::Acquire, 136 | ) { 137 | Ok(_) => { 138 | // Read the output. 139 | output = 140 | Some((((*header).vtable.get_output)(ptr) as *mut R).read()); 141 | 142 | // Update the state variable because we're continuing the loop. 143 | state |= CLOSED; 144 | } 145 | Err(s) => state = s, 146 | } 147 | } else { 148 | // If this is the last reference to the task and it's not closed, then 149 | // close it and schedule one more time so that its future gets dropped by 150 | // the executor. 151 | let new = if state & (!(REFERENCE - 1) | CLOSED) == 0 { 152 | SCHEDULED | CLOSED | REFERENCE 153 | } else { 154 | state & !HANDLE 155 | }; 156 | 157 | // Unset the handle flag. 158 | match (*header).state.compare_exchange_weak( 159 | state, 160 | new, 161 | Ordering::AcqRel, 162 | Ordering::Acquire, 163 | ) { 164 | Ok(_) => { 165 | // If this is the last reference to the task, we need to either 166 | // schedule dropping its future or destroy it. 167 | if state & !(REFERENCE - 1) == 0 { 168 | if state & CLOSED == 0 { 169 | ((*header).vtable.schedule)(ptr); 170 | } else { 171 | ((*header).vtable.destroy)(ptr); 172 | } 173 | } 174 | 175 | break; 176 | } 177 | Err(s) => state = s, 178 | } 179 | } 180 | } 181 | } 182 | } 183 | 184 | // Drop the output if it was taken out of the task. 185 | drop(output); 186 | } 187 | } 188 | 189 | impl Future for JoinHandle { 190 | type Output = Option; 191 | 192 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 193 | let ptr = self.raw_task.as_ptr(); 194 | let header = ptr as *const Header; 195 | 196 | unsafe { 197 | let mut state = (*header).state.load(Ordering::Acquire); 198 | 199 | loop { 200 | // If the task has been closed, notify the awaiter and return `None`. 201 | if state & CLOSED != 0 { 202 | // Even though the awaiter is most likely the current task, it could also be 203 | // another task. 
204 | (*header).notify(Some(cx.waker())); 205 | return Poll::Ready(None); 206 | } 207 | 208 | // If the task is not completed, register the current task. 209 | if state & COMPLETED == 0 { 210 | // Replace the waker with one associated with the current task. We need a 211 | // safeguard against panics because dropping the previous waker can panic. 212 | abort_on_panic(|| { 213 | (*header).register(cx.waker()); 214 | }); 215 | 216 | // Reload the state after registering. It is possible that the task became 217 | // completed or closed just before registration so we need to check for that. 218 | state = (*header).state.load(Ordering::Acquire); 219 | 220 | // If the task has been closed, return `None`. We do not need to notify the 221 | // awaiter here, since we have replaced the waker above, and the executor can 222 | // only set it back to `None`. 223 | if state & CLOSED != 0 { 224 | return Poll::Ready(None); 225 | } 226 | 227 | // If the task is still not completed, we're blocked on it. 228 | if state & COMPLETED == 0 { 229 | return Poll::Pending; 230 | } 231 | } 232 | 233 | // Since the task is now completed, mark it as closed in order to grab its output. 234 | match (*header).state.compare_exchange( 235 | state, 236 | state | CLOSED, 237 | Ordering::AcqRel, 238 | Ordering::Acquire, 239 | ) { 240 | Ok(_) => { 241 | // Notify the awaiter. Even though the awaiter is most likely the current 242 | // task, it could also be another task. 243 | if state & AWAITER != 0 { 244 | (*header).notify(Some(cx.waker())); 245 | } 246 | 247 | // Take the output from the task. 248 | let output = ((*header).vtable.get_output)(ptr) as *mut R; 249 | return Poll::Ready(Some(output.read())); 250 | } 251 | Err(s) => state = s, 252 | } 253 | } 254 | } 255 | } 256 | } 257 | 258 | impl fmt::Debug for JoinHandle { 259 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 260 | let ptr = self.raw_task.as_ptr(); 261 | let header = ptr as *const Header; 262 | 263 | f.debug_struct("JoinHandle") 264 | .field("header", unsafe { &(*header) }) 265 | .finish() 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Task abstraction for building executors. 2 | //! 3 | //! # Spawning 4 | //! 5 | //! To spawn a future onto an executor, we first need to allocate it on the heap and keep some 6 | //! state alongside it. The state indicates whether the future is ready for polling, waiting to be 7 | //! woken up, or completed. Such a future is called a *task*. 8 | //! 9 | //! All executors have some kind of queue that holds runnable tasks: 10 | //! 11 | //! ``` 12 | //! let (sender, receiver) = crossbeam::channel::unbounded(); 13 | //! # 14 | //! # // A future that will get spawned. 15 | //! # let future = async { 1 + 2 }; 16 | //! # 17 | //! # // A function that schedules the task when it gets woken up. 18 | //! # let schedule = move |task| sender.send(task).unwrap(); 19 | //! # 20 | //! # // Construct a task. 21 | //! # let (task, handle) = async_task::spawn(future, schedule, ()); 22 | //! ``` 23 | //! 24 | //! A task is constructed using either [`spawn`] or [`spawn_local`]: 25 | //! 26 | //! ``` 27 | //! # let (sender, receiver) = crossbeam::channel::unbounded(); 28 | //! # 29 | //! // A future that will be spawned. 30 | //! let future = async { 1 + 2 }; 31 | //! 32 | //! // A function that schedules the task when it gets woken up. 33 | //! 
let schedule = move |task| sender.send(task).unwrap(); 34 | //! 35 | //! // Construct a task. 36 | //! let (task, handle) = async_task::spawn(future, schedule, ()); 37 | //! 38 | //! // Push the task into the queue by invoking its schedule function. 39 | //! task.schedule(); 40 | //! ``` 41 | //! 42 | //! The last argument to the [`spawn`] function is a *tag*, an arbitrary piece of data associated 43 | //! with the task. In most executors, this is typically a task identifier or task-local storage. 44 | //! 45 | //! The function returns a runnable [`Task`] and a [`JoinHandle`] that can await the result. 46 | //! 47 | //! # Execution 48 | //! 49 | //! Task executors have some kind of main loop that drives tasks to completion. That means taking 50 | //! runnable tasks out of the queue and running each one in order: 51 | //! 52 | //! ```no_run 53 | //! # let (sender, receiver) = crossbeam::channel::unbounded(); 54 | //! # 55 | //! # // A future that will get spawned. 56 | //! # let future = async { 1 + 2 }; 57 | //! # 58 | //! # // A function that schedules the task when it gets woken up. 59 | //! # let schedule = move |task| sender.send(task).unwrap(); 60 | //! # 61 | //! # // Construct a task. 62 | //! # let (task, handle) = async_task::spawn(future, schedule, ()); 63 | //! # 64 | //! # // Push the task into the queue by invoking its schedule function. 65 | //! # task.schedule(); 66 | //! # 67 | //! for task in receiver { 68 | //! task.run(); 69 | //! } 70 | //! ``` 71 | //! 72 | //! When a task is run, its future gets polled. If polling does not complete the task, that means 73 | //! it's waiting for another future and needs to go to sleep. When woken up, its schedule function 74 | //! will be invoked, pushing it back into the queue so that it can be run again. 75 | //! 76 | //! # Cancellation 77 | //! 78 | //! Both [`Task`] and [`JoinHandle`] have methods that cancel the task. When cancelled, the task's 79 | //! future will not be polled again and will get dropped instead. 80 | //! 81 | //! If cancelled by the [`Task`] instance, the task is destroyed immediately. If cancelled by the 82 | //! [`JoinHandle`] instance, it will be scheduled one more time and the next attempt to run it will 83 | //! simply destroy it. 84 | //! 85 | //! # Performance 86 | //! 87 | //! Task construction incurs a single allocation that holds its state, the schedule function, and 88 | //! the future or the result of the future if completed. 89 | //! 90 | //! The layout of a task is equivalent to 4 `usize`s followed by the schedule function, and then by 91 | //! a union of the future and its output. 92 | //! 93 | //! # Waking 94 | //! 95 | //! The handy [`waker_fn`] constructor converts any function into a [`Waker`]. Every time it is 96 | //! woken, the function gets called: 97 | //! 98 | //! ``` 99 | //! let waker = async_task::waker_fn(|| println!("Wake!")); 100 | //! 101 | //! // Prints "Wake!" twice. 102 | //! waker.wake_by_ref(); 103 | //! waker.wake_by_ref(); 104 | //! ``` 105 | //! 106 | //! This is useful for implementing single-future executors like [`block_on`]. 107 | //! 108 | //! [`spawn`]: fn.spawn.html 109 | //! [`spawn_local`]: fn.spawn_local.html 110 | //! [`waker_fn`]: fn.waker_fn.html 111 | //! [`Task`]: struct.Task.html 112 | //! [`JoinHandle`]: struct.JoinHandle.html 113 | //! [`Waker`]: https://doc.rust-lang.org/std/task/struct.Waker.html 114 | //! 
[`block_on`]: https://github.com/async-rs/async-task/blob/master/examples/block.rs 115 | 116 | #![no_std] 117 | #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] 118 | #![doc(test(attr(deny(rust_2018_idioms, warnings))))] 119 | #![doc(test(attr(allow(unused_extern_crates, unused_variables))))] 120 | 121 | extern crate alloc; 122 | 123 | mod header; 124 | mod join_handle; 125 | mod raw; 126 | mod state; 127 | mod task; 128 | mod utils; 129 | mod waker_fn; 130 | 131 | pub use crate::join_handle::JoinHandle; 132 | pub use crate::task::{spawn, Task}; 133 | pub use crate::waker_fn::waker_fn; 134 | 135 | #[cfg(any(unix, windows))] 136 | pub use crate::task::spawn_local; 137 | -------------------------------------------------------------------------------- /src/raw.rs: -------------------------------------------------------------------------------- 1 | use alloc::alloc::Layout; 2 | use core::cell::UnsafeCell; 3 | use core::future::Future; 4 | use core::marker::PhantomData; 5 | use core::mem::{self, ManuallyDrop}; 6 | use core::pin::Pin; 7 | use core::ptr::NonNull; 8 | use core::sync::atomic::{AtomicUsize, Ordering}; 9 | use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; 10 | 11 | use crate::header::Header; 12 | use crate::state::*; 13 | use crate::utils::{abort, abort_on_panic, extend}; 14 | use crate::Task; 15 | 16 | /// The vtable for a task. 17 | pub(crate) struct TaskVTable { 18 | /// Schedules the task. 19 | pub(crate) schedule: unsafe fn(*const ()), 20 | 21 | /// Drops the future inside the task. 22 | pub(crate) drop_future: unsafe fn(*const ()), 23 | 24 | /// Returns a pointer to the output stored after completion. 25 | pub(crate) get_output: unsafe fn(*const ()) -> *const (), 26 | 27 | /// Drops the task. 28 | pub(crate) drop_task: unsafe fn(ptr: *const ()), 29 | 30 | /// Destroys the task. 31 | pub(crate) destroy: unsafe fn(*const ()), 32 | 33 | /// Runs the task. 34 | pub(crate) run: unsafe fn(*const ()), 35 | 36 | /// Creates a new waker associated with the task. 37 | pub(crate) clone_waker: unsafe fn(ptr: *const ()) -> RawWaker, 38 | } 39 | 40 | /// Memory layout of a task. 41 | /// 42 | /// This struct contains the following information: 43 | /// 44 | /// 1. How to allocate and deallocate the task. 45 | /// 2. How to access the fields inside the task. 46 | #[derive(Clone, Copy)] 47 | pub(crate) struct TaskLayout { 48 | /// Memory layout of the whole task. 49 | pub(crate) layout: Layout, 50 | 51 | /// Offset into the task at which the tag is stored. 52 | pub(crate) offset_t: usize, 53 | 54 | /// Offset into the task at which the schedule function is stored. 55 | pub(crate) offset_s: usize, 56 | 57 | /// Offset into the task at which the future is stored. 58 | pub(crate) offset_f: usize, 59 | 60 | /// Offset into the task at which the output is stored. 61 | pub(crate) offset_r: usize, 62 | } 63 | 64 | /// Raw pointers to the fields inside a task. 65 | pub(crate) struct RawTask { 66 | /// The task header. 67 | pub(crate) header: *const Header, 68 | 69 | /// The schedule function. 70 | pub(crate) schedule: *const S, 71 | 72 | /// The tag inside the task. 73 | pub(crate) tag: *mut T, 74 | 75 | /// The future. 76 | pub(crate) future: *mut F, 77 | 78 | /// The output of the future. 
79 | pub(crate) output: *mut R, 80 | } 81 | 82 | impl Copy for RawTask {} 83 | 84 | impl Clone for RawTask { 85 | fn clone(&self) -> Self { 86 | Self { 87 | header: self.header, 88 | schedule: self.schedule, 89 | tag: self.tag, 90 | future: self.future, 91 | output: self.output, 92 | } 93 | } 94 | } 95 | 96 | impl RawTask 97 | where 98 | F: Future + 'static, 99 | S: Fn(Task) + Send + Sync + 'static, 100 | { 101 | const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( 102 | Self::clone_waker, 103 | Self::wake, 104 | Self::wake_by_ref, 105 | Self::drop_waker, 106 | ); 107 | 108 | /// Allocates a task with the given `future` and `schedule` function. 109 | /// 110 | /// It is assumed that initially only the `Task` reference and the `JoinHandle` exist. 111 | pub(crate) fn allocate(future: F, schedule: S, tag: T) -> NonNull<()> { 112 | // Compute the layout of the task for allocation. Abort if the computation fails. 113 | let task_layout = abort_on_panic(|| Self::task_layout()); 114 | 115 | unsafe { 116 | // Allocate enough space for the entire task. 117 | let raw_task = match NonNull::new(alloc::alloc::alloc(task_layout.layout) as *mut ()) { 118 | None => abort(), 119 | Some(p) => p, 120 | }; 121 | 122 | let raw = Self::from_ptr(raw_task.as_ptr()); 123 | 124 | // Write the header as the first field of the task. 125 | (raw.header as *mut Header).write(Header { 126 | state: AtomicUsize::new(SCHEDULED | HANDLE | REFERENCE), 127 | awaiter: UnsafeCell::new(None), 128 | vtable: &TaskVTable { 129 | schedule: Self::schedule, 130 | drop_future: Self::drop_future, 131 | get_output: Self::get_output, 132 | drop_task: Self::drop_task, 133 | destroy: Self::destroy, 134 | run: Self::run, 135 | clone_waker: Self::clone_waker, 136 | }, 137 | }); 138 | 139 | // Write the tag as the second field of the task. 140 | (raw.tag as *mut T).write(tag); 141 | 142 | // Write the schedule function as the third field of the task. 143 | (raw.schedule as *mut S).write(schedule); 144 | 145 | // Write the future as the fourth field of the task. 146 | raw.future.write(future); 147 | 148 | raw_task 149 | } 150 | } 151 | 152 | /// Creates a `RawTask` from a raw task pointer. 153 | #[inline] 154 | pub(crate) fn from_ptr(ptr: *const ()) -> Self { 155 | let task_layout = Self::task_layout(); 156 | let p = ptr as *const u8; 157 | 158 | unsafe { 159 | Self { 160 | header: p as *const Header, 161 | tag: p.add(task_layout.offset_t) as *mut T, 162 | schedule: p.add(task_layout.offset_s) as *const S, 163 | future: p.add(task_layout.offset_f) as *mut F, 164 | output: p.add(task_layout.offset_r) as *mut R, 165 | } 166 | } 167 | } 168 | 169 | /// Returns the memory layout for a task. 170 | #[inline] 171 | fn task_layout() -> TaskLayout { 172 | // Compute the layouts for `Header`, `T`, `S`, `F`, and `R`. 173 | let layout_header = Layout::new::
(); 174 | let layout_t = Layout::new::(); 175 | let layout_s = Layout::new::(); 176 | let layout_f = Layout::new::(); 177 | let layout_r = Layout::new::(); 178 | 179 | // Compute the layout for `union { F, R }`. 180 | let size_union = layout_f.size().max(layout_r.size()); 181 | let align_union = layout_f.align().max(layout_r.align()); 182 | let layout_union = unsafe { Layout::from_size_align_unchecked(size_union, align_union) }; 183 | 184 | // Compute the layout for `Header` followed by `T`, then `S`, and finally `union { F, R }`. 185 | let layout = layout_header; 186 | let (layout, offset_t) = extend(layout, layout_t); 187 | let (layout, offset_s) = extend(layout, layout_s); 188 | let (layout, offset_union) = extend(layout, layout_union); 189 | let offset_f = offset_union; 190 | let offset_r = offset_union; 191 | 192 | TaskLayout { 193 | layout, 194 | offset_t, 195 | offset_s, 196 | offset_f, 197 | offset_r, 198 | } 199 | } 200 | 201 | /// Wakes a waker. 202 | unsafe fn wake(ptr: *const ()) { 203 | // This is just an optimization. If the schedule function has captured variables, then 204 | // we'll do less reference counting if we wake the waker by reference and then drop it. 205 | if mem::size_of::() > 0 { 206 | Self::wake_by_ref(ptr); 207 | Self::drop_waker(ptr); 208 | return; 209 | } 210 | 211 | let raw = Self::from_ptr(ptr); 212 | 213 | let mut state = (*raw.header).state.load(Ordering::Acquire); 214 | 215 | loop { 216 | // If the task is completed or closed, it can't be woken up. 217 | if state & (COMPLETED | CLOSED) != 0 { 218 | // Drop the waker. 219 | Self::drop_waker(ptr); 220 | break; 221 | } 222 | 223 | // If the task is already scheduled, we just need to synchronize with the thread that 224 | // will run the task by "publishing" our current view of the memory. 225 | if state & SCHEDULED != 0 { 226 | // Update the state without actually modifying it. 227 | match (*raw.header).state.compare_exchange_weak( 228 | state, 229 | state, 230 | Ordering::AcqRel, 231 | Ordering::Acquire, 232 | ) { 233 | Ok(_) => { 234 | // Drop the waker. 235 | Self::drop_waker(ptr); 236 | break; 237 | } 238 | Err(s) => state = s, 239 | } 240 | } else { 241 | // Mark the task as scheduled. 242 | match (*raw.header).state.compare_exchange_weak( 243 | state, 244 | state | SCHEDULED, 245 | Ordering::AcqRel, 246 | Ordering::Acquire, 247 | ) { 248 | Ok(_) => { 249 | // If the task is not yet scheduled and isn't currently running, now is the 250 | // time to schedule it. 251 | if state & RUNNING == 0 { 252 | // Schedule the task. 253 | Self::schedule(ptr); 254 | } else { 255 | // Drop the waker. 256 | Self::drop_waker(ptr); 257 | } 258 | 259 | break; 260 | } 261 | Err(s) => state = s, 262 | } 263 | } 264 | } 265 | } 266 | 267 | /// Wakes a waker by reference. 268 | unsafe fn wake_by_ref(ptr: *const ()) { 269 | let raw = Self::from_ptr(ptr); 270 | 271 | let mut state = (*raw.header).state.load(Ordering::Acquire); 272 | 273 | loop { 274 | // If the task is completed or closed, it can't be woken up. 275 | if state & (COMPLETED | CLOSED) != 0 { 276 | break; 277 | } 278 | 279 | // If the task is already scheduled, we just need to synchronize with the thread that 280 | // will run the task by "publishing" our current view of the memory. 281 | if state & SCHEDULED != 0 { 282 | // Update the state without actually modifying it. 
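                // On success, the `AcqRel` exchange publishes this thread's prior writes to the
                // thread that will run the task, even though the state value itself is unchanged.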
283 | match (*raw.header).state.compare_exchange_weak( 284 | state, 285 | state, 286 | Ordering::AcqRel, 287 | Ordering::Acquire, 288 | ) { 289 | Ok(_) => break, 290 | Err(s) => state = s, 291 | } 292 | } else { 293 | // If the task is not running, we can schedule right away. 294 | let new = if state & RUNNING == 0 { 295 | (state | SCHEDULED) + REFERENCE 296 | } else { 297 | state | SCHEDULED 298 | }; 299 | 300 | // Mark the task as scheduled. 301 | match (*raw.header).state.compare_exchange_weak( 302 | state, 303 | new, 304 | Ordering::AcqRel, 305 | Ordering::Acquire, 306 | ) { 307 | Ok(_) => { 308 | // If the task is not running, now is the time to schedule. 309 | if state & RUNNING == 0 { 310 | // If the reference count overflowed, abort. 311 | if state > isize::max_value() as usize { 312 | abort(); 313 | } 314 | 315 | // Schedule the task. There is no need to call `Self::schedule(ptr)` 316 | // because the schedule function cannot be destroyed while the waker is 317 | // still alive. 318 | let task = Task { 319 | raw_task: NonNull::new_unchecked(ptr as *mut ()), 320 | _marker: PhantomData, 321 | }; 322 | (*raw.schedule)(task); 323 | } 324 | 325 | break; 326 | } 327 | Err(s) => state = s, 328 | } 329 | } 330 | } 331 | } 332 | 333 | /// Clones a waker. 334 | unsafe fn clone_waker(ptr: *const ()) -> RawWaker { 335 | let raw = Self::from_ptr(ptr); 336 | 337 | // Increment the reference count. With any kind of reference-counted data structure, 338 | // relaxed ordering is appropriate when incrementing the counter. 339 | let state = (*raw.header).state.fetch_add(REFERENCE, Ordering::Relaxed); 340 | 341 | // If the reference count overflowed, abort. 342 | if state > isize::max_value() as usize { 343 | abort(); 344 | } 345 | 346 | RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE) 347 | } 348 | 349 | /// Drops a waker. 350 | /// 351 | /// This function will decrement the reference count. If it drops down to zero, the associated 352 | /// join handle has been dropped too, and the task has not been completed, then it will get 353 | /// scheduled one more time so that its future gets dropped by the executor. 354 | #[inline] 355 | unsafe fn drop_waker(ptr: *const ()) { 356 | let raw = Self::from_ptr(ptr); 357 | 358 | // Decrement the reference count. 359 | let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; 360 | 361 | // If this was the last reference to the task and the `JoinHandle` has been dropped too, 362 | // then we need to decide how to destroy the task. 363 | if new & !(REFERENCE - 1) == 0 && new & HANDLE == 0 { 364 | if new & (COMPLETED | CLOSED) == 0 { 365 | // If the task was not completed nor closed, close it and schedule one more time so 366 | // that its future gets dropped by the executor. 367 | (*raw.header) 368 | .state 369 | .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release); 370 | Self::schedule(ptr); 371 | } else { 372 | // Otherwise, destroy the task right away. 373 | Self::destroy(ptr); 374 | } 375 | } 376 | } 377 | 378 | /// Drops a task. 379 | /// 380 | /// This function will decrement the reference count. If it drops down to zero and the 381 | /// associated join handle has been dropped too, then the task gets destroyed. 382 | #[inline] 383 | unsafe fn drop_task(ptr: *const ()) { 384 | let raw = Self::from_ptr(ptr); 385 | 386 | // Decrement the reference count. 
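        // `fetch_sub` returns the previous state, so subtracting `REFERENCE` once more yields the
        // state after this decrement; all flags live in the bits below `REFERENCE`.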
387 | let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE; 388 | 389 | // If this was the last reference to the task and the `JoinHandle` has been dropped too, 390 | // then destroy the task. 391 | if new & !(REFERENCE - 1) == 0 && new & HANDLE == 0 { 392 | Self::destroy(ptr); 393 | } 394 | } 395 | 396 | /// Schedules a task for running. 397 | /// 398 | /// This function doesn't modify the state of the task. It only passes the task reference to 399 | /// its schedule function. 400 | unsafe fn schedule(ptr: *const ()) { 401 | let raw = Self::from_ptr(ptr); 402 | 403 | // If the schedule function has captured variables, create a temporary waker that prevents 404 | // the task from getting deallocated while the function is being invoked. 405 | let _waker; 406 | if mem::size_of::() > 0 { 407 | _waker = Waker::from_raw(Self::clone_waker(ptr)); 408 | } 409 | 410 | let task = Task { 411 | raw_task: NonNull::new_unchecked(ptr as *mut ()), 412 | _marker: PhantomData, 413 | }; 414 | (*raw.schedule)(task); 415 | } 416 | 417 | /// Drops the future inside a task. 418 | #[inline] 419 | unsafe fn drop_future(ptr: *const ()) { 420 | let raw = Self::from_ptr(ptr); 421 | 422 | // We need a safeguard against panics because the destructor can panic. 423 | abort_on_panic(|| { 424 | raw.future.drop_in_place(); 425 | }) 426 | } 427 | 428 | /// Returns a pointer to the output inside a task. 429 | unsafe fn get_output(ptr: *const ()) -> *const () { 430 | let raw = Self::from_ptr(ptr); 431 | raw.output as *const () 432 | } 433 | 434 | /// Cleans up task's resources and deallocates it. 435 | /// 436 | /// The schedule function and the tag will be dropped, and the task will then get deallocated. 437 | /// The task must be closed before this function is called. 438 | #[inline] 439 | unsafe fn destroy(ptr: *const ()) { 440 | let raw = Self::from_ptr(ptr); 441 | let task_layout = Self::task_layout(); 442 | 443 | // We need a safeguard against panics because destructors can panic. 444 | abort_on_panic(|| { 445 | // Drop the schedule function. 446 | (raw.schedule as *mut S).drop_in_place(); 447 | 448 | // Drop the tag. 449 | (raw.tag as *mut T).drop_in_place(); 450 | }); 451 | 452 | // Finally, deallocate the memory reserved by the task. 453 | alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout); 454 | } 455 | 456 | /// Runs a task. 457 | /// 458 | /// If polling its future panics, the task will be closed and the panic will be propagated into 459 | /// the caller. 460 | unsafe fn run(ptr: *const ()) { 461 | let raw = Self::from_ptr(ptr); 462 | 463 | // Create a context from the raw task pointer and the vtable inside the its header. 464 | let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE))); 465 | let cx = &mut Context::from_waker(&waker); 466 | 467 | let mut state = (*raw.header).state.load(Ordering::Acquire); 468 | 469 | // Update the task's state before polling its future. 470 | loop { 471 | // If the task has already been closed, drop the task reference and return. 472 | if state & CLOSED != 0 { 473 | // Notify the awaiter that the task has been closed. 474 | if state & AWAITER != 0 { 475 | (*raw.header).notify(None); 476 | } 477 | 478 | // Drop the future. 479 | Self::drop_future(ptr); 480 | 481 | // Drop the task reference. 482 | Self::drop_task(ptr); 483 | return; 484 | } 485 | 486 | // Mark the task as unscheduled and running. 
487 | match (*raw.header).state.compare_exchange_weak( 488 | state, 489 | (state & !SCHEDULED) | RUNNING, 490 | Ordering::AcqRel, 491 | Ordering::Acquire, 492 | ) { 493 | Ok(_) => { 494 | // Update the state because we're continuing with polling the future. 495 | state = (state & !SCHEDULED) | RUNNING; 496 | break; 497 | } 498 | Err(s) => state = s, 499 | } 500 | } 501 | 502 | // Poll the inner future, but surround it with a guard that closes the task in case polling 503 | // panics. 504 | let guard = Guard(raw); 505 | let poll = ::poll(Pin::new_unchecked(&mut *raw.future), cx); 506 | mem::forget(guard); 507 | 508 | match poll { 509 | Poll::Ready(out) => { 510 | // Replace the future with its output. 511 | Self::drop_future(ptr); 512 | raw.output.write(out); 513 | 514 | // A place where the output will be stored in case it needs to be dropped. 515 | let mut output = None; 516 | 517 | // The task is now completed. 518 | loop { 519 | // If the handle is dropped, we'll need to close it and drop the output. 520 | let new = if state & HANDLE == 0 { 521 | (state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED 522 | } else { 523 | (state & !RUNNING & !SCHEDULED) | COMPLETED 524 | }; 525 | 526 | // Mark the task as not running and completed. 527 | match (*raw.header).state.compare_exchange_weak( 528 | state, 529 | new, 530 | Ordering::AcqRel, 531 | Ordering::Acquire, 532 | ) { 533 | Ok(_) => { 534 | // If the handle is dropped or if the task was closed while running, 535 | // now it's time to drop the output. 536 | if state & HANDLE == 0 || state & CLOSED != 0 { 537 | // Read the output. 538 | output = Some(raw.output.read()); 539 | } 540 | 541 | // Notify the awaiter that the task has been completed. 542 | if state & AWAITER != 0 { 543 | (*raw.header).notify(None); 544 | } 545 | 546 | // Drop the task reference. 547 | Self::drop_task(ptr); 548 | break; 549 | } 550 | Err(s) => state = s, 551 | } 552 | } 553 | 554 | // Drop the output if it was taken out of the task. 555 | drop(output); 556 | } 557 | Poll::Pending => { 558 | // The task is still not completed. 559 | loop { 560 | // If the task was closed while running, we'll need to unschedule in case it 561 | // was woken up and then destroy it. 562 | let new = if state & CLOSED != 0 { 563 | state & !RUNNING & !SCHEDULED 564 | } else { 565 | state & !RUNNING 566 | }; 567 | 568 | // Mark the task as not running. 569 | match (*raw.header).state.compare_exchange_weak( 570 | state, 571 | new, 572 | Ordering::AcqRel, 573 | Ordering::Acquire, 574 | ) { 575 | Ok(state) => { 576 | // If the task was closed while running, we need to drop its future. 577 | // If the task was woken up while running, we need to schedule it. 578 | // Otherwise, we just drop the task reference. 579 | if state & CLOSED != 0 { 580 | // The thread that closed the task didn't drop the future because 581 | // it was running so now it's our responsibility to do so. 582 | Self::drop_future(ptr); 583 | 584 | // Drop the task reference. 585 | Self::drop_task(ptr); 586 | } else if state & SCHEDULED != 0 { 587 | // The thread that woke the task up didn't reschedule it because 588 | // it was running so now it's our responsibility to do so. 589 | Self::schedule(ptr); 590 | } else { 591 | // Drop the task reference. 592 | Self::drop_task(ptr); 593 | } 594 | break; 595 | } 596 | Err(s) => state = s, 597 | } 598 | } 599 | } 600 | } 601 | 602 | /// A guard that closes the task if polling its future panics. 
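    ///
    /// `run` constructs this guard right before polling the future and `mem::forget`s it after a
    /// successful poll, so the `Drop` implementation below only executes if polling panics.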
603 | struct Guard(RawTask) 604 | where 605 | F: Future + 'static, 606 | S: Fn(Task) + Send + Sync + 'static; 607 | 608 | impl Drop for Guard 609 | where 610 | F: Future + 'static, 611 | S: Fn(Task) + Send + Sync + 'static, 612 | { 613 | fn drop(&mut self) { 614 | let raw = self.0; 615 | let ptr = raw.header as *const (); 616 | 617 | unsafe { 618 | let mut state = (*raw.header).state.load(Ordering::Acquire); 619 | 620 | loop { 621 | // If the task was closed while running, then unschedule it, drop its 622 | // future, and drop the task reference. 623 | if state & CLOSED != 0 { 624 | // We still need to unschedule the task because it is possible it was 625 | // woken up while running. 626 | (*raw.header).state.fetch_and(!SCHEDULED, Ordering::AcqRel); 627 | 628 | // The thread that closed the task didn't drop the future because it 629 | // was running so now it's our responsibility to do so. 630 | RawTask::::drop_future(ptr); 631 | 632 | // Drop the task reference. 633 | RawTask::::drop_task(ptr); 634 | break; 635 | } 636 | 637 | // Mark the task as not running, not scheduled, and closed. 638 | match (*raw.header).state.compare_exchange_weak( 639 | state, 640 | (state & !RUNNING & !SCHEDULED) | CLOSED, 641 | Ordering::AcqRel, 642 | Ordering::Acquire, 643 | ) { 644 | Ok(state) => { 645 | // Drop the future because the task is now closed. 646 | RawTask::::drop_future(ptr); 647 | 648 | // Notify the awaiter that the task has been closed. 649 | if state & AWAITER != 0 { 650 | (*raw.header).notify(None); 651 | } 652 | 653 | // Drop the task reference. 654 | RawTask::::drop_task(ptr); 655 | break; 656 | } 657 | Err(s) => state = s, 658 | } 659 | } 660 | } 661 | } 662 | } 663 | } 664 | } 665 | -------------------------------------------------------------------------------- /src/state.rs: -------------------------------------------------------------------------------- 1 | /// Set if the task is scheduled for running. 2 | /// 3 | /// A task is considered to be scheduled whenever its `Task` reference exists. It therefore also 4 | /// begins in scheduled state at the moment of creation. 5 | /// 6 | /// This flag can't be set when the task is completed. However, it can be set while the task is 7 | /// running, in which case it will be rescheduled as soon as polling finishes. 8 | pub(crate) const SCHEDULED: usize = 1 << 0; 9 | 10 | /// Set if the task is running. 11 | /// 12 | /// A task is in running state while its future is being polled. 13 | /// 14 | /// This flag can't be set when the task is completed. However, it can be in scheduled state while 15 | /// it is running, in which case it will be rescheduled as soon as polling finishes. 16 | pub(crate) const RUNNING: usize = 1 << 1; 17 | 18 | /// Set if the task has been completed. 19 | /// 20 | /// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored 21 | /// inside the task until it becomes closed. In fact, `JoinHandle` picks up the output by marking 22 | /// the task as closed. 23 | /// 24 | /// This flag can't be set when the task is scheduled or running. 25 | pub(crate) const COMPLETED: usize = 1 << 2; 26 | 27 | /// Set if the task is closed. 28 | /// 29 | /// If a task is closed, that means it's either cancelled or its output has been consumed by the 30 | /// `JoinHandle`. A task becomes closed when: 31 | /// 32 | /// 1. It gets cancelled by `Task::cancel()`, `Task::drop()`, or `JoinHandle::cancel()`. 33 | /// 2. Its output gets awaited by the `JoinHandle`. 34 | /// 3. It panics while polling the future. 
35 | /// 4. It is completed and the `JoinHandle` gets dropped. 36 | pub(crate) const CLOSED: usize = 1 << 3; 37 | 38 | /// Set if the `JoinHandle` still exists. 39 | /// 40 | /// The `JoinHandle` is a special case in that it is only tracked by this flag, while all other 41 | /// task references (`Task` and `Waker`s) are tracked by the reference count. 42 | pub(crate) const HANDLE: usize = 1 << 4; 43 | 44 | /// Set if the `JoinHandle` is awaiting the output. 45 | /// 46 | /// This flag is set while there is a registered awaiter of type `Waker` inside the task. When the 47 | /// task gets closed or completed, we need to wake the awaiter. This flag can be used as a fast 48 | /// check that tells us if we need to wake anyone without acquiring the lock inside the task. 49 | pub(crate) const AWAITER: usize = 1 << 5; 50 | 51 | /// Set if an awaiter is being registered. 52 | /// 53 | /// This flag is set when `JoinHandle` is polled and we are registering a new awaiter. 54 | pub(crate) const REGISTERING: usize = 1 << 6; 55 | 56 | /// Set if the awaiter is being notified. 57 | /// 58 | /// This flag is set when notifying the awaiter. If an awaiter is concurrently registered and 59 | /// notified, whichever side came first will take over the responsibility of resolving the race. 60 | pub(crate) const NOTIFYING: usize = 1 << 7; 61 | 62 | /// A single reference. 63 | /// 64 | /// The lower bits in the state contain various flags representing the task state, while the upper 65 | /// bits contain the reference count. The value of `REFERENCE` represents a single reference in the 66 | /// total reference count. 67 | /// 68 | /// Note that the reference counter only tracks the `Task` and `Waker`s. The `JoinHandle` is 69 | /// tracked separately by the `HANDLE` flag. 70 | pub(crate) const REFERENCE: usize = 1 << 8; 71 | -------------------------------------------------------------------------------- /src/task.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::future::Future; 3 | use core::marker::PhantomData; 4 | use core::mem::{self, ManuallyDrop}; 5 | use core::pin::Pin; 6 | use core::ptr::NonNull; 7 | use core::task::{Context, Poll, Waker}; 8 | 9 | use crate::header::Header; 10 | use crate::raw::RawTask; 11 | use crate::JoinHandle; 12 | 13 | /// Creates a new task. 14 | /// 15 | /// This constructor returns a [`Task`] reference that runs the future and a [`JoinHandle`] that 16 | /// awaits its result. 17 | /// 18 | /// When run, the task polls `future`. When woken up, it gets scheduled for running by the 19 | /// `schedule` function. Argument `tag` is an arbitrary piece of data stored inside the task. 20 | /// 21 | /// The schedule function should not attempt to run the task nor to drop it. Instead, it should 22 | /// push the task into some kind of queue so that it can be processed later. 23 | /// 24 | /// If you need to spawn a future that does not implement [`Send`], consider using the 25 | /// [`spawn_local`] function instead. 26 | /// 27 | /// [`Task`]: struct.Task.html 28 | /// [`JoinHandle`]: struct.JoinHandle.html 29 | /// [`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html 30 | /// [`spawn_local`]: fn.spawn_local.html 31 | /// 32 | /// # Examples 33 | /// 34 | /// ``` 35 | /// use crossbeam::channel; 36 | /// 37 | /// // The future inside the task. 38 | /// let future = async { 39 | /// println!("Hello, world!"); 40 | /// }; 41 | /// 42 | /// // If the task gets woken up, it will be sent into this channel.
43 | /// let (s, r) = channel::unbounded(); 44 | /// let schedule = move |task| s.send(task).unwrap(); 45 | /// 46 | /// // Create a task with the future and the schedule function. 47 | /// let (task, handle) = async_task::spawn(future, schedule, ()); 48 | /// ``` 49 | pub fn spawn(future: F, schedule: S, tag: T) -> (Task, JoinHandle) 50 | where 51 | F: Future + Send + 'static, 52 | R: Send + 'static, 53 | S: Fn(Task) + Send + Sync + 'static, 54 | T: Send + Sync + 'static, 55 | { 56 | let raw_task = RawTask::::allocate(future, schedule, tag); 57 | let task = Task { 58 | raw_task, 59 | _marker: PhantomData, 60 | }; 61 | let handle = JoinHandle { 62 | raw_task, 63 | _marker: PhantomData, 64 | }; 65 | (task, handle) 66 | } 67 | 68 | /// Creates a new local task. 69 | /// 70 | /// This constructor returns a [`Task`] reference that runs the future and a [`JoinHandle`] that 71 | /// awaits its result. 72 | /// 73 | /// When run, the task polls `future`. When woken up, it gets scheduled for running by the 74 | /// `schedule` function. Argument `tag` is an arbitrary piece of data stored inside the task. 75 | /// 76 | /// The schedule function should not attempt to run the task nor to drop it. Instead, it should 77 | /// push the task into some kind of queue so that it can be processed later. 78 | /// 79 | /// Unlike [`spawn`], this function does not require the future to implement [`Send`]. If the 80 | /// [`Task`] reference is run or dropped on a thread it was not created on, a panic will occur. 81 | /// 82 | /// [`Task`]: struct.Task.html 83 | /// [`JoinHandle`]: struct.JoinHandle.html 84 | /// [`spawn`]: fn.spawn.html 85 | /// [`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html 86 | /// 87 | /// # Examples 88 | /// 89 | /// ``` 90 | /// use crossbeam::channel; 91 | /// 92 | /// // The future inside the task. 93 | /// let future = async { 94 | /// println!("Hello, world!"); 95 | /// }; 96 | /// 97 | /// // If the task gets woken up, it will be sent into this channel. 98 | /// let (s, r) = channel::unbounded(); 99 | /// let schedule = move |task| s.send(task).unwrap(); 100 | /// 101 | /// // Create a task with the future and the schedule function. 
102 | /// let (task, handle) = async_task::spawn_local(future, schedule, ()); 103 | /// ``` 104 | #[cfg(any(unix, windows))] 105 | pub fn spawn_local(future: F, schedule: S, tag: T) -> (Task, JoinHandle) 106 | where 107 | F: Future + 'static, 108 | R: 'static, 109 | S: Fn(Task) + Send + Sync + 'static, 110 | T: Send + Sync + 'static, 111 | { 112 | #[cfg(unix)] 113 | #[inline] 114 | fn thread_id() -> usize { 115 | unsafe { libc::pthread_self() as usize } 116 | } 117 | 118 | #[cfg(windows)] 119 | #[inline] 120 | fn thread_id() -> usize { 121 | unsafe { winapi::um::processthreadsapi::GetCurrentThreadId() as usize } 122 | } 123 | 124 | struct Checked { 125 | id: usize, 126 | inner: ManuallyDrop, 127 | } 128 | 129 | impl Drop for Checked { 130 | fn drop(&mut self) { 131 | assert!( 132 | self.id == thread_id(), 133 | "local task dropped by a thread that didn't spawn it" 134 | ); 135 | unsafe { 136 | ManuallyDrop::drop(&mut self.inner); 137 | } 138 | } 139 | } 140 | 141 | impl Future for Checked { 142 | type Output = F::Output; 143 | 144 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 145 | assert!( 146 | self.id == thread_id(), 147 | "local task polled by a thread that didn't spawn it" 148 | ); 149 | unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) } 150 | } 151 | } 152 | 153 | let future = Checked { 154 | id: thread_id(), 155 | inner: ManuallyDrop::new(future), 156 | }; 157 | 158 | let raw_task = RawTask::<_, R, S, T>::allocate(future, schedule, tag); 159 | let task = Task { 160 | raw_task, 161 | _marker: PhantomData, 162 | }; 163 | let handle = JoinHandle { 164 | raw_task, 165 | _marker: PhantomData, 166 | }; 167 | (task, handle) 168 | } 169 | 170 | /// A task reference that runs its future. 171 | /// 172 | /// At any moment in time, there is at most one [`Task`] reference associated with a particular 173 | /// task. Running consumes the [`Task`] reference and polls its internal future. If the future is 174 | /// still pending after getting polled, the [`Task`] reference simply won't exist until a [`Waker`] 175 | /// notifies the task. If the future completes, its result becomes available to the [`JoinHandle`]. 176 | /// 177 | /// When a task is woken up, its [`Task`] reference is recreated and passed to the schedule 178 | /// function. In most executors, scheduling simply pushes the [`Task`] reference into a queue of 179 | /// runnable tasks. 180 | /// 181 | /// If the [`Task`] reference is dropped without getting run, the task is automatically cancelled. 182 | /// When cancelled, the task won't be scheduled again even if a [`Waker`] wakes it. It is possible 183 | /// for the [`JoinHandle`] to cancel while the [`Task`] reference exists, in which case an attempt 184 | /// to run the task won't do anything. 185 | /// 186 | /// [`run()`]: struct.Task.html#method.run 187 | /// [`JoinHandle`]: struct.JoinHandle.html 188 | /// [`Task`]: struct.Task.html 189 | /// [`Waker`]: https://doc.rust-lang.org/std/task/struct.Waker.html 190 | pub struct Task { 191 | /// A pointer to the heap-allocated task. 192 | pub(crate) raw_task: NonNull<()>, 193 | 194 | /// A marker capturing the generic type `T`. 195 | pub(crate) _marker: PhantomData, 196 | } 197 | 198 | unsafe impl Send for Task {} 199 | unsafe impl Sync for Task {} 200 | 201 | impl Task { 202 | /// Schedules the task. 203 | /// 204 | /// This is a convenience method that simply reschedules the task by passing it to its schedule 205 | /// function. 206 | /// 207 | /// If the task is cancelled, this method won't do anything. 
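    ///
    /// # Examples
    ///
    /// ```
    /// // A minimal sketch mirroring the crate-level example: scheduling pushes the task into a
    /// // queue, here a channel.
    /// use crossbeam::channel;
    ///
    /// let (s, r) = channel::unbounded();
    /// let schedule = move |task| s.send(task).unwrap();
    /// let (task, handle) = async_task::spawn(async {}, schedule, ());
    ///
    /// // Invoking `schedule` sends the task into the channel.
    /// task.schedule();
    /// assert!(r.recv().is_ok());
    /// ```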
208 | pub fn schedule(self) { 209 | let ptr = self.raw_task.as_ptr(); 210 | let header = ptr as *const Header; 211 | mem::forget(self); 212 | 213 | unsafe { 214 | ((*header).vtable.schedule)(ptr); 215 | } 216 | } 217 | 218 | /// Runs the task. 219 | /// 220 | /// This method polls the task's future. If the future completes, its result will become 221 | /// available to the [`JoinHandle`]. And if the future is still pending, the task will have to 222 | /// be woken up in order to be rescheduled and run again. 223 | /// 224 | /// If the task was cancelled by a [`JoinHandle`] before it gets run, then this method won't do 225 | /// anything. 226 | /// 227 | /// It is possible that polling the future panics, in which case the panic will be propagated 228 | /// into the caller. It is advised that invocations of this method are wrapped inside 229 | /// [`catch_unwind`]. If a panic occurs, the task is automatically cancelled. 230 | /// 231 | /// [`JoinHandle`]: struct.JoinHandle.html 232 | /// [`catch_unwind`]: https://doc.rust-lang.org/std/panic/fn.catch_unwind.html 233 | pub fn run(self) { 234 | let ptr = self.raw_task.as_ptr(); 235 | let header = ptr as *const Header; 236 | mem::forget(self); 237 | 238 | unsafe { 239 | ((*header).vtable.run)(ptr); 240 | } 241 | } 242 | 243 | /// Cancels the task. 244 | /// 245 | /// When cancelled, the task won't be scheduled again even if a [`Waker`] wakes it. An attempt 246 | /// to run it won't do anything. 247 | /// 248 | /// [`Waker`]: https://doc.rust-lang.org/std/task/struct.Waker.html 249 | pub fn cancel(&self) { 250 | let ptr = self.raw_task.as_ptr(); 251 | let header = ptr as *const Header; 252 | 253 | unsafe { 254 | (*header).cancel(); 255 | } 256 | } 257 | 258 | /// Returns a reference to the tag stored inside the task. 259 | pub fn tag(&self) -> &T { 260 | let offset = Header::offset_tag::(); 261 | let ptr = self.raw_task.as_ptr(); 262 | 263 | unsafe { 264 | let raw = (ptr as *mut u8).add(offset) as *const T; 265 | &*raw 266 | } 267 | } 268 | 269 | /// Converts this task into a raw pointer to the tag. 270 | pub fn into_raw(self) -> *const T { 271 | let offset = Header::offset_tag::(); 272 | let ptr = self.raw_task.as_ptr(); 273 | mem::forget(self); 274 | 275 | unsafe { (ptr as *mut u8).add(offset) as *const T } 276 | } 277 | 278 | /// Converts a raw pointer to the tag into a task. 279 | /// 280 | /// This method should only be used with raw pointers returned from [`into_raw`]. 281 | /// 282 | /// [`into_raw`]: #method.into_raw 283 | pub unsafe fn from_raw(raw: *const T) -> Task { 284 | let offset = Header::offset_tag::(); 285 | let ptr = (raw as *mut u8).sub(offset) as *mut (); 286 | 287 | Task { 288 | raw_task: NonNull::new_unchecked(ptr), 289 | _marker: PhantomData, 290 | } 291 | } 292 | 293 | /// Returns a waker associated with this task. 294 | pub fn waker(&self) -> Waker { 295 | let ptr = self.raw_task.as_ptr(); 296 | let header = ptr as *const Header; 297 | 298 | unsafe { 299 | let raw_waker = ((*header).vtable.clone_waker)(ptr); 300 | Waker::from_raw(raw_waker) 301 | } 302 | } 303 | } 304 | 305 | impl Drop for Task { 306 | fn drop(&mut self) { 307 | let ptr = self.raw_task.as_ptr(); 308 | let header = ptr as *const Header; 309 | 310 | unsafe { 311 | // Cancel the task. 312 | (*header).cancel(); 313 | 314 | // Drop the future. 315 | ((*header).vtable.drop_future)(ptr); 316 | 317 | // Drop the task reference. 
318 | ((*header).vtable.drop_task)(ptr); 319 | } 320 | } 321 | } 322 | 323 | impl fmt::Debug for Task { 324 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 325 | let ptr = self.raw_task.as_ptr(); 326 | let header = ptr as *const Header; 327 | 328 | f.debug_struct("Task") 329 | .field("header", unsafe { &(*header) }) 330 | .field("tag", self.tag()) 331 | .finish() 332 | } 333 | } 334 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::Layout; 2 | use core::mem; 3 | 4 | /// Aborts the process. 5 | /// 6 | /// To abort, this function simply panics while panicking. 7 | pub(crate) fn abort() -> ! { 8 | struct Panic; 9 | 10 | impl Drop for Panic { 11 | fn drop(&mut self) { 12 | panic!("aborting the process"); 13 | } 14 | } 15 | 16 | let _panic = Panic; 17 | panic!("aborting the process"); 18 | } 19 | 20 | /// Calls a function and aborts if it panics. 21 | /// 22 | /// This is useful in unsafe code where we can't recover from panics. 23 | #[inline] 24 | pub(crate) fn abort_on_panic(f: impl FnOnce() -> T) -> T { 25 | struct Bomb; 26 | 27 | impl Drop for Bomb { 28 | fn drop(&mut self) { 29 | abort(); 30 | } 31 | } 32 | 33 | let bomb = Bomb; 34 | let t = f(); 35 | mem::forget(bomb); 36 | t 37 | } 38 | 39 | /// Returns the layout for `a` followed by `b` and the offset of `b`. 40 | /// 41 | /// This function was adapted from the currently unstable `Layout::extend()`: 42 | /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.extend 43 | #[inline] 44 | pub(crate) fn extend(a: Layout, b: Layout) -> (Layout, usize) { 45 | let new_align = a.align().max(b.align()); 46 | let pad = padding_needed_for(a, b.align()); 47 | 48 | let offset = a.size().checked_add(pad).unwrap(); 49 | let new_size = offset.checked_add(b.size()).unwrap(); 50 | 51 | let layout = Layout::from_size_align(new_size, new_align).unwrap(); 52 | (layout, offset) 53 | } 54 | 55 | /// Returns the padding after `layout` that aligns the following address to `align`. 56 | /// 57 | /// This function was adapted from the currently unstable `Layout::padding_needed_for()`: 58 | /// https://doc.rust-lang.org/nightly/std/alloc/struct.Layout.html#method.padding_needed_for 59 | #[inline] 60 | pub(crate) fn padding_needed_for(layout: Layout, align: usize) -> usize { 61 | let len = layout.size(); 62 | let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); 63 | len_rounded_up.wrapping_sub(len) 64 | } 65 | -------------------------------------------------------------------------------- /src/waker_fn.rs: -------------------------------------------------------------------------------- 1 | use alloc::sync::Arc; 2 | use core::mem::{self, ManuallyDrop}; 3 | use core::task::{RawWaker, RawWakerVTable, Waker}; 4 | 5 | /// Creates a waker from a wake function. 6 | /// 7 | /// The function gets called every time the waker is woken. 
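///
/// # Examples
///
/// ```
/// // Mirrors the crate-level docs: every wake invokes the closure.
/// let waker = async_task::waker_fn(|| println!("Wake!"));
///
/// // Prints "Wake!" twice.
/// waker.wake_by_ref();
/// waker.wake_by_ref();
/// ```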
8 | pub fn waker_fn(f: F) -> Waker { 9 | let raw = Arc::into_raw(Arc::new(f)) as *const (); 10 | let vtable = &Helper::::VTABLE; 11 | unsafe { Waker::from_raw(RawWaker::new(raw, vtable)) } 12 | } 13 | 14 | struct Helper(F); 15 | 16 | impl Helper { 17 | const VTABLE: RawWakerVTable = RawWakerVTable::new( 18 | Self::clone_waker, 19 | Self::wake, 20 | Self::wake_by_ref, 21 | Self::drop_waker, 22 | ); 23 | 24 | unsafe fn clone_waker(ptr: *const ()) -> RawWaker { 25 | let arc = ManuallyDrop::new(Arc::from_raw(ptr as *const F)); 26 | mem::forget(arc.clone()); 27 | RawWaker::new(ptr, &Self::VTABLE) 28 | } 29 | 30 | unsafe fn wake(ptr: *const ()) { 31 | let arc = Arc::from_raw(ptr as *const F); 32 | (arc)(); 33 | } 34 | 35 | unsafe fn wake_by_ref(ptr: *const ()) { 36 | let arc = ManuallyDrop::new(Arc::from_raw(ptr as *const F)); 37 | (arc)(); 38 | } 39 | 40 | unsafe fn drop_waker(ptr: *const ()) { 41 | drop(Arc::from_raw(ptr as *const F)); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /tests/basic.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | use std::task::{Context, Poll}; 5 | 6 | use async_task::Task; 7 | use crossbeam::atomic::AtomicCell; 8 | use crossbeam::channel; 9 | use futures::future; 10 | use lazy_static::lazy_static; 11 | 12 | // Creates a future with event counters. 13 | // 14 | // Usage: `future!(f, POLL, DROP)` 15 | // 16 | // The future `f` always returns `Poll::Ready`. 17 | // When it gets polled, `POLL` is incremented. 18 | // When it gets dropped, `DROP` is incremented. 19 | macro_rules! future { 20 | ($name:pat, $poll:ident, $drop:ident) => { 21 | lazy_static! { 22 | static ref $poll: AtomicCell = AtomicCell::new(0); 23 | static ref $drop: AtomicCell = AtomicCell::new(0); 24 | } 25 | 26 | let $name = { 27 | struct Fut(Box); 28 | 29 | impl Future for Fut { 30 | type Output = Box; 31 | 32 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 33 | $poll.fetch_add(1); 34 | Poll::Ready(Box::new(0)) 35 | } 36 | } 37 | 38 | impl Drop for Fut { 39 | fn drop(&mut self) { 40 | $drop.fetch_add(1); 41 | } 42 | } 43 | 44 | Fut(Box::new(0)) 45 | }; 46 | }; 47 | } 48 | 49 | // Creates a schedule function with event counters. 50 | // 51 | // Usage: `schedule!(s, SCHED, DROP)` 52 | // 53 | // The schedule function `s` does nothing. 54 | // When it gets invoked, `SCHED` is incremented. 55 | // When it gets dropped, `DROP` is incremented. 56 | macro_rules! schedule { 57 | ($name:pat, $sched:ident, $drop:ident) => { 58 | lazy_static! { 59 | static ref $sched: AtomicCell = AtomicCell::new(0); 60 | static ref $drop: AtomicCell = AtomicCell::new(0); 61 | } 62 | 63 | let $name = { 64 | struct Guard(Box); 65 | 66 | impl Drop for Guard { 67 | fn drop(&mut self) { 68 | $drop.fetch_add(1); 69 | } 70 | } 71 | 72 | let guard = Guard(Box::new(0)); 73 | move |_task| { 74 | &guard; 75 | $sched.fetch_add(1); 76 | } 77 | }; 78 | }; 79 | } 80 | 81 | // Creates a task with event counters. 82 | // 83 | // Usage: `task!(task, handle f, s, DROP)` 84 | // 85 | // A task with future `f` and schedule function `s` is created. 86 | // The `Task` and `JoinHandle` are bound to `task` and `handle`, respectively. 87 | // When the tag inside the task gets dropped, `DROP` is incremented. 88 | macro_rules! task { 89 | ($task:pat, $handle: pat, $future:expr, $schedule:expr, $drop:ident) => { 90 | lazy_static! 
{ 91 | static ref $drop: AtomicCell = AtomicCell::new(0); 92 | } 93 | 94 | let ($task, $handle) = { 95 | struct Tag(Box); 96 | 97 | impl Drop for Tag { 98 | fn drop(&mut self) { 99 | $drop.fetch_add(1); 100 | } 101 | } 102 | 103 | async_task::spawn($future, $schedule, Tag(Box::new(0))) 104 | }; 105 | }; 106 | } 107 | 108 | #[test] 109 | fn cancel_and_drop_handle() { 110 | future!(f, POLL, DROP_F); 111 | schedule!(s, SCHEDULE, DROP_S); 112 | task!(task, handle, f, s, DROP_T); 113 | 114 | assert_eq!(POLL.load(), 0); 115 | assert_eq!(SCHEDULE.load(), 0); 116 | assert_eq!(DROP_F.load(), 0); 117 | assert_eq!(DROP_S.load(), 0); 118 | assert_eq!(DROP_T.load(), 0); 119 | 120 | task.cancel(); 121 | assert_eq!(POLL.load(), 0); 122 | assert_eq!(SCHEDULE.load(), 0); 123 | assert_eq!(DROP_F.load(), 0); 124 | assert_eq!(DROP_S.load(), 0); 125 | assert_eq!(DROP_T.load(), 0); 126 | 127 | drop(handle); 128 | assert_eq!(POLL.load(), 0); 129 | assert_eq!(SCHEDULE.load(), 0); 130 | assert_eq!(DROP_F.load(), 0); 131 | assert_eq!(DROP_S.load(), 0); 132 | assert_eq!(DROP_T.load(), 0); 133 | 134 | drop(task); 135 | assert_eq!(POLL.load(), 0); 136 | assert_eq!(SCHEDULE.load(), 0); 137 | assert_eq!(DROP_F.load(), 1); 138 | assert_eq!(DROP_S.load(), 1); 139 | assert_eq!(DROP_T.load(), 1); 140 | } 141 | 142 | #[test] 143 | fn run_and_drop_handle() { 144 | future!(f, POLL, DROP_F); 145 | schedule!(s, SCHEDULE, DROP_S); 146 | task!(task, handle, f, s, DROP_T); 147 | 148 | drop(handle); 149 | assert_eq!(POLL.load(), 0); 150 | assert_eq!(SCHEDULE.load(), 0); 151 | assert_eq!(DROP_F.load(), 0); 152 | assert_eq!(DROP_S.load(), 0); 153 | assert_eq!(DROP_T.load(), 0); 154 | 155 | task.run(); 156 | assert_eq!(POLL.load(), 1); 157 | assert_eq!(SCHEDULE.load(), 0); 158 | assert_eq!(DROP_F.load(), 1); 159 | assert_eq!(DROP_S.load(), 1); 160 | assert_eq!(DROP_T.load(), 1); 161 | } 162 | 163 | #[test] 164 | fn drop_handle_and_run() { 165 | future!(f, POLL, DROP_F); 166 | schedule!(s, SCHEDULE, DROP_S); 167 | task!(task, handle, f, s, DROP_T); 168 | 169 | drop(handle); 170 | assert_eq!(POLL.load(), 0); 171 | assert_eq!(SCHEDULE.load(), 0); 172 | assert_eq!(DROP_F.load(), 0); 173 | assert_eq!(DROP_S.load(), 0); 174 | assert_eq!(DROP_T.load(), 0); 175 | 176 | task.run(); 177 | assert_eq!(POLL.load(), 1); 178 | assert_eq!(SCHEDULE.load(), 0); 179 | assert_eq!(DROP_F.load(), 1); 180 | assert_eq!(DROP_S.load(), 1); 181 | assert_eq!(DROP_T.load(), 1); 182 | } 183 | 184 | #[test] 185 | fn cancel_and_run() { 186 | future!(f, POLL, DROP_F); 187 | schedule!(s, SCHEDULE, DROP_S); 188 | task!(task, handle, f, s, DROP_T); 189 | 190 | handle.cancel(); 191 | assert_eq!(POLL.load(), 0); 192 | assert_eq!(SCHEDULE.load(), 0); 193 | assert_eq!(DROP_F.load(), 0); 194 | assert_eq!(DROP_S.load(), 0); 195 | assert_eq!(DROP_T.load(), 0); 196 | 197 | drop(handle); 198 | assert_eq!(POLL.load(), 0); 199 | assert_eq!(SCHEDULE.load(), 0); 200 | assert_eq!(DROP_F.load(), 0); 201 | assert_eq!(DROP_S.load(), 0); 202 | assert_eq!(DROP_T.load(), 0); 203 | 204 | task.run(); 205 | assert_eq!(POLL.load(), 0); 206 | assert_eq!(SCHEDULE.load(), 0); 207 | assert_eq!(DROP_F.load(), 1); 208 | assert_eq!(DROP_S.load(), 1); 209 | assert_eq!(DROP_T.load(), 1); 210 | } 211 | 212 | #[test] 213 | fn run_and_cancel() { 214 | future!(f, POLL, DROP_F); 215 | schedule!(s, SCHEDULE, DROP_S); 216 | task!(task, handle, f, s, DROP_T); 217 | 218 | task.run(); 219 | assert_eq!(POLL.load(), 1); 220 | assert_eq!(SCHEDULE.load(), 0); 221 | assert_eq!(DROP_F.load(), 1); 222 | 
assert_eq!(DROP_S.load(), 0); 223 | assert_eq!(DROP_T.load(), 0); 224 | 225 | handle.cancel(); 226 | assert_eq!(POLL.load(), 1); 227 | assert_eq!(SCHEDULE.load(), 0); 228 | assert_eq!(DROP_F.load(), 1); 229 | assert_eq!(DROP_S.load(), 0); 230 | assert_eq!(DROP_T.load(), 0); 231 | 232 | drop(handle); 233 | assert_eq!(POLL.load(), 1); 234 | assert_eq!(SCHEDULE.load(), 0); 235 | assert_eq!(DROP_F.load(), 1); 236 | assert_eq!(DROP_S.load(), 1); 237 | assert_eq!(DROP_T.load(), 1); 238 | } 239 | 240 | #[test] 241 | fn schedule() { 242 | let (s, r) = channel::unbounded(); 243 | let schedule = move |t| s.send(t).unwrap(); 244 | let (task, _handle) = async_task::spawn( 245 | future::poll_fn(|_| Poll::<()>::Pending), 246 | schedule, 247 | Box::new(0), 248 | ); 249 | 250 | assert!(r.is_empty()); 251 | task.schedule(); 252 | 253 | let task = r.recv().unwrap(); 254 | assert!(r.is_empty()); 255 | task.schedule(); 256 | 257 | let task = r.recv().unwrap(); 258 | assert!(r.is_empty()); 259 | task.schedule(); 260 | 261 | r.recv().unwrap(); 262 | } 263 | 264 | #[test] 265 | fn tag() { 266 | let (s, r) = channel::unbounded(); 267 | let schedule = move |t| s.send(t).unwrap(); 268 | let (task, handle) = async_task::spawn( 269 | future::poll_fn(|_| Poll::<()>::Pending), 270 | schedule, 271 | AtomicUsize::new(7), 272 | ); 273 | 274 | assert!(r.is_empty()); 275 | task.schedule(); 276 | 277 | let task = r.recv().unwrap(); 278 | assert!(r.is_empty()); 279 | handle.tag().fetch_add(1, Ordering::SeqCst); 280 | task.schedule(); 281 | 282 | let task = r.recv().unwrap(); 283 | assert_eq!(task.tag().load(Ordering::SeqCst), 8); 284 | assert!(r.is_empty()); 285 | task.schedule(); 286 | 287 | r.recv().unwrap(); 288 | } 289 | 290 | #[test] 291 | fn schedule_counter() { 292 | let (s, r) = channel::unbounded(); 293 | let schedule = move |t: Task| { 294 | t.tag().fetch_add(1, Ordering::SeqCst); 295 | s.send(t).unwrap(); 296 | }; 297 | let (task, handle) = async_task::spawn( 298 | future::poll_fn(|_| Poll::<()>::Pending), 299 | schedule, 300 | AtomicUsize::new(0), 301 | ); 302 | task.schedule(); 303 | 304 | assert_eq!(handle.tag().load(Ordering::SeqCst), 1); 305 | r.recv().unwrap().schedule(); 306 | 307 | assert_eq!(handle.tag().load(Ordering::SeqCst), 2); 308 | r.recv().unwrap().schedule(); 309 | 310 | assert_eq!(handle.tag().load(Ordering::SeqCst), 3); 311 | r.recv().unwrap(); 312 | } 313 | 314 | #[test] 315 | fn drop_inside_schedule() { 316 | struct DropGuard(AtomicUsize); 317 | impl Drop for DropGuard { 318 | fn drop(&mut self) { 319 | self.0.fetch_add(1, Ordering::SeqCst); 320 | } 321 | } 322 | let guard = DropGuard(AtomicUsize::new(0)); 323 | 324 | let (task, _) = async_task::spawn( 325 | async {}, 326 | move |task| { 327 | assert_eq!(guard.0.load(Ordering::SeqCst), 0); 328 | drop(task); 329 | assert_eq!(guard.0.load(Ordering::SeqCst), 0); 330 | }, 331 | (), 332 | ); 333 | task.schedule(); 334 | } 335 | 336 | #[test] 337 | fn waker() { 338 | let (s, r) = channel::unbounded(); 339 | let schedule = move |t| s.send(t).unwrap(); 340 | let (task, handle) = async_task::spawn( 341 | future::poll_fn(|_| Poll::<()>::Pending), 342 | schedule, 343 | Box::new(0), 344 | ); 345 | 346 | assert!(r.is_empty()); 347 | let w = task.waker(); 348 | task.run(); 349 | w.wake(); 350 | 351 | let task = r.recv().unwrap(); 352 | task.run(); 353 | handle.waker().wake(); 354 | 355 | r.recv().unwrap(); 356 | } 357 | 358 | #[test] 359 | fn raw() { 360 | let (task, _handle) = async_task::spawn(async {}, |_| panic!(), Box::new(AtomicUsize::new(7))); 361 | 
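    // Round-trip the task through a raw pointer to its tag: `into_raw` leaks the task as a
    // `*const T` pointing at the tag, and `from_raw` rebuilds the `Task` from that same pointer.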
362 | let a = task.into_raw(); 363 | let task = unsafe { 364 | (*a).fetch_add(1, Ordering::SeqCst); 365 | Task::from_raw(a) 366 | }; 367 | 368 | assert_eq!(task.tag().load(Ordering::SeqCst), 8); 369 | task.run(); 370 | } 371 | -------------------------------------------------------------------------------- /tests/join.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::future::Future; 3 | use std::pin::Pin; 4 | use std::task::{Context, Poll}; 5 | use std::thread; 6 | use std::time::Duration; 7 | 8 | use async_task::Task; 9 | use crossbeam::atomic::AtomicCell; 10 | use futures::executor::block_on; 11 | use futures::future; 12 | use lazy_static::lazy_static; 13 | 14 | // Creates a future with event counters. 15 | // 16 | // Usage: `future!(f, POLL, DROP_F, DROP_O)` 17 | // 18 | // The future `f` outputs `Poll::Ready`. 19 | // When it gets polled, `POLL` is incremented. 20 | // When it gets dropped, `DROP_F` is incremented. 21 | // When the output gets dropped, `DROP_O` is incremented. 22 | macro_rules! future { 23 | ($name:pat, $poll:ident, $drop_f:ident, $drop_o:ident) => { 24 | lazy_static! { 25 | static ref $poll: AtomicCell = AtomicCell::new(0); 26 | static ref $drop_f: AtomicCell = AtomicCell::new(0); 27 | static ref $drop_o: AtomicCell = AtomicCell::new(0); 28 | } 29 | 30 | let $name = { 31 | struct Fut(Box); 32 | 33 | impl Future for Fut { 34 | type Output = Out; 35 | 36 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 37 | $poll.fetch_add(1); 38 | Poll::Ready(Out(Box::new(0))) 39 | } 40 | } 41 | 42 | impl Drop for Fut { 43 | fn drop(&mut self) { 44 | $drop_f.fetch_add(1); 45 | } 46 | } 47 | 48 | struct Out(Box); 49 | 50 | impl Drop for Out { 51 | fn drop(&mut self) { 52 | $drop_o.fetch_add(1); 53 | } 54 | } 55 | 56 | Fut(Box::new(0)) 57 | }; 58 | }; 59 | } 60 | 61 | // Creates a schedule function with event counters. 62 | // 63 | // Usage: `schedule!(s, SCHED, DROP)` 64 | // 65 | // The schedule function `s` does nothing. 66 | // When it gets invoked, `SCHED` is incremented. 67 | // When it gets dropped, `DROP` is incremented. 68 | macro_rules! schedule { 69 | ($name:pat, $sched:ident, $drop:ident) => { 70 | lazy_static! { 71 | static ref $sched: AtomicCell = AtomicCell::new(0); 72 | static ref $drop: AtomicCell = AtomicCell::new(0); 73 | } 74 | 75 | let $name = { 76 | struct Guard(Box); 77 | 78 | impl Drop for Guard { 79 | fn drop(&mut self) { 80 | $drop.fetch_add(1); 81 | } 82 | } 83 | 84 | let guard = Guard(Box::new(0)); 85 | move |task: Task<_>| { 86 | &guard; 87 | task.schedule(); 88 | $sched.fetch_add(1); 89 | } 90 | }; 91 | }; 92 | } 93 | 94 | // Creates a task with event counters. 95 | // 96 | // Usage: `task!(task, handle f, s, DROP)` 97 | // 98 | // A task with future `f` and schedule function `s` is created. 99 | // The `Task` and `JoinHandle` are bound to `task` and `handle`, respectively. 100 | // When the tag inside the task gets dropped, `DROP` is incremented. 101 | macro_rules! task { 102 | ($task:pat, $handle: pat, $future:expr, $schedule:expr, $drop:ident) => { 103 | lazy_static! 
{ 104 | static ref $drop: AtomicCell = AtomicCell::new(0); 105 | } 106 | 107 | let ($task, $handle) = { 108 | struct Tag(Box); 109 | 110 | impl Drop for Tag { 111 | fn drop(&mut self) { 112 | $drop.fetch_add(1); 113 | } 114 | } 115 | 116 | async_task::spawn($future, $schedule, Tag(Box::new(0))) 117 | }; 118 | }; 119 | } 120 | 121 | fn ms(ms: u64) -> Duration { 122 | Duration::from_millis(ms) 123 | } 124 | 125 | #[test] 126 | fn cancel_and_join() { 127 | future!(f, POLL, DROP_F, DROP_O); 128 | schedule!(s, SCHEDULE, DROP_S); 129 | task!(task, handle, f, s, DROP_T); 130 | 131 | assert_eq!(DROP_O.load(), 0); 132 | 133 | task.cancel(); 134 | drop(task); 135 | assert_eq!(DROP_O.load(), 0); 136 | 137 | assert!(block_on(handle).is_none()); 138 | assert_eq!(POLL.load(), 0); 139 | assert_eq!(SCHEDULE.load(), 0); 140 | assert_eq!(DROP_F.load(), 1); 141 | assert_eq!(DROP_S.load(), 1); 142 | assert_eq!(DROP_T.load(), 1); 143 | assert_eq!(DROP_O.load(), 0); 144 | } 145 | 146 | #[test] 147 | fn run_and_join() { 148 | future!(f, POLL, DROP_F, DROP_O); 149 | schedule!(s, SCHEDULE, DROP_S); 150 | task!(task, handle, f, s, DROP_T); 151 | 152 | assert_eq!(DROP_O.load(), 0); 153 | 154 | task.run(); 155 | assert_eq!(DROP_O.load(), 0); 156 | 157 | assert!(block_on(handle).is_some()); 158 | assert_eq!(POLL.load(), 1); 159 | assert_eq!(SCHEDULE.load(), 0); 160 | assert_eq!(DROP_F.load(), 1); 161 | assert_eq!(DROP_S.load(), 1); 162 | assert_eq!(DROP_T.load(), 1); 163 | assert_eq!(DROP_O.load(), 1); 164 | } 165 | 166 | #[test] 167 | fn drop_handle_and_run() { 168 | future!(f, POLL, DROP_F, DROP_O); 169 | schedule!(s, SCHEDULE, DROP_S); 170 | task!(task, handle, f, s, DROP_T); 171 | 172 | assert_eq!(DROP_O.load(), 0); 173 | 174 | drop(handle); 175 | assert_eq!(DROP_O.load(), 0); 176 | 177 | task.run(); 178 | assert_eq!(POLL.load(), 1); 179 | assert_eq!(SCHEDULE.load(), 0); 180 | assert_eq!(DROP_F.load(), 1); 181 | assert_eq!(DROP_S.load(), 1); 182 | assert_eq!(DROP_T.load(), 1); 183 | assert_eq!(DROP_O.load(), 1); 184 | } 185 | 186 | #[test] 187 | fn join_twice() { 188 | future!(f, POLL, DROP_F, DROP_O); 189 | schedule!(s, SCHEDULE, DROP_S); 190 | task!(task, mut handle, f, s, DROP_T); 191 | 192 | assert_eq!(DROP_O.load(), 0); 193 | 194 | task.run(); 195 | assert_eq!(DROP_O.load(), 0); 196 | 197 | assert!(block_on(&mut handle).is_some()); 198 | assert_eq!(POLL.load(), 1); 199 | assert_eq!(SCHEDULE.load(), 0); 200 | assert_eq!(DROP_F.load(), 1); 201 | assert_eq!(DROP_S.load(), 0); 202 | assert_eq!(DROP_T.load(), 0); 203 | assert_eq!(DROP_O.load(), 1); 204 | 205 | assert!(block_on(&mut handle).is_none()); 206 | assert_eq!(POLL.load(), 1); 207 | assert_eq!(SCHEDULE.load(), 0); 208 | assert_eq!(DROP_F.load(), 1); 209 | assert_eq!(DROP_S.load(), 0); 210 | assert_eq!(DROP_T.load(), 0); 211 | assert_eq!(DROP_O.load(), 1); 212 | 213 | drop(handle); 214 | assert_eq!(DROP_S.load(), 1); 215 | assert_eq!(DROP_T.load(), 1); 216 | } 217 | 218 | #[test] 219 | fn join_and_cancel() { 220 | future!(f, POLL, DROP_F, DROP_O); 221 | schedule!(s, SCHEDULE, DROP_S); 222 | task!(task, handle, f, s, DROP_T); 223 | 224 | crossbeam::scope(|scope| { 225 | scope.spawn(|_| { 226 | thread::sleep(ms(200)); 227 | 228 | task.cancel(); 229 | drop(task); 230 | 231 | thread::sleep(ms(400)); 232 | assert_eq!(POLL.load(), 0); 233 | assert_eq!(SCHEDULE.load(), 0); 234 | assert_eq!(DROP_F.load(), 1); 235 | assert_eq!(DROP_O.load(), 0); 236 | assert_eq!(DROP_S.load(), 1); 237 | assert_eq!(DROP_T.load(), 1); 238 | }); 239 | 240 | 
assert!(block_on(handle).is_none()); 241 | assert_eq!(POLL.load(), 0); 242 | assert_eq!(SCHEDULE.load(), 0); 243 | 244 | thread::sleep(ms(200)); 245 | assert_eq!(DROP_F.load(), 1); 246 | assert_eq!(DROP_O.load(), 0); 247 | assert_eq!(DROP_S.load(), 1); 248 | assert_eq!(DROP_T.load(), 1); 249 | }) 250 | .unwrap(); 251 | } 252 | 253 | #[test] 254 | fn join_and_run() { 255 | future!(f, POLL, DROP_F, DROP_O); 256 | schedule!(s, SCHEDULE, DROP_S); 257 | task!(task, handle, f, s, DROP_T); 258 | 259 | crossbeam::scope(|scope| { 260 | scope.spawn(|_| { 261 | thread::sleep(ms(400)); 262 | 263 | task.run(); 264 | assert_eq!(POLL.load(), 1); 265 | assert_eq!(SCHEDULE.load(), 0); 266 | assert_eq!(DROP_F.load(), 1); 267 | 268 | thread::sleep(ms(200)); 269 | assert_eq!(DROP_S.load(), 1); 270 | assert_eq!(DROP_T.load(), 1); 271 | }); 272 | 273 | assert!(block_on(handle).is_some()); 274 | assert_eq!(POLL.load(), 1); 275 | assert_eq!(SCHEDULE.load(), 0); 276 | assert_eq!(DROP_F.load(), 1); 277 | assert_eq!(DROP_O.load(), 1); 278 | 279 | thread::sleep(ms(200)); 280 | assert_eq!(DROP_S.load(), 1); 281 | assert_eq!(DROP_T.load(), 1); 282 | }) 283 | .unwrap(); 284 | } 285 | 286 | #[test] 287 | fn try_join_and_run_and_join() { 288 | future!(f, POLL, DROP_F, DROP_O); 289 | schedule!(s, SCHEDULE, DROP_S); 290 | task!(task, mut handle, f, s, DROP_T); 291 | 292 | crossbeam::scope(|scope| { 293 | scope.spawn(|_| { 294 | thread::sleep(ms(400)); 295 | 296 | task.run(); 297 | assert_eq!(POLL.load(), 1); 298 | assert_eq!(SCHEDULE.load(), 0); 299 | assert_eq!(DROP_F.load(), 1); 300 | 301 | thread::sleep(ms(200)); 302 | assert_eq!(DROP_S.load(), 1); 303 | assert_eq!(DROP_T.load(), 1); 304 | }); 305 | 306 | block_on(future::select(&mut handle, future::ready(()))); 307 | assert_eq!(POLL.load(), 0); 308 | assert_eq!(SCHEDULE.load(), 0); 309 | assert_eq!(DROP_F.load(), 0); 310 | assert_eq!(DROP_S.load(), 0); 311 | assert_eq!(DROP_T.load(), 0); 312 | assert_eq!(DROP_O.load(), 0); 313 | 314 | assert!(block_on(handle).is_some()); 315 | assert_eq!(POLL.load(), 1); 316 | assert_eq!(SCHEDULE.load(), 0); 317 | assert_eq!(DROP_F.load(), 1); 318 | assert_eq!(DROP_O.load(), 1); 319 | 320 | thread::sleep(ms(200)); 321 | assert_eq!(DROP_S.load(), 1); 322 | assert_eq!(DROP_T.load(), 1); 323 | }) 324 | .unwrap(); 325 | } 326 | 327 | #[test] 328 | fn try_join_and_cancel_and_run() { 329 | future!(f, POLL, DROP_F, DROP_O); 330 | schedule!(s, SCHEDULE, DROP_S); 331 | task!(task, mut handle, f, s, DROP_T); 332 | 333 | crossbeam::scope(|scope| { 334 | scope.spawn(|_| { 335 | thread::sleep(ms(400)); 336 | 337 | task.run(); 338 | assert_eq!(POLL.load(), 0); 339 | assert_eq!(SCHEDULE.load(), 0); 340 | assert_eq!(DROP_F.load(), 1); 341 | assert_eq!(DROP_S.load(), 1); 342 | assert_eq!(DROP_T.load(), 1); 343 | }); 344 | 345 | block_on(future::select(&mut handle, future::ready(()))); 346 | assert_eq!(POLL.load(), 0); 347 | assert_eq!(SCHEDULE.load(), 0); 348 | assert_eq!(DROP_F.load(), 0); 349 | assert_eq!(DROP_S.load(), 0); 350 | assert_eq!(DROP_T.load(), 0); 351 | assert_eq!(DROP_O.load(), 0); 352 | 353 | handle.cancel(); 354 | assert_eq!(POLL.load(), 0); 355 | assert_eq!(SCHEDULE.load(), 0); 356 | assert_eq!(DROP_F.load(), 0); 357 | assert_eq!(DROP_S.load(), 0); 358 | assert_eq!(DROP_T.load(), 0); 359 | assert_eq!(DROP_O.load(), 0); 360 | 361 | drop(handle); 362 | assert_eq!(POLL.load(), 0); 363 | assert_eq!(SCHEDULE.load(), 0); 364 | assert_eq!(DROP_F.load(), 0); 365 | assert_eq!(DROP_S.load(), 0); 366 | assert_eq!(DROP_T.load(), 0); 367 | 
assert_eq!(DROP_O.load(), 0); 368 | }) 369 | .unwrap(); 370 | } 371 | 372 | #[test] 373 | fn try_join_and_run_and_cancel() { 374 | future!(f, POLL, DROP_F, DROP_O); 375 | schedule!(s, SCHEDULE, DROP_S); 376 | task!(task, mut handle, f, s, DROP_T); 377 | 378 | crossbeam::scope(|scope| { 379 | scope.spawn(|_| { 380 | thread::sleep(ms(200)); 381 | 382 | task.run(); 383 | assert_eq!(POLL.load(), 1); 384 | assert_eq!(SCHEDULE.load(), 0); 385 | assert_eq!(DROP_F.load(), 1); 386 | assert_eq!(DROP_S.load(), 0); 387 | assert_eq!(DROP_T.load(), 0); 388 | }); 389 | 390 | block_on(future::select(&mut handle, future::ready(()))); 391 | assert_eq!(POLL.load(), 0); 392 | assert_eq!(SCHEDULE.load(), 0); 393 | assert_eq!(DROP_F.load(), 0); 394 | assert_eq!(DROP_S.load(), 0); 395 | assert_eq!(DROP_T.load(), 0); 396 | assert_eq!(DROP_O.load(), 0); 397 | 398 | thread::sleep(ms(400)); 399 | 400 | handle.cancel(); 401 | assert_eq!(POLL.load(), 1); 402 | assert_eq!(SCHEDULE.load(), 0); 403 | assert_eq!(DROP_F.load(), 1); 404 | assert_eq!(DROP_S.load(), 0); 405 | assert_eq!(DROP_T.load(), 0); 406 | assert_eq!(DROP_O.load(), 0); 407 | 408 | drop(handle); 409 | assert_eq!(POLL.load(), 1); 410 | assert_eq!(SCHEDULE.load(), 0); 411 | assert_eq!(DROP_F.load(), 1); 412 | assert_eq!(DROP_S.load(), 1); 413 | assert_eq!(DROP_T.load(), 1); 414 | assert_eq!(DROP_O.load(), 1); 415 | }) 416 | .unwrap(); 417 | } 418 | 419 | #[test] 420 | fn await_output() { 421 | struct Fut(Cell>); 422 | 423 | impl Fut { 424 | fn new(t: T) -> Fut { 425 | Fut(Cell::new(Some(t))) 426 | } 427 | } 428 | 429 | impl Future for Fut { 430 | type Output = T; 431 | 432 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { 433 | Poll::Ready(self.0.take().unwrap()) 434 | } 435 | } 436 | 437 | for i in 0..10 { 438 | let (task, handle) = async_task::spawn(Fut::new(i), drop, Box::new(0)); 439 | task.run(); 440 | assert_eq!(block_on(handle), Some(i)); 441 | } 442 | 443 | for i in 0..10 { 444 | let (task, handle) = async_task::spawn(Fut::new(vec![7; i]), drop, Box::new(0)); 445 | task.run(); 446 | assert_eq!(block_on(handle), Some(vec![7; i])); 447 | } 448 | 449 | let (task, handle) = async_task::spawn(Fut::new("foo".to_string()), drop, Box::new(0)); 450 | task.run(); 451 | assert_eq!(block_on(handle), Some("foo".to_string())); 452 | } 453 | -------------------------------------------------------------------------------- /tests/panic.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::panic::catch_unwind; 3 | use std::pin::Pin; 4 | use std::task::{Context, Poll}; 5 | use std::thread; 6 | use std::time::Duration; 7 | 8 | use async_task::Task; 9 | use crossbeam::atomic::AtomicCell; 10 | use futures::executor::block_on; 11 | use futures::future; 12 | use lazy_static::lazy_static; 13 | 14 | // Creates a future with event counters. 15 | // 16 | // Usage: `future!(f, POLL, DROP)` 17 | // 18 | // The future `f` sleeps for 200 ms and then panics. 19 | // When it gets polled, `POLL` is incremented. 20 | // When it gets dropped, `DROP` is incremented. 21 | macro_rules! future { 22 | ($name:pat, $poll:ident, $drop:ident) => { 23 | lazy_static! 
{ 24 | static ref $poll: AtomicCell<usize> = AtomicCell::new(0); 25 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 26 | } 27 | 28 | let $name = { 29 | struct Fut(Box<i32>); 30 | 31 | impl Future for Fut { 32 | type Output = (); 33 | 34 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { 35 | $poll.fetch_add(1); 36 | thread::sleep(ms(400)); 37 | panic!() 38 | } 39 | } 40 | 41 | impl Drop for Fut { 42 | fn drop(&mut self) { 43 | $drop.fetch_add(1); 44 | } 45 | } 46 | 47 | Fut(Box::new(0)) 48 | }; 49 | }; 50 | } 51 | 52 | // Creates a schedule function with event counters. 53 | // 54 | // Usage: `schedule!(s, SCHED, DROP)` 55 | // 56 | // The schedule function `s` does nothing. 57 | // When it gets invoked, `SCHED` is incremented. 58 | // When it gets dropped, `DROP` is incremented. 59 | macro_rules! schedule { 60 | ($name:pat, $sched:ident, $drop:ident) => { 61 | lazy_static! { 62 | static ref $sched: AtomicCell<usize> = AtomicCell::new(0); 63 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 64 | } 65 | 66 | let $name = { 67 | struct Guard(Box<i32>); 68 | 69 | impl Drop for Guard { 70 | fn drop(&mut self) { 71 | $drop.fetch_add(1); 72 | } 73 | } 74 | 75 | let guard = Guard(Box::new(0)); 76 | move |_task: Task<_>| { 77 | &guard; 78 | $sched.fetch_add(1); 79 | } 80 | }; 81 | }; 82 | } 83 | 84 | // Creates a task with event counters. 85 | // 86 | // Usage: `task!(task, handle, f, s, DROP)` 87 | // 88 | // A task with future `f` and schedule function `s` is created. 89 | // The `Task` and `JoinHandle` are bound to `task` and `handle`, respectively. 90 | // When the tag inside the task gets dropped, `DROP` is incremented. 91 | macro_rules! task { 92 | ($task:pat, $handle: pat, $future:expr, $schedule:expr, $drop:ident) => { 93 | lazy_static! { 94 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 95 | } 96 | 97 | let ($task, $handle) = { 98 | struct Tag(Box<i32>); 99 | 100 | impl Drop for Tag { 101 | fn drop(&mut self) { 102 | $drop.fetch_add(1); 103 | } 104 | } 105 | 106 | async_task::spawn($future, $schedule, Tag(Box::new(0))) 107 | }; 108 | }; 109 | } 110 | 111 | fn ms(ms: u64) -> Duration { 112 | Duration::from_millis(ms) 113 | } 114 | 115 | #[test] 116 | fn cancel_during_run() { 117 | future!(f, POLL, DROP_F); 118 | schedule!(s, SCHEDULE, DROP_S); 119 | task!(task, handle, f, s, DROP_T); 120 | 121 | crossbeam::scope(|scope| { 122 | scope.spawn(|_| { 123 | assert!(catch_unwind(|| task.run()).is_err()); 124 | assert_eq!(POLL.load(), 1); 125 | assert_eq!(SCHEDULE.load(), 0); 126 | assert_eq!(DROP_F.load(), 1); 127 | assert_eq!(DROP_S.load(), 1); 128 | assert_eq!(DROP_T.load(), 1); 129 | }); 130 | 131 | thread::sleep(ms(200)); 132 | 133 | handle.cancel(); 134 | assert_eq!(POLL.load(), 1); 135 | assert_eq!(SCHEDULE.load(), 0); 136 | assert_eq!(DROP_F.load(), 0); 137 | assert_eq!(DROP_S.load(), 0); 138 | assert_eq!(DROP_T.load(), 0); 139 | 140 | drop(handle); 141 | assert_eq!(POLL.load(), 1); 142 | assert_eq!(SCHEDULE.load(), 0); 143 | assert_eq!(DROP_F.load(), 0); 144 | assert_eq!(DROP_S.load(), 0); 145 | assert_eq!(DROP_T.load(), 0); 146 | }) 147 | .unwrap(); 148 | } 149 | 150 | #[test] 151 | fn run_and_join() { 152 | future!(f, POLL, DROP_F); 153 | schedule!(s, SCHEDULE, DROP_S); 154 | task!(task, handle, f, s, DROP_T); 155 | 156 | assert!(catch_unwind(|| task.run()).is_err()); 157 | assert_eq!(POLL.load(), 1); 158 | assert_eq!(SCHEDULE.load(), 0); 159 | assert_eq!(DROP_F.load(), 1); 160 | assert_eq!(DROP_S.load(), 0); 161 | assert_eq!(DROP_T.load(), 0); 162 | 163 |
assert!(block_on(handle).is_none()); 164 | assert_eq!(POLL.load(), 1); 165 | assert_eq!(SCHEDULE.load(), 0); 166 | assert_eq!(DROP_F.load(), 1); 167 | assert_eq!(DROP_S.load(), 1); 168 | assert_eq!(DROP_T.load(), 1); 169 | } 170 | 171 | #[test] 172 | fn try_join_and_run_and_join() { 173 | future!(f, POLL, DROP_F); 174 | schedule!(s, SCHEDULE, DROP_S); 175 | task!(task, mut handle, f, s, DROP_T); 176 | 177 | block_on(future::select(&mut handle, future::ready(()))); 178 | assert_eq!(POLL.load(), 0); 179 | assert_eq!(SCHEDULE.load(), 0); 180 | assert_eq!(DROP_F.load(), 0); 181 | assert_eq!(DROP_S.load(), 0); 182 | assert_eq!(DROP_T.load(), 0); 183 | 184 | assert!(catch_unwind(|| task.run()).is_err()); 185 | assert_eq!(POLL.load(), 1); 186 | assert_eq!(SCHEDULE.load(), 0); 187 | assert_eq!(DROP_F.load(), 1); 188 | assert_eq!(DROP_S.load(), 0); 189 | assert_eq!(DROP_T.load(), 0); 190 | 191 | assert!(block_on(handle).is_none()); 192 | assert_eq!(POLL.load(), 1); 193 | assert_eq!(SCHEDULE.load(), 0); 194 | assert_eq!(DROP_F.load(), 1); 195 | assert_eq!(DROP_S.load(), 1); 196 | assert_eq!(DROP_T.load(), 1); 197 | } 198 | 199 | #[test] 200 | fn join_during_run() { 201 | future!(f, POLL, DROP_F); 202 | schedule!(s, SCHEDULE, DROP_S); 203 | task!(task, handle, f, s, DROP_T); 204 | 205 | crossbeam::scope(|scope| { 206 | scope.spawn(|_| { 207 | assert!(catch_unwind(|| task.run()).is_err()); 208 | assert_eq!(POLL.load(), 1); 209 | assert_eq!(SCHEDULE.load(), 0); 210 | assert_eq!(DROP_F.load(), 1); 211 | 212 | thread::sleep(ms(200)); 213 | assert_eq!(DROP_S.load(), 1); 214 | assert_eq!(DROP_T.load(), 1); 215 | }); 216 | 217 | thread::sleep(ms(200)); 218 | 219 | assert!(block_on(handle).is_none()); 220 | assert_eq!(POLL.load(), 1); 221 | assert_eq!(SCHEDULE.load(), 0); 222 | assert_eq!(DROP_F.load(), 1); 223 | 224 | thread::sleep(ms(200)); 225 | assert_eq!(DROP_S.load(), 1); 226 | assert_eq!(DROP_T.load(), 1); 227 | }) 228 | .unwrap(); 229 | } 230 | 231 | #[test] 232 | fn try_join_during_run() { 233 | future!(f, POLL, DROP_F); 234 | schedule!(s, SCHEDULE, DROP_S); 235 | task!(task, mut handle, f, s, DROP_T); 236 | 237 | crossbeam::scope(|scope| { 238 | scope.spawn(|_| { 239 | assert!(catch_unwind(|| task.run()).is_err()); 240 | assert_eq!(POLL.load(), 1); 241 | assert_eq!(SCHEDULE.load(), 0); 242 | assert_eq!(DROP_F.load(), 1); 243 | assert_eq!(DROP_S.load(), 1); 244 | assert_eq!(DROP_T.load(), 1); 245 | }); 246 | 247 | thread::sleep(ms(200)); 248 | 249 | block_on(future::select(&mut handle, future::ready(()))); 250 | assert_eq!(POLL.load(), 1); 251 | assert_eq!(SCHEDULE.load(), 0); 252 | assert_eq!(DROP_F.load(), 0); 253 | assert_eq!(DROP_S.load(), 0); 254 | assert_eq!(DROP_T.load(), 0); 255 | drop(handle); 256 | }) 257 | .unwrap(); 258 | } 259 | 260 | #[test] 261 | fn drop_handle_during_run() { 262 | future!(f, POLL, DROP_F); 263 | schedule!(s, SCHEDULE, DROP_S); 264 | task!(task, handle, f, s, DROP_T); 265 | 266 | crossbeam::scope(|scope| { 267 | scope.spawn(|_| { 268 | assert!(catch_unwind(|| task.run()).is_err()); 269 | assert_eq!(POLL.load(), 1); 270 | assert_eq!(SCHEDULE.load(), 0); 271 | assert_eq!(DROP_F.load(), 1); 272 | assert_eq!(DROP_S.load(), 1); 273 | assert_eq!(DROP_T.load(), 1); 274 | }); 275 | 276 | thread::sleep(ms(200)); 277 | 278 | drop(handle); 279 | assert_eq!(POLL.load(), 1); 280 | assert_eq!(SCHEDULE.load(), 0); 281 | assert_eq!(DROP_F.load(), 0); 282 | assert_eq!(DROP_S.load(), 0); 283 | assert_eq!(DROP_T.load(), 0); 284 | }) 285 | .unwrap(); 286 | } 287 | 
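// A rough sketch of the spawn/run/join flow these panic tests exercise, stripped of the
// event counters. This is illustrative only, assuming the `async_task::spawn` API used
// above and the `futures` executor from the dev-dependencies (the no-op scheduler and
// the arithmetic future are made up for the example):
//
//     let future = async { 1 + 2 };
//     let schedule = |_t: async_task::Task<()>| {};   // no-op scheduler; nothing re-wakes this future
//     let (task, handle) = async_task::spawn(future, schedule, ());
//
//     task.run();                                     // polls once; the future completes
//     assert_eq!(futures::executor::block_on(handle), Some(3));
//
// `block_on(handle)` yields `Some(output)` on completion and `None` when the task was
// cancelled or its future panicked, which is exactly what the assertions above check.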
-------------------------------------------------------------------------------- /tests/ready.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::task::{Context, Poll}; 4 | use std::thread; 5 | use std::time::Duration; 6 | 7 | use async_task::Task; 8 | use crossbeam::atomic::AtomicCell; 9 | use futures::executor::block_on; 10 | use futures::future; 11 | use lazy_static::lazy_static; 12 | 13 | // Creates a future with event counters. 14 | // 15 | // Usage: `future!(f, POLL, DROP_F, DROP_O)` 16 | // 17 | // The future `f` sleeps for 200 ms and outputs `Poll::Ready`. 18 | // When it gets polled, `POLL` is incremented. 19 | // When it gets dropped, `DROP_F` is incremented. 20 | // When the output gets dropped, `DROP_O` is incremented. 21 | macro_rules! future { 22 | ($name:pat, $poll:ident, $drop_f:ident, $drop_o:ident) => { 23 | lazy_static! { 24 | static ref $poll: AtomicCell<usize> = AtomicCell::new(0); 25 | static ref $drop_f: AtomicCell<usize> = AtomicCell::new(0); 26 | static ref $drop_o: AtomicCell<usize> = AtomicCell::new(0); 27 | } 28 | 29 | let $name = { 30 | struct Fut(Box<i32>); 31 | 32 | impl Future for Fut { 33 | type Output = Out; 34 | 35 | fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { 36 | $poll.fetch_add(1); 37 | thread::sleep(ms(400)); 38 | Poll::Ready(Out(Box::new(0))) 39 | } 40 | } 41 | 42 | impl Drop for Fut { 43 | fn drop(&mut self) { 44 | $drop_f.fetch_add(1); 45 | } 46 | } 47 | 48 | struct Out(Box<i32>); 49 | 50 | impl Drop for Out { 51 | fn drop(&mut self) { 52 | $drop_o.fetch_add(1); 53 | } 54 | } 55 | 56 | Fut(Box::new(0)) 57 | }; 58 | }; 59 | } 60 | 61 | // Creates a schedule function with event counters. 62 | // 63 | // Usage: `schedule!(s, SCHED, DROP)` 64 | // 65 | // The schedule function `s` does nothing. 66 | // When it gets invoked, `SCHED` is incremented. 67 | // When it gets dropped, `DROP` is incremented. 68 | macro_rules! schedule { 69 | ($name:pat, $sched:ident, $drop:ident) => { 70 | lazy_static! { 71 | static ref $sched: AtomicCell<usize> = AtomicCell::new(0); 72 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 73 | } 74 | 75 | let $name = { 76 | struct Guard(Box<i32>); 77 | 78 | impl Drop for Guard { 79 | fn drop(&mut self) { 80 | $drop.fetch_add(1); 81 | } 82 | } 83 | 84 | let guard = Guard(Box::new(0)); 85 | move |_task: Task<_>| { 86 | &guard; 87 | $sched.fetch_add(1); 88 | } 89 | }; 90 | }; 91 | } 92 | 93 | // Creates a task with event counters. 94 | // 95 | // Usage: `task!(task, handle, f, s, DROP)` 96 | // 97 | // A task with future `f` and schedule function `s` is created. 98 | // The `Task` and `JoinHandle` are bound to `task` and `handle`, respectively. 99 | // When the tag inside the task gets dropped, `DROP` is incremented. 100 | macro_rules! task { 101 | ($task:pat, $handle: pat, $future:expr, $schedule:expr, $drop:ident) => { 102 | lazy_static!
{ 103 | static ref $drop: AtomicCell = AtomicCell::new(0); 104 | } 105 | 106 | let ($task, $handle) = { 107 | struct Tag(Box); 108 | 109 | impl Drop for Tag { 110 | fn drop(&mut self) { 111 | $drop.fetch_add(1); 112 | } 113 | } 114 | 115 | async_task::spawn($future, $schedule, Tag(Box::new(0))) 116 | }; 117 | }; 118 | } 119 | 120 | fn ms(ms: u64) -> Duration { 121 | Duration::from_millis(ms) 122 | } 123 | 124 | #[test] 125 | fn cancel_during_run() { 126 | future!(f, POLL, DROP_F, DROP_O); 127 | schedule!(s, SCHEDULE, DROP_S); 128 | task!(task, handle, f, s, DROP_T); 129 | 130 | crossbeam::scope(|scope| { 131 | scope.spawn(|_| { 132 | task.run(); 133 | assert_eq!(POLL.load(), 1); 134 | assert_eq!(SCHEDULE.load(), 0); 135 | assert_eq!(DROP_F.load(), 1); 136 | assert_eq!(DROP_S.load(), 0); 137 | assert_eq!(DROP_T.load(), 0); 138 | assert_eq!(DROP_O.load(), 1); 139 | }); 140 | 141 | thread::sleep(ms(200)); 142 | 143 | handle.cancel(); 144 | assert_eq!(POLL.load(), 1); 145 | assert_eq!(SCHEDULE.load(), 0); 146 | assert_eq!(DROP_F.load(), 0); 147 | assert_eq!(DROP_S.load(), 0); 148 | assert_eq!(DROP_T.load(), 0); 149 | assert_eq!(DROP_O.load(), 0); 150 | 151 | thread::sleep(ms(400)); 152 | 153 | assert_eq!(POLL.load(), 1); 154 | assert_eq!(SCHEDULE.load(), 0); 155 | assert_eq!(DROP_F.load(), 1); 156 | assert_eq!(DROP_S.load(), 0); 157 | assert_eq!(DROP_T.load(), 0); 158 | assert_eq!(DROP_O.load(), 1); 159 | 160 | drop(handle); 161 | assert_eq!(POLL.load(), 1); 162 | assert_eq!(SCHEDULE.load(), 0); 163 | assert_eq!(DROP_F.load(), 1); 164 | assert_eq!(DROP_S.load(), 1); 165 | assert_eq!(DROP_T.load(), 1); 166 | assert_eq!(DROP_O.load(), 1); 167 | }) 168 | .unwrap(); 169 | } 170 | 171 | #[test] 172 | fn join_during_run() { 173 | future!(f, POLL, DROP_F, DROP_O); 174 | schedule!(s, SCHEDULE, DROP_S); 175 | task!(task, handle, f, s, DROP_T); 176 | 177 | crossbeam::scope(|scope| { 178 | scope.spawn(|_| { 179 | task.run(); 180 | assert_eq!(POLL.load(), 1); 181 | assert_eq!(SCHEDULE.load(), 0); 182 | assert_eq!(DROP_F.load(), 1); 183 | 184 | thread::sleep(ms(200)); 185 | assert_eq!(DROP_S.load(), 1); 186 | assert_eq!(DROP_T.load(), 1); 187 | }); 188 | 189 | thread::sleep(ms(200)); 190 | 191 | assert!(block_on(handle).is_some()); 192 | assert_eq!(POLL.load(), 1); 193 | assert_eq!(SCHEDULE.load(), 0); 194 | assert_eq!(DROP_F.load(), 1); 195 | assert_eq!(DROP_O.load(), 1); 196 | 197 | thread::sleep(ms(200)); 198 | assert_eq!(DROP_S.load(), 1); 199 | assert_eq!(DROP_T.load(), 1); 200 | }) 201 | .unwrap(); 202 | } 203 | 204 | #[test] 205 | fn try_join_during_run() { 206 | future!(f, POLL, DROP_F, DROP_O); 207 | schedule!(s, SCHEDULE, DROP_S); 208 | task!(task, mut handle, f, s, DROP_T); 209 | 210 | crossbeam::scope(|scope| { 211 | scope.spawn(|_| { 212 | task.run(); 213 | assert_eq!(POLL.load(), 1); 214 | assert_eq!(SCHEDULE.load(), 0); 215 | assert_eq!(DROP_F.load(), 1); 216 | assert_eq!(DROP_S.load(), 1); 217 | assert_eq!(DROP_T.load(), 1); 218 | assert_eq!(DROP_O.load(), 1); 219 | }); 220 | 221 | thread::sleep(ms(200)); 222 | 223 | block_on(future::select(&mut handle, future::ready(()))); 224 | assert_eq!(POLL.load(), 1); 225 | assert_eq!(SCHEDULE.load(), 0); 226 | assert_eq!(DROP_F.load(), 0); 227 | assert_eq!(DROP_S.load(), 0); 228 | assert_eq!(DROP_T.load(), 0); 229 | assert_eq!(DROP_O.load(), 0); 230 | drop(handle); 231 | }) 232 | .unwrap(); 233 | } 234 | 235 | #[test] 236 | fn drop_handle_during_run() { 237 | future!(f, POLL, DROP_F, DROP_O); 238 | schedule!(s, SCHEDULE, DROP_S); 239 | task!(task, 
handle, f, s, DROP_T); 240 | 241 | crossbeam::scope(|scope| { 242 | scope.spawn(|_| { 243 | task.run(); 244 | assert_eq!(POLL.load(), 1); 245 | assert_eq!(SCHEDULE.load(), 0); 246 | assert_eq!(DROP_F.load(), 1); 247 | assert_eq!(DROP_S.load(), 1); 248 | assert_eq!(DROP_T.load(), 1); 249 | assert_eq!(DROP_O.load(), 1); 250 | }); 251 | 252 | thread::sleep(ms(200)); 253 | 254 | drop(handle); 255 | assert_eq!(POLL.load(), 1); 256 | assert_eq!(SCHEDULE.load(), 0); 257 | assert_eq!(DROP_F.load(), 0); 258 | assert_eq!(DROP_S.load(), 0); 259 | assert_eq!(DROP_T.load(), 0); 260 | assert_eq!(DROP_O.load(), 0); 261 | }) 262 | .unwrap(); 263 | } 264 | -------------------------------------------------------------------------------- /tests/waker_fn.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::sync::Arc; 3 | 4 | #[test] 5 | fn wake() { 6 | let a = Arc::new(AtomicUsize::new(0)); 7 | let w = async_task::waker_fn({ 8 | let a = a.clone(); 9 | move || { 10 | a.fetch_add(1, Ordering::SeqCst); 11 | } 12 | }); 13 | 14 | assert_eq!(a.load(Ordering::SeqCst), 0); 15 | w.wake_by_ref(); 16 | assert_eq!(a.load(Ordering::SeqCst), 1); 17 | 18 | let w2 = w.clone(); 19 | assert_eq!(a.load(Ordering::SeqCst), 1); 20 | w2.wake_by_ref(); 21 | assert_eq!(a.load(Ordering::SeqCst), 2); 22 | drop(w2); 23 | assert_eq!(a.load(Ordering::SeqCst), 2); 24 | 25 | let w3 = w.clone(); 26 | assert_eq!(a.load(Ordering::SeqCst), 2); 27 | w3.wake(); 28 | assert_eq!(a.load(Ordering::SeqCst), 3); 29 | } 30 | -------------------------------------------------------------------------------- /tests/waker_panic.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::future::Future; 3 | use std::panic::catch_unwind; 4 | use std::pin::Pin; 5 | use std::task::Waker; 6 | use std::task::{Context, Poll}; 7 | use std::thread; 8 | use std::time::Duration; 9 | 10 | use async_task::Task; 11 | use crossbeam::atomic::AtomicCell; 12 | use crossbeam::channel; 13 | use lazy_static::lazy_static; 14 | 15 | // Creates a future with event counters. 16 | // 17 | // Usage: `future!(f, waker, POLL, DROP)` 18 | // 19 | // The future `f` always sleeps for 200 ms, and panics the second time it is polled. 20 | // When it gets polled, `POLL` is incremented. 21 | // When it gets dropped, `DROP` is incremented. 22 | // 23 | // Every time the future is run, it stores the waker into a global variable. 24 | // This waker can be extracted using the `waker` function. 25 | macro_rules! future { 26 | ($name:pat, $waker:pat, $poll:ident, $drop:ident) => { 27 | lazy_static! { 28 | static ref $poll: AtomicCell<usize> = AtomicCell::new(0); 29 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 30 | static ref WAKER: AtomicCell<Option<Waker>> = AtomicCell::new(None); 31 | } 32 | 33 | let ($name, $waker) = { 34 | struct Fut(Cell<bool>, Box<i32>); 35 | 36 | impl Future for Fut { 37 | type Output = (); 38 | 39 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { 40 | WAKER.store(Some(cx.waker().clone())); 41 | $poll.fetch_add(1); 42 | thread::sleep(ms(400)); 43 | 44 | if self.0.get() { 45 | panic!() 46 | } else { 47 | self.0.set(true); 48 | Poll::Pending 49 | } 50 | } 51 | } 52 | 53 | impl Drop for Fut { 54 | fn drop(&mut self) { 55 | $drop.fetch_add(1); 56 | } 57 | } 58 | 59 | (Fut(Cell::new(false), Box::new(0)), || { 60 | WAKER.swap(None).unwrap() 61 | }) 62 | }; 63 | }; 64 | } 65 | 66 | // Creates a schedule function with event counters.
67 | // 68 | // Usage: `schedule!(s, chan, SCHED, DROP)` 69 | // 70 | // The schedule function `s` pushes the task into `chan`. 71 | // When it gets invoked, `SCHED` is incremented. 72 | // When it gets dropped, `DROP` is incremented. 73 | // 74 | // Receiver `chan` extracts the task when it is scheduled. 75 | macro_rules! schedule { 76 | ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { 77 | lazy_static! { 78 | static ref $sched: AtomicCell = AtomicCell::new(0); 79 | static ref $drop: AtomicCell = AtomicCell::new(0); 80 | } 81 | 82 | let ($name, $chan) = { 83 | let (s, r) = channel::unbounded(); 84 | 85 | struct Guard(Box); 86 | 87 | impl Drop for Guard { 88 | fn drop(&mut self) { 89 | $drop.fetch_add(1); 90 | } 91 | } 92 | 93 | let guard = Guard(Box::new(0)); 94 | let sched = move |task: Task<_>| { 95 | &guard; 96 | $sched.fetch_add(1); 97 | s.send(task).unwrap(); 98 | }; 99 | 100 | (sched, r) 101 | }; 102 | }; 103 | } 104 | 105 | // Creates a task with event counters. 106 | // 107 | // Usage: `task!(task, handle f, s, DROP)` 108 | // 109 | // A task with future `f` and schedule function `s` is created. 110 | // The `Task` and `JoinHandle` are bound to `task` and `handle`, respectively. 111 | // When the tag inside the task gets dropped, `DROP` is incremented. 112 | macro_rules! task { 113 | ($task:pat, $handle: pat, $future:expr, $schedule:expr, $drop:ident) => { 114 | lazy_static! { 115 | static ref $drop: AtomicCell = AtomicCell::new(0); 116 | } 117 | 118 | let ($task, $handle) = { 119 | struct Tag(Box); 120 | 121 | impl Drop for Tag { 122 | fn drop(&mut self) { 123 | $drop.fetch_add(1); 124 | } 125 | } 126 | 127 | async_task::spawn($future, $schedule, Tag(Box::new(0))) 128 | }; 129 | }; 130 | } 131 | 132 | fn ms(ms: u64) -> Duration { 133 | Duration::from_millis(ms) 134 | } 135 | 136 | #[test] 137 | fn wake_during_run() { 138 | future!(f, waker, POLL, DROP_F); 139 | schedule!(s, chan, SCHEDULE, DROP_S); 140 | task!(task, handle, f, s, DROP_T); 141 | 142 | task.run(); 143 | let w = waker(); 144 | w.wake_by_ref(); 145 | let task = chan.recv().unwrap(); 146 | 147 | crossbeam::scope(|scope| { 148 | scope.spawn(|_| { 149 | assert!(catch_unwind(|| task.run()).is_err()); 150 | drop(waker()); 151 | assert_eq!(POLL.load(), 2); 152 | assert_eq!(SCHEDULE.load(), 1); 153 | assert_eq!(DROP_F.load(), 1); 154 | assert_eq!(DROP_S.load(), 1); 155 | assert_eq!(DROP_T.load(), 1); 156 | assert_eq!(chan.len(), 0); 157 | }); 158 | 159 | thread::sleep(ms(200)); 160 | 161 | w.wake(); 162 | drop(handle); 163 | assert_eq!(POLL.load(), 2); 164 | assert_eq!(SCHEDULE.load(), 1); 165 | assert_eq!(DROP_F.load(), 0); 166 | assert_eq!(DROP_S.load(), 0); 167 | assert_eq!(DROP_T.load(), 0); 168 | assert_eq!(chan.len(), 0); 169 | 170 | thread::sleep(ms(400)); 171 | 172 | assert_eq!(POLL.load(), 2); 173 | assert_eq!(SCHEDULE.load(), 1); 174 | assert_eq!(DROP_F.load(), 1); 175 | assert_eq!(DROP_S.load(), 1); 176 | assert_eq!(DROP_T.load(), 1); 177 | assert_eq!(chan.len(), 0); 178 | }) 179 | .unwrap(); 180 | } 181 | 182 | #[test] 183 | fn cancel_during_run() { 184 | future!(f, waker, POLL, DROP_F); 185 | schedule!(s, chan, SCHEDULE, DROP_S); 186 | task!(task, handle, f, s, DROP_T); 187 | 188 | task.run(); 189 | let w = waker(); 190 | w.wake(); 191 | let task = chan.recv().unwrap(); 192 | 193 | crossbeam::scope(|scope| { 194 | scope.spawn(|_| { 195 | assert!(catch_unwind(|| task.run()).is_err()); 196 | drop(waker()); 197 | assert_eq!(POLL.load(), 2); 198 | assert_eq!(SCHEDULE.load(), 1); 199 | 
assert_eq!(DROP_F.load(), 1); 200 | assert_eq!(DROP_S.load(), 1); 201 | assert_eq!(DROP_T.load(), 1); 202 | assert_eq!(chan.len(), 0); 203 | }); 204 | 205 | thread::sleep(ms(200)); 206 | 207 | handle.cancel(); 208 | assert_eq!(POLL.load(), 2); 209 | assert_eq!(SCHEDULE.load(), 1); 210 | assert_eq!(DROP_F.load(), 0); 211 | assert_eq!(DROP_S.load(), 0); 212 | assert_eq!(DROP_T.load(), 0); 213 | assert_eq!(chan.len(), 0); 214 | 215 | drop(handle); 216 | assert_eq!(POLL.load(), 2); 217 | assert_eq!(SCHEDULE.load(), 1); 218 | assert_eq!(DROP_F.load(), 0); 219 | assert_eq!(DROP_S.load(), 0); 220 | assert_eq!(DROP_T.load(), 0); 221 | assert_eq!(chan.len(), 0); 222 | 223 | thread::sleep(ms(400)); 224 | 225 | assert_eq!(POLL.load(), 2); 226 | assert_eq!(SCHEDULE.load(), 1); 227 | assert_eq!(DROP_F.load(), 1); 228 | assert_eq!(DROP_S.load(), 1); 229 | assert_eq!(DROP_T.load(), 1); 230 | assert_eq!(chan.len(), 0); 231 | }) 232 | .unwrap(); 233 | } 234 | 235 | #[test] 236 | fn wake_and_cancel_during_run() { 237 | future!(f, waker, POLL, DROP_F); 238 | schedule!(s, chan, SCHEDULE, DROP_S); 239 | task!(task, handle, f, s, DROP_T); 240 | 241 | task.run(); 242 | let w = waker(); 243 | w.wake_by_ref(); 244 | let task = chan.recv().unwrap(); 245 | 246 | crossbeam::scope(|scope| { 247 | scope.spawn(|_| { 248 | assert!(catch_unwind(|| task.run()).is_err()); 249 | drop(waker()); 250 | assert_eq!(POLL.load(), 2); 251 | assert_eq!(SCHEDULE.load(), 1); 252 | assert_eq!(DROP_F.load(), 1); 253 | assert_eq!(DROP_S.load(), 1); 254 | assert_eq!(DROP_T.load(), 1); 255 | assert_eq!(chan.len(), 0); 256 | }); 257 | 258 | thread::sleep(ms(200)); 259 | 260 | w.wake(); 261 | assert_eq!(POLL.load(), 2); 262 | assert_eq!(SCHEDULE.load(), 1); 263 | assert_eq!(DROP_F.load(), 0); 264 | assert_eq!(DROP_S.load(), 0); 265 | assert_eq!(DROP_T.load(), 0); 266 | assert_eq!(chan.len(), 0); 267 | 268 | handle.cancel(); 269 | assert_eq!(POLL.load(), 2); 270 | assert_eq!(SCHEDULE.load(), 1); 271 | assert_eq!(DROP_F.load(), 0); 272 | assert_eq!(DROP_S.load(), 0); 273 | assert_eq!(DROP_T.load(), 0); 274 | assert_eq!(chan.len(), 0); 275 | 276 | drop(handle); 277 | assert_eq!(POLL.load(), 2); 278 | assert_eq!(SCHEDULE.load(), 1); 279 | assert_eq!(DROP_F.load(), 0); 280 | assert_eq!(DROP_S.load(), 0); 281 | assert_eq!(DROP_T.load(), 0); 282 | assert_eq!(chan.len(), 0); 283 | 284 | thread::sleep(ms(400)); 285 | 286 | assert_eq!(POLL.load(), 2); 287 | assert_eq!(SCHEDULE.load(), 1); 288 | assert_eq!(DROP_F.load(), 1); 289 | assert_eq!(DROP_S.load(), 1); 290 | assert_eq!(DROP_T.load(), 1); 291 | assert_eq!(chan.len(), 0); 292 | }) 293 | .unwrap(); 294 | } 295 | 296 | #[test] 297 | fn cancel_and_wake_during_run() { 298 | future!(f, waker, POLL, DROP_F); 299 | schedule!(s, chan, SCHEDULE, DROP_S); 300 | task!(task, handle, f, s, DROP_T); 301 | 302 | task.run(); 303 | let w = waker(); 304 | w.wake_by_ref(); 305 | let task = chan.recv().unwrap(); 306 | 307 | crossbeam::scope(|scope| { 308 | scope.spawn(|_| { 309 | assert!(catch_unwind(|| task.run()).is_err()); 310 | drop(waker()); 311 | assert_eq!(POLL.load(), 2); 312 | assert_eq!(SCHEDULE.load(), 1); 313 | assert_eq!(DROP_F.load(), 1); 314 | assert_eq!(DROP_S.load(), 1); 315 | assert_eq!(DROP_T.load(), 1); 316 | assert_eq!(chan.len(), 0); 317 | }); 318 | 319 | thread::sleep(ms(200)); 320 | 321 | handle.cancel(); 322 | assert_eq!(POLL.load(), 2); 323 | assert_eq!(SCHEDULE.load(), 1); 324 | assert_eq!(DROP_F.load(), 0); 325 | assert_eq!(DROP_S.load(), 0); 326 | assert_eq!(DROP_T.load(), 0); 327 | 
assert_eq!(chan.len(), 0); 328 | 329 | drop(handle); 330 | assert_eq!(POLL.load(), 2); 331 | assert_eq!(SCHEDULE.load(), 1); 332 | assert_eq!(DROP_F.load(), 0); 333 | assert_eq!(DROP_S.load(), 0); 334 | assert_eq!(DROP_T.load(), 0); 335 | assert_eq!(chan.len(), 0); 336 | 337 | w.wake(); 338 | assert_eq!(POLL.load(), 2); 339 | assert_eq!(SCHEDULE.load(), 1); 340 | assert_eq!(DROP_F.load(), 0); 341 | assert_eq!(DROP_S.load(), 0); 342 | assert_eq!(DROP_T.load(), 0); 343 | assert_eq!(chan.len(), 0); 344 | 345 | thread::sleep(ms(400)); 346 | 347 | assert_eq!(POLL.load(), 2); 348 | assert_eq!(SCHEDULE.load(), 1); 349 | assert_eq!(DROP_F.load(), 1); 350 | assert_eq!(DROP_S.load(), 1); 351 | assert_eq!(DROP_T.load(), 1); 352 | assert_eq!(chan.len(), 0); 353 | }) 354 | .unwrap(); 355 | } 356 | -------------------------------------------------------------------------------- /tests/waker_pending.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::task::Waker; 4 | use std::task::{Context, Poll}; 5 | use std::thread; 6 | use std::time::Duration; 7 | 8 | use async_task::Task; 9 | use crossbeam::atomic::AtomicCell; 10 | use crossbeam::channel; 11 | use lazy_static::lazy_static; 12 | 13 | // Creates a future with event counters. 14 | // 15 | // Usage: `future!(f, waker, POLL, DROP)` 16 | // 17 | // The future `f` always sleeps for 200 ms and returns `Poll::Pending`. 18 | // When it gets polled, `POLL` is incremented. 19 | // When it gets dropped, `DROP` is incremented. 20 | // 21 | // Every time the future is run, it stores the waker into a global variable. 22 | // This waker can be extracted using the `waker` function. 23 | macro_rules! future { 24 | ($name:pat, $waker:pat, $poll:ident, $drop:ident) => { 25 | lazy_static! { 26 | static ref $poll: AtomicCell<usize> = AtomicCell::new(0); 27 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 28 | static ref WAKER: AtomicCell<Option<Waker>> = AtomicCell::new(None); 29 | } 30 | 31 | let ($name, $waker) = { 32 | struct Fut(Box<i32>); 33 | 34 | impl Future for Fut { 35 | type Output = (); 36 | 37 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { 38 | WAKER.store(Some(cx.waker().clone())); 39 | $poll.fetch_add(1); 40 | thread::sleep(ms(400)); 41 | Poll::Pending 42 | } 43 | } 44 | 45 | impl Drop for Fut { 46 | fn drop(&mut self) { 47 | $drop.fetch_add(1); 48 | } 49 | } 50 | 51 | (Fut(Box::new(0)), || WAKER.swap(None).unwrap()) 52 | }; 53 | }; 54 | } 55 | 56 | // Creates a schedule function with event counters. 57 | // 58 | // Usage: `schedule!(s, chan, SCHED, DROP)` 59 | // 60 | // The schedule function `s` pushes the task into `chan`. 61 | // When it gets invoked, `SCHED` is incremented. 62 | // When it gets dropped, `DROP` is incremented. 63 | // 64 | // Receiver `chan` extracts the task when it is scheduled. 65 | macro_rules! schedule { 66 | ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { 67 | lazy_static! { 68 | static ref $sched: AtomicCell<usize> = AtomicCell::new(0); 69 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 70 | } 71 | 72 | let ($name, $chan) = { 73 | let (s, r) = channel::unbounded(); 74 | 75 | struct Guard(Box<i32>); 76 | 77 | impl Drop for Guard { 78 | fn drop(&mut self) { 79 | $drop.fetch_add(1); 80 | } 81 | } 82 | 83 | let guard = Guard(Box::new(0)); 84 | let sched = move |task: Task<_>| { 85 | &guard; 86 | $sched.fetch_add(1); 87 | s.send(task).unwrap(); 88 | }; 89 | 90 | (sched, r) 91 | }; 92 | }; 93 | } 94 | 95 | // Creates a task with event counters.
96 | // 97 | // Usage: `task!(task, handle f, s, DROP)` 98 | // 99 | // A task with future `f` and schedule function `s` is created. 100 | // The `Task` and `JoinHandle` are bound to `task` and `handle`, respectively. 101 | // When the tag inside the task gets dropped, `DROP` is incremented. 102 | macro_rules! task { 103 | ($task:pat, $handle: pat, $future:expr, $schedule:expr, $drop:ident) => { 104 | lazy_static! { 105 | static ref $drop: AtomicCell = AtomicCell::new(0); 106 | } 107 | 108 | let ($task, $handle) = { 109 | struct Tag(Box); 110 | 111 | impl Drop for Tag { 112 | fn drop(&mut self) { 113 | $drop.fetch_add(1); 114 | } 115 | } 116 | 117 | async_task::spawn($future, $schedule, Tag(Box::new(0))) 118 | }; 119 | }; 120 | } 121 | 122 | fn ms(ms: u64) -> Duration { 123 | Duration::from_millis(ms) 124 | } 125 | 126 | #[test] 127 | fn wake_during_run() { 128 | future!(f, waker, POLL, DROP_F); 129 | schedule!(s, chan, SCHEDULE, DROP_S); 130 | task!(task, _handle, f, s, DROP_T); 131 | 132 | task.run(); 133 | let w = waker(); 134 | w.wake_by_ref(); 135 | let task = chan.recv().unwrap(); 136 | 137 | crossbeam::scope(|scope| { 138 | scope.spawn(|_| { 139 | task.run(); 140 | assert_eq!(POLL.load(), 2); 141 | assert_eq!(SCHEDULE.load(), 2); 142 | assert_eq!(DROP_F.load(), 0); 143 | assert_eq!(DROP_S.load(), 0); 144 | assert_eq!(DROP_T.load(), 0); 145 | assert_eq!(chan.len(), 1); 146 | }); 147 | 148 | thread::sleep(ms(200)); 149 | 150 | w.wake_by_ref(); 151 | assert_eq!(POLL.load(), 2); 152 | assert_eq!(SCHEDULE.load(), 1); 153 | assert_eq!(DROP_F.load(), 0); 154 | assert_eq!(DROP_S.load(), 0); 155 | assert_eq!(DROP_T.load(), 0); 156 | assert_eq!(chan.len(), 0); 157 | 158 | thread::sleep(ms(400)); 159 | 160 | assert_eq!(POLL.load(), 2); 161 | assert_eq!(SCHEDULE.load(), 2); 162 | assert_eq!(DROP_F.load(), 0); 163 | assert_eq!(DROP_S.load(), 0); 164 | assert_eq!(DROP_T.load(), 0); 165 | assert_eq!(chan.len(), 1); 166 | }) 167 | .unwrap(); 168 | 169 | chan.recv().unwrap(); 170 | drop(waker()); 171 | } 172 | 173 | #[test] 174 | fn cancel_during_run() { 175 | future!(f, waker, POLL, DROP_F); 176 | schedule!(s, chan, SCHEDULE, DROP_S); 177 | task!(task, handle, f, s, DROP_T); 178 | 179 | task.run(); 180 | let w = waker(); 181 | w.wake(); 182 | let task = chan.recv().unwrap(); 183 | 184 | crossbeam::scope(|scope| { 185 | scope.spawn(|_| { 186 | task.run(); 187 | drop(waker()); 188 | assert_eq!(POLL.load(), 2); 189 | assert_eq!(SCHEDULE.load(), 1); 190 | assert_eq!(DROP_F.load(), 1); 191 | assert_eq!(DROP_S.load(), 1); 192 | assert_eq!(DROP_T.load(), 1); 193 | assert_eq!(chan.len(), 0); 194 | }); 195 | 196 | thread::sleep(ms(200)); 197 | 198 | handle.cancel(); 199 | assert_eq!(POLL.load(), 2); 200 | assert_eq!(SCHEDULE.load(), 1); 201 | assert_eq!(DROP_F.load(), 0); 202 | assert_eq!(DROP_S.load(), 0); 203 | assert_eq!(DROP_T.load(), 0); 204 | assert_eq!(chan.len(), 0); 205 | 206 | drop(handle); 207 | assert_eq!(POLL.load(), 2); 208 | assert_eq!(SCHEDULE.load(), 1); 209 | assert_eq!(DROP_F.load(), 0); 210 | assert_eq!(DROP_S.load(), 0); 211 | assert_eq!(DROP_T.load(), 0); 212 | assert_eq!(chan.len(), 0); 213 | 214 | thread::sleep(ms(400)); 215 | 216 | assert_eq!(POLL.load(), 2); 217 | assert_eq!(SCHEDULE.load(), 1); 218 | assert_eq!(DROP_F.load(), 1); 219 | assert_eq!(DROP_S.load(), 1); 220 | assert_eq!(DROP_T.load(), 1); 221 | assert_eq!(chan.len(), 0); 222 | }) 223 | .unwrap(); 224 | } 225 | 226 | #[test] 227 | fn wake_and_cancel_during_run() { 228 | future!(f, waker, POLL, DROP_F); 229 | 
schedule!(s, chan, SCHEDULE, DROP_S); 230 | task!(task, handle, f, s, DROP_T); 231 | 232 | task.run(); 233 | let w = waker(); 234 | w.wake_by_ref(); 235 | let task = chan.recv().unwrap(); 236 | 237 | crossbeam::scope(|scope| { 238 | scope.spawn(|_| { 239 | task.run(); 240 | drop(waker()); 241 | assert_eq!(POLL.load(), 2); 242 | assert_eq!(SCHEDULE.load(), 1); 243 | assert_eq!(DROP_F.load(), 1); 244 | assert_eq!(DROP_S.load(), 1); 245 | assert_eq!(DROP_T.load(), 1); 246 | assert_eq!(chan.len(), 0); 247 | }); 248 | 249 | thread::sleep(ms(200)); 250 | 251 | w.wake(); 252 | assert_eq!(POLL.load(), 2); 253 | assert_eq!(SCHEDULE.load(), 1); 254 | assert_eq!(DROP_F.load(), 0); 255 | assert_eq!(DROP_S.load(), 0); 256 | assert_eq!(DROP_T.load(), 0); 257 | assert_eq!(chan.len(), 0); 258 | 259 | handle.cancel(); 260 | assert_eq!(POLL.load(), 2); 261 | assert_eq!(SCHEDULE.load(), 1); 262 | assert_eq!(DROP_F.load(), 0); 263 | assert_eq!(DROP_S.load(), 0); 264 | assert_eq!(DROP_T.load(), 0); 265 | assert_eq!(chan.len(), 0); 266 | 267 | drop(handle); 268 | assert_eq!(POLL.load(), 2); 269 | assert_eq!(SCHEDULE.load(), 1); 270 | assert_eq!(DROP_F.load(), 0); 271 | assert_eq!(DROP_S.load(), 0); 272 | assert_eq!(DROP_T.load(), 0); 273 | assert_eq!(chan.len(), 0); 274 | 275 | thread::sleep(ms(400)); 276 | 277 | assert_eq!(POLL.load(), 2); 278 | assert_eq!(SCHEDULE.load(), 1); 279 | assert_eq!(DROP_F.load(), 1); 280 | assert_eq!(DROP_S.load(), 1); 281 | assert_eq!(DROP_T.load(), 1); 282 | assert_eq!(chan.len(), 0); 283 | }) 284 | .unwrap(); 285 | } 286 | 287 | #[test] 288 | fn cancel_and_wake_during_run() { 289 | future!(f, waker, POLL, DROP_F); 290 | schedule!(s, chan, SCHEDULE, DROP_S); 291 | task!(task, handle, f, s, DROP_T); 292 | 293 | task.run(); 294 | let w = waker(); 295 | w.wake_by_ref(); 296 | let task = chan.recv().unwrap(); 297 | 298 | crossbeam::scope(|scope| { 299 | scope.spawn(|_| { 300 | task.run(); 301 | drop(waker()); 302 | assert_eq!(POLL.load(), 2); 303 | assert_eq!(SCHEDULE.load(), 1); 304 | assert_eq!(DROP_F.load(), 1); 305 | assert_eq!(DROP_S.load(), 1); 306 | assert_eq!(DROP_T.load(), 1); 307 | assert_eq!(chan.len(), 0); 308 | }); 309 | 310 | thread::sleep(ms(200)); 311 | 312 | handle.cancel(); 313 | assert_eq!(POLL.load(), 2); 314 | assert_eq!(SCHEDULE.load(), 1); 315 | assert_eq!(DROP_F.load(), 0); 316 | assert_eq!(DROP_S.load(), 0); 317 | assert_eq!(DROP_T.load(), 0); 318 | assert_eq!(chan.len(), 0); 319 | 320 | drop(handle); 321 | assert_eq!(POLL.load(), 2); 322 | assert_eq!(SCHEDULE.load(), 1); 323 | assert_eq!(DROP_F.load(), 0); 324 | assert_eq!(DROP_S.load(), 0); 325 | assert_eq!(DROP_T.load(), 0); 326 | assert_eq!(chan.len(), 0); 327 | 328 | w.wake(); 329 | assert_eq!(POLL.load(), 2); 330 | assert_eq!(SCHEDULE.load(), 1); 331 | assert_eq!(DROP_F.load(), 0); 332 | assert_eq!(DROP_S.load(), 0); 333 | assert_eq!(DROP_T.load(), 0); 334 | assert_eq!(chan.len(), 0); 335 | 336 | thread::sleep(ms(400)); 337 | 338 | assert_eq!(POLL.load(), 2); 339 | assert_eq!(SCHEDULE.load(), 1); 340 | assert_eq!(DROP_F.load(), 1); 341 | assert_eq!(DROP_S.load(), 1); 342 | assert_eq!(DROP_T.load(), 1); 343 | assert_eq!(chan.len(), 0); 344 | }) 345 | .unwrap(); 346 | } 347 | 348 | #[test] 349 | fn drop_last_waker() { 350 | future!(f, waker, POLL, DROP_F); 351 | schedule!(s, chan, SCHEDULE, DROP_S); 352 | task!(task, handle, f, s, DROP_T); 353 | 354 | task.run(); 355 | let w = waker(); 356 | 357 | drop(handle); 358 | assert_eq!(POLL.load(), 1); 359 | assert_eq!(SCHEDULE.load(), 0); 360 | 
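// The cancellation tests above all share the same wake/reschedule cycle. As a rough
// sketch in terms of the `future!`/`schedule!` macros defined at the top of this file
// (variable names as used in these tests):
//
//     task.run();                      // first poll returns Poll::Pending
//     let w = waker();                 // waker the future stored during the poll
//     w.wake();                        // wake -> schedule -> task is sent into `chan`
//     let task = chan.recv().unwrap(); // the executor side receives it again
//     task.run();                      // second poll
//
// Cancelling the handle while the second `run` is still in progress does not stop that
// poll (`POLL` still reaches 2), but it prevents rescheduling: once `run` returns, the
// future, the schedule function, and the tag are all dropped.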
assert_eq!(DROP_F.load(), 0); 361 | assert_eq!(DROP_S.load(), 0); 362 | assert_eq!(DROP_T.load(), 0); 363 | assert_eq!(chan.len(), 0); 364 | 365 | drop(w); 366 | assert_eq!(POLL.load(), 1); 367 | assert_eq!(SCHEDULE.load(), 1); 368 | assert_eq!(DROP_F.load(), 0); 369 | assert_eq!(DROP_S.load(), 0); 370 | assert_eq!(DROP_T.load(), 0); 371 | assert_eq!(chan.len(), 1); 372 | 373 | chan.recv().unwrap().run(); 374 | assert_eq!(POLL.load(), 1); 375 | assert_eq!(SCHEDULE.load(), 1); 376 | assert_eq!(DROP_F.load(), 1); 377 | assert_eq!(DROP_S.load(), 1); 378 | assert_eq!(DROP_T.load(), 1); 379 | assert_eq!(chan.len(), 0); 380 | } 381 | 382 | #[test] 383 | fn cancel_last_handle() { 384 | future!(f, waker, POLL, DROP_F); 385 | schedule!(s, chan, SCHEDULE, DROP_S); 386 | task!(task, handle, f, s, DROP_T); 387 | 388 | task.run(); 389 | drop(waker()); 390 | assert_eq!(POLL.load(), 1); 391 | assert_eq!(SCHEDULE.load(), 0); 392 | assert_eq!(DROP_F.load(), 0); 393 | assert_eq!(DROP_S.load(), 0); 394 | assert_eq!(DROP_T.load(), 0); 395 | assert_eq!(chan.len(), 0); 396 | 397 | handle.cancel(); 398 | assert_eq!(POLL.load(), 1); 399 | assert_eq!(SCHEDULE.load(), 1); 400 | assert_eq!(DROP_F.load(), 0); 401 | assert_eq!(DROP_S.load(), 0); 402 | assert_eq!(DROP_T.load(), 0); 403 | assert_eq!(chan.len(), 1); 404 | 405 | chan.recv().unwrap().run(); 406 | assert_eq!(POLL.load(), 1); 407 | assert_eq!(SCHEDULE.load(), 1); 408 | assert_eq!(DROP_F.load(), 1); 409 | assert_eq!(DROP_S.load(), 0); 410 | assert_eq!(DROP_T.load(), 0); 411 | assert_eq!(chan.len(), 0); 412 | 413 | drop(handle); 414 | assert_eq!(POLL.load(), 1); 415 | assert_eq!(SCHEDULE.load(), 1); 416 | assert_eq!(DROP_F.load(), 1); 417 | assert_eq!(DROP_S.load(), 1); 418 | assert_eq!(DROP_T.load(), 1); 419 | assert_eq!(chan.len(), 0); 420 | } 421 | 422 | #[test] 423 | fn drop_last_handle() { 424 | future!(f, waker, POLL, DROP_F); 425 | schedule!(s, chan, SCHEDULE, DROP_S); 426 | task!(task, handle, f, s, DROP_T); 427 | 428 | task.run(); 429 | drop(waker()); 430 | assert_eq!(POLL.load(), 1); 431 | assert_eq!(SCHEDULE.load(), 0); 432 | assert_eq!(DROP_F.load(), 0); 433 | assert_eq!(DROP_S.load(), 0); 434 | assert_eq!(DROP_T.load(), 0); 435 | assert_eq!(chan.len(), 0); 436 | 437 | drop(handle); 438 | assert_eq!(POLL.load(), 1); 439 | assert_eq!(SCHEDULE.load(), 1); 440 | assert_eq!(DROP_F.load(), 0); 441 | assert_eq!(DROP_S.load(), 0); 442 | assert_eq!(DROP_T.load(), 0); 443 | assert_eq!(chan.len(), 1); 444 | 445 | chan.recv().unwrap().run(); 446 | assert_eq!(POLL.load(), 1); 447 | assert_eq!(SCHEDULE.load(), 1); 448 | assert_eq!(DROP_F.load(), 1); 449 | assert_eq!(DROP_S.load(), 1); 450 | assert_eq!(DROP_T.load(), 1); 451 | assert_eq!(chan.len(), 0); 452 | } 453 | -------------------------------------------------------------------------------- /tests/waker_ready.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::future::Future; 3 | use std::pin::Pin; 4 | use std::task::Waker; 5 | use std::task::{Context, Poll}; 6 | use std::thread; 7 | use std::time::Duration; 8 | 9 | use async_task::Task; 10 | use crossbeam::atomic::AtomicCell; 11 | use crossbeam::channel; 12 | use lazy_static::lazy_static; 13 | 14 | // Creates a future with event counters. 15 | // 16 | // Usage: `future!(f, waker, POLL, DROP)` 17 | // 18 | // The future `f` always sleeps for 200 ms, and returns `Poll::Ready` the second time it is polled. 19 | // When it gets polled, `POLL` is incremented. 
20 | // When it gets dropped, `DROP` is incremented. 21 | // 22 | // Every time the future is run, it stores the waker into a global variable. 23 | // This waker can be extracted using the `waker` function. 24 | macro_rules! future { 25 | ($name:pat, $waker:pat, $poll:ident, $drop:ident) => { 26 | lazy_static! { 27 | static ref $poll: AtomicCell<usize> = AtomicCell::new(0); 28 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 29 | static ref WAKER: AtomicCell<Option<Waker>> = AtomicCell::new(None); 30 | } 31 | 32 | let ($name, $waker) = { 33 | struct Fut(Cell<bool>, Box<i32>); 34 | 35 | impl Future for Fut { 36 | type Output = Box<i32>; 37 | 38 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { 39 | WAKER.store(Some(cx.waker().clone())); 40 | $poll.fetch_add(1); 41 | thread::sleep(ms(200)); 42 | 43 | if self.0.get() { 44 | Poll::Ready(Box::new(0)) 45 | } else { 46 | self.0.set(true); 47 | Poll::Pending 48 | } 49 | } 50 | } 51 | 52 | impl Drop for Fut { 53 | fn drop(&mut self) { 54 | $drop.fetch_add(1); 55 | } 56 | } 57 | 58 | (Fut(Cell::new(false), Box::new(0)), || { 59 | WAKER.swap(None).unwrap() 60 | }) 61 | }; 62 | }; 63 | } 64 | 65 | // Creates a schedule function with event counters. 66 | // 67 | // Usage: `schedule!(s, chan, SCHED, DROP)` 68 | // 69 | // The schedule function `s` pushes the task into `chan`. 70 | // When it gets invoked, `SCHED` is incremented. 71 | // When it gets dropped, `DROP` is incremented. 72 | // 73 | // Receiver `chan` extracts the task when it is scheduled. 74 | macro_rules! schedule { 75 | ($name:pat, $chan:pat, $sched:ident, $drop:ident) => { 76 | lazy_static! { 77 | static ref $sched: AtomicCell<usize> = AtomicCell::new(0); 78 | static ref $drop: AtomicCell<usize> = AtomicCell::new(0); 79 | } 80 | 81 | let ($name, $chan) = { 82 | let (s, r) = channel::unbounded(); 83 | 84 | struct Guard(Box<i32>); 85 | 86 | impl Drop for Guard { 87 | fn drop(&mut self) { 88 | $drop.fetch_add(1); 89 | } 90 | } 91 | 92 | let guard = Guard(Box::new(0)); 93 | let sched = move |task: Task<_>| { 94 | &guard; 95 | $sched.fetch_add(1); 96 | s.send(task).unwrap(); 97 | }; 98 | 99 | (sched, r) 100 | }; 101 | }; 102 | } 103 | 104 | // Creates a task with event counters. 105 | // 106 | // Usage: `task!(task, handle, f, s, DROP)` 107 | // 108 | // A task with future `f` and schedule function `s` is created. 109 | // The `Task` and `JoinHandle` are bound to `task` and `handle`, respectively. 110 | // When the tag inside the task gets dropped, `DROP` is incremented. 111 | macro_rules! task { 112 | ($task:pat, $handle: pat, $future:expr, $schedule:expr, $drop:ident) => { 113 | lazy_static!
{ 114 | static ref $drop: AtomicCell = AtomicCell::new(0); 115 | } 116 | 117 | let ($task, $handle) = { 118 | struct Tag(Box); 119 | 120 | impl Drop for Tag { 121 | fn drop(&mut self) { 122 | $drop.fetch_add(1); 123 | } 124 | } 125 | 126 | async_task::spawn($future, $schedule, Tag(Box::new(0))) 127 | }; 128 | }; 129 | } 130 | 131 | fn ms(ms: u64) -> Duration { 132 | Duration::from_millis(ms) 133 | } 134 | 135 | #[test] 136 | fn wake() { 137 | future!(f, waker, POLL, DROP_F); 138 | schedule!(s, chan, SCHEDULE, DROP_S); 139 | task!(mut task, _, f, s, DROP_T); 140 | 141 | assert!(chan.is_empty()); 142 | 143 | task.run(); 144 | assert_eq!(POLL.load(), 1); 145 | assert_eq!(SCHEDULE.load(), 0); 146 | assert_eq!(DROP_F.load(), 0); 147 | assert_eq!(DROP_S.load(), 0); 148 | assert_eq!(DROP_T.load(), 0); 149 | assert_eq!(chan.len(), 0); 150 | 151 | waker().wake(); 152 | task = chan.recv().unwrap(); 153 | assert_eq!(POLL.load(), 1); 154 | assert_eq!(SCHEDULE.load(), 1); 155 | assert_eq!(DROP_F.load(), 0); 156 | assert_eq!(DROP_S.load(), 0); 157 | assert_eq!(DROP_T.load(), 0); 158 | assert_eq!(chan.len(), 0); 159 | 160 | task.run(); 161 | assert_eq!(POLL.load(), 2); 162 | assert_eq!(SCHEDULE.load(), 1); 163 | assert_eq!(DROP_F.load(), 1); 164 | assert_eq!(DROP_S.load(), 0); 165 | assert_eq!(DROP_T.load(), 0); 166 | assert_eq!(chan.len(), 0); 167 | 168 | waker().wake(); 169 | assert_eq!(POLL.load(), 2); 170 | assert_eq!(SCHEDULE.load(), 1); 171 | assert_eq!(DROP_F.load(), 1); 172 | assert_eq!(DROP_S.load(), 1); 173 | assert_eq!(DROP_T.load(), 1); 174 | assert_eq!(chan.len(), 0); 175 | } 176 | 177 | #[test] 178 | fn wake_by_ref() { 179 | future!(f, waker, POLL, DROP_F); 180 | schedule!(s, chan, SCHEDULE, DROP_S); 181 | task!(mut task, _, f, s, DROP_T); 182 | 183 | assert!(chan.is_empty()); 184 | 185 | task.run(); 186 | assert_eq!(POLL.load(), 1); 187 | assert_eq!(SCHEDULE.load(), 0); 188 | assert_eq!(DROP_F.load(), 0); 189 | assert_eq!(DROP_S.load(), 0); 190 | assert_eq!(DROP_T.load(), 0); 191 | assert_eq!(chan.len(), 0); 192 | 193 | waker().wake_by_ref(); 194 | task = chan.recv().unwrap(); 195 | assert_eq!(POLL.load(), 1); 196 | assert_eq!(SCHEDULE.load(), 1); 197 | assert_eq!(DROP_F.load(), 0); 198 | assert_eq!(DROP_S.load(), 0); 199 | assert_eq!(DROP_T.load(), 0); 200 | assert_eq!(chan.len(), 0); 201 | 202 | task.run(); 203 | assert_eq!(POLL.load(), 2); 204 | assert_eq!(SCHEDULE.load(), 1); 205 | assert_eq!(DROP_F.load(), 1); 206 | assert_eq!(DROP_S.load(), 0); 207 | assert_eq!(DROP_T.load(), 0); 208 | assert_eq!(chan.len(), 0); 209 | 210 | waker().wake_by_ref(); 211 | assert_eq!(POLL.load(), 2); 212 | assert_eq!(SCHEDULE.load(), 1); 213 | assert_eq!(DROP_F.load(), 1); 214 | assert_eq!(DROP_S.load(), 1); 215 | assert_eq!(DROP_T.load(), 1); 216 | assert_eq!(chan.len(), 0); 217 | } 218 | 219 | #[test] 220 | fn clone() { 221 | future!(f, waker, POLL, DROP_F); 222 | schedule!(s, chan, SCHEDULE, DROP_S); 223 | task!(mut task, _, f, s, DROP_T); 224 | 225 | task.run(); 226 | assert_eq!(POLL.load(), 1); 227 | assert_eq!(SCHEDULE.load(), 0); 228 | assert_eq!(DROP_F.load(), 0); 229 | assert_eq!(DROP_S.load(), 0); 230 | assert_eq!(DROP_T.load(), 0); 231 | assert_eq!(chan.len(), 0); 232 | 233 | let w2 = waker().clone(); 234 | let w3 = w2.clone(); 235 | let w4 = w3.clone(); 236 | w4.wake(); 237 | 238 | task = chan.recv().unwrap(); 239 | task.run(); 240 | assert_eq!(POLL.load(), 2); 241 | assert_eq!(SCHEDULE.load(), 1); 242 | assert_eq!(DROP_F.load(), 1); 243 | assert_eq!(DROP_S.load(), 0); 244 | 
assert_eq!(DROP_T.load(), 0); 245 | assert_eq!(chan.len(), 0); 246 | 247 | w3.wake(); 248 | assert_eq!(POLL.load(), 2); 249 | assert_eq!(SCHEDULE.load(), 1); 250 | assert_eq!(DROP_F.load(), 1); 251 | assert_eq!(DROP_S.load(), 0); 252 | assert_eq!(DROP_T.load(), 0); 253 | assert_eq!(chan.len(), 0); 254 | 255 | drop(w2); 256 | drop(waker()); 257 | assert_eq!(DROP_S.load(), 1); 258 | assert_eq!(DROP_T.load(), 1); 259 | } 260 | 261 | #[test] 262 | fn wake_cancelled() { 263 | future!(f, waker, POLL, DROP_F); 264 | schedule!(s, chan, SCHEDULE, DROP_S); 265 | task!(task, _, f, s, DROP_T); 266 | 267 | task.run(); 268 | assert_eq!(POLL.load(), 1); 269 | assert_eq!(SCHEDULE.load(), 0); 270 | assert_eq!(DROP_F.load(), 0); 271 | assert_eq!(DROP_S.load(), 0); 272 | assert_eq!(DROP_T.load(), 0); 273 | assert_eq!(chan.len(), 0); 274 | 275 | let w = waker(); 276 | 277 | w.wake_by_ref(); 278 | chan.recv().unwrap().cancel(); 279 | assert_eq!(POLL.load(), 1); 280 | assert_eq!(SCHEDULE.load(), 1); 281 | assert_eq!(DROP_F.load(), 1); 282 | assert_eq!(DROP_S.load(), 0); 283 | assert_eq!(DROP_T.load(), 0); 284 | assert_eq!(chan.len(), 0); 285 | 286 | w.wake(); 287 | assert_eq!(POLL.load(), 1); 288 | assert_eq!(SCHEDULE.load(), 1); 289 | assert_eq!(DROP_F.load(), 1); 290 | assert_eq!(DROP_S.load(), 1); 291 | assert_eq!(DROP_T.load(), 1); 292 | assert_eq!(chan.len(), 0); 293 | } 294 | 295 | #[test] 296 | fn wake_completed() { 297 | future!(f, waker, POLL, DROP_F); 298 | schedule!(s, chan, SCHEDULE, DROP_S); 299 | task!(task, _, f, s, DROP_T); 300 | 301 | task.run(); 302 | let w = waker(); 303 | assert_eq!(POLL.load(), 1); 304 | assert_eq!(SCHEDULE.load(), 0); 305 | assert_eq!(DROP_F.load(), 0); 306 | assert_eq!(DROP_S.load(), 0); 307 | assert_eq!(DROP_T.load(), 0); 308 | assert_eq!(chan.len(), 0); 309 | 310 | w.wake(); 311 | chan.recv().unwrap().run(); 312 | assert_eq!(POLL.load(), 2); 313 | assert_eq!(SCHEDULE.load(), 1); 314 | assert_eq!(DROP_F.load(), 1); 315 | assert_eq!(DROP_S.load(), 0); 316 | assert_eq!(DROP_T.load(), 0); 317 | assert_eq!(chan.len(), 0); 318 | 319 | waker().wake(); 320 | assert_eq!(POLL.load(), 2); 321 | assert_eq!(SCHEDULE.load(), 1); 322 | assert_eq!(DROP_F.load(), 1); 323 | assert_eq!(DROP_S.load(), 1); 324 | assert_eq!(DROP_T.load(), 1); 325 | assert_eq!(chan.len(), 0); 326 | } 327 | --------------------------------------------------------------------------------
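The test in tests/waker_fn.rs further above only counts wake-ups, but a waker built from a closure can also be used to poll a future by hand. A minimal sketch, assuming `async_task::waker_fn` and the `futures` crate from the dev-dependencies (the ready-made future polled here is only for illustration):

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};

fn main() {
    // Count how many times the waker is invoked.
    let wakes = Arc::new(AtomicUsize::new(0));
    let waker = async_task::waker_fn({
        let wakes = wakes.clone();
        move || {
            wakes.fetch_add(1, Ordering::SeqCst);
        }
    });

    // Build a polling context from the waker and poll a ready future once.
    let mut cx = Context::from_waker(&waker);
    let fut = futures::future::ready(7);
    futures::pin_mut!(fut);
    assert_eq!(std::future::Future::poll(fut, &mut cx), Poll::Ready(7));

    // The closure runs only when the waker is actually used.
    waker.wake_by_ref();
    assert_eq!(wakes.load(Ordering::SeqCst), 1);
}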