├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── rustfmt.toml └── src └── lib.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "async-once-cell" 3 | version = "0.5.4" 4 | authors = ["Daniel De Graaf "] 5 | license = "MIT OR Apache-2.0" 6 | edition = "2018" 7 | 8 | description = "Async single assignment cells and lazy values." 9 | readme = "README.md" 10 | documentation = "https://docs.rs/async_once_cell" 11 | 12 | repository = "https://github.com/danieldg/async-once-cell" 13 | keywords = ["lazy", "static", "async"] 14 | categories = ["rust-patterns", "memory-management"] 15 | 16 | [features] 17 | critical-section = ['dep:critical-section'] 18 | std = [] 19 | 20 | [dependencies] 21 | critical-section = { version = "1", optional = true } 22 | 23 | [package.metadata.docs.rs] 24 | all-features = true 25 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2023 Daniel De Graaf 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | 
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Copyright (c) 2023 Daniel De Graaf
2 | 
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 | 
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Crates.io](https://img.shields.io/crates/v/async_once_cell.svg)](https://crates.io/crates/async-once-cell)
2 | [![API reference](https://docs.rs/async-once-cell/badge.svg)](https://docs.rs/async-once-cell/)
3 | 
4 | # Overview
5 | 
6 | `async_once_cell` is a version of [once_cell](https://crates.io/crates/once_cell)
7 | that adds support for async initialization of cells. The short version of the
8 | API is:
9 | 
10 | ```rust
11 | impl OnceCell<T> {
12 |     fn new() -> OnceCell<T>;
13 |     fn get(&self) -> Option<&T>;
14 |     async fn get_or_init(&self, init: impl Future<Output = T>) -> &T;
15 | }
16 | ```
17 | 
18 | More patterns and use-cases are in the [docs](https://docs.rs/async-once-cell/)!
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | use_small_heuristics = "Max"
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! A collection of lazily initialized values that are created by `Future`s.
2 | //!
3 | //! [OnceCell]'s API is similar to the [`once_cell`](https://crates.io/crates/once_cell) crate,
4 | //! [`std::cell::OnceCell`], or [`std::sync::OnceLock`]. It provides an async version of a cell
5 | //! that can only be initialized once, permitting tasks to wait on the initialization if it is
6 | //! already running instead of racing multiple initialization tasks.
7 | //!
8 | //! Unlike threads, tasks can be cancelled at any point where they block. [OnceCell] deals with
9 | //! this by allowing another initializer to run if the task currently initializing the cell is
10 | //! dropped. This also allows for fallible initialization using [OnceCell::get_or_try_init] and
11 | //! for the initializing `Future` to contain borrows or use references to thread-local data.
12 | //!
13 | //! [Lazy] takes the opposite approach: it wraps a single `Future` which is cooperatively run to
14 | //! completion by any polling task. This requires that the initialization function be independent
15 | //! of the calling context, but will never restart an initializing function just because the
16 | //! surrounding task was cancelled. Using a trait object (`Pin<Box<dyn Future>>`) for the future
17 | //! may simplify using this type in data structures.
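//!
//! For illustration, a minimal sketch of that boxed form (the `Config` struct here is
//! hypothetical, not part of this crate):
//!
//! ```
//! use async_once_cell::Lazy;
//! use std::{future::Future, pin::Pin};
//!
//! struct Config {
//!     // A boxed future erases the concrete future type, so the field is nameable.
//!     value: Lazy<u32, Pin<Box<dyn Future<Output = u32> + Send>>>,
//! }
//!
//! let cfg = Config { value: Lazy::new(Box::pin(async { 42u32 })) };
//! # let _ = cfg;
//! ```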
18 | //!
19 | //! # Overhead
20 | //!
21 | //! Both cells use two `usize`s to store state and do not retain any allocations after
22 | //! initialization is complete. Allocations are only required if there is contention.
23 | //!
24 | //! Accessing an already-initialized cell is as cheap as possible: only one atomic load with
25 | //! Acquire ordering.
26 | //!
27 | //! # Features
28 | //!
29 | //! ## The `critical-section` feature
30 | //!
31 | //! If this feature is enabled, the [`critical-section`](https://crates.io/crates/critical-section)
32 | //! crate is used instead of an `std` mutex. You must depend on that crate and select a locking
33 | //! implementation; see [its documentation](https://docs.rs/critical-section/) for details.
34 | //!
35 | //! ## The `std` feature
36 | //!
37 | //! This is currently a no-op, but might in the future be used to expose APIs that depend on
38 | //! types only in `std`. It does *not* control the locking implementation.
39 | 
40 | // How it works:
41 | //
42 | // The basic design goal of async_once_cell is to make the simpler, more common cases as fast and
43 | // efficient as possible while reverting to a reasonably performant implementation when that's not
44 | // possible.
45 | //
46 | // The fastest path is "access an already-initialized cell": this takes one atomic load with
47 | // acquire ordering, and doing it with any less is not possible without extreme, platform-specific
48 | // mechanisms (for example, the membarrier system call on Linux) which would make filling the cell
49 | // significantly more expensive.
50 | //
51 | // The fast path for filling a cell is when there is no contention. The types in this crate will
52 | // not allocate in this scenario, which proceeds according to this summary:
53 | //
54 | // 1. A single task runs get_or_try_init, which calls Inner::initialize(true)
55 | // 2. Inner::state transitions from NEW to QINIT_BIT, and a QuickInitGuard is returned
56 | // 3. The init future is run and completes successfully (possibly after yielding)
57 | // 4. The value is written to the UnsafeCell
58 | // 5. Inner::state transitions from QINIT_BIT to READY_BIT during QuickInitGuard's Drop
59 | //
60 | // If the init future fails (due to returning an error or a panic), then:
61 | // 4. The UnsafeCell remains uninitialized
62 | // 5. Inner::state transitions from QINIT_BIT to NEW during QuickInitGuard's Drop
63 | //
64 | // The fast path does not use Inner::queue at all, and only needs to check it once the cell
65 | // transitions to the ready state (in order to handle the edge case where a queue was created but
66 | // was not actually needed).
67 | //
68 | // Slow path:
69 | //
70 | // If a second task attempts to start initialization, it will not succeed in transitioning
71 | // Inner::state from NEW to QINIT_BIT. Instead, it will create a Queue on the heap, storing it in
72 | // Inner::queue and creating a QueueRef pointing at it. This Queue will hold the Wakers for all
73 | // tasks that attempt to perform initialization. When a QuickInitGuard or QueueHead is dropped,
74 | // all tasks are woken and will either proceed directly to obtaining a reference (if initialization
75 | // was successful) or race to create a new QueueHead, with losers re-queuing in a new Waker list.
76 | //
77 | // Once a Queue has been created for an Inner, it remains valid as long as either a reference
78 | // exists (as determined by the reference count in Inner::state) or the state is not ready. A
79 | // QueueRef represents one reference to the Queue (similar to how Arc would act).
80 | //
81 | // The wake-up behavior used here is optimized for the common case where an initialization function
82 | // succeeds and a mass wake-up results in all woken tasks able to proceed with returning a
83 | // reference to the just-stored value. If initialization fails, it would in theory be possible to
84 | // only wake one of the pending tasks, since only one task will be able to make useful progress by
85 | // becoming the new QueueHead. However, to avoid a lost wakeup, this would require tracking wakers
86 | // and removing them when a QueueRef is dropped. The extra overhead required to maintain the list
87 | // of wakers is not worth the extra complexity and locking in the common case where the QueueRef
88 | // was dropped due to a successful initialization.
89 | 
90 | #![cfg_attr(feature = "critical-section", no_std)]
91 | extern crate alloc;
92 | 
93 | #[cfg(any(not(feature = "critical-section"), feature = "std"))]
94 | extern crate std;
95 | 
96 | use alloc::{boxed::Box, vec, vec::Vec};
97 | 
98 | use core::{
99 |     cell::UnsafeCell,
100 |     convert::Infallible,
101 |     fmt,
102 |     future::{Future, IntoFuture},
103 |     marker::{PhantomData, PhantomPinned},
104 |     mem::{self, ManuallyDrop, MaybeUninit},
105 |     panic::{RefUnwindSafe, UnwindSafe},
106 |     pin::{pin, Pin},
107 |     ptr,
108 |     sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
109 |     task,
110 | };
111 | 
112 | #[cfg(feature = "critical-section")]
113 | struct Mutex<T> {
114 |     data: UnsafeCell<T>,
115 |     locked: core::sync::atomic::AtomicBool,
116 | }
117 | 
118 | #[cfg(feature = "critical-section")]
119 | impl<T> Mutex<T> {
120 |     const fn new(data: T) -> Self {
121 |         Mutex { data: UnsafeCell::new(data), locked: core::sync::atomic::AtomicBool::new(false) }
122 |     }
123 | }
124 | 
125 | #[cfg(not(feature = "critical-section"))]
126 | use std::sync::Mutex;
127 | 
128 | #[cfg(feature = "critical-section")]
129 | fn with_lock<T, R>(mutex: &Mutex<T>, f: impl FnOnce(&mut T) -> R) -> R {
130 |     struct Guard<'a, T>(&'a Mutex<T>);
131 |     impl<'a, T> Drop for Guard<'a, T> {
132 |         fn drop(&mut self) {
133 |             self.0.locked.store(false, Ordering::Relaxed);
134 |         }
135 |     }
136 |     critical_section::with(|_| {
137 |         if mutex.locked.swap(true, Ordering::Relaxed) {
138 |             // Note: this can in theory happen if the delegated Clone impl on a Waker provided in
139 |             // an initialization context turns around and tries to initialize the same cell. This
140 |             // is an absurd thing to do, but it's safe so we can't assume nobody will ever do it.
141 |             panic!("Attempted reentrant locking");
142 |         }
143 |         let guard = Guard(mutex);
144 |         // Safety: we just checked that we were the one to set `locked` to true, and the data in
145 |         // this Mutex will only be accessed while the lock is true. We use Relaxed memory ordering
146 |         // instead of Acquire/Release because critical_section::with itself must provide an
147 |         // Acquire/Release barrier around its closure, and also guarantees that there will not be
148 |         // more than one such closure executing at a time.
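        // Note: `guard` is constructed before `f` runs, so if `f` unwinds, Guard::drop still
        // resets `locked` during the unwind and the flag cannot be left stuck;
        // critical_section::with likewise exits its critical section when unwinding.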
149 |         let rv = unsafe { f(&mut *mutex.data.get()) };
150 |         drop(guard);
151 |         rv
152 |     })
153 | }
154 | 
155 | #[cfg(not(feature = "critical-section"))]
156 | fn with_lock<T, R>(mutex: &Mutex<T>, f: impl FnOnce(&mut T) -> R) -> R {
157 |     f(&mut *mutex.lock().unwrap())
158 | }
159 | 
160 | /// A cell which can be written to only once.
161 | ///
162 | /// This allows initialization using an async closure that borrows from its environment.
163 | ///
164 | /// ```
165 | /// use std::rc::Rc;
166 | /// use std::sync::Arc;
167 | /// use async_once_cell::OnceCell;
168 | ///
169 | /// # async fn run() {
170 | /// let non_send_value = Rc::new(4);
171 | /// let shared = Arc::new(OnceCell::new());
172 | ///
173 | /// let value : &i32 = shared.get_or_init(async {
174 | ///     *non_send_value
175 | /// }).await;
176 | /// assert_eq!(value, &4);
177 | ///
178 | /// // A second init is not called
179 | /// let second = shared.get_or_init(async {
180 | ///     unreachable!()
181 | /// }).await;
182 | /// assert_eq!(second, &4);
183 | ///
184 | /// # }
185 | /// # use std::future::Future;
186 | /// # struct NeverWake;
187 | /// # impl std::task::Wake for NeverWake {
188 | /// #     fn wake(self: Arc<Self>) {}
189 | /// # }
190 | /// # let w = Arc::new(NeverWake).into();
191 | /// # let mut cx = std::task::Context::from_waker(&w);
192 | /// # assert!(std::pin::pin!(run()).poll(&mut cx).is_ready());
193 | /// ```
194 | pub struct OnceCell<T> {
195 |     value: UnsafeCell<MaybeUninit<T>>,
196 |     inner: Inner,
197 |     _marker: PhantomData<T>,
198 | }
199 | 
200 | // Safety: our UnsafeCell should be treated like an RwLock<T>
201 | unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
202 | unsafe impl<T: Send> Send for OnceCell<T> {}
203 | impl<T> Unpin for OnceCell<T> {}
204 | impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceCell<T> {}
205 | impl<T: UnwindSafe> UnwindSafe for OnceCell<T> {}
206 | 
207 | /// Monomorphic portion of the state of a OnceCell or Lazy.
208 | ///
209 | /// The top two bits of state are flags (READY_BIT and QINIT_BIT) that define the state of the
210 | /// cell. The rest of the bits count the number of QueueRef objects associated with this Inner.
211 | ///
212 | /// The queue pointer starts out as NULL. If contention is detected during the initialization of
213 | /// the object, it is initialized to a Box<Queue>, and will remain pointing at that Queue until the
214 | /// state has changed to ready with zero active QueueRefs.
215 | struct Inner {
216 |     state: AtomicUsize,
217 |     queue: AtomicPtr<Queue>,
218 | }
219 | 
220 | impl fmt::Debug for Inner {
221 |     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
222 |         let state = self.state.load(Ordering::Relaxed);
223 |         let queue = self.queue.load(Ordering::Relaxed);
224 |         fmt.debug_struct("Inner")
225 |             .field("ready", &(state & READY_BIT != 0))
226 |             .field("quick_init", &(state & QINIT_BIT != 0))
227 |             .field("refcount", &(state & (QINIT_BIT - 1)))
228 |             .field("queue", &queue)
229 |             .finish()
230 |     }
231 | }
232 | 
233 | /// Transient state during initialization
234 | ///
235 | /// Unlike the sync OnceCell, this cannot be a linked list through stack frames, because Futures
236 | /// can be freed at any point by any thread. Instead, this structure is allocated on the heap
237 | /// during the first initialization call and freed after the value is set (or when the OnceCell is
238 | /// dropped, if the value never gets set).
239 | struct Queue {
240 |     wakers: Mutex<Option<Vec<task::Waker>>>,
241 | }
242 | 
243 | /// A reference to the Queue held inside an Inner.
244 | ///
245 | /// This is somewhat like `Arc<Queue>`: the refcount is held in Inner instead of in Queue so it
246 | /// can be freed once the cell's initialization is complete.
247 | ///
248 | /// Holding a QueueRef guarantees that either:
249 | ///  - queue points to a valid Queue that will not be freed until this QueueRef is dropped
250 | ///  - inner.state is ready
251 | ///
252 | /// The value of QueueRef::queue may be dangling or null if inner.state was ready at the time the
253 | /// value was loaded. The holder of a QueueRef must observe a non-ready state prior to using
254 | /// queue; because this is already done by all holders of QueueRef for other reasons, this second
255 | /// check is not included in Inner::initialize.
256 | ///
257 | /// The creation of a QueueRef performs an Acquire ordering operation on Inner::state; its Drop
258 | /// performs a Release on the same value.
259 | ///
260 | /// The value of QueueRef::queue may also become dangling during QueueRef's Drop impl even when the
261 | /// lifetime 'a is still valid, so a raw pointer is required for correctness.
262 | struct QueueRef<'a> {
263 |     inner: &'a Inner,
264 |     queue: *const Queue,
265 | }
266 | // Safety: the queue is a reference (only the lack of a valid lifetime requires it to be a pointer)
267 | unsafe impl<'a> Sync for QueueRef<'a> {}
268 | unsafe impl<'a> Send for QueueRef<'a> {}
269 | 
270 | /// A write guard for an active initialization of the associated UnsafeCell
271 | ///
272 | /// This is created on the fast (no-allocation) path only.
273 | #[derive(Debug)]
274 | struct QuickInitGuard<'a> {
275 |     inner: &'a Inner,
276 |     ready: bool,
277 | }
278 | 
279 | /// A Future that waits for acquisition of a QueueHead
280 | struct QueueWaiter<'a> {
281 |     guard: Option<QueueRef<'a>>,
282 | }
283 | 
284 | /// A write guard for the active initialization of the associated UnsafeCell
285 | ///
286 | /// Creation of a QueueHead must always be done with the Queue's Mutex held. If no QuickInitGuard
287 | /// exists, the task creating the QueueHead is the task that transitions the contents of the Mutex
288 | /// from None to Some; it must verify QINIT_BIT is unset with the lock held.
289 | ///
290 | /// Only QueueHead::drop may transition the contents of the Mutex from Some to None.
291 | ///
292 | /// Dropping this object will wake all tasks that have blocked on the currently-running
293 | /// initialization.
294 | struct QueueHead<'a> {
295 |     guard: QueueRef<'a>,
296 | }
297 | 
298 | const NEW: usize = 0x0;
299 | const QINIT_BIT: usize = 1 + (usize::MAX >> 2);
300 | const READY_BIT: usize = 1 + (usize::MAX >> 1);
301 | const EMPTY_STATE: usize = !0;
302 | 
303 | impl Inner {
304 |     const fn new() -> Self {
305 |         Inner { state: AtomicUsize::new(NEW), queue: AtomicPtr::new(ptr::null_mut()) }
306 |     }
307 | 
308 |     const fn new_ready() -> Self {
309 |         Inner { state: AtomicUsize::new(READY_BIT), queue: AtomicPtr::new(ptr::null_mut()) }
310 |     }
311 | 
312 |     /// Initialize the queue (if needed) and return a waiter that can be polled to get a QueueHead
313 |     /// that gives permission to initialize the OnceCell.
314 |     ///
315 |     /// The Queue referenced in the returned QueueRef will not be freed until the cell is populated
316 |     /// and all references have been dropped. If any references remain, further calls to
317 |     /// initialize will return the existing queue.
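    // For illustration (values shown for a 64-bit target): QINIT_BIT = 2^62 and
    // READY_BIT = 2^63, so the low 62 bits hold the QueueRef count. For example, a state of
    // READY_BIT | 3 is a ready cell with three outstanding QueueRefs, and QINIT_BIT | 1 is a
    // quick-init in progress with one waiter holding a reference.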
318 |     #[cold]
319 |     fn initialize(&self, try_quick: bool) -> Result<QueueWaiter<'_>, QuickInitGuard<'_>> {
320 |         if try_quick {
321 |             if self
322 |                 .state
323 |                 .compare_exchange(NEW, QINIT_BIT, Ordering::Acquire, Ordering::Relaxed)
324 |                 .is_ok()
325 |             {
326 |                 // On success, we know that there were no other QueueRef objects active, and we
327 |                 // just set QINIT_BIT which makes us the only party allowed to create a QueueHead.
328 |                 // This remains true even if the queue is created later.
329 |                 return Err(QuickInitGuard { inner: self, ready: false });
330 |             }
331 |         }
332 | 
333 |         // Increment the queue's reference count. This ensures that queue won't be freed until we exit.
334 |         let prev_state = self.state.fetch_add(1, Ordering::Acquire);
335 | 
336 |         // Note: unlike Arc, refcount overflow is impossible. The only way to increment the
337 |         // refcount is by calling poll on the Future returned by get_or_try_init, which is !Unpin.
338 |         // The poll call requires a Pinned pointer to this Future, and the contract of Pin requires
339 |         // Drop to be called on any !Unpin value that was pinned before the memory is reused.
340 |         // Because the Drop impl of QueueRef decrements the refcount, an overflow would require
341 |         // more than (usize::MAX / 4) QueueRef objects in memory, which is impossible as these
342 |         // objects take up more than 4 bytes.
343 | 
344 |         let mut guard = QueueRef { inner: self, queue: self.queue.load(Ordering::Acquire) };
345 | 
346 |         if guard.queue.is_null() && prev_state & READY_BIT == 0 {
347 |             let wakers = Mutex::new(None);
348 | 
349 |             // Race with other callers of initialize to create the queue
350 |             let new_queue = Box::into_raw(Box::new(Queue { wakers }));
351 | 
352 |             match self.queue.compare_exchange(
353 |                 ptr::null_mut(),
354 |                 new_queue,
355 |                 Ordering::AcqRel,
356 |                 Ordering::Acquire,
357 |             ) {
358 |                 Ok(_null) => {
359 |                     // Normal case: it was actually set. The Release part of AcqRel orders this
360 |                     // with all Acquires on the queue.
361 |                     guard.queue = new_queue;
362 |                 }
363 |                 Err(actual) => {
364 |                     // we lost the race, but we have the (non-null) value now.
365 |                     guard.queue = actual;
366 |                     // Safety: we just allocated it, and nobody else has seen it
367 |                     unsafe {
368 |                         drop(Box::from_raw(new_queue));
369 |                     }
370 |                 }
371 |             }
372 |         }
373 |         Ok(QueueWaiter { guard: Some(guard) })
374 |     }
375 | 
376 |     fn set_ready(&self) {
377 |         // This Release pairs with the Acquire any time we check READY_BIT, and ensures that the
378 |         // writes to the cell's value are visible to the cell's readers.
379 |         let prev_state = self.state.fetch_or(READY_BIT, Ordering::Release);
380 | 
381 |         debug_assert_eq!(prev_state & READY_BIT, 0, "Invalid state: someone else set READY_BIT");
382 |     }
383 | }
384 | 
385 | impl<'a> Drop for QueueRef<'a> {
386 |     fn drop(&mut self) {
387 |         // Release the reference to queue
388 |         let prev_state = self.inner.state.fetch_sub(1, Ordering::Release);
389 |         // Note: as of now, self.queue may be invalid
390 | 
391 |         let curr_state = prev_state - 1;
392 |         if curr_state == READY_BIT || curr_state == READY_BIT | QINIT_BIT {
393 |             // We just removed the only waiter on an initialized cell. This means the
394 |             // queue is no longer needed. Acquire the queue again so we can free it.
395 |             let queue = self.inner.queue.swap(ptr::null_mut(), Ordering::Acquire);
396 |             if !queue.is_null() {
397 |                 // Safety: the last guard is being freed, and queue is only used by guard-holders.
398 |                 // Due to the swap, we are the only one who is freeing this particular queue.
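                // For example, with two waiting tasks the state goes (READY_BIT | 2) ->
                // (READY_BIT | 1) on the first drop (no free, a reference remains) and
                // (READY_BIT | 1) -> READY_BIT on the second, which matches the check above
                // and frees the queue exactly once.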
399 | unsafe { 400 | drop(Box::from_raw(queue)); 401 | } 402 | } 403 | } 404 | } 405 | } 406 | 407 | impl<'a> Drop for QuickInitGuard<'a> { 408 | fn drop(&mut self) { 409 | // When our QuickInitGuard was created, Inner::state was changed to QINIT_BIT. If it is 410 | // either unchanged or has changed back to that value, we can finish on the fast path. 411 | let fast_target = if self.ready { READY_BIT } else { NEW }; 412 | if self 413 | .inner 414 | .state 415 | .compare_exchange(QINIT_BIT, fast_target, Ordering::Release, Ordering::Relaxed) 416 | .is_ok() 417 | { 418 | // Because the exchange succeeded, we know there are no active QueueRefs and so no 419 | // wakers need to be woken. If self.ready is true, the Release ordering pairs with 420 | // the Acquire on another thread's access to state to check READY_BIT. 421 | 422 | if self.ready { 423 | // It's possible (though unlikely) that someone created the queue but abandoned 424 | // their QueueRef before we finished our poll, resulting in us not observing 425 | // them. No wakes are needed in this case because there are no waiting tasks, 426 | // but we should still clean up the allocation. 427 | let queue = self.inner.queue.swap(ptr::null_mut(), Ordering::Relaxed); 428 | if !queue.is_null() { 429 | // Synchronize with both the fetch_sub that lowered the refcount and the 430 | // queue initialization. 431 | core::sync::atomic::fence(Ordering::Acquire); 432 | // Safety: we observed no active QueueRefs, and queue is only used by 433 | // guard-holders. Due to the swap, we are the only one who is freeing this 434 | // particular queue. 435 | unsafe { 436 | drop(Box::from_raw(queue)); 437 | } 438 | } 439 | } 440 | return; 441 | } 442 | 443 | // Slow path: get a guard, create the QueueHead we should have been holding, then drop it 444 | // so that the tasks are woken as intended. This is needed regardless of if we succeeded 445 | // or not - either waiters need to run init themselves, or they need to read the value we 446 | // set. 447 | // 448 | // The guard is guaranteed to have been created with no QueueHead available because 449 | // QINIT_BIT is still set. 450 | let waiter = self.inner.initialize(false).expect("Got a QuickInitGuard in slow init"); 451 | let guard = waiter.guard.expect("No guard available even without polling"); 452 | 453 | // Safety: the guard holds a place on the waiter list, and we know READY_BIT was not yet 454 | // set when Inner::initialize was called, so the queue must be present. It will remain 455 | // valid until guard is dropped. 456 | debug_assert!(!guard.queue.is_null(), "Queue must not be NULL when READY_BIT is not set"); 457 | let queue = unsafe { &*guard.queue }; 458 | 459 | with_lock(&queue.wakers, |lock| { 460 | // Creating a QueueHead requires that the Mutex contain Some. While this is likely 461 | // already true, it is not guaranteed because the first concurrent thread might have 462 | // been preempted before it was able to start its first QueueWaiter::poll call. Ensure 463 | // that nobody else can grab the QueueHead between when we release QINIT_BIT and when 464 | // our QueueHead is dropped. 465 | lock.get_or_insert_with(Vec::new); 466 | 467 | // We must clear QINIT_BIT, which will allow someone else to take the head position 468 | // once we drop it. 469 | // 470 | // If our initialization was successful, we also need to set READY_BIT. These 471 | // operations can be combined because we know the current state of both bits (only 472 | // QINIT_BIT is set) and because READY_BIT == 2 * QINIT_BIT. 
473 |             //
474 |             // Ordering for QINIT_BIT is handled by the Mutex, but ordering for READY_BIT is not;
475 |             // it needs Release ordering to ensure that the UnsafeCell's value is visible prior to
476 |             // that bit being observed as set by other threads.
477 |             let prev_state = if self.ready {
478 |                 self.inner.state.fetch_add(QINIT_BIT, Ordering::Release)
479 |             } else {
480 |                 self.inner.state.fetch_sub(QINIT_BIT, Ordering::Relaxed)
481 |             };
482 |             debug_assert_eq!(
483 |                 prev_state & (QINIT_BIT | READY_BIT),
484 |                 QINIT_BIT,
485 |                 "Invalid state during QuickInitGuard drop"
486 |             );
487 |         });
488 | 
489 |         // Safety: we just took the head position
490 |         drop(QueueHead { guard })
491 |     }
492 | }
493 | 
494 | impl Drop for Inner {
495 |     fn drop(&mut self) {
496 |         let queue = *self.queue.get_mut();
497 |         if !queue.is_null() {
498 |             // Safety: nobody else could have a reference
499 |             unsafe {
500 |                 drop(Box::from_raw(queue));
501 |             }
502 |         }
503 |     }
504 | }
505 | 
506 | impl<'a> Future for QueueWaiter<'a> {
507 |     type Output = Option<QueueHead<'a>>;
508 |     fn poll(
509 |         mut self: Pin<&mut Self>,
510 |         cx: &mut task::Context<'_>,
511 |     ) -> task::Poll<Option<QueueHead<'a>>> {
512 |         let guard = self.guard.as_ref().expect("Polled future after finished");
513 | 
514 |         // Fast path for waiters that get notified after the value is set
515 |         let state = guard.inner.state.load(Ordering::Acquire);
516 |         if state & READY_BIT != 0 {
517 |             return task::Poll::Ready(None);
518 |         }
519 | 
520 |         // Safety: the guard holds a place on the waiter list and we just checked that the state is
521 |         // not ready, so the queue is non-null and will remain valid until guard is dropped.
522 |         let queue = unsafe { &*guard.queue };
523 |         let rv = with_lock(&queue.wakers, |lock| {
524 |             // Another task might have set READY_BIT between our optimistic lock-free check and our
525 |             // lock acquisition. Don't return a QueueHead unless we know for sure that we are
526 |             // allowed to initialize.
527 |             let state = guard.inner.state.load(Ordering::Acquire);
528 |             if state & READY_BIT != 0 {
529 |                 return task::Poll::Ready(None);
530 |             }
531 | 
532 |             match lock.as_mut() {
533 |                 None if state & QINIT_BIT == 0 => {
534 |                     // take the head position and start a waker queue
535 |                     *lock = Some(Vec::new());
536 | 
537 |                     task::Poll::Ready(Some(()))
538 |                 }
539 |                 None => {
540 |                     // Someone else has a QuickInitGuard; they will wake us when they finish.
541 |                     let waker = cx.waker().clone();
542 |                     *lock = Some(vec![waker]);
543 |                     task::Poll::Pending
544 |                 }
545 |                 Some(wakers) => {
546 |                     // Wait for the QueueHead to be dropped
547 |                     let my_waker = cx.waker();
548 |                     for waker in wakers.iter() {
549 |                         if waker.will_wake(my_waker) {
550 |                             return task::Poll::Pending;
551 |                         }
552 |                     }
553 |                     wakers.push(my_waker.clone());
554 |                     task::Poll::Pending
555 |                 }
556 |             }
557 |         });
558 | 
559 |         // Safety: If rv is Ready/Some, we know:
560 |         //  - we are holding a QueueRef (in guard) that prevents state from being 0
561 |         //  - creating a new QuickInitGuard requires the state to be 0
562 |         //  - we just checked QINIT_BIT and saw there isn't a QuickInitGuard active
563 |         //  - the queue was None, meaning there are no current QueueHeads
564 |         //  - we just set the queue to Some, claiming the head
565 |         //
566 |         // If rv is Ready/None, this is due to READY_BIT being set.
567 |         // If rv is Pending, we have a waker in the queue.
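        // Note: the will_wake scan above also means a task that polls this QueueWaiter several
        // times occupies at most one waker slot; each distinct waiting task adds one entry.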
568 |         rv.map(|o| o.map(|()| QueueHead { guard: self.guard.take().unwrap() }))
569 |     }
570 | }
571 | 
572 | impl<'a> Drop for QueueHead<'a> {
573 |     fn drop(&mut self) {
574 |         // Safety: if queue is not null, then it is valid as long as the guard is alive, and a
575 |         // QueueHead is never created with a NULL queue (that requires READY_BIT to have been set
576 |         // inside Inner::initialize, and in that case no QueueHead objects will be created).
577 |         let queue = unsafe { &*self.guard.queue };
578 | 
579 |         // Take the waker queue, allowing another QueueHead to be created if READY_BIT is unset.
580 |         let wakers =
581 |             with_lock(&queue.wakers, Option::take).expect("QueueHead dropped without a waker list");
582 | 
583 |         for waker in wakers {
584 |             waker.wake();
585 |         }
586 |     }
587 | }
588 | 
589 | enum Step<'a> {
590 |     Start { inner: &'a Inner },
591 |     Quick { guard: QuickInitGuard<'a> },
592 |     Wait { guard: QueueWaiter<'a> },
593 |     Run { head: QueueHead<'a> },
594 |     Done,
595 | }
596 | 
597 | enum EitherHead<'a, 'b> {
598 |     Quick(&'b mut QuickInitGuard<'a>),
599 |     Normal(&'b QueueHead<'a>),
600 | }
601 | 
602 | impl EitherHead<'_, '_> {
603 |     fn set_ready(&mut self) {
604 |         match self {
605 |             Self::Quick(guard) => guard.ready = true,
606 |             Self::Normal(head) => head.guard.inner.set_ready(),
607 |         }
608 |     }
609 | }
610 | 
611 | impl<'a> Step<'a> {
612 |     /// Run one step of the state machine.
613 |     ///
614 |     /// - The provided `done` value will be returned only if READY_BIT is observed
615 |     /// - The `init` closure will be run when the initialization lock is acquired. It should call
616 |     ///   [EitherHead::set_ready] in order to set READY_BIT if it succeeds; this will cause tasks
617 |     ///   that are waiting on initialization to wake up.
618 |     fn poll_init<R, F>(&mut self, cx: &mut task::Context<'_>, done: R, mut init: F) -> task::Poll<R>
619 |     where
620 |         F: FnMut(&mut task::Context<'_>, EitherHead<'a, '_>) -> task::Poll<R>,
621 |     {
622 |         loop {
623 |             match mem::replace(self, Step::Done) {
624 |                 Step::Start { inner } => {
625 |                     let state = inner.state.load(Ordering::Acquire);
626 |                     if state & READY_BIT == 0 {
627 |                         *self = match inner.initialize(state == NEW) {
628 |                             Err(guard) => Step::Quick { guard },
629 |                             Ok(guard) => Step::Wait { guard },
630 |                         };
631 |                         continue;
632 |                     }
633 | 
634 |                     // Safety: we just saw READY_BIT set
635 |                     return task::Poll::Ready(done);
636 |                 }
637 |                 Step::Quick { mut guard } => {
638 |                     let rv = init(cx, EitherHead::Quick(&mut guard));
639 | 
640 |                     if rv.is_pending() {
641 |                         *self = Step::Quick { guard };
642 |                     }
643 | 
644 |                     return rv;
645 |                 }
646 |                 Step::Wait { mut guard } => match Pin::new(&mut guard).poll(cx) {
647 |                     task::Poll::Pending => {
648 |                         *self = Step::Wait { guard };
649 |                         return task::Poll::Pending;
650 |                     }
651 |                     task::Poll::Ready(None) => {
652 |                         // Safety: getting None from QueueWaiter means it is ready
653 |                         return task::Poll::Ready(done);
654 |                     }
655 |                     task::Poll::Ready(Some(head)) => {
656 |                         *self = Step::Run { head };
657 |                         continue;
658 |                     }
659 |                 },
660 |                 Step::Run { head } => {
661 |                     let rv = init(cx, EitherHead::Normal(&head));
662 | 
663 |                     if rv.is_pending() {
664 |                         *self = Step::Run { head };
665 |                     }
666 | 
667 |                     // drop of QueueHead notifies other Futures
668 |                     // drop of QueueRef (might) free the Queue
669 |                     return rv;
670 |                 }
671 |                 Step::Done => {
672 |                     panic!("Polled future after completion");
673 |                 }
674 |             }
675 |         }
676 |     }
677 | }
678 | 
679 | struct InitFuture<'a, T, F> {
680 |     cell: &'a OnceCell<T>,
681 |     init: F,
682 |     step: Step<'a>,
683 | }
684 | 
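// For illustration, the transitions that poll_init steps through (commentary only):
//
//     Start -> Done                 (READY_BIT already set; returns `done`)
//     Start -> Quick -> Done        (uncontended: QuickInitGuard runs `init`)
//     Start -> Wait  -> Done        (woken after another task set READY_BIT)
//     Start -> Wait  -> Run -> Done (won the race for the QueueHead; runs `init`)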
685 | impl<'a, T, F> InitFuture<'a, T, F> {
686 |     fn new<E>(cell: &'a OnceCell<T>, init: F) -> Self
687 |     where
688 |         F: for<'c> FnMut(&mut task::Context<'c>) -> task::Poll<Result<T, E>> + Unpin,
689 |     {
690 |         Self { cell, init, step: Step::Start { inner: &cell.inner } }
691 |     }
692 | }
693 | 
694 | impl<'a, T, F, E> Future for InitFuture<'a, T, F>
695 | where
696 |     F: for<'c> FnMut(&mut task::Context<'c>) -> task::Poll<Result<T, E>> + Unpin,
697 | {
698 |     type Output = Result<&'a T, E>;
699 |     fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll<Self::Output> {
700 |         struct Filled;
701 |         let this = self.get_mut();
702 |         let cell = this.cell;
703 |         let init = &mut this.init;
704 |         this.step
705 |             .poll_init(cx, Ok(Filled), |cx, mut head| {
706 |                 let value = task::ready!(init(cx))?;
707 | 
708 |                 // Safety: We hold the head, so nobody else can write to value
709 |                 unsafe {
710 |                     (*cell.value.get()).write(value);
711 |                 }
712 |                 head.set_ready();
713 | 
714 |                 task::Poll::Ready(Ok(Filled))
715 |             })
716 |             .map(|r| {
717 |                 // Safety: a Filled struct is only returned when either READY_BIT was seen or when
718 |                 // we wrote the value.
719 |                 r.map(|Filled| unsafe { (*this.cell.value.get()).assume_init_ref() })
720 |             })
721 |     }
722 | }
723 | 
724 | impl<T> OnceCell<T> {
725 |     /// Creates a new empty cell.
726 |     pub const fn new() -> Self {
727 |         Self {
728 |             value: UnsafeCell::new(MaybeUninit::uninit()),
729 |             inner: Inner::new(),
730 |             _marker: PhantomData,
731 |         }
732 |     }
733 | 
734 |     /// Creates a new cell with the given contents.
735 |     pub const fn new_with(value: T) -> Self {
736 |         Self {
737 |             value: UnsafeCell::new(MaybeUninit::new(value)),
738 |             inner: Inner::new_ready(),
739 |             _marker: PhantomData,
740 |         }
741 |     }
742 | 
743 |     /// Gets the contents of the cell, initializing it with `init` if the cell was empty.
744 |     ///
745 |     /// Many tasks may call `get_or_init` concurrently with different initializing futures, but
746 |     /// it is guaranteed that only one future will be executed as long as the resulting future is
747 |     /// polled to completion.
748 |     ///
749 |     /// If `init` panics, the panic is propagated to the caller, and the cell remains uninitialized.
750 |     ///
751 |     /// If the Future returned by this function is dropped prior to completion, the cell remains
752 |     /// uninitialized, and another `init` function will be started (if any are available).
753 |     ///
754 |     /// Attempting to reentrantly initialize the cell from `init` will generally cause a deadlock;
755 |     /// the reentrant call will immediately yield and wait for the pending initialization. If the
756 |     /// actual initialization can complete despite this (for example, by polling multiple futures
757 |     /// and discarding incomplete ones instead of polling them to completion), then the cell will
758 |     /// successfully be initialized.
759 |     pub async fn get_or_init(&self, init: impl Future<Output = T>) -> &T {
760 |         let mut init = pin!(init);
761 |         // TODO: replace this match with Result::into_ok when that is stabilized
762 |         match InitFuture::new(self, |cx| init.as_mut().poll(cx).map(Ok::<T, Infallible>)).await {
763 |             Ok(t) => t,
764 |             Err(e) => match e {},
765 |         }
766 |     }
767 | 
768 |     /// Gets the contents of the cell, initializing it with `init` if the cell was empty. If the
769 |     /// cell was empty and `init` failed, an error is returned.
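    ///
    /// For illustration, a minimal sketch of a failed and then successful initialization (the
    /// error type here is arbitrary):
    ///
    /// ```
    /// # use async_once_cell::OnceCell;
    /// # async fn run() {
    /// let cell = OnceCell::<u32>::new();
    /// assert!(cell.get_or_try_init(async { Err::<u32, ()>(()) }).await.is_err());
    /// assert_eq!(cell.get_or_try_init(async { Ok::<u32, ()>(4) }).await, Ok(&4));
    /// # }
    /// # use std::{future::Future, sync::Arc};
    /// # struct NeverWake;
    /// # impl std::task::Wake for NeverWake {
    /// #     fn wake(self: Arc<Self>) {}
    /// # }
    /// # let w = Arc::new(NeverWake).into();
    /// # let mut cx = std::task::Context::from_waker(&w);
    /// # assert!(std::pin::pin!(run()).poll(&mut cx).is_ready());
    /// ```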
770 |     ///
771 |     /// Many tasks may call `get_or_init` and/or `get_or_try_init` concurrently with different
772 |     /// initializing futures, but it is guaranteed that only one of the futures will be executed as
773 |     /// long as the resulting future is polled to completion.
774 |     ///
775 |     /// If `init` panics or returns an error, the panic or error is propagated to the caller, and
776 |     /// the cell remains uninitialized. In this case, another `init` function from a concurrent
777 |     /// caller will be selected to execute, if one is available.
778 |     ///
779 |     /// If the Future returned by this function is dropped prior to completion, the cell remains
780 |     /// uninitialized, and another `init` function will be started (if any are available).
781 |     ///
782 |     /// Attempting to reentrantly initialize the cell from `init` will generally cause a deadlock;
783 |     /// the reentrant call will immediately yield and wait for the pending initialization. If the
784 |     /// actual initialization can complete despite this (for example, by polling multiple futures
785 |     /// and discarding incomplete ones instead of polling them to completion), then the cell will
786 |     /// successfully be initialized.
787 |     pub async fn get_or_try_init<E>(
788 |         &self,
789 |         init: impl Future<Output = Result<T, E>>,
790 |     ) -> Result<&T, E> {
791 |         let mut init = pin!(init);
792 | 
793 |         InitFuture::new(self, |cx| init.as_mut().poll(cx)).await
794 |     }
795 | 
796 |     /// Gets the reference to the underlying value.
797 |     ///
798 |     /// Returns `None` if the cell is empty or being initialized. This method never blocks.
799 |     pub fn get(&self) -> Option<&T> {
800 |         let state = self.inner.state.load(Ordering::Acquire);
801 | 
802 |         if state & READY_BIT == 0 {
803 |             None
804 |         } else {
805 |             Some(unsafe { (*self.value.get()).assume_init_ref() })
806 |         }
807 |     }
808 | 
809 |     /// Gets a mutable reference to the underlying value.
810 |     pub fn get_mut(&mut self) -> Option<&mut T> {
811 |         let state = *self.inner.state.get_mut();
812 |         if state & READY_BIT == 0 {
813 |             None
814 |         } else {
815 |             Some(unsafe { self.value.get_mut().assume_init_mut() })
816 |         }
817 |     }
818 | 
819 |     /// Takes the value out of this `OnceCell`, moving it back to an uninitialized state.
820 |     pub fn take(&mut self) -> Option<T> {
821 |         let state = *self.inner.state.get_mut();
822 |         self.inner = Inner::new();
823 |         if state & READY_BIT == 0 {
824 |             None
825 |         } else {
826 |             Some(unsafe { self.value.get_mut().assume_init_read() })
827 |         }
828 |     }
829 | 
830 |     /// Consumes the OnceCell, returning the wrapped value. Returns None if the cell was empty.
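    ///
    /// For illustration:
    ///
    /// ```
    /// use async_once_cell::OnceCell;
    /// assert_eq!(OnceCell::new_with(4).into_inner(), Some(4));
    /// assert_eq!(OnceCell::<u32>::new().into_inner(), None);
    /// ```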
831 |     pub fn into_inner(mut self) -> Option<T> {
832 |         self.take()
833 |     }
834 | }
835 | 
836 | impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
837 |     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
838 |         let value = self.get();
839 |         fmt.debug_struct("OnceCell").field("value", &value).field("inner", &self.inner).finish()
840 |     }
841 | }
842 | 
843 | impl<T> Drop for OnceCell<T> {
844 |     fn drop(&mut self) {
845 |         let state = *self.inner.state.get_mut();
846 |         if state & READY_BIT != 0 {
847 |             unsafe {
848 |                 self.value.get_mut().assume_init_drop();
849 |             }
850 |         }
851 |     }
852 | }
853 | 
854 | impl<T> Default for OnceCell<T> {
855 |     fn default() -> Self {
856 |         Self::new()
857 |     }
858 | }
859 | 
860 | impl<T> From<T> for OnceCell<T> {
861 |     fn from(value: T) -> Self {
862 |         Self::new_with(value)
863 |     }
864 | }
865 | 
866 | #[cfg(test)]
867 | mod test {
868 |     use super::*;
869 |     use alloc::sync::Arc;
870 |     use core::pin::pin;
871 | 
872 |     #[derive(Default)]
873 |     struct CountWaker(AtomicUsize);
874 |     impl alloc::task::Wake for CountWaker {
875 |         fn wake(self: Arc<Self>) {
876 |             self.0.fetch_add(1, Ordering::Relaxed);
877 |         }
878 |     }
879 | 
880 |     struct CmdWait<'a>(&'a AtomicUsize);
881 |     impl Future for CmdWait<'_> {
882 |         type Output = usize;
883 |         fn poll(self: Pin<&mut Self>, _: &mut task::Context<'_>) -> task::Poll<usize> {
884 |             match self.0.load(Ordering::Relaxed) {
885 |                 0 => task::Poll::Pending,
886 |                 n => task::Poll::Ready(n),
887 |             }
888 |         }
889 |     }
890 | 
891 |     impl Drop for CmdWait<'_> {
892 |         fn drop(&mut self) {
893 |             if self.0.load(Ordering::Relaxed) == 6 {
894 |                 panic!("Panic on drop");
895 |             }
896 |         }
897 |     }
898 | 
899 |     async fn maybe(cmd: &AtomicUsize, cell: &OnceCell<usize>) -> Result<usize, usize> {
900 |         cell.get_or_try_init(async {
901 |             match dbg!(CmdWait(cmd).await) {
902 |                 1 => Err(1),
903 |                 2 => Ok(2),
904 |                 _ => unreachable!(),
905 |             }
906 |         })
907 |         .await
908 |         .map(|v| *v)
909 |     }
910 | 
911 |     async fn never_init(cell: &OnceCell<usize>) {
912 |         let v = cell.get_or_init(async { unreachable!() }).await;
913 |         assert_eq!(v, &2);
914 |     }
915 | 
916 |     #[test]
917 |     fn slow_path() {
918 |         let w = Arc::new(CountWaker::default()).into();
919 |         let mut cx = std::task::Context::from_waker(&w);
920 | 
921 |         let cmd = AtomicUsize::new(0);
922 |         let cell = OnceCell::new();
923 | 
924 |         let mut f1 = pin!(maybe(&cmd, &cell));
925 |         let mut f2 = pin!(never_init(&cell));
926 | 
927 |         println!("{:?}", cell);
928 |         assert!(f1.as_mut().poll(&mut cx).is_pending());
929 |         println!("{:?}", cell);
930 |         assert!(f2.as_mut().poll(&mut cx).is_pending());
931 |         println!("{:?}", cell);
932 |         cmd.store(2, Ordering::Relaxed);
933 |         assert!(f2.as_mut().poll(&mut cx).is_pending());
934 |         assert!(f1.as_mut().poll(&mut cx).is_ready());
935 |         println!("{:?}", cell);
936 |         assert!(f2.as_mut().poll(&mut cx).is_ready());
937 |     }
938 | 
939 |     #[test]
940 |     fn fast_path_tricked() {
941 |         // f1 will complete on the fast path, but a queue was created anyway
942 |         let w = Arc::new(CountWaker::default()).into();
943 |         let mut cx = std::task::Context::from_waker(&w);
944 | 
945 |         let cmd = AtomicUsize::new(0);
946 |         let cell = OnceCell::new();
947 | 
948 |         let mut f1 = pin!(maybe(&cmd, &cell));
949 |         let mut f2 = pin!(never_init(&cell));
950 | 
951 |         println!("{:?}", cell);
952 |         assert!(f1.as_mut().poll(&mut cx).is_pending());
953 |         println!("{:?}", cell);
954 |         assert!(f2.as_mut().poll(&mut cx).is_pending());
955 |         println!("{:?}", cell);
956 |         cmd.store(2, Ordering::Relaxed);
957 |         f2.set(never_init(&cell));
958 |         println!("{:?}", cell);
959 |         assert!(f1.as_mut().poll(&mut cx).is_ready());
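        // Note: f2.set(...) above dropped f2's QueueRef, so f1's QuickInitGuard saw no
        // remaining references, finished on the fast path, and only had to free the
        // orphaned queue allocation.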
960 |         println!("{:?}", cell);
961 |         assert!(f2.as_mut().poll(&mut cx).is_ready());
962 |     }
963 | 
964 |     #[test]
965 |     fn second_try() {
966 |         let waker = Arc::new(CountWaker::default());
967 |         let w = waker.clone().into();
968 |         let mut cx = std::task::Context::from_waker(&w);
969 | 
970 |         let cmd = AtomicUsize::new(0);
971 |         let cell = OnceCell::new();
972 | 
973 |         let mut f1 = pin!(maybe(&cmd, &cell));
974 |         let mut f2 = pin!(maybe(&cmd, &cell));
975 |         let mut f3 = pin!(maybe(&cmd, &cell));
976 |         let mut f4 = pin!(maybe(&cmd, &cell));
977 | 
978 |         assert!(f1.as_mut().poll(&mut cx).is_pending());
979 |         assert_eq!(cell.inner.state.load(Ordering::Relaxed), QINIT_BIT);
980 |         assert!(f2.as_mut().poll(&mut cx).is_pending());
981 |         assert!(f3.as_mut().poll(&mut cx).is_pending());
982 |         assert!(f4.as_mut().poll(&mut cx).is_pending());
983 |         assert_eq!(cell.inner.state.load(Ordering::Relaxed), QINIT_BIT | 3);
984 | 
985 |         cmd.store(1, Ordering::Relaxed);
986 |         // f2 should do nothing, as f1 holds QuickInitGuard
987 |         assert!(f2.as_mut().poll(&mut cx).is_pending());
988 |         assert_eq!(waker.0.load(Ordering::Relaxed), 0);
989 | 
990 |         // f1 fails, as commanded
991 |         assert_eq!(f1.as_mut().poll(&mut cx), task::Poll::Ready(Err(1)));
992 |         // it released QINIT_BIT (and doesn't still hold a reference)
993 |         assert_eq!(cell.inner.state.load(Ordering::Relaxed), 3);
994 |         // f1 caused a wake to be sent (only one, as they have the same waker)
995 |         assert_eq!(waker.0.load(Ordering::Relaxed), 1);
996 | 
997 |         // drop one waiting task and check that the refcount drops
998 |         f4.set(maybe(&cmd, &cell));
999 |         assert_eq!(cell.inner.state.load(Ordering::Relaxed), 2);
1000 | 
1001 |         // have f2 start init
1002 |         cmd.store(0, Ordering::Relaxed);
1003 |         assert!(f2.as_mut().poll(&mut cx).is_pending());
1004 | 
1005 |         // allow f2 to actually complete init
1006 |         cmd.store(2, Ordering::Relaxed);
1007 | 
1008 |         // f3 should add itself to the queue again, but not complete
1009 |         assert!(f3.as_mut().poll(&mut cx).is_pending());
1010 |         assert_eq!(waker.0.load(Ordering::Relaxed), 1);
1011 | 
1012 |         assert_eq!(f2.as_mut().poll(&mut cx), task::Poll::Ready(Ok(2)));
1013 | 
1014 |         // Nobody else should run their closure
1015 |         cmd.store(3, Ordering::Relaxed);
1016 | 
1017 |         // Other tasks can now immediately access the value
1018 |         assert_eq!(f4.as_mut().poll(&mut cx), task::Poll::Ready(Ok(2)));
1019 | 
1020 |         // f3 is still waiting; the queue should not be freed yet, and it should have seen a wake
1021 |         assert_eq!(waker.0.load(Ordering::Relaxed), 2);
1022 |         assert_eq!(cell.inner.state.load(Ordering::Relaxed), READY_BIT | 1);
1023 |         assert!(!cell.inner.queue.load(Ordering::Relaxed).is_null());
1024 | 
1025 |         assert_eq!(f3.as_mut().poll(&mut cx), task::Poll::Ready(Ok(2)));
1026 |         // the cell should be fully ready, with the queue deallocated
1027 | 
1028 |         assert_eq!(cell.inner.state.load(Ordering::Relaxed), READY_BIT);
1029 |         assert!(cell.inner.queue.load(Ordering::Relaxed).is_null());
1030 | 
1031 |         // no more wakes were sent
1032 |         assert_eq!(waker.0.load(Ordering::Relaxed), 2);
1033 |     }
1034 | 
1035 |     #[test]
1036 |     fn lazy_panic() {
1037 |         let w = Arc::new(CountWaker::default()).into();
1038 | 
1039 |         let cmd = AtomicUsize::new(6);
1040 |         let lz = Lazy::new(CmdWait(&cmd));
1041 | 
1042 |         assert_eq!(std::mem::size_of_val(&lz), 3 * std::mem::size_of::<usize>(), "Extra overhead?");
1043 | 
1044 |         // A panic during F::drop must properly transition the Lazy to ready in order to avoid a
1045 |         // double-drop of F or a drop of an invalid T
1046 | 
        assert!(std::panic::catch_unwind(|| {
1047 |             let mut cx = std::task::Context::from_waker(&w);
1048 |             pin!(lz.get_unpin()).poll(&mut cx)
1049 |         })
1050 |         .is_err());
1051 | 
1052 |         assert_eq!(lz.try_get(), Some(&6));
1053 |     }
1054 | }
1055 | 
1056 | union LazyState<T, F> {
1057 |     running: ManuallyDrop<F>,
1058 |     ready: ManuallyDrop<T>,
1059 |     _empty: (),
1060 | }
1061 | 
1062 | /// A value which is computed on demand by running a future.
1063 | ///
1064 | /// Unlike [OnceCell], if a task is cancelled, the initializing future's execution will be
1065 | /// continued by other (concurrent or future) callers of [Lazy::get].
1066 | ///
1067 | /// ```
1068 | /// use std::sync::Arc;
1069 | /// use async_once_cell::Lazy;
1070 | ///
1071 | /// # async fn run() {
1072 | /// struct Data {
1073 | ///     id: u32,
1074 | /// }
1075 | ///
1076 | /// let shared = Arc::pin(Lazy::new(async move {
1077 | ///     Data { id: 4 }
1078 | /// }));
1079 | ///
1080 | /// assert_eq!(shared.as_ref().await.id, 4);
1081 | /// # }
1082 | /// # use std::future::Future;
1083 | /// # struct NeverWake;
1084 | /// # impl std::task::Wake for NeverWake {
1085 | /// #     fn wake(self: Arc<Self>) {}
1086 | /// # }
1087 | /// # let w = Arc::new(NeverWake).into();
1088 | /// # let mut cx = std::task::Context::from_waker(&w);
1089 | /// # assert!(std::pin::pin!(run()).poll(&mut cx).is_ready());
1090 | /// ```
1091 | ///
1092 | /// Using this type with an `async` block in a `static` item requires unstable Rust:
1093 | ///
1094 | /// ```no_build
1095 | /// #![feature(const_async_blocks)]
1096 | /// #![feature(type_alias_impl_trait)]
1097 | /// mod example {
1098 | ///     use async_once_cell::Lazy;
1099 | ///     use std::future::Future;
1100 | ///     type H = impl Future<Output = i32> + 'static;
1101 | ///     static LAZY: Lazy<i32, H> = Lazy::new(async { 4 });
1102 | /// }
1103 | /// ```
1104 | ///
1105 | /// However, it is possible to use if you have a named struct that implements `Future`:
1106 | ///
1107 | /// ```
1108 | /// use async_once_cell::Lazy;
1109 | /// use std::{future::Future, pin::Pin, task};
1110 | ///
1111 | /// struct F;
1112 | /// impl Future for F {
1113 | ///     type Output = i32;
1114 | ///     fn poll(self: Pin<&mut Self>, _: &mut task::Context) -> task::Poll<i32> {
1115 | ///         return task::Poll::Ready(4);
1116 | ///     }
1117 | /// }
1118 | ///
1119 | /// static LAZY: Lazy<i32, F> = Lazy::new(F);
1120 | /// ```
1121 | ///
1122 | /// And this type of struct can still use `async` syntax in its implementation:
1123 | ///
1124 | /// ```
1125 | /// use async_once_cell::Lazy;
1126 | /// use std::{future::Future, pin::Pin, task};
1127 | ///
1128 | /// struct F(Option<Pin<Box<dyn Future<Output = i32> + Send>>>);
1129 | /// impl Future for F {
1130 | ///     type Output = i32;
1131 | ///     fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> task::Poll<i32> {
1132 | ///         Pin::new(self.0.get_or_insert_with(|| Box::pin(async {
1133 | ///             4
1134 | ///         }))).poll(cx)
1135 | ///     }
1136 | /// }
1137 | ///
1138 | /// static LAZY: Lazy<i32, F> = Lazy::new(F(None));
1139 | /// ```
1140 | 
1141 | pub struct Lazy<T, F> {
1142 |     value: UnsafeCell<LazyState<T, F>>,
1143 |     inner: Inner,
1144 | }
1145 | 
1146 | // Safety: our UnsafeCell should be treated like (RwLock<T>, Mutex<F>)
1147 | unsafe impl<T: Send + Sync, F: Send> Sync for Lazy<T, F> {}
1148 | unsafe impl<T: Send, F: Send> Send for Lazy<T, F> {}
1149 | impl<T, F> Unpin for Lazy<T, F> {}
1150 | impl<T: RefUnwindSafe + UnwindSafe, F: RefUnwindSafe + UnwindSafe> RefUnwindSafe for Lazy<T, F> {}
1151 | impl<T: UnwindSafe, F: UnwindSafe> UnwindSafe for Lazy<T, F> {}
1152 | 
1153 | impl<T, F> Lazy<T, F>
1154 | where
1155 |     F: Future<Output = T>,
1156 | {
1157 |     /// Creates a new lazy value with the given initializing future.
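    ///
    /// For illustration (the future is stored now and only runs when first awaited):
    ///
    /// ```
    /// use async_once_cell::Lazy;
    /// let lazy = Lazy::new(async { 4 });
    /// # let _ = lazy; // awaiting requires pinning; see the examples on [Lazy]
    /// ```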
1158 |     pub const fn new(future: F) -> Self {
1159 |         Self::from_future(future)
1160 |     }
1161 |
1162 |     /// Forces the evaluation of this lazy value and returns a reference to the result.
1163 |     ///
1164 |     /// This is equivalent to calling `.await` on a pinned reference, but is more explicit.
1165 |     ///
1166 |     /// The [Pin::static_ref] function may be useful if this is a static value.
1167 |     pub async fn get(self: Pin<&Self>) -> Pin<&T> {
1168 |         self.await
1169 |     }
1170 | }
1171 |
1172 | /// A helper struct for both of [Lazy]'s [IntoFuture]s
1173 | ///
1174 | /// Note: the Lazy value may or may not be pinned, depending on what public struct wraps this one.
1175 | struct LazyFuture<'a, T, F> {
1176 |     lazy: &'a Lazy<T, F>,
1177 |     step: Step<'a>,
1178 |     // This is needed to guarantee Inner's refcount never overflows
1179 |     _pin: PhantomPinned,
1180 | }
1181 |
1182 | impl<'a, T, F> LazyFuture<'a, T, F>
1183 | where
1184 |     F: Future<Output = T>,
1185 | {
1186 |     fn poll(&mut self, cx: &mut task::Context<'_>) -> task::Poll<&'a T> {
1187 |         struct ReplaceGuard<'a, 'b, T, F> {
1188 |             this: &'a Lazy<T, F>,
1189 |             value: ManuallyDrop<T>,
1190 |             head: EitherHead<'a, 'b>,
1191 |         }
1192 |
1193 |         // Prevent double-drop in case of panic in ManuallyDrop::drop
1194 |         impl<T, F> Drop for ReplaceGuard<'_, '_, T, F> {
1195 |             fn drop(&mut self) {
1196 |                 // Safety: the union is currently empty and must be filled with a ready value
1197 |                 unsafe {
1198 |                     let value = ManuallyDrop::take(&mut self.value);
1199 |                     (*self.this.value.get()).ready = ManuallyDrop::new(value);
1200 |                 }
1201 |                 self.head.set_ready();
1202 |             }
1203 |         }
1204 |
1205 |         let this = &self.lazy;
1206 |         self.step
1207 |             .poll_init(cx, (), |cx, head| {
1208 |                 // Safety: this closure is only called when we have the queue head, so the
1209 |                 // union is in the running state and is pinned like self
1210 |                 let init = unsafe { Pin::new_unchecked(&mut *(*this.value.get()).running) };
1211 |
1212 |                 let value = ManuallyDrop::new(task::ready!(init.poll(cx)));
1213 |
1214 |                 // Safety: the guard will cause the replace and set-ready operations to happen
1215 |                 // even if the future panics on drop, so the union will not be vacant even on
1216 |                 // unwind.
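                // The construction order below matters: `guard` takes ownership of the
                // computed value first, so if dropping the exhausted future unwinds, the
                // guard's Drop impl still stores the value and marks the cell ready.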
1217 |                 unsafe {
1218 |                     let guard = ReplaceGuard { this, value, head };
1219 |                     ManuallyDrop::drop(&mut (*this.value.get()).running);
1220 |                     drop(guard);
1221 |                 }
1222 |
1223 |                 // Safety: just initialized
1224 |                 task::Poll::Ready(())
1225 |             })
1226 |             .map(|()| {
1227 |                 // Safety: Ready is only returned when either READY_BIT was seen or we returned Ready
1228 |                 // from our closure
1229 |                 unsafe { &*(*this.value.get()).ready }
1230 |             })
1231 |     }
1232 | }
1233 |
1234 | /// A helper struct for [Lazy]'s [IntoFuture]
1235 | pub struct LazyFuturePin<'a, T, F>(LazyFuture<'a, T, F>);
1236 |
1237 | impl<'a, T, F> IntoFuture for Pin<&'a Lazy<T, F>>
1238 | where
1239 |     F: Future<Output = T>,
1240 | {
1241 |     type Output = Pin<&'a T>;
1242 |     type IntoFuture = LazyFuturePin<'a, T, F>;
1243 |     fn into_future(self) -> Self::IntoFuture {
1244 |         // Safety: this is Pin::deref, but with a lifetime of 'a
1245 |         let lazy = unsafe { Pin::into_inner_unchecked(self) };
1246 |         LazyFuturePin(LazyFuture {
1247 |             lazy,
1248 |             step: Step::Start { inner: &lazy.inner },
1249 |             _pin: PhantomPinned,
1250 |         })
1251 |     }
1252 | }
1253 |
1254 | impl<'a, T, F> Future for LazyFuturePin<'a, T, F>
1255 | where
1256 |     F: Future<Output = T>,
1257 | {
1258 |     type Output = Pin<&'a T>;
1259 |     fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll<Pin<&'a T>> {
1260 |         // Safety: we don't move anything that needs to be pinned.
1261 |         let inner = unsafe { &mut Pin::into_inner_unchecked(self).0 };
1262 |         // Safety: because the original Lazy was pinned, the T it produces is also pinned
1263 |         inner.poll(cx).map(|p| unsafe { Pin::new_unchecked(p) })
1264 |     }
1265 | }
1266 |
1267 | impl<T, F> Lazy<T, F>
1268 | where
1269 |     F: Future<Output = T> + Unpin,
1270 | {
1271 |     /// Forces the evaluation of this lazy value and returns a reference to the result.
1272 |     ///
1273 |     /// This is equivalent to calling `.await` on a reference, but may be clearer to call
1274 |     /// explicitly.
1275 |     ///
1276 |     /// Unlike [Self::get], this does not require pinning the object.
1277 |     pub async fn get_unpin(&self) -> &T {
1278 |         self.await
1279 |     }
1280 | }
1281 |
1282 | /// A helper struct for [Lazy]'s [IntoFuture]
1283 | pub struct LazyFutureUnpin<'a, T, F>(LazyFuture<'a, T, F>);
1284 |
1285 | impl<'a, T, F> IntoFuture for &'a Lazy<T, F>
1286 | where
1287 |     F: Future<Output = T> + Unpin,
1288 | {
1289 |     type Output = &'a T;
1290 |     type IntoFuture = LazyFutureUnpin<'a, T, F>;
1291 |     fn into_future(self) -> Self::IntoFuture {
1292 |         LazyFutureUnpin(LazyFuture {
1293 |             lazy: self,
1294 |             step: Step::Start { inner: &self.inner },
1295 |             _pin: PhantomPinned,
1296 |         })
1297 |     }
1298 | }
1299 |
1300 | impl<'a, T, F> Future for LazyFutureUnpin<'a, T, F>
1301 | where
1302 |     F: Future<Output = T> + Unpin,
1303 | {
1304 |     type Output = &'a T;
1305 |     fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll<&'a T> {
1306 |         // Safety: we don't move anything that needs to be pinned.
1307 |         unsafe { Pin::into_inner_unchecked(self) }.0.poll(cx)
1308 |     }
1309 | }
1310 |
1311 | impl<T, F> Lazy<T, F> {
1312 |     /// Creates a new lazy value with the given initializing future.
1313 |     ///
1314 |     /// This is equivalent to [Self::new] but with no type bound.
1315 |     pub const fn from_future(future: F) -> Self {
1316 |         Self {
1317 |             value: UnsafeCell::new(LazyState { running: ManuallyDrop::new(future) }),
1318 |             inner: Inner::new(),
1319 |         }
1320 |     }
1321 |
1322 |     /// Creates an already-initialized lazy value.
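    ///
    /// A minimal sketch of seeding a ready value ([std::future::Ready] is used only as a
    /// placeholder for the never-run `F` parameter):
    ///
    /// ```
    /// use async_once_cell::Lazy;
    /// use std::future::Ready;
    ///
    /// let cell: Lazy<i32, Ready<i32>> = Lazy::with_value(5);
    /// assert_eq!(cell.try_get(), Some(&5));
    /// ```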
1323 |     pub const fn with_value(value: T) -> Self {
1324 |         Self {
1325 |             value: UnsafeCell::new(LazyState { ready: ManuallyDrop::new(value) }),
1326 |             inner: Inner::new_ready(),
1327 |         }
1328 |     }
1329 |
1330 |     /// Gets the value without blocking or starting the initialization.
1331 |     pub fn try_get(&self) -> Option<&T> {
1332 |         let state = self.inner.state.load(Ordering::Acquire);
1333 |
1334 |         if state & READY_BIT == 0 {
1335 |             None
1336 |         } else {
1337 |             // Safety: just checked ready
1338 |             unsafe { Some(&(*self.value.get()).ready) }
1339 |         }
1340 |     }
1341 |
1342 |     /// Gets the value without blocking or starting the initialization.
1343 |     ///
1344 |     /// This requires mutable access to self, so Rust's aliasing rules prevent any concurrent
1345 |     /// access and allow violating the usual rules for accessing this cell.
1346 |     pub fn try_get_mut(self: Pin<&mut Self>) -> Option<Pin<&mut T>> {
1347 |         // Safety: unpinning for access
1348 |         let this = unsafe { self.get_unchecked_mut() };
1349 |         let state = *this.inner.state.get_mut();
1350 |         if state & READY_BIT == 0 {
1351 |             None
1352 |         } else {
1353 |             // Safety: just checked ready, and pinned as a projection
1354 |             unsafe { Some(Pin::new_unchecked(&mut this.value.get_mut().ready)) }
1355 |         }
1356 |     }
1357 |
1358 |     /// Gets the value without blocking or starting the initialization.
1359 |     ///
1360 |     /// This requires mutable access to self, so Rust's aliasing rules prevent any concurrent
1361 |     /// access and allow violating the usual rules for accessing this cell.
1362 |     pub fn try_get_mut_unpin(&mut self) -> Option<&mut T> {
1363 |         let state = *self.inner.state.get_mut();
1364 |         if state & READY_BIT == 0 {
1365 |             None
1366 |         } else {
1367 |             // Safety: just checked ready
1368 |             unsafe { Some(&mut self.value.get_mut().ready) }
1369 |         }
1370 |     }
1371 |
1372 |     /// Takes ownership of the value if it was set.
1373 |     ///
1374 |     /// Similar to the try_get functions, this returns None if the future has not yet completed,
1375 |     /// even if the value would be available without blocking.
1376 |     pub fn into_inner(self) -> Option<T> {
1377 |         self.into_parts().ok()
1378 |     }
1379 |
1380 |     /// Takes ownership of the value or the initializing future.
1381 |     pub fn into_parts(mut self) -> Result<T, F> {
1382 |         let state = *self.inner.state.get_mut();
1383 |
1384 |         // Safety: we can take ownership of the contents of self.value as long as we avoid dropping
1385 |         // it when self goes out of scope. The value EMPTY_STATE (!0) is used as a sentinel to
1386 |         // indicate that the union is empty - it's impossible for state to be set to that value
1387 |         // normally by the same logic that prevents refcount overflow.
1388 |         //
1389 |         // Note: it is not safe to do this in a &mut self method because none of the get()
1390 |         // functions handle EMPTY_STATE; that's not relevant here as we took ownership of self.
1391 |         // A working "Lazy::take(&mut self)" function would also need to create a new initializing
1392 |         // future, and at that point it's best done by just using mem::replace with a new Lazy.
1393 |         unsafe {
1394 |             *self.inner.state.get_mut() = EMPTY_STATE;
1395 |             if state & READY_BIT == 0 {
1396 |                 Err(ptr::read(&*self.value.get_mut().running))
1397 |             } else {
1398 |                 Ok(ptr::read(&*self.value.get_mut().ready))
1399 |             }
1400 |         }
1401 |     }
1402 |
1403 |     /// Takes ownership of the value from a pinned object.
1404 |     ///
1405 |     /// This is equivalent to `mem::replace(self, replacement).into_inner()` but does not require
1406 |     /// that `F` be `Unpin` like that expression would.
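    ///
    /// A minimal sketch (again using [std::future::Ready] as a placeholder future type):
    ///
    /// ```
    /// use async_once_cell::Lazy;
    /// use std::future::Ready;
    ///
    /// let mut cell: std::pin::Pin<Box<Lazy<i32, Ready<i32>>>> = Box::pin(Lazy::with_value(7));
    /// assert_eq!(cell.as_mut().replace_and_take(Lazy::with_value(8)), Some(7));
    /// assert_eq!(cell.try_get(), Some(&8));
    /// ```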
1407 |     pub fn replace_and_take(self: Pin<&mut Self>, replacement: Self) -> Option<T>
1408 |     where
1409 |         T: Unpin,
1410 |     {
1411 |         // Safety: this reads fields and then open-codes Pin::set
1412 |         let this = unsafe { self.get_unchecked_mut() };
1413 |         let state = *this.inner.state.get_mut();
1414 |         let value = if state & READY_BIT == 0 {
1415 |             None
1416 |         } else {
1417 |             *this.inner.state.get_mut() = EMPTY_STATE;
1418 |             Some(unsafe { ptr::read(&*this.value.get_mut().ready) })
1419 |         };
1420 |         *this = replacement;
1421 |         value
1422 |     }
1423 | }
1424 |
1425 | impl<T, F> Drop for Lazy<T, F> {
1426 |     fn drop(&mut self) {
1427 |         let state = *self.inner.state.get_mut();
1428 |         // Safety: the state always reflects the variant of the union that we must drop
1429 |         unsafe {
1430 |             if state == EMPTY_STATE {
1431 |                 // do nothing (see into_inner and the _empty variant)
1432 |             } else if state & READY_BIT == 0 {
1433 |                 ManuallyDrop::drop(&mut self.value.get_mut().running);
1434 |             } else {
1435 |                 ManuallyDrop::drop(&mut self.value.get_mut().ready);
1436 |             }
1437 |         }
1438 |     }
1439 | }
1440 |
1441 | impl<T: fmt::Debug, F> fmt::Debug for Lazy<T, F> {
1442 |     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1443 |         let value = self.try_get();
1444 |         fmt.debug_struct("Lazy").field("value", &value).field("inner", &self.inner).finish()
1445 |     }
1446 | }
1447 |
--------------------------------------------------------------------------------