├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-MIT ├── README.md ├── examples └── qutex.rs └── src ├── async_await.rs ├── lib.rs ├── qrw_lock.rs └── qutex.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Other 11 | /*.png 12 | *.sublime-workspace 13 | **/result.png 14 | /examples/**/result**.png 15 | # /examples/images-safe-clamp/result_patches.png 16 | # /examples/images-safe-clamp/result_unrolled.png 17 | **/result*.png 18 | 19 | # Generated by Cargo 20 | Cargo.lock 21 | **/Cargo.lock 22 | /target/ 23 | /examples/**/target/ 24 | 25 | # My junk 26 | /data 27 | **/tmp 28 | /src/junk 29 | /bak 30 | 31 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Version 0.2.3 (2019-04-18) 2 | ========================== 3 | 4 | * Fix bug encountered while processing queue when a requester has dropped 5 | while popping read lock requests. 6 | 7 | Version 0.2.2 (2019-03-08) 8 | ========================== 9 | 10 | * Fix `Guard::unlock` to actually unlock qutex. -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "qutex" 3 | version = "0.2.6" 4 | authors = ["Nick Sanders "] 5 | license = "MIT" 6 | description = """\ 7 | Synchronization mechanisms that rely on lock-free and other \ 8 | non-(thread)blocking techniques, such as Rust futures, to guarantee \ 9 | mutually exclusive or shared exclusive access to data.""" 10 | documentation = "https://docs.rs/qutex" 11 | homepage = "https://github.com/cogciprocate/qutex" 12 | repository = "https://github.com/cogciprocate/qutex" 13 | readme = "README.md" 14 | keywords = ["futures", "async", "data-structures", "mutex", "lock"] 15 | categories = ["asynchronous", "concurrency", "data-structures"] 16 | edition = "2021" 17 | 18 | [features] 19 | default = [] 20 | async_await = ["futures03"] 21 | 22 | [dependencies] 23 | crossbeam = "0.8" 24 | futures = "0.1" 25 | 26 | [dependencies.futures03] 27 | package = "futures-preview" 28 | version = "0.3.0-alpha.14" 29 | features = ["compat"] 30 | optional = true 31 | 32 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2014 Cogciprocate 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Qutex [![](http://meritbadge.herokuapp.com/qutex)](https://crates.io/crates/qutex) [![](https://docs.rs/qutex/badge.svg)](https://docs.rs/qutex) 2 | 3 | Non-thread-blocking queue-backed data locks based on Rust futures. 4 | 5 | Includes futures capable versions of `Mutex` and `RwLock`. 6 | 7 | #### [Documentation](https://docs.rs/qutex) 8 | 9 | 10 | ## Example 11 | 12 | `Cargo.toml`: 13 | 14 | ```toml 15 | [dependencies] 16 | qutex = "0.2" 17 | ``` 18 | 19 | `main.rs`: 20 | 21 | ```rust 22 | extern crate qutex; 23 | extern crate futures; 24 | 25 | use std::thread; 26 | use futures::Future; 27 | use qutex::Qutex; 28 | 29 | fn main() { 30 | let thread_count = 100; 31 | let mut threads = Vec::with_capacity(thread_count); 32 | let start_val = 0; 33 | let qutex = Qutex::new(start_val); 34 | 35 | for _ in 0..thread_count { 36 | let future_val = qutex.clone().lock(); 37 | 38 | let future_add = future_val.map(|mut val| { 39 | *val += 1; 40 | }); 41 | 42 | threads.push(thread::spawn(|| { 43 | future_add.wait().unwrap(); 44 | })); 45 | } 46 | 47 | for thread in threads { 48 | thread.join().unwrap(); 49 | } 50 | 51 | let val = qutex.lock().wait().unwrap(); 52 | assert_eq!(*val, start_val + thread_count); 53 | println!("Qutex final value: {}", *val); 54 | } 55 | 56 | ``` -------------------------------------------------------------------------------- /examples/qutex.rs: -------------------------------------------------------------------------------- 1 | extern crate futures; 2 | extern crate qutex; 3 | 4 | use futures::Future; 5 | use qutex::Qutex; 6 | use std::thread; 7 | 8 | fn main() { 9 | let thread_count = 100; 10 | let mut threads = Vec::with_capacity(thread_count); 11 | let start_val = 0; 12 | 13 | // Create a `Qutex` protecting a start value of zero. 14 | let qutex = Qutex::new(start_val); 15 | 16 | // Spawn several threads, each adding 1 to the protected value. 17 | for _ in 0..thread_count { 18 | // Obtain a 'guard' (akin to a `std::sync::MutexGuard`). 19 | let future_val = qutex.clone().lock(); 20 | 21 | // Add 1 to the protected value. `future_val` is a `FutureGuard` which 22 | // will resolve to a `Guard` providing mutable access to the protected 23 | // value. The guard can be passed between futures combinators and will 24 | // unlock the `Qutex` when dropped. 25 | let future_add = future_val.map(|mut val| { 26 | *val += 1; 27 | }); 28 | 29 | // Spawn a thread which blocks upon completion of the above lock and 30 | // add operations. 
31 |         threads.push(thread::spawn(|| {
32 |             future_add.wait().unwrap();
33 |         }));
34 |     }
35 | 
36 |     for thread in threads {
37 |         thread.join().unwrap();
38 |     }
39 | 
40 |     let val = qutex.lock().wait().unwrap();
41 |     assert_eq!(*val, start_val + thread_count);
42 |     println!("Qutex final value: {}", *val);
43 | }
44 | 
--------------------------------------------------------------------------------
/src/async_await.rs:
--------------------------------------------------------------------------------
 1 | use crate::*;
 2 | use futures::sync::oneshot::Canceled;
 3 | use futures03::compat::Future01CompatExt;
 4 | 
 5 | impl<T> Qutex<T> {
 6 |     pub async fn lock_async(self) -> Result<Guard<T>, Canceled> {
 7 |         self.lock().compat().await
 8 |     }
 9 | }
10 | 
11 | impl<T> QrwLock<T> {
12 |     pub async fn write_async(self) -> Result<WriteGuard<T>, Canceled> {
13 |         self.write().compat().await
14 |     }
15 | 
16 |     pub async fn read_async(self) -> Result<ReadGuard<T>, Canceled> {
17 |         self.read().compat().await
18 |     }
19 | }
20 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
 1 | //! A collection of locking data structures, both thread-safe and
 2 | //! single-thread-optimized, which use Rust futures instead of
 3 | //! thread-blocking.
 4 | //!
 5 | //! [![](https://img.shields.io/badge/github-qutex-blue.svg)][repo] [![](http://meritbadge.herokuapp.com/qutex)](https://crates.io/crates/qutex)
 6 | //!
 7 | //! [repo]: https://github.com/cogciprocate/qutex
 8 | 
 9 | extern crate crossbeam;
10 | extern crate futures;
11 | 
12 | #[cfg(feature = "async_await")]
13 | mod async_await;
14 | 
15 | mod qrw_lock;
16 | mod qutex;
17 | 
18 | pub use self::qrw_lock::{
19 |     FutureReadGuard, FutureWriteGuard, QrwLock, QrwRequest, ReadGuard, RequestKind, WriteGuard,
20 | };
21 | pub use self::qutex::{FutureGuard, Guard, Qutex, Request};
22 | 
23 | #[cfg(feature = "async_await")]
24 | pub use async_await::*;
25 | 
--------------------------------------------------------------------------------
/src/qrw_lock.rs:
--------------------------------------------------------------------------------
 1 | //! A queue-backed read/write data lock.
 2 | //!
 3 | //! As with any queue-backed system, deadlocks must be carefully avoided when
 4 | //! interoperating with other queues.
 5 | //!
 6 | //
 7 | // * It is unclear how many of the unsafe methods within need actually remain
 8 | //   unsafe.
 9 | // * Virtually every aspect of each of the types in this module would benefit
10 | //   from simplifying refactoring.
11 | //   - Locking for processing, looping, and resetting the processing flag all
12 | //     need to be standardized (factored out).
13 | // * Evaluate whether or not sleeping when the lock is contended (`CONTENDED`
14 | //   bit) is the best approach. This may be slower than it needs to be when
15 | //   multiple cores are concurrently attempting to access. Use
16 | //   `thread::yield_now()` instead? Spin a few times first? Whatever.
[UPDATE: 17 | // Doing some spinning now] 18 | // 19 | 20 | use crossbeam::queue::SegQueue; 21 | use futures::sync::oneshot::{self, Canceled, Receiver, Sender}; 22 | use futures::{Async, Future, Poll}; 23 | use std::cell::UnsafeCell; 24 | use std::ops::{Deref, DerefMut}; 25 | use std::sync::atomic::Ordering::{Acquire, SeqCst}; 26 | use std::sync::atomic::{fence, AtomicUsize}; 27 | use std::sync::Arc; 28 | use std::thread; 29 | 30 | const READ_COUNT_MASK: usize = 0x00FFFFFF; 31 | const WRITE_LOCKED: usize = 1 << 24; 32 | const CONTENDED: usize = 1 << 25; 33 | 34 | const PRINT_DEBUG: bool = false; 35 | 36 | /// Prints a debugging message if enabled. 37 | #[inline(always)] 38 | fn print_debug(msg: &str) { 39 | if PRINT_DEBUG { 40 | println!( 41 | "[Thread: {}] {}", 42 | ::std::thread::current().name().unwrap_or(""), 43 | msg 44 | ); 45 | } 46 | } 47 | 48 | /// Our currently favored thread 'chill out' method used when multiple threads 49 | /// are attempting to contend concurrently. 50 | #[inline] 51 | fn chill_out() { 52 | // thread::sleep(::std::time::Duration::new(0, 1)); 53 | thread::yield_now(); 54 | 55 | // NOTE: It's possible that sleeping or yielding here prolongs the time it 56 | // takes to process the queue to an unreasonable degree. There may be an 57 | // efficiency vs. duration balance to strike here (compared to spinning). 58 | } 59 | 60 | /// Extracts a `QrwLock` from a guard of either type. 61 | // 62 | // This saves us two unnecessary atomic stores (the reference count of lock 63 | // going up then down when releasing or up/downgrading) which would occur if 64 | // we were to clone then drop. 65 | // 66 | // QUESTION: Is there a more elegant way to do this? 67 | unsafe fn extract_lock>(guard: G) -> QrwLock { 68 | let lock = ::std::ptr::read(guard.lock()); 69 | ::std::mem::forget(guard); 70 | lock 71 | } 72 | 73 | /// Very forgettable guards. 74 | trait Guard 75 | where 76 | Self: ::std::marker::Sized, 77 | { 78 | fn lock(&self) -> &QrwLock; 79 | 80 | unsafe fn forget(self) { 81 | ::std::mem::forget(self); 82 | } 83 | } 84 | 85 | /// Allows read-only access to the data contained within a lock. 86 | #[derive(Debug)] 87 | pub struct ReadGuard { 88 | lock: QrwLock, 89 | } 90 | 91 | impl ReadGuard { 92 | pub fn upgrade(guard: ReadGuard) -> FutureUpgrade { 93 | debug_assert!(guard.lock.read_count().unwrap() > 0); 94 | 95 | match unsafe { guard.lock.upgrade_read_lock() } { 96 | Ok(_) => { 97 | print_debug("qutex::ReadGuard::upgrade: Read lock is now upgraded."); 98 | unsafe { FutureUpgrade::new(extract_lock(guard), None) } 99 | } 100 | Err(rx) => { 101 | print_debug("qutex::ReadGuard::upgrade: Waiting for the read count to reach 1..."); 102 | unsafe { FutureUpgrade::new(extract_lock(guard), Some(rx)) } 103 | } 104 | } 105 | } 106 | 107 | /// Releases the lock held by this `ReadGuard` and returns the original `QrwLock`. 108 | pub fn release(guard: ReadGuard) -> QrwLock { 109 | unsafe { 110 | guard.lock.release_read_lock(); 111 | extract_lock(guard) 112 | } 113 | } 114 | } 115 | 116 | impl Deref for ReadGuard { 117 | type Target = T; 118 | 119 | fn deref(&self) -> &T { 120 | unsafe { &*self.lock.inner.cell.get() } 121 | } 122 | } 123 | 124 | impl Drop for ReadGuard { 125 | fn drop(&mut self) { 126 | unsafe { self.lock.release_read_lock() } 127 | } 128 | } 129 | 130 | impl Guard for ReadGuard { 131 | fn lock(&self) -> &QrwLock { 132 | &self.lock 133 | } 134 | } 135 | 136 | /// Allows read or write access to the data contained within a lock. 
137 | #[derive(Debug)] 138 | pub struct WriteGuard { 139 | lock: QrwLock, 140 | } 141 | 142 | impl WriteGuard { 143 | /// Converts this `WriteGuard` into a `ReadGuard` and fulfills any other 144 | /// pending read requests. 145 | pub fn downgrade(guard: WriteGuard) -> ReadGuard { 146 | unsafe { 147 | guard.lock.downgrade_write_lock(); 148 | ReadGuard { 149 | lock: extract_lock(guard), 150 | } 151 | } 152 | } 153 | 154 | /// Releases the lock held by this `WriteGuard` and returns the original 155 | /// `QrwLock`. 156 | // 157 | // * TODO: Create a test that ensures the write lock is released. 158 | // Commenting out the `release_write_lock()' line appears to have no 159 | // effect on the outcome of the current tests. 160 | pub fn release(guard: WriteGuard) -> QrwLock { 161 | unsafe { 162 | guard.lock.release_write_lock(); 163 | extract_lock(guard) 164 | } 165 | } 166 | } 167 | 168 | impl Deref for WriteGuard { 169 | type Target = T; 170 | 171 | fn deref(&self) -> &T { 172 | unsafe { &*self.lock.inner.cell.get() } 173 | } 174 | } 175 | 176 | impl DerefMut for WriteGuard { 177 | fn deref_mut(&mut self) -> &mut T { 178 | unsafe { &mut *self.lock.inner.cell.get() } 179 | } 180 | } 181 | 182 | impl Drop for WriteGuard { 183 | fn drop(&mut self) { 184 | unsafe { self.lock.release_write_lock() } 185 | } 186 | } 187 | 188 | impl Guard for WriteGuard { 189 | fn lock(&self) -> &QrwLock { 190 | &self.lock 191 | } 192 | } 193 | 194 | /// A precursor to a `WriteGuard`. 195 | #[must_use = "futures do nothing unless polled"] 196 | #[derive(Debug)] 197 | pub struct FutureUpgrade { 198 | lock: Option>, 199 | // Designates whether or not to resolve immediately: 200 | rx: Option>, 201 | } 202 | 203 | impl FutureUpgrade { 204 | /// Returns a new `FutureUpgrade`. 205 | fn new(lock: QrwLock, rx: Option>) -> FutureUpgrade { 206 | FutureUpgrade { 207 | lock: Some(lock), 208 | rx: rx, 209 | } 210 | } 211 | 212 | /// Blocks the current thread until this future resolves. 213 | #[inline] 214 | pub fn wait(self) -> Result, Canceled> { 215 | ::wait(self) 216 | } 217 | } 218 | 219 | impl Future for FutureUpgrade { 220 | type Item = WriteGuard; 221 | type Error = Canceled; 222 | 223 | #[inline] 224 | fn poll(&mut self) -> Poll { 225 | if self.lock.is_some() { 226 | // FUTURE NOTE: Lexical borrowing should allow this to be 227 | // restructured without the extra `.unwrap()` below. 228 | if self.rx.is_none() { 229 | print_debug("qutex::FutureUpgrade::poll: Uncontended. Upgrading."); 230 | Ok(Async::Ready(WriteGuard { 231 | lock: self.lock.take().unwrap(), 232 | })) 233 | } else { 234 | unsafe { self.lock.as_ref().unwrap().process_queues() } 235 | self.rx.as_mut().unwrap().poll().map(|res| { 236 | res.map(|_| { 237 | print_debug("qutex::FutureUpgrade::poll: Ready. Upgrading."); 238 | WriteGuard { 239 | lock: self.lock.take().unwrap(), 240 | } 241 | }) 242 | }) 243 | } 244 | } else { 245 | panic!("FutureUpgrade::poll: Task already completed."); 246 | } 247 | } 248 | } 249 | 250 | impl Drop for FutureUpgrade { 251 | /// Gracefully unlock if this guard has a lock acquired but has not yet 252 | /// been polled to completion. 
253 | fn drop(&mut self) { 254 | if let Some(lock) = self.lock.take() { 255 | match self.rx.take() { 256 | Some(mut rx) => { 257 | rx.close(); 258 | match rx.try_recv() { 259 | Ok(status) => { 260 | if status.is_some() { 261 | unsafe { lock.release_write_lock() } 262 | } 263 | } 264 | Err(_) => (), 265 | } 266 | } 267 | None => unsafe { lock.release_write_lock() }, 268 | } 269 | } 270 | } 271 | } 272 | 273 | /// A future which resolves to a `ReadGuard`. 274 | #[must_use = "futures do nothing unless polled"] 275 | #[derive(Debug)] 276 | pub struct FutureReadGuard { 277 | lock: Option>, 278 | rx: Receiver<()>, 279 | } 280 | 281 | impl FutureReadGuard { 282 | /// Returns a new `FutureReadGuard`. 283 | fn new(lock: QrwLock, rx: Receiver<()>) -> FutureReadGuard { 284 | FutureReadGuard { 285 | lock: Some(lock), 286 | rx: rx, 287 | } 288 | } 289 | 290 | /// Blocks the current thread until this future resolves. 291 | #[inline] 292 | pub fn wait(self) -> Result, Canceled> { 293 | ::wait(self) 294 | } 295 | } 296 | 297 | impl Future for FutureReadGuard { 298 | type Item = ReadGuard; 299 | type Error = Canceled; 300 | 301 | #[inline] 302 | fn poll(&mut self) -> Poll { 303 | if self.lock.is_some() { 304 | unsafe { self.lock.as_ref().unwrap().process_queues() } 305 | self.rx.poll().map(|res| { 306 | res.map(|_| { 307 | print_debug("qutex::FutureReadGuard::poll: ReadGuard acquired."); 308 | ReadGuard { 309 | lock: self.lock.take().unwrap(), 310 | } 311 | }) 312 | }) 313 | } else { 314 | panic!("FutureReadGuard::poll: Task already completed."); 315 | } 316 | } 317 | } 318 | 319 | impl Drop for FutureReadGuard { 320 | /// Gracefully unlock if this guard has a lock acquired but has not yet 321 | /// been polled to completion. 322 | fn drop(&mut self) { 323 | if let Some(lock) = self.lock.take() { 324 | self.rx.close(); 325 | match self.rx.try_recv() { 326 | Ok(status) => { 327 | if status.is_some() { 328 | unsafe { lock.release_read_lock() } 329 | } 330 | } 331 | Err(_) => (), 332 | } 333 | } 334 | } 335 | } 336 | 337 | /// A future which resolves to a `WriteGuard`. 338 | #[must_use = "futures do nothing unless polled"] 339 | #[derive(Debug)] 340 | pub struct FutureWriteGuard { 341 | lock: Option>, 342 | rx: Receiver<()>, 343 | } 344 | 345 | impl FutureWriteGuard { 346 | /// Returns a new `FutureWriteGuard`. 347 | fn new(lock: QrwLock, rx: Receiver<()>) -> FutureWriteGuard { 348 | FutureWriteGuard { 349 | lock: Some(lock), 350 | rx: rx, 351 | } 352 | } 353 | 354 | /// Blocks the current thread until this future resolves. 355 | #[inline] 356 | pub fn wait(self) -> Result, Canceled> { 357 | ::wait(self) 358 | } 359 | } 360 | 361 | impl Future for FutureWriteGuard { 362 | type Item = WriteGuard; 363 | type Error = Canceled; 364 | 365 | #[inline] 366 | fn poll(&mut self) -> Poll { 367 | if self.lock.is_some() { 368 | unsafe { self.lock.as_ref().unwrap().process_queues() } 369 | self.rx.poll().map(|res| { 370 | res.map(|_| { 371 | print_debug("qutex::FutureWriteGuard::poll: WriteGuard acquired."); 372 | WriteGuard { 373 | lock: self.lock.take().unwrap(), 374 | } 375 | }) 376 | }) 377 | } else { 378 | panic!("FutureWriteGuard::poll: Task already completed."); 379 | } 380 | } 381 | } 382 | 383 | impl Drop for FutureWriteGuard { 384 | /// Gracefully unlock if this guard has a lock acquired but has not yet 385 | /// been polled to completion. 
386 | fn drop(&mut self) { 387 | if let Some(lock) = self.lock.take() { 388 | self.rx.close(); 389 | match self.rx.try_recv() { 390 | Ok(status) => { 391 | if status.is_some() { 392 | unsafe { lock.release_write_lock() } 393 | } 394 | } 395 | Err(_) => (), 396 | } 397 | } 398 | } 399 | } 400 | 401 | /// Specifies whether a `QrwRequest` is a read or write request. 402 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 403 | pub enum RequestKind { 404 | Read, 405 | Write, 406 | } 407 | 408 | /// A request to lock the lock for either read or write access. 409 | #[derive(Debug)] 410 | pub struct QrwRequest { 411 | tx: Sender<()>, 412 | kind: RequestKind, 413 | } 414 | 415 | impl QrwRequest { 416 | /// Returns a new `QrwRequest`. 417 | pub fn new(tx: Sender<()>, kind: RequestKind) -> QrwRequest { 418 | QrwRequest { tx: tx, kind: kind } 419 | } 420 | } 421 | 422 | /// The guts of a `QrwLock`. 423 | #[derive(Debug)] 424 | struct Inner { 425 | // TODO: Convert to `AtomicBool` if no additional states are needed: 426 | state: AtomicUsize, 427 | cell: UnsafeCell, 428 | queue: SegQueue, 429 | tip: UnsafeCell>, 430 | upgrade_queue: SegQueue>, 431 | } 432 | 433 | impl From for Inner { 434 | #[inline] 435 | fn from(val: T) -> Inner { 436 | Inner { 437 | state: AtomicUsize::new(0), 438 | cell: UnsafeCell::new(val), 439 | queue: SegQueue::new(), 440 | tip: UnsafeCell::new(None), 441 | upgrade_queue: SegQueue::new(), 442 | } 443 | } 444 | } 445 | 446 | unsafe impl Send for Inner {} 447 | unsafe impl Sync for Inner {} 448 | 449 | /// A queue-backed read/write data lock. 450 | /// 451 | /// As with any queue-backed system, deadlocks must be carefully avoided when 452 | /// interoperating with other queues. 453 | /// 454 | #[derive(Debug)] 455 | pub struct QrwLock { 456 | inner: Arc>, 457 | } 458 | 459 | impl QrwLock { 460 | /// Creates and returns a new `QrwLock`. 461 | #[inline] 462 | pub fn new(val: T) -> QrwLock { 463 | QrwLock { 464 | inner: Arc::new(Inner::from(val)), 465 | } 466 | } 467 | 468 | /// Returns a new `FutureReadGuard` which can be used as a future and will 469 | /// resolve into a `ReadGuard`. 470 | #[inline] 471 | pub fn read(self) -> FutureReadGuard { 472 | print_debug("qutex::QrwLock::read: Requesting read lock..."); 473 | let (tx, rx) = oneshot::channel(); 474 | unsafe { 475 | self.enqueue_lock_request(QrwRequest::new(tx, RequestKind::Read)); 476 | } 477 | FutureReadGuard::new(self, rx) 478 | } 479 | 480 | /// Returns a new `FutureWriteGuard` which can be used as a future and will 481 | /// resolve into a `WriteGuard`. 482 | #[inline] 483 | pub fn write(self) -> FutureWriteGuard { 484 | print_debug("qutex::QrwLock::write: Requesting write lock..."); 485 | let (tx, rx) = oneshot::channel(); 486 | unsafe { 487 | self.enqueue_lock_request(QrwRequest::new(tx, RequestKind::Write)); 488 | } 489 | FutureWriteGuard::new(self, rx) 490 | } 491 | 492 | /// Pushes a lock request onto the queue. 493 | /// 494 | // 495 | // TODO: Evaluate unsafe-ness (appears unlikely this can be misused except 496 | // to deadlock the queue which is fine). 497 | // 498 | #[inline] 499 | pub unsafe fn enqueue_lock_request(&self, req: QrwRequest) { 500 | self.inner.queue.push(req); 501 | } 502 | 503 | /// Returns a mutable reference to the inner `Vec` if there are currently 504 | /// no other copies of this `QrwLock`. 505 | /// 506 | /// Since this call borrows the inner lock mutably, no actual locking needs to 507 | /// take place---the mutable borrow statically guarantees no locks exist. 
508 | /// 509 | #[inline] 510 | pub fn get_mut(&mut self) -> Option<&mut T> { 511 | Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() }) 512 | } 513 | 514 | /// Returns a reference to the inner value. 515 | /// 516 | #[inline] 517 | pub fn as_ptr(&self) -> *const T { 518 | self.inner.cell.get() 519 | } 520 | 521 | /// Returns a mutable reference to the inner value. 522 | /// 523 | #[inline] 524 | pub fn as_mut_ptr(&self) -> *mut T { 525 | self.inner.cell.get() 526 | } 527 | 528 | /// Pops the next read or write lock request and returns it or `None` if the queue is empty. 529 | #[inline] 530 | fn pop_request(&self) -> Option { 531 | debug_assert_eq!(self.inner.state.load(Acquire) & CONTENDED, CONTENDED); 532 | print_debug("qutex::QrwLock::pop_request: Popping request from queue..."); 533 | 534 | unsafe { 535 | // Pop twice if the tip was `None` but the queue was not empty. 536 | ::std::mem::replace(&mut *self.inner.tip.get(), self.inner.queue.pop()).or_else(|| { 537 | if (*self.inner.tip.get()).is_some() { 538 | self.pop_request() 539 | } else { 540 | None 541 | } 542 | }) 543 | } 544 | } 545 | 546 | /// Returns the `RequestKind` for the next pending read or write lock request. 547 | #[inline] 548 | fn peek_request_kind(&self) -> Option { 549 | debug_assert_eq!(self.inner.state.load(Acquire) & CONTENDED, CONTENDED); 550 | 551 | unsafe { 552 | if (*self.inner.tip.get()).is_none() { 553 | // We know ::replace return value is `None`. 554 | _ = ::std::mem::replace(&mut *self.inner.tip.get(), self.inner.queue.pop()); 555 | } 556 | (*self.inner.tip.get()).as_ref().map(|req| req.kind) 557 | } 558 | } 559 | 560 | /// Fulfill a request if possible. 561 | #[inline] 562 | fn fulfill_request(&self, mut state: usize) -> usize { 563 | loop { 564 | debug_assert_eq!(self.inner.state.load(Acquire) & CONTENDED, CONTENDED); 565 | debug_assert_eq!(self.inner.state.load(Acquire) & WRITE_LOCKED, 0); 566 | 567 | if let Some(req) = self.pop_request() { 568 | // If there is a send error, a requester has dropped its 569 | // receiver so just go to the next, otherwise process. 570 | if req.tx.send(()).is_ok() { 571 | debug_assert_eq!(self.inner.state.load(Acquire) & WRITE_LOCKED, 0); 572 | 573 | match req.kind { 574 | RequestKind::Read => { 575 | state += 1; 576 | print_debug("qutex::QrwLock::fulfill_request: Locked for reading."); 577 | } 578 | RequestKind::Write => { 579 | debug_assert_eq!(state, 0); 580 | state = WRITE_LOCKED; 581 | print_debug("qutex::QrwLock::fulfill_request: Locked for writing"); 582 | break; 583 | } 584 | } 585 | } else { 586 | print_debug("qutex::QrwLock::fulfill_request: A requester has dropped."); 587 | } 588 | 589 | if let Some(RequestKind::Read) = self.peek_request_kind() { 590 | debug_assert!(state != WRITE_LOCKED); 591 | print_debug( 592 | "qutex::QrwLock::fulfill_request: \ 593 | Next request kind is a read, popping next request...", 594 | ); 595 | continue; 596 | } else { 597 | break; 598 | } 599 | } else { 600 | break; 601 | } 602 | } 603 | 604 | state 605 | } 606 | 607 | /// Returns the current number of read locks. 608 | /// 609 | /// Currently used for debug purposes only. 
610 | #[inline] 611 | fn read_count(&self) -> Option { 612 | let state = self.inner.state.load(Acquire); 613 | let read_count = state & READ_COUNT_MASK; 614 | 615 | if state & READ_COUNT_MASK == read_count { 616 | print_debug("qutex::QrwLock::read_count: Read count: {}."); 617 | Some(read_count as u32) 618 | } else { 619 | None 620 | } 621 | } 622 | 623 | /// Acquires exclusive access to the lock state and returns it. 624 | #[inline(always)] 625 | fn contend(&self) -> usize { 626 | print_debug("qutex::QrwLock::contend: Processing state..."); 627 | let mut spins: u32 = 0; 628 | 629 | loop { 630 | let state = self.inner.state.fetch_or(CONTENDED, SeqCst); 631 | if state & CONTENDED != 0 { 632 | if spins >= 16 { 633 | chill_out(); 634 | } else { 635 | for _ in 0..(2 << spins) { 636 | fence(SeqCst); 637 | } 638 | } 639 | spins += 1; 640 | } else { 641 | return state; 642 | } 643 | } 644 | } 645 | 646 | /// Fulfills an upgrade request. 647 | fn process_upgrade_queue(&self) -> bool { 648 | print_debug("qutex::QrwLock::process_upgrade_queue: Processing upgrade queue..."); 649 | debug_assert!(self.inner.state.load(Acquire) == CONTENDED); 650 | 651 | loop { 652 | match self.inner.upgrade_queue.pop() { 653 | Some(tx) => match tx.send(()) { 654 | Ok(_) => { 655 | print_debug( 656 | "qutex::QrwLock::process_upgrade_queue: \ 657 | Upgrading to write lock...", 658 | ); 659 | return true; 660 | } 661 | Err(()) => { 662 | print_debug( 663 | "qutex::QrwLock::process_upgrade_queue: \ 664 | Unable to upgrade: error completing oneshot.", 665 | ); 666 | continue; 667 | } 668 | }, 669 | None => break, 670 | } 671 | } 672 | false 673 | } 674 | 675 | /// Pops the next lock request in the queue if possible. 676 | /// 677 | // 678 | // TODO: Clarify the following (or remove): 679 | // 680 | // If this (the caller's?) lock is released, read or write-locks this lock 681 | // and unparks the next requester task in the queue. 682 | // 683 | // If this (the caller's?) lock is write-locked, this function does 684 | // nothing. 685 | // 686 | // If this (the caller's?) lock is read-locked and the next request or consecutive 687 | // requests in the queue are read requests, those requests will be 688 | // fulfilled, unparking their respective tasks and incrementing the 689 | // read-lock count appropriately. 690 | // 691 | // 692 | // TODO: 693 | // * This is currently public due to 'derivers' (aka. sub-types). Evaluate. 694 | // * Consider removing unsafe qualifier (should be fine, this fn assumes 695 | // no particular state). 696 | // * Return proper error type. 
697 | // 698 | // pub unsafe fn process_queues(&self) -> Result<(), ()> { 699 | #[inline] 700 | pub unsafe fn process_queues(&self) { 701 | print_debug("qutex::QrwLock::process_queues: Processing queue..."); 702 | 703 | match self.contend() { 704 | // Unlocked: 705 | 0 => { 706 | print_debug("qutex::QrwLock::process_queues: Unlocked."); 707 | let new_state = if self.process_upgrade_queue() { 708 | WRITE_LOCKED 709 | } else { 710 | self.fulfill_request(0) 711 | }; 712 | 713 | self.inner.state.store(new_state, SeqCst); 714 | } 715 | // Write locked, unset CONTENDED flag: 716 | WRITE_LOCKED => { 717 | print_debug("qutex::QrwLock::process_queues: Write locked."); 718 | self.inner.state.store(WRITE_LOCKED, SeqCst); 719 | } 720 | // Either read locked or already being processed: 721 | state => { 722 | debug_assert!(self.inner.state.load(SeqCst) & CONTENDED != 0); 723 | debug_assert!(state <= READ_COUNT_MASK); 724 | 725 | if PRINT_DEBUG { 726 | println!( 727 | "[Thread: {}] Processing queue: Other {{ state: {}, peek: {:?} }}", 728 | thread::current().name().unwrap_or(""), 729 | state, 730 | self.peek_request_kind(), 731 | ); 732 | } 733 | 734 | if self.peek_request_kind() == Some(RequestKind::Read) { 735 | // We are read locked and the next request is a read. 736 | let new_state = self.fulfill_request(state); 737 | self.inner.state.store(new_state, SeqCst); 738 | } else { 739 | // Either the next request is empty or a write and 740 | // we are already read locked. Leave the request 741 | // there and restore our original state, removing 742 | // the CONTENDED flag. 743 | self.inner.state.store(state, SeqCst); 744 | } 745 | } 746 | } 747 | } 748 | 749 | /// Enqueues an upgrade. 750 | fn enqueue_upgrade_request(&self, state: usize) -> Receiver<()> { 751 | debug_assert!(state > 0 && state & CONTENDED == 0 && state & WRITE_LOCKED == 0); 752 | let (tx, rx) = oneshot::channel(); 753 | self.inner.upgrade_queue.push(tx); 754 | self.inner.state.store(state - 1, SeqCst); 755 | rx 756 | } 757 | 758 | /// Converts a single read lock (read count of '1') into a write lock. 759 | /// 760 | /// Returns an error containing a oneshot receiver if there is currently 761 | /// more than one read lock. When the read count reaches one, the receiver 762 | /// channel will be completed (i.e. poll it). 763 | /// 764 | /// Panics if there are no read locks. 765 | /// 766 | /// Do not call this method directly unless you are using a custom guard 767 | /// or are otherwise managing the lock state manually. Use 768 | /// `ReadGuard::upgrade` instead. 769 | #[inline] 770 | pub unsafe fn upgrade_read_lock(&self) -> Result<(), Receiver<()>> { 771 | print_debug("qutex::QrwLock::upgrade_read_lock: Attempting to upgrade reader to writer..."); 772 | 773 | match self.contend() { 774 | 0 => panic!("Unable to upgrade this QrwLock: no read locks."), 775 | WRITE_LOCKED => panic!("Unable to upgrade this QrwLock: already write locked."), 776 | state => { 777 | debug_assert!(self.inner.state.load(SeqCst) & CONTENDED != 0); 778 | debug_assert_eq!(state, self.read_count().unwrap() as usize); 779 | 780 | if state == 1 { 781 | if self.inner.upgrade_queue.is_empty() { 782 | self.inner.state.store(WRITE_LOCKED, SeqCst); 783 | Ok(()) 784 | } else { 785 | Err(self.enqueue_upgrade_request(state)) 786 | } 787 | } else { 788 | Err(self.enqueue_upgrade_request(state)) 789 | } 790 | } 791 | } 792 | } 793 | 794 | /// Converts a write lock into a read lock then processes the queue, 795 | /// allowing additional read requests to acquire locks. 
796 | /// 797 | /// Use `WriteGuard::downgrade` rather than calling this directly. 798 | #[inline] 799 | pub unsafe fn downgrade_write_lock(&self) { 800 | print_debug("qutex::QrwLock::downgrade_write_lock: Attempting to downgrade write lock..."); 801 | debug_assert_eq!(self.inner.state.load(SeqCst) & WRITE_LOCKED, WRITE_LOCKED); 802 | 803 | match self.contend() { 804 | 0 => debug_assert!(false, "unreachable"), 805 | WRITE_LOCKED => { 806 | self.inner.state.store(1, SeqCst); 807 | } 808 | _state => debug_assert!(false, "unreachable"), 809 | } 810 | 811 | // fence(SeqCst); 812 | self.process_queues(); 813 | } 814 | 815 | /// Decreases the reader count by one and unparks the next requester task 816 | /// in the queue if possible. 817 | /// 818 | /// If a reader is waiting to be upgraded and the read lock count reaches 819 | /// 1, the upgrade sender will be completed. 820 | // 821 | // TODO: Consider using `Ordering::Release`. 822 | // TODO: Wire up upgrade checking (if reader count == 1, complete `upgrade_tx`). 823 | #[inline] 824 | pub unsafe fn release_read_lock(&self) { 825 | print_debug("qutex::QrwLock::release_read_lock: Releasing read lock..."); 826 | // Ensure we are read locked and not processing or write locked: 827 | debug_assert!(self.inner.state.load(SeqCst) & READ_COUNT_MASK != 0); 828 | debug_assert_eq!(self.inner.state.load(SeqCst) & WRITE_LOCKED, 0); 829 | 830 | match self.contend() { 831 | 0 => debug_assert!(false, "unreachable"), 832 | WRITE_LOCKED => debug_assert!(false, "unreachable"), 833 | state => { 834 | debug_assert!(self.inner.state.load(SeqCst) & CONTENDED != 0); 835 | assert!(state > 0 && state <= READ_COUNT_MASK); 836 | let new_state = state - 1; 837 | self.inner.state.store(new_state, SeqCst); 838 | self.process_queues(); 839 | } 840 | } 841 | } 842 | 843 | /// Unlocks this (the caller's) lock and unparks the next requester task 844 | /// in the queue if possible. 845 | // 846 | // TODO: Consider using `Ordering::Release`. 847 | #[inline] 848 | pub unsafe fn release_write_lock(&self) { 849 | print_debug("qutex::QrwLock::release_write_lock: Releasing write lock..."); 850 | 851 | // If we are not WRITE_LOCKED, we must be CONTENDED (and will soon be 852 | // write locked). 853 | debug_assert!({ 854 | let state = self.inner.state.load(SeqCst); 855 | (state & CONTENDED == CONTENDED) == (state & WRITE_LOCKED != WRITE_LOCKED) 856 | || (state & CONTENDED == CONTENDED) && (state & WRITE_LOCKED == WRITE_LOCKED) 857 | }); 858 | 859 | // Ensure we are not read locked. 860 | debug_assert!(self.inner.state.load(SeqCst) & READ_COUNT_MASK == 0); 861 | 862 | match self.contend() { 863 | 0 => debug_assert!(false, "unreachable"), 864 | WRITE_LOCKED => { 865 | self.inner.state.store(0, SeqCst); 866 | self.process_queues(); 867 | } 868 | _state => debug_assert!(false, "unreachable"), 869 | } 870 | } 871 | } 872 | 873 | impl From for QrwLock { 874 | #[inline] 875 | fn from(val: T) -> QrwLock { 876 | QrwLock::new(val) 877 | } 878 | } 879 | 880 | // Avoids needing `T: Clone`. 881 | impl Clone for QrwLock { 882 | #[inline] 883 | fn clone(&self) -> QrwLock { 884 | QrwLock { 885 | inner: self.inner.clone(), 886 | } 887 | } 888 | } 889 | 890 | impl Default for QrwLock { 891 | fn default() -> Self { 892 | Self::new(T::default()) 893 | } 894 | } 895 | 896 | #[cfg(test)] 897 | // Woefully incomplete. 
898 | mod tests { 899 | use super::*; 900 | use futures::{future, Future}; 901 | use std::thread; 902 | 903 | #[test] 904 | fn simple() { 905 | let lock = QrwLock::from(0i32); 906 | 907 | let future_r0 = Box::new(lock.clone().read().and_then(|guard| { 908 | assert_eq!(*guard, 0); 909 | println!("val[r0]: {}", *guard); 910 | ReadGuard::release(guard); 911 | Ok(()) 912 | })); 913 | 914 | let future_w0 = Box::new(lock.clone().write().and_then(|mut guard| { 915 | *guard = 5; 916 | println!("val is now: {}", *guard); 917 | Ok(()) 918 | })); 919 | 920 | let future_r1 = Box::new(lock.clone().read().and_then(|guard| { 921 | assert_eq!(*guard, 5); 922 | println!("val[r1]: {}", *guard); 923 | Ok(()) 924 | })); 925 | 926 | let future_r2 = Box::new(lock.clone().read().and_then(|guard| { 927 | assert_eq!(*guard, 5); 928 | println!("val[r2]: {}", *guard); 929 | Ok(()) 930 | })); 931 | 932 | let future_u0 = Box::new(lock.clone().read().and_then(|read_guard| { 933 | println!("Upgrading read guard..."); 934 | ReadGuard::upgrade(read_guard).and_then(|mut write_guard| { 935 | println!("Read guard upgraded."); 936 | *write_guard = 6; 937 | Ok(()) 938 | }) 939 | })); 940 | 941 | // This read will take place before the above read lock can be 942 | // upgraded because read requests are processed in a chained fashion: 943 | let future_r3 = Box::new(lock.clone().read().and_then(|guard| { 944 | // Value should not yet be affected by the events following the 945 | // above write guard upgrade. 946 | assert_eq!(*guard, 5); 947 | println!("val[r3]: {}", *guard); 948 | Ok(()) 949 | })); 950 | 951 | // future_r0.join4(future_w0, future_r1, future_r2).wait().unwrap(); 952 | 953 | let futures: Vec>> = vec![ 954 | future_r0, future_w0, future_r1, future_r2, future_u0, future_r3, 955 | ]; 956 | future::join_all(futures).wait().unwrap(); 957 | 958 | let future_guard = lock.clone().read(); 959 | let guard = future_guard.wait().unwrap(); 960 | assert_eq!(*guard, 6); 961 | } 962 | 963 | // This doesn't really prove much... 964 | // 965 | // * TODO: *Actually* determine whether or not the lock acquisition order is 966 | // upheld. 
967 | // 968 | #[test] 969 | fn concurrent() { 970 | let start_val = 0i32; 971 | let lock = QrwLock::new(start_val); 972 | let thread_count = 20; 973 | let mut threads = Vec::with_capacity(thread_count); 974 | 975 | for _i in 0..thread_count { 976 | let future_write_guard = lock.clone().write(); 977 | let future_read_guard = lock.clone().read(); 978 | 979 | let future_write = future_write_guard.and_then(|mut guard| { 980 | *guard += 1; 981 | WriteGuard::downgrade(guard); 982 | Ok(()) 983 | }); 984 | 985 | let future_read = future_read_guard.and_then(move |guard| { 986 | // println!("Value for thread '{}' is: {}", _i, *_guard); 987 | Ok(guard) 988 | }); 989 | 990 | threads.push(thread::spawn(|| { 991 | future_write.join(future_read).wait().unwrap(); 992 | })); 993 | } 994 | 995 | for i in 0..thread_count { 996 | let future_write_guard = lock.clone().write(); 997 | 998 | threads.push( 999 | thread::Builder::new() 1000 | .name(format!("test_thread_{}", i)) 1001 | .spawn(|| { 1002 | let mut guard = future_write_guard.wait().unwrap(); 1003 | *guard -= 1 1004 | }) 1005 | .unwrap(), 1006 | ); 1007 | 1008 | let future_read_guard = lock.clone().read(); 1009 | 1010 | threads.push( 1011 | thread::Builder::new() 1012 | .name(format!("test_thread_{}", i)) 1013 | .spawn(|| { 1014 | let _val = *future_read_guard.wait().unwrap(); 1015 | }) 1016 | .unwrap(), 1017 | ); 1018 | } 1019 | 1020 | for thread in threads { 1021 | thread.join().unwrap(); 1022 | } 1023 | 1024 | let guard = lock.clone().read().wait().unwrap(); 1025 | assert_eq!(*guard, start_val); 1026 | } 1027 | 1028 | #[test] 1029 | fn read_write_thread_loop() { 1030 | let lock = QrwLock::new(vec![0usize; 1 << 13]); 1031 | let loop_count = 15; 1032 | let redundancy_count = 700; 1033 | let mut threads = Vec::with_capacity(loop_count * 2); 1034 | 1035 | for i in 0..loop_count { 1036 | let future_write_guard = lock.clone().write(); 1037 | let future_read_guard = lock.clone().read(); 1038 | 1039 | threads.push( 1040 | thread::Builder::new() 1041 | .name(format!("write_thread_{}", i)) 1042 | .spawn(move || { 1043 | let mut guard = future_write_guard.wait().unwrap(); 1044 | for _ in 0..redundancy_count { 1045 | for idx in guard.iter_mut() { 1046 | *idx += 1; 1047 | } 1048 | } 1049 | }) 1050 | .unwrap(), 1051 | ); 1052 | 1053 | threads.push( 1054 | thread::Builder::new() 1055 | .name(format!("read_thread_{}", i)) 1056 | .spawn(move || { 1057 | let guard = future_read_guard.wait().unwrap(); 1058 | let expected_val = redundancy_count * (i + 1); 1059 | for idx in guard.iter() { 1060 | assert!( 1061 | *idx == expected_val, 1062 | "Lock data mismatch. 
\ 1063 | {} expected, {} found.", 1064 | expected_val, 1065 | *idx 1066 | ); 1067 | } 1068 | }) 1069 | .unwrap(), 1070 | ); 1071 | } 1072 | 1073 | for thread in threads { 1074 | thread.join().unwrap(); 1075 | } 1076 | 1077 | let guard = lock.clone().read().wait().unwrap(); 1078 | for idx in guard.iter() { 1079 | assert_eq!(*idx, loop_count * redundancy_count); 1080 | } 1081 | } 1082 | 1083 | #[test] 1084 | fn multiple_upgrades() { 1085 | let lock = QrwLock::new(0usize); 1086 | let upgrade_count = 12; 1087 | let mut threads = Vec::with_capacity(upgrade_count * 2); 1088 | 1089 | for i in 0..upgrade_count { 1090 | let future_read_guard_a = lock.clone().read(); 1091 | let future_read_guard_b = lock.clone().read(); 1092 | 1093 | threads.push( 1094 | thread::Builder::new() 1095 | .name(format!("read_thread_{}", i)) 1096 | .spawn(move || { 1097 | let _read_guard = future_read_guard_a.wait().expect("[0]"); 1098 | ::std::thread::sleep(::std::time::Duration::from_millis(500)); 1099 | }) 1100 | .expect("[1]"), 1101 | ); 1102 | 1103 | threads.push( 1104 | thread::Builder::new() 1105 | .name(format!("upgrade_thread_{}", i)) 1106 | .spawn(move || { 1107 | let read_guard = future_read_guard_b.wait().expect("[2]"); 1108 | let upgrade_guard = ReadGuard::upgrade(read_guard); 1109 | let mut write_guard = upgrade_guard 1110 | .wait() 1111 | .expect("Error waiting for upgrade guard"); 1112 | *write_guard += 1; 1113 | }) 1114 | .expect("[4]"), 1115 | ); 1116 | } 1117 | 1118 | for handle in threads { 1119 | let name = handle.thread().name().unwrap().to_owned(); 1120 | handle 1121 | .join() 1122 | .expect(&format!("Error joining thread: {:?}", name)); 1123 | } 1124 | 1125 | let guard = lock.read().wait().expect("[6]"); 1126 | assert_eq!(*guard, upgrade_count); 1127 | } 1128 | } 1129 | -------------------------------------------------------------------------------- /src/qutex.rs: -------------------------------------------------------------------------------- 1 | //! A queue-backed exclusive data lock. 2 | //! 3 | // 4 | // * It is unclear how many of the unsafe methods within need actually remain 5 | // unsafe. 6 | 7 | use crossbeam::queue::SegQueue; 8 | use futures::sync::oneshot::{self, Canceled, Receiver, Sender}; 9 | use futures::{Future, Poll}; 10 | use std::cell::UnsafeCell; 11 | use std::ops::{Deref, DerefMut}; 12 | use std::sync::atomic::AtomicUsize; 13 | use std::sync::atomic::Ordering::SeqCst; 14 | use std::sync::Arc; 15 | 16 | /// Allows access to the data contained within a lock just like a mutex guard. 17 | #[derive(Debug)] 18 | pub struct Guard { 19 | qutex: Qutex, 20 | } 21 | 22 | impl Guard { 23 | /// Releases the lock held by a `Guard` and returns the original `Qutex`. 24 | pub fn unlock(guard: Guard) -> Qutex { 25 | let qutex = unsafe { ::std::ptr::read(&guard.qutex) }; 26 | ::std::mem::forget(guard); 27 | unsafe { qutex.direct_unlock() } 28 | qutex 29 | } 30 | } 31 | 32 | impl Deref for Guard { 33 | type Target = T; 34 | 35 | fn deref(&self) -> &T { 36 | unsafe { &*self.qutex.inner.cell.get() } 37 | } 38 | } 39 | 40 | impl DerefMut for Guard { 41 | fn deref_mut(&mut self) -> &mut T { 42 | unsafe { &mut *self.qutex.inner.cell.get() } 43 | } 44 | } 45 | 46 | impl Drop for Guard { 47 | fn drop(&mut self) { 48 | // unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") }; 49 | unsafe { self.qutex.direct_unlock() } 50 | } 51 | } 52 | 53 | /// A future which resolves to a `Guard`. 
54 | #[must_use = "futures do nothing unless polled"] 55 | #[derive(Debug)] 56 | pub struct FutureGuard { 57 | qutex: Option>, 58 | rx: Receiver<()>, 59 | } 60 | 61 | impl FutureGuard { 62 | /// Returns a new `FutureGuard`. 63 | fn new(qutex: Qutex, rx: Receiver<()>) -> FutureGuard { 64 | FutureGuard { 65 | qutex: Some(qutex), 66 | rx: rx, 67 | } 68 | } 69 | 70 | /// Blocks the current thread until this future resolves. 71 | #[inline] 72 | pub fn wait(self) -> Result, Canceled> { 73 | ::wait(self) 74 | } 75 | } 76 | 77 | impl Future for FutureGuard { 78 | type Item = Guard; 79 | type Error = Canceled; 80 | 81 | #[inline] 82 | fn poll(&mut self) -> Poll { 83 | if self.qutex.is_some() { 84 | unsafe { self.qutex.as_ref().unwrap().process_queue() } 85 | 86 | match self.rx.poll() { 87 | Ok(status) => Ok(status.map(|_| Guard { 88 | qutex: self.qutex.take().unwrap(), 89 | })), 90 | Err(e) => Err(e.into()), 91 | } 92 | } else { 93 | panic!("FutureGuard::poll: Task already completed."); 94 | } 95 | } 96 | } 97 | 98 | impl Drop for FutureGuard { 99 | /// Gracefully unlock if this guard has a lock acquired but has not yet 100 | /// been polled to completion. 101 | fn drop(&mut self) { 102 | if let Some(qutex) = self.qutex.take() { 103 | self.rx.close(); 104 | 105 | match self.rx.try_recv() { 106 | Ok(status) => { 107 | if status.is_some() { 108 | unsafe { 109 | qutex.direct_unlock(); 110 | } 111 | } 112 | } 113 | Err(_) => (), 114 | } 115 | } 116 | } 117 | } 118 | 119 | /// A request to lock the qutex for exclusive access. 120 | #[derive(Debug)] 121 | pub struct Request { 122 | tx: Sender<()>, 123 | } 124 | 125 | impl Request { 126 | /// Returns a new `Request`. 127 | pub fn new(tx: Sender<()>) -> Request { 128 | Request { tx: tx } 129 | } 130 | } 131 | 132 | #[derive(Debug)] 133 | struct Inner { 134 | // TODO: Convert to `AtomicBool` if no additional states are needed: 135 | state: AtomicUsize, 136 | cell: UnsafeCell, 137 | queue: SegQueue, 138 | } 139 | 140 | impl From for Inner { 141 | #[inline] 142 | fn from(val: T) -> Inner { 143 | Inner { 144 | state: AtomicUsize::new(0), 145 | cell: UnsafeCell::new(val), 146 | queue: SegQueue::new(), 147 | } 148 | } 149 | } 150 | 151 | unsafe impl Send for Inner {} 152 | unsafe impl Sync for Inner {} 153 | 154 | /// A lock-free-queue-backed exclusive data lock. 155 | #[derive(Debug)] 156 | pub struct Qutex { 157 | inner: Arc>, 158 | } 159 | 160 | impl Qutex { 161 | /// Creates and returns a new `Qutex`. 162 | #[inline] 163 | pub fn new(val: T) -> Qutex { 164 | Qutex { 165 | inner: Arc::new(Inner::from(val)), 166 | } 167 | } 168 | 169 | /// Returns a new `FutureGuard` which can be used as a future and will 170 | /// resolve into a `Guard`. 171 | pub fn lock(self) -> FutureGuard { 172 | let (tx, rx) = oneshot::channel(); 173 | unsafe { 174 | self.push_request(Request::new(tx)); 175 | } 176 | FutureGuard::new(self, rx) 177 | } 178 | 179 | /// Pushes a lock request onto the queue. 180 | /// 181 | // 182 | // TODO: Evaluate unsafe-ness. 183 | // 184 | #[inline] 185 | pub unsafe fn push_request(&self, req: Request) { 186 | self.inner.queue.push(req); 187 | } 188 | 189 | /// Returns a mutable reference to the inner `Vec` if there are currently 190 | /// no other copies of this `Qutex`. 191 | /// 192 | /// Since this call borrows the inner lock mutably, no actual locking needs to 193 | /// take place---the mutable borrow statically guarantees no locks exist. 
194 | /// 195 | #[inline] 196 | pub fn get_mut(&mut self) -> Option<&mut T> { 197 | Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() }) 198 | } 199 | 200 | /// Returns a reference to the inner value. 201 | /// 202 | #[inline] 203 | pub fn as_ptr(&self) -> *const T { 204 | self.inner.cell.get() 205 | } 206 | 207 | /// Returns a mutable reference to the inner value. 208 | /// 209 | #[inline] 210 | pub fn as_mut_ptr(&self) -> *mut T { 211 | self.inner.cell.get() 212 | } 213 | 214 | /// Pops the next lock request in the queue if this (the caller's) lock is 215 | /// unlocked. 216 | // 217 | // TODO: 218 | // * This is currently public due to 'derivers' (aka. sub-types). Evaluate. 219 | // * Consider removing unsafe qualifier. 220 | // * Return proper error type. 221 | // * [performance] Determine whether or not `compare_exchange_weak` should be used instead. 222 | // * [performance] Consider failure ordering. 223 | // 224 | pub unsafe fn process_queue(&self) { 225 | match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) { 226 | // Unlocked: 227 | Ok(0) => { 228 | loop { 229 | if let Some(req) = self.inner.queue.pop() { 230 | // If there is a send error, a requester has dropped 231 | // its receiver so just go to the next. 232 | if req.tx.send(()).is_err() { 233 | continue; 234 | } else { 235 | break; 236 | } 237 | } else { 238 | self.inner.state.store(0, SeqCst); 239 | break; 240 | } 241 | } 242 | } 243 | // Already locked, leave it alone: 244 | Err(1) => (), 245 | // Already locked, leave it alone: 246 | // 247 | // TODO: Remove this option. Should be unreachable. 248 | // 249 | Ok(1) => unreachable!(), 250 | // Something else: 251 | Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n), 252 | Err(n) => panic!("Qutex::process_queue: error: {}.", n), 253 | } 254 | } 255 | 256 | /// Unlocks this (the caller's) lock and wakes up the next task in the 257 | /// queue. 258 | // 259 | // TODO: 260 | // * Evaluate unsafe-ness. 261 | // * Return proper error type 262 | // pub unsafe fn direct_unlock(&self) -> Result<(), ()> { 263 | pub unsafe fn direct_unlock(&self) { 264 | // TODO: Consider using `Ordering::Release`. 265 | self.inner.state.store(0, SeqCst); 266 | self.process_queue() 267 | } 268 | } 269 | 270 | impl From for Qutex { 271 | #[inline] 272 | fn from(val: T) -> Qutex { 273 | Qutex::new(val) 274 | } 275 | } 276 | 277 | // Avoids needing `T: Clone`. 
278 | impl Clone for Qutex { 279 | #[inline] 280 | fn clone(&self) -> Qutex { 281 | Qutex { 282 | inner: self.inner.clone(), 283 | } 284 | } 285 | } 286 | 287 | impl Default for Qutex { 288 | fn default() -> Self { 289 | Self::new(T::default()) 290 | } 291 | } 292 | 293 | #[cfg(test)] 294 | // Woefully incomplete: 295 | mod tests { 296 | use super::*; 297 | use futures::Future; 298 | 299 | #[test] 300 | fn simple() { 301 | let val = Qutex::from(999i32); 302 | 303 | println!("Reading val..."); 304 | { 305 | let future_guard = val.clone().lock(); 306 | let guard = future_guard.wait().unwrap(); 307 | println!("val: {}", *guard); 308 | } 309 | 310 | println!("Storing new val..."); 311 | { 312 | let future_guard = val.clone().lock(); 313 | let mut guard = future_guard.wait().unwrap(); 314 | 315 | *guard = 5; 316 | } 317 | 318 | println!("Reading val..."); 319 | { 320 | let future_guard = val.clone().lock(); 321 | let guard = future_guard.wait().unwrap(); 322 | println!("val: {}", *guard); 323 | } 324 | } 325 | 326 | #[test] 327 | fn concurrent() { 328 | use std::thread; 329 | 330 | let thread_count = 20; 331 | let mut threads = Vec::with_capacity(thread_count); 332 | let start_val = 0i32; 333 | let qutex = Qutex::new(start_val); 334 | 335 | for i in 0..thread_count { 336 | let future_guard = qutex.clone().lock(); 337 | 338 | let future_write = future_guard.and_then(|mut guard| { 339 | *guard += 1; 340 | Ok(()) 341 | }); 342 | 343 | threads.push( 344 | thread::Builder::new() 345 | .name(format!("test_thread_{}", i)) 346 | .spawn(|| future_write.wait().unwrap()) 347 | .unwrap(), 348 | ); 349 | } 350 | 351 | for i in 0..thread_count { 352 | let future_guard = qutex.clone().lock(); 353 | 354 | threads.push( 355 | thread::Builder::new() 356 | .name(format!("test_thread_{}", i + thread_count)) 357 | .spawn(|| { 358 | let mut guard = future_guard.wait().unwrap(); 359 | *guard -= 1; 360 | }) 361 | .unwrap(), 362 | ) 363 | } 364 | 365 | for thread in threads { 366 | thread.join().unwrap(); 367 | } 368 | 369 | let guard = qutex.clone().lock().wait().unwrap(); 370 | assert_eq!(*guard, start_val); 371 | } 372 | 373 | #[test] 374 | fn future_guard_drop() { 375 | let lock = Qutex::from(true); 376 | let _future_guard_0 = lock.clone().lock(); 377 | let _future_guard_1 = lock.clone().lock(); 378 | let _future_guard_2 = lock.clone().lock(); 379 | 380 | // TODO: FINISH ME 381 | } 382 | 383 | #[test] 384 | fn explicit_unlock() { 385 | let lock = Qutex::from(true); 386 | 387 | let mut guard_0 = lock.clone().lock().wait().unwrap(); 388 | *guard_0 = false; 389 | let _ = Guard::unlock(guard_0); 390 | // Will deadlock if this doesn't work: 391 | let guard_1 = lock.clone().lock().wait().unwrap(); 392 | assert!(*guard_1 == false); 393 | } 394 | } 395 | --------------------------------------------------------------------------------
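The bundled example exercises only `Qutex`. Below is a minimal sketch of the equivalent `QrwLock` usage, modeled on `examples/qutex.rs` and the `read`/`write` guard API in `src/qrw_lock.rs`; the file name `examples/qrw_lock.rs` is hypothetical and the sketch is not part of the crate.

```rust
// Hypothetical sketch (not part of this repository): `QrwLock` usage
// mirroring `examples/qutex.rs`.
extern crate futures;
extern crate qutex;

use futures::Future;
use qutex::QrwLock;
use std::thread;

fn main() {
    let thread_count = 20;
    let mut threads = Vec::with_capacity(thread_count * 2);

    // Create a `QrwLock` protecting a starting value of zero.
    let lock = QrwLock::new(0usize);

    for _ in 0..thread_count {
        // `write()` returns a `FutureWriteGuard` which resolves to a
        // `WriteGuard`; the lock is released when the guard drops.
        let future_write = lock.clone().write().map(|mut guard| *guard += 1);
        threads.push(thread::spawn(|| {
            future_write.wait().unwrap();
        }));

        // `read()` returns a `FutureReadGuard`; multiple `ReadGuard`s may be
        // held concurrently.
        let future_read = lock.clone().read().map(|guard| {
            let _observed = *guard;
        });
        threads.push(thread::spawn(|| {
            future_read.wait().unwrap();
        }));
    }

    for thread in threads {
        thread.join().unwrap();
    }

    let guard = lock.read().wait().unwrap();
    assert_eq!(*guard, thread_count);
    println!("QrwLock final value: {}", *guard);
}
```

As in the `Qutex` example, each guard unlocks the `QrwLock` when dropped, so the final read observes every writer's increment.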