├── .gitignore
├── benchmarks
│   ├── src
│   │   ├── lib.rs
│   │   ├── rwlock
│   │   │   ├── mod.rs
│   │   │   ├── smol.rs
│   │   │   ├── tokio.rs
│   │   │   ├── fast_async_mutex.rs
│   │   │   └── fast_async_mutex_ordered.rs
│   │   └── mutex
│   │       ├── mod.rs
│   │       ├── smol.rs
│   │       ├── futures.rs
│   │       ├── tokio.rs
│   │       ├── fast_async_mutex.rs
│   │       └── fast_async_mutex_ordered.rs
│   └── Cargo.toml
├── Cargo.toml
├── .github
│   └── workflows
│       └── rust.yml
├── LICENSE-MIT
├── src
│   ├── lib.rs
│   ├── inner.rs
│   ├── utils.rs
│   ├── mutex.rs
│   ├── mutex_ordered.rs
│   ├── rwlock.rs
│   └── rwlock_ordered.rs
├── README.md
└── LICENSE-APACHE

/.gitignore:
--------------------------------------------------------------------------------
 1 | /target
 2 | Cargo.lock
 3 | .idea
 4 | benchmarks/target
--------------------------------------------------------------------------------
/benchmarks/src/lib.rs:
--------------------------------------------------------------------------------
 1 | #![feature(test)]
 2 | 
 3 | extern crate test;
 4 | mod mutex;
 5 | mod rwlock;
 6 | 
--------------------------------------------------------------------------------
/benchmarks/src/rwlock/mod.rs:
--------------------------------------------------------------------------------
 1 | mod fast_async_mutex;
 2 | mod fast_async_mutex_ordered;
 3 | mod smol;
 4 | mod tokio;
 5 | 
--------------------------------------------------------------------------------
/benchmarks/src/mutex/mod.rs:
--------------------------------------------------------------------------------
 1 | mod fast_async_mutex;
 2 | mod fast_async_mutex_ordered;
 3 | mod futures;
 4 | mod smol;
 5 | mod tokio;
 6 | 
--------------------------------------------------------------------------------
/benchmarks/Cargo.toml:
--------------------------------------------------------------------------------
 1 | [package]
 2 | name = "benchmarks"
 3 | version = "0.1.0"
 4 | authors = ["Mnwa "]
 5 | edition = "2018"
 6 | 
 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 8 | 
 9 | [dev-dependencies]
10 | smol = "1"
11 | futures = "0.3"
12 | tokio = { version = "0.3", features = ["full"] }
13 | fast-async-mutex = { version = "0.6", path = "../" }
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
 1 | [package]
 2 | name = "fast-async-mutex"
 3 | version = "0.6.7"
 4 | authors = ["Mnwa "]
 5 | edition = "2018"
 6 | description = "A library providing asynchronous locking mechanisms (Mutex, RwLock, OrderedMutex and OrderedRwLock)"
 7 | license = "Apache-2.0/MIT"
 8 | keywords = ["mutex", "lock", "thread", "spin", "concurrency"]
 9 | categories = ["concurrency"]
10 | readme = "README.md"
11 | documentation = "https://docs.rs/fast-async-mutex/"
12 | repository = "https://github.com/Mnwa/fast-async-mutex"
13 | 
14 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
15 | 
16 | [dependencies]
17 | 
18 | [dev-dependencies]
19 | tokio = { version = "0.3", features = ["full"] }
20 | futures = "0.3"
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
 1 | name: build
 2 | 
 3 | on:
 4 |   push:
 5 |     branches: [ master ]
 6 |   pull_request:
 7 |     branches: [ master ]
 8 | 
 9 | jobs:
10 |   build:
11 |     runs-on: ubuntu-latest
12 |     steps:
13 |       - uses: actions/checkout@v2
14 |       - name: Build
15 |         run: cargo build --verbose
16 | 
17 |   test:
18 |     runs-on: ubuntu-latest
19 |
steps: 20 | - uses: actions/checkout@v2 21 | - name: Run tests 22 | run: cargo test 23 | 24 | 25 | codeStyle: 26 | runs-on: ubuntu-latest 27 | steps: 28 | - uses: actions/checkout@v2 29 | - name: Run cargo fmt 30 | run: cargo fmt --all -- --check 31 | 32 | clippy: 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v2 36 | - run: rustup component add clippy 37 | - uses: actions-rs/clippy-check@v1 38 | with: 39 | token: ${{ secrets.GITHUB_TOKEN }} 40 | args: --all-features 41 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Mikhail Panfilov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /benchmarks/src/mutex/smol.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use smol::lock::Mutex; 4 | use std::sync::Arc; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| Mutex::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_without_waiting(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(Mutex::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.lock().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }) 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_without_waiting(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = Mutex::new(0); 42 | 43 | for _ in 0..num { 44 | runtime.block_on(async { 45 | let mut lock = mutex.lock().await; 46 | *lock += 1; 47 | }) 48 | } 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /benchmarks/src/mutex/futures.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use futures::lock::Mutex; 4 | use std::sync::Arc; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| Mutex::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_without_waiting(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(Mutex::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.lock().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }) 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_without_waiting(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = Mutex::new(0); 42 | 43 | for _ in 0..num { 44 | runtime.block_on(async { 45 | let mut lock = mutex.lock().await; 46 | *lock += 1; 47 | }) 48 | } 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /benchmarks/src/mutex/tokio.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use std::sync::Arc; 4 | use test::Bencher; 5 | use tokio::sync::Mutex; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| Mutex::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_without_waiting(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(Mutex::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.lock().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }) 32 | } 
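    // Added note: `step_by_step_without_waiting` below acquires the lock 100 times
    // sequentially on a current-thread runtime, so it measures uncontended lock/unlock
    // cost, while `concurrency_without_waiting` above measures 100 spawned tasks
    // sharing one mutex on a multi-threaded runtime.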
33 | 34 | #[bench] 35 | fn step_by_step_without_waiting(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = Mutex::new(0); 42 | 43 | for _ in 0..num { 44 | runtime.block_on(async { 45 | let mut lock = mutex.lock().await; 46 | *lock += 1; 47 | }) 48 | } 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /benchmarks/src/mutex/fast_async_mutex.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use fast_async_mutex::mutex::Mutex; 4 | use std::sync::Arc; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| Mutex::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_without_waiting(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(Mutex::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.lock().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }) 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_without_waiting(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = Mutex::new(0); 42 | 43 | for _ in 0..num { 44 | runtime.block_on(async { 45 | let mut lock = mutex.lock().await; 46 | *lock += 1; 47 | }) 48 | } 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! `fast_async_mutex` it is a lib which provide asynchronous locking mechanisms, which used spinlock algorithm. 2 | //! It's maybe very efficient because when mutex tries to acquire data unsuccessfully, these returning control to an async runtime back. 3 | //! This lib built only on atomics and don't use others std synchronous data structures, which make this lib so fast. 4 | 5 | /// The simple Mutex, which will provide unique access to you data between multiple threads/futures. 6 | pub mod mutex; 7 | 8 | /// The Ordered Mutex has its mechanism of locking order when you have concurrent access to data. 9 | /// It will work well when you needed step by step data locking like sending UDP packages in a specific order. 10 | pub mod mutex_ordered; 11 | 12 | /// The RW Lock mechanism accepts you get concurrent shared access to your data without waiting. 13 | /// And get unique access with locks like a Mutex. 14 | pub mod rwlock; 15 | 16 | /// The RW Lock mechanism accepts you get shared access to your data without locking. 17 | /// The Ordered RW Lock will be locking all reads, which starting after write and unlocking them only when write will realize. 18 | /// It may be slow down the reads speed, but decrease time to write on systems, where it is critical. 19 | /// 20 | /// **BUT RW Lock has some limitations. You should avoid acquiring the second reading before realizing first inside the one future. 
21 | /// Because it can happen that between your readings a write from another thread will acquire the mutex, and you will get a deadlock.** 22 | pub mod rwlock_ordered; 23 | 24 | pub(crate) mod inner; 25 | pub(crate) mod utils; 26 | -------------------------------------------------------------------------------- /benchmarks/src/mutex/fast_async_mutex_ordered.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use fast_async_mutex::mutex_ordered::OrderedMutex; 4 | use std::sync::Arc; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| OrderedMutex::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_without_waiting(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(OrderedMutex::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.lock().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }) 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_without_waiting(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = OrderedMutex::new(0); 42 | 43 | for _ in 0..num { 44 | runtime.block_on(async { 45 | let mut lock = mutex.lock().await; 46 | *lock += 1; 47 | }) 48 | } 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/inner.rs: -------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 3 | use std::task::Waker; 4 | 5 | #[derive(Debug)] 6 | pub(crate) struct Inner { 7 | is_acquired: AtomicBool, 8 | pub(crate) data: UnsafeCell, 9 | } 10 | 11 | impl Inner { 12 | #[inline] 13 | pub const fn new(data: T) -> Inner { 14 | Inner { 15 | is_acquired: AtomicBool::new(false), 16 | data: UnsafeCell::new(data), 17 | } 18 | } 19 | } 20 | 21 | impl Inner { 22 | #[inline] 23 | pub(crate) fn unlock(&self) { 24 | self.is_acquired.store(false, Ordering::Release); 25 | } 26 | 27 | #[inline] 28 | pub(crate) fn store_waker(&self, waker: &Waker) { 29 | waker.wake_by_ref(); 30 | } 31 | 32 | #[inline] 33 | pub(crate) fn try_acquire(&self) -> bool { 34 | self.is_acquired 35 | .compare_exchange_weak(false, true, Ordering::AcqRel, Ordering::Relaxed) 36 | .is_ok() 37 | } 38 | } 39 | 40 | #[derive(Debug)] 41 | pub(crate) struct OrderedInner { 42 | pub(crate) state: AtomicUsize, 43 | pub(crate) current: AtomicUsize, 44 | pub(crate) data: UnsafeCell, 45 | } 46 | 47 | impl OrderedInner { 48 | #[inline] 49 | pub const fn new(data: T) -> OrderedInner { 50 | OrderedInner { 51 | state: AtomicUsize::new(0), 52 | current: AtomicUsize::new(0), 53 | data: UnsafeCell::new(data), 54 | } 55 | } 56 | } 57 | 58 | impl OrderedInner { 59 | #[inline] 60 | pub(crate) fn generate_id(&self) -> usize { 61 | self.state.fetch_add(1, Ordering::Relaxed) 62 | } 63 | 64 | #[inline] 65 | pub(crate) fn unlock(&self) { 66 | self.current.fetch_add(1, Ordering::Release); 67 | } 68 | 69 | #[inline] 70 | pub(crate) fn store_waker(&self, waker: &Waker) { 71 | waker.wake_by_ref(); 72 | } 73 | 74 | #[inline] 75 | pub(crate) fn try_acquire(&self, id: usize) -> bool { 76 | id == 
self.current.load(Ordering::Acquire) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /benchmarks/src/rwlock/smol.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use smol::lock::RwLock; 4 | use std::sync::Arc; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| RwLock::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_write(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(RwLock::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.write().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }); 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_writing(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = RwLock::new(0); 42 | for _ in 0..num { 43 | runtime.block_on(async { 44 | let mut lock = mutex.write().await; 45 | *lock += 1; 46 | }) 47 | } 48 | }); 49 | } 50 | 51 | #[bench] 52 | fn concurrency_read(b: &mut Bencher) { 53 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 54 | b.iter(|| { 55 | let num = 100; 56 | let mutex = Arc::new(RwLock::new(0)); 57 | let ths: Vec<_> = (0..num) 58 | .map(|_| { 59 | let mutex = mutex.clone(); 60 | runtime.spawn(async move { 61 | let _lock = mutex.read().await; 62 | }) 63 | }) 64 | .collect(); 65 | 66 | for thread in ths { 67 | runtime.block_on(thread).unwrap(); 68 | } 69 | }); 70 | } 71 | 72 | #[bench] 73 | fn step_by_step_read(b: &mut Bencher) { 74 | let runtime = tokio::runtime::Builder::new_current_thread() 75 | .build() 76 | .unwrap(); 77 | b.iter(|| { 78 | let num = 100; 79 | let mutex = RwLock::new(0); 80 | for _ in 0..num { 81 | runtime.block_on(async { 82 | let _lock = mutex.read().await; 83 | }) 84 | } 85 | }); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /benchmarks/src/rwlock/tokio.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use std::sync::Arc; 4 | use test::Bencher; 5 | use tokio::sync::RwLock; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| RwLock::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_write(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(RwLock::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.write().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }); 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_writing(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = RwLock::new(0); 42 | for _ in 0..num { 43 | runtime.block_on(async { 44 | let mut lock = mutex.write().await; 45 | *lock += 1; 46 | }) 47 | } 48 | }); 49 | } 50 | 51 | #[bench] 52 | fn concurrency_read(b: &mut Bencher) { 
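        // Added note: this bench spawns 100 tasks that each take a read guard on a
        // multi-threaded runtime; read guards are shared, so it measures acquisition
        // overhead rather than mutual exclusion.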
53 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 54 | b.iter(|| { 55 | let num = 100; 56 | let mutex = Arc::new(RwLock::new(0)); 57 | let ths: Vec<_> = (0..num) 58 | .map(|_| { 59 | let mutex = mutex.clone(); 60 | runtime.spawn(async move { 61 | let _lock = mutex.read().await; 62 | }) 63 | }) 64 | .collect(); 65 | 66 | for thread in ths { 67 | runtime.block_on(thread).unwrap(); 68 | } 69 | }); 70 | } 71 | 72 | #[bench] 73 | fn step_by_step_read(b: &mut Bencher) { 74 | let runtime = tokio::runtime::Builder::new_current_thread() 75 | .build() 76 | .unwrap(); 77 | b.iter(|| { 78 | let num = 100; 79 | let mutex = RwLock::new(0); 80 | for _ in 0..num { 81 | runtime.block_on(async { 82 | let _lock = mutex.read().await; 83 | }) 84 | } 85 | }); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /benchmarks/src/rwlock/fast_async_mutex.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use fast_async_mutex::rwlock::RwLock; 4 | use std::sync::Arc; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| RwLock::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_write(b: &mut Bencher) { 14 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(RwLock::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.write().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }); 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_writing(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = RwLock::new(0); 42 | for _ in 0..num { 43 | runtime.block_on(async { 44 | let mut lock = mutex.write().await; 45 | *lock += 1; 46 | }) 47 | } 48 | }); 49 | } 50 | 51 | #[bench] 52 | fn concurrency_read(b: &mut Bencher) { 53 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 54 | b.iter(|| { 55 | let num = 100; 56 | let mutex = Arc::new(RwLock::new(0)); 57 | let ths: Vec<_> = (0..num) 58 | .map(|_| { 59 | let mutex = mutex.clone(); 60 | runtime.spawn(async move { 61 | let _lock = mutex.read().await; 62 | }) 63 | }) 64 | .collect(); 65 | 66 | for thread in ths { 67 | runtime.block_on(thread).unwrap(); 68 | } 69 | }); 70 | } 71 | 72 | #[bench] 73 | fn step_by_step_read(b: &mut Bencher) { 74 | let runtime = tokio::runtime::Builder::new_current_thread() 75 | .build() 76 | .unwrap(); 77 | b.iter(|| { 78 | let num = 100; 79 | let mutex = RwLock::new(0); 80 | for _ in 0..num { 81 | runtime.block_on(async { 82 | let _lock = mutex.read().await; 83 | }) 84 | } 85 | }); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /benchmarks/src/rwlock/fast_async_mutex_ordered.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use fast_async_mutex::rwlock_ordered::OrderedRwLock; 4 | use std::sync::Arc; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn create(b: &mut Bencher) { 9 | b.iter(|| OrderedRwLock::new(())); 10 | } 11 | 12 | #[bench] 13 | fn concurrency_write(b: &mut Bencher) { 14 | let runtime = 
tokio::runtime::Builder::new_multi_thread().build().unwrap(); 15 | b.iter(|| { 16 | let num = 100; 17 | let mutex = Arc::new(OrderedRwLock::new(0)); 18 | let ths: Vec<_> = (0..num) 19 | .map(|_| { 20 | let mutex = mutex.clone(); 21 | runtime.spawn(async move { 22 | let mut lock = mutex.write().await; 23 | *lock += 1; 24 | }) 25 | }) 26 | .collect(); 27 | 28 | for thread in ths { 29 | runtime.block_on(thread).unwrap(); 30 | } 31 | }); 32 | } 33 | 34 | #[bench] 35 | fn step_by_step_writing(b: &mut Bencher) { 36 | let runtime = tokio::runtime::Builder::new_current_thread() 37 | .build() 38 | .unwrap(); 39 | b.iter(|| { 40 | let num = 100; 41 | let mutex = OrderedRwLock::new(0); 42 | for _ in 0..num { 43 | runtime.block_on(async { 44 | let mut lock = mutex.write().await; 45 | *lock += 1; 46 | }) 47 | } 48 | }); 49 | } 50 | 51 | #[bench] 52 | fn concurrency_read(b: &mut Bencher) { 53 | let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap(); 54 | b.iter(|| { 55 | let num = 100; 56 | let mutex = Arc::new(OrderedRwLock::new(0)); 57 | let ths: Vec<_> = (0..num) 58 | .map(|_| { 59 | let mutex = mutex.clone(); 60 | runtime.spawn(async move { 61 | let _lock = mutex.read().await; 62 | }) 63 | }) 64 | .collect(); 65 | 66 | for thread in ths { 67 | runtime.block_on(thread).unwrap(); 68 | } 69 | }); 70 | } 71 | 72 | #[bench] 73 | fn step_by_step_read(b: &mut Bencher) { 74 | let runtime = tokio::runtime::Builder::new_current_thread() 75 | .build() 76 | .unwrap(); 77 | b.iter(|| { 78 | let num = 100; 79 | let mutex = OrderedRwLock::new(0); 80 | for _ in 0..num { 81 | runtime.block_on(async { 82 | let _lock = mutex.read().await; 83 | }) 84 | } 85 | }); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod deref { 3 | #[macro_export] 4 | macro_rules! impl_deref_mut { 5 | ($struct_name:ident) => { 6 | $crate::impl_deref!($struct_name); 7 | impl std::ops::DerefMut for $struct_name { 8 | fn deref_mut(&mut self) -> &mut Self::Target { 9 | unsafe { &mut *self.mutex.inner.data.get() } 10 | } 11 | } 12 | }; 13 | ($struct_name:ident, $lifetime:lifetime) => { 14 | $crate::impl_deref!($struct_name, $lifetime); 15 | impl<$lifetime, T: ?Sized> std::ops::DerefMut for $struct_name<$lifetime, T> { 16 | fn deref_mut(&mut self) -> &mut Self::Target { 17 | unsafe { &mut *self.mutex.inner.data.get() } 18 | } 19 | } 20 | }; 21 | } 22 | 23 | #[macro_export] 24 | macro_rules! impl_deref { 25 | ($struct_name:ident) => { 26 | impl std::ops::Deref for $struct_name { 27 | type Target = T; 28 | 29 | fn deref(&self) -> &Self::Target { 30 | unsafe { &*self.mutex.inner.data.get() } 31 | } 32 | } 33 | }; 34 | ($struct_name:ident, $lifetime:lifetime) => { 35 | impl<$lifetime, T: ?Sized> std::ops::Deref for $struct_name<$lifetime, T> { 36 | type Target = T; 37 | 38 | fn deref(&self) -> &Self::Target { 39 | unsafe { &*self.mutex.inner.data.get() } 40 | } 41 | } 42 | }; 43 | } 44 | } 45 | 46 | #[macro_use] 47 | mod drop { 48 | #[macro_export] 49 | macro_rules! 
impl_drop_guard { 50 | ($struct_name:ident, $unlock_fn:ident) => { 51 | impl Drop for $struct_name { 52 | fn drop(&mut self) { 53 | self.mutex.inner.$unlock_fn() 54 | } 55 | } 56 | }; 57 | ($struct_name:ident, $lifetime:lifetime, $unlock_fn:ident) => { 58 | impl<$lifetime, T: ?Sized> Drop for $struct_name<$lifetime, T> { 59 | fn drop(&mut self) { 60 | self.mutex.inner.$unlock_fn() 61 | } 62 | } 63 | }; 64 | } 65 | #[macro_export] 66 | macro_rules! impl_drop_guard_self { 67 | ($struct_name:ident, $unlock_fn:ident) => { 68 | impl Drop for $struct_name { 69 | fn drop(&mut self) { 70 | self.mutex.$unlock_fn() 71 | } 72 | } 73 | }; 74 | ($struct_name:ident, $lifetime:lifetime, $unlock_fn:ident) => { 75 | impl<$lifetime, T: ?Sized> Drop for $struct_name<$lifetime, T> { 76 | fn drop(&mut self) { 77 | self.mutex.$unlock_fn() 78 | } 79 | } 80 | }; 81 | } 82 | 83 | #[macro_export] 84 | macro_rules! impl_drop_guard_future { 85 | ($struct_name:ident, $unlock_fn:ident) => { 86 | impl Drop for $struct_name { 87 | fn drop(&mut self) { 88 | if !self.is_realized { 89 | self.mutex.inner.$unlock_fn() 90 | } 91 | } 92 | } 93 | }; 94 | ($struct_name:ident, $lifetime:lifetime, $unlock_fn:ident) => { 95 | impl<$lifetime, T: ?Sized> Drop for $struct_name<$lifetime, T> { 96 | fn drop(&mut self) { 97 | if !self.is_realized { 98 | self.mutex.inner.$unlock_fn() 99 | } 100 | } 101 | } 102 | }; 103 | } 104 | } 105 | 106 | #[macro_use] 107 | mod sync { 108 | #[macro_export] 109 | macro_rules! impl_send_sync_rwlock { 110 | ($mutex_name:ident, $read_guard:ident, $read_guard_owned:ident, $write_guard:ident, $write_guard_owned:ident) => { 111 | unsafe impl Send for $mutex_name where T: Send + ?Sized {} 112 | unsafe impl Sync for $mutex_name where T: Send + Sync + ?Sized {} 113 | 114 | unsafe impl Send for $read_guard<'_, T> where T: ?Sized + Send {} 115 | unsafe impl Sync for $read_guard<'_, T> where T: Send + Sync + ?Sized {} 116 | 117 | unsafe impl Send for $read_guard_owned where T: ?Sized + Send {} 118 | unsafe impl Sync for $read_guard_owned where T: Send + Sync + ?Sized {} 119 | 120 | unsafe impl Send for $write_guard<'_, T> where T: ?Sized + Send {} 121 | unsafe impl Sync for $write_guard<'_, T> where T: Send + Sync + ?Sized {} 122 | 123 | unsafe impl Send for $write_guard_owned where T: ?Sized + Send {} 124 | unsafe impl Sync for $write_guard_owned where T: Send + Sync + ?Sized {} 125 | }; 126 | } 127 | 128 | #[macro_export] 129 | macro_rules! 
impl_send_sync_mutex { 130 | ($mutex_name:ident, $mutex_guard:ident, $mutex_guard_owned:ident) => { 131 | unsafe impl Send for $mutex_name where T: Send + ?Sized {} 132 | unsafe impl Sync for $mutex_name where T: Send + Sync + ?Sized {} 133 | 134 | unsafe impl Send for $mutex_guard<'_, T> where T: ?Sized + Send {} 135 | unsafe impl Sync for $mutex_guard<'_, T> where T: Send + Sync + ?Sized {} 136 | 137 | unsafe impl Send for $mutex_guard_owned where T: ?Sized + Send {} 138 | unsafe impl Sync for $mutex_guard_owned where T: Send + Sync + ?Sized {} 139 | }; 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # fast-async-mutex 2 | [![](https://github.com/Mnwa/fast-async-mutex/workflows/build/badge.svg?branch=master)](https://github.com/Mnwa/fast-async-mutex/actions?query=workflow%3Abuild) 3 | [![](https://docs.rs/fast-async-mutex/badge.svg)](https://docs.rs/fast-async-mutex/) 4 | [![](https://img.shields.io/crates/v/fast-async-mutex.svg)](https://crates.io/crates/fast-async-mutex) 5 | [![](https://img.shields.io/crates/d/fast-async-mutex.svg)](https://crates.io/crates/fast-async-mutex) 6 | 7 | It is a lib which provide asynchronous locking mechanisms, which used spinlock algorithm. 8 | It's maybe very efficient because when mutex tries to acquire data unsuccessfully, these returning control to an async runtime back. 9 | This lib built only on atomics and don't use others std synchronous data structures, which make this lib so fast. 10 | 11 | ## Examples 12 | 13 | ```rust 14 | use fast_async_mutex::mutex::Mutex; 15 | 16 | #[tokio::main] 17 | async fn main() { 18 | let mutex = Mutex::new(10); 19 | let guard = mutex.lock().await; 20 | assert_eq!(*guard, 10); 21 | } 22 | ``` 23 | 24 | ## Benchmarks 25 | 26 | There is result of benchmarks which runs on `MacBook Pro (16-inch, 2019) 2,3 GHz 8-Core Intel Core i9 16GB RAM` 27 | Tests you can find in the [benchmarks dir](benchmarks). 28 | ``` 29 | running 35 tests 30 | test mutex::fast_async_mutex::tests::concurrency_without_waiting ... bench: 49,844 ns/iter (+/- 3,223) 31 | test mutex::fast_async_mutex::tests::create ... bench: 0 ns/iter (+/- 0) 32 | test mutex::fast_async_mutex::tests::step_by_step_without_waiting ... bench: 22,685 ns/iter (+/- 18,466) 33 | test mutex::fast_async_mutex_ordered::tests::concurrency_without_waiting ... bench: 50,173 ns/iter (+/- 1,163) 34 | test mutex::fast_async_mutex_ordered::tests::create ... bench: 0 ns/iter (+/- 0) 35 | test mutex::fast_async_mutex_ordered::tests::step_by_step_without_waiting ... bench: 22,007 ns/iter (+/- 2,974) 36 | test mutex::futures::tests::concurrency_without_waiting ... bench: 76,535 ns/iter (+/- 12,181) 37 | test mutex::futures::tests::create ... bench: 92 ns/iter (+/- 17) 38 | test mutex::futures::tests::step_by_step_without_waiting ... bench: 23,109 ns/iter (+/- 7,627) 39 | test mutex::smol::tests::concurrency_without_waiting ... bench: 66,312 ns/iter (+/- 4,924) 40 | test mutex::smol::tests::create ... bench: 0 ns/iter (+/- 0) 41 | test mutex::smol::tests::step_by_step_without_waiting ... bench: 28,466 ns/iter (+/- 3,385) 42 | test mutex::tokio::tests::concurrency_without_waiting ... bench: 60,833 ns/iter (+/- 11,640) 43 | test mutex::tokio::tests::create ... bench: 6 ns/iter (+/- 3) 44 | test mutex::tokio::tests::step_by_step_without_waiting ... 
bench: 31,924 ns/iter (+/- 9,327) 45 | test rwlock::fast_async_mutex::tests::concurrency_read ... bench: 53,613 ns/iter (+/- 4,125) 46 | test rwlock::fast_async_mutex::tests::concurrency_write ... bench: 50,652 ns/iter (+/- 1,181) 47 | test rwlock::fast_async_mutex::tests::create ... bench: 0 ns/iter (+/- 0) 48 | test rwlock::fast_async_mutex::tests::step_by_step_read ... bench: 23,161 ns/iter (+/- 5,225) 49 | test rwlock::fast_async_mutex::tests::step_by_step_writing ... bench: 23,330 ns/iter (+/- 4,819) 50 | test rwlock::fast_async_mutex_ordered::tests::concurrency_read ... bench: 50,208 ns/iter (+/- 896) 51 | test rwlock::fast_async_mutex_ordered::tests::concurrency_write ... bench: 50,227 ns/iter (+/- 1,984) 52 | test rwlock::fast_async_mutex_ordered::tests::create ... bench: 0 ns/iter (+/- 0) 53 | test rwlock::fast_async_mutex_ordered::tests::step_by_step_read ... bench: 23,059 ns/iter (+/- 2,393) 54 | test rwlock::fast_async_mutex_ordered::tests::step_by_step_writing ... bench: 22,074 ns/iter (+/- 5,107) 55 | test rwlock::smol::tests::concurrency_read ... bench: 55,767 ns/iter (+/- 1,843) 56 | test rwlock::smol::tests::concurrency_write ... bench: 85,189 ns/iter (+/- 2,852) 57 | test rwlock::smol::tests::create ... bench: 1 ns/iter (+/- 0) 58 | test rwlock::smol::tests::step_by_step_read ... bench: 22,644 ns/iter (+/- 1,688) 59 | test rwlock::smol::tests::step_by_step_writing ... bench: 25,769 ns/iter (+/- 2,010) 60 | test rwlock::tokio::tests::concurrency_read ... bench: 52,960 ns/iter (+/- 939) 61 | test rwlock::tokio::tests::concurrency_write ... bench: 60,748 ns/iter (+/- 4,604) 62 | test rwlock::tokio::tests::create ... bench: 5 ns/iter (+/- 1) 63 | test rwlock::tokio::tests::step_by_step_read ... bench: 31,437 ns/iter (+/- 6,948) 64 | test rwlock::tokio::tests::step_by_step_writing ... bench: 29,766 ns/iter (+/- 8,081) 65 | 66 | test result: ok. 0 passed; 0 failed; 0 ignored; 35 measured; 0 filtered out 67 | ``` 68 | 69 | ## License 70 | 71 | Licensed under either of 72 | 73 | * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 74 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 75 | 76 | at your option. 77 | 78 | #### Contribution 79 | 80 | Unless you explicitly state otherwise, any contribution intentionally submitted 81 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 82 | dual licensed as above, without any additional terms or conditions. 83 | -------------------------------------------------------------------------------- /src/mutex.rs: -------------------------------------------------------------------------------- 1 | use crate::inner::Inner; 2 | use std::fmt::Debug; 3 | use std::future::Future; 4 | use std::pin::Pin; 5 | use std::sync::Arc; 6 | use std::task::{Context, Poll}; 7 | 8 | /// The simple Mutex, which will provide unique access to you data between multiple threads/futures. 9 | #[derive(Debug)] 10 | pub struct Mutex { 11 | inner: Inner, 12 | } 13 | 14 | impl Mutex { 15 | /// Create a new `Mutex` 16 | #[inline] 17 | pub const fn new(data: T) -> Mutex { 18 | Mutex { 19 | inner: Inner::new(data), 20 | } 21 | } 22 | } 23 | 24 | impl Mutex { 25 | /// Acquires the mutex. 26 | /// 27 | /// Returns a guard that releases the mutex and wake the next locker when dropped. 
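/// (Added note, inferred from the implementation: if the mutex is already held, the returned
/// future yields `Poll::Pending` and immediately wakes itself, so the acquisition is retried
/// on the next poll of the task; this is the spinlock-over-the-async-runtime behaviour the
/// crate docs describe.)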
28 | /// 29 | /// # Examples 30 | /// 31 | /// ``` 32 | /// use fast_async_mutex::mutex::Mutex; 33 | /// 34 | /// #[tokio::main] 35 | /// async fn main() { 36 | /// let mutex = Mutex::new(10); 37 | /// let guard = mutex.lock().await; 38 | /// assert_eq!(*guard, 10); 39 | /// } 40 | /// ``` 41 | #[inline] 42 | pub const fn lock(&self) -> MutexGuardFuture { 43 | MutexGuardFuture { 44 | mutex: &self, 45 | is_realized: false, 46 | } 47 | } 48 | 49 | /// Acquires the mutex. 50 | /// 51 | /// Returns a guard that releases the mutex and wake the next locker when dropped. 52 | /// `MutexOwnedGuardFuture` have a `'static` lifetime, but requires the `Arc>` type 53 | /// 54 | /// # Examples 55 | /// 56 | /// ``` 57 | /// use fast_async_mutex::mutex::Mutex; 58 | /// use std::sync::Arc; 59 | /// #[tokio::main] 60 | /// async fn main() { 61 | /// let mutex = Arc::new(Mutex::new(10)); 62 | /// let guard = mutex.lock_owned().await; 63 | /// assert_eq!(*guard, 10); 64 | /// } 65 | /// ``` 66 | #[inline] 67 | pub fn lock_owned(self: &Arc) -> MutexOwnedGuardFuture { 68 | MutexOwnedGuardFuture { 69 | mutex: self.clone(), 70 | is_realized: false, 71 | } 72 | } 73 | } 74 | 75 | /// The Simple Mutex Guard 76 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally borrows the Mutex, so the mutex will not be dropped while a guard exists. 77 | /// The lock is automatically released and waked the next locker whenever the guard is dropped, at which point lock will succeed yet again. 78 | #[derive(Debug)] 79 | pub struct MutexGuard<'a, T: ?Sized> { 80 | mutex: &'a Mutex, 81 | } 82 | 83 | #[derive(Debug)] 84 | pub struct MutexGuardFuture<'a, T: ?Sized> { 85 | mutex: &'a Mutex, 86 | is_realized: bool, 87 | } 88 | 89 | /// An owned handle to a held Mutex. 90 | /// This guard is only available from a Mutex that is wrapped in an `Arc`. It is identical to `MutexGuard`, except that rather than borrowing the `Mutex`, it clones the `Arc`, incrementing the reference count. This means that unlike `MutexGuard`, it will have the `'static` lifetime. 91 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally keeps a reference-couned pointer to the original `Mutex`, so even if the lock goes away, the guard remains valid. 92 | /// The lock is automatically released and waked the next locker whenever the guard is dropped, at which point lock will succeed yet again. 
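/// (Added example: a sketch assuming the tokio runtime already used in this crate's doc
/// tests. It shows why the owned guard is useful: it clones the `Arc` instead of borrowing
/// the `Mutex`, so it can be moved into a spawned task.)
///
/// ```
/// use fast_async_mutex::mutex::Mutex;
/// use std::sync::Arc;
///
/// #[tokio::main]
/// async fn main() {
///     let mutex = Arc::new(Mutex::new(0));
///     let guard = mutex.lock_owned().await;
///     let handle = tokio::spawn(async move {
///         // The owned guard satisfies the `'static` bound of `tokio::spawn`.
///         let mut guard = guard;
///         *guard += 1;
///     });
///     handle.await.unwrap();
///     assert_eq!(*mutex.lock().await, 1);
/// }
/// ```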
93 | #[derive(Debug)] 94 | pub struct MutexOwnedGuard { 95 | mutex: Arc>, 96 | } 97 | 98 | #[derive(Debug)] 99 | pub struct MutexOwnedGuardFuture { 100 | mutex: Arc>, 101 | is_realized: bool, 102 | } 103 | 104 | impl<'a, T: ?Sized> Future for MutexGuardFuture<'a, T> { 105 | type Output = MutexGuard<'a, T>; 106 | 107 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 108 | if self.mutex.inner.try_acquire() { 109 | self.is_realized = true; 110 | Poll::Ready(MutexGuard { mutex: self.mutex }) 111 | } else { 112 | self.mutex.inner.store_waker(cx.waker()); 113 | Poll::Pending 114 | } 115 | } 116 | } 117 | 118 | impl Future for MutexOwnedGuardFuture { 119 | type Output = MutexOwnedGuard; 120 | 121 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 122 | if self.mutex.inner.try_acquire() { 123 | self.is_realized = true; 124 | Poll::Ready(MutexOwnedGuard { 125 | mutex: self.mutex.clone(), 126 | }) 127 | } else { 128 | self.mutex.inner.store_waker(cx.waker()); 129 | Poll::Pending 130 | } 131 | } 132 | } 133 | 134 | crate::impl_send_sync_mutex!(Mutex, MutexGuard, MutexOwnedGuard); 135 | 136 | crate::impl_deref_mut!(MutexGuard, 'a); 137 | crate::impl_deref_mut!(MutexOwnedGuard); 138 | 139 | crate::impl_drop_guard!(MutexGuard, 'a, unlock); 140 | crate::impl_drop_guard!(MutexOwnedGuard, unlock); 141 | crate::impl_drop_guard_future!(MutexGuardFuture, 'a, unlock); 142 | crate::impl_drop_guard_future!(MutexOwnedGuardFuture, unlock); 143 | 144 | #[cfg(test)] 145 | mod tests { 146 | use crate::mutex::{Mutex, MutexGuard, MutexOwnedGuard}; 147 | use futures::executor::block_on; 148 | use futures::{FutureExt, StreamExt, TryStreamExt}; 149 | use std::ops::AddAssign; 150 | use std::sync::Arc; 151 | use tokio::time::{sleep, Duration}; 152 | 153 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)] 154 | async fn test_mutex() { 155 | let c = Mutex::new(0); 156 | 157 | futures::stream::iter(0..10000) 158 | .for_each_concurrent(None, |_| async { 159 | let mut co: MutexGuard = c.lock().await; 160 | *co += 1; 161 | }) 162 | .await; 163 | 164 | let co = c.lock().await; 165 | assert_eq!(*co, 10000) 166 | } 167 | 168 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)] 169 | async fn test_mutex_delay() { 170 | let expected_result = 100; 171 | let c = Mutex::new(0); 172 | 173 | futures::stream::iter(0..expected_result) 174 | .then(|i| c.lock().map(move |co| (i, co))) 175 | .for_each_concurrent(None, |(i, mut co)| async move { 176 | sleep(Duration::from_millis(expected_result - i)).await; 177 | *co += 1; 178 | }) 179 | .await; 180 | 181 | let co = c.lock().await; 182 | assert_eq!(*co, expected_result) 183 | } 184 | 185 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)] 186 | async fn test_owned_mutex() { 187 | let c = Arc::new(Mutex::new(0)); 188 | 189 | futures::stream::iter(0..10000) 190 | .for_each_concurrent(None, |_| async { 191 | let mut co: MutexOwnedGuard = c.lock_owned().await; 192 | *co += 1; 193 | }) 194 | .await; 195 | 196 | let co = c.lock_owned().await; 197 | assert_eq!(*co, 10000) 198 | } 199 | 200 | #[tokio::test] 201 | async fn test_container() { 202 | let c = Mutex::new(String::from("lol")); 203 | 204 | let mut co: MutexGuard = c.lock().await; 205 | co.add_assign("lol"); 206 | 207 | assert_eq!(*co, "lollol"); 208 | } 209 | 210 | #[tokio::test] 211 | async fn test_timeout() { 212 | let c = Mutex::new(String::from("lol")); 213 | 214 | let co: MutexGuard = c.lock().await; 215 | 216 | futures::stream::iter(0..10000i32) 217 | .then(|_| 
tokio::time::timeout(Duration::from_nanos(1), c.lock())) 218 | .try_for_each_concurrent(None, |_c| futures::future::ok(())) 219 | .await 220 | .expect_err("timout must be"); 221 | 222 | drop(co); 223 | 224 | let mut co: MutexGuard = c.lock().await; 225 | co.add_assign("lol"); 226 | 227 | assert_eq!(*co, "lollol"); 228 | } 229 | 230 | #[test] 231 | fn multithreading_test() { 232 | let num = 100; 233 | let mutex = Arc::new(Mutex::new(0)); 234 | let ths: Vec<_> = (0..num) 235 | .map(|_| { 236 | let mutex = mutex.clone(); 237 | std::thread::spawn(move || { 238 | block_on(async { 239 | let mut lock = mutex.lock().await; 240 | *lock += 1; 241 | }) 242 | }) 243 | }) 244 | .collect(); 245 | 246 | for thread in ths { 247 | thread.join().unwrap(); 248 | } 249 | 250 | block_on(async { 251 | let lock = mutex.lock().await; 252 | assert_eq!(num, *lock) 253 | }) 254 | } 255 | } 256 | -------------------------------------------------------------------------------- /src/mutex_ordered.rs: -------------------------------------------------------------------------------- 1 | use crate::inner::OrderedInner; 2 | use std::fmt::Debug; 3 | use std::future::Future; 4 | use std::pin::Pin; 5 | use std::sync::Arc; 6 | use std::task::{Context, Poll}; 7 | 8 | /// The Ordered Mutex has its mechanism of locking order when you have concurrent access to data. 9 | /// It will work well when you needed step by step data locking like sending UDP packages in a specific order. 10 | /// 11 | /// The main difference with the standard `Mutex` is ordered mutex will check an ordering of blocking. 12 | /// This way has some guaranties of mutex execution order, but it's a little bit slowly than original mutex. 13 | #[derive(Debug)] 14 | pub struct OrderedMutex { 15 | inner: OrderedInner, 16 | } 17 | 18 | impl OrderedMutex { 19 | /// Create a new `OrderedMutex` 20 | #[inline] 21 | pub const fn new(data: T) -> OrderedMutex { 22 | OrderedMutex { 23 | inner: OrderedInner::new(data), 24 | } 25 | } 26 | } 27 | 28 | impl OrderedMutex { 29 | /// Acquires the mutex. 30 | /// 31 | /// Returns a guard that releases the mutex and wake the next locker when dropped. 32 | /// 33 | /// # Examples 34 | /// 35 | /// ``` 36 | /// use fast_async_mutex::mutex_ordered::OrderedMutex; 37 | /// 38 | /// #[tokio::main] 39 | /// async fn main() { 40 | /// let mutex = OrderedMutex::new(10); 41 | /// let guard = mutex.lock().await; 42 | /// assert_eq!(*guard, 10); 43 | /// } 44 | /// ``` 45 | #[inline] 46 | pub fn lock(&self) -> OrderedMutexGuardFuture { 47 | OrderedMutexGuardFuture { 48 | mutex: &self, 49 | id: self.inner.generate_id(), 50 | is_realized: false, 51 | } 52 | } 53 | 54 | /// Acquires the mutex. 55 | /// 56 | /// Returns a guard that releases the mutex and wake the next locker when dropped. 
57 | /// `OrderedMutexOwnedGuard` have a `'static` lifetime, but requires the `Arc>` type 58 | /// 59 | /// # Examples 60 | /// 61 | /// ``` 62 | /// use fast_async_mutex::mutex_ordered::OrderedMutex; 63 | /// use std::sync::Arc; 64 | /// #[tokio::main] 65 | /// async fn main() { 66 | /// let mutex = Arc::new(OrderedMutex::new(10)); 67 | /// let guard = mutex.lock_owned().await; 68 | /// assert_eq!(*guard, 10); 69 | /// } 70 | /// ``` 71 | #[inline] 72 | pub fn lock_owned(self: &Arc) -> OrderedMutexOwnedGuardFuture { 73 | OrderedMutexOwnedGuardFuture { 74 | mutex: self.clone(), 75 | id: self.inner.generate_id(), 76 | is_realized: false, 77 | } 78 | } 79 | } 80 | 81 | /// The Simple OrderedMutex Guard 82 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally borrows the OrderedMutex, so the mutex will not be dropped while a guard exists. 83 | /// The lock is automatically released and waked the next locker whenever the guard is dropped, at which point lock will succeed yet again. 84 | #[derive(Debug)] 85 | pub struct OrderedMutexGuard<'a, T: ?Sized> { 86 | mutex: &'a OrderedMutex, 87 | } 88 | 89 | #[derive(Debug)] 90 | pub struct OrderedMutexGuardFuture<'a, T: ?Sized> { 91 | mutex: &'a OrderedMutex, 92 | id: usize, 93 | is_realized: bool, 94 | } 95 | 96 | /// An owned handle to a held OrderedMutex. 97 | /// This guard is only available from a OrderedMutex that is wrapped in an `Arc`. It is identical to `OrderedMutexGuard`, except that rather than borrowing the `OrderedMutex`, it clones the `Arc`, incrementing the reference count. This means that unlike `OrderedMutexGuard`, it will have the `'static` lifetime. 98 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally keeps a reference-couned pointer to the original `OrderedMutex`, so even if the lock goes away, the guard remains valid. 99 | /// The lock is automatically released and waked the next locker whenever the guard is dropped, at which point lock will succeed yet again. 
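/// (Added example: a sketch, not part of the original docs. The queue position is taken when
/// `lock()`/`lock_owned()` is called, so guards are granted in the order the lock futures were
/// created; awaiting them out of that order keeps the later future pending until the earlier
/// guard is released or its unfinished future is dropped.)
///
/// ```
/// use fast_async_mutex::mutex_ordered::OrderedMutex;
///
/// #[tokio::main]
/// async fn main() {
///     let mutex = OrderedMutex::new(Vec::new());
///     // Futures created in this order acquire the lock in the same order.
///     let first = mutex.lock();
///     let second = mutex.lock();
///     first.await.push(1);
///     second.await.push(2);
///     assert_eq!(*mutex.lock().await, vec![1, 2]);
/// }
/// ```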
100 | #[derive(Debug)] 101 | pub struct OrderedMutexOwnedGuard { 102 | mutex: Arc>, 103 | } 104 | 105 | #[derive(Debug)] 106 | pub struct OrderedMutexOwnedGuardFuture { 107 | mutex: Arc>, 108 | id: usize, 109 | is_realized: bool, 110 | } 111 | 112 | impl<'a, T: ?Sized> Future for OrderedMutexGuardFuture<'a, T> { 113 | type Output = OrderedMutexGuard<'a, T>; 114 | 115 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 116 | if self.mutex.inner.try_acquire(self.id) { 117 | self.is_realized = true; 118 | Poll::Ready(OrderedMutexGuard { mutex: self.mutex }) 119 | } else { 120 | self.mutex.inner.store_waker(cx.waker()); 121 | Poll::Pending 122 | } 123 | } 124 | } 125 | 126 | impl Future for OrderedMutexOwnedGuardFuture { 127 | type Output = OrderedMutexOwnedGuard; 128 | 129 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 130 | if self.mutex.inner.try_acquire(self.id) { 131 | self.is_realized = true; 132 | Poll::Ready(OrderedMutexOwnedGuard { 133 | mutex: self.mutex.clone(), 134 | }) 135 | } else { 136 | self.mutex.inner.store_waker(cx.waker()); 137 | Poll::Pending 138 | } 139 | } 140 | } 141 | 142 | crate::impl_send_sync_mutex!(OrderedMutex, OrderedMutexGuard, OrderedMutexOwnedGuard); 143 | 144 | crate::impl_deref_mut!(OrderedMutexGuard, 'a); 145 | crate::impl_deref_mut!(OrderedMutexOwnedGuard); 146 | 147 | crate::impl_drop_guard!(OrderedMutexGuard, 'a, unlock); 148 | crate::impl_drop_guard!(OrderedMutexOwnedGuard, unlock); 149 | crate::impl_drop_guard_future!(OrderedMutexGuardFuture, 'a, unlock); 150 | crate::impl_drop_guard_future!(OrderedMutexOwnedGuardFuture, unlock); 151 | 152 | #[cfg(test)] 153 | mod tests { 154 | use crate::mutex_ordered::{OrderedMutex, OrderedMutexGuard, OrderedMutexOwnedGuard}; 155 | use futures::executor::block_on; 156 | use futures::{FutureExt, StreamExt, TryStreamExt}; 157 | use std::ops::AddAssign; 158 | use std::sync::atomic::AtomicUsize; 159 | use std::sync::Arc; 160 | use tokio::time::{sleep, Duration}; 161 | 162 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)] 163 | async fn test_mutex() { 164 | let c = OrderedMutex::new(0); 165 | 166 | futures::stream::iter(0..10000) 167 | .for_each_concurrent(None, |_| async { 168 | let mut co: OrderedMutexGuard = c.lock().await; 169 | *co += 1; 170 | }) 171 | .await; 172 | 173 | let co = c.lock().await; 174 | assert_eq!(*co, 10000) 175 | } 176 | 177 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)] 178 | async fn test_mutex_delay() { 179 | let expected_result = 100; 180 | let c = OrderedMutex::new(0); 181 | 182 | futures::stream::iter(0..expected_result) 183 | .then(|i| c.lock().map(move |co| (i, co))) 184 | .for_each_concurrent(None, |(i, mut co)| async move { 185 | sleep(Duration::from_millis(expected_result - i)).await; 186 | *co += 1; 187 | }) 188 | .await; 189 | 190 | let co = c.lock().await; 191 | assert_eq!(*co, expected_result) 192 | } 193 | 194 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)] 195 | async fn test_owned_mutex() { 196 | let c = Arc::new(OrderedMutex::new(0)); 197 | 198 | futures::stream::iter(0..10000) 199 | .for_each_concurrent(None, |_| async { 200 | let mut co: OrderedMutexOwnedGuard = c.lock_owned().await; 201 | *co += 1; 202 | }) 203 | .await; 204 | 205 | let co = c.lock_owned().await; 206 | assert_eq!(*co, 10000) 207 | } 208 | 209 | #[tokio::test] 210 | async fn test_container() { 211 | let c = OrderedMutex::new(String::from("lol")); 212 | 213 | let mut co: OrderedMutexGuard = c.lock().await; 214 | co.add_assign("lol"); 
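        // Added note: `add_assign` works through the guard because it dereferences
        // mutably to the inner `String`.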
215 | 216 | assert_eq!(*co, "lollol"); 217 | } 218 | 219 | #[tokio::test] 220 | async fn test_overflow() { 221 | let mut c = OrderedMutex::new(String::from("lol")); 222 | 223 | c.inner.state = AtomicUsize::new(usize::max_value()); 224 | c.inner.current = AtomicUsize::new(usize::max_value()); 225 | 226 | let mut co: OrderedMutexGuard = c.lock().await; 227 | co.add_assign("lol"); 228 | 229 | assert_eq!(*co, "lollol"); 230 | } 231 | 232 | #[tokio::test] 233 | async fn test_timeout() { 234 | let c = OrderedMutex::new(String::from("lol")); 235 | 236 | let co: OrderedMutexGuard = c.lock().await; 237 | 238 | futures::stream::iter(0..10000i32) 239 | .then(|_| tokio::time::timeout(Duration::from_nanos(1), c.lock())) 240 | .try_for_each_concurrent(None, |_c| futures::future::ok(())) 241 | .await 242 | .expect_err("timout must be"); 243 | 244 | drop(co); 245 | 246 | let mut co: OrderedMutexGuard = c.lock().await; 247 | co.add_assign("lol"); 248 | 249 | assert_eq!(*co, "lollol"); 250 | } 251 | 252 | #[test] 253 | fn multithreading_test() { 254 | let num = 100; 255 | let mutex = Arc::new(OrderedMutex::new(0)); 256 | let ths: Vec<_> = (0..num) 257 | .map(|_| { 258 | let mutex = mutex.clone(); 259 | std::thread::spawn(move || { 260 | block_on(async { 261 | let mut lock = mutex.lock().await; 262 | *lock += 1; 263 | }) 264 | }) 265 | }) 266 | .collect(); 267 | 268 | for thread in ths { 269 | thread.join().unwrap(); 270 | } 271 | 272 | block_on(async { 273 | let lock = mutex.lock().await; 274 | assert_eq!(num, *lock) 275 | }) 276 | } 277 | } 278 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright (c) 2020 Mikhail Panfilov 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /src/rwlock.rs: -------------------------------------------------------------------------------- 1 | use crate::inner::Inner; 2 | use std::fmt::Debug; 3 | use std::future::Future; 4 | use std::pin::Pin; 5 | use std::sync::atomic::{AtomicUsize, Ordering}; 6 | use std::sync::Arc; 7 | use std::task::{Context, Poll}; 8 | 9 | /// The RW Lock mechanism accepts you get concurrent shared access to your data without waiting. 10 | /// And get unique access with locks like a Mutex. 
11 | #[derive(Debug)]
12 | pub struct RwLock<T: ?Sized> {
13 | readers: AtomicUsize,
14 | inner: Inner<T>,
15 | }
16 | 
17 | impl<T> RwLock<T> {
18 | /// Create a new `RwLock`
19 | #[inline]
20 | pub const fn new(data: T) -> RwLock<T> {
21 | RwLock {
22 | readers: AtomicUsize::new(0),
23 | inner: Inner::new(data),
24 | }
25 | }
26 | }
27 | 
28 | impl<T: ?Sized> RwLock<T> {
29 | /// Acquires the mutex for a write.
30 | ///
31 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
32 | ///
33 | /// # Examples
34 | ///
35 | /// ```
36 | /// use fast_async_mutex::rwlock::RwLock;
37 | ///
38 | /// #[tokio::main]
39 | /// async fn main() {
40 | /// let mutex = RwLock::new(10);
41 | /// let mut guard = mutex.write().await;
42 | /// *guard += 1;
43 | /// assert_eq!(*guard, 11);
44 | /// }
45 | /// ```
46 | #[inline]
47 | pub fn write(&self) -> RwLockWriteGuardFuture<'_, T> {
48 | RwLockWriteGuardFuture {
49 | mutex: &self,
50 | is_realized: false,
51 | }
52 | }
53 | 
54 | /// Acquires the mutex for a write.
55 | ///
56 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
57 | /// `RwLockWriteOwnedGuard` has a `'static` lifetime, but requires the `Arc<RwLock<T>>` type.
58 | ///
59 | /// # Examples
60 | ///
61 | /// ```
62 | /// use fast_async_mutex::rwlock::RwLock;
63 | /// use std::sync::Arc;
64 | /// #[tokio::main]
65 | /// async fn main() {
66 | /// let mutex = Arc::new(RwLock::new(10));
67 | /// let mut guard = mutex.write_owned().await;
68 | /// *guard += 1;
69 | /// assert_eq!(*guard, 11);
70 | /// }
71 | /// ```
72 | #[inline]
73 | pub fn write_owned(self: &Arc<Self>) -> RwLockWriteOwnedGuardFuture<T> {
74 | RwLockWriteOwnedGuardFuture {
75 | mutex: self.clone(),
76 | is_realized: false,
77 | }
78 | }
79 | 
80 | /// Acquires the mutex for a read.
81 | ///
82 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
83 | ///
84 | /// # Examples
85 | ///
86 | /// ```
87 | /// use fast_async_mutex::rwlock::RwLock;
88 | ///
89 | /// #[tokio::main]
90 | /// async fn main() {
91 | /// let mutex = RwLock::new(10);
92 | /// let guard = mutex.read().await;
93 | /// let guard2 = mutex.read().await;
94 | /// assert_eq!(*guard, *guard2);
95 | /// }
96 | /// ```
97 | #[inline]
98 | pub fn read(&self) -> RwLockReadGuardFuture<'_, T> {
99 | RwLockReadGuardFuture {
100 | mutex: &self,
101 | is_realized: false,
102 | }
103 | }
104 | 
105 | /// Acquires the mutex for a read.
106 | ///
107 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
108 | /// `RwLockReadOwnedGuard` has a `'static` lifetime, but requires the `Arc<RwLock<T>>` type.
109 | ///
110 | /// # Examples
111 | ///
112 | /// ```
113 | /// use fast_async_mutex::rwlock::RwLock;
114 | /// use std::sync::Arc;
115 | /// #[tokio::main]
116 | /// async fn main() {
117 | /// let mutex = Arc::new(RwLock::new(10));
118 | /// let guard = mutex.read_owned().await;
119 | /// let guard2 = mutex.read_owned().await;
120 | /// assert_eq!(*guard, *guard2);
121 | /// }
122 | /// ```
123 | #[inline]
124 | pub fn read_owned(self: &Arc<Self>) -> RwLockReadOwnedGuardFuture<T> {
125 | RwLockReadOwnedGuardFuture {
126 | mutex: self.clone(),
127 | is_realized: false,
128 | }
129 | }
130 | 
131 | #[inline]
132 | fn unlock_reader(&self) {
133 | if self.readers.fetch_sub(1, Ordering::Release) == 1 {
134 | self.inner.unlock()
135 | }
136 | }
137 | 
138 | #[inline]
139 | fn add_reader(&self) {
140 | self.readers.fetch_add(1, Ordering::Release);
141 | }
142 | 
143 | #[inline]
144 | fn try_acquire_reader(&self) -> bool {
145 | self.readers.load(Ordering::Acquire) > 0 || self.inner.try_acquire()
146 | }
147 | }
148 | 
149 | /// The Simple Write Lock Guard
150 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally borrows the `RwLock`, so the mutex will not be dropped while a guard exists.
151 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
152 | #[derive(Debug)]
153 | pub struct RwLockWriteGuard<'a, T: ?Sized> {
154 | mutex: &'a RwLock<T>,
155 | }
156 | 
157 | #[derive(Debug)]
158 | pub struct RwLockWriteGuardFuture<'a, T: ?Sized> {
159 | mutex: &'a RwLock<T>,
160 | is_realized: bool,
161 | }
162 | 
163 | /// An owned handle to a held `RwLock`.
164 | /// This guard is only available from a `RwLock` that is wrapped in an `Arc`. It is identical to `RwLockWriteGuard`, except that rather than borrowing the `RwLock`, it clones the `Arc`, incrementing the reference count. This means that unlike `RwLockWriteGuard`, it has the `'static` lifetime.
165 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally keeps a reference-counted pointer to the original `RwLock`, so even if the lock goes away, the guard remains valid.
166 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
167 | #[derive(Debug)]
168 | pub struct RwLockWriteOwnedGuard<T: ?Sized> {
169 | mutex: Arc<RwLock<T>>,
170 | }
171 | 
172 | #[derive(Debug)]
173 | pub struct RwLockWriteOwnedGuardFuture<T: ?Sized> {
174 | mutex: Arc<RwLock<T>>,
175 | is_realized: bool,
176 | }
177 | 
178 | /// The Simple Read Lock Guard
179 | /// As long as you have this guard, you have shared access to the underlying `T`. The guard internally borrows the `RwLock`, so the mutex will not be dropped while a guard exists.
180 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
181 | #[derive(Debug)]
182 | pub struct RwLockReadGuard<'a, T: ?Sized> {
183 | mutex: &'a RwLock<T>,
184 | }
185 | 
186 | #[derive(Debug)]
187 | pub struct RwLockReadGuardFuture<'a, T: ?Sized> {
188 | mutex: &'a RwLock<T>,
189 | is_realized: bool,
190 | }
191 | 
192 | /// An owned handle to a held `RwLock`.
193 | /// This guard is only available from a `RwLock` that is wrapped in an `Arc`. It is identical to `RwLockReadGuard`, except that rather than borrowing the `RwLock`, it clones the `Arc`, incrementing the reference count. This means that unlike `RwLockReadGuard`, it has the `'static` lifetime.
194 | /// As long as you have this guard, you have shared access to the underlying `T`. The guard internally keeps a reference-counted pointer to the original `RwLock`, so even if the lock goes away, the guard remains valid.
195 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
196 | #[derive(Debug)]
197 | pub struct RwLockReadOwnedGuard<T: ?Sized> {
198 | mutex: Arc<RwLock<T>>,
199 | }
200 | 
201 | #[derive(Debug)]
202 | pub struct RwLockReadOwnedGuardFuture<T: ?Sized> {
203 | mutex: Arc<RwLock<T>>,
204 | is_realized: bool,
205 | }
206 | 
207 | impl<'a, T: ?Sized> Future for RwLockWriteGuardFuture<'a, T> {
208 | type Output = RwLockWriteGuard<'a, T>;
209 | 
210 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
211 | if self.mutex.inner.try_acquire() {
212 | self.is_realized = true;
213 | Poll::Ready(RwLockWriteGuard { mutex: self.mutex })
214 | } else {
215 | self.mutex.inner.store_waker(cx.waker());
216 | Poll::Pending
217 | }
218 | }
219 | }
220 | 
221 | impl<T: ?Sized> Future for RwLockWriteOwnedGuardFuture<T> {
222 | type Output = RwLockWriteOwnedGuard<T>;
223 | 
224 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
225 | if self.mutex.inner.try_acquire() {
226 | self.is_realized = true;
227 | Poll::Ready(RwLockWriteOwnedGuard {
228 | mutex: self.mutex.clone(),
229 | })
230 | } else {
231 | self.mutex.inner.store_waker(cx.waker());
232 | Poll::Pending
233 | }
234 | }
235 | }
236 | 
237 | impl<'a, T: ?Sized> Future for RwLockReadGuardFuture<'a, T> {
238 | type Output = RwLockReadGuard<'a, T>;
239 | 
240 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
241 | if self.mutex.try_acquire_reader() {
242 | self.is_realized = true;
243 | self.mutex.add_reader();
244 | Poll::Ready(RwLockReadGuard { mutex: self.mutex })
245 | } else {
246 | self.mutex.inner.store_waker(cx.waker());
247 | Poll::Pending
248 | }
249 | }
250 | }
251 | 
252 | impl<T: ?Sized> Future for RwLockReadOwnedGuardFuture<T> {
253 | type Output = RwLockReadOwnedGuard<T>;
254 | 
255 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
256 | if self.mutex.try_acquire_reader() {
257 | self.is_realized = true;
258 | self.mutex.add_reader();
259 | Poll::Ready(RwLockReadOwnedGuard {
260 | mutex: self.mutex.clone(),
261 | })
262 | } else {
263 | self.mutex.inner.store_waker(cx.waker());
264 | Poll::Pending
265 | }
266 | }
267 | }
268 | 
269 | crate::impl_send_sync_rwlock!(
270 | RwLock,
271 | RwLockReadGuard,
272 | RwLockReadOwnedGuard,
273 | RwLockWriteGuard,
274 | RwLockWriteOwnedGuard
275 | );
276 | 
277 | crate::impl_deref_mut!(RwLockWriteGuard, 'a);
278 | crate::impl_deref_mut!(RwLockWriteOwnedGuard);
279 | crate::impl_deref!(RwLockReadGuard, 'a);
280 | crate::impl_deref!(RwLockReadOwnedGuard);
281 | 
282 | crate::impl_drop_guard!(RwLockWriteGuard, 'a, unlock);
283 | crate::impl_drop_guard!(RwLockWriteOwnedGuard, unlock);
284 | crate::impl_drop_guard_self!(RwLockReadGuard, 'a, unlock_reader);
285 | crate::impl_drop_guard_self!(RwLockReadOwnedGuard, unlock_reader);
286 | 
287 | crate::impl_drop_guard_future!(RwLockWriteGuardFuture, 'a, unlock);
288 | crate::impl_drop_guard_future!(RwLockWriteOwnedGuardFuture, unlock);
289 | crate::impl_drop_guard_future!(RwLockReadGuardFuture, 'a, unlock);
290 | crate::impl_drop_guard_future!(RwLockReadOwnedGuardFuture, unlock);
291 | 
292 | #[cfg(test)]
293 | mod tests {
294 | use crate::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard, RwLockWriteOwnedGuard};
295 | use futures::executor::block_on;
296 | use futures::{FutureExt, StreamExt, TryStreamExt};
297 | use std::ops::AddAssign;
298 | use std::sync::Arc;
299 | use tokio::time::{sleep, Duration};
300 | 
301 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
302 | async fn test_mutex() {
303 | let c = RwLock::new(0);
304 | 
305 | futures::stream::iter(0..10000)
306 | .for_each_concurrent(None, |_| async {
307 | let mut co: RwLockWriteGuard<i32> = c.write().await;
308 | *co += 1;
309 | })
310 | .await;
311 | 
312 | let co = c.write().await;
313 | assert_eq!(*co, 10000)
314 | }
315 | 
316 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
317 | async fn test_mutex_delay() {
318 | let expected_result = 100;
319 | let c = RwLock::new(0);
320 | 
321 | futures::stream::iter(0..expected_result)
322 | .then(|i| c.write().map(move |co| (i, co)))
323 | .for_each_concurrent(None, |(i, mut co)| async move {
324 | sleep(Duration::from_millis(expected_result - i)).await;
325 | *co += 1;
326 | })
327 | .await;
328 | 
329 | let co = c.write().await;
330 | assert_eq!(*co, expected_result)
331 | }
332 | 
333 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
334 | async fn test_owned_mutex() {
335 | let c = Arc::new(RwLock::new(0));
336 | 
337 | futures::stream::iter(0..10000)
338 | .for_each_concurrent(None, |_| async {
339 | let mut co: RwLockWriteOwnedGuard<i32> = c.write_owned().await;
340 | *co += 1;
341 | })
342 | .await;
343 | 
344 | let co = c.write_owned().await;
345 | assert_eq!(*co, 10000)
346 | }
347 | 
348 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
349 | async fn test_container() {
350 | let c = RwLock::new(String::from("lol"));
351 | 
352 | let mut co: RwLockWriteGuard<String> = c.write().await;
353 | co.add_assign("lol");
354 | 
355 | assert_eq!(*co, "lollol");
356 | }
357 | 
358 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
359 | async fn test_timeout() {
360 | let c = RwLock::new(String::from("lol"));
361 | 
362 | let co: RwLockWriteGuard<String> = c.write().await;
363 | 
364 | futures::stream::iter(0..10000i32)
365 | .then(|_| tokio::time::timeout(Duration::from_nanos(1), c.write()))
366 | .try_for_each_concurrent(None, |_c| futures::future::ok(()))
367 | .await
368 | .expect_err("timeout must occur");
369 | 
370 | drop(co);
371 | 
372 | let mut co: RwLockWriteGuard<String> = c.write().await;
373 | co.add_assign("lol");
374 | 
375 | assert_eq!(*co, "lollol");
376 | }
377 | 
378 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
379 | async fn test_concurrent_reading() {
380 | let c = RwLock::new(String::from("lol"));
381 | 
382 | let co: RwLockReadGuard<String> = c.read().await;
383 | 
384 | futures::stream::iter(0..10000i32)
385 | .then(|_| c.read())
386 | .inspect(|c| assert_eq!(*co, **c))
387 | .for_each_concurrent(None, |_c| futures::future::ready(()))
388 | .await;
389 | 
390 | assert!(matches!(
391 | tokio::time::timeout(Duration::from_millis(1), c.write()).await,
392 | Err(_)
393 | ));
394 | 
395 | let co2: RwLockReadGuard<String> = c.read().await;
396 | assert_eq!(*co, *co2);
397 | }
398 | 
399 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
400 | async fn test_concurrent_reading_writing() {
401 | let c = RwLock::new(String::from("lol"));
402 | 
403 | let co: RwLockReadGuard<String> = c.read().await;
404 | let co2: RwLockReadGuard<String> = c.read().await;
405 | assert_eq!(*co, *co2);
406 | 
407 | drop(co);
408 | drop(co2);
409 | 
410 | let mut co: RwLockWriteGuard<String> = c.write().await;
411 | 
412 | assert!(matches!(
413 | tokio::time::timeout(Duration::from_millis(1), c.read()).await,
414 | Err(_)
415 | ));
416 | 
417 | *co += "lol";
418 | 
419 | drop(co);
420 | 
421 | let co: RwLockReadGuard<String> = c.read().await;
422 | let co2: RwLockReadGuard<String> = c.read().await;
423 | assert_eq!(*co, "lollol");
424 | assert_eq!(*co, *co2);
425 | }
426 | 
427 | #[test]
428 | fn multithreading_test() {
429 | let num = 100;
430 | let mutex = Arc::new(RwLock::new(0));
431 | let ths: Vec<_> = (0..num)
432 | .map(|i| {
433 | let mutex = mutex.clone();
434 | std::thread::spawn(move || {
435 | block_on(async {
436 | if i % 2 == 0 {
437 | let mut lock = mutex.write().await;
438 | *lock += 1;
439 | drop(lock)
440 | } else {
441 | let lock1 = mutex.read().await;
442 | let lock2 = mutex.read().await;
443 | assert_eq!(*lock1, *lock2);
444 | drop(lock1);
445 | drop(lock2);
446 | }
447 | })
448 | })
449 | })
450 | .collect();
451 | 
452 | for thread in ths {
453 | thread.join().unwrap();
454 | }
455 | 
456 | block_on(async {
457 | let lock = mutex.read().await;
458 | assert_eq!(num / 2, *lock)
459 | })
460 | }
461 | }
462 | 
--------------------------------------------------------------------------------
/src/rwlock_ordered.rs:
--------------------------------------------------------------------------------
1 | use crate::inner::OrderedInner;
2 | use std::fmt::Debug;
3 | use std::future::Future;
4 | use std::pin::Pin;
5 | use std::sync::atomic::{AtomicUsize, Ordering};
6 | use std::sync::Arc;
7 | use std::task::{Context, Poll};
8 | 
9 | /// The Ordered RW Lock blocks every read that is requested after a write, and releases those reads only once that write has completed.
10 | /// This may slow reads down, but it decreases write latency on systems where that is critical.
11 | ///
12 | /// **BUT this RW Lock has a limitation: avoid acquiring a second read guard before releasing the first one inside the same future.
13 | /// A write from another task can be queued between your two reads, and you will get a deadlock.**
14 | #[derive(Debug)]
15 | pub struct OrderedRwLock<T: ?Sized> {
16 | readers: AtomicUsize,
17 | inner: OrderedInner<T>,
18 | }
19 | 
20 | impl<T> OrderedRwLock<T> {
21 | /// Create a new `OrderedRwLock`
22 | #[inline]
23 | pub const fn new(data: T) -> OrderedRwLock<T> {
24 | OrderedRwLock {
25 | readers: AtomicUsize::new(0),
26 | inner: OrderedInner::new(data),
27 | }
28 | }
29 | }
30 | 
31 | impl<T: ?Sized> OrderedRwLock<T> {
32 | /// Acquires the mutex for a write.
33 | ///
34 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
35 | ///
36 | /// # Examples
37 | ///
38 | /// ```
39 | /// use fast_async_mutex::rwlock_ordered::OrderedRwLock;
40 | ///
41 | /// #[tokio::main]
42 | /// async fn main() {
43 | /// let mutex = OrderedRwLock::new(10);
44 | /// let mut guard = mutex.write().await;
45 | /// *guard += 1;
46 | /// assert_eq!(*guard, 11);
47 | /// }
48 | /// ```
49 | #[inline]
50 | pub fn write(&self) -> OrderedRwLockWriteGuardFuture<'_, T> {
51 | OrderedRwLockWriteGuardFuture {
52 | mutex: &self,
53 | id: self.inner.generate_id(),
54 | is_realized: false,
55 | }
56 | }
57 | 
58 | /// Acquires the mutex for a write.
59 | ///
60 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
61 | /// `OrderedRwLockWriteOwnedGuard` has a `'static` lifetime, but requires the `Arc<OrderedRwLock<T>>` type.
62 | ///
63 | /// # Examples
64 | ///
65 | /// ```
66 | /// use fast_async_mutex::rwlock_ordered::OrderedRwLock;
67 | /// use std::sync::Arc;
68 | /// #[tokio::main]
69 | /// async fn main() {
70 | /// let mutex = Arc::new(OrderedRwLock::new(10));
71 | /// let mut guard = mutex.write_owned().await;
72 | /// *guard += 1;
73 | /// assert_eq!(*guard, 11);
74 | /// }
75 | /// ```
76 | #[inline]
77 | pub fn write_owned(self: &Arc<Self>) -> OrderedRwLockWriteOwnedGuardFuture<T> {
78 | OrderedRwLockWriteOwnedGuardFuture {
79 | mutex: self.clone(),
80 | id: self.inner.generate_id(),
81 | is_realized: false,
82 | }
83 | }
84 | 
85 | /// Acquires the mutex for a read.
86 | ///
87 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
88 | ///
89 | /// # Examples
90 | ///
91 | /// ```
92 | /// use fast_async_mutex::rwlock_ordered::OrderedRwLock;
93 | ///
94 | /// #[tokio::main]
95 | /// async fn main() {
96 | /// let mutex = OrderedRwLock::new(10);
97 | /// let guard = mutex.read().await;
98 | /// let guard2 = mutex.read().await;
99 | /// assert_eq!(*guard, *guard2);
100 | /// }
101 | /// ```
102 | #[inline]
103 | pub fn read(&self) -> OrderedRwLockReadGuardFuture<'_, T> {
104 | OrderedRwLockReadGuardFuture {
105 | mutex: &self,
106 | id: self.inner.generate_id(),
107 | is_realized: false,
108 | }
109 | }
110 | 
111 | /// Acquires the mutex for a read.
112 | ///
113 | /// Returns a guard that releases the mutex and wakes the next locker when it is dropped.
114 | /// `OrderedRwLockReadOwnedGuard` has a `'static` lifetime, but requires the `Arc<OrderedRwLock<T>>` type.
115 | ///
116 | /// # Examples
117 | ///
118 | /// ```
119 | /// use fast_async_mutex::rwlock_ordered::OrderedRwLock;
120 | /// use std::sync::Arc;
121 | /// #[tokio::main]
122 | /// async fn main() {
123 | /// let mutex = Arc::new(OrderedRwLock::new(10));
124 | /// let guard = mutex.read_owned().await;
125 | /// let guard2 = mutex.read_owned().await;
126 | /// assert_eq!(*guard, *guard2);
127 | /// }
128 | /// ```
129 | #[inline]
130 | pub fn read_owned(self: &Arc<Self>) -> OrderedRwLockReadOwnedGuardFuture<T> {
131 | OrderedRwLockReadOwnedGuardFuture {
132 | mutex: self.clone(),
133 | id: self.inner.generate_id(),
134 | is_realized: false,
135 | }
136 | }
137 | 
138 | #[inline]
139 | fn unlock_reader(&self) {
140 | self.readers.fetch_sub(1, Ordering::Release);
141 | self.inner.unlock()
142 | }
143 | 
144 | #[inline]
145 | fn add_reader(&self) {
146 | self.readers.fetch_add(1, Ordering::Release);
147 | }
148 | 
149 | #[inline]
150 | pub fn try_acquire_reader(&self, id: usize) -> bool {
151 | id == self.inner.current.load(Ordering::Acquire) + self.readers.load(Ordering::Acquire)
152 | }
153 | }
154 | 
155 | /// The Simple Write Lock Guard
156 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally borrows the `OrderedRwLock`, so the mutex will not be dropped while a guard exists.
157 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
158 | #[derive(Debug)]
159 | pub struct OrderedRwLockWriteGuard<'a, T: ?Sized> {
160 | mutex: &'a OrderedRwLock<T>,
161 | }
162 | 
163 | #[derive(Debug)]
164 | pub struct OrderedRwLockWriteGuardFuture<'a, T: ?Sized> {
165 | mutex: &'a OrderedRwLock<T>,
166 | id: usize,
167 | is_realized: bool,
168 | }
169 | 
170 | /// An owned handle to a held `OrderedRwLock`.
171 | /// This guard is only available from an `OrderedRwLock` that is wrapped in an `Arc`. It is identical to `OrderedRwLockWriteGuard`, except that rather than borrowing the `OrderedRwLock`, it clones the `Arc`, incrementing the reference count. This means that unlike `OrderedRwLockWriteGuard`, it has the `'static` lifetime.
172 | /// As long as you have this guard, you have exclusive access to the underlying `T`. The guard internally keeps a reference-counted pointer to the original `OrderedRwLock`, so even if the lock goes away, the guard remains valid.
173 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
174 | #[derive(Debug)]
175 | pub struct OrderedRwLockWriteOwnedGuard<T: ?Sized> {
176 | mutex: Arc<OrderedRwLock<T>>,
177 | }
178 | 
179 | #[derive(Debug)]
180 | pub struct OrderedRwLockWriteOwnedGuardFuture<T: ?Sized> {
181 | mutex: Arc<OrderedRwLock<T>>,
182 | id: usize,
183 | is_realized: bool,
184 | }
185 | 
186 | /// The Simple Read Lock Guard
187 | /// As long as you have this guard, you have shared access to the underlying `T`. The guard internally borrows the `OrderedRwLock`, so the mutex will not be dropped while a guard exists.
188 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
189 | #[derive(Debug)]
190 | pub struct OrderedRwLockReadGuard<'a, T: ?Sized> {
191 | mutex: &'a OrderedRwLock<T>,
192 | }
193 | 
194 | #[derive(Debug)]
195 | pub struct OrderedRwLockReadGuardFuture<'a, T: ?Sized> {
196 | mutex: &'a OrderedRwLock<T>,
197 | id: usize,
198 | is_realized: bool,
199 | }
200 | 
201 | /// An owned handle to a held `OrderedRwLock`.
202 | /// This guard is only available from an `OrderedRwLock` that is wrapped in an `Arc`. It is identical to `OrderedRwLockReadGuard`, except that rather than borrowing the `OrderedRwLock`, it clones the `Arc`, incrementing the reference count. This means that unlike `OrderedRwLockReadGuard`, it has the `'static` lifetime.
203 | /// As long as you have this guard, you have shared access to the underlying `T`. The guard internally keeps a reference-counted pointer to the original `OrderedRwLock`, so even if the lock goes away, the guard remains valid.
204 | /// The lock is automatically released and the next locker is woken whenever the guard is dropped, at which point locking will succeed again.
205 | #[derive(Debug)]
206 | pub struct OrderedRwLockReadOwnedGuard<T: ?Sized> {
207 | mutex: Arc<OrderedRwLock<T>>,
208 | }
209 | 
210 | #[derive(Debug)]
211 | pub struct OrderedRwLockReadOwnedGuardFuture<T: ?Sized> {
212 | mutex: Arc<OrderedRwLock<T>>,
213 | id: usize,
214 | is_realized: bool,
215 | }
216 | 
217 | impl<'a, T: ?Sized> Future for OrderedRwLockWriteGuardFuture<'a, T> {
218 | type Output = OrderedRwLockWriteGuard<'a, T>;
219 | 
220 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
221 | if self.mutex.inner.try_acquire(self.id) {
222 | self.is_realized = true;
223 | Poll::Ready(OrderedRwLockWriteGuard { mutex: self.mutex })
224 | } else {
225 | self.mutex.inner.store_waker(cx.waker());
226 | Poll::Pending
227 | }
228 | }
229 | }
230 | 
231 | impl<T: ?Sized> Future for OrderedRwLockWriteOwnedGuardFuture<T> {
232 | type Output = OrderedRwLockWriteOwnedGuard<T>;
233 | 
234 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
235 | if self.mutex.inner.try_acquire(self.id) {
236 | self.is_realized = true;
237 | Poll::Ready(OrderedRwLockWriteOwnedGuard {
238 | mutex: self.mutex.clone(),
239 | })
240 | } else {
241 | self.mutex.inner.store_waker(cx.waker());
242 | Poll::Pending
243 | }
244 | }
245 | }
246 | 
247 | impl<'a, T: ?Sized> Future for OrderedRwLockReadGuardFuture<'a, T> {
248 | type Output = OrderedRwLockReadGuard<'a, T>;
249 | 
250 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
251 | if self.mutex.try_acquire_reader(self.id) {
252 | self.is_realized = true;
253 | self.mutex.add_reader();
254 | Poll::Ready(OrderedRwLockReadGuard { mutex: &self.mutex })
255 | } else {
256 | self.mutex.inner.store_waker(cx.waker());
257 | Poll::Pending
258 | }
259 | }
260 | }
261 | 
262 | impl<T: ?Sized> Future for OrderedRwLockReadOwnedGuardFuture<T> {
263 | type Output = OrderedRwLockReadOwnedGuard<T>;
264 | 
265 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
266 | if self.mutex.try_acquire_reader(self.id) {
267 | self.is_realized = true;
268 | self.mutex.add_reader();
269 | Poll::Ready(OrderedRwLockReadOwnedGuard {
270 | mutex: self.mutex.clone(),
271 | })
272 | } else {
273 | self.mutex.inner.store_waker(cx.waker());
274 | Poll::Pending
275 | }
276 | }
277 | }
278 | 
279 | crate::impl_send_sync_rwlock!(
280 | OrderedRwLock,
281 | OrderedRwLockReadGuard,
282 | OrderedRwLockReadOwnedGuard,
283 | OrderedRwLockWriteGuard,
284 | OrderedRwLockWriteOwnedGuard
285 | );
286 | 
287 | crate::impl_deref_mut!(OrderedRwLockWriteGuard, 'a);
288 | crate::impl_deref_mut!(OrderedRwLockWriteOwnedGuard);
289 | crate::impl_deref!(OrderedRwLockReadGuard, 'a);
290 | crate::impl_deref!(OrderedRwLockReadOwnedGuard);
291 | 
292 | crate::impl_drop_guard!(OrderedRwLockWriteGuard, 'a, unlock);
293 | crate::impl_drop_guard!(OrderedRwLockWriteOwnedGuard, unlock);
294 | crate::impl_drop_guard_self!(OrderedRwLockReadGuard, 'a, unlock_reader);
295 | crate::impl_drop_guard_self!(OrderedRwLockReadOwnedGuard, unlock_reader);
296 | 
297 | crate::impl_drop_guard_future!(OrderedRwLockWriteGuardFuture, 'a, unlock);
298 | crate::impl_drop_guard_future!(OrderedRwLockWriteOwnedGuardFuture, unlock);
299 | crate::impl_drop_guard_future!(OrderedRwLockReadGuardFuture, 'a, unlock);
300 | crate::impl_drop_guard_future!(OrderedRwLockReadOwnedGuardFuture, unlock);
301 | 
302 | #[cfg(test)]
303 | mod tests {
304 | use crate::rwlock_ordered::{
305 | OrderedRwLock, OrderedRwLockReadGuard, OrderedRwLockWriteGuard,
306 | OrderedRwLockWriteOwnedGuard,
307 | };
308 | use futures::executor::block_on;
309 | use futures::{FutureExt, StreamExt, TryStreamExt};
310 | use std::ops::AddAssign;
311 | use std::sync::atomic::AtomicUsize;
312 | use std::sync::Arc;
313 | use tokio::time::{sleep, Duration};
314 | 
315 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
316 | async fn test_mutex() {
317 | let c = OrderedRwLock::new(0);
318 | 
319 | futures::stream::iter(0..10000)
320 | .for_each_concurrent(None, |_| async {
321 | let mut co: OrderedRwLockWriteGuard<i32> = c.write().await;
322 | *co += 1;
323 | })
324 | .await;
325 | 
326 | let co = c.write().await;
327 | assert_eq!(*co, 10000)
328 | }
329 | 
330 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
331 | async fn test_mutex_delay() {
332 | let expected_result = 100;
333 | let c = OrderedRwLock::new(0);
334 | 
335 | futures::stream::iter(0..expected_result)
336 | .then(|i| c.write().map(move |co| (i, co)))
337 | .for_each_concurrent(None, |(i, mut co)| async move {
338 | sleep(Duration::from_millis(expected_result - i)).await;
339 | *co += 1;
340 | })
341 | .await;
342 | 
343 | let co = c.write().await;
344 | assert_eq!(*co, expected_result)
345 | }
346 | 
347 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
348 | async fn test_owned_mutex() {
349 | let c = Arc::new(OrderedRwLock::new(0));
350 | 
351 | futures::stream::iter(0..10000)
352 | .for_each_concurrent(None, |_| async {
353 | let mut co: OrderedRwLockWriteOwnedGuard<i32> = c.write_owned().await;
354 | *co += 1;
355 | })
356 | .await;
357 | 
358 | let co = c.write_owned().await;
359 | assert_eq!(*co, 10000)
360 | }
361 | 
362 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
363 | async fn test_container() {
364 | let c = OrderedRwLock::new(String::from("lol"));
365 | 
366 | let mut co: OrderedRwLockWriteGuard<String> = c.write().await;
367 | co.add_assign("lol");
368 | 
369 | assert_eq!(*co, "lollol");
370 | }
371 | 
372 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
373 | async fn test_overflow() {
374 | let mut c = OrderedRwLock::new(String::from("lol"));
375 | 
376 | c.inner.state = AtomicUsize::new(usize::max_value());
377 | c.inner.current = AtomicUsize::new(usize::max_value());
378 | 
379 | let mut co: OrderedRwLockWriteGuard<String> = c.write().await;
380 | co.add_assign("lol");
381 | 
382 | assert_eq!(*co, "lollol");
383 | }
384 | 
385 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
386 | async fn test_timeout() {
387 | let c = OrderedRwLock::new(String::from("lol"));
388 | 
389 | let co: OrderedRwLockWriteGuard<String> = c.write().await;
390 | 
391 | futures::stream::iter(0..10000i32)
392 | .then(|_| tokio::time::timeout(Duration::from_nanos(1), c.write()))
393 | .try_for_each_concurrent(None, |_c| futures::future::ok(()))
394 | .await
395 | .expect_err("timeout must occur");
396 | 
397 | drop(co);
398 | 
399 | let mut co: OrderedRwLockWriteGuard<String> = c.write().await;
400 | co.add_assign("lol");
401 | 
402 | assert_eq!(*co, "lollol");
403 | }
404 | 
405 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
406 | async fn test_concurrent_reading() {
407 | let c = OrderedRwLock::new(String::from("lol"));
408 | 
409 | let co: OrderedRwLockReadGuard<String> = c.read().await;
410 | 
411 | futures::stream::iter(0..10000i32)
412 | .then(|_| c.read())
413 | .inspect(|c| assert_eq!(*co, **c))
414 | .for_each_concurrent(None, |_c| futures::future::ready(()))
415 | .await;
416 | 
417 | assert!(matches!(
418 | tokio::time::timeout(Duration::from_millis(1), c.write()).await,
419 | Err(_)
420 | ));
421 | 
422 | let co2: OrderedRwLockReadGuard<String> = c.read().await;
423 | assert_eq!(*co, *co2);
424 | }
425 | 
426 | #[tokio::test(flavor = "multi_thread", worker_threads = 12)]
"multi_thread", worker_threads = 12)] 427 | async fn test_concurrent_reading_writing() { 428 | let c = OrderedRwLock::new(String::from("lol")); 429 | 430 | let co: OrderedRwLockReadGuard = c.read().await; 431 | let co2: OrderedRwLockReadGuard = c.read().await; 432 | assert_eq!(*co, *co2); 433 | 434 | drop(co); 435 | drop(co2); 436 | 437 | let mut co: OrderedRwLockWriteGuard = c.write().await; 438 | 439 | assert!(matches!( 440 | tokio::time::timeout(Duration::from_millis(1), c.read()).await, 441 | Err(_) 442 | )); 443 | 444 | *co += "lol"; 445 | 446 | drop(co); 447 | 448 | let co: OrderedRwLockReadGuard = c.read().await; 449 | let co2: OrderedRwLockReadGuard = c.read().await; 450 | assert_eq!(*co, "lollol"); 451 | assert_eq!(*co, *co2); 452 | } 453 | 454 | #[test] 455 | fn multithreading_test() { 456 | let num = 100; 457 | let mutex = Arc::new(OrderedRwLock::new(0)); 458 | let ths: Vec<_> = (0..num) 459 | .map(|i| { 460 | let mutex = mutex.clone(); 461 | std::thread::spawn(move || { 462 | block_on(async { 463 | if i % 2 == 0 { 464 | let mut lock = mutex.write().await; 465 | *lock += 1; 466 | drop(lock); 467 | } else { 468 | let _lock = mutex.read().await; 469 | } 470 | }) 471 | }) 472 | }) 473 | .collect(); 474 | 475 | for thread in ths { 476 | thread.join().unwrap(); 477 | } 478 | 479 | block_on(async { 480 | let lock = mutex.read().await; 481 | assert_eq!(num / 2, *lock) 482 | }) 483 | } 484 | } 485 | --------------------------------------------------------------------------------