├── .gitignore
├── rustfmt.toml
├── src
│   ├── lib.rs
│   ├── pool_allocator.rs
│   ├── concurrent.rs
│   └── thread_local.rs
├── Cargo.toml
├── LICENSE
├── tests
│   ├── thread_local.rs
│   └── concurrent.rs
├── benches
│   └── bench.rs
├── .github
│   └── workflows
│       └── pipeline.yaml
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /dev
3 | Cargo.lock
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | wrap_comments = true
2 | imports_granularity = "Crate"
3 | group_imports = "StdExternalCrate"
4 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![no_std]
2 | #![doc = include_str!("../README.md")]
3 | #![warn(missing_docs, missing_debug_implementations)]
4 | extern crate alloc;
5 | mod concurrent;
6 | mod pool_allocator;
7 | mod thread_local;
8 |
9 | pub use concurrent::*;
10 | pub use pool_allocator::*;
11 | pub use thread_local::*;
12 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "opool"
3 | version = "0.2.0"
4 | edition = "2021"
5 | authors = ["Khashayar Fereidani"]
6 | description = "High-performance, lock-free local and concurrent object pool with automated allocation, cleanup, and verification."
7 | repository = "https://github.com/fereidani/opool"
8 | documentation = "https://docs.rs/opool"
9 | keywords = ["pool", "object-pool", "lock-free", "no_std"]
10 | categories = ["memory-management", "concurrency", "no-std"]
11 | license = "MIT"
12 | readme = "README.md"
13 |
14 | [dependencies]
15 | crossbeam-queue = "0.3"
16 |
17 | [dev-dependencies]
18 | criterion = "0.4"
19 | rayon = "1"
20 |
21 | [[bench]]
22 | name = "bench"
23 | harness = false
24 |
--------------------------------------------------------------------------------
/src/pool_allocator.rs:
--------------------------------------------------------------------------------
1 | /// A trait defining the interface for a pool allocator.
2 | ///
3 | /// This trait provides methods for resetting and creating new objects,
4 | /// as well as validating objects before they are stored back in the object
5 | /// pool.
6 | pub trait PoolAllocator<T> {
7 |     /// Resets an object to its initial state if necessary.
8 |     ///
9 |     /// By default, this method does nothing. Override this method to provide
10 |     /// custom reset logic.
11 |     #[inline(always)]
12 |     fn reset(&self, _obj: &mut T) {}
13 |
14 |     /// Creates a new object of type `T`.
15 |     fn allocate(&self) -> T;
16 |
17 |     /// Validates that an object is in a good state to be stored back in the
18 |     /// object pool.
19 |     ///
20 |     /// By default, this method always returns true. Override this method to
21 |     /// provide custom validation logic.
22 | #[inline(always)] 23 | fn is_valid(&self, _obj: &T) -> bool { 24 | true 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2022-2023 Khashayar Fereidani 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/thread_local.rs: -------------------------------------------------------------------------------- 1 | use opool::*; 2 | 3 | struct SimpleAllocator; 4 | 5 | impl PoolAllocator> for SimpleAllocator { 6 | fn allocate(&self) -> Box { 7 | Box::new(10) 8 | } 9 | } 10 | 11 | #[test] 12 | fn test_new() { 13 | let pool = LocalPool::new(10, SimpleAllocator); 14 | assert_eq!(**pool.get(), 10); 15 | } 16 | 17 | #[test] 18 | fn test_new_prefilled() { 19 | let pool = LocalPool::new_prefilled(10, SimpleAllocator); 20 | assert_eq!(**pool.get(), 10); 21 | } 22 | 23 | #[test] 24 | fn test_get_into_inner() { 25 | let pool = LocalPool::new_prefilled(10, SimpleAllocator); 26 | let guard = pool.get().into_inner(); 27 | assert_eq!(*guard, 10); 28 | } 29 | 30 | #[test] 31 | fn test_try_get_rc() { 32 | let pool = LocalPool::new_prefilled(1, SimpleAllocator).to_rc(); 33 | let guard = pool.clone().try_get_rc().unwrap(); 34 | let guard2 = pool.clone().try_get_rc(); 35 | assert_eq!(**guard, 10); 36 | assert_eq!(guard2, None); 37 | } 38 | 39 | #[test] 40 | fn test_get_rc() { 41 | let pool = LocalPool::new_prefilled(10, SimpleAllocator).to_rc(); 42 | let guard = pool.clone().get_rc(); 43 | assert_eq!(**guard, 10); 44 | } 45 | 46 | #[test] 47 | fn test_get_rc_into_inner() { 48 | let pool = LocalPool::new_prefilled(10, SimpleAllocator).to_rc(); 49 | let guard = pool.clone().get_rc().into_inner(); 50 | assert_eq!(*guard, 10); 51 | } 52 | -------------------------------------------------------------------------------- /tests/concurrent.rs: -------------------------------------------------------------------------------- 1 | use opool::*; 2 | 3 | struct SimpleAllocator; 4 | 5 | impl PoolAllocator> for SimpleAllocator { 6 | fn allocate(&self) -> Box { 7 | Box::new(10) 8 | } 9 | } 10 | 11 | #[test] 12 | fn test_new() { 13 | let pool = Pool::new(10, SimpleAllocator); 14 | assert_eq!(**pool.get(), 10); 15 | } 16 | 17 | #[test] 18 | fn test_new_prefilled() { 19 | let pool = Pool::new_prefilled(10, SimpleAllocator); 
20 | assert_eq!(**pool.get(), 10); 21 | } 22 | 23 | #[test] 24 | fn test_try_get() { 25 | let pool = Pool::new_prefilled(1, SimpleAllocator); 26 | let guard = pool.try_get().unwrap(); 27 | let guard2 = pool.try_get(); 28 | assert_eq!(**guard, 10); 29 | assert_eq!(guard2, None); 30 | } 31 | 32 | #[test] 33 | fn test_get() { 34 | let pool = Pool::new_prefilled(10, SimpleAllocator); 35 | let guard = pool.get(); 36 | assert_eq!(**guard, 10); 37 | } 38 | 39 | #[test] 40 | fn test_get_into_inner() { 41 | let pool = Pool::new_prefilled(10, SimpleAllocator); 42 | let guard = pool.get().into_inner(); 43 | assert_eq!(*guard, 10); 44 | } 45 | 46 | #[test] 47 | fn test_try_get_rc() { 48 | let pool = Pool::new_prefilled(1, SimpleAllocator).to_rc(); 49 | let guard = pool.clone().try_get_rc().unwrap(); 50 | let guard2 = pool.clone().try_get_rc(); 51 | assert_eq!(**guard, 10); 52 | assert_eq!(guard2, None); 53 | } 54 | 55 | #[test] 56 | fn test_get_rc() { 57 | let pool = Pool::new_prefilled(10, SimpleAllocator).to_rc(); 58 | let guard = pool.clone().get_rc(); 59 | assert_eq!(**guard, 10); 60 | } 61 | 62 | #[test] 63 | fn test_get_rc_into_inner() { 64 | let pool = Pool::new_prefilled(10, SimpleAllocator).to_rc(); 65 | let guard = pool.clone().get_rc().into_inner(); 66 | assert_eq!(*guard, 10); 67 | } 68 | -------------------------------------------------------------------------------- /benches/bench.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 2 | use opool::*; 3 | 4 | struct Allocator {} 5 | 6 | const POOL_OBJECT_SIZE: usize = 1024 * 1024 * 1024; 7 | impl PoolAllocator> for Allocator { 8 | #[inline(always)] 9 | fn allocate(&self) -> Vec { 10 | Vec::with_capacity(POOL_OBJECT_SIZE) 11 | } 12 | 13 | #[inline(always)] 14 | fn reset(&self, obj: &mut Vec) { 15 | obj.clear() 16 | } 17 | 18 | #[inline(always)] 19 | fn is_valid(&self, obj: &Vec) -> bool { 20 | obj.capacity() == POOL_OBJECT_SIZE 21 | } 22 | } 23 | 24 | fn allocate(c: &mut Criterion) { 25 | c.bench_function("opool", |b| { 26 | let pool = Pool::new(1024, Allocator {}); 27 | b.iter(|| { 28 | let obj = black_box(pool.get()); 29 | black_box(obj.capacity()) 30 | }) 31 | }); 32 | c.bench_function("opool_thread_local", |b| { 33 | let pool = LocalPool::new(1024, Allocator {}); 34 | b.iter(|| { 35 | let obj = black_box(pool.get()); 36 | black_box(obj.capacity()) 37 | }) 38 | }); 39 | c.bench_function("system", |b| { 40 | let alloc: Allocator = Allocator {}; 41 | b.iter(|| { 42 | let obj = black_box(alloc.allocate()); 43 | black_box(obj.capacity()) 44 | }) 45 | }); 46 | } 47 | 48 | fn allocate_multi(c: &mut Criterion) { 49 | use rayon::prelude::*; 50 | c.bench_function("opool_multi", |b| { 51 | let pool = Pool::new(1024, Allocator {}); 52 | b.iter(|| { 53 | (0..8192).into_par_iter().for_each(|_i| { 54 | let obj = black_box(pool.get()); 55 | black_box(obj.capacity()); 56 | }); 57 | }) 58 | }); 59 | 60 | c.bench_function("system_multi", |b| { 61 | let alloc: Allocator = Allocator {}; 62 | b.iter(|| { 63 | (0..8192).into_par_iter().for_each(|_i| { 64 | let obj = black_box(alloc.allocate()); 65 | black_box(obj.capacity()); 66 | }); 67 | }) 68 | }); 69 | } 70 | criterion_group!(benches, allocate, allocate_multi); 71 | criterion_main!(benches); 72 | -------------------------------------------------------------------------------- /.github/workflows/pipeline.yaml: -------------------------------------------------------------------------------- 1 | name: 
pipeline 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - feature/* 8 | pull_request: 9 | branches: 10 | - main 11 | 12 | env: 13 | CARGO_TERM_COLOR: always 14 | 15 | jobs: 16 | cargo_fmt: 17 | name: "Cargo Format" 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v3 21 | - name: Install nightly toolchain 22 | run: rustup toolchain install nightly --component rustfmt 23 | - name: Check formatting 24 | run: cargo +nightly fmt --all -- --check 25 | cargo_clippy: 26 | name: "Cargo Clippy" 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v3 30 | - name: Install Clippy 31 | run: rustup component add clippy 32 | - name: Run Clippy 33 | run: cargo clippy --all-targets --all-features -- -D warnings 34 | cargo_test: 35 | name: "Cargo Test" 36 | runs-on: ubuntu-latest 37 | steps: 38 | - uses: actions/checkout@v3 39 | - name: Build 40 | run: cargo build --verbose 41 | - name: Test with Cargo 42 | run: cargo test --verbose 43 | cargo_test_release: 44 | name: "Cargo Test (Release)" 45 | runs-on: ubuntu-latest 46 | needs: [cargo_test] 47 | steps: 48 | - uses: actions/checkout@v3 49 | - name: Build 50 | run: cargo build --release --verbose 51 | - name: Test with Cargo (Release) 52 | run: cargo test --release --verbose 53 | cargo_test_no_default_features: 54 | name: "Cargo Test (No Default Features)" 55 | runs-on: ubuntu-latest 56 | needs: [cargo_test_release] 57 | steps: 58 | - uses: actions/checkout@v3 59 | - name: Build 60 | run: cargo build --no-default-features --verbose 61 | - name: Test with Cargo (No Default Features) 62 | run: cargo test --no-default-features --verbose 63 | miri_test: 64 | name: "Miri Test" 65 | runs-on: ubuntu-latest 66 | needs: [cargo_test_release] 67 | steps: 68 | - uses: actions/checkout@v3 69 | - name: Install Miri 70 | run: | 71 | rustup toolchain install nightly --component miri 72 | rustup override set nightly 73 | cargo miri setup 74 | - name: Test with Miri 75 | run: cargo miri test 76 | miri_test_with_flags: 77 | name: "Miri Test (RUSTFLAGS)" 78 | env: 79 | RUSTFLAGS: "-Z randomize-layout" 80 | runs-on: ubuntu-latest 81 | needs: [miri_test] 82 | steps: 83 | - uses: actions/checkout@v3 84 | - name: Install Miri 85 | run: | 86 | rustup toolchain install nightly --component miri 87 | rustup override set nightly 88 | cargo miri setup 89 | - name: Test with Miri 90 | run: cargo miri test 91 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Opool: Fast lock-free concurrent and local object pool 2 | 3 | [![Crates.io][crates-badge]][crates-url] 4 | [![Documentation][doc-badge]][doc-url] 5 | [![MIT licensed][mit-badge]][mit-url] 6 | 7 | [crates-badge]: https://img.shields.io/crates/v/opool.svg?style=for-the-badge 8 | [crates-url]: https://crates.io/crates/opool 9 | [mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg?style=for-the-badge 10 | [mit-url]: https://github.com/fereidani/opool/blob/main/LICENSE 11 | [doc-badge]: https://img.shields.io/docsrs/opool?style=for-the-badge 12 | [doc-url]: https://docs.rs/opool 13 | 14 | Opool is a high-performance Rust library that offers a concurrent and local object pool implementation. It aims to provide efficiency and flexibility, enabling you to manage the lifecycle of your objects and reuse them to minimize allocation overhead. Opool supports `no_std` with alloc available. 
15 |
16 | ## Why Use Opool
17 |
18 | - Superior Performance: Opool outperforms alternatives thanks to its design choices, particularly its use of the [`PoolAllocator`] trait, which lets the compiler inline allocator calls. The result is faster code with fewer unnecessary function calls and jumps.
19 | - Lock-Free Design: Opool operates without any mutexes, ensuring a lock-free implementation. Apart from the allocations performed through the `alloc` crate, it minimizes reliance on operating system syscalls, further enhancing performance.
20 | - Enhanced Compatibility: Opool supports `no_std` environments as long as `alloc` is available, making it suitable for a wide range of Rust projects.
21 | - Comprehensive Interface: Opool provides a complete interface that automates object allocation, cleanup, and verification for your object pool. You no longer need to manually clean up pool-allocated data, and you can optionally provide a [`PoolAllocator::reset`] implementation to clean objects up when they are automatically collected.
22 | - Reference Counted References: Opool supports reference-counted guards, although plain borrowed guards are recommended whenever possible. Reference counting simplifies lifetimes, particularly when a guard must outlive the scope that borrows the pool.
23 |
24 | ## Structures
25 |
26 | - **[`PoolAllocator`] Trait**: This trait defines the interface for a pool allocator. It includes methods for allocating, resetting, and validating objects; the resetting and validating methods are optional.
27 | - **[`Pool`] Struct**: This struct represents an object pool. It uses an `ArrayQueue` for storage and a [`PoolAllocator`] for object management.
28 | - **[`LocalPool`] Struct**: This struct represents a thread-local object pool, restricted to use within the current thread. It uses a `VecDeque` for storage and a [`PoolAllocator`] for object management.
29 | - **[`RefGuard`], [`RcGuard`], [`RefLocalGuard`] and [`RcLocalGuard`] Structs**: These structs are smart pointers that automatically return the object to the pool when they are dropped. They also provide methods for accessing the underlying object.
30 |
31 | ## Usage
32 |
33 | First, define your allocator by implementing the [`PoolAllocator`] trait. This means providing a [`PoolAllocator::allocate`] method to create new objects, and optionally a [`PoolAllocator::reset`] method to reset objects to their initial state and a [`PoolAllocator::is_valid`] method to check whether an object is still valid to be pushed back into the pool.
34 |
35 | Then, create a [`Pool`] or [`LocalPool`] with your allocator. Use the `new` method to create an empty pool, or the `new_prefilled` method to create a pool that is initially filled with a given number of objects.
36 |
37 | To get an object from the pool, use the `get` method, which returns a `RefGuard` (a `RefLocalGuard` for a [`LocalPool`]). These guards automatically return the object to the pool when they are dropped.
38 |
39 | To get an `RcGuard` via `get_rc` instead, first convert the pool into its reference-counted flavor by calling `to_rc` on it, as in the sketch below.
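For instance (a minimal sketch; `CounterAllocator` is just an illustrative allocator that hands out zeroed counters):

```rust
use opool::{Pool, PoolAllocator};

struct CounterAllocator;

impl PoolAllocator<u64> for CounterAllocator {
    fn allocate(&self) -> u64 {
        0
    }
}

// `to_rc` wraps the pool in an `Arc`, so the guard returned by `get_rc`
// keeps the pool alive on its own instead of borrowing it.
let pool = Pool::new(8, CounterAllocator).to_rc();
let value = pool.clone().get_rc();
assert_eq!(*value, 0);
// Dropping `value` hands the object back to the pool.
```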
40 |
41 | Here is a more complete example:
42 |
43 | ```rust
44 | use opool::{Pool, PoolAllocator};
45 | struct MyAllocator;
46 |
47 | const BUF_SIZE: usize = 1024 * 8;
48 | impl PoolAllocator<Vec<u8>> for MyAllocator {
49 |     #[inline]
50 |     fn allocate(&self) -> Vec<u8> {
51 |         vec![0; BUF_SIZE]
52 |     }
53 |
54 |     // OPTIONAL METHODS:
55 |
56 |     #[inline]
57 |     fn reset(&self, _obj: &mut Vec<u8>) {
58 |         // Optionally clear or zero the object's fields here
59 |     }
60 |
61 |     #[inline]
62 |     fn is_valid(&self, obj: &Vec<u8>) -> bool {
63 |         // Optionally check whether the object is still fit to be pushed back into the pool
64 |         obj.capacity() == BUF_SIZE
65 |     }
66 | }
67 |
68 | let pool = Pool::new(64, MyAllocator);
69 | let obj = pool.get();
70 | // Use the object; it is automatically returned to the pool when it goes out of scope.
71 |
72 | ```
73 |
74 | ## Installation
75 |
76 | Add this to your `Cargo.toml`:
77 |
78 | ```toml
79 | [dependencies]
80 | opool = "0.2"
81 | ```
82 |
83 | ## License
84 |
85 | Opool is licensed under the MIT license. Please see the `LICENSE` file for more details.
86 |
--------------------------------------------------------------------------------
/src/concurrent.rs:
--------------------------------------------------------------------------------
1 | use alloc::{fmt, sync::Arc};
2 | use core::{
3 |     hash::{Hash, Hasher},
4 |     mem::{forget, MaybeUninit},
5 |     ops::{Deref, DerefMut},
6 |     ptr,
7 | };
8 |
9 | use crossbeam_queue::ArrayQueue;
10 |
11 | use crate::PoolAllocator;
12 |
13 | /// A struct representing an object pool.
14 | ///
15 | /// This struct uses an allocator to create and manage objects, and stores them
16 | /// in an `ArrayQueue`.
17 | #[derive(Debug)]
18 | pub struct Pool<P: PoolAllocator<T>, T> {
19 |     allocator: P,
20 |     storage: ArrayQueue<T>,
21 | }
22 |
23 | // If T is Send, it is safe to move the object pool between threads.
24 | unsafe impl<P: PoolAllocator<T>, T: Send> Send for Pool<P, T> {}
25 |
26 | impl<P: PoolAllocator<T>, T> Pool<P, T> {
27 |     /// Creates a new `Pool` with a given size and allocator.
28 |     ///
29 |     /// This method immediately fills the pool with new objects created by the
30 |     /// allocator.
31 |     pub fn new_prefilled(pool_size: usize, allocator: P) -> Self {
32 |         let storage = ArrayQueue::new(pool_size);
33 |         for _ in 0..pool_size {
34 |             let _ = storage.push(allocator.allocate());
35 |         }
36 |         Pool { allocator, storage }
37 |     }
38 |
39 |     /// Creates a new object pool with a given size and allocator.
40 |     ///
41 |     /// Unlike [`Self::new_prefilled`], this method does not immediately fill
42 |     /// the pool with objects.
43 |     pub fn new(pool_size: usize, allocator: P) -> Self {
44 |         let storage = ArrayQueue::new(pool_size);
45 |         Pool { allocator, storage }
46 |     }
47 |
48 |     /// Wraps the pool allocator with an atomic reference counter, enabling the
49 |     /// use of [`Self::get_rc`] to obtain pool-allocated objects that rely on
50 |     /// reference counted references instead of borrowed references.
51 |     pub fn to_rc(self) -> Arc<Self> {
52 |         Arc::new(self)
53 |     }
54 |
55 |     /// Attempts to get an object from the pool.
56 |     ///
57 |     /// If the pool is empty, `None` is returned.
58 |     pub fn try_get(&self) -> Option<RefGuard<'_, P, T>> {
59 |         self.storage.pop().map(|mut obj| {
60 |             self.allocator.reset(&mut obj);
61 |             RefGuard::new(obj, self)
62 |         })
63 |     }
64 |
65 |     /// Gets an object from the pool.
66 |     ///
67 |     /// If the pool is empty, a new object is created using the allocator.
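///
/// # Example
///
/// A minimal usage sketch (`BufAllocator` is only an illustrative allocator,
/// not part of the crate):
///
/// ```
/// use opool::{Pool, PoolAllocator};
///
/// struct BufAllocator;
/// impl PoolAllocator<String> for BufAllocator {
///     fn allocate(&self) -> String {
///         String::with_capacity(64)
///     }
///     fn reset(&self, obj: &mut String) {
///         obj.clear();
///     }
/// }
///
/// let pool = Pool::new(4, BufAllocator);
/// let mut buf = pool.get(); // the pool starts empty, so this allocates a fresh String
/// buf.push_str("hello");
/// assert_eq!(buf.as_str(), "hello");
/// // Dropping `buf` returns it to the pool; `reset` clears it on the next `get`.
/// ```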
68 | pub fn get(&'_ self) -> RefGuard<'_, P, T> { 69 | match self.storage.pop() { 70 | Some(mut obj) => { 71 | self.allocator.reset(&mut obj); 72 | RefGuard::new(obj, self) 73 | } 74 | None => RefGuard::new(self.allocator.allocate(), self), 75 | } 76 | } 77 | 78 | /// Attempts to get an object from the pool that holds an arc reference to 79 | /// the owning pool. Allocated objects are not as efficient as those 80 | /// allocated by [`Self::get`] method but they are easier to move as 81 | /// they are not limited by allocator lifetime directly. 82 | /// 83 | /// If the pool is empty, None is returned. 84 | pub fn try_get_rc(self: Arc) -> Option> { 85 | self.storage.pop().map(|mut obj| { 86 | self.allocator.reset(&mut obj); 87 | RcGuard::new(obj, &self) 88 | }) 89 | } 90 | 91 | /// Gets an object from the pool that holds an arc reference to the owning 92 | /// pool. Allocated objects are not as efficient as those allocated by 93 | /// [`Self::get`] method but they are easier to move as they are not limited 94 | /// by allocator lifetime directly. 95 | /// 96 | /// If the pool is empty, a new object is created using the allocator. 97 | pub fn get_rc(self: Arc) -> RcGuard { 98 | match self.storage.pop() { 99 | Some(mut obj) => { 100 | self.allocator.reset(&mut obj); 101 | RcGuard::new(obj, &self) 102 | } 103 | None => RcGuard::new(self.allocator.allocate(), &self), 104 | } 105 | } 106 | 107 | /// Gets the number of objects currently in the pool. 108 | /// 109 | /// Returns the length of the internal storage, indicating the number of 110 | /// objects that are ready to be recycled from the pool. 111 | pub fn len(&self) -> usize { 112 | self.storage.len() 113 | } 114 | 115 | /// Checks if the pool is empty. 116 | /// 117 | /// Returns `true` if there are no objects currently in the pool that are 118 | /// ready to be recycled. 119 | pub fn is_empty(&self) -> bool { 120 | self.storage.is_empty() 121 | } 122 | 123 | /// Gets the capacity of the pool. 124 | /// 125 | /// Returns the maximum number of objects that the pool can hold. This does 126 | /// not indicate the maximum number of objects that can be allocated, 127 | /// but maximum objects that can be stored and recycled from the pool. 128 | pub fn capacity(&self) -> usize { 129 | self.storage.capacity() 130 | } 131 | } 132 | 133 | /// A struct representing a guard over an object in the pool. 134 | /// 135 | /// This struct ensures that the object is returned to the pool when it is 136 | /// dropped. 137 | pub struct RefGuard<'a, P: PoolAllocator, T> { 138 | obj: MaybeUninit, 139 | pool: &'a Pool, 140 | } 141 | 142 | impl<'a, P: PoolAllocator, T> RefGuard<'a, P, T> { 143 | /// Creates a new Guard for an object and a reference to the pool it 144 | /// belongs to. 145 | fn new(obj: T, pool: &'a Pool) -> Self { 146 | RefGuard { 147 | obj: MaybeUninit::new(obj), 148 | pool, 149 | } 150 | } 151 | 152 | /// Consumes the guard and returns the object, without returning it to the 153 | /// pool. 154 | /// 155 | /// This method should be used with caution, as it leads to objects not 156 | /// being returned to the pool. 
157 | pub fn into_inner(self) -> T { 158 | let obj = unsafe { self.obj.as_ptr().read() }; 159 | forget(self); 160 | obj 161 | } 162 | } 163 | 164 | impl<'a, P: PoolAllocator, T> Deref for RefGuard<'a, P, T> { 165 | type Target = T; 166 | #[inline(always)] 167 | fn deref(&self) -> &Self::Target { 168 | unsafe { &*self.obj.as_ptr() } 169 | } 170 | } 171 | 172 | impl<'a, P: PoolAllocator, T> DerefMut for RefGuard<'a, P, T> { 173 | #[inline(always)] 174 | fn deref_mut(&mut self) -> &mut Self::Target { 175 | unsafe { &mut *self.obj.as_mut_ptr() } 176 | } 177 | } 178 | 179 | /// Implementation of the Drop trait for Guard. 180 | /// 181 | /// This ensures that the object is returned to the pool when the guard is 182 | /// dropped, unless the object fails validation. 183 | impl<'a, P: PoolAllocator, T> Drop for RefGuard<'a, P, T> { 184 | fn drop(&mut self) { 185 | if self.pool.allocator.is_valid(self.deref()) { 186 | let _ = self 187 | .pool 188 | .storage 189 | .push(unsafe { self.obj.as_mut_ptr().read() }); 190 | } else { 191 | unsafe { 192 | ptr::drop_in_place(self.obj.as_mut_ptr()); 193 | } 194 | } 195 | } 196 | } 197 | 198 | impl<'a, P: PoolAllocator, T: Hash> Hash for RefGuard<'a, P, T> { 199 | #[inline] 200 | fn hash(&self, state: &mut H) { 201 | (**self).hash(state); 202 | } 203 | } 204 | impl<'a, P: PoolAllocator, T: fmt::Display> fmt::Display for RefGuard<'a, P, T> { 205 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 206 | fmt::Display::fmt(&**self, f) 207 | } 208 | } 209 | impl<'a, P: PoolAllocator, T: fmt::Debug> fmt::Debug for RefGuard<'a, P, T> { 210 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 211 | fmt::Debug::fmt(&**self, f) 212 | } 213 | } 214 | impl<'a, P: PoolAllocator, T> fmt::Pointer for RefGuard<'a, P, T> { 215 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 216 | fmt::Pointer::fmt(&(&**self as *const T), f) 217 | } 218 | } 219 | impl<'a, P: PoolAllocator, T: PartialEq> PartialEq for RefGuard<'a, P, T> { 220 | #[inline] 221 | fn eq(&self, other: &RefGuard<'a, P, T>) -> bool { 222 | self.deref().eq(other) 223 | } 224 | } 225 | impl<'a, P: PoolAllocator, T: Eq> Eq for RefGuard<'a, P, T> {} 226 | impl<'a, P: PoolAllocator, T: PartialOrd> PartialOrd for RefGuard<'a, P, T> { 227 | #[inline] 228 | fn partial_cmp(&self, other: &RefGuard<'a, P, T>) -> Option { 229 | (**self).partial_cmp(&**other) 230 | } 231 | #[inline] 232 | fn lt(&self, other: &RefGuard<'a, P, T>) -> bool { 233 | **self < **other 234 | } 235 | #[inline] 236 | fn le(&self, other: &RefGuard<'a, P, T>) -> bool { 237 | **self <= **other 238 | } 239 | #[inline] 240 | fn gt(&self, other: &RefGuard<'a, P, T>) -> bool { 241 | **self > **other 242 | } 243 | #[inline] 244 | fn ge(&self, other: &RefGuard<'a, P, T>) -> bool { 245 | **self >= **other 246 | } 247 | } 248 | impl<'a, P: PoolAllocator, T: Ord> Ord for RefGuard<'a, P, T> { 249 | #[inline] 250 | fn cmp(&self, other: &RefGuard<'a, P, T>) -> core::cmp::Ordering { 251 | (**self).cmp(&**other) 252 | } 253 | } 254 | impl<'a, P: PoolAllocator, T> core::borrow::Borrow for RefGuard<'a, P, T> { 255 | #[inline(always)] 256 | fn borrow(&self) -> &T { 257 | self 258 | } 259 | } 260 | impl<'a, P: PoolAllocator, T> AsRef for RefGuard<'a, P, T> { 261 | #[inline(always)] 262 | fn as_ref(&self) -> &T { 263 | self 264 | } 265 | } 266 | 267 | /// A struct representing a guard over an object in the pool. 268 | /// 269 | /// This struct ensures that the object is returned to the pool when it is 270 | /// dropped. 
271 | pub struct RcGuard, T> { 272 | obj: MaybeUninit, 273 | pool: Arc>, 274 | } 275 | 276 | impl, T> RcGuard { 277 | /// Creates a new Guard for an object and a reference to the pool it 278 | /// belongs to. 279 | fn new(obj: T, pool: &Arc>) -> Self { 280 | Self { 281 | obj: MaybeUninit::new(obj), 282 | pool: pool.clone(), 283 | } 284 | } 285 | 286 | /// Consumes the guard and returns the object, without returning it to the 287 | /// pool. 288 | /// 289 | /// This method should be used with caution, as it leads to objects not 290 | /// being returned to the pool. 291 | pub fn into_inner(mut self) -> T { 292 | let obj = unsafe { self.obj.as_ptr().read() }; 293 | // Drop the arc reference 294 | unsafe { ptr::drop_in_place(&mut self.pool) } 295 | forget(self); 296 | obj 297 | } 298 | } 299 | 300 | impl, T> Deref for RcGuard { 301 | type Target = T; 302 | 303 | fn deref(&self) -> &Self::Target { 304 | unsafe { &*self.obj.as_ptr() } 305 | } 306 | } 307 | 308 | impl, T> DerefMut for RcGuard { 309 | fn deref_mut(&mut self) -> &mut Self::Target { 310 | unsafe { &mut *self.obj.as_mut_ptr() } 311 | } 312 | } 313 | 314 | /// Implementation of the Drop trait for Guard. 315 | /// 316 | /// This ensures that the object is returned to the pool when the guard is 317 | /// dropped, unless the object fails validation. 318 | impl, T> Drop for RcGuard { 319 | fn drop(&mut self) { 320 | if self.pool.allocator.is_valid(self.deref()) { 321 | let _ = self 322 | .pool 323 | .storage 324 | .push(unsafe { self.obj.as_mut_ptr().read() }); 325 | } else { 326 | unsafe { 327 | ptr::drop_in_place(self.obj.as_mut_ptr()); 328 | } 329 | } 330 | } 331 | } 332 | 333 | impl, T: Hash> Hash for RcGuard { 334 | #[inline] 335 | fn hash(&self, state: &mut H) { 336 | (**self).hash(state); 337 | } 338 | } 339 | impl, T: fmt::Display> fmt::Display for RcGuard { 340 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 341 | fmt::Display::fmt(&**self, f) 342 | } 343 | } 344 | impl, T: fmt::Debug> fmt::Debug for RcGuard { 345 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 346 | fmt::Debug::fmt(&**self, f) 347 | } 348 | } 349 | impl, T> fmt::Pointer for RcGuard { 350 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 351 | fmt::Pointer::fmt(&(&**self as *const T), f) 352 | } 353 | } 354 | impl, T: PartialEq> PartialEq for RcGuard { 355 | #[inline] 356 | fn eq(&self, other: &RcGuard) -> bool { 357 | self.deref().eq(other) 358 | } 359 | } 360 | impl, T: Eq> Eq for RcGuard {} 361 | impl, T: PartialOrd> PartialOrd for RcGuard { 362 | #[inline] 363 | fn partial_cmp(&self, other: &RcGuard) -> Option { 364 | (**self).partial_cmp(&**other) 365 | } 366 | #[inline] 367 | fn lt(&self, other: &RcGuard) -> bool { 368 | **self < **other 369 | } 370 | #[inline] 371 | fn le(&self, other: &RcGuard) -> bool { 372 | **self <= **other 373 | } 374 | #[inline] 375 | fn gt(&self, other: &RcGuard) -> bool { 376 | **self > **other 377 | } 378 | #[inline] 379 | fn ge(&self, other: &RcGuard) -> bool { 380 | **self >= **other 381 | } 382 | } 383 | impl, T: Ord> Ord for RcGuard { 384 | #[inline] 385 | fn cmp(&self, other: &RcGuard) -> core::cmp::Ordering { 386 | (**self).cmp(&**other) 387 | } 388 | } 389 | impl, T> core::borrow::Borrow for RcGuard { 390 | #[inline(always)] 391 | fn borrow(&self) -> &T { 392 | self 393 | } 394 | } 395 | impl, T> AsRef for RcGuard { 396 | #[inline(always)] 397 | fn as_ref(&self) -> &T { 398 | self 399 | } 400 | } 401 | -------------------------------------------------------------------------------- 
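The `Drop` implementations above only push an object back into the pool when the allocator's `is_valid` check passes; otherwise the object is dropped in place. A small sketch of that behaviour against the public API (the `CapAllocator` type and the capacity of 64 are illustrative choices, not part of the crate):

```rust
use opool::{Pool, PoolAllocator};

struct CapAllocator;

impl PoolAllocator<Vec<u8>> for CapAllocator {
    fn allocate(&self) -> Vec<u8> {
        Vec::with_capacity(64)
    }

    // Reject buffers that were reallocated past their original capacity so the
    // pool never retains oversized objects.
    fn is_valid(&self, obj: &Vec<u8>) -> bool {
        obj.capacity() == 64
    }
}

fn main() {
    let pool = Pool::new(4, CapAllocator);
    {
        let mut buf = pool.get();
        buf.extend_from_slice(&[0u8; 1024]); // forces a reallocation
    } // `is_valid` fails here, so the buffer is dropped instead of being pooled
    assert!(pool.is_empty());

    {
        let mut buf = pool.get();
        buf.push(1); // stays within the original capacity
    } // `is_valid` passes, so the buffer is returned to the pool
    assert_eq!(pool.len(), 1);
}
```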
/src/thread_local.rs: -------------------------------------------------------------------------------- 1 | use alloc::{collections::VecDeque, fmt, rc::Rc}; 2 | use core::{ 3 | cell::UnsafeCell, 4 | hash::{Hash, Hasher}, 5 | marker::PhantomData, 6 | mem::{forget, MaybeUninit}, 7 | ops::{Deref, DerefMut}, 8 | ptr, 9 | }; 10 | 11 | use crate::PoolAllocator; 12 | 13 | /// A struct representing an object pool for local thread, it cannot be moved 14 | /// between threads. 15 | /// 16 | /// This struct uses an allocator to create and manage objects, and stores them 17 | /// in an array. 18 | #[derive(Debug)] 19 | pub struct LocalPool, T> { 20 | allocator: P, 21 | storage: UnsafeCell>, 22 | // force the struct to be !Send 23 | _phantom: PhantomData<*mut usize>, 24 | } 25 | 26 | impl, T> LocalPool { 27 | /// Creates a new LocalPool with a given size and allocator. 28 | /// 29 | /// This method immediately fills the pool with new objects created by the 30 | /// allocator. 31 | pub fn new_prefilled(pool_size: usize, allocator: P) -> Self { 32 | let mut storage = VecDeque::with_capacity(pool_size); 33 | for _ in 0..pool_size { 34 | storage.push_back(allocator.allocate()); 35 | } 36 | LocalPool { 37 | allocator, 38 | storage: UnsafeCell::new(storage), 39 | _phantom: PhantomData, 40 | } 41 | } 42 | 43 | /// Creates a new Object Pool with a given size and allocator. 44 | /// 45 | /// Unlike [`Self::new_prefilled`], this method does not immediately fill 46 | /// the pool with objects. 47 | pub fn new(pool_size: usize, allocator: P) -> Self { 48 | LocalPool { 49 | allocator, 50 | storage: UnsafeCell::new(VecDeque::with_capacity(pool_size)), 51 | _phantom: PhantomData, 52 | } 53 | } 54 | 55 | /// Get storage as mutable reference 56 | /// Safety: it's safe to call only if the pool is used by a single threaded. 57 | #[allow(clippy::mut_from_ref)] 58 | fn storage_mut(&self) -> &mut VecDeque { 59 | unsafe { &mut *self.storage.get() } 60 | } 61 | 62 | /// Borrows storage as immutable reference 63 | /// Safety: it's safe to call only if the pool is used by a single threaded. 64 | #[allow(clippy::mut_from_ref)] 65 | fn storage_borrow(&self) -> &VecDeque { 66 | unsafe { &*self.storage.get() } 67 | } 68 | 69 | /// Wraps the pool allocator with an reference counter, enabling the 70 | /// use of [`Self::get_rc`] to obtain pool-allocated objects that rely on 71 | /// reference counted references instead of borrowed references. 72 | pub fn to_rc(self) -> Rc { 73 | Rc::new(self) 74 | } 75 | 76 | /// Attempts to get an object from the pool. 77 | /// 78 | /// If the pool is empty, None is returned. 79 | pub fn try_get(&self) -> Option> { 80 | self.storage_mut().pop_front().map(|mut obj| { 81 | self.allocator.reset(&mut obj); 82 | RefLocalGuard::new(obj, self) 83 | }) 84 | } 85 | 86 | /// Gets an object from the pool. 87 | /// 88 | /// If the pool is empty, a new object is created using the allocator. 89 | pub fn get(&'_ self) -> RefLocalGuard<'_, P, T> { 90 | match self.storage_mut().pop_front() { 91 | Some(mut obj) => { 92 | self.allocator.reset(&mut obj); 93 | RefLocalGuard::new(obj, self) 94 | } 95 | None => RefLocalGuard::new(self.allocator.allocate(), self), 96 | } 97 | } 98 | 99 | /// Attempts to get an object from the pool that holds an rc reference to 100 | /// the owning pool. Allocated objects are not as efficient as those 101 | /// allocated by [`Self::get`] method but they are easier to move as 102 | /// they are not limited by allocator lifetime directly. 
103 | /// 104 | /// If the pool is empty, None is returned. 105 | pub fn try_get_rc(self: Rc) -> Option> { 106 | self.storage_mut().pop_front().map(|mut obj| { 107 | self.allocator.reset(&mut obj); 108 | RcLocalGuard::new(obj, &self) 109 | }) 110 | } 111 | 112 | /// Gets an object from the pool that holds an rc reference to the owning 113 | /// pool. Allocated objects are not as efficient as those allocated by 114 | /// [`Self::get`] method but they are easier to move as they are not limited 115 | /// by allocator lifetime directly. 116 | /// 117 | /// If the pool is empty, a new object is created using the allocator. 118 | pub fn get_rc(self: Rc) -> RcLocalGuard { 119 | match self.storage_mut().pop_front() { 120 | Some(mut obj) => { 121 | self.allocator.reset(&mut obj); 122 | RcLocalGuard::new(obj, &self) 123 | } 124 | None => RcLocalGuard::new(self.allocator.allocate(), &self), 125 | } 126 | } 127 | 128 | /// Gets the number of objects currently in the pool. 129 | /// 130 | /// Returns the length of the internal storage, indicating the number of 131 | /// objects that are ready to be recycled from the pool. 132 | pub fn len(&self) -> usize { 133 | self.storage_borrow().len() 134 | } 135 | 136 | /// Checks if the pool is empty. 137 | /// 138 | /// Returns `true` if there are no objects currently in the pool that are 139 | /// ready to be recycled. 140 | pub fn is_empty(&self) -> bool { 141 | self.storage_borrow().is_empty() 142 | } 143 | 144 | /// Gets the capacity of the pool. 145 | /// 146 | /// Returns the maximum number of objects that the pool can hold. This does 147 | /// not indicate the maximum number of objects that can be allocated, 148 | /// but maximum objects that can be stored and recycled from the pool. 149 | pub fn capacity(&self) -> usize { 150 | self.storage_borrow().capacity() 151 | } 152 | } 153 | 154 | /// A struct representing a guard over an object in the pool. 155 | /// 156 | /// This struct ensures that the object is returned to the pool when it is 157 | /// dropped. 158 | pub struct RefLocalGuard<'a, P: PoolAllocator, T> { 159 | obj: MaybeUninit, 160 | pool: &'a LocalPool, 161 | } 162 | 163 | impl<'a, P: PoolAllocator, T> RefLocalGuard<'a, P, T> { 164 | /// Creates a new Guard for an object and a reference to the pool it 165 | /// belongs to. 166 | fn new(obj: T, pool: &'a LocalPool) -> Self { 167 | RefLocalGuard { 168 | obj: MaybeUninit::new(obj), 169 | pool, 170 | } 171 | } 172 | 173 | /// Consumes the guard and returns the object, without returning it to the 174 | /// pool. 175 | /// 176 | /// This method should be used with caution, as it leads to objects not 177 | /// being returned to the pool. 178 | pub fn into_inner(self) -> T { 179 | let obj = unsafe { self.obj.as_ptr().read() }; 180 | forget(self); 181 | obj 182 | } 183 | } 184 | 185 | impl<'a, P: PoolAllocator, T> Deref for RefLocalGuard<'a, P, T> { 186 | type Target = T; 187 | 188 | fn deref(&self) -> &Self::Target { 189 | unsafe { &*self.obj.as_ptr() } 190 | } 191 | } 192 | 193 | impl<'a, P: PoolAllocator, T> DerefMut for RefLocalGuard<'a, P, T> { 194 | fn deref_mut(&mut self) -> &mut Self::Target { 195 | unsafe { &mut *self.obj.as_mut_ptr() } 196 | } 197 | } 198 | 199 | /// Implementation of the Drop trait for Guard. 200 | /// 201 | /// This ensures that the object is returned to the pool when the guard is 202 | /// dropped, unless the object fails validation. 
203 | impl<'a, P: PoolAllocator, T> Drop for RefLocalGuard<'a, P, T> { 204 | fn drop(&mut self) { 205 | let storage = self.pool.storage_mut(); 206 | if self.pool.allocator.is_valid(self.deref()) && storage.len() < storage.capacity() { 207 | // Safety: object is not moved and valid for this single move to the pool. 208 | storage.push_back(unsafe { ptr::read(self.obj.as_mut_ptr()) }); 209 | } else { 210 | // Safety: object is not moved back to the pool it is safe to drop it in place. 211 | unsafe { 212 | ptr::drop_in_place(self.obj.as_mut_ptr()); 213 | } 214 | } 215 | } 216 | } 217 | 218 | impl<'a, P: PoolAllocator, T: Hash> Hash for RefLocalGuard<'a, P, T> { 219 | #[inline] 220 | fn hash(&self, state: &mut H) { 221 | (**self).hash(state); 222 | } 223 | } 224 | impl<'a, P: PoolAllocator, T: fmt::Display> fmt::Display for RefLocalGuard<'a, P, T> { 225 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 226 | fmt::Display::fmt(&**self, f) 227 | } 228 | } 229 | impl<'a, P: PoolAllocator, T: fmt::Debug> fmt::Debug for RefLocalGuard<'a, P, T> { 230 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 231 | fmt::Debug::fmt(&**self, f) 232 | } 233 | } 234 | impl<'a, P: PoolAllocator, T> fmt::Pointer for RefLocalGuard<'a, P, T> { 235 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 236 | fmt::Pointer::fmt(&(&**self as *const T), f) 237 | } 238 | } 239 | impl<'a, P: PoolAllocator, T: PartialEq> PartialEq for RefLocalGuard<'a, P, T> { 240 | #[inline] 241 | fn eq(&self, other: &RefLocalGuard<'a, P, T>) -> bool { 242 | self.deref().eq(other) 243 | } 244 | } 245 | impl<'a, P: PoolAllocator, T: Eq> Eq for RefLocalGuard<'a, P, T> {} 246 | impl<'a, P: PoolAllocator, T: PartialOrd> PartialOrd for RefLocalGuard<'a, P, T> { 247 | #[inline] 248 | fn partial_cmp(&self, other: &RefLocalGuard<'a, P, T>) -> Option { 249 | (**self).partial_cmp(&**other) 250 | } 251 | #[inline] 252 | fn lt(&self, other: &RefLocalGuard<'a, P, T>) -> bool { 253 | **self < **other 254 | } 255 | #[inline] 256 | fn le(&self, other: &RefLocalGuard<'a, P, T>) -> bool { 257 | **self <= **other 258 | } 259 | #[inline] 260 | fn gt(&self, other: &RefLocalGuard<'a, P, T>) -> bool { 261 | **self > **other 262 | } 263 | #[inline] 264 | fn ge(&self, other: &RefLocalGuard<'a, P, T>) -> bool { 265 | **self >= **other 266 | } 267 | } 268 | impl<'a, P: PoolAllocator, T: Ord> Ord for RefLocalGuard<'a, P, T> { 269 | #[inline] 270 | fn cmp(&self, other: &RefLocalGuard<'a, P, T>) -> core::cmp::Ordering { 271 | (**self).cmp(&**other) 272 | } 273 | } 274 | impl<'a, P: PoolAllocator, T> core::borrow::Borrow for RefLocalGuard<'a, P, T> { 275 | #[inline(always)] 276 | fn borrow(&self) -> &T { 277 | self 278 | } 279 | } 280 | impl<'a, P: PoolAllocator, T> AsRef for RefLocalGuard<'a, P, T> { 281 | #[inline(always)] 282 | fn as_ref(&self) -> &T { 283 | self 284 | } 285 | } 286 | 287 | /// A struct representing a guard over an object in the pool. 288 | /// 289 | /// This struct ensures that the object is returned to the pool when it is 290 | /// dropped. 291 | pub struct RcLocalGuard, T> { 292 | obj: MaybeUninit, 293 | pool: Rc>, 294 | } 295 | 296 | impl, T> RcLocalGuard { 297 | /// Creates a new Guard for an object and a reference to the pool it 298 | /// belongs to. 299 | fn new(obj: T, pool: &Rc>) -> Self { 300 | Self { 301 | obj: MaybeUninit::new(obj), 302 | pool: pool.clone(), 303 | } 304 | } 305 | 306 | /// Consumes the guard and returns the object, without returning it to the 307 | /// pool. 
308 | /// 309 | /// This method should be used with caution, as it leads to objects not 310 | /// being returned to the pool. 311 | pub fn into_inner(mut self) -> T { 312 | let obj = unsafe { self.obj.as_ptr().read() }; 313 | // Drop the arc reference 314 | unsafe { ptr::drop_in_place(&mut self.pool) } 315 | forget(self); 316 | obj 317 | } 318 | } 319 | 320 | impl, T> Deref for RcLocalGuard { 321 | type Target = T; 322 | #[inline(always)] 323 | fn deref(&self) -> &Self::Target { 324 | unsafe { &*self.obj.as_ptr() } 325 | } 326 | } 327 | 328 | impl, T> DerefMut for RcLocalGuard { 329 | #[inline(always)] 330 | fn deref_mut(&mut self) -> &mut Self::Target { 331 | unsafe { &mut *self.obj.as_mut_ptr() } 332 | } 333 | } 334 | 335 | /// Implementation of the Drop trait for Guard. 336 | /// 337 | /// This ensures that the object is returned to the pool when the guard is 338 | /// dropped, unless the object fails validation. 339 | impl, T> Drop for RcLocalGuard { 340 | fn drop(&mut self) { 341 | let storage = self.pool.storage_mut(); 342 | if self.pool.allocator.is_valid(self.deref()) && storage.len() < storage.capacity() { 343 | // Safety: object is not moved and valid for this single move to the pool. 344 | storage.push_back(unsafe { ptr::read(self.obj.as_mut_ptr()) }); 345 | } else { 346 | // Safety: object is not moved back to the pool it is safe to drop it in place. 347 | unsafe { 348 | ptr::drop_in_place(self.obj.as_mut_ptr()); 349 | } 350 | } 351 | } 352 | } 353 | 354 | impl, T: Hash> Hash for RcLocalGuard { 355 | #[inline] 356 | fn hash(&self, state: &mut H) { 357 | (**self).hash(state); 358 | } 359 | } 360 | impl, T: fmt::Display> fmt::Display for RcLocalGuard { 361 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 362 | fmt::Display::fmt(&**self, f) 363 | } 364 | } 365 | impl, T: fmt::Debug> fmt::Debug for RcLocalGuard { 366 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 367 | fmt::Debug::fmt(&**self, f) 368 | } 369 | } 370 | impl, T> fmt::Pointer for RcLocalGuard { 371 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 372 | fmt::Pointer::fmt(&(&**self as *const T), f) 373 | } 374 | } 375 | impl, T: PartialEq> PartialEq for RcLocalGuard { 376 | #[inline] 377 | fn eq(&self, other: &RcLocalGuard) -> bool { 378 | self.deref().eq(other) 379 | } 380 | } 381 | impl, T: Eq> Eq for RcLocalGuard {} 382 | impl, T: PartialOrd> PartialOrd for RcLocalGuard { 383 | #[inline] 384 | fn partial_cmp(&self, other: &RcLocalGuard) -> Option { 385 | (**self).partial_cmp(&**other) 386 | } 387 | #[inline] 388 | fn lt(&self, other: &RcLocalGuard) -> bool { 389 | **self < **other 390 | } 391 | #[inline] 392 | fn le(&self, other: &RcLocalGuard) -> bool { 393 | **self <= **other 394 | } 395 | #[inline] 396 | fn gt(&self, other: &RcLocalGuard) -> bool { 397 | **self > **other 398 | } 399 | #[inline] 400 | fn ge(&self, other: &RcLocalGuard) -> bool { 401 | **self >= **other 402 | } 403 | } 404 | impl, T: Ord> Ord for RcLocalGuard { 405 | #[inline] 406 | fn cmp(&self, other: &RcLocalGuard) -> core::cmp::Ordering { 407 | (**self).cmp(&**other) 408 | } 409 | } 410 | impl, T> core::borrow::Borrow for RcLocalGuard { 411 | #[inline(always)] 412 | fn borrow(&self) -> &T { 413 | self 414 | } 415 | } 416 | impl, T> AsRef for RcLocalGuard { 417 | #[inline(always)] 418 | fn as_ref(&self) -> &T { 419 | self 420 | } 421 | } 422 | --------------------------------------------------------------------------------