├── .gitignore
├── .travis.yml
├── Cargo.toml
├── LICENSE
├── README.md
├── deploy.sh
├── src
│   ├── lib.rs
│   └── reset.rs
└── test
    └── test.rs

/.gitignore:
--------------------------------------------------------------------------------
target
Cargo.lock

--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
---
sudo: false
language: rust
rust:
  - nightly
  - beta

script:
  - cargo test
  - cargo doc --no-deps

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]

name = "pool"
version = "0.1.4"
license = "MIT"
authors = ["Carl Lerche <me@carllerche.com>"]
description = "A pool of reusable values"
documentation = "https://carllerche.github.io/pool/pool"
homepage = "https://github.com/carllerche/pool"
repository = "https://github.com/carllerche/pool"
readme = "README.md"
keywords = ["pool"]
exclude = [
  ".gitignore",
  ".travis.yml",
  "deploy.sh",
  "test/**/*",
]

[[test]]

name = "test"
path = "test/test.rs"

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2015 Carl Lerche

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# A pool of reusable values

A Rust library providing a pool structure for managing reusable values.
All values in the pool are initialized when the pool is created. Values
can be checked out from the pool at any time. When the checked out value
goes out of scope, the value is returned to the pool and made available
for checkout at a later time.

[![Build Status](https://travis-ci.org/carllerche/pool.svg?branch=master)](https://travis-ci.org/carllerche/pool)

- [API documentation](http://carllerche.github.io/pool/pool/)

- [Crates.io](https://crates.io/crates/pool)

## Usage

To use `pool`, first add this to your `Cargo.toml`:

```toml
[dependencies]
pool = "0.1.4"
```

Then, add this to your crate root:

```rust
extern crate pool;
```
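
A minimal sketch of the checkout / return cycle described above (it uses the
`Dirty` wrapper re-exported by the crate, which skips resetting a value when
it comes back to the pool):

```rust
extern crate pool;

use pool::{Pool, Dirty};

fn main() {
    // A pool of 10 vectors, with no extra byte storage per entry
    let mut pool = Pool::with_capacity(10, 0, || Dirty(Vec::with_capacity(1024)));

    {
        // The checkout is handed back to the pool when it goes out of scope
        let mut val = pool.checkout().unwrap();
        val.push(123);
    }

    // Because of `Dirty`, the value still has the state it had when it was
    // checked back in
    let val = pool.checkout().unwrap();
    assert_eq!(val.len(), 1);
}
```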

## Features

* Simple
* Lock-free: values can be returned to the pool across threads
* Stores typed values and / or slabs of memory

--------------------------------------------------------------------------------
/deploy.sh:
--------------------------------------------------------------------------------
#!/bin/bash

rev=$(git rev-parse --short HEAD)

cd target/doc

git init
git config user.name "Carl Lerche"
git config user.email "me@carllerche.com"

git remote add upstream "https://$GH_TOKEN@github.com/carllerche/pool"
git fetch upstream && git reset upstream/gh-pages

touch .

git add -A .
git commit -m "rebuild pages at ${rev}"
git push -q upstream HEAD:gh-pages

--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
//! # A store of pre-initialized values.
//!
//! Values can be checked out when needed, operated on, and will automatically
//! be returned to the pool when they go out of scope. It can be used when
//! handling values that are expensive to create. Based on the [object pool
//! pattern](http://en.wikipedia.org/wiki/Object_pool_pattern).
//!
//! Example:
//!
//! ```
//! use pool::{Pool, Dirty};
//! use std::thread;
//!
//! let mut pool = Pool::with_capacity(20, 0, || Dirty(Vec::with_capacity(16_384)));
//!
//! let mut vec = pool.checkout().unwrap();
//!
//! // Do some work with the value, this can happen in another thread
//! thread::spawn(move || {
//!     for i in 0..10_000 {
//!         vec.push(i);
//!     }
//!
//!     assert_eq!(10_000, vec.len());
//! }).join();
//!
//! // The vec will have been returned to the pool by now
//! let vec = pool.checkout().unwrap();
//!
//! // The pool operates LIFO, so this vec will be the same value that was used
//! // in the thread above. The value will also be left as it was when it was
//! // returned to the pool; this may or may not be desirable depending on the
//! // use case.
//! assert_eq!(10_000, vec.len());
//!
//! ```
//!
//! ## Extra byte storage
//!
//! Each value in the pool can be padded with an arbitrary number of bytes that
//! can be accessed as a slice. This is useful if implementing something like a
//! pool of buffers. The metadata could be stored as the `Pool` value and the
//! byte array can be stored in the padding.
//!
//! ## Threading
//!
//! Checking out values from the pool requires a mutable reference to the pool
//! so cannot happen concurrently across threads, but returning values to the
//! pool is thread safe and lock free, so if the value being pooled is `Sync`
//! then `Checkout` is `Sync` as well.
//!
//! The easiest way to have a single pool shared across many threads would be
//! to wrap `Pool` in a mutex.
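//!
//! For example, a minimal sketch of sharing a pool behind a mutex (it assumes
//! a pooled type such as `i32` that implements the `Reset` trait):
//!
//! ```
//! use pool::Pool;
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
//! let pool = Arc::new(Mutex::new(Pool::with_capacity(10, 0, || 0i32)));
//!
//! let shared = pool.clone();
//! thread::spawn(move || {
//!     // Lock the mutex to get the mutable access `checkout` requires
//!     let mut pool = shared.lock().unwrap();
//!     let _val = pool.checkout().unwrap();
//!     // `_val` is checked back in when it goes out of scope
//! }).join().unwrap();
//!
//! assert!(pool.lock().unwrap().checkout().is_some());
//! ```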
use std::{mem, ops, ptr, usize};
use std::cell::UnsafeCell;
use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize, Ordering};
pub use reset::{Reset, Dirty};

mod reset;

/// A pool of reusable values
pub struct Pool<T> {
    inner: Arc<UnsafeCell<PoolInner<T>>>,
}

impl<T: Reset> Pool<T> {
    /// Creates a new pool that can contain up to `count` entries, each padded
    /// with `extra` extra bytes. Initializes each entry with the given function.
    pub fn with_capacity<F>(count: usize, mut extra: usize, init: F) -> Pool<T>
            where F: Fn() -> T {

        let mut inner = PoolInner::with_capacity(count, extra);

        // Get the actual number of extra bytes
        extra = inner.entry_size - mem::size_of::<Entry<T>>();

        // Initialize the entries
        for i in 0..count {
            unsafe {
                ptr::write(inner.entry_mut(i), Entry {
                    data: init(),
                    next: i + 1,
                    extra: extra,
                });
            }
            inner.init += 1;
        }

        Pool { inner: Arc::new(UnsafeCell::new(inner)) }
    }

    /// Checkout a value from the pool. Returns `None` if the pool is currently
    /// at capacity.
    ///
    /// The value is passed through its `Reset` implementation before it is
    /// returned. Values wrapped in `Dirty` skip that step and keep the state
    /// they had when they were last released.
    pub fn checkout(&mut self) -> Option<Checkout<T>> {
        self.inner_mut().checkout()
            .map(|ptr| {
                Checkout {
                    entry: ptr,
                    inner: self.inner.clone(),
                }
            }).map(|mut checkout| {
                checkout.reset();
                checkout
            })
    }

    fn inner_mut(&self) -> &mut PoolInner<T> {
        unsafe { mem::transmute(self.inner.get()) }
    }
}

unsafe impl<T: Send> Send for Pool<T> { }

/// A handle to a checked out value. When it goes out of scope, the value is
/// returned to the pool.
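///
/// # Examples
///
/// A minimal sketch of reading and writing the extra byte storage (it assumes
/// the pool was created with a non-zero `extra` value):
///
/// ```
/// use pool::{Pool, Dirty};
///
/// let mut pool = Pool::with_capacity(1, 1024, || Dirty(()));
/// let mut checkout = pool.checkout().unwrap();
///
/// // The padding bytes live directly after the value and start out zeroed
/// assert!(checkout.extra().len() >= 1024);
/// checkout.extra_mut()[0] = 123;
/// assert_eq!(123, checkout.extra()[0]);
/// ```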
pub struct Checkout<T> {
    entry: *mut Entry<T>,
    inner: Arc<UnsafeCell<PoolInner<T>>>,
}

impl<T> Checkout<T> {
    /// Read access to the raw bytes
    pub fn extra(&self) -> &[u8] {
        self.entry().extra()
    }

    /// Write access to the extra bytes
    pub fn extra_mut(&mut self) -> &mut [u8] {
        self.entry_mut().extra_mut()
    }

    fn entry(&self) -> &Entry<T> {
        unsafe { mem::transmute(self.entry) }
    }

    fn entry_mut(&mut self) -> &mut Entry<T> {
        unsafe { mem::transmute(self.entry) }
    }

    fn inner(&self) -> &mut PoolInner<T> {
        unsafe { mem::transmute(self.inner.get()) }
    }
}

impl<T> ops::Deref for Checkout<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.entry().data
    }
}

impl<T> ops::DerefMut for Checkout<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.entry_mut().data
    }
}

impl<T> Drop for Checkout<T> {
    fn drop(&mut self) {
        self.inner().checkin(self.entry);
    }
}

unsafe impl<T: Send> Send for Checkout<T> { }
unsafe impl<T: Sync> Sync for Checkout<T> { }

struct PoolInner<T> {
    #[allow(dead_code)]
    memory: Box<[u8]>,  // Ownership of raw memory
    next: AtomicUsize,  // Offset to next available value
    ptr: *mut Entry<T>, // Pointer to first entry
    init: usize,        // Number of initialized entries
    count: usize,       // Total number of entries
    entry_size: usize,  // Byte size of each entry
}

// Max size of the pool
const MAX: usize = usize::MAX >> 1;

impl<T> PoolInner<T> {
    fn with_capacity(count: usize, mut extra: usize) -> PoolInner<T> {
        // The required alignment for the entry. The start of the entry must
        // align with this number
        let align = mem::align_of::<Entry<T>>();

        // Check that the capacity is not too large
        assert!(count < MAX, "requested pool size too big");
        assert!(align > 0, "something weird is up with the requested alignment");

        let mask = align - 1;

        // If the requested extra memory does not match with the align,
        // increase it so that it does.
        if extra & mask != 0 {
            extra = (extra + align) & !mask;
        }

        // Calculate the size of each entry. Since the extra bytes are
        // immediately after the entry, just add the sizes
        let entry_size = mem::size_of::<Entry<T>>() + extra;

        // This should always be true, but let's check it anyway
        assert!(entry_size & mask == 0, "entry size is not aligned");

        // Ensure that the total memory needed is possible. It must be
        // representable by an `isize` value in order for pointer offset to
        // work.
        assert!(entry_size.checked_mul(count).is_some(), "requested pool capacity too big");
        assert!(entry_size * count < MAX, "requested pool capacity too big");

        let size = count * entry_size;

        // Allocate the memory
        let (memory, ptr) = alloc(size, align);

        // Zero out the memory for safety
        unsafe {
            ptr::write_bytes(ptr, 0, size);
        }

        PoolInner {
            memory: memory,
            next: AtomicUsize::new(0),
            ptr: ptr as *mut Entry<T>,
            init: 0,
            count: count,
            entry_size: entry_size,
        }
    }

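    // The unused entries form an intrusive free list: each `Entry::next`
    // holds the index of the next free entry and the atomic `self.next`
    // holds the index of the head. `checkout` pops the head with a
    // compare-and-swap loop and `checkin` pushes the entry back the same
    // way, which is what keeps returning values to the pool lock-free.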
    fn checkout(&mut self) -> Option<*mut Entry<T>> {
        let mut idx = self.next.load(Ordering::Acquire);

        loop {
            debug_assert!(idx <= self.count, "invalid index: {}", idx);

            if idx == self.count {
                // The pool is depleted
                return None;
            }

            let nxt = self.entry_mut(idx).next;

            debug_assert!(nxt <= self.count, "invalid next index: {}", idx);

            let res = self.next.compare_and_swap(idx, nxt, Ordering::Relaxed);

            if res == idx {
                break;
            }

            // Re-acquire the memory before trying again
            atomic::fence(Ordering::Acquire);
            idx = res;
        }

        Some(self.entry_mut(idx) as *mut Entry<T>)
    }

    fn checkin(&self, ptr: *mut Entry<T>) {
        let mut idx;
        let mut entry: &mut Entry<T>;

        unsafe {
            // Figure out the index
            idx = ((ptr as usize) - (self.ptr as usize)) / self.entry_size;
            entry = mem::transmute(ptr);
        }

        debug_assert!(idx < self.count, "invalid index; idx={}", idx);

        let mut nxt = self.next.load(Ordering::Relaxed);

        loop {
            // Update the entry's next pointer
            entry.next = nxt;

            let actual = self.next.compare_and_swap(nxt, idx, Ordering::Release);

            if actual == nxt {
                break;
            }

            nxt = actual;
        }
    }

    fn entry(&self, idx: usize) -> &Entry<T> {
        unsafe {
            debug_assert!(idx < self.count, "invalid index");
            let ptr = self.ptr.offset(idx as isize);
            mem::transmute(ptr)
        }
    }

    #[allow(mutable_transmutes)]
    fn entry_mut(&mut self, idx: usize) -> &mut Entry<T> {
        unsafe { mem::transmute(self.entry(idx)) }
    }
}

impl<T> Drop for PoolInner<T> {
    fn drop(&mut self) {
        for i in 0..self.init {
            unsafe {
                let _ = ptr::read(self.entry(i));
            }
        }
    }
}

struct Entry<T> {
    data: T,      // Keep first
    next: usize,  // Index of next available entry
    extra: usize, // Number of extra bytes available
}

impl<T> Entry<T> {
    fn extra(&self) -> &[u8] {
        use std::slice;

        unsafe {
            let ptr: *const u8 = mem::transmute(self);
            let ptr = ptr.offset(mem::size_of::<Entry<T>>() as isize);

            slice::from_raw_parts(ptr, self.extra)
        }
    }

    #[allow(mutable_transmutes)]
    fn extra_mut(&mut self) -> &mut [u8] {
        unsafe { mem::transmute(self.extra()) }
    }
}

/// Allocate memory
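///
/// The requested alignment is obtained by over-allocating `align` extra bytes
/// and, when needed, rounding the start pointer up to the next `align`
/// boundary. The boxed slice is returned alongside the aligned pointer so
/// that it keeps ownership of the underlying allocation.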
fn alloc(mut size: usize, align: usize) -> (Box<[u8]>, *mut u8) {
    size += align;

    unsafe {
        // Allocate the memory
        let mut vec = Vec::with_capacity(size);
        vec.set_len(size);

        // Juggle values around
        let mut mem = vec.into_boxed_slice();
        let ptr = (*mem).as_mut_ptr();

        // Align the pointer
        let p = ptr as usize;
        let m = align - 1;

        if p & m != 0 {
            let p = (p + align) & !m;
            return (mem, p as *mut u8);
        }

        (mem, ptr)
    }
}

--------------------------------------------------------------------------------
/src/reset.rs:
--------------------------------------------------------------------------------
use std::default::Default;
use std::ops::{Deref, DerefMut};

#[derive(Debug)]
pub struct Dirty<T>(pub T);

impl<T> Reset for Dirty<T> {
    fn reset(&mut self) {
        // Do nothing!
    }
}

unsafe impl<T: Send> Send for Dirty<T> {}
unsafe impl<T: Sync> Sync for Dirty<T> {}

impl<T> Deref for Dirty<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}

impl<T> DerefMut for Dirty<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.0
    }
}

/// Resetting an object reverts that object back to a default state.
pub trait Reset {
    fn reset(&mut self);
}

// For most of the stdlib collections, this will "clear" the collection
// without deallocating.
impl<T: Clone + Default> Reset for T {
    fn reset(&mut self) {
        self.clone_from(&Default::default());
    }
}

--------------------------------------------------------------------------------
/test/test.rs:
--------------------------------------------------------------------------------
extern crate pool;

use pool::{Pool, Dirty};

#[test]
pub fn test_checkout_checkin() {
    let mut pool: Pool<Dirty<i32>> = Pool::with_capacity(10, 0, || Dirty(0));

    let mut val = pool.checkout().unwrap();
    assert_eq!(**val, 0);

    // Update the value & return to the pool
    *val = Dirty(1);
    drop(val);

    let val = pool.checkout().unwrap();
    assert_eq!(**val, 1);
}

#[test]
pub fn test_multiple_checkouts() {
    let mut pool: Pool<i32> = Pool::with_capacity(10, 0, || 0);

    // Use this to hold on to the checkouts
    let mut vec = vec![];

    for _ in 0..10 {
        let mut i = pool.checkout().unwrap();
        assert_eq!(*i, 0);
        *i = 1;
        vec.push(i);
    }
}

#[test]
pub fn test_depleting_pool() {
    let mut pool: Pool<i32> = Pool::with_capacity(5, 0, || 0);

    let mut vec = vec![];

    for _ in 0..5 {
        vec.push(pool.checkout().unwrap());
    }

    assert!(pool.checkout().is_none());
    drop(vec);
    assert!(pool.checkout().is_some());
}

#[test]
pub fn test_resetting_pool() {
    let mut pool: Pool<Vec<u8>> = Pool::with_capacity(1, 0, || Vec::new());
    {
        let mut val = pool.checkout().unwrap();
        val.push(5);
        val.push(6);
    }
    {
        let val = pool.checkout().unwrap();
        assert!(val.len() == 0);
    }
}

#[derive(Clone, Default)]
struct Zomg;

impl Drop for Zomg {
    fn drop(&mut self) {
        println!("Dropping");
    }
}

#[test]
pub fn test_works_with_drop_types() {
    let _ = pool::Pool::with_capacity(1, 0, || Zomg);
}

#[test]
#[should_panic]
pub fn test_safe_when_init_panics() {
    let _ = pool::Pool::<usize>::with_capacity(1, 0, || panic!("oops"));
}

// TODO: Add concurrency stress tests
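// A minimal sketch of such a stress test: values are checked out on the main
// thread and checked back in from spawned threads (this relies on `Checkout`
// being `Send`).
#[test]
pub fn test_concurrent_checkin_sketch() {
    use std::thread;

    let mut pool: Pool<i32> = Pool::with_capacity(4, 0, || 0);

    let mut handles = vec![];

    for _ in 0..4 {
        // Check out on this thread, return the value from another thread
        let val = pool.checkout().unwrap();

        handles.push(thread::spawn(move || {
            drop(val);
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }

    // Every value should be back in the pool now
    let all: Vec<_> = (0..4).map(|_| pool.checkout().unwrap()).collect();
    assert!(pool.checkout().is_none());
    drop(all);
}

--------------------------------------------------------------------------------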