├── .travis.yml
├── .gitignore
├── Cargo.toml
├── src
│   ├── lib.rs
│   └── block_allocator.rs
├── LICENSE
├── README.md
└── Cargo.lock

--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------

language: rust
rust:
  - nightly

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Compiled files
*.o
*.so
*.rlib
*.dll
*.swp
*.swo
*.log

# Executables
*.exe

# Generated by Cargo
/target/

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "block_allocator"
version = "0.2.2"
description = "Thread-safe, fixed-size allocator that serves re-usable blocks"
repository = "https://github.com/rrichardson/block_alloc"
homepage = "https://github.com/rrichardson/block_alloc"
authors = ["Rick Richardson "]
license-file = "LICENSE"

[dependencies]
memmap = "0.6"

--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
//! A thread-safe, reusable allocator of fixed-size blocks
//!
//! (c) 2015 Rick Richardson
//!
//! Provides fixed-size buffers out of a pre-allocated arena.
//!
//! # Limitations
//! * Max number of buffers that can be provided is `u32::MAX` - 1
//! * Max size of a buffer is `u32::MAX`
//! * Currently only works on 64 bit architectures
//! * (all of these due to a present limitation in Atomic types)
//!
//!
#![feature(test)]

#![feature(integer_atomics)]

extern crate memmap;
extern crate test;
extern crate core;

mod block_allocator;

pub use block_allocator::Allocator;

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2015 Rick Richardson

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

# Block Allocator

[![crates.io](https://img.shields.io/crates/v/block_allocator.svg)](https://crates.io/crates/block_allocator/)

[![Build Status](https://travis-ci.org/rrichardson/block_alloc.svg?branch=master)](https://travis-ci.org/rrichardson/block_alloc)

This is a basic, thread-safe, fixed-size arena which allocates re-usable blocks
of your specified size. Re-usable, meaning this isn't a use-once arena: blocks are
free-able and re-allocatable. The intended use case for this allocator is
multithreaded services which frequently allocate and deallocate uniform blocks of
memory, such as web servers or videogames. It can run forever without any loss of
performance due to fragmentation.

Presently it will allocate mutable `u8` slices, which are bound to the lifetime of
the allocator itself, or raw `*mut u8` pointers.

Its current limitations are that it only works on 64 bit architectures, and it can
only manage `u32::MAX - 1` blocks, because it uses a pair of 32 bit numbers for
offset management. After some [refactoring to atomics](https://github.com/rust-lang/rust/issues/24564)
some time in the future, both limitations will be lifted.

It is currently fairly fast: running concurrently, it can alloc then free in 28ns
per iteration, regardless of the size of the buffer or arena.

To use it, simply construct a new allocator, specifying the size of the block and
the number of blocks you would like the allocator to manage (note that this number
is not growable at runtime, so choose wisely):

```rust
// create blocks of size 256 with a max of 100
let myalloc = Allocator::new(256, 100).unwrap();
```

then you can alloc and free to your heart's content

```rust
let buf : &mut [u8] = myalloc.alloc().unwrap();
myalloc.free(buf).unwrap();
```

or

```rust
let ptr : *mut u8 = unsafe { myalloc.alloc_raw().unwrap() };
unsafe { myalloc.free_raw(ptr).unwrap(); }
```
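
The allocator is `Sync`, so a single instance can be shared across threads via
`Arc`. Here is a sketch of that pattern, adapted from this crate's own concurrency
test (the thread and iteration counts are arbitrary):

```rust
use std::sync::Arc;
use std::thread;

let myalloc = Arc::new(Allocator::new(256, 1000).unwrap());

// each thread repeatedly borrows a block from the shared arena and returns it
let threads: Vec<thread::JoinHandle<()>> = (0..10).map(|_| {
    let ma = myalloc.clone();
    thread::spawn(move || {
        for _ in 0..1000 {
            let buf = ma.alloc().unwrap();
            // ... fill and use the block ...
            ma.free(buf).unwrap();
        }
    })
}).collect();

for t in threads {
    t.join().unwrap();
}
```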
"registry+https://github.com/rust-lang/crates.io-index" 36 | 37 | [[package]] 38 | name = "winapi-build" 39 | version = "0.1.1" 40 | source = "registry+https://github.com/rust-lang/crates.io-index" 41 | 42 | [metadata] 43 | "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" 44 | "checksum libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "5ba3df4dcb460b9dfbd070d41c94c19209620c191b0340b929ce748a2bcd42d2" 45 | "checksum memmap 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a1593a4c81566ec131e1df1bb7c5822e47cec15ffb4fc58cd0e154625b2986d9" 46 | "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" 47 | "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" 48 | -------------------------------------------------------------------------------- /src/block_allocator.rs: -------------------------------------------------------------------------------- 1 | //! Basic block allocator implementation 2 | //! 3 | //! (c) 2015, 2016 Rick Richardson 4 | //! 5 | //! 6 | //! 7 | 8 | 9 | use memmap::MmapMut; 10 | use std::mem; 11 | use std::u32; 12 | use std::sync::atomic::{AtomicUsize, AtomicU32, Ordering}; 13 | use std::slice; 14 | use std::error::Error; 15 | use std::fmt::{self, Display}; 16 | use std::marker::PhantomData; 17 | use std::cell::UnsafeCell; 18 | 19 | const PAGE_SIZE : u32 = (1 << 12); 20 | 21 | /// Allocator 22 | /// Provides fixed-sized buffers from a pre-allocated arena specified at creation 23 | /// Current limitations: Max number of buffers it can produce is `u32::MAX` - 1 24 | /// Multiple allocators may be in use at any time, but their buffers may not be 25 | /// used interchangibly :) 26 | /// 27 | /// Note : This allocator will only produce blocks in sizes of powers of two. 
///
/// # Example
/// ```
/// use block_allocator::Allocator;
///
/// //reserve 100 usable blocks of 512 bytes
/// let myalloc = Allocator::new(512, 100).unwrap();
/// let buf = myalloc.alloc().unwrap();
///
/// //do stuff
///
/// myalloc.free(buf).unwrap();
/// ```
///
pub struct Allocator<'a> {
    head : AtomicUsize,
    block_size : u32,
    freelist : UnsafeCell<&'static [AtomicU32]>,
    data : UnsafeCell<*mut u8>,
    _region : MmapMut,
    num_blocks : u32,
    _phantom: PhantomData<&'a u8>
}

impl<'a> Allocator<'a> {

    /// Constructs a new Block Allocator
    pub fn new(block_size: u32, num_blocks: u32) -> Result<Allocator<'a>, AllocError> {
        // for now this can only work on 64 bit platforms
        // it would be nice to have atomics other than register sizes
        assert!(mem::size_of::<usize>() >= mem::size_of::<u64>());
        assert!(num_blocks < u32::MAX); //we can support u32::MAX - 1 entries
        assert!(block_size >= mem::size_of::<u32>() as u32);
        assert!(num_blocks > 0);
        assert!(block_size.is_power_of_two());

        let table_size = num_blocks * mem::size_of::<AtomicU32>() as u32;
        // round the freelist table up to a whole number of pages so that the
        // data region starts on a page boundary
        let table_size = PAGE_SIZE + (table_size & !(PAGE_SIZE - 1));

        let mut rgn = match MmapMut::map_anon(table_size as usize + (block_size as usize * num_blocks as usize)) {
            Ok(r) => r,
            Err(e) => return Err(AllocError::MemoryMapFail(format!("{}", e)))
        };

        let table : &[AtomicU32] = unsafe {
            slice::from_raw_parts_mut(mem::transmute::<_,_>(rgn.as_mut_ptr()), num_blocks as usize)
        };

        //initialize the "linked list" within the table
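        // after this loop each entry i holds i + 1, i.e. every free block names
        // the next free one; with num_blocks = 4, for example, the table reads
        // [1, 2, 3, u32::MAX]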
        for i in 0 .. (num_blocks as usize - 1) {
            table[i].store(i as u32 + 1, Ordering::Relaxed);
        }
        table[num_blocks as usize - 1].store(u32::MAX, Ordering::Relaxed); //sentinel value indicating end of list

        let data = unsafe { rgn.as_mut_ptr().offset(table_size as isize) };

        Ok(Allocator {
            head : AtomicUsize::new(0),
            block_size : block_size,
            num_blocks : num_blocks,
            _region : rgn,
            data : UnsafeCell::new(data),
            freelist : UnsafeCell::new(table),
            _phantom: PhantomData
        })
    }

    /// Acquire the next free buffer from the allocator's slab
    pub fn alloc(&self) -> Result<&'a mut [u8], AllocError> { unsafe {
        self.alloc_raw().map(|a|
            slice::from_raw_parts_mut(a, self.block_size as usize)
        )
    }}

    /// Free the buffer back into the allocator's slab
    pub fn free(&self, buf: &'a mut [u8]) -> Result<(), AllocError> {
        if buf.len() as u32 != self.block_size {
            return Err(AllocError::BadArgument("Slice != allocator's block_size".to_string()));
        }
        unsafe { self.free_raw(buf.as_mut_ptr()) }
    }

    /// Acquire the next buffer as a raw `*mut u8` pointer from the allocator's slab
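    ///
    /// For illustration, adapted from this crate's `basic_raw` test (the caller is
    /// responsible for eventually handing the pointer back via `free_raw`):
    ///
    /// ```no_run
    /// use block_allocator::Allocator;
    ///
    /// let myalloc = Allocator::new(256, 100).unwrap();
    /// let ptr = unsafe { myalloc.alloc_raw().unwrap() };
    /// // ... use the block ...
    /// unsafe { myalloc.free_raw(ptr).unwrap(); }
    /// ```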
    pub unsafe fn alloc_raw(&self) -> Result<*mut u8, AllocError> {

        let mut hd = self.head.load(Ordering::Acquire);
        let hd_ary : &[u32; 2] = mem::transmute(&hd);
        let mut offset = hd_ary[0]; //element [0] of the pair is the offset of the head of the free list
        //println!("alloc - Loaded head {} | {}", hd_ary[0], hd_ary[1]);
        if offset == u32::MAX {
            return Err(AllocError::NoMemory);
        }

        loop {

            offset = self.get_next_offset(hd_ary[0]);
            let counter = hd_ary[1];
            let newhd_ary = [offset, counter.wrapping_add(1)];
            //println!("alloc - Setting newhd to {} | {}", offset, counter.wrapping_add(1));
            let oldhead = hd;
            hd = self.head.compare_and_swap(hd, mem::transmute(newhd_ary), Ordering::SeqCst);
            if hd == oldhead {
                return Ok(self.get_cell(hd_ary[0]))
            }
            if hd_ary[0] == u32::MAX {
                return Err(AllocError::NoMemory);
            }
        }
    }

    /// Free a raw (previously alloc'd) pointer back into the allocator's slab
    pub unsafe fn free_raw(&self, item : *mut u8) -> Result<(), AllocError> {
        // this gets the offset from the pointer that is being freed, then
        // uses that in the freelist table. The offset becomes the new head
        // and the previous head becomes the 'next' offset
        if item.is_null() {
            return Err(AllocError::BadArgument("Null".to_string()));
        }

        let cell_addr : usize = mem::transmute(item);
        let start_addr : usize = mem::transmute(*self.data.get());
        let end_addr : usize = mem::transmute((*self.data.get()).offset(self.block_size as isize * self.num_blocks as isize));

        // end_addr is one past the last block, so it is itself out of bounds
        if (cell_addr < start_addr) || (cell_addr >= end_addr) {
            return Err(AllocError::BadArgument("Out of bounds".to_string()));
        }


        //ensure that the ptr falls on the alignment of the block_size
        if ((cell_addr - start_addr) & (self.block_size as usize - 1)) != 0 {
            return Err(AllocError::BadArgument("Misaligned value".to_string()));
        }

        let newoffset = ((cell_addr - start_addr) / self.block_size as usize) as u32;
        let mut hd = self.head.load(Ordering::Acquire);
        let hd_ary : &[u32; 2] = mem::transmute(&hd);
        //println!("free - Loaded head {} | {}", hd_ary[0], hd_ary[1]);

        loop {
            let counter = hd_ary[1];
            //println!("free - Setting newhd to {} | {}", newoffset, counter.wrapping_add(1));
            let newhd_ary = [newoffset, counter.wrapping_add(1)];

            let oldhead = hd;
            let oldhd_ary : &[u32; 2] = mem::transmute(&oldhead);

            // link the freed block to the current head before publishing it
            (*self.freelist.get())[newoffset as usize].store(oldhd_ary[0], Ordering::Relaxed);

            hd = self.head.compare_and_swap(hd, mem::transmute(newhd_ary), Ordering::SeqCst);
            if hd == oldhead {
                break;
            }
        }

        Ok(())
    }

    #[inline(always)]
    fn get_next_offset(&self, index : u32) -> u32 { unsafe {
        (*self.freelist.get())[index as usize].load(Ordering::Relaxed)
    } }

    #[inline(always)]
    fn get_cell(&self, index : u32) -> *mut u8 { unsafe {
        (*self.data.get()).offset(index as isize * self.block_size as isize)
    }}

    #[inline(always)]
    pub fn get_block_size(&self) -> u32 {
        self.block_size
    }
}

unsafe impl<'a> Send for Allocator<'a> {}
unsafe impl<'a> Sync for Allocator<'a> {}

#[inline]
fn _next_pow_of_2(mut n : u32) -> u32
{
    n -= 1;
    n |= n >> 1;
    n |= n >> 2;
    n |= n >> 4;
    n |= n >> 8;
    n |= n >> 16;
    n += 1;
    n
}

#[derive(Debug)]
pub enum AllocError {
    BadArgument(String),
    MemoryMapFail(String),
    NoMemory,
}

impl Display for AllocError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            AllocError::BadArgument(ref s) => write!(f, "Bad Argument : {}", s),
            AllocError::MemoryMapFail(ref s) => write!(f, "Memory Map Failure : {}", s),
            AllocError::NoMemory => write!(f, "Out of memory")
        }
    }
}

impl Error for AllocError {
    fn description(&self) -> &str {
        match *self {
            AllocError::BadArgument(_) => "Bad Argument",
            AllocError::MemoryMapFail(_) => "Memory Map Failure",
            AllocError::NoMemory => "Out of memory"
        }
    }

    fn cause(&self) -> Option<&Error> {
        None
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::sync::Arc;
    use test::Bencher;
    use std::mem;

    #[test]
    fn basic() {
        let myalloc = Allocator::new(256, 100).unwrap();
        let ptr = myalloc.alloc().unwrap();
        myalloc.free(ptr).unwrap();
    }

    #[test]
    fn realbig() {
        let myalloc = Allocator::new(2 << 14, 100000).unwrap();
        let ptr = myalloc.alloc().unwrap();
        myalloc.free(ptr).unwrap();
    }

    #[test]
    fn max_cap() {
        let myalloc = Allocator::new(256, 10).unwrap();
        for _ in 0..10 {
            let _ = myalloc.alloc().unwrap();
        }
        let ptr = myalloc.alloc();
        assert!(ptr.is_err());
    }

    #[test]
    fn sizing() { unsafe {
        let myalloc = Allocator::new(256, 10).unwrap();
        let a = myalloc.alloc().unwrap();
        let b = myalloc.alloc().unwrap();
        let a1 : usize = mem::transmute(a.as_ptr());
        let b1 : usize = mem::transmute(b.as_ptr());
        let diff = b1 - a1;
        println!("block_size: {}", diff);
        assert!(diff >= 256);
    } }

    #[test]
    fn up_down() {
        let myalloc = Allocator::new(256, 10).unwrap();
        let mut ptrs : Vec<&mut [u8]> = (0..10).map(|_| myalloc.alloc().unwrap()).collect();

        let ptr = myalloc.alloc();
        assert!(ptr.is_err());

        for p in ptrs.iter_mut() {
            myalloc.free(*p).unwrap();
        }

        let mut ptrs : Vec<&mut [u8]> = (0..10).map(|_| myalloc.alloc().unwrap()).collect();

        let ptr = myalloc.alloc();
        assert!(ptr.is_err());

        for p in ptrs.iter_mut() {
            myalloc.free(*p).unwrap();
        }

        let ptr = myalloc.alloc();
        assert!(ptr.is_ok());
    }

    #[test]
    fn concurrency() {
        let myalloc = Arc::new(Allocator::new(256, 1000).unwrap());

        let threads : Vec<thread::JoinHandle<()>> = (0..10).map(|_| {
            let ma = myalloc.clone();
            thread::spawn(move || {
                for _ in 0 .. 100000 {
                    let p = ma.alloc().unwrap();
                    ma.free(p).unwrap();
                    let p = ma.alloc().unwrap();
                    ma.free(p).unwrap();
                    let p = ma.alloc().unwrap();
                    ma.free(p).unwrap();
                    let p = ma.alloc().unwrap();
                    ma.free(p).unwrap();
                }
            })}).collect();

        for t in threads {
            t.join().unwrap();
        }

        //we should be back to 0 at this point, so this should succeed
        let _ : Vec<&mut [u8]> = (0..1000).map(|_| myalloc.alloc().unwrap()).collect();
        // then this should fail
        let ptr = myalloc.alloc();
        assert!(ptr.is_err());
    }


    #[test]
    fn basic_raw() { unsafe {
        let myalloc = Allocator::new(256, 100).unwrap();
        let ptr = myalloc.alloc_raw().unwrap();
        myalloc.free_raw(ptr).unwrap();
    } }

    #[test]
    fn max_cap_raw() { unsafe {
        let myalloc = Allocator::new(256, 10).unwrap();
        for _ in 0..10 {
            let _ = myalloc.alloc_raw().unwrap();
        }
        let ptr = myalloc.alloc_raw();
        assert!(ptr.is_err());
    } }

    #[test]
    fn sizing_raw() { unsafe {
        let myalloc = Allocator::new(256, 10).unwrap();
        let a = myalloc.alloc_raw().unwrap();
        let b = myalloc.alloc_raw().unwrap();
        let a1 : usize = mem::transmute(a);
        let b1 : usize = mem::transmute(b);
        let diff = b1 - a1;
        println!("block_size: {}", diff);
        assert!(diff >= 256);
    } }

    #[test]
    fn up_down_raw() { unsafe {
        let myalloc = Allocator::new(256, 10).unwrap();
        let ptrs : Vec<*mut u8> = (0..10).map(|_| myalloc.alloc_raw().unwrap()).collect();

        let ptr = myalloc.alloc_raw();
        assert!(ptr.is_err());

        for p in ptrs.iter() {
            myalloc.free_raw(*p).unwrap();
        }

        let ptrs : Vec<*mut u8> = (0..10).map(|_| myalloc.alloc_raw().unwrap()).collect();

        let ptr = myalloc.alloc_raw();
        assert!(ptr.is_err());

        for p in ptrs.iter() {
            myalloc.free_raw(*p).unwrap();
        }

        let ptr = myalloc.alloc_raw();
        assert!(ptr.is_ok());
    } }

    #[test]
    fn concurrency_raw() { unsafe {
        let myalloc = Arc::new(Allocator::new(256, 1000).unwrap());

        let threads : Vec<thread::JoinHandle<()>> = (0..10).map(|_| {
            let ma = myalloc.clone();
            thread::spawn(move || {
                for _ in 0 .. 100000 {
                    let p = ma.alloc_raw().unwrap();
                    ma.free_raw(p).unwrap();
                    let p = ma.alloc_raw().unwrap();
                    ma.free_raw(p).unwrap();
                    let p = ma.alloc_raw().unwrap();
                    ma.free_raw(p).unwrap();
                    let p = ma.alloc_raw().unwrap();
                    ma.free_raw(p).unwrap();
                }
            })}).collect();

        for t in threads {
            t.join().unwrap();
        }

        //we should be back to 0 at this point, so this should succeed
        let _ : Vec<*mut u8> = (0..1000).map(|_| myalloc.alloc_raw().unwrap()).collect();
        // then this should fail
        let ptr = myalloc.alloc_raw();
        assert!(ptr.is_err());
    } }

    #[bench]
    fn speedtest(b: &mut Bencher) {
        let myalloc = Arc::new(Allocator::new(256, 1000).unwrap());
        b.iter(|| {
            let p = myalloc.alloc().unwrap();
            myalloc.free(p).unwrap();
        });
    }

    #[bench]
    fn speedtest_big(b: &mut Bencher) {
        let myalloc = Arc::new(Allocator::new(1 << 14, 1000).unwrap());
        b.iter(|| {
            let p = myalloc.alloc().unwrap();
            myalloc.free(p).unwrap();
        });
    }

    #[bench]
    fn concurrent_speed(b: &mut Bencher) {
        let myalloc = Arc::new(Allocator::new(256, 1000).unwrap());
        b.iter(|| {
            let threads : Vec<thread::JoinHandle<()>> = (0..20).map(|_| {
                let ma = myalloc.clone();
                thread::spawn(move || {
                    for _ in 0 .. 1000 {
                        let p = ma.alloc().unwrap();
                        ma.free(p).unwrap();
                        let p = ma.alloc().unwrap();
                        ma.free(p).unwrap();
                        let p = ma.alloc().unwrap();
                        ma.free(p).unwrap();
                        let p = ma.alloc().unwrap();
                        ma.free(p).unwrap();
                    }
                })}).collect();

            for t in threads {
                t.join().unwrap();
            }
        });
    }
}

--------------------------------------------------------------------------------