├── .directory
├── .gitignore
├── .travis.yml
├── Cargo.toml
├── LICENSE
├── README.md
└── src
    ├── lib.rs
    ├── slab.rs
    └── test.rs

/.directory:
--------------------------------------------------------------------------------
[Dolphin]
Timestamp=2018,1,8,23,11,44
Version=4

[Settings]
HiddenFilesShown=true
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
target
Cargo.lock
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
language: rust
rust:
  - nightly
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "slab_allocator"
version = "0.3.5"
authors = ["Robert Węcławski "]
license = "MIT"

description = "Slab allocator for no_std systems. Uses multiple slabs with blocks of different sizes and a linked list for blocks larger than 4096 bytes"
keywords = ["slab", "allocator", "no_std", "heap", "kernel"]

repository = "https://github.com/weclaw1/slab_allocator"

[dependencies]
linked_list_allocator = "0.6.3"
spin = "0.4.9"
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2017 Robert Węcławski

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# slab_allocator

[![Build Status](https://travis-ci.org/weclaw1/slab_allocator.svg?branch=master)](https://travis-ci.org/weclaw1/slab_allocator)

[Documentation](https://docs.rs/crate/slab_allocator)

## Usage

Create a static allocator in your root module:

```rust
use slab_allocator::LockedHeap;

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();
```

Before using this allocator, you need to initialize it:

```rust
pub fn init_heap() {
    let heap_start = …;
    let heap_end = …;
    let heap_size = heap_end - heap_start;
    unsafe {
        ALLOCATOR.init(heap_start, heap_size);
    }
}
```

## License
This crate is licensed under MIT. See LICENSE for details.
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
#![feature(alloc, allocator_api)]
#![feature(const_fn)]
#![no_std]

extern crate alloc;

extern crate spin;

extern crate linked_list_allocator;

mod slab;

use core::ops::Deref;

use alloc::alloc::{Alloc, AllocErr, Layout};
use core::alloc::GlobalAlloc;
use core::ptr::NonNull;
use slab::Slab;

use spin::Mutex;

#[cfg(test)]
mod test;

pub const NUM_OF_SLABS: usize = 8;
pub const MIN_SLAB_SIZE: usize = 4096;
pub const MIN_HEAP_SIZE: usize = NUM_OF_SLABS * MIN_SLAB_SIZE;

#[derive(Copy, Clone)]
pub enum HeapAllocator {
    Slab64Bytes,
    Slab128Bytes,
    Slab256Bytes,
    Slab512Bytes,
    Slab1024Bytes,
    Slab2048Bytes,
    Slab4096Bytes,
    LinkedListAllocator,
}

/// A fixed-size heap backed by multiple slabs with blocks of different sizes.
/// Allocations over 4096 bytes are served by the linked list allocator.
pub struct Heap {
    slab_64_bytes: Slab,
    slab_128_bytes: Slab,
    slab_256_bytes: Slab,
    slab_512_bytes: Slab,
    slab_1024_bytes: Slab,
    slab_2048_bytes: Slab,
    slab_4096_bytes: Slab,
    linked_list_allocator: linked_list_allocator::Heap,
}

impl Heap {
    /// Creates a new heap with the given `heap_start_addr` and `heap_size`. The start address must be valid
    /// and the memory in the `[heap_start_addr, heap_start_addr + heap_size)` range must not be used for
    /// anything else. This function is unsafe because it can cause undefined behavior if the
    /// given address is invalid.
    pub unsafe fn new(heap_start_addr: usize, heap_size: usize) -> Heap {
        assert!(
            heap_start_addr % 4096 == 0,
            "Start address should be page aligned"
        );
        assert!(
            heap_size >= MIN_HEAP_SIZE,
            "Heap size should be greater or equal to minimum heap size"
        );
        assert!(
            heap_size % MIN_HEAP_SIZE == 0,
            "Heap size should be a multiple of minimum heap size"
        );
        let slab_size = heap_size / NUM_OF_SLABS;
        Heap {
            slab_64_bytes: Slab::new(heap_start_addr, slab_size, 64),
            slab_128_bytes: Slab::new(heap_start_addr + slab_size, slab_size, 128),
            slab_256_bytes: Slab::new(heap_start_addr + 2 * slab_size, slab_size, 256),
            slab_512_bytes: Slab::new(heap_start_addr + 3 * slab_size, slab_size, 512),
            slab_1024_bytes: Slab::new(heap_start_addr + 4 * slab_size, slab_size, 1024),
            slab_2048_bytes: Slab::new(heap_start_addr + 5 * slab_size, slab_size, 2048),
            slab_4096_bytes: Slab::new(heap_start_addr + 6 * slab_size, slab_size, 4096),
            linked_list_allocator: linked_list_allocator::Heap::new(
                heap_start_addr + 7 * slab_size,
                slab_size,
            ),
        }
    }

    /// Adds memory to the heap. The start address must be valid
    /// and the memory in the `[mem_start_addr, mem_start_addr + mem_size)` range must not be used for
    /// anything else.
    /// In the case of the linked list allocator, the memory can only be extended.
    /// This function is unsafe because it can cause undefined behavior if the
    /// given address is invalid.
    pub unsafe fn grow(&mut self, mem_start_addr: usize, mem_size: usize, slab: HeapAllocator) {
        match slab {
            HeapAllocator::Slab64Bytes => self.slab_64_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab128Bytes => self.slab_128_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab256Bytes => self.slab_256_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab512Bytes => self.slab_512_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.grow(mem_start_addr, mem_size),
            HeapAllocator::LinkedListAllocator => self.linked_list_allocator.extend(mem_size),
        }
    }

    /// Allocates a chunk of the given size with the given alignment. Returns a pointer to the
    /// beginning of that chunk if it was successful. Otherwise it returns `Err`.
    /// This function finds the smallest slab that can still accommodate the given chunk.
    /// The runtime is `O(1)` for chunks of size <= 4096 and `O(n)` when the chunk size is > 4096.
    pub fn allocate(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
        match Heap::layout_to_allocator(&layout) {
            HeapAllocator::Slab64Bytes => self.slab_64_bytes.allocate(layout),
            HeapAllocator::Slab128Bytes => self.slab_128_bytes.allocate(layout),
            HeapAllocator::Slab256Bytes => self.slab_256_bytes.allocate(layout),
            HeapAllocator::Slab512Bytes => self.slab_512_bytes.allocate(layout),
            HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.allocate(layout),
            HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.allocate(layout),
            HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.allocate(layout),
            HeapAllocator::LinkedListAllocator => {
                self.linked_list_allocator.allocate_first_fit(layout)
            }
        }
    }

    /// Frees the given allocation. `ptr` must be a pointer returned
    /// by a call to the `allocate` function with identical size and alignment. Undefined
    /// behavior may occur for invalid arguments, thus this function is unsafe.
    ///
    /// This function finds the slab which contains the address of `ptr` and adds the block beginning
    /// at the `ptr` address to the list of free blocks.
    /// This operation is `O(1)` for blocks <= 4096 bytes and `O(n)` for blocks > 4096 bytes.
    pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) {
        match Heap::layout_to_allocator(&layout) {
            HeapAllocator::Slab64Bytes => self.slab_64_bytes.deallocate(ptr),
            HeapAllocator::Slab128Bytes => self.slab_128_bytes.deallocate(ptr),
            HeapAllocator::Slab256Bytes => self.slab_256_bytes.deallocate(ptr),
            HeapAllocator::Slab512Bytes => self.slab_512_bytes.deallocate(ptr),
            HeapAllocator::Slab1024Bytes => self.slab_1024_bytes.deallocate(ptr),
            HeapAllocator::Slab2048Bytes => self.slab_2048_bytes.deallocate(ptr),
            HeapAllocator::Slab4096Bytes => self.slab_4096_bytes.deallocate(ptr),
            HeapAllocator::LinkedListAllocator => {
                self.linked_list_allocator.deallocate(ptr, layout)
            }
        }
    }

    /// Returns bounds on the guaranteed usable size of a successful
    /// allocation created with the specified `layout`.
    pub fn usable_size(&self, layout: &Layout) -> (usize, usize) {
        match Heap::layout_to_allocator(&layout) {
            HeapAllocator::Slab64Bytes => (layout.size(), 64),
            HeapAllocator::Slab128Bytes => (layout.size(), 128),
            HeapAllocator::Slab256Bytes => (layout.size(), 256),
            HeapAllocator::Slab512Bytes => (layout.size(), 512),
            HeapAllocator::Slab1024Bytes => (layout.size(), 1024),
            HeapAllocator::Slab2048Bytes => (layout.size(), 2048),
            HeapAllocator::Slab4096Bytes => (layout.size(), 4096),
            HeapAllocator::LinkedListAllocator => (layout.size(), layout.size()),
        }
    }

    /// Finds the allocator to use based on layout size and alignment.
    pub fn layout_to_allocator(layout: &Layout) -> HeapAllocator {
        if layout.size() > 4096 {
            HeapAllocator::LinkedListAllocator
        } else if layout.size() <= 64 && layout.align() <= 64 {
            HeapAllocator::Slab64Bytes
        } else if layout.size() <= 128 && layout.align() <= 128 {
            HeapAllocator::Slab128Bytes
        } else if layout.size() <= 256 && layout.align() <= 256 {
            HeapAllocator::Slab256Bytes
        } else if layout.size() <= 512 && layout.align() <= 512 {
            HeapAllocator::Slab512Bytes
        } else if layout.size() <= 1024 && layout.align() <= 1024 {
            HeapAllocator::Slab1024Bytes
        } else if layout.size() <= 2048 && layout.align() <= 2048 {
            HeapAllocator::Slab2048Bytes
        } else {
            HeapAllocator::Slab4096Bytes
        }
    }
}

unsafe impl Alloc for Heap {
    unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
        self.allocate(layout)
    }

    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
        self.deallocate(ptr, layout)
    }

    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
        self.usable_size(layout)
    }
}

pub struct LockedHeap(Mutex<Option<Heap>>);

impl LockedHeap {
    pub const fn empty() -> LockedHeap {
        LockedHeap(Mutex::new(None))
    }

    pub unsafe fn init(&self, heap_start_addr: usize, size: usize) {
        *self.0.lock() = Some(Heap::new(heap_start_addr, size));
    }

    /// Creates a new heap with the given `heap_start_addr` and `heap_size`. The start address must be valid
    /// and the memory in the `[heap_start_addr, heap_start_addr + heap_size)` range must not be used for
    /// anything else. This function is unsafe because it can cause undefined behavior if the
    /// given address is invalid.
    pub unsafe fn new(heap_start_addr: usize, heap_size: usize) -> LockedHeap {
        LockedHeap(Mutex::new(Some(Heap::new(heap_start_addr, heap_size))))
    }
}

impl Deref for LockedHeap {
    type Target = Mutex<Option<Heap>>;

    fn deref(&self) -> &Mutex<Option<Heap>> {
        &self.0
    }
}

unsafe impl<'a> Alloc for &'a LockedHeap {
    unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
        if let Some(ref mut heap) = *self.0.lock() {
            heap.allocate(layout)
        } else {
            panic!("allocate: heap not initialized");
        }
    }

    unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
        if let Some(ref mut heap) = *self.0.lock() {
            heap.deallocate(ptr, layout)
        } else {
            panic!("deallocate: heap not initialized");
        }
    }

    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
        if let Some(ref mut heap) = *self.0.lock() {
            heap.usable_size(layout)
        } else {
            panic!("usable_size: heap not initialized");
        }
    }
}

unsafe impl GlobalAlloc for LockedHeap {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        if let Some(ref mut heap) = *self.0.lock() {
            if let Ok(ref mut nnptr) = heap.allocate(layout) {
                return nnptr.as_ptr();
            } else {
                panic!("allocate: failed");
            }
        } else {
            panic!("allocate: heap not initialized");
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        if let Some(ref mut heap) = *self.0.lock() {
            if let Some(p) = NonNull::new(ptr) {
                heap.deallocate(p, layout)
            }
        } else {
            panic!("deallocate: heap not initialized");
        }
    }
}
--------------------------------------------------------------------------------
/src/slab.rs:
--------------------------------------------------------------------------------
use alloc::alloc::{AllocErr, Layout};
use core::ptr::NonNull;

pub struct Slab {
    block_size: usize,
    free_block_list: FreeBlockList,
}

impl Slab {
    pub unsafe fn new(start_addr: usize, slab_size: usize, block_size: usize) -> Slab {
        let num_of_blocks = slab_size / block_size;
        Slab {
            block_size,
            free_block_list: FreeBlockList::new(start_addr, block_size, num_of_blocks),
        }
    }

    pub unsafe fn grow(&mut self, start_addr: usize, slab_size: usize) {
        let num_of_blocks = slab_size / self.block_size;
        let mut block_list = FreeBlockList::new(start_addr, self.block_size, num_of_blocks);
        while let Some(block) = block_list.pop() {
            self.free_block_list.push(block);
        }
    }

    pub fn allocate(&mut self, _layout: Layout) -> Result<NonNull<u8>, AllocErr> {
        match self.free_block_list.pop() {
            Some(block) => Ok(unsafe { NonNull::new_unchecked(block.addr() as *mut u8) }),
            None => Err(AllocErr),
        }
    }

    /// Safety: ptr must have been previously allocated by self.
    pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>) {
        // Since ptr was allocated by self, its alignment must be at least
        // the alignment of FreeBlock. Casting a less aligned pointer to
        // &mut FreeBlock would be undefined behavior.
        #[cfg_attr(feature = "cargo-clippy", allow(cast_ptr_alignment))]
        let ptr = ptr.as_ptr() as *mut FreeBlock;
        self.free_block_list.push(&mut *ptr);
    }
}

struct FreeBlockList {
    len: usize,
    head: Option<&'static mut FreeBlock>,
}

impl FreeBlockList {
    unsafe fn new(start_addr: usize, block_size: usize, num_of_blocks: usize) -> FreeBlockList {
        let mut new_list = FreeBlockList { len: 0, head: None };
        for i in (0..num_of_blocks).rev() {
            let new_block = (start_addr + i * block_size) as *mut FreeBlock;
            new_list.push(&mut *new_block);
        }
        new_list
    }

    fn pop(&mut self) -> Option<&'static mut FreeBlock> {
        self.head.take().map(|node| {
            self.head = node.next.take();
            self.len -= 1;
            node
        })
    }

    fn push(&mut self, free_block: &'static mut FreeBlock) {
        free_block.next = self.head.take();
        self.len += 1;
        self.head = Some(free_block);
    }
}

impl Drop for FreeBlockList {
    fn drop(&mut self) {
        while let Some(_) = self.pop() {}
    }
}

struct FreeBlock {
    next: Option<&'static mut FreeBlock>,
}

impl FreeBlock {
    fn addr(&self) -> usize {
        self as *const _ as usize
    }
}
--------------------------------------------------------------------------------
/src/test.rs:
--------------------------------------------------------------------------------
use super::*;
use alloc::alloc::Layout;
use core::mem::{align_of, size_of};

const HEAP_SIZE: usize = 8 * 4096;
const BIG_HEAP_SIZE: usize = HEAP_SIZE * 10;

#[repr(align(4096))]
struct TestHeap {
    heap_space: [u8; HEAP_SIZE],
}

#[repr(align(4096))]
struct TestBigHeap {
    heap_space: [u8; BIG_HEAP_SIZE],
}

fn new_heap() -> Heap {
    let test_heap = TestHeap {
        heap_space: [0u8; HEAP_SIZE],
    };
    let heap = unsafe { Heap::new(&test_heap.heap_space[0] as *const u8 as usize, HEAP_SIZE) };
    heap
}

fn new_locked_heap() -> LockedHeap {
    let test_heap = TestHeap {
        heap_space: [0u8; HEAP_SIZE],
    };
    let locked_heap = LockedHeap::empty();
    unsafe {
        locked_heap.init(&test_heap.heap_space[0] as *const u8 as usize, HEAP_SIZE);
    }
    locked_heap
}

fn new_big_heap() -> Heap {
    let test_heap = TestBigHeap {
        heap_space: [0u8; BIG_HEAP_SIZE],
    };
    let heap = unsafe {
        Heap::new(
            &test_heap.heap_space[0] as *const u8 as usize,
            BIG_HEAP_SIZE,
        )
    };
    heap
}

#[test]
fn oom() {
    let mut heap = new_heap();
    let layout = Layout::from_size_align(HEAP_SIZE + 1, align_of::<usize>());
    let addr = heap.allocate(layout.unwrap());
    assert!(addr.is_err());
}

#[test]
fn allocate_double_usize() {
    let mut heap = new_heap();
    let size = size_of::<usize>() * 2;
    let layout = Layout::from_size_align(size, align_of::<usize>());
    let addr = heap.allocate(layout.unwrap());
    assert!(addr.is_ok());
}

#[test]
fn allocate_and_free_double_usize() {
    let mut heap = new_heap();
    let layout = Layout::from_size_align(size_of::<usize>() * 2, align_of::<usize>()).unwrap();
    let addr = heap.allocate(layout.clone());
    assert!(addr.is_ok());
    let addr = addr.unwrap();
    unsafe {
        let pair_addr = addr.as_ptr() as *mut (usize, usize);
        *pair_addr = (0xdeafdeadbeafbabe, 0xdeafdeadbeafbabe);
        heap.deallocate(addr, layout.clone());
    }
}

#[test]
fn reallocate_double_usize() {
    let mut heap = new_heap();

    let layout = Layout::from_size_align(size_of::<usize>() * 2, align_of::<usize>()).unwrap();

    let x = heap.allocate(layout.clone()).unwrap();
    unsafe {
        heap.deallocate(x, layout.clone());
    }

    let y = heap.allocate(layout.clone()).unwrap();
    unsafe {
        heap.deallocate(y, layout.clone());
    }

    assert_eq!(x, y);
}

#[test]
fn allocate_multiple_sizes() {
    let mut heap = new_heap();
    let base_size = size_of::<usize>();
    let base_align = align_of::<usize>();

    let layout_1 = Layout::from_size_align(base_size * 2, base_align).unwrap();
    let layout_2 = Layout::from_size_align(base_size * 3, base_align).unwrap();
    let layout_3 = Layout::from_size_align(base_size * 3, base_align * 8).unwrap();
    let layout_4 = Layout::from_size_align(base_size * 10, base_align).unwrap();

    let x = heap.allocate(layout_1.clone()).unwrap();
    let y = heap.allocate(layout_2.clone()).unwrap();
    assert_eq!(unsafe { x.as_ptr().offset(64) }, y.as_ptr());
    let z = heap.allocate(layout_3.clone()).unwrap();
    assert_eq!(z.as_ptr() as usize % (base_size * 8), 0);

    unsafe {
        heap.deallocate(x, layout_1.clone());
    }

    let a = heap.allocate(layout_4.clone()).unwrap();
    let b = heap.allocate(layout_1.clone()).unwrap();
    assert_eq!(a.as_ptr(), unsafe { x.as_ptr().offset(4096) });
    assert_eq!(x, b);

    unsafe {
        heap.deallocate(y, layout_2);
        heap.deallocate(z, layout_3);
        heap.deallocate(a, layout_4);
        heap.deallocate(b, layout_1);
    }
}

#[test]
fn locked_heap_allocate_multiple_sizes() {
    let heap = new_locked_heap();
    let base_size = size_of::<usize>();
    let base_align = align_of::<usize>();

    let layout_1 = Layout::from_size_align(base_size * 2, base_align).unwrap();
    let layout_2 = Layout::from_size_align(base_size * 3, base_align).unwrap();
    let layout_3 = Layout::from_size_align(base_size * 3, base_align * 8).unwrap();
    let layout_4 = Layout::from_size_align(base_size * 10, base_align).unwrap();

    let x = unsafe { heap.alloc(layout_1.clone()) };
    let y = unsafe { heap.alloc(layout_2.clone()) };
    assert_eq!(unsafe { x.offset(64) }, y);
    let z = unsafe { heap.alloc(layout_3.clone()) };
    assert_eq!(z as usize % (base_size * 8), 0);

    unsafe {
        heap.dealloc(x, layout_1.clone());
    }

    let a = unsafe { heap.alloc(layout_4.clone()) };
    let b = unsafe { heap.alloc(layout_1.clone()) };
    assert_eq!(a, unsafe { x.offset(4096) });
    assert_eq!(x, b);

    unsafe {
        heap.dealloc(y, layout_2);
        heap.dealloc(z, layout_3);
        heap.dealloc(a, layout_4);
        heap.dealloc(b, layout_1);
    }
}

#[test]
fn allocate_one_4096_block() {
    let mut heap = new_big_heap();
    let base_size = size_of::<usize>();
    let base_align = align_of::<usize>();

    let layout = Layout::from_size_align(base_size * 512, base_align).unwrap();

    let x = heap.allocate(layout.clone()).unwrap();

    unsafe {
        heap.deallocate(x, layout.clone());
    }
}

#[test]
fn allocate_multiple_4096_blocks() {
    let mut heap = new_big_heap();
    let base_size = size_of::<usize>();
    let base_align = align_of::<usize>();

    let layout = Layout::from_size_align(base_size * 512, base_align).unwrap();
    let layout_2 = Layout::from_size_align(base_size * 1024, base_align).unwrap();

    let x = heap.allocate(layout.clone()).unwrap();
    let y = heap.allocate(layout.clone()).unwrap();
    let z = heap.allocate(layout.clone()).unwrap();

    unsafe {
        heap.deallocate(y, layout.clone());
    }

    let a = heap.allocate(layout.clone()).unwrap();
    let b = heap.allocate(layout.clone()).unwrap();
    assert_eq!(unsafe { x.as_ptr().offset(4096) }, a.as_ptr());

    unsafe {
        heap.deallocate(a, layout.clone());
        heap.deallocate(z, layout.clone());
    }
    let c = heap.allocate(layout_2.clone()).unwrap();
    let d = heap.allocate(layout.clone()).unwrap();
    unsafe {
        *(c.as_ptr() as *mut (u64, u64)) = (0xdeafdeadbeafbabe, 0xdeafdeadbeafbabe);
    }
    assert_eq!(unsafe { a.as_ptr().offset(9 * 4096) }, c.as_ptr());
    assert_eq!(unsafe { b.as_ptr().offset(-4096) }, d.as_ptr());
}

#[test]
fn allocate_one_8192_block() {
    let mut heap = new_big_heap();
    let base_size = size_of::<usize>();
    let base_align = align_of::<usize>();

    let layout = Layout::from_size_align(base_size * 1024, base_align).unwrap();

    let x = heap.allocate(layout.clone()).unwrap();

    unsafe {
        heap.deallocate(x, layout.clone());
    }
}
--------------------------------------------------------------------------------
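
For readers who want to exercise the `Heap` API directly rather than going through `#[global_allocator]`, the following is a minimal sketch that mirrors the patterns in src/test.rs. It is not part of the crate: the `HeapSpace` struct, the `demo` function, and the constants are illustrative names, and the sketch assumes the same nightly toolchain and feature gates that lib.rs enables.

```rust
// Illustrative sketch only (not part of the repository): allocate and free one
// block straight from `Heap`, following the same pattern as src/test.rs.
use slab_allocator::Heap;
use core::alloc::Layout;
use core::mem::{align_of, size_of};

const HEAP_SIZE: usize = 8 * 4096; // minimum size: NUM_OF_SLABS * MIN_SLAB_SIZE

// Page-aligned backing storage for the heap, as in the tests.
#[repr(align(4096))]
struct HeapSpace {
    space: [u8; HEAP_SIZE],
}

fn demo() {
    let backing = HeapSpace { space: [0u8; HEAP_SIZE] };
    // Safety: the buffer is page aligned, at least MIN_HEAP_SIZE bytes,
    // and not used for anything else while the heap is alive.
    let mut heap = unsafe { Heap::new(&backing.space[0] as *const u8 as usize, HEAP_SIZE) };

    let layout = Layout::from_size_align(size_of::<u64>(), align_of::<u64>()).unwrap();
    // An 8-byte request is served by the 64-byte slab in O(1).
    let block = heap.allocate(layout.clone()).expect("allocation failed");
    unsafe {
        // Safety: `block` came from `allocate` with the same layout.
        heap.deallocate(block, layout);
    }
}
```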