├── .gitignore
├── Cargo.toml
├── benches
│   ├── bitmap.rs
│   └── rb_tree.rs
├── src
│   ├── lib.rs
│   ├── main.rs
│   ├── buddy_allocator_bitmap.rs
│   ├── buddy_allocator_tree.rs
│   └── buddy_allocator_lists.rs
├── README.md
└── LICENSE

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target/
2 | **/*.rs.bk
3 | Cargo.lock
4 | .idea/
5 | **/*.iml
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "buddy_allocator_workshop"
3 | version = "0.1.0"
4 | authors = ["restioson "]
5 |
6 | [dependencies]
7 | static_assertions = "0.2.5"
8 | structopt = "0.2.5"
9 | array-init = "0.0.2"
10 | failure = "0.1.1"
11 | intrusive-collections = "0.7.0"
12 | bit_field = "0.9.0"
13 | flame = { version = "0.2.0", optional = true }
14 | flamer = { version = "^0.2.0", optional = true }
15 |
16 | [features]
17 | default = []
18 | flame_profile = ["flame", "flamer"]
19 |
20 | [dev-dependencies]
21 | criterion = "0.2"
22 |
23 | [[bench]]
24 | name = "rb_tree"
25 | harness = false
26 |
27 | [[bench]]
28 | name = "bitmap"
29 | harness = false
30 |
31 | [profile.release]
32 | debug = true
--------------------------------------------------------------------------------
/benches/bitmap.rs:
--------------------------------------------------------------------------------
1 | #[macro_use]
2 | extern crate criterion;
3 | extern crate array_init;
4 | extern crate buddy_allocator_workshop;
5 |
6 | use criterion::Criterion;
7 |
8 | fn bitmap(c: &mut Criterion) {
9 |     use buddy_allocator_workshop::buddy_allocator_bitmap::*;
10 |     use buddy_allocator_workshop::{MAX_ORDER, BASE_ORDER};
11 |
12 |     let mut tree = Tree::new();
13 |
14 |     c.bench_function("bitmap allocate_exact", move |b| {
15 |         b.iter(|| {
16 |             match tree.alloc_exact(0) {
17 |                 Some(_) => (),
18 |                 None => {
19 |                     tree = Tree::new();
20 |                     tree.alloc_exact(0);
21 |                 },
22 |             };
23 |         });
24 |     });
25 | }
26 |
27 | criterion_group!(benches, bitmap);
28 | criterion_main!(benches);
29 |
--------------------------------------------------------------------------------
/benches/rb_tree.rs:
--------------------------------------------------------------------------------
1 | #[macro_use]
2 | extern crate criterion;
3 | extern crate buddy_allocator_workshop;
4 |
5 | use criterion::Criterion;
6 |
7 | fn rb_tree_vecs(c: &mut Criterion) {
8 |     use buddy_allocator_workshop::buddy_allocator_tree::*;
9 |     use buddy_allocator_workshop::{MAX_ORDER, BASE_ORDER};
10 |
11 |     let mut allocator = BuddyAllocator::<Vec<*const Block>>::new();
12 |     allocator.create_top_level(0);
13 |     allocator.create_top_level(2usize.pow((BASE_ORDER + MAX_ORDER) as u32));
14 |
15 |     let mut blocks_created_top_level = 1;
16 |
17 |     c.bench_function("rb_tree_vecs allocate_exact", move |b| {
18 |         b.iter(|| {
19 |             match allocator.allocate_exact(0) {
20 |                 Ok(_) => (),
21 |                 Err(BlockAllocateError::NoBlocksAvailable) => {
22 |                     let size_of_block = 2usize.pow((BASE_ORDER + MAX_ORDER) as u32);
23 |                     allocator.create_top_level(size_of_block * blocks_created_top_level);
24 |                     blocks_created_top_level += 1;
25 |                 }
26 |                 Err(e) => panic!("Error: {:?}", e),
27 |             };
28 |         });
29 |     });
30 | }
31 |
32 | criterion_group!(benches, rb_tree_vecs);
33 | criterion_main!(benches);
34 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![feature(const_fn)]
2 | #![feature(nll)]
3 | #![feature(slice_patterns)]
4 | #![feature(duration_extras)]
5 | #![feature(arbitrary_self_types)]
6 | #![feature(test)]
7 | #![feature(integer_atomics)]
8 | #![feature(box_syntax)]
9 | #![feature(custom_attribute)]
10 | #![feature(plugin)]
11 | #![cfg_attr(feature = "flame_profile", plugin(flamer))]
12 |
13 | extern crate array_init;
14 | extern crate test;
15 | #[macro_use]
16 | extern crate static_assertions;
17 | #[macro_use]
18 | extern crate intrusive_collections;
19 | extern crate bit_field;
20 | #[cfg(feature = "flame_profile")]
21 | extern crate flame;
22 |
23 | pub mod buddy_allocator_bitmap;
24 | pub mod buddy_allocator_lists;
25 | pub mod buddy_allocator_tree;
26 |
27 | /// Number of orders. **This constant is OK to modify for configuration.**
28 | pub const LEVEL_COUNT: u8 = 19;
29 | /// The maximum order. **This constant is not OK to modify for configuration.**
30 | pub const MAX_ORDER: u8 = LEVEL_COUNT - 1;
31 | /// The minimum order. All orders are relative to this -- i.e. the size of a block of order `k` is
32 | /// `2^(k + BASE_ORDER)`, not `2^k`. **This constant is OK to modify for configuration.**
33 | ///
34 | /// # Note
35 | ///
36 | /// **NB: Must not be greater than log base 2 of 4096 (i.e. 12).** This is so that 4KiB pages can
37 | /// always be allocated, regardless of the base order.
38 | pub const BASE_ORDER: u8 = 12;
39 | const_assert!(__min_order_less_or_eq_than_4kib; BASE_ORDER <= 12);
40 | /// The log base 2 of the size of a block of the maximum order.
41 | pub const MAX_ORDER_SIZE: u8 = BASE_ORDER + MAX_ORDER;
42 |
43 | trait PhysicalAllocator {
44 |     fn alloc(&mut self, size: PageSize) -> *const u8;
45 |     fn dealloc(&mut self, addr: *const u8);
46 | }
47 |
48 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
49 | pub enum PageSize {
50 |     Kib4,
51 |     Mib2,
52 |     Gib1,
53 | }
54 |
55 | impl PageSize {
56 |     pub fn power_of_two(self) -> u8 {
57 |         use self::PageSize::*;
58 |         match self {
59 |             Kib4 => 12,
60 |             Mib2 => 21,
61 |             Gib1 => 30,
62 |         }
63 |     }
64 | }
65 |
66 | pub fn top_level_blocks(blocks: u32, block_size: u8) -> u64 {
67 |     let a = 2f64.powi(i32::from(block_size + BASE_ORDER)) * f64::from(blocks)
68 |         / 2f64.powi(i32::from(MAX_ORDER + BASE_ORDER));
69 |
70 |     a.ceil() as u64
71 | }
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | #![feature(custom_attribute)]
2 | #![feature(plugin)]
3 | #![feature(nll)]
4 | #![cfg_attr(feature = "flame_profile", plugin(flamer))]
5 | #![allow(unused_attributes)]
6 |
7 | extern crate buddy_allocator_workshop;
8 |
9 | #[macro_use]
10 | extern crate structopt;
11 | #[cfg(feature = "flame_profile")]
12 | extern crate flame;
13 | #[macro_use]
14 | extern crate failure;
15 |
16 | use buddy_allocator_workshop::*;
17 | use failure::Fail;
18 | use structopt::StructOpt;
19 | use std::time::Duration;
20 |
21 | const DEFAULT_DEMOS: &[&str] = &[
22 |     "vecs",
23 |     "linked_lists",
24 |     "rb_tree_vecs",
25 |     "rb_tree_linked_lists",
26 |     "bitmap",
27 | ];
28 |
29 | #[derive(StructOpt, Debug)]
30 | #[structopt(name = "buddy_allocator_workshop")]
31 | struct Options {
32 |     /// Print the addresses of blocks as they are allocated. This will slow down performance, and as
33 |     /// such should not be used for benchmarking.
34 |     #[structopt(short = "p", long = "print-addresses")]
35 |     print_addresses: bool,
36 |     /// Which demos to run. Defaults to all demos. Accepted values: `vecs`, `linked_lists`,
37 |     /// `rb_tree_vecs`, `rb_tree_linked_lists`, `bitmap`.
38 |     #[structopt(short = "d", long = "demos")]
39 |     demos: Vec<String>,
40 |     /// How many blocks to allocate in the demo. Defaults to 100 000.
41 |     #[structopt(short = "b", long = "blocks")]
42 |     blocks: Option<u32>,
43 |     /// The order of the blocks to allocate. Defaults to `0`, which is `2^BASE_ORDER` bytes. Must not
44 |     /// be greater than `MAX_ORDER`.
45 |     #[structopt(short = "o", long = "order")]
46 |     order: Option<u8>,
47 | }
48 |
49 | #[derive(Debug, Fail)]
50 | enum DemosError {
51 |     #[fail(display = "Unknown demo \"{}\"", name)]
52 |     UnknownDemo { name: String },
53 |     #[fail(display = "Order {} too large, max is {}", order, max_order)]
54 |     OrderTooLarge {
55 |         order: u8,
56 |         /// Must be equal to [MAX_ORDER]. Required as a field due to a limitation in `fail`.
57 |         max_order: u8,
58 |     },
59 | }
60 |
61 | fn main() {
62 |     let Options {
63 |         print_addresses,
64 |         demos,
65 |         blocks,
66 |         order,
67 |     } = Options::from_args();
68 |
69 |     let demos = if demos.is_empty() {
70 |         DEFAULT_DEMOS.iter().map(|s| s.to_string()).collect()
71 |     } else {
72 |         demos
73 |     };
74 |
75 |     let (blocks, order) = (
76 |         blocks.unwrap_or(100_000),
77 |         order.unwrap_or(PageSize::Kib4.power_of_two() - BASE_ORDER),
78 |     );
79 |
80 |     if order > MAX_ORDER {
81 |         raise(DemosError::OrderTooLarge {
82 |             order,
83 |             max_order: MAX_ORDER,
84 |         });
85 |     }
86 |
87 |     demos
88 |         .into_iter()
89 |         .map(|name| {
90 |             (
91 |                 match &*name {
92 |                     "linked_lists" => buddy_allocator_lists::demo_linked_lists,
93 |                     "vecs" => buddy_allocator_lists::demo_vecs,
94 |                     "rb_tree_vecs" => buddy_allocator_tree::demo_vecs,
95 |                     "rb_tree_linked_lists" => buddy_allocator_tree::demo_linked_lists,
96 |                     "bitmap" => buddy_allocator_bitmap::demo,
97 |                     _ => Err(DemosError::UnknownDemo { name: name.to_string() }).raise(),
98 |                 },
99 |                 name
100 |             )
101 |         })
102 |         .collect::<Vec<_>>() // Force detect unknown demos ASAP
103 |         .into_iter()
104 |         .for_each(|(demo, name)| {
105 |             run_demo(demo, print_addresses, blocks, order, name)
106 |         });
107 |
108 |     flame_dump();
109 | }
110 |
111 | trait ResultExt<T> {
112 |     fn raise(self) -> T;
113 | }
114 |
115 | impl<T, E: Fail> ResultExt<T> for Result<T, E> {
116 |     fn raise(self) -> T {
117 |         match self {
118 |             Ok(ok) => ok,
119 |             Err(err) => raise(err),
120 |         }
121 |     }
122 | }
123 |
124 | fn raise<F: Fail>(failure: F) -> ! {
125 |     println!("error: {}", failure);
126 |     std::process::exit(1)
127 | }
128 |
129 | fn run_demo(demo: fn(bool, u32, u8) -> Duration, print_addresses: bool, blocks: u32, order: u8, name: String) {
130 |     const NANOS_PER_SEC: f64 = 1_000_000_000.0; // Taken from std::time::Duration
131 |     const RUN_COUNT: usize = 1;
132 |
133 |     println!("Running {} demo...", name);
134 |
135 |     let mut durations = Vec::with_capacity(RUN_COUNT);
136 |     for _ in 0..RUN_COUNT {
137 |         durations.push(demo(print_addresses, blocks, order));
138 |     }
139 |
140 |     let times_sum: Duration = durations.into_iter().sum();
141 |
142 |     println!(
143 |         "Finished {} demo in {}s",
144 |         name.replace('_', " "),
145 |         (times_sum.as_secs() as f64 + f64::from(times_sum.subsec_nanos()) / NANOS_PER_SEC) / RUN_COUNT as f64,
146 |     );
147 | }
148 |
149 | #[cfg(feature = "flame_profile")]
150 | fn flame_dump() {
151 |     use std::fs::File;
152 |     flame::dump_html(&mut File::create("flame-graph.html").unwrap()).unwrap();
153 | }
154 |
155 | #[cfg(not(feature = "flame_profile"))]
156 | fn flame_dump() {}
--------------------------------------------------------------------------------
/src/buddy_allocator_bitmap.rs:
--------------------------------------------------------------------------------
1 | //! A modified buddy bitmap allocator
2 | use std::cmp;
3 | use std::mem;
4 | use std::time::{Duration, Instant};
5 | use super::{BASE_ORDER, LEVEL_COUNT, MAX_ORDER, MAX_ORDER_SIZE};
6 |
7 | /// A block in the bitmap
8 | struct Block {
9 |     /// One plus the order of the biggest free block under this block. `0` denotes used.
10 |     order_free: u8,
11 | }
12 |
13 | // TODO move lock to tree itself
14 | impl Block {
15 |     pub fn new_free(order: u8) -> Self {
16 |         Block {
17 |             order_free: order + 1,
18 |         }
19 |     }
20 | }
21 |
22 | /// A tree of blocks. Contains the tree as a flat array
23 | // TODO i might have a *few* cache misses here, eh?
24 | pub struct Tree {
25 |     /// Flat array representation of the tree. Used with the help of the `flat_tree` module.
26 |     flat_blocks: Box<[Block; Tree::blocks_in_tree(LEVEL_COUNT)]>,
27 | }
28 |
29 | impl Tree {
30 |     const fn blocks_in_tree(levels: u8) -> usize {
31 |         ((1 << levels) - 1) as usize
32 |     }
33 |
34 |     pub fn new() -> Tree {
35 |         const BLOCKS_IN_TREE: usize = Tree::blocks_in_tree(LEVEL_COUNT);
36 |         let mut flat_blocks: Box<[Block; BLOCKS_IN_TREE]> = box unsafe { mem::uninitialized() };
37 |
38 |         let mut start: usize = 0;
39 |         for level in 0..LEVEL_COUNT {
40 |             let order = MAX_ORDER - level;
41 |             let size = 1 << (level as usize);
42 |             for block in start..(start + size) {
43 |                 flat_blocks[block] = Block::new_free(order);
44 |             }
45 |             start += size;
46 |         }
47 |
48 |         Tree { flat_blocks }
49 |     }
50 |
51 |     pub const fn blocks_in_level(order: u8) -> usize {
52 |         (1 << (BASE_ORDER + order) as usize) / (1 << (BASE_ORDER as usize))
53 |     }
54 |
55 |     #[inline]
56 |     unsafe fn block_mut(&mut self, index: usize) -> &mut Block {
57 |         debug_assert!(index < Tree::blocks_in_tree(LEVEL_COUNT));
58 |         self.flat_blocks.get_unchecked_mut(index)
59 |     }
60 |
61 |     #[inline]
62 |     unsafe fn block(&self, index: usize) -> &Block {
63 |         debug_assert!(index < Tree::blocks_in_tree(LEVEL_COUNT));
64 |         self.flat_blocks.get_unchecked(index)
65 |     }
66 |
67 |     pub fn alloc_exact(&mut self, desired_order: u8) -> Option<*const u8> {
68 |         let root = unsafe { self.block_mut(0) };
69 |
70 |         // If the root node has no orders free, or if it does not have the desired order free
71 |         if root.order_free == 0 || (root.order_free - 1) < desired_order {
72 |             return None;
73 |         }
74 |
75 |         let mut addr: u32 = 0;
76 |         let mut node_index = 1;
77 |
78 |         let max_level = MAX_ORDER - desired_order;
79 |
80 |         for level in 0..max_level {
81 |             let left_child_index = flat_tree::left_child(node_index);
82 |             let left_child = unsafe { self.block(left_child_index - 1) };
83 |
84 |             let o = left_child.order_free;
85 |             // Descend left if the child is not used (o != 0) and it can fit the desired order,
86 |             // i.e. (o - 1) >= desired_order, accounting for the +1 offset of order_free.
87 |             // (o - 1) >= desired_order can be simplified to o > desired_order
88 |             node_index = if o != 0 && o > desired_order {
89 |                 left_child_index
90 |             } else {
91 |                 // Move over to the right: if the parent had a free order and the left child didn't, the right child must, or the parent is invalid and does not uphold invariants.
92 |                 // Since the address is moving from the left hand side, we need to increase it.
93 |                 // Block size in bytes = 2^(BASE_ORDER + order).
94 |                 // We also only want to allocate on the order of the child, hence subtracting 1
95 |                 addr += 1 << ((MAX_ORDER_SIZE - level - 1) as u32);
96 |                 left_child_index + 1
97 |             };
98 |         }
99 |
100 |         let block = unsafe { self.block_mut(node_index - 1) };
101 |         block.order_free = 0;
102 |
103 |         // Iterate upwards and set parents accordingly
104 |         for _ in 0..max_level {
105 |             // `node_index & !1` clears the last bit, giving the left sibling's 1-indexed position
106 |             // (an even number), which doubles as the right sibling's 0-indexed position
107 |             let right_index = node_index & !1;
108 |             node_index = flat_tree::parent(node_index);
109 |
110 |             let left = unsafe { self.block(right_index - 1) }.order_free;
111 |             let right = unsafe { self.block(right_index) }.order_free;
112 |
113 |             unsafe { self.block_mut(node_index - 1) }.order_free = cmp::max(left, right);
114 |         }
115 |
116 |         Some(addr as *const u8)
117 |     }
118 | }
119 |
120 | /// Flat tree helper functions.
121 | ///
122 | /// # Note
123 | /// **1 INDEXED!**
124 | mod flat_tree {
125 |     #[inline]
126 |     pub fn left_child(index: usize) -> usize {
127 |         index << 1
128 |     }
129 |
130 |     #[inline]
131 |     pub fn parent(index: usize) -> usize {
132 |         index >> 1
133 |     }
134 | }
135 |
136 | pub fn demo(print_addresses: bool, blocks: u32, order: u8) -> Duration {
137 |     let num_trees = ((blocks as f32) / (Tree::blocks_in_level(MAX_ORDER - order) as f32)).ceil() as usize;
138 |
139 |     let mut trees = Vec::with_capacity(num_trees);
140 |     for _ in 0..num_trees {
141 |         trees.push(Tree::new());
142 |     }
143 |
144 |     let start = Instant::now();
145 |     let mut current_tree = 0;
146 |
147 |     for _ in 0..blocks {
148 |         let addr = match trees[current_tree].alloc_exact(order) {
149 |             Some(addr) => addr,
150 |             None => {
151 |                 current_tree += 1;
152 |                 trees[current_tree].alloc_exact(order).unwrap()
153 |             }
154 |         };
155 |
156 |         if print_addresses {
157 |             println!("Address: {:#x}", addr as usize);
158 |         }
159 |     }
160 |
161 |     start.elapsed()
162 | }
163 |
164 | #[cfg(test)]
165 | mod test {
166 |     use std::collections::BTreeSet;
167 |     use super::*;
168 |
169 |     #[test]
170 |     fn test_flat_tree_fns() {
171 |         use super::flat_tree::*;
172 |         //     1
173 |         //   2   3
174 |         //  4 5 6 7
175 |         assert_eq!(left_child(1), 2);
176 |         assert_eq!(parent(2), 1);
177 |     }
178 |
179 |     #[test]
180 |     fn test_blocks_in_tree() {
181 |         assert_eq!(Tree::blocks_in_tree(3), 1 + 2 + 4);
182 |         assert_eq!(Tree::blocks_in_tree(1), 1);
183 |     }
184 |
185 |     #[test]
186 |     fn test_tree_runs_out_of_blocks() {
187 |         let mut tree = Tree::new();
188 |         let max_blocks = Tree::blocks_in_level(MAX_ORDER);
189 |         for _ in 0..max_blocks {
190 |             assert_ne!(tree.alloc_exact(0), None);
191 |         }
192 |
193 |         assert_eq!(tree.alloc_exact(0), None);
194 |     }
195 |
196 |     #[test]
197 |     fn test_init_tree() {
198 |         let tree = Tree::new();
199 |
200 |         // Highest level has 1 block, next has 2, next 4
201 |         assert_eq!(tree.flat_blocks[0].order_free, 19);
202 |
203 |         assert_eq!(tree.flat_blocks[1].order_free, 18);
204 |         assert_eq!(tree.flat_blocks[2].order_free, 18);
205 |
206 |         assert_eq!(tree.flat_blocks[3].order_free, 17);
207 |         assert_eq!(tree.flat_blocks[4].order_free, 17);
208 |         assert_eq!(tree.flat_blocks[5].order_free, 17);
209 |         assert_eq!(tree.flat_blocks[6].order_free, 17);
210 |     }
211 |
212 |     #[test]
213 |     fn test_alloc_exact() {
214 |         let mut tree = Tree::new();
215 |         tree.alloc_exact(3).unwrap();
216 |
217 |         tree = Tree::new();
218 |         assert_eq!(tree.alloc_exact(MAX_ORDER - 1), Some(0x0 as *const u8));
219 |         assert_eq!(
220 |             tree.alloc_exact(MAX_ORDER - 1),
221 |             Some((2usize.pow(MAX_ORDER_SIZE as u32) / 2) as *const u8)
222 |         );
223 |         assert_eq!(tree.alloc_exact(0), None);
224 |         assert_eq!(tree.alloc_exact(MAX_ORDER - 1), None);
225 |
226 |         tree = Tree::new();
227 |         assert_eq!(tree.alloc_exact(MAX_ORDER), Some(0x0 as *const u8));
228 |         assert_eq!(tree.alloc_exact(MAX_ORDER), None);
229 |     }
230 |
231 |     #[test]
232 |     fn test_alloc_unique_addresses() {
233 |         let max_blocks = Tree::blocks_in_level(MAX_ORDER);
234 |         let mut seen = BTreeSet::new();
235 |         let mut tree = Tree::new();
236 |
237 |         for _ in 0..max_blocks {
238 |             let addr = tree.alloc_exact(0).unwrap();
239 |
240 |             if seen.contains(&addr) {
241 |                 panic!("Allocator must return addresses that have never been allocated before!");
242 |             } else {
243 |                 seen.insert(addr);
244 |             }
245 |         }
246 |     }
247 | }
248 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Buddy Allocator Workshop
2 |
3 | This repository contains some small example implementations of
4 | [buddy allocators][buddy memory allocation]. They are designed to
5 | allocate physical memory, although they could be used for other types of
6 | allocation, such as the heap. Eventually, the best performing one will
7 | be merged into [flower][flower].
8 |
9 | # Getting Started
10 |
11 | First, clone the repo. Then, `cd` into it and do `cargo +nightly run` to
12 | run all the demo allocators. By default, the block size is 4KiB and the
13 | number of blocks is 100 000, so this may take a while for the linked
14 | lists example. Don't worry, it won't actually allocate anything -- only
15 | mock memory blocks. Pass `-h` or `--help` to get help and view the
16 | usage. You can edit the source code to change min/max block sizes, etc.
17 | To run the unit tests, run `cargo test`. Unfortunately there are no
18 | cargo benchmarks yet, but I have benchmarked it rather unscientifically
19 | on my Windows machine.
20 |
21 | # Implementations
22 |
23 | ## Benchmark
24 |
25 | I tested the algorithms by timing various implementations using the
26 | builtin reporting, allocating a gibibyte in 4KiB blocks (with printing
27 | off) on my Windows machine. If you have any other benchmarks to add,
28 | please see [Contributing][contributing section].
29 |
30 | ### Specifications
31 |
32 | ![Computer Specifications][specs]
33 |
34 | (MSI CX61-2QF)
35 |
36 | ### Table
37 |
38 | | Implementation                | Time   | Throughput      |
39 | |-------------------------------|--------|-----------------|
40 | | Lists - Vectors               | 2 min  | ~8.33e-3 GiB/s  |
41 | | Lists - Doubly Linked Lists   | 25min  | ~6.66e-4 GiB/s  |
42 | | RB Trees - Vectors            | ~0.3s  | ~3.33 GiB/s     |
43 | | RB Trees - Singly Linked Lists| ~0.5s  | ~2 GiB/s        |
44 | | Bitmap Tree                   | ~0.07s | ~14.28 GiB/s    |
45 |
46 | **Note:** The throughput is extrapolated from the time it took to
47 | allocate 1 GiB in 4KiB blocks. For implementations that have complexity
48 | \>O(log n) (such as the naive list based implementation), this will not
49 | be accurate -- the throughput will slow down as more blocks are
50 | allocated. This should be accurate for ones that have a complexity
51 | of O(log n) or less, though.
52 |
53 | ## Naive List Based Implementation
54 |
55 | This implementation keeps a list per order of block. It is generic over
56 | the type of list used. I decided to use two kinds of lists: vectors
57 | (`Vec` from `std`), and doubly linked lists (`LinkedList`, also from
58 | `std`). Linked lists are often prized for their predictable push time
59 | (no reallocation necessary for pushing), while vectors have better cache
60 | locality as the elements are allocated in a contiguous memory block. I
61 | used doubly linked lists because they are faster for indexing than
62 | singly linked lists, as they can iterate from the back or front
63 | depending on whether the index is closer to the beginning or end of the
64 | list. I decided to test both to see which would perform better overall.
65 |
66 | The implementation is recursive. To allocate a free block of order *k*,
67 | it first searches for any free blocks in the list of order *k* blocks.
68 | It does not keep a free list. If none are found, it recurses by trying
69 | to allocate a block of order *k* + 1. Finally, if no free blocks were
70 | found at any point, it gives up and panics. As soon as one is found, it
71 | splits it in half, removing the original block from its order list and
72 | pushing the halves to the order list immediately below. It then returns
73 | the order and index of the first block in its order list. You can find
74 | this algorithm in [`find_or_split`][find_or_split lists]; a condensed sketch follows.
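To make the recursion concrete, here is a small self-contained model of the same idea. It is illustrative only, not the repository's implementation: the real [`find_or_split`][find_or_split lists] is generic over the list type, and the `MAX_ORDER`/`BASE_ORDER` values here are shrunk stand-ins for the crate's constants.

```rust
// Simplified model of the naive list-based buddy allocator (illustrative only).
const MAX_ORDER: u8 = 3;   // stand-in; the crate uses 18
const BASE_ORDER: u8 = 12; // order-0 blocks are 4KiB

#[derive(Clone, Copy, PartialEq)]
enum State { Used, Free }

#[derive(Clone, Copy)]
struct Block { begin_address: usize, state: State }

struct Allocator { lists: Vec<Vec<Block>> } // lists[k] holds the order-k blocks

impl Allocator {
    /// Find a free block of `order`, splitting a larger block if none exists.
    /// Returns (order, index into that order's list).
    fn find_or_split(&mut self, order: u8) -> (u8, usize) {
        if let Some(i) = self.lists[order as usize]
            .iter()
            .position(|b| b.state == State::Free)
        {
            return (order, i); // a linear search: the expensive part
        }
        assert!(order < MAX_ORDER, "no free blocks left: give up and panic");
        // Recurse: get a free block of order + 1 and split it in half.
        let (parent_order, i) = self.find_or_split(order + 1);
        let parent = self.lists[parent_order as usize].remove(i);
        let half = 1usize << (order + BASE_ORDER); // block size = 2^(order + BASE_ORDER)
        let list = &mut self.lists[order as usize];
        list.push(Block { begin_address: parent.begin_address, state: State::Free });
        list.push(Block { begin_address: parent.begin_address + half, state: State::Free });
        (order, list.len() - 2) // index of the first half
    }
}

fn main() {
    let mut a = Allocator { lists: vec![Vec::new(); (MAX_ORDER + 1) as usize] };
    a.lists[MAX_ORDER as usize].push(Block { begin_address: 0, state: State::Free });
    let (order, i) = a.find_or_split(0);
    a.lists[order as usize][i].state = State::Used;
    println!("allocated order-{} block at {:#x}", order, a.lists[order as usize][i].begin_address);
}
```

The caller (`allocate_exact` in the real code) is what finally marks the returned block as used.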
75 |
76 |
77 | ### Vectors
78 | A quick, un-scientific benchmark on my Windows machine says that it took
79 | around two minutes to allocate a full gibibyte (1024^3 bytes). I did
80 | notice split second pauses every now and again when it had to reallocate
81 | the entire vector to push an element.
82 |
83 | ### `std`'s Doubly Linked Lists
84 |
85 | A similar benchmark says that it took **twenty-five** minutes to
86 | allocate a full gibibyte. This is **over twelve times slower** than
87 | the same implementation with vectors. However, this implementation
88 | wasn't optimised for linked lists, so it is slightly unfair. Unlike the
89 | implementation with vectors, I did not notice any pauses, but allocation
90 | gradually got slower and slower.
91 |
92 | ----
93 |
94 | We can conclude that although doubly linked lists are *in theory* faster
95 | at pushing than vectors are, they were still 12 times slower than
96 | vectors. This could be because the implementation was slightly in favour
97 | of vectors (lots of indexing), or because the vectors had higher cache
98 | locality and therefore experienced fewer cache misses, while linked lists
99 | experience many cache misses, as their elements are individually
100 | heap-allocated.
101 |
102 | ## Red-Black Tree
103 |
104 | This implementation keeps one red-black tree (from
105 | `intrusive_collections`) for all blocks and a free list for each order.
106 | The free lists were implemented for std's `Vec` and
107 | `intrusive_collections`'s `SinglyLinkedList`. I chose a singly linked
108 | list as there would have been no real benefit to double linking -- the
109 | only method that would have benefited (negligibly so) is
110 | `FreeList::remove`, but this is always called at most on the second
111 | element in this free list, so there is no real point in optimizing this.
112 | The red-black tree individually heap allocates each node, which makes
113 | the cache efficiency worse, but unlike `std`'s `BTreeSet`/`BTreeMap` it
114 | does a binary search; `std`'s B-trees search linearly within each node
115 | (you can read about this [here][btreemap]). However, `std`'s
116 | trees do not individually heap allocate nodes, so cache locality is
117 | better. I decided that although this was true, since a buddy allocator
118 | must deal with incredibly large numbers of blocks, it was more important
119 | to have a more efficient search algorithm.
120 |
121 | The implementation is recursive. To allocate a free block of order *k*,
122 | it first searches for any free blocks in the free list of order-*k*
123 | blocks. If none are found, it recurses by trying to allocate a block
124 | of order *k* + 1. Finally, if no free blocks were found at any point, it
125 | gives up and panics. As soon as one is found, it splits it in half,
126 | removing the original block from the tree and inserting the halves,
127 | pushing their addresses to the relevant free list. It then returns a
128 | cursor pointing to the first block. You can find this algorithm in
129 | [`find_or_split`][find_or_split trees]. At the outermost layer of
130 | recursion (the function that actually calls the recursive
131 | `find_or_split` function), the returned block is marked as used and
132 | removed from the free list. A sketch of this allocation path follows.
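The shape of that algorithm, with `std`'s `BTreeMap` standing in for the intrusive red-black tree and plain `Vec`s as free lists. This is a simplified sketch, not the repository's implementation -- the used-bit bookkeeping and error types are omitted, and `MAX_ORDER`/`BASE_ORDER` are shrunk stand-ins:

```rust
// Sketch of the tree + free-list interplay (illustrative only).
use std::collections::BTreeMap;

const MAX_ORDER: u8 = 3;   // stand-in; the crate uses 18
const BASE_ORDER: u8 = 12;

struct Allocator {
    tree: BTreeMap<usize, u8>, // begin address -> order, kept sorted by address
    free: Vec<Vec<usize>>,     // free[k] = addresses of free order-k blocks
}

impl Allocator {
    fn find_or_split(&mut self, order: u8) -> usize {
        // O(1): no search at all if a block of this order is already free.
        if let Some(addr) = self.free[order as usize].pop() {
            return addr;
        }
        assert!(order < MAX_ORDER, "no blocks available");
        // Split a block of order + 1: swap it for its two halves in the tree
        // and push the second half's address onto this order's free list.
        let parent = self.find_or_split(order + 1);
        let half = 1usize << (order + BASE_ORDER);
        self.tree.remove(&parent);
        self.tree.insert(parent, order);
        self.tree.insert(parent + half, order);
        self.free[order as usize].push(parent + half);
        parent // the first half is handed straight to the caller
    }
}

fn main() {
    let mut a = Allocator {
        tree: BTreeMap::new(),
        free: vec![Vec::new(); (MAX_ORDER + 1) as usize],
    };
    a.tree.insert(0, MAX_ORDER); // one free top-level block
    a.free[MAX_ORDER as usize].push(0);
    println!("allocated at {:#x}", a.find_or_split(0));
}
```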
133 |
134 | ### Vectors as Free Lists
135 |
136 | Using vectors as free lists took ~0.3s to allocate a full GiB. This is
137 | ~0.2s faster than the linked lists as free lists version. This is
138 | probably due to vectors having better cache locality.
139 |
140 | ### Linked Lists as Free Lists
141 |
142 | Using linked lists as free lists took ~0.5s to allocate a full GiB. See
143 | the [Vectors as Free Lists][vectors as free lists] section above.
144 |
145 | ---
146 |
147 | This implementation was *400x faster* than the naive list based
148 | implementation (at best, using vectors as free lists). This is probably
149 | due to red-black trees having `O(log n)` operations across the board,
150 | faster than the searches, inserts, and removes of vectors or linked
151 | lists.
152 |
153 | ## Bitmap Tree Buddy Allocator
154 |
155 | This implementation is not strictly a bitmap, per se, but is a
156 | modification of a bitmap system. Essentially, each block in the tree
157 | stores the largest order (fully merged) *somewhere* underneath it. For
158 | instance, a tree which is all free with 4 orders looks like this:
159 |
160 | ```
161 |        3
162 |    2       2
163 |  1   1   1   1
164 | 0 0 0 0 0 0 0 0
165 | ```
166 |
167 | If we allocate one order 0 block, it looks like this (T is **t**aken):
168 |
169 | ```
170 |        2
171 |    1       2
172 |  0   1   1   1
173 | T 0 0 0 0 0 0 0
174 | ```
175 |
176 | It is implemented as a flattened array, where for a tree like
177 | ```
178 |  1
179 | 2 3
180 | ```
181 |
182 | the representation is `1; 2; 3`. This has the nice property that if we
183 | use indices beginning at 1 (i.e. indices and not offsets), then the index
184 | of the left child of any given index is `2 * index`, and the right child
185 | is simply `2 * index + 1`. The parent is `floor(index / 2)`. Because all
186 | of these operations work with 2s, we can use efficient bitshifting to
187 | execute them (`index << 1`, `(index << 1) | 1`, and `index >> 1`).
188 |
189 | We can do a binary search to find a free block of the desired
190 | order. First, we check if there are any blocks of the desired
191 | order free by checking the root block. If there are, we check if the
192 | left child has enough free. If it does, then we again check its left
193 | child, etc. If a block's left child does not have enough blocks free, we
194 | simply use its right child. We know that the right child must then have
195 | enough free, or the parent block is invalid (see the sketch at the end of this section).
196 |
197 |
198 | This implementation was *very fast.* On my computer, it only took ~0.07s
199 | to allocate 1GiB. I have seen it perform as fast as 0.04s on my computer,
200 | though -- performance does fluctuate a bit. I assume that this is to do
201 | with CPU load.
202 |
203 | This implementation does not have very good cache locality, as levels
204 | are stored far from each other, so a parent block can be very far from
205 | its child. However, everything else is still very fast, which makes up
206 | for it. It is also O(log n), but practically it is so fast that this
207 | does not really matter. For reference: allocating 8GiB took 0.6s for me,
208 | but I have seen it perform much better at >150ms on [@gegy1000][gegy]'s
209 | laptop.
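Here is a small sketch of the index arithmetic and the descent described above. It is illustrative only; note that the real code stores each value with a +1 offset (`order_free` is the largest free order plus one, with `0` meaning used), whereas the diagrams above show raw orders.

```rust
// 1-indexed flat-tree navigation: children via shifts, parent via shift right.
fn left_child(index: usize) -> usize { index << 1 }
fn right_child(index: usize) -> usize { (index << 1) | 1 }
fn parent(index: usize) -> usize { index >> 1 }

/// Binary-search descent to a free block of `desired` order.
/// `order_free[i - 1]` is one plus the largest free order under node `i`.
fn descend(order_free: &[u8], max_order: u8, desired: u8) -> Option<usize> {
    if order_free[0] == 0 || order_free[0] - 1 < desired {
        return None; // the root already says nothing big enough is free
    }
    let mut node = 1; // 1-indexed root
    for _ in 0..(max_order - desired) {
        let left = left_child(node);
        // Prefer the left child if it can satisfy the request; otherwise the
        // right child must be able to, or the parent's value was invalid.
        node = if order_free[left - 1] > desired { left } else { right_child(node) };
    }
    Some(node)
}

fn main() {
    // A fully free 3-order tree; stored values are order + 1:
    //       3
    //     2   2
    //    1 1 1 1
    let order_free = [3u8, 2, 2, 1, 1, 1, 1];
    assert_eq!(left_child(1), 2);
    assert_eq!(right_child(1), 3);
    assert_eq!(parent(3), 1);
    // An order-0 request walks down the left spine to node 4 (the first leaf).
    assert_eq!(descend(&order_free, 2, 0), Some(4));
    println!("ok");
}
```

In the real allocator, the descent also accumulates the block's address along the way, and allocation finishes with a second, upward pass that re-derives each parent's `order_free` from its children.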
210 |
211 | # Contributing
212 |
213 | If you have anything to add (such as an edit to the readme or another
214 | implementation or benchmark) feel free to
215 | [submit a pull request][submit a pr]! You can also
216 | [create an issue][create an issue]. If you just want to chat, feel free
217 | to ping me on the [Rust Discord][rust discord] (Restioson#8323).
218 |
219 | [flower]: https://github.com/Restioson/flower
220 | [specs]: https://i.imgur.com/DLLVS55.png
221 | [find_or_split lists]: https://github.com/Restioson/buddy-allocator-workshop/blob/master/src/buddy_allocator_lists.rs#L256
222 | [buddy memory allocation]: https://en.wikipedia.org/wiki/Buddy_memory_allocation
223 | [rust discord]: https://discord.me/rust-lang
224 | [create an issue]: https://github.com/Restioson/buddy-allocator-workshop/issues/new
225 | [submit a pr]: https://github.com/Restioson/buddy-allocator-workshop/compare
226 | [contributing section]: https://github.com/Restioson/buddy-allocator-workshop#contributing
227 | [btreemap]: https://doc.rust-lang.org/std/collections/struct.BTreeMap.html
228 | [find_or_split trees]: https://github.com/Restioson/buddy-allocator-workshop/blob/master/src/buddy_allocator_tree.rs#L225
229 | [vectors as free lists]: https://github.com/Restioson/buddy-allocator-workshop#vectors-as-free-lists
230 | [gegy]: https://github.com/gegy1000
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/src/buddy_allocator_tree.rs:
--------------------------------------------------------------------------------
1 | use super::{top_level_blocks, MAX_ORDER, BASE_ORDER, LEVEL_COUNT, MAX_ORDER_SIZE};
2 | use array_init;
3 | use bit_field::BitField;
4 | #[cfg(feature = "flame_profile")]
5 | use flame;
6 | use intrusive_collections::rbtree::CursorMut;
7 | use intrusive_collections::{KeyAdapter, RBTree, RBTreeLink, SinglyLinkedList, SinglyLinkedListLink};
8 | use std::cell::Cell;
9 | use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
10 | use std::ptr;
11 | use std::time::{Instant, Duration};
12 |
13 | #[derive(Debug)]
14 | pub struct Block {
15 |     link: RBTreeLink,
16 |     bit_field: Cell<u64>,
17 | }
18 |
19 | impl Block {
20 |     fn new(begin_address: usize, order: u8, used: bool) -> Self {
21 |         let mut bit_field = 0u64;
22 |         bit_field.set_bit(0, used);
23 |         bit_field.set_bits(1..8, u64::from(order));
24 |         bit_field.set_bits(8..64, begin_address as u64);
25 |
26 |         Block {
27 |             link: RBTreeLink::new(),
28 |             bit_field: Cell::new(bit_field),
29 |         }
30 |     }
31 |
32 |     #[inline]
33 |     fn used(&self) -> bool {
34 |         self.bit_field.get().get_bit(0)
35 |     }
36 |
37 |     /// Set the state of this block. Unsafe because the caller might not have unique access to the
38 |     /// block. Needed to mutate the block while it is in the tree
39 |     #[inline]
40 |     unsafe fn set_used(&self, used: bool) {
41 |         let mut copy = self.bit_field.get();
42 |         copy.set_bit(0, used);
43 |
44 |         self.bit_field.set(copy)
45 |     }
46 |
47 |     #[inline]
48 |     fn order(&self) -> u8 {
49 |         self.bit_field.get().get_bits(1..8) as u8 // 7 bits for max = 64
50 |     }
51 |
52 |     #[inline]
53 |     fn address(&self) -> usize {
54 |         self.bit_field.get().get_bits(8..64) as usize // max physical memory = 2^56 - 1 bytes
55 |     }
56 | }
57 |
58 | intrusive_adapter!(pub BlockAdapter = Box<Block>: Block { link: RBTreeLink });
59 |
60 | impl<'a> KeyAdapter<'a> for BlockAdapter {
61 |     type Key = usize;
62 |     fn get_key(&self, block: &'a Block) -> usize {
63 |         block.address()
64 |     }
65 | }
66 |
67 | impl PartialOrd for Block {
68 |     fn partial_cmp(&self, other: &Block) -> Option<Ordering> {
69 |         self.address().partial_cmp(&other.address())
70 |     }
71 | }
72 |
73 | impl Ord for Block {
74 |     fn cmp(&self, other: &Block) -> Ordering {
75 |         self.address().cmp(&other.address())
76 |     }
77 | }
78 |
79 | impl PartialEq for Block {
80 |     fn eq(&self, other: &Block) -> bool {
81 |         let properties_eq = self.order() == other.order() && self.used() == other.used();
82 |         let address_eq = self.address() == other.address();
83 |
84 |         // Addresses can't be the same without properties being the same
85 |         if cfg!(debug_assertions) && address_eq && !properties_eq {
86 |             panic!("Addresses can't be the same without properties being the same!");
87 |         }
88 |
89 |         properties_eq && address_eq
90 |     }
91 | }
92 |
93 | impl Eq for Block {}
94 |
95 | #[derive(Debug)]
96 | pub struct BuddyAllocator<L: FreeList> {
97 |     tree: RBTree<BlockAdapter>,
98 |     free: [L; LEVEL_COUNT as usize],
99 | }
100 |
101 | pub trait FreeList {
102 |     fn push(&mut self, block: *const Block);
103 |     fn pop(&mut self) -> Option<*const Block>;
104 |     /// Search for an address and remove it from the list
105 |     fn remove(&mut self, addr: *const Block) -> Option<()>;
106 | }
107 |
108 | impl FreeList for Vec<*const Block> {
109 |     fn push(&mut self, block: *const Block) {
110 |         Vec::push(self, block);
111 |     }
112 |
113 |     fn pop(&mut self) -> Option<*const Block> {
114 |         Vec::pop(self)
115 |     }
116 |
117 |     fn remove(&mut self, block: *const Block) -> Option<()> {
118 |         self.remove(self.iter().position(|i| ptr::eq(*i, block))?);
119 |         Some(())
120 |     }
121 | }
122 |
123 | #[derive(Debug)]
124 | pub struct BlockPtr {
125 |     link: SinglyLinkedListLink,
126 |     ptr: *const Block,
127 | }
128 |
129 | impl BlockPtr {
130 |     /// Creates a new, unlinked [BlockPtr].
131 |     fn new(ptr: *const Block) -> BlockPtr {
132 |         BlockPtr {
133 |             link: SinglyLinkedListLink::new(),
134 |             ptr,
135 |         }
136 |     }
137 | }
138 |
139 | intrusive_adapter!(pub BlockPtrAdapter = Box<BlockPtr>: BlockPtr { link: SinglyLinkedListLink });
140 |
141 | impl FreeList for SinglyLinkedList<BlockPtrAdapter> {
142 |     fn push(&mut self, block: *const Block) {
143 |         self.push_front(Box::new(BlockPtr::new(block)))
144 |     }
145 |
146 |     fn pop(&mut self) -> Option<*const Block> {
147 |         self.pop_front().map(|b| b.ptr)
148 |     }
149 |
150 |     fn remove(&mut self, block: *const Block) -> Option<()> {
151 |         let pos = self.iter().position(|i| ptr::eq(i.ptr, block))?;
152 |
153 |         // The front element has no predecessor, so just pop it off
154 |         if pos == 0 {
155 |             return self.pop_front().map(|_| ());
156 |         }
157 |         // Get cursor to be the elem before position
158 |         let mut cursor = self.front_mut();
159 |         for _ in 0..pos - 1 {
160 |             cursor.move_next();
161 |         }
162 |
163 |         cursor.remove_next().unwrap();
164 |
165 |         Some(())
166 |     }
167 | }
168 |
169 | impl BuddyAllocator<Vec<*const Block>> {
170 |     pub fn new() -> Self {
171 |         BuddyAllocator {
172 |             tree: RBTree::new(BlockAdapter::new()),
173 |             free: array_init::array_init(|_| Vec::new()),
174 |         }
175 |     }
176 | }
177 |
178 | impl BuddyAllocator<SinglyLinkedList<BlockPtrAdapter>> {
179 |     pub fn new() -> Self {
180 |         BuddyAllocator {
181 |             tree: RBTree::new(BlockAdapter::new()),
182 |             free: array_init::array_init(|_| SinglyLinkedList::new(BlockPtrAdapter::new())),
183 |         }
184 |     }
185 | }
186 |
187 | impl<L: FreeList> BuddyAllocator<L> {
188 |     pub fn create_top_level(&mut self, begin_address: usize) -> CursorMut<BlockAdapter> {
189 |         let cursor = self.tree
190 |             .insert(Box::new(Block::new(begin_address, MAX_ORDER, false)));
191 |         self.free[MAX_ORDER as usize].push(cursor.get().unwrap() as *const _);
192 |         cursor
193 |     }
194 |
195 |     /// Splits a block in place, returning the addresses of the two blocks split. Does not add them
196 |     /// to the free list, or remove the original. The cursor will point to the first block.
197 |     ///
198 |     /// # Panicking
199 |     ///
200 |     /// 1. Index incorrect and points to a null block (this is a programming error)
201 |     /// 2. Attempt to split a used block (this is also a programming error)
202 |     #[cfg_attr(feature = "flame_profile", flame)]
203 |     fn split(cursor: &mut CursorMut<BlockAdapter>) -> Result<[*const Block; 2], BlockSplitError> {
204 |         #[cfg(feature = "flame_profile")]
205 |         flame::note("split", None);
206 |         let block = cursor.get().unwrap();
207 |
208 |         if block.used() {
209 |             panic!("Attempted to split used block {:?}!", block);
210 |         }
211 |
212 |         let original_order = block.order();
213 |
214 |         if original_order == 0 {
215 |             return Err(BlockSplitError::BlockSmallestPossible);
216 |         }
217 |         let order = original_order - 1;
218 |
219 |         let buddies: [Block; 2] = array_init::array_init(|n| {
220 |             Block::new(
221 |                 if n == 0 {
222 |                     block.address()
223 |                 } else {
224 |                     block.address() + 2usize.pow(u32::from(order + BASE_ORDER))
225 |                 },
226 |                 order,
227 |                 false,
228 |             )
229 |         });
230 |
231 |         let [first, second] = buddies;
232 |
233 |         // Reuse the old box
234 |         let mut old = cursor.remove().unwrap();
235 |         *old = first;
236 |         cursor.insert_before(old);
237 |         cursor.insert_before(Box::new(second));
238 |
239 |         // Reversed pointers
240 |         let ptrs: [*const _; 2] = array_init::array_init(|_| {
241 |             cursor.move_prev();
242 |             cursor.get().unwrap() as *const _
243 |         });
244 |
245 |         Ok([ptrs[1], ptrs[0]])
246 |     }
247 |
248 |     /// Finds a frame of a given order, or splits other frames recursively until one is made, and
249 |     /// then returns a cursor pointing to it. Does not set state to used.
250 |     ///
251 |     /// # Panicking
252 |     ///
253 |     /// Panics if the order is greater than max or if a programming error is encountered such as
254 |     /// attempting to split a block of the smallest possible size.
255 |     #[cfg_attr(feature = "flame_profile", flame)]
256 |     fn find_or_split<'a>(
257 |         free: &mut [L; 19],
258 |         tree: &'a mut RBTree<BlockAdapter>,
259 |         order: u8,
260 |     ) -> Result<CursorMut<'a, BlockAdapter>, BlockAllocateError> {
261 |         #[cfg(feature = "flame_profile")]
262 |         flame::note("find_or_split", None);
263 |
264 |         if order > MAX_ORDER {
265 |             panic!("Order {} larger than max of {}!", order, MAX_ORDER);
266 |         }
267 |
268 |         // Find free block of size >= order
269 |         let next_free = free[order as usize].pop();
270 |
271 |         match next_free {
272 |             Some(ptr) => Ok(unsafe { tree.cursor_mut_from_ptr(ptr) }),
273 |             None if order == MAX_ORDER => Err(BlockAllocateError::NoBlocksAvailable),
274 |             None => {
275 |                 let mut cursor = BuddyAllocator::find_or_split(free, tree, order + 1)?;
276 |                 debug_assert!(
277 |                     !cursor.is_null(),
278 |                     "Find or split must return a valid pointer!"
279 |                 );
280 |
281 |                 // Split block and remove it from the free list
282 |                 let old_ptr = cursor.get().unwrap() as *const _;
283 |                 let ptrs = Self::split(&mut cursor).unwrap();
284 |                 free[order as usize + 1].remove(old_ptr);
285 |
286 |                 // Push split blocks to free list
287 |                 free[order as usize].push(ptrs[0]);
288 |                 free[order as usize].push(ptrs[1]);
289 |
290 |                 Ok(cursor)
291 |             }
292 |         }
293 |     }
294 |
295 |     #[cfg_attr(feature = "flame_profile", flame)]
296 |     pub fn allocate_exact(
297 |         &mut self,
298 |         order: u8,
299 |     ) -> Result<CursorMut<BlockAdapter>, BlockAllocateError> {
300 |         #[cfg(feature = "flame_profile")]
301 |         flame::note("allocate exact", None);
302 |
303 |         if order > MAX_ORDER {
304 |             return Err(BlockAllocateError::OrderTooLarge(order));
305 |         }
306 |
307 |         #[cfg(feature = "flame_profile")]
308 |         flame::note("allocate begin", None);
309 |
310 |         let block = BuddyAllocator::find_or_split(&mut self.free, &mut self.tree, order)?;
311 |
312 |         // Safe because we have exclusive access to `block`.
313 |         unsafe {
314 |             block.get().unwrap().set_used(true);
315 |         }
316 |
317 |         let ptr = block.get().unwrap() as *const _;
318 |         self.free[order as usize].remove(ptr);
319 |
320 |         Ok(block)
321 |     }
322 | }
323 |
324 | #[derive(Debug, Copy, Clone)]
325 | pub enum BlockSplitError {
326 |     BlockSmallestPossible,
327 | }
328 |
329 | #[derive(Debug, Copy, Clone)]
330 | pub enum BlockAllocateError {
331 |     NoBlocksAvailable,
332 |     OrderTooLarge(u8),
333 | }
334 |
335 | pub fn demo_vecs(print_addresses: bool, blocks: u32, block_size: u8) -> Duration {
336 |     let allocator = BuddyAllocator::<Vec<*const Block>>::new();
337 |     demo(allocator, print_addresses, blocks, block_size)
338 | }
339 |
340 | pub fn demo_linked_lists(print_addresses: bool, blocks: u32, block_size: u8) -> Duration {
341 |     let allocator = BuddyAllocator::<SinglyLinkedList<BlockPtrAdapter>>::new();
342 |     demo(allocator, print_addresses, blocks, block_size)
343 | }
344 |
345 | fn demo<L: FreeList>(
346 |     mut allocator: BuddyAllocator<L>,
347 |     print_addresses: bool,
348 |     blocks: u32,
349 |     block_size: u8,
350 | ) -> Duration {
351 |     let top_level_blocks = top_level_blocks(blocks, block_size);
352 |
353 |     for block_number in 0..top_level_blocks {
354 |         allocator
355 |             .create_top_level(2usize.pow(u32::from(MAX_ORDER + BASE_ORDER)) * block_number as usize);
356 |     }
357 |
358 |     let begin = Instant::now();
359 |
360 |     for _ in 0..blocks {
361 |         let cursor = allocator.allocate_exact(block_size).unwrap();
362 |         let addr = cursor.get().unwrap().address();
363 |
364 |         if print_addresses {
365 |             println!("Address: {:#x}", addr);
366 |         }
367 |     }
368 |
369 |     begin.elapsed()
370 | }
371 |
372 | #[cfg(test)]
373 | mod test {
374 |     use super::*;
375 |
376 |     #[test]
377 |     fn test_create_top_level() {
378 |         let mut allocator = BuddyAllocator::<Vec<*const Block>>::new();
379 |         allocator.create_top_level(0);
380 |         allocator.create_top_level(2usize.pow(MAX_ORDER_SIZE as u32));
381 |
382 |         let expected = vec![
383 |             Block::new(0, MAX_ORDER, false),
384 |             Block::new(2usize.pow(MAX_ORDER_SIZE as u32), MAX_ORDER, false),
385 |         ];
386 |
387 |         assert_eq!(
388 |             allocator
389 |                 .tree
390 |                 .into_iter()
391 |                 .map(|b| *b)
392 |                 .collect::<Vec<_>>(),
393 |             expected
394 |         );
395 |     }
396 |
397 |     #[test]
398 |     fn split() {
399 |         let mut allocator = BuddyAllocator::<Vec<*const Block>>::new();
400 |         let mut block = allocator.create_top_level(0);
401 |         BuddyAllocator::<Vec<*const Block>>::split(&mut block).unwrap();
402 |
403 |         let expected = vec![
404 |             Block::new(0, MAX_ORDER - 1, false),
405 |             Block::new(
406 |                 2usize.pow((MAX_ORDER_SIZE - 1) as u32),
407 |                 MAX_ORDER - 1,
408 |                 false,
409 |             ),
410 |         ];
411 |
412 |         assert_eq!(
413 |             allocator
414 |                 .tree
415 |                 .into_iter()
416 |                 .map(|b| *b)
417 |                 .collect::<Vec<_>>(),
418 |             expected
419 |         );
420 |     }
421 |
422 |     #[test]
423 |     fn test_allocate_exact_with_free() {
424 |         let mut allocator = BuddyAllocator::<Vec<*const Block>>::new();
425 |         allocator.create_top_level(0);
426 |         let cursor = allocator.allocate_exact(MAX_ORDER).unwrap();
427 |         let expected_block = Block::new(0, MAX_ORDER, true);
428 |         assert_eq!(*cursor.get().unwrap(), expected_block);
429 |     }
430 |
431 |     #[test]
432 |     fn test_allocate_exact_no_free() {
433 |         let mut allocator = BuddyAllocator::<Vec<*const Block>>::new();
434 |         allocator.create_top_level(0);
435 |         let cursor = allocator.allocate_exact(MAX_ORDER - 2).unwrap();
436 |         let expected_block = Block::new(0, MAX_ORDER - 2, true);
437 |
438 |         assert_eq!(*cursor.get().unwrap(), expected_block);
439 |     }
440 |
441 |     #[test]
442 |     fn test_linked_list_remove() {
443 |         let mut list = SinglyLinkedList::<BlockPtrAdapter>::new(BlockPtrAdapter::new());
444 |         list.push_front(Box::new(BlockPtr::new(1 as *const _)));
445 |         list.push_front(Box::new(BlockPtr::new(2 as *const _)));
446 |         list.push_front(Box::new(BlockPtr::new(3 as *const _)));
447 |         list.push_front(Box::new(BlockPtr::new(4 as *const _)));
448 |         list.push_front(Box::new(BlockPtr::new(5 as *const _)));
449 |         list.remove(2 as *const _).unwrap();
450 |
451 |         assert_eq!(
452 |             list.iter().map(|i| i.ptr).collect::<Vec<_>>(),
453 |             vec![5 as *const _, 4 as *const _, 3 as *const _, 1 as *const _]
454 |         );
455 |     }
456 |
457 |     #[test]
458 |     fn test_unique_addresses_vecs() {
459 |         let mut allocator = BuddyAllocator::<Vec<*const Block>>::new();
460 |
461 |         for block_number in 0..top_level_blocks(1000, 0) {
462 |             allocator.create_top_level(
463 |                 2usize.pow((MAX_ORDER + BASE_ORDER) as u32) * block_number as usize,
464 |             );
465 |         }
466 |
467 |         let mut seen = Vec::with_capacity(1000);
468 |         for _ in 0..1000 {
469 |             let cursor = allocator.allocate_exact(0).unwrap();
470 |             let addr = cursor.get().unwrap().address();
471 |
472 |             if seen.contains(&addr) {
473 |                 panic!("Allocator must return addresses that have never been allocated before!");
474 |             } else {
475 |                 seen.push(addr);
476 |             }
477 |         }
478 |     }
479 |
480 |     #[test]
481 |     fn test_unique_addresses_linked_lists() {
482 |         let mut allocator = BuddyAllocator::<SinglyLinkedList<BlockPtrAdapter>>::new();
483 |
484 |         for block_number in 0..top_level_blocks(1000, 0) {
485 |             allocator.create_top_level(
486 |                 2usize.pow((MAX_ORDER + BASE_ORDER) as u32) * block_number as usize,
487 |             );
488 |         }
489 |
490 |         let mut seen = Vec::with_capacity(1000);
491 |         for _ in 0..1000 {
492 |             let cursor = allocator.allocate_exact(0).unwrap();
493 |             let addr = cursor.get().unwrap().address();
494 |
495 |             if seen.contains(&addr) {
496 |                 panic!("Allocator must return addresses that have never been allocated before!");
497 |             } else {
498 |                 seen.push(addr);
499 |             }
500 |         }
501 |     }
502 |
503 |     #[test]
504 |     fn test_block_bitfields() {
505 |         let block = Block::new(2usize.pow(56) - 1, 64, false);
506 |
507 |         assert!(!block.used());
508 |         assert_eq!(block.order(), 64);
509 |         assert_eq!(block.address(), 2usize.pow(56) - 1);
510 |
511 |         unsafe { block.set_used(true) };
512 |         assert!(block.used());
513 |     }
514 | }
--------------------------------------------------------------------------------
/src/buddy_allocator_lists.rs:
--------------------------------------------------------------------------------
1 | use super::{top_level_blocks, PageSize, PhysicalAllocator, MAX_ORDER, BASE_ORDER, LEVEL_COUNT};
2 | use array_init;
3 | #[cfg(feature = "flame_profile")]
4 | use flame;
5 |
6 | use std::collections::LinkedList;
7 | use std::vec::Vec;
8 | use std::time::{Instant, Duration};
9 |
10 | #[derive(Debug, Eq, PartialEq)]
11 | pub struct Block {
12 |     begin_address: usize,
13 |     order: u8,
14 |     state: BlockState,
15 | }
16 |
17 | #[repr(u8)]
18 | #[derive(Debug, Eq, PartialEq)]
19 | pub enum BlockState {
20 |     Used,
21 |     Free,
22 | }
23 |
24 | pub trait BlockList {
25 |     fn push(&mut self, item: Block);
26 |     fn position<P: FnMut(&Block) -> bool>(&mut self, pred: P) -> Option<usize>;
27 |     fn len(&self) -> usize;
28 |     fn get(&self, index: usize) -> Option<&Block>;
29 |     fn get_mut(&mut self, index: usize) -> Option<&mut Block>;
30 |     fn remove(&mut self, index: usize);
31 | }
32 |
33 | impl BlockList for LinkedList<Block> {
34 |     fn push(&mut self, item: Block) {
35 |         self.push_back(item)
36 |     }
37 |
38 |     fn len(&self) -> usize {
39 |         LinkedList::len(self)
40 |     }
41 |
42 |     fn position<P: FnMut(&Block) -> bool>(&mut self, pred: P) -> Option<usize> {
43 |         self.iter().position(pred)
44 |     }
45 |
110 | 
111 | pub struct BuddyAllocator<L: BlockList> {
112 |     lists: [L; LEVEL_COUNT as usize],
113 | }
114 | 
115 | /// A very temporary block index. It is not guaranteed to keep pointing at the same block once the
116 | /// allocator has been mutated. Use at your own risk!
117 | #[derive(Debug, Copy, Clone)]
118 | struct BlockIndex {
119 |     order: u8,
120 |     index: usize,
121 | }
122 | 
123 | impl BuddyAllocator<LinkedList<Block>> {
124 |     pub fn new() -> Self {
125 |         BuddyAllocator {
126 |             lists: array_init::array_init(|_| LinkedList::new()),
127 |         }
128 |     }
129 | }
130 | 
131 | impl BuddyAllocator<Vec<Block>> {
132 |     pub fn new() -> Self {
133 |         BuddyAllocator {
134 |             lists: array_init::array_init(|_| Vec::new()),
135 |         }
136 |     }
137 | }
138 | 
139 | impl<L: BlockList> BuddyAllocator<L> {
140 |     /// Get a block by its index.
141 |     ///
142 |     /// # Panicking
143 |     ///
144 |     /// Panics if the order is larger than the maximum. This indicates a programming error.
145 |     fn get(&self, block: &BlockIndex) -> Option<&Block> {
146 |         let list = &self.lists[block.order as usize];
147 |         list.get(block.index)
148 |     }
149 | 
150 |     /// Get a block by its index, mutably.
151 |     ///
152 |     /// # Panicking
153 |     ///
154 |     /// Panics if the order is larger than the maximum. This indicates a programming error.
155 |     fn get_mut(&mut self, block: &BlockIndex) -> Option<&mut Block> {
156 |         let list = &mut self.lists[block.order as usize];
157 |         list.get_mut(block.index)
158 |     }
159 | 
160 |     /// Modify a block by setting its state to a new one. If the new state is free, this will not
161 |     /// merge buddies; it only marks the block as freed.
162 |     ///
163 |     /// # Panicking
164 |     ///
165 |     /// This function will panic if the index is incorrect.
166 |     fn modify(&mut self, index: &mut BlockIndex, new_state: BlockState) {
167 |         let block = self.get_mut(index).unwrap();
168 |         block.state = new_state;
169 |     }
170 | 
171 |     /// Create a top-level block of the maximum order at the given address.
172 |     pub fn create_top_level(&mut self, begin_address: usize) {
173 |         self.lists[MAX_ORDER as usize].push(Block {
174 |             begin_address,
175 |             order: MAX_ORDER,
176 |             state: BlockState::Free,
177 |         });
178 |     }
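    // Note on sizing (assumes the default constants from src/lib.rs, where
    // LEVEL_COUNT = 19 gives MAX_ORDER = 18 and BASE_ORDER = 12): a top-level
    // block spans 2^(MAX_ORDER + BASE_ORDER) = 2^30 bytes, i.e. 1 GiB, so
    // callers such as `demo` below create top levels at 1 GiB strides:
    //
    //     allocator.create_top_level(0);        // covers [0 B, 1 GiB)
    //     allocator.create_top_level(1 << 30);  // covers [1 GiB, 2 GiB)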
179 | 
180 |     /// Splits a block in place, invalidating its index. Returns the index of the first buddy.
181 |     ///
182 |     /// # Panicking
183 |     ///
184 |     /// 1. The index is incorrect (it doesn't point to a block, or its order exceeds the maximum)
185 |     /// 2. The block is already in use
186 |     /// 3. The list state is bad (a block of order `x` is stored in the list for order `y != x`)
187 |     fn split(&mut self, index: BlockIndex) -> Result<BlockIndex, BlockSplitError> {
188 |         let block = self.get(&index).unwrap();
189 | 
190 |         if block.state == BlockState::Used {
191 |             panic!("Attempted to split used block at index {:?}", index);
192 |         }
193 | 
194 |         debug_assert_eq!(
195 |             block.order, index.order,
196 |             "Index should have order equal to block!"
197 |         );
198 | 
199 |         if index.order == 0 {
200 |             return Err(BlockSplitError::BlockSmallestPossible);
201 |         }
202 | 
203 |         let original_order = block.order;
204 |         let order = original_order - 1; // safe: the order-0 case has already returned above
205 | 
206 |         let buddies: [Block; 2] = array_init::array_init(|n| Block {
207 |             begin_address: if n == 0 {
208 |                 block.begin_address
209 |             } else {
210 |                 block.begin_address + 2usize.pow(u32::from(order + BASE_ORDER))
211 |             },
212 |             order,
213 |             state: BlockState::Free,
214 |         });
215 | 
216 |         self.lists[original_order as usize].remove(index.index);
217 | 
218 |         let [first, second] = buddies;
219 |         self.lists[order as usize].push(first);
220 |         self.lists[order as usize].push(second);
221 | 
222 |         Ok(BlockIndex {
223 |             order,
224 |             index: self.lists[order as usize].len() - 2,
225 |         })
226 |     }
227 | 
228 |     #[cfg_attr(feature = "flame_profile", flame)]
229 |     fn allocate_exact(&mut self, order: u8) -> Result<BlockIndex, BlockAllocateError> {
230 |         if order > MAX_ORDER {
231 |             return Err(BlockAllocateError::OrderTooLarge(order));
232 |         }
233 | 
234 |         #[cfg(feature = "flame_profile")]
235 |         flame::note("allocate begin", None);
236 | 
237 |         let mut index = self.find_or_split(order)?;
238 | 
239 |         self.modify(&mut index, BlockState::Used);
240 |         Ok(index)
241 |     }
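    // Illustrative usage (mirrors the `demo` function below): allocate the
    // smallest block -- order 0, i.e. 2^BASE_ORDER = 4 KiB with the defaults --
    // then resolve the returned index back to an address:
    //
    //     let index = allocator.allocate_exact(0).unwrap();
    //     let addr = allocator.get(&index).unwrap().begin_address;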
242 | 
243 |     /// Find a free block of a given order, splitting larger blocks recursively until one exists.
244 |     /// Does not set the block's state to used.
245 |     ///
246 |     /// # Panicking
247 |     ///
248 |     /// Panics if the order is greater than the maximum, or if a programming error is encountered,
249 |     /// such as attempting to split a block of the smallest possible size.
250 |     fn find_or_split(&mut self, order: u8) -> Result<BlockIndex, BlockAllocateError> {
251 |         if order > MAX_ORDER {
252 |             panic!("Order {} larger than max of {}!", order, MAX_ORDER);
253 |         }
254 | 
255 |         let opt: Option<BlockIndex> = self.lists[order as usize]
256 |             .position(|block| block.state == BlockState::Free)
257 |             .map(|index| BlockIndex { order, index });
258 | 
259 |         let block = match opt {
260 |             Some(thing) => Ok(thing),
261 |             None => {
262 |                 if order >= MAX_ORDER {
263 |                     Err(BlockAllocateError::NoBlocksAvailable)
264 |                 } else {
265 |                     let block_index = self.find_or_split(order + 1)?;
266 |                     let first = self.split(block_index).unwrap();
267 |                     Ok(first)
268 |                 }
269 |             }
270 |         }?;
271 | 
272 |         Ok(block)
273 |     }
274 | }
275 | 
276 | #[derive(Debug, Copy, Clone)]
277 | pub enum BlockSplitError {
278 |     BlockSmallestPossible,
279 | }
280 | 
281 | #[derive(Debug, Copy, Clone)]
282 | pub enum BlockAllocateError {
283 |     NoBlocksAvailable,
284 |     OrderTooLarge(u8),
285 | }
286 | 
287 | impl<L: BlockList> PhysicalAllocator for BuddyAllocator<L> {
288 |     fn alloc(&mut self, size: PageSize) -> *const u8 {
289 |         let index = self.allocate_exact(size.power_of_two() - BASE_ORDER)
290 |             .unwrap();
291 |         let block = self.get(&index).unwrap();
292 |         block.begin_address as *const u8
293 |     }
294 | 
295 |     fn dealloc(&mut self, _frame: *const u8) {
296 |         unimplemented!()
297 |     }
298 | }
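// How `alloc` maps a page size to an order (per PageSize::power_of_two in
// src/lib.rs, with the default BASE_ORDER = 12): Kib4 -> 12 - 12 = order 0,
// Mib2 -> 21 - 12 = order 9, and Gib1 -> 30 - 12 = order 18, the max order.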
299 | 
300 | pub fn demo_linked_lists(print_addresses: bool, blocks: u32, block_size: u8) -> Duration {
301 |     let allocator = BuddyAllocator::<LinkedList<Block>>::new();
302 |     demo(allocator, print_addresses, blocks, block_size)
303 | }
304 | 
305 | pub fn demo_vecs(print_addresses: bool, blocks: u32, block_size: u8) -> Duration {
306 |     let allocator = BuddyAllocator::<Vec<Block>>::new();
307 |     demo(allocator, print_addresses, blocks, block_size)
308 | }
309 | 
310 | fn demo<L: BlockList>(
311 |     mut allocator: BuddyAllocator<L>,
312 |     print_addresses: bool,
313 |     blocks: u32,
314 |     block_size: u8,
315 | ) -> Duration {
316 |     let top_level_blocks = top_level_blocks(blocks, block_size);
317 | 
318 |     for block_number in 0..top_level_blocks {
319 |         allocator
320 |             .create_top_level(2usize.pow(u32::from(MAX_ORDER + BASE_ORDER)) * block_number as usize);
321 |     }
322 | 
323 |     let start = Instant::now();
324 | 
325 |     for _ in 0..blocks {
326 |         let index = allocator.allocate_exact(block_size).unwrap();
327 |         let addr = allocator.get(&index).unwrap().begin_address;
328 | 
329 |         if print_addresses {
330 |             println!("Address: {:#x}", addr);
331 |         }
332 |     }
333 | 
334 |     start.elapsed()
335 | }
336 | 
337 | #[cfg(test)]
338 | mod test {
339 |     use super::*;
340 |     use ::MAX_ORDER_SIZE;
341 | 
342 |     #[test]
343 |     fn test_create_top_level() {
344 |         let mut allocator = BuddyAllocator::<Vec<Block>>::new();
345 |         allocator.create_top_level(0);
346 |         allocator.create_top_level(2usize.pow(MAX_ORDER_SIZE as u32));
347 | 
348 |         let expected = vec![
349 |             Block {
350 |                 begin_address: 0,
351 |                 order: MAX_ORDER,
352 |                 state: BlockState::Free,
353 |             },
354 |             Block {
355 |                 begin_address: 2usize.pow(MAX_ORDER_SIZE as u32),
356 |                 order: MAX_ORDER,
357 |                 state: BlockState::Free,
358 |             },
359 |         ];
360 | 
361 |         assert_eq!(allocator.lists[MAX_ORDER as usize - 1].len(), 0);
362 |         assert_eq!(allocator.lists[MAX_ORDER as usize], expected);
363 |     }
364 | 
365 |     #[test]
366 |     fn test_split() {
367 |         let mut allocator = BuddyAllocator::<Vec<Block>>::new();
368 |         allocator.create_top_level(0);
369 |         allocator
370 |             .split(BlockIndex {
371 |                 index: 0,
372 |                 order: MAX_ORDER,
373 |             })
374 |             .unwrap();
375 | 
376 |         let expected_blocks = [
377 |             Block {
378 |                 begin_address: 0,
379 |                 order: MAX_ORDER - 1,
380 |                 state: BlockState::Free,
381 |             },
382 |             Block {
383 |                 begin_address: 2usize.pow(MAX_ORDER_SIZE as u32 - 1),
384 |                 order: MAX_ORDER - 1,
385 |                 state: BlockState::Free,
386 |             },
387 |         ];
388 | 
389 |         assert_eq!(allocator.lists[MAX_ORDER as usize - 1].len(), 2);
390 |         assert_eq!(allocator.lists[MAX_ORDER as usize].len(), 0);
391 | 
392 |         allocator.lists[MAX_ORDER as usize - 1]
393 |             .iter()
394 |             .zip(expected_blocks.iter())
395 |             .for_each(|(block, expected)| assert_eq!(block, expected));
396 |     }
397 | 
398 |     #[test]
399 |     fn test_get_linked_list() {
400 |         let mut allocator = BuddyAllocator::<LinkedList<Block>>::new();
401 |         allocator.create_top_level(0);
402 |         allocator.create_top_level(2usize.pow((MAX_ORDER + BASE_ORDER) as u32) as usize);
403 | 
404 |         let mut indices: [BlockIndex; 2] = array_init::array_init(|_| {
405 |             allocator
406 |                 .split(BlockIndex {
407 |                     index: 0,
408 |                     order: MAX_ORDER,
409 |                 })
410 |                 .unwrap()
411 |         });
412 | 
413 |         indices[1].index += 1; // Make sure we iterate from the back too
414 | 
415 |         let expected_blocks = [
416 |             Block {
417 |                 begin_address: 0,
418 |                 order: MAX_ORDER - 1,
419 |                 state: BlockState::Free,
420 |             },
421 |             Block {
422 |                 begin_address: 2usize.pow(MAX_ORDER_SIZE as u32 - 1) * indices[1].index,
423 |                 order: MAX_ORDER - 1,
424 |                 state: BlockState::Free,
425 |             },
426 |         ];
427 | 
428 |         for (index, expected) in indices.iter().zip(expected_blocks.iter()) {
429 |             let block = allocator.get(index).unwrap();
430 |             assert_eq!(block, expected)
431 |         }
432 |     }
433 | 
434 |     #[test]
435 |     fn test_get_mut_linked_list() {
436 |         let mut allocator = BuddyAllocator::<LinkedList<Block>>::new();
437 |         allocator.create_top_level(0);
438 |         allocator.create_top_level(1024 * 1024 * 1024);
439 | 
440 |         let mut indices: [BlockIndex; 2] = array_init::array_init(|_| {
441 |             allocator
442 |                 .split(BlockIndex {
443 |                     index: 0,
444 |                     order: MAX_ORDER,
445 |                 })
446 |                 .unwrap()
447 |         });
448 | 
449 |         indices[1].index += 1; // Make sure we iterate from the back too
450 | 
451 |         let expected_blocks = [
452 |             Block {
453 |                 begin_address: 0,
454 |                 order: MAX_ORDER - 1,
455 |                 state: BlockState::Free,
456 |             },
457 |             Block {
458 |                 begin_address: 2usize.pow((MAX_ORDER_SIZE - 1) as u32) * indices[1].index,
459 |                 order: MAX_ORDER - 1,
460 |                 state: BlockState::Free,
461 |             },
462 |         ];
463 | 
464 |         for (index, expected) in indices.iter().zip(expected_blocks.iter()) {
465 |             let block = allocator.get_mut(index).unwrap();
466 |             assert_eq!(block, expected)
467 |         }
468 |     }
469 | 
470 |     #[test]
471 |     fn test_allocate_exact_with_free() {
472 |         let mut allocator = BuddyAllocator::<Vec<Block>>::new();
473 |         allocator.create_top_level(0);
474 |         let index = allocator.allocate_exact(MAX_ORDER).unwrap();
475 |         let expected_block = Block {
476 |             begin_address: 0,
477 |             order: MAX_ORDER,
478 |             state: BlockState::Used,
479 |         };
480 |         assert_eq!(*allocator.get(&index).unwrap(), expected_block);
481 |     }
482 | 
483 |     #[test]
484 |     fn test_allocate_exact_no_free() {
485 |         let mut allocator = BuddyAllocator::<Vec<Block>>::new();
486 |         allocator.create_top_level(0);
487 |         let index = allocator.allocate_exact(MAX_ORDER - 2).unwrap();
488 |         let expected_block = Block {
489 |             begin_address: 0,
490 |             order: MAX_ORDER - 2,
491 |             state: BlockState::Used,
492 |         };
493 | 
494 |         assert_eq!(*allocator.get(&index).unwrap(), expected_block);
495 |     }
496 | 
497 |     #[test]
498 |     fn test_unique_addresses_linked_lists() {
499 |         let mut allocator = BuddyAllocator::<LinkedList<Block>>::new();
500 | 
501 |         for block_number in 0..top_level_blocks(1000, 0) {
502 |             allocator.create_top_level(
503 |                 2usize.pow((MAX_ORDER + BASE_ORDER) as u32) * block_number as usize,
504 |             );
505 |         }
506 |         let mut seen = Vec::with_capacity(1000);
507 |         for _ in 0..1000 {
508 |             let index = allocator.allocate_exact(0).unwrap();
509 |             let addr = allocator.get(&index).unwrap().begin_address;
510 | 
511 |             if seen.contains(&addr) {
512 |                 panic!("Allocator must return addresses that have never been allocated before!");
513 |             } else {
514 |                 seen.push(addr);
515 |             }
516 |         }
517 |     }
518 | 
519 |     #[test]
520 |     fn test_unique_addresses_vecs() {
521 |         let mut allocator = BuddyAllocator::<Vec<Block>>::new();
522 | 
523 |         for block_number in 0..top_level_blocks(1000, 0) {
524 |             allocator.create_top_level(
525 |                 2usize.pow((MAX_ORDER + BASE_ORDER) as u32) * block_number as usize,
526 |             );
527 |         }
528 | 
529 |         let mut seen = Vec::with_capacity(1000);
530 |         for _ in 0..1000 {
531 |             let index = allocator.allocate_exact(0).unwrap();
532 |             let addr = allocator.get(&index).unwrap().begin_address;
533 | 
534 |             if seen.contains(&addr) {
535 |                 panic!("Allocator must return addresses that have never been allocated before!");
536 |             } else {
537 |                 seen.push(addr);
538 |             }
539 |         }
540 |     }
541 | 
542 |     // TODO: test that allocate_exact's failing case propagates the error correctly (a sketch follows this file)
543 | }
544 | 
--------------------------------------------------------------------------------
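One possible shape for the test the TODO above asks for, based on the `OrderTooLarge` variant that `allocate_exact` returns for over-large orders (a hypothetical sketch, not part of the repo; exercising the `NoBlocksAvailable` path would instead use an allocator with no free top levels):

#[test]
fn test_allocate_exact_order_too_large() {
    let mut allocator = BuddyAllocator::<Vec<Block>>::new();
    allocator.create_top_level(0);

    // Orders above MAX_ORDER must be rejected up front, before any list is touched.
    match allocator.allocate_exact(MAX_ORDER + 1) {
        Err(BlockAllocateError::OrderTooLarge(order)) => assert_eq!(order, MAX_ORDER + 1),
        other => panic!("expected OrderTooLarge, got {:?}", other),
    }
}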