├── .github
│   └── workflows
│       └── rust.yml
├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── benches
│   └── memory_allocator_benchmark.rs
└── src
    ├── frame.rs
    ├── lib.rs
    ├── linked_list.rs
    └── test.rs

/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
name: Rust

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        rust:
          - stable
          - nightly

    steps:
    - uses: actions/checkout@v2
    - name: Use ${{ matrix.rust }}
      run: rustup default ${{ matrix.rust }}
    - name: Build
      run: cargo build --verbose
    - name: Build without default features
      run: cargo build --no-default-features --verbose
    - name: Build with all features
      run: cargo build --all-features --verbose
    - name: Run tests
      run: cargo test --verbose

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
/target
**/*.rs.bk
Cargo.lock
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "buddy_system_allocator"
description = "A bare metal allocator that uses a buddy system."
documentation = "https://docs.rs/buddy_system_allocator"
homepage = "https://github.com/rcore-os/buddy_system_allocator"
repository = "https://github.com/rcore-os/buddy_system_allocator"
keywords = ["allocator", "no_std", "heap"]
version = "0.11.0"
authors = [
    "Jiajie Chen ",
    "Vinay Chandra Dommeti ",
    "Andrew Walbran ",
]
edition = "2021"
license = "MIT"

[features]
default = ["alloc", "use_spin"]
alloc = []
use_spin = ["spin"]

[dependencies.spin]
version = "0.9.8"
optional = true

[dev-dependencies]
criterion = "0.5.1"
ctor = "0.2.6"
rand = "0.8.5"
rand_chacha = "0.3.1"

[[bench]]
name = "memory_allocator_benchmark"
harness = false
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright 2019-2020 Jiajie Chen

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# buddy_system_allocator

[![Crates.io version][crate-img]][crate]
[![docs.rs][docs-img]][docs]

An (almost) drop-in replacement for [phil-opp/linked-list-allocator](https://github.com/phil-opp/linked-list-allocator), but using a buddy system instead.

## Usage

To use buddy_system_allocator as the global allocator:

```rust
use buddy_system_allocator::LockedHeap;

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<33> = LockedHeap::<33>::empty();
```

To initialize the allocator:

```rust
unsafe {
    HEAP_ALLOCATOR.lock().init(heap_start, heap_size);
    // or
    HEAP_ALLOCATOR.lock().add_to_heap(heap_start, heap_end);
}
```

You can also use `FrameAllocator` and `LockedHeapWithRescue`; see their documentation for usage.
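
For example, a heap that can rescue itself when it runs out of memory. This is an illustrative sketch: `next_region` stands in for whatever kernel-specific helper you use to find a new `(start, end)` memory region.

```rust
use buddy_system_allocator::{Heap, LockedHeapWithRescue};

fn rescue(heap: &mut Heap<33>, _layout: &core::alloc::Layout) {
    // `next_region` is a hypothetical helper returning a new region.
    if let Some((start, end)) = next_region() {
        unsafe { heap.add_to_heap(start, end) };
    }
}

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeapWithRescue<33> = LockedHeapWithRescue::new(rescue);
```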

## Features

- **`alloc`** (default): Provide `FrameAllocator` and `LockedFrameAllocator`, which depend on a
  global allocator.
- **`use_spin`** (default): Provide a `LockedHeap` type that implements the [`GlobalAlloc`] trait by
  using a spinlock.

[`GlobalAlloc`]: https://doc.rust-lang.org/nightly/core/alloc/trait.GlobalAlloc.html

## License

Some code comes from phil-opp's linked-list-allocator.

Licensed under the MIT License. Thanks to phil-opp's linked-list-allocator for the inspiration and interface.

[crate-img]: https://img.shields.io/crates/v/buddy_system_allocator.svg
[crate]: https://crates.io/crates/buddy_system_allocator
[docs-img]: https://docs.rs/buddy_system_allocator/badge.svg
[docs]: https://docs.rs/buddy_system_allocator
--------------------------------------------------------------------------------
/benches/memory_allocator_benchmark.rs:
--------------------------------------------------------------------------------
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate ctor;

use std::sync::Arc;
use std::thread;
use std::thread::sleep;
use std::time::Duration;

use alloc::alloc::GlobalAlloc;
use alloc::alloc::Layout;
use buddy_system_allocator::LockedHeap;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use rand::{Rng, SeedableRng};

const SMALL_SIZE: usize = 8;
const LARGE_SIZE: usize = 1024 * 1024; // 1 MiB
const ALIGN: usize = 8;

/// Allocate and immediately free a small object
#[inline]
pub fn small_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
    let layout = unsafe { Layout::from_size_align_unchecked(SMALL_SIZE, ALIGN) };
    unsafe {
        let addr = heap.alloc(layout);
        heap.dealloc(addr, layout);
    }
}

/// Allocate and immediately free a large object
#[inline]
pub fn large_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
    let layout = unsafe { Layout::from_size_align_unchecked(LARGE_SIZE, ALIGN) };
    unsafe {
        let addr = heap.alloc(layout);
        heap.dealloc(addr, layout);
    }
}
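
// An illustrative extra case (not part of the original suite, and not
// registered in `criterion_benchmark` below): the same round trip as above,
// but with an over-aligned layout, to exercise the buddy allocator's
// handling of `align > size`. Register it like the other cases if desired.
#[allow(dead_code)]
#[inline]
pub fn over_aligned_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
    // Assumed values: a 16-byte object at 4096-byte alignment.
    let layout = unsafe { Layout::from_size_align_unchecked(16, 4096) };
    unsafe {
        let addr = heap.alloc(layout);
        heap.dealloc(addr, layout);
    }
}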

/// Multiple threads allocate and free objects of random sizes
#[inline]
pub fn multi_thread_random_size<const ORDER: usize>(heap: &'static LockedHeap<ORDER>) {
    const THREAD_SIZE: usize = 10;

    let mut threads = Vec::with_capacity(THREAD_SIZE);
    let alloc = Arc::new(heap);
    for i in 0..THREAD_SIZE {
        let prethread_alloc = alloc.clone();
        let handle = thread::spawn(move || {
            // Seed the RNG with `i` so that every run of the benchmark
            // allocates the same sequence of sizes.
            let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(i as u64);
            // generate a random object size in the range [SMALL_SIZE ..= LARGE_SIZE]
            let layout = unsafe {
                Layout::from_size_align_unchecked(rng.gen_range(SMALL_SIZE..=LARGE_SIZE), ALIGN)
            };
            let addr = unsafe { prethread_alloc.alloc(layout) };

            // sleep for a while
            sleep(Duration::from_nanos((THREAD_SIZE - i) as u64));

            unsafe { prethread_alloc.dealloc(addr, layout) }
        });
        threads.push(handle);
    }
    drop(alloc);

    for t in threads {
        t.join().unwrap();
    }
}

/// Multithreaded benchmark inspired by the **Hoard** benchmark
///
/// Warning: this benchmark generally needs a long time to finish
///
/// ----------------------------------------------------------------------
/// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
///       for Shared-Memory Multiprocessors
/// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
///
/// Copyright (c) 1998-2000, The University of Texas at Austin.
///
/// This library is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation, http://www.fsf.org.
///
/// This library is distributed in the hope that it will be useful, but
/// WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
/// Library General Public License for more details.
/// ----------------------------------------------------------------------
///
#[inline]
pub fn thread_test() {
    const N_ITERATIONS: usize = 50;
    const N_OBJECTS: usize = 30000;
    const N_THREADS: usize = 10;
    const OBJECT_SIZE: usize = 1;

    #[derive(Clone)]
    struct Foo {
        pub a: i32,
        pub b: i32,
    }

    let mut threads = Vec::with_capacity(N_THREADS);

    for _i in 0..N_THREADS {
        let handle = thread::spawn(move || {
            // C++ equivalent: a = new Foo*[nobjects / nthreads];
            let mut a = Vec::with_capacity(N_OBJECTS / N_THREADS);
            for j in 0..N_ITERATIONS {
                // inner object, C++ equivalent: a[i] = new Foo[objSize];
                for k in 0..(N_OBJECTS / N_THREADS) {
                    a.push(vec![
                        Foo {
                            a: k as i32,
                            b: j as i32
                        };
                        OBJECT_SIZE
                    ]);

                    // Touch the allocation so the optimizer cannot remove it.
                    // FIXME: it is not clear whether this is sufficient.
                    a[k][0].a += a[k][0].b;
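
                    // Alternatively (illustrative), `criterion::black_box`,
                    // which is already imported above, is the usual way to
                    // keep a value out of the optimizer's reach:
                    // a[k][0].b = black_box(a[k][0].a);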
                }
            }
            // auto drop here
        });
        threads.push(handle);
    }

    for t in threads {
        t.join().unwrap();
    }
}

const ORDER: usize = 33;
const MACHINE_ALIGN: usize = core::mem::size_of::<usize>();
/// for now 128 MiB is needed
/// TODO: reduce memory use
const KERNEL_HEAP_SIZE: usize = 128 * 1024 * 1024;
const HEAP_BLOCK: usize = KERNEL_HEAP_SIZE / MACHINE_ALIGN;
static mut HEAP: [usize; HEAP_BLOCK] = [0; HEAP_BLOCK];

/// Use `LockedHeap` as the global allocator
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<ORDER> = LockedHeap::<ORDER>::new();

/// Init heap
///
/// We need `ctor` here because the benchmark runs inside the std environment,
/// which means std performs some initialization before `fn main()` executes.
/// Our allocator, however, must be initialized at runtime (it uses a linked
/// list, which cannot be built at compile time), and heap memory is already
/// needed during that initialization phase.
///
/// The way out of this dilemma is to run `fn init_heap()` during the
/// initialization phase rather than in `fn main()`; `ctor` does exactly that.
#[ctor]
fn init_heap() {
    let heap_start = unsafe { HEAP.as_ptr() as usize };
    unsafe {
        HEAP_ALLOCATOR
            .lock()
            .init(heap_start, HEAP_BLOCK * MACHINE_ALIGN);
    }
}

/// Entry of benchmarks
pub fn criterion_benchmark(c: &mut Criterion) {
    // run benchmark
    c.bench_function("small alloc", |b| {
        b.iter(|| small_alloc(black_box(&HEAP_ALLOCATOR)))
    });
    c.bench_function("large alloc", |b| {
        b.iter(|| large_alloc(black_box(&HEAP_ALLOCATOR)))
    });
    c.bench_function("multi thread random size", |b| {
        b.iter(|| multi_thread_random_size(black_box(&HEAP_ALLOCATOR)))
    });
    c.bench_function("threadtest", |b| b.iter(|| thread_test()));
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
--------------------------------------------------------------------------------
/src/frame.rs:
--------------------------------------------------------------------------------
use super::prev_power_of_two;
use alloc::collections::BTreeSet;
use core::alloc::Layout;
use core::cmp::{max, min};
use core::ops::Range;

#[cfg(feature = "use_spin")]
use core::ops::Deref;
#[cfg(feature = "use_spin")]
use spin::Mutex;

/// A frame allocator that uses a buddy system, requiring a global allocator.
///
/// The max order of the allocator is determined by the const generic parameter
/// `ORDER` (`MAX_ORDER = ORDER - 1`). The frame allocator will only be able to
/// allocate ranges of size up to 2<sup>MAX_ORDER</sup>, out of a total range
/// of size at most 2<sup>MAX_ORDER + 1</sup> - 1.
///
/// # Usage
///
/// Create a frame allocator and add some frames to it:
/// ```
/// use buddy_system_allocator::*;
/// // Notice that the max order is `ORDER - 1`.
/// let mut frame = FrameAllocator::<33>::new();
/// assert!(frame.alloc(1).is_none());
///
/// frame.add_frame(0, 3);
/// let num = frame.alloc(1);
/// assert_eq!(num, Some(2));
/// let num = frame.alloc(2);
/// assert_eq!(num, Some(0));
/// ```
pub struct FrameAllocator<const ORDER: usize = 32> {
    // buddy system with max order of `ORDER - 1`
    free_list: [BTreeSet<usize>; ORDER],

    // statistics
    allocated: usize,
    total: usize,
}

impl<const ORDER: usize> FrameAllocator<ORDER> {
    /// Create an empty frame allocator
    pub const fn new() -> Self {
        Self {
            free_list: [const { BTreeSet::new() }; ORDER],
            allocated: 0,
            total: 0,
        }
    }

    /// Add a range of frame numbers [start, end) to the allocator
    pub fn add_frame(&mut self, start: usize, end: usize) {
        assert!(start <= end);

        let mut total = 0;
        let mut current_start = start;

        while current_start < end {
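            // `current_start & (!current_start + 1)` isolates the lowest set
            // bit: the largest power of two that divides `current_start`,
            // i.e. the largest block size for which this block is still
            // aligned to its own size. Zero is divisible by everything, so
            // the code falls back to an arbitrary power of two (32); the
            // `min` below caps the block size in any case.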
            let lowbit = if current_start > 0 {
                current_start & (!current_start + 1)
            } else {
                32
            };
            let size = min(
                min(lowbit, prev_power_of_two(end - current_start)),
                1 << (ORDER - 1),
            );
            total += size;

            self.free_list[size.trailing_zeros() as usize].insert(current_start);
            current_start += size;
        }

        self.total += total;
    }

    /// Add a range of frames to the allocator.
    pub fn insert(&mut self, range: Range<usize>) {
        self.add_frame(range.start, range.end);
    }

    /// Allocate a range of frames from the allocator, returning the first frame of the allocated
    /// range.
    pub fn alloc(&mut self, count: usize) -> Option<usize> {
        let size = count.next_power_of_two();
        self.alloc_power_of_two(size)
    }

    /// Allocate a range of frames with the given size and alignment from the allocator, returning
    /// the first frame of the allocated range.
    ///
    /// The allocated size is the maximum of the next power of two of the given size and the
    /// alignment.
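    ///
    /// For example (an illustrative doctest mirroring the crate's own tests):
    /// ```
    /// use buddy_system_allocator::FrameAllocator;
    /// use core::alloc::Layout;
    /// let mut frame = FrameAllocator::<32>::new();
    /// frame.add_frame(1, 64);
    /// // Size 2 rounded up to alignment 4 yields the 4-aligned block at frame 4.
    /// assert_eq!(frame.alloc_aligned(Layout::from_size_align(2, 4).unwrap()), Some(4));
    /// ```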
    pub fn alloc_aligned(&mut self, layout: Layout) -> Option<usize> {
        let size = max(layout.size().next_power_of_two(), layout.align());
        self.alloc_power_of_two(size)
    }

    /// Allocate a range of frames of the given size from the allocator. The size must be a power
    /// of two. The allocated range will have alignment equal to the size.
    fn alloc_power_of_two(&mut self, size: usize) -> Option<usize> {
        let class = size.trailing_zeros() as usize;
        for i in class..self.free_list.len() {
            // Find the first non-empty size class
            if !self.free_list[i].is_empty() {
                // Split buffers
                for j in (class + 1..i + 1).rev() {
                    if let Some(block_ref) = self.free_list[j].iter().next() {
                        let block = *block_ref;
                        self.free_list[j - 1].insert(block + (1 << (j - 1)));
                        self.free_list[j - 1].insert(block);
                        self.free_list[j].remove(&block);
                    } else {
                        return None;
                    }
                }

                let result = self.free_list[class].iter().next();
                if let Some(result_ref) = result {
                    let result = *result_ref;
                    self.free_list[class].remove(&result);
                    self.allocated += size;
                    return Some(result);
                } else {
                    return None;
                }
            }
        }
        None
    }

    /// Deallocate a range of frames [frame, frame+count) from the frame allocator.
    ///
    /// The range must be exactly the same as when it was allocated, as with the heap allocator.
    pub fn dealloc(&mut self, start_frame: usize, count: usize) {
        let size = count.next_power_of_two();
        self.dealloc_power_of_two(start_frame, size)
    }

    /// Deallocate a range of frames which was previously allocated by [`alloc_aligned`].
    ///
    /// The layout must be exactly the same as when it was allocated.
    pub fn dealloc_aligned(&mut self, start_frame: usize, layout: Layout) {
        let size = max(layout.size().next_power_of_two(), layout.align());
        self.dealloc_power_of_two(start_frame, size)
    }

    /// Deallocate a range of frames with the given size from the allocator. The size must be a
    /// power of two.
    fn dealloc_power_of_two(&mut self, start_frame: usize, size: usize) {
        let class = size.trailing_zeros() as usize;

        // Merge free buddy lists
        let mut current_ptr = start_frame;
        let mut current_class = class;
        while current_class < self.free_list.len() {
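            // A block's buddy differs from it only in bit `current_class`:
            // the two buddies are the halves of the 2^(current_class + 1)
            // block they were split from.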
            let buddy = current_ptr ^ (1 << current_class);
            if self.free_list[current_class].remove(&buddy) {
                // Free buddy found
                current_ptr = min(current_ptr, buddy);
                current_class += 1;
            } else {
                self.free_list[current_class].insert(current_ptr);
                break;
            }
        }

        self.allocated -= size;
    }
}

/// A locked version of `FrameAllocator`
///
/// # Usage
///
/// Create a locked frame allocator and add frames to it:
/// ```
/// use buddy_system_allocator::*;
/// // Notice that the max order is `ORDER - 1`.
/// let mut frame = LockedFrameAllocator::<33>::new();
/// assert!(frame.lock().alloc(1).is_none());
///
/// frame.lock().add_frame(0, 3);
/// let num = frame.lock().alloc(1);
/// assert_eq!(num, Some(2));
/// let num = frame.lock().alloc(2);
/// assert_eq!(num, Some(0));
/// ```
#[cfg(feature = "use_spin")]
pub struct LockedFrameAllocator<const ORDER: usize = 32>(Mutex<FrameAllocator<ORDER>>);

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> LockedFrameAllocator<ORDER> {
    /// Creates an empty frame allocator
    pub fn new() -> Self {
        Self(Mutex::new(FrameAllocator::new()))
    }
}

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> Deref for LockedFrameAllocator<ORDER> {
    type Target = Mutex<FrameAllocator<ORDER>>;

    fn deref(&self) -> &Mutex<FrameAllocator<ORDER>> {
        &self.0
    }
}
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
#![no_std]

#[cfg(test)]
#[macro_use]
extern crate std;

#[cfg(feature = "use_spin")]
extern crate spin;

#[cfg(feature = "alloc")]
extern crate alloc;

#[cfg(feature = "use_spin")]
use core::alloc::GlobalAlloc;
use core::alloc::Layout;
use core::cmp::{max, min};
use core::fmt;
use core::mem::size_of;
#[cfg(feature = "use_spin")]
use core::ops::Deref;
use core::ptr::NonNull;
#[cfg(feature = "use_spin")]
use spin::Mutex;

#[cfg(feature = "alloc")]
mod frame;
pub mod linked_list;
#[cfg(test)]
mod test;

#[cfg(feature = "alloc")]
pub use frame::*;

/// A heap that uses a buddy system with configurable order.
///
/// # Usage
///
/// Create a heap and add a memory region to it:
/// ```
/// use buddy_system_allocator::*;
/// # use core::mem::size_of;
/// // The max order of the buddy system is `ORDER - 1`.
/// // For example, to create a heap with a maximum block size of 2^32 bytes,
/// // you should define the heap with `ORDER = 33`.
/// let mut heap = Heap::<33>::empty();
/// # let space: [usize; 100] = [0; 100];
/// # let begin: usize = space.as_ptr() as usize;
/// # let end: usize = begin + 100 * size_of::<usize>();
/// # let size: usize = 100 * size_of::<usize>();
/// unsafe {
///     heap.init(begin, size);
///     // or
///     heap.add_to_heap(begin, end);
/// }
/// ```
pub struct Heap<const ORDER: usize = 32> {
    // buddy system with max order of `ORDER - 1`
    free_list: [linked_list::LinkedList; ORDER],

    // statistics
    user: usize,
    allocated: usize,
    total: usize,
}

impl<const ORDER: usize> Heap<ORDER> {
    /// Create an empty heap
    pub const fn new() -> Self {
        Heap {
            free_list: [linked_list::LinkedList::new(); ORDER],
            user: 0,
            allocated: 0,
            total: 0,
        }
    }

    /// Create an empty heap
    pub const fn empty() -> Self {
        Self::new()
    }

    /// Add a range of memory [start, end) to the heap
    pub unsafe fn add_to_heap(&mut self, mut start: usize, mut end: usize) {
        // avoid unaligned access on some platforms
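        // `!size_of::<usize>() + 1` is the two's-complement negation of the
        // word size, i.e. an alignment mask: `start` is rounded up to the
        // next word boundary and `end` rounded down.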
        start = (start + size_of::<usize>() - 1) & (!size_of::<usize>() + 1);
        end &= !size_of::<usize>() + 1;
        assert!(start <= end);

        let mut total = 0;
        let mut current_start = start;

        while current_start + size_of::<usize>() <= end {
            let lowbit = current_start & (!current_start + 1);
            let mut size = min(lowbit, prev_power_of_two(end - current_start));

            // If the order of size is larger than the max order,
            // split it into smaller blocks.
            let mut order = size.trailing_zeros() as usize;
            if order > ORDER - 1 {
                order = ORDER - 1;
                size = 1 << order;
            }
            total += size;

            self.free_list[order].push(current_start as *mut usize);
            current_start += size;
        }

        self.total += total;
    }

    /// Add a range of memory [start, start + size) to the heap
    pub unsafe fn init(&mut self, start: usize, size: usize) {
        self.add_to_heap(start, start + size);
    }

    /// Allocate a range of memory from the heap satisfying `layout` requirements
    pub fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, ()> {
        let size = max(
            layout.size().next_power_of_two(),
            max(layout.align(), size_of::<usize>()),
        );
        let class = size.trailing_zeros() as usize;
        for i in class..self.free_list.len() {
            // Find the first non-empty size class
            if !self.free_list[i].is_empty() {
                // Split buffers
                for j in (class + 1..i + 1).rev() {
                    if let Some(block) = self.free_list[j].pop() {
                        unsafe {
                            self.free_list[j - 1]
                                .push((block as usize + (1 << (j - 1))) as *mut usize);
                            self.free_list[j - 1].push(block);
                        }
                    } else {
                        return Err(());
                    }
                }

                let result = NonNull::new(
                    self.free_list[class]
                        .pop()
                        .expect("current block should have free space now")
                        as *mut u8,
                );
                if let Some(result) = result {
                    self.user += layout.size();
                    self.allocated += size;
                    return Ok(result);
                } else {
                    return Err(());
                }
            }
        }
        Err(())
    }

    /// Deallocate a range of memory from the heap
    pub fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
        let size = max(
            layout.size().next_power_of_two(),
            max(layout.align(), size_of::<usize>()),
        );
        let class = size.trailing_zeros() as usize;

        unsafe {
            // Put back into free list
            self.free_list[class].push(ptr.as_ptr() as *mut usize);

            // Merge free buddy lists
            let mut current_ptr = ptr.as_ptr() as usize;
            let mut current_class = class;

            while current_class < self.free_list.len() - 1 {
                let buddy = current_ptr ^ (1 << current_class);
                let mut flag = false;
                for block in self.free_list[current_class].iter_mut() {
                    if block.value() as usize == buddy {
                        block.pop();
                        flag = true;
                        break;
                    }
                }

                // Free buddy found
                if flag {
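                    // The block freed (or merged) most recently sits at the
                    // head of this class, so `pop` removes it again before
                    // the merged block is re-inserted one class up.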
                    self.free_list[current_class].pop();
                    current_ptr = min(current_ptr, buddy);
                    current_class += 1;
                    self.free_list[current_class].push(current_ptr as *mut usize);
                } else {
                    break;
                }
            }
        }

        self.user -= layout.size();
        self.allocated -= size;
    }

    /// Return the number of bytes that the user has requested
    pub fn stats_alloc_user(&self) -> usize {
        self.user
    }

    /// Return the number of bytes that are actually allocated
    pub fn stats_alloc_actual(&self) -> usize {
        self.allocated
    }

    /// Return the total number of bytes in the heap
    pub fn stats_total_bytes(&self) -> usize {
        self.total
    }
}

impl<const ORDER: usize> fmt::Debug for Heap<ORDER> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("Heap")
            .field("user", &self.user)
            .field("allocated", &self.allocated)
            .field("total", &self.total)
            .finish()
    }
}

/// A locked version of `Heap`
///
/// # Usage
///
/// Create a locked heap and add a memory region to it:
/// ```
/// use buddy_system_allocator::*;
/// # use core::mem::size_of;
/// // The max order of the buddy system is `ORDER - 1`.
/// // For example, to create a heap with a maximum block size of 2^32 bytes,
/// // you should define the heap with `ORDER = 33`.
/// let mut heap = LockedHeap::<33>::new();
/// # let space: [usize; 100] = [0; 100];
/// # let begin: usize = space.as_ptr() as usize;
/// # let end: usize = begin + 100 * size_of::<usize>();
/// # let size: usize = 100 * size_of::<usize>();
/// unsafe {
///     heap.lock().init(begin, size);
///     // or
///     heap.lock().add_to_heap(begin, end);
/// }
/// ```
#[cfg(feature = "use_spin")]
pub struct LockedHeap<const ORDER: usize = 32>(Mutex<Heap<ORDER>>);

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> LockedHeap<ORDER> {
    /// Creates an empty heap
    pub const fn new() -> Self {
        LockedHeap(Mutex::new(Heap::<ORDER>::new()))
    }

    /// Creates an empty heap
    pub const fn empty() -> Self {
        LockedHeap(Mutex::new(Heap::<ORDER>::new()))
    }
}

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> Deref for LockedHeap<ORDER> {
    type Target = Mutex<Heap<ORDER>>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

#[cfg(feature = "use_spin")]
unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeap<ORDER> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.0
            .lock()
            .alloc(layout)
            .ok()
            .map_or(core::ptr::null_mut(), |allocation| allocation.as_ptr())
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.0.lock().dealloc(NonNull::new_unchecked(ptr), layout)
    }
}

/// A locked version of `Heap` with a rescue hook invoked before OOM
///
/// # Usage
///
/// Create a locked heap:
/// ```
/// use buddy_system_allocator::*;
/// let heap = LockedHeapWithRescue::new(|heap: &mut Heap<33>, layout: &core::alloc::Layout| {});
/// ```
///
/// Before returning an out-of-memory error, the allocator calls the rescue
/// function and then retries the allocation once.
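///
/// A rescue function will typically add more memory to the heap; an
/// illustrative sketch (adapted from this crate's own tests):
/// ```
/// use buddy_system_allocator::*;
/// static mut SPACE: [usize; 100] = [0; 100];
/// let heap = LockedHeapWithRescue::new(|heap: &mut Heap<32>, _layout: &core::alloc::Layout| unsafe {
///     heap.add_to_heap(SPACE.as_ptr() as usize, SPACE.as_ptr().add(100) as usize);
/// });
/// ```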
#[cfg(feature = "use_spin")]
pub struct LockedHeapWithRescue<const ORDER: usize = 32> {
    inner: Mutex<Heap<ORDER>>,
    rescue: fn(&mut Heap<ORDER>, &Layout),
}

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> LockedHeapWithRescue<ORDER> {
    /// Creates an empty heap
    pub const fn new(rescue: fn(&mut Heap<ORDER>, &Layout)) -> Self {
        LockedHeapWithRescue {
            inner: Mutex::new(Heap::<ORDER>::new()),
            rescue,
        }
    }
}

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> Deref for LockedHeapWithRescue<ORDER> {
    type Target = Mutex<Heap<ORDER>>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

#[cfg(feature = "use_spin")]
unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeapWithRescue<ORDER> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut inner = self.inner.lock();
        match inner.alloc(layout) {
            Ok(allocation) => allocation.as_ptr(),
            Err(_) => {
                (self.rescue)(&mut inner, &layout);
                inner
                    .alloc(layout)
                    .ok()
                    .map_or(core::ptr::null_mut(), |allocation| allocation.as_ptr())
            }
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.inner
            .lock()
            .dealloc(NonNull::new_unchecked(ptr), layout)
    }
}
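
/// Returns the largest power of two less than or equal to `num`.
///
/// `num` must be non-zero; for zero, the subtraction below underflows.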
pub(crate) fn prev_power_of_two(num: usize) -> usize {
    1 << (usize::BITS as usize - num.leading_zeros() as usize - 1)
}
--------------------------------------------------------------------------------
/src/linked_list.rs:
--------------------------------------------------------------------------------
//! Provide the intrusive LinkedList

use core::marker::PhantomData;
use core::{fmt, ptr};

/// An intrusive linked list
///
/// A clean room implementation of the one used in CS140e 2018 Winter
///
/// Thanks Sergio Benitez for his excellent work,
/// see [CS140e](https://cs140e.sergio.bz/) for more information
#[derive(Copy, Clone)]
pub struct LinkedList {
    head: *mut usize,
}

unsafe impl Send for LinkedList {}

impl LinkedList {
    /// Create a new LinkedList
    pub const fn new() -> LinkedList {
        LinkedList {
            head: ptr::null_mut(),
        }
    }

    /// Return `true` if the list is empty
    pub fn is_empty(&self) -> bool {
        self.head.is_null()
    }

    /// Push `item` to the front of the list
    pub unsafe fn push(&mut self, item: *mut usize) {
        *item = self.head as usize;
        self.head = item;
    }

    /// Try to remove the first item in the list
    pub fn pop(&mut self) -> Option<*mut usize> {
        match self.is_empty() {
            true => None,
            false => {
                // Advance head pointer
                let item = self.head;
                self.head = unsafe { *item as *mut usize };
                Some(item)
            }
        }
    }

    /// Return an iterator over the items in the list
    pub fn iter(&self) -> Iter {
        Iter {
            curr: self.head,
            list: PhantomData,
        }
    }

    /// Return a mutable iterator over the items in the list
    pub fn iter_mut(&mut self) -> IterMut {
        IterMut {
            prev: &mut self.head as *mut *mut usize as *mut usize,
            curr: self.head,
            list: PhantomData,
        }
    }
}

impl fmt::Debug for LinkedList {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}

/// An iterator over the linked list
pub struct Iter<'a> {
    curr: *mut usize,
    list: PhantomData<&'a LinkedList>,
}

impl<'a> Iterator for Iter<'a> {
    type Item = *mut usize;

    fn next(&mut self) -> Option<Self::Item> {
        if self.curr.is_null() {
            None
        } else {
            let item = self.curr;
            let next = unsafe { *item as *mut usize };
            self.curr = next;
            Some(item)
        }
    }
}

/// Represent a mutable node in `LinkedList`
pub struct ListNode {
    prev: *mut usize,
    curr: *mut usize,
}

impl ListNode {
    /// Remove the node from the list
    pub fn pop(self) -> *mut usize {
        // Skip the current one
        unsafe {
            *(self.prev) = *(self.curr);
        }
        self.curr
    }

    /// Returns the pointed address
    pub fn value(&self) -> *mut usize {
        self.curr
    }
}

/// A mutable iterator over the linked list
pub struct IterMut<'a> {
    list: PhantomData<&'a mut LinkedList>,
    prev: *mut usize,
    curr: *mut usize,
}

impl<'a> Iterator for IterMut<'a> {
    type Item = ListNode;

    fn next(&mut self) -> Option<Self::Item> {
        if self.curr.is_null() {
            None
        } else {
            let res = ListNode {
                prev: self.prev,
                curr: self.curr,
            };
            self.prev = self.curr;
            self.curr = unsafe { *self.curr as *mut usize };
            Some(res)
        }
    }
}
--------------------------------------------------------------------------------
/src/test.rs:
--------------------------------------------------------------------------------
use crate::linked_list;
use crate::FrameAllocator;
use crate::Heap;
use crate::LockedHeapWithRescue;
use core::alloc::GlobalAlloc;
use core::alloc::Layout;
use core::mem::size_of;

#[test]
fn test_linked_list() {
    let mut value1: usize = 0;
    let mut value2: usize = 0;
    let mut value3: usize = 0;
    let mut list = linked_list::LinkedList::new();
    unsafe {
        list.push(&mut value1 as *mut usize);
        list.push(&mut value2 as *mut usize);
        list.push(&mut value3 as *mut usize);
    }

    // Test links
    assert_eq!(value3, &value2 as *const usize as usize);
    assert_eq!(value2, &value1 as *const usize as usize);
    assert_eq!(value1, 0);

    // Test iter
    let mut iter = list.iter();
    assert_eq!(iter.next(), Some(&mut value3 as *mut usize));
    assert_eq!(iter.next(), Some(&mut value2 as *mut usize));
    assert_eq!(iter.next(), Some(&mut value1 as *mut usize));
    assert_eq!(iter.next(), None);

    // Test iter_mut
    let mut iter_mut = list.iter_mut();
    assert_eq!(iter_mut.next().unwrap().pop(), &mut value3 as *mut usize);

    // Test pop
    assert_eq!(list.pop(), Some(&mut value2 as *mut usize));
    assert_eq!(list.pop(), Some(&mut value1 as *mut usize));
    assert_eq!(list.pop(), None);
}
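
// An illustrative addition (not in the original suite): exercise the
// statistics accessors across an alloc/dealloc round trip.
#[test]
fn test_heap_stats() {
    let mut heap = Heap::<32>::new();
    let space: [usize; 100] = [0; 100];
    unsafe {
        heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(100) as usize);
    }
    assert_eq!(heap.stats_total_bytes(), 100 * size_of::<usize>());

    let layout = Layout::from_size_align(3, 1).unwrap();
    let addr = heap.alloc(layout).unwrap();
    // The user size is tracked as requested; the actual size is rounded up
    // to at least one machine word.
    assert_eq!(heap.stats_alloc_user(), 3);
    assert_eq!(heap.stats_alloc_actual(), size_of::<usize>());

    heap.dealloc(addr, layout);
    assert_eq!(heap.stats_alloc_user(), 0);
    assert_eq!(heap.stats_alloc_actual(), 0);
}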

#[test]
fn test_empty_heap() {
    let mut heap = Heap::<32>::new();
    assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_err());
}

#[test]
fn test_heap_add() {
    let mut heap = Heap::<32>::new();
    assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_err());

    let space: [usize; 100] = [0; 100];
    unsafe {
        heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(100) as usize);
    }
    let addr = heap.alloc(Layout::from_size_align(1, 1).unwrap());
    assert!(addr.is_ok());
}

#[test]
fn test_heap_add_large() {
    // Max size of block is 2^7 == 128 bytes
    let mut heap = Heap::<8>::new();
    assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_err());

    // 512 bytes of space
    let space: [u8; 512] = [0; 512];
    unsafe {
        heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(512) as usize);
    }
    let addr = heap.alloc(Layout::from_size_align(1, 1).unwrap());
    assert!(addr.is_ok());
}

#[test]
fn test_heap_oom() {
    let mut heap = Heap::<32>::new();
    let space: [usize; 100] = [0; 100];
    unsafe {
        heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(100) as usize);
    }

    assert!(heap
        .alloc(Layout::from_size_align(100 * size_of::<usize>(), 1).unwrap())
        .is_err());
    assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_ok());
}

#[test]
fn test_heap_oom_rescue() {
    static mut SPACE: [usize; 100] = [0; 100];
    let heap = LockedHeapWithRescue::new(|heap: &mut Heap<32>, _layout: &Layout| unsafe {
        heap.add_to_heap(SPACE.as_ptr() as usize, SPACE.as_ptr().add(100) as usize);
    });

    unsafe {
        assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()) as usize != 0);
    }
}

#[test]
fn test_heap_alloc_and_free() {
    let mut heap = Heap::<32>::new();
    assert!(heap.alloc(Layout::from_size_align(1, 1).unwrap()).is_err());

    let space: [usize; 100] = [0; 100];
    unsafe {
        heap.add_to_heap(space.as_ptr() as usize, space.as_ptr().add(100) as usize);
    }
    for _ in 0..100 {
        let addr = heap.alloc(Layout::from_size_align(1, 1).unwrap()).unwrap();
        heap.dealloc(addr, Layout::from_size_align(1, 1).unwrap());
    }
}

#[test]
fn test_empty_frame_allocator() {
    let mut frame = FrameAllocator::<32>::new();
    assert!(frame.alloc(1).is_none());
}

#[test]
fn test_frame_allocator_add() {
    let mut frame = FrameAllocator::<32>::new();
    assert!(frame.alloc(1).is_none());

    frame.insert(0..3);
    let num = frame.alloc(1);
    assert_eq!(num, Some(2));
    let num = frame.alloc(2);
    assert_eq!(num, Some(0));
    assert!(frame.alloc(1).is_none());
    assert!(frame.alloc(2).is_none());
}

#[test]
fn test_frame_allocator_allocate_large() {
    let mut frame = FrameAllocator::<32>::new();
    assert_eq!(frame.alloc(10_000_000_000), None);
}

#[test]
fn test_frame_allocator_add_large_size_split() {
    let mut frame = FrameAllocator::<32>::new();

    frame.insert(0..10_000_000_000);

    assert_eq!(frame.alloc(0x8000_0001), None);
    assert_eq!(frame.alloc(0x8000_0000), Some(0x8000_0000));
    assert_eq!(frame.alloc(0x8000_0000), Some(0x1_0000_0000));
}

#[test]
fn test_frame_allocator_add_large_size() {
    let mut frame = FrameAllocator::<33>::new();

    frame.insert(0..10_000_000_000);
    assert_eq!(frame.alloc(0x8000_0001), Some(0x1_0000_0000));
}

#[test]
fn test_frame_allocator_alloc_and_free() {
    let mut frame = FrameAllocator::<32>::new();
    assert!(frame.alloc(1).is_none());

    frame.add_frame(0, 1024);
    for _ in 0..100 {
        let addr = frame.alloc(512).unwrap();
        frame.dealloc(addr, 512);
    }
}

#[test]
fn test_frame_allocator_alloc_and_free_complex() {
    let mut frame = FrameAllocator::<32>::new();
    frame.add_frame(100, 1024);
    for _ in 0..10 {
        let addr = frame.alloc(1).unwrap();
        frame.dealloc(addr, 1);
    }
    let addr1 = frame.alloc(1).unwrap();
    let addr2 = frame.alloc(1).unwrap();
    assert_ne!(addr1, addr2);
}

#[test]
fn test_frame_allocator_aligned() {
    let mut frame = FrameAllocator::<32>::new();
    frame.add_frame(1, 64);
    assert_eq!(
        frame.alloc_aligned(Layout::from_size_align(2, 4).unwrap()),
        Some(4)
    );
    assert_eq!(
        frame.alloc_aligned(Layout::from_size_align(2, 2).unwrap()),
        Some(2)
    );
    assert_eq!(
        frame.alloc_aligned(Layout::from_size_align(2, 1).unwrap()),
        Some(8)
    );
    assert_eq!(
        frame.alloc_aligned(Layout::from_size_align(1, 16).unwrap()),
        Some(16)
    );
}
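
// An illustrative addition (not in the original suite): unit checks for the
// internal `prev_power_of_two` helper.
#[test]
fn test_prev_power_of_two() {
    assert_eq!(crate::prev_power_of_two(1), 1);
    assert_eq!(crate::prev_power_of_two(2), 2);
    assert_eq!(crate::prev_power_of_two(3), 2);
    assert_eq!(crate::prev_power_of_two(1023), 512);
    assert_eq!(crate::prev_power_of_two(1024), 1024);
}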

#[test]
fn test_heap_merge_final_order() {
    const NUM_ORDERS: usize = 5;

    let backing_size = 1 << NUM_ORDERS;
    let backing_layout = Layout::from_size_align(backing_size, backing_size).unwrap();

    // create a new heap with 5 orders
    let mut heap = Heap::<NUM_ORDERS>::new();

    // allocate host memory for use by the heap
    let backing_allocation = unsafe { std::alloc::alloc(backing_layout) };

    let start = backing_allocation as usize;
    let middle = unsafe { backing_allocation.add(backing_size / 2) } as usize;
    let end = unsafe { backing_allocation.add(backing_size) } as usize;

    // add two contiguous ranges of memory
    unsafe { heap.add_to_heap(start, middle) };
    unsafe { heap.add_to_heap(middle, end) };

    // NUM_ORDERS - 1 is the maximum order of the heap
    let layout = Layout::from_size_align(1 << (NUM_ORDERS - 1), 1).unwrap();

    // allocation should succeed, using one of the added ranges
    let alloc = heap.alloc(layout).unwrap();

    // deallocation should not attempt to merge the two contiguous ranges, as the next order does not exist
    heap.dealloc(alloc, layout);
}
--------------------------------------------------------------------------------