├── .editorconfig ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── examples ├── chained.rs ├── fast_vectors.rs ├── local_alloc.rs ├── local_string.rs └── threaded.rs ├── rust-toolchain ├── rustfmt.toml └── src ├── align.rs ├── alloc.rs ├── chain.rs ├── lib.rs ├── syncstalloc.rs ├── tests.rs └── unsafestalloc.rs /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style = tab 5 | tab_width = 4 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 4 4 | 5 | [[package]] 6 | name = "allocator-api2" 7 | version = "0.3.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "78200ac3468a57d333cd0ea5dd398e25111194dcacd49208afca95c629a6311d" 10 | 11 | [[package]] 12 | name = "stalloc" 13 | version = "0.6.1" 14 | dependencies = [ 15 | "allocator-api2", 16 | ] 17 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "stalloc" 3 | version = "0.6.1" 4 | edition = "2024" 5 | repository = "https://github.com/abgros/stalloc" 6 | description = "Stalloc is a fast first-fit memory allocator that you can use to quickly speed up your Rust programs." 7 | license = "MIT" 8 | 9 | [dependencies] 10 | allocator-api2 = { version = "0.3", optional = true, default-features = false } 11 | 12 | [features] 13 | default = ["std"] 14 | allocator-api = [] 15 | allocator-api2 = ["dep:allocator-api2"] 16 | std = [] 17 | 18 | [[example]] 19 | name = "fast_vectors" 20 | 21 | [[example]] 22 | name = "local_alloc" 23 | required-features = ["allocator-api"] 24 | 25 | [[example]] 26 | name = "local_string" 27 | required-features = ["allocator-api"] 28 | 29 | [[example]] 30 | name = "threaded" 31 | required-features = ["allocator-api", "std"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Arbel Groshaus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Stalloc (Stack + alloc) is a fast first-fit memory allocator. From my benchmarking, it can be over 3x as fast as the default OS allocator! This is because all memory is allocated from the stack, which allows it to avoid all OS overhead. Since it doesn't rely on the OS (aside from `SyncStalloc`), this library is `no_std` compatible. 2 | 3 | Note that Stalloc uses a fixed amount of memory. If it ever runs out, it could result in your program crashing immediately. Stalloc is especially good for programs that make lots of small allocations. 4 | 5 | Stalloc is extremely memory-efficient. Within a 32-byte "heap", you can allocate eight `Box`s, free them, then allocate four `Box`s, free them, and then allocate two `Box`s. This can be especially useful if you're working in a very memory-constrained environment and you need a static upper limit on your application's memory usage. 6 | 7 | There are three main ways to use this library: 8 | 9 | ## With the allocator API (requires the `allocator-api` feature to be enabled in your Cargo.toml) 10 | ```rs 11 | #![feature(allocator_api)] 12 | 13 | let alloc = Stalloc::<200, 4>::new(); // 200 blocks, 4 bytes each 14 | let mut v = Vec::new_in(&alloc); 15 | v.push(25); 16 | 17 | // Since the allocator is about to get dropped anyway, no need to call the destructor of `v`. 18 | mem::forget(v); 19 | // `alloc` gets dropped at the end of the scope 20 | ``` 21 | 22 | ## With the unsafe APIs 23 | ```rs 24 | let alloc = Stalloc::<80, 8>::new(); 25 | 26 | let alignment = 1; // measured in block size, so 8 bytes 27 | let ptr = unsafe { alloc.allocate_blocks(80, alignment) }.unwrap(); 28 | assert!(alloc.is_oom()); 29 | // do stuff with your new allocation 30 | 31 | // later... 32 | unsafe { 33 | alloc.deallocate_blocks(ptr, 80); 34 | } 35 | ``` 36 | 37 | ## As a global allocator 38 | ```rs 39 | #[global_allocator] 40 | static GLOBAL: SyncStalloc<1000, 4> = SyncStalloc::new(); 41 | 42 | fn main() { 43 | // allocations and stuff 44 | let v = vec![1, 2, 3, 4, 5]; 45 | 46 | // we can check on the allocator state 47 | println!("{GLOBAL:?}"); 48 | } 49 | ``` 50 | 51 | If your program is single-threaded, you can avoid a little bit of overhead by using `UnsafeStalloc`, which isn't thread-safe. 52 | ```rs 53 | #[global_allocator] 54 | static GLOBAL: UnsafeStalloc<1000, 4> = unsafe { UnsafeStalloc::new() }; 55 | ``` 56 | 57 | To avoid the risk of OOM, you can create an allocator chain, which uses the next one as a fallback if something has gone wrong: 58 | ```rs 59 | // Create an allocator chain, where we try to use the fast `SyncStalloc`, but fall back to `System`. 60 | #[global_allocator] 61 | static GLOBAL: AllocChain, System> = SyncStalloc::new().chain(&System); 62 | ``` 63 | 64 | When you create a Stallocator, you configure it with two numbers: `L` is the number of blocks, and `B` is the size of each block in bytes. The total size of this type comes out to `L * B + 4` bytes, of which `L * B` can be used (4 bytes are needed to hold some metadata). The buffer is automatically aligned to `B`. 
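As a rough sanity check of that arithmetic (a sketch assuming the layout described above; with `B == 4` no padding is added, so the formula is exact):
```rs
use stalloc::Stalloc;
use std::mem::{align_of, size_of};

// 8 blocks of 4 bytes each: 32 usable bytes plus 4 bytes of metadata.
assert_eq!(size_of::<Stalloc<8, 4>>(), 8 * 4 + 4);
assert_eq!(align_of::<Stalloc<8, 4>>(), 4);
```
(For larger `B`, the 4 metadata bytes are padded out to a multiple of `B`, so the real size is rounded up accordingly.)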
If you want it to be more aligned than that, you can create a wrapper like this: 65 | 66 | ```rs 67 | #[repr(align(16))] // aligned to 16 bytes 68 | struct MoreAlignedStalloc(Stalloc<8, 4>); // eight blocks of four bytes each 69 | ``` 70 | 71 | To use this library with no-std, add the following to your `Cargo.toml`: 72 | ``` 73 | [dependencies] 74 | stalloc = {version = , default-features = false} 75 | ``` 76 | To use this library with the allocator API, add the following instead: 77 | ``` 78 | [dependencies] 79 | stalloc = {version = , features = ["allocator-api"]} 80 | ``` 81 | Or, if you prefer using the [allocator-api2](https://crates.io/crates/allocator-api2) crate with the stable compiler: 82 | ``` 83 | [dependencies] 84 | stalloc = {version = , features = ["allocator-api2"]} 85 | ``` -------------------------------------------------------------------------------- /examples/chained.rs: -------------------------------------------------------------------------------- 1 | use stalloc::{AllocChain, SyncStalloc}; 2 | 3 | use std::{alloc::System, hint::black_box, time::Instant}; 4 | 5 | // Create a global allocator with 1024 blocks of stack memory, 6 | // but fall back to the system allocator if we ever OOM. 7 | // Note: changing this to `UnsafeStalloc` almost doubles speed... 8 | #[global_allocator] 9 | static GLOBAL: AllocChain, System> = SyncStalloc::new().chain(&System); 10 | 11 | fn main() { 12 | let start = Instant::now(); 13 | 14 | let mut big_strings = vec![]; 15 | 16 | // Now create lots of small strings 17 | for i in 0..100_000_000 { 18 | black_box(String::from("hello!")); 19 | 20 | // Every once in a while, create and store a really big string 21 | if i % 10000 == 0 { 22 | big_strings.push("x".repeat(100_000)); 23 | } 24 | } 25 | 26 | for s in big_strings { 27 | black_box(s); 28 | } 29 | 30 | println!("Elapsed: {}ms", start.elapsed().as_millis()); 31 | } 32 | -------------------------------------------------------------------------------- /examples/fast_vectors.rs: -------------------------------------------------------------------------------- 1 | use stalloc::UnsafeStalloc; 2 | use std::{mem, time::Instant}; 3 | 4 | // Create a global allocator with 1000 blocks, each 4 bytes in length. 5 | // SAFETY: The program is single-threaded. 6 | #[global_allocator] 7 | static GLOBAL: UnsafeStalloc<1000, 4> = unsafe { UnsafeStalloc::new() }; 8 | 9 | fn main() { 10 | let start = Instant::now(); 11 | for _ in 0..10_000_000 { 12 | let mut a = vec![]; 13 | let mut b = vec![]; 14 | for i in 0..10 { 15 | a.push(i); 16 | b.push(i); 17 | } 18 | 19 | mem::forget(a); 20 | mem::forget(b); 21 | 22 | // By clearing the global allocator, we can quickly drop both vectors together. 23 | // SAFETY: There are no more active allocations into `GLOBAL`. 
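		// Both vectors were leaked with `mem::forget` above, so no destructor will ever
		// touch their buffers once the free list has been reset.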
24 | unsafe { 25 | GLOBAL.clear(); 26 | } 27 | } 28 | 29 | println!("Elapsed: {}ms", start.elapsed().as_millis()); 30 | } 31 | -------------------------------------------------------------------------------- /examples/local_alloc.rs: -------------------------------------------------------------------------------- 1 | #![feature(allocator_api)] 2 | use stalloc::Stalloc; 3 | use std::{mem, time::Instant}; 4 | 5 | fn main() { 6 | let start = Instant::now(); 7 | for _ in 0..10_000_000 { 8 | let alloc = Stalloc::<200, 4>::new(); 9 | 10 | let mut a = Vec::new_in(&alloc); 11 | let mut b = Vec::new_in(&alloc); 12 | for i in 0..10 { 13 | a.push(i); 14 | b.push(i); 15 | } 16 | 17 | // Since the allocator is about to get dropped anyway, no need to drop the individual vectors. 18 | mem::forget(a); 19 | mem::forget(b); 20 | } 21 | 22 | println!("Elapsed: {}ms", start.elapsed().as_millis()); 23 | } 24 | -------------------------------------------------------------------------------- /examples/local_string.rs: -------------------------------------------------------------------------------- 1 | use stalloc::Stalloc; 2 | use std::{hint::black_box, mem, ptr::NonNull, time::Instant}; 3 | 4 | fn main() { 5 | let start = Instant::now(); 6 | 7 | const BLOCK_SIZE: usize = 4; 8 | let s = Stalloc::<200, BLOCK_SIZE>::new(); 9 | 10 | for _ in 0..100_000_000 { 11 | // SAFETY: `msg` will never try to deallocate or reallocate. 12 | let mut msg = unsafe { 13 | String::from_raw_parts( 14 | s.allocate_blocks(50, 1).unwrap().as_ptr(), 15 | 0, 16 | 50 * BLOCK_SIZE, 17 | ) 18 | }; 19 | msg.push_str("Hello, "); 20 | msg.push_str("world!"); 21 | msg = black_box(msg); 22 | 23 | unsafe { 24 | s.deallocate_blocks(NonNull::new_unchecked(msg.as_mut_ptr()), 50); 25 | } 26 | 27 | // If we let `msg` drop itself, it will call `dealloc()` on the global allocator (not `s`), 28 | // resulting in undefined behaviour. 29 | mem::forget(msg); 30 | } 31 | 32 | println!("Elapsed: {}ms", start.elapsed().as_millis()); 33 | } 34 | -------------------------------------------------------------------------------- /examples/threaded.rs: -------------------------------------------------------------------------------- 1 | #![feature(allocator_api)] 2 | 3 | use std::{hint::black_box, thread, time::Instant}; 4 | 5 | use stalloc::SyncStalloc; 6 | 7 | const THREAD_COUNT: usize = 6; 8 | 9 | fn main() { 10 | let start = Instant::now(); 11 | 12 | for _ in 0..5000 { 13 | let alloc = SyncStalloc::::new(); 14 | 15 | thread::scope(|s| { 16 | for _ in 0..THREAD_COUNT { 17 | s.spawn(|| { 18 | let mut total = 0; 19 | for i in 0..1000 { 20 | // Reuse the same lock for creating and dropping the Box 21 | let lock = alloc.acquire_locked(); 22 | total += *black_box(Box::new_in(i, &*lock)); 23 | } 24 | assert_eq!(total, 499500); // ensure no data races have occurred 25 | }); 26 | } 27 | }); 28 | } 29 | 30 | println!("Elapsed: {}ms", start.elapsed().as_millis()); 31 | } 32 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly 2 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | hard_tabs = true -------------------------------------------------------------------------------- /src/align.rs: -------------------------------------------------------------------------------- 1 | /// A ZST with a given alignment. 
`Align` and `Alignment` are used to ensure that `Block`, and hence 2 | /// `Stalloc`, are aligned to a particular value. 3 | /// 4 | /// The definition of `Block` is: 5 | /// ```rs 6 | /// union Block 7 | /// where 8 | /// Align: Alignment, 9 | /// { 10 | /// header: Header, 11 | /// bytes: [MaybeUninit; B], 12 | /// _align: Align, 13 | /// } 14 | /// ``` 15 | /// This struct and trait are made public to allow you to define your own wrapper around `Stalloc`. 16 | /// For example, `SyncStalloc` is defined as: 17 | /// 18 | /// ```rs 19 | /// #[repr(transparent)] 20 | /// pub struct SyncStalloc(Mutex>) 21 | /// where 22 | /// Align: Alignment; 23 | /// ``` 24 | #[derive(Clone, Copy)] 25 | #[repr(transparent)] 26 | pub struct Align(::Inner) 27 | where 28 | Self: Alignment; 29 | 30 | #[doc(hidden)] 31 | pub trait Alignment { 32 | /// See the documentation for `Align`. 33 | type Inner: Copy; 34 | } 35 | 36 | macro_rules! impl_alignments { 37 | ($($name:ident as $n:literal),*) => { $( 38 | #[derive(Copy, Clone)] 39 | #[repr(align($n))] 40 | #[doc(hidden)] 41 | pub struct $name; 42 | impl Alignment for Align<$n> { 43 | type Inner = $name; 44 | } 45 | )* }; 46 | } 47 | 48 | impl_alignments!( 49 | Align1 as 1, Align2 as 2, Align4 as 4, Align8 as 8, Align16 as 16, Align32 as 32, 50 | Align64 as 64, Align128 as 128, Align256 as 256, Align512 as 512, Align1024 as 1024, 51 | Align2048 as 2048, Align4096 as 4096, Align8192 as 8192, Align16384 as 16384, 52 | Align32768 as 32768, Align65536 as 65536, Align131072 as 131_072, Align262144 as 262_144, 53 | Align524288 as 524_288, Align1048576 as 1_048_576, Align2097152 as 2_097_152, 54 | Align4194304 as 4_194_304, Align8388608 as 8_388_608, Align16777216 as 16_777_216, 55 | Align33554432 as 33_554_432, Align67108864 as 67_108_864, Align134217728 as 134_217_728, 56 | Align268435456 as 268_435_456, Align536870912 as 536_870_912 57 | ); 58 | -------------------------------------------------------------------------------- /src/alloc.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(feature = "allocator-api", feature = "allocator-api2"))] 2 | compile_error!("The `allocator-api` and `allocator-api2` features are mutually exclusive."); 3 | 4 | #[cfg(not(any(feature = "allocator-api", feature = "allocator-api2")))] 5 | /// An error type representing some kind of allocation error due to memory exhaustion. 6 | /// This is a polyfill for `core::alloc::AllocError`, available through the nightly Allocator API. 
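///
/// A minimal illustration (hypothetical usage; the error simply signals that a request
/// could not be satisfied):
/// ```
/// use stalloc::Stalloc;
///
/// let alloc = Stalloc::<4, 4>::new();
/// // Asking for more blocks than the allocator owns can never succeed.
/// let _err = unsafe { alloc.allocate_blocks(5, 1) }.unwrap_err();
/// ```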
7 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 8 | pub struct AllocError; 9 | 10 | #[cfg(not(any(feature = "allocator-api", feature = "allocator-api2")))] 11 | impl core::error::Error for AllocError {} 12 | 13 | #[cfg(not(any(feature = "allocator-api", feature = "allocator-api2")))] 14 | impl core::fmt::Display for AllocError { 15 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 16 | f.write_str("memory allocation failed") 17 | } 18 | } 19 | 20 | #[cfg(feature = "allocator-api2")] 21 | pub use allocator_api2::alloc::AllocError; 22 | 23 | #[cfg(feature = "allocator-api")] 24 | pub use core::alloc::AllocError; 25 | 26 | #[cfg(feature = "allocator-api")] 27 | pub use core::alloc::{Allocator, Layout}; 28 | 29 | #[cfg(feature = "allocator-api2")] 30 | pub use allocator_api2::alloc::{Allocator, Layout}; 31 | -------------------------------------------------------------------------------- /src/chain.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::{GlobalAlloc, Layout}; 2 | 3 | /// A trait representing an allocator that another allocator can be chained to. 4 | /// 5 | /// # Safety 6 | /// `addr_in_bounds` must return true if and only if the address could belong to 7 | /// a pointer which is valid for the allocator. This trait is used to decide 8 | /// which allocator to call when the user calls `deallocate()` and related functions. 9 | pub unsafe trait ChainableAlloc { 10 | /// Checks whether a certain address is contained within the allocator. This 11 | /// is called when using `deallocate()` and related functions in order to 12 | /// determine which allocator needs to free the pointer. 13 | fn addr_in_bounds(&self, addr: usize) -> bool; 14 | } 15 | 16 | /// A chain of allocators. If the first allocator is exhuasted, the second one is used as a fallback. 17 | /// 18 | /// # Examples 19 | /// ``` 20 | /// // If the `SyncStalloc` is full, fall back to the system allocator. 21 | /// use stalloc::{SyncStalloc, Stalloc}; 22 | /// use std::alloc::System; 23 | /// 24 | /// let alloc_with_fallback = SyncStalloc::<1024, 8>::new().chain(&System); 25 | /// 26 | /// let crazy_chain = Stalloc::<128, 4>::new() 27 | /// .chain(&Stalloc::<1024, 8>::new()) 28 | /// .chain(&Stalloc::<8192, 16>::new()) 29 | /// .chain(&System); 30 | /// ``` 31 | pub struct AllocChain<'a, A, B>(A, &'a B); 32 | 33 | impl<'a, A, B> AllocChain<'a, A, B> { 34 | /// Initializes a new `AllocChain`. 35 | pub const fn new(a: A, b: &'a B) -> Self { 36 | Self(a, b) 37 | } 38 | 39 | /// Creates a new `AllocChain` containing this chain and `next`. 
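	///
	/// A small sketch of extending an existing chain with one more fallback (the concrete
	/// allocators here are just placeholders):
	///
	/// ```
	/// use stalloc::{AllocChain, Stalloc};
	/// use std::alloc::System;
	///
	/// let two = AllocChain::new(Stalloc::<16, 4>::new(), &System);
	/// let three = two.chain(&System); // now three allocators deep
	/// ```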
40 | pub const fn chain(self, next: &T) -> AllocChain<'_, Self, T> 41 | where 42 | Self: Sized, 43 | { 44 | AllocChain::new(self, next) 45 | } 46 | } 47 | 48 | unsafe impl GlobalAlloc for AllocChain<'_, A, B> { 49 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 50 | let ptr_a = unsafe { self.0.alloc(layout) }; 51 | if ptr_a.is_null() { 52 | unsafe { self.1.alloc(layout) } 53 | } else { 54 | ptr_a 55 | } 56 | } 57 | 58 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 59 | if self.0.addr_in_bounds(ptr.addr()) { 60 | unsafe { self.0.dealloc(ptr, layout) }; 61 | } else { 62 | unsafe { self.1.dealloc(ptr, layout) }; 63 | } 64 | } 65 | 66 | unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { 67 | if self.0.addr_in_bounds(ptr.addr()) { 68 | let ptr_a = unsafe { self.0.realloc(ptr, layout, new_size) }; 69 | if !ptr_a.is_null() { 70 | return ptr_a; 71 | } 72 | 73 | let layout_b = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) }; 74 | let ptr_b = unsafe { self.1.alloc(layout_b) }; 75 | 76 | if !ptr_b.is_null() { 77 | // Copy the allocation from `A` to `B`. 78 | unsafe { 79 | ptr.copy_to_nonoverlapping(ptr_b, layout.size()); 80 | self.0.dealloc(ptr, layout); 81 | } 82 | } 83 | 84 | // This is either a valid pointer or null. 85 | ptr_b 86 | } else { 87 | unsafe { self.1.realloc(ptr, layout, new_size) } 88 | // Don't fall back to `A`. 89 | } 90 | } 91 | } 92 | 93 | #[cfg(any(feature = "allocator-api", feature = "allocator-api2"))] 94 | use { 95 | crate::{AllocError, Allocator}, 96 | core::ptr::NonNull, 97 | }; 98 | 99 | #[cfg(any(feature = "allocator-api", feature = "allocator-api2"))] 100 | unsafe impl Allocator for &AllocChain<'_, A, B> 101 | where 102 | for<'a> &'a A: Allocator, 103 | for<'a> &'a B: Allocator, 104 | { 105 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 106 | (&self.0) 107 | .allocate(layout) 108 | .or_else(|_| self.1.allocate(layout)) 109 | } 110 | 111 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 112 | if self.0.addr_in_bounds(ptr.addr().into()) { 113 | unsafe { (&self.0).deallocate(ptr, layout) }; 114 | } else { 115 | unsafe { self.1.deallocate(ptr, layout) } 116 | } 117 | } 118 | 119 | unsafe fn grow( 120 | &self, 121 | ptr: NonNull, 122 | old_layout: Layout, 123 | new_layout: Layout, 124 | ) -> Result, AllocError> { 125 | if self.0.addr_in_bounds(ptr.addr().into()) { 126 | let res_a = unsafe { (&self.0).grow(ptr, old_layout, new_layout) }; 127 | if res_a.is_ok() { 128 | return res_a; 129 | } 130 | 131 | let res_b = self.1.allocate(new_layout); 132 | if let Ok(ptr_b) = res_b { 133 | // Copy the allocation from `A` to `B`. 134 | unsafe { 135 | ptr.copy_to_nonoverlapping(ptr_b.cast(), old_layout.size()); 136 | (&self.0).deallocate(ptr, old_layout); 137 | } 138 | } 139 | 140 | res_b 141 | } else { 142 | unsafe { self.1.grow(ptr, old_layout, new_layout) } 143 | // Don't fall back to `A`. 144 | } 145 | } 146 | 147 | unsafe fn grow_zeroed( 148 | &self, 149 | ptr: NonNull, 150 | old_layout: Layout, 151 | new_layout: Layout, 152 | ) -> Result, AllocError> { 153 | unsafe { 154 | // SAFETY: Upheld by the caller. 155 | let new_ptr = self.grow(ptr, old_layout, new_layout)?; 156 | let count = new_ptr.len() - old_layout.size(); 157 | 158 | // SAFETY: We are filling in the extra capacity with zeros. 
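			// (Only the newly grown tail needs zeroing: `grow` has already copied or kept
			// the first `old_layout.size()` bytes of the allocation.)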
159 | new_ptr 160 | .cast::() 161 | .add(old_layout.size()) 162 | .write_bytes(0, count); 163 | 164 | Ok(new_ptr) 165 | } 166 | } 167 | 168 | unsafe fn shrink( 169 | &self, 170 | ptr: NonNull, 171 | old_layout: Layout, 172 | new_layout: Layout, 173 | ) -> Result, AllocError> { 174 | if self.0.addr_in_bounds(ptr.addr().into()) { 175 | let res_a = unsafe { (&self.0).shrink(ptr, old_layout, new_layout) }; 176 | if res_a.is_ok() { 177 | return res_a; 178 | } 179 | 180 | let res_b = self.1.allocate(new_layout); 181 | if let Ok(ptr_b) = res_b { 182 | // Copy the allocation from `A` to `B`. 183 | unsafe { 184 | ptr.copy_to_nonoverlapping(ptr_b.cast(), old_layout.size()); 185 | (&self.0).deallocate(ptr, old_layout); 186 | } 187 | } 188 | 189 | res_b 190 | } else { 191 | unsafe { self.1.shrink(ptr, old_layout, new_layout) } 192 | // Don't fall back to `A`. 193 | } 194 | } 195 | 196 | fn by_ref(&self) -> &Self 197 | where 198 | Self: Sized, 199 | { 200 | self 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![deny(missing_docs)] 3 | #![cfg_attr(feature = "allocator-api", feature(allocator_api))] 4 | #![warn(clippy::nursery, clippy::pedantic)] 5 | 6 | //! Stalloc (Stack + alloc) is a fast first-fit memory allocator. From my benchmarking, 7 | //! it can be over 3x as fast as the default OS allocator! This is because all memory 8 | //! is allocated from the stack, which allows it to avoid all OS overhead. Since it 9 | //! doesn't rely on the OS (aside from `SyncStalloc`), this library is `no_std` compatible. 10 | //! 11 | //! ``` 12 | //! use stalloc::SyncStalloc; 13 | //! 14 | //! // Create a global allocator with 1000 blocks, each 4 bytes in length. 15 | //! #[global_allocator] 16 | //! static GLOBAL: SyncStalloc<1000, 4> = SyncStalloc::new(); 17 | //! 18 | //! fn main() { 19 | //! // All of these allocations are being handled by the global `SyncStalloc` instance. 20 | //! let s1 = String::from("Hello"); 21 | //! let s2 = String::from("world"); 22 | //! let msg = format!("{s1}, {s2}!"); 23 | //! 24 | //! assert!(!GLOBAL.is_oom()); 25 | //! println!("Allocator state: {GLOBAL:?}"); 26 | //! } 27 | //! ``` 28 | //! 29 | //! To avoid the risk of OOM, you can "chain" your allocator to the system allocator, using it as a fallback. 30 | //! ``` 31 | //! use stalloc::{AllocChain, SyncStalloc}; 32 | //! use std::alloc::System; 33 | //! 34 | //! #[global_allocator] 35 | //! static GLOBAL: AllocChain, System> = SyncStalloc::new().chain(&System); 36 | //! ``` 37 | //! 38 | //! # Feature flags 39 | //! - `std` (on by default) — used in the implementation of `SyncStalloc` 40 | //! - `allocator-api` (requires nightly) 41 | //! 
- `allocator-api2` (pulls in the `allocator-api2` crate) 42 | 43 | use core::cell::UnsafeCell; 44 | use core::fmt::{self, Debug, Formatter}; 45 | use core::hint::assert_unchecked; 46 | use core::mem::MaybeUninit; 47 | use core::ptr::NonNull; 48 | 49 | mod align; 50 | pub use align::*; 51 | mod unsafestalloc; 52 | pub use unsafestalloc::*; 53 | mod chain; 54 | pub use chain::*; 55 | 56 | mod alloc; 57 | #[allow(clippy::wildcard_imports)] 58 | use alloc::*; 59 | 60 | #[cfg(feature = "std")] 61 | mod syncstalloc; 62 | #[cfg(feature = "std")] 63 | pub use syncstalloc::*; 64 | 65 | #[cfg(test)] 66 | #[cfg(feature = "allocator-api")] 67 | mod tests; 68 | 69 | #[derive(Clone, Copy)] 70 | #[repr(C)] 71 | struct Header { 72 | next: u16, 73 | length: u16, 74 | } 75 | 76 | #[derive(Clone, Copy)] 77 | #[repr(C)] 78 | union Block 79 | where 80 | Align: Alignment, 81 | { 82 | header: Header, 83 | bytes: [MaybeUninit; B], 84 | _align: Align, 85 | } 86 | 87 | /// This function is always safe to call, as `ptr` is not dereferenced. 88 | fn header_in_block(ptr: *mut Block) -> *mut Header 89 | where 90 | Align: Alignment, 91 | { 92 | unsafe { &raw mut (*ptr).header } 93 | } 94 | 95 | /// Converts from `usize` to `u16` assuming that no truncation occurs. 96 | /// Safety precondition: `val` must be less than or equal to `0xffff`. 97 | #[allow(clippy::cast_possible_truncation)] 98 | const unsafe fn as_u16(val: usize) -> u16 { 99 | unsafe { 100 | assert_unchecked(val <= 0xffff); 101 | } 102 | 103 | val as u16 104 | } 105 | 106 | // The `base` Header has a unique meaning here. Because `base.length` is useless (always 0), 107 | // we use it as a special flag to check whether `data` is completely filled. Every call to 108 | // `allocate()` and related functions must verify that base.length != OOM_MARKER. 109 | const OOM_MARKER: u16 = u16::MAX; 110 | 111 | /// A fast first-fit memory allocator. 112 | /// 113 | /// When you create an instance of this allocator, you pass in a value for `L` and `B`. 114 | /// `L` is the number of blocks, and `B` is the size of each block in bytes. The total size of this type 115 | /// comes out to `L * B + 4` bytes, of which `L * B` can be used (4 bytes are needed to hold some metadata). 116 | /// `B` must be a power of two from 4 and 2^29, and `L` must be a number in the range `1..65536`. 117 | /// 118 | /// `B` represents the smallest unit of memory that the allocator can manage. If `B == 16`, then asking 119 | /// for 17 bytes will give you a 32 byte allocation (the amount is rounded up). 120 | /// The alignment of the allocator is always equal to `B`. For maximum efficiency, it is recommended 121 | /// to set `B` equal to the alignment of the type you expect to store the most of. For example, if you're storing 122 | /// a lot of `u64`s, you should set `B == 8`. 123 | /// 124 | /// Note that `Stalloc` cannot be used as a global allocator because it is not thread-safe. To switch out the global 125 | /// allocator, use `SyncStalloc` or `UnsafeStalloc`, which can be used concurrently. 126 | #[repr(C)] 127 | pub struct Stalloc 128 | where 129 | Align: Alignment, 130 | { 131 | data: UnsafeCell<[Block; L]>, 132 | base: UnsafeCell
, 133 | } 134 | 135 | impl Stalloc 136 | where 137 | Align: Alignment, 138 | { 139 | /// Initializes a new empty `Stalloc` instance. 140 | /// 141 | /// # Examples 142 | /// ``` 143 | /// use stalloc::Stalloc; 144 | /// 145 | /// let alloc = Stalloc::<200, 8>::new(); 146 | /// ``` 147 | #[must_use] 148 | #[inline] 149 | pub const fn new() -> Self { 150 | const { 151 | assert!(L >= 1 && L <= 0xffff, "block count must be in 1..65536"); 152 | assert!(B >= 4, "block size must be at least 4 bytes"); 153 | } 154 | 155 | let mut blocks = [Block { 156 | bytes: const { [MaybeUninit::uninit(); B] }, 157 | }; L]; 158 | 159 | // Write the first header. SAFETY: we have already checked that `L <= 0xffff`. 160 | blocks[0].header = Header { 161 | next: 0, 162 | length: unsafe { as_u16(L) }, 163 | }; 164 | 165 | Self { 166 | base: UnsafeCell::new(Header { next: 0, length: 0 }), 167 | data: UnsafeCell::new(blocks), 168 | } 169 | } 170 | 171 | /// Checks if the allocator is completely out of memory. 172 | /// If this is false, then you are guaranteed to be able to allocate 173 | /// a layout with a size and alignment of `B` bytes. 174 | /// This runs in O(1). 175 | /// 176 | /// # Examples 177 | /// ``` 178 | /// use stalloc::Stalloc; 179 | /// 180 | /// let alloc = Stalloc::<200, 8>::new(); 181 | /// assert!(!alloc.is_oom()); 182 | /// let ptr = unsafe { alloc.allocate_blocks(200, 1).unwrap() }; 183 | /// assert!(alloc.is_oom()); 184 | /// ``` 185 | pub const fn is_oom(&self) -> bool { 186 | unsafe { *self.base.get() }.length == OOM_MARKER 187 | } 188 | 189 | /// Checks if the allocator is empty. 190 | /// If this is true, then you are guaranteed to be able to allocate 191 | /// a layout with a size of `B * L` bytes and an alignment of `B` bytes. 192 | /// If this is false, then this is guaranteed to be impossible. 193 | /// This runs in O(1). 194 | /// 195 | /// # Examples 196 | /// ``` 197 | /// use stalloc::Stalloc; 198 | /// 199 | /// let alloc = Stalloc::<60, 4>::new(); 200 | /// assert!(alloc.is_empty()); 201 | /// 202 | /// let ptr = unsafe { alloc.allocate_blocks(60, 1).unwrap() }; 203 | /// assert!(!alloc.is_empty()); 204 | /// 205 | /// unsafe { alloc.deallocate_blocks(ptr, 60) }; 206 | /// assert!(alloc.is_empty()); 207 | /// ``` 208 | pub fn is_empty(&self) -> bool { 209 | !self.is_oom() && unsafe { *self.base.get() }.next == 0 210 | } 211 | 212 | /// # Safety 213 | /// 214 | /// Calling this function immediately invalidates all pointers into the allocator. Calling 215 | /// `deallocate_blocks()` with an invalidated pointer will result in the free list being corrupted. 216 | /// 217 | /// # Examples 218 | /// ``` 219 | /// use stalloc::Stalloc; 220 | /// 221 | /// let alloc = Stalloc::<60, 4>::new(); 222 | /// 223 | /// let ptr1 = unsafe { alloc.allocate_blocks(20, 1) }.unwrap(); 224 | /// let ptr2 = unsafe { alloc.allocate_blocks(20, 1) }.unwrap(); 225 | /// let ptr3 = unsafe { alloc.allocate_blocks(20, 1) }.unwrap(); 226 | /// 227 | /// unsafe { alloc.clear() }; // invalidate all allocated pointers 228 | /// 229 | /// assert!(alloc.is_empty()); 230 | /// ``` 231 | pub unsafe fn clear(&self) { 232 | unsafe { 233 | (*self.base.get()).next = 0; 234 | (*self.base.get()).length = 0; 235 | (*self.header_at(0)).next = 0; 236 | (*self.header_at(0)).length = as_u16(L); 237 | } 238 | } 239 | 240 | /// Tries to allocate `count` blocks. If the allocation succeeds, a pointer is returned. This function 241 | /// never allocates more than necessary. Note that `align` is measured in units of `B`. 
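	/// For instance, with `B == 8`, passing an `align` of 2 requests a 16-byte-aligned allocation.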
242 | /// 243 | /// # Safety 244 | /// 245 | /// `size` must be nonzero, and `align` must be a power of 2 in the range `1..=2^29 / B`. 246 | /// 247 | /// # Errors 248 | /// 249 | /// Will return `AllocError` if the allocation was unsuccessful, in which case this function was a no-op. 250 | /// 251 | /// # Examples 252 | /// ``` 253 | /// use stalloc::Stalloc; 254 | /// 255 | /// const BLOCK_SIZE: usize = 4; 256 | /// let alloc = Stalloc::<10, BLOCK_SIZE>::new(); 257 | /// 258 | /// let ptr = unsafe { alloc.allocate_blocks(10, 1) }.unwrap(); 259 | /// unsafe { ptr.write_bytes(42, 10 * BLOCK_SIZE) }; 260 | /// 261 | /// assert!(alloc.is_oom()); 262 | /// ``` 263 | pub unsafe fn allocate_blocks( 264 | &self, 265 | size: usize, 266 | align: usize, 267 | ) -> Result, AllocError> { 268 | // Assert unsafe preconditions. 269 | unsafe { 270 | assert_unchecked(size >= 1 && align.is_power_of_two() && align <= 2usize.pow(29) / B); 271 | } 272 | 273 | if self.is_oom() { 274 | return Err(AllocError); 275 | } 276 | 277 | // Loop through the free list, and find the first header whose length satisfies the layout. 278 | unsafe { 279 | // `prev` and `curr` are pointers that run through the free list. 280 | let base = self.base.get(); 281 | let mut prev = base; 282 | let mut curr = self.header_at((*base).next.into()); 283 | 284 | loop { 285 | let curr_idx = usize::from((*prev).next); 286 | let next_idx = (*curr).next.into(); 287 | 288 | // Check if the current free chunk satisfies the layout. 289 | let curr_chunk_len = (*curr).length.into(); 290 | 291 | // If the alignment is more than 1, there might be spare blocks in front. 292 | // If it is extremely large, there might have to be more spare blocks than are available. 293 | let spare_front = (curr.addr() / B).wrapping_neg() % align; 294 | 295 | if spare_front + size <= curr_chunk_len { 296 | let avail_blocks = curr_chunk_len - spare_front; 297 | let avail_blocks_ptr = self.block_at(curr_idx + spare_front); 298 | let spare_back = avail_blocks - size; 299 | 300 | // If there are spare blocks, add them to the free list. 301 | if spare_back > 0 { 302 | let spare_back_idx = curr_idx + spare_front + size; 303 | let spare_back_ptr = self.header_at(spare_back_idx); 304 | (*spare_back_ptr).next = as_u16(next_idx); 305 | (*spare_back_ptr).length = as_u16(spare_back); 306 | 307 | if spare_front > 0 { 308 | (*curr).next = as_u16(spare_back_idx); 309 | (*curr).length = as_u16(spare_front); 310 | } else { 311 | (*prev).next = as_u16(spare_back_idx); 312 | } 313 | } else if spare_front > 0 { 314 | (*curr).next = as_u16(curr_idx + spare_front + size); 315 | (*curr).length = as_u16(spare_front); 316 | (*prev).next = as_u16(next_idx); 317 | } else { 318 | (*prev).next = as_u16(next_idx); 319 | // If this is the last block of memory, set the OOM marker. 320 | if next_idx == 0 { 321 | (*base).length = OOM_MARKER; 322 | } 323 | } 324 | 325 | return Ok(NonNull::new_unchecked(avail_blocks_ptr.cast())); 326 | } 327 | 328 | // Check if we've already made a whole loop around without finding anything. 329 | if next_idx == 0 { 330 | return Err(AllocError); 331 | } 332 | 333 | prev = curr; 334 | curr = self.header_at(next_idx); 335 | } 336 | } 337 | } 338 | 339 | /// Deallocates a pointer. This function always succeeds. 340 | /// 341 | /// # Safety 342 | /// 343 | /// `ptr` must point to an allocation, and `size` must be the number of blocks 344 | /// in the allocation. That is, `size` is always in `1..=L`. 
345 | /// 346 | /// # Examples 347 | /// ``` 348 | /// use stalloc::Stalloc; 349 | /// 350 | /// let alloc = Stalloc::<100, 16>::new(); 351 | /// 352 | /// let ptr = unsafe { alloc.allocate_blocks(100, 1) }.unwrap(); 353 | /// assert!(alloc.is_oom()); 354 | /// 355 | /// unsafe { alloc.deallocate_blocks(ptr, 100) }; 356 | /// assert!(alloc.is_empty()); 357 | /// ``` 358 | pub unsafe fn deallocate_blocks(&self, ptr: NonNull, size: usize) { 359 | // Assert unsafe precondition. 360 | unsafe { 361 | assert_unchecked(size >= 1 && size <= L); 362 | } 363 | 364 | let freed_ptr = header_in_block(ptr.as_ptr().cast()); 365 | let freed_idx = self.index_of(freed_ptr); 366 | let base = self.base.get(); 367 | let before = self.header_before(freed_idx); 368 | 369 | unsafe { 370 | let prev_next = (*before).next.into(); 371 | (*freed_ptr).next = as_u16(prev_next); 372 | (*freed_ptr).length = as_u16(size); 373 | 374 | // Try to merge with the next free block. 375 | if freed_idx + size == prev_next { 376 | let header_to_merge = self.header_at(prev_next); 377 | (*freed_ptr).next = (*header_to_merge).next; 378 | (*freed_ptr).length += (*header_to_merge).length; 379 | } 380 | 381 | // Try to merge with the previous free block. 382 | if before.eq(&base) { 383 | (*base).next = as_u16(freed_idx); 384 | (*base).length = 0; 385 | } else if self.index_of(before) + usize::from((*before).length) == freed_idx { 386 | (*before).next = (*freed_ptr).next; 387 | (*before).length += (*freed_ptr).length; 388 | } else { 389 | // No merge is possible. 390 | (*before).next = as_u16(freed_idx); 391 | } 392 | } 393 | } 394 | 395 | /// Shrinks the allocation. This function always succeeds and never reallocates. 396 | /// 397 | /// # Safety 398 | /// 399 | /// `ptr` must point to a valid allocation of `old_size` blocks, and `new_size` must be in `1..old_size`. 400 | /// 401 | /// # Examples 402 | /// ``` 403 | /// use stalloc::Stalloc; 404 | /// 405 | /// let alloc = Stalloc::<100, 16>::new(); 406 | /// 407 | /// let ptr = unsafe { alloc.allocate_blocks(100, 1) }.unwrap(); 408 | /// assert!(alloc.is_oom()); 409 | /// 410 | /// // shrink the allocation from 100 to 90 blocks 411 | /// unsafe { alloc.shrink_in_place(ptr, 100, 90) }; 412 | /// assert!(!alloc.is_oom()); 413 | /// ``` 414 | pub unsafe fn shrink_in_place(&self, ptr: NonNull, old_size: usize, new_size: usize) { 415 | // Assert unsafe preconditions. 416 | unsafe { 417 | assert_unchecked(new_size > 0 && new_size < old_size); 418 | } 419 | 420 | let curr_block: *mut Block = ptr.as_ptr().cast(); 421 | let curr_idx = (curr_block.addr() - self.data.get().addr()) / B; 422 | 423 | // A new chunk will be created in the gap. 424 | let new_idx = curr_idx + new_size; 425 | let spare_blocks = old_size - new_size; 426 | 427 | unsafe { 428 | // Check if we can merge the block with a chunk immediately after. 429 | let prev_free_chunk = self.header_before(curr_idx); 430 | 431 | let next_free_idx = (*prev_free_chunk).next.into(); // possibly zero 432 | let new_chunk = header_in_block(curr_block.add(new_size)); 433 | 434 | (*prev_free_chunk).next = as_u16(new_idx); 435 | 436 | if new_idx + spare_blocks == next_free_idx { 437 | let next_free_chunk = self.header_at(next_free_idx); 438 | (*new_chunk).next = (*next_free_chunk).next; 439 | (*new_chunk).length = as_u16(spare_blocks) + (*next_free_chunk).length; 440 | } else { 441 | (*new_chunk).next = as_u16(next_free_idx); 442 | (*new_chunk).length = as_u16(spare_blocks); 443 | } 444 | 445 | // We are definitely no longer OOM. 
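			// (`base.length` is only ever used as the OOM flag, so zeroing it marks
			// the allocator as having free space again.)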
446 | (*self.base.get()).length = 0; 447 | } 448 | } 449 | 450 | /// Tries to grow the current allocation in-place. If that isn't possible, this function is a no-op. 451 | /// 452 | /// # Safety 453 | /// 454 | /// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`. 455 | /// 456 | /// # Errors 457 | /// 458 | /// Will return `AllocError` if the grow was unsuccessful, in which case this function was a no-op. 459 | /// 460 | /// # Examples 461 | /// ``` 462 | /// use stalloc::Stalloc; 463 | /// 464 | /// let alloc = Stalloc::<100, 16>::new(); 465 | /// 466 | /// let ptr = unsafe { alloc.allocate_blocks(25, 1) }.unwrap(); 467 | /// assert!(!alloc.is_oom()); 468 | /// 469 | /// // grow the allocation from 25 to 100 blocks 470 | /// unsafe { alloc.grow_in_place(ptr, 25, 100) }.unwrap(); 471 | /// assert!(alloc.is_oom()); 472 | /// ``` 473 | pub unsafe fn grow_in_place( 474 | &self, 475 | ptr: NonNull, 476 | old_size: usize, 477 | new_size: usize, 478 | ) -> Result<(), AllocError> { 479 | // Assert unsafe preconditions. 480 | unsafe { 481 | assert_unchecked(old_size >= 1 && old_size <= L && new_size > old_size); 482 | } 483 | 484 | let curr_block: *mut Block = ptr.as_ptr().cast(); 485 | let curr_idx = (curr_block.addr() - self.data.get().addr()) / B; 486 | let prev_free_chunk = self.header_before(curr_idx); 487 | 488 | unsafe { 489 | let next_free_idx = (*prev_free_chunk).next.into(); 490 | 491 | // The next free chunk must be directly adjacent to the current allocation. 492 | if curr_idx + old_size != next_free_idx { 493 | return Err(AllocError); 494 | } 495 | 496 | let next_free_chunk = self.header_at(next_free_idx); 497 | let room_to_grow = (*next_free_chunk).length.into(); 498 | 499 | // There must be enough room to grow. 500 | let needed_blocks = new_size - old_size; 501 | if needed_blocks > room_to_grow { 502 | return Err(AllocError); 503 | } 504 | 505 | // Check if there would be any blocks left over after growing into the next chunk. 506 | let blocks_left_over = room_to_grow - needed_blocks; 507 | 508 | if blocks_left_over > 0 { 509 | let new_chunk_idx = next_free_idx + needed_blocks; 510 | let new_chunk_head = self.header_at(new_chunk_idx); 511 | 512 | // Insert the new chunk into the free list. 513 | (*prev_free_chunk).next = as_u16(new_chunk_idx); 514 | (*new_chunk_head).next = (*next_free_chunk).next; 515 | (*new_chunk_head).length = as_u16(blocks_left_over); 516 | } else { 517 | // The free chunk is completely consumed. 518 | (*prev_free_chunk).next = (*next_free_chunk).next; 519 | 520 | // If `prev_free_chunk` is the base pointer and we just set it to 0, we are OOM. 521 | let base = self.base.get(); 522 | if prev_free_chunk.eq(&base) && (*next_free_chunk).next == 0 { 523 | (*base).length = OOM_MARKER; 524 | } 525 | } 526 | 527 | Ok(()) 528 | } 529 | } 530 | 531 | /// Tries to grow the current allocation in-place. If that isn't possible, the allocator grows by as much 532 | /// as it is able to, and the new length of the allocation is returned. The new length is guaranteed to be 533 | /// in the range `old_size..=new_size`. 534 | /// # Safety 535 | /// 536 | /// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`. 
537 | /// 538 | /// # Examples 539 | /// ``` 540 | /// use stalloc::Stalloc; 541 | /// 542 | /// let alloc1 = Stalloc::<7, 4>::new(); 543 | /// unsafe { 544 | /// let ptr = alloc1.allocate_blocks(3, 1).unwrap(); // allocate 3 blocks 545 | /// let new_size = alloc1.grow_up_to(ptr, 3, 9999); // try to grow to a ridiculous amount 546 | /// assert_eq!(new_size, 7); // can only grow up to 7 547 | /// } 548 | /// 549 | /// let alloc2 = Stalloc::<21, 16>::new(); 550 | /// unsafe { 551 | /// let ptr = alloc2.allocate_blocks(9, 1).unwrap(); // allocate 9 blocks 552 | /// let new_size = alloc2.grow_up_to(ptr, 9, 21); 553 | /// assert_eq!(new_size, 21); // grow was successful 554 | /// } 555 | /// ``` 556 | pub unsafe fn grow_up_to(&self, ptr: NonNull, old_size: usize, new_size: usize) -> usize { 557 | // Assert unsafe preconditions. 558 | unsafe { 559 | assert_unchecked(old_size >= 1 && old_size <= L && new_size > old_size); 560 | } 561 | 562 | let curr_block: *mut Block = ptr.as_ptr().cast(); 563 | let curr_idx = (curr_block.addr() - self.data.get().addr()) / B; 564 | let prev_free_chunk = self.header_before(curr_idx); 565 | 566 | unsafe { 567 | let next_free_idx = (*prev_free_chunk).next.into(); 568 | 569 | // The next free chunk must be directly adjacent to the current allocation. 570 | if curr_idx + old_size != next_free_idx { 571 | return old_size; 572 | } 573 | 574 | let next_free_chunk = self.header_at(next_free_idx); 575 | let room_to_grow = (*next_free_chunk).length.into(); 576 | 577 | // If there isn't enough room to grow, grow as much as possible. 578 | let needed_blocks = (new_size - old_size).min(room_to_grow); 579 | 580 | // Check if there would be any blocks left over after growing into the next chunk. 581 | let blocks_left_over = room_to_grow - needed_blocks; 582 | 583 | if blocks_left_over > 0 { 584 | let new_chunk_idx = next_free_idx + needed_blocks; 585 | let new_chunk_head = self.header_at(new_chunk_idx); 586 | 587 | // Insert the new chunk into the free list. 588 | (*prev_free_chunk).next = as_u16(new_chunk_idx); 589 | (*new_chunk_head).next = (*next_free_chunk).next; 590 | (*new_chunk_head).length = as_u16(blocks_left_over); 591 | } else { 592 | // The free chunk is completely consumed. 593 | (*prev_free_chunk).next = (*next_free_chunk).next; 594 | 595 | // If `prev_free_chunk` is the base pointer and we just set it to 0, we are OOM. 596 | let base = self.base.get(); 597 | if prev_free_chunk.eq(&base) && (*next_free_chunk).next == 0 { 598 | (*base).length = OOM_MARKER; 599 | } 600 | } 601 | 602 | old_size + needed_blocks 603 | } 604 | } 605 | } 606 | 607 | // Internal functions. 608 | impl Stalloc 609 | where 610 | Align: Alignment, 611 | { 612 | /// Get the index of a pointer to `data`. This function is always safe 613 | /// to call, but the result may not be meaningful. 614 | /// Even if the header is not at the start of the block (compiler's choice), 615 | /// dividing by B rounds down and produces the correct result. 616 | fn index_of(&self, ptr: *mut Header) -> usize { 617 | (ptr.addr() - self.data.get().addr()) / B 618 | } 619 | 620 | /// Safety precondition: idx must be in `0..L`. 621 | const unsafe fn block_at(&self, idx: usize) -> *mut Block { 622 | let root: *mut Block = self.data.get().cast(); 623 | unsafe { root.add(idx) } 624 | } 625 | 626 | /// Safety precondition: idx must be in `0..L`. 627 | unsafe fn header_at(&self, idx: usize) -> *mut Header { 628 | header_in_block(unsafe { self.block_at(idx) }) 629 | } 630 | 631 | /// This function always is safe to call. 
If `idx` is very large, 632 | /// the returned value will simply be the last header in the free list. 633 | /// Note: this function may return a pointer to `base`. 634 | fn header_before(&self, idx: usize) -> *mut Header { 635 | let mut ptr = self.base.get(); 636 | 637 | unsafe { 638 | if (*ptr).length == OOM_MARKER || usize::from((*ptr).next) >= idx { 639 | return ptr; 640 | } 641 | 642 | loop { 643 | ptr = self.header_at((*ptr).next.into()); 644 | let next_idx = usize::from((*ptr).next); 645 | if next_idx == 0 || next_idx >= idx { 646 | return ptr; 647 | } 648 | } 649 | } 650 | } 651 | } 652 | 653 | impl Debug for Stalloc 654 | where 655 | Align: Alignment, 656 | { 657 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 658 | write!(f, "Stallocator with {L} blocks of {B} bytes each")?; 659 | 660 | let mut ptr = self.base.get(); 661 | if unsafe { (*ptr).length } == OOM_MARKER { 662 | return write!(f, "\n\tNo free blocks (OOM)"); 663 | } 664 | 665 | loop { 666 | unsafe { 667 | let idx = (*ptr).next.into(); 668 | ptr = self.header_at(idx); 669 | 670 | let length = (*ptr).length; 671 | if length == 1 { 672 | write!(f, "\n\tindex {idx}: {length} free block")?; 673 | } else { 674 | write!(f, "\n\tindex {idx}: {length} free blocks")?; 675 | } 676 | 677 | if (*ptr).next == 0 { 678 | return Ok(()); 679 | } 680 | } 681 | } 682 | } 683 | } 684 | 685 | impl Default for Stalloc 686 | where 687 | Align: Alignment, 688 | { 689 | fn default() -> Self { 690 | Self::new() 691 | } 692 | } 693 | 694 | #[cfg(any(feature = "allocator-api", feature = "allocator-api2"))] 695 | unsafe impl Allocator for &Stalloc 696 | where 697 | Align: Alignment, 698 | { 699 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 700 | // We can only allocate memory in units of `B`, so round up. 701 | let size = layout.size().div_ceil(B); 702 | let align = layout.align().div_ceil(B); 703 | 704 | // If `size` is zero, give away a dangling pointer. 705 | if size == 0 { 706 | let dangling = NonNull::new(layout.align() as _).unwrap(); 707 | return Ok(NonNull::slice_from_raw_parts(dangling, 0)); 708 | } 709 | 710 | // SAFETY: We have made sure that `size` and `align` are valid. 711 | unsafe { self.allocate_blocks(size, align) } 712 | .map(|p| NonNull::slice_from_raw_parts(p, size * B)) 713 | } 714 | 715 | fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { 716 | let ptr = self.allocate(layout)?; 717 | 718 | // We intentionally shorten the length of the allocated pointer and hence write fewer zeros. 719 | let ptr = NonNull::slice_from_raw_parts(ptr.cast(), layout.size()); 720 | 721 | // SAFETY: We are filling in the entire allocated range with zeros. 722 | unsafe { ptr.cast::().write_bytes(0, ptr.len()) } 723 | Ok(ptr) 724 | } 725 | 726 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 727 | let size = layout.size().div_ceil(B); 728 | 729 | if size == 0 { 730 | return; 731 | } 732 | 733 | // SAFETY: We just made sure that size != 0. Everything else is upheld by the caller. 734 | unsafe { self.deallocate_blocks(ptr, size) }; 735 | } 736 | 737 | unsafe fn grow( 738 | &self, 739 | ptr: NonNull, 740 | old_layout: Layout, 741 | new_layout: Layout, 742 | ) -> Result, AllocError> { 743 | let old_size = old_layout.size().div_ceil(B); 744 | let new_size = new_layout.size().div_ceil(B); 745 | let align = new_layout.align().div_ceil(B); 746 | 747 | // If the size hasn't changed, do nothing. 
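		// (Sizes are compared in whole blocks, so e.g. growing a 5-byte allocation to
		// 7 bytes with `B == 4` also lands here and is a no-op.)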
748 | if new_size == old_size { 749 | return Ok(NonNull::slice_from_raw_parts(ptr, new_size * B)); 750 | } 751 | 752 | // If the old size was 0, the pointer was dangling, so just allocate. 753 | if old_size == 0 { 754 | // SAFETY: we know that `new_size` is non-zero, because we just made sure 755 | // that `new_size != old_size`, and we know that `align` has a valid value. 756 | return unsafe { 757 | self.allocate_blocks(new_size, align) 758 | .map(|p| NonNull::slice_from_raw_parts(p, new_size * B)) 759 | }; 760 | } 761 | 762 | unsafe { 763 | // Try to grow in place. 764 | // SAFETY: `ptr` and `old_size` are upheld by the caller. As for `new_size`, 765 | // we have already made sure that `old_size != new_size`, and the fact that 766 | // new_size >= old_size is upheld by the caller. 767 | if self.grow_in_place(ptr, old_size, new_size).is_ok() { 768 | Ok(NonNull::slice_from_raw_parts(ptr, new_size * B)) 769 | } else { 770 | // Otherwise just reallocate and copy. 771 | // SAFETY: We have made sure that `new_size > 0` and that `align` is valid. 772 | let new = self.allocate_blocks(new_size, align)?; 773 | 774 | // SAFETY: We are copying all the necessary bytes from `ptr` into `new`. 775 | // `ptr` and `new` both point to an allocation of at least `old_layout.size()` bytes. 776 | ptr.copy_to_nonoverlapping(new, old_layout.size()); 777 | 778 | // SAFETY: We already made sure that old_size > 0. 779 | self.deallocate_blocks(ptr, old_size); 780 | 781 | Ok(NonNull::slice_from_raw_parts(new, new_size * B)) 782 | } 783 | } 784 | } 785 | 786 | unsafe fn grow_zeroed( 787 | &self, 788 | ptr: NonNull, 789 | old_layout: Layout, 790 | new_layout: Layout, 791 | ) -> Result, AllocError> { 792 | unsafe { 793 | // SAFETY: Upheld by the caller. 794 | let new_ptr = self.grow(ptr, old_layout, new_layout)?; 795 | let count = new_ptr.len() - old_layout.size(); 796 | 797 | // SAFETY: We are filling in the extra capacity with zeros. 798 | new_ptr 799 | .cast::() 800 | .add(old_layout.size()) 801 | .write_bytes(0, count); 802 | 803 | Ok(new_ptr) 804 | } 805 | } 806 | 807 | unsafe fn shrink( 808 | &self, 809 | ptr: NonNull, 810 | old_layout: Layout, 811 | new_layout: Layout, 812 | ) -> Result, AllocError> { 813 | let old_size = old_layout.size().div_ceil(B); 814 | let new_size = new_layout.size().div_ceil(B); 815 | 816 | // Check if the old size is zero, in which case we can just return a dangling pointer. 817 | if new_size == 0 { 818 | unsafe { 819 | // SAFETY: If `old_size` isn't zero, we need to free it. The caller 820 | // upholds that `ptr` and `old_size` are valid. 821 | if old_size != 0 { 822 | self.deallocate_blocks(ptr, old_size); 823 | } 824 | 825 | // SAFETY: Alignment is always nonzero. 826 | let dangling = NonNull::new_unchecked(new_layout.align() as _); 827 | 828 | return Ok(NonNull::slice_from_raw_parts(dangling, 0)); 829 | } 830 | } 831 | 832 | // We have to reallocate only if the alignment isn't good enough anymore. 833 | if ptr.as_ptr().addr() % new_layout.align() != 0 { 834 | // Since the address of `ptr` must be a multiple of `B` (upheld by the caller), 835 | // entering this branch means that `new_layout.align() > B`. 836 | let align = new_layout.align() / B; 837 | 838 | unsafe { 839 | // SAFETY: We just made sure that `new_size > 0`, and `align` is always valid. 840 | let new = self.allocate_blocks(new_size, align)?; 841 | 842 | // SAFETY: We are copying all the necessary bytes from `ptr` into `new`. 843 | // `ptr` and `new` both point to an allocation of at least `old_layout.size()` bytes. 
844 | ptr.copy_to_nonoverlapping(new, old_layout.size()); 845 | 846 | // SAFETY: We already made sure that old_size > 0. 847 | self.deallocate_blocks(ptr, old_size); 848 | 849 | return Ok(NonNull::slice_from_raw_parts(new, new_size * B)); 850 | } 851 | } 852 | 853 | // Check if the size hasn't changed. 854 | if old_size == new_size { 855 | return Ok(NonNull::slice_from_raw_parts(ptr, old_size * B)); 856 | } 857 | 858 | // SAFETY: We just made sure that new_size > 0 and old_size > new_size, 859 | // and `ptr` and `old_size` are valid (upheld by the caller). 860 | unsafe { 861 | self.shrink_in_place(ptr, old_size, new_size); 862 | } 863 | 864 | Ok(NonNull::slice_from_raw_parts(ptr, new_size * B)) 865 | } 866 | } 867 | 868 | unsafe impl ChainableAlloc for Stalloc 869 | where 870 | Align: Alignment, 871 | { 872 | fn addr_in_bounds(&self, addr: usize) -> bool { 873 | addr >= self.data.get().addr() && addr < self.data.get().addr() + B * L 874 | } 875 | } 876 | 877 | impl Stalloc 878 | where 879 | Align: Alignment, 880 | { 881 | /// Creates a new `AllocChain` containing this allocator and `next`. 882 | pub const fn chain(self, next: &T) -> AllocChain<'_, Self, T> 883 | where 884 | Self: Sized, 885 | { 886 | AllocChain::new(self, next) 887 | } 888 | } 889 | -------------------------------------------------------------------------------- /src/syncstalloc.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::{GlobalAlloc, Layout}; 2 | use core::fmt::{self, Debug, Formatter}; 3 | use core::marker::PhantomData; 4 | use core::ops::Deref; 5 | use core::ptr::NonNull; 6 | 7 | extern crate std; 8 | use std::sync::{Mutex, MutexGuard}; 9 | 10 | use crate::align::{Align, Alignment}; 11 | use crate::{AllocChain, AllocError, ChainableAlloc, UnsafeStalloc}; 12 | 13 | /// A wrapper around `UnsafeStalloc` that is safe to create because it prevents data races using a Mutex. 14 | /// In comparison to `UnsafeStalloc`, the mutex may cause a slight overhead. 15 | #[repr(C)] 16 | pub struct SyncStalloc(Mutex<()>, UnsafeStalloc) 17 | where 18 | Align: Alignment; 19 | 20 | /// A lock around `SyncStalloc`. Constructing this type is proof that the user holds an exclusive 21 | /// lock on the inner `UnsafeStalloc`. When this falls out of scope, the `SyncStalloc` is unlocked. 22 | /// 23 | /// This is effectively a reimplementation of `std::sync::MutexGuard`. 24 | pub struct StallocGuard<'a, const L: usize, const B: usize> 25 | where 26 | Align: Alignment, 27 | { 28 | _guard: MutexGuard<'a, ()>, 29 | inner: &'a UnsafeStalloc, 30 | _not_sync: PhantomData<*const ()>, 31 | } 32 | 33 | impl Deref for StallocGuard<'_, L, B> 34 | where 35 | Align: Alignment, 36 | { 37 | type Target = UnsafeStalloc; 38 | 39 | fn deref(&self) -> &Self::Target { 40 | self.inner 41 | } 42 | } 43 | 44 | impl SyncStalloc 45 | where 46 | Align: Alignment, 47 | { 48 | /// Initializes a new empty `SyncStalloc` instance. 49 | /// 50 | /// # Examples 51 | /// ``` 52 | /// use stalloc::SyncStalloc; 53 | /// 54 | /// let alloc = SyncStalloc::<200, 8>::new(); 55 | /// ``` 56 | #[must_use] 57 | pub const fn new() -> Self { 58 | // SAFETY: The `UnsafeStalloc` can only be accessed through `acquire_locked()`, 59 | // which guarantees that the mutex is locked before proceeding. 60 | Self(Mutex::new(()), unsafe { UnsafeStalloc::::new() }) 61 | } 62 | 63 | /// Checks if the allocator is completely out of memory. 
64 | /// If this is false, then you are guaranteed to be able to allocate 65 | /// a layout with a size and alignment of `B` bytes. 66 | /// This runs in O(1). 67 | pub fn is_oom(&self) -> bool { 68 | self.acquire_locked().is_oom() 69 | } 70 | 71 | /// Checks if the allocator is empty. 72 | /// If this is true, then you are guaranteed to be able to allocate 73 | /// a layout with a size of `B * L` bytes and an alignment of `B` bytes. 74 | /// If this is false, then this is guaranteed to be impossible. 75 | /// This runs in O(1). 76 | pub fn is_empty(&self) -> bool { 77 | self.acquire_locked().is_empty() 78 | } 79 | 80 | /// # Safety 81 | /// 82 | /// Calling this function immediately invalidates all pointers into the allocator. Calling 83 | /// `deallocate_blocks()` with an invalidated pointer will result in the free list being corrupted. 84 | pub unsafe fn clear(&self) { 85 | // SAFETY: Upheld by the caller. 86 | unsafe { self.acquire_locked().clear() } 87 | } 88 | 89 | /// Tries to allocate `count` blocks. If the allocation succeed, a pointer is returned. This function 90 | /// never allocates more than necessary. 91 | /// 92 | /// # Safety 93 | /// 94 | /// `size` must be nonzero, and `align` must be a power of 2 in the range `1..=2^29 / B`. 95 | /// 96 | /// # Errors 97 | /// 98 | /// Will return `AllocError` if the allocation was unsuccessful, in which case this function was a no-op. 99 | pub unsafe fn allocate_blocks( 100 | &self, 101 | size: usize, 102 | align: usize, 103 | ) -> Result, AllocError> { 104 | // SAFETY: Upheld by the caller. 105 | unsafe { self.acquire_locked().allocate_blocks(size, align) } 106 | } 107 | 108 | /// Deallocates a pointer. 109 | /// 110 | /// # Safety 111 | /// 112 | /// `ptr` must point to an allocation, and `size` must be the number of blocks 113 | /// in the allocation. That is, `size` is always in `1..=L`. 114 | pub unsafe fn deallocate_blocks(&self, ptr: NonNull, size: usize) { 115 | // SAFETY: Upheld by the caller. 116 | unsafe { self.acquire_locked().deallocate_blocks(ptr, size) } 117 | } 118 | 119 | /// Shrinks the allocation. This function always succeeds and never reallocates. 120 | /// 121 | /// # Safety 122 | /// 123 | /// `ptr` must point to a valid allocation of `old_size` blocks, and `new_size` must be in `1..old_size`. 124 | pub unsafe fn shrink_in_place(&self, ptr: NonNull, old_size: usize, new_size: usize) { 125 | // SAFETY: Upheld by the caller. 126 | unsafe { 127 | self.acquire_locked() 128 | .shrink_in_place(ptr, old_size, new_size); 129 | } 130 | } 131 | 132 | /// Tries to grow the current allocation in-place. If that isn't possible, this function is a no-op. 133 | /// 134 | /// # Safety 135 | /// 136 | /// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`. 137 | /// 138 | /// # Errors 139 | /// 140 | /// Will return `AllocError` if the grow was unsuccessful, in which case this function was a no-op. 141 | pub unsafe fn grow_in_place( 142 | &self, 143 | ptr: NonNull, 144 | old_size: usize, 145 | new_size: usize, 146 | ) -> Result<(), AllocError> { 147 | // SAFETY: Upheld by the caller. 148 | unsafe { self.acquire_locked().grow_in_place(ptr, old_size, new_size) } 149 | } 150 | 151 | /// Tries to grow the current allocation in-place. If that isn't possible, the allocator grows by as much 152 | /// as it is able to, and the new length of the allocation is returned. The new length is guaranteed to be 153 | /// in the range `old_size..=new_size`. 
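	///
	/// A sketch mirroring the `Stalloc::grow_up_to` example, but going through the
	/// thread-safe wrapper:
	///
	/// ```
	/// use stalloc::SyncStalloc;
	///
	/// let alloc = SyncStalloc::<7, 4>::new();
	/// unsafe {
	///     let ptr = alloc.allocate_blocks(3, 1).unwrap();
	///     let new_size = alloc.grow_up_to(ptr, 3, 9999); // try to grow far past the end
	///     assert_eq!(new_size, 7); // can only grow up to 7 blocks
	/// }
	/// ```
	///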
154 | 	/// # Safety
155 | 	///
156 | 	/// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`.
157 | 	pub unsafe fn grow_up_to(&self, ptr: NonNull<u8>, old_size: usize, new_size: usize) -> usize {
158 | 		// SAFETY: Upheld by the caller.
159 | 		unsafe { self.acquire_locked().grow_up_to(ptr, old_size, new_size) }
160 | 	}
161 | 
162 | 	/// Acquires an exclusive lock for the allocator. This can be used to chain multiple
163 | 	/// operations on the allocator without having to repeatedly acquire locks for each one.
164 | 	///
165 | 	/// # Example
166 | 	/// ```
167 | 	/// use stalloc::SyncStalloc;
168 | 	///
169 | 	/// let alloc = SyncStalloc::<100, 4>::new();
170 | 	///
171 | 	/// let lock = alloc.acquire_locked();
172 | 	/// for _ in 0..20 {
173 | 	/// 	// make multiple allocations in a row
174 | 	/// 	unsafe { lock.allocate_blocks(5, 1) }.unwrap();
175 | 	/// }
176 | 	/// drop(lock); // until we drop the lock, all accesses to `alloc` will block
177 | 	///
178 | 	/// assert!(alloc.is_oom());
179 | 	/// ```
180 | 	pub fn acquire_locked(&self) -> StallocGuard<'_, L, B> {
181 | 		// SAFETY: if this Mutex is poisoned, it means that one of the allocator functions panicked,
182 | 		// which is already declared to be UB. Therefore, we can assume that this is never poisoned.
183 | 		StallocGuard {
184 | 			_guard: unsafe { self.0.lock().unwrap_unchecked() },
185 | 			inner: &self.1,
186 | 			_not_sync: PhantomData,
187 | 		}
188 | 	}
189 | }
190 | 
191 | impl<const L: usize, const B: usize> Default for SyncStalloc<L, B>
192 | where
193 | 	Align<B>: Alignment,
194 | {
195 | 	fn default() -> Self {
196 | 		Self::new()
197 | 	}
198 | }
199 | 
200 | impl<const L: usize, const B: usize> Debug for SyncStalloc<L, B>
201 | where
202 | 	Align<B>: Alignment,
203 | {
204 | 	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
205 | 		write!(f, "{:?}", self.acquire_locked().inner)
206 | 	}
207 | }
208 | 
209 | unsafe impl<const L: usize, const B: usize> GlobalAlloc for SyncStalloc<L, B>
210 | where
211 | 	Align<B>: Alignment,
212 | {
213 | 	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
214 | 		// SAFETY: Upheld by the caller.
215 | 		unsafe { self.acquire_locked().alloc(layout) }
216 | 	}
217 | 
218 | 	unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
219 | 		// SAFETY: Upheld by the caller.
220 | 		unsafe { self.acquire_locked().alloc_zeroed(layout) }
221 | 	}
222 | 
223 | 	unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
224 | 		// SAFETY: Upheld by the caller.
225 | 		unsafe { self.acquire_locked().dealloc(ptr, layout) }
226 | 	}
227 | 
228 | 	unsafe fn realloc(&self, ptr: *mut u8, old_layout: Layout, new_size: usize) -> *mut u8 {
229 | 		// SAFETY: Upheld by the caller.
230 | 		unsafe { self.acquire_locked().realloc(ptr, old_layout, new_size) }
231 | 	}
232 | }
233 | 
234 | #[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
235 | use crate::Allocator;
236 | 
237 | #[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
238 | unsafe impl<const L: usize, const B: usize> Allocator for &SyncStalloc<L, B>
239 | where
240 | 	Align<B>: Alignment,
241 | {
242 | 	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
243 | 		(&*self.acquire_locked()).allocate(layout)
244 | 	}
245 | 
246 | 	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
247 | 		// SAFETY: Upheld by the caller.
248 | 		unsafe {
249 | 			(&*self.acquire_locked()).deallocate(ptr, layout);
250 | 		}
251 | 	}
252 | 
253 | 	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
254 | 		(&*self.acquire_locked()).allocate_zeroed(layout)
255 | 	}
256 | 
257 | 	unsafe fn grow(
258 | 		&self,
259 | 		ptr: NonNull<u8>,
260 | 		old_layout: Layout,
261 | 		new_layout: Layout,
262 | 	) -> Result<NonNull<[u8]>, AllocError> {
263 | 		// SAFETY: Upheld by the caller.
264 | 		unsafe { (&*self.acquire_locked()).grow(ptr, old_layout, new_layout) }
265 | 	}
266 | 
267 | 	unsafe fn grow_zeroed(
268 | 		&self,
269 | 		ptr: NonNull<u8>,
270 | 		old_layout: Layout,
271 | 		new_layout: Layout,
272 | 	) -> Result<NonNull<[u8]>, AllocError> {
273 | 		// SAFETY: Upheld by the caller.
274 | 		unsafe { (&*self.acquire_locked()).grow_zeroed(ptr, old_layout, new_layout) }
275 | 	}
276 | 
277 | 	unsafe fn shrink(
278 | 		&self,
279 | 		ptr: NonNull<u8>,
280 | 		old_layout: Layout,
281 | 		new_layout: Layout,
282 | 	) -> Result<NonNull<[u8]>, AllocError> {
283 | 		// SAFETY: Upheld by the caller.
284 | 		unsafe { (&*self.acquire_locked()).shrink(ptr, old_layout, new_layout) }
285 | 	}
286 | 
287 | 	fn by_ref(&self) -> &Self
288 | 	where
289 | 		Self: Sized,
290 | 	{
291 | 		self
292 | 	}
293 | }
294 | 
295 | unsafe impl<const L: usize, const B: usize> ChainableAlloc for SyncStalloc<L, B>
296 | where
297 | 	Align<B>: Alignment,
298 | {
299 | 	fn addr_in_bounds(&self, addr: usize) -> bool {
300 | 		self.1.addr_in_bounds(addr)
301 | 	}
302 | }
303 | 
304 | impl<const L: usize, const B: usize> SyncStalloc<L, B>
305 | where
306 | 	Align<B>: Alignment,
307 | {
308 | 	/// Creates a new `AllocChain` containing this allocator and `next`.
309 | 	pub const fn chain<T>(self, next: &T) -> AllocChain<'_, Self, T>
310 | 	where
311 | 		Self: Sized,
312 | 	{
313 | 		AllocChain::new(self, next)
314 | 	}
315 | }
316 | 
--------------------------------------------------------------------------------
/src/tests.rs:
--------------------------------------------------------------------------------
1 | #![allow(clippy::nursery)]
2 | #![allow(clippy::cast_possible_truncation)]
3 | 
4 | use crate::Stalloc;
5 | 
6 | extern crate alloc;
7 | extern crate std;
8 | use alloc::boxed::Box;
9 | use alloc::vec::Vec;
10 | use core::mem;
11 | use core::mem::MaybeUninit;
12 | #[allow(unused_imports)]
13 | use std::dbg;
14 | 
15 | #[test]
16 | fn test_vec() {
17 | 	let alloc = Stalloc::<1, 4>::new();
18 | 	let mut v: Vec<u8> = Vec::with_capacity_in(4, &alloc);
19 | 	for i in 0..v.capacity() {
20 | 		v.push(42 + i as u8);
21 | 	}
22 | }
23 | 
24 | #[test]
25 | fn test_2_vecs() {
26 | 	let alloc = Stalloc::<2, 4>::new();
27 | 	let mut v: Vec<u8> = Vec::with_capacity_in(4, &alloc);
28 | 	for i in 0..v.capacity() {
29 | 		v.push(42 + i as u8);
30 | 	}
31 | 	let mut v: Vec<u8> = Vec::with_capacity_in(4, &alloc);
32 | 	for i in 0..v.capacity() {
33 | 		v.push(42 + i as u8);
34 | 	}
35 | }
36 | 
37 | #[test]
38 | fn test_differently_sized_vecs() {
39 | 	let alloc = Stalloc::<28, 4>::new();
40 | 	let _v: Vec = Vec::with_capacity_in(1, &alloc);
41 | 	let _v: Vec = Vec::with_capacity_in(2, &alloc);
42 | 	let _v: Vec = Vec::with_capacity_in(3, &alloc);
43 | 	let _v: Vec = Vec::with_capacity_in(4, &alloc);
44 | 	let _v: Vec = Vec::with_capacity_in(5, &alloc);
45 | 	let _v: Vec = Vec::with_capacity_in(6, &alloc);
46 | 	let _v: Vec = Vec::with_capacity_in(7, &alloc);
47 | 	assert!(alloc.is_oom());
48 | }
49 | 
50 | #[test]
51 | #[should_panic(expected = "TryReserveError")]
52 | fn test_oom() {
53 | 	let alloc = Stalloc::<3, 4>::new();
54 | 	let mut v: Vec<u8> = Vec::try_with_capacity_in(8, &alloc).unwrap();
55 | 	for i in 0..v.capacity() {
56 | 		v.push(42 + i as u8);
57 | 	}
58 | 	let mut v: Vec<u8> = Vec::try_with_capacity_in(8, &alloc).unwrap();
59 | 	for i in 0..v.capacity() {
60 | 		v.push(42 + i as u8);
61 | 	}
62 | }
63 | 
64 | #[test]
65 | #[should_panic(expected = "TryReserveError")]
66 | fn test_oom2() {
67 | 	let alloc = Stalloc::<4, 4>::new();
68 | 	let _v: Vec = Vec::try_with_capacity_in(1, &alloc).unwrap();
69 | 	let _v: Vec = Vec::try_with_capacity_in(1, &alloc).unwrap();
70 | 	let _v: Vec = Vec::try_with_capacity_in(1, &alloc).unwrap();
71 | 	let _v: Vec = Vec::try_with_capacity_in(1, &alloc).unwrap();
72 | 	let _v: Vec = Vec::try_with_capacity_in(1, &alloc).unwrap();
73 | }
74 | 
75 | #[test]
76 | fn test_free() {
77 | 	let alloc = Stalloc::<4, 4>::new();
78 | 	let v: Vec = Vec::with_capacity_in(4, &alloc);
79 | 	drop(v);
80 | 	let v: Vec = Vec::with_capacity_in(4, &alloc);
81 | 	drop(v);
82 | 	let v: Vec = Vec::with_capacity_in(4, &alloc);
83 | 	drop(v);
84 | 	let v: Vec = Vec::with_capacity_in(4, &alloc);
85 | 	drop(v);
86 | 	assert!(alloc.is_empty());
87 | }
88 | 
89 | #[test]
90 | fn test_free_and_realloc() {
91 | 	let alloc = Stalloc::<4, 4>::new();
92 | 	let v1: Vec = Vec::with_capacity_in(1, &alloc);
93 | 	let v2: Vec = Vec::with_capacity_in(1, &alloc);
94 | 	let v3: Vec = Vec::with_capacity_in(1, &alloc);
95 | 	let v4: Vec = Vec::with_capacity_in(1, &alloc);
96 | 	drop(v2);
97 | 	drop(v4);
98 | 	drop(v1);
99 | 	drop(v3);
100 | 	let v5: Vec = Vec::with_capacity_in(4, &alloc);
101 | 	drop(v5);
102 | 	assert!(alloc.is_empty());
103 | }
104 | 
105 | #[test]
106 | fn test_complex_alloc_and_free() {
107 | 	let alloc = Stalloc::<64, 8>::new();
108 | 
109 | 	let v1: Vec = Vec::with_capacity_in(4, &alloc);
110 | 	let v2: Vec = Vec::with_capacity_in(8, &alloc);
111 | 	let v3: Vec = Vec::with_capacity_in(12, &alloc);
112 | 	let v4: Vec = Vec::with_capacity_in(6, &alloc);
113 | 	drop(v2);
114 | 	let v5: Vec = Vec::with_capacity_in(6, &alloc);
115 | 	let v6: Vec = Vec::with_capacity_in(3, &alloc);
116 | 	drop(v1);
117 | 	let v7: Vec = Vec::with_capacity_in(5, &alloc);
118 | 	let v8: Vec = Vec::with_capacity_in(2, &alloc);
119 | 	drop(v3);
120 | 	let v9: Vec = Vec::with_capacity_in(10, &alloc);
121 | 	drop(v4);
122 | 	drop(v6);
123 | 	let v10: Vec = Vec::with_capacity_in(4, &alloc);
124 | 	let v11: Vec = Vec::with_capacity_in(7, &alloc);
125 | 	drop(v5);
126 | 	let v12: Vec = Vec::with_capacity_in(6, &alloc);
127 | 	drop(v7);
128 | 	drop(v8);
129 | 	let v13: Vec = Vec::with_capacity_in(9, &alloc);
130 | 	drop(v9);
131 | 	let v14: Vec = Vec::with_capacity_in(5, &alloc);
132 | 	drop(v10);
133 | 	drop(v11);
134 | 	drop(v12);
135 | 	drop(v13);
136 | 	drop(v14);
137 | 
138 | 	assert!(alloc.is_empty());
139 | }
140 | 
141 | #[test]
142 | fn test_shrink() {
143 | 	let alloc = Stalloc::<6, 4>::new();
144 | 
145 | 	let mut v: Vec = Vec::with_capacity_in(6, &alloc);
146 | 	assert!(alloc.is_oom());
147 | 	v.shrink_to(5);
148 | 	assert!(!alloc.is_oom());
149 | 	v.shrink_to(4);
150 | 	drop(v);
151 | 	assert!(alloc.is_empty());
152 | }
153 | 
154 | #[test]
155 | fn test_shrink2() {
156 | 	let alloc = Stalloc::<6, 4>::new();
157 | 
158 | 	let mut v: Vec = Vec::with_capacity_in(6, &alloc);
159 | 	v.shrink_to(0);
160 | 	assert!(alloc.is_empty());
161 | }
162 | 
163 | #[test]
164 | fn test_shrink3() {
165 | 	let alloc = Stalloc::<10, 4>::new();
166 | 
167 | 	let mut v1: Vec = Vec::with_capacity_in(8, &alloc);
168 | 	v1.shrink_to(6);
169 | 	let v2: Vec = Vec::with_capacity_in(4, &alloc);
170 | 	assert!(alloc.is_oom());
171 | 	v1.shrink_to(1);
172 | 	let v3: Vec = Vec::with_capacity_in(5, &alloc);
173 | 
174 | 	drop(v2);
175 | 	drop(v1);
176 | 	drop(v3);
177 | 
178 | 	assert!(alloc.is_empty());
179 | }
180 | 
181 | #[test]
182 | fn test_grow() {
183 | 	let alloc = Stalloc::<6, 4>::new();
184 | 
185 | 	let mut v: Vec = Vec::with_capacity_in(3, &alloc);
186 | 	v.reserve_exact(6);
187 | 	assert!(alloc.is_oom());
188 | }
189 | 
190 | #[test]
191 | fn test_grow_realloc() {
192 | 	let alloc = Stalloc::<12, 4>::new();
193 | 
194 | 	let mut v1: Vec = Vec::with_capacity_in(3, &alloc);
195 | 	let _v2: Vec = Vec::with_capacity_in(3, &alloc);
196 | 	v1.reserve_exact(6);
197 | 	let _v3: Vec = Vec::with_capacity_in(3, &alloc);
198 | 	assert!(alloc.is_oom());
199 | }
200 | 
201 | #[test]
202 | fn test_multiple_allocations_and_drops() {
203 | 	let alloc = Stalloc::<16, 4>::new();
204 | 
205 | 	let mut v1: Vec = Vec::with_capacity_in(2, &alloc);
206 | 	let v2: Vec = Vec::with_capacity_in(5, &alloc);
207 | 	let v3: Vec = Vec::with_capacity_in(9, &alloc);
208 | 	assert!(alloc.is_oom());
209 | 
210 | 	drop(v2);
211 | 	v1.reserve_exact(7);
212 | 	assert!(alloc.is_oom());
213 | 
214 | 	drop(v3);
215 | 	v1.reserve_exact(16);
216 | 	assert!(alloc.is_oom());
217 | 
218 | 	drop(v1);
219 | 	assert!(alloc.is_empty());
220 | }
221 | 
222 | #[test]
223 | fn test_simple_push() {
224 | 	let alloc = Stalloc::<128, 4>::new();
225 | 
226 | 	let mut v: Vec = Vec::new_in(&alloc);
227 | 	for i in 0..128 {
228 | 		v.push(42 + i);
229 | 	}
230 | 	assert!(alloc.is_oom());
231 | }
232 | 
233 | #[test]
234 | fn test_boxes() {
235 | 	let alloc = Stalloc::<128, 4>::new();
236 | 
237 | 	for _ in 0..128 {
238 | 		let b = Box::new_in(42, &alloc);
239 | 		mem::forget(b);
240 | 	}
241 | 	assert!(alloc.is_oom());
242 | }
243 | 
244 | #[test]
245 | fn self_referential() {
246 | 	let alloc = Stalloc::<256, 16>::new();
247 | 
248 | 	let mut boxes = Vec::with_capacity_in(128, &alloc);
249 | 	for _ in 0..128 {
250 | 		boxes.push(Box::new_in(*b"hi there", &alloc));
251 | 	}
252 | 	assert!(alloc.is_oom());
253 | 
254 | 	drop(boxes);
255 | 	assert!(alloc.is_empty());
256 | }
257 | 
258 | #[test]
259 | fn self_referential_growing() {
260 | 	let alloc = Stalloc::<512, 16>::new();
261 | 
262 | 	let mut boxes = Vec::new_in(&alloc);
263 | 	for _ in 0..128 {
264 | 		boxes.push(Box::new_in(*b"hi there", &alloc));
265 | 	}
266 | 
267 | 	drop(boxes);
268 | 	assert!(alloc.is_empty());
269 | }
270 | 
271 | #[test]
272 | fn grow_from_1() {
273 | 	let alloc = Stalloc::<256, 8>::new();
274 | 
275 | 	let mut v = Vec::with_capacity_in(1, &alloc);
276 | 	for i in 0..256 {
277 | 		v.push(42 + i);
278 | 	}
279 | }
280 | 
281 | #[test]
282 | fn test_grow_and_free() {
283 | 	let alloc = Stalloc::<4, 4>::new();
284 | 
285 | 	let mut v1: Vec = Vec::with_capacity_in(1, &alloc);
286 | 	let _v2: Vec = Vec::with_capacity_in(1, &alloc);
287 | 	v1.reserve_exact(2);
288 | 	let _v3: Vec = Vec::with_capacity_in(1, &alloc);
289 | 	assert!(alloc.is_oom());
290 | }
291 | 
292 | #[test]
293 | fn vec_and_growing_vec() {
294 | 	let alloc = Stalloc::<9, 4>::new();
295 | 
296 | 	let mut v1: Vec = Vec::with_capacity_in(1, &alloc);
297 | 	v1.push(0);
298 | 	let mut v2 = Vec::with_capacity_in(4, &alloc);
299 | 	v2.push(1);
300 | 	v2.push(2);
301 | 	v2.push(3);
302 | 	v2.push(4);
303 | 	v2.push(5);
304 | 
305 | 	assert!(alloc.is_oom());
306 | }
307 | 
308 | #[test]
309 | fn vec_and_growing_vec2() {
310 | 	let alloc = Stalloc::<14, 4>::new();
311 | 
312 | 	let mut v1: Vec = Vec::with_capacity_in(1, &alloc);
313 | 	v1.push(0);
314 | 
315 | 	let mut v2 = Vec::with_capacity_in(4, &alloc);
316 | 	v2.extend_from_slice(&[1, 2, 3, 4]);
317 | 
318 | 	let mut v3: Vec = Vec::with_capacity_in(1, &alloc);
319 | 	v3.push(0);
320 | 
321 | 	v2.extend_from_slice(&[5, 6, 7, 8]);
322 | 
323 | 	let mut v4: Vec = Vec::with_capacity_in(4, &alloc);
324 | 	v4.extend_from_slice(&[11, 12, 13, 14]);
325 | 
326 | 	assert!(alloc.is_oom());
327 | }
328 | 
329 | #[test]
330 | fn test_small_alloc() {
331 | 	let alloc = Stalloc::<3, 8>::new();
332 | 
333 | 	let a = Box::new_in(0u8, &alloc);
334 | 	let b = Box::new_in(0u16, &alloc);
335 | 	let c = Box::new_in(0u32, &alloc);
336 | 	assert!(alloc.is_oom());
337 | 
338 | 	drop(b);
339 | 	drop(a);
340 | 	drop(c);
341 | 	assert!(alloc.is_empty());
342 | }
343 | 
344 | #[test]
345 | fn test_large_and_small_alloc() {
346 | 	let alloc = Stalloc::<12, 4>::new();
347 | 
348 | 	let a = Box::new_in(0u64, &alloc);
349 | 	let b = Box::new_in(1u128, &alloc);
350 | 	let c = Box::new_in(2u64, &alloc);
351 | 
352 | 	let small1 = Box::new_in(42u8, &alloc);
353 | 	let small2 = small1.clone();
354 | 	let small3 = small1.clone();
355 | 	let small4 = small1.clone();
356 | 
357 | 	assert!(alloc.is_oom());
358 | 
359 | 	drop(c);
360 | 	drop(small3);
361 | 	drop(small2);
362 | 	drop(a);
363 | 	drop(small4);
364 | 	drop(small1);
365 | 	drop(b);
366 | 
367 | 	assert!(alloc.is_empty());
368 | }
369 | 
370 | #[test]
371 | fn test_boxes_vec_grow() {
372 | 	let alloc = Stalloc::<12, 4>::new();
373 | 
374 | 	let a = Box::new_in(MaybeUninit::::uninit(), &alloc);
375 | 	let b = Box::new_in(5, &alloc);
376 | 	let mut c = Vec::with_capacity_in(9, &alloc);
377 | 	drop(b);
378 | 	c.reserve_exact(10);
379 | 	c.push(1);
380 | 	drop(a);
381 | }
382 | 
383 | #[test]
384 | fn test_multiple_shrink() {
385 | 	let alloc = Stalloc::<24, 4>::new();
386 | 
387 | 	for i in 0..24 {
388 | 		let mut v: Vec = Vec::with_capacity_in(24 - i, &alloc);
389 | 		v.shrink_to(1);
390 | 		mem::forget(v);
391 | 	}
392 | 
393 | 	assert!(alloc.is_oom());
394 | }
395 | 
396 | #[test]
397 | fn test_zeroed() {
398 | 	let alloc = Stalloc::<256, 4>::new();
399 | 
400 | 	let mut v: Vec = Vec::with_capacity_in(256, &alloc);
401 | 	v.extend_from_slice(&[0; 256]);
402 | 	assert!(v.iter().all(|&b| b == 0));
403 | 
404 | 	assert!(alloc.is_oom());
405 | }
406 | 
407 | #[test]
408 | fn test_vec_capacity() {
409 | 	let alloc = Stalloc::<1, 1024>::new();
410 | 
411 | 	let mut v: Vec<u8> = Vec::with_capacity_in(1, &alloc);
412 | 
413 | 	for i in 0..1024u32 {
414 | 		v.push(i as u8);
415 | 	}
416 | }
417 | 
418 | #[test]
419 | fn test34() {
420 | 	let _a = Stalloc::<34, 4>::new();
421 | 	let _b = crate::SyncStalloc::<34, 4>::new();
422 | }
423 | 
--------------------------------------------------------------------------------
/src/unsafestalloc.rs:
--------------------------------------------------------------------------------
1 | use core::alloc::{GlobalAlloc, Layout};
2 | use core::fmt::{self, Debug, Formatter};
3 | use core::hint::assert_unchecked;
4 | use core::ops::Deref;
5 | use core::ptr::{self, NonNull};
6 | 
7 | use crate::align::{Align, Alignment};
8 | use crate::{AllocChain, ChainableAlloc, Stalloc};
9 | 
10 | /// A wrapper around `Stalloc` that implements both `Sync` and `GlobalAlloc`.
11 | ///
12 | /// This type is unsafe to create, because it does not prevent data races.
13 | /// Therefore, it is encouraged to only use it in single-threaded environments.
14 | #[repr(transparent)]
15 | pub struct UnsafeStalloc<const L: usize, const B: usize>(Stalloc<L, B>)
16 | where
17 | 	Align<B>: Alignment;
18 | 
19 | impl<const L: usize, const B: usize> Deref for UnsafeStalloc<L, B>
20 | where
21 | 	Align<B>: Alignment,
22 | {
23 | 	type Target = Stalloc<L, B>;
24 | 
25 | 	fn deref(&self) -> &Self::Target {
26 | 		&self.0
27 | 	}
28 | }
29 | 
30 | impl<const L: usize, const B: usize> Debug for UnsafeStalloc<L, B>
31 | where
32 | 	Align<B>: Alignment,
33 | {
34 | 	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
35 | 		write!(f, "{:?}", self.0)
36 | 	}
37 | }
38 | 
39 | impl<const L: usize, const B: usize> UnsafeStalloc<L, B>
40 | where
41 | 	Align<B>: Alignment,
42 | {
43 | 	/// # Safety
44 | 	///
45 | 	/// `UnsafeStalloc` does not prevent data races. It is strongly recommended
46 | 	/// to only use it in a single-threaded environment.
47 | 	///
48 | 	/// # Examples
49 | 	/// ```
50 | 	/// use stalloc::UnsafeStalloc;
51 | 	///
52 | 	/// let alloc = unsafe { UnsafeStalloc::<200, 8>::new() };
53 | 	/// ```
54 | 	#[must_use]
55 | 	pub const unsafe fn new() -> Self {
56 | 		Self(Stalloc::<L, B>::new())
57 | 	}
58 | }
59 | 
60 | unsafe impl<const L: usize, const B: usize> Sync for UnsafeStalloc<L, B> where Align<B>: Alignment {}
61 | 
62 | #[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
63 | use crate::{AllocError, Allocator};
64 | 
65 | #[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
66 | unsafe impl<const L: usize, const B: usize> Allocator for &UnsafeStalloc<L, B>
67 | where
68 | 	Align<B>: Alignment,
69 | {
70 | 	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
71 | 		(&self.0).allocate(layout)
72 | 	}
73 | 
74 | 	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
75 | 		// SAFETY: Upheld by the caller.
76 | 		unsafe {
77 | 			(&self.0).deallocate(ptr, layout);
78 | 		}
79 | 	}
80 | 
81 | 	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
82 | 		(&self.0).allocate_zeroed(layout)
83 | 	}
84 | 
85 | 	unsafe fn grow(
86 | 		&self,
87 | 		ptr: NonNull<u8>,
88 | 		old_layout: Layout,
89 | 		new_layout: Layout,
90 | 	) -> Result<NonNull<[u8]>, AllocError> {
91 | 		// SAFETY: Upheld by the caller.
92 | 		unsafe { (&self.0).grow(ptr, old_layout, new_layout) }
93 | 	}
94 | 
95 | 	unsafe fn grow_zeroed(
96 | 		&self,
97 | 		ptr: NonNull<u8>,
98 | 		old_layout: Layout,
99 | 		new_layout: Layout,
100 | 	) -> Result<NonNull<[u8]>, AllocError> {
101 | 		// SAFETY: Upheld by the caller.
102 | 		unsafe { (&self.0).grow_zeroed(ptr, old_layout, new_layout) }
103 | 	}
104 | 
105 | 	unsafe fn shrink(
106 | 		&self,
107 | 		ptr: NonNull<u8>,
108 | 		old_layout: Layout,
109 | 		new_layout: Layout,
110 | 	) -> Result<NonNull<[u8]>, AllocError> {
111 | 		// SAFETY: Upheld by the caller.
112 | 		unsafe { (&self.0).shrink(ptr, old_layout, new_layout) }
113 | 	}
114 | 
115 | 	fn by_ref(&self) -> &Self
116 | 	where
117 | 		Self: Sized,
118 | 	{
119 | 		self
120 | 	}
121 | }
122 | 
123 | unsafe impl<const L: usize, const B: usize> GlobalAlloc for UnsafeStalloc<L, B>
124 | where
125 | 	Align<B>: Alignment,
126 | {
127 | 	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
128 | 		let size = layout.size().div_ceil(B);
129 | 		let align = layout.align().div_ceil(B);
130 | 
131 | 		// SAFETY: `size` and `align` are valid.
132 | 		unsafe {
133 | 			self.allocate_blocks(size, align)
134 | 				.map(|p| p.as_ptr().cast())
135 | 				.unwrap_or(ptr::null_mut())
136 | 		}
137 | 	}
138 | 
139 | 	unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
140 | 		let size = layout.size().div_ceil(B);
141 | 
142 | 		// SAFETY: Upheld by the caller.
143 | 		let new = unsafe { self.alloc(layout) };
144 | 		if !new.is_null() {
145 | 			// SAFETY: `new` points to a valid allocation of `size * B` bytes.
146 | 			unsafe { ptr::write_bytes(new, 0, size * B) };
147 | 		}
148 | 		new
149 | 	}
150 | 
151 | 	unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
152 | 		let size = layout.size().div_ceil(B);
153 | 
154 | 		// SAFETY: Upheld by the caller.
155 | 		unsafe {
156 | 			self.deallocate_blocks(NonNull::new_unchecked(ptr), size);
157 | 		}
158 | 	}
159 | 
160 | 	unsafe fn realloc(&self, ptr: *mut u8, old_layout: Layout, new_size: usize) -> *mut u8 {
161 | 		// Assert unsafe precondition.
162 | 		unsafe {
163 | 			assert_unchecked(new_size > 0);
164 | 		}
165 | 
166 | 		let old_size = old_layout.size() / B;
167 | 		let new_size = new_size.div_ceil(B);
168 | 
169 | 		unsafe {
170 | 			// SAFETY: Upheld by the caller.
171 | 			let ptr: NonNull<u8> = NonNull::new_unchecked(ptr);
172 | 
173 | 			// SAFETY: Upheld by the caller.
174 | 			if new_size > old_size && self.grow_in_place(ptr, old_size, new_size).is_ok() {
175 | 				return ptr.as_ptr();
176 | 			} else if new_size > old_size {
177 | 				// Reallocate and copy.
178 | 				// SAFETY: We have made sure that `new_size > 0` and that `align` is valid.
179 | 				let Ok(new) = self.allocate_blocks(new_size, old_layout.align()) else {
180 | 					return ptr::null_mut();
181 | 				};
182 | 
183 | 				// SAFETY: We are copying all the necessary bytes from `ptr` into `new`.
184 | 				// `ptr` and `new` both point to an allocation of at least `old_layout.size()` bytes.
185 | 				ptr::copy_nonoverlapping(ptr.as_ptr(), new.as_ptr(), old_layout.size());
186 | 
187 | 				// SAFETY: The caller upholds that old_size > 0.
188 | 				self.deallocate_blocks(ptr, old_size);
189 | 
190 | 				return new.as_ptr();
191 | 			} else if old_size > new_size {
192 | 				// SAFETY: Upheld by the caller.
193 | 				self.shrink_in_place(ptr, old_size, new_size);
194 | 			}
195 | 
196 | 			ptr.as_ptr()
197 | 		}
198 | 	}
199 | }
200 | 
201 | unsafe impl<const L: usize, const B: usize> ChainableAlloc for UnsafeStalloc<L, B>
202 | where
203 | 	Align<B>: Alignment,
204 | {
205 | 	fn addr_in_bounds(&self, addr: usize) -> bool {
206 | 		self.0.addr_in_bounds(addr)
207 | 	}
208 | }
209 | 
210 | impl<const L: usize, const B: usize> UnsafeStalloc<L, B>
211 | where
212 | 	Align<B>: Alignment,
213 | {
214 | 	/// Creates a new `AllocChain` containing this allocator and `next`.
215 | 	pub const fn chain<T>(self, next: &T) -> AllocChain<'_, Self, T>
216 | 	where
217 | 		Self: Sized,
218 | 	{
219 | 		AllocChain::new(self, next)
220 | 	}
221 | }
222 | 
--------------------------------------------------------------------------------
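The `GlobalAlloc` implementations above convert byte-based layouts into block counts with `div_ceil`, while the unsafe API from the README works directly in block units. The following sketch is illustrative only and is not a file in this repository; it assumes the public `Stalloc` API shown in the README (`allocate_blocks`, `deallocate_blocks`, `is_empty`) and walks through the same rounding that `alloc` performs internally.

```rs
use stalloc::Stalloc;

fn main() {
	// 16 blocks of 4 bytes each: 64 usable bytes in total.
	let alloc = Stalloc::<16, 4>::new();

	// A 10-byte request rounds up to ceil(10 / 4) = 3 blocks, which is the
	// same `div_ceil` conversion that the `GlobalAlloc` impl performs.
	let blocks = 10usize.div_ceil(4);
	assert_eq!(blocks, 3);

	// SAFETY: `blocks` is nonzero and the alignment (1 block, i.e. 4 bytes)
	// is a power of two within the allowed range.
	let ptr = unsafe { alloc.allocate_blocks(blocks, 1) }.unwrap();
	assert!(!alloc.is_empty());

	// SAFETY: `ptr` came from `allocate_blocks` with this exact block count.
	unsafe { alloc.deallocate_blocks(ptr, blocks) };
	assert!(alloc.is_empty());
}
```

Because the allocator only tracks whole blocks, passing `deallocate_blocks` the same block count that was originally allocated is what keeps the free list consistent.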