├── .github └── workflows │ └── build.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── benches └── dynstack.rs └── src ├── alloc.rs ├── lib.rs ├── mem.rs └── stack_req.rs /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | # Run tests 2 | name: Test 3 | 4 | on: 5 | push: 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | 10 | jobs: 11 | cargo-benches: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v3 16 | 17 | - name: Install Rust 18 | uses: dtolnay/rust-toolchain@stable 19 | 20 | - name: Compile benches 21 | run: cargo bench --no-run 22 | 23 | cargo-tests: 24 | runs-on: ${{ matrix.os }} 25 | 26 | strategy: 27 | matrix: 28 | os: [ubuntu-latest, macos-latest, macos-13, windows-latest] 29 | 30 | steps: 31 | - uses: actions/checkout@v3 32 | 33 | - name: Install Rust 34 | uses: dtolnay/rust-toolchain@stable 35 | 36 | - name: Test debug 37 | run: cargo test 38 | 39 | - name: Test debug no-std 40 | run: cargo build --no-default-features 41 | 42 | cargo-tests-msrv: 43 | runs-on: ${{ matrix.os }} 44 | 45 | strategy: 46 | matrix: 47 | os: [ubuntu-latest, macos-latest, macos-13, windows-latest] 48 | 49 | steps: 50 | - uses: actions/checkout@v3 51 | 52 | - name: Install Rust 53 | uses: dtolnay/rust-toolchain@1.67.0 54 | 55 | - name: Test debug 56 | run: cargo build 57 | 58 | - name: Test debug no-std 59 | run: cargo build --no-default-features 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dyn-stack" 3 | version = "0.13.0" 4 | edition = "2021" 5 | authors = ["sarah <>"] 6 | description = "Dynamic stack wrapper for unsized allocations" 7 | readme = "README.md" 8 | repository = "https://github.com/kitegi/dynstack/" 9 | license = "MIT" 10 | keywords = ["stack", "allocation", "unsized", "array", "slice"] 11 | 12 | [dependencies] 13 | bytemuck = "1" 14 | 15 | [features] 16 | default = ["alloc", "std"] 17 | core-error = [] 18 | std = [] 19 | alloc = [] 20 | 21 | [dev-dependencies] 22 | criterion = "0.4" 23 | 24 | [[bench]] 25 | name = "dynstack" 26 | harness = false 27 | 28 | [package.metadata.docs.rs] 29 | all-features = true 30 | rustdoc-args = ["--cfg", "docsrs"] 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 sarah 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # dynstack
 2 | Stack that allows users to allocate dynamically sized arrays.
 3 | 
 4 | The stack wraps a buffer of bytes that it uses as a workspace.
 5 | Allocating an array takes a chunk of memory from the stack, which can be reused once the array
 6 | is dropped.
 7 | 
 8 | # Features
 9 | - `alloc`: enables the allocator API.
10 | 
11 | # Examples
12 | ```rust
13 | use core::mem::MaybeUninit;
14 | use dyn_stack::{DynStack, StackReq};
15 | 
16 | // We allocate enough storage for 3 `i32` and 4 `u8`.
17 | let mut buf = [MaybeUninit::uninit();
18 |     StackReq::new::<i32>(3)
19 |         .and(StackReq::new::<u8>(4))
20 |         .unaligned_bytes_required()];
21 | let stack = DynStack::new(&mut buf);
22 | 
23 | {
24 |     // We can have nested allocations.
25 |     // 3×`i32`
26 |     let (array_i32, substack) = stack.make_with::<i32>(3, |i| i as i32);
27 |     // and 4×`u8`
28 |     let (mut array_u8, _) = substack.make_with::<u8>(4, |_| 0);
29 | 
30 |     // We can read from the arrays,
31 |     assert_eq!(array_i32[0], 0);
32 |     assert_eq!(array_i32[1], 1);
33 |     assert_eq!(array_i32[2], 2);
34 | 
35 |     // and write to them.
36 |     array_u8[0] = 1;
37 | 
38 |     assert_eq!(array_u8[0], 1);
39 |     assert_eq!(array_u8[1], 0);
40 |     assert_eq!(array_u8[2], 0);
41 |     assert_eq!(array_u8[3], 0);
42 | }
43 | 
44 | {
45 |     // We can also have disjoint allocations.
46 |     // 3×`i32`
47 |     let (mut array_i32, _) = stack.make_with::<i32>(3, |i| i as i32);
48 |     assert_eq!(array_i32[0], 0);
49 |     assert_eq!(array_i32[1], 1);
50 |     assert_eq!(array_i32[2], 2);
51 | }
52 | 
53 | {
54 |     // or 4×`u8`
55 |     let (mut array_u8, _) = stack.make_with::<u8>(4, |i| i as u8 + 3);
56 |     assert_eq!(array_u8[0], 3);
57 |     assert_eq!(array_u8[1], 4);
58 |     assert_eq!(array_u8[2], 5);
59 |     assert_eq!(array_u8[3], 6);
60 | }
61 | ```
62 | 
--------------------------------------------------------------------------------
/benches/dynstack.rs:
--------------------------------------------------------------------------------
 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion};
 2 | use dyn_stack::{MemBuffer, MemStack, PodBuffer, PodStack, StackReq};
 3 | 
 4 | pub fn criterion_benchmark(c: &mut Criterion) {
 5 |     c.bench_function("memalloc", |b| {
 6 |         b.iter(|| black_box(MemBuffer::new(StackReq::new_aligned::<u8>(1 << 20, 16))))
 7 |     });
 8 |     c.bench_function("memalloc-zeroed", |b| {
 9 |         b.iter(|| black_box(PodBuffer::new(StackReq::new_aligned::<u8>(1 << 20, 16))))
10 |     });
11 | 
12 |     for n in [32, 64, 1024, 16384] {
13 |         let align = 64;
14 |         let single_scratch = StackReq::new_aligned::<f32>(n, align);
15 |         let scratch = single_scratch.and(single_scratch);
16 | 
17 |         let mut mem = MemBuffer::new(scratch);
18 |         let stack = MemStack::new(&mut *mem);
19 | 
20 |         {
21 |             let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32);
22 |             let (mut dst, _) = stack.make_aligned_with(n, align, |_| 0.0_f32);
23 |             c.bench_function(&format!("preallocated-{}", n), |b| {
24 |                 b.iter(|| {
25 |                     for (d, s) in dst.iter_mut().zip(src.iter()) {
26 |                         *d = s + s;
27 |                     }
28 |                     black_box(&mut dst);
29 |                 })
30 |             });
31 |         }
32 | 
33 |         {
34 |             let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32);
35 |             c.bench_function(&format!("allocate-on-demand-init-{}", n), |b| {
36 |                 b.iter(|| {
37 |                     let (mut dst, _) = stack.make_aligned_with(n, align, |_| 0.0_f32);
38 |                     for (d, s) in dst.iter_mut().zip(src.iter()) {
39 |                         *d = s + s;
40 |                     }
41 |                     black_box(&mut dst);
42 |                 })
43 |             });
44 |         }
45 | 
46 |         {
47 |             let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32);
48 |             c.bench_function(&format!("allocate-on-demand-uninit-{}", n), |b| {
49 |                 b.iter(|| {
50 |                     let (mut dst, _) = stack.make_aligned_uninit::<f32>(n, align);
51 |                     for (d, s) in dst.iter_mut().zip(src.iter()) {
52 |                         d.write(s + s);
53 |                     }
54 |                     black_box(&mut dst);
55 |                 })
56 |             });
57 |         }
58 | 
59 |         {
60 |             let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32);
61 |             c.bench_function(&format!("allocate-on-demand-collect-{}", n), |b| {
62 |                 b.iter(|| {
63 |                     let (mut dst, _) = stack
64 |                         .collect_aligned(align, src.iter().zip(src.iter()).map(|(a, b)| a + b));
65 |                     black_box(&mut dst);
66 |                 })
67 |             });
68 |         }
69 |     }
70 | 
71 |     for n in [32, 64, 1024, 16384] {
72 |         let align = 64;
73 |         let single_scratch = StackReq::new_aligned::<f32>(n, align);
74 |         let scratch = single_scratch.and(single_scratch);
75 | 
76 |         let mut mem = PodBuffer::new(scratch);
77 |         let stack = PodStack::new(&mut *mem);
78 | 
79 |         {
80 |             let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32);
81 |             let (mut dst, _) = stack.make_aligned_with(n, align, |_| 0.0_f32);
82 |             c.bench_function(&format!("pod-preallocated-{}", n), |b| {
83 |                 b.iter(|| {
84 |                     for (d, s) in dst.iter_mut().zip(src.iter()) {
85 |                         *d = s + s;
86 |                     }
87 |                     black_box(&mut dst);
88 |                 })
89 |             });
90 |         }
91 | 
92 |         {
93 |             let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32);
94 | 
c.bench_function(&format!("pod-allocate-on-demand-init-{}", n), |b| { 95 | b.iter(|| { 96 | let (mut dst, _) = stack.make_aligned_with(n, align, |_| 0.0_f32); 97 | for (d, s) in dst.iter_mut().zip(src.iter()) { 98 | *d = s + s; 99 | } 100 | black_box(&mut dst); 101 | }) 102 | }); 103 | } 104 | 105 | { 106 | let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32); 107 | c.bench_function(&format!("pod-allocate-on-demand-uninit-{}", n), |b| { 108 | b.iter(|| { 109 | let (mut dst, _) = stack.make_aligned_raw::(n, align); 110 | for (d, s) in dst.iter_mut().zip(src.iter()) { 111 | *d = s + s; 112 | } 113 | black_box(&mut dst); 114 | }) 115 | }); 116 | } 117 | 118 | { 119 | let (src, stack) = stack.make_aligned_with(n, align, |_| 0.0_f32); 120 | c.bench_function(&format!("pod-allocate-on-demand-collect-{}", n), |b| { 121 | b.iter(|| { 122 | let (mut dst, _) = stack 123 | .collect_aligned(align, src.iter().zip(src.iter()).map(|(a, b)| a + b)); 124 | black_box(&mut dst); 125 | }) 126 | }); 127 | } 128 | } 129 | } 130 | 131 | criterion_group!(benches, criterion_benchmark); 132 | criterion_main!(benches); 133 | -------------------------------------------------------------------------------- /src/alloc.rs: -------------------------------------------------------------------------------- 1 | // copied from libcore/liballoc 2 | 3 | use core::alloc::Layout; 4 | use core::cell::UnsafeCell; 5 | use core::fmt; 6 | use core::marker::PhantomData; 7 | use core::mem::MaybeUninit; 8 | use core::ptr; 9 | use core::ptr::NonNull; 10 | 11 | extern crate alloc; 12 | 13 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 14 | pub struct AllocError; 15 | 16 | #[cfg(any(feature = "std", feature = "core-error"))] 17 | impl crate::Error for AllocError {} 18 | 19 | impl fmt::Display for AllocError { 20 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 21 | f.write_str("memory allocation failed") 22 | } 23 | } 24 | 25 | /// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of 26 | /// data described via [`Layout`][]. 27 | /// 28 | /// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because having 29 | /// an allocator like `MyAllocator([u8; N])` cannot be moved, without updating the pointers to the 30 | /// allocated memory. 31 | /// 32 | /// Unlike [`alloc::alloc::GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying 33 | /// allocator does not support this (like jemalloc) or return a null pointer (such as 34 | /// `libc::malloc`), this must be caught by the implementation. 35 | /// 36 | /// ### Currently allocated memory 37 | /// 38 | /// Some of the methods require that a memory block be *currently allocated* via an allocator. This 39 | /// means that: 40 | /// 41 | /// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or 42 | /// [`shrink`], and 43 | /// 44 | /// * the memory block has not been subsequently deallocated, where blocks are either deallocated 45 | /// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or 46 | /// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer 47 | /// remains valid. 
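///
/// As an editor-added illustration (not part of the upstream documentation): a minimal
/// implementation can delegate every call to another allocator, here the [`Global`] allocator
/// defined later in this module. The `PassThrough` name is purely illustrative.
///
/// ```
/// use dyn_stack::alloc::{AllocError, Allocator, Global};
/// use core::alloc::Layout;
/// use core::ptr::NonNull;
///
/// struct PassThrough;
///
/// unsafe impl Allocator for PassThrough {
///     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
///         // Forward the request; the returned block is "currently allocated" by `Global`.
///         Global.allocate(layout)
///     }
///
///     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
///         // SAFETY: the caller guarantees that `ptr` is currently allocated via this
///         // allocator (which delegates to `Global`) and that `layout` fits the block.
///         unsafe { Global.deallocate(ptr, layout) }
///     }
/// }
/// ```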
48 | /// 49 | /// [`allocate`]: Allocator::allocate 50 | /// [`grow`]: Allocator::grow 51 | /// [`shrink`]: Allocator::shrink 52 | /// [`deallocate`]: Allocator::deallocate 53 | /// 54 | /// ### Memory fitting 55 | /// 56 | /// Some of the methods require that a layout *fit* a memory block. What it means for a layout to 57 | /// "fit" a memory block means (or equivalently, for a memory block to "fit" a layout) is that the 58 | /// following conditions must hold: 59 | /// 60 | /// * The block must be allocated with the same alignment as [`layout.align()`], and 61 | /// 62 | /// * The provided [`layout.size()`] must fall in the range `min ..= max`, where: 63 | /// - `min` is the size of the layout most recently used to allocate the block, and 64 | /// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`]. 65 | /// 66 | /// [`layout.align()`]: Layout::align 67 | /// [`layout.size()`]: Layout::size 68 | /// 69 | /// # Safety 70 | /// 71 | /// * Memory blocks returned from an allocator that are [*currently allocated*] must point to 72 | /// valid memory and retain their validity while they are [*currently allocated*] and the shorter 73 | /// of: 74 | /// - the borrow-checker lifetime of the allocator type itself. 75 | /// 76 | /// * any pointer to a memory block which is [*currently allocated*] may be passed to any other 77 | /// method of the allocator. 78 | /// 79 | /// [*currently allocated*]: #currently-allocated-memory 80 | pub unsafe trait Allocator { 81 | /// Attempts to allocate a block of memory. 82 | /// 83 | /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`. 84 | /// 85 | /// The returned block may have a larger size than specified by `layout.size()`, and may or may 86 | /// not have its contents initialized. 87 | /// 88 | /// The returned block of memory remains valid as long as it is [*currently allocated*] and the shorter of: 89 | /// - the borrow-checker lifetime of the allocator type itself. 90 | /// 91 | /// # Errors 92 | /// 93 | /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet 94 | /// allocator's size or alignment constraints. 95 | /// 96 | /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or 97 | /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement 98 | /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) 99 | /// 100 | /// Clients wishing to abort computation in response to an allocation error are encouraged to 101 | /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 102 | /// 103 | /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html 104 | fn allocate(&self, layout: Layout) -> Result, AllocError>; 105 | 106 | /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized. 107 | /// 108 | /// # Errors 109 | /// 110 | /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet 111 | /// allocator's size or alignment constraints. 112 | /// 113 | /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or 114 | /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement 115 | /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) 
116 | /// 117 | /// Clients wishing to abort computation in response to an allocation error are encouraged to 118 | /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 119 | /// 120 | /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html 121 | fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { 122 | let ptr = self.allocate(layout)?; 123 | // SAFETY: `alloc` returns a valid memory block 124 | unsafe { (ptr.as_ptr() as *mut u8).write_bytes(0, ptr.len()) } 125 | Ok(ptr) 126 | } 127 | 128 | /// Deallocates the memory referenced by `ptr`. 129 | /// 130 | /// # Safety 131 | /// 132 | /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and 133 | /// * `layout` must [*fit*] that block of memory. 134 | /// 135 | /// [*currently allocated*]: #currently-allocated-memory 136 | /// [*fit*]: #memory-fitting 137 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); 138 | 139 | /// Attempts to extend the memory block. 140 | /// 141 | /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated 142 | /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish 143 | /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout. 144 | /// 145 | /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been 146 | /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the 147 | /// allocation was grown in-place. The newly returned pointer is the only valid pointer 148 | /// for accessing this memory now. 149 | /// 150 | /// If this method returns `Err`, then ownership of the memory block has not been transferred to 151 | /// this allocator, and the contents of the memory block are unaltered. 152 | /// 153 | /// # Safety 154 | /// 155 | /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. 156 | /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). 157 | /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. 158 | /// 159 | /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. 160 | /// 161 | /// [*currently allocated*]: #currently-allocated-memory 162 | /// [*fit*]: #memory-fitting 163 | /// 164 | /// # Errors 165 | /// 166 | /// Returns `Err` if the new layout does not meet the allocator's size and alignment 167 | /// constraints of the allocator, or if growing otherwise fails. 168 | /// 169 | /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or 170 | /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement 171 | /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) 172 | /// 173 | /// Clients wishing to abort computation in response to an allocation error are encouraged to 174 | /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 
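///
/// An editor-added usage sketch (assuming the `Global` allocator defined later in this module;
/// the layouts are arbitrary): grow an existing allocation from 4 to 8 `u32`s.
///
/// ```
/// use dyn_stack::alloc::{Allocator, Global};
/// use core::alloc::Layout;
///
/// let old_layout = Layout::array::<u32>(4).unwrap();
/// let new_layout = Layout::array::<u32>(8).unwrap();
///
/// let ptr = Global.allocate(old_layout).unwrap();
/// // SAFETY: `ptr` is currently allocated via `Global` with `old_layout`, and
/// // `new_layout.size() >= old_layout.size()`.
/// let ptr = unsafe { Global.grow(ptr.cast(), old_layout, new_layout).unwrap() };
/// // SAFETY: after a successful `grow`, the block is currently allocated with `new_layout`.
/// unsafe { Global.deallocate(ptr.cast(), new_layout) };
/// ```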
175 | /// 176 | /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html 177 | unsafe fn grow( 178 | &self, 179 | ptr: NonNull, 180 | old_layout: Layout, 181 | new_layout: Layout, 182 | ) -> Result, AllocError> { 183 | debug_assert!( 184 | new_layout.size() >= old_layout.size(), 185 | "`new_layout.size()` must be greater than or equal to `old_layout.size()`" 186 | ); 187 | 188 | let new_ptr = self.allocate(new_layout)?; 189 | 190 | // SAFETY: because `new_layout.size()` must be greater than or equal to 191 | // `old_layout.size()`, both the old and new memory allocation are valid for reads and 192 | // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet 193 | // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is 194 | // safe. The safety contract for `dealloc` must be upheld by the caller. 195 | unsafe { 196 | ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, old_layout.size()); 197 | self.deallocate(ptr, old_layout); 198 | } 199 | 200 | Ok(new_ptr) 201 | } 202 | 203 | /// Behaves like `grow`, but also ensures that the new contents are set to zero before being 204 | /// returned. 205 | /// 206 | /// The memory block will contain the following contents after a successful call to 207 | /// `grow_zeroed`: 208 | /// * Bytes `0..old_layout.size()` are preserved from the original allocation. 209 | /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on 210 | /// the allocator implementation. `old_size` refers to the size of the memory block prior 211 | /// to the `grow_zeroed` call, which may be larger than the size that was originally 212 | /// requested when it was allocated. 213 | /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory 214 | /// block returned by the `grow_zeroed` call. 215 | /// 216 | /// # Safety 217 | /// 218 | /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. 219 | /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). 220 | /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. 221 | /// 222 | /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. 223 | /// 224 | /// [*currently allocated*]: #currently-allocated-memory 225 | /// [*fit*]: #memory-fitting 226 | /// 227 | /// # Errors 228 | /// 229 | /// Returns `Err` if the new layout does not meet the allocator's size and alignment 230 | /// constraints of the allocator, or if growing otherwise fails. 231 | /// 232 | /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or 233 | /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement 234 | /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) 235 | /// 236 | /// Clients wishing to abort computation in response to an allocation error are encouraged to 237 | /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 
238 | /// 239 | /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html 240 | unsafe fn grow_zeroed( 241 | &self, 242 | ptr: NonNull, 243 | old_layout: Layout, 244 | new_layout: Layout, 245 | ) -> Result, AllocError> { 246 | debug_assert!( 247 | new_layout.size() >= old_layout.size(), 248 | "`new_layout.size()` must be greater than or equal to `old_layout.size()`" 249 | ); 250 | 251 | let new_ptr = self.allocate_zeroed(new_layout)?; 252 | 253 | // SAFETY: because `new_layout.size()` must be greater than or equal to 254 | // `old_layout.size()`, both the old and new memory allocation are valid for reads and 255 | // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet 256 | // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is 257 | // safe. The safety contract for `dealloc` must be upheld by the caller. 258 | unsafe { 259 | ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, old_layout.size()); 260 | self.deallocate(ptr, old_layout); 261 | } 262 | 263 | Ok(new_ptr) 264 | } 265 | 266 | /// Attempts to shrink the memory block. 267 | /// 268 | /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated 269 | /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish 270 | /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout. 271 | /// 272 | /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been 273 | /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the 274 | /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer 275 | /// for accessing this memory now. 276 | /// 277 | /// If this method returns `Err`, then ownership of the memory block has not been transferred to 278 | /// this allocator, and the contents of the memory block are unaltered. 279 | /// 280 | /// # Safety 281 | /// 282 | /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. 283 | /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). 284 | /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`. 285 | /// 286 | /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. 287 | /// 288 | /// [*currently allocated*]: #currently-allocated-memory 289 | /// [*fit*]: #memory-fitting 290 | /// 291 | /// # Errors 292 | /// 293 | /// Returns `Err` if the new layout does not meet the allocator's size and alignment 294 | /// constraints of the allocator, or if shrinking otherwise fails. 295 | /// 296 | /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or 297 | /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement 298 | /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) 299 | /// 300 | /// Clients wishing to abort computation in response to an allocation error are encouraged to 301 | /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 
302 | /// 303 | /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html 304 | unsafe fn shrink( 305 | &self, 306 | ptr: NonNull, 307 | old_layout: Layout, 308 | new_layout: Layout, 309 | ) -> Result, AllocError> { 310 | debug_assert!( 311 | new_layout.size() <= old_layout.size(), 312 | "`new_layout.size()` must be smaller than or equal to `old_layout.size()`" 313 | ); 314 | 315 | let new_ptr = self.allocate(new_layout)?; 316 | 317 | // SAFETY: because `new_layout.size()` must be lower than or equal to 318 | // `old_layout.size()`, both the old and new memory allocation are valid for reads and 319 | // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet 320 | // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is 321 | // safe. The safety contract for `dealloc` must be upheld by the caller. 322 | unsafe { 323 | ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr() as *mut u8, new_layout.size()); 324 | self.deallocate(ptr, old_layout); 325 | } 326 | 327 | Ok(new_ptr) 328 | } 329 | 330 | /// Creates a "by reference" adapter for this instance of `Allocator`. 331 | /// 332 | /// The returned adapter also implements `Allocator` and will simply borrow this. 333 | #[inline(always)] 334 | fn by_ref(&self) -> &Self 335 | where 336 | Self: Sized, 337 | { 338 | self 339 | } 340 | } 341 | 342 | unsafe impl Allocator for &T { 343 | #[inline(always)] 344 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 345 | (**self).allocate(layout) 346 | } 347 | 348 | #[inline(always)] 349 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 350 | (**self).deallocate(ptr, layout) 351 | } 352 | 353 | #[inline(always)] 354 | fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { 355 | (**self).allocate_zeroed(layout) 356 | } 357 | 358 | #[inline(always)] 359 | unsafe fn grow( 360 | &self, 361 | ptr: NonNull, 362 | old_layout: Layout, 363 | new_layout: Layout, 364 | ) -> Result, AllocError> { 365 | (**self).grow(ptr, old_layout, new_layout) 366 | } 367 | 368 | #[inline(always)] 369 | unsafe fn grow_zeroed( 370 | &self, 371 | ptr: NonNull, 372 | old_layout: Layout, 373 | new_layout: Layout, 374 | ) -> Result, AllocError> { 375 | (**self).grow_zeroed(ptr, old_layout, new_layout) 376 | } 377 | 378 | #[inline(always)] 379 | unsafe fn shrink( 380 | &self, 381 | ptr: NonNull, 382 | old_layout: Layout, 383 | new_layout: Layout, 384 | ) -> Result, AllocError> { 385 | (**self).shrink(ptr, old_layout, new_layout) 386 | } 387 | } 388 | 389 | unsafe impl Allocator for &mut T { 390 | #[inline(always)] 391 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 392 | (**self).allocate(layout) 393 | } 394 | 395 | #[inline(always)] 396 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 397 | (**self).deallocate(ptr, layout) 398 | } 399 | 400 | #[inline(always)] 401 | fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { 402 | (**self).allocate_zeroed(layout) 403 | } 404 | 405 | #[inline(always)] 406 | unsafe fn grow( 407 | &self, 408 | ptr: NonNull, 409 | old_layout: Layout, 410 | new_layout: Layout, 411 | ) -> Result, AllocError> { 412 | (**self).grow(ptr, old_layout, new_layout) 413 | } 414 | 415 | #[inline(always)] 416 | unsafe fn grow_zeroed( 417 | &self, 418 | ptr: NonNull, 419 | old_layout: Layout, 420 | new_layout: Layout, 421 | ) -> Result, AllocError> { 422 | (**self).grow_zeroed(ptr, old_layout, new_layout) 423 | } 424 | 425 | #[inline(always)] 426 | unsafe fn shrink( 427 | &self, 428 
| ptr: NonNull, 429 | old_layout: Layout, 430 | new_layout: Layout, 431 | ) -> Result, AllocError> { 432 | (**self).shrink(ptr, old_layout, new_layout) 433 | } 434 | } 435 | 436 | #[cfg(feature = "alloc")] 437 | #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))] 438 | unsafe impl Allocator for alloc::boxed::Box { 439 | #[inline(always)] 440 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 441 | (**self).allocate(layout) 442 | } 443 | 444 | #[inline(always)] 445 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 446 | (**self).deallocate(ptr, layout) 447 | } 448 | 449 | #[inline(always)] 450 | fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { 451 | (**self).allocate_zeroed(layout) 452 | } 453 | 454 | #[inline(always)] 455 | unsafe fn grow( 456 | &self, 457 | ptr: NonNull, 458 | old_layout: Layout, 459 | new_layout: Layout, 460 | ) -> Result, AllocError> { 461 | (**self).grow(ptr, old_layout, new_layout) 462 | } 463 | 464 | #[inline(always)] 465 | unsafe fn grow_zeroed( 466 | &self, 467 | ptr: NonNull, 468 | old_layout: Layout, 469 | new_layout: Layout, 470 | ) -> Result, AllocError> { 471 | (**self).grow_zeroed(ptr, old_layout, new_layout) 472 | } 473 | 474 | #[inline(always)] 475 | unsafe fn shrink( 476 | &self, 477 | ptr: NonNull, 478 | old_layout: Layout, 479 | new_layout: Layout, 480 | ) -> Result, AllocError> { 481 | (**self).shrink(ptr, old_layout, new_layout) 482 | } 483 | } 484 | 485 | #[cfg(feature = "alloc")] 486 | #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))] 487 | pub struct Global; 488 | 489 | #[cfg(feature = "alloc")] 490 | #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))] 491 | unsafe impl Allocator for Global { 492 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 493 | let ptr = if layout.size() == 0 { 494 | core::ptr::null_mut::().wrapping_add(layout.align()) 495 | } else { 496 | unsafe { alloc::alloc::alloc(layout) } 497 | }; 498 | 499 | if ptr.is_null() { 500 | Err(AllocError) 501 | } else { 502 | Ok(unsafe { 503 | NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, layout.size())) 504 | }) 505 | } 506 | } 507 | 508 | fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { 509 | let ptr = if layout.size() == 0 { 510 | core::ptr::null_mut::().wrapping_add(layout.align()) 511 | } else { 512 | unsafe { alloc::alloc::alloc_zeroed(layout) } 513 | }; 514 | 515 | if ptr.is_null() { 516 | Err(AllocError) 517 | } else { 518 | Ok(unsafe { 519 | NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, layout.size())) 520 | }) 521 | } 522 | } 523 | 524 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 525 | if layout.size() != 0 { 526 | alloc::alloc::dealloc(ptr.as_ptr(), layout); 527 | } 528 | } 529 | 530 | unsafe fn grow( 531 | &self, 532 | ptr: NonNull, 533 | old_layout: Layout, 534 | new_layout: Layout, 535 | ) -> Result, AllocError> { 536 | core::debug_assert!( 537 | new_layout.size() >= old_layout.size(), 538 | "`new_layout.size()` must be greater than or equal to `old_layout.size()`" 539 | ); 540 | 541 | if old_layout.align() == new_layout.align() { 542 | let ptr = if new_layout.size() == 0 { 543 | core::ptr::null_mut::().wrapping_add(new_layout.align()) 544 | } else { 545 | alloc::alloc::realloc(ptr.as_ptr(), old_layout, new_layout.size()) 546 | }; 547 | if ptr.is_null() { 548 | Err(AllocError) 549 | } else { 550 | Ok(unsafe { 551 | NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut( 552 | ptr, 553 | new_layout.size(), 554 | )) 555 | }) 556 | } 557 | } else { 558 | let 
new_ptr = self.allocate(new_layout)?; 559 | 560 | // SAFETY: because `new_layout.size()` must be greater than or equal to 561 | // `old_layout.size()`, both the old and new memory allocation are valid for reads and 562 | // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet 563 | // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is 564 | // safe. The safety contract for `dealloc` must be upheld by the caller. 565 | unsafe { 566 | ptr::copy_nonoverlapping( 567 | ptr.as_ptr(), 568 | new_ptr.as_ptr() as *mut u8, 569 | old_layout.size(), 570 | ); 571 | self.deallocate(ptr, old_layout); 572 | } 573 | 574 | Ok(new_ptr) 575 | } 576 | } 577 | 578 | unsafe fn shrink( 579 | &self, 580 | ptr: NonNull, 581 | old_layout: Layout, 582 | new_layout: Layout, 583 | ) -> Result, AllocError> { 584 | core::debug_assert!( 585 | new_layout.size() <= old_layout.size(), 586 | "`new_layout.size()` must be smaller than or equal to `old_layout.size()`" 587 | ); 588 | 589 | if old_layout.align() == new_layout.align() { 590 | let ptr = if new_layout.size() == 0 { 591 | core::ptr::null_mut::().wrapping_add(new_layout.align()) 592 | } else { 593 | alloc::alloc::realloc(ptr.as_ptr(), old_layout, new_layout.size()) 594 | }; 595 | 596 | if ptr.is_null() { 597 | Err(AllocError) 598 | } else { 599 | Ok(unsafe { 600 | NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut( 601 | ptr, 602 | new_layout.size(), 603 | )) 604 | }) 605 | } 606 | } else { 607 | let new_ptr = self.allocate(new_layout)?; 608 | 609 | // SAFETY: because `new_layout.size()` must be lower than or equal to 610 | // `old_layout.size()`, both the old and new memory allocation are valid for reads and 611 | // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet 612 | // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is 613 | // safe. The safety contract for `dealloc` must be upheld by the caller. 
614 | unsafe { 615 | ptr::copy_nonoverlapping( 616 | ptr.as_ptr(), 617 | new_ptr.as_ptr() as *mut u8, 618 | new_layout.size(), 619 | ); 620 | self.deallocate(ptr, old_layout); 621 | } 622 | 623 | Ok(new_ptr) 624 | } 625 | } 626 | } 627 | 628 | #[derive(Copy, Clone, Debug)] 629 | pub(crate) struct VTable { 630 | pub allocate: unsafe fn(*const (), Layout) -> Result, AllocError>, 631 | pub allocate_zeroed: unsafe fn(*const (), Layout) -> Result, AllocError>, 632 | pub deallocate: unsafe fn(*const (), ptr: NonNull, Layout), 633 | pub grow: 634 | unsafe fn(*const (), NonNull, Layout, Layout) -> Result, AllocError>, 635 | pub grow_zeroed: 636 | unsafe fn(*const (), NonNull, Layout, Layout) -> Result, AllocError>, 637 | pub shrink: 638 | unsafe fn(*const (), NonNull, Layout, Layout) -> Result, AllocError>, 639 | 640 | pub clone: Option, 641 | pub drop: unsafe fn(*mut ()), 642 | } 643 | 644 | pub struct DynAlloc<'a> { 645 | pub(crate) alloc: UnsafeCell>, 646 | pub(crate) vtable: &'static VTable, 647 | __marker: PhantomData<&'a ()>, 648 | } 649 | 650 | unsafe impl Send for DynAlloc<'_> {} 651 | 652 | unsafe impl Allocator for DynAlloc<'_> { 653 | #[inline] 654 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 655 | unsafe { (self.vtable.allocate)(core::ptr::addr_of!(self.alloc) as *const (), layout) } 656 | } 657 | 658 | #[inline] 659 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 660 | unsafe { 661 | (self.vtable.deallocate)(core::ptr::addr_of!(self.alloc) as *const (), ptr, layout) 662 | } 663 | } 664 | 665 | #[inline] 666 | fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { 667 | unsafe { 668 | (self.vtable.allocate_zeroed)(core::ptr::addr_of!(self.alloc) as *const (), layout) 669 | } 670 | } 671 | 672 | #[inline] 673 | unsafe fn grow( 674 | &self, 675 | ptr: NonNull, 676 | old_layout: Layout, 677 | new_layout: Layout, 678 | ) -> Result, AllocError> { 679 | unsafe { 680 | (self.vtable.grow)( 681 | core::ptr::addr_of!(self.alloc) as *const (), 682 | ptr, 683 | old_layout, 684 | new_layout, 685 | ) 686 | } 687 | } 688 | 689 | #[inline] 690 | unsafe fn grow_zeroed( 691 | &self, 692 | ptr: NonNull, 693 | old_layout: Layout, 694 | new_layout: Layout, 695 | ) -> Result, AllocError> { 696 | unsafe { 697 | (self.vtable.grow_zeroed)( 698 | core::ptr::addr_of!(self.alloc) as *const (), 699 | ptr, 700 | old_layout, 701 | new_layout, 702 | ) 703 | } 704 | } 705 | 706 | #[inline] 707 | unsafe fn shrink( 708 | &self, 709 | ptr: NonNull, 710 | old_layout: Layout, 711 | new_layout: Layout, 712 | ) -> Result, AllocError> { 713 | unsafe { 714 | (self.vtable.shrink)( 715 | core::ptr::addr_of!(self.alloc) as *const (), 716 | ptr, 717 | old_layout, 718 | new_layout, 719 | ) 720 | } 721 | } 722 | } 723 | 724 | impl Drop for DynAlloc<'_> { 725 | #[inline] 726 | fn drop(&mut self) { 727 | unsafe { (self.vtable.drop)(core::ptr::addr_of_mut!(self.alloc) as *mut ()) } 728 | } 729 | } 730 | 731 | impl Clone for DynAlloc<'_> { 732 | #[inline] 733 | fn clone(&self) -> Self { 734 | let mut alloc = UnsafeCell::new(MaybeUninit::uninit()); 735 | unsafe { 736 | self.vtable.clone.unwrap()( 737 | core::ptr::addr_of_mut!(alloc) as *mut (), 738 | core::ptr::addr_of!(self.alloc) as *const (), 739 | ); 740 | } 741 | 742 | Self { 743 | alloc, 744 | vtable: self.vtable, 745 | __marker: PhantomData, 746 | } 747 | } 748 | } 749 | 750 | impl<'a> DynAlloc<'a> { 751 | #[inline] 752 | pub fn try_new_unclone(alloc: A) -> Result { 753 | if core::mem::size_of::() <= core::mem::size_of::<*const ()>() 754 
| && core::mem::align_of::() <= core::mem::align_of::<*const ()>() 755 | { 756 | trait AllocUnclone: Allocator + Send { 757 | const VTABLE: &'static VTable = &unsafe { 758 | VTable { 759 | allocate: core::mem::transmute(Self::allocate as fn(&Self, _) -> _), 760 | allocate_zeroed: core::mem::transmute( 761 | Self::allocate_zeroed as fn(&Self, _) -> _, 762 | ), 763 | deallocate: core::mem::transmute( 764 | Self::deallocate as unsafe fn(&Self, _, _) -> _, 765 | ), 766 | grow: core::mem::transmute(Self::grow as unsafe fn(&Self, _, _, _) -> _), 767 | grow_zeroed: core::mem::transmute( 768 | Self::grow_zeroed as unsafe fn(&Self, _, _, _) -> _, 769 | ), 770 | shrink: core::mem::transmute( 771 | Self::shrink as unsafe fn(&Self, _, _, _) -> _, 772 | ), 773 | 774 | clone: None, 775 | drop: core::mem::transmute( 776 | core::ptr::drop_in_place:: as unsafe fn(_) -> _, 777 | ), 778 | } 779 | }; 780 | } 781 | impl AllocUnclone for A {} 782 | 783 | Ok(Self { 784 | alloc: unsafe { core::mem::transmute_copy(&core::mem::ManuallyDrop::new(alloc)) }, 785 | vtable: ::VTABLE, 786 | __marker: PhantomData, 787 | }) 788 | } else { 789 | Err(alloc) 790 | } 791 | } 792 | 793 | #[inline] 794 | pub fn try_new_clone(alloc: A) -> Result { 795 | if core::mem::size_of::() <= core::mem::size_of::<*const ()>() 796 | && core::mem::align_of::() <= core::mem::align_of::<*const ()>() 797 | { 798 | trait AllocClone: Allocator + Send + Clone { 799 | const VTABLE: &'static VTable = &unsafe { 800 | VTable { 801 | allocate: core::mem::transmute(Self::allocate as fn(_, _) -> _), 802 | allocate_zeroed: core::mem::transmute( 803 | Self::allocate_zeroed as fn(_, _) -> _, 804 | ), 805 | deallocate: core::mem::transmute( 806 | Self::deallocate as unsafe fn(_, _, _) -> _, 807 | ), 808 | grow: core::mem::transmute(Self::grow as unsafe fn(_, _, _, _) -> _), 809 | grow_zeroed: core::mem::transmute( 810 | Self::grow_zeroed as unsafe fn(_, _, _, _) -> _, 811 | ), 812 | shrink: core::mem::transmute(Self::shrink as unsafe fn(_, _, _, _) -> _), 813 | 814 | clone: Some(|dst: *mut (), src: *const ()| { 815 | (dst as *mut Self).write((*(src as *const Self)).clone()) 816 | }), 817 | drop: core::mem::transmute( 818 | core::ptr::drop_in_place:: as unsafe fn(_) -> _, 819 | ), 820 | } 821 | }; 822 | } 823 | impl AllocClone for A {} 824 | 825 | Ok(Self { 826 | alloc: unsafe { core::mem::transmute_copy(&core::mem::ManuallyDrop::new(alloc)) }, 827 | vtable: ::VTABLE, 828 | __marker: PhantomData, 829 | }) 830 | } else { 831 | Err(alloc) 832 | } 833 | } 834 | 835 | #[inline] 836 | pub fn from_ref(alloc: &'a A) -> Self { 837 | match Self::try_new_clone(alloc) { 838 | Ok(me) => me, 839 | Err(_) => unreachable!(), 840 | } 841 | } 842 | 843 | #[inline] 844 | pub fn from_mut(alloc: &'a mut A) -> Self { 845 | match Self::try_new_unclone(alloc) { 846 | Ok(me) => me, 847 | Err(_) => unreachable!(), 848 | } 849 | } 850 | 851 | #[inline] 852 | pub fn by_mut(&mut self) -> DynAlloc<'_> { 853 | DynAlloc::from_mut(self) 854 | } 855 | 856 | #[inline] 857 | pub fn cloneable(&self) -> bool { 858 | self.vtable.clone.is_some() 859 | } 860 | } 861 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![cfg_attr(docsrs, feature(doc_cfg))] 3 | #![deny(elided_lifetimes_in_paths)] 4 | #![allow(clippy::missing_transmute_annotations, clippy::type_complexity)] 5 | 6 | //! Stack that allows users to allocate dynamically sized arrays. 7 | //! 
8 | //! The stack wraps a buffer of bytes that it uses as a workspace. 9 | //! Allocating an array takes a chunk of memory from the stack, which can be reused once the array 10 | //! is dropped. 11 | //! 12 | //! # Examples: 13 | //! ``` 14 | //! use core::mem::MaybeUninit; 15 | //! use dyn_stack::{MemStack, StackReq}; 16 | //! 17 | //! // We allocate enough storage for 3 `i32` and 4 `u8`. 18 | //! let mut buf = [MaybeUninit::uninit(); 19 | //! StackReq::new::(3) 20 | //! .and(StackReq::new::(4)) 21 | //! .unaligned_bytes_required()]; 22 | //! let stack = MemStack::new(&mut buf); 23 | //! 24 | //! { 25 | //! // We can have nested allocations. 26 | //! // 3×`i32` 27 | //! let (array_i32, substack) = stack.make_with::(3, |i| i as i32); 28 | //! // and 4×`u8` 29 | //! let (mut array_u8, _) = substack.make_with::(4, |_| 0); 30 | //! 31 | //! // We can read from the arrays, 32 | //! assert_eq!(array_i32[0], 0); 33 | //! assert_eq!(array_i32[1], 1); 34 | //! assert_eq!(array_i32[2], 2); 35 | //! 36 | //! // and write to them. 37 | //! array_u8[0] = 1; 38 | //! 39 | //! assert_eq!(array_u8[0], 1); 40 | //! assert_eq!(array_u8[1], 0); 41 | //! assert_eq!(array_u8[2], 0); 42 | //! assert_eq!(array_u8[3], 0); 43 | //! } 44 | //! 45 | //! // We can also have disjoint allocations. 46 | //! { 47 | //! // 3×`i32` 48 | //! let (mut array_i32, _) = stack.make_with::(3, |i| i as i32); 49 | //! assert_eq!(array_i32[0], 0); 50 | //! assert_eq!(array_i32[1], 1); 51 | //! assert_eq!(array_i32[2], 2); 52 | //! } 53 | //! 54 | //! { 55 | //! // or 4×`u8` 56 | //! let (mut array_u8, _) = stack.make_with::(4, |i| i as i32 + 3); 57 | //! assert_eq!(array_u8[0], 3); 58 | //! assert_eq!(array_u8[1], 4); 59 | //! assert_eq!(array_u8[2], 5); 60 | //! assert_eq!(array_u8[3], 6); 61 | //! } 62 | //! ``` 63 | 64 | #[cfg(feature = "std")] 65 | extern crate std; 66 | 67 | #[cfg(feature = "std")] 68 | pub use std::error::Error; 69 | 70 | #[cfg(all(feature = "core-error", not(feature = "std")))] 71 | pub use core::error::Error; 72 | 73 | pub mod alloc; 74 | 75 | pub mod mem; 76 | 77 | pub type DynStack = MemStack; 78 | 79 | use bytemuck::Pod; 80 | 81 | #[cfg(feature = "alloc")] 82 | #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))] 83 | pub use mem::MemBuffer; 84 | #[cfg(feature = "alloc")] 85 | #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))] 86 | pub use mem::PodBuffer; 87 | 88 | mod stack_req; 89 | pub use stack_req::StackReq; 90 | 91 | use core::fmt; 92 | use core::fmt::Debug; 93 | use core::marker::PhantomData; 94 | use core::mem::MaybeUninit; 95 | use core::ops::Deref; 96 | use core::ops::DerefMut; 97 | use core::ptr::NonNull; 98 | use core::slice; 99 | 100 | /// Stack wrapper around a buffer of uninitialized bytes. 101 | #[repr(transparent)] 102 | pub struct MemStack { 103 | buffer: [MaybeUninit], 104 | } 105 | /// Stack wrapper around a buffer of bytes. 106 | #[repr(transparent)] 107 | pub struct PodStack { 108 | buffer: [u8], 109 | } 110 | 111 | /// Owns an unsized array of data, allocated from some stack. 
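///
/// An editor-added usage sketch (assuming the [`MemStack::make_with`] API defined below; the
/// buffer size is arbitrary):
///
/// ```
/// use core::mem::MaybeUninit;
/// use dyn_stack::MemStack;
///
/// let mut buf = [MaybeUninit::<u8>::uninit(); 64];
/// let stack = MemStack::new(&mut buf);
///
/// // `array` is a `DynArray<'_, u64>` borrowing its storage from `buf`;
/// // the memory becomes reusable once `array` is dropped.
/// let (array, _remainder) = stack.make_with::<u64>(4, |i| i as u64);
/// assert_eq!(&*array, &[0, 1, 2, 3]);
/// ```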
112 | pub struct DynArray<'a, T> { 113 | ptr: NonNull, 114 | len: usize, 115 | __marker: PhantomData<(&'a T, T)>, 116 | } 117 | 118 | impl DynArray<'_, T> { 119 | #[inline] 120 | #[doc(hidden)] 121 | pub fn into_raw_parts(self) -> (*mut T, usize) { 122 | let this = core::mem::ManuallyDrop::new(self); 123 | (this.ptr.as_ptr(), this.len) 124 | } 125 | 126 | #[inline] 127 | #[doc(hidden)] 128 | pub unsafe fn from_raw_parts(ptr: *mut T, len: usize) -> Self { 129 | Self { 130 | ptr: NonNull::new_unchecked(ptr), 131 | len, 132 | __marker: PhantomData, 133 | } 134 | } 135 | } 136 | 137 | /// Owns an unsized array of data, allocated from some stack. 138 | pub struct UnpodStack<'a> { 139 | ptr: NonNull, 140 | len: usize, 141 | __marker: PhantomData<&'a ()>, 142 | } 143 | 144 | impl Debug for DynArray<'_, T> { 145 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 146 | (**self).fmt(fmt) 147 | } 148 | } 149 | 150 | unsafe impl Send for DynArray<'_, T> where T: Send {} 151 | unsafe impl Sync for DynArray<'_, T> where T: Sync {} 152 | 153 | unsafe impl Send for UnpodStack<'_> {} 154 | unsafe impl Sync for UnpodStack<'_> {} 155 | 156 | impl Drop for DynArray<'_, T> { 157 | #[inline] 158 | fn drop(&mut self) { 159 | unsafe { 160 | core::ptr::drop_in_place(core::ptr::slice_from_raw_parts_mut( 161 | self.ptr.as_ptr(), 162 | self.len, 163 | )) 164 | }; 165 | } 166 | } 167 | 168 | macro_rules! if_cfg { 169 | (if $cfg: meta $if_true: block else $if_false: block $(,)?) => { 170 | #[cfg($cfg)] 171 | { 172 | $if_true 173 | } 174 | #[cfg(not($cfg))] 175 | { 176 | $if_false 177 | } 178 | }; 179 | } 180 | 181 | /// fool the compiler into thinking we init the data 182 | /// 183 | /// # Safety 184 | /// `[ptr, ptr + len)` must have been fully initialized at some point before this is called 185 | #[inline(always)] 186 | unsafe fn launder(ptr: *mut u8, len: usize) { 187 | unsafe { 188 | if_cfg!(if all( 189 | not(debug_assertions), 190 | not(miri), 191 | any( 192 | target_arch = "x86", 193 | target_arch = "x86_64", 194 | target_arch = "arm", 195 | target_arch = "aarch64", 196 | target_arch = "loongarch64", 197 | target_arch = "riscv32", 198 | target_arch = "riscv64", 199 | ) 200 | ) { 201 | _ = len; 202 | core::arch::asm! 
{ "/* {0} */", in(reg) ptr, options(nostack) } 203 | } else { 204 | const ARBITRARY_BYTE: u8 = 0xCD; 205 | core::ptr::write_bytes(ptr, ARBITRARY_BYTE, len) 206 | }); 207 | } 208 | } 209 | 210 | impl Drop for UnpodStack<'_> { 211 | #[inline] 212 | fn drop(&mut self) { 213 | unsafe { launder(self.ptr.as_ptr(), self.len) }; 214 | } 215 | } 216 | 217 | impl Deref for DynArray<'_, T> { 218 | type Target = [T]; 219 | 220 | #[inline] 221 | fn deref(&self) -> &'_ Self::Target { 222 | unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } 223 | } 224 | } 225 | 226 | impl DerefMut for DynArray<'_, T> { 227 | #[inline] 228 | fn deref_mut(&mut self) -> &mut Self::Target { 229 | unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } 230 | } 231 | } 232 | 233 | impl AsRef<[T]> for DynArray<'_, T> { 234 | #[inline] 235 | fn as_ref(&self) -> &'_ [T] { 236 | unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } 237 | } 238 | } 239 | 240 | impl AsMut<[T]> for DynArray<'_, T> { 241 | #[inline] 242 | fn as_mut(&mut self) -> &'_ mut [T] { 243 | unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } 244 | } 245 | } 246 | 247 | impl Deref for UnpodStack<'_> { 248 | type Target = MemStack; 249 | 250 | #[inline] 251 | fn deref(&self) -> &'_ Self::Target { 252 | unsafe { 253 | &*(core::ptr::slice_from_raw_parts(self.ptr.as_ptr(), self.len) as *const MemStack) 254 | } 255 | } 256 | } 257 | 258 | impl DerefMut for UnpodStack<'_> { 259 | #[inline] 260 | fn deref_mut(&mut self) -> &mut Self::Target { 261 | unsafe { 262 | &mut *(core::ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), self.len) 263 | as *mut MemStack) 264 | } 265 | } 266 | } 267 | 268 | #[inline] 269 | unsafe fn transmute_slice(slice: &mut [MaybeUninit], size: usize) -> &mut [T] { 270 | slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut T, size) 271 | } 272 | #[inline] 273 | unsafe fn transmute_pod_slice(slice: &mut [u8], size: usize) -> &mut [T] { 274 | slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut T, size) 275 | } 276 | 277 | struct DropGuard { 278 | ptr: *mut T, 279 | len: usize, 280 | } 281 | 282 | impl Drop for DropGuard { 283 | #[inline] 284 | fn drop(&mut self) { 285 | unsafe { 286 | core::ptr::drop_in_place(core::ptr::slice_from_raw_parts_mut(self.ptr, self.len)) 287 | }; 288 | } 289 | } 290 | 291 | #[inline] 292 | fn init_array_with(mut f: impl FnMut(usize) -> T, array: &mut [MaybeUninit]) -> &mut [T] { 293 | let len = array.len(); 294 | let ptr = array.as_mut_ptr() as *mut T; 295 | 296 | let mut guard = DropGuard { ptr, len: 0 }; 297 | 298 | for i in 0..len { 299 | guard.len = i; 300 | unsafe { ptr.add(i).write(f(i)) }; 301 | } 302 | core::mem::forget(guard); 303 | 304 | unsafe { slice::from_raw_parts_mut(ptr, len) } 305 | } 306 | 307 | #[inline] 308 | fn init_pod_array_with(mut f: impl FnMut(usize) -> T, array: &mut [T]) -> &mut [T] { 309 | for (i, x) in array.iter_mut().enumerate() { 310 | *x = f(i); 311 | } 312 | array 313 | } 314 | 315 | #[inline] 316 | unsafe fn init_array_with_iter>( 317 | iter: I, 318 | ptr: &mut [MaybeUninit], 319 | ) -> usize { 320 | let max_len = ptr.len(); 321 | let ptr = ptr.as_mut_ptr(); 322 | let mut guard = DropGuard { ptr, len: 0 }; 323 | 324 | iter.take(max_len).enumerate().for_each(|(i, item)| { 325 | *ptr.add(i) = MaybeUninit::new(item); 326 | guard.len += 1; 327 | }); 328 | 329 | let len = guard.len; 330 | core::mem::forget(guard); 331 | 332 | len 333 | } 334 | 335 | #[inline] 336 | fn init_pod_array_with_iter>(iter: I, ptr: &mut [T]) -> usize { 337 | let mut len 
= 0; 338 | iter.zip(ptr).for_each(|(item, dst)| { 339 | *dst = item; 340 | len += 1; 341 | }); 342 | len 343 | } 344 | 345 | #[track_caller] 346 | #[inline] 347 | fn check_alignment(align: usize, alignof_val: usize, type_name: &'static str) { 348 | assert!( 349 | (align & (align.wrapping_sub(1))) == 0, 350 | r#" 351 | requested alignment is not a power of two: 352 | - requested alignment: {} 353 | "#, 354 | align 355 | ); 356 | assert!( 357 | alignof_val <= align, 358 | r#" 359 | requested alignment is less than the minimum valid alignment for `{}`: 360 | - requested alignment: {} 361 | - minimum alignment: {} 362 | "#, 363 | type_name, 364 | align, 365 | alignof_val, 366 | ); 367 | } 368 | 369 | #[track_caller] 370 | #[inline] 371 | fn check_enough_space_for_align_offset(len: usize, align: usize, align_offset: usize) { 372 | assert!( 373 | len >= align_offset, 374 | r#" 375 | buffer is not large enough to accomodate the requested alignment 376 | - buffer length: {} 377 | - requested alignment: {} 378 | - byte offset for alignment: {} 379 | "#, 380 | len, 381 | align, 382 | align_offset, 383 | ); 384 | } 385 | 386 | #[track_caller] 387 | #[inline] 388 | fn check_enough_space_for_array( 389 | remaining_len: usize, 390 | sizeof_val: usize, 391 | array_len: usize, 392 | type_name: &'static str, 393 | ) { 394 | if sizeof_val == 0 { 395 | return; 396 | } 397 | assert!( 398 | remaining_len / sizeof_val >= array_len, 399 | r#" 400 | buffer is not large enough to allocate an array of type `{}` of the requested length: 401 | - remaining buffer length (after adjusting for alignment): {}, 402 | - requested array length: {} ({} bytes), 403 | "#, 404 | type_name, 405 | remaining_len, 406 | array_len, 407 | array_len * sizeof_val, 408 | ); 409 | } 410 | 411 | #[repr(transparent)] 412 | pub struct Bump<'stack> { 413 | ptr: core::cell::UnsafeCell<&'stack mut MemStack>, 414 | } 415 | 416 | unsafe impl alloc::Allocator for Bump<'_> { 417 | fn allocate(&self, layout: core::alloc::Layout) -> Result, alloc::AllocError> { 418 | let ptr = unsafe { &mut *self.ptr.get() }; 419 | let old = core::mem::replace(ptr, MemStack::new(&mut [])); 420 | 421 | if old.can_hold(StackReq::new_aligned::(layout.size(), layout.align())) { 422 | let (alloc, new) = old.make_aligned_uninit::(layout.size(), layout.align()); 423 | *ptr = new; 424 | 425 | let len = alloc.len(); 426 | let ptr = alloc.as_mut_ptr() as *mut u8; 427 | Ok(unsafe { NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr, len)) }) 428 | } else { 429 | Err(alloc::AllocError) 430 | } 431 | } 432 | 433 | #[inline] 434 | unsafe fn deallocate(&self, ptr: NonNull, layout: core::alloc::Layout) { 435 | let _ = (ptr, layout); 436 | } 437 | } 438 | 439 | impl MemStack { 440 | /// Returns a new [`MemStack`] from the provided memory buffer. 441 | #[inline] 442 | pub fn new(buffer: &mut [MaybeUninit]) -> &mut Self { 443 | unsafe { &mut *(buffer as *mut [MaybeUninit] as *mut Self) } 444 | } 445 | 446 | /// Returns a new [`MemStack`] from the provided memory buffer. 447 | #[inline] 448 | pub fn new_any(buffer: &mut [MaybeUninit]) -> &mut Self { 449 | let len = core::mem::size_of_val(buffer); 450 | Self::new(unsafe { slice::from_raw_parts_mut(buffer.as_mut_ptr() as *mut _, len) }) 451 | } 452 | 453 | /// Returns `true` if the stack can hold an allocation with the given size and alignment 454 | /// requirements. 
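///
/// Editor-added sketch (the buffer size is chosen arbitrarily):
///
/// ```
/// use core::mem::MaybeUninit;
/// use dyn_stack::{MemStack, StackReq};
///
/// let mut buf = [MaybeUninit::<u8>::uninit(); 32];
/// let stack = MemStack::new(&mut buf);
///
/// // 4 `u32`s (16 bytes) fit in a 32-byte buffer; 1024 of them do not.
/// assert!(stack.can_hold(StackReq::new::<u32>(4)));
/// assert!(!stack.can_hold(StackReq::new::<u32>(1024)));
/// ```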
455 | #[inline] 456 | #[must_use] 457 | pub fn can_hold(&self, alloc_req: StackReq) -> bool { 458 | let align = alloc_req.align_bytes(); 459 | let size = alloc_req.size_bytes(); 460 | let align_offset = self.buffer.as_ptr().align_offset(align); 461 | let self_size = self.buffer.len(); 462 | (self_size >= align_offset) && (self_size - align_offset >= size) 463 | } 464 | 465 | /// Returns the number of bytes that this stack can hold. 466 | #[inline] 467 | pub fn len_bytes(&self) -> usize { 468 | self.buffer.len() 469 | } 470 | 471 | /// Returns a pointer to the (possibly uninitialized) stack memory. 472 | #[inline] 473 | pub fn as_ptr(&self) -> *const u8 { 474 | self.buffer.as_ptr() as _ 475 | } 476 | 477 | #[track_caller] 478 | #[inline] 479 | fn split_buffer<'out>( 480 | buffer: &'out mut [MaybeUninit], 481 | size: usize, 482 | align: usize, 483 | sizeof_val: usize, 484 | alignof_val: usize, 485 | type_name: &'static str, 486 | ) -> (&'out mut [MaybeUninit], &'out mut [MaybeUninit]) { 487 | let len = buffer.len(); 488 | let align_offset = buffer.as_mut_ptr().align_offset(align); 489 | 490 | check_alignment(align, alignof_val, type_name); 491 | check_enough_space_for_align_offset(len, align, align_offset); 492 | check_enough_space_for_array(len - align_offset, sizeof_val, size, type_name); 493 | 494 | let buffer = unsafe { buffer.get_unchecked_mut(align_offset..) }; 495 | let len = len - align_offset; 496 | 497 | let begin = buffer.as_mut_ptr(); 498 | let begin_len = size * sizeof_val; 499 | let mid = unsafe { begin.add(begin_len) }; 500 | let mid_len = len - begin_len; 501 | unsafe { 502 | ( 503 | slice::from_raw_parts_mut(begin, begin_len), 504 | slice::from_raw_parts_mut(mid, mid_len), 505 | ) 506 | } 507 | } 508 | 509 | /// Returns a new aligned and uninitialized [`DynArray`] and a stack over the remainder of the 510 | /// buffer. 511 | /// 512 | /// # Panics 513 | /// 514 | /// Panics if the stack isn't large enough to allocate the array. 515 | #[track_caller] 516 | #[inline] 517 | #[must_use] 518 | pub fn make_aligned_uninit( 519 | &mut self, 520 | size: usize, 521 | align: usize, 522 | ) -> (&mut [MaybeUninit], &mut Self) { 523 | let (taken, remaining) = Self::split_buffer( 524 | &mut self.buffer, 525 | size, 526 | align, 527 | core::mem::size_of::(), 528 | core::mem::align_of::(), 529 | core::any::type_name::(), 530 | ); 531 | 532 | ( 533 | unsafe { transmute_slice::>(taken, size) }, 534 | MemStack::new(remaining), 535 | ) 536 | } 537 | 538 | /// Returns a new aligned [`DynArray`], initialized with the provided function, and a stack 539 | /// over the remainder of the buffer. 540 | /// 541 | /// # Panics 542 | /// 543 | /// Panics if the stack isn't large enough to allocate the array, or if the provided function 544 | /// panics. 
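///
/// Editor-added sketch (the 64-byte alignment and the buffer size are arbitrary illustrative
/// choices):
///
/// ```
/// use core::mem::MaybeUninit;
/// use dyn_stack::MemStack;
///
/// let mut buf = [MaybeUninit::<u8>::uninit(); 256];
/// let stack = MemStack::new(&mut buf);
///
/// // 8 `f32`s whose first element sits on a 64-byte boundary.
/// let (array, _remainder) = stack.make_aligned_with::<f32>(8, 64, |i| i as f32);
/// assert_eq!(array[7], 7.0);
/// assert_eq!(array.as_ptr() as usize % 64, 0);
/// ```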
545 | #[track_caller] 546 | #[inline] 547 | #[must_use] 548 | pub fn make_aligned_with( 549 | &mut self, 550 | size: usize, 551 | align: usize, 552 | f: impl FnMut(usize) -> T, 553 | ) -> (DynArray<'_, T>, &mut Self) { 554 | let (taken, remaining) = self.make_aligned_uninit(size, align); 555 | let (len, ptr) = { 556 | let taken = init_array_with(f, taken); 557 | (taken.len(), taken.as_mut_ptr()) 558 | }; 559 | ( 560 | DynArray { 561 | ptr: unsafe { NonNull::::new_unchecked(ptr) }, 562 | len, 563 | __marker: PhantomData, 564 | }, 565 | remaining, 566 | ) 567 | } 568 | 569 | #[track_caller] 570 | #[inline] 571 | #[must_use] 572 | #[doc(hidden)] 573 | pub unsafe fn make_raw(&mut self, size: usize) -> (&mut [T], &mut Self) { 574 | self.make_aligned_raw(size, core::mem::align_of::()) 575 | } 576 | 577 | #[track_caller] 578 | #[inline] 579 | #[must_use] 580 | #[doc(hidden)] 581 | pub unsafe fn make_aligned_raw( 582 | &mut self, 583 | size: usize, 584 | align: usize, 585 | ) -> (&mut [T], &mut Self) { 586 | let (mem, stack) = self.make_aligned_uninit::(size, align); 587 | unsafe { (&mut *(mem as *mut [MaybeUninit] as *mut [T]), stack) } 588 | } 589 | 590 | /// Returns a new uninitialized [`DynArray`] and a stack over the remainder of the buffer. 591 | /// 592 | /// # Panics 593 | /// 594 | /// Panics if the stack isn't large enough to allocate the array. 595 | #[track_caller] 596 | #[inline] 597 | #[must_use] 598 | pub fn make_uninit(&mut self, size: usize) -> (&mut [MaybeUninit], &mut Self) { 599 | self.make_aligned_uninit(size, core::mem::align_of::()) 600 | } 601 | 602 | /// Returns a new [`DynArray`], initialized with the provided function, and a stack over the 603 | /// remainder of the buffer. 604 | /// 605 | /// # Panics 606 | /// 607 | /// Panics if the stack isn't large enough to allocate the array, or if the provided function 608 | /// panics. 609 | #[track_caller] 610 | #[inline] 611 | #[must_use] 612 | pub fn make_with( 613 | &mut self, 614 | size: usize, 615 | f: impl FnMut(usize) -> T, 616 | ) -> (DynArray<'_, T>, &mut Self) { 617 | self.make_aligned_with(size, core::mem::align_of::(), f) 618 | } 619 | 620 | /// Returns a new aligned [`DynArray`], initialized with the provided iterator, and a stack 621 | /// over the remainder of the buffer. 622 | /// If there isn't enough space for all the iterator items, then the returned array only 623 | /// contains the first elements that fit into the stack. 624 | /// 625 | /// # Panics 626 | /// 627 | /// Panics if the provided iterator panics. 628 | #[track_caller] 629 | #[inline] 630 | #[must_use] 631 | pub fn collect_aligned( 632 | &mut self, 633 | align: usize, 634 | iter: impl IntoIterator, 635 | ) -> (DynArray<'_, I>, &mut Self) { 636 | self.collect_aligned_impl(align, iter.into_iter()) 637 | } 638 | 639 | /// Returns a new [`DynArray`], initialized with the provided iterator, and a stack over the 640 | /// remainder of the buffer. 641 | /// If there isn't enough space for all the iterator items, then the returned array only 642 | /// contains the first elements that fit into the stack. 643 | /// 644 | /// # Panics 645 | /// 646 | /// Panics if the provided iterator panics. 
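///
/// Editor-added sketch of the truncation behaviour described above (the buffer size is chosen
/// so that only a few items fit):
///
/// ```
/// use core::mem::MaybeUninit;
/// use dyn_stack::MemStack;
///
/// // Room for at most 4 `u32`s.
/// let mut buf = [MaybeUninit::<u8>::uninit(); 16];
/// let stack = MemStack::new(&mut buf);
///
/// // The iterator yields 100 items, but only the leading ones that fit are collected.
/// let (array, _) = stack.collect(0u32..100);
/// assert!(array.len() <= 4);
/// assert_eq!(&array[..2], &[0, 1]);
/// ```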
647 | #[track_caller] 648 | #[inline] 649 | #[must_use] 650 | pub fn collect( 651 | &mut self, 652 | iter: impl IntoIterator, 653 | ) -> (DynArray<'_, I>, &mut Self) { 654 | self.collect_aligned_impl(core::mem::align_of::(), iter.into_iter()) 655 | } 656 | 657 | #[track_caller] 658 | #[inline] 659 | fn collect_aligned_impl( 660 | &mut self, 661 | align: usize, 662 | iter: I, 663 | ) -> (DynArray<'_, I::Item>, &mut Self) { 664 | let sizeof_val = core::mem::size_of::(); 665 | let alignof_val = core::mem::align_of::(); 666 | let align_offset = self.buffer.as_mut_ptr().align_offset(align); 667 | 668 | check_alignment(align, alignof_val, core::any::type_name::()); 669 | check_enough_space_for_align_offset(self.buffer.len(), align, align_offset); 670 | 671 | let buffer = unsafe { self.buffer.get_unchecked_mut(align_offset..) }; 672 | let buffer_len = buffer.len(); 673 | let buffer_ptr = buffer.as_mut_ptr(); 674 | unsafe { 675 | let len = init_array_with_iter( 676 | iter, 677 | slice::from_raw_parts_mut( 678 | buffer_ptr as *mut MaybeUninit, 679 | if sizeof_val == 0 { 680 | usize::MAX 681 | } else { 682 | buffer_len / sizeof_val 683 | }, 684 | ), 685 | ); 686 | 687 | let remaining_slice = slice::from_raw_parts_mut( 688 | buffer_ptr.add(len * sizeof_val), 689 | buffer.len() - len * sizeof_val, 690 | ); 691 | ( 692 | DynArray { 693 | ptr: NonNull::new_unchecked(buffer_ptr as *mut I::Item), 694 | len, 695 | __marker: PhantomData, 696 | }, 697 | Self::new(remaining_slice), 698 | ) 699 | } 700 | } 701 | 702 | #[inline] 703 | pub fn bump<'bump, 'stack>(self: &'bump mut &'stack mut Self) -> &'bump mut Bump<'stack> { 704 | unsafe { &mut *(self as *mut &mut Self as *mut Bump<'stack>) } 705 | } 706 | } 707 | 708 | impl PodStack { 709 | /// Returns a new [`PodStack`] from the provided memory buffer. 710 | #[inline] 711 | pub fn new(buffer: &mut [u8]) -> &mut Self { 712 | unsafe { &mut *(buffer as *mut [u8] as *mut Self) } 713 | } 714 | 715 | /// Returns a new [`MemStack`] from the provided memory buffer. 716 | #[inline] 717 | pub fn new_any(buffer: &mut [T]) -> &mut Self { 718 | let len = core::mem::size_of_val(buffer); 719 | Self::new(unsafe { slice::from_raw_parts_mut(buffer.as_mut_ptr() as *mut _, len) }) 720 | } 721 | 722 | /// Returns `true` if the stack can hold an allocation with the given size and alignment 723 | /// requirements. 724 | #[inline] 725 | #[must_use] 726 | pub fn can_hold(&self, alloc_req: StackReq) -> bool { 727 | let align = alloc_req.align_bytes(); 728 | let size = alloc_req.size_bytes(); 729 | let align_offset = self.buffer.as_ptr().align_offset(align); 730 | let self_size = self.buffer.len(); 731 | (self_size >= align_offset) && (self_size - align_offset >= size) 732 | } 733 | 734 | /// Returns the number of bytes that this stack can hold. 735 | #[inline] 736 | pub fn len_bytes(&self) -> usize { 737 | self.buffer.len() 738 | } 739 | 740 | /// Returns a pointer to the stack memory. 
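///
/// Together with `len_bytes`, this describes the backing byte buffer directly
/// (a small sketch, assuming the default `alloc` feature):
///
/// ```
/// use dyn_stack::{PodBuffer, PodStack, StackReq};
///
/// let mut buf = PodBuffer::new(StackReq::new::<u8>(8));
/// let stack = PodStack::new(&mut buf);
/// assert_eq!(stack.len_bytes(), 8);
/// assert!(!stack.as_ptr().is_null());
/// ```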
741 | #[inline] 742 | pub fn as_ptr(&self) -> *const u8 { 743 | self.buffer.as_ptr() as _ 744 | } 745 | 746 | #[track_caller] 747 | #[inline] 748 | fn split_buffer<'out>( 749 | buffer: &'out mut [u8], 750 | size: usize, 751 | align: usize, 752 | sizeof_val: usize, 753 | alignof_val: usize, 754 | type_name: &'static str, 755 | ) -> (&'out mut [u8], &'out mut [u8]) { 756 | let len = buffer.len(); 757 | let align_offset = buffer.as_mut_ptr().align_offset(align); 758 | 759 | check_alignment(align, alignof_val, type_name); 760 | check_enough_space_for_align_offset(len, align, align_offset); 761 | check_enough_space_for_array(len - align_offset, sizeof_val, size, type_name); 762 | 763 | let buffer = unsafe { buffer.get_unchecked_mut(align_offset..) }; 764 | let len = len - align_offset; 765 | 766 | let begin = buffer.as_mut_ptr(); 767 | let begin_len = size * sizeof_val; 768 | let mid = unsafe { begin.add(begin_len) }; 769 | let mid_len = len - begin_len; 770 | unsafe { 771 | ( 772 | slice::from_raw_parts_mut(begin, begin_len), 773 | slice::from_raw_parts_mut(mid, mid_len), 774 | ) 775 | } 776 | } 777 | 778 | /// Returns a new aligned and uninitialized slice and a stack over the remainder of the 779 | /// buffer. 780 | /// 781 | /// # Panics 782 | /// 783 | /// Panics if the stack isn't large enough to allocate the array. 784 | #[track_caller] 785 | #[inline] 786 | #[must_use] 787 | pub fn make_aligned_raw(&mut self, size: usize, align: usize) -> (&mut [T], &mut Self) { 788 | let (taken, remaining) = Self::split_buffer( 789 | &mut self.buffer, 790 | size, 791 | align, 792 | core::mem::size_of::(), 793 | core::mem::align_of::(), 794 | core::any::type_name::(), 795 | ); 796 | 797 | let taken = unsafe { transmute_pod_slice::(taken, size) }; 798 | (taken, Self::new(remaining)) 799 | } 800 | 801 | /// Returns a new aligned and uninitialized slice and a stack over the remainder of the 802 | /// buffer. 803 | /// 804 | /// # Panics 805 | /// 806 | /// Panics if the stack isn't large enough to allocate the array. 807 | /// 808 | /// # Safety 809 | /// 810 | /// The return value must be dropped if any uninitialized values are written to the bytes by the time the borrow ends. 811 | pub unsafe fn make_aligned_unpod( 812 | &mut self, 813 | size: usize, 814 | align: usize, 815 | ) -> (UnpodStack<'_>, &mut Self) { 816 | let (taken, remaining) = Self::split_buffer(&mut self.buffer, size, align, 1, 1, "[Bytes]"); 817 | ( 818 | UnpodStack { 819 | ptr: NonNull::new_unchecked(taken.as_mut_ptr()), 820 | len: size, 821 | __marker: PhantomData, 822 | }, 823 | Self::new(remaining), 824 | ) 825 | } 826 | 827 | /// Returns a new aligned slice, initialized with the provided function, and a stack 828 | /// over the remainder of the buffer. 829 | /// 830 | /// # Panics 831 | /// 832 | /// Panics if the stack isn't large enough to allocate the array, or if the provided function 833 | /// panics. 834 | #[track_caller] 835 | #[inline] 836 | #[must_use] 837 | pub fn make_aligned_with( 838 | &mut self, 839 | size: usize, 840 | align: usize, 841 | f: impl FnMut(usize) -> T, 842 | ) -> (&mut [T], &mut Self) { 843 | let (taken, remaining) = self.make_aligned_raw(size, align); 844 | let taken = init_pod_array_with(f, taken); 845 | (taken, remaining) 846 | } 847 | 848 | /// Returns a new uninitialized slice and a stack over the remainder of the buffer. 849 | /// 850 | /// # Panics 851 | /// 852 | /// Panics if the stack isn't large enough to allocate the array. 
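///
/// A minimal sketch, assuming the default `alloc` feature so that `PodBuffer` is
/// available:
///
/// ```
/// use dyn_stack::{PodBuffer, PodStack, StackReq};
///
/// let mut buf = PodBuffer::new(StackReq::new::<u64>(2));
/// let stack = PodStack::new(&mut buf);
///
/// // The bytes are initialized (POD), but their values are unspecified until written.
/// let (slice, _remainder) = stack.make_raw::<u64>(2);
/// slice.copy_from_slice(&[1, 2]);
/// assert_eq!(slice[0], 1);
/// assert_eq!(slice[1], 2);
/// ```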
853 | #[track_caller] 854 | #[inline] 855 | #[must_use] 856 | pub fn make_raw(&mut self, size: usize) -> (&mut [T], &mut Self) { 857 | self.make_aligned_raw(size, core::mem::align_of::()) 858 | } 859 | 860 | /// Returns a new slice, initialized with the provided function, and a stack over the 861 | /// remainder of the buffer. 862 | /// 863 | /// # Panics 864 | /// 865 | /// Panics if the stack isn't large enough to allocate the array, or if the provided function 866 | /// panics. 867 | #[track_caller] 868 | #[inline] 869 | #[must_use] 870 | pub fn make_with( 871 | &mut self, 872 | size: usize, 873 | f: impl FnMut(usize) -> T, 874 | ) -> (&mut [T], &mut Self) { 875 | self.make_aligned_with(size, core::mem::align_of::(), f) 876 | } 877 | 878 | /// Returns a new aligned slice, initialized with the provided iterator, and a stack 879 | /// over the remainder of the buffer. 880 | /// If there isn't enough space for all the iterator items, then the returned array only 881 | /// contains the first elements that fit into the stack. 882 | /// 883 | /// # Panics 884 | /// 885 | /// Panics if the provided iterator panics. 886 | #[track_caller] 887 | #[inline] 888 | #[must_use] 889 | pub fn collect_aligned( 890 | &mut self, 891 | align: usize, 892 | iter: impl IntoIterator, 893 | ) -> (&mut [I], &mut Self) { 894 | self.collect_aligned_impl(align, iter.into_iter()) 895 | } 896 | 897 | /// Returns a new slice, initialized with the provided iterator, and a stack over the 898 | /// remainder of the buffer. 899 | /// If there isn't enough space for all the iterator items, then the returned array only 900 | /// contains the first elements that fit into the stack. 901 | /// 902 | /// # Panics 903 | /// 904 | /// Panics if the provided iterator panics. 905 | #[track_caller] 906 | #[inline] 907 | #[must_use] 908 | pub fn collect(&mut self, iter: impl IntoIterator) -> (&mut [I], &mut Self) { 909 | self.collect_aligned_impl(core::mem::align_of::(), iter.into_iter()) 910 | } 911 | 912 | #[track_caller] 913 | #[inline] 914 | fn collect_aligned_impl( 915 | &mut self, 916 | align: usize, 917 | iter: I, 918 | ) -> (&mut [I::Item], &mut Self) 919 | where 920 | I::Item: Pod, 921 | { 922 | let sizeof_val = core::mem::size_of::(); 923 | let alignof_val = core::mem::align_of::(); 924 | let align_offset = self.buffer.as_mut_ptr().align_offset(align); 925 | 926 | check_alignment(align, alignof_val, core::any::type_name::()); 927 | check_enough_space_for_align_offset(self.buffer.len(), align, align_offset); 928 | 929 | let buffer = unsafe { self.buffer.get_unchecked_mut(align_offset..) 
}; 930 | let buffer_len = buffer.len(); 931 | let buffer_ptr = buffer.as_mut_ptr(); 932 | unsafe { 933 | let len = init_pod_array_with_iter( 934 | iter, 935 | slice::from_raw_parts_mut( 936 | buffer_ptr as *mut I::Item, 937 | if sizeof_val == 0 { 938 | usize::MAX 939 | } else { 940 | buffer_len / sizeof_val 941 | }, 942 | ), 943 | ); 944 | 945 | let taken = slice::from_raw_parts_mut(buffer_ptr as *mut I::Item, len); 946 | let remaining_slice = slice::from_raw_parts_mut( 947 | buffer_ptr.add(len * sizeof_val), 948 | buffer_len - len * sizeof_val, 949 | ); 950 | (taken, Self::new(remaining_slice)) 951 | } 952 | } 953 | } 954 | 955 | #[cfg(all(test, feature = "alloc"))] 956 | mod dyn_stack_tests { 957 | use super::*; 958 | use alloc::Global; 959 | 960 | #[test] 961 | fn empty_in() { 962 | let mut buf = MemBuffer::new_in(StackReq::new::(0), Global); 963 | let stack = MemStack::new(&mut buf); 964 | let (_arr0, _stack) = stack.make_with::(0, |i| i as i32); 965 | } 966 | 967 | #[test] 968 | #[should_panic] 969 | fn empty_overflow_in() { 970 | let mut buf = MemBuffer::new_in(StackReq::new::(0), Global); 971 | let stack = MemStack::new(&mut buf); 972 | let (_arr0, _stack) = stack.make_with::(1, |i| i as i32); 973 | } 974 | 975 | #[test] 976 | fn empty_collect_in() { 977 | let mut buf = MemBuffer::new_in(StackReq::new::(0), Global); 978 | let stack = MemStack::new(&mut buf); 979 | let (_arr0, _stack) = stack.collect(0..0); 980 | } 981 | 982 | #[test] 983 | fn empty_collect_overflow_in() { 984 | let mut buf = MemBuffer::new_in(StackReq::new::(0), Global); 985 | let stack = MemStack::new(&mut buf); 986 | let (arr0, _stack) = stack.collect(0..1); 987 | assert!(arr0.is_empty()); 988 | } 989 | 990 | #[test] 991 | #[should_panic] 992 | fn overflow_in() { 993 | let mut buf = MemBuffer::new_in(StackReq::new::(1), Global); 994 | let stack = MemStack::new(&mut buf); 995 | let (_arr0, _stack) = stack.make_with::(2, |i| i as i32); 996 | } 997 | 998 | #[test] 999 | fn collect_overflow_in() { 1000 | let mut buf = MemBuffer::new_in(StackReq::new::(1), Global); 1001 | let stack = MemStack::new(&mut buf); 1002 | let (arr0, _stack) = stack.collect(1..3); 1003 | assert_eq!(arr0.len(), 1); 1004 | assert_eq!(arr0[0], 1) 1005 | } 1006 | 1007 | #[test] 1008 | fn empty() { 1009 | let mut buf = MemBuffer::new(StackReq::new::(0)); 1010 | let stack = MemStack::new(&mut buf); 1011 | let (_arr0, _stack) = stack.make_with::(0, |i| i as i32); 1012 | } 1013 | 1014 | #[test] 1015 | #[should_panic] 1016 | fn empty_overflow() { 1017 | let mut buf = MemBuffer::new(StackReq::new::(0)); 1018 | let stack = MemStack::new(&mut buf); 1019 | let (_arr0, _stack) = stack.make_with::(1, |i| i as i32); 1020 | } 1021 | 1022 | #[test] 1023 | fn empty_collect() { 1024 | let mut buf = MemBuffer::new(StackReq::new::(0)); 1025 | let stack = MemStack::new(&mut buf); 1026 | let (_arr0, _stack) = stack.collect(0..0); 1027 | } 1028 | 1029 | #[test] 1030 | fn empty_collect_overflow() { 1031 | let mut buf = MemBuffer::new(StackReq::new::(0)); 1032 | let stack = MemStack::new(&mut buf); 1033 | let (arr0, _stack) = stack.collect(0..1); 1034 | assert!(arr0.is_empty()); 1035 | } 1036 | 1037 | #[test] 1038 | #[should_panic] 1039 | fn overflow() { 1040 | let mut buf = MemBuffer::new(StackReq::new::(1)); 1041 | let stack = MemStack::new(&mut buf); 1042 | let (_arr0, _stack) = stack.make_with::(2, |i| i as i32); 1043 | } 1044 | 1045 | #[test] 1046 | fn collect_overflow() { 1047 | let mut buf = MemBuffer::new(StackReq::new::(1)); 1048 | let stack = 
MemStack::new(&mut buf); 1049 | let (arr0, _stack) = stack.collect(1..3); 1050 | assert_eq!(arr0.len(), 1); 1051 | assert_eq!(arr0[0], 1) 1052 | } 1053 | 1054 | #[test] 1055 | fn basic_nested() { 1056 | let mut buf = MemBuffer::new(StackReq::new::(6)); 1057 | 1058 | let stack = MemStack::new(&mut buf); 1059 | assert!(stack.can_hold(StackReq::new::(6))); 1060 | assert!(!stack.can_hold(StackReq::new::(7))); 1061 | 1062 | let (arr0, stack) = stack.make_with::(3, |i| i as i32); 1063 | assert_eq!(arr0[0], 0); 1064 | assert_eq!(arr0[1], 1); 1065 | assert_eq!(arr0[2], 2); 1066 | 1067 | let (arr1, _) = stack.make_with::(3, |i| i as i32 + 3); 1068 | 1069 | // first values are untouched 1070 | assert_eq!(arr0[0], 0); 1071 | assert_eq!(arr0[1], 1); 1072 | assert_eq!(arr0[2], 2); 1073 | 1074 | assert_eq!(arr1[0], 3); 1075 | assert_eq!(arr1[1], 4); 1076 | assert_eq!(arr1[2], 5); 1077 | } 1078 | 1079 | #[test] 1080 | fn basic_disjoint() { 1081 | let mut buf = MemBuffer::new(StackReq::new::(3)); 1082 | 1083 | let stack = MemStack::new(&mut buf); 1084 | 1085 | { 1086 | let (arr0, _) = stack.make_with::(3, |i| i as i32); 1087 | assert_eq!(arr0[0], 0); 1088 | assert_eq!(arr0[1], 1); 1089 | assert_eq!(arr0[2], 2); 1090 | } 1091 | { 1092 | let (arr1, _) = stack.make_with::(3, |i| i as i32 + 3); 1093 | 1094 | assert_eq!(arr1[0], 3); 1095 | assert_eq!(arr1[1], 4); 1096 | assert_eq!(arr1[2], 5); 1097 | } 1098 | } 1099 | 1100 | #[test] 1101 | fn basic_nested_collect() { 1102 | let mut buf = MemBuffer::new(StackReq::new::(6)); 1103 | let stack = MemStack::new(&mut buf); 1104 | 1105 | let (arr0, stack) = stack.collect(0..3_i32); 1106 | assert_eq!(arr0[0], 0); 1107 | assert_eq!(arr0[1], 1); 1108 | assert_eq!(arr0[2], 2); 1109 | 1110 | let (arr1, _) = stack.collect(3..6_i32); 1111 | 1112 | // first values are untouched 1113 | assert_eq!(arr0[0], 0); 1114 | assert_eq!(arr0[1], 1); 1115 | assert_eq!(arr0[2], 2); 1116 | 1117 | assert_eq!(arr1[0], 3); 1118 | assert_eq!(arr1[1], 4); 1119 | assert_eq!(arr1[2], 5); 1120 | } 1121 | 1122 | #[test] 1123 | fn basic_disjoint_collect() { 1124 | let mut buf = MemBuffer::new(StackReq::new::(3)); 1125 | 1126 | let stack = MemStack::new(&mut buf); 1127 | 1128 | { 1129 | let (arr0, _) = stack.collect(0..3_i32); 1130 | assert_eq!(arr0[0], 0); 1131 | assert_eq!(arr0[1], 1); 1132 | assert_eq!(arr0[2], 2); 1133 | } 1134 | { 1135 | let (arr1, _) = stack.collect(3..6_i32); 1136 | 1137 | assert_eq!(arr1[0], 3); 1138 | assert_eq!(arr1[1], 4); 1139 | assert_eq!(arr1[2], 5); 1140 | } 1141 | } 1142 | 1143 | #[test] 1144 | fn drop_nested() { 1145 | use core::sync::atomic::{AtomicI32, Ordering}; 1146 | static DROP_COUNT: AtomicI32 = AtomicI32::new(0); 1147 | 1148 | struct CountedDrop; 1149 | impl Drop for CountedDrop { 1150 | fn drop(&mut self) { 1151 | DROP_COUNT.fetch_add(1, Ordering::SeqCst); 1152 | } 1153 | } 1154 | 1155 | let mut buf = MemBuffer::new(StackReq::new::(6)); 1156 | let stack = MemStack::new(&mut buf); 1157 | 1158 | let stack = { 1159 | let (_arr, stack) = stack.make_with(3, |_| CountedDrop); 1160 | stack 1161 | }; 1162 | assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 3); 1163 | let _stack = { 1164 | let (_arr, stack) = stack.make_with(4, |_| CountedDrop); 1165 | stack 1166 | }; 1167 | assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 7); 1168 | } 1169 | 1170 | #[test] 1171 | fn drop_disjoint() { 1172 | use core::sync::atomic::{AtomicI32, Ordering}; 1173 | static DROP_COUNT: AtomicI32 = AtomicI32::new(0); 1174 | 1175 | struct CountedDrop; 1176 | impl Drop for CountedDrop { 1177 | fn 
drop(&mut self) { 1178 | DROP_COUNT.fetch_add(1, Ordering::SeqCst); 1179 | } 1180 | } 1181 | 1182 | let mut buf = MemBuffer::new(StackReq::new::(6)); 1183 | let stack = MemStack::new(&mut buf); 1184 | 1185 | { 1186 | let _ = stack.make_with(3, |_| CountedDrop); 1187 | assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 3); 1188 | } 1189 | 1190 | { 1191 | let _ = stack.make_with(4, |_| CountedDrop); 1192 | assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 7); 1193 | } 1194 | } 1195 | } 1196 | 1197 | #[cfg(all(test, feature = "alloc"))] 1198 | mod pod_stack_tests { 1199 | use super::*; 1200 | 1201 | #[test] 1202 | fn empty() { 1203 | let mut buf = PodBuffer::new(StackReq::new::(0)); 1204 | let stack = PodStack::new(&mut buf); 1205 | let (_arr0, _stack) = stack.make_with::(0, |i| i as i32); 1206 | } 1207 | 1208 | #[test] 1209 | #[should_panic] 1210 | fn empty_overflow() { 1211 | let mut buf = PodBuffer::new(StackReq::new::(0)); 1212 | let stack = PodStack::new(&mut buf); 1213 | let (_arr0, _stack) = stack.make_with::(1, |i| i as i32); 1214 | } 1215 | 1216 | #[test] 1217 | fn empty_collect() { 1218 | let mut buf = PodBuffer::new(StackReq::new::(0)); 1219 | let stack = PodStack::new(&mut buf); 1220 | let (_arr0, _stack) = stack.collect(0..0); 1221 | } 1222 | 1223 | #[test] 1224 | fn empty_collect_overflow() { 1225 | let mut buf = PodBuffer::new(StackReq::new::(0)); 1226 | let stack = PodStack::new(&mut buf); 1227 | let (arr0, _stack) = stack.collect(0..1); 1228 | assert!(arr0.is_empty()); 1229 | } 1230 | 1231 | #[test] 1232 | #[should_panic] 1233 | fn overflow() { 1234 | let mut buf = PodBuffer::new(StackReq::new::(1)); 1235 | let stack = PodStack::new(&mut buf); 1236 | let (_arr0, _stack) = stack.make_with::(2, |i| i as i32); 1237 | } 1238 | 1239 | #[test] 1240 | fn collect_overflow() { 1241 | let mut buf = PodBuffer::new(StackReq::new::(1)); 1242 | let stack = PodStack::new(&mut buf); 1243 | let (arr0, _stack) = stack.collect(1..3); 1244 | assert_eq!(arr0.len(), 1); 1245 | assert_eq!(arr0[0], 1) 1246 | } 1247 | 1248 | #[test] 1249 | fn basic_nested() { 1250 | let mut buf = PodBuffer::new(StackReq::new::(6)); 1251 | 1252 | let stack = PodStack::new(&mut buf); 1253 | assert!(stack.can_hold(StackReq::new::(6))); 1254 | assert!(!stack.can_hold(StackReq::new::(7))); 1255 | 1256 | let (arr0, stack) = stack.make_with::(3, |i| i as i32); 1257 | assert_eq!(arr0[0], 0); 1258 | assert_eq!(arr0[1], 1); 1259 | assert_eq!(arr0[2], 2); 1260 | 1261 | let (arr1, _) = stack.make_with::(3, |i| i as i32 + 3); 1262 | 1263 | // first values are untouched 1264 | assert_eq!(arr0[0], 0); 1265 | assert_eq!(arr0[1], 1); 1266 | assert_eq!(arr0[2], 2); 1267 | 1268 | assert_eq!(arr1[0], 3); 1269 | assert_eq!(arr1[1], 4); 1270 | assert_eq!(arr1[2], 5); 1271 | } 1272 | 1273 | #[test] 1274 | fn basic_disjoint() { 1275 | let mut buf = PodBuffer::new(StackReq::new::(3)); 1276 | 1277 | let stack = PodStack::new(&mut buf); 1278 | 1279 | { 1280 | let (arr0, _) = stack.make_with::(3, |i| i as i32); 1281 | assert_eq!(arr0[0], 0); 1282 | assert_eq!(arr0[1], 1); 1283 | assert_eq!(arr0[2], 2); 1284 | } 1285 | { 1286 | let (arr1, _) = stack.make_with::(3, |i| i as i32 + 3); 1287 | 1288 | assert_eq!(arr1[0], 3); 1289 | assert_eq!(arr1[1], 4); 1290 | assert_eq!(arr1[2], 5); 1291 | } 1292 | } 1293 | 1294 | #[test] 1295 | fn basic_nested_collect() { 1296 | let mut buf = PodBuffer::new(StackReq::new::(6)); 1297 | let stack = PodStack::new(&mut buf); 1298 | 1299 | let (arr0, stack) = stack.collect(0..3_i32); 1300 | assert_eq!(arr0[0], 0); 1301 | 
assert_eq!(arr0[1], 1); 1302 | assert_eq!(arr0[2], 2); 1303 | 1304 | let (arr1, _) = stack.collect(3..6_i32); 1305 | 1306 | // first values are untouched 1307 | assert_eq!(arr0[0], 0); 1308 | assert_eq!(arr0[1], 1); 1309 | assert_eq!(arr0[2], 2); 1310 | 1311 | assert_eq!(arr1[0], 3); 1312 | assert_eq!(arr1[1], 4); 1313 | assert_eq!(arr1[2], 5); 1314 | } 1315 | 1316 | #[test] 1317 | fn basic_disjoint_collect() { 1318 | let mut buf = PodBuffer::new(StackReq::new::(3)); 1319 | 1320 | let stack = PodStack::new(&mut buf); 1321 | 1322 | { 1323 | let (arr0, _) = stack.collect(0..3_i32); 1324 | assert_eq!(arr0[0], 0); 1325 | assert_eq!(arr0[1], 1); 1326 | assert_eq!(arr0[2], 2); 1327 | } 1328 | { 1329 | let (arr1, _) = stack.collect(3..6_i32); 1330 | 1331 | assert_eq!(arr1[0], 3); 1332 | assert_eq!(arr1[1], 4); 1333 | assert_eq!(arr1[2], 5); 1334 | } 1335 | } 1336 | 1337 | #[test] 1338 | fn make_raw() { 1339 | let mut buf = PodBuffer::new(StackReq::new::(3)); 1340 | buf.fill(0); 1341 | 1342 | let stack = PodStack::new(&mut buf); 1343 | 1344 | { 1345 | let (arr0, _) = stack.make_raw::(3); 1346 | assert_eq!(arr0[0], 0); 1347 | assert_eq!(arr0[1], 0); 1348 | assert_eq!(arr0[2], 0); 1349 | } 1350 | { 1351 | let (arr0, _) = stack.collect(0..3_i32); 1352 | assert_eq!(arr0[0], 0); 1353 | assert_eq!(arr0[1], 1); 1354 | assert_eq!(arr0[2], 2); 1355 | } 1356 | { 1357 | let (arr1, _) = stack.make_raw::(3); 1358 | 1359 | assert_eq!(arr1[0], 0); 1360 | assert_eq!(arr1[1], 1); 1361 | assert_eq!(arr1[2], 2); 1362 | } 1363 | } 1364 | 1365 | #[test] 1366 | fn make_unpod() { 1367 | let mut buf = PodBuffer::new(StackReq::new::(3)); 1368 | let stack = PodStack::new(&mut buf); 1369 | 1370 | { 1371 | let (mut stack, _) = unsafe { stack.make_aligned_unpod(12, 4) }; 1372 | 1373 | let stack = &mut *stack; 1374 | let (mem, _) = stack.make_uninit::(3); 1375 | mem.fill(MaybeUninit::uninit()); 1376 | 1377 | let mut stack = stack; 1378 | let mut buf = MemBuffer::new_in( 1379 | StackReq::new::(3), 1380 | alloc::DynAlloc::from_mut(stack.bump()), 1381 | ); 1382 | let stack = MemStack::new(&mut buf); 1383 | let _ = stack.make_uninit::(3); 1384 | } 1385 | 1386 | let (mem, _) = stack.make_raw::(3); 1387 | for x in mem { 1388 | *x = *x; 1389 | } 1390 | } 1391 | } 1392 | -------------------------------------------------------------------------------- /src/mem.rs: -------------------------------------------------------------------------------- 1 | use crate::stack_req::StackReq; 2 | use alloc::alloc::handle_alloc_error; 3 | use core::alloc::Layout; 4 | use core::mem::ManuallyDrop; 5 | use core::mem::MaybeUninit; 6 | use core::ptr::NonNull; 7 | 8 | use crate::alloc::*; 9 | extern crate alloc; 10 | 11 | impl core::fmt::Display for AllocError { 12 | fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { 13 | fmt.write_str("memory allocation failed") 14 | } 15 | } 16 | 17 | #[cfg(any(feature = "std", feature = "core-error"))] 18 | impl crate::Error for AllocError {} 19 | 20 | use super::*; 21 | 22 | #[inline] 23 | fn to_layout(req: StackReq) -> Result { 24 | req.layout().ok().ok_or(AllocError) 25 | } 26 | 27 | #[cfg(feature = "alloc")] 28 | #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))] 29 | impl MemBuffer { 30 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 31 | /// global allocator. 32 | /// 33 | /// Calls [`alloc::alloc::handle_alloc_error`] in the case of failure. 
34 | /// 35 | /// # Example 36 | /// ``` 37 | /// use dyn_stack::{MemStack, StackReq, MemBuffer}; 38 | /// 39 | /// let req = StackReq::new::(3); 40 | /// let mut buf = MemBuffer::new(req); 41 | /// let stack = MemStack::new(&mut buf); 42 | /// 43 | /// // use the stack 44 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 45 | /// ``` 46 | pub fn new(req: StackReq) -> Self { 47 | Self::new_in(req, Global) 48 | } 49 | 50 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 51 | /// global allocator, or an error if the allocation did not succeed. 52 | /// 53 | /// # Example 54 | /// ``` 55 | /// use dyn_stack::{MemStack, StackReq, MemBuffer}; 56 | /// 57 | /// let req = StackReq::new::(3); 58 | /// let mut buf = MemBuffer::new(req); 59 | /// let stack = MemStack::new(&mut buf); 60 | /// 61 | /// // use the stack 62 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 63 | /// ``` 64 | pub fn try_new(req: StackReq) -> Result { 65 | Self::try_new_in(req, Global) 66 | } 67 | 68 | /// Creates a `MemBuffer` from its raw components. 69 | /// 70 | /// # Safety 71 | /// 72 | /// The arguments to this function must have been acquired from a call to 73 | /// [`MemBuffer::into_raw_parts`] 74 | #[inline] 75 | pub unsafe fn from_raw_parts(ptr: *mut u8, len: usize, align: usize) -> Self { 76 | Self { 77 | ptr: NonNull::new_unchecked(ptr), 78 | len, 79 | align, 80 | alloc: Global, 81 | } 82 | } 83 | 84 | /// Decomposes a `MemBuffer` into its raw components in this order: ptr, length and 85 | /// alignment. 86 | #[inline] 87 | pub fn into_raw_parts(self) -> (*mut u8, usize, usize) { 88 | let no_drop = ManuallyDrop::new(self); 89 | (no_drop.ptr.as_ptr(), no_drop.len, no_drop.align) 90 | } 91 | } 92 | 93 | #[cfg(feature = "alloc")] 94 | #[cfg_attr(docsrs, doc(cfg(feature = "alloc")))] 95 | impl PodBuffer { 96 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 97 | /// global allocator. 98 | /// 99 | /// Calls [`alloc::alloc::handle_alloc_error`] in the case of failure. 100 | /// 101 | /// # Example 102 | /// ``` 103 | /// use dyn_stack::{PodStack, StackReq, PodBuffer}; 104 | /// 105 | /// let req = StackReq::new::(3); 106 | /// let mut buf = PodBuffer::new(req); 107 | /// let stack = PodStack::new(&mut buf); 108 | /// 109 | /// // use the stack 110 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 111 | /// ``` 112 | pub fn new(req: StackReq) -> Self { 113 | Self::new_in(req, Global) 114 | } 115 | 116 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 117 | /// global allocator, or an error if the allocation did not succeed. 118 | /// 119 | /// # Example 120 | /// ``` 121 | /// use dyn_stack::{PodStack, StackReq, PodBuffer}; 122 | /// 123 | /// let req = StackReq::new::(3); 124 | /// let mut buf = PodBuffer::new(req); 125 | /// let stack = PodStack::new(&mut buf); 126 | /// 127 | /// // use the stack 128 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 129 | /// ``` 130 | pub fn try_new(req: StackReq) -> Result { 131 | Self::try_new_in(req, Global) 132 | } 133 | 134 | /// Creates a `PodBuffer` from its raw components. 
135 | /// 136 | /// # Safety 137 | /// 138 | /// The arguments to this function must have been acquired from a call to 139 | /// [`PodBuffer::into_raw_parts`] 140 | #[inline] 141 | pub unsafe fn from_raw_parts(ptr: *mut u8, len: usize, align: usize) -> Self { 142 | Self { 143 | ptr: NonNull::new_unchecked(ptr), 144 | len, 145 | align, 146 | alloc: Global, 147 | } 148 | } 149 | 150 | /// Decomposes a `PodBuffer` into its raw components in this order: ptr, length and 151 | /// alignment. 152 | #[inline] 153 | pub fn into_raw_parts(self) -> (*mut u8, usize, usize) { 154 | let no_drop = ManuallyDrop::new(self); 155 | (no_drop.ptr.as_ptr(), no_drop.len, no_drop.align) 156 | } 157 | } 158 | 159 | #[cfg(feature = "alloc")] 160 | /// Buffer of uninitialized bytes to serve as workspace for dynamic arrays. 161 | pub struct MemBuffer { 162 | ptr: NonNull, 163 | len: usize, 164 | align: usize, 165 | alloc: A, 166 | } 167 | 168 | #[cfg(feature = "alloc")] 169 | /// Buffer of initialized bytes to serve as workspace for dynamic arrays. 170 | pub struct PodBuffer { 171 | ptr: NonNull, 172 | len: usize, 173 | align: usize, 174 | alloc: A, 175 | } 176 | 177 | #[cfg(not(feature = "alloc"))] 178 | /// Buffer of uninitialized bytes to serve as workspace for dynamic arrays. 179 | pub struct MemBuffer { 180 | ptr: NonNull, 181 | len: usize, 182 | align: usize, 183 | alloc: A, 184 | } 185 | 186 | #[cfg(not(feature = "alloc"))] 187 | /// Buffer of initialized bytes to serve as workspace for dynamic arrays. 188 | pub struct PodBuffer { 189 | ptr: NonNull, 190 | len: usize, 191 | align: usize, 192 | alloc: A, 193 | } 194 | 195 | unsafe impl Sync for MemBuffer {} 196 | unsafe impl Send for MemBuffer {} 197 | 198 | unsafe impl Sync for PodBuffer {} 199 | unsafe impl Send for PodBuffer {} 200 | 201 | impl Drop for MemBuffer { 202 | #[inline] 203 | fn drop(&mut self) { 204 | // SAFETY: this was initialized with std::alloc::alloc 205 | unsafe { 206 | self.alloc.deallocate( 207 | self.ptr, 208 | Layout::from_size_align_unchecked(self.len, self.align), 209 | ) 210 | } 211 | } 212 | } 213 | 214 | impl Drop for PodBuffer { 215 | #[inline] 216 | fn drop(&mut self) { 217 | // SAFETY: this was initialized with std::alloc::alloc 218 | unsafe { 219 | self.alloc.deallocate( 220 | self.ptr, 221 | Layout::from_size_align_unchecked(self.len, self.align), 222 | ) 223 | } 224 | } 225 | } 226 | 227 | impl PodBuffer { 228 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 229 | /// provided allocator. 230 | /// 231 | /// Calls [`alloc::alloc::handle_alloc_error`] in the case of failure. 232 | /// 233 | /// # Example 234 | /// ``` 235 | /// use dyn_stack::{PodStack, StackReq, PodBuffer}; 236 | /// use dyn_stack::alloc::Global; 237 | /// 238 | /// let req = StackReq::new::(3); 239 | /// let mut buf = PodBuffer::new_in(req, Global); 240 | /// let stack = PodStack::new(&mut buf); 241 | /// 242 | /// // use the stack 243 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 244 | /// ``` 245 | pub fn new_in(req: StackReq, alloc: A) -> Self { 246 | Self::try_new_in(req, alloc).unwrap_or_else(|_| handle_alloc_error(to_layout(req).unwrap())) 247 | } 248 | 249 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 250 | /// provided allocator, or an `AllocError` in the case of failure. 
251 | /// 252 | /// # Example 253 | /// ``` 254 | /// use dyn_stack::{PodStack, StackReq, PodBuffer}; 255 | /// use dyn_stack::alloc::Global; 256 | /// 257 | /// let req = StackReq::new::(3); 258 | /// let mut buf = PodBuffer::new_in(req, Global); 259 | /// let stack = PodStack::new(&mut buf); 260 | /// 261 | /// // use the stack 262 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 263 | /// ``` 264 | pub fn try_new_in(req: StackReq, alloc: A) -> Result { 265 | unsafe { 266 | let ptr = &mut *(alloc 267 | .allocate_zeroed(to_layout(req)?) 268 | .map_err(|_| AllocError)? 269 | .as_ptr() as *mut [MaybeUninit]); 270 | #[cfg(debug_assertions)] 271 | ptr.fill(MaybeUninit::new(0xCD)); 272 | 273 | let len = ptr.len(); 274 | let ptr = NonNull::new_unchecked(ptr.as_mut_ptr() as *mut u8); 275 | Ok(PodBuffer { 276 | alloc, 277 | ptr, 278 | len, 279 | align: req.align_bytes(), 280 | }) 281 | } 282 | } 283 | 284 | /// Creates a `PodBuffer` from its raw components. 285 | /// 286 | /// # Safety 287 | /// 288 | /// The arguments to this function must have been acquired from a call to 289 | /// [`PodBuffer::into_raw_parts`] 290 | #[inline] 291 | pub unsafe fn from_raw_parts_in(ptr: *mut u8, len: usize, align: usize, alloc: A) -> Self { 292 | Self { 293 | ptr: NonNull::new_unchecked(ptr), 294 | len, 295 | align, 296 | alloc, 297 | } 298 | } 299 | 300 | /// Decomposes a `PodBuffer` into its raw components in this order: ptr, length and 301 | /// alignment. 302 | #[inline] 303 | pub fn into_raw_parts_with_alloc(self) -> (*mut u8, usize, usize, A) { 304 | let me = ManuallyDrop::new(self); 305 | (me.ptr.as_ptr(), me.len, me.align, unsafe { 306 | core::ptr::read(core::ptr::addr_of!(me.alloc)) 307 | }) 308 | } 309 | } 310 | 311 | impl MemBuffer { 312 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 313 | /// provided allocator. 314 | /// 315 | /// Calls [`alloc::alloc::handle_alloc_error`] in the case of failure. 316 | /// 317 | /// # Example 318 | /// ``` 319 | /// use dyn_stack::{MemStack, StackReq, MemBuffer}; 320 | /// use dyn_stack::alloc::Global; 321 | /// 322 | /// let req = StackReq::new::(3); 323 | /// let mut buf = MemBuffer::new_in(req, Global); 324 | /// let stack = MemStack::new(&mut buf); 325 | /// 326 | /// // use the stack 327 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 328 | /// ``` 329 | pub fn new_in(req: StackReq, alloc: A) -> Self { 330 | Self::try_new_in(req, alloc).unwrap_or_else(|_| handle_alloc_error(to_layout(req).unwrap())) 331 | } 332 | 333 | /// Allocate a memory buffer with sufficient storage for the given stack requirements, using the 334 | /// provided allocator, or an `AllocError` in the case of failure. 335 | /// 336 | /// # Example 337 | /// ``` 338 | /// use dyn_stack::{MemStack, StackReq, MemBuffer}; 339 | /// use dyn_stack::alloc::Global; 340 | /// 341 | /// let req = StackReq::new::(3); 342 | /// let mut buf = MemBuffer::new_in(req, Global); 343 | /// let stack = MemStack::new(&mut buf); 344 | /// 345 | /// // use the stack 346 | /// let (arr, _) = stack.make_with::(3, |i| i as i32); 347 | /// ``` 348 | pub fn try_new_in(req: StackReq, alloc: A) -> Result { 349 | unsafe { 350 | let ptr = &mut *(alloc 351 | .allocate(to_layout(req)?) 352 | .map_err(|_| AllocError)? 
353 | .as_ptr() as *mut [MaybeUninit]); 354 | let len = ptr.len(); 355 | let ptr = NonNull::new_unchecked(ptr.as_mut_ptr() as *mut u8); 356 | Ok(MemBuffer { 357 | alloc, 358 | ptr, 359 | len, 360 | align: req.align_bytes(), 361 | }) 362 | } 363 | } 364 | 365 | /// Creates a `MemBuffer` from its raw components. 366 | /// 367 | /// # Safety 368 | /// 369 | /// The arguments to this function must have been acquired from a call to 370 | /// [`MemBuffer::into_raw_parts`] 371 | #[inline] 372 | pub unsafe fn from_raw_parts_in(ptr: *mut u8, len: usize, align: usize, alloc: A) -> Self { 373 | Self { 374 | ptr: NonNull::new_unchecked(ptr), 375 | len, 376 | align, 377 | alloc, 378 | } 379 | } 380 | 381 | /// Decomposes a `MemBuffer` into its raw components in this order: ptr, length and 382 | /// alignment. 383 | #[inline] 384 | pub fn into_raw_parts_with_alloc(self) -> (*mut u8, usize, usize, A) { 385 | let me = ManuallyDrop::new(self); 386 | (me.ptr.as_ptr(), me.len, me.align, unsafe { 387 | core::ptr::read(core::ptr::addr_of!(me.alloc)) 388 | }) 389 | } 390 | } 391 | 392 | impl core::ops::Deref for MemBuffer { 393 | type Target = [MaybeUninit]; 394 | 395 | #[inline] 396 | fn deref(&self) -> &Self::Target { 397 | unsafe { 398 | core::slice::from_raw_parts(self.ptr.as_ptr() as *const MaybeUninit, self.len) 399 | } 400 | } 401 | } 402 | 403 | impl core::ops::DerefMut for MemBuffer { 404 | #[inline] 405 | fn deref_mut(&mut self) -> &mut Self::Target { 406 | unsafe { 407 | core::slice::from_raw_parts_mut(self.ptr.as_ptr() as *mut MaybeUninit, self.len) 408 | } 409 | } 410 | } 411 | 412 | impl core::ops::Deref for PodBuffer { 413 | type Target = [u8]; 414 | 415 | #[inline] 416 | fn deref(&self) -> &Self::Target { 417 | unsafe { core::slice::from_raw_parts(self.ptr.as_ptr(), self.len) } 418 | } 419 | } 420 | 421 | impl core::ops::DerefMut for PodBuffer { 422 | #[inline] 423 | fn deref_mut(&mut self) -> &mut Self::Target { 424 | unsafe { core::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } 425 | } 426 | } 427 | 428 | /// Error during memory allocation. 429 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 430 | pub struct AllocError; 431 | -------------------------------------------------------------------------------- /src/stack_req.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | alloc::{Layout, LayoutError}, 3 | num::NonZeroUsize, 4 | }; 5 | 6 | /// Stack allocation requirements. 7 | #[derive(Debug, Clone, Copy, Eq, PartialEq)] 8 | pub struct StackReq { 9 | align: Option, 10 | size: usize, 11 | } 12 | 13 | impl Default for StackReq { 14 | #[inline] 15 | fn default() -> Self { 16 | Self::empty() 17 | } 18 | } 19 | 20 | #[inline(always)] 21 | const fn try_round_up_pow2(a: usize, b: usize) -> Option { 22 | match a.checked_add(!b.wrapping_neg()) { 23 | None => None, 24 | Some(x) => Some(x & b.wrapping_neg()), 25 | } 26 | } 27 | 28 | #[inline(always)] 29 | const fn max(a: usize, b: usize) -> usize { 30 | if a > b { 31 | a 32 | } else { 33 | b 34 | } 35 | } 36 | 37 | impl StackReq { 38 | /// Allocation requirements for an empty unaligned buffer. 39 | pub const EMPTY: Self = Self { 40 | align: unsafe { Some(NonZeroUsize::new_unchecked(1)) }, 41 | size: 0, 42 | }; 43 | 44 | /// Unsatisfiable allocation requirements. 45 | pub const OVERFLOW: Self = Self { 46 | align: None, 47 | size: 0, 48 | }; 49 | 50 | /// Allocation requirements for an empty unaligned buffer. 
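///
/// A small illustrative check: `empty()` acts as the identity when combining
/// requirements with [`StackReq::and`].
///
/// ```
/// use dyn_stack::StackReq;
///
/// let req = StackReq::new::<i32>(4);
/// assert_eq!(StackReq::empty().and(req), req);
/// ```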
51 | #[inline] 52 | pub const fn empty() -> StackReq { 53 | Self::EMPTY 54 | } 55 | 56 | /// Allocation requirements sufficient for `n` elements of type `T`, overaligned with alignment 57 | /// `align`. 58 | /// 59 | /// # Errors 60 | /// 61 | /// * if `align` is smaller than the minimum required alignment for an object of type `T`. 62 | /// * if `align` is not a power of two. 63 | /// * if the size computation overflows 64 | #[inline] 65 | pub const fn new_aligned(n: usize, align: usize) -> StackReq { 66 | if align >= core::mem::align_of::() && align.is_power_of_two() { 67 | StackReq { 68 | align: unsafe { Some(NonZeroUsize::new_unchecked(align)) }, 69 | size: core::mem::size_of::(), 70 | } 71 | .array(n) 72 | } else { 73 | StackReq { 74 | align: None, 75 | size: 0, 76 | } 77 | } 78 | } 79 | 80 | /// Allocation requirements sufficient for `n` elements of type `T`. 81 | /// 82 | /// # Errors 83 | /// 84 | /// * if the size computation overflows 85 | #[inline] 86 | pub const fn new(n: usize) -> StackReq { 87 | StackReq::new_aligned::(n, core::mem::align_of::()) 88 | } 89 | 90 | /// The number of allocated bytes required, aligned to `self.align_bytes()`. 91 | #[inline] 92 | pub const fn size_bytes(&self) -> usize { 93 | self.size 94 | } 95 | 96 | /// The alignment of allocated bytes required, or `0` if the size overflowed. 97 | #[inline] 98 | pub const fn align_bytes(&self) -> usize { 99 | match self.align { 100 | Some(align) => align.get(), 101 | None => 0, 102 | } 103 | } 104 | 105 | /// The number of allocated bytes required, with no alignment constraints, or `usize::MAX` in the case of overflow. 106 | /// 107 | /// # Panics 108 | /// 109 | /// * if the size computation overflowed 110 | #[inline] 111 | pub const fn unaligned_bytes_required(&self) -> usize { 112 | match self.layout() { 113 | Ok(layout) => layout.size() + (layout.align() - 1), 114 | Err(_) => usize::MAX, 115 | } 116 | } 117 | 118 | /// Returns the corresponding layout for the allocation size and alignment. 119 | #[inline] 120 | pub const fn layout(self) -> Result { 121 | Layout::from_size_align(self.size_bytes(), self.align_bytes()) 122 | } 123 | 124 | /// The required allocation to allocate storage sufficient for both of `self` and `other`, 125 | /// simultaneously and in any order. 126 | /// 127 | /// # Panics 128 | /// 129 | /// * if the allocation requirement computation overflows. 130 | #[inline] 131 | pub const fn and(self, other: StackReq) -> StackReq { 132 | match (self.align, other.align) { 133 | (Some(left), Some(right)) => { 134 | let align = max(left.get(), right.get()); 135 | let left = try_round_up_pow2(self.size, align); 136 | let right = try_round_up_pow2(other.size, align); 137 | 138 | match (left, right) { 139 | (Some(left), Some(right)) => { 140 | match left.checked_add(right) { 141 | Some(size) => StackReq { 142 | // SAFETY: align is either self.align or other.align, both of which are non zero 143 | align: unsafe { Some(NonZeroUsize::new_unchecked(align)) }, 144 | size, 145 | }, 146 | _ => StackReq::OVERFLOW, 147 | } 148 | } 149 | _ => StackReq::OVERFLOW, 150 | } 151 | } 152 | _ => StackReq::OVERFLOW, 153 | } 154 | } 155 | 156 | /// The required allocation to allocate storage sufficient for all the requirements produced by 157 | /// the given iterator, simultaneously and in any order. 158 | /// 159 | /// # Panics 160 | /// 161 | /// * if the allocation requirement computation overflows. 
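///
/// A small sketch of sizing one workspace for several arrays at once; padding makes the
/// exact total platform-dependent, so only a lower bound is checked here:
///
/// ```
/// use dyn_stack::StackReq;
///
/// let req = StackReq::all_of(&[
///     StackReq::new::<i32>(100),
///     StackReq::new::<u8>(32),
///     StackReq::new::<u64>(16),
/// ]);
/// assert!(req.size_bytes() >= 100 * 4 + 32 + 16 * 8);
/// ```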
162 | #[inline] 163 | pub const fn all_of(reqs: &[Self]) -> Self { 164 | let mut total = StackReq::EMPTY; 165 | let mut reqs = reqs; 166 | while let Some((req, next)) = reqs.split_first() { 167 | total = total.and(*req); 168 | reqs = next; 169 | } 170 | total 171 | } 172 | 173 | /// The required allocation to allocate storage sufficient for either of `self` and `other`, 174 | /// with only one being active at a time. 175 | /// 176 | /// # Panics 177 | /// 178 | /// * if the allocation requirement computation overflows. 179 | #[inline] 180 | pub const fn or(self, other: StackReq) -> StackReq { 181 | match (self.align, other.align) { 182 | (Some(left), Some(right)) => { 183 | let align = max(left.get(), right.get()); 184 | let left = try_round_up_pow2(self.size, align); 185 | let right = try_round_up_pow2(other.size, align); 186 | 187 | match (left, right) { 188 | (Some(left), Some(right)) => { 189 | let size = max(left, right); 190 | StackReq { 191 | // SAFETY: align is either self.align or other.align, both of which are non zero 192 | align: unsafe { Some(NonZeroUsize::new_unchecked(align)) }, 193 | size, 194 | } 195 | } 196 | _ => StackReq::OVERFLOW, 197 | } 198 | } 199 | _ => StackReq::OVERFLOW, 200 | } 201 | } 202 | 203 | /// The required allocation to allocate storage sufficient for any of the requirements produced 204 | /// by the given iterator, with at most one being active at a time. 205 | /// 206 | /// # Panics 207 | /// 208 | /// * if the allocation requirement computation overflows. 209 | #[inline] 210 | pub fn any_of(reqs: &[StackReq]) -> StackReq { 211 | let mut total = StackReq::EMPTY; 212 | let mut reqs = reqs; 213 | while let Some((req, next)) = reqs.split_first() { 214 | total = total.or(*req); 215 | reqs = next; 216 | } 217 | total 218 | } 219 | 220 | /// Same as [`StackReq::and`] repeated `n` times. 221 | #[inline] 222 | pub const fn array(self, n: usize) -> StackReq { 223 | match self.align { 224 | Some(align) => { 225 | let size = self.size.checked_mul(n); 226 | match size { 227 | Some(size) => StackReq { 228 | size, 229 | align: Some(align), 230 | }, 231 | None => StackReq::OVERFLOW, 232 | } 233 | } 234 | None => StackReq::OVERFLOW, 235 | } 236 | } 237 | } 238 | 239 | #[cfg(test)] 240 | mod tests { 241 | use super::*; 242 | 243 | #[test] 244 | fn round_up() { 245 | assert_eq!(try_round_up_pow2(0, 4), Some(0)); 246 | assert_eq!(try_round_up_pow2(1, 4), Some(4)); 247 | assert_eq!(try_round_up_pow2(2, 4), Some(4)); 248 | assert_eq!(try_round_up_pow2(3, 4), Some(4)); 249 | assert_eq!(try_round_up_pow2(4, 4), Some(4)); 250 | } 251 | 252 | #[test] 253 | fn overflow() { 254 | assert_eq!(StackReq::new::(usize::MAX).align_bytes(), 0); 255 | } 256 | 257 | #[test] 258 | fn and_overflow() { 259 | assert_eq!( 260 | StackReq::new::(usize::MAX) 261 | .and(StackReq::new::(1)) 262 | .align_bytes(), 263 | 0, 264 | ); 265 | } 266 | } 267 | --------------------------------------------------------------------------------
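One pattern the `StackReq` combinators make cheap is reusing a single workspace across phases that never need their scratch memory at the same time: size the buffer with `or` (or `any_of`) and rebuild the stack for each phase. A minimal sketch of that pattern, assuming the default `alloc` feature and only the public API shown above:

```rust
use dyn_stack::{MemBuffer, MemStack, StackReq};

// Phase 1 needs 256 f64 of scratch, phase 2 needs 1024 u32; they never run at the same
// time, so one buffer sized with `or` serves both.
let req = StackReq::new::<f64>(256).or(StackReq::new::<u32>(1024));
let mut buf = MemBuffer::new(req);

{
    let stack = MemStack::new(&mut buf);
    let (phase1, _) = stack.make_with(256, |i| i as f64);
    assert_eq!(phase1.len(), 256);
}
{
    let stack = MemStack::new(&mut buf);
    let (phase2, _) = stack.collect(0u32..1024);
    assert_eq!(phase2.len(), 1024);
}
```

Re-creating the `MemStack` for each phase keeps the borrows disjoint, so the same allocation backs both layouts without growing.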