├── .gitignore ├── README.tpl ├── Cargo.toml ├── LICENSE ├── README.md ├── benches ├── bitmap.rs └── tlsf.rs ├── CHANGELOG.md ├── src ├── lib.rs ├── bitmap.rs ├── int.rs ├── ring.rs ├── bitmaputils.rs ├── arena.rs └── tlsf.rs └── tests ├── bitmap.rs ├── tlsf.rs └── ring.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | **/*.rs.bk 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /README.tpl: -------------------------------------------------------------------------------- 1 | # {{crate}} 2 | 3 | [docs.rs](https://docs.rs/xalloc/) 4 | 5 | {{readme}} 6 | 7 | License: {{license}} -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xalloc" 3 | version = "0.2.7" 4 | authors = ["yvt "] 5 | license = "MIT" 6 | readme = "README.md" 7 | repository = "https://github.com/yvt/xalloc-rs" 8 | description = """ 9 | Suballocators for external memory (e.g., Vulkan device memory) 10 | """ 11 | keywords = ["allocator", "vulkan", "memory", "suballocator", "tlsf"] 12 | 13 | [features] 14 | default = ["std"] 15 | nightly = [] 16 | std = ["num/std"] 17 | 18 | [dependencies] 19 | num = { version = ">= 0.2.0, < 0.4.0", default_features = false } 20 | unreachable = "1.0.0" 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2017 yvt 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # xalloc 2 | 3 | [docs.rs](https://docs.rs/xalloc/) 4 | 5 | Dynamic suballocators for external memory (e.g., Vulkan device memory). 
6 | 7 | ## Provided Algorithms 8 | 9 | ### Generic 10 | 11 | | Name | Time Complexity | Space Complexity | 12 | | ------------------------------- | --------------- | ----------------- | 13 | | TLSF (Two-Level Segregated Fit) | `O(1)` | `O(N + log size)` | 14 | | Free space bitmap | `O(size)` | `O(size)` | 15 | 16 | ### Specialized 17 | 18 | | Name | Time Complexity | Space Complexity | 19 | | ------------------------------- | --------------- | ----------------- | 20 | | Ring buffer | `O(1)` | `O(N)` | 21 | 22 | (`size`: heap size measured by the number of allocation units, `N`: number of allocations) 23 | 24 | ## Examples 25 | 26 | ```rust 27 | use xalloc::{SysTlsf, SysTlsfRegion}; 28 | let mut tlsf = xalloc::SysTlsf::new(8u32); 29 | 30 | // Allocate regions 31 | let alloc1: (SysTlsfRegion, u32) = tlsf.alloc(4).unwrap(); 32 | let alloc2: (SysTlsfRegion, u32) = tlsf.alloc(4).unwrap(); 33 | let (region1, offset1) = alloc1; 34 | let (region2, offset2) = alloc2; 35 | println!("allocated #1: {:?}", (®ion1, offset1)); 36 | println!("allocated #2: {:?}", (®ion2, offset2)); 37 | 38 | // Deallocate a region 39 | tlsf.dealloc(region1).unwrap(); 40 | 41 | // Now we can allocate again 42 | tlsf.alloc(2).unwrap(); 43 | tlsf.alloc(2).unwrap(); 44 | ``` 45 | 46 | ## Feature Flags 47 | 48 | - `nightly` — Enables optimizations which currently require a Nightly Rust 49 | compiler. This flag is now unused due to the [stabilization] of `NonNull` 50 | in Rust 1.25. 51 | 52 | [stabilization]: https://blog.rust-lang.org/2018/03/29/Rust-1.25.html 53 | 54 | 55 | License: MIT 56 | -------------------------------------------------------------------------------- /benches/bitmap.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 
8 | // 9 | #![feature(test)] 10 | extern crate test; 11 | extern crate unreachable; 12 | extern crate xalloc; 13 | 14 | use test::Bencher; 15 | use unreachable::UncheckedOptionExt; 16 | use xalloc::*; 17 | 18 | struct Xorshift32(u32); 19 | 20 | impl Xorshift32 { 21 | fn next(&mut self) -> u32 { 22 | self.0 ^= self.0 << 13; 23 | self.0 ^= self.0 >> 17; 24 | self.0 ^= self.0 << 5; 25 | !self.0 26 | } 27 | } 28 | 29 | #[bench] 30 | fn bitmap_random(b: &mut Bencher) { 31 | let mut v: Vec<_> = (0..512).map(|_| None).collect(); 32 | let mut sa = BitmapAlloc::new(512); 33 | b.iter(|| unsafe { 34 | let mut r = Xorshift32(0x11451419); 35 | for _ in 0..65536 { 36 | let i = ((r.next() >> 8) & 511) as usize; 37 | if v[i].is_some() { 38 | sa.dealloc_relaxed(v[i].take().unchecked_unwrap()); 39 | } else { 40 | v[i] = Some(sa.alloc(1).unchecked_unwrap().0); 41 | } 42 | } 43 | for x in v.iter_mut() { 44 | if let Some(x) = x.take() { 45 | sa.dealloc_relaxed(x); 46 | } 47 | } 48 | }); 49 | } 50 | 51 | #[bench] 52 | fn bitmap_4_random(b: &mut Bencher) { 53 | let mut v: Vec<_> = (0..512).map(|_| None).collect(); 54 | let mut sa = BitmapAlloc::new(512 * 4); 55 | b.iter(|| unsafe { 56 | let mut r = Xorshift32(0x11451419); 57 | for _ in 0..65536 { 58 | let i = ((r.next() >> 8) & 511) as usize; 59 | if v[i].is_some() { 60 | sa.dealloc_relaxed(v[i].take().unchecked_unwrap()); 61 | } else { 62 | v[i] = Some(sa.alloc(4).unchecked_unwrap().0); 63 | } 64 | } 65 | for x in v.iter_mut() { 66 | if let Some(x) = x.take() { 67 | sa.dealloc_relaxed(x); 68 | } 69 | } 70 | }); 71 | } 72 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 6 | and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [0.2.7] - 2020-12-09 11 | 12 | - Get rid of illegal uses of `std::mem::uninitialized`, replacing them with `MaybeUninit` 13 | 14 | ## [0.2.6] - 2019-02-28 15 | 16 | - Add `Ring::dealloc_front_until` and `Ring::dealloc_back_until` 17 | 18 | ## [0.2.5] - 2018-12-05 19 | 20 | - Add `Ring`, a circular memory allocator 21 | - Wrap `allow(clippy::*)` with `cfg_attr(feature = "cargo-clippy", ...)` in order to make it compatible with the current stable toolchain 22 | 23 | ## [0.2.4] - 2018-11-07 24 | 25 | - Fix a compilation issue that happens when multiple versions of `num-*` crates are mixed up 26 | - Optimize for performance through a use of the `unreachable` crate and an architecture-specific micro-tuning 27 | 28 | ## [0.2.3] - 2018-04-02 29 | 30 | - Use `std::ptr::NonNull` stabilized in [Rust 1.25] 31 | 32 | [Rust 1.25]: https://blog.rust-lang.org/2018/03/29/Rust-1.25.html 33 | 34 | ## [0.2.2] - 2017-10-28 35 | 36 | - Make more types `Debug` 37 | - Add type aliases `SysTlsfRegion` and `SafeTlsfRegion` 38 | 39 | ## [0.2.1] - 2017-10-28 40 | 41 | - Fix a link in `README.md` 42 | 43 | ## [0.2.0] - 2017-10-28 44 | 45 | - Rename `TlsfSuballocRegion` to `TlsfRegion`. 46 | - Add a free space bitmap-based allocator. 47 | - Add CHANGELOG to track notable changes among versions. 48 | 49 | ## 0.1.0 - 2017-10-28 50 | 51 | - Initial release. 
52 | 53 | [Unreleased]: https://github.com/yvt/xalloc-rs/compare/v0.2.7...HEAD 54 | [0.2.7]: https://github.com/yvt/xalloc-rs/compare/v0.2.6...v0.2.7 55 | [0.2.6]: https://github.com/yvt/xalloc-rs/compare/v0.2.5...v0.2.6 56 | [0.2.5]: https://github.com/yvt/xalloc-rs/compare/v0.2.4...v0.2.5 57 | [0.2.4]: https://github.com/yvt/xalloc-rs/compare/v0.2.3...v0.2.4 58 | [0.2.3]: https://github.com/yvt/xalloc-rs/compare/v0.2.2...v0.2.3 59 | [0.2.2]: https://github.com/yvt/xalloc-rs/compare/v0.2.1...v0.2.2 60 | [0.2.1]: https://github.com/yvt/xalloc-rs/compare/v0.2.0...v0.2.1 61 | [0.2.0]: https://github.com/yvt/xalloc-rs/compare/v0.1.0...v0.2.0 62 | -------------------------------------------------------------------------------- /benches/tlsf.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | #![feature(test)] 10 | extern crate test; 11 | extern crate unreachable; 12 | extern crate xalloc; 13 | 14 | use test::Bencher; 15 | use unreachable::UncheckedOptionExt; 16 | use xalloc::*; 17 | 18 | struct Xorshift32(u32); 19 | 20 | impl Xorshift32 { 21 | fn next(&mut self) -> u32 { 22 | self.0 ^= self.0 << 13; 23 | self.0 ^= self.0 >> 17; 24 | self.0 ^= self.0 << 5; 25 | !self.0 26 | } 27 | } 28 | 29 | #[bench] 30 | fn systlsf_random(b: &mut Bencher) { 31 | let mut v: Vec<_> = (0..512).map(|_| None).collect(); 32 | let mut sa = SysTlsf::with_capacity(512u32, 512); 33 | b.iter(|| unsafe { 34 | let mut r = Xorshift32(0x11451419); 35 | for _ in 0..65536 { 36 | let i = ((r.next() >> 8) & 511) as usize; 37 | if v[i].is_some() { 38 | sa.dealloc_unchecked(v[i].take().unchecked_unwrap()); 39 | } else { 40 | v[i] = Some(sa.alloc(1u32).unchecked_unwrap().0); 41 | } 42 | } 43 | for x in v.iter_mut() { 44 | if let Some(x) = x.take() { 45 | sa.dealloc_unchecked(x); 46 | } 47 | } 48 | }); 49 | } 50 | 51 | #[bench] 52 | fn sys_random(b: &mut Bencher) { 53 | let mut v: Vec<_> = (0..512).map(|_| None).collect(); 54 | b.iter(|| { 55 | let mut r = Xorshift32(0x11451419); 56 | for _ in 0..65536 { 57 | let i = ((r.next() >> 8) & 511) as usize; 58 | if v[i].is_some() { 59 | v[i].take(); 60 | } else { 61 | v[i] = Some(Box::new(1u32)); 62 | } 63 | } 64 | for x in v.iter_mut() { 65 | x.take(); 66 | } 67 | }); 68 | } 69 | 70 | #[bench] 71 | fn systlsf_stack(b: &mut Bencher) { 72 | let mut sa = SysTlsf::with_capacity(65536u32, 65536); 73 | let mut v = Vec::with_capacity(65536); 74 | b.iter(|| unsafe { 75 | for _ in 0..65536 { 76 | v.push(sa.alloc(1u32).unchecked_unwrap().0); 77 | } 78 | while let Some(x) = v.pop() { 79 | sa.dealloc_unchecked(x); 80 | } 81 | }); 82 | } 83 | 84 | #[bench] 85 | fn sys_stack(b: &mut Bencher) { 86 | let mut v = Vec::with_capacity(65536); 87 | b.iter(|| { 88 | for _ in 0..65536 { 89 | v.push(Box::new(1u32)); 90 | } 91 | while let Some(_) = v.pop() {} 92 | }); 93 | } 94 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | //! Dynamic suballocators for external memory (e.g., Vulkan device memory). 10 | //! 11 | //! 
# Provided Algorithms 12 | //! 13 | //! ## Generic 14 | //! 15 | //! | Name | Time Complexity | Space Complexity | 16 | //! | ------------------------------- | --------------- | ----------------- | 17 | //! | TLSF (Two-Level Segregated Fit) | `O(1)` | `O(N + log size)` | 18 | //! | Free space bitmap | `O(size)` | `O(size)` | 19 | //! 20 | //! ## Specialized 21 | //! 22 | //! | Name | Time Complexity | Space Complexity | 23 | //! | ------------------------------- | --------------- | ----------------- | 24 | //! | Ring buffer | `O(1)` | `O(N)` | 25 | //! 26 | //! (`size`: heap size measured by the number of allocation units, `N`: number of allocations) 27 | //! 28 | //! # Examples 29 | //! 30 | //! ``` 31 | //! use xalloc::{SysTlsf, SysTlsfRegion}; 32 | //! let mut tlsf = xalloc::SysTlsf::new(8u32); 33 | //! 34 | //! // Allocate regions 35 | //! let alloc1: (SysTlsfRegion, u32) = tlsf.alloc(4).unwrap(); 36 | //! let alloc2: (SysTlsfRegion, u32) = tlsf.alloc(4).unwrap(); 37 | //! let (region1, offset1) = alloc1; 38 | //! let (region2, offset2) = alloc2; 39 | //! println!("allocated #1: {:?}", (®ion1, offset1)); 40 | //! println!("allocated #2: {:?}", (®ion2, offset2)); 41 | //! 42 | //! // Deallocate a region 43 | //! tlsf.dealloc(region1).unwrap(); 44 | //! 45 | //! // Now we can allocate again 46 | //! tlsf.alloc(2).unwrap(); 47 | //! tlsf.alloc(2).unwrap(); 48 | //! ``` 49 | //! 50 | //! # Feature Flags 51 | //! 52 | //! - `nightly` — Enables optimizations which currently require a Nightly Rust 53 | //! compiler. This flag is now unused due to the [stabilization] of `NonNull` 54 | //! in Rust 1.25. 55 | //! - `std` — Enables the use of the standard library. Enabled by default. 56 | //! 57 | //! [stabilization]: https://blog.rust-lang.org/2018/03/29/Rust-1.25.html 58 | //! 59 | // Clippy does not understand that generic numeric types are not always 60 | // as capable as built-in ones and raise false warnings 61 | #![cfg_attr(feature = "cargo-clippy", allow(clippy::op_ref))] 62 | #![no_std] 63 | #[cfg(feature = "std")] 64 | extern crate std; 65 | 66 | extern crate alloc; 67 | pub extern crate num; 68 | extern crate unreachable; 69 | 70 | pub mod arena; 71 | pub mod bitmap; 72 | mod bitmaputils; 73 | pub mod int; 74 | pub mod ring; 75 | pub mod tlsf; 76 | 77 | pub use self::bitmap::{BitmapAlloc, BitmapAllocRegion}; 78 | pub use self::ring::{Ring, RingRegion}; 79 | 80 | pub use self::tlsf::{ 81 | SafeTlsf, SafeTlsfRegion, SysTlsf, SysTlsfRegion, Tlsf, TlsfBlock, TlsfRegion, 82 | }; 83 | -------------------------------------------------------------------------------- /tests/bitmap.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 
8 | // 9 | extern crate xalloc; 10 | 11 | use std::ops; 12 | use xalloc::BitmapAlloc; 13 | 14 | struct Xorshift32(u32); 15 | 16 | impl Xorshift32 { 17 | /// Returns a random integer in `[0, 0xfffffffe]` 18 | fn next(&mut self) -> u32 { 19 | self.0 ^= self.0 << 13; 20 | self.0 ^= self.0 >> 17; 21 | self.0 ^= self.0 << 5; 22 | !self.0 23 | } 24 | fn next_range(&mut self, range: ops::Range) -> u32 { 25 | let len = range.end - range.start; 26 | let mut mask = len - 1; 27 | mask |= mask >> 1; 28 | mask |= mask >> 2; 29 | mask |= mask >> 4; 30 | mask |= mask >> 8; 31 | mask |= mask >> 16; 32 | 33 | // Let's not care about the unbounded execution time :) 34 | let mut value = self.next() & mask; 35 | while value >= len { 36 | value = self.next() & mask; 37 | } 38 | 39 | value + range.start 40 | } 41 | } 42 | 43 | #[test] 44 | fn create() { 45 | for i in 1..100 { 46 | println!("new({})", i); 47 | BitmapAlloc::new(i); 48 | } 49 | } 50 | 51 | #[test] 52 | fn full_allocate() { 53 | for i in 1..100 { 54 | println!("new({})", i); 55 | let mut sa = BitmapAlloc::new(i); 56 | let result = sa.alloc(i); 57 | assert!(result.is_some()); 58 | } 59 | } 60 | 61 | #[test] 62 | fn allocate_two() { 63 | for i in 1..50 { 64 | for k in 1..i { 65 | println!("new({})", i); 66 | let mut sa = BitmapAlloc::new(i); 67 | println!(" alloc({})", k); 68 | let result1 = sa.alloc(k); 69 | assert!(result1.is_some()); 70 | println!(" alloc({})", i - k); 71 | let result2 = sa.alloc(i - k); 72 | assert!(result2.is_some()); 73 | } 74 | } 75 | } 76 | 77 | #[test] 78 | fn allocate_three() { 79 | for i in 1..50 { 80 | for k in 1..i { 81 | for j in 1..i - k { 82 | println!("new({})", i); 83 | let mut sa = BitmapAlloc::new(i); 84 | println!(" alloc({})", k); 85 | let result1 = sa.alloc(k); 86 | assert!(result1.is_some()); 87 | println!(" alloc({})", i - k - j); 88 | let result2 = sa.alloc(i - k - j); 89 | assert!(result2.is_some()); 90 | println!(" alloc({})", j); 91 | let result3 = sa.alloc(j); 92 | assert!(result3.is_some()); 93 | } 94 | } 95 | } 96 | } 97 | 98 | #[test] 99 | fn allocate_deallocate_two1() { 100 | for i in 1..50 { 101 | for k in 1..i { 102 | println!("new({})", i); 103 | let mut sa = BitmapAlloc::new(i); 104 | println!(" alloc({})", k); 105 | let result1 = sa.alloc(k); 106 | assert!(result1.is_some()); 107 | println!(" alloc({})", i - k); 108 | let result2 = sa.alloc(i - k); 109 | assert!(result2.is_some()); 110 | 111 | println!(" dealloc(result1)"); 112 | sa.dealloc_relaxed(result1.unwrap().0); 113 | println!(" dealloc(result2)"); 114 | sa.dealloc_relaxed(result2.unwrap().0); 115 | } 116 | } 117 | } 118 | 119 | #[test] 120 | fn stress() { 121 | let mut sa = BitmapAlloc::new(1000); 122 | let mut allocated = Vec::new(); 123 | let mut r = Xorshift32(0x11451419u32); 124 | for _ in 0..1000 { 125 | let len = 1u32 + (r.next() & 127u32); 126 | println!("alloc({})", len); 127 | let reg = sa.alloc(len as usize); 128 | if let Some((reg, pos)) = reg { 129 | println!(" success: {:?}", (®, pos)); 130 | allocated.push(reg); 131 | } else { 132 | assert!(allocated.len() > 0); 133 | let a_index = r.next_range(0..allocated.len() as u32); 134 | let old_reg = allocated.swap_remove(a_index as usize); 135 | println!(" failed, deallocating {:?}", old_reg); 136 | sa.dealloc_relaxed(old_reg); 137 | } 138 | } 139 | for reg in allocated { 140 | println!("dealloc({:?})", reg); 141 | sa.dealloc_relaxed(reg); 142 | } 143 | 144 | // Try the full allocation 145 | println!("alloc({})", 1000u32); 146 | let reg = sa.alloc(1000); 147 | 
assert!(reg.is_some()); 148 | } 149 | -------------------------------------------------------------------------------- /src/bitmap.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | //! Free space bitmap-based external memory allocator. 10 | //! 11 | //! [`BitmapAlloc`] uses a simple bitmap to track the allocation state, and 12 | //! relies on a naïve bit scan for allocation. 13 | //! 14 | //! ## Memory Overhead 15 | //! 16 | //! Since it uses a bitmap to track free space, it consumes a memory proportional 17 | //! to the size of the heap. The memory consumption is estimated to be roughly 18 | //! `size / 8` bytes, where `size` is the size of the heap measured by the 19 | //! number of allocation units. 20 | //! 21 | //! `BitmapAlloc` does not store information about individual allocated regions 22 | //! by itself. Therefore, `BitmapAlloc` would be preferred when the number of 23 | //! allocations is quite high and each allocation is very small (preferably, 24 | //! just one allocation unit). 25 | //! 26 | //! The following table shows the memory overhead comparison between `Tlsf` and 27 | //! `BitmapAlloc` with a varying number of allocations (assuming the heap is 28 | //! fully occupied). 29 | //! 30 | //! | `size` | # of allocations | `Tlsf` (bytes) | `BitmapAloc` (bytes) | 31 | //! | ------ | ---------------- | -------------- | -------------------- | 32 | //! | 1,024 | 0 | 1,118 | 128 | 33 | //! | | 1 | 1,174 | 128 | 34 | //! | | 256 | 15,454 | 128 | 35 | //! | | 1,024 | 58,462 | 128 | 36 | //! | 65,536 | 0 | 1,118 | 8,192 | 37 | //! | | 1 | 1,174 | 8,192 | 38 | //! | | 1,024 | 58,462 | 8,192 | 39 | //! | | 65,536 | 3,671,134 | 8,192 | 40 | //! 41 | //! ## Performance 42 | //! 43 | //! The allocation throughput is roughly the same as of jemalloc. 44 | use alloc::boxed::Box; 45 | use alloc::string::String; 46 | use alloc::vec; 47 | use alloc::vec::Vec; 48 | use bitmaputils::*; 49 | use core::ops::Range; 50 | use int::BinaryInteger; 51 | /// Free space bitmap-based external memory allocator. 52 | /// 53 | /// See [the module-level documentation] for more. 54 | /// 55 | /// [the module-level documentation]: index.html 56 | #[derive(Debug, Clone)] 57 | pub struct BitmapAlloc { 58 | bitmap: Vec, 59 | size: usize, 60 | next: usize, 61 | } 62 | 63 | /// A handle type to a region allocated in a `BitmapAlloc`. 64 | /// 65 | /// `BitmapAllocRegion` returned by a `BitmapAlloc` only can be used with the 66 | /// same `BitmapAlloc`. 67 | #[derive(Debug, PartialEq, Eq, Hash)] 68 | pub struct BitmapAllocRegion { 69 | range: Range, 70 | } 71 | 72 | impl BitmapAlloc { 73 | /// Construct a `BitmapAlloc`. 74 | pub fn new(size: usize) -> Self { 75 | let width = ::max_digits() as usize; 76 | Self { 77 | bitmap: vec![0; (size + width - 1) / width], 78 | size, 79 | next: 0, 80 | } 81 | } 82 | 83 | /// Alias of [`alloc_next`]. 84 | /// 85 | /// [`alloc_next`]: #method.alloc_next 86 | pub fn alloc(&mut self, size: usize) -> Option<(BitmapAllocRegion, usize)> { 87 | self.alloc_next(size) 88 | } 89 | 90 | /// Allocate a region of the size `size` using a Next-Fit strategy. 91 | /// The time complexity is linear to the size of the heap. 92 | /// 93 | /// Returns a handle of the allocated region and its offset if the 94 | /// allocation succeeds. Returns `None` otherwise. 
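/// (Added illustrative example — not part of the upstream documentation; it
/// only uses the `BitmapAlloc` API defined in this module.)
///
/// ```
/// use xalloc::BitmapAlloc;
/// let mut bitmap = BitmapAlloc::new(8);
/// // Next-Fit: each search resumes where the previous allocation ended.
/// let (r1, o1) = bitmap.alloc_next(4).unwrap();
/// let (r2, o2) = bitmap.alloc_next(4).unwrap();
/// assert_eq!((o1, o2), (0, 4));
/// bitmap.dealloc_relaxed(r1);
/// bitmap.dealloc_relaxed(r2);
/// ```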
95 | /// 96 | /// `size` must not be zero. 97 | pub fn alloc_next(&mut self, size: usize) -> Option<(BitmapAllocRegion, usize)> { 98 | let next = self.next; 99 | self.alloc_inner(size, next) 100 | .or_else(|| self.alloc_first(size)) 101 | } 102 | 103 | /// Allocate a region of the size `size` using a First-Fit strategy. 104 | /// The time complexity is linear to the size of the heap. 105 | /// 106 | /// Returns a handle of the allocated region and its offset if the 107 | /// allocation succeeds. Returns `None` otherwise. 108 | /// 109 | /// `size` must not be zero. 110 | pub fn alloc_first(&mut self, size: usize) -> Option<(BitmapAllocRegion, usize)> { 111 | self.alloc_inner(size, 0) 112 | } 113 | 114 | fn alloc_inner(&mut self, size: usize, start: usize) -> Option<(BitmapAllocRegion, usize)> { 115 | assert!(size != 0); 116 | 117 | if start + size > self.size { 118 | return None; 119 | } 120 | 121 | find_zeros(&self.bitmap, start, size).and_then(|offs| { 122 | let range = offs..offs + size; 123 | if range.end <= self.size { 124 | set_bits_ranged(&mut self.bitmap, range.clone()); 125 | if range.end == self.size { 126 | self.next = 0; 127 | } else { 128 | self.next = range.end; 129 | } 130 | Some((BitmapAllocRegion { range }, offs)) 131 | } else { 132 | None 133 | } 134 | }) 135 | } 136 | 137 | /// Deallocate the specified region. 138 | /// 139 | /// `r` must originate from the same instance of `BitmapAlloc`. Otherwise, 140 | /// `BitmapAlloc` enters an inconsistent state and possibly panics, but does 141 | /// not cause an undefined behavior. 142 | pub fn dealloc_relaxed(&mut self, r: BitmapAllocRegion) { 143 | // Optimize for stack-like usage 144 | if self.next == r.range.end { 145 | self.next = r.range.start; 146 | } 147 | 148 | clear_bits_ranged(&mut self.bitmap, r.range); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /tests/tlsf.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 
8 | // 9 | #![cfg(feature = "std")] 10 | extern crate xalloc; 11 | 12 | use std::ops; 13 | use xalloc::SafeTlsf; 14 | 15 | struct Xorshift32(u32); 16 | 17 | impl Xorshift32 { 18 | /// Returns a random integer in `[0, 0xfffffffe]` 19 | fn next(&mut self) -> u32 { 20 | self.0 ^= self.0 << 13; 21 | self.0 ^= self.0 >> 17; 22 | self.0 ^= self.0 << 5; 23 | !self.0 24 | } 25 | fn next_range(&mut self, range: ops::Range) -> u32 { 26 | let len = range.end - range.start; 27 | let mut mask = len - 1; 28 | mask |= mask >> 1; 29 | mask |= mask >> 2; 30 | mask |= mask >> 4; 31 | mask |= mask >> 8; 32 | mask |= mask >> 16; 33 | 34 | // Let's not care about the unbounded execution time :) 35 | let mut value = self.next() & mask; 36 | while value >= len { 37 | value = self.next() & mask; 38 | } 39 | 40 | value + range.start 41 | } 42 | } 43 | 44 | #[test] 45 | fn create() { 46 | for i in 1..100 { 47 | println!("new({})", i); 48 | SafeTlsf::new(i as u32); 49 | } 50 | } 51 | 52 | #[test] 53 | fn full_allocate() { 54 | for i in 1..100 { 55 | println!("new({})", i); 56 | let mut sa = SafeTlsf::new(i as u32); 57 | let result = sa.alloc(i as u32); 58 | assert!(result.is_some()); 59 | } 60 | } 61 | 62 | #[test] 63 | fn allocate_two() { 64 | for i in 1..50 { 65 | for k in 1..i { 66 | println!("new({})", i); 67 | let mut sa = SafeTlsf::new(i as u32); 68 | println!(" alloc({})", k); 69 | let result1 = sa.alloc(k as u32); 70 | assert!(result1.is_some()); 71 | println!(" alloc({})", i - k); 72 | let result2 = sa.alloc((i - k) as u32); 73 | assert!(result2.is_some()); 74 | } 75 | } 76 | } 77 | 78 | #[test] 79 | fn allocate_three() { 80 | for i in 1..50 { 81 | for k in 1..i { 82 | for j in 1..i - k { 83 | println!("new({})", i); 84 | let mut sa = SafeTlsf::new(i as u32); 85 | println!(" alloc({})", k); 86 | let result1 = sa.alloc(k as u32); 87 | assert!(result1.is_some()); 88 | println!(" alloc({})", i - k - j); 89 | let result2 = sa.alloc((i - k - j) as u32); 90 | assert!(result2.is_some()); 91 | println!(" alloc({})", j); 92 | let result3 = sa.alloc((j) as u32); 93 | assert!(result3.is_some()); 94 | } 95 | } 96 | } 97 | } 98 | 99 | #[test] 100 | fn allocate_two_aligned() { 101 | for i in 1..50 { 102 | for k in 1..i - 8 { 103 | println!("new({})", i); 104 | let mut sa = SafeTlsf::new(i as u32); 105 | println!(" alloc({})", k); 106 | let result = sa.alloc(k as u32); 107 | assert!(result.is_some()); 108 | println!(" alloc_aligned({}, 8)", i - k - 8); 109 | let result = sa.alloc_aligned((i - k - 8) as u32, 8); 110 | assert!(result.is_some()); 111 | assert_eq!(result.as_ref().unwrap().1 & 7, 0, "unaligned: {:?}", result); 112 | println!(" success: {:?}", result); 113 | } 114 | } 115 | } 116 | 117 | #[test] 118 | fn allocate_deallocate_two1() { 119 | for i in 1..50 { 120 | for k in 1..i { 121 | println!("new({})", i); 122 | let mut sa = SafeTlsf::new(i as u32); 123 | println!(" alloc({})", k); 124 | let result1 = sa.alloc(k as u32); 125 | assert!(result1.is_some()); 126 | println!(" alloc({})", i - k); 127 | let result2 = sa.alloc((i - k) as u32); 128 | assert!(result2.is_some()); 129 | 130 | println!(" dealloc(result1)"); 131 | sa.dealloc(result1.unwrap().0).unwrap(); 132 | println!(" dealloc(result2)"); 133 | sa.dealloc(result2.unwrap().0).unwrap(); 134 | } 135 | } 136 | } 137 | 138 | #[test] 139 | fn stress() { 140 | let mut sa = SafeTlsf::new(1000u32); 141 | let mut allocated = Vec::new(); 142 | let mut r = Xorshift32(0x11451419u32); 143 | for _ in 0..1000 { 144 | let len = 1u32 + (r.next() & 127u32); 145 | 
println!("alloc({})", len); 146 | let reg = sa.alloc(len); 147 | if let Some((reg, pos)) = reg { 148 | println!(" success: {:?}", (®, pos)); 149 | allocated.push(reg); 150 | } else { 151 | assert!(allocated.len() > 0); 152 | let a_index = r.next_range(0..(allocated.len() as u32)); 153 | let old_reg = allocated.swap_remove(a_index as usize); 154 | println!(" failed, deallocating {:?}", old_reg); 155 | sa.dealloc(old_reg).unwrap(); 156 | } 157 | if allocated.len() > 0 { 158 | unsafe { 159 | sa.test_integrity(&allocated[0]); 160 | } 161 | } 162 | } 163 | for reg in allocated { 164 | println!("dealloc({:?})", reg); 165 | sa.dealloc(reg).unwrap(); 166 | } 167 | 168 | // Try the full allocation 169 | println!("alloc({})", 1000u32); 170 | let reg = sa.alloc(1000u32); 171 | assert!(reg.is_some()); 172 | } 173 | 174 | #[test] 175 | fn stress_aligned() { 176 | let mut sa = SafeTlsf::new(4000u32); 177 | let mut allocated = Vec::new(); 178 | let mut r = Xorshift32(0x11451419u32); 179 | for _ in 0..1000 { 180 | let len = 1u32 + (r.next() & 127u32); 181 | println!("alloc_aligned({}, {})", len, 64); 182 | let reg = sa.alloc_aligned(len, 64); 183 | if let Some((reg, pos)) = reg { 184 | println!(" success: {:?}", (®, pos)); 185 | allocated.push(reg); 186 | } else { 187 | assert!(allocated.len() > 0); 188 | let a_index = r.next_range(0..(allocated.len() as u32)); 189 | let old_reg = allocated.swap_remove(a_index as usize); 190 | println!(" failed, deallocating {:?}", old_reg); 191 | sa.dealloc(old_reg).unwrap(); 192 | } 193 | if allocated.len() > 0 { 194 | unsafe { 195 | sa.test_integrity(&allocated[0]); 196 | } 197 | } 198 | } 199 | for reg in allocated { 200 | println!("dealloc({:?})", reg); 201 | sa.dealloc(reg).unwrap(); 202 | } 203 | 204 | // Try the full allocation 205 | println!("alloc({})", 1000u32); 206 | let reg = sa.alloc(1000u32); 207 | assert!(reg.is_some()); 208 | } 209 | -------------------------------------------------------------------------------- /tests/ring.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2018 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 
8 | // 9 | extern crate xalloc; 10 | 11 | use xalloc::Ring; 12 | 13 | #[test] 14 | fn create() { 15 | let ring = Ring::new(114514u32); 16 | assert!(ring.is_empty()); 17 | } 18 | 19 | #[test] 20 | fn create_zero_sized() { 21 | let ring = Ring::new(0u32); 22 | assert!(ring.is_empty()); 23 | assert!(ring.is_full()); 24 | } 25 | 26 | #[test] 27 | fn reset1() { 28 | let mut ring = Ring::new(8u32); 29 | let (region1, offset1) = ring.alloc_back(4).unwrap(); 30 | let (region2, offset2) = ring.alloc_back(4).unwrap(); 31 | assert_eq!(offset1, 0); 32 | assert_eq!(offset2, 4); 33 | assert!(ring.is_full()); 34 | ring.dealloc_front(region1); 35 | ring.dealloc_back(region2); 36 | 37 | // The `Ring` is now empty 38 | assert!(ring.is_empty()); 39 | 40 | let (_, offset) = ring.alloc_back(8).unwrap(); 41 | assert_eq!(offset, 0); 42 | } 43 | 44 | #[test] 45 | fn reset2() { 46 | let mut ring = Ring::new(8u32); 47 | let (region1, offset1) = ring.alloc_back(4).unwrap(); 48 | let (region2, offset2) = ring.alloc_back(4).unwrap(); 49 | assert_eq!(offset1, 0); 50 | assert_eq!(offset2, 4); 51 | assert!(ring.is_full()); 52 | ring.dealloc_back(region2); 53 | ring.dealloc_front(region1); 54 | 55 | // The `Ring` is now empty 56 | assert!(ring.is_empty()); 57 | 58 | let (_, offset) = ring.alloc_back(8).unwrap(); 59 | assert_eq!(offset, 0); 60 | } 61 | 62 | #[test] 63 | fn reset3() { 64 | let mut ring = Ring::new(8u32); 65 | let (region1, offset1) = ring.alloc_back(4).unwrap(); 66 | let (_, offset2) = ring.alloc_back(4).unwrap(); 67 | assert_eq!(offset1, 0); 68 | assert_eq!(offset2, 4); 69 | assert!(ring.is_full()); 70 | ring.dealloc_back_until(region1); 71 | ring.dealloc_back(region1); 72 | 73 | // The `Ring` is now empty 74 | assert!(ring.is_empty()); 75 | } 76 | 77 | #[test] 78 | fn reset4() { 79 | let mut ring = Ring::new(8u32); 80 | let (_, offset1) = ring.alloc_back(4).unwrap(); 81 | let (region2, offset2) = ring.alloc_back(4).unwrap(); 82 | assert_eq!(offset1, 0); 83 | assert_eq!(offset2, 4); 84 | assert!(ring.is_full()); 85 | ring.dealloc_front_until(region2); 86 | ring.dealloc_front(region2); 87 | 88 | // The `Ring` is now empty 89 | assert!(ring.is_empty()); 90 | } 91 | 92 | #[test] 93 | #[should_panic] 94 | fn alloc_back_zero_size() { 95 | let mut ring = Ring::new(8u32); 96 | ring.alloc_back_aligned(0, 1); 97 | } 98 | 99 | #[test] 100 | #[should_panic] 101 | fn alloc_back_bad_align() { 102 | let mut ring = Ring::new(8u32); 103 | ring.alloc_back_aligned(4, 3); 104 | } 105 | 106 | #[test] 107 | #[should_panic] 108 | fn alloc_front_zero_size() { 109 | let mut ring = Ring::new(8u32); 110 | ring.alloc_front_aligned(0, 1); 111 | } 112 | 113 | #[test] 114 | #[should_panic] 115 | fn alloc_front_bad_align() { 116 | let mut ring = Ring::new(8u32); 117 | ring.alloc_front_aligned(4, 3); 118 | } 119 | 120 | #[test] 121 | fn alloc_back_too_large() { 122 | let mut ring = Ring::new(8u32); 123 | assert_eq!(ring.alloc_back_aligned(16, 1), None); 124 | } 125 | 126 | #[test] 127 | fn alloc_front_too_large() { 128 | let mut ring = Ring::new(8u32); 129 | assert_eq!(ring.alloc_front_aligned(16, 1), None); 130 | } 131 | 132 | #[test] 133 | fn alloc_back_too_large2() { 134 | let mut ring = Ring::new(8u32); 135 | ring.alloc_back(1).unwrap(); 136 | assert_eq!(ring.alloc_back_aligned(8, 1), None); 137 | } 138 | 139 | #[test] 140 | fn alloc_front_too_large2() { 141 | let mut ring = Ring::new(8u32); 142 | ring.alloc_back(1).unwrap(); 143 | assert_eq!(ring.alloc_front_aligned(8, 1), None); 144 | } 145 | 146 | #[test] 147 | fn 
alloc_back_aligned() { 148 | let mut ring = Ring::new(8u32); 149 | let (_, offset1) = ring.alloc_back_aligned(2, 64).unwrap(); 150 | let (_, offset2) = ring.alloc_back_aligned(2, 4).unwrap(); 151 | 152 | // 0 1 2 3 4 5 6 7 8 153 | // [1 ] [2 ] 154 | assert_eq!(offset1, 0); 155 | assert_eq!(offset2, 4); 156 | } 157 | 158 | #[test] 159 | fn alloc_back_wrap() { 160 | let mut ring = Ring::new(8u32); 161 | let (region, offset1) = ring.alloc_back(4).unwrap(); 162 | let (_, offset2) = ring.alloc_back(2).unwrap(); 163 | assert_eq!(offset1, 0); 164 | assert_eq!(offset2, 4); 165 | ring.dealloc_front(region); 166 | 167 | // 0 1 2 3 4 5 6 7 8 168 | // [new ][2 ] 169 | assert_eq!(ring.alloc_back(6), None); 170 | assert_eq!(ring.alloc_back(5), None); 171 | let (_, offset) = ring.alloc_back(4).unwrap(); 172 | assert_eq!(offset, 0); 173 | } 174 | 175 | #[test] 176 | fn alloc_back_aligned_wrap() { 177 | let mut ring = Ring::new(8u32); 178 | let (region, offset1) = ring.alloc_back(4).unwrap(); 179 | let (_, offset2) = ring.alloc_back(2).unwrap(); 180 | assert_eq!(offset1, 0); 181 | assert_eq!(offset2, 4); 182 | ring.dealloc_front(region); 183 | 184 | // 0 1 2 3 4 5 6 7 8 185 | // [new ] [2 ] 186 | let (_, offset) = ring.alloc_back_aligned(2, 4).unwrap(); 187 | assert_eq!(offset, 0); 188 | } 189 | 190 | #[test] 191 | fn alloc_back_wrap_fail() { 192 | let mut ring = Ring::new(8u32); 193 | let (region, offset1) = ring.alloc_back(6).unwrap(); 194 | let (_, offset2) = ring.alloc_back(2).unwrap(); 195 | assert_eq!(offset1, 0); 196 | assert_eq!(offset2, 6); 197 | ring.dealloc_front(region); 198 | 199 | let (_, offset3) = ring.alloc_back(2).unwrap(); 200 | assert_eq!(offset3, 0); 201 | 202 | // 0 1 2 3 4 5 6 7 8 203 | // [3 ][4 ] [2 ] 204 | let (_, offset4) = ring.alloc_back(2).unwrap(); 205 | assert_eq!(offset4, 2); 206 | 207 | assert_eq!(ring.alloc_back(8), None); 208 | assert_eq!(ring.alloc_back(6), None); 209 | assert_eq!(ring.alloc_back(4), None); 210 | assert_eq!(ring.alloc_back(2).unwrap().1, 4); 211 | } 212 | 213 | #[test] 214 | fn alloc_front_aligned() { 215 | let mut ring = Ring::new(8u32); 216 | let (_, offset1) = ring.alloc_front_aligned(2, 64).unwrap(); 217 | let (_, offset2) = ring.alloc_front_aligned(3, 4).unwrap(); 218 | 219 | // 0 1 2 3 4 5 6 7 8 220 | // [1 ] [2 ] 221 | assert_eq!(offset1, 0); 222 | assert_eq!(offset2, 4); 223 | } 224 | 225 | #[test] 226 | fn alloc_front_wrap() { 227 | let mut ring = Ring::new(8u32); 228 | let (region, offset1) = ring.alloc_back(2).unwrap(); 229 | let (_, offset2) = ring.alloc_back(2).unwrap(); 230 | assert_eq!(offset1, 0); 231 | assert_eq!(offset2, 2); 232 | ring.dealloc_front(region); 233 | 234 | // 0 1 2 3 4 5 6 7 8 235 | // [2 ][new ] 236 | assert_eq!(ring.alloc_front(5), None); 237 | let (_, offset) = ring.alloc_front(4).unwrap(); 238 | assert_eq!(offset, 4); 239 | } 240 | 241 | #[test] 242 | fn alloc_front_aligned_wrap() { 243 | let mut ring = Ring::new(8u32); 244 | let (region, offset1) = ring.alloc_back(1).unwrap(); 245 | let (_, offset2) = ring.alloc_back(4).unwrap(); 246 | assert_eq!(offset1, 0); 247 | assert_eq!(offset2, 1); 248 | ring.dealloc_front(region); 249 | 250 | // 0 1 2 3 4 5 6 7 8 251 | // [2 ] [new ] 252 | assert_eq!(ring.alloc_front_aligned(2, 4), None); 253 | let (_, offset) = ring.alloc_front_aligned(2, 2).unwrap(); 254 | assert_eq!(offset, 6); 255 | } 256 | 257 | #[test] 258 | fn alloc_front_aligned2() { 259 | let mut ring = Ring::new(8u32); 260 | let (region, offset1) = ring.alloc_back(2).unwrap(); 261 | let (_, offset2) = 
ring.alloc_back(4).unwrap(); 262 | assert_eq!(offset1, 0); 263 | assert_eq!(offset2, 2); 264 | ring.dealloc_front(region); 265 | 266 | // 0 1 2 3 4 5 6 7 8 267 | // [new ][2 ] 268 | let (_, offset) = ring.alloc_front_aligned(2, 8).unwrap(); 269 | assert_eq!(offset, 0); 270 | } 271 | 272 | #[test] 273 | #[should_panic] 274 | fn dealloc_front_bad() { 275 | let mut ring = Ring::new(8u32); 276 | let (_, _) = ring.alloc_back(3).unwrap(); 277 | let (region2, _) = ring.alloc_back(3).unwrap(); 278 | ring.dealloc_front(region2); 279 | } 280 | 281 | #[test] 282 | #[should_panic] 283 | fn dealloc_back_bad() { 284 | let mut ring = Ring::new(8u32); 285 | let (region1, _) = ring.alloc_back(3).unwrap(); 286 | let (_, _) = ring.alloc_back(3).unwrap(); 287 | ring.dealloc_back(region1); 288 | } 289 | 290 | #[test] 291 | #[should_panic] 292 | fn dealloc_front_bad2() { 293 | let mut ring = Ring::new(8u32); 294 | let (region, _) = ring.alloc_back(3).unwrap(); 295 | ring.dealloc_front(region); 296 | ring.dealloc_front(region); 297 | } 298 | 299 | #[test] 300 | #[should_panic] 301 | fn dealloc_back_bad2() { 302 | let mut ring = Ring::new(8u32); 303 | let (region, _) = ring.alloc_back(3).unwrap(); 304 | ring.dealloc_back(region); 305 | ring.dealloc_back(region); 306 | } 307 | -------------------------------------------------------------------------------- /src/int.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | //! Traits for integral types. 10 | use core::mem::size_of; 11 | use core::{fmt, ops}; 12 | 13 | use num::Integer; 14 | 15 | /// Integral types with efficient binary operations. 16 | pub trait BinaryInteger: 17 | Integer 18 | + Clone 19 | + Sized 20 | + ops::AddAssign 21 | + ops::SubAssign 22 | + ops::MulAssign 23 | + ops::DivAssign 24 | + ops::BitOrAssign 25 | + ops::BitAndAssign 26 | + ops::BitXorAssign 27 | + ops::BitOr 28 | + ops::BitAnd 29 | + ops::BitXor 30 | + ops::Shl 31 | + ops::Shr 32 | + ops::Not 33 | + RefSaturatingAdd 34 | + fmt::Debug 35 | { 36 | type OneDigits: Iterator; 37 | 38 | fn max_value() -> Self; 39 | fn min_value() -> Self; 40 | 41 | fn max_digits() -> u32; 42 | 43 | fn ones(range: ops::Range) -> Self; 44 | 45 | fn ones_truncated(range: ops::Range) -> Self; 46 | 47 | /// Return the number of trailing zeros in its binary representation. 48 | fn trailing_zeros(&self) -> u32; 49 | 50 | /// Return the number of leading zeros in its binary representation. 51 | fn leading_zeros(&self) -> u32; 52 | 53 | /// Return the number of ones in its binary representation. 54 | fn count_ones(&self) -> u32; 55 | 56 | /// Return the position of the least significant set bit since the position 57 | /// `start`. 58 | /// 59 | /// Retruns `Self::max_digits()` if none was found. 60 | fn bit_scan_forward(&self, start: u32) -> u32; 61 | 62 | /// Slice a part of its binary representation as `u32`. 63 | fn extract_u32(&self, range: ops::Range) -> u32; 64 | 65 | /// Retrieve whether the specified bit is set or not. 66 | fn get_bit(&self, i: u32) -> bool; 67 | 68 | /// Set a single bit. 69 | fn set_bit(&mut self, i: u32); 70 | 71 | /// Clear a single bit. 72 | fn clear_bit(&mut self, i: u32); 73 | 74 | /// Perform `ceil` treating the value as a fixed point number with `fp` 75 | /// fractional part digits. 
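/// (Added illustrative example — not part of the upstream documentation.) The
/// value is rounded up to the nearest multiple of `1 << fp`; `None` is returned
/// when that multiple is not representable in `Self`.
///
/// ```
/// use xalloc::int::BinaryInteger;
/// assert_eq!(11u32.checked_ceil_fix(2), Some(12)); // next multiple of 4
/// assert_eq!(0xffff_ffffu32.checked_ceil_fix(1), None); // would overflow
/// ```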
76 | fn checked_ceil_fix(self, fp: u32) -> Option; 77 | 78 | /// Get an iterator over set bits, from the least significant bit to 79 | /// the most significant one. 80 | fn one_digits(&self) -> Self::OneDigits; 81 | } 82 | 83 | /// Types that supports saturating addition. 84 | pub trait RefSaturatingAdd { 85 | type Output; 86 | fn ref_saturating_add(&self, rhs: RHS) -> Self::Output; 87 | } 88 | 89 | /// Unsigned integral types with efficient binary operations. 90 | pub trait BinaryUInteger: BinaryInteger { 91 | /// Return `ture` if and only if `self == 2^k` for some `k`. 92 | fn is_power_of_two(&self) -> bool; 93 | } 94 | 95 | #[doc(hidden)] 96 | pub struct OneDigits(T); 97 | 98 | macro_rules! impl_binary_integer { 99 | ($type:ty) => { 100 | impl BinaryInteger for $type { 101 | type OneDigits = OneDigits; 102 | 103 | #[inline] 104 | fn max_value() -> Self { 105 | <$type>::max_value() 106 | } 107 | #[inline] 108 | fn min_value() -> Self { 109 | <$type>::min_value() 110 | } 111 | 112 | #[inline] 113 | fn max_digits() -> u32 { 114 | (size_of::<$type>() * 8) as u32 115 | } 116 | #[inline] 117 | fn ones(range: ops::Range) -> Self { 118 | assert!(range.end <= Self::max_digits()); 119 | Self::ones_truncated(range) 120 | } 121 | #[inline] 122 | fn ones_truncated(range: ops::Range) -> Self { 123 | assert!(range.start <= range.end); 124 | if range.end >= Self::max_digits() { 125 | (0 as Self).wrapping_sub(1 << range.start) 126 | } else { 127 | ((1 as Self) << range.end).wrapping_sub(1 << range.start) 128 | } 129 | } 130 | #[inline] 131 | fn trailing_zeros(&self) -> u32 { 132 | (*self).trailing_zeros() 133 | } 134 | #[inline] 135 | fn leading_zeros(&self) -> u32 { 136 | (*self).leading_zeros() 137 | } 138 | #[inline] 139 | fn count_ones(&self) -> u32 { 140 | (*self).count_ones() 141 | } 142 | #[inline] 143 | fn bit_scan_forward(&self, start: u32) -> u32 { 144 | if start >= Self::max_digits() { 145 | Self::max_digits() 146 | } else { 147 | (*self & !Self::ones(0..start)).trailing_zeros() 148 | } 149 | } 150 | #[inline] 151 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))] 152 | fn extract_u32(&self, range: ops::Range) -> u32 { 153 | let start = range.start; 154 | ((self & Self::ones_truncated(range)) >> start) as u32 155 | } 156 | #[inline] 157 | fn get_bit(&self, i: u32) -> bool { 158 | if i < Self::max_digits() { 159 | self & ((1 as Self) << i) != 0 160 | } else { 161 | false 162 | } 163 | } 164 | #[inline] 165 | fn set_bit(&mut self, i: u32) { 166 | if i < Self::max_digits() { 167 | *self |= (1 as Self) << i; 168 | } 169 | } 170 | #[inline] 171 | fn clear_bit(&mut self, i: u32) { 172 | if i < Self::max_digits() { 173 | *self &= !((1 as Self) << i); 174 | } 175 | } 176 | #[inline] 177 | fn checked_ceil_fix(self, fp: u32) -> Option { 178 | if fp >= Self::max_digits() { 179 | if self == 0 { 180 | Some(0) 181 | } else { 182 | None 183 | } 184 | } else { 185 | let mask = Self::ones(0..fp); 186 | self.checked_add(mask).map(|x| x & !mask) 187 | } 188 | } 189 | #[inline] 190 | fn one_digits(&self) -> Self::OneDigits { 191 | OneDigits(*self) 192 | } 193 | } 194 | impl RefSaturatingAdd for $type { 195 | type Output = Self; 196 | fn ref_saturating_add(&self, rhs: Self) -> Self::Output { 197 | (*self).saturating_add(rhs) 198 | } 199 | } 200 | impl Iterator for OneDigits<$type> { 201 | type Item = u32; 202 | fn next(&mut self) -> Option { 203 | if self.0 == 0 { 204 | None 205 | } else { 206 | let index = self.0.trailing_zeros(); 207 | self.0 &= !((1 as $type) << index); 208 | Some(index) 209 
| } 210 | } 211 | fn size_hint(&self) -> (usize, Option) { 212 | let ones = self.len(); 213 | (ones, Some(ones)) 214 | } 215 | fn count(self) -> usize { 216 | self.len() 217 | } 218 | } 219 | impl ExactSizeIterator for OneDigits<$type> { 220 | fn len(&self) -> usize { 221 | self.0.count_ones() as usize 222 | } 223 | } 224 | impl DoubleEndedIterator for OneDigits<$type> { 225 | fn next_back(&mut self) -> Option { 226 | if self.0 == 0 { 227 | None 228 | } else { 229 | let index = (size_of::<$type>() * 8) as u32 - 1 - self.0.leading_zeros(); 230 | self.0 &= !((1 as $type) << index); 231 | Some(index) 232 | } 233 | } 234 | } 235 | }; 236 | } 237 | 238 | macro_rules! impl_binary_uinteger { 239 | ($type:ty) => { 240 | impl BinaryUInteger for $type { 241 | #[inline] 242 | fn is_power_of_two(&self) -> bool { 243 | Self::is_power_of_two(*self) 244 | } 245 | } 246 | }; 247 | } 248 | 249 | impl_binary_integer!(i8); 250 | impl_binary_integer!(i16); 251 | impl_binary_integer!(i32); 252 | impl_binary_integer!(i64); 253 | impl_binary_integer!(isize); 254 | 255 | impl_binary_integer!(u8); 256 | impl_binary_integer!(u16); 257 | impl_binary_integer!(u32); 258 | impl_binary_integer!(u64); 259 | impl_binary_integer!(usize); 260 | 261 | impl_binary_uinteger!(u8); 262 | impl_binary_uinteger!(u16); 263 | impl_binary_uinteger!(u32); 264 | impl_binary_uinteger!(u64); 265 | impl_binary_uinteger!(usize); 266 | 267 | use num::One; 268 | 269 | pub(crate) fn round_up(x: &T, align: &T) -> T { 270 | (x.clone() + align.clone() - One::one()) & !(align.clone() - One::one()) 271 | } 272 | 273 | pub(crate) fn round_down(x: &T, align: &T) -> T { 274 | x.clone() & !(align.clone() - One::one()) 275 | } 276 | -------------------------------------------------------------------------------- /src/ring.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2018 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | //! A dynamic external memory allocator implementing the functionality of a 10 | //! [circular buffer]. 11 | //! 12 | //! [circular buffer]: https://en.wikipedia.org/wiki/Circular_buffer 13 | //! 14 | //! The calling program is responsible for tracking which allocated part is 15 | //! currently the frontmost/backmost region of a [`Ring`]. When deallocating 16 | //! a region, it must appropriately call `dealloc_front` or `dealloc_back` 17 | //! depending on the position of the region within a `Ring`. 18 | //! 19 | //! # Examples 20 | //! 21 | //! ``` 22 | //! use xalloc::{Ring, RingRegion}; 23 | //! let mut ring: Ring = Ring::new(10); 24 | //! 25 | //! // Allocate regions 26 | //! // [ ] 27 | //! let alloc1: (RingRegion, u32) = ring.alloc_back(4).unwrap(); 28 | //! // [[ 1 ] ] 29 | //! let alloc2: (RingRegion, u32) = ring.alloc_back(4).unwrap(); 30 | //! // [[ 1 ][ 2 ] ] 31 | //! let (region1, offset1) = alloc1; 32 | //! let (region2, offset2) = alloc2; 33 | //! println!("allocated #1: {:?}", (®ion1, offset1)); 34 | //! println!("allocated #2: {:?}", (®ion2, offset2)); 35 | //! 36 | //! // Deallocate regions 37 | //! // [[ 1 ][ 2 ] ] 38 | //! ring.dealloc_front(region1); 39 | //! // [ [ 2 ] ] 40 | //! ring.dealloc_front(region2); 41 | //! // [ ] 42 | //! 
``` 43 | use num::{One, Zero}; 44 | 45 | use int::{round_down, round_up, BinaryUInteger}; 46 | 47 | /// A dynamic external memory allocator providing the functionality of a 48 | /// [circular buffer]. 49 | /// 50 | /// [circular buffer]: https://en.wikipedia.org/wiki/Circular_buffer 51 | /// 52 | /// See [the module-level documentation] for more. 53 | /// 54 | /// [the module-level documentation]: index.html 55 | /// 56 | /// ## Type parameters 57 | /// 58 | /// - `T` is an integer type used to represent region sizes. You usually use 59 | /// `u32` or `u64` for this. 60 | /// 61 | #[derive(Debug)] 62 | pub struct Ring { 63 | size: T, 64 | 65 | /// The starting location of the allocated region. Must be less than `size`. 66 | start: T, 67 | 68 | /// The ending location of the allocated region. Must be less than `size`. 69 | end: T, 70 | 71 | /// Indicates whether this `Ring` is empty or not. 72 | empty: bool, 73 | } 74 | 75 | /// A handle type to a region allocated in a [`Ring`]. 76 | /// 77 | /// `RingRegion` returned by a `Ring` only can be used with the 78 | /// same `Ring`. 79 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 80 | pub struct RingRegion { 81 | start: T, 82 | end: T, 83 | } 84 | 85 | impl Ring { 86 | /// Construct a `RingRegion`. 87 | /// 88 | /// `size` must be smaller than `T::max_value() >> 1` (this is a precaution 89 | /// taken not to cause unintentional overflows). 90 | pub fn new(size: T) -> Self { 91 | assert!(size < T::max_value() >> 1); 92 | 93 | Self { 94 | size: size.clone(), 95 | start: Zero::zero(), 96 | end: Zero::zero(), 97 | empty: true, 98 | } 99 | } 100 | 101 | /// Return `true` if `Ring` has no allocated regions. 102 | pub fn is_empty(&self) -> bool { 103 | self.empty 104 | } 105 | 106 | /// Return `true` if `Ring` has no free space. 107 | pub fn is_full(&self) -> bool { 108 | self.start == self.end 109 | } 110 | 111 | /// Allocate a region of the size `size` to the back of the allocated 112 | /// region. 113 | /// 114 | /// Returns a handle of the allocated region and its offset if the 115 | /// allocation succeeds. Returns `None` otherwise. 116 | /// 117 | /// `size` must not be zero. 118 | pub fn alloc_back(&mut self, size: T) -> Option<(RingRegion, T)> { 119 | self.alloc_back_aligned(size, One::one()) 120 | } 121 | 122 | /// Allocate a region of the size `size` to the front of the allocated 123 | /// region. 124 | /// 125 | /// Returns a handle of the allocated region and its offset if the 126 | /// allocation succeeds. Returns `None` otherwise. 127 | /// 128 | /// `size` must not be zero. 129 | pub fn alloc_front(&mut self, size: T) -> Option<(RingRegion, T)> { 130 | self.alloc_front_aligned(size, One::one()) 131 | } 132 | 133 | /// Allocate a region of the size `size` with a given alignment requirement 134 | /// to the back of the allocated region. 135 | /// 136 | /// Returns a handle of the allocated region and its offset if the 137 | /// allocation succeeds. Returns `None` otherwise. 138 | /// 139 | /// - `align` must be a power of two. 140 | /// - `size` must not be zero. 
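/// (Added illustrative example — not part of the upstream documentation; it
/// only uses the `Ring` API defined in this module.)
///
/// ```
/// use xalloc::Ring;
/// let mut ring = Ring::new(8u32);
/// // The second region is pushed to offset 4 to satisfy the 4-unit alignment.
/// assert_eq!(ring.alloc_back_aligned(2u32, 4u32).unwrap().1, 0);
/// assert_eq!(ring.alloc_back_aligned(2u32, 4u32).unwrap().1, 4);
/// ```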
141 | pub fn alloc_back_aligned(&mut self, size: T, align: T) -> Option<(RingRegion, T)> { 142 | assert_ne!(size, Zero::zero()); 143 | assert!(align.is_power_of_two()); 144 | 145 | if self.empty { 146 | self.alloc_empty(size) 147 | } else if size >= self.size { 148 | None 149 | } else { 150 | let mut new_wrapped = self.end <= self.start; 151 | let mut new_end = round_up(&self.end, &align); 152 | if new_end.clone() + size.clone() > self.size && !new_wrapped { 153 | new_end = Zero::zero(); 154 | new_wrapped = true; 155 | } 156 | if new_wrapped && new_end.clone() + size.clone() > self.start { 157 | return None; 158 | } 159 | 160 | let offset = new_end.clone(); 161 | new_end += size; 162 | if new_end == self.size { 163 | new_end = Zero::zero(); 164 | } 165 | let region = RingRegion { 166 | start: self.end.clone(), 167 | end: new_end.clone(), 168 | }; 169 | self.end = new_end; 170 | Some((region, offset)) 171 | } 172 | } 173 | 174 | /// Allocate a region of the size `size` with a given alignment requirement 175 | /// to the front of the allocated region. 176 | /// 177 | /// Returns a handle of the allocated region and its offset if the 178 | /// allocation succeeds. Returns `None` otherwise. 179 | /// 180 | /// - `align` must be a power of two. 181 | /// - `size` must not be zero. 182 | pub fn alloc_front_aligned(&mut self, size: T, align: T) -> Option<(RingRegion, T)> { 183 | assert_ne!(size, Zero::zero()); 184 | assert!(align.is_power_of_two()); 185 | 186 | if self.empty { 187 | self.alloc_empty(size) 188 | } else if size >= self.size { 189 | None 190 | } else { 191 | // 0 1align 2align 192 | // | | size | 193 | // |===================|=====| | 194 | // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ enlarged_size 195 | // ^^^^^^^^^^^^^^^ pad 196 | let enlarged_size = round_up(&size, &align); 197 | let pad = enlarged_size.clone() - size.clone(); 198 | 199 | let mut new_wrapped = self.end <= self.start; 200 | let mut new_start = round_down(&(self.start.clone() + pad.clone()), &align); 201 | if new_start < enlarged_size.clone() && !new_wrapped { 202 | new_start = round_down(&(self.size.clone() + pad.clone()), &align); 203 | new_wrapped = true; 204 | } 205 | if new_wrapped && self.end.clone() + enlarged_size.clone() > new_start { 206 | return None; 207 | } 208 | 209 | new_start -= enlarged_size; 210 | let offset = new_start.clone(); 211 | let region = RingRegion { 212 | start: new_start.clone(), 213 | end: self.start.clone(), 214 | }; 215 | self.start = new_start; 216 | Some((region, offset)) 217 | } 218 | } 219 | 220 | fn alloc_empty(&mut self, size: T) -> Option<(RingRegion, T)> { 221 | debug_assert!(self.empty); 222 | 223 | if size <= self.size { 224 | self.start = Zero::zero(); 225 | self.end = if size == self.size { 226 | Zero::zero() 227 | } else { 228 | size.clone() 229 | }; 230 | self.empty = false; 231 | Some(( 232 | RingRegion { 233 | start: Zero::zero(), 234 | end: size, 235 | }, 236 | Zero::zero(), 237 | )) 238 | } else { 239 | None 240 | } 241 | } 242 | 243 | /// Deallocate frontmost (first) regions until `r` becomes the new frontmost 244 | /// region. `r` is not removed. 245 | /// 246 | /// `r` must be in `Ring`. 247 | /// Otherwise, `Ring` might enter an inconsistent state and/or panic, but 248 | /// does not cause an undefined behavior. 249 | pub fn dealloc_front_until(&mut self, r: RingRegion) { 250 | assert!(!self.empty, "empty"); 251 | self.start = r.start; 252 | } 253 | 254 | /// Deallocate backmost (last) regions until `r` becomes the new backmost 255 | /// region. 
`r` is not removed. 256 | /// 257 | /// `r` must be in `Ring`. 258 | /// Otherwise, `Ring` might enter an inconsistent state and/or panic, but 259 | /// does not cause an undefined behavior. 260 | pub fn dealloc_back_until(&mut self, r: RingRegion) { 261 | assert!(!self.empty, "empty"); 262 | self.end = r.end; 263 | } 264 | 265 | /// Deallocate the frontmost (first) region. 266 | /// 267 | /// `r` must be the current frontmost region of `Ring`. 268 | /// Otherwise, `Ring` might enter an inconsistent state and/or panic, but 269 | /// does not cause an undefined behavior. 270 | pub fn dealloc_front(&mut self, r: RingRegion) { 271 | assert!(!self.empty, "empty"); 272 | assert!(self.start == r.start, "not front"); 273 | self.start = r.end; 274 | if self.start == self.end { 275 | self.empty = true; 276 | } 277 | } 278 | 279 | /// Deallocate the backmost (last) region. 280 | /// 281 | /// `r` must be the current backmost region of `Ring`. 282 | /// Otherwise, `Ring` might enter an inconsistent state and/or panic, but 283 | /// does not cause an undefined behavior. 284 | pub fn dealloc_back(&mut self, r: RingRegion) { 285 | assert!(!self.empty, "empty"); 286 | assert!(self.end == r.end, "not back"); 287 | self.end = r.start; 288 | if self.start == self.end { 289 | self.empty = true; 290 | } 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /src/bitmaputils.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | use core::ops::Range; 10 | use int::BinaryUInteger; 11 | 12 | pub fn set_bits_ranged(map: &mut [T], range: Range) { 13 | let width = T::max_digits() as usize; 14 | 15 | if range.start >= range.end { 16 | return; 17 | } 18 | let mut start_i = range.start / width; 19 | let start_f = range.start % width; 20 | let end_i = range.end / width; 21 | let end_f = range.end % width; 22 | 23 | assert!(start_i < map.len()); 24 | assert!(end_i <= map.len()); 25 | 26 | if start_i == end_i { 27 | map[start_i] |= T::ones(start_f as u32..end_f as u32); 28 | } else { 29 | if start_f != 0 { 30 | map[start_i] |= T::ones(start_f as u32..width as u32); 31 | start_i += 1; 32 | } 33 | for e in &mut map[start_i..end_i] { 34 | *e = !T::zero(); 35 | } 36 | if end_f != 0 { 37 | map[end_i] |= T::ones(0..end_f as u32); 38 | } 39 | } 40 | } 41 | 42 | pub fn clear_bits_ranged(map: &mut [T], range: Range) { 43 | let width = T::max_digits() as usize; 44 | 45 | if range.start >= range.end { 46 | return; 47 | } 48 | let mut start_i = range.start / width; 49 | let start_f = range.start % width; 50 | let end_i = range.end / width; 51 | let end_f = range.end % width; 52 | 53 | assert!(start_i < map.len()); 54 | assert!(end_i <= map.len()); 55 | 56 | if start_i == end_i { 57 | map[start_i] &= !T::ones(start_f as u32..end_f as u32); 58 | } else { 59 | if start_f != 0 { 60 | map[start_i] &= !T::ones(start_f as u32..width as u32); 61 | start_i += 1; 62 | } 63 | for e in &mut map[start_i..end_i] { 64 | *e = T::zero(); 65 | } 66 | if end_f != 0 { 67 | map[end_i] &= !T::ones(0..end_f as u32); 68 | } 69 | } 70 | } 71 | 72 | pub fn find_zeros( 73 | map: &[T], 74 | start: usize, 75 | count: usize, 76 | ) -> Option { 77 | let width = T::max_digits() as usize; 78 | 79 | if start >= map.len() * width { 80 | None 81 | } else if count >= width { 82 | 
find_zeros_large(map, start, count) 83 | } else if count == 0 { 84 | Some(start) 85 | } else if count == 1 { 86 | find_zeros_small(map, start, count, |i| i) 87 | } else if count == 2 { 88 | find_zeros_small(map, start, count, |i| i | i >> 1) 89 | } else if count <= 4 { 90 | let last = (count - 2) as u32; 91 | find_zeros_small(map, start, count, move |mut i| { 92 | i |= i >> 1; 93 | i |= i >> last; 94 | i 95 | }) 96 | } else if count <= 8 { 97 | let last = (count - 4) as u32; 98 | find_zeros_small(map, start, count, move |mut i| { 99 | i |= i >> 1; 100 | i |= i >> 2; 101 | i |= i >> last; 102 | i 103 | }) 104 | } else if count <= 16 { 105 | let last = (count - 8) as u32; 106 | find_zeros_small(map, start, count, move |mut i| { 107 | i |= i >> 1; 108 | i |= i >> 2; 109 | i |= i >> 4; 110 | i |= i >> last; 111 | i 112 | }) 113 | } else if count <= 32 { 114 | let last = (count - 16) as u32; 115 | find_zeros_small(map, start, count, move |mut i| { 116 | i |= i >> 1; 117 | i |= i >> 2; 118 | i |= i >> 4; 119 | i |= i >> 8; 120 | i |= i >> last; 121 | i 122 | }) 123 | } else if count <= 64 { 124 | let last = (count - 32) as u32; 125 | find_zeros_small(map, start, count, move |mut i| { 126 | i |= i >> 1; 127 | i |= i >> 2; 128 | i |= i >> 4; 129 | i |= i >> 8; 130 | i |= i >> 16; 131 | i |= i >> last; 132 | i 133 | }) 134 | } else if count <= 128 { 135 | let last = (count - 64) as u32; 136 | find_zeros_small(map, start, count, move |mut i| { 137 | i |= i >> 1; 138 | i |= i >> 2; 139 | i |= i >> 4; 140 | i |= i >> 8; 141 | i |= i >> 16; 142 | i |= i >> 32; 143 | i |= i >> last; 144 | i 145 | }) 146 | } else { 147 | panic!("unsupported: T is too large (> 128 bits)"); 148 | } 149 | } 150 | 151 | fn find_zeros_large( 152 | map: &[T], 153 | start: usize, 154 | count: usize, 155 | ) -> Option { 156 | let width = T::max_digits() as usize; 157 | debug_assert!(count >= width); 158 | 159 | let i_f = start % width; 160 | let mut i_i = start / width; 161 | let mut run = 0; 162 | debug_assert!(i_i < map.len()); 163 | 164 | if i_f != 0 { 165 | run = (map[i_i] | T::ones(0..i_f as u32)).leading_zeros() as usize; 166 | i_i += 1; 167 | } 168 | 169 | while i_i < map.len() { 170 | if map[i_i].is_zero() { 171 | run += width; 172 | if run >= count { 173 | return Some(i_i * width + width - run); 174 | } 175 | } else { 176 | let tz = map[i_i].trailing_zeros() as usize; 177 | run += tz; 178 | if run >= count { 179 | return Some(i_i * width + tz - run); 180 | } 181 | run = map[i_i].leading_zeros() as usize; 182 | } 183 | i_i += 1; 184 | } 185 | 186 | None 187 | } 188 | 189 | /// `find_zeros` for the cases where `count < T::max_digits()`. 190 | /// 191 | /// `F` must return `(0..count).fold(0, |a, i| a | (x >> i))` for the input `x`. 
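/// For example (illustrative): with `count == 3` and `x == 0b0010_0000`, `F`
/// must return `x | (x >> 1) | (x >> 2) == 0b0011_1000`. Bit `p` of that value
/// is zero exactly when a run of `count` zero bits starts at bit `p` of `x`
/// (for `p` low enough that the run fits in the word); the top `count - 1`
/// bits are not meaningful because the shifts pull in zeros, which is why
/// `find_zeros_small` masks them off with `dilate_mask`.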
192 | fn find_zeros_small( 193 | map: &[T], 194 | start: usize, 195 | count: usize, 196 | dilate: F, 197 | ) -> Option 198 | where 199 | F: Fn(T) -> T, 200 | { 201 | let width = T::max_digits() as usize; 202 | debug_assert!(count > 0); 203 | debug_assert!(count < width); 204 | 205 | let dilate_mask = T::ones(0..(width + 1 - count) as u32); 206 | 207 | let i_f = start % width; 208 | let mut i_i = start / width; 209 | let mut run = 0; 210 | debug_assert!(i_i < map.len()); 211 | 212 | if i_f != 0 { 213 | let m = dilate(map[i_i]); 214 | let mask = dilate_mask & T::ones(i_f as u32..T::max_digits()); 215 | if m & mask != mask { 216 | return Some((!m).bit_scan_forward(i_f as u32) as usize + i_i * width); 217 | } 218 | run = (map[i_i] | T::ones(0..i_f as u32)).leading_zeros() as usize; 219 | debug_assert!(run < count); 220 | i_i += 1; 221 | } 222 | 223 | while i_i < map.len() { 224 | let tz = map[i_i].trailing_zeros() as usize; 225 | run += tz; 226 | if run >= count { 227 | return Some(i_i * width + tz - run); 228 | } 229 | 230 | let m = dilate(map[i_i]); 231 | if m & dilate_mask != dilate_mask { 232 | return Some((!m).trailing_zeros() as usize + i_i * width); 233 | } 234 | 235 | run = map[i_i].leading_zeros() as usize; 236 | debug_assert!(run < count); 237 | 238 | i_i += 1; 239 | } 240 | 241 | None 242 | } 243 | 244 | #[cfg(test)] 245 | mod find_zeros_tests { 246 | use super::*; 247 | 248 | struct Xorshift32(u32); 249 | 250 | impl Xorshift32 { 251 | /// Returns a random integer in `[0, 0xfffffffe]` 252 | fn next(&mut self) -> u32 { 253 | self.0 ^= self.0 << 13; 254 | self.0 ^= self.0 >> 17; 255 | self.0 ^= self.0 << 5; 256 | !self.0 257 | } 258 | } 259 | 260 | fn patterns(mut f: F) { 261 | f(&[0; 16]); 262 | f(&[0xffffffff; 16]); 263 | f(&[0x01010101; 16]); 264 | f(&[0x80808080; 16]); 265 | f(&[0xdeadbeef; 16]); 266 | f(&[0x11451419; 16]); 267 | 268 | let mut buf = [0; 16]; 269 | let mut rng = Xorshift32(12345678); 270 | for _ in 0..32 { 271 | for x in buf.iter_mut() { 272 | *x = rng.next(); 273 | } 274 | f(&buf); 275 | } 276 | for _ in 0..32 { 277 | for x in buf.iter_mut() { 278 | *x = rng.next() & rng.next() & rng.next(); 279 | } 280 | f(&buf); 281 | } 282 | 283 | use int::BinaryInteger; 284 | for _ in 0..32 { 285 | for x in buf.iter_mut() { 286 | *x = 0; 287 | if rng.next() & 0x1 != 0 { 288 | x.set_bit(rng.next() & 31); 289 | } 290 | } 291 | f(&buf); 292 | } 293 | } 294 | 295 | fn find_zeros_naive( 296 | map: &[T], 297 | start: usize, 298 | count: usize, 299 | ) -> Option { 300 | let width = T::max_digits() as usize; 301 | let mut run = 0; 302 | if count == 0 && start < map.len() * width { 303 | return Some(start); 304 | } 305 | for i in start..map.len() * width { 306 | let i_f = i % width; 307 | let i_i = i / width; 308 | if map[i_i].get_bit(i_f as u32) { 309 | run = 0; 310 | } else { 311 | run += 1; 312 | if run >= count { 313 | return Some(i + 1 - count); 314 | } 315 | } 316 | } 317 | None 318 | } 319 | 320 | struct BitField<'a, T: 'a>(&'a [T]); 321 | 322 | use core::fmt; 323 | impl<'a, T: BinaryUInteger + Copy + 'a> fmt::Debug for BitField<'a, T> { 324 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 325 | write!(f, "BitField (")?; 326 | 327 | let width = T::max_digits(); 328 | for x in self.0.iter() { 329 | write!(f, " ")?; 330 | for i in 0..width { 331 | write!(f, "{}", if x.get_bit(i) { "1" } else { "0" })?; 332 | } 333 | } 334 | 335 | write!(f, " )") 336 | } 337 | } 338 | 339 | fn test_one(count: usize) { 340 | patterns(|p| { 341 | for &s in [0, 1, 2, 30, 31, 32].iter() { 342 | 
assert_eq!( 343 | find_zeros(p, s, count), 344 | find_zeros_naive(p, s, count), 345 | "{:?}", 346 | (BitField(p), s, count) 347 | ); 348 | } 349 | }); 350 | } 351 | 352 | #[test] 353 | fn test_0() { 354 | test_one(0) 355 | } 356 | 357 | #[test] 358 | fn test_1() { 359 | test_one(1) 360 | } 361 | 362 | #[test] 363 | fn test_2() { 364 | test_one(2) 365 | } 366 | 367 | #[test] 368 | fn test_3() { 369 | test_one(3) 370 | } 371 | 372 | #[test] 373 | fn test_4() { 374 | test_one(4) 375 | } 376 | 377 | #[test] 378 | fn test_6() { 379 | test_one(6) 380 | } 381 | 382 | #[test] 383 | fn test_8() { 384 | test_one(8) 385 | } 386 | 387 | #[test] 388 | fn test_12() { 389 | test_one(12) 390 | } 391 | 392 | #[test] 393 | fn test_16() { 394 | test_one(16) 395 | } 396 | 397 | #[test] 398 | fn test_24() { 399 | test_one(24) 400 | } 401 | 402 | #[test] 403 | fn test_32() { 404 | test_one(32) 405 | } 406 | 407 | #[test] 408 | fn test_48() { 409 | test_one(48) 410 | } 411 | 412 | #[test] 413 | fn test_64() { 414 | test_one(64) 415 | } 416 | } 417 | -------------------------------------------------------------------------------- /src/arena.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | //! Memory arena traits (used by [`Tlsf`]). 10 | //! 11 | //! [`Tlsf`]: crate::tlsf::Tlsf 12 | use alloc::boxed::Box; 13 | use core::fmt; 14 | 15 | /// Homogeneous memory arena types supporting operations that do not guarantee 16 | /// memory safety. 17 | /// 18 | /// Methods prefixed with `_unchecked` all assume given pointers are valid; 19 | /// specifically, they assumes that: 20 | /// 21 | /// 1. The pointers were constructed by the same instance of `UnsafeArena`. 22 | /// 2. The pointers have not been removed from the arena yet. 23 | /// 24 | pub trait UnsafeArena { 25 | /// Pointer type. 26 | /// 27 | /// - `Ptr::clone(p)` returns a pointer that points the same object. 28 | /// - `Ptr::default()` returns an uninitialized pointer with a well-defined 29 | /// value (e.g., null pointer). 30 | /// - `Ptr::eq(x, y)` checks the equality of two pointers. Both pointers 31 | /// must originate from the same arena. Otherwise, the returned value 32 | /// does not make sense. 33 | type Ptr: fmt::Debug + Clone + Default + PartialEq + Eq; 34 | 35 | /// Insert a value into the arena. 36 | /// 37 | /// Returns a pointer that points the inserted value. 38 | fn insert(&mut self, x: T) -> Self::Ptr; 39 | 40 | /// Reserves capacity for at least `additional` values to be inserted in the arena. 41 | fn reserve(&mut self, _additional: usize) {} 42 | 43 | /// Get a reference to a contained value, without a pointer validity check. 44 | unsafe fn get_unchecked(&self, ptr: &Self::Ptr) -> &T; 45 | 46 | /// Get a mutable reference to a contained value, without a pointer validity 47 | /// check. 48 | unsafe fn get_unchecked_mut(&mut self, ptr: &Self::Ptr) -> &mut T; 49 | 50 | /// Remove a value from the arena, without a pointer validity check. 51 | /// 52 | /// Returns the removed value. 53 | unsafe fn remove_unchecked(&mut self, ptr: &Self::Ptr) -> T; 54 | } 55 | 56 | /// Marker trait indicating all operations from `UnsafeArena` are actually 57 | /// implemented as memory-safe (not `unsafe`). 
58 | /// 59 | /// An invalid operation on `SafeArena` must result in one of the following 60 | /// possible outcomes: 61 | /// 62 | /// 1. A panic. The internal state can be left in an inconsistent state, but 63 | /// even if so, further operations on the arena should not violate the 64 | /// memory safety, for example by employing the "poisoning" strategy. 65 | /// 2. It behaves as if arbitrary valid values were provided as the parameter. 66 | /// 67 | pub trait SafeArena: UnsafeArena {} 68 | 69 | /// Homogeneous memory arena types capable of checking whether a given pointer 70 | /// was created by the same instance of the arena. 71 | pub trait UnsafeArenaWithMembershipCheck: UnsafeArena { 72 | /// Return `true` if the pointer was created from the same instance of the 73 | /// arena. 74 | /// 75 | /// Calling this with an already-freed pointer or an uninitialized pointer 76 | /// might result in an undefined behavior. 77 | unsafe fn contains_unchecked(&self, ptr: &Self::Ptr) -> bool; 78 | } 79 | 80 | /// Memory-safe homogeneous memory arena types. 81 | pub trait Arena: UnsafeArena { 82 | /// Get a reference to a contained value. 83 | fn get(&self, ptr: &Self::Ptr) -> Option<&T>; 84 | 85 | /// Get a mutable reference to a contained value. 86 | fn get_mut(&mut self, ptr: &Self::Ptr) -> Option<&mut T>; 87 | 88 | /// Remove a value from the arena. 89 | /// 90 | /// Returns the removed value. 91 | fn remove(&mut self, ptr: &Self::Ptr) -> Option; 92 | } 93 | 94 | #[cfg(test)] 95 | fn test_common>(arena: &mut T) { 96 | let p1 = arena.insert("twi"); 97 | let p2 = arena.insert("aj"); 98 | 99 | unsafe { 100 | assert!(arena.contains_unchecked(&p1)); 101 | assert!(arena.contains_unchecked(&p2)); 102 | 103 | assert_eq!(arena.get_unchecked(&p1), &"twi"); 104 | assert_eq!(arena.get_unchecked_mut(&p2), &"aj"); 105 | 106 | *arena.get_unchecked_mut(&p2) = "flutter"; 107 | 108 | assert_eq!(&arena.remove_unchecked(&p1), &"twi"); 109 | assert_eq!(&arena.remove_unchecked(&p2), &"flutter"); 110 | } 111 | } 112 | 113 | /// `UnsafeArena` implementation that relies on the system memory allocator. 114 | pub mod sys { 115 | use super::*; 116 | use core::mem::{transmute, transmute_copy, ManuallyDrop}; 117 | use core::ptr::read; 118 | use core::ptr::NonNull; 119 | 120 | /// `UnsafeArena` implementation that relies on the system memory allocator. 121 | #[derive(Debug, Clone, Copy)] 122 | pub struct SysAllocator; 123 | 124 | /// Pointer type of `SysAllocator`. 
125 | pub struct Ptr(NonNull); 126 | 127 | impl Ptr { 128 | unsafe fn new(ptr: *mut T) -> Self { 129 | debug_assert!(!ptr.is_null()); 130 | Ptr(transmute(ptr)) 131 | } 132 | 133 | fn as_ptr(&self) -> *mut T { 134 | unsafe { transmute_copy(&self.0) } 135 | } 136 | } 137 | impl PartialEq for Ptr { 138 | fn eq(&self, other: &Self) -> bool { 139 | self.as_ptr::() == other.as_ptr::() 140 | } 141 | } 142 | impl Eq for Ptr {} 143 | impl Clone for Ptr { 144 | fn clone(&self) -> Self { 145 | unsafe { transmute_copy(self) } 146 | } 147 | } 148 | impl Default for Ptr { 149 | fn default() -> Self { 150 | static X: u8 = 0; 151 | unsafe { Ptr::new((&X) as *const u8 as *mut u8) } 152 | } 153 | } 154 | impl fmt::Debug for Ptr { 155 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 156 | f.debug_tuple("Ptr").field(&self.as_ptr::()).finish() 157 | } 158 | } 159 | impl fmt::Pointer for Ptr { 160 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 161 | write!(f, "{:p}", &self.as_ptr::()) 162 | } 163 | } 164 | 165 | unsafe impl Sync for Ptr {} 166 | unsafe impl Send for Ptr {} 167 | 168 | impl UnsafeArena for SysAllocator { 169 | type Ptr = Ptr; 170 | 171 | fn insert(&mut self, x: T) -> Self::Ptr { 172 | unsafe { Ptr::new(Box::into_raw(Box::new(ManuallyDrop::new(x)))) } 173 | } 174 | 175 | unsafe fn get_unchecked(&self, ptr: &Self::Ptr) -> &T { 176 | &**ptr.as_ptr::>() 177 | } 178 | 179 | unsafe fn get_unchecked_mut(&mut self, ptr: &Self::Ptr) -> &mut T { 180 | &mut **ptr.as_ptr::>() 181 | } 182 | 183 | unsafe fn remove_unchecked(&mut self, ptr: &Self::Ptr) -> T { 184 | let b = Box::from_raw(ptr.as_ptr::>()); 185 | read(&**b) 186 | } 187 | } 188 | 189 | impl UnsafeArenaWithMembershipCheck for SysAllocator { 190 | unsafe fn contains_unchecked(&self, _ptr: &Self::Ptr) -> bool { 191 | true 192 | } 193 | } 194 | 195 | #[test] 196 | fn test() { 197 | test_common(&mut SysAllocator); 198 | } 199 | } 200 | 201 | pub use self::sys::SysAllocator; 202 | 203 | /// Naïve memory-safe implementation of `Arena`. 204 | pub mod checked { 205 | use super::*; 206 | use alloc::collections::BTreeMap as MapType; 207 | use alloc::sync::Arc; 208 | /// Naïve memory-safe implementation of `Arena`. 209 | /// 210 | /// For a test purpose only. Do not use this in production. It is really slow. 211 | #[derive(Debug)] 212 | pub struct CheckedArena { 213 | map: MapType, 214 | id: Arc<()>, 215 | next_key: u64, 216 | } 217 | 218 | /// Pointer type of `CheckedArena`. 219 | #[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] 220 | pub struct Ptr(Arc<()>, u64); 221 | 222 | impl CheckedArena { 223 | /// Construct a `CheckedArena`. 
224 | pub fn new() -> Self { 225 | Self { 226 | map: MapType::new(), 227 | id: Arc::new(()), 228 | next_key: 0, 229 | } 230 | } 231 | } 232 | 233 | impl Default for CheckedArena { 234 | fn default() -> Self { 235 | Self::new() 236 | } 237 | } 238 | 239 | impl UnsafeArena for CheckedArena { 240 | type Ptr = Ptr; 241 | 242 | fn insert(&mut self, x: T) -> Self::Ptr { 243 | let key = self.next_key; 244 | self.next_key = self.next_key.checked_add(1).unwrap(); 245 | assert!(self.map.insert(key, x).is_none()); 246 | Ptr(Arc::clone(&self.id), key) 247 | } 248 | 249 | unsafe fn get_unchecked(&self, ptr: &Self::Ptr) -> &T { 250 | assert!(self.contains_unchecked(ptr), "invalid arena"); 251 | self.map.get(&ptr.1).expect("already removed") 252 | } 253 | 254 | unsafe fn get_unchecked_mut(&mut self, ptr: &Self::Ptr) -> &mut T { 255 | assert!(self.contains_unchecked(ptr), "invalid arena"); 256 | self.map.get_mut(&ptr.1).expect("already removed") 257 | } 258 | 259 | unsafe fn remove_unchecked(&mut self, ptr: &Self::Ptr) -> T { 260 | assert!(self.contains_unchecked(ptr), "invalid arena"); 261 | self.map.remove(&ptr.1).expect("already removed") 262 | } 263 | } 264 | 265 | impl SafeArena for CheckedArena {} 266 | 267 | impl UnsafeArenaWithMembershipCheck for CheckedArena { 268 | unsafe fn contains_unchecked(&self, ptr: &Self::Ptr) -> bool { 269 | Arc::ptr_eq(&ptr.0, &self.id) 270 | } 271 | } 272 | 273 | impl Arena for CheckedArena { 274 | fn get(&self, ptr: &Self::Ptr) -> Option<&T> { 275 | if unsafe { !self.contains_unchecked(ptr) } { 276 | None 277 | } else { 278 | self.map.get(&ptr.1) 279 | } 280 | } 281 | 282 | fn get_mut(&mut self, ptr: &Self::Ptr) -> Option<&mut T> { 283 | if unsafe { !self.contains_unchecked(ptr) } { 284 | None 285 | } else { 286 | self.map.get_mut(&ptr.1) 287 | } 288 | } 289 | 290 | fn remove(&mut self, ptr: &Self::Ptr) -> Option { 291 | if unsafe { !self.contains_unchecked(ptr) } { 292 | None 293 | } else { 294 | self.map.remove(&ptr.1) 295 | } 296 | } 297 | } 298 | 299 | #[test] 300 | fn test1() { 301 | test_common(&mut CheckedArena::new()); 302 | } 303 | 304 | #[test] 305 | fn test2() { 306 | let mut arena1 = CheckedArena::new(); 307 | let mut arena2 = CheckedArena::new(); 308 | let p1 = arena1.insert("twi"); 309 | let p2 = arena1.insert("aj"); 310 | let p3 = arena2.insert("r"); 311 | 312 | unsafe { 313 | assert!(arena1.contains_unchecked(&p1)); 314 | assert!(arena1.contains_unchecked(&p2)); 315 | assert!(arena2.contains_unchecked(&p3)); 316 | 317 | assert!(!arena2.contains_unchecked(&p1)); 318 | assert!(!arena2.contains_unchecked(&p2)); 319 | assert!(!arena1.contains_unchecked(&p3)); 320 | 321 | assert_eq!(arena1.get_unchecked(&p1), &"twi"); 322 | assert_eq!(arena1.get_unchecked_mut(&p2), &"aj"); 323 | 324 | *arena1.get_unchecked_mut(&p2) = "flutter"; 325 | 326 | assert_eq!(&arena1.remove_unchecked(&p1), &"twi"); 327 | assert_eq!(&arena1.remove_unchecked(&p2), &"flutter"); 328 | } 329 | } 330 | } 331 | 332 | pub use self::checked::CheckedArena; 333 | 334 | /// Adds a `Vec`-based pool to any memory arena for faster reallocation. 335 | pub mod pooled { 336 | use super::*; 337 | use core::marker::PhantomData; 338 | use core::mem::MaybeUninit; 339 | use core::ptr::{drop_in_place, read}; 340 | 341 | /// Adds a vacant entry pool to any memory arena for faster reallocation. 342 | #[derive(Debug)] 343 | pub struct PooledArena 344 | where 345 | A: UnsafeArena, Ptr = P>, 346 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 347 | { 348 | inner: A, 349 | first_vacant: Option
<P>
, 350 | _phantom: PhantomData<T>, 351 | } 352 | 353 | #[derive(Debug)] 354 | pub struct Entry<T, P> { 355 | data: MaybeUninit<T>, 356 | occupied: bool, 357 | next: Option
<P>
, 358 | } 359 | 360 | impl Drop for Entry { 361 | fn drop(&mut self) { 362 | if self.occupied { 363 | unsafe { drop_in_place(self.data.as_mut_ptr()) }; 364 | } 365 | } 366 | } 367 | 368 | impl PooledArena 369 | where 370 | A: UnsafeArena, Ptr = P>, 371 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 372 | { 373 | /// Construct a `PooledArena`. 374 | pub fn new(inner: A) -> Self { 375 | Self::with_capacity(inner, 0) 376 | } 377 | 378 | /// Construct a `PooledArena` with the specified number of pre-allocated 379 | /// entries. 380 | pub fn with_capacity(inner: A, capacity: usize) -> Self { 381 | let mut arena = Self { 382 | inner, 383 | first_vacant: None, 384 | _phantom: PhantomData, 385 | }; 386 | 387 | for _ in 0..capacity { 388 | let p = arena.inner.insert(Entry { 389 | data: MaybeUninit::uninit(), 390 | occupied: false, 391 | next: arena.first_vacant.take(), 392 | }); 393 | arena.first_vacant = Some(p); 394 | } 395 | 396 | arena 397 | } 398 | 399 | /// Discard all vacant entries. 400 | pub fn purge(&mut self) { 401 | while let Some(p) = self.first_vacant.take() { 402 | let mut e = unsafe { self.inner.remove_unchecked(&p) }; 403 | 404 | // Skip `T::drop()` because we know it is a vacant entry. 405 | // We can't just `forget(e)` because `P` might be `Drop`. 406 | debug_assert!(!e.occupied); 407 | e.occupied = false; 408 | } 409 | } 410 | } 411 | 412 | impl UnsafeArena for PooledArena 413 | where 414 | A: UnsafeArena, Ptr = P>, 415 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 416 | { 417 | type Ptr = A::Ptr; 418 | 419 | fn insert(&mut self, x: T) -> Self::Ptr { 420 | if let Some(ptr) = self.first_vacant.take() { 421 | let ent = unsafe { self.inner.get_unchecked_mut(&ptr) }; 422 | 423 | debug_assert!(!ent.occupied); 424 | 425 | ent.occupied = true; 426 | ent.data = MaybeUninit::new(x); 427 | 428 | self.first_vacant = ent.next.take(); 429 | 430 | ptr 431 | } else { 432 | self.inner.insert(Entry { 433 | data: MaybeUninit::new(x), 434 | occupied: true, 435 | next: None, 436 | }) 437 | } 438 | } 439 | 440 | unsafe fn get_unchecked(&self, ptr: &Self::Ptr) -> &T { 441 | debug_assert!(self.inner.get_unchecked(ptr).occupied); 442 | &*self.inner.get_unchecked(ptr).data.as_ptr() 443 | } 444 | 445 | unsafe fn get_unchecked_mut(&mut self, ptr: &Self::Ptr) -> &mut T { 446 | debug_assert!(self.inner.get_unchecked(ptr).occupied); 447 | &mut *self.inner.get_unchecked_mut(ptr).data.as_mut_ptr() 448 | } 449 | 450 | unsafe fn remove_unchecked(&mut self, ptr: &Self::Ptr) -> T { 451 | let entry = self.inner.get_unchecked_mut(ptr); 452 | debug_assert!(entry.occupied); 453 | 454 | let value = read(entry.data.as_ptr()); 455 | entry.occupied = false; 456 | entry.next = self.first_vacant.take(); 457 | 458 | self.first_vacant = Some(ptr.clone()); 459 | 460 | value 461 | } 462 | } 463 | 464 | impl UnsafeArenaWithMembershipCheck for PooledArena 465 | where 466 | A: UnsafeArena, Ptr = P> + UnsafeArenaWithMembershipCheck>, 467 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 468 | { 469 | unsafe fn contains_unchecked(&self, ptr: &Self::Ptr) -> bool { 470 | self.inner.contains_unchecked(ptr) 471 | } 472 | } 473 | 474 | impl Arena for PooledArena 475 | where 476 | A: UnsafeArena, Ptr = P> + Arena>, 477 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 478 | { 479 | fn get(&self, ptr: &Self::Ptr) -> Option<&T> { 480 | self.inner.get(ptr).map(|x| { 481 | debug_assert!(x.occupied); 482 | unsafe { &*x.data.as_ptr() } 483 | }) 484 | } 485 | 486 | fn get_mut(&mut self, ptr: &Self::Ptr) -> Option<&mut T> { 487 | 
self.inner.get_mut(ptr).map(|x| { 488 | debug_assert!(x.occupied); 489 | unsafe { &mut *x.data.as_mut_ptr() } 490 | }) 491 | } 492 | 493 | fn remove(&mut self, ptr: &Self::Ptr) -> Option { 494 | if let Some(r) = self.inner.get_mut(ptr) { 495 | debug_assert!(r.occupied); 496 | 497 | let value = unsafe { read(r.data.as_ptr()) }; 498 | r.occupied = false; 499 | r.next = self.first_vacant.take(); 500 | 501 | self.first_vacant = Some(ptr.clone()); 502 | 503 | Some(value) 504 | } else { 505 | None 506 | } 507 | } 508 | } 509 | 510 | #[test] 511 | fn test1() { 512 | test_common(&mut PooledArena::new(CheckedArena::new())); 513 | } 514 | 515 | #[test] 516 | fn test2() { 517 | let mut arena = PooledArena::new(CheckedArena::new()); 518 | 519 | for _ in 0..2 { 520 | let p1 = arena.insert("twi"); 521 | let p2 = arena.insert("aj"); 522 | 523 | unsafe { 524 | assert!(arena.contains_unchecked(&p1)); 525 | assert!(arena.contains_unchecked(&p2)); 526 | } 527 | 528 | assert_eq!(arena.get(&p1), Some(&"twi")); 529 | assert_eq!(arena.get_mut(&p2), Some(&mut "aj")); 530 | 531 | *arena.get_mut(&p2).unwrap() = "flutter"; 532 | 533 | assert_eq!(arena.remove(&p1), Some("twi")); 534 | assert_eq!(arena.remove(&p2), Some("flutter")); 535 | } 536 | 537 | arena.purge(); 538 | } 539 | } 540 | 541 | pub use self::pooled::PooledArena; 542 | -------------------------------------------------------------------------------- /src/tlsf.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2017 yvt, all rights reserved. 3 | // 4 | // Licensed under the MIT license . This file may 6 | // not be copied, modified,or distributed except 7 | // according to those terms. 8 | // 9 | //! A dynamic external memory allocator based on the TLSF (Two-Level Segregated Fit) 10 | //! algorithm[^1]. 11 | //! 12 | //! [^1]: Masmano, Miguel, et al. "TLSF: A new dynamic memory allocator for real-time systems." 13 | //! Real-Time Systems, 2004. ECRTS 2004. Proceedings. 16th Euromicro Conference on. IEEE, 2004. 14 | //! 15 | //! ## Type parameters 16 | //! 17 | //! - `T` is an integer type used to represent region sizes. You usually use 18 | //! `u32` or `u64` for this. 19 | //! - `A` is a memory arena type used to allocate internal block structures. 20 | //! 21 | //! ## A Caveat 22 | //! 23 | //! This TLSF allocator implements a Good-Fit strategy. In order to achieve the 24 | //! O(1) execution time, only the first element of each free space list is examined. 25 | //! As a result, allocations are not guaranteed to succeed even if there 26 | //! is an enough free space if the following condition is met: 27 | //! 28 | //! - There is no free space that is larger than the requested size by a certain 29 | //! amount. 30 | //! - There is a free space that is almost as large as the requested size. 31 | //! 32 | //! Or more strictly: 33 | //! 34 | //! - Let `S`, `mapping` the number of bytes to allocate and the mapping 35 | //! function that calculates the indexes into the TLSF data structure given 36 | //! the size of a block, respectively. There exists no free space with a size 37 | //! `s` where `mapping(s) != mapping(S) && s > S`. 38 | //! - There exists a free space with a size `s` where 39 | //! `mapping(s) == mapping(S) && s < S`. 40 | //! 41 | //! ## Memory Overhead 42 | //! 43 | //! A TLSF allocator requires the following internal storage to operate (some 44 | //! details are excluded): 45 | //! 46 | //! - A variable storing the size of the heap. 47 | //! 
- One first-level list that consists of pointers to second-level lists and 48 | //! a bit field of type `T` where each bit indicates whether a free block is 49 | //! available in the corresponding second-level list or not. 50 | //! - `FLI` second-level lists each of which consists of `1 << SLI` pointers to 51 | //! free blocks and a bit field of `SLI`-bit wide where each bit indicates 52 | //! whether the corresponding entry of the free block is valid or not. 53 | //! 54 | //! When the heap size `size` is a power of two and larger than `1 << SLI`, 55 | //! `FLI` can be written as `log2(size) + 1 - SLI`. `SLI` is hard-coded to `4` 56 | //! in this implementation. Using these, the baseline memory consumption can be 57 | //! calculated by the formula `2 * T + 3 * PS + FLI * (3 * PS + SLI * P + SLI / 8)` 58 | //! (where `PS = size_of::()`). 59 | //! 60 | //! The following table shows the estimated baseline memory consumption of 61 | //! [`SysTlsf`] for common configurations. 62 | //! 63 | //! | `size_of::()` | `T` | `size` | memory consumption (bytes) | 64 | //! | -------------------- | ----- | ----------------- | -------------------------- | 65 | //! | `8` (64-bit system) | `u32` | `16` | 186 | 66 | //! | | `u32` | `1 << 10` (1KiB) | 1,110 | 67 | //! | | `u32` | `1 << 24` (16MiB) | 3,266 | 68 | //! | | `u32` | `1 << 30` (1GiB) | 4,190 | 69 | //! | | `u64` | `16` | 194 | 70 | //! | | `u64` | `1 << 10` (1KiB) | 1,118 | 71 | //! | | `u64` | `1 << 24` (16MiB) | 3,274 | 72 | //! | | `u64` | `1 << 30` (1GiB) | 4,198 | 73 | //! | | `u64` | `1 << 36` (64GiB) | 5,122 | 74 | //! | `4` (32-bit system) | `u32` | `16` | 98 | 75 | //! | | `u32` | `1 << 10` (1KiB) | 566 | 76 | //! | | `u32` | `1 << 24` (16MiB) | 1,658 | 77 | //! | | `u32` | `1 << 30` (1GiB) | 2,126 | 78 | //! 79 | //! [`SysTlsf`]: type.SysTlsf.html 80 | //! 81 | //! Note that this does not include the overhead incurred by the system memory 82 | //! allocator. 83 | //! 84 | //! Furthermore, each allocated/free region (represented by `TlsfBlock`) 85 | //! consumes a certain amount of memory. The exact size of `TlsfBlock` might 86 | //! differ among compiler versions due to structure layout optimizations, but 87 | //! we can know the lower bound: 88 | //! 89 | //! ``` 90 | //! use xalloc::tlsf::TlsfBlock; 91 | //! use std::mem::size_of; 92 | //! assert!(size_of::>() >= 25); 93 | //! assert!(size_of::>() >= 41); 94 | //! assert!(size_of::>() >= 49); 95 | //! ``` 96 | //! 97 | //! ## Performance 98 | //! 99 | //! The allocation throughput is mostly equivalent to that of jemalloc. 100 | use core::fmt; 101 | 102 | use alloc::{boxed::Box, string::String, vec, vec::Vec}; 103 | use num::{One, Zero}; 104 | use unreachable::{unreachable, UncheckedOptionExt}; 105 | 106 | use arena::{SafeArena, UnsafeArena, UnsafeArenaWithMembershipCheck}; 107 | use int::{BinaryInteger, BinaryUInteger}; 108 | 109 | type TlsfL2Bitmap = u16; 110 | const LOG2_L2_SIZE: u32 = 4; // must be <= log2(sizeof(TlsfL2Bitmap)*8) 111 | const L2_SIZE: u32 = 1 << LOG2_L2_SIZE; 112 | 113 | /// TLSF-based external memory allocator. 114 | /// 115 | /// See [the module-level documentation] for more. 116 | /// 117 | /// [the module-level documentation]: index.html 118 | /// 119 | /// ## Type parameters 120 | /// 121 | /// - `T` is an integer type used to represent region sizes. You usually use 122 | /// `u32` or `u64` for this. 123 | /// - `A` is a memory arena type used to allocate internal block structures. 
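/// ## Example
///
/// A minimal usage sketch (using the `SysTlsf` alias defined below in this
/// module):
///
/// ```
/// use xalloc::SysTlsf;
///
/// // A heap of 64 allocation units
/// let mut tlsf = SysTlsf::new(64u32);
///
/// // Allocate 24 units whose offset is a multiple of 16
/// let (region, offset) = tlsf.alloc_aligned(24u32, 16u32).unwrap();
/// assert_eq!(offset % 16, 0);
///
/// // Return the region to the allocator
/// tlsf.dealloc(region).unwrap();
/// ```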
124 | /// 125 | #[derive(Debug)] 126 | pub struct Tlsf 127 | where 128 | A: UnsafeArena, Ptr = P>, 129 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 130 | { 131 | size: T, 132 | l1: TlsfL1, 133 | blocks: A, 134 | } 135 | 136 | use arena; 137 | 138 | /// [`Tlsf`] that uses [`CheckedArena`] for rigorous memory safety check. 139 | /// 140 | /// It is really slow. Use [`SysTlsf`] in a production code. 141 | /// 142 | /// [`CheckedArena`]: crate::arena::CheckedArena 143 | /// 144 | /// ## Type parameter 145 | /// 146 | /// - `T` is an integer type used to represent region sizes. You usually use 147 | /// `u32` or `u64` for this. 148 | /// 149 | pub type SafeTlsf = 150 | Tlsf>, arena::checked::Ptr>; 151 | 152 | /// Type alias of [`TlsfRegion`] for [`SafeTlsf`]. 153 | pub type SafeTlsfRegion = TlsfRegion; 154 | 155 | impl SafeTlsf { 156 | /// Construct a `SafeTlsf`. 157 | pub fn new(size: T) -> Self { 158 | Tlsf::with_arena(size, arena::CheckedArena::new()) 159 | } 160 | } 161 | 162 | /// `Tlsf` that uses the system allocator for the internal storage allocation. 163 | /// 164 | /// ## Type parameter 165 | /// 166 | /// - `T` is an integer type used to represent region sizes. You usually use 167 | /// `u32` or `u64` for this. 168 | /// 169 | pub type SysTlsf = Tlsf< 170 | T, 171 | arena::PooledArena, arena::SysAllocator, arena::sys::Ptr>, 172 | arena::sys::Ptr, 173 | >; 174 | 175 | /// Type alias of [`TlsfRegion`] for [`SysTlsf`]. 176 | pub type SysTlsfRegion = TlsfRegion; 177 | 178 | impl SysTlsf { 179 | /// Construct a `SysTlsf`. 180 | pub fn new(size: T) -> Self { 181 | Tlsf::with_arena(size, arena::PooledArena::new(arena::SysAllocator)) 182 | } 183 | 184 | /// Construct a `SysTlsf` with a specific capacity. 185 | pub fn with_capacity(size: T, capacity: usize) -> Self { 186 | Tlsf::with_arena( 187 | size, 188 | arena::PooledArena::with_capacity(arena::SysAllocator, capacity), 189 | ) 190 | } 191 | } 192 | 193 | /// A handle type to a region allocated in a [`Tlsf`]. 194 | /// 195 | /// `TlsfRegion` returned by a `Tlsf` only can be used with the 196 | /// same `Tlsf`. 197 | #[derive(Debug, PartialEq, Eq, Hash)] 198 | pub struct TlsfRegion
<P>
(P); 199 | 200 | /// Internal data structure used by [`Tlsf`] that represents a free/occupied 201 | /// memory block. 202 | #[derive(Debug)] 203 | pub struct TlsfBlock<T, P> { 204 | /// Points the previous (in terms of the external memory address) block. 205 | prev: Option
<P>
, 206 | 207 | /// Points the next (in terms of the external memory address) block. 208 | next: Option
<P>
, 209 | 210 | /// The external memory address. 211 | address: T, 212 | 213 | /// The size of the block in the external memory space. 214 | size: T, 215 | state: TlsfBlockState
<P>
, 216 | } 217 | 218 | #[derive(Debug, PartialEq, Eq)] 219 | enum TlsfBlockState
<P>
{ 220 | Free { 221 | /// The previous free block in the same free space list. 222 | prev_free: Option
<P>
, 223 | 224 | /// The next free block in the same free space list. 225 | next_free: Option
<P>
, 226 | }, 227 | Used, 228 | } 229 | 230 | impl
<P>
TlsfBlockState
<P>
{ 231 | fn is_used(&self) -> bool { 232 | match self { 233 | TlsfBlockState::Used => true, 234 | _ => false, 235 | } 236 | } 237 | } 238 | 239 | /// First level table. 240 | #[derive(Debug)] 241 | struct TlsfL1<T, P> { 242 | /// Array of second level tables. 243 | /// 244 | /// - `l1[0]` contains segregated lists for free spaces smaller 245 | /// than `L2_SIZE`. 246 | /// `l1[0].l2[L]` contains the segregated list for free spaces whose sizes 247 | /// are equal to `L`. 248 | /// - `l1[K]` contains segregated lists for free spaces whose sizes are 249 | /// in the range `L2_SIZE << (K - 1) .. L2_SIZE << K`. 250 | /// `l1[K].l2[L]` contains the segregated list for free spaces whose sizes 251 | /// are in the range 252 | /// `(L2_SIZE << (K - 1)) + (1 << (K - 1)) * L .. (L2_SIZE << (K - 1)) + (1 << (K - 1)) * (L + 1)` 253 | /// 254 | l1: Vec<TlsfL2<P>>, 255 | 256 | /// Each bit indicates whether the corresponding element of 257 | /// `l1` has at least one free space or not. 258 | /// 259 | /// The following invariant holds: 260 | /// 261 | /// - `(bitmap.extract_u32(i..(i+1)) != 0) == (l1[i].bitmap != 0)` 262 | /// 263 | /// The number of L2 tables is proportional to the number of digits of the pool 264 | /// size, so using `T` here would be a good choice. 265 | bitmap: T, 266 | 267 | /// Points the free block that fills the entire available space 268 | /// (used only if the pool size is a power of two and no 269 | /// segregated list entry is available for it) 270 | entire: Option
<P>
, 271 | } 272 | 273 | /// Second level table. 274 | #[derive(Debug, Clone)] 275 | struct TlsfL2
<P>
{ 276 | /// Each bit indicates whether the corresponding element of 277 | /// `l2` is valid or not. 278 | bitmap: TlsfL2Bitmap, 279 | 280 | /// Each element represents the first block in a free space list. 281 | /// 282 | /// Points blocks stored in `Tlsf::blocks`. The validity of each 283 | /// element is indicated by the corresponding bit of `bitmap`. 284 | l2: [P; L2_SIZE as usize], 285 | } 286 | 287 | impl Tlsf 288 | where 289 | T: BinaryUInteger, 290 | A: UnsafeArena, Ptr = P>, 291 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 292 | { 293 | /// Construct a `Tlsf`. 294 | pub fn with_arena(size: T, arena: A) -> Self { 295 | let mut sa = Tlsf { 296 | l1: TlsfL1::new(&size), 297 | size, 298 | blocks: arena, 299 | }; 300 | 301 | // Create the initial free block 302 | let block = TlsfBlock { 303 | prev: None, 304 | next: None, 305 | address: Zero::zero(), 306 | size: sa.size.clone(), 307 | state: TlsfBlockState::Used, // don't care 308 | }; 309 | let block_ptr = sa.blocks.insert(block); 310 | unsafe { 311 | sa.l1.link(&mut sa.blocks, block_ptr); 312 | } 313 | 314 | sa 315 | } 316 | 317 | /// Get a reference to the underlying memory arena. 318 | pub fn arena(&self) -> &A { 319 | &self.blocks 320 | } 321 | 322 | /// Get a mutable reference to the underlying memory arena. 323 | pub fn arena_mut(&mut self) -> &mut A { 324 | &mut self.blocks 325 | } 326 | 327 | /// Allocate a region of the size `size` with a given alignment requirement. 328 | /// 329 | /// Returns a handle of the allocated region and its offset if the 330 | /// allocation succeeds. Returns `None` otherwise. 331 | /// 332 | /// - `align` must be a power of two. 333 | /// - `size` must not be zero. 334 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] 335 | pub fn alloc_aligned(&mut self, size: T, align: T) -> Option<(TlsfRegion
<P>
, T)> { 336 | assert!(align.is_power_of_two()); 337 | self.allocate_aligned_log2(size, align.trailing_zeros()) 338 | } 339 | 340 | /// Allocate a region of the size `size`. 341 | /// 342 | /// Returns a handle of the allocated region and its offset if the 343 | /// allocation succeeds. Returns `None` otherwise. 344 | /// 345 | /// `size` must not be zero. 346 | pub fn alloc(&mut self, size: T) -> Option<(TlsfRegion
<P>
, T)> { 347 | self.allocate_aligned_log2(size, 0) 348 | } 349 | 350 | fn allocate_aligned_log2(&mut self, size: T, align_bits: u32) -> Option<(TlsfRegion
<P>
, T)> { 351 | if size > self.size { 352 | return None; 353 | } 354 | assert_ne!(size, Zero::zero()); 355 | 356 | let suitable = unsafe { self.l1.search_suitable(&mut self.blocks, &size, align_bits) }; 357 | suitable.map(|(position, free_block_ptr, pad)| unsafe { 358 | let (mut prev, mut next, free_block_address, free_block_size) = { 359 | let block = self.blocks.get_unchecked(&free_block_ptr); 360 | ( 361 | block.prev.clone(), 362 | block.next.clone(), 363 | block.address.clone(), 364 | block.size.clone(), 365 | ) 366 | }; 367 | let data_end = pad.clone() + size.clone(); 368 | 369 | // For exception safety... 370 | let mut reserve = 0; 371 | if pad != Zero::zero() { 372 | reserve += 1; 373 | } 374 | if data_end != free_block_size { 375 | reserve += 1; 376 | } 377 | self.blocks.reserve(reserve); 378 | 379 | self.l1 380 | .unlink_head(&mut self.blocks, free_block_ptr.clone(), position); 381 | self.blocks.remove_unchecked(&free_block_ptr); 382 | 383 | if pad != Zero::zero() { 384 | let block = TlsfBlock { 385 | prev: prev.clone(), 386 | next: None, // linked later 387 | address: free_block_address.clone(), 388 | size: pad.clone(), 389 | state: TlsfBlockState::Used, // don't care 390 | }; 391 | let block_ptr = self.blocks.insert(block); 392 | self.l1.link(&mut self.blocks, block_ptr.clone()); 393 | if let Some(ref old_prev) = prev { 394 | self.blocks.get_unchecked_mut(old_prev).next = Some(block_ptr.clone()); 395 | } 396 | prev = Some(block_ptr); 397 | } 398 | 399 | if data_end != free_block_size { 400 | let block = TlsfBlock { 401 | prev: None, // linked later 402 | next: next.clone(), 403 | address: free_block_address.clone() + data_end.clone(), 404 | size: free_block_size.clone() - data_end.clone(), 405 | state: TlsfBlockState::Used, // don't care 406 | }; 407 | let block_ptr = self.blocks.insert(block); 408 | self.l1.link(&mut self.blocks, block_ptr.clone()); 409 | if let Some(ref old_next) = next { 410 | self.blocks.get_unchecked_mut(old_next).prev = Some(block_ptr.clone()); 411 | } 412 | next = Some(block_ptr); 413 | } 414 | 415 | let main_ptr = { 416 | let block = TlsfBlock { 417 | prev: prev.clone(), 418 | next: next.clone(), 419 | address: free_block_address.clone() + pad.clone(), 420 | size, 421 | state: TlsfBlockState::Used, // care! 422 | }; 423 | self.blocks.insert(block) 424 | }; 425 | 426 | // Connect neighboring blocks to this 427 | let address = self.blocks.get_unchecked(&main_ptr).address.clone(); 428 | 429 | if let Some(ptr) = prev { 430 | self.blocks.get_unchecked_mut(&ptr).next = Some(main_ptr.clone()); 431 | } 432 | if let Some(ptr) = next { 433 | self.blocks.get_unchecked_mut(&ptr).prev = Some(main_ptr.clone()); 434 | } 435 | 436 | (TlsfRegion(main_ptr), address) 437 | }) 438 | } 439 | 440 | /// Deallocate the specified region, without checking the origin of the 441 | /// `TlsfRegion`. 442 | /// 443 | /// This might result in an undefined behavior if `r` originates from 444 | /// a different instance of `Tlsf`. 445 | pub unsafe fn dealloc_unchecked(&mut self, r: TlsfRegion
<P>
) { 446 | let block_ptr = r.0; 447 | 448 | let (prev_ptr, next_ptr) = { 449 | let block = self.blocks.get_unchecked(&block_ptr); 450 | if let TlsfBlockState::Used = block.state { 451 | } else { 452 | // It's impossible for the application to obtain a 453 | // `TlsfRegion` for a free block. `TlsfRegion` isn't even 454 | // `Clone` nor `Copy`. 455 | unreachable(); 456 | } 457 | (block.prev.clone(), block.next.clone()) 458 | }; 459 | 460 | // Try to merge neighboring free blocks 461 | let prev_info = if let Some(ref ptr) = prev_ptr { 462 | let block = self.blocks.get_unchecked(ptr); 463 | if let TlsfBlockState::Free { .. } = block.state { 464 | Some((block.prev.clone(), block.size.clone())) 465 | } else { 466 | None 467 | } 468 | } else { 469 | None 470 | }; 471 | let next_info = if let Some(ref ptr) = next_ptr { 472 | let block = self.blocks.get_unchecked(ptr); 473 | if let TlsfBlockState::Free { .. } = block.state { 474 | Some((block.next.clone(), block.size.clone())) 475 | } else { 476 | None 477 | } 478 | } else { 479 | None 480 | }; 481 | { 482 | let block = self.blocks.get_unchecked_mut(&block_ptr); 483 | if let Some((ref new_prev_ptr, ref prev_size)) = prev_info { 484 | block.prev = new_prev_ptr.clone(); 485 | block.size += prev_size.clone(); 486 | block.address -= prev_size.clone(); 487 | } 488 | if let Some((ref new_next_ptr, ref next_size)) = next_info { 489 | block.next = new_next_ptr.clone(); 490 | block.size += next_size.clone(); 491 | } 492 | } 493 | 494 | if prev_info.is_some() { 495 | self.l1 496 | .unlink(&mut self.blocks, prev_ptr.clone().unchecked_unwrap()); 497 | self.blocks.remove_unchecked(&prev_ptr.unchecked_unwrap()); 498 | } 499 | if next_info.is_some() { 500 | self.l1 501 | .unlink(&mut self.blocks, next_ptr.clone().unchecked_unwrap()); 502 | self.blocks.remove_unchecked(&next_ptr.unchecked_unwrap()); 503 | } 504 | 505 | if let Some((Some(new_prev_ptr), _)) = prev_info { 506 | let block = self.blocks.get_unchecked_mut(&new_prev_ptr); 507 | block.next = Some(block_ptr.clone()); 508 | } 509 | if let Some((Some(new_next_ptr), _)) = next_info { 510 | let block = self.blocks.get_unchecked_mut(&new_next_ptr); 511 | block.prev = Some(block_ptr.clone()); 512 | } 513 | 514 | self.l1.link(&mut self.blocks, block_ptr); 515 | } 516 | 517 | #[doc(hidden)] 518 | pub unsafe fn test_integrity(&mut self, root_ptr: &TlsfRegion
<P>
) 519 | where 520 | P: fmt::Debug + PartialEq, 521 | { 522 | // Find the physically first block 523 | let mut first_ptr = root_ptr.0.clone(); 524 | while self.blocks.get_unchecked(&first_ptr).prev.is_some() { 525 | first_ptr = self.blocks.get_unchecked(&first_ptr).prev.clone().unwrap(); 526 | } 527 | 528 | let dump = || { 529 | use core::fmt::Write; 530 | 531 | let mut s = String::new(); 532 | let mut cur_ptr = first_ptr.clone(); 533 | loop { 534 | let cur = self.blocks.get_unchecked(&cur_ptr); 535 | let next_ptr = cur.next.clone(); 536 | writeln!( 537 | &mut s, 538 | "{:?} - [{:?}, {:?}] - {:?}", 539 | cur.prev, cur_ptr, cur.state, cur.next 540 | ) 541 | .unwrap(); 542 | if let Some(next_ptr) = next_ptr { 543 | cur_ptr = next_ptr; 544 | } else { 545 | break; 546 | } 547 | } 548 | s 549 | }; 550 | 551 | // scan every block and check the physical connections 552 | let mut cur_ptr = first_ptr.clone(); 553 | let mut addr = Zero::zero(); 554 | loop { 555 | let cur = self.blocks.get_unchecked(&cur_ptr); 556 | assert_eq!( 557 | cur.address, 558 | addr, 559 | "[{:?}].prev ({:?}) should be {:?}. Dump: \n{}", 560 | cur_ptr, 561 | &cur.address, 562 | &addr, 563 | dump() 564 | ); 565 | addr += cur.size.clone(); 566 | 567 | let next_ptr = cur.next.clone(); 568 | if let Some(next_ptr) = next_ptr { 569 | let next = self.blocks.get_unchecked(&next_ptr); 570 | assert_eq!( 571 | next.prev, 572 | Some(cur_ptr.clone()), 573 | "[{:?}].prev ({:?}) should be {:?}. Dump: \n{}", 574 | next_ptr, 575 | next.prev, 576 | cur_ptr, 577 | dump() 578 | ); 579 | assert!( 580 | next.state.is_used() || cur.state.is_used(), 581 | "[{:?}].state and [{:?}].state must not be Free at the same time. Dump: \n{}", 582 | next_ptr, 583 | cur_ptr, 584 | dump() 585 | ); 586 | cur_ptr = next_ptr; 587 | } else { 588 | break; 589 | } 590 | } 591 | assert_eq!( 592 | self.size, 593 | addr, 594 | "self.size ({:?}) should be {:?}. Dump: \n{}", 595 | &self.size, 596 | &addr, 597 | dump() 598 | ); 599 | } 600 | } 601 | 602 | impl Tlsf 603 | where 604 | T: BinaryUInteger, 605 | A: UnsafeArena, Ptr = P> + UnsafeArenaWithMembershipCheck>, 606 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 607 | { 608 | /// Deallocate the specified region. 609 | /// 610 | /// Returns `Err(r)` if `r` does not originate from the same instance of `Tlsf`. 611 | pub fn dealloc(&mut self, r: TlsfRegion
<P>
) -> Result<(), TlsfRegion
<P>
> { 612 | unsafe { 613 | if self.blocks.contains_unchecked(&r.0) { 614 | self.dealloc_unchecked(r); 615 | Ok(()) 616 | } else { 617 | Err(r) 618 | } 619 | } 620 | } 621 | } 622 | 623 | impl Tlsf 624 | where 625 | T: BinaryUInteger, 626 | A: UnsafeArena, Ptr = P> + SafeArena>, 627 | P: Clone + Default + PartialEq + Eq + fmt::Debug, 628 | { 629 | /// Deallocate the specified region. 630 | /// 631 | /// `r` must originate from the same instance of `Tlsf`. Otherwise, `Tlsf` 632 | /// enters an inconsistent state and possibly panics, but does not cause an 633 | /// undefined behavior. 634 | pub fn dealloc_relaxed(&mut self, r: TlsfRegion
<P>
) { 635 | unsafe { self.dealloc_unchecked(r) } 636 | } 637 | } 638 | 639 | impl TlsfBlock { 640 | /// Return whether the requested region can fit in this space (assuming it 641 | /// is free). 642 | /// 643 | /// The returned value is the size of padding required to meet the 644 | /// alignment requirement. `None` if it cannot fit. 645 | fn can_fit(&self, size: &T, align_bits: u32) -> Option { 646 | if align_bits == 0 { 647 | if size <= &self.size { 648 | Some(Zero::zero()) 649 | } else { 650 | None 651 | } 652 | } else { 653 | let start = self.address.clone().checked_ceil_fix(align_bits); 654 | let end_block = self.address.clone() + self.size.clone(); 655 | if let Some(start) = start { 656 | if start < end_block && size <= &(end_block.clone() - start.clone()) { 657 | Some(start - self.address.clone()) 658 | } else { 659 | None 660 | } 661 | } else { 662 | start 663 | } 664 | } 665 | } 666 | } 667 | 668 | impl TlsfL1 { 669 | /// Constructs `TlsfL1`. 670 | fn new(size: &T) -> Self { 671 | assert!(size > &Zero::zero()); 672 | 673 | let size_m1 = size.clone() - One::one(); 674 | let num_l2s = T::max_digits().saturating_sub(LOG2_L2_SIZE + size_m1.leading_zeros()) + 1; 675 | 676 | Self { 677 | l1: vec![ 678 | TlsfL2 { 679 | bitmap: Zero::zero(), 680 | l2: [ 681 | // L2_SIZE elements 682 | P::default(), 683 | P::default(), 684 | P::default(), 685 | P::default(), 686 | P::default(), 687 | P::default(), 688 | P::default(), 689 | P::default(), 690 | P::default(), 691 | P::default(), 692 | P::default(), 693 | P::default(), 694 | P::default(), 695 | P::default(), 696 | P::default(), 697 | P::default(), 698 | ], 699 | }; 700 | num_l2s as usize 701 | ], 702 | bitmap: Zero::zero(), 703 | entire: None, 704 | } 705 | } 706 | 707 | /// Compute the first and second level table index for a given size of free 708 | /// space. 709 | #[inline] 710 | fn map_size(&self, size: &T) -> (u32, u32) { 711 | // Equivalent to: 712 | // `let l1_index = T::max_digits().saturating_sub(LOG2_L2_SIZE + size.leading_zeros());` 713 | let l1_index = T::max_digits() 714 | - LOG2_L2_SIZE 715 | - (size.clone() | T::ones(0..LOG2_L2_SIZE)).leading_zeros(); 716 | 717 | // Branch-less equivalent of: 718 | // `let min_bit_index = l1_index.saturating_sub(1);` 719 | let min_bit_index = l1_index - if l1_index == 0 { 0 } else { 1 }; 720 | 721 | let l2_index = (size.clone() >> min_bit_index).extract_u32(0..LOG2_L2_SIZE); 722 | 723 | (l1_index, l2_index) 724 | } 725 | 726 | /// Search a free block at least as large as `size` with the alignment 727 | /// requirement `1 << align_bits`. 728 | /// 729 | /// The result can be one of the following: 730 | /// 731 | /// - `None`: No suitable block was found. 732 | /// - `Some((position, block_ptr, pad)): A suitable block was found. `position` is either of: 733 | /// - `Some((l1, l2))`: `block_ptr` is the head of the free space list at the position `(l1, l2)`. 734 | /// - `None`: `block_ptr` is `self.entire`. 735 | /// 736 | /// `size` must be less than or equal to the size of the heap. 
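/// For example (illustrative): with `T = u32` and `LOG2_L2_SIZE = 4`, a
/// request of `size = 50` maps to `map_size(&50) == (2, 9)`, so the search
/// starts at the free space list `l1[2].l2[9]`, which holds free spaces whose
/// sizes are in `50..52`; larger buckets are consulted only if that list is
/// empty or its head cannot accommodate the request.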
737 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))] 738 | unsafe fn search_suitable, Ptr = P>>( 739 | &self, 740 | blocks: &mut A, 741 | size: &T, 742 | align_bits: u32, 743 | ) -> Option<(Option<(u32, u32)>, P, T)> { 744 | if let Some(ref entire) = self.entire { 745 | return Some((None, entire.clone(), Zero::zero())); 746 | } 747 | 748 | let (l1_first, l2_first) = self.map_size(size); 749 | if self.bitmap.get_bit(l1_first) { 750 | if l1_first as usize >= self.l1.len() { 751 | unreachable(); 752 | } 753 | let l2t: &TlsfL2
<P>
= &self.l1[l1_first as usize]; 754 | if l2t.bitmap.get_bit(l2_first) { 755 | // Found a free block in the same bucket. 756 | let block_ptr = l2t.l2[l2_first as usize].clone(); 757 | let block = blocks.get_unchecked(&block_ptr); 758 | if let Some(pad) = block.can_fit(size, align_bits) { 759 | return Some((Some((l1_first, l2_first)), block_ptr, pad)); 760 | } 761 | } 762 | 763 | // Search the same second level table. 764 | let l2 = l2t.bitmap.bit_scan_forward(l2_first + 1); 765 | if l2 < L2_SIZE { 766 | // Found one 767 | let block_ptr = l2t.l2[l2 as usize].clone(); 768 | let can_fit = if align_bits == 0 { 769 | Some(Zero::zero()) 770 | } else { 771 | blocks.get_unchecked(&block_ptr).can_fit(size, align_bits) 772 | }; 773 | if let Some(pad) = can_fit { 774 | if align_bits == 0 { 775 | debug_assert!(blocks 776 | .get_unchecked(&block_ptr) 777 | .can_fit(size, align_bits) 778 | .is_some()); 779 | } 780 | return Some((Some((l1_first, l2)), block_ptr, pad)); 781 | } 782 | } 783 | } 784 | 785 | let mut l1_first = self.bitmap.bit_scan_forward(l1_first + 1); 786 | let mut l2_first = if l1_first == T::max_digits() { 787 | return None; 788 | } else { 789 | if l1_first as usize >= self.l1.len() { 790 | unreachable(); 791 | } 792 | let l2t: &TlsfL2
<P>
= &self.l1[l1_first as usize]; 793 | let l2 = l2t.bitmap.bit_scan_forward(0); 794 | debug_assert_ne!(l2, TlsfL2Bitmap::max_digits()); 795 | let block_ptr = l2t.l2[l2 as usize].clone(); 796 | let can_fit = if align_bits == 0 { 797 | Some(Zero::zero()) 798 | } else { 799 | blocks.get_unchecked(&block_ptr).can_fit(size, align_bits) 800 | }; 801 | if let Some(pad) = can_fit { 802 | if align_bits == 0 { 803 | debug_assert!(blocks 804 | .get_unchecked(&block_ptr) 805 | .can_fit(size, align_bits) 806 | .is_some()); 807 | } 808 | return Some((Some((l1_first, l2)), block_ptr, pad)); 809 | } 810 | l2 811 | }; 812 | 813 | // For aligned allocations, there are cases where no free space that can 814 | // satisfy the alignment requirement even if the size requirement is met. 815 | // We need to check more free lists. 816 | // 817 | // The code below should be unreachable for allocations without an 818 | // alignment requirement. 819 | debug_assert_ne!(align_bits, 0); 820 | 821 | // FIXME: add explanation 822 | let worst_size = size.ref_saturating_add(T::ones(0..align_bits)); 823 | let (l1_worst, l2_worst) = self.map_size(&worst_size); 824 | while (l1_first, l2_first) < (l1_worst, l2_worst) { 825 | // Determine the next search start position 826 | l2_first += 1; 827 | if l2_first >= TlsfL2Bitmap::max_digits() { 828 | l1_first = self.bitmap.bit_scan_forward(l1_first + 1); 829 | if l1_first == T::max_digits() { 830 | return None; 831 | } 832 | l2_first = 0; 833 | } 834 | 835 | let l2t: &TlsfL2
<P>
= &self.l1[l1_first as usize]; 836 | let l2 = l2t.bitmap.bit_scan_forward(l2_first); 837 | if l2 == TlsfL2Bitmap::max_digits() { 838 | l2_first = l2; 839 | continue; 840 | } 841 | let block_ptr = l2t.l2[l2 as usize].clone(); 842 | if let Some(pad) = blocks.get_unchecked(&block_ptr).can_fit(size, align_bits) { 843 | return Some((Some((l1_first, l2)), block_ptr, pad)); 844 | } else { 845 | l2_first = l2; 846 | } 847 | } 848 | 849 | None 850 | } 851 | 852 | /// Remove the given block from the free space list. 853 | #[inline] 854 | unsafe fn unlink, Ptr = P>>( 855 | &mut self, 856 | blocks: &mut A, 857 | block_ptr: P, 858 | ) { 859 | let (l1, l2) = self.map_size(&blocks.get_unchecked(&block_ptr).size); 860 | if l1 as usize >= self.l1.len() { 861 | self.entire = None; 862 | } else { 863 | { 864 | debug_assert!(self.bitmap.get_bit(l1)); 865 | debug_assert!( 866 | self.l1[l1 as usize].bitmap.get_bit(l2), 867 | "L2 bitmap 0b{:b} has not bit {} set.", 868 | &self.l1[l1 as usize].bitmap, 869 | l2 870 | ); 871 | if self.l1[l1 as usize].l2[l2 as usize] == block_ptr { 872 | return self.unlink_head(blocks, block_ptr, Some((l1, l2))); 873 | } 874 | } 875 | 876 | // Retrieve the neighboring blocks (in the free space list) 877 | let (prev_ptr, o_next_ptr) = { 878 | let block = blocks.get_unchecked(&block_ptr); 879 | if let TlsfBlockState::Free { 880 | prev_free: Some(ref prev_free), 881 | ref next_free, 882 | } = block.state 883 | { 884 | (prev_free.clone(), next_free.clone()) 885 | } else { 886 | unreachable(); 887 | } 888 | }; 889 | 890 | // Unlink the current block 891 | if let Some(ref next_ptr) = o_next_ptr { 892 | let next_block = blocks.get_unchecked_mut(next_ptr); 893 | if let TlsfBlockState::Free { 894 | ref mut prev_free, .. 895 | } = next_block.state 896 | { 897 | debug_assert_eq!(*prev_free, Some(block_ptr.clone())); 898 | *prev_free = Some(prev_ptr.clone()); 899 | } else { 900 | unreachable(); 901 | } 902 | } 903 | 904 | { 905 | let prev_block = blocks.get_unchecked_mut(&prev_ptr); 906 | if let TlsfBlockState::Free { 907 | ref mut next_free, .. 908 | } = prev_block.state 909 | { 910 | debug_assert_eq!(*next_free, Some(block_ptr.clone())); 911 | *next_free = o_next_ptr; 912 | } else { 913 | unreachable(); 914 | } 915 | } 916 | } 917 | } 918 | 919 | /// Remove the given block from the free space list. 920 | /// 921 | /// `block_ptr` must be the head of the free space list specified by `position`. 922 | /// `block_ptr` returned by `search_suitable` always satisfies this condition, 923 | /// supposing no intervening modification was done. 924 | #[inline] 925 | unsafe fn unlink_head, Ptr = P>>( 926 | &mut self, 927 | blocks: &mut A, 928 | block_ptr: P, 929 | position: Option<(u32, u32)>, 930 | ) { 931 | if let Some((l1, l2)) = position { 932 | let l2t: &mut TlsfL2
<P>
= &mut self.l1[l1 as usize]; 933 | 934 | debug_assert!(self.bitmap.get_bit(l1)); 935 | debug_assert!( 936 | l2t.bitmap.get_bit(l2), 937 | "L2 bitmap 0b{:b} has not bit {} set.", 938 | &l2t.bitmap, 939 | l2 940 | ); 941 | debug_assert_eq!(block_ptr, l2t.l2[l2 as usize]); 942 | 943 | let next_block_ptr = { 944 | let block = blocks.get_unchecked(&block_ptr); 945 | if let TlsfBlockState::Free { ref next_free, .. } = block.state { 946 | next_free.clone() 947 | } else { 948 | unreachable(); 949 | } 950 | }; 951 | 952 | if let Some(next_block_ptr) = next_block_ptr { 953 | let next_block = blocks.get_unchecked_mut(&next_block_ptr); 954 | if let TlsfBlockState::Free { 955 | ref mut prev_free, .. 956 | } = next_block.state 957 | { 958 | debug_assert_eq!(*prev_free, Some(block_ptr)); 959 | *prev_free = None; 960 | } else { 961 | unreachable(); 962 | } 963 | 964 | l2t.l2[l2 as usize] = next_block_ptr; 965 | } else { 966 | l2t.bitmap.clear_bit(l2); 967 | if l2t.bitmap == Zero::zero() { 968 | self.bitmap.clear_bit(l1); 969 | } 970 | 971 | // don't care about the value of `l2t.l2[l2 as usize]` 972 | } 973 | } else { 974 | debug_assert_eq!(Some(block_ptr), self.entire); 975 | self.entire = None; 976 | } 977 | } 978 | 979 | /// Insert the given block to a free space list. 980 | /// 981 | /// `block_ptr` must point a valid `TlsfBlock` in `blocks`. 982 | /// The given block's `TlsfBlock::state` will be overwritten with a new 983 | /// `TlsfBlockState::Free` value. 984 | #[inline] 985 | unsafe fn link(&mut self, blocks: &mut A, block_ptr: P) 986 | where 987 | A: UnsafeArena, Ptr = P>, 988 | { 989 | let (l1, l2) = self.map_size(&blocks.get_unchecked(&block_ptr).size); 990 | if l1 as usize >= self.l1.len() { 991 | self.entire = Some(block_ptr); 992 | } else { 993 | let l2t: &mut TlsfL2
<P>
= &mut self.l1[l1 as usize]; 994 | 995 | // Update bitmaps 996 | let head_valid = l2t.bitmap.get_bit(l2); 997 | l2t.bitmap.set_bit(l2); 998 | self.bitmap.set_bit(l1); 999 | 1000 | // Link the given block to the list 1001 | let head = &mut l2t.l2[l2 as usize]; 1002 | 1003 | { 1004 | let block = blocks.get_unchecked_mut(&block_ptr); 1005 | block.state = TlsfBlockState::Free { 1006 | prev_free: None, 1007 | next_free: if head_valid { Some(head.clone()) } else { None }, 1008 | }; 1009 | } 1010 | if head_valid { 1011 | let next_block = blocks.get_unchecked_mut(head); 1012 | if let TlsfBlockState::Free { 1013 | ref mut prev_free, .. 1014 | } = next_block.state 1015 | { 1016 | debug_assert!(prev_free.is_none()); 1017 | *prev_free = Some(block_ptr.clone()); 1018 | } else { 1019 | unreachable(); 1020 | } 1021 | } 1022 | 1023 | *head = block_ptr; 1024 | } 1025 | } 1026 | } 1027 | 1028 | #[test] 1029 | fn num_l2s() { 1030 | for i in 1..L2_SIZE { 1031 | let l1 = TlsfL1::<_, u32>::new(&(i as u32)); 1032 | assert_eq!(l1.l1.len(), 1); 1033 | } 1034 | for k in 0..4 { 1035 | let i = L2_SIZE << k; 1036 | let l1 = TlsfL1::<_, u32>::new(&i); 1037 | assert_eq!(l1.l1.len(), k + 1); 1038 | } 1039 | } 1040 | --------------------------------------------------------------------------------