<Entry<T, P>, Ptr = P>,
346 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
347 | {
348 | inner: A,
349 | first_vacant: Option<P>,
350 | _phantom: PhantomData<T>,
351 | }
352 |
353 | #[derive(Debug)]
354 | pub struct Entry<T, P> {
355 | data: MaybeUninit<T>,
356 | occupied: bool,
357 | next: Option<P>,
358 | }
359 |
360 | impl<T, P> Drop for Entry<T, P> {
361 | fn drop(&mut self) {
362 | if self.occupied {
363 | unsafe { drop_in_place(self.data.as_mut_ptr()) };
364 | }
365 | }
366 | }
367 |
368 | impl<T, A, P> PooledArena<T, A, P>
369 | where
370 | A: UnsafeArena<Entry<T, P>, Ptr = P>,
371 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
372 | {
373 | /// Construct a `PooledArena`.
374 | pub fn new(inner: A) -> Self {
375 | Self::with_capacity(inner, 0)
376 | }
377 |
378 | /// Construct a `PooledArena` with the specified number of pre-allocated
379 | /// entries.
380 | pub fn with_capacity(inner: A, capacity: usize) -> Self {
381 | let mut arena = Self {
382 | inner,
383 | first_vacant: None,
384 | _phantom: PhantomData,
385 | };
386 |
387 | for _ in 0..capacity {
388 | let p = arena.inner.insert(Entry {
389 | data: MaybeUninit::uninit(),
390 | occupied: false,
391 | next: arena.first_vacant.take(),
392 | });
393 | arena.first_vacant = Some(p);
394 | }
395 |
396 | arena
397 | }
398 |
399 | /// Discard all vacant entries.
400 | pub fn purge(&mut self) {
401 | while let Some(p) = self.first_vacant.take() {
402 | let mut e = unsafe { self.inner.remove_unchecked(&p) };
403 | self.first_vacant = e.next.take();
404 | // Skip `T::drop()` because we know it is a vacant entry.
405 | // We can't just `forget(e)` because `P` might be `Drop`.
406 | debug_assert!(!e.occupied);
407 | e.occupied = false;
408 | }
409 | }
410 | }
411 |
412 | impl<T, A, P> UnsafeArena<T> for PooledArena<T, A, P>
413 | where
414 | A: UnsafeArena<Entry<T, P>, Ptr = P>,
415 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
416 | {
417 | type Ptr = A::Ptr;
418 |
419 | fn insert(&mut self, x: T) -> Self::Ptr {
420 | if let Some(ptr) = self.first_vacant.take() {
421 | let ent = unsafe { self.inner.get_unchecked_mut(&ptr) };
422 |
423 | debug_assert!(!ent.occupied);
424 |
425 | ent.occupied = true;
426 | ent.data = MaybeUninit::new(x);
427 |
428 | self.first_vacant = ent.next.take();
429 |
430 | ptr
431 | } else {
432 | self.inner.insert(Entry {
433 | data: MaybeUninit::new(x),
434 | occupied: true,
435 | next: None,
436 | })
437 | }
438 | }
439 |
440 | unsafe fn get_unchecked(&self, ptr: &Self::Ptr) -> &T {
441 | debug_assert!(self.inner.get_unchecked(ptr).occupied);
442 | &*self.inner.get_unchecked(ptr).data.as_ptr()
443 | }
444 |
445 | unsafe fn get_unchecked_mut(&mut self, ptr: &Self::Ptr) -> &mut T {
446 | debug_assert!(self.inner.get_unchecked(ptr).occupied);
447 | &mut *self.inner.get_unchecked_mut(ptr).data.as_mut_ptr()
448 | }
449 |
450 | unsafe fn remove_unchecked(&mut self, ptr: &Self::Ptr) -> T {
451 | let entry = self.inner.get_unchecked_mut(ptr);
452 | debug_assert!(entry.occupied);
453 |
454 | let value = read(entry.data.as_ptr());
455 | entry.occupied = false;
456 | entry.next = self.first_vacant.take();
457 |
458 | self.first_vacant = Some(ptr.clone());
459 |
460 | value
461 | }
462 | }
463 |
464 | impl<T, A, P> UnsafeArenaWithMembershipCheck<T> for PooledArena<T, A, P>
465 | where
466 | A: UnsafeArena<Entry<T, P>, Ptr = P> + UnsafeArenaWithMembershipCheck<Entry<T, P>>,
467 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
468 | {
469 | unsafe fn contains_unchecked(&self, ptr: &Self::Ptr) -> bool {
470 | self.inner.contains_unchecked(ptr)
471 | }
472 | }
473 |
474 | impl<T, A, P> Arena<T> for PooledArena<T, A, P>
475 | where
476 | A: UnsafeArena<Entry<T, P>, Ptr = P> + Arena<Entry<T, P>>,
477 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
478 | {
479 | fn get(&self, ptr: &Self::Ptr) -> Option<&T> {
480 | self.inner.get(ptr).map(|x| {
481 | debug_assert!(x.occupied);
482 | unsafe { &*x.data.as_ptr() }
483 | })
484 | }
485 |
486 | fn get_mut(&mut self, ptr: &Self::Ptr) -> Option<&mut T> {
487 | self.inner.get_mut(ptr).map(|x| {
488 | debug_assert!(x.occupied);
489 | unsafe { &mut *x.data.as_mut_ptr() }
490 | })
491 | }
492 |
493 | fn remove(&mut self, ptr: &Self::Ptr) -> Option<T> {
494 | if let Some(r) = self.inner.get_mut(ptr) {
495 | debug_assert!(r.occupied);
496 |
497 | let value = unsafe { read(r.data.as_ptr()) };
498 | r.occupied = false;
499 | r.next = self.first_vacant.take();
500 |
501 | self.first_vacant = Some(ptr.clone());
502 |
503 | Some(value)
504 | } else {
505 | None
506 | }
507 | }
508 | }
509 |
510 | #[test]
511 | fn test1() {
512 | test_common(&mut PooledArena::new(CheckedArena::new()));
513 | }
514 |
515 | #[test]
516 | fn test2() {
517 | let mut arena = PooledArena::new(CheckedArena::new());
518 |
519 | for _ in 0..2 {
520 | let p1 = arena.insert("twi");
521 | let p2 = arena.insert("aj");
522 |
523 | unsafe {
524 | assert!(arena.contains_unchecked(&p1));
525 | assert!(arena.contains_unchecked(&p2));
526 | }
527 |
528 | assert_eq!(arena.get(&p1), Some(&"twi"));
529 | assert_eq!(arena.get_mut(&p2), Some(&mut "aj"));
530 |
531 | *arena.get_mut(&p2).unwrap() = "flutter";
532 |
533 | assert_eq!(arena.remove(&p1), Some("twi"));
534 | assert_eq!(arena.remove(&p2), Some("flutter"));
535 | }
536 |
537 | arena.purge();
538 | }
539 | }
540 |
541 | pub use self::pooled::PooledArena;
542 |
--------------------------------------------------------------------------------
/src/tlsf.rs:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2017 yvt, all rights reserved.
3 | //
4 | // Licensed under the MIT license <LICENSE-MIT or
5 | // http://opensource.org/licenses/MIT>. This file may
6 | // not be copied, modified, or distributed except
7 | // according to those terms.
8 | //
9 | //! A dynamic external memory allocator based on the TLSF (Two-Level Segregated Fit)
10 | //! algorithm[^1].
11 | //!
12 | //! [^1]: Masmano, Miguel, et al. "TLSF: A new dynamic memory allocator for real-time systems."
13 | //! Real-Time Systems, 2004. ECRTS 2004. Proceedings. 16th Euromicro Conference on. IEEE, 2004.
14 | //!
15 | //! ## Type parameters
16 | //!
17 | //! - `T` is an integer type used to represent region sizes. You usually use
18 | //! `u32` or `u64` for this.
19 | //! - `A` is a memory arena type used to allocate internal block structures.
20 | //!
21 | //! ## A Caveat
22 | //!
23 | //! This TLSF allocator implements a Good-Fit strategy. In order to achieve the
24 | //! O(1) execution time, only the first element of each free space list is examined.
25 | //! As a result, allocations are not guaranteed to succeed even if there is
26 | //! enough free space when both of the following conditions hold (see the worked example below):
27 | //!
28 | //! - There is no free space that is larger than the requested size by a certain
29 | //! amount.
30 | //! - There is a free space that is almost as large as the requested size.
31 | //!
32 | //! Or more strictly:
33 | //!
34 | //! - Let `S` and `mapping` be the number of bytes to allocate and the mapping
35 | //! function that calculates the indexes into the TLSF data structure given
36 | //! the size of a block, respectively. There exists no free space with a size
37 | //! `s` where `mapping(s) != mapping(S) && s > S`.
38 | //! - There exists a free space with a size `s` where
39 | //! `mapping(s) == mapping(S) && s < S`.
40 | //!
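//! A worked example, added for illustration (it uses the mapping of this
//! implementation, where `SLI = 4`): sizes `40` and `41` both map to the
//! bucket `(l1, l2) = (2, 4)`, because both lie in `32 .. 64` and satisfy
//! `(size >> 1) & 15 == 4`. If that bucket's free list starts with a 40-byte
//! block, a 41-byte request inspects only that head, fails to fit, and falls
//! through to strictly larger buckets, so a 41-byte free block sitting behind
//! the head is never examined and the allocation fails.
//!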
41 | //! ## Memory Overhead
42 | //!
43 | //! A TLSF allocator requires the following internal storage to operate (some
44 | //! details are excluded):
45 | //!
46 | //! - A variable storing the size of the heap.
47 | //! - One first-level list that consists of pointers to second-level lists and
48 | //! a bit field of type `T` where each bit indicates whether a free block is
49 | //! available in the corresponding second-level list or not.
50 | //! - `FLI` second-level lists, each of which consists of `1 << SLI` pointers to
51 | //! free blocks and a bit field `1 << SLI` bits wide, where each bit indicates
52 | //! whether the corresponding free block list entry is valid or not.
53 | //!
54 | //! When the heap size `size` is a power of two and larger than `1 << SLI`,
55 | //! `FLI` can be written as `log2(size) + 1 - SLI`. `SLI` is hard-coded to `4`
56 | //! in this implementation. Using these, the baseline memory consumption can be
57 | //! calculated by the formula `2 * T + 3 * PS + FLI * (3 * PS + (1 << SLI) * PS + (1 << SLI) / 8)`
58 | //! (where `PS = size_of::<usize>()`).
59 | //!
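//! For example (illustrative arithmetic): on a 64-bit system with `T = u32`
//! and a 16 MiB heap (`size = 1 << 24`), `FLI = 24 + 1 - 4 = 21`, which the
//! table below lists as a baseline consumption of 3,266 bytes.
//!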
60 | //! The following table shows the estimated baseline memory consumption of
61 | //! [`SysTlsf`] for common configurations.
62 | //!
63 | //! | `size_of::<usize>()` | `T` | `size` | memory consumption (bytes) |
64 | //! | -------------------- | ----- | ----------------- | -------------------------- |
65 | //! | `8` (64-bit system) | `u32` | `16` | 186 |
66 | //! | | `u32` | `1 << 10` (1KiB) | 1,110 |
67 | //! | | `u32` | `1 << 24` (16MiB) | 3,266 |
68 | //! | | `u32` | `1 << 30` (1GiB) | 4,190 |
69 | //! | | `u64` | `16` | 194 |
70 | //! | | `u64` | `1 << 10` (1KiB) | 1,118 |
71 | //! | | `u64` | `1 << 24` (16MiB) | 3,274 |
72 | //! | | `u64` | `1 << 30` (1GiB) | 4,198 |
73 | //! | | `u64` | `1 << 36` (64GiB) | 5,122 |
74 | //! | `4` (32-bit system) | `u32` | `16` | 98 |
75 | //! | | `u32` | `1 << 10` (1KiB) | 566 |
76 | //! | | `u32` | `1 << 24` (16MiB) | 1,658 |
77 | //! | | `u32` | `1 << 30` (1GiB) | 2,126 |
78 | //!
79 | //! [`SysTlsf`]: type.SysTlsf.html
80 | //!
81 | //! Note that this does not include the overhead incurred by the system memory
82 | //! allocator.
83 | //!
84 | //! Furthermore, each allocated/free region (represented by `TlsfBlock`)
85 | //! consumes a certain amount of memory. The exact size of `TlsfBlock` might
86 | //! differ among compiler versions due to structure layout optimizations, but
87 | //! we can establish a lower bound:
88 | //!
89 | //! ```
90 | //! use xalloc::tlsf::TlsfBlock;
91 | //! use std::mem::size_of;
92 | //! assert!(size_of::<TlsfBlock<u32, u32>>() >= 25);
93 | //! assert!(size_of::<TlsfBlock<u32, u64>>() >= 41);
94 | //! assert!(size_of::<TlsfBlock<u64, u64>>() >= 49);
95 | //! ```
96 | //!
97 | //! ## Performance
98 | //!
99 | //! The allocation throughput is mostly equivalent to that of jemalloc.
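//!
//! ## Example
//!
//! A minimal usage sketch (illustrative only; it assumes the crate-root
//! re-export `xalloc::SysTlsf` and a `u32` size type):
//!
//! ```
//! use xalloc::SysTlsf;
//!
//! // A 1 KiB external memory pool; the allocator only tracks offsets.
//! let mut tlsf = SysTlsf::new(1024u32);
//!
//! // Allocate 128 bytes, then 64 bytes on a 32-byte boundary.
//! let (a, _offset_a) = tlsf.alloc(128).unwrap();
//! let (b, offset_b) = tlsf.alloc_aligned(64, 32).unwrap();
//! assert_eq!(offset_b % 32, 0);
//!
//! // Regions must be returned to the allocator they came from.
//! tlsf.dealloc(a).unwrap();
//! tlsf.dealloc(b).unwrap();
//! ```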
100 | use core::fmt;
101 |
102 | use alloc::{boxed::Box, string::String, vec, vec::Vec};
103 | use num::{One, Zero};
104 | use unreachable::{unreachable, UncheckedOptionExt};
105 |
106 | use arena::{SafeArena, UnsafeArena, UnsafeArenaWithMembershipCheck};
107 | use int::{BinaryInteger, BinaryUInteger};
108 |
109 | type TlsfL2Bitmap = u16;
110 | const LOG2_L2_SIZE: u32 = 4; // must be <= log2(sizeof(TlsfL2Bitmap)*8)
111 | const L2_SIZE: u32 = 1 << LOG2_L2_SIZE;
112 |
113 | /// TLSF-based external memory allocator.
114 | ///
115 | /// See [the module-level documentation] for more.
116 | ///
117 | /// [the module-level documentation]: index.html
118 | ///
119 | /// ## Type parameters
120 | ///
121 | /// - `T` is an integer type used to represent region sizes. You usually use
122 | /// `u32` or `u64` for this.
123 | /// - `A` is a memory arena type used to allocate internal block structures.
124 | ///
125 | #[derive(Debug)]
126 | pub struct Tlsf<T, A, P>
127 | where
128 | A: UnsafeArena<TlsfBlock<T, P>, Ptr = P>,
129 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
130 | {
131 | size: T,
132 | l1: TlsfL1<T, P>,
133 | blocks: A,
134 | }
135 |
136 | use arena;
137 |
138 | /// [`Tlsf`] that uses [`CheckedArena`] for rigorous memory safety checks.
139 | ///
140 | /// It is really slow. Use [`SysTlsf`] in production code.
141 | ///
142 | /// [`CheckedArena`]: crate::arena::CheckedArena
143 | ///
144 | /// ## Type parameter
145 | ///
146 | /// - `T` is an integer type used to represent region sizes. You usually use
147 | /// `u32` or `u64` for this.
148 | ///
149 | pub type SafeTlsf<T> =
150 | Tlsf<T, arena::CheckedArena<TlsfBlock<T, arena::checked::Ptr>>, arena::checked::Ptr>;
151 |
152 | /// Type alias of [`TlsfRegion`] for [`SafeTlsf`].
153 | pub type SafeTlsfRegion = TlsfRegion<arena::checked::Ptr>;
154 |
155 | impl<T: BinaryUInteger> SafeTlsf<T> {
156 | /// Construct a `SafeTlsf`.
157 | pub fn new(size: T) -> Self {
158 | Tlsf::with_arena(size, arena::CheckedArena::new())
159 | }
160 | }
161 |
162 | /// `Tlsf` that uses the system allocator for the internal storage allocation.
163 | ///
164 | /// ## Type parameter
165 | ///
166 | /// - `T` is an integer type used to represent region sizes. You usually use
167 | /// `u32` or `u64` for this.
168 | ///
169 | pub type SysTlsf<T> = Tlsf<
170 | T,
171 | arena::PooledArena<TlsfBlock<T, arena::sys::Ptr>, arena::SysAllocator, arena::sys::Ptr>,
172 | arena::sys::Ptr,
173 | >;
174 |
175 | /// Type alias of [`TlsfRegion`] for [`SysTlsf`].
176 | pub type SysTlsfRegion = TlsfRegion<arena::sys::Ptr>;
177 |
178 | impl<T: BinaryUInteger> SysTlsf<T> {
179 | /// Construct a `SysTlsf`.
180 | pub fn new(size: T) -> Self {
181 | Tlsf::with_arena(size, arena::PooledArena::new(arena::SysAllocator))
182 | }
183 |
184 | /// Construct a `SysTlsf` with a specific capacity.
185 | pub fn with_capacity(size: T, capacity: usize) -> Self {
186 | Tlsf::with_arena(
187 | size,
188 | arena::PooledArena::with_capacity(arena::SysAllocator, capacity),
189 | )
190 | }
191 | }
192 |
193 | /// A handle type to a region allocated in a [`Tlsf`].
194 | ///
195 | /// A `TlsfRegion` returned by a `Tlsf` can only be used with the
196 | /// same `Tlsf`.
197 | #[derive(Debug, PartialEq, Eq, Hash)]
198 | pub struct TlsfRegion<P>(P);
199 |
200 | /// Internal data structure used by [`Tlsf`] that represents a free/occupied
201 | /// memory block.
202 | #[derive(Debug)]
203 | pub struct TlsfBlock<T, P> {
204 | /// Points to the previous (in terms of the external memory address) block.
205 | prev: Option<P>,
206 |
207 | /// Points to the next (in terms of the external memory address) block.
208 | next: Option<P>,
209 |
210 | /// The external memory address.
211 | address: T,
212 |
213 | /// The size of the block in the external memory space.
214 | size: T,
215 | state: TlsfBlockState<P>,
216 | }
217 |
218 | #[derive(Debug, PartialEq, Eq)]
219 | enum TlsfBlockState<P> {
220 | Free {
221 | /// The previous free block in the same free space list.
222 | prev_free: Option<P>,
223 |
224 | /// The next free block in the same free space list.
225 | next_free: Option<P>,
226 | },
227 | Used,
228 | }
229 |
230 | impl<P> TlsfBlockState<P> {
231 | fn is_used(&self) -> bool {
232 | match self {
233 | TlsfBlockState::Used => true,
234 | _ => false,
235 | }
236 | }
237 | }
238 |
239 | /// First level table.
240 | #[derive(Debug)]
241 | struct TlsfL1<T, P> {
242 | /// Array of second level tables.
243 | ///
244 | /// - `l1[0]` contains segregated lists for free spaces smaller
245 | /// than `L2_SIZE`.
246 | /// `l1[0].l2[L]` contains the segregated list for free spaces whose sizes
247 | /// are equal to `L`.
248 | /// - `l1[K]` contains segregated lists for free spaces whose sizes are
249 | /// in the range `L2_SIZE << (K - 1) .. L2_SIZE << K`.
250 | /// `l1[K].l2[L]` contains the segregated list for free spaces whose sizes
251 | /// are in the range
252 | /// `(L2_SIZE << (K - 1)) + (1 << (K - 1)) * L .. (L2_SIZE << (K - 1)) + (1 << (K - 1)) * (L + 1)`
253 | ///
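/// For example (illustrative, with `L2_SIZE = 16`): `l1[2].l2[4]` holds the
/// free spaces whose sizes are in `40 .. 42`, since
/// `(16 << 1) + (1 << 1) * 4 == 40`.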
254 | l1: Vec<TlsfL2<P>>,
255 |
256 | /// Each bit indicates whether the corresponding element of
257 | /// `l1` has at least one free space or not.
258 | ///
259 | /// The following invariant holds:
260 | ///
261 | /// - `(bitmap.extract_u32(i..(i+1)) != 0) == (l1[i].bitmap != 0)`
262 | ///
263 | /// The number of L2 tables is proportional to the number of digits of the pool
264 | /// size, so using `T` here would be a good choice.
265 | bitmap: T,
266 |
267 | /// Points to the free block that fills the entire available space
268 | /// (used only if the pool size is a power of two and no
269 | /// segregated list entry is available for it).
270 | entire: Option<P>,
271 | }
272 |
273 | /// Second level table.
274 | #[derive(Debug, Clone)]
275 | struct TlsfL2<P> {
276 | /// Each bit indicates whether the corresponding element of
277 | /// `l2` is valid or not.
278 | bitmap: TlsfL2Bitmap,
279 |
280 | /// Each element represents the first block in a free space list.
281 | ///
282 | /// Points to blocks stored in `Tlsf::blocks`. The validity of each
283 | /// element is indicated by the corresponding bit of `bitmap`.
284 | l2: [P; L2_SIZE as usize],
285 | }
286 |
287 | impl<T, A, P> Tlsf<T, A, P>
288 | where
289 | T: BinaryUInteger,
290 | A: UnsafeArena<TlsfBlock<T, P>, Ptr = P>,
291 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
292 | {
293 | /// Construct a `Tlsf`.
294 | pub fn with_arena(size: T, arena: A) -> Self {
295 | let mut sa = Tlsf {
296 | l1: TlsfL1::new(&size),
297 | size,
298 | blocks: arena,
299 | };
300 |
301 | // Create the initial free block
302 | let block = TlsfBlock {
303 | prev: None,
304 | next: None,
305 | address: Zero::zero(),
306 | size: sa.size.clone(),
307 | state: TlsfBlockState::Used, // don't care
308 | };
309 | let block_ptr = sa.blocks.insert(block);
310 | unsafe {
311 | sa.l1.link(&mut sa.blocks, block_ptr);
312 | }
313 |
314 | sa
315 | }
316 |
317 | /// Get a reference to the underlying memory arena.
318 | pub fn arena(&self) -> &A {
319 | &self.blocks
320 | }
321 |
322 | /// Get a mutable reference to the underlying memory arena.
323 | pub fn arena_mut(&mut self) -> &mut A {
324 | &mut self.blocks
325 | }
326 |
327 | /// Allocate a region of the size `size` with a given alignment requirement.
328 | ///
329 | /// Returns a handle of the allocated region and its offset if the
330 | /// allocation succeeds. Returns `None` otherwise.
331 | ///
332 | /// - `align` must be a power of two.
333 | /// - `size` must not be zero.
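///
/// For illustration, a minimal sketch (assuming the crate-root alias
/// `xalloc::SysTlsf`):
///
/// ```
/// let mut tlsf = xalloc::SysTlsf::new(256u32);
/// // Request 64 bytes on a 16-byte boundary.
/// let (_region, offset) = tlsf.alloc_aligned(64u32, 16u32).unwrap();
/// assert_eq!(offset % 16, 0);
/// ```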
334 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
335 | pub fn alloc_aligned(&mut self, size: T, align: T) -> Option<(TlsfRegion<P>, T)> {
336 | assert!(align.is_power_of_two());
337 | self.allocate_aligned_log2(size, align.trailing_zeros())
338 | }
339 |
340 | /// Allocate a region of the size `size`.
341 | ///
342 | /// Returns a handle of the allocated region and its offset if the
343 | /// allocation succeeds. Returns `None` otherwise.
344 | ///
345 | /// `size` must not be zero.
346 | pub fn alloc(&mut self, size: T) -> Option<(TlsfRegion<P>, T)> {
347 | self.allocate_aligned_log2(size, 0)
348 | }
349 |
350 | fn allocate_aligned_log2(&mut self, size: T, align_bits: u32) -> Option<(TlsfRegion<P>, T)> {
351 | if size > self.size {
352 | return None;
353 | }
354 | assert_ne!(size, Zero::zero());
355 |
356 | let suitable = unsafe { self.l1.search_suitable(&mut self.blocks, &size, align_bits) };
357 | suitable.map(|(position, free_block_ptr, pad)| unsafe {
358 | let (mut prev, mut next, free_block_address, free_block_size) = {
359 | let block = self.blocks.get_unchecked(&free_block_ptr);
360 | (
361 | block.prev.clone(),
362 | block.next.clone(),
363 | block.address.clone(),
364 | block.size.clone(),
365 | )
366 | };
367 | let data_end = pad.clone() + size.clone();
368 |
369 | // For exception safety...
370 | let mut reserve = 0;
371 | if pad != Zero::zero() {
372 | reserve += 1;
373 | }
374 | if data_end != free_block_size {
375 | reserve += 1;
376 | }
377 | self.blocks.reserve(reserve);
378 |
379 | self.l1
380 | .unlink_head(&mut self.blocks, free_block_ptr.clone(), position);
381 | self.blocks.remove_unchecked(&free_block_ptr);
382 |
383 | if pad != Zero::zero() {
384 | let block = TlsfBlock {
385 | prev: prev.clone(),
386 | next: None, // linked later
387 | address: free_block_address.clone(),
388 | size: pad.clone(),
389 | state: TlsfBlockState::Used, // don't care
390 | };
391 | let block_ptr = self.blocks.insert(block);
392 | self.l1.link(&mut self.blocks, block_ptr.clone());
393 | if let Some(ref old_prev) = prev {
394 | self.blocks.get_unchecked_mut(old_prev).next = Some(block_ptr.clone());
395 | }
396 | prev = Some(block_ptr);
397 | }
398 |
399 | if data_end != free_block_size {
400 | let block = TlsfBlock {
401 | prev: None, // linked later
402 | next: next.clone(),
403 | address: free_block_address.clone() + data_end.clone(),
404 | size: free_block_size.clone() - data_end.clone(),
405 | state: TlsfBlockState::Used, // don't care
406 | };
407 | let block_ptr = self.blocks.insert(block);
408 | self.l1.link(&mut self.blocks, block_ptr.clone());
409 | if let Some(ref old_next) = next {
410 | self.blocks.get_unchecked_mut(old_next).prev = Some(block_ptr.clone());
411 | }
412 | next = Some(block_ptr);
413 | }
414 |
415 | let main_ptr = {
416 | let block = TlsfBlock {
417 | prev: prev.clone(),
418 | next: next.clone(),
419 | address: free_block_address.clone() + pad.clone(),
420 | size,
421 | state: TlsfBlockState::Used, // care!
422 | };
423 | self.blocks.insert(block)
424 | };
425 |
426 | // Connect neighboring blocks to this
427 | let address = self.blocks.get_unchecked(&main_ptr).address.clone();
428 |
429 | if let Some(ptr) = prev {
430 | self.blocks.get_unchecked_mut(&ptr).next = Some(main_ptr.clone());
431 | }
432 | if let Some(ptr) = next {
433 | self.blocks.get_unchecked_mut(&ptr).prev = Some(main_ptr.clone());
434 | }
435 |
436 | (TlsfRegion(main_ptr), address)
437 | })
438 | }
439 |
440 | /// Deallocate the specified region, without checking the origin of the
441 | /// `TlsfRegion`.
442 | ///
443 | /// This might result in an undefined behavior if `r` originates from
444 | /// a different instance of `Tlsf`.
445 | pub unsafe fn dealloc_unchecked(&mut self, r: TlsfRegion<P>) {
446 | let block_ptr = r.0;
447 |
448 | let (prev_ptr, next_ptr) = {
449 | let block = self.blocks.get_unchecked(&block_ptr);
450 | if let TlsfBlockState::Used = block.state {
451 | } else {
452 | // It's impossible for the application to obtain a
453 | // `TlsfRegion` for a free block. `TlsfRegion` isn't even
454 | // `Clone` nor `Copy`.
455 | unreachable();
456 | }
457 | (block.prev.clone(), block.next.clone())
458 | };
459 |
460 | // Try to merge neighboring free blocks
461 | let prev_info = if let Some(ref ptr) = prev_ptr {
462 | let block = self.blocks.get_unchecked(ptr);
463 | if let TlsfBlockState::Free { .. } = block.state {
464 | Some((block.prev.clone(), block.size.clone()))
465 | } else {
466 | None
467 | }
468 | } else {
469 | None
470 | };
471 | let next_info = if let Some(ref ptr) = next_ptr {
472 | let block = self.blocks.get_unchecked(ptr);
473 | if let TlsfBlockState::Free { .. } = block.state {
474 | Some((block.next.clone(), block.size.clone()))
475 | } else {
476 | None
477 | }
478 | } else {
479 | None
480 | };
481 | {
482 | let block = self.blocks.get_unchecked_mut(&block_ptr);
483 | if let Some((ref new_prev_ptr, ref prev_size)) = prev_info {
484 | block.prev = new_prev_ptr.clone();
485 | block.size += prev_size.clone();
486 | block.address -= prev_size.clone();
487 | }
488 | if let Some((ref new_next_ptr, ref next_size)) = next_info {
489 | block.next = new_next_ptr.clone();
490 | block.size += next_size.clone();
491 | }
492 | }
493 |
494 | if prev_info.is_some() {
495 | self.l1
496 | .unlink(&mut self.blocks, prev_ptr.clone().unchecked_unwrap());
497 | self.blocks.remove_unchecked(&prev_ptr.unchecked_unwrap());
498 | }
499 | if next_info.is_some() {
500 | self.l1
501 | .unlink(&mut self.blocks, next_ptr.clone().unchecked_unwrap());
502 | self.blocks.remove_unchecked(&next_ptr.unchecked_unwrap());
503 | }
504 |
505 | if let Some((Some(new_prev_ptr), _)) = prev_info {
506 | let block = self.blocks.get_unchecked_mut(&new_prev_ptr);
507 | block.next = Some(block_ptr.clone());
508 | }
509 | if let Some((Some(new_next_ptr), _)) = next_info {
510 | let block = self.blocks.get_unchecked_mut(&new_next_ptr);
511 | block.prev = Some(block_ptr.clone());
512 | }
513 |
514 | self.l1.link(&mut self.blocks, block_ptr);
515 | }
516 |
517 | #[doc(hidden)]
518 | pub unsafe fn test_integrity(&mut self, root_ptr: &TlsfRegion<P>)
519 | where
520 | P: fmt::Debug + PartialEq,
521 | {
522 | // Find the physically first block
523 | let mut first_ptr = root_ptr.0.clone();
524 | while self.blocks.get_unchecked(&first_ptr).prev.is_some() {
525 | first_ptr = self.blocks.get_unchecked(&first_ptr).prev.clone().unwrap();
526 | }
527 |
528 | let dump = || {
529 | use core::fmt::Write;
530 |
531 | let mut s = String::new();
532 | let mut cur_ptr = first_ptr.clone();
533 | loop {
534 | let cur = self.blocks.get_unchecked(&cur_ptr);
535 | let next_ptr = cur.next.clone();
536 | writeln!(
537 | &mut s,
538 | "{:?} - [{:?}, {:?}] - {:?}",
539 | cur.prev, cur_ptr, cur.state, cur.next
540 | )
541 | .unwrap();
542 | if let Some(next_ptr) = next_ptr {
543 | cur_ptr = next_ptr;
544 | } else {
545 | break;
546 | }
547 | }
548 | s
549 | };
550 |
551 | // scan every block and check the physical connections
552 | let mut cur_ptr = first_ptr.clone();
553 | let mut addr = Zero::zero();
554 | loop {
555 | let cur = self.blocks.get_unchecked(&cur_ptr);
556 | assert_eq!(
557 | cur.address,
558 | addr,
559 | "[{:?}].prev ({:?}) should be {:?}. Dump: \n{}",
560 | cur_ptr,
561 | &cur.address,
562 | &addr,
563 | dump()
564 | );
565 | addr += cur.size.clone();
566 |
567 | let next_ptr = cur.next.clone();
568 | if let Some(next_ptr) = next_ptr {
569 | let next = self.blocks.get_unchecked(&next_ptr);
570 | assert_eq!(
571 | next.prev,
572 | Some(cur_ptr.clone()),
573 | "[{:?}].prev ({:?}) should be {:?}. Dump: \n{}",
574 | next_ptr,
575 | next.prev,
576 | cur_ptr,
577 | dump()
578 | );
579 | assert!(
580 | next.state.is_used() || cur.state.is_used(),
581 | "[{:?}].state and [{:?}].state must not be Free at the same time. Dump: \n{}",
582 | next_ptr,
583 | cur_ptr,
584 | dump()
585 | );
586 | cur_ptr = next_ptr;
587 | } else {
588 | break;
589 | }
590 | }
591 | assert_eq!(
592 | self.size,
593 | addr,
594 | "self.size ({:?}) should be {:?}. Dump: \n{}",
595 | &self.size,
596 | &addr,
597 | dump()
598 | );
599 | }
600 | }
601 |
602 | impl<T, A, P> Tlsf<T, A, P>
603 | where
604 | T: BinaryUInteger,
605 | A: UnsafeArena<TlsfBlock<T, P>, Ptr = P> + UnsafeArenaWithMembershipCheck<TlsfBlock<T, P>>,
606 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
607 | {
608 | /// Deallocate the specified region.
609 | ///
610 | /// Returns `Err(r)` if `r` does not originate from the same instance of `Tlsf`.
611 | pub fn dealloc(&mut self, r: TlsfRegion<P>) -> Result<(), TlsfRegion<P>> {
612 | unsafe {
613 | if self.blocks.contains_unchecked(&r.0) {
614 | self.dealloc_unchecked(r);
615 | Ok(())
616 | } else {
617 | Err(r)
618 | }
619 | }
620 | }
621 | }
622 |
623 | impl<T, A, P> Tlsf<T, A, P>
624 | where
625 | T: BinaryUInteger,
626 | A: UnsafeArena<TlsfBlock<T, P>, Ptr = P> + SafeArena<TlsfBlock<T, P>>,
627 | P: Clone + Default + PartialEq + Eq + fmt::Debug,
628 | {
629 | /// Deallocate the specified region.
630 | ///
631 | /// `r` must originate from the same instance of `Tlsf`. Otherwise, `Tlsf`
632 | /// enters an inconsistent state and possibly panics, but does not cause
633 | /// undefined behavior.
634 | pub fn dealloc_relaxed(&mut self, r: TlsfRegion<P>) {
635 | unsafe { self.dealloc_unchecked(r) }
636 | }
637 | }
638 |
639 | impl<T: BinaryUInteger, P> TlsfBlock<T, P> {
640 | /// Return whether the requested region can fit in this space (assuming it
641 | /// is free).
642 | ///
643 | /// The returned value is the size of padding required to meet the
644 | /// alignment requirement. `None` if it cannot fit.
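///
/// For example (illustrative): a free block with `address = 5` and
/// `size = 20`, asked with `align_bits = 3` (8-byte alignment), has its
/// aligned start at 8 and 17 usable bytes, so any requested `size <= 17`
/// fits and the returned padding is `Some(3)`.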
645 | fn can_fit(&self, size: &T, align_bits: u32) -> Option<T> {
646 | if align_bits == 0 {
647 | if size <= &self.size {
648 | Some(Zero::zero())
649 | } else {
650 | None
651 | }
652 | } else {
653 | let start = self.address.clone().checked_ceil_fix(align_bits);
654 | let end_block = self.address.clone() + self.size.clone();
655 | if let Some(start) = start {
656 | if start < end_block && size <= &(end_block.clone() - start.clone()) {
657 | Some(start - self.address.clone())
658 | } else {
659 | None
660 | }
661 | } else {
662 | start
663 | }
664 | }
665 | }
666 | }
667 |
668 | impl<T: BinaryUInteger, P: Clone + Default + PartialEq + Eq + fmt::Debug> TlsfL1<T, P> {
669 | /// Constructs `TlsfL1`.
670 | fn new(size: &T) -> Self {
671 | assert!(size > &Zero::zero());
672 |
673 | let size_m1 = size.clone() - One::one();
674 | let num_l2s = T::max_digits().saturating_sub(LOG2_L2_SIZE + size_m1.leading_zeros()) + 1;
675 |
676 | Self {
677 | l1: vec![
678 | TlsfL2 {
679 | bitmap: Zero::zero(),
680 | l2: [
681 | // L2_SIZE elements
682 | P::default(),
683 | P::default(),
684 | P::default(),
685 | P::default(),
686 | P::default(),
687 | P::default(),
688 | P::default(),
689 | P::default(),
690 | P::default(),
691 | P::default(),
692 | P::default(),
693 | P::default(),
694 | P::default(),
695 | P::default(),
696 | P::default(),
697 | P::default(),
698 | ],
699 | };
700 | num_l2s as usize
701 | ],
702 | bitmap: Zero::zero(),
703 | entire: None,
704 | }
705 | }
706 |
707 | /// Compute the first and second level table index for a given size of free
708 | /// space.
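///
/// For example (illustrative, with `LOG2_L2_SIZE = 4` and `T = u32`):
/// `map_size(&41)` returns `(2, 4)`, because `41 | 0b1111 == 47` has 26
/// leading zeros (so `l1 = 32 - 4 - 26 = 2`) and `(41 >> 1) & 0b1111 == 4`.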
709 | #[inline]
710 | fn map_size(&self, size: &T) -> (u32, u32) {
711 | // Equivalent to:
712 | // `let l1_index = T::max_digits().saturating_sub(LOG2_L2_SIZE + size.leading_zeros());`
713 | let l1_index = T::max_digits()
714 | - LOG2_L2_SIZE
715 | - (size.clone() | T::ones(0..LOG2_L2_SIZE)).leading_zeros();
716 |
717 | // Branch-less equivalent of:
718 | // `let min_bit_index = l1_index.saturating_sub(1);`
719 | let min_bit_index = l1_index - if l1_index == 0 { 0 } else { 1 };
720 |
721 | let l2_index = (size.clone() >> min_bit_index).extract_u32(0..LOG2_L2_SIZE);
722 |
723 | (l1_index, l2_index)
724 | }
725 |
726 | /// Search a free block at least as large as `size` with the alignment
727 | /// requirement `1 << align_bits`.
728 | ///
729 | /// The result can be one of the following:
730 | ///
731 | /// - `None`: No suitable block was found.
732 | /// - `Some((position, block_ptr, pad))`: A suitable block was found. `position` is either of:
733 | /// - `Some((l1, l2))`: `block_ptr` is the head of the free space list at the position `(l1, l2)`.
734 | /// - `None`: `block_ptr` is `self.entire`.
735 | ///
736 | /// `size` must be less than or equal to the size of the heap.
737 | #[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))]
738 | unsafe fn search_suitable<A: UnsafeArena<TlsfBlock<T, P>, Ptr = P>>(
739 | &self,
740 | blocks: &mut A,
741 | size: &T,
742 | align_bits: u32,
743 | ) -> Option<(Option<(u32, u32)>, P, T)> {
744 | if let Some(ref entire) = self.entire {
745 | return Some((None, entire.clone(), Zero::zero()));
746 | }
747 |
748 | let (l1_first, l2_first) = self.map_size(size);
749 | if self.bitmap.get_bit(l1_first) {
750 | if l1_first as usize >= self.l1.len() {
751 | unreachable();
752 | }
753 | let l2t: &TlsfL2<P> = &self.l1[l1_first as usize];
754 | if l2t.bitmap.get_bit(l2_first) {
755 | // Found a free block in the same bucket.
756 | let block_ptr = l2t.l2[l2_first as usize].clone();
757 | let block = blocks.get_unchecked(&block_ptr);
758 | if let Some(pad) = block.can_fit(size, align_bits) {
759 | return Some((Some((l1_first, l2_first)), block_ptr, pad));
760 | }
761 | }
762 |
763 | // Search the same second level table.
764 | let l2 = l2t.bitmap.bit_scan_forward(l2_first + 1);
765 | if l2 < L2_SIZE {
766 | // Found one
767 | let block_ptr = l2t.l2[l2 as usize].clone();
768 | let can_fit = if align_bits == 0 {
769 | Some(Zero::zero())
770 | } else {
771 | blocks.get_unchecked(&block_ptr).can_fit(size, align_bits)
772 | };
773 | if let Some(pad) = can_fit {
774 | if align_bits == 0 {
775 | debug_assert!(blocks
776 | .get_unchecked(&block_ptr)
777 | .can_fit(size, align_bits)
778 | .is_some());
779 | }
780 | return Some((Some((l1_first, l2)), block_ptr, pad));
781 | }
782 | }
783 | }
784 |
785 | let mut l1_first = self.bitmap.bit_scan_forward(l1_first + 1);
786 | let mut l2_first = if l1_first == T::max_digits() {
787 | return None;
788 | } else {
789 | if l1_first as usize >= self.l1.len() {
790 | unreachable();
791 | }
792 | let l2t: &TlsfL2<P> = &self.l1[l1_first as usize];
793 | let l2 = l2t.bitmap.bit_scan_forward(0);
794 | debug_assert_ne!(l2, TlsfL2Bitmap::max_digits());
795 | let block_ptr = l2t.l2[l2 as usize].clone();
796 | let can_fit = if align_bits == 0 {
797 | Some(Zero::zero())
798 | } else {
799 | blocks.get_unchecked(&block_ptr).can_fit(size, align_bits)
800 | };
801 | if let Some(pad) = can_fit {
802 | if align_bits == 0 {
803 | debug_assert!(blocks
804 | .get_unchecked(&block_ptr)
805 | .can_fit(size, align_bits)
806 | .is_some());
807 | }
808 | return Some((Some((l1_first, l2)), block_ptr, pad));
809 | }
810 | l2
811 | };
812 |
813 | // For aligned allocations, there are cases where no free space can
814 | // satisfy the alignment requirement even if the size requirement is met.
815 | // We need to check more free lists.
816 | //
817 | // The code below should be unreachable for allocations without an
818 | // alignment requirement.
819 | debug_assert_ne!(align_bits, 0);
820 |
821 | // FIXME: add explanation
822 | let worst_size = size.ref_saturating_add(T::ones(0..align_bits));
823 | let (l1_worst, l2_worst) = self.map_size(&worst_size);
824 | while (l1_first, l2_first) < (l1_worst, l2_worst) {
825 | // Determine the next search start position
826 | l2_first += 1;
827 | if l2_first >= TlsfL2Bitmap::max_digits() {
828 | l1_first = self.bitmap.bit_scan_forward(l1_first + 1);
829 | if l1_first == T::max_digits() {
830 | return None;
831 | }
832 | l2_first = 0;
833 | }
834 |
835 | let l2t: &TlsfL2<P> = &self.l1[l1_first as usize];
836 | let l2 = l2t.bitmap.bit_scan_forward(l2_first);
837 | if l2 == TlsfL2Bitmap::max_digits() {
838 | l2_first = l2;
839 | continue;
840 | }
841 | let block_ptr = l2t.l2[l2 as usize].clone();
842 | if let Some(pad) = blocks.get_unchecked(&block_ptr).can_fit(size, align_bits) {
843 | return Some((Some((l1_first, l2)), block_ptr, pad));
844 | } else {
845 | l2_first = l2;
846 | }
847 | }
848 |
849 | None
850 | }
851 |
852 | /// Remove the given block from the free space list.
853 | #[inline]
854 | unsafe fn unlink<A: UnsafeArena<TlsfBlock<T, P>, Ptr = P>>(
855 | &mut self,
856 | blocks: &mut A,
857 | block_ptr: P,
858 | ) {
859 | let (l1, l2) = self.map_size(&blocks.get_unchecked(&block_ptr).size);
860 | if l1 as usize >= self.l1.len() {
861 | self.entire = None;
862 | } else {
863 | {
864 | debug_assert!(self.bitmap.get_bit(l1));
865 | debug_assert!(
866 | self.l1[l1 as usize].bitmap.get_bit(l2),
867 | "L2 bitmap 0b{:b} has not bit {} set.",
868 | &self.l1[l1 as usize].bitmap,
869 | l2
870 | );
871 | if self.l1[l1 as usize].l2[l2 as usize] == block_ptr {
872 | return self.unlink_head(blocks, block_ptr, Some((l1, l2)));
873 | }
874 | }
875 |
876 | // Retrieve the neighboring blocks (in the free space list)
877 | let (prev_ptr, o_next_ptr) = {
878 | let block = blocks.get_unchecked(&block_ptr);
879 | if let TlsfBlockState::Free {
880 | prev_free: Some(ref prev_free),
881 | ref next_free,
882 | } = block.state
883 | {
884 | (prev_free.clone(), next_free.clone())
885 | } else {
886 | unreachable();
887 | }
888 | };
889 |
890 | // Unlink the current block
891 | if let Some(ref next_ptr) = o_next_ptr {
892 | let next_block = blocks.get_unchecked_mut(next_ptr);
893 | if let TlsfBlockState::Free {
894 | ref mut prev_free, ..
895 | } = next_block.state
896 | {
897 | debug_assert_eq!(*prev_free, Some(block_ptr.clone()));
898 | *prev_free = Some(prev_ptr.clone());
899 | } else {
900 | unreachable();
901 | }
902 | }
903 |
904 | {
905 | let prev_block = blocks.get_unchecked_mut(&prev_ptr);
906 | if let TlsfBlockState::Free {
907 | ref mut next_free, ..
908 | } = prev_block.state
909 | {
910 | debug_assert_eq!(*next_free, Some(block_ptr.clone()));
911 | *next_free = o_next_ptr;
912 | } else {
913 | unreachable();
914 | }
915 | }
916 | }
917 | }
918 |
919 | /// Remove the given block from the free space list.
920 | ///
921 | /// `block_ptr` must be the head of the free space list specified by `position`.
922 | /// `block_ptr` returned by `search_suitable` always satisfies this condition,
923 | /// supposing no intervening modification was done.
924 | #[inline]
925 | unsafe fn unlink_head<A: UnsafeArena<TlsfBlock<T, P>, Ptr = P>>(
926 | &mut self,
927 | blocks: &mut A,
928 | block_ptr: P,
929 | position: Option<(u32, u32)>,
930 | ) {
931 | if let Some((l1, l2)) = position {
932 | let l2t: &mut TlsfL2<P> = &mut self.l1[l1 as usize];
933 |
934 | debug_assert!(self.bitmap.get_bit(l1));
935 | debug_assert!(
936 | l2t.bitmap.get_bit(l2),
937 | "L2 bitmap 0b{:b} has not bit {} set.",
938 | &l2t.bitmap,
939 | l2
940 | );
941 | debug_assert_eq!(block_ptr, l2t.l2[l2 as usize]);
942 |
943 | let next_block_ptr = {
944 | let block = blocks.get_unchecked(&block_ptr);
945 | if let TlsfBlockState::Free { ref next_free, .. } = block.state {
946 | next_free.clone()
947 | } else {
948 | unreachable();
949 | }
950 | };
951 |
952 | if let Some(next_block_ptr) = next_block_ptr {
953 | let next_block = blocks.get_unchecked_mut(&next_block_ptr);
954 | if let TlsfBlockState::Free {
955 | ref mut prev_free, ..
956 | } = next_block.state
957 | {
958 | debug_assert_eq!(*prev_free, Some(block_ptr));
959 | *prev_free = None;
960 | } else {
961 | unreachable();
962 | }
963 |
964 | l2t.l2[l2 as usize] = next_block_ptr;
965 | } else {
966 | l2t.bitmap.clear_bit(l2);
967 | if l2t.bitmap == Zero::zero() {
968 | self.bitmap.clear_bit(l1);
969 | }
970 |
971 | // don't care about the value of `l2t.l2[l2 as usize]`
972 | }
973 | } else {
974 | debug_assert_eq!(Some(block_ptr), self.entire);
975 | self.entire = None;
976 | }
977 | }
978 |
979 | /// Insert the given block to a free space list.
980 | ///
981 | /// `block_ptr` must point to a valid `TlsfBlock` in `blocks`.
982 | /// The given block's `TlsfBlock::state` will be overwritten with a new
983 | /// `TlsfBlockState::Free` value.
984 | #[inline]
985 | unsafe fn link<A>(&mut self, blocks: &mut A, block_ptr: P)
986 | where
987 | A: UnsafeArena<TlsfBlock<T, P>, Ptr = P>,
988 | {
989 | let (l1, l2) = self.map_size(&blocks.get_unchecked(&block_ptr).size);
990 | if l1 as usize >= self.l1.len() {
991 | self.entire = Some(block_ptr);
992 | } else {
993 | let l2t: &mut TlsfL2<P> = &mut self.l1[l1 as usize];
994 |
995 | // Update bitmaps
996 | let head_valid = l2t.bitmap.get_bit(l2);
997 | l2t.bitmap.set_bit(l2);
998 | self.bitmap.set_bit(l1);
999 |
1000 | // Link the given block to the list
1001 | let head = &mut l2t.l2[l2 as usize];
1002 |
1003 | {
1004 | let block = blocks.get_unchecked_mut(&block_ptr);
1005 | block.state = TlsfBlockState::Free {
1006 | prev_free: None,
1007 | next_free: if head_valid { Some(head.clone()) } else { None },
1008 | };
1009 | }
1010 | if head_valid {
1011 | let next_block = blocks.get_unchecked_mut(head);
1012 | if let TlsfBlockState::Free {
1013 | ref mut prev_free, ..
1014 | } = next_block.state
1015 | {
1016 | debug_assert!(prev_free.is_none());
1017 | *prev_free = Some(block_ptr.clone());
1018 | } else {
1019 | unreachable();
1020 | }
1021 | }
1022 |
1023 | *head = block_ptr;
1024 | }
1025 | }
1026 | }
1027 |
1028 | #[test]
1029 | fn num_l2s() {
1030 | for i in 1..L2_SIZE {
1031 | let l1 = TlsfL1::<_, u32>::new(&(i as u32));
1032 | assert_eq!(l1.l1.len(), 1);
1033 | }
1034 | for k in 0..4 {
1035 | let i = L2_SIZE << k;
1036 | let l1 = TlsfL1::<_, u32>::new(&i);
1037 | assert_eq!(l1.l1.len(), k + 1);
1038 | }
1039 | }
1040 |
--------------------------------------------------------------------------------