├── .github └── workflows │ └── rust.yml ├── .gitignore ├── .travis.yml ├── Cargo.lock ├── Cargo.toml ├── README.md ├── posix ├── .gitignore ├── Cargo.lock ├── Cargo.toml └── src │ └── lib.rs ├── rust-toolchain ├── scripts ├── coverage.sh ├── debug.sh ├── perf.sh ├── report.sh └── test.sh ├── src ├── alloc │ ├── block.rs │ ├── heap.rs │ ├── list.rs │ └── mod.rs ├── lib.rs ├── macros.rs ├── sources.rs └── util.rs └── test.c /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Build 20 | run: cargo build --verbose 21 | - name: Run tests 22 | run: cargo test --verbose 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | .idea 4 | perf.data* 5 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | os: 3 | - linux 4 | 5 | language: rust 6 | # necessary for `travis-cargo coveralls --no-sudo` 7 | addons: 8 | apt: 9 | packages: 10 | - libelf-dev 11 | - libdw-dev 12 | - binutils-dev # optional: only required for the --verify flag of coveralls 13 | 14 | cache: cargo 15 | before_cache: 16 | # Travis can't cache files that are not readable by "others" 17 | - chmod -R a+r $HOME/.cargo 18 | 19 | rust: 20 | - nightly 21 | 22 | # load travis-cargo 23 | before_script: 24 | - | 25 | pip install 'travis-cargo<0.2' --user && 26 | export PATH=$HOME/.local/bin:$PATH 27 | 28 | # the main build 29 | script: 30 | - | 31 | travis-cargo build && 32 | travis-cargo test 33 | after_success: 34 | # measure code coverage and upload to coveralls.io (the verify 35 | # argument mitigates kcov crashes due to malformed debuginfo, at the 36 | # cost of some speed ) 37 | - | 38 | if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then 39 | travis-cargo coveralls --no-sudo --verify 40 | KCOV=./kcov/build/src/kcov ./scripts/coverage.sh 41 | fi 42 | 43 | env: 44 | global: 45 | - TRAVIS_CARGO_NIGHTLY_FEATURE="" 46 | - RUST_BACKTRACE=1 47 | - RUST_TEST_THREADS=1 48 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | [[package]] 4 | name = "collam" 5 | version = "0.0.1" 6 | dependencies = [ 7 | "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 8 | "libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)", 9 | "libc-print 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", 10 | "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", 11 | ] 12 | 13 | [[package]] 14 | name = "lazy_static" 15 | version = "1.4.0" 16 | source = "registry+https://github.com/rust-lang/crates.io-index" 17 | dependencies = [ 18 | "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", 19 | ] 20 | 21 | [[package]] 22 | name = "libc" 23 | version = "0.2.71" 24 | source = "registry+https://github.com/rust-lang/crates.io-index" 25 | 26 | [[package]] 27 | name = "libc-print" 28 | version = "0.1.13" 29 | source = "registry+https://github.com/rust-lang/crates.io-index" 30 | dependencies = [ 31 | "libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)", 32 | ] 33 | 34 | [[package]] 35 | name = "spin" 36 | version = "0.5.2" 37 | source = "registry+https://github.com/rust-lang/crates.io-index" 38 | 39 | [metadata] 40 | "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" 41 | "checksum libc 0.2.71 (registry+https://github.com/rust-lang/crates.io-index)" = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" 42 | "checksum libc-print 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "51d69922a3a7e3e1d03ec46cf18da2cdf5d6d2eef5d781c57075a52584f95b32" 43 | "checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" 44 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "collam" 3 | version = "0.0.1" 4 | authors = ["Michael Egger "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "collam" 9 | crate-type = ["rlib"] 10 | 11 | [features] 12 | # Enables debug assertions and trace logs. 13 | # Should only be used during development! 14 | debug = [] 15 | 16 | [dependencies] 17 | lazy_static = { version = "1.4.0", features = ["spin_no_std"] } 18 | libc = { version = "0.2", default-features = false } 19 | libc-print = "0.1.13" 20 | spin = "0.5.2" 21 | 22 | [profile.dev] 23 | panic = "abort" 24 | 25 | [profile.release] 26 | panic = "abort" 27 | opt-level = 3 28 | debug = false 29 | lto = true 30 | debug-assertions = false 31 | codegen-units = 1 32 | overflow-checks = false -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # collam 2 | [![Build Status](https://travis-ci.org/gcarq/collam.svg?branch=master)](https://travis-ci.org/gcarq/collam) [![Coverage Status](https://coveralls.io/repos/github/gcarq/collam/badge.svg?branch=master)](https://coveralls.io/github/gcarq/collam?branch=master) 3 | 4 | A naive, thread-safe general-purpose allocator written in Rust with `#![no_std]`. 5 | This project started as an experiment to get comfortable with `#![no_std]` environments and `unsafe` Rust. 6 | This library is currently *NOT* stable and I'm sure there are plenty of bugs, be warned!
7 | 8 | ## A note on its state 9 | Collam implements the `GlobalAlloc` trait and can be used within Rust. 10 | The sub-crate `posix` exposes `malloc`, `calloc`, `realloc`, `free`, `malloc_usable_size`, `mallopt` and can be used for arbitrary programs; 11 | in its current state it works with almost all tested programs via `LD_PRELOAD`. 12 | 13 | ## Tested platforms 14 | - [x] Linux x86_64 15 | 16 | ## Implementation details 17 | Bookkeeping is currently done with an intrusive doubly linked list. 18 | The overhead for each allocated block is 16 bytes, of which only 12 bytes are currently used. 19 | 20 | ## Performance 21 | In terms of memory usage/overhead it is comparable to dlmalloc for the tested applications; 22 | however, the performance is not there yet. 23 | 24 | ## Usage within Rust 25 | ```rust 26 | use collam::alloc::Collam; 27 | 28 | #[global_allocator] 29 | static ALLOC: Collam = Collam::new(); 30 | 31 | fn main() { 32 | let mut vec = Vec::new(); 33 | vec.push(42); 34 | assert_eq!(vec.pop().unwrap(), 42); 35 | } 36 | ``` 37 | 38 | ## Testing collam in a C/POSIX environment 39 | Make sure you have Rust nightly. 40 | Manually override the default allocator: 41 | ```bash 42 | $ cargo build --manifest-path posix/Cargo.toml --release 43 | $ LD_PRELOAD="$(pwd)/posix/target/release/libcollam.so" kwrite 44 | ``` 45 | Or use the test script in the root folder: 46 | ```bash 47 | $ ./scripts/test.sh kwrite 48 | ``` 49 | There are some more helper scripts for debugging, profiling, etc. See the `scripts/` folder. 50 | 51 | ## Execute tests 52 | Tests are not thread-safe, so make sure to force a single test thread! 53 | ```bash 54 | $ cargo test --all-features -- --test-threads 1 55 | ``` 56 | 57 | ## TODO: 58 | * Proper page handling 59 | * mmap support 60 | * Thread-local allocation 61 | * Logarithmic-time complexity allocation 62 | * Support for different architectures 63 | * Proper logging -------------------------------------------------------------------------------- /posix/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | .idea -------------------------------------------------------------------------------- /posix/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing.
3 | [[package]] 4 | name = "collam" 5 | version = "0.0.1" 6 | dependencies = [ 7 | "lazy_static", 8 | "libc", 9 | "libc-print", 10 | "spin", 11 | ] 12 | 13 | [[package]] 14 | name = "lazy_static" 15 | version = "1.4.0" 16 | source = "registry+https://github.com/rust-lang/crates.io-index" 17 | checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" 18 | dependencies = [ 19 | "spin", 20 | ] 21 | 22 | [[package]] 23 | name = "libc" 24 | version = "0.2.71" 25 | source = "registry+https://github.com/rust-lang/crates.io-index" 26 | checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" 27 | 28 | [[package]] 29 | name = "libc-print" 30 | version = "0.1.13" 31 | source = "registry+https://github.com/rust-lang/crates.io-index" 32 | checksum = "51d69922a3a7e3e1d03ec46cf18da2cdf5d6d2eef5d781c57075a52584f95b32" 33 | dependencies = [ 34 | "libc", 35 | ] 36 | 37 | [[package]] 38 | name = "posix" 39 | version = "0.0.1" 40 | dependencies = [ 41 | "collam", 42 | ] 43 | 44 | [[package]] 45 | name = "spin" 46 | version = "0.5.2" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" 49 | -------------------------------------------------------------------------------- /posix/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "posix" 3 | version = "0.0.1" 4 | authors = ["Michael Egger "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "collam" 9 | crate-type = ["cdylib"] 10 | 11 | [features] 12 | # Enables debug assertions and trace logs. 13 | # Should only be used during development! 14 | debug = [] 15 | 16 | [dependencies] 17 | collam = { path = "..", features = []} 18 | 19 | [profile.dev] 20 | panic = "abort" 21 | 22 | [profile.release] 23 | panic = "abort" 24 | opt-level = 3 25 | debug = false 26 | lto = true 27 | debug-assertions = false 28 | codegen-units = 1 29 | overflow-checks = false -------------------------------------------------------------------------------- /posix/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(ptr_internals)] 2 | #![feature(core_intrinsics)] 3 | #![feature(lang_items)] 4 | #![no_std] 5 | 6 | #[macro_use] 7 | extern crate collam; 8 | 9 | use core::alloc::{GlobalAlloc, Layout}; 10 | use core::intrinsics::{abort, unlikely}; 11 | use core::ptr::{null_mut, Unique}; 12 | use core::{ffi::c_void, panic}; 13 | 14 | use collam::alloc::{block::BlockPtr, Collam}; 15 | use collam::MIN_ALIGN; 16 | 17 | static COLLAM: Collam = Collam::new(); 18 | 19 | #[no_mangle] 20 | pub unsafe extern "C" fn malloc(size: usize) -> *mut c_void { 21 | let layout = Layout::from_size_align_unchecked(size, MIN_ALIGN); 22 | COLLAM.alloc(layout).cast::<c_void>() 23 | } 24 | 25 | #[no_mangle] 26 | pub unsafe extern "C" fn calloc(nobj: usize, size: usize) -> *mut c_void { 27 | let total_size = match nobj.checked_mul(size) { 28 | Some(x) => x, 29 | None => { 30 | eprintln!( 31 | "integer overflow detected for calloc(nobj={}, size={})", 32 | nobj, size 33 | ); 34 | return null_mut(); 35 | } 36 | }; 37 | let layout = Layout::from_size_align_unchecked(total_size, MIN_ALIGN); 38 | COLLAM.alloc_zeroed(layout).cast::<c_void>() 39 | } 40 | 41 | #[no_mangle] 42 | pub unsafe extern "C" fn realloc(p: *mut c_void, size: usize) -> *mut c_void { 43 | if p.is_null() { 44 | // If ptr is NULL, then the call is equivalent to malloc(size), for all values of size.
45 | let layout = Layout::from_size_align_unchecked(size, MIN_ALIGN); 46 | return COLLAM.alloc(layout).cast::<c_void>(); 47 | } 48 | 49 | let p = p.cast::<u8>(); 50 | let layout = Layout::from_size_align_unchecked(0, MIN_ALIGN); 51 | 52 | if size == 0 { 53 | // If size is equal to zero, and ptr is not NULL, 54 | // then the call is equivalent to free(ptr). 55 | COLLAM.dealloc(p, layout); 56 | null_mut() 57 | } else { 58 | COLLAM.realloc(p, layout, size).cast::<c_void>() 59 | } 60 | } 61 | 62 | #[no_mangle] 63 | pub unsafe extern "C" fn free(ptr: *mut c_void) { 64 | let layout = Layout::from_size_align_unchecked(0, MIN_ALIGN); 65 | COLLAM.dealloc(ptr.cast::<u8>(), layout) 66 | } 67 | 68 | #[no_mangle] 69 | pub unsafe extern "C" fn malloc_usable_size(ptr: *mut c_void) -> usize { 70 | if ptr.is_null() { 71 | return 0; 72 | } 73 | 74 | // It's safe to use `Unique::new_unchecked` since we already checked for null pointers. 75 | let block = match BlockPtr::from_mem_region(Unique::new_unchecked(ptr.cast::<u8>())) { 76 | Some(b) => b, 77 | None => return 0, 78 | }; 79 | if unlikely(!block.as_ref().verify()) { 80 | eprintln!( 81 | "malloc_usable_size(): Unable to verify {} at {:p}", 82 | block.as_ref(), 83 | block 84 | ); 85 | return 0; 86 | } 87 | block.size() 88 | } 89 | 90 | // TODO: implement me 91 | #[no_mangle] 92 | pub extern "C" fn mallopt(param: i32, value: i32) -> i32 { 93 | eprintln!( 94 | "[mallopt] not implemented! (param={}, value={})", 95 | param, value 96 | ); 97 | 1 98 | } 99 | 100 | #[cfg(not(test))] 101 | #[panic_handler] 102 | fn panic(info: &panic::PanicInfo) -> ! { 103 | eprintln!("[libcollam.so]: panic occurred: {:?}", info); 104 | abort() 105 | } 106 | 107 | #[cfg(not(test))] 108 | #[lang = "eh_personality"] 109 | extern "C" fn eh_personality() {} 110 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2020-06-21 -------------------------------------------------------------------------------- /scripts/coverage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Usage: 4 | # ./coverage.sh 5 | # 6 | # Run kcov on the tests, and merge the results. 7 | # 8 | # Environment variables: 9 | # TRAVIS_JOB_ID - id for coveralls, defaults to none 10 | # KCOV - path to kcov, defaults to 'kcov' 11 | 12 | [ -n "$TRAVIS_JOB_ID" ] && COVERALLS_ID="--coveralls-id=$TRAVIS_JOB_ID" 13 | [ -z "$KCOV" ] && KCOV=kcov 14 | 15 | # Rebuild tests with dead code included, and get a list of the filenames.
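# ("-C link-dead-code" tells rustc to keep unexercised functions in the test binaries, so kcov can attribute missed lines to them.)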
16 | export RUSTFLAGS="-C link-dead-code" 17 | TEST_FILES=$(cargo test 2>&1 >/dev/null | awk '/^ Running target\/debug\// { print $2 }') 18 | 19 | KCOV_OPTS="--verify --exclude-pattern=/.cargo --include-path $(pwd)" 20 | OUT_DIR=target/kcov 21 | 22 | for f in $TEST_FILES; do 23 | "$KCOV" $KCOV_OPTS "$OUT_DIR" $f 24 | done 25 | "$KCOV" --merge $KCOV_OPTS $COVERALLS_ID "$OUT_DIR" "$OUT_DIR" 26 | -------------------------------------------------------------------------------- /scripts/debug.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | CHANNEL="release" 5 | TMP_DIR="/tmp/collam-test" 6 | 7 | if [ -z "$1" ]; then 8 | EXECUTABLE="${TMP_DIR}/test" 9 | else 10 | EXECUTABLE="${1}" 11 | fi 12 | 13 | # Cleanup workdir 14 | rm -rf ${TMP_DIR} 15 | mkdir -p ${TMP_DIR} 16 | 17 | # Build everything 18 | cargo build --manifest-path posix/Cargo.toml --release 19 | gcc test.c -o ${TMP_DIR}/test 20 | 21 | # Start debugger 22 | gdb --args env LD_PRELOAD="$(pwd)/posix/target/${CHANNEL}/libcollam.so" "${EXECUTABLE}" -------------------------------------------------------------------------------- /scripts/perf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | CHANNEL="release" 5 | TMP_DIR="/tmp/collam-test" 6 | 7 | if [ -z "$1" ]; then 8 | EXECUTABLE="${TMP_DIR}/test" 9 | else 10 | EXECUTABLE="${1}" 11 | fi 12 | 13 | # Cleanup workdir 14 | rm -rf ${TMP_DIR} 15 | mkdir -p ${TMP_DIR} 16 | 17 | # Build everything 18 | cargo build --manifest-path posix/Cargo.toml --release 19 | gcc test.c -o ${TMP_DIR}/test 20 | 21 | # Start test executable 22 | perf record -g bash -c "LD_PRELOAD=\"$(pwd)/posix/target/${CHANNEL}/libcollam.so\" ${EXECUTABLE}" -------------------------------------------------------------------------------- /scripts/report.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | perf report -v --symbol-filter=collam --max-stack=255 4 | -------------------------------------------------------------------------------- /scripts/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | CHANNEL="release" 5 | TMP_DIR="/tmp/collam-test" 6 | 7 | if [ -z "$1" ]; then 8 | EXECUTABLE="${TMP_DIR}/test" 9 | else 10 | EXECUTABLE="${1}" 11 | fi 12 | 13 | # Cleanup workdir 14 | rm -rf ${TMP_DIR} 15 | mkdir -p ${TMP_DIR} 16 | 17 | # Build everything 18 | cargo build --manifest-path posix/Cargo.toml --release 19 | gcc test.c -o ${TMP_DIR}/test 20 | 21 | # Start test executable 22 | time LD_PRELOAD="$(pwd)/posix/target/${CHANNEL}/libcollam.so" "${EXECUTABLE}" 23 | -------------------------------------------------------------------------------- /src/alloc/block.rs: -------------------------------------------------------------------------------- 1 | use core::{fmt, intrinsics, mem, ptr::Unique}; 2 | 3 | use libc_print::libc_eprintln; 4 | 5 | use crate::{util, MIN_ALIGN}; 6 | 7 | /// The required block size to store the bare minimum of metadata (size + magic values). 8 | pub const BLOCK_META_SIZE: usize = util::min_align_unchecked(mem::align_of::<usize>() * 2); 9 | /// The minimum region size to save intrusive data structures if not allocated by the user. 10 | pub const BLOCK_MIN_REGION_SIZE: usize = 11 | util::min_align_unchecked(mem::align_of::<Option<BlockPtr>>() * 2); 12 | /// Defines the minimum remaining size of a block to consider splitting it.
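/// (A split must leave room for the new block's metadata plus a minimally aligned usable region, hence the sum of the three constants below.)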
13 | pub const BLOCK_SPLIT_MIN_SIZE: usize = 14 | util::min_align_unchecked(BLOCK_META_SIZE + BLOCK_MIN_REGION_SIZE + MIN_ALIGN); 15 | 16 | const BLOCK_MAGIC_FREE: u16 = 0xDEAD; 17 | 18 | /// Represents a mutable non-null Pointer to a `Block`. 19 | #[repr(C)] 20 | #[derive(Copy, Clone)] 21 | pub struct BlockPtr(Unique<Block>); 22 | 23 | impl BlockPtr { 24 | /// Creates a `Block` instance at the given raw pointer for the specified size. 25 | #[must_use] 26 | pub fn new(ptr: Unique<u8>, size: usize) -> Self { 27 | debug_assert_eq!(size, util::pad_min_align(size).unwrap().size()); 28 | let ptr = ptr.cast::<Block>(); 29 | unsafe { *ptr.as_ptr() = Block::new(size) }; 30 | Self(ptr) 31 | } 32 | 33 | /// Returns an existing `BlockPtr` instance from the given memory region raw pointer 34 | #[must_use] 35 | pub fn from_mem_region(ptr: Unique<u8>) -> Option<Self> { 36 | let block_ptr = unsafe { ptr.as_ptr().sub(BLOCK_META_SIZE).cast::<Block>() }; 37 | Some(BlockPtr(Unique::new(block_ptr)?)) 38 | } 39 | 40 | /// Returns a pointer to the assigned memory region for the given block 41 | pub fn mem_region(self) -> Unique<u8> { 42 | debug_assert!(self.as_ref().verify()); 43 | // SAFETY: we know the pointer can't be null 44 | // SAFETY: it should be safe to assume the associated memory region is not corrupt 45 | unsafe { Unique::new_unchecked(self.as_ptr().cast::<u8>().add(BLOCK_META_SIZE)) } 46 | } 47 | 48 | /// Acquires underlying `*mut Block`. 49 | #[inline] 50 | pub const fn as_ptr(self) -> *mut Block { 51 | self.0.as_ptr() 52 | } 53 | 54 | /// Casts to a pointer of another type. 55 | #[inline] 56 | pub const fn cast<U>(self) -> Unique<U> { 57 | // SAFETY: we know `Unique` can't be null 58 | unsafe { Unique::new_unchecked(self.as_ptr() as *mut U) } 59 | } 60 | 61 | /// Returns a pointer where the next `Block` would start. 62 | /// 63 | /// # Safety 64 | /// 65 | /// Caller must verify if returned pointer is in bounds. 66 | #[inline] 67 | pub unsafe fn next_potential_block(self) -> Unique<u8> { 68 | // TODO: implement check if pointer is valid 69 | Unique::new_unchecked(self.cast::<u8>().as_ptr().add(self.block_size())) 70 | } 71 | 72 | /// Returns the allocatable size available for the user 73 | #[inline] 74 | pub fn size(self) -> usize { 75 | self.as_ref().size 76 | } 77 | 78 | /// Returns the raw size in memory for this block. 79 | #[inline] 80 | pub fn block_size(self) -> usize { 81 | BLOCK_META_SIZE + self.size() 82 | } 83 | 84 | /// Tries to merge self with the next block, if available. 85 | /// Returns a merged `BlockPtr` if merge was possible, `None` otherwise.
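/// Note: merging is only attempted when the two blocks are physically adjacent in memory, which is what the pointer comparison at the start of the function checks.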
86 | pub fn maybe_merge_next(mut self) -> Option<Self> { 87 | let next = self.as_ref().next?; 88 | 89 | unsafe { 90 | if self.next_potential_block().as_ptr() != next.cast::<u8>().as_ptr() { 91 | return None; 92 | } 93 | } 94 | 95 | dprintln!("[merge]: {} at {:p}", self.as_ref(), self.0); 96 | dprintln!(" & {} at {:p}", next.as_ref(), next); 97 | // Update related links 98 | self.as_mut().next = next.as_ref().next; 99 | if let Some(mut n) = self.as_ref().next { 100 | n.as_mut().prev = Some(self); 101 | } 102 | // Update to final size 103 | self.as_mut().size += BLOCK_META_SIZE + next.size(); 104 | 105 | // Overwrite block meta data for old block to detect double free 106 | // SAFETY: passed pointer can't be null 107 | unsafe { 108 | intrinsics::volatile_set_memory(next.cast::<u8>().as_ptr(), 0, BLOCK_META_SIZE); 109 | } 110 | 111 | dprintln!(" -> {} at {:p}", self.as_ref(), self.0); 112 | Some(self) 113 | } 114 | 115 | /// Shrinks the block in-place to have the exact memory size as specified (excluding metadata). 116 | /// Returns a newly created `BlockPtr` with the remaining size or `None` if split is not possible. 117 | pub fn shrink(&mut self, size: usize) -> Option<Self> { 118 | dprintln!("[split]: {} at {:p}", self.as_ref(), self.0); 119 | debug_assert_eq!( 120 | size, 121 | util::pad_min_align(size).expect("unable to align").size() 122 | ); 123 | // Check if it's possible to split the block with the requested size 124 | let rem_block_size = self.size().checked_sub(size + BLOCK_META_SIZE)?; 125 | 126 | if rem_block_size < BLOCK_SPLIT_MIN_SIZE { 127 | dprintln!(" -> None"); 128 | return None; 129 | } 130 | 131 | // Update size for old block 132 | self.as_mut().size = size; 133 | 134 | // Create block with remaining size 135 | // SAFETY: we know `self.mem_region()` can't be null and size is within bounds 136 | let new_block_ptr = unsafe { Unique::new_unchecked(self.mem_region().as_ptr().add(size)) }; 137 | let new_block = BlockPtr::new(new_block_ptr, rem_block_size); 138 | 139 | dprintln!(" -> {} at {:p}", self.as_ref(), self.0); 140 | dprintln!(" -> {} at {:p}", new_block.as_ref(), new_block); 141 | dprintln!( 142 | " distance is {} bytes", 143 | new_block.as_ptr() as usize - (self.as_ptr() as usize + self.block_size()) 144 | ); 145 | debug_assert_eq!( 146 | new_block.as_ptr() as usize - (self.as_ptr() as usize + self.block_size()), 147 | 0 148 | ); 149 | Some(new_block) 150 | } 151 | } 152 | 153 | impl AsMut<Block> for BlockPtr { 154 | #[inline] 155 | fn as_mut(&mut self) -> &mut Block { 156 | // Safety: we know it is safe to dereference 157 | unsafe { self.0.as_mut() } 158 | } 159 | } 160 | 161 | impl AsRef<Block> for BlockPtr { 162 | #[inline] 163 | fn as_ref(&self) -> &Block { 164 | // Safety: we know it is safe to dereference 165 | unsafe { self.0.as_ref() } 166 | } 167 | } 168 | 169 | impl PartialEq for BlockPtr { 170 | #[inline] 171 | fn eq(&self, other: &Self) -> bool { 172 | self.as_ptr() == other.as_ptr() 173 | } 174 | } 175 | 176 | impl fmt::Pointer for BlockPtr { 177 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 178 | write!(f, "{:p}", self.as_ref()) 179 | } 180 | } 181 | 182 | impl fmt::Debug for BlockPtr { 183 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 184 | write!(f, "{} at {:p}", self.as_ref(), self.0) 185 | } 186 | } 187 | 188 | #[repr(C)] 189 | pub struct Block { 190 | // Required metadata 191 | size: usize, 192 | magic: u16, 193 | // Memory region starts here. All following members will be 194 | // overwritten and are unusable if block has been allocated by a user.
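// This is what makes the free list "intrusive": the link pointers below are stored inside the free block's own memory region instead of in a separate bookkeeping structure.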
195 | pub next: Option<BlockPtr>, 196 | pub prev: Option<BlockPtr>, 197 | } 198 | 199 | impl Block { 200 | #[must_use] 201 | pub const fn new(size: usize) -> Self { 202 | Self { 203 | size, 204 | next: None, 205 | prev: None, 206 | magic: BLOCK_MAGIC_FREE, 207 | } 208 | } 209 | 210 | #[inline] 211 | pub fn unlink(&mut self) { 212 | self.next = None; 213 | self.prev = None; 214 | } 215 | 216 | /// Verifies block to detect memory corruption. 217 | /// Returns `true` if block metadata is intact, `false` otherwise. 218 | #[inline] 219 | pub fn verify(&self) -> bool { 220 | self.magic == BLOCK_MAGIC_FREE 221 | } 222 | } 223 | 224 | impl fmt::Display for Block { 225 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 226 | /* 227 | TODO: fix formatter for self.prev and self.next 228 | write!( 229 | f, 230 | "Block(size={}, prev={:?}, next={:?}, magic=0x{:X}, meta_size={})", 231 | self.size, self.prev, self.next, self.magic, BLOCK_META_SIZE, 232 | )*/ 233 | write!( 234 | f, 235 | "Block(size={}, magic=0x{:X}, meta_size={})", 236 | self.size, self.magic, BLOCK_META_SIZE, 237 | ) 238 | } 239 | } 240 | 241 | #[cfg(test)] 242 | mod tests { 243 | use super::*; 244 | use core::ffi::c_void; 245 | 246 | fn assert_block(block: BlockPtr, size: usize) { 247 | assert_eq!(block.size(), size, "block size doesn't match"); 248 | assert_eq!( 249 | block.block_size(), 250 | BLOCK_META_SIZE + size, 251 | "block raw size doesn't match" 252 | ); 253 | assert!(block.as_ref().verify(), "unable to verify block metadata"); 254 | assert!(block.as_ref().next.is_none(), "next is not None"); 255 | assert!(block.as_ref().prev.is_none(), "prev is not None"); 256 | } 257 | 258 | #[test] 259 | fn test_block_new() { 260 | let alloc_size = 64; 261 | let ptr = unsafe { 262 | Unique::new(libc::malloc(BLOCK_META_SIZE + alloc_size)) 263 | .expect("unable to allocate memory") 264 | .cast::<u8>() 265 | }; 266 | assert_block(BlockPtr::new(ptr, alloc_size), alloc_size); 267 | unsafe { libc::free(ptr.cast::<c_void>().as_ptr()) }; 268 | } 269 | 270 | #[test] 271 | fn test_block_shrink_with_remaining() { 272 | let block1_size = 4096; 273 | let ptr = unsafe { 274 | Unique::new(libc::malloc(BLOCK_META_SIZE + block1_size)) 275 | .expect("unable to allocate memory") 276 | .cast::<u8>() 277 | }; 278 | let mut block1 = BlockPtr::new(ptr, block1_size); 279 | assert_block(block1, block1_size); 280 | let total_size = block1.block_size(); 281 | assert_eq!(ptr.as_ptr(), block1.as_ptr().cast::<u8>()); 282 | 283 | // Shrink block1 to 256 bytes 284 | let mut block2 = block1.shrink(256).expect("split block failed"); 285 | assert_block(block1, 256); 286 | unsafe { 287 | assert_eq!( 288 | block1.next_potential_block().as_ptr(), 289 | block2.cast::<u8>().as_ptr() 290 | ); 291 | } 292 | assert_block(block2, total_size - block1.block_size() - BLOCK_META_SIZE); 293 | 294 | // Shrink block2 to 256 bytes 295 | let block3 = block2.shrink(256).expect("split block failed"); 296 | assert_block(block2, 256); 297 | unsafe { 298 | assert_eq!( 299 | block2.next_potential_block().as_ptr(), 300 | block3.cast::<u8>().as_ptr() 301 | ); 302 | } 303 | assert_block( 304 | block3, 305 | total_size - block1.block_size() - block2.block_size() - BLOCK_META_SIZE, 306 | ); 307 | unsafe { libc::free(ptr.cast::<c_void>().as_ptr()) }; 308 | } 309 | 310 | #[test] 311 | fn test_block_shrink_no_remaining() { 312 | let alloc_size = 256; 313 | let ptr = unsafe { 314 | Unique::new(libc::malloc(BLOCK_META_SIZE + alloc_size)) 315 | .expect("unable to allocate memory") 316 | .cast::<u8>() 317 | }; 318 | let mut block = BlockPtr::new(ptr,
alloc_size); 319 | let remaining = block.shrink(240); 320 | 321 | // Assert correctness of initial block 322 | assert_eq!(ptr.as_ptr(), block.as_ptr().cast::<u8>()); 323 | assert_block(block, 256); 324 | 325 | // There should be no remaining block 326 | // since 240 will be aligned to 256 and no space is left. 327 | assert!(remaining.is_none()); 328 | unsafe { libc::free(ptr.cast::<c_void>().as_ptr()) }; 329 | } 330 | 331 | #[test] 332 | fn test_block_verify_ok() { 333 | let alloc_size = 256; 334 | let ptr = unsafe { 335 | Unique::new(libc::malloc(BLOCK_META_SIZE + alloc_size)) 336 | .expect("unable to allocate memory") 337 | .cast::<u8>() 338 | }; 339 | let block = BlockPtr::new(ptr, alloc_size); 340 | assert!(block.as_ref().verify()); 341 | unsafe { libc::free(ptr.cast::<c_void>().as_ptr()) }; 342 | } 343 | 344 | #[test] 345 | fn test_block_verify_invalid() { 346 | let alloc_size = 256; 347 | let ptr = unsafe { 348 | Unique::new(libc::malloc(BLOCK_META_SIZE + alloc_size)) 349 | .expect("unable to allocate memory") 350 | .cast::<u8>() 351 | }; 352 | let mut block = BlockPtr::new(ptr, alloc_size); 353 | block.as_mut().magic = 0x1234; 354 | assert_eq!(block.as_ref().verify(), false); 355 | 356 | unsafe { libc::free(ptr.cast::<c_void>().as_ptr()) }; 357 | } 358 | 359 | #[test] 360 | fn test_block_mem_region_ok() { 361 | let alloc_size = 64; 362 | let ptr = unsafe { 363 | Unique::new(libc::malloc(BLOCK_META_SIZE + alloc_size)) 364 | .expect("unable to allocate memory") 365 | .cast::<u8>() 366 | }; 367 | let block = BlockPtr::new(ptr, alloc_size); 368 | let mem = block.mem_region(); 369 | assert!(mem.as_ptr() > block.as_ptr().cast::<u8>()); 370 | let block2 = BlockPtr::from_mem_region(mem).expect("unable to create from mem region"); 371 | assert_eq!(block, block2); 372 | unsafe { libc::free(ptr.cast::<c_void>().as_ptr()) }; 373 | } 374 | 375 | #[test] 376 | fn test_block_mem_region_err() { 377 | let region = unsafe { Unique::new_unchecked(16 as *mut u8) }; 378 | assert_eq!(BlockPtr::from_mem_region(region), None); 379 | } 380 | } 381 | -------------------------------------------------------------------------------- /src/alloc/heap.rs: -------------------------------------------------------------------------------- 1 | use libc_print::libc_eprintln; 2 | 3 | use crate::alloc::block::BlockPtr; 4 | use crate::alloc::list::IntrusiveList; 5 | use crate::sources::{DataSegment, MemorySource}; 6 | 7 | pub struct Heap { 8 | pub list: IntrusiveList, 9 | source: DataSegment, 10 | } 11 | 12 | impl Heap { 13 | pub const fn new() -> Self { 14 | Self { 15 | list: IntrusiveList::new(), 16 | source: DataSegment, 17 | } 18 | } 19 | 20 | /// Requests and returns a suitable empty `BlockPtr` for the given size. 21 | /// This can be either a reused empty block or a new one requested from kernel. 22 | /// 23 | /// # Safety 24 | /// 25 | /// Function is not thread safe. 26 | pub unsafe fn request(&mut self, size: usize) -> Option<BlockPtr> { 27 | if let Some(block) = self.list.pop(size) { 28 | dprintln!("[pop]: {} at {:p}", block.as_ref(), block); 29 | return Some(block); 30 | } 31 | self.source.request(size) 32 | } 33 | 34 | /// Releases a given `BlockPtr` back to the allocator or kernel. 35 | /// 36 | /// # Safety 37 | /// 38 | /// Function is not thread safe.
39 | pub unsafe fn release(&mut self, block: BlockPtr) { 40 | #[cfg(feature = "debug")] 41 | self.list.debug(); 42 | 43 | if self.source.release(block) { 44 | return; 45 | } 46 | 47 | dprintln!("[insert]: {} at {:p}", block.as_ref(), block); 48 | if self.list.insert(block).is_err() { 49 | eprintln!("double free detected for ptr {:?}", block.mem_region()); 50 | } 51 | } 52 | } 53 | 54 | #[cfg(test)] 55 | mod tests { 56 | use super::*; 57 | use core::ffi::c_void; 58 | use libc::sbrk; 59 | 60 | #[test] 61 | fn test_request_block() { 62 | unsafe { 63 | let mut heap = Heap::new(); 64 | let block = heap.request(256).expect("unable to request block"); 65 | let brk = block.next_potential_block().as_ptr(); 66 | assert_eq!(brk.cast::<c_void>(), sbrk(0)); 67 | } 68 | } 69 | 70 | #[test] 71 | fn test_request_block_split() { 72 | unsafe { 73 | let mut heap = Heap::new(); 74 | let rem_block = heap 75 | .request(256) 76 | .expect("unable to request block") 77 | .shrink(128) 78 | .expect("unable to split block"); 79 | let brk = rem_block.next_potential_block().as_ptr(); 80 | assert_eq!(brk.cast::<c_void>(), sbrk(0)); 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/alloc/list.rs: -------------------------------------------------------------------------------- 1 | use libc_print::libc_eprintln; 2 | 3 | use crate::alloc::block::{BlockPtr, BLOCK_SPLIT_MIN_SIZE}; 4 | 5 | #[repr(C)] 6 | pub struct IntrusiveList { 7 | pub head: Option<BlockPtr>, 8 | pub tail: Option<BlockPtr>, 9 | } 10 | 11 | impl IntrusiveList { 12 | pub const fn new() -> Self { 13 | Self { 14 | head: None, 15 | tail: None, 16 | } 17 | } 18 | 19 | /// Inserts a `BlockPtr` to the existing list and 20 | /// returns `Err` on detected double-free. 21 | pub fn insert(&mut self, mut to_insert: BlockPtr) -> Result<(), ()> { 22 | // Reset pointer locations since they were part of the user-allocatable data 23 | to_insert.as_mut().unlink(); 24 | 25 | // Add initial element 26 | if self.head.is_none() { 27 | debug_assert!(self.tail.is_none()); 28 | self.head = Some(to_insert); 29 | self.tail = Some(to_insert); 30 | return Ok(()); 31 | } 32 | 33 | debug_assert!(self.head.is_some()); 34 | debug_assert!(self.tail.is_some()); 35 | 36 | match self.find_higher_block(to_insert)? { 37 | Some(block) => IntrusiveList::insert_before(block, to_insert), 38 | None => IntrusiveList::insert_after(self.tail.unwrap(), to_insert), 39 | } 40 | let inserted = IntrusiveList::maybe_merge_adjacent(to_insert); 41 | self.update_ends(inserted); 42 | Ok(()) 43 | } 44 | 45 | /// Removes and returns the first suitable `BlockPtr`. 46 | pub fn pop(&mut self, size: usize) -> Option<BlockPtr> { 47 | for block in self.iter() { 48 | if size == block.size() { 49 | dprintln!( 50 | "[libcollam.so]: found perfect {} at {:p} for size {}", 51 | block.as_ref(), 52 | block, 53 | size 54 | ); 55 | return Some(self.remove(block)); 56 | } 57 | if size + BLOCK_SPLIT_MIN_SIZE <= block.size() { 58 | dprintln!( 59 | "[libcollam.so]: found suitable {} at {:p} for size {}", 60 | block.as_ref(), 61 | block, 62 | size 63 | ); 64 | return Some(self.remove(block)); 65 | } 66 | } 67 | None 68 | } 69 | 70 | /// Prints some debugging information about the heap structure.
71 | #[cfg(feature = "debug")] 72 | pub fn debug(&self) { 73 | dprintln!("[debug]: === list debug start ==="); 74 | for (i, block) in self.iter().enumerate() { 75 | dprintln!("[debug]: pos: {}\t{} at\t{:p}", i, block.as_ref(), block); 76 | if !block.as_ref().verify() { 77 | panic!("Unable to verify: {} at\t{:p}", block.as_ref(), block); 78 | } 79 | 80 | match block.as_ref().prev { 81 | Some(prev) => { 82 | debug_assert_eq!(prev.as_ref().next.unwrap().as_ptr(), block.as_ptr()); 83 | // rule out self reference 84 | debug_assert_ne!(prev.as_ptr(), block.as_ptr()); 85 | } 86 | None => debug_assert_eq!(self.head.unwrap().as_ptr(), block.as_ptr()), 87 | } 88 | 89 | match block.as_ref().next { 90 | Some(next) => { 91 | debug_assert_eq!(next.as_ref().prev.unwrap().as_ptr(), block.as_ptr()); 92 | // rule out self reference 93 | debug_assert_ne!(next.as_ptr(), block.as_ptr()); 94 | } 95 | None => debug_assert_eq!(self.tail.unwrap().as_ptr(), block.as_ptr()), 96 | } 97 | 98 | if let Some(next) = block.as_ref().next { 99 | debug_assert!( 100 | block.as_ptr() < next.as_ptr(), 101 | "{:p} is not smaller than {:p}", 102 | block, 103 | next 104 | ); 105 | } 106 | } 107 | dprintln!("[debug]: === list debug end ==="); 108 | } 109 | 110 | /// Adds a `BlockPtr` to the list before the given anchor. 111 | fn insert_before(mut anchor: BlockPtr, mut to_insert: BlockPtr) { 112 | // Update links in new block 113 | to_insert.as_mut().prev = anchor.as_ref().prev; 114 | to_insert.as_mut().next = Some(anchor); 115 | 116 | // Update link for element after new block 117 | anchor.as_mut().prev = Some(to_insert); 118 | 119 | // Update link for element before new block 120 | if let Some(mut prev) = to_insert.as_ref().prev { 121 | prev.as_mut().next = Some(to_insert); 122 | } 123 | } 124 | 125 | /// Adds a `BlockPtr` to the list after the given anchor. 126 | fn insert_after(mut anchor: BlockPtr, mut to_insert: BlockPtr) { 127 | // Update links in new block 128 | to_insert.as_mut().next = anchor.as_ref().next; 129 | to_insert.as_mut().prev = Some(anchor); 130 | 131 | // Update link for element before new block 132 | anchor.as_mut().next = Some(to_insert); 133 | 134 | // Update link for element after new block 135 | if let Some(mut next) = to_insert.as_ref().next { 136 | next.as_mut().prev = Some(to_insert); 137 | } 138 | } 139 | 140 | /// Checks if head or tail should be updated with the given `BlockPtr`. 141 | fn update_ends(&mut self, block: BlockPtr) { 142 | // Update head if necessary 143 | if block.as_ref().prev.is_none() { 144 | self.head = Some(block); 145 | } 146 | 147 | // Update tail if necessary 148 | if block.as_ref().next.is_none() { 149 | self.tail = Some(block); 150 | } 151 | } 152 | 153 | /// Takes a `BlockPtr` and tries to merge adjacent blocks if possible. 154 | /// Always returns a `BlockPtr`. 155 | fn maybe_merge_adjacent(block: BlockPtr) -> BlockPtr { 156 | let block = match block.as_ref().prev { 157 | Some(prev) => prev.maybe_merge_next().unwrap_or(block), 158 | None => block, 159 | }; 160 | block.maybe_merge_next().unwrap_or(block) 161 | } 162 | 163 | /// Returns first `BlockPtr` that has a higher memory address than the given `BlockPtr` 164 | /// or `None` if no block exists at a higher memory address. 165 | /// Returns `Err` if given `BlockPtr` is already in list. 
166 | /// TODO: implement with better algorithm 167 | fn find_higher_block(&self, to_insert: BlockPtr) -> Result<Option<BlockPtr>, ()> { 168 | for block in self.iter() { 169 | if block.as_ptr() > to_insert.as_ptr() { 170 | return Ok(Some(block)); 171 | } 172 | if block == to_insert { 173 | // block is already in list. 174 | // One reason for this is double free() 175 | return Err(()); 176 | } 177 | } 178 | Ok(None) 179 | } 180 | 181 | /// Removes the given `BlockPtr` from list and returns it. 182 | fn remove(&mut self, mut elem: BlockPtr) -> BlockPtr { 183 | // Update head 184 | if let Some(head) = self.head { 185 | if elem == head { 186 | self.head = elem.as_ref().next; 187 | } 188 | } 189 | // Update tail 190 | if let Some(tail) = self.tail { 191 | if elem == tail { 192 | self.tail = elem.as_ref().prev; 193 | } 194 | } 195 | 196 | // Update link in previous element 197 | if let Some(mut prev) = elem.as_ref().prev { 198 | prev.as_mut().next = elem.as_ref().next; 199 | } 200 | // Update link in next element 201 | if let Some(mut next) = elem.as_ref().next { 202 | next.as_mut().prev = elem.as_ref().prev; 203 | } 204 | elem.as_mut().unlink(); 205 | elem 206 | } 207 | 208 | #[inline] 209 | pub fn iter(&self) -> Iter { 210 | Iter { next: self.head } 211 | } 212 | } 213 | 214 | pub struct Iter { 215 | next: Option<BlockPtr>, 216 | } 217 | 218 | impl Iterator for Iter { 219 | type Item = BlockPtr; 220 | fn next(&mut self) -> Option<Self::Item> { 221 | self.next.map(|node| { 222 | self.next = node.as_ref().next; 223 | node 224 | }) 225 | } 226 | } 227 | 228 | #[cfg(test)] 229 | mod tests { 230 | use super::*; 231 | use crate::alloc::block::BLOCK_META_SIZE; 232 | use crate::alloc::heap::Heap; 233 | 234 | #[test] 235 | fn test_list_new() { 236 | let list = IntrusiveList::new(); 237 | assert_eq!(list.head, None); 238 | assert_eq!(list.tail, None); 239 | } 240 | 241 | #[test] 242 | fn test_insert_after_no_merge() { 243 | let mut heap = Heap::new(); 244 | let mut block = unsafe { heap.request(256).expect("unable to request block") }; 245 | // Block2 imitates a used block. So it will not be added to list 246 | let mut block2 = block.shrink(64).expect("unable to split block"); 247 | let block3 = block2.shrink(64).expect("unable to split block"); 248 | 249 | // Insert block1 250 | heap.list.insert(block).expect("unable to insert"); 251 | assert_eq!(heap.list.head, Some(block)); 252 | assert_eq!(heap.list.tail, Some(block)); 253 | assert_eq!(block.as_ref().next, None); 254 | assert_eq!(block.as_ref().prev, None); 255 | 256 | // Insert block3 257 | heap.list.insert(block3).expect("unable to insert"); 258 | assert_eq!(heap.list.head, Some(block)); 259 | assert_eq!(heap.list.tail, Some(block3)); 260 | assert_eq!(block.as_ref().next, Some(block3)); 261 | assert_eq!(block.as_ref().prev, None); 262 | assert_eq!(block3.as_ref().next, None); 263 | assert_eq!(block3.as_ref().prev, Some(block)); 264 | } 265 | 266 | #[test] 267 | fn test_insert_before_no_merge() { 268 | let mut heap = Heap::new(); 269 | let mut block = unsafe { heap.request(256).expect("unable to request block") }; 270 | // Block2 imitates a used block.
So it will not be added to list 271 | let mut block2 = block.shrink(64).expect("unable to split block"); 272 | let block3 = block2.shrink(64).expect("unable to split block"); 273 | 274 | // Insert block3 275 | heap.list.insert(block3).expect("unable to insert"); 276 | assert_eq!(heap.list.head, Some(block3)); 277 | assert_eq!(heap.list.tail, Some(block3)); 278 | assert_eq!(block3.as_ref().next, None); 279 | assert_eq!(block3.as_ref().prev, None); 280 | 281 | // Insert block1 282 | heap.list.insert(block).expect("unable to insert"); 283 | assert_eq!(heap.list.head, Some(block)); 284 | assert_eq!(heap.list.tail, Some(block3)); 285 | assert_eq!(block.as_ref().next, Some(block3)); 286 | assert_eq!(block.as_ref().prev, None); 287 | assert_eq!(block3.as_ref().next, None); 288 | assert_eq!(block3.as_ref().prev, Some(block)); 289 | } 290 | 291 | #[test] 292 | fn test_insert_merge() { 293 | let mut heap = Heap::new(); 294 | let mut block = unsafe { heap.request(256).expect("unable to request block") }; 295 | let mut block2 = block.shrink(64).expect("unable to split block"); 296 | let block3 = block2.shrink(64).expect("unable to split block"); 297 | 298 | // Insert block1 299 | heap.list.insert(block).expect("unable to insert"); 300 | assert_eq!(heap.list.head, Some(block)); 301 | assert_eq!(heap.list.tail, Some(block)); 302 | assert_eq!(block.as_ref().next, None); 303 | assert_eq!(block.as_ref().prev, None); 304 | assert_eq!(block.size(), 64); 305 | 306 | // Insert block2 307 | heap.list.insert(block2).expect("unable to insert"); 308 | assert_eq!(heap.list.head, Some(block)); 309 | assert_eq!(heap.list.tail, Some(block)); 310 | assert_eq!(block.as_ref().next, None); 311 | assert_eq!(block.as_ref().prev, None); 312 | assert_eq!(block.size(), 64 + BLOCK_META_SIZE + 64); 313 | 314 | // Insert block3 315 | heap.list.insert(block3).expect("unable to insert"); 316 | assert_eq!(heap.list.head, Some(block)); 317 | assert_eq!(heap.list.tail, Some(block)); 318 | assert_eq!(block.as_ref().next, None); 319 | assert_eq!(block.as_ref().prev, None); 320 | assert!(block.size() > 64 + BLOCK_META_SIZE + 64 + BLOCK_META_SIZE); 321 | } 322 | 323 | #[test] 324 | fn test_pop_exact_size() { 325 | let mut heap = Heap::new(); 326 | let mut block = unsafe { heap.request(512).expect("unable to request block") }; 327 | // Block2 imitates a used block. So it will not be added to list 328 | let mut block2 = block.shrink(64).expect("unable to split block"); 329 | let block3 = block2.shrink(64).expect("unable to split block"); 330 | 331 | // Insert block1 332 | heap.list.insert(block).expect("unable to insert"); 333 | // Insert block3 334 | heap.list.insert(block3).expect("unable to insert"); 335 | 336 | let result = heap.list.pop(64).expect("got no block"); 337 | assert_eq!(result, block); 338 | assert_eq!(result.as_ref().next, None); 339 | assert_eq!(result.as_ref().prev, None); 340 | assert_eq!(result.size(), 64); 341 | } 342 | 343 | #[test] 344 | fn test_pop_smaller_size() { 345 | let mut heap = Heap::new(); 346 | let mut block = unsafe { heap.request(512).expect("unable to request block") }; 347 | // Block2 imitates a used block. 
So it will not be added to list 348 | let mut block2 = block.shrink(64).expect("unable to split block"); 349 | let block3 = block2.shrink(64).expect("unable to split block"); 350 | 351 | // Insert block1 352 | heap.list.insert(block).expect("unable to insert"); 353 | // Insert block3 354 | heap.list.insert(block3).expect("unable to insert"); 355 | 356 | let result = heap.list.pop(16).expect("got no block"); 357 | assert_eq!(result, block); 358 | assert_eq!(result.as_ref().next, None); 359 | assert_eq!(result.as_ref().prev, None); 360 | assert_eq!(result.size(), 64); 361 | } 362 | 363 | #[test] 364 | fn test_iter() { 365 | let mut heap = Heap::new(); 366 | let mut block = unsafe { heap.request(256).expect("unable to request block") }; 367 | let mut block2 = block.shrink(64).expect("unable to split block"); 368 | let block3 = block2.shrink(64).expect("unable to split block"); 369 | 370 | // Insert block1 371 | heap.list.insert(block).expect("unable to insert"); 372 | // Insert block3 373 | heap.list.insert(block3).expect("unable to insert"); 374 | 375 | let mut iter = heap.list.iter(); 376 | assert_eq!(iter.next().unwrap(), block); 377 | assert_eq!(iter.next().unwrap(), block3); 378 | assert!(iter.next().is_none()); 379 | } 380 | 381 | #[cfg(feature = "debug")] 382 | #[test] 383 | fn test_debug() { 384 | let mut heap = Heap::new(); 385 | let mut block = unsafe { heap.request(256).expect("unable to request block") }; 386 | // Block2 imitates a used block. So it will not be added to list 387 | let mut block2 = block.shrink(64).expect("unable to split block"); 388 | let block3 = block2.shrink(64).expect("unable to split block"); 389 | 390 | // Insert block1 391 | heap.list.insert(block).expect("unable to insert"); 392 | // Insert block3 393 | heap.list.insert(block3).expect("unable to insert"); 394 | heap.list.debug(); 395 | } 396 | } 397 | -------------------------------------------------------------------------------- /src/alloc/mod.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::{GlobalAlloc, Layout}; 2 | use core::{cmp, intrinsics, ptr::null_mut, ptr::Unique}; 3 | 4 | use libc_print::libc_eprintln; 5 | use spin::Mutex; 6 | 7 | use crate::alloc::block::{BlockPtr, BLOCK_MIN_REGION_SIZE}; 8 | use crate::alloc::heap::Heap; 9 | use crate::util; 10 | 11 | pub mod block; 12 | mod heap; 13 | mod list; 14 | 15 | pub struct Collam { 16 | heap: Mutex<Heap>, 17 | } 18 | 19 | impl Collam { 20 | #[must_use] 21 | pub const fn new() -> Self { 22 | Self { 23 | heap: spin::Mutex::new(Heap::new()), 24 | } 25 | } 26 | 27 | /// Requests and returns suitable empty `BlockPtr`. 28 | #[inline] 29 | fn request_block(&self, size: usize) -> Option<BlockPtr> { 30 | // SAFETY: we know it is thread safe, because we're locking the mutex 31 | unsafe { self.heap.lock().request(size) } 32 | } 33 | 34 | /// Releases the given `BlockPtr` back to the allocator. 35 | #[inline] 36 | fn release_block(&self, block: BlockPtr) { 37 | // SAFETY: we know it is thread safe, because we're locking the mutex 38 | unsafe { self.heap.lock().release(block) } 39 | } 40 | } 41 | 42 | unsafe impl GlobalAlloc for Collam { 43 | /// Allocate memory as described by the given `layout`. 44 | /// 45 | /// Returns a pointer to newly-allocated memory, 46 | /// or null to indicate allocation failure. 47 | /// 48 | /// # Safety 49 | /// 50 | /// This function is unsafe because undefined behavior can result 51 | /// if the caller does not ensure that `layout` has non-zero size.
52 | /// 53 | /// (Extension subtraits might provide more specific bounds on 54 | /// behavior, e.g., guarantee a sentinel address or a null pointer 55 | /// in response to a zero-size allocation request.) 56 | /// 57 | /// The allocated block of memory may or may not be initialized. 58 | /// 59 | /// # Errors 60 | /// 61 | /// Returning a null pointer indicates that either memory is exhausted 62 | /// or `layout` does not meet this allocator's size or alignment constraints. 63 | /// 64 | /// Implementations are encouraged to return null on memory 65 | /// exhaustion rather than aborting, but this is not 66 | /// a strict requirement. (Specifically: it is *legal* to 67 | /// implement this trait atop an underlying native allocation 68 | /// library that aborts on memory exhaustion.) 69 | /// 70 | /// Clients wishing to abort computation in response to an 71 | /// allocation error are encouraged to call the [`handle_alloc_error`] function, 72 | /// rather than directly invoking `panic!` or similar. 73 | /// 74 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 75 | if layout.size() == 0 { 76 | return null_mut(); 77 | } 78 | 79 | let layout = match util::pad_min_align(layout.size()) { 80 | Ok(l) => l, 81 | Err(_) => return null_mut(), 82 | }; 83 | 84 | let size = cmp::max(layout.size(), BLOCK_MIN_REGION_SIZE); 85 | dprintln!("[libcollam.so]: alloc(size={})", size); 86 | let mut block = match self.request_block(size) { 87 | Some(b) => b, 88 | None => { 89 | dprintln!("[libcollam.so]: failed for size: {}\n", layout.size()); 90 | return null_mut(); 91 | } 92 | }; 93 | 94 | if let Some(rem_block) = block.shrink(size) { 95 | self.release_block(rem_block); 96 | } 97 | 98 | dprintln!( 99 | "[libcollam.so]: returning {} at {:p}\n", 100 | block.as_ref(), 101 | block 102 | ); 103 | debug_assert!( 104 | block.size() >= size, 105 | "requested_size={}, got_block={}", 106 | size, 107 | block.as_ref() 108 | ); 109 | block.mem_region().as_ptr() 110 | } 111 | 112 | /// Deallocate the block of memory at the given `ptr` pointer with the given `layout`. 113 | /// 114 | /// # Safety 115 | /// 116 | /// This function is unsafe because undefined behavior can result 117 | /// if the caller does not ensure all of the following: 118 | /// 119 | /// * `ptr` must denote a block of memory currently allocated via 120 | /// this allocator, 121 | /// 122 | /// * `layout` must be the same layout that was used 123 | /// to allocate that block of memory, 124 | unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { 125 | if let Some(p) = Unique::new(ptr) { 126 | dprintln!("[libcollam.so]: dealloc(ptr={:p})", ptr); 127 | 128 | let block = match BlockPtr::from_mem_region(p) { 129 | Some(b) => b, 130 | None => return, 131 | }; 132 | if !block.as_ref().verify() { 133 | eprintln!("free(): Unable to verify {} at {:p}", block.as_ref(), block); 134 | return; 135 | } 136 | // Add freed block back to heap structure. 137 | self.release_block(block) 138 | } 139 | } 140 | 141 | /// Shrink or grow a block of memory to the given `new_size`. 142 | /// The block is described by the given `ptr` pointer and `layout`. 143 | /// 144 | /// If this returns a non-null pointer, then ownership of the memory block 145 | /// referenced by `ptr` has been transferred to this allocator. 146 | /// The memory may or may not have been deallocated, 147 | /// and should be considered unusable (unless of course it was 148 | /// transferred back to the caller again via the return value of 149 | /// this method). 
The new memory block is allocated with `layout`, but 150 | /// with the `size` updated to `new_size`. 151 | /// 152 | /// If this method returns null, then ownership of the memory 153 | /// block has not been transferred to this allocator, and the 154 | /// contents of the memory block are unaltered. 155 | /// 156 | /// # Safety 157 | /// 158 | /// This function is unsafe because undefined behavior can result 159 | /// if the caller does not ensure all of the following: 160 | /// 161 | /// * `ptr` must be currently allocated via this allocator, 162 | /// 163 | /// * `layout` must be the same layout that was used 164 | /// to allocate that block of memory, 165 | /// 166 | /// * `new_size` must be greater than zero. 167 | /// 168 | /// * `new_size`, when rounded up to the nearest multiple of `layout.align()`, 169 | /// must not overflow (i.e., the rounded value must be less than `usize::MAX`). 170 | /// 171 | /// (Extension subtraits might provide more specific bounds on 172 | /// behavior, e.g., guarantee a sentinel address or a null pointer 173 | /// in response to a zero-size allocation request.) 174 | /// 175 | /// # Errors 176 | /// 177 | /// Returns null if the new layout does not meet the size 178 | /// and alignment constraints of the allocator, or if reallocation 179 | /// otherwise fails. 180 | /// 181 | /// Implementations are encouraged to return null on memory 182 | /// exhaustion rather than panicking or aborting, but this is not 183 | /// a strict requirement. (Specifically: it is *legal* to 184 | /// implement this trait atop an underlying native allocation 185 | /// library that aborts on memory exhaustion.) 186 | /// 187 | /// Clients wishing to abort computation in response to a 188 | /// reallocation error are encouraged to call the [`handle_alloc_error`] function, 189 | /// rather than directly invoking `panic!` or similar. 190 | unsafe fn realloc(&self, ptr: *mut u8, _layout: Layout, new_size: usize) -> *mut u8 { 191 | let ptr = match Unique::new(ptr) { 192 | Some(p) => p, 193 | None => return null_mut(), 194 | }; 195 | 196 | dprintln!("[libcollam.so]: realloc(ptr={:p}, size={})", ptr, new_size); 197 | 198 | // FIXME: Alignment to old layout needed? 199 | let new_layout = match util::pad_min_align(new_size) { 200 | Ok(l) => l, 201 | Err(_) => return null_mut(), 202 | }; 203 | 204 | let mut old_block = match BlockPtr::from_mem_region(ptr) { 205 | Some(b) => b, 206 | None => return null_mut(), 207 | }; 208 | 209 | if !old_block.as_ref().verify() { 210 | eprintln!( 211 | "realloc(): Unable to verify {} at {:p}", 212 | old_block.as_ref(), 213 | old_block 214 | ); 215 | return null_mut(); 216 | } 217 | 218 | match new_layout.size().cmp(&old_block.size()) { 219 | cmp::Ordering::Equal => { 220 | // Just return pointer if size didn't change. 221 | ptr.as_ptr() 222 | } 223 | cmp::Ordering::Greater => { 224 | // Allocate new region to fit size. 225 | let new_ptr = self.alloc(new_layout); 226 | let copy_size = cmp::min(new_layout.size(), old_block.size()); 227 | intrinsics::volatile_copy_nonoverlapping_memory(new_ptr, ptr.as_ptr(), copy_size); 228 | // Add old block back to heap structure. 229 | self.release_block(old_block); 230 | new_ptr 231 | } 232 | cmp::Ordering::Less => { 233 | // Shrink allocated block if size is smaller. 
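// The shrink happens in place; any split-off remainder is handed back to the free list below.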
234 | let size = cmp::max(new_layout.size(), BLOCK_MIN_REGION_SIZE); 235 | if let Some(rem_block) = old_block.shrink(size) { 236 | self.release_block(rem_block); 237 | } 238 | ptr.as_ptr() 239 | } 240 | } 241 | } 242 | } 243 | 244 | #[cfg(test)] 245 | mod tests { 246 | use super::*; 247 | use crate::alloc::block::BLOCK_META_SIZE; 248 | use crate::util; 249 | use core::intrinsics::write_bytes; 250 | 251 | #[test] 252 | fn test_collam_alloc_ok() { 253 | unsafe { 254 | let collam = Collam::new(); 255 | let layout = util::pad_min_align(123).expect("unable to align layout"); 256 | let ptr = collam.alloc(layout); 257 | assert!(!ptr.is_null()); 258 | write_bytes(ptr, 1, 123); 259 | collam.dealloc(ptr, layout); 260 | } 261 | } 262 | 263 | #[test] 264 | fn test_collam_alloc_zero_size() { 265 | unsafe { 266 | let collam = Collam::new(); 267 | let layout = util::pad_min_align(0).expect("unable to align layout"); 268 | let ptr = collam.alloc(layout); 269 | assert!(ptr.is_null()); 270 | } 271 | } 272 | 273 | #[test] 274 | fn test_collam_realloc_bigger_size() { 275 | unsafe { 276 | let collam = Collam::new(); 277 | let layout = util::pad_min_align(16).expect("unable to align layout"); 278 | let ptr = collam.alloc(layout); 279 | assert!(!ptr.is_null()); 280 | 281 | let ptr = collam.realloc(ptr, layout, 789); 282 | write_bytes(ptr, 2, 789); 283 | collam.dealloc(ptr, layout); 284 | } 285 | } 286 | 287 | #[test] 288 | fn test_collam_realloc_smaller_size() { 289 | unsafe { 290 | let collam = Collam::new(); 291 | let layout = util::pad_min_align(512).expect("unable to align layout"); 292 | let ptr = collam.alloc(layout); 293 | assert!(!ptr.is_null()); 294 | 295 | let ptr = collam.realloc(ptr, layout, 128); 296 | write_bytes(ptr, 2, 128); 297 | collam.dealloc(ptr, layout); 298 | } 299 | } 300 | 301 | #[test] 302 | fn test_collam_realloc_same_size() { 303 | unsafe { 304 | let collam = Collam::new(); 305 | let layout = util::pad_min_align(512).expect("unable to align layout"); 306 | let ptr = collam.alloc(layout); 307 | assert!(!ptr.is_null()); 308 | let ptr2 = collam.realloc(ptr, layout, 512); 309 | assert!(!ptr2.is_null()); 310 | assert_eq!(ptr, ptr2); 311 | collam.dealloc(ptr, layout); 312 | } 313 | } 314 | 315 | #[test] 316 | fn test_collam_realloc_null() { 317 | unsafe { 318 | let collam = Collam::new(); 319 | let layout = util::pad_min_align(16).expect("unable to align layout"); 320 | let ptr = collam.realloc(null_mut(), layout, 789); 321 | assert_eq!(ptr, null_mut()); 322 | } 323 | } 324 | 325 | #[test] 326 | fn test_collam_dealloc_null() { 327 | unsafe { 328 | let collam = Collam::new(); 329 | let layout = util::pad_min_align(16).expect("unable to align layout"); 330 | collam.dealloc(null_mut(), layout); 331 | } 332 | } 333 | 334 | #[test] 335 | fn test_collam_realloc_memory_corruption() { 336 | unsafe { 337 | let collam = Collam::new(); 338 | let layout = util::pad_min_align(16).expect("unable to align layout"); 339 | let ptr = collam.alloc(layout); 340 | assert!(!ptr.is_null()); 341 | 342 | // Overwrite block metadata to simulate memory corruption 343 | let meta_ptr = ptr.sub(BLOCK_META_SIZE); 344 | meta_ptr.write_bytes(0, BLOCK_META_SIZE); 345 | 346 | // Calling realloc on a corrupt memory region 347 | let ptr = collam.realloc(ptr, layout, 789); 348 | assert!(ptr.is_null()); 349 | 350 | // Calling alloc again. We expect to get a new block, the old memory is leaked. 
351 |             let ptr = collam.alloc(layout);
352 |             assert!(!ptr.is_null());
353 |             collam.dealloc(ptr, layout);
354 |         }
355 |     }
356 | 
357 |     #[test]
358 |     fn test_collam_dealloc_memory_corruption() {
359 |         unsafe {
360 |             let collam = Collam::new();
361 |             let layout = util::pad_min_align(32).expect("unable to align layout");
362 |             let ptr = collam.alloc(layout);
363 |             assert!(!ptr.is_null());
364 | 
365 |             // Overwrite block metadata to simulate memory corruption.
366 |             let meta_ptr = ptr.sub(BLOCK_META_SIZE);
367 |             meta_ptr.write_bytes(0, BLOCK_META_SIZE);
368 |             collam.dealloc(ptr, layout);
369 |         }
370 |     }
371 | }
372 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
 1 | #![feature(core_intrinsics)]
 2 | #![feature(ptr_internals)]
 3 | #![no_std]
 4 | 
 5 | //#![warn(clippy::pedantic)]
 6 | 
 7 | #[macro_use]
 8 | extern crate lazy_static;
 9 | extern crate libc;
10 | extern crate libc_print;
11 | extern crate spin;
12 | 
13 | #[cfg(test)]
14 | #[macro_use]
15 | extern crate std;
16 | 
17 | #[allow(unused_imports)]
18 | use libc_print::libc_eprintln;
19 | 
20 | mod macros;
21 | 
22 | pub mod alloc;
23 | mod sources;
24 | mod util;
25 | 
26 | #[cfg(any(
27 |     target_arch = "arm",
28 |     target_arch = "mips",
29 |     target_arch = "mipsel",
30 |     target_arch = "powerpc"
31 | ))]
32 | pub const MIN_ALIGN: usize = 8;
33 | #[cfg(any(
34 |     target_arch = "x86",
35 |     target_arch = "x86_64",
36 |     target_arch = "aarch64",
37 |     target_arch = "powerpc64",
38 |     target_arch = "powerpc64le",
39 |     target_arch = "mips64",
40 |     target_arch = "s390x",
41 |     target_arch = "sparc64"
42 | ))]
43 | pub const MIN_ALIGN: usize = 16;
44 | 
--------------------------------------------------------------------------------
/src/macros.rs:
--------------------------------------------------------------------------------
 1 | #![macro_use]
 2 | 
 3 | #[macro_export]
 4 | macro_rules! debug_assert {
 5 |     ($($arg:tt)*) => (if cfg!(feature = "debug") { assert!($($arg)*); })
 6 | }
 7 | 
 8 | #[macro_export]
 9 | macro_rules! debug_assert_eq {
10 |     ($($arg:tt)*) => (if cfg!(feature = "debug") { assert_eq!($($arg)*); })
11 | }
12 | 
13 | #[macro_export]
14 | macro_rules! debug_assert_ne {
15 |     ($($arg:tt)*) => (if cfg!(feature = "debug") { assert_ne!($($arg)*); })
16 | }
17 | 
18 | #[macro_export]
19 | macro_rules! println {
20 |     ($($arg:tt)*) => {
21 |         libc_eprintln!($($arg)*)
22 |     };
23 | }
24 | 
25 | #[macro_export]
26 | macro_rules! dprintln {
27 |     ($($arg:tt)*) => {
28 |         if cfg!(feature = "debug") {
29 |             println!($($arg)*)
30 |         }
31 |     };
32 | }
33 | 
34 | #[macro_export]
35 | macro_rules! eprintln {
36 |     ($($arg:tt)*) => {
37 |         println!($($arg)*)
38 |     };
39 | }
40 | 
--------------------------------------------------------------------------------
/src/sources.rs:
--------------------------------------------------------------------------------
 1 | use core::convert::TryFrom;
 2 | use core::ptr::Unique;
 3 | 
 4 | use crate::alloc::block::{BlockPtr, BLOCK_META_SIZE};
 5 | use crate::util;
 6 | 
 7 | use libc_print::libc_eprintln;
 8 | 
 9 | lazy_static! {
10 |     static ref PAGE_SIZE: usize =
11 |         usize::try_from(unsafe { libc::sysconf(libc::_SC_PAGESIZE) }).unwrap();
12 | }
13 | 
14 | pub trait MemorySource {
15 |     /// Requests memory for at least the specified size from the memory source.
16 |     unsafe fn request(&self, size: usize) -> Option<BlockPtr>;
17 |     /// Releases the given `BlockPtr` back to the memory source.
18 |     /// Returns `true` if the block has been released, `false` otherwise.
19 |     unsafe fn release(&mut self, block: BlockPtr) -> bool;
20 | }
21 | 
22 | /// Defines the data segment as a memory source.
23 | /// Makes use of brk(2).
24 | pub struct DataSegment;
25 | 
26 | impl DataSegment {
27 |     /// Thin wrapper around sbrk(2), which grows or shrinks the data segment.
28 |     ///
29 |     /// # Safety
30 |     ///
31 |     /// Function is not thread safe.
32 |     #[inline]
33 |     unsafe fn sbrk(size: isize) -> Option<Unique<u8>> {
34 |         let ptr = libc::sbrk(size) as *mut u8;
35 |         if ptr == -1_isize as *mut u8 {
36 |             None
37 |         } else {
38 |             Unique::new(ptr)
39 |         }
40 |     }
41 | }
42 | 
43 | impl MemorySource for DataSegment {
44 |     /// # Safety
45 |     ///
46 |     /// Function is not thread safe.
47 |     unsafe fn request(&self, size: usize) -> Option<BlockPtr> {
48 |         let size = util::pad_to_align(BLOCK_META_SIZE + size, *PAGE_SIZE)
49 |             .ok()?
50 |             .size();
51 |         debug_assert!(size > BLOCK_META_SIZE);
52 |         let offset = isize::try_from(size).expect("cannot calculate sbrk offset");
53 |         Some(BlockPtr::new(Self::sbrk(offset)?, size - BLOCK_META_SIZE))
54 |     }
55 | 
56 |     /// # Safety
57 |     ///
58 |     /// Function is not thread safe.
59 |     unsafe fn release(&mut self, block: BlockPtr) -> bool {
60 |         let brk = Self::sbrk(0).expect("sbrk(0) failed!").as_ptr();
61 |         if block.next_potential_block().as_ptr() != brk {
62 |             return false;
63 |         }
64 | 
65 |         let offset = isize::try_from(block.block_size()).expect("cannot calculate sbrk offset");
66 |         dprintln!(
67 |             "[DataSegment]: freeing {} bytes from process (break={:?})",
68 |             offset,
69 |             Self::sbrk(0).expect("sbrk(0) failed!").as_ptr()
70 |         );
71 |         // TODO: remove expect
72 |         Self::sbrk(-offset).expect("sbrk failed");
73 |         true
74 |     }
75 | }
76 | 
77 | #[cfg(test)]
78 | mod tests {
79 |     use super::*;
80 | 
81 |     #[test]
82 |     fn test_sbrk_ok() {
83 |         unsafe { assert!(DataSegment::sbrk(0).is_some()) };
84 |     }
85 | 
86 |     #[test]
87 |     fn test_sbrk_err() {
88 |         unsafe {
89 |             assert!(DataSegment::sbrk(isize::min_value()).is_none());
90 |         }
91 |     }
92 | }
93 | 
--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
 1 | use crate::MIN_ALIGN;
 2 | use core::alloc::{Layout, LayoutErr};
 3 | 
 4 | /// Rounds the passed value up to the nearest multiple of `MIN_ALIGN` and returns it.
 5 | /// NOTE: not checked for overflows!
 6 | #[inline]
 7 | pub const fn min_align_unchecked(val: usize) -> usize {
 8 |     (val + MIN_ALIGN - 1) & !(MIN_ALIGN - 1)
 9 | }
10 | 
11 | /// Returns a `Layout` padded to `MIN_ALIGN`.
12 | #[inline]
13 | pub fn pad_min_align(size: usize) -> Result<Layout, LayoutErr> {
14 |     pad_to_align(size, MIN_ALIGN)
15 | }
16 | 
17 | /// Returns a `Layout` padded to `align`.
18 | #[inline]
19 | pub fn pad_to_align(size: usize, align: usize) -> Result<Layout, LayoutErr> {
20 |     Ok(Layout::from_size_align(size, align)?.pad_to_align())
21 | }
22 | 
23 | #[cfg(test)]
24 | mod tests {
25 |     use super::*;
26 | 
27 |     #[test]
28 |     fn test_min_align_unchecked() {
29 |         for val in [0, 5, 491, 5910, 15290, 501920].iter() {
30 |             assert_eq!(min_align_unchecked(*val) % MIN_ALIGN, 0);
31 |         }
32 |     }
33 | 
34 |     #[test]
35 |     fn test_pad_to_align_ok() {
36 |         let align = 4096;
37 |         for val in [0, 5, 491, 5910, 15290, 501920].iter() {
38 |             let layout = pad_to_align(*val, align).expect("unable to align");
39 |             assert_eq!(layout.size() % align, 0);
40 |         }
41 |     }
42 | 
43 |     #[test]
44 |     fn test_pad_to_align_err() {
45 |         assert!(pad_to_align(usize::max_value() - 12, 4096).is_err());
46 |     }
47 | 
48 |     #[test]
49 |     fn test_pad_min_align_ok() {
50 |         for val in [0, 5, 491, 5910, 15290, 501920].iter() {
51 |             let layout = pad_min_align(*val).expect("unable to align");
52 |             assert_eq!(layout.size() % MIN_ALIGN, 0);
53 |         }
54 |     }
55 | 
56 |     #[test]
57 |     fn test_pad_min_align_err() {
58 |         assert!(pad_min_align(usize::max_value() - 14).is_err());
59 |     }
60 | }
61 | 
--------------------------------------------------------------------------------
/test.c:
--------------------------------------------------------------------------------
 1 | #include <stdio.h>
 2 | #include <stdlib.h>
 3 | #include <string.h>
 4 | #include <time.h>
 5 | #include <unistd.h>
 6 | 
 7 | 
 8 | /* returns an array of arrays of char*, all of which NULL */
 9 | char ***alloc_matrix(unsigned rows, unsigned columns) {
10 |     char ***matrix = malloc(rows * sizeof(char **));
11 |     unsigned row = 0;
12 |     unsigned column = 0;
13 |     if (!matrix) abort();
14 | 
15 |     for (row = 0; row < rows; row++) {
16 |         matrix[row] = calloc(columns, sizeof(char *));
17 |         if (!matrix[row]) abort();
18 |         for (column = 0; column < columns; column++) {
19 |             matrix[row][column] = NULL;
20 |         }
21 |     }
22 |     return matrix;
23 | }
24 | 
25 | /* deallocates an array of arrays of char*, calling free() on each */
26 | void free_matrix(char ***matrix, unsigned rows, unsigned columns) {
27 |     unsigned row = 0;
28 |     unsigned column = 0;
29 |     for (row = 0; row < rows; row++) {
30 |         for (column = 0; column < columns; column++) {
31 |             //printf("column %d row %d\n", column, row);
32 |             free(matrix[row][column]);
33 |         }
34 |         free(matrix[row]);
35 |     }
36 |     free(matrix);
37 | }
38 | 
39 | 
40 | int main(int argc, char **argv) {
41 |     int i;
42 |     srand(time(NULL));
43 |     int randomnumber;
44 |     int size = 1024;
45 |     void *p[size];
46 |     for (i = 0; i < size; i++) {
47 |         randomnumber = rand() % 10;
48 |         p[i] = malloc(1024 * 1024 * randomnumber);
49 |     }
50 | 
51 |     for (i = size-1; i >= 0; i--) {
52 |         free(p[i]);
53 |     }
54 | 
55 |     int x = 1024;
56 |     char *** matrix = alloc_matrix(x, x);
57 |     free_matrix(matrix, x, x);
58 |     return (0);
59 | }
--------------------------------------------------------------------------------
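
To make the `realloc` contract implemented above concrete, here is a minimal usage sketch, written as if it were another test inside the allocator's existing `mod tests` (the test name is hypothetical, and `util::pad_min_align` is only reachable from within the crate since `util` is a private module):

```rust
// A minimal sketch of the realloc contract: grow copies and releases the old
// block, shrink splits in place and returns the same pointer. Sizes are
// arbitrary illustration values.
#[test]
fn test_collam_realloc_contract_sketch() {
    unsafe {
        let collam = Collam::new();
        let layout = util::pad_min_align(64).expect("unable to align layout");
        let ptr = collam.alloc(layout);
        assert!(!ptr.is_null());

        // Growing: the old contents are copied into the new region; a null
        // return would mean the old block was left untouched.
        let grown = collam.realloc(ptr, layout, 256);
        assert!(!grown.is_null());

        // Shrinking: the block is split in place, the trailing remainder is
        // released back to the heap structure, and the pointer is unchanged.
        let shrunk = collam.realloc(grown, layout, 32);
        assert_eq!(grown, shrunk);

        collam.dealloc(shrunk, layout);
    }
}
```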
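
The bit trick in `util::min_align_unchecked` only works because `MIN_ALIGN` is a power of two: adding `align - 1` and masking off the low bits rounds up to the next multiple. The following standalone sketch demonstrates the arithmetic; `ALIGN` is an illustrative stand-in for the crate's `MIN_ALIGN`, and the overflow-checked variant is hypothetical (the crate only ships the unchecked helper):

```rust
const ALIGN: usize = 16; // stand-in for MIN_ALIGN; any power of two works

/// Rounds `val` up to the next multiple of `ALIGN`, exactly like the
/// unchecked helper in src/util.rs. Overflows for values near usize::MAX.
const fn align_up_unchecked(val: usize) -> usize {
    (val + ALIGN - 1) & !(ALIGN - 1)
}

/// Hypothetical overflow-checked variant: returns `None` instead of wrapping.
fn align_up_checked(val: usize) -> Option<usize> {
    val.checked_add(ALIGN - 1).map(|v| v & !(ALIGN - 1))
}

fn main() {
    assert_eq!(align_up_unchecked(0), 0);
    assert_eq!(align_up_unchecked(1), 16);  // 1 + 15 = 16, masked down to 16
    assert_eq!(align_up_unchecked(16), 16); // already aligned, unchanged
    assert_eq!(align_up_unchecked(17), 32);
    assert_eq!(align_up_checked(usize::max_value()), None); // would overflow
}
```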
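
`DataSegment::request` in src/sources.rs pads `BLOCK_META_SIZE + size` to a whole number of pages and hands back everything except the metadata header. A small worked example of that size computation, assuming a 4096-byte page and a hypothetical 16-byte `BLOCK_META_SIZE` (the real values come from `sysconf(_SC_PAGESIZE)` and src/alloc/block.rs):

```rust
const PAGE_SIZE: usize = 4096;      // assumed; queried via sysconf at runtime
const BLOCK_META_SIZE: usize = 16;  // hypothetical; defined in src/alloc/block.rs

/// Mirrors the size computation in DataSegment::request.
fn usable_size(requested: usize) -> usize {
    // Pad metadata + payload up to a whole number of pages...
    let padded = (BLOCK_META_SIZE + requested + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
    // ...then hand everything except the metadata header to the caller.
    padded - BLOCK_META_SIZE
}

fn main() {
    // Requesting 100 bytes grows the heap by one page: 4096 - 16 usable bytes.
    assert_eq!(usable_size(100), 4080);
    // Requesting exactly one page spills into a second page (16 + 4096 > 4096).
    assert_eq!(usable_size(4096), 2 * 4096 - 16);
}
```

`release`, by contrast, can only succeed for the block that ends exactly at the current program break: sbrk(2) can only shrink the data segment from the top, which is why it compares `next_potential_block()` against `sbrk(0)` before giving memory back.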