├── .gitignore
├── lol_alloc
│   ├── .cargo
│   │   └── config.toml
│   ├── Cargo.toml
│   ├── src
│   │   ├── locked_allocator.rs
│   │   ├── single_threaded_allocator.rs
│   │   ├── lib.rs
│   │   ├── trivial_allocators.rs
│   │   └── free_list_allocator.rs
│   └── tests
│       └── wasm.rs
├── .vscode
│   ├── settings.json
│   └── launch.json
├── Cargo.toml
├── test.sh
├── example
│   ├── Cargo.toml
│   └── src
│       └── lib.rs
├── .github
│   └── workflows
│       └── rust.yml
├── LICENSE
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /Cargo.lock
3 | 
--------------------------------------------------------------------------------
/lol_alloc/.cargo/config.toml:
--------------------------------------------------------------------------------
1 | [build]
2 | target = "wasm32-unknown-unknown"
3 | 
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |     "rust-analyzer.cargo.target": "wasm32-unknown-unknown",
3 | }
4 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = ["./lol_alloc", "./example"] # , "./test", "./example"
3 | resolver = "2"
4 | 
5 | [profile.release]
6 | lto = true
7 | # debug = true
8 | opt-level = "z"
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 | 
4 | cargo test
5 | wasm-pack test --node lol_alloc
6 | wasm-pack build --release example
7 | 
8 | wc -c example/pkg/lol_alloc_example_bg.wasm
--------------------------------------------------------------------------------
/example/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "lol_alloc_example"
3 | description = "Example use of lol_alloc"
4 | version = "0.1.0"
5 | edition = "2021"
6 | repository = "https://github.com/Craig-Macomber/lol_alloc"
7 | license = "MIT"
8 | 
9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
10 | 
11 | [lib]
12 | crate-type = ["cdylib"]
13 | 
14 | [dependencies.lol_alloc]
15 | path = "../lol_alloc"
16 | 
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
1 | name: Rust
2 | 
3 | on:
4 |   push:
5 |     branches: ["main"]
6 |   pull_request:
7 |     branches: ["main"]
8 | 
9 | env:
10 |   CARGO_TERM_COLOR: always
11 | 
12 | jobs:
13 |   build:
14 |     runs-on: ubuntu-latest
15 | 
16 |     steps:
17 |       - uses: actions/checkout@v3
18 |       - name: Build
19 |         run: cargo build --verbose
20 |       - name: Run tests
21 |         run: cargo test --verbose
22 |       - name: Install wasm-pack
23 |         run: cargo install wasm-pack
24 |       - name: Full tests
25 |         run: ./test.sh
26 | 
--------------------------------------------------------------------------------
/example/src/lib.rs:
--------------------------------------------------------------------------------
1 | extern crate alloc;
2 | 
3 | #[cfg(target_arch = "wasm32")]
4 | use lol_alloc::{FreeListAllocator, LockedAllocator};
5 | 
6 | #[cfg(target_arch = "wasm32")]
7 | #[global_allocator]
8 | static ALLOCATOR: LockedAllocator<FreeListAllocator> =
9 |     LockedAllocator::new(FreeListAllocator::new());
10 | use alloc::boxed::Box;
11 | 
12 | // Box a `u8`!
13 | #[no_mangle]
14 | pub extern "C" fn hello() -> *mut u8 {
15 |     Box::into_raw(Box::new(42))
16 | }
17 | 
18 | /// Free a `Box` that we allocated earlier!
19 | /// # Safety
20 | /// `ptr` must be a pointer from `hello` which is used exactly once.
21 | #[no_mangle]
22 | pub unsafe extern "C" fn goodbye(ptr: *mut u8) {
23 |     let _ = Box::from_raw(ptr);
24 | }
25 | 
--------------------------------------------------------------------------------
/lol_alloc/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "lol_alloc"
3 | description = "lol_alloc: A laughably simple wasm global_allocator."
4 | readme = "../README.md"
5 | version = "0.4.1"
6 | edition = "2021"
7 | repository = "https://github.com/Craig-Macomber/lol_alloc"
8 | license = "MIT"
9 | categories = ["memory-management", "web-programming", "no-std", "wasm"]
10 | 
11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
12 | 
13 | [dependencies]
14 | spin = "0.9.8"
15 | 
16 | [dev-dependencies]
17 | wasm-bindgen-test = "0.3.0"
18 | rand_pcg = "0.3.1"
19 | rand_core = "0.6.3"
20 | rand = "0.8.5"
21 | getrandom = { version = "0.2", features = ["js"] }
22 | 
23 | [package.metadata.docs.rs]
24 | targets = ["wasm32-unknown-unknown"]
25 | 
--------------------------------------------------------------------------------
/lol_alloc/src/locked_allocator.rs:
--------------------------------------------------------------------------------
1 | use core::alloc::{GlobalAlloc, Layout};
2 | 
3 | /// A thread safe allocator created by wrapping a (possibly not thread-safe) allocator in a spin-lock.
4 | pub struct LockedAllocator<T> {
5 |     spin: spin::Mutex<T>,
6 | }
7 | 
8 | impl<T> LockedAllocator<T> {
9 |     pub const fn new(t: T) -> Self {
10 |         LockedAllocator {
11 |             spin: spin::Mutex::new(t),
12 |         }
13 |     }
14 | }
15 | 
16 | unsafe impl<T: GlobalAlloc> GlobalAlloc for LockedAllocator<T> {
17 |     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
18 |         self.spin.lock().alloc(layout)
19 |     }
20 | 
21 |     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
22 |         self.spin.lock().dealloc(ptr, layout);
23 |     }
24 | }
25 | 
--------------------------------------------------------------------------------
/lol_alloc/tests/wasm.rs:
--------------------------------------------------------------------------------
1 | #![cfg(target_arch = "wasm32")]
2 | 
3 | use std::mem::swap;
4 | 
5 | use lol_alloc::{FreeListAllocator, LockedAllocator};
6 | use wasm_bindgen_test::*;
7 | 
8 | #[global_allocator]
9 | static ALLOCATOR: LockedAllocator<FreeListAllocator> =
10 |     LockedAllocator::new(FreeListAllocator::new());
11 | 
12 | #[wasm_bindgen_test]
13 | fn minimal() {
14 |     drop(Box::new(1));
15 | }
16 | 
17 | #[wasm_bindgen_test]
18 | fn small_allocations() {
19 |     let a = Box::new(1);
20 |     let b = Box::new(2);
21 |     assert_eq!(*a, 1);
22 |     assert_eq!(*b, 2);
23 | }
24 | 
25 | #[wasm_bindgen_test]
26 | fn many_allocations() {
27 |     let mut v = vec![];
28 |     for i in 0..10000 {
29 |         v.push(Box::new(i));
30 |     }
31 |     for b in &mut v {
32 |         swap(b, &mut Box::new(0));
33 |     }
34 |     v.reserve(1000000);
35 |     drop(v);
36 | }
37 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2022 Craig
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/lol_alloc/src/single_threaded_allocator.rs:
--------------------------------------------------------------------------------
1 | use core::alloc::{GlobalAlloc, Layout};
2 | 
3 | /// A non-thread-safe allocator created by wrapping an allocator in a `Sync` implementation that assumes all use is from the same thread.
4 | /// Using this (and thus defeating Rust's thread safety checking) is useful due to global allocators having to be stored in statics,
5 | /// which requires `Sync` even in single threaded applications.
6 | pub struct AssumeSingleThreaded<T> {
7 |     inner: T,
8 | }
9 | 
10 | impl<T> AssumeSingleThreaded<T> {
11 |     /// Converts a potentially non-`Sync` allocator into a `Sync` one by assuming it will only be used by one thread.
12 |     ///
13 |     /// # Safety
14 |     ///
15 |     /// The caller must ensure that the returned value is only accessed by a single thread.
16 |     pub const unsafe fn new(t: T) -> Self {
17 |         AssumeSingleThreaded { inner: t }
18 |     }
19 | }
20 | 
21 | /// This is an invalid implementation of Sync.
22 | /// AssumeSingleThreaded must not actually be used from multiple threads concurrently.
23 | unsafe impl<T> Sync for AssumeSingleThreaded<T> {}
24 | 
25 | unsafe impl<T: GlobalAlloc> GlobalAlloc for AssumeSingleThreaded<T> {
26 |     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
27 |         self.inner.alloc(layout)
28 |     }
29 | 
30 |     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
31 |         self.inner.dealloc(ptr, layout);
32 |     }
33 | }
34 | 
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 |     // Use IntelliSense to learn about possible attributes.
3 |     // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "lldb", 9 | "request": "launch", 10 | "name": "Debug unit tests in library 'lol_alloc'", 11 | "cargo": { 12 | "args": [ 13 | "test", 14 | "--no-run", 15 | "--lib", 16 | "--package=lol_alloc" 17 | ], 18 | "filter": { 19 | "name": "lol_alloc", 20 | "kind": "lib" 21 | } 22 | }, 23 | "args": [], 24 | "cwd": "${workspaceFolder}" 25 | }, 26 | { 27 | "type": "lldb", 28 | "request": "launch", 29 | "name": "Debug integration test 'wasm'", 30 | "cargo": { 31 | "args": [ 32 | "test", 33 | "--no-run", 34 | "--test=wasm", 35 | "--package=lol_alloc" 36 | ], 37 | "filter": { 38 | "name": "wasm", 39 | "kind": "test" 40 | } 41 | }, 42 | "args": [], 43 | "cwd": "${workspaceFolder}" 44 | } 45 | ] 46 | } -------------------------------------------------------------------------------- /lol_alloc/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | #[cfg(test)] 4 | #[macro_use] 5 | extern crate alloc; 6 | 7 | extern crate spin; 8 | 9 | /// A number of WebAssembly memory pages. 10 | #[derive(Eq, PartialEq)] 11 | struct PageCount(usize); 12 | 13 | impl PageCount { 14 | fn size_in_bytes(self) -> usize { 15 | self.0 * PAGE_SIZE 16 | } 17 | } 18 | 19 | /// The WebAssembly page size, in bytes. 20 | const PAGE_SIZE: usize = 65536; 21 | 22 | /// Invalid number of pages used to indicate out of memory errors. 23 | const ERROR_PAGE_COUNT: PageCount = PageCount(usize::MAX); 24 | 25 | /// Wrapper for core::arch::wasm::memory_grow. 26 | /// Adding this level of indirection allows for improved testing, 27 | /// especially on non wasm platforms. 28 | trait MemoryGrower { 29 | /// See core::arch::wasm::memory_grow for semantics. 30 | fn memory_grow(&self, delta: PageCount) -> PageCount; 31 | } 32 | 33 | /// Stateless heap grower. 34 | /// On wasm32, provides a default implementation of [MemoryGrower]. 35 | pub struct DefaultGrower; 36 | 37 | #[cfg(target_arch = "wasm32")] 38 | impl MemoryGrower for DefaultGrower { 39 | fn memory_grow(&self, delta: PageCount) -> PageCount { 40 | // This should use `core::arch::wasm` instead of `core::arch::wasm32`, 41 | // but `core::arch::wasm` depends on `#![feature(simd_wasm64)]` on current nightly. 42 | // See https://github.com/Craig-Macomber/lol_alloc/issues/1 43 | PageCount(core::arch::wasm32::memory_grow(0, delta.0)) 44 | } 45 | } 46 | 47 | mod free_list_allocator; 48 | mod locked_allocator; 49 | mod single_threaded_allocator; 50 | mod trivial_allocators; 51 | #[cfg(target_arch = "wasm32")] 52 | pub use crate::free_list_allocator::FreeListAllocator; 53 | pub use crate::locked_allocator::LockedAllocator; 54 | pub use crate::single_threaded_allocator::AssumeSingleThreaded; 55 | pub use crate::trivial_allocators::FailAllocator; 56 | #[cfg(target_arch = "wasm32")] 57 | pub use crate::trivial_allocators::{LeakingAllocator, LeakingPageAllocator}; 58 | -------------------------------------------------------------------------------- /lol_alloc/src/trivial_allocators.rs: -------------------------------------------------------------------------------- 1 | use crate::{DefaultGrower, MemoryGrower, PageCount, ERROR_PAGE_COUNT, PAGE_SIZE}; 2 | use core::{ 3 | alloc::{GlobalAlloc, Layout}, 4 | cell::UnsafeCell, 5 | ptr::null_mut, 6 | }; 7 | 8 | /// Allocator that fails all allocations. 
9 | pub struct FailAllocator;
10 | 
11 | unsafe impl GlobalAlloc for FailAllocator {
12 |     unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
13 |         null_mut()
14 |     }
15 | 
16 |     unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
17 | }
18 | 
19 | /// Allocator that allocates whole pages for each allocation.
20 | /// Very wasteful for small allocations.
21 | /// Does not free or reuse memory.
22 | #[cfg(target_arch = "wasm32")]
23 | pub struct LeakingPageAllocator;
24 | 
25 | #[cfg(target_arch = "wasm32")]
26 | unsafe impl GlobalAlloc for LeakingPageAllocator {
27 |     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
28 |         // This assumes PAGE_SIZE is always a multiple of the required alignment, which should be true for all practical use.
29 |         debug_assert!(PAGE_SIZE % layout.align() == 0);
30 | 
31 |         let requested_pages = (layout.size() + PAGE_SIZE - 1) / PAGE_SIZE;
32 |         let previous_page_count = DefaultGrower.memory_grow(PageCount(requested_pages));
33 |         if previous_page_count == ERROR_PAGE_COUNT {
34 |             return null_mut();
35 |         }
36 | 
37 |         let ptr = previous_page_count.size_in_bytes() as *mut u8;
38 |         debug_assert!(ptr.align_offset(layout.align()) == 0);
39 |         ptr
40 |     }
41 | 
42 |     unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
43 | }
44 | 
45 | /// A non-thread-safe bump-pointer allocator.
46 | /// Does not free or reuse memory.
47 | /// Efficient for small allocations.
48 | /// Does tolerate concurrent callers of wasm::memory_grow,
49 | /// but not concurrent use of this allocator.
50 | pub struct LeakingAllocator<T = DefaultGrower> {
51 |     used: UnsafeCell<usize>, // bytes
52 |     size: UnsafeCell<usize>, // bytes
53 |     grower: T,
54 | }
55 | 
56 | #[cfg(target_arch = "wasm32")]
57 | impl LeakingAllocator<DefaultGrower> {
58 |     pub const fn new() -> Self {
59 |         LeakingAllocator {
60 |             used: UnsafeCell::new(0),
61 |             size: UnsafeCell::new(0),
62 |             grower: DefaultGrower,
63 |         }
64 |     }
65 | }
66 | 
67 | unsafe impl<T: MemoryGrower> GlobalAlloc for LeakingAllocator<T> {
68 |     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
69 |         let size: &mut usize = &mut *self.size.get();
70 |         let used: &mut usize = &mut *self.used.get();
71 |         // This assumes PAGE_SIZE is always a multiple of the required alignment, which should be true for all practical use.
72 |         // If this is not true, this could go past size.
73 |         let alignment = layout.align();
74 |         let offset = *used % alignment;
75 |         if offset != 0 {
76 |             *used += alignment - offset;
77 |         }
78 | 
79 |         let requested_size = layout.size();
80 |         let new_total = *used + requested_size;
81 |         if new_total > *size {
82 |             // Request enough new space for this allocation, even if we have some space left over from the last one, in case they end up non-contiguous.
83 |             // Round up to a number of pages.
84 |             let requested_pages = (requested_size + PAGE_SIZE - 1) / PAGE_SIZE;
85 |             let previous_page_count = self.grower.memory_grow(PageCount(requested_pages));
86 |             if previous_page_count == ERROR_PAGE_COUNT {
87 |                 return null_mut();
88 |             }
89 | 
90 |             let previous_size = previous_page_count.size_in_bytes();
91 |             if previous_size != *size {
92 |                 // New memory is not contiguous with old: something else allocated in-between.
93 |                 // TODO: is handling this case necessary? Maybe make it optional behind a feature?
94 |                 // This assumes PAGE_SIZE is always a multiple of the required alignment, which should be true for all practical use.
95 |                 *used = previous_size;
96 |                 // TODO: in free mode, have a minimum alignment that `used` is rounded up to (maxed with `alignment`) so we can ensure there is either:
97 |                 // 1. no space at the end of the page
98 |                 // 2. enough space we can add it to the free list
99 |             }
100 |             *size = previous_size + requested_pages * PAGE_SIZE;
101 |         }
102 | 
103 |         let start = *used;
104 |         *used += requested_size;
105 |         start as *mut u8
106 |     }
107 | 
108 |     unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
109 | }
110 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # lol_alloc
2 | 
3 | A laughably simple wasm global_allocator.
4 | 
5 | Like [wee_alloc](https://github.com/rustwasm/wee_alloc), but smaller since I used skinnier letters in the name.
6 | 
7 | `lol_alloc` is a collection of simple wasm `global_allocator`s.
8 | 
9 | I wrote `lol_alloc` to learn about allocators (I hadn't written one before) and because `wee_alloc` [seems unmaintained](https://github.com/rustwasm/wee_alloc/issues/107) and [has a leak](https://github.com/rustwasm/wee_alloc/issues/106).
10 | After looking at `wee_alloc`'s implementation (which I failed to understand or fix), I wanted to find out how hard it really is to make a wasm global_allocator, and it seemed like providing one could be useful to the rust wasm community.
11 | 
12 | # Usage
13 | 
14 | You can replace the `global_allocator` with `LockedAllocator` for `wasm32` builds using:
15 | 
16 | ```rust
17 | extern crate alloc;
18 | 
19 | #[cfg(target_arch = "wasm32")]
20 | use lol_alloc::{FreeListAllocator, LockedAllocator};
21 | 
22 | #[cfg(target_arch = "wasm32")]
23 | #[global_allocator]
24 | static ALLOCATOR: LockedAllocator<FreeListAllocator> = LockedAllocator::new(FreeListAllocator::new());
25 | ```
26 | 
27 | For slightly smaller file size and slightly better performance, single threaded WASM applications can use `AssumeSingleThreaded` instead of `LockedAllocator`:
28 | 
29 | ```rust
30 | extern crate alloc;
31 | 
32 | use lol_alloc::{AssumeSingleThreaded, FreeListAllocator};
33 | 
34 | // SAFETY: This application is single threaded, so using AssumeSingleThreaded is allowed.
35 | #[global_allocator]
36 | static ALLOCATOR: AssumeSingleThreaded<FreeListAllocator> =
37 |     unsafe { AssumeSingleThreaded::new(FreeListAllocator::new()) };
38 | ```
39 | 
40 | Applications which do not need any allocator can use `FailAllocator`:
41 | 
42 | ```rust
43 | extern crate alloc;
44 | 
45 | #[cfg(target_arch = "wasm32")]
46 | use lol_alloc::FailAllocator;
47 | 
48 | #[cfg(target_arch = "wasm32")]
49 | #[global_allocator]
50 | static ALLOCATOR: FailAllocator = FailAllocator;
51 | ```
52 | 
53 | Applications which only do a small, bounded number of allocations and thus don't require freeing can use one of the leaking allocators.
54 | `LeakingPageAllocator` (shown below), `AssumeSingleThreaded<LeakingAllocator>` and `LockedAllocator<LeakingAllocator>` are the best options for this case:
55 | 
56 | ```rust
57 | extern crate alloc;
58 | 
59 | #[cfg(target_arch = "wasm32")]
60 | use lol_alloc::LeakingPageAllocator;
61 | 
62 | #[cfg(target_arch = "wasm32")]
63 | #[global_allocator]
64 | static ALLOCATOR: LeakingPageAllocator = LeakingPageAllocator;
65 | ```
66 | 
67 | # Thread Safety
68 | 
69 | `LeakingAllocator` and `FreeListAllocator` are NOT `Sync` and must be wrapped in either `LockedAllocator` or the unsafe `AssumeSingleThreaded` to assign to a static (this is enforced by the Rust type system).
70 | Multithreading is possible in wasm these days: do not use `AssumeSingleThreaded` unless you are confident that all allocations and freeing will happen from a single thread.
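As a quick illustration of that type-system enforcement, the following sketch (based on the usage examples above, not an additional API) shows what the compiler rejects and accepts:

```rust
extern crate alloc;

#[cfg(target_arch = "wasm32")]
use lol_alloc::{FreeListAllocator, LockedAllocator};

// Rejected: `FreeListAllocator` is not `Sync`, and a `static` must be `Sync`.
// #[cfg(target_arch = "wasm32")]
// #[global_allocator]
// static REJECTED: FreeListAllocator = FreeListAllocator::new();

// Accepted: the spin-lock wrapper is `Sync`.
#[cfg(target_arch = "wasm32")]
#[global_allocator]
static ALLOCATOR: LockedAllocator<FreeListAllocator> =
    LockedAllocator::new(FreeListAllocator::new());
```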
71 | 
72 | `FailAllocator` and `LeakingPageAllocator` are thread-safe and do not need any wrapping.
73 | 
74 | # Status
75 | 
76 | A few projects have apparently used this library, and there have been no reported issues (none reported success either, so use at your own risk).
77 | 
78 | `FreeListAllocator` has a pretty good test suite; the rest of the allocators are trivial and have had at least minimal testing.
79 | 
80 | If you use it, please report any bugs.
81 | If it actually works for you, also let me know (you can post an issue with your report).
82 | 
83 | Sizes of the allocators, including overhead, measured from the example (compiled with rustc 1.65.0 and wasm-pack 0.10.3):
84 | 
85 | - `FailAllocator`: 195 bytes: errors on allocations. Operations are O(1).
86 | - `LeakingPageAllocator`: 230 bytes: Allocates pages for each allocation. Operations are O(1).
87 | - `LeakingAllocator`: Bump pointer allocator, growing the heap as needed and does not reuse/free memory. Operations are O(1). No allocation space overhead other than for alignment.
88 |   - `AssumeSingleThreaded`: 356 bytes.
89 |   - `LockedAllocator`: 484 bytes.
90 | - `FreeListAllocator`: Free list based allocator. Operations (both allocation and freeing) are O(length of free list), but it does coalesce adjacent free list nodes. Rounds allocations up to at least 2 words in size, but otherwise should use all the space. Even gaps from high alignment allocations end up in its free list for use by smaller allocations.
91 |   - `AssumeSingleThreaded`: 654 bytes.
92 |   - `LockedAllocator`: 775 bytes.
93 | - Builtin Rust allocator: 5034 bytes.
94 | 
95 | If you can afford the extra code size, use the builtin rust allocator: it is a much better allocator.
96 | 
97 | Supports only `wasm32`: other targets may build, but the allocators will not work on them (except `FailAllocator`, which errors on all platforms just fine).
98 | 
99 | # Performance
100 | 
101 | Asymptotic behavior is documented in the above status section along with code size.
102 | 
103 | These allocators are all optimized for simplicity (and thus code size and maintainability), and not runtime performance.
104 | Runtime performance data for them (and comparing to the built in allocator) has not been collected but would be interesting: feel free to send patches with benchmarks and/or performance data.
105 | 
106 | If you care about runtime performance, you should probably use the built in allocator, or at least carefully measure the impact of using any of these allocators on your code.
107 | 
108 | # Soundness
109 | 
110 | Soundness of the pointer manipulation in this library is currently unclear.
111 | Since [wasm32::memory_grow](https://doc.rust-lang.org/core/arch/wasm32/fn.memory_grow.html)
112 | does not return a pointer, there is no "original pointer", so the [Strict Provenance](https://doc.rust-lang.org/std/ptr/index.html#provenance) rules can not be followed.
113 | Attempting to determine if this library's use of pointers at least meets the requirements for being dereferenceable when it dereferences them is similarly challenging, as that [is defined as](https://doc.rust-lang.org/std/ptr/index.html#safety):
114 | 
115 | > dereferenceable: the memory range of the given size starting at the pointer must all be within the bounds of a single allocated object.
116 | 
117 | The definition of "allocated object" is not clear here.
118 | If the growable wasm heap counts as a single allocated object, then all these allocators are likely ok (in this aspect at least).
119 | However, if each call to `wasm32::memory_grow` is considered to create a new allocated object,
120 | then the free list coalescing in `FreeListAllocator` is unsound and could result in undefined behavior.
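For concreteness, the pointer in question is manufactured from an integer. The sketch below (a simplified restatement of what `LeakingPageAllocator::alloc` and the other allocators in this crate do, with a hypothetical function name, not an additional API) shows the step that has no "original pointer":

```rust
/// Sketch: allocate `delta` fresh wasm pages and return a pointer to their start.
#[cfg(target_arch = "wasm32")]
fn grow_and_get_pointer(delta: usize) -> *mut u8 {
    // memory_grow returns the previous size of the wasm heap in 64 KiB pages
    // (a plain integer), or usize::MAX on failure; it never returns a pointer.
    let previous_pages = core::arch::wasm32::memory_grow(0, delta);
    if previous_pages == usize::MAX {
        return core::ptr::null_mut();
    }
    // The new memory starts where the old memory ended, so a pointer to it is
    // conjured from that integer, with no original pointer to derive it from.
    (previous_pages * 65536) as *mut u8
}
```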
121 | 
122 | # Testing
123 | 
124 | There are some normal rust unit tests (run with `cargo test`),
125 | which use a test implementation of `MemoryGrower`.
126 | 
127 | There are also some [wasm-pack tests](https://rustwasm.github.io/wasm-bindgen/wasm-bindgen-test/usage.html) (run with `wasm-pack test --node lol_alloc`).
128 | 
129 | Size testing:
130 | 
131 | ```bash
132 | wasm-pack build --release example && wc -c example/pkg/lol_alloc_example_bg.wasm
133 | ```
134 | 
135 | # Change log
136 | 
137 | ## 0.4.1:
138 | 
139 | - [Avoid assertion messages in free_list_allocator in release builds](https://github.com/Craig-Macomber/lol_alloc/pull/7).
140 |   This should improve code size and performance, mainly when the size or alignment of allocations is not fully inlined by the compiler.
141 | 
142 | ## 0.4.0:
143 | 
144 | - Make the wasm-only implementations unavailable on other targets, instead of failing at runtime.
145 | - Hint [docs.rs](https://docs.rs/) to only build for wasm.
146 | 
147 | ## 0.3.0:
148 | 
149 | - Add `AssumeSingleThreaded`.
150 | - Remove unsound `Sync` implementations for `FreeListAllocator` and `LeakingAllocator`: use `AssumeSingleThreaded` and its unsafe `AssumeSingleThreaded::new` function instead. This puts all known safety issues in this library behind an unsafe function.
151 | - Remove the default `FreeListAllocator` type parameter from `LockedAllocator`.
152 | 
153 | ## 0.2.0:
154 | 
155 | - Add `LockedAllocator`.
156 | 
--------------------------------------------------------------------------------
/lol_alloc/src/free_list_allocator.rs:
--------------------------------------------------------------------------------
1 | use super::{DefaultGrower, MemoryGrower, PageCount, ERROR_PAGE_COUNT, PAGE_SIZE};
2 | use core::{
3 |     alloc::{GlobalAlloc, Layout},
4 |     cell::UnsafeCell,
5 |     ptr::{self, null_mut},
6 | };
7 | 
8 | /// A non-thread-safe allocator that uses a free list.
9 | /// Allocations and frees have runtime O(length of free list).
10 | ///
11 | /// The free list is kept sorted by address, and adjacent blocks of memory are coalesced when inserting new blocks.
12 | pub struct FreeListAllocator<T = DefaultGrower> {
13 |     free_list: UnsafeCell<*mut FreeListNode>,
14 |     grower: T,
15 | }
16 | 
17 | #[cfg(target_arch = "wasm32")]
18 | impl FreeListAllocator<DefaultGrower> {
19 |     pub const fn new() -> Self {
20 |         FreeListAllocator {
21 |             // Use a special value for empty, which is never valid otherwise.
22 |             free_list: UnsafeCell::new(EMPTY_FREE_LIST),
23 |             grower: DefaultGrower,
24 |         }
25 |     }
26 | }
27 | 
28 | const EMPTY_FREE_LIST: *mut FreeListNode = usize::MAX as *mut FreeListNode;
29 | 
30 | /// Stored at the beginning of each free segment.
31 | /// Note: It would be possible to fit this in 1 word (use the low bit to flag that case,
32 | /// then only use a second word if the allocation has size greater than 1 word)
33 | struct FreeListNode {
34 |     next: *mut FreeListNode,
35 |     size: usize,
36 | }
37 | 
38 | const NODE_SIZE: usize = core::mem::size_of::<FreeListNode>();
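// Illustration (a sketch added for clarity, not in the original source): on wasm32,
// `next` and `size` are 4 bytes each, so NODE_SIZE is 8. A free block of, say, 48
// bytes starting at address A keeps its bookkeeping in its own first bytes:
//   A + 0 .. A + 4  : next - the next (lower-addressed) free block, or EMPTY_FREE_LIST
//   A + 4 .. A + 8  : size - 48, the whole block including this header
//   A + 8 .. A + 48 : unused until the block is allocated or coalesced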
39 | 
40 | // Safety: No one besides us has the raw pointer, so we can safely transfer the
41 | // FreeListAllocator to another thread.
42 | unsafe impl<T: Send> Send for FreeListAllocator<T> {}
43 | 
44 | unsafe impl<T: MemoryGrower> GlobalAlloc for FreeListAllocator<T> {
45 |     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
46 |         // This assumes PAGE_SIZE is always a multiple of the required alignment, which should be true for all practical use.
47 |         debug_assert!(PAGE_SIZE % layout.align() == 0);
48 | 
49 |         let size = full_size(layout);
50 |         let alignment = layout.align().max(NODE_SIZE);
51 |         let mut free_list: *mut *mut FreeListNode = self.free_list.get();
52 |         // search freelist
53 |         loop {
54 |             if *free_list == EMPTY_FREE_LIST {
55 |                 break;
56 |             }
57 |             // Try to allocate from end of block of free space.
58 |             let size_of_block = (**free_list).size;
59 |             let start_of_block = *free_list as usize;
60 |             let end_of_block = start_of_block + size_of_block;
61 |             if size < end_of_block {
62 |                 let position = multiple_below(end_of_block - size, alignment);
63 |                 if position >= start_of_block {
64 |                     // Compute if we need a node after used space due to alignment.
65 |                     let end_of_used = position + size;
66 |                     if end_of_used < end_of_block {
67 |                         // Insert new block
68 |                         let new_block = end_of_used as *mut FreeListNode;
69 |                         (*new_block).next = *free_list;
70 |                         (*new_block).size = end_of_block - end_of_used;
71 |                         *free_list = new_block;
72 |                         free_list = ptr::addr_of_mut!((*new_block).next);
73 |                     }
74 |                     if position == start_of_block {
75 |                         // Remove current node from free list.
76 |                         *free_list = (**free_list).next;
77 |                     } else {
78 |                         // Shrink free block
79 |                         (**free_list).size = position - start_of_block;
80 |                     }
81 | 
82 |                     let ptr = position as *mut u8;
83 |                     debug_assert!(ptr.align_offset(NODE_SIZE) == 0);
84 |                     debug_assert!(ptr.align_offset(layout.align()) == 0);
85 |                     return ptr;
86 |                 }
87 |             }
88 | 
89 |             free_list = ptr::addr_of_mut!((**free_list).next);
90 |         }
91 | 
92 |         // Failed to find space in the free list.
93 |         // So allocate more space, and allocate from that.
94 |         // Simplest way to do that is to grow the heap, "free" the new space, then recurse.
95 |         // This should never need to recurse more than once.
96 | 
97 |         let requested_bytes = round_up(size, PAGE_SIZE);
98 |         let previous_page_count = self
99 |             .grower
100 |             .memory_grow(PageCount(requested_bytes / PAGE_SIZE));
101 |         if previous_page_count == ERROR_PAGE_COUNT {
102 |             return null_mut();
103 |         }
104 | 
105 |         let ptr = previous_page_count.size_in_bytes() as *mut u8;
106 |         self.dealloc(
107 |             ptr,
108 |             Layout::from_size_align_unchecked(requested_bytes, PAGE_SIZE),
109 |         );
110 |         self.alloc(layout)
111 |     }
112 | 
113 |     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
114 |         debug_assert!(ptr.align_offset(NODE_SIZE) == 0);
115 |         let ptr = ptr as *mut FreeListNode;
116 |         let size = full_size(layout);
117 |         let after_new = offset_bytes(ptr, size); // Used to merge with next node if adjacent.
118 | 
119 |         let mut free_list: *mut *mut FreeListNode = self.free_list.get();
120 |         // Insert into freelist which is stored in order of descending pointers.
121 |         loop {
122 |             if *free_list == EMPTY_FREE_LIST {
123 |                 (*ptr).next = EMPTY_FREE_LIST;
124 |                 (*ptr).size = size;
125 |                 *free_list = ptr;
126 |                 return;
127 |             }
128 | 
129 |             if *free_list == after_new {
130 |                 // Merge new node into node after this one.
131 | 
132 |                 let new_size = size + (**free_list).size;
133 |                 let next = (**free_list).next;
134 |                 if next != EMPTY_FREE_LIST && offset_bytes(next, (*next).size) == ptr {
135 |                     // Merge into node before this one, as well as after it.
136 |                     (*next).size += new_size;
137 |                     // Since we are combining 2 existing nodes (with the new one in-between)
138 |                     // remove one from the list.
139 |                     *free_list = next;
140 |                     return;
141 |                 }
142 |                 // Edit node in free list, moving its location and updating its size.
143 |                 *free_list = ptr;
144 |                 (*ptr).size = new_size;
145 |                 (*ptr).next = next;
146 |                 return;
147 |             }
148 | 
149 |             if *free_list < ptr {
150 |                 // Merge onto end of current if adjacent
151 |                 if offset_bytes(*free_list, (**free_list).size) == ptr {
152 |                     // Merge into node before this one, as well as after it.
153 |                     (**free_list).size += size;
154 |                     // Since we are combining the new node into the end of an existing node, no pointer updates, just a size change.
155 |                     return;
156 |                 }
157 |                 // Create a new free list node
158 |                 (*ptr).next = *free_list;
159 |                 (*ptr).size = size;
160 |                 *free_list = ptr;
161 |                 return;
162 |             }
163 |             free_list = ptr::addr_of_mut!((**free_list).next);
164 |         }
165 |     }
166 | }
167 | 
168 | fn full_size(layout: Layout) -> usize {
169 |     let grown = layout.size().max(NODE_SIZE);
170 |     round_up(grown, NODE_SIZE)
171 | }
172 | 
173 | /// Round up value to the nearest multiple of increment, which must be a
174 | /// power of 2. If `value` is a multiple of increment, it is returned
175 | /// unchanged.
176 | fn round_up(value: usize, increment: usize) -> usize {
177 |     debug_assert!(increment.is_power_of_two());
178 | 
179 |     // Compute `value.div_ceil(increment) * increment`,
180 |     // in a way that takes advantage of the fact that `increment` is
181 |     // always a power of two to avoid using an integer divide, since that
182 |     // wouldn't always get optimized out.
183 |     multiple_below(value + (increment - 1), increment)
184 | }
185 | 
186 | /// Round down value to the nearest multiple of increment, which must be a
187 | /// power of 2. If `value` is a multiple of `increment`, it is returned
188 | /// unchanged.
189 | fn multiple_below(value: usize, increment: usize) -> usize {
190 |     debug_assert!(increment.is_power_of_two());
191 | 
192 |     // Compute `value / increment * increment` in a way
193 |     // that takes advantage of the fact that `increment` is always a power of
194 |     // two to avoid using an integer divide, since that wouldn't always get
195 |     // optimized out.
196 |     value & increment.wrapping_neg()
197 | }
198 | 
199 | unsafe fn offset_bytes(ptr: *mut FreeListNode, offset: usize) -> *mut FreeListNode {
200 |     (ptr as *mut u8).add(offset) as *mut FreeListNode
201 | }
202 | 
203 | #[cfg(test)]
204 | mod tests {
205 |     use super::{
206 |         multiple_below, round_up, FreeListAllocator, MemoryGrower, PageCount, EMPTY_FREE_LIST,
207 |         NODE_SIZE,
208 |     };
209 |     use crate::{ERROR_PAGE_COUNT, PAGE_SIZE};
210 |     use alloc::{boxed::Box, vec::Vec};
211 |     use core::{
212 |         alloc::{GlobalAlloc, Layout},
213 |         cell::{RefCell, UnsafeCell},
214 |         ptr,
215 |     };
216 | 
217 |     struct Allocation {
218 |         layout: Layout,
219 |         ptr: *mut u8,
220 |     }
221 | 
222 |     #[derive(Clone, Copy)]
223 |     #[repr(C, align(65536))] // align does not appear to work with the PAGE_SIZE constant
224 |     struct Page([u8; PAGE_SIZE]);
225 | 
226 |     struct Slabby {
227 |         /// Test array of pages, sequential in memory.
228 |         pages: Box<[Page]>,
229 |         used_pages: usize,
230 |     }
231 | 
232 |     impl Slabby {
233 |         fn new() -> Self {
234 |             Slabby {
235 |                 pages: vec![Page([0; PAGE_SIZE]); 1000].into_boxed_slice(),
236 |                 used_pages: 0,
237 |             }
238 |         }
239 |     }
240 | 
241 |     impl MemoryGrower for RefCell<Slabby> {
242 |         fn memory_grow(&self, delta: PageCount) -> PageCount {
243 |             let mut slabby = self.borrow_mut();
244 |             let old_ptr = ptr::addr_of!(slabby.pages[slabby.used_pages]);
245 |             if slabby.used_pages + delta.0 > slabby.pages.len() {
246 |                 return ERROR_PAGE_COUNT;
247 |             }
248 |             slabby.used_pages += delta.0;
249 |             debug_assert!(old_ptr.align_offset(PAGE_SIZE) == 0);
250 |             PageCount(old_ptr as usize / PAGE_SIZE)
251 |         }
252 |     }
253 | 
254 |     #[derive(Debug, PartialEq, Eq)]
255 |     struct FreeListContent {
256 |         size: usize,
257 |         /// Offset from beginning of Slabby.
258 |         offset: usize,
259 |     }
260 | 
261 |     /// Enumerate and validate free list content
262 |     fn free_list_content(allocator: &FreeListAllocator<RefCell<Slabby>>) -> Vec<FreeListContent> {
263 |         let mut out: Vec<FreeListContent> = vec![];
264 |         let grower = allocator.grower.borrow();
265 |         let base = grower.pages.as_ptr() as usize;
266 |         unsafe {
267 |             let mut list = *(allocator.free_list.get());
268 |             while list != EMPTY_FREE_LIST {
269 |                 debug_assert_eq!(list.align_offset(NODE_SIZE), 0);
270 |                 debug_assert!(list as usize >= base);
271 |                 debug_assert!(
272 |                     (list as usize)
273 |                         < ptr::addr_of!(grower.pages[grower.used_pages]) as usize + PAGE_SIZE
274 |                 );
275 |                 let offset = list as usize - base;
276 |                 let size = (*list).size;
277 |                 debug_assert!(offset + size <= grower.used_pages * PAGE_SIZE);
278 |                 debug_assert!(size >= NODE_SIZE);
279 |                 match out.last() {
280 |                     Some(previous) => {
281 |                         debug_assert!(
282 |                             previous.offset > offset + size,
283 |                             "Free list nodes should not overlap or be adjacent"
284 |                         );
285 |                     }
286 |                     None => {}
287 |                 }
288 |                 out.push(FreeListContent { size, offset });
289 |                 list = (*list).next;
290 |             }
291 |         }
292 |         out
293 |     }
294 | 
295 |     #[test]
296 |     fn round_up_works() {
297 |         assert_eq!(round_up(0, 8), 0);
298 |         assert_eq!(round_up(7, 8), 8);
299 |         assert_eq!(round_up(8, 8), 8);
300 |         assert_eq!(round_up(9, 8), 16);
301 |         assert_eq!(round_up(15, 8), 16);
302 |         assert_eq!(round_up(16, 8), 16);
303 | 
304 |         assert_eq!(round_up(127, 128), 128);
305 |         assert_eq!(round_up(100223, 128), 100224);
306 |     }
307 | 
308 |     #[test]
309 |     fn multiple_below_works() {
310 |         assert_eq!(multiple_below(0, 8), 0);
311 |         assert_eq!(multiple_below(7, 8), 0);
312 |         assert_eq!(multiple_below(8, 8), 8);
313 |         assert_eq!(multiple_below(9, 8), 8);
314 |         assert_eq!(multiple_below(15, 8), 8);
315 |         assert_eq!(multiple_below(16, 8), 16);
316 | 
317 |         assert_eq!(multiple_below(127, 128), 0);
318 |         assert_eq!(multiple_below(100223, 128), 100096);
319 |     }
320 | 
321 |     /// Test that performing frees populates the free list, correctly coalescing adjacent blocks.
322 |     #[test]
323 |     fn populates_free_list() {
324 |         let allocator = FreeListAllocator {
325 |             free_list: UnsafeCell::new(EMPTY_FREE_LIST),
326 |             grower: RefCell::new(Slabby::new()),
327 |         };
328 |         allocator.grower.borrow_mut().used_pages = 1; // Fake used pages large enough that we don't fail free list validation.
329 | assert_eq!(free_list_content(&allocator), []); 330 | unsafe { 331 | let free = |alloc: FreeListContent| { 332 | allocator.dealloc( 333 | (allocator.grower.borrow().pages.as_ptr() as *mut u8).add(alloc.offset), 334 | Layout::from_size_align(alloc.size, 1).unwrap(), 335 | ) 336 | }; 337 | assert_eq!(free_list_content(&allocator), []); 338 | 339 | free(FreeListContent { 340 | size: NODE_SIZE, 341 | offset: NODE_SIZE * 3, 342 | }); 343 | assert_eq!( 344 | free_list_content(&allocator), 345 | [FreeListContent { 346 | size: NODE_SIZE, 347 | offset: NODE_SIZE * 3, 348 | }] 349 | ); 350 | 351 | // Free before, not contiguous 352 | free(FreeListContent { 353 | size: NODE_SIZE, 354 | offset: NODE_SIZE, 355 | }); 356 | assert_eq!( 357 | free_list_content(&allocator), 358 | [ 359 | FreeListContent { 360 | size: NODE_SIZE, 361 | offset: NODE_SIZE * 3, 362 | }, 363 | FreeListContent { 364 | size: NODE_SIZE, 365 | offset: NODE_SIZE, 366 | } 367 | ] 368 | ); 369 | 370 | // Free before, contiguous 371 | free(FreeListContent { 372 | size: NODE_SIZE, 373 | offset: 0, 374 | }); 375 | assert_eq!( 376 | free_list_content(&allocator), 377 | [ 378 | FreeListContent { 379 | size: NODE_SIZE, 380 | offset: NODE_SIZE * 3, 381 | }, 382 | FreeListContent { 383 | size: NODE_SIZE * 2, 384 | offset: 0, 385 | } 386 | ] 387 | ); 388 | 389 | // Free between, contiguous 390 | free(FreeListContent { 391 | size: NODE_SIZE, 392 | offset: NODE_SIZE * 2, 393 | }); 394 | assert_eq!( 395 | free_list_content(&allocator), 396 | [FreeListContent { 397 | size: NODE_SIZE * 4, 398 | offset: 0, 399 | },] 400 | ); 401 | 402 | // Free after, contiguous 403 | free(FreeListContent { 404 | size: NODE_SIZE, 405 | offset: NODE_SIZE * 4, 406 | }); 407 | assert_eq!( 408 | free_list_content(&allocator), 409 | [FreeListContent { 410 | size: NODE_SIZE * 5, 411 | offset: 0, 412 | },] 413 | ); 414 | 415 | // Free after, not contiguous 416 | free(FreeListContent { 417 | size: NODE_SIZE, 418 | offset: NODE_SIZE * 6, 419 | }); 420 | assert_eq!( 421 | free_list_content(&allocator), 422 | [ 423 | FreeListContent { 424 | size: NODE_SIZE, 425 | offset: NODE_SIZE * 6, 426 | }, 427 | FreeListContent { 428 | size: NODE_SIZE * 5, 429 | offset: 0, 430 | } 431 | ] 432 | ); 433 | } 434 | } 435 | 436 | #[test] 437 | fn it_works() { 438 | let allocator = FreeListAllocator { 439 | free_list: UnsafeCell::new(EMPTY_FREE_LIST), 440 | grower: RefCell::new(Slabby::new()), 441 | }; 442 | assert_eq!(free_list_content(&allocator), []); 443 | unsafe { 444 | let allocate = |size: usize, align: usize| { 445 | let layout = Layout::from_size_align(size, align).unwrap(); 446 | Allocation { 447 | layout, 448 | ptr: allocator.alloc(layout), 449 | } 450 | }; 451 | let free = |alloc: Allocation| allocator.dealloc(alloc.ptr, alloc.layout); 452 | let alloc = allocate(1, 1); 453 | assert_eq!(allocator.grower.borrow().used_pages, 1); 454 | assert_eq!( 455 | free_list_content(&allocator), 456 | [FreeListContent { 457 | size: PAGE_SIZE - NODE_SIZE, 458 | offset: 0, // Expect allocation at the end of first page. 
459 | }] 460 | ); 461 | // Merge into end of existing chunk 462 | free(alloc); 463 | assert_eq!( 464 | free_list_content(&allocator), 465 | [FreeListContent { 466 | size: PAGE_SIZE, 467 | offset: 0, 468 | }] 469 | ); 470 | 471 | // Allocate small value to impact alignment 472 | let alloc = allocate(1, 1); 473 | // Allocate larger aligned value to cause a hole after it 474 | let alloc_big = allocate(NODE_SIZE * 2, NODE_SIZE * 2); 475 | assert_eq!( 476 | free_list_content(&allocator), 477 | [ 478 | FreeListContent { 479 | size: NODE_SIZE, 480 | offset: PAGE_SIZE - NODE_SIZE * 2, 481 | }, 482 | FreeListContent { 483 | size: PAGE_SIZE - NODE_SIZE * 4, 484 | offset: 0, 485 | }, 486 | ] 487 | ); 488 | 489 | // Free second allocation, causing 3 way join 490 | free(alloc_big); 491 | assert_eq!( 492 | free_list_content(&allocator), 493 | [FreeListContent { 494 | size: PAGE_SIZE - NODE_SIZE, 495 | offset: 0, 496 | }] 497 | ); 498 | 499 | // Multi-page allocation 500 | assert_eq!(allocator.grower.borrow().used_pages, 1); 501 | let multi_page = allocate(PAGE_SIZE + 1, 1); 502 | assert_eq!(allocator.grower.borrow().used_pages, 3); 503 | assert_eq!( 504 | free_list_content(&allocator), 505 | [ 506 | FreeListContent { 507 | size: PAGE_SIZE - NODE_SIZE, 508 | offset: PAGE_SIZE, 509 | }, 510 | FreeListContent { 511 | size: PAGE_SIZE - NODE_SIZE, 512 | offset: 0, 513 | } 514 | ] 515 | ); 516 | 517 | // Free everything 518 | free(alloc); 519 | assert_eq!( 520 | free_list_content(&allocator), 521 | [FreeListContent { 522 | size: PAGE_SIZE * 2 - NODE_SIZE, 523 | offset: 0, 524 | }] 525 | ); 526 | free(multi_page); 527 | assert_eq!( 528 | free_list_content(&allocator), 529 | [FreeListContent { 530 | size: PAGE_SIZE * 3, 531 | offset: 0, 532 | }] 533 | ); 534 | } 535 | } 536 | 537 | #[test] 538 | fn fuzz() { 539 | use rand::Rng; 540 | use rand_core::SeedableRng; 541 | use rand_pcg::Pcg32; 542 | 543 | let mut rng = Pcg32::seed_from_u64(0); 544 | 545 | for _ in 0..100 { 546 | let allocator = FreeListAllocator { 547 | free_list: UnsafeCell::new(EMPTY_FREE_LIST), 548 | grower: RefCell::new(Slabby::new()), 549 | }; 550 | 551 | let allocate = |size: usize, align: usize| { 552 | let layout = Layout::from_size_align(size, align).unwrap(); 553 | let ptr = unsafe { allocator.alloc(layout) }; 554 | assert!(!ptr.is_null(), "Slab Full"); 555 | Allocation { layout, ptr } 556 | }; 557 | let free = |alloc: Allocation| unsafe { allocator.dealloc(alloc.ptr, alloc.layout) }; 558 | 559 | let mut allocations = vec![]; 560 | for _ in 0..5000 { 561 | // Randomly free some allocations. 
562 | while !allocations.is_empty() { 563 | if rng.gen_bool(0.45) { 564 | let alloc = allocations.swap_remove(rng.gen_range(0..allocations.len())); 565 | free(alloc); 566 | } else { 567 | break; 568 | } 569 | } 570 | // Do a random small allocation 571 | let size = rng.gen_range(1..100); 572 | allocations.push(allocate(size, 1 << rng.gen_range(0..7))); 573 | if rng.gen_bool(0.05) { 574 | // Do a random large allocation 575 | let size = rng.gen_range(1..(PAGE_SIZE * 10)); 576 | allocations.push(allocate(size, 1 << rng.gen_range(0..16))); 577 | } 578 | } 579 | free_list_content(&allocator); 580 | while !allocations.is_empty() { 581 | let alloc = allocations.swap_remove(rng.gen_range(0..allocations.len())); 582 | free(alloc); 583 | } 584 | assert_eq!( 585 | free_list_content(&allocator), 586 | [FreeListContent { 587 | size: allocator.grower.borrow().used_pages * PAGE_SIZE, 588 | offset: 0, 589 | }] 590 | ); 591 | } 592 | } 593 | } 594 | --------------------------------------------------------------------------------